├── LICENSE ├── README.md ├── lesson_0 ├── 1.png ├── 2.png ├── 3.png ├── 4.png ├── 5.png ├── 6.png ├── 7.png ├── 8.png └── pytorch.ipynb ├── lesson_1 ├── 1.png ├── 2.png ├── 3.png └── Dogs and cats.ipynb ├── lesson_2 ├── 1.png ├── 2.png ├── 3.png ├── 4.png └── 5.png ├── lesson_3 ├── Dogs and cats.ipynb └── GPU_AMP.ipynb ├── lesson_4 ├── 1.png └── 2.png ├── lesson_5 └── Dogs and cats.ipynb ├── lesson_6 ├── Dogs and cats.ipynb └── config.yml ├── lesson_7 ├── arch.py ├── archs │ ├── __pycache__ │ │ └── psevdo_resnet.cpython-310.pyc │ └── psevdo_resnet.py ├── optimizers.py ├── options │ ├── config.yml │ └── config2.yml ├── sh │ └── start.sh └── train.py ├── lesson_8 ├── 1.png ├── Animals-10.ipynb └── Dogs and cats.py └── lesson_9 ├── 1.png ├── 2.png ├── 3.png ├── 4.png └── Segmentation_Unet.ipynb /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Mikhail Gorokhoov 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # youtube_pytorch_lessons 2 | Recordings and code from season one of my YouTube PyTorch lessons 3 | 4 | Link to the YouTube channel: https://www.youtube.com/@magorokhoov 5 | 6 | ## Description 7 | 0. MNIST classifier 8 | 1. Dogs and Cats classifier (custom dataset) 9 | 2. Optimizers (whiteboard notes) 10 | 3. Writing VGG from scratch 11 | 4. Moving training to the GPU with AMP support (float16; automatic mixed precision) - see the sketch after this list 12 | 5. Writing a ResNet-like network from scratch 13 | 6. YAML configurator 14 | 7. Multi-file project 15 | 8. Transfer learning - uses pretrained vgg/resnet weights 16 | 9. Unet. Simple segmentation 17 | 10. Style transfer (code not published)
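The AMP workflow in item 4 is the same `autocast` + `GradScaler` pattern that `lesson_1/Dogs and cats.ipynb` uses further down this dump. Here is a minimal, self-contained sketch of one such training step; the tiny stand-in model, the Adam settings, and the random batch are illustrative placeholders only, not the lesson's actual `ConvNet` or `DataLoader`:

```python
import torch
import torch.nn as nn

device = 'cuda' if torch.cuda.is_available() else 'cpu'

model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 128 * 128, 2)).to(device)  # stand-in for the lesson's ConvNet
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scaler = torch.cuda.amp.GradScaler(enabled=(device == 'cuda'))

img = torch.rand(16, 3, 128, 128, device=device)   # random batch instead of a real DataLoader
label = torch.randint(0, 2, (16,), device=device)

optimizer.zero_grad()
with torch.cuda.amp.autocast(enabled=(device == 'cuda')):  # forward pass runs in float16 where safe
    pred = model(img)
    loss = loss_fn(pred, label)
scaler.scale(loss).backward()  # scale the loss so float16 gradients don't underflow
scaler.step(optimizer)         # unscales gradients, then calls optimizer.step()
scaler.update()                # adjusts the scale factor for the next iteration
```

On a machine without CUDA, `enabled=False` turns both `autocast` and `GradScaler` into no-ops, so the same step runs unchanged in plain float32.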
18 | -------------------------------------------------------------------------------- /lesson_0/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_0/1.png -------------------------------------------------------------------------------- /lesson_0/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_0/2.png -------------------------------------------------------------------------------- /lesson_0/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_0/3.png -------------------------------------------------------------------------------- /lesson_0/4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_0/4.png -------------------------------------------------------------------------------- /lesson_0/5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_0/5.png -------------------------------------------------------------------------------- /lesson_0/6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_0/6.png -------------------------------------------------------------------------------- /lesson_0/7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_0/7.png -------------------------------------------------------------------------------- /lesson_0/8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_0/8.png -------------------------------------------------------------------------------- /lesson_1/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_1/1.png -------------------------------------------------------------------------------- /lesson_1/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_1/2.png -------------------------------------------------------------------------------- /lesson_1/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_1/3.png -------------------------------------------------------------------------------- /lesson_1/Dogs and cats.ipynb:
-------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "b792b114", 7 | "metadata": {}, 8 | "outputs": [ 9 | { 10 | "name": "stderr", 11 | "output_type": "stream", 12 | "text": [ 13 | "/home/magorokhoov/pyenv/py310/lib/python3.10/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", 14 | " from .autonotebook import tqdm as notebook_tqdm\n" 15 | ] 16 | } 17 | ], 18 | "source": [ 19 | "import torch\n", 20 | "import torch.nn as nn\n", 21 | "import torch.nn.functional as F\n", 22 | "\n", 23 | "import torchvision as tv\n", 24 | "\n", 25 | "import os\n", 26 | "import cv2\n", 27 | "import numpy as np\n", 28 | "import matplotlib.pyplot as plt\n", 29 | "from tqdm.autonotebook import tqdm\n", 30 | "\n", 31 | "from torch.cuda.amp import autocast, GradScaler" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": null, 37 | "id": "f1bd7d1c", 38 | "metadata": {}, 39 | "outputs": [], 40 | "source": [] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": null, 45 | "id": "99e9a0ac", 46 | "metadata": {}, 47 | "outputs": [], 48 | "source": [] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": 2, 53 | "id": "8eabb6a7", 54 | "metadata": {}, 55 | "outputs": [], 56 | "source": [ 57 | "class Dataset2class(torch.utils.data.Dataset):\n", 58 | " def __init__(self, path_dir1:str, path_dir2:str):\n", 59 | " super().__init__()\n", 60 | " \n", 61 | " self.path_dir1 = path_dir1\n", 62 | " self.path_dir2 = path_dir2\n", 63 | " \n", 64 | " self.dir1_list = sorted(os.listdir(path_dir1))\n", 65 | " self.dir2_list = sorted(os.listdir(path_dir2))\n", 66 | " \n", 67 | " def __len__(self):\n", 68 | " return len(self.dir1_list) + len(self.dir2_list)\n", 69 | " \n", 70 | " def __getitem__(self, idx):\n", 71 | " \n", 72 | " if idx < len(self.dir1_list):\n", 73 | " class_id = 0\n", 74 | " img_path = os.path.join(self.path_dir1, self.dir1_list[idx])\n", 75 | " else:\n", 76 | " class_id = 1\n", 77 | " idx -= len(self.dir1_list)\n", 78 | " img_path = os.path.join(self.path_dir2, self.dir2_list[idx])\n", 79 | " \n", 80 | " img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n", 81 | " img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n", 82 | " img = img.astype(np.float32)\n", 83 | " img = img/255.0\n", 84 | " \n", 85 | " img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)\n", 86 | " img = img.transpose((2, 0, 1))\n", 87 | " \n", 88 | " t_img = torch.from_numpy(img)\n", 89 | " t_class_id = torch.tensor(class_id)\n", 90 | " \n", 91 | " return {'img': t_img, 'label': t_class_id}\n", 92 | " " 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": 3, 98 | "id": "ae43db49", 99 | "metadata": {}, 100 | "outputs": [], 101 | "source": [ 102 | "train_dogs_path = './dataset/training_set/dogs/'\n", 103 | "train_cats_path = './dataset/training_set/cats/'\n", 104 | "test_dogs_path = './dataset/test_set/dogs/'\n", 105 | "test_cats_path = './dataset/test_set/cats/'\n", 106 | "\n", 107 | "train_ds_catsdogs = Dataset2class(train_dogs_path, train_cats_path)\n", 108 | "test_ds_catsdogs = Dataset2class(test_dogs_path, test_cats_path)" 109 | ] 110 | }, 111 | { 112 | "cell_type": "code", 113 | "execution_count": 4, 114 | "id": "a169d748", 115 | "metadata": {}, 116 | "outputs": [ 117 | { 118 | "data": { 119 | "text/plain": [ 120 | "8000" 121 | ] 122 | }, 123 | 
"execution_count": 4, 124 | "metadata": {}, 125 | "output_type": "execute_result" 126 | } 127 | ], 128 | "source": [ 129 | "len(train_ds_catsdogs)" 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": 5, 135 | "id": "683ec6a5", 136 | "metadata": {}, 137 | "outputs": [ 138 | { 139 | "data": { 140 | "text/plain": [ 141 | "2000" 142 | ] 143 | }, 144 | "execution_count": 5, 145 | "metadata": {}, 146 | "output_type": "execute_result" 147 | } 148 | ], 149 | "source": [ 150 | "len(test_ds_catsdogs)" 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "execution_count": 6, 156 | "id": "bc34837f", 157 | "metadata": {}, 158 | "outputs": [], 159 | "source": [ 160 | "batch_size = 16\n", 161 | "\n", 162 | "train_loader = torch.utils.data.DataLoader(\n", 163 | " train_ds_catsdogs, shuffle=True, \n", 164 | " batch_size=batch_size, num_workers=1, drop_last=True\n", 165 | ")\n", 166 | "test_loader = torch.utils.data.DataLoader(\n", 167 | " train_ds_catsdogs, shuffle=True,\n", 168 | " batch_size=batch_size, num_workers=1, drop_last=False\n", 169 | ")" 170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "execution_count": 7, 175 | "id": "64358fde", 176 | "metadata": {}, 177 | "outputs": [], 178 | "source": [ 179 | "class ConvNet(nn.Module):\n", 180 | " def __init__(self):\n", 181 | " super().__init__()\n", 182 | " \n", 183 | " self.act = nn.LeakyReLU(0.2)\n", 184 | " self.maxpool = nn.MaxPool2d(2,2)\n", 185 | " self.conv0 = nn.Conv2d(3, 128, 3, stride=1, padding=0)\n", 186 | " self.conv1 = nn.Conv2d(128, 128, 3, stride=1, padding=0)\n", 187 | " self.conv2 = nn.Conv2d(128, 128, 3, stride=1, padding=0)\n", 188 | " self.conv3 = nn.Conv2d(128, 256, 3, stride=1, padding=0)\n", 189 | " \n", 190 | " self.adaptivepool = nn.AdaptiveAvgPool2d((1,1))\n", 191 | " self.flatten = nn.Flatten()\n", 192 | " self.linear1 = nn.Linear(256, 20)\n", 193 | " self.linear2 = nn.Linear(20, 2)\n", 194 | " \n", 195 | " def forward(self, x):\n", 196 | " \n", 197 | "\n", 198 | " out = self.conv0(x)\n", 199 | " out = self.act(out)\n", 200 | " out = self.maxpool(out)\n", 201 | "\n", 202 | " out = self.conv1(out)\n", 203 | " out = self.act(out)\n", 204 | " out = self.maxpool(out)\n", 205 | "\n", 206 | " out = self.conv2(out)\n", 207 | " out = self.act(out)\n", 208 | " out = self.maxpool(out)\n", 209 | "\n", 210 | " out = self.conv3(out)\n", 211 | " out = self.act(out)\n", 212 | " \n", 213 | " out = self.adaptivepool(out)\n", 214 | " out = self.flatten(out)\n", 215 | " out = self.linear1(out)\n", 216 | " out = self.act(out)\n", 217 | " out = self.linear2(out)\n", 218 | "\n", 219 | " return out\n", 220 | " " 221 | ] 222 | }, 223 | { 224 | "cell_type": "code", 225 | "execution_count": 8, 226 | "id": "d27c8db2", 227 | "metadata": {}, 228 | "outputs": [], 229 | "source": [ 230 | "def count_parameters(model):\n", 231 | " return sum(p.numel() for p in model.parameters() if p.requires_grad)" 232 | ] 233 | }, 234 | { 235 | "cell_type": "code", 236 | "execution_count": 9, 237 | "id": "e155790d", 238 | "metadata": {}, 239 | "outputs": [], 240 | "source": [ 241 | "model = ConvNet()" 242 | ] 243 | }, 244 | { 245 | "cell_type": "code", 246 | "execution_count": 10, 247 | "id": "9e9ce7af", 248 | "metadata": {}, 249 | "outputs": [ 250 | { 251 | "data": { 252 | "text/plain": [ 253 | "ConvNet(\n", 254 | " (act): LeakyReLU(negative_slope=0.2)\n", 255 | " (maxpool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n", 256 | " (conv0): Conv2d(3, 128, kernel_size=(3, 3), stride=(1, 1))\n", 257 | " 
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1))\n", 258 | " (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1))\n", 259 | " (conv3): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1))\n", 260 | " (adaptivepool): AdaptiveAvgPool2d(output_size=(1, 1))\n", 261 | " (flatten): Flatten(start_dim=1, end_dim=-1)\n", 262 | " (linear1): Linear(in_features=256, out_features=20, bias=True)\n", 263 | " (linear2): Linear(in_features=20, out_features=2, bias=True)\n", 264 | ")" 265 | ] 266 | }, 267 | "execution_count": 10, 268 | "metadata": {}, 269 | "output_type": "execute_result" 270 | } 271 | ], 272 | "source": [ 273 | "model" 274 | ] 275 | }, 276 | { 277 | "cell_type": "code", 278 | "execution_count": 11, 279 | "id": "e5b0ff4f", 280 | "metadata": {}, 281 | "outputs": [ 282 | { 283 | "data": { 284 | "text/plain": [ 285 | "599102" 286 | ] 287 | }, 288 | "execution_count": 11, 289 | "metadata": {}, 290 | "output_type": "execute_result" 291 | } 292 | ], 293 | "source": [ 294 | "count_parameters(model)" 295 | ] 296 | }, 297 | { 298 | "cell_type": "code", 299 | "execution_count": 12, 300 | "id": "05bbec58", 301 | "metadata": {}, 302 | "outputs": [], 303 | "source": [ 304 | "for sample in train_loader:\n", 305 | " img = sample['img']\n", 306 | " label = sample['label']\n", 307 | " model(img)\n", 308 | " break" 309 | ] 310 | }, 311 | { 312 | "cell_type": "code", 313 | "execution_count": null, 314 | "id": "e0f15479", 315 | "metadata": {}, 316 | "outputs": [], 317 | "source": [] 318 | }, 319 | { 320 | "cell_type": "code", 321 | "execution_count": 13, 322 | "id": "14d2b0da", 323 | "metadata": {}, 324 | "outputs": [], 325 | "source": [ 326 | "loss_fn = nn.CrossEntropyLoss()\n", 327 | "optimizer = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))" 328 | ] 329 | }, 330 | { 331 | "cell_type": "code", 332 | "execution_count": 14, 333 | "id": "c995c23b", 334 | "metadata": {}, 335 | "outputs": [], 336 | "source": [ 337 | "def accuracy(pred, label):\n", 338 | " answer = F.softmax(pred.detach()).numpy().argmax(1) == label.numpy().argmax(1) \n", 339 | " return answer.mean()" 340 | ] 341 | }, 342 | { 343 | "cell_type": "code", 344 | "execution_count": 15, 345 | "id": "85413171", 346 | "metadata": {}, 347 | "outputs": [], 348 | "source": [ 349 | "device = 'cuda' # if torch.cuda.is_available() else 'cpu'\n", 350 | "model = model.to(device)\n", 351 | "loss_fn = loss_fn.to(device)" 352 | ] 353 | }, 354 | { 355 | "cell_type": "code", 356 | "execution_count": 16, 357 | "id": "00ad73b6", 358 | "metadata": {}, 359 | "outputs": [], 360 | "source": [ 361 | "use_amp = True\n", 362 | "scaler = torch.cuda.amp.GradScaler()" 363 | ] 364 | }, 365 | { 366 | "cell_type": "code", 367 | "execution_count": null, 368 | "id": "c9e82536", 369 | "metadata": {}, 370 | "outputs": [], 371 | "source": [ 372 | "torch.backends.cudnn.benchmark = True\n", 373 | "torch.backends.cudnn.deterministic = False" 374 | ] 375 | }, 376 | { 377 | "cell_type": "code", 378 | "execution_count": 17, 379 | "id": "b90f22ff", 380 | "metadata": {}, 381 | "outputs": [ 382 | { 383 | "name": "stderr", 384 | "output_type": "stream", 385 | "text": [ 386 | " 0%| | 0/500 [00:00\u001b[0;34m()\u001b[0m\n\u001b[1;32m 4\u001b[0m loss_val \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m\n\u001b[1;32m 5\u001b[0m acc_val \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m\n\u001b[0;32m----> 6\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m sample \u001b[38;5;129;01min\u001b[39;00m (pbar \u001b[38;5;241m:=\u001b[39m 
tqdm(train_loader)):\n\u001b[1;32m 7\u001b[0m img, label \u001b[38;5;241m=\u001b[39m sample[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mimg\u001b[39m\u001b[38;5;124m'\u001b[39m], sample[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mlabel\u001b[39m\u001b[38;5;124m'\u001b[39m]\n\u001b[1;32m 8\u001b[0m label \u001b[38;5;241m=\u001b[39m F\u001b[38;5;241m.\u001b[39mone_hot(label, \u001b[38;5;241m2\u001b[39m)\u001b[38;5;241m.\u001b[39mfloat()\n", 459 | "File \u001b[0;32m~/pyenv/py310/lib/python3.10/site-packages/tqdm/std.py:1195\u001b[0m, in \u001b[0;36mtqdm.__iter__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1192\u001b[0m time \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_time\n\u001b[1;32m 1194\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m-> 1195\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m obj \u001b[38;5;129;01min\u001b[39;00m iterable:\n\u001b[1;32m 1196\u001b[0m \u001b[38;5;28;01myield\u001b[39;00m obj\n\u001b[1;32m 1197\u001b[0m \u001b[38;5;66;03m# Update and possibly print the progressbar.\u001b[39;00m\n\u001b[1;32m 1198\u001b[0m \u001b[38;5;66;03m# Note: does not call self.update(1) for speed optimisation.\u001b[39;00m\n", 460 | "File \u001b[0;32m~/pyenv/py310/lib/python3.10/site-packages/torch/utils/data/dataloader.py:652\u001b[0m, in \u001b[0;36m_BaseDataLoaderIter.__next__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 649\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_sampler_iter \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 650\u001b[0m \u001b[38;5;66;03m# TODO(https://github.com/pytorch/pytorch/issues/76750)\u001b[39;00m\n\u001b[1;32m 651\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_reset() \u001b[38;5;66;03m# type: ignore[call-arg]\u001b[39;00m\n\u001b[0;32m--> 652\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_next_data\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 653\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_num_yielded \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[1;32m 654\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_dataset_kind \u001b[38;5;241m==\u001b[39m _DatasetKind\u001b[38;5;241m.\u001b[39mIterable \u001b[38;5;129;01mand\u001b[39;00m \\\n\u001b[1;32m 655\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_IterableDataset_len_called \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m \\\n\u001b[1;32m 656\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_num_yielded \u001b[38;5;241m>\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_IterableDataset_len_called:\n", 461 | "File \u001b[0;32m~/pyenv/py310/lib/python3.10/site-packages/torch/utils/data/dataloader.py:1330\u001b[0m, in \u001b[0;36m_MultiProcessingDataLoaderIter._next_data\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1327\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_process_data(data)\n\u001b[1;32m 1329\u001b[0m \u001b[38;5;28;01massert\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_shutdown \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_tasks_outstanding 
\u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m\n\u001b[0;32m-> 1330\u001b[0m idx, data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_get_data\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1331\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_tasks_outstanding \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[1;32m 1332\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_dataset_kind \u001b[38;5;241m==\u001b[39m _DatasetKind\u001b[38;5;241m.\u001b[39mIterable:\n\u001b[1;32m 1333\u001b[0m \u001b[38;5;66;03m# Check for _IterableDatasetStopIteration\u001b[39;00m\n", 462 | "File \u001b[0;32m~/pyenv/py310/lib/python3.10/site-packages/torch/utils/data/dataloader.py:1296\u001b[0m, in \u001b[0;36m_MultiProcessingDataLoaderIter._get_data\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 1292\u001b[0m \u001b[38;5;66;03m# In this case, `self._data_queue` is a `queue.Queue`,. But we don't\u001b[39;00m\n\u001b[1;32m 1293\u001b[0m \u001b[38;5;66;03m# need to call `.task_done()` because we don't use `.join()`.\u001b[39;00m\n\u001b[1;32m 1294\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 1295\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[0;32m-> 1296\u001b[0m success, data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_try_get_data\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1297\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m success:\n\u001b[1;32m 1298\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m data\n", 463 | "File \u001b[0;32m~/pyenv/py310/lib/python3.10/site-packages/torch/utils/data/dataloader.py:1134\u001b[0m, in \u001b[0;36m_MultiProcessingDataLoaderIter._try_get_data\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 1121\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_try_get_data\u001b[39m(\u001b[38;5;28mself\u001b[39m, timeout\u001b[38;5;241m=\u001b[39m_utils\u001b[38;5;241m.\u001b[39mMP_STATUS_CHECK_INTERVAL):\n\u001b[1;32m 1122\u001b[0m \u001b[38;5;66;03m# Tries to fetch data from `self._data_queue` once for a given timeout.\u001b[39;00m\n\u001b[1;32m 1123\u001b[0m \u001b[38;5;66;03m# This can also be used as inner loop of fetching without timeout, with\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 1131\u001b[0m \u001b[38;5;66;03m# Returns a 2-tuple:\u001b[39;00m\n\u001b[1;32m 1132\u001b[0m \u001b[38;5;66;03m# (bool: whether successfully get data, any: data if successful else None)\u001b[39;00m\n\u001b[1;32m 1133\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m-> 1134\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_data_queue\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1135\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m (\u001b[38;5;28;01mTrue\u001b[39;00m, data)\n\u001b[1;32m 1136\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 1137\u001b[0m \u001b[38;5;66;03m# At timeout and error, we manually check whether any worker has\u001b[39;00m\n\u001b[1;32m 1138\u001b[0m \u001b[38;5;66;03m# failed. 
Note that this is the only mechanism for Windows to detect\u001b[39;00m\n\u001b[1;32m 1139\u001b[0m \u001b[38;5;66;03m# worker failures.\u001b[39;00m\n", 464 | "File \u001b[0;32m/usr/lib/python3.10/multiprocessing/queues.py:122\u001b[0m, in \u001b[0;36mQueue.get\u001b[0;34m(self, block, timeout)\u001b[0m\n\u001b[1;32m 120\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_rlock\u001b[38;5;241m.\u001b[39mrelease()\n\u001b[1;32m 121\u001b[0m \u001b[38;5;66;03m# unserialize the data after having released the lock\u001b[39;00m\n\u001b[0;32m--> 122\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m_ForkingPickler\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mloads\u001b[49m\u001b[43m(\u001b[49m\u001b[43mres\u001b[49m\u001b[43m)\u001b[49m\n", 465 | "File \u001b[0;32m~/pyenv/py310/lib/python3.10/site-packages/torch/multiprocessing/reductions.py:297\u001b[0m, in \u001b[0;36mrebuild_storage_fd\u001b[0;34m(cls, df, size)\u001b[0m\n\u001b[1;32m 296\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mrebuild_storage_fd\u001b[39m(\u001b[38;5;28mcls\u001b[39m, df, size):\n\u001b[0;32m--> 297\u001b[0m fd \u001b[38;5;241m=\u001b[39m \u001b[43mdf\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdetach\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 298\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 299\u001b[0m storage \u001b[38;5;241m=\u001b[39m storage_from_cache(\u001b[38;5;28mcls\u001b[39m, fd_id(fd))\n", 466 | "File \u001b[0;32m/usr/lib/python3.10/multiprocessing/resource_sharer.py:57\u001b[0m, in \u001b[0;36mDupFd.detach\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mdetach\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m 56\u001b[0m \u001b[38;5;124;03m'''Get the fd. 
This should only be called once.'''\u001b[39;00m\n\u001b[0;32m---> 57\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[43m_resource_sharer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_connection\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_id\u001b[49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mas\u001b[39;00m conn:\n\u001b[1;32m 58\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m reduction\u001b[38;5;241m.\u001b[39mrecv_handle(conn)\n", 467 | "File \u001b[0;32m/usr/lib/python3.10/multiprocessing/resource_sharer.py:86\u001b[0m, in \u001b[0;36m_ResourceSharer.get_connection\u001b[0;34m(ident)\u001b[0m\n\u001b[1;32m 84\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mconnection\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m Client\n\u001b[1;32m 85\u001b[0m address, key \u001b[38;5;241m=\u001b[39m ident\n\u001b[0;32m---> 86\u001b[0m c \u001b[38;5;241m=\u001b[39m \u001b[43mClient\u001b[49m\u001b[43m(\u001b[49m\u001b[43maddress\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mauthkey\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mprocess\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcurrent_process\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mauthkey\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 87\u001b[0m c\u001b[38;5;241m.\u001b[39msend((key, os\u001b[38;5;241m.\u001b[39mgetpid()))\n\u001b[1;32m 88\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m c\n", 468 | "File \u001b[0;32m/usr/lib/python3.10/multiprocessing/connection.py:513\u001b[0m, in \u001b[0;36mClient\u001b[0;34m(address, family, authkey)\u001b[0m\n\u001b[1;32m 510\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mauthkey should be a byte string\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m 512\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m authkey \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m--> 513\u001b[0m \u001b[43manswer_challenge\u001b[49m\u001b[43m(\u001b[49m\u001b[43mc\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mauthkey\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 514\u001b[0m deliver_challenge(c, authkey)\n\u001b[1;32m 516\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m c\n", 469 | "File \u001b[0;32m/usr/lib/python3.10/multiprocessing/connection.py:761\u001b[0m, in \u001b[0;36manswer_challenge\u001b[0;34m(connection, authkey)\u001b[0m\n\u001b[1;32m 759\u001b[0m message \u001b[38;5;241m=\u001b[39m message[\u001b[38;5;28mlen\u001b[39m(CHALLENGE):]\n\u001b[1;32m 760\u001b[0m digest \u001b[38;5;241m=\u001b[39m hmac\u001b[38;5;241m.\u001b[39mnew(authkey, message, \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mmd5\u001b[39m\u001b[38;5;124m'\u001b[39m)\u001b[38;5;241m.\u001b[39mdigest()\n\u001b[0;32m--> 761\u001b[0m \u001b[43mconnection\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msend_bytes\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdigest\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 762\u001b[0m response \u001b[38;5;241m=\u001b[39m connection\u001b[38;5;241m.\u001b[39mrecv_bytes(\u001b[38;5;241m256\u001b[39m) \u001b[38;5;66;03m# reject large message\u001b[39;00m\n\u001b[1;32m 763\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m response \u001b[38;5;241m!=\u001b[39m WELCOME:\n", 470 | "File \u001b[0;32m/usr/lib/python3.10/multiprocessing/connection.py:205\u001b[0m, in 
\u001b[0;36m_ConnectionBase.send_bytes\u001b[0;34m(self, buf, offset, size)\u001b[0m\n\u001b[1;32m 203\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m offset \u001b[38;5;241m+\u001b[39m size \u001b[38;5;241m>\u001b[39m n:\n\u001b[1;32m 204\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mbuffer length < offset + size\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 205\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_send_bytes\u001b[49m\u001b[43m(\u001b[49m\u001b[43mm\u001b[49m\u001b[43m[\u001b[49m\u001b[43moffset\u001b[49m\u001b[43m:\u001b[49m\u001b[43moffset\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[43m \u001b[49m\u001b[43msize\u001b[49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n", 471 | "File \u001b[0;32m/usr/lib/python3.10/multiprocessing/connection.py:416\u001b[0m, in \u001b[0;36mConnection._send_bytes\u001b[0;34m(self, buf)\u001b[0m\n\u001b[1;32m 410\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_send(buf)\n\u001b[1;32m 411\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 412\u001b[0m \u001b[38;5;66;03m# Issue #20540: concatenate before sending, to avoid delays due\u001b[39;00m\n\u001b[1;32m 413\u001b[0m \u001b[38;5;66;03m# to Nagle's algorithm on a TCP socket.\u001b[39;00m\n\u001b[1;32m 414\u001b[0m \u001b[38;5;66;03m# Also note we want to avoid sending a 0-length buffer separately,\u001b[39;00m\n\u001b[1;32m 415\u001b[0m \u001b[38;5;66;03m# to avoid \"broken pipe\" errors if the other end closed the pipe.\u001b[39;00m\n\u001b[0;32m--> 416\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_send\u001b[49m\u001b[43m(\u001b[49m\u001b[43mheader\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mbuf\u001b[49m\u001b[43m)\u001b[49m\n", 472 | "File \u001b[0;32m/usr/lib/python3.10/multiprocessing/connection.py:373\u001b[0m, in \u001b[0;36mConnection._send\u001b[0;34m(self, buf, write)\u001b[0m\n\u001b[1;32m 371\u001b[0m remaining \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mlen\u001b[39m(buf)\n\u001b[1;32m 372\u001b[0m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[0;32m--> 373\u001b[0m n \u001b[38;5;241m=\u001b[39m \u001b[43mwrite\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_handle\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbuf\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 374\u001b[0m remaining \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m=\u001b[39m n\n\u001b[1;32m 375\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m remaining \u001b[38;5;241m==\u001b[39m \u001b[38;5;241m0\u001b[39m:\n", 473 | "\u001b[0;31mKeyboardInterrupt\u001b[0m: " 474 | ] 475 | } 476 | ], 477 | "source": [ 478 | "epochs = 10\n", 479 | "\n", 480 | "for epoch in range(epochs):\n", 481 | " loss_val = 0\n", 482 | " acc_val = 0\n", 483 | " for sample in (pbar := tqdm(train_loader)):\n", 484 | " img, label = sample['img'], sample['label']\n", 485 | " label = F.one_hot(label, 2).float()\n", 486 | " img = img.to(device)\n", 487 | " label = label.to(device)\n", 488 | " optimizer.zero_grad()\n", 489 | " \n", 490 | " with autocast(use_amp):\n", 491 | " pred = model(img)\n", 492 | " loss = loss_fn(pred, label)\n", 493 | "\n", 494 | " scaler.scale(loss).backward()\n", 495 | " loss_item = loss.item()\n", 496 | " loss_val += loss_item\n", 497 | "\n", 498 | " 
scaler.step(optimizer)\n", 499 | " scaler.update()\n", 500 | "\n", 501 | " acc_current = accuracy(pred.cpu().float(), label.cpu().float())\n", 502 | " acc_val += acc_current\n", 503 | "\n", 504 | " pbar.set_description(f'loss: {loss_item:.5f}\\taccuracy: {acc_current:.3f}')\n", 505 | " print(loss_val/len(train_loader))\n", 506 | " print(acc_val/len(train_loader))" 507 | ] 508 | }, 509 | { 510 | "cell_type": "code", 511 | "execution_count": null, 512 | "id": "0da9e2d1", 513 | "metadata": {}, 514 | "outputs": [], 515 | "source": [ 516 | "loss_val = 0\n", 517 | "acc_val = 0\n", 518 | "for sample in (pbar := tqdm(test_loader)):\n", 519 | " with torch.no_grad():\n", 520 | " img, label = sample['img'], sample['label']\n", 521 | "\n", 522 | " label = F.one_hot(label, 2).float()\n", 523 | " pred = model(img)\n", 524 | "\n", 525 | " loss = loss_fn(pred, label)\n", 526 | " loss_item = loss.item()\n", 527 | " loss_val += loss_item\n", 528 | "\n", 529 | " acc_current = accuracy(pred, label)\n", 530 | " acc_val += acc_current\n", 531 | "\n", 532 | " pbar.set_description(f'loss: {loss_item:.5f}\\taccuracy: {acc_current:.3f}')\n", 533 | "print(loss_val/len(train_loader))\n", 534 | "print(acc_val/len(train_loader))" 535 | ] 536 | }, 537 | { 538 | "cell_type": "code", 539 | "execution_count": null, 540 | "id": "69edf77e", 541 | "metadata": {}, 542 | "outputs": [], 543 | "source": [] 544 | }, 545 | { 546 | "cell_type": "code", 547 | "execution_count": null, 548 | "id": "5faee4bf", 549 | "metadata": {}, 550 | "outputs": [], 551 | "source": [] 552 | } 553 | ], 554 | "metadata": { 555 | "kernelspec": { 556 | "display_name": "Python 3 (ipykernel)", 557 | "language": "python", 558 | "name": "python3" 559 | }, 560 | "language_info": { 561 | "codemirror_mode": { 562 | "name": "ipython", 563 | "version": 3 564 | }, 565 | "file_extension": ".py", 566 | "mimetype": "text/x-python", 567 | "name": "python", 568 | "nbconvert_exporter": "python", 569 | "pygments_lexer": "ipython3", 570 | "version": "3.10.8" 571 | } 572 | }, 573 | "nbformat": 4, 574 | "nbformat_minor": 5 575 | } 576 | -------------------------------------------------------------------------------- /lesson_2/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_2/1.png -------------------------------------------------------------------------------- /lesson_2/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_2/2.png -------------------------------------------------------------------------------- /lesson_2/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_2/3.png -------------------------------------------------------------------------------- /lesson_2/4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_2/4.png -------------------------------------------------------------------------------- /lesson_2/5.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_2/5.png -------------------------------------------------------------------------------- /lesson_3/Dogs and cats.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "b792b114", 7 | "metadata": {}, 8 | "outputs": [ 9 | { 10 | "name": "stderr", 11 | "output_type": "stream", 12 | "text": [ 13 | "/home/magorokhoov/pyenv/py310/lib/python3.10/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", 14 | " from .autonotebook import tqdm as notebook_tqdm\n" 15 | ] 16 | } 17 | ], 18 | "source": [ 19 | "import torch\n", 20 | "import torch.nn as nn\n", 21 | "import torch.nn.functional as F\n", 22 | "\n", 23 | "import torchvision as tv\n", 24 | "\n", 25 | "import os\n", 26 | "import cv2\n", 27 | "import numpy as np\n", 28 | "import matplotlib.pyplot as plt\n", 29 | "from tqdm.autonotebook import tqdm" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": 48, 35 | "id": "8eabb6a7", 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "class Dataset2class(torch.utils.data.Dataset):\n", 40 | " def __init__(self, path_dir1:str, path_dir2:str):\n", 41 | " super().__init__()\n", 42 | " \n", 43 | " self.path_dir1 = path_dir1\n", 44 | " self.path_dir2 = path_dir2\n", 45 | " \n", 46 | " self.dir1_list = sorted(os.listdir(path_dir1))\n", 47 | " self.dir2_list = sorted(os.listdir(path_dir2))\n", 48 | " \n", 49 | " def __len__(self):\n", 50 | " return len(self.dir1_list) + len(self.dir2_list)\n", 51 | " \n", 52 | " def __getitem__(self, idx):\n", 53 | " \n", 54 | " if idx < len(self.dir1_list):\n", 55 | " class_id = 0\n", 56 | " img_path = os.path.join(self.path_dir1, self.dir1_list[idx])\n", 57 | " else:\n", 58 | " class_id = 1\n", 59 | " idx -= len(self.dir1_list)\n", 60 | " img_path = os.path.join(self.path_dir2, self.dir2_list[idx])\n", 61 | " \n", 62 | " img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n", 63 | " img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n", 64 | " img = img.astype(np.float32)\n", 65 | " img = img/255.0\n", 66 | " \n", 67 | " img = cv2.resize(img, (112, 112), interpolation=cv2.INTER_AREA)\n", 68 | " img = img.transpose((2, 0, 1))\n", 69 | " \n", 70 | " t_img = torch.from_numpy(img)\n", 71 | " t_class_id = torch.tensor([class_id])\n", 72 | " \n", 73 | " return {'img': t_img, 'label': t_class_id}\n", 74 | " " 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": 49, 80 | "id": "ae43db49", 81 | "metadata": {}, 82 | "outputs": [], 83 | "source": [ 84 | "train_dogs_path = './dataset/training_set/dogs/'\n", 85 | "train_cats_path = './dataset/training_set/cats/'\n", 86 | "test_dogs_path = './dataset/test_set/dogs/'\n", 87 | "test_cats_path = './dataset/test_set/cats/'\n", 88 | "\n", 89 | "train_ds_catsdogs = Dataset2class(train_dogs_path, train_cats_path)\n", 90 | "test_ds_catsdogs = Dataset2class(test_dogs_path, test_cats_path)" 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": 50, 96 | "id": "a169d748", 97 | "metadata": {}, 98 | "outputs": [ 99 | { 100 | "data": { 101 | "text/plain": [ 102 | "8000" 103 | ] 104 | }, 105 | "execution_count": 50, 106 | "metadata": {}, 107 | "output_type": "execute_result" 108 | } 109 | ], 110 | "source": [ 111 | "len(train_ds_catsdogs)" 112 | ] 113 | 
}, 114 | { 115 | "cell_type": "code", 116 | "execution_count": 51, 117 | "id": "683ec6a5", 118 | "metadata": {}, 119 | "outputs": [ 120 | { 121 | "data": { 122 | "text/plain": [ 123 | "2000" 124 | ] 125 | }, 126 | "execution_count": 51, 127 | "metadata": {}, 128 | "output_type": "execute_result" 129 | } 130 | ], 131 | "source": [ 132 | "len(test_ds_catsdogs)" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": 52, 138 | "id": "bc34837f", 139 | "metadata": {}, 140 | "outputs": [], 141 | "source": [ 142 | "batch_size = 4\n", 143 | "\n", 144 | "train_loader = torch.utils.data.DataLoader(\n", 145 | "    train_ds_catsdogs, shuffle=True, \n", 146 | "    batch_size=batch_size, num_workers=1, drop_last=True\n", 147 | ")\n", 148 | "test_loader = torch.utils.data.DataLoader(\n", 149 | "    test_ds_catsdogs, shuffle=True,\n", 150 | "    batch_size=batch_size, num_workers=1, drop_last=False\n", 151 | ")" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": 53, 157 | "id": "64358fde", 158 | "metadata": {}, 159 | "outputs": [], 160 | "source": [ 161 | "class ConvNet(nn.Module):\n", 162 | "    def __init__(self):\n", 163 | "        super().__init__()\n", 164 | "        \n", 165 | "        self.act = nn.LeakyReLU(0.2)\n", 166 | "        self.maxpool = nn.MaxPool2d(2,2)\n", 167 | "        self.conv0 = nn.Conv2d(3, 32, 3, stride=1, padding=0)\n", 168 | "        self.conv1 = nn.Conv2d(32, 32, 3, stride=1, padding=0)\n", 169 | "        self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=0)\n", 170 | "        self.conv3 = nn.Conv2d(64, 128, 3, stride=1, padding=0)\n", 171 | "        \n", 172 | "        self.adaptivepool = nn.AdaptiveAvgPool2d((1,1))\n", 173 | "        self.flatten = nn.Flatten()\n", 174 | "        self.linear1 = nn.Linear(128, 20)\n", 175 | "        self.linear2 = nn.Linear(20, 2)\n", 176 | "        \n", 177 | "    def forward(self, x):\n", 178 | "        \n", 179 | "\n", 180 | "        out = self.conv0(x)\n", 181 | "        out = self.act(out)\n", 182 | "        out = self.maxpool(out)\n", 183 | "\n", 184 | "        out = self.conv1(out)\n", 185 | "        out = self.act(out)\n", 186 | "        out = self.maxpool(out)\n", 187 | "\n", 188 | "        out = self.conv2(out)\n", 189 | "        out = self.act(out)\n", 190 | "        out = self.maxpool(out)\n", 191 | "\n", 192 | "        out = self.conv3(out)\n", 193 | "        out = self.act(out)\n", 194 | "        \n", 195 | "        out = self.adaptivepool(out)\n", 196 | "        out = self.flatten(out)\n", 197 | "        out = self.linear1(out)\n", 198 | "        out = self.act(out)\n", 199 | "        out = self.linear2(out)\n", 200 | "\n", 201 | "        return out\n", 202 | "        " 203 | ] 204 | }, 205 | { 206 | "cell_type": "code", 207 | "execution_count": 61, 208 | "id": "2870be26", 209 | "metadata": {}, 210 | "outputs": [], 211 | "source": [ 212 | "class VGG16(nn.Module):\n", 213 | "    def __init__(self, out_nc):\n", 214 | "        super().__init__()\n", 215 | "        \n", 216 | "        self.act = nn.ReLU(inplace=True)\n", 217 | "        self.maxpool = nn.MaxPool2d(2,2)\n", 218 | "        \n", 219 | "        self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)\n", 220 | "        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)\n", 221 | "        \n", 222 | "        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)\n", 223 | "        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n", 224 | "        \n", 225 | "        self.conv3_1 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n", 226 | "        self.conv3_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n", 227 | "        self.conv3_3 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n", 228 | "        \n", 229 | "        self.conv4_1 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n", 230 | "        self.conv4_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n", 231
| " self.conv4_3 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n", 232 | " \n", 233 | " self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n", 234 | " \n", 235 | "# self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n", 236 | "# self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n", 237 | "# self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n", 238 | " \n", 239 | " self.flat = nn.Flatten()\n", 240 | " \n", 241 | " self.fc1 = nn.Linear(128, 128)\n", 242 | " #self.fc2 = nn.Linear(4096, 4096)\n", 243 | " self.fc3 = nn.Linear(128, out_nc)\n", 244 | " \n", 245 | " def forward(self, x):\n", 246 | " out = self.conv1_1(x)\n", 247 | " out = self.act(out)\n", 248 | " out = self.conv1_2(out)\n", 249 | " out = self.act(out)\n", 250 | " \n", 251 | " out = self.maxpool(out)\n", 252 | " \n", 253 | " out = self.conv2_1(out)\n", 254 | " out = self.act(out)\n", 255 | " out = self.conv2_2(out)\n", 256 | " out = self.act(out)\n", 257 | " \n", 258 | " out = self.maxpool(out)\n", 259 | " \n", 260 | " out = self.conv3_1(out)\n", 261 | " out = self.act(out)\n", 262 | " out = self.conv3_2(out)\n", 263 | " out = self.act(out)\n", 264 | " out = self.conv3_3(out)\n", 265 | " out = self.act(out)\n", 266 | " \n", 267 | " out = self.maxpool(out)\n", 268 | " \n", 269 | " out = self.conv4_1(out)\n", 270 | " out = self.act(out)\n", 271 | " out = self.conv4_2(out)\n", 272 | " out = self.act(out)\n", 273 | " out = self.conv4_3(out)\n", 274 | " out = self.act(out)\n", 275 | " \n", 276 | " out = self.maxpool(out)\n", 277 | " \n", 278 | "# out = self.conv5_1(out)\n", 279 | "# out = self.act(out)\n", 280 | "# out = self.conv5_2(out)\n", 281 | "# out = self.act(out)\n", 282 | "# out = self.conv5_3(out)\n", 283 | "# out = self.act(out)\n", 284 | " \n", 285 | "# out = self.maxpool(out)\n", 286 | " out = self.avgpool(out)\n", 287 | " out = self.flat(out)\n", 288 | " \n", 289 | " out = self.fc1(out)\n", 290 | " out = self.act(out)\n", 291 | "# out = self.fc2(out)\n", 292 | "# out = self.act(out)\n", 293 | " out = self.fc3(out)\n", 294 | " \n", 295 | " return out" 296 | ] 297 | }, 298 | { 299 | "cell_type": "code", 300 | "execution_count": 63, 301 | "id": "d27c8db2", 302 | "metadata": {}, 303 | "outputs": [], 304 | "source": [ 305 | "def count_parameters(model):\n", 306 | " return sum(p.numel() for p in model.parameters() if p.requires_grad)" 307 | ] 308 | }, 309 | { 310 | "cell_type": "code", 311 | "execution_count": null, 312 | "id": "e155790d", 313 | "metadata": {}, 314 | "outputs": [], 315 | "source": [] 316 | }, 317 | { 318 | "cell_type": "code", 319 | "execution_count": null, 320 | "id": "9e9ce7af", 321 | "metadata": {}, 322 | "outputs": [], 323 | "source": [] 324 | }, 325 | { 326 | "cell_type": "code", 327 | "execution_count": 64, 328 | "id": "e5b0ff4f", 329 | "metadata": {}, 330 | "outputs": [], 331 | "source": [ 332 | "device = 'cpu' #'cuda' if torch.cuda.is_available() else 'cpu'\n", 333 | "\n" 334 | ] 335 | }, 336 | { 337 | "cell_type": "code", 338 | "execution_count": null, 339 | "id": "e0f15479", 340 | "metadata": {}, 341 | "outputs": [], 342 | "source": [] 343 | }, 344 | { 345 | "cell_type": "code", 346 | "execution_count": null, 347 | "id": "14d2b0da", 348 | "metadata": {}, 349 | "outputs": [], 350 | "source": [] 351 | }, 352 | { 353 | "cell_type": "code", 354 | "execution_count": 65, 355 | "id": "c995c23b", 356 | "metadata": {}, 357 | "outputs": [], 358 | "source": [ 359 | "def accuracy(pred, label):\n", 360 | " answer = (F.sigmoid(pred.detach().cpu()).numpy() > 0.5) == (label.cpu().numpy() > 
0.5)\n", 361 | " return answer.mean()" 362 | ] 363 | }, 364 | { 365 | "cell_type": "code", 366 | "execution_count": 70, 367 | "id": "0f121fd9", 368 | "metadata": {}, 369 | "outputs": [ 370 | { 371 | "name": "stderr", 372 | "output_type": "stream", 373 | "text": [ 374 | "100%|███████████████████████████████████████| 2000/2000 [00:25<00:00, 79.79it/s]\n" 375 | ] 376 | }, 377 | { 378 | "name": "stdout", 379 | "output_type": "stream", 380 | "text": [ 381 | "0.6933599455058574\n", 382 | "0.503375\n" 383 | ] 384 | }, 385 | { 386 | "name": "stderr", 387 | "output_type": "stream", 388 | "text": [ 389 | "100%|███████████████████████████████████████| 2000/2000 [00:25<00:00, 78.75it/s]\n" 390 | ] 391 | }, 392 | { 393 | "name": "stdout", 394 | "output_type": "stream", 395 | "text": [ 396 | "0.6900944152921438\n", 397 | "0.5225\n" 398 | ] 399 | }, 400 | { 401 | "name": "stderr", 402 | "output_type": "stream", 403 | "text": [ 404 | "100%|███████████████████████████████████████| 2000/2000 [00:25<00:00, 77.98it/s]\n" 405 | ] 406 | }, 407 | { 408 | "name": "stdout", 409 | "output_type": "stream", 410 | "text": [ 411 | "0.6853578619360924\n", 412 | "0.556375\n" 413 | ] 414 | }, 415 | { 416 | "name": "stderr", 417 | "output_type": "stream", 418 | "text": [ 419 | "100%|███████████████████████████████████████| 2000/2000 [00:26<00:00, 76.66it/s]\n" 420 | ] 421 | }, 422 | { 423 | "name": "stdout", 424 | "output_type": "stream", 425 | "text": [ 426 | "0.6894261719882488\n", 427 | "0.534375\n" 428 | ] 429 | }, 430 | { 431 | "name": "stderr", 432 | "output_type": "stream", 433 | "text": [ 434 | "100%|███████████████████████████████████████| 2000/2000 [00:26<00:00, 76.47it/s]\n" 435 | ] 436 | }, 437 | { 438 | "name": "stdout", 439 | "output_type": "stream", 440 | "text": [ 441 | "0.6760058562606573\n", 442 | "0.583375\n" 443 | ] 444 | }, 445 | { 446 | "name": "stderr", 447 | "output_type": "stream", 448 | "text": [ 449 | "100%|███████████████████████████████████████| 2000/2000 [00:26<00:00, 75.99it/s]\n" 450 | ] 451 | }, 452 | { 453 | "name": "stdout", 454 | "output_type": "stream", 455 | "text": [ 456 | "0.6352082764878869\n", 457 | "0.643875\n" 458 | ] 459 | }, 460 | { 461 | "name": "stderr", 462 | "output_type": "stream", 463 | "text": [ 464 | "100%|███████████████████████████████████████| 2000/2000 [00:26<00:00, 75.97it/s]" 465 | ] 466 | }, 467 | { 468 | "name": "stdout", 469 | "output_type": "stream", 470 | "text": [ 471 | "0.6016034909933805\n", 472 | "0.6795\n" 473 | ] 474 | }, 475 | { 476 | "name": "stderr", 477 | "output_type": "stream", 478 | "text": [ 479 | "\n" 480 | ] 481 | } 482 | ], 483 | "source": [ 484 | "device = 'cuda' if torch.cuda.is_available() else 'cpu'\n", 485 | "model = VGG16(1) # ConvNet()\n", 486 | "model = model.to(device)\n", 487 | "count_parameters(model)\n", 488 | "\n", 489 | "epochs = 7\n", 490 | "\n", 491 | "\n", 492 | "loss_fn = nn.BCEWithLogitsLoss().to(device)\n", 493 | "optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)\n", 494 | "\n", 495 | "for epoch in range(epochs):\n", 496 | " loss_val = 0\n", 497 | " acc_val = 0\n", 498 | " for sample in (pbar := tqdm(train_loader)):\n", 499 | " img, label = sample['img'], sample['label']\n", 500 | " img = img.to(device)\n", 501 | " label = label.float().to(device)\n", 502 | " optimizer.zero_grad()\n", 503 | "\n", 504 | " #label = F.one_hot(label, 2).float()\n", 505 | " pred = model(img)\n", 506 | "\n", 507 | " #print(pred.shape, label.shape)\n", 508 | " loss = loss_fn(pred, label)\n", 509 | "\n", 510 | " loss.backward()\n", 511 | 
" loss_item = loss.item()\n", 512 | " loss_val += loss_item\n", 513 | "\n", 514 | " optimizer.step()\n", 515 | "\n", 516 | " acc_current = accuracy(pred, label)\n", 517 | " acc_val += acc_current\n", 518 | "\n", 519 | " pbar.set_description(f'loss: {loss_item:.5f}\\taccuracy: {acc_current:.3f}')\n", 520 | " print(loss_val/len(train_loader))\n", 521 | " print(acc_val/len(train_loader))" 522 | ] 523 | }, 524 | { 525 | "cell_type": "code", 526 | "execution_count": null, 527 | "id": "b90f22ff", 528 | "metadata": {}, 529 | "outputs": [], 530 | "source": [ 531 | "epochs = 7\n", 532 | "\n", 533 | "for epoch in range(epochs):\n", 534 | " loss_val = 0\n", 535 | " acc_val = 0\n", 536 | " for sample in (pbar := tqdm(train_loader)):\n", 537 | " img, label = sample['img'], sample['label']\n", 538 | " optimizer.zero_grad()\n", 539 | "\n", 540 | " #label = F.one_hot(label, 2).float()\n", 541 | " pred = model(img)\n", 542 | "\n", 543 | " loss = loss_fn(pred, label)\n", 544 | "\n", 545 | " loss.backward()\n", 546 | " loss_item = loss.item()\n", 547 | " loss_val += loss_item\n", 548 | "\n", 549 | " optimizer.step()\n", 550 | "\n", 551 | " acc_current = accuracy(pred, label)\n", 552 | " acc_val += acc_current\n", 553 | "\n", 554 | " pbar.set_description(f'loss: {loss_item:.5f}\\taccuracy: {acc_current:.3f}')\n", 555 | " print(loss_val/len(train_loader))\n", 556 | " print(acc_val/len(train_loader))" 557 | ] 558 | }, 559 | { 560 | "cell_type": "code", 561 | "execution_count": 35, 562 | "id": "0da9e2d1", 563 | "metadata": {}, 564 | "outputs": [ 565 | { 566 | "name": "stderr", 567 | "output_type": "stream", 568 | "text": [ 569 | "loss: 9.98807\taccuracy: 0.000: 0%| | 8/2000 [00:04<20:21, 1.63it/s]\n" 570 | ] 571 | }, 572 | { 573 | "ename": "KeyboardInterrupt", 574 | "evalue": "", 575 | "output_type": "error", 576 | "traceback": [ 577 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 578 | "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", 579 | "Input \u001b[0;32mIn [35]\u001b[0m, in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 5\u001b[0m img, label \u001b[38;5;241m=\u001b[39m sample[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mimg\u001b[39m\u001b[38;5;124m'\u001b[39m], sample[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mlabel\u001b[39m\u001b[38;5;124m'\u001b[39m]\u001b[38;5;241m.\u001b[39mfloat()\n\u001b[1;32m 7\u001b[0m \u001b[38;5;66;03m#label = F.one_hot(label, 2).float()\u001b[39;00m\n\u001b[0;32m----> 8\u001b[0m pred \u001b[38;5;241m=\u001b[39m \u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[43mimg\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 10\u001b[0m loss \u001b[38;5;241m=\u001b[39m loss_fn(pred, label)\n\u001b[1;32m 11\u001b[0m loss_item \u001b[38;5;241m=\u001b[39m loss\u001b[38;5;241m.\u001b[39mitem()\n", 580 | "File \u001b[0;32m~/pyenv/py310/lib/python3.10/site-packages/torch/nn/modules/module.py:1130\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m 1126\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1127\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1128\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m 
\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1129\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1130\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1131\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1132\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n", 581 | "Input \u001b[0;32mIn [25]\u001b[0m, in \u001b[0;36mVGG16.forward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m 40\u001b[0m out \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconv2_1(out)\n\u001b[1;32m 41\u001b[0m out \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mact(out)\n\u001b[0;32m---> 42\u001b[0m out \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconv2_2\u001b[49m\u001b[43m(\u001b[49m\u001b[43mout\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 43\u001b[0m out \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mact(out)\n\u001b[1;32m 45\u001b[0m out \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmaxpool(out)\n", 582 | "File \u001b[0;32m~/pyenv/py310/lib/python3.10/site-packages/torch/nn/modules/module.py:1130\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m 1126\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1127\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1128\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1129\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1130\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1131\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1132\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n", 583 | "File \u001b[0;32m~/pyenv/py310/lib/python3.10/site-packages/torch/nn/modules/conv.py:457\u001b[0m, in \u001b[0;36mConv2d.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 456\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, 
\u001b[38;5;28minput\u001b[39m: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m--> 457\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_conv_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias\u001b[49m\u001b[43m)\u001b[49m\n", 584 | "File \u001b[0;32m~/pyenv/py310/lib/python3.10/site-packages/torch/nn/modules/conv.py:453\u001b[0m, in \u001b[0;36mConv2d._conv_forward\u001b[0;34m(self, input, weight, bias)\u001b[0m\n\u001b[1;32m 449\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpadding_mode \u001b[38;5;241m!=\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mzeros\u001b[39m\u001b[38;5;124m'\u001b[39m:\n\u001b[1;32m 450\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m F\u001b[38;5;241m.\u001b[39mconv2d(F\u001b[38;5;241m.\u001b[39mpad(\u001b[38;5;28minput\u001b[39m, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_reversed_padding_repeated_twice, mode\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpadding_mode),\n\u001b[1;32m 451\u001b[0m weight, bias, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride,\n\u001b[1;32m 452\u001b[0m _pair(\u001b[38;5;241m0\u001b[39m), \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdilation, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mgroups)\n\u001b[0;32m--> 453\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mF\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconv2d\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbias\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstride\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 454\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpadding\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdilation\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgroups\u001b[49m\u001b[43m)\u001b[49m\n", 585 | "\u001b[0;31mKeyboardInterrupt\u001b[0m: " 586 | ] 587 | } 588 | ], 589 | "source": [ 590 | "loss_val = 0\n", 591 | "acc_val = 0\n", 592 | "for sample in (pbar := tqdm(test_loader)):\n", 593 | " with torch.no_grad():\n", 594 | " img, label = sample['img'], sample['label'].float()\n", 595 | "\n", 596 | " #label = F.one_hot(label, 2).float()\n", 597 | " pred = model(img)\n", 598 | "\n", 599 | " loss = loss_fn(pred, label)\n", 600 | " loss_item = loss.item()\n", 601 | " loss_val += loss_item\n", 602 | "\n", 603 | " acc_current = accuracy(pred, label)\n", 604 | " acc_val += acc_current\n", 605 | "\n", 606 | " pbar.set_description(f'loss: {loss_item:.5f}\\taccuracy: {acc_current:.3f}')\n", 607 | "print(loss_val/len(train_loader))\n", 608 | "print(acc_val/len(train_loader))" 609 | ] 610 | }, 611 | { 612 | "cell_type": "code", 613 | "execution_count": null, 614 | "id": "69edf77e", 615 | 
"metadata": {}, 616 | "outputs": [], 617 | "source": [] 618 | }, 619 | { 620 | "cell_type": "code", 621 | "execution_count": null, 622 | "id": "5faee4bf", 623 | "metadata": {}, 624 | "outputs": [], 625 | "source": [] 626 | } 627 | ], 628 | "metadata": { 629 | "kernelspec": { 630 | "display_name": "Python 3 (ipykernel)", 631 | "language": "python", 632 | "name": "python3" 633 | }, 634 | "language_info": { 635 | "codemirror_mode": { 636 | "name": "ipython", 637 | "version": 3 638 | }, 639 | "file_extension": ".py", 640 | "mimetype": "text/x-python", 641 | "name": "python", 642 | "nbconvert_exporter": "python", 643 | "pygments_lexer": "ipython3", 644 | "version": "3.10.8" 645 | } 646 | }, 647 | "nbformat": 4, 648 | "nbformat_minor": 5 649 | } 650 | -------------------------------------------------------------------------------- /lesson_3/GPU_AMP.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "b792b114", 7 | "metadata": {}, 8 | "outputs": [ 9 | { 10 | "name": "stderr", 11 | "output_type": "stream", 12 | "text": [ 13 | "/home/magorokhoov/pyenv/py310/lib/python3.10/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", 14 | " from .autonotebook import tqdm as notebook_tqdm\n" 15 | ] 16 | } 17 | ], 18 | "source": [ 19 | "import torch\n", 20 | "import torch.nn as nn\n", 21 | "import torch.nn.functional as F\n", 22 | "\n", 23 | "import torchvision as tv\n", 24 | "\n", 25 | "import os\n", 26 | "import cv2\n", 27 | "import numpy as np\n", 28 | "import matplotlib.pyplot as plt\n", 29 | "from tqdm.autonotebook import tqdm" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": 2, 35 | "id": "8eabb6a7", 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "class Dataset2class(torch.utils.data.Dataset):\n", 40 | " def __init__(self, path_dir1:str, path_dir2:str):\n", 41 | " super().__init__()\n", 42 | " \n", 43 | " self.path_dir1 = path_dir1\n", 44 | " self.path_dir2 = path_dir2\n", 45 | " \n", 46 | " self.dir1_list = sorted(os.listdir(path_dir1))\n", 47 | " self.dir2_list = sorted(os.listdir(path_dir2))\n", 48 | " \n", 49 | " def __len__(self):\n", 50 | " return len(self.dir1_list) + len(self.dir2_list)\n", 51 | " \n", 52 | " def __getitem__(self, idx):\n", 53 | " \n", 54 | " if idx < len(self.dir1_list):\n", 55 | " class_id = 0\n", 56 | " img_path = os.path.join(self.path_dir1, self.dir1_list[idx])\n", 57 | " else:\n", 58 | " class_id = 1\n", 59 | " idx -= len(self.dir1_list)\n", 60 | " img_path = os.path.join(self.path_dir2, self.dir2_list[idx])\n", 61 | " \n", 62 | " img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n", 63 | " img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n", 64 | " img = img.astype(np.float32)\n", 65 | " img = img/255.0\n", 66 | " \n", 67 | " img = cv2.resize(img, (112, 112), interpolation=cv2.INTER_AREA)\n", 68 | " img = img.transpose((2, 0, 1))\n", 69 | " \n", 70 | " t_img = torch.from_numpy(img)\n", 71 | " t_class_id = torch.tensor([class_id])\n", 72 | " \n", 73 | " return {'img': t_img, 'label': t_class_id}\n", 74 | " " 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": 3, 80 | "id": "ae43db49", 81 | "metadata": {}, 82 | "outputs": [], 83 | "source": [ 84 | "train_dogs_path = './dataset/training_set/dogs/'\n", 85 | "train_cats_path = './dataset/training_set/cats/'\n", 86 | "test_dogs_path = 
'./dataset/test_set/dogs/'\n", 87 | "test_cats_path = './dataset/test_set/cats/'\n", 88 | "\n", 89 | "train_ds_catsdogs = Dataset2class(train_dogs_path, train_cats_path)\n", 90 | "test_ds_catsdogs = Dataset2class(test_dogs_path, test_cats_path)" 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": 5, 96 | "id": "a169d748", 97 | "metadata": {}, 98 | "outputs": [ 99 | { 100 | "name": "stdout", 101 | "output_type": "stream", 102 | "text": [ 103 | "Train size: 8000\n", 104 | "Test size: 2000\n" 105 | ] 106 | } 107 | ], 108 | "source": [ 109 | "print(f'Train size: {len(train_ds_catsdogs)}')\n", 110 | "print(f'Test size: {len(test_ds_catsdogs)}')" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": null, 116 | "id": "683ec6a5", 117 | "metadata": {}, 118 | "outputs": [], 119 | "source": [] 120 | }, 121 | { 122 | "cell_type": "code", 123 | "execution_count": 7, 124 | "id": "bc34837f", 125 | "metadata": {}, 126 | "outputs": [], 127 | "source": [ 128 | "batch_size = 4\n", 129 | "\n", 130 | "train_loader = torch.utils.data.DataLoader(\n", 131 | " train_ds_catsdogs, shuffle=True, \n", 132 | " batch_size=batch_size, num_workers=1, drop_last=True\n", 133 | ")\n", 134 | "test_loader = torch.utils.data.DataLoader(\n", 135 | " test_ds_catsdogs, shuffle=False,\n", 136 | " batch_size=batch_size, num_workers=1, drop_last=False\n", 137 | ")" 138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": 8, 143 | "id": "2870be26", 144 | "metadata": {}, 145 | "outputs": [], 146 | "source": [ 147 | "class VGG13(nn.Module):\n", 148 | " def __init__(self, out_nc):\n", 149 | " super().__init__()\n", 150 | " \n", 151 | " self.act = nn.ReLU(inplace=True)\n", 152 | " self.maxpool = nn.MaxPool2d(2,2)\n", 153 | " \n", 154 | " self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)\n", 155 | " self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, padding=1)\n", 156 | " \n", 157 | " self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1)\n", 158 | " self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n", 159 | " \n", 160 | " self.conv3_1 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n", 161 | " self.conv3_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n", 162 | " self.conv3_3 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n", 163 | " \n", 164 | " self.conv4_1 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n", 165 | " self.conv4_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n", 166 | " self.conv4_3 = nn.Conv2d(128, 128, kernel_size=3, padding=1)\n", 167 | " \n", 168 | " self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n", 169 | " \n", 170 | "# self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n", 171 | "# self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n", 172 | "# self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)\n", 173 | " \n", 174 | " self.flat = nn.Flatten()\n", 175 | " \n", 176 | " self.fc1 = nn.Linear(128, 128)\n", 177 | " #self.fc2 = nn.Linear(4096, 4096)\n", 178 | " self.fc3 = nn.Linear(128, out_nc)\n", 179 | " \n", 180 | " def forward(self, x):\n", 181 | " out = self.conv1_1(x)\n", 182 | " out = self.act(out)\n", 183 | " out = self.conv1_2(out)\n", 184 | " out = self.act(out)\n", 185 | " \n", 186 | " out = self.maxpool(out)\n", 187 | " \n", 188 | " out = self.conv2_1(out)\n", 189 | " out = self.act(out)\n", 190 | " out = self.conv2_2(out)\n", 191 | " out = self.act(out)\n", 192 | " \n", 193 | " out = self.maxpool(out)\n", 194 | " \n", 195 | " out = self.conv3_1(out)\n", 196 | " out = self.act(out)\n", 197 | " 
out = self.conv3_2(out)\n", 198 | " out = self.act(out)\n", 199 | " out = self.conv3_3(out)\n", 200 | " out = self.act(out)\n", 201 | " \n", 202 | " out = self.maxpool(out)\n", 203 | " \n", 204 | " out = self.conv4_1(out)\n", 205 | " out = self.act(out)\n", 206 | " out = self.conv4_2(out)\n", 207 | " out = self.act(out)\n", 208 | " out = self.conv4_3(out)\n", 209 | " out = self.act(out)\n", 210 | " \n", 211 | " out = self.maxpool(out)\n", 212 | " \n", 213 | "# out = self.conv5_1(out)\n", 214 | "# out = self.act(out)\n", 215 | "# out = self.conv5_2(out)\n", 216 | "# out = self.act(out)\n", 217 | "# out = self.conv5_3(out)\n", 218 | "# out = self.act(out)\n", 219 | " \n", 220 | "# out = self.maxpool(out)\n", 221 | " out = self.avgpool(out)\n", 222 | " out = self.flat(out)\n", 223 | " \n", 224 | " out = self.fc1(out)\n", 225 | " out = self.act(out)\n", 226 | "# out = self.fc2(out)\n", 227 | "# out = self.act(out)\n", 228 | " out = self.fc3(out)\n", 229 | " \n", 230 | " return out" 231 | ] 232 | }, 233 | { 234 | "cell_type": "code", 235 | "execution_count": 9, 236 | "id": "d27c8db2", 237 | "metadata": {}, 238 | "outputs": [], 239 | "source": [ 240 | "def count_parameters(model):\n", 241 | " return sum(p.numel() for p in model.parameters() if p.requires_grad)\n", 242 | "\n", 243 | "def accuracy(pred, label):\n", 244 | " answer = (F.sigmoid(pred.detach().cpu()).numpy() > 0.5) == (label.cpu().numpy() > 0.5)\n", 245 | " return answer.mean()" 246 | ] 247 | }, 248 | { 249 | "cell_type": "code", 250 | "execution_count": 12, 251 | "id": "c995c23b", 252 | "metadata": {}, 253 | "outputs": [], 254 | "source": [ 255 | "device = 'cuda' if torch.cuda.is_available() else 'cpu'" 256 | ] 257 | }, 258 | { 259 | "cell_type": "code", 260 | "execution_count": 18, 261 | "id": "52bf8bd9", 262 | "metadata": {}, 263 | "outputs": [], 264 | "source": [ 265 | "model = VGG13(1) # ConvNet()\n", 266 | "model = model.to(device)\n", 267 | "count_parameters(model)\n", 268 | "\n", 269 | "epochs = 7\n", 270 | "\n", 271 | "\n", 272 | "loss_fn = nn.BCEWithLogitsLoss()#.to(device)\n", 273 | "optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)" 274 | ] 275 | }, 276 | { 277 | "cell_type": "code", 278 | "execution_count": 19, 279 | "id": "0f121fd9", 280 | "metadata": {}, 281 | "outputs": [ 282 | { 283 | "name": "stderr", 284 | "output_type": "stream", 285 | "text": [ 286 | "loss: 0.69970\taccuracy: 0.250: 100%|████████| 2000/2000 [00:50<00:00, 39.61it/s]\n" 287 | ] 288 | }, 289 | { 290 | "name": "stdout", 291 | "output_type": "stream", 292 | "text": [ 293 | "0.6933949950933457\n", 294 | "0.50075\n" 295 | ] 296 | }, 297 | { 298 | "name": "stderr", 299 | "output_type": "stream", 300 | "text": [ 301 | "loss: 0.69315\taccuracy: 0.500: 100%|████████| 2000/2000 [00:26<00:00, 74.46it/s]\n" 302 | ] 303 | }, 304 | { 305 | "name": "stdout", 306 | "output_type": "stream", 307 | "text": [ 308 | "0.6932838300764561\n", 309 | "0.496\n" 310 | ] 311 | }, 312 | { 313 | "name": "stderr", 314 | "output_type": "stream", 315 | "text": [ 316 | "loss: 0.69142\taccuracy: 0.500: 100%|████████| 2000/2000 [00:26<00:00, 74.70it/s]\n" 317 | ] 318 | }, 319 | { 320 | "name": "stdout", 321 | "output_type": "stream", 322 | "text": [ 323 | "0.693299315303564\n", 324 | "0.499625\n" 325 | ] 326 | }, 327 | { 328 | "name": "stderr", 329 | "output_type": "stream", 330 | "text": [ 331 | "loss: 0.66734\taccuracy: 0.750: 100%|████████| 2000/2000 [00:27<00:00, 71.99it/s]\n" 332 | ] 333 | }, 334 | { 335 | "name": "stdout", 336 | "output_type": "stream", 337 | "text": [ 
338 | "0.6892712494283915\n", 339 | "0.54075\n" 340 | ] 341 | }, 342 | { 343 | "name": "stderr", 344 | "output_type": "stream", 345 | "text": [ 346 | "loss: 0.53541\taccuracy: 1.000: 100%|████████| 2000/2000 [00:27<00:00, 71.74it/s]\n" 347 | ] 348 | }, 349 | { 350 | "name": "stdout", 351 | "output_type": "stream", 352 | "text": [ 353 | "0.6843551223278046\n", 354 | "0.560125\n" 355 | ] 356 | }, 357 | { 358 | "name": "stderr", 359 | "output_type": "stream", 360 | "text": [ 361 | "loss: 0.61921\taccuracy: 0.750: 100%|████████| 2000/2000 [00:27<00:00, 73.52it/s]\n" 362 | ] 363 | }, 364 | { 365 | "name": "stdout", 366 | "output_type": "stream", 367 | "text": [ 368 | "0.682359621167183\n", 369 | "0.56225\n" 370 | ] 371 | }, 372 | { 373 | "name": "stderr", 374 | "output_type": "stream", 375 | "text": [ 376 | "loss: 0.69709\taccuracy: 0.500: 100%|████████| 2000/2000 [00:26<00:00, 74.44it/s]" 377 | ] 378 | }, 379 | { 380 | "name": "stdout", 381 | "output_type": "stream", 382 | "text": [ 383 | "0.6765189296007157\n", 384 | "0.58175\n" 385 | ] 386 | }, 387 | { 388 | "name": "stderr", 389 | "output_type": "stream", 390 | "text": [ 391 | "\n" 392 | ] 393 | } 394 | ], 395 | "source": [ 396 | "for epoch in range(epochs):\n", 397 | " loss_val = 0\n", 398 | " acc_val = 0\n", 399 | " for sample in (pbar := tqdm(train_loader)):\n", 400 | " img, label = sample['img'], sample['label']\n", 401 | " img = img.to(device)\n", 402 | " label = label.float().to(device)\n", 403 | " optimizer.zero_grad()\n", 404 | "\n", 405 | " #label = F.one_hot(label, 2).float()\n", 406 | " pred = model(img)\n", 407 | "\n", 408 | " #print(pred.shape, label.shape)\n", 409 | " loss = loss_fn(pred, label)\n", 410 | "\n", 411 | " loss.backward()\n", 412 | " loss_item = loss.item()\n", 413 | " loss_val += loss_item\n", 414 | "\n", 415 | " optimizer.step()\n", 416 | "\n", 417 | " acc_current = accuracy(pred, label)\n", 418 | " acc_val += acc_current\n", 419 | "\n", 420 | " pbar.set_description(f'loss: {loss_item:.5f}\\taccuracy: {acc_current:.3f}')\n", 421 | " print(loss_val/len(train_loader))\n", 422 | " print(acc_val/len(train_loader))" 423 | ] 424 | }, 425 | { 426 | "cell_type": "code", 427 | "execution_count": null, 428 | "id": "b90f22ff", 429 | "metadata": {}, 430 | "outputs": [], 431 | "source": [ 432 | "epochs = 7\n", 433 | "\n", 434 | "for epoch in range(epochs):\n", 435 | " loss_val = 0\n", 436 | " acc_val = 0\n", 437 | " for sample in (pbar := tqdm(train_loader)):\n", 438 | " img, label = sample['img'], sample['label']\n", 439 | " optimizer.zero_grad()\n", 440 | "\n", 441 | " #label = F.one_hot(label, 2).float()\n", 442 | " pred = model(img)\n", 443 | "\n", 444 | " loss = loss_fn(pred, label)\n", 445 | "\n", 446 | " loss.backward()\n", 447 | " loss_item = loss.item()\n", 448 | " loss_val += loss_item\n", 449 | "\n", 450 | " optimizer.step()\n", 451 | "\n", 452 | " acc_current = accuracy(pred, label)\n", 453 | " acc_val += acc_current\n", 454 | "\n", 455 | " pbar.set_description(f'loss: {loss_item:.5f}\\taccuracy: {acc_current:.3f}')\n", 456 | " print(loss_val/len(train_loader))\n", 457 | " print(acc_val/len(train_loader))" 458 | ] 459 | }, 460 | { 461 | "cell_type": "code", 462 | "execution_count": 35, 463 | "id": "0da9e2d1", 464 | "metadata": {}, 465 | "outputs": [ 466 | { 467 | "name": "stderr", 468 | "output_type": "stream", 469 | "text": [ 470 | "loss: 9.98807\taccuracy: 0.000: 0%| | 8/2000 [00:04<20:21, 1.63it/s]\n" 471 | ] 472 | }, 473 | { 474 | "ename": "KeyboardInterrupt", 475 | "evalue": "", 476 | "output_type": "error", 
477 | "traceback": [ 478 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 479 | "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", 480 | "Input \u001b[0;32mIn [35]\u001b[0m, in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 5\u001b[0m img, label \u001b[38;5;241m=\u001b[39m sample[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mimg\u001b[39m\u001b[38;5;124m'\u001b[39m], sample[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mlabel\u001b[39m\u001b[38;5;124m'\u001b[39m]\u001b[38;5;241m.\u001b[39mfloat()\n\u001b[1;32m 7\u001b[0m \u001b[38;5;66;03m#label = F.one_hot(label, 2).float()\u001b[39;00m\n\u001b[0;32m----> 8\u001b[0m pred \u001b[38;5;241m=\u001b[39m \u001b[43mmodel\u001b[49m\u001b[43m(\u001b[49m\u001b[43mimg\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 10\u001b[0m loss \u001b[38;5;241m=\u001b[39m loss_fn(pred, label)\n\u001b[1;32m 11\u001b[0m loss_item \u001b[38;5;241m=\u001b[39m loss\u001b[38;5;241m.\u001b[39mitem()\n", 481 | "File \u001b[0;32m~/pyenv/py310/lib/python3.10/site-packages/torch/nn/modules/module.py:1130\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m 1126\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1127\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1128\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1129\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1130\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1131\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1132\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n", 482 | "Input \u001b[0;32mIn [25]\u001b[0m, in \u001b[0;36mVGG16.forward\u001b[0;34m(self, x)\u001b[0m\n\u001b[1;32m 40\u001b[0m out \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mconv2_1(out)\n\u001b[1;32m 41\u001b[0m out \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mact(out)\n\u001b[0;32m---> 42\u001b[0m out \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconv2_2\u001b[49m\u001b[43m(\u001b[49m\u001b[43mout\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 43\u001b[0m out \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mact(out)\n\u001b[1;32m 45\u001b[0m out \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmaxpool(out)\n", 483 | "File \u001b[0;32m~/pyenv/py310/lib/python3.10/site-packages/torch/nn/modules/module.py:1130\u001b[0m, in 
\u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m 1126\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1127\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1128\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1129\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1130\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 1131\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m 1132\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n", 484 | "File \u001b[0;32m~/pyenv/py310/lib/python3.10/site-packages/torch/nn/modules/conv.py:457\u001b[0m, in \u001b[0;36mConv2d.forward\u001b[0;34m(self, input)\u001b[0m\n\u001b[1;32m 456\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m: Tensor) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Tensor:\n\u001b[0;32m--> 457\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_conv_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbias\u001b[49m\u001b[43m)\u001b[49m\n", 485 | "File \u001b[0;32m~/pyenv/py310/lib/python3.10/site-packages/torch/nn/modules/conv.py:453\u001b[0m, in \u001b[0;36mConv2d._conv_forward\u001b[0;34m(self, input, weight, bias)\u001b[0m\n\u001b[1;32m 449\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpadding_mode \u001b[38;5;241m!=\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mzeros\u001b[39m\u001b[38;5;124m'\u001b[39m:\n\u001b[1;32m 450\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m F\u001b[38;5;241m.\u001b[39mconv2d(F\u001b[38;5;241m.\u001b[39mpad(\u001b[38;5;28minput\u001b[39m, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_reversed_padding_repeated_twice, mode\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpadding_mode),\n\u001b[1;32m 451\u001b[0m weight, bias, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride,\n\u001b[1;32m 452\u001b[0m _pair(\u001b[38;5;241m0\u001b[39m), \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdilation, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mgroups)\n\u001b[0;32m--> 453\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m 
\u001b[43mF\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconv2d\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mweight\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbias\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mstride\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 454\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mpadding\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdilation\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgroups\u001b[49m\u001b[43m)\u001b[49m\n", 486 | "\u001b[0;31mKeyboardInterrupt\u001b[0m: " 487 | ] 488 | } 489 | ], 490 | "source": [ 491 | "loss_val = 0\n", 492 | "acc_val = 0\n", 493 | "for sample in (pbar := tqdm(test_loader)):\n", 494 | " with torch.no_grad():\n", 495 | " img, label = sample['img'], sample['label'].float()\n", 496 | "\n", 497 | " #label = F.one_hot(label, 2).float()\n", 498 | " pred = model(img)\n", 499 | "\n", 500 | " loss = loss_fn(pred, label)\n", 501 | " loss_item = loss.item()\n", 502 | " loss_val += loss_item\n", 503 | "\n", 504 | " acc_current = accuracy(pred, label)\n", 505 | " acc_val += acc_current\n", 506 | "\n", 507 | " pbar.set_description(f'loss: {loss_item:.5f}\\taccuracy: {acc_current:.3f}')\n", 508 | "print(loss_val/len(train_loader))\n", 509 | "print(acc_val/len(train_loader))" 510 | ] 511 | }, 512 | { 513 | "cell_type": "code", 514 | "execution_count": null, 515 | "id": "69edf77e", 516 | "metadata": {}, 517 | "outputs": [], 518 | "source": [] 519 | }, 520 | { 521 | "cell_type": "code", 522 | "execution_count": null, 523 | "id": "5faee4bf", 524 | "metadata": {}, 525 | "outputs": [], 526 | "source": [] 527 | } 528 | ], 529 | "metadata": { 530 | "kernelspec": { 531 | "display_name": "Python 3 (ipykernel)", 532 | "language": "python", 533 | "name": "python3" 534 | }, 535 | "language_info": { 536 | "codemirror_mode": { 537 | "name": "ipython", 538 | "version": 3 539 | }, 540 | "file_extension": ".py", 541 | "mimetype": "text/x-python", 542 | "name": "python", 543 | "nbconvert_exporter": "python", 544 | "pygments_lexer": "ipython3", 545 | "version": "3.10.8" 546 | } 547 | }, 548 | "nbformat": 4, 549 | "nbformat_minor": 5 550 | } 551 | -------------------------------------------------------------------------------- /lesson_4/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_4/1.png -------------------------------------------------------------------------------- /lesson_4/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_4/2.png -------------------------------------------------------------------------------- /lesson_5/Dogs and cats.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "b792b114", 7 | "metadata": {}, 8 | "outputs": [ 9 | { 10 | "name": "stderr", 11 | "output_type": "stream", 12 | "text": [ 13 | 
"/home/magorokhoov/pyenv/py310/lib/python3.10/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", 14 | " from .autonotebook import tqdm as notebook_tqdm\n" 15 | ] 16 | } 17 | ], 18 | "source": [ 19 | "import torch\n", 20 | "import torch.nn as nn\n", 21 | "import torch.nn.functional as F\n", 22 | "\n", 23 | "import torchvision as tv\n", 24 | "\n", 25 | "import os\n", 26 | "import cv2\n", 27 | "import numpy as np\n", 28 | "import matplotlib.pyplot as plt\n", 29 | "from tqdm.autonotebook import tqdm\n", 30 | "\n", 31 | "from torch.cuda.amp import autocast, GradScaler" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": null, 37 | "id": "f1bd7d1c", 38 | "metadata": {}, 39 | "outputs": [], 40 | "source": [] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": null, 45 | "id": "99e9a0ac", 46 | "metadata": {}, 47 | "outputs": [], 48 | "source": [] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": 2, 53 | "id": "8eabb6a7", 54 | "metadata": {}, 55 | "outputs": [], 56 | "source": [ 57 | "class Dataset2class(torch.utils.data.Dataset):\n", 58 | " def __init__(self, path_dir1:str, path_dir2:str):\n", 59 | " super().__init__()\n", 60 | " \n", 61 | " self.path_dir1 = path_dir1\n", 62 | " self.path_dir2 = path_dir2\n", 63 | " \n", 64 | " self.dir1_list = sorted(os.listdir(path_dir1))\n", 65 | " self.dir2_list = sorted(os.listdir(path_dir2))\n", 66 | " \n", 67 | " def __len__(self):\n", 68 | " return len(self.dir1_list) + len(self.dir2_list)\n", 69 | " \n", 70 | " def __getitem__(self, idx):\n", 71 | " \n", 72 | " if idx < len(self.dir1_list):\n", 73 | " class_id = 0\n", 74 | " img_path = os.path.join(self.path_dir1, self.dir1_list[idx])\n", 75 | " else:\n", 76 | " class_id = 1\n", 77 | " idx -= len(self.dir1_list)\n", 78 | " img_path = os.path.join(self.path_dir2, self.dir2_list[idx])\n", 79 | " \n", 80 | " img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n", 81 | " img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n", 82 | " img = img.astype(np.float32)\n", 83 | " img = img/255.0\n", 84 | " \n", 85 | " img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)\n", 86 | " img = img.transpose((2, 0, 1))\n", 87 | " \n", 88 | " t_img = torch.from_numpy(img)\n", 89 | " t_class_id = torch.tensor(class_id)\n", 90 | " \n", 91 | " return {'img': t_img, 'label': t_class_id}\n", 92 | " " 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": 3, 98 | "id": "ae43db49", 99 | "metadata": {}, 100 | "outputs": [], 101 | "source": [ 102 | "train_dogs_path = './dataset/training_set/dogs/'\n", 103 | "train_cats_path = './dataset/training_set/cats/'\n", 104 | "test_dogs_path = './dataset/test_set/dogs/'\n", 105 | "test_cats_path = './dataset/test_set/cats/'\n", 106 | "\n", 107 | "train_ds_catsdogs = Dataset2class(train_dogs_path, train_cats_path)\n", 108 | "test_ds_catsdogs = Dataset2class(test_dogs_path, test_cats_path)" 109 | ] 110 | }, 111 | { 112 | "cell_type": "code", 113 | "execution_count": 4, 114 | "id": "a169d748", 115 | "metadata": {}, 116 | "outputs": [ 117 | { 118 | "data": { 119 | "text/plain": [ 120 | "8000" 121 | ] 122 | }, 123 | "execution_count": 4, 124 | "metadata": {}, 125 | "output_type": "execute_result" 126 | } 127 | ], 128 | "source": [ 129 | "len(train_ds_catsdogs)" 130 | ] 131 | }, 132 | { 133 | "cell_type": "code", 134 | "execution_count": 5, 135 | "id": "683ec6a5", 136 | "metadata": {}, 137 | "outputs": [ 138 | { 139 | 
"data": { 140 | "text/plain": [ 141 | "2000" 142 | ] 143 | }, 144 | "execution_count": 5, 145 | "metadata": {}, 146 | "output_type": "execute_result" 147 | } 148 | ], 149 | "source": [ 150 | "len(test_ds_catsdogs)" 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "execution_count": 6, 156 | "id": "bc34837f", 157 | "metadata": {}, 158 | "outputs": [], 159 | "source": [ 160 | "batch_size = 16\n", 161 | "\n", 162 | "train_loader = torch.utils.data.DataLoader(\n", 163 | " train_ds_catsdogs, shuffle=True, \n", 164 | " batch_size=batch_size, num_workers=1, drop_last=True\n", 165 | ")\n", 166 | "test_loader = torch.utils.data.DataLoader(\n", 167 | " train_ds_catsdogs, shuffle=True,\n", 168 | " batch_size=batch_size, num_workers=1, drop_last=False\n", 169 | ")" 170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "execution_count": 127, 175 | "id": "64358fde", 176 | "metadata": {}, 177 | "outputs": [], 178 | "source": [ 179 | "class ConvNet(nn.Module):\n", 180 | " def __init__(self):\n", 181 | " super().__init__()\n", 182 | " \n", 183 | " self.act = nn.LeakyReLU(0.2)\n", 184 | " self.maxpool = nn.MaxPool2d(2,2)\n", 185 | " self.conv0 = nn.Conv2d(3, 128, 3, stride=1, padding=0)\n", 186 | " self.conv1 = nn.Conv2d(128, 128, 3, stride=1, padding=0)\n", 187 | " self.conv2 = nn.Conv2d(128, 128, 3, stride=1, padding=0)\n", 188 | " self.conv3 = nn.Conv2d(128, 256, 3, stride=1, padding=0)\n", 189 | " \n", 190 | " self.adaptivepool = nn.AdaptiveAvgPool2d((1,1))\n", 191 | " self.flatten = nn.Flatten()\n", 192 | " self.linear1 = nn.Linear(256, 20)\n", 193 | " self.linear2 = nn.Linear(20, 2)\n", 194 | " \n", 195 | " def forward(self, x):\n", 196 | " \n", 197 | "\n", 198 | " out = self.conv0(x)\n", 199 | " out = self.act(out)\n", 200 | " out = self.maxpool(out)\n", 201 | "\n", 202 | " out = self.conv1(out)\n", 203 | " out = self.act(out)\n", 204 | " out = self.maxpool(out)\n", 205 | "\n", 206 | " out = self.conv2(out)\n", 207 | " out = self.act(out)\n", 208 | " out = self.maxpool(out)\n", 209 | "\n", 210 | " out = self.conv3(out)\n", 211 | " out = self.act(out)\n", 212 | " \n", 213 | " out = self.adaptivepool(out)\n", 214 | " out = self.flatten(out)\n", 215 | " out = self.linear1(out)\n", 216 | " out = self.act(out)\n", 217 | " out = self.linear2(out)\n", 218 | "\n", 219 | " return out\n", 220 | " " 221 | ] 222 | }, 223 | { 224 | "cell_type": "code", 225 | "execution_count": null, 226 | "id": "a24dfe97", 227 | "metadata": {}, 228 | "outputs": [], 229 | "source": [] 230 | }, 231 | { 232 | "cell_type": "code", 233 | "execution_count": 235, 234 | "id": "08ad0259", 235 | "metadata": {}, 236 | "outputs": [], 237 | "source": [ 238 | "class ResBlock(nn.Module):\n", 239 | " def __init__(self, nc):\n", 240 | " super().__init__()\n", 241 | " \n", 242 | " self.conv0 = nn.Conv2d(nc, nc, kernel_size=3, padding=1)\n", 243 | " self.norm0 = nn.BatchNorm2d(nc)\n", 244 | " self.act = nn.LeakyReLU(0.2)\n", 245 | " self.conv1 = nn.Conv2d(nc, nc, kernel_size=3, padding=1)\n", 246 | " self.norm1 = nn.BatchNorm2d(nc)\n", 247 | " \n", 248 | " def forward(self, x):\n", 249 | " out = self.conv0(x)\n", 250 | " out = self.norm0(out)\n", 251 | " out = self.act(out)\n", 252 | " out = self.conv1(out)\n", 253 | " #out = self.norm1(out)\n", 254 | " \n", 255 | " return x + out # self.act(x + out)" 256 | ] 257 | }, 258 | { 259 | "cell_type": "code", 260 | "execution_count": 260, 261 | "id": "5fa79977", 262 | "metadata": {}, 263 | "outputs": [], 264 | "source": [ 265 | "class BottleneckBlock(nn.Module):\n", 266 | " def 
__init__(self, nc):\n", 267 | " super().__init__()\n", 268 | " self.act = nn.LeakyReLU(0.2)\n", 269 | " \n", 270 | " self.conv0 = nn.Conv2d(nc, nc//4, kernel_size=1, padding=0)\n", 271 | " self.norm0 = nn.BatchNorm2d(nc//4)\n", 272 | " self.conv1 = nn.Conv2d(nc//4, nc//4, kernel_size=3, padding=1)\n", 273 | " self.norm1 = nn.BatchNorm2d(nc//4)\n", 274 | " self.conv2 = nn.Conv2d(nc//4, nc, kernel_size=1, padding=0)\n", 275 | " \n", 276 | " def forward(self, x):\n", 277 | " out = self.conv0(x)\n", 278 | " out = self.norm0(out)\n", 279 | " out = self.act(out)\n", 280 | " out = self.conv1(out)\n", 281 | " out = self.norm1(out)\n", 282 | " out = self.act(out)\n", 283 | " out = self.conv2(out)\n", 284 | " \n", 285 | " return x + out # self.act(x + out)" 286 | ] 287 | }, 288 | { 289 | "cell_type": "code", 290 | "execution_count": 261, 291 | "id": "c79d3f9f", 292 | "metadata": {}, 293 | "outputs": [], 294 | "source": [ 295 | "class ResTruck(nn.Module):\n", 296 | " def __init__(self, nc, num_blocks, block_type='classic'):\n", 297 | " super().__init__()\n", 298 | " \n", 299 | " truck = []\n", 300 | " for i in range(num_blocks):\n", 301 | " if block_type == 'classic':\n", 302 | " truck += [ResBlock(nc)]\n", 303 | " elif block_type == 'bottleneck':\n", 304 | " truck += [BottleneckBlock(nc)]\n", 305 | " else:\n", 306 | " raise NotImplementedError(f'{block_type} is not implemented')\n", 307 | " self.truck = nn.Sequential(*truck)\n", 308 | " \n", 309 | " def forward(self, x):\n", 310 | " return self.truck(x)" 311 | ] 312 | }, 313 | { 314 | "cell_type": "code", 315 | "execution_count": 262, 316 | "id": "37ead73b", 317 | "metadata": {}, 318 | "outputs": [], 319 | "source": [ 320 | "class PsevoResNet(nn.Module):\n", 321 | " def __init__(self, in_nc, nc, out_nc, block_type):\n", 322 | " super().__init__()\n", 323 | " \n", 324 | " self.conv0 = nn.Conv2d(in_nc, nc, kernel_size=7, stride=2)\n", 325 | " #self.norm\n", 326 | " self.act = nn.LeakyReLU(0.2, inplace=True)\n", 327 | " self.maxpool = nn.MaxPool2d(2,2)\n", 328 | " \n", 329 | " self.layer1 = ResTruck(nc, 3, block_type=block_type)\n", 330 | " self.conv1 = nn.Conv2d(nc, 2*nc, 3, padding=1, stride=2)\n", 331 | " self.layer2 = ResTruck(2*nc, 4, block_type=block_type)\n", 332 | " self.conv2 = nn.Conv2d(2*nc, 4*nc, 3, padding=1, stride=2)\n", 333 | " self.layer3 = ResTruck(4*nc, 6, block_type=block_type)\n", 334 | " self.conv3 = nn.Conv2d(4*nc, 4*nc, 3, padding=1, stride=2)\n", 335 | " self.layer4 = ResTruck(4*nc, 3, block_type=block_type)\n", 336 | " \n", 337 | " self.avgpool = nn.AdaptiveAvgPool2d((1,1))\n", 338 | " self.flatten = nn.Flatten()\n", 339 | " self.linear = nn.Linear(4*nc, out_nc)\n", 340 | " \n", 341 | " def forward(self, x):\n", 342 | " out = self.conv0(x)\n", 343 | " out = self.act(out)\n", 344 | " out = self.maxpool(out)\n", 345 | " out = self.layer1(out)\n", 346 | " out = self.conv1(out)\n", 347 | " out = self.layer2(out)\n", 348 | " out = self.conv2(out)\n", 349 | " out = self.layer3(out)\n", 350 | " out = self.conv3(out)\n", 351 | " out = self.layer4(out)\n", 352 | " \n", 353 | " out = self.avgpool(out)\n", 354 | " out = self.flatten(out)\n", 355 | " out = self.linear(out)\n", 356 | " \n", 357 | " return out" 358 | ] 359 | }, 360 | { 361 | "cell_type": "code", 362 | "execution_count": null, 363 | "id": "d0922f75", 364 | "metadata": {}, 365 | "outputs": [], 366 | "source": [] 367 | }, 368 | { 369 | "cell_type": "code", 370 | "execution_count": 263, 371 | "id": "d27c8db2", 372 | "metadata": {}, 373 | "outputs": [], 374 | "source": [ 375 | 
"def count_parameters(model):\n", 376 | " return sum(p.numel() for p in model.parameters() if p.requires_grad)" 377 | ] 378 | }, 379 | { 380 | "cell_type": "code", 381 | "execution_count": 264, 382 | "id": "e155790d", 383 | "metadata": {}, 384 | "outputs": [], 385 | "source": [ 386 | "model = PsevoResNet(3, 64, 2)" 387 | ] 388 | }, 389 | { 390 | "cell_type": "code", 391 | "execution_count": 265, 392 | "id": "9e9ce7af", 393 | "metadata": {}, 394 | "outputs": [ 395 | { 396 | "data": { 397 | "text/plain": [ 398 | "PsevoResNet(\n", 399 | " (conv0): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2))\n", 400 | " (act): LeakyReLU(negative_slope=0.2, inplace=True)\n", 401 | " (maxpool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n", 402 | " (layer1): ResTruck(\n", 403 | " (truck): Sequential(\n", 404 | " (0): BottleneckBlock(\n", 405 | " (act): LeakyReLU(negative_slope=0.2)\n", 406 | " (conv0): Conv2d(64, 16, kernel_size=(1, 1), stride=(1, 1))\n", 407 | " (norm0): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 408 | " (conv1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 409 | " (norm1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 410 | " (conv2): Conv2d(16, 64, kernel_size=(1, 1), stride=(1, 1))\n", 411 | " )\n", 412 | " (1): BottleneckBlock(\n", 413 | " (act): LeakyReLU(negative_slope=0.2)\n", 414 | " (conv0): Conv2d(64, 16, kernel_size=(1, 1), stride=(1, 1))\n", 415 | " (norm0): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 416 | " (conv1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 417 | " (norm1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 418 | " (conv2): Conv2d(16, 64, kernel_size=(1, 1), stride=(1, 1))\n", 419 | " )\n", 420 | " (2): BottleneckBlock(\n", 421 | " (act): LeakyReLU(negative_slope=0.2)\n", 422 | " (conv0): Conv2d(64, 16, kernel_size=(1, 1), stride=(1, 1))\n", 423 | " (norm0): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 424 | " (conv1): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 425 | " (norm1): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 426 | " (conv2): Conv2d(16, 64, kernel_size=(1, 1), stride=(1, 1))\n", 427 | " )\n", 428 | " )\n", 429 | " )\n", 430 | " (conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", 431 | " (layer2): ResTruck(\n", 432 | " (truck): Sequential(\n", 433 | " (0): BottleneckBlock(\n", 434 | " (act): LeakyReLU(negative_slope=0.2)\n", 435 | " (conv0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1))\n", 436 | " (norm0): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 437 | " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 438 | " (norm1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 439 | " (conv2): Conv2d(32, 128, kernel_size=(1, 1), stride=(1, 1))\n", 440 | " )\n", 441 | " (1): BottleneckBlock(\n", 442 | " (act): LeakyReLU(negative_slope=0.2)\n", 443 | " (conv0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1))\n", 444 | " (norm0): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 445 | " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 446 | " (norm1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, 
track_running_stats=True)\n", 447 | " (conv2): Conv2d(32, 128, kernel_size=(1, 1), stride=(1, 1))\n", 448 | " )\n", 449 | " (2): BottleneckBlock(\n", 450 | " (act): LeakyReLU(negative_slope=0.2)\n", 451 | " (conv0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1))\n", 452 | " (norm0): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 453 | " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 454 | " (norm1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 455 | " (conv2): Conv2d(32, 128, kernel_size=(1, 1), stride=(1, 1))\n", 456 | " )\n", 457 | " (3): BottleneckBlock(\n", 458 | " (act): LeakyReLU(negative_slope=0.2)\n", 459 | " (conv0): Conv2d(128, 32, kernel_size=(1, 1), stride=(1, 1))\n", 460 | " (norm0): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 461 | " (conv1): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 462 | " (norm1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 463 | " (conv2): Conv2d(32, 128, kernel_size=(1, 1), stride=(1, 1))\n", 464 | " )\n", 465 | " )\n", 466 | " )\n", 467 | " (conv2): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", 468 | " (layer3): ResTruck(\n", 469 | " (truck): Sequential(\n", 470 | " (0): BottleneckBlock(\n", 471 | " (act): LeakyReLU(negative_slope=0.2)\n", 472 | " (conv0): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))\n", 473 | " (norm0): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 474 | " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 475 | " (norm1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 476 | " (conv2): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1))\n", 477 | " )\n", 478 | " (1): BottleneckBlock(\n", 479 | " (act): LeakyReLU(negative_slope=0.2)\n", 480 | " (conv0): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))\n", 481 | " (norm0): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 482 | " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 483 | " (norm1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 484 | " (conv2): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1))\n", 485 | " )\n", 486 | " (2): BottleneckBlock(\n", 487 | " (act): LeakyReLU(negative_slope=0.2)\n", 488 | " (conv0): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))\n", 489 | " (norm0): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 490 | " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 491 | " (norm1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 492 | " (conv2): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1))\n", 493 | " )\n", 494 | " (3): BottleneckBlock(\n", 495 | " (act): LeakyReLU(negative_slope=0.2)\n", 496 | " (conv0): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))\n", 497 | " (norm0): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 498 | " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 499 | " (norm1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 500 | " (conv2): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1))\n", 501 | " )\n", 502 | " (4): BottleneckBlock(\n", 503 | " (act): 
LeakyReLU(negative_slope=0.2)\n", 504 | " (conv0): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))\n", 505 | " (norm0): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 506 | " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 507 | " (norm1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 508 | " (conv2): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1))\n", 509 | " )\n", 510 | " (5): BottleneckBlock(\n", 511 | " (act): LeakyReLU(negative_slope=0.2)\n", 512 | " (conv0): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))\n", 513 | " (norm0): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 514 | " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 515 | " (norm1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 516 | " (conv2): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1))\n", 517 | " )\n", 518 | " )\n", 519 | " )\n", 520 | " (conv3): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))\n", 521 | " (layer4): ResTruck(\n", 522 | " (truck): Sequential(\n", 523 | " (0): BottleneckBlock(\n", 524 | " (act): LeakyReLU(negative_slope=0.2)\n", 525 | " (conv0): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))\n", 526 | " (norm0): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 527 | " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 528 | " (norm1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 529 | " (conv2): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1))\n", 530 | " )\n", 531 | " (1): BottleneckBlock(\n", 532 | " (act): LeakyReLU(negative_slope=0.2)\n", 533 | " (conv0): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))\n", 534 | " (norm0): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 535 | " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 536 | " (norm1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 537 | " (conv2): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1))\n", 538 | " )\n", 539 | " (2): BottleneckBlock(\n", 540 | " (act): LeakyReLU(negative_slope=0.2)\n", 541 | " (conv0): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1))\n", 542 | " (norm0): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 543 | " (conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n", 544 | " (norm1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 545 | " (conv2): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1))\n", 546 | " )\n", 547 | " )\n", 548 | " )\n", 549 | " (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))\n", 550 | " (flatten): Flatten(start_dim=1, end_dim=-1)\n", 551 | " (linear): Linear(in_features=256, out_features=2, bias=True)\n", 552 | ")" 553 | ] 554 | }, 555 | "execution_count": 265, 556 | "metadata": {}, 557 | "output_type": "execute_result" 558 | } 559 | ], 560 | "source": [ 561 | "model" 562 | ] 563 | }, 564 | { 565 | "cell_type": "code", 566 | "execution_count": 258, 567 | "id": "e5b0ff4f", 568 | "metadata": {}, 569 | "outputs": [ 570 | { 571 | "data": { 572 | "text/plain": [ 573 | "1685986" 574 | ] 575 | }, 576 | "execution_count": 258, 577 | "metadata": {}, 578 | "output_type": "execute_result" 579 | } 580 | ], 581 | "source": [ 582 | "count_parameters(model)" 583 
| ] 584 | }, 585 | { 586 | "cell_type": "code", 587 | "execution_count": 266, 588 | "id": "05bbec58", 589 | "metadata": {}, 590 | "outputs": [], 591 | "source": [ 592 | "for sample in train_loader:\n", 593 | " img = sample['img']\n", 594 | " label = sample['label']\n", 595 | " model(img)\n", 596 | " break" 597 | ] 598 | }, 599 | { 600 | "cell_type": "code", 601 | "execution_count": null, 602 | "id": "e0f15479", 603 | "metadata": {}, 604 | "outputs": [], 605 | "source": [] 606 | }, 607 | { 608 | "cell_type": "code", 609 | "execution_count": 267, 610 | "id": "14d2b0da", 611 | "metadata": {}, 612 | "outputs": [], 613 | "source": [ 614 | "loss_fn = nn.CrossEntropyLoss()\n", 615 | "optimizer = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))\n", 616 | "scheduler = torch.optim.lr_scheduler.ExponentialLR(\n", 617 | " optimizer,\n", 618 | " gamma = 0.6\n", 619 | ")" 620 | ] 621 | }, 622 | { 623 | "cell_type": "code", 624 | "execution_count": 268, 625 | "id": "c995c23b", 626 | "metadata": {}, 627 | "outputs": [], 628 | "source": [ 629 | "def accuracy(pred, label):\n", 630 | " answer = F.softmax(pred.detach()).numpy().argmax(1) == label.numpy().argmax(1) \n", 631 | " return answer.mean()" 632 | ] 633 | }, 634 | { 635 | "cell_type": "code", 636 | "execution_count": 269, 637 | "id": "85413171", 638 | "metadata": {}, 639 | "outputs": [], 640 | "source": [ 641 | "device = 'cuda' # if torch.cuda.is_available() else 'cpu'\n", 642 | "model = model.to(device)\n", 643 | "loss_fn = loss_fn.to(device)" 644 | ] 645 | }, 646 | { 647 | "cell_type": "code", 648 | "execution_count": 270, 649 | "id": "00ad73b6", 650 | "metadata": {}, 651 | "outputs": [], 652 | "source": [ 653 | "use_amp = True\n", 654 | "scaler = torch.cuda.amp.GradScaler()" 655 | ] 656 | }, 657 | { 658 | "cell_type": "code", 659 | "execution_count": 271, 660 | "id": "c9e82536", 661 | "metadata": {}, 662 | "outputs": [], 663 | "source": [ 664 | "torch.backends.cudnn.benchmark = True\n", 665 | "torch.backends.cudnn.deterministic = False" 666 | ] 667 | }, 668 | { 669 | "cell_type": "code", 670 | "execution_count": 272, 671 | "id": "b90f22ff", 672 | "metadata": {}, 673 | "outputs": [ 674 | { 675 | "name": "stderr", 676 | "output_type": "stream", 677 | "text": [ 678 | " 0%| | 0/500 [00:00]" 880 | ] 881 | }, 882 | "execution_count": 273, 883 | "metadata": {}, 884 | "output_type": "execute_result" 885 | }, 886 | { 887 | "data": { 888 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXQAAAEICAYAAABPgw/pAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAAsTAAALEwEAmpwYAAA6QElEQVR4nO3dd3hUVf7H8fdJb5AAKZQktBB6lSIgXXoRVhFwV8Xfum6xrYA0UWwoHUXdtWBZG1iRKor0DqETAiShJSEhBdJ75vz+uAMEpYS0m0y+r+fJE+beO3e+M49+5uTcc89RWmuEEEJUfnZmFyCEEKJ0SKALIYSNkEAXQggbIYEuhBA2QgJdCCFshAS6EELYCAl0UWUopXoopU6aXYcQZUUCXdyWUuqsUureClDHZ0oprZTqXGhbkFKqSDdTaK23aa2bllFtwUqp75RSiUqpFKXUEaXUBKWUfVm8XkkppRpYP0sHs2sRpUcCXVQ2l4DXzS6iMKVUY2APEAW01lp7AqOBjkA1M2sTVYsEuig2pZSzUuotpdQF689bSiln6z5vpdRqpVSyUuqSUmqbUsrOum+KUipGKZWmlDqplOp3By/7P6CNUqrXTWp6TCkVZj33aaXU3wvt662Uii5Uw/e/e+7bSqnF1n97KqU+VkrFWmt9/Rat7VeAnVrrCVrrWACt9Umt9UNa62Tr+UYopUKtn8dmpVTzQq97Vik1ydqqT1FKfaOUcrHuC1NKDSt0rINSKkEp1aFQK/tRpdR5618HLxQ61k4pNVUpFamUSlJKfauUqmndvdX6O1kpla6U6nr7j15UdBLooiReAO4G2gFtgc7ADOu+iUA04AP4AdMBrZRqCjwFdNJaVwMGAmcBlFL3KKWSb/OamcAbwKyb7I8HhgHVgceARUqpDjc4bhkwRClVzfra9sCDwNfW/Z8B+UAQ0B4YADx+k9e8F/j+JvtQSgUDS4F/Y3wea4FVSimnQoc9CAwCGgJtgPHW7UuBcYWOGwgkaq0PFNp2D9AU6Ae8VOjL4mlgJNALqAtcBt6z7utp/e2ltfbQWu+6Wf2i8pBAFyXxZ+BVrXW81joBo6X6sHVfHlAHqK+1zrP2X2ugAHAGWiilHLXWZ7XWkQBa6+1aa68ivO4HQKBSavDvd2it12itI7VhC/Ar0OMGx50DDgCjrJv6Apla691KKT9gCPBvrXWG1joeWASMvUk9tYDYW9Q7BlijtV6vtc4D5gOuQLdCxyzWWl/QWl8CVmF8SYLxBTNCKeVmffwQRsgX9orWOktrfRg4jPHlCvAP4AWtdbTWOgd4GXhA+s1tlwS6KIm6wLlCj89ZtwHMAyKAX61dH1MBtNYRGC3Vl4F4pdQypVRd7oA1nF6z/lxHKTVYKbXb2s2TjBHM3jc51ddca/0+xLXWeX3AEYi1dpEkY3yJ+N7kPEkYX143c93npLW2YPS31yt0TFyhf2cCHtZjI4AwYLg11EcUqvOWz7W+j+WF3kMYxheq3y1qFZWYBLooiQsYoXFFoHUbWus0rfVErXUjjBCacKWvXGv9tdb6HutzNTCnGK/9KeAF/OnKBmv//Q8YLWA/a2t/LaBuco7vgN5KKX+MlvqVoIwCcgBvrbWX9ae61rrlTc7zG3D/LWq97nNSSikgAIi51Rss5Eq3y33AcWvIF0UUMLjQe/DSWrtorWMwPndhYyTQRVE5KqVcCv04YATNDKWUj1LKG3gJ+BJAKTXMOqRQASkYLUOLUqqpUqqvNXyzgSzAcqfFaK3zgZnAlEKbnTC6cxKAfGuXzIBbnCMB2Izx5XBGax1m3R6L0VWzQClV3XpxsfHNLsRa6+imlJqnlKptff9BSqkvlVJewLfAUKVUP6WUI8b1hRxgZxHf7jLr+/gnf2yd38r7wCylVH1rTT5Kqfus+xIwPvdGd3A+UcFJoIuiWosRvld+XsYYPhgCHAGOYvRJXxlS2ASj5ZoO7AL+o7XehBG4s4FEjK4CX2AaXL3xJ/0OalpKob5rrXUa8AxGgF7G6EZZeZtzfI1xUfP3QfkIxhfEceu5vucm3SrWawBdgQZAqFIqBeMvhRAgTWt9EvgL8A7G+x4ODNda5xblTVq/YHZh9Ll/U5TnWL2N8f5/VUqlAbuBLtZzZmJcWN5h7ZK5+w7OKyooJQtcCCGEbZAWuhBC2AgJdCGEsBES6EIIYSMk0IUQwkaYdseYt7e3btCggVkvL4QQldL+/fsTtdY+N9pnWqA3aNCAkJAQs15eCCEqJaXUuZvtky4XIYSwERLoQghhIyTQhRDCRkigCyGEjZBAF0IIGyGBLoQQNkICXQghbESlC/Rzqed4+8Db5FnyzC5FCCEqlEoX6BvPb2TJ0SU8/svjJGQmmF2OEEJUGJUu0B9r9Rhv9niTsEthPLj6QULi5G5TIYSAShjoAMMaDeOrIV/h4ejB478+zv9C/4cs1CGEqOqKFOhKqUFKqZNKqYgrq7f/bv8ipdQh688p6wrjZapJjSYsHbqUPgF9mB8ynwmbJ5CeeyerlwkhhG25baArpeyB94DBQAtgnFKqReFjtNbPaa3baa3bYayb+GMZ1PoHHk4eLOy9kEkdJ7EpahNj14wl/HJ4eby0EEJUOEVpoXcGIrTWp62L2i4D7rvF8eMwFu8tF0opHm35KEsGLCEjL4M/r/0zq0+vLq+XF0KICqMogV4PiCr0ONq67Q+UUvWBhsDGm+x/QikVopQKSUgo5giVvGxI/+NzO9buyLfDvqV5zeZM2zaNWbtnkVtQpEXVhRDCJpT2RdGxwPda64Ib7dRaf6i17qi17ujjc8P52W/rxKoFZC1ozdGvpnP58qXr9vm4+bBk4BLGtxzPspPLeGzdY8RlxBXrdYQQorIpSqDHAAGFHvtbt93IWMq4uyXGtzd77drSOvw98t9qx6dvv8h3e0+TkmncaORo58jEjhNZ2HshkSmRjF41mp0XdpZlSUIIUSGo2w33U0o5AKeAfhhBvg94SGsd+rvjmgHrgIa6CGMIO3bsqIu7YpHWmtMHN+G06WUC0g4TaanDIssYMhoNYWjbevRv4YenqyNnUs4wYfMEIpMjear9Uzze+nHsVKUcqSmEEAAopfZrrTvecF9Rxm8rpYYAbwH2wCda61lKqVeBEK31SusxLwMuWus/DGu8kZIE+lVao0/+TM66l3BJDueYCuaV7LEcsmtOzyY+DG1Th+7B1Vl4YBZrz6ylp39P3rjnDTydPUv2ukIIYZISB3pZKJVAv6IgHw4vRW96A5V2gVNe9/BKxgPsSPPFyd6OHsHe+Nbdz7q4D/Bz82Nh74W0qNXi9ucVQogKxvYD/YrcTNjzPmx/C52bRlLQA3zh8me+OVlAXGo2zh5ReAR8jUWlM7njNMa1GF26ry+EEGWs6gT6FZmXYNsC2PshKDt0539wqP5jrDiZwZrQcNKr/w8Hjwj8VA+ebPM8A1sE4ObkUDa1CCFEKap6gX7F5XOwaRYc+RZcvaDHJCwdH2dvdBrz977DyZzlFGTXwRL3CP2CWjC0TR36NPXF1cm+bOsSQohiqrqBfk
[... remainder of base64-encoded PNG image data (loss-curves plot) omitted ...]\n",
889 |       "text/plain": [
890 |        ""
891 |       ]
892 |      },
893 |      "metadata": {
894 |       "needs_background": "light"
895 |      },
896 |      "output_type": "display_data"
897 |     }
898 |    ],
899 |    "source": [
900 |     "plt.title('Loss: Naive Convnet')\n",
901 |     "plt.plot(loss_epochs_list_naive)\n",
902 |     "plt.plot(loss_epochs_list_resnet_v1)\n",
903 |     "plt.plot(loss_epochs_list)"
904 |    ]
905 |   },
906 |   {
907 |    "cell_type": "code",
908 |    "execution_count": 274,
909 |    "id": "6a7f5bbd",
910 |    "metadata": {},
911 |    "outputs": [
912 |     {
913 |      "data": {
914 |       "text/plain": [
915 |        "[]"
916 |       ]
917 |      },
918 |      "execution_count": 274,
919 |      "metadata": {},
920 |      "output_type": "execute_result"
921 |     },
922 |     {
923 |      "data": {
924 |       "image/png": "[... base64-encoded PNG image data (accuracy-curves plot) omitted ...]\n",
925 |       "text/plain": [
926 |       "
" 927 | ] 928 | }, 929 | "metadata": { 930 | "needs_background": "light" 931 | }, 932 | "output_type": "display_data" 933 | } 934 | ], 935 | "source": [ 936 | "plt.title('Accuracy: Naive Convnet')\n", 937 | "plt.plot(acc_epochs_list_naive)\n", 938 | "plt.plot(acc_epochs_list_resnet_v1)\n", 939 | "plt.plot(acc_epochs_list)" 940 | ] 941 | }, 942 | { 943 | "cell_type": "code", 944 | "execution_count": null, 945 | "id": "35a0fbf8", 946 | "metadata": {}, 947 | "outputs": [], 948 | "source": [] 949 | } 950 | ], 951 | "metadata": { 952 | "kernelspec": { 953 | "display_name": "Python 3 (ipykernel)", 954 | "language": "python", 955 | "name": "python3" 956 | }, 957 | "language_info": { 958 | "codemirror_mode": { 959 | "name": "ipython", 960 | "version": 3 961 | }, 962 | "file_extension": ".py", 963 | "mimetype": "text/x-python", 964 | "name": "python", 965 | "nbconvert_exporter": "python", 966 | "pygments_lexer": "ipython3", 967 | "version": "3.10.8" 968 | } 969 | }, 970 | "nbformat": 4, 971 | "nbformat_minor": 5 972 | } 973 | -------------------------------------------------------------------------------- /lesson_6/config.yml: -------------------------------------------------------------------------------- 1 | batch_size: 16 2 | train_dogs_path: './dataset/test_set/dogs/' 3 | train_cats_path: './dataset/test_set/cats/' 4 | test_dogs_path: './dataset/training_set/dogs/' 5 | test_cats_path: './dataset/training_set/cats/' 6 | 7 | network: 8 | in_nc: 3 9 | base_nc: 32 10 | out_nc: 2 -------------------------------------------------------------------------------- /lesson_7/arch.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | class ConvNet(nn.Module): 6 | def __init__(self): 7 | super().__init__() 8 | 9 | self.act = nn.LeakyReLU(0.2) 10 | self.maxpool = nn.MaxPool2d(2,2) 11 | self.conv0 = nn.Conv2d(3, 128, 3, stride=1, padding=0) 12 | self.conv1 = nn.Conv2d(128, 128, 3, stride=1, padding=0) 13 | self.conv2 = nn.Conv2d(128, 128, 3, stride=1, padding=0) 14 | self.conv3 = nn.Conv2d(128, 256, 3, stride=1, padding=0) 15 | 16 | self.adaptivepool = nn.AdaptiveAvgPool2d((1,1)) 17 | self.flatten = nn.Flatten() 18 | self.linear1 = nn.Linear(256, 20) 19 | self.linear2 = nn.Linear(20, 2) 20 | 21 | def forward(self, x): 22 | 23 | 24 | out = self.conv0(x) 25 | out = self.act(out) 26 | out = self.maxpool(out) 27 | 28 | out = self.conv1(out) 29 | out = self.act(out) 30 | out = self.maxpool(out) 31 | 32 | out = self.conv2(out) 33 | out = self.act(out) 34 | out = self.maxpool(out) 35 | 36 | out = self.conv3(out) 37 | out = self.act(out) 38 | 39 | out = self.adaptivepool(out) 40 | out = self.flatten(out) 41 | out = self.linear1(out) 42 | out = self.act(out) 43 | out = self.linear2(out) 44 | 45 | return out 46 | 47 | def count_parameters(model): 48 | return sum(p.numel() for p in model.parameters() if p.requires_grad) 49 | 50 | def get_network(option_network:dict): 51 | arch = option_network['arch'] 52 | 53 | if arch == 'convnet': 54 | network = ConvNet() 55 | 56 | elif arch == 'psevdoresnet': 57 | from archs.psevdo_resnet import PsevdoResNet 58 | network = PsevdoResNet(option_network) 59 | 60 | else: 61 | raise NotImplementedError(f'arch [{arch}] is not implemented') 62 | 63 | return network -------------------------------------------------------------------------------- /lesson_7/archs/__pycache__/psevdo_resnet.cpython-310.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_7/archs/__pycache__/psevdo_resnet.cpython-310.pyc -------------------------------------------------------------------------------- /lesson_7/archs/psevdo_resnet.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | class ResBlock(nn.Module): 6 | def __init__(self, nc): 7 | super().__init__() 8 | 9 | self.conv0 = nn.Conv2d(nc, nc, kernel_size=3, padding=1) 10 | self.norm0 = nn.BatchNorm2d(nc) 11 | self.act = nn.LeakyReLU(0.2) 12 | self.conv1 = nn.Conv2d(nc, nc, kernel_size=3, padding=1) 13 | self.norm1 = nn.BatchNorm2d(nc) 14 | 15 | def forward(self, x): 16 | out = self.conv0(x) 17 | out = self.norm0(out) 18 | out = self.act(out) 19 | out = self.conv1(out) 20 | #out = self.norm1(out) 21 | 22 | return x + out # self.act(x + out) 23 | 24 | class BottleneckBlock(nn.Module): 25 | def __init__(self, nc): 26 | super().__init__() 27 | self.act = nn.LeakyReLU(0.2) 28 | 29 | self.conv0 = nn.Conv2d(nc, nc//4, kernel_size=1, padding=0) 30 | self.norm0 = nn.BatchNorm2d(nc//4) 31 | self.conv1 = nn.Conv2d(nc//4, nc//4, kernel_size=3, padding=1) 32 | self.norm1 = nn.BatchNorm2d(nc//4) 33 | self.conv2 = nn.Conv2d(nc//4, nc, kernel_size=1, padding=0) 34 | 35 | def forward(self, x): 36 | out = self.conv0(x) 37 | out = self.norm0(out) 38 | out = self.act(out) 39 | out = self.conv1(out) 40 | out = self.norm1(out) 41 | out = self.act(out) 42 | out = self.conv2(out) 43 | 44 | return x + out # self.act(x + out) 45 | 46 | class ResTruck(nn.Module): 47 | def __init__(self, nc, num_blocks, block_type='classic'): 48 | super().__init__() 49 | 50 | truck = [] 51 | for i in range(num_blocks): 52 | if block_type == 'classic': 53 | truck += [ResBlock(nc)] 54 | elif block_type == 'bottleneck': 55 | truck += [BottleneckBlock(nc)] 56 | else: 57 | raise NotImplementedError(f'{block_type} is not implemented') 58 | self.truck = nn.Sequential(*truck) 59 | 60 | def forward(self, x): 61 | return self.truck(x) 62 | 63 | 64 | class PsevdoResNet(nn.Module): 65 | def __init__(self, option_network:dict): 66 | super().__init__() 67 | 68 | in_nc = option_network['in_nc'] 69 | nc = option_network['base_nc'] 70 | out_nc = option_network['out_nc'] 71 | block_type = option_network['block_type'] 72 | 73 | self.conv0 = nn.Conv2d(in_nc, nc, kernel_size=7, stride=2) 74 | #self.norm 75 | self.act = nn.LeakyReLU(0.2, inplace=True) 76 | self.maxpool = nn.MaxPool2d(2,2) 77 | 78 | self.layer1 = ResTruck(nc, 3, block_type=block_type) 79 | self.conv1 = nn.Conv2d(nc, 2*nc, 3, padding=1, stride=2) 80 | self.layer2 = ResTruck(2*nc, 4, block_type=block_type) 81 | self.conv2 = nn.Conv2d(2*nc, 4*nc, 3, padding=1, stride=2) 82 | self.layer3 = ResTruck(4*nc, 6, block_type=block_type) 83 | self.conv3 = nn.Conv2d(4*nc, 4*nc, 3, padding=1, stride=2) 84 | self.layer4 = ResTruck(4*nc, 3, block_type=block_type) 85 | 86 | self.avgpool = nn.AdaptiveAvgPool2d((1,1)) 87 | self.flatten = nn.Flatten() 88 | self.linear = nn.Linear(4*nc, out_nc) 89 | 90 | def forward(self, x): 91 | out = self.conv0(x) 92 | out = self.act(out) 93 | out = self.maxpool(out) 94 | out = self.layer1(out) 95 | out = self.conv1(out) 96 | out = self.layer2(out) 97 | out = self.conv2(out) 98 | out = self.layer3(out) 99 | out = self.conv3(out) 100 | out = self.layer4(out) 101 | 102 | out = 
self.avgpool(out) 103 | out = self.flatten(out) 104 | out = self.linear(out) 105 | 106 | return out -------------------------------------------------------------------------------- /lesson_7/optimizers.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | def get_optimizer(params, option_optimizer:dict): 6 | name = option_optimizer['name'] 7 | 8 | if name == 'sgd': 9 | lr = float(option_optimizer['lr']) 10 | momentum = option_optimizer.get('momentum', 0.9) 11 | dampening = option_optimizer.get('dampening', 0.0) 12 | nesterov = option_optimizer.get('nesterov', False) 13 | weight_decay = option_optimizer.get('weight_decay', 0.0) 14 | 15 | optimizer = torch.optim.SGD( 16 | params=params, 17 | lr=lr, 18 | momentum=momentum, 19 | dampening=dampening, 20 | nesterov=nesterov, 21 | weight_decay=weight_decay 22 | ) 23 | 24 | elif name in ('adam', 'adamw'): 25 | lr = float(option_optimizer['lr']) 26 | beta1 = option_optimizer.get('beta1', 0.9) 27 | beta2 = option_optimizer.get('beta2', 0.999) 28 | eps = option_optimizer.get('eps', 1e-8) 29 | weight_decay = option_optimizer.get('weight_decay', 0.0) 30 | 31 | if name == 'adam': 32 | optimizer = torch.optim.Adam( 33 | params=params, 34 | lr=lr, 35 | betas=(beta1,beta2), 36 | eps=eps, 37 | weight_decay=weight_decay 38 | ) 39 | elif name == 'adamw': 40 | optimizer = torch.optim.AdamW( 41 | params=params, 42 | lr=lr, 43 | betas=(beta1,beta2), 44 | eps=eps, 45 | weight_decay=weight_decay 46 | ) 47 | 48 | else: 49 | raise NotImplementedError(f'optimizer [{name}] is not implemented') 50 | 51 | return optimizer -------------------------------------------------------------------------------- /lesson_7/options/config.yml: -------------------------------------------------------------------------------- 1 | #dataset: 2 | batch_size: 16 3 | train_dogs_path: './dataset/test_set/dogs/' 4 | train_cats_path: './dataset/test_set/cats/' 5 | test_dogs_path: './dataset/training_set/dogs/' 6 | test_cats_path: './dataset/training_set/cats/' 7 | 8 | optimizer: 9 | name: adam 10 | lr: 1e-3 11 | #momentum: 0.5 12 | 13 | network: 14 | arch: psevdoresnet 15 | 16 | in_nc: 3 17 | base_nc: 32 18 | out_nc: 2 19 | 20 | block_type: classic -------------------------------------------------------------------------------- /lesson_7/options/config2.yml: -------------------------------------------------------------------------------- 1 | #dataset: 2 | batch_size: 64 3 | train_dogs_path: './dataset/test_set/dogs/' 4 | train_cats_path: './dataset/test_set/cats/' 5 | test_dogs_path: './dataset/training_set/dogs/' 6 | test_cats_path: './dataset/training_set/cats/' 7 | 8 | optimizer: 9 | name: adam 10 | lr: 1e-3 11 | #momentum: 0.5 12 | 13 | network: 14 | arch: psevdoresnet 15 | 16 | in_nc: 3 17 | base_nc: 32 18 | out_nc: 2 19 | 20 | block_type: classic -------------------------------------------------------------------------------- /lesson_7/sh/start.sh: -------------------------------------------------------------------------------- 1 | python train.py -option options/config.yml 2 | python train.py -option options/config2.yml -------------------------------------------------------------------------------- /lesson_7/train.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | import torchvision as tv 6 | 7 | import argparse 8 | import os 9 | import cv2 10 | 
import numpy as np
11 | import yaml
12 | import matplotlib.pyplot as plt
13 | from tqdm.autonotebook import tqdm
14 | 
15 | from torch.cuda.amp import autocast, GradScaler
16 | 
17 | import arch
18 | import optimizers
19 | 
20 | class Dataset2class(torch.utils.data.Dataset):
21 |     def __init__(self, path_dir1:str, path_dir2:str):
22 |         super().__init__()
23 | 
24 |         self.path_dir1 = path_dir1
25 |         self.path_dir2 = path_dir2
26 | 
27 |         self.dir1_list = sorted(os.listdir(path_dir1))
28 |         self.dir2_list = sorted(os.listdir(path_dir2))
29 | 
30 |     def __len__(self):
31 |         return len(self.dir1_list) + len(self.dir2_list)
32 | 
33 |     def __getitem__(self, idx):
34 | 
35 |         if idx < len(self.dir1_list):
36 |             class_id = 0
37 |             img_path = os.path.join(self.path_dir1, self.dir1_list[idx])
38 |         else:
39 |             class_id = 1
40 |             idx -= len(self.dir1_list)
41 |             img_path = os.path.join(self.path_dir2, self.dir2_list[idx])
42 | 
43 |         img = cv2.imread(img_path, cv2.IMREAD_COLOR)
44 |         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
45 |         img = img.astype(np.float32)
46 |         img = img/255.0
47 | 
48 |         img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA)
49 |         img = img.transpose((2, 0, 1))  # HWC -> CHW for PyTorch
50 | 
51 |         t_img = torch.from_numpy(img)
52 |         t_class_id = torch.tensor(class_id)
53 | 
54 |         return {'img': t_img, 'label': t_class_id}
55 | 
56 | ### PARSING ###
57 | parser = argparse.ArgumentParser()
58 | parser.add_argument('-option', type=str, required=True, help='Path to options file.')
59 | args = parser.parse_args()
60 | print(args.option)
61 | option_path = args.option
62 | 
63 | with open(option_path, 'r') as file_option:
64 |     option = yaml.safe_load(file_option)
65 | ### END OF PARSING ###
66 | 
67 | train_dogs_path = option['train_dogs_path'] # './dataset/training_set/dogs/'
68 | train_cats_path = option['train_cats_path'] # './dataset/training_set/cats/'
69 | test_dogs_path = option['test_dogs_path'] # './dataset/test_set/dogs/'
70 | test_cats_path = option['test_cats_path'] # './dataset/test_set/cats/'
71 | 
72 | train_ds_catsdogs = Dataset2class(train_dogs_path, train_cats_path)
73 | test_ds_catsdogs = Dataset2class(test_dogs_path, test_cats_path)
74 | 
75 | batch_size = option['batch_size']
76 | 
77 | train_loader = torch.utils.data.DataLoader(
78 |     train_ds_catsdogs, shuffle=True,
79 |     batch_size=batch_size, num_workers=1, drop_last=True
80 | )
81 | test_loader = torch.utils.data.DataLoader(
82 |     test_ds_catsdogs, shuffle=True,
83 |     batch_size=batch_size, num_workers=1, drop_last=False
84 | )
85 | 
86 | 
87 | 
88 | def accuracy(pred, label):
89 |     answer = F.softmax(pred.detach()).numpy().argmax(1) == label.numpy().argmax(1)
90 |     return answer.mean()
91 | 
92 | 
93 | option_network = option['network']
94 | model = arch.get_network(option_network)
95 | 
96 | loss_fn = nn.CrossEntropyLoss()
97 | option_optimizer = option['optimizer']
98 | optimizer = optimizers.get_optimizer(model.parameters(), option_optimizer)
99 | scheduler = torch.optim.lr_scheduler.ExponentialLR(
100 |     optimizer,
101 |     gamma = 0.6
102 | )
103 | 
104 | device = 'cuda' # if torch.cuda.is_available() else 'cpu'
105 | model = model.to(device)
106 | loss_fn = loss_fn.to(device)
107 | 
108 | use_amp = True
109 | scaler = torch.cuda.amp.GradScaler()
110 | 
111 | torch.backends.cudnn.benchmark = False
112 | torch.backends.cudnn.deterministic = False
113 | 
114 | epochs = 10
115 | loss_epochs_list = []
116 | acc_epochs_list = []
117 | for epoch in range(epochs):
118 |     loss_val = 0
119 |     acc_val = 0
120 |     for sample in (pbar := tqdm(train_loader)):
121 |         img, label = sample['img'],
sample['label'] 122 | label = F.one_hot(label, 2).float() 123 | img = img.to(device) 124 | label = label.to(device) 125 | optimizer.zero_grad() 126 | 127 | with autocast(use_amp): 128 | pred = model(img) 129 | loss = loss_fn(pred, label) 130 | 131 | scaler.scale(loss).backward() 132 | loss_item = loss.item() 133 | loss_val += loss_item 134 | 135 | scaler.step(optimizer) 136 | scaler.update() 137 | 138 | acc_current = accuracy(pred.cpu().float(), label.cpu().float()) 139 | acc_val += acc_current 140 | 141 | pbar.set_description(f'loss: {loss_item:.5f}\taccuracy: {acc_current:.3f}') 142 | scheduler.step() 143 | loss_epochs_list += [loss_val/len(train_loader)] 144 | acc_epochs_list += [acc_val/len(train_loader)] 145 | print(loss_epochs_list[-1]) 146 | print(acc_epochs_list[-1]) -------------------------------------------------------------------------------- /lesson_8/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_8/1.png -------------------------------------------------------------------------------- /lesson_8/Dogs and cats.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # In[50]: 5 | 6 | 7 | import torch 8 | import torch.nn as nn 9 | import torch.nn.functional as F 10 | 11 | import torchvision as tv 12 | 13 | import os 14 | import cv2 15 | import numpy as np 16 | import yaml 17 | import matplotlib.pyplot as plt 18 | from tqdm.autonotebook import tqdm 19 | 20 | from torch.cuda.amp import autocast, GradScaler 21 | 22 | 23 | # In[76]: 24 | 25 | 26 | option_path = 'config.yml' 27 | with open(option_path, 'r') as file_option: 28 | option = yaml.safe_load(file_option) 29 | 30 | 31 | # In[77]: 32 | 33 | 34 | option 35 | 36 | 37 | # In[78]: 38 | 39 | 40 | class Dataset2class(torch.utils.data.Dataset): 41 | def __init__(self, path_dir1:str, path_dir2:str): 42 | super().__init__() 43 | 44 | self.path_dir1 = path_dir1 45 | self.path_dir2 = path_dir2 46 | 47 | self.dir1_list = sorted(os.listdir(path_dir1)) 48 | self.dir2_list = sorted(os.listdir(path_dir2)) 49 | 50 | def __len__(self): 51 | return len(self.dir1_list) + len(self.dir2_list) 52 | 53 | def __getitem__(self, idx): 54 | 55 | if idx < len(self.dir1_list): 56 | class_id = 0 57 | img_path = os.path.join(self.path_dir1, self.dir1_list[idx]) 58 | else: 59 | class_id = 1 60 | idx -= len(self.dir1_list) 61 | img_path = os.path.join(self.path_dir2, self.dir2_list[idx]) 62 | 63 | img = cv2.imread(img_path, cv2.IMREAD_COLOR) 64 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) 65 | img = img.astype(np.float32) 66 | img = img/255.0 67 | 68 | img = cv2.resize(img, (128, 128), interpolation=cv2.INTER_AREA) 69 | img = img.transpose((2, 0, 1)) 70 | 71 | t_img = torch.from_numpy(img) 72 | t_class_id = torch.tensor(class_id) 73 | 74 | return {'img': t_img, 'label': t_class_id} 75 | 76 | 77 | 78 | # In[79]: 79 | 80 | 81 | train_dogs_path = option['train_dogs_path'] # './dataset/training_set/dogs/' 82 | train_cats_path = option['train_cats_path'] # './dataset/training_set/cats/' 83 | test_dogs_path = option['test_dogs_path'] # './dataset/test_set/dogs/' 84 | test_cats_path = option['test_cats_path'] # './dataset/test_set/cats/' 85 | 86 | train_ds_catsdogs = Dataset2class(train_dogs_path, train_cats_path) 87 | test_ds_catsdogs = Dataset2class(test_dogs_path, test_cats_path) 88 | 89 | 90 | # In[80]: 91 | 92 | 93 | 
len(train_ds_catsdogs)
94 | 
95 | 
96 | # In[81]:
97 | 
98 | 
99 | len(test_ds_catsdogs)
100 | 
101 | 
102 | # In[82]:
103 | 
104 | 
105 | batch_size = option['batch_size']
106 | 
107 | train_loader = torch.utils.data.DataLoader(
108 |     train_ds_catsdogs, shuffle=True,
109 |     batch_size=batch_size, num_workers=1, drop_last=True
110 | )
111 | test_loader = torch.utils.data.DataLoader(
112 |     test_ds_catsdogs, shuffle=True,
113 |     batch_size=batch_size, num_workers=1, drop_last=False
114 | )
115 | 
116 | 
117 | # In[83]:
118 | 
119 | 
120 | class ConvNet(nn.Module):
121 |     def __init__(self):
122 |         super().__init__()
123 | 
124 |         self.act = nn.LeakyReLU(0.2)
125 |         self.maxpool = nn.MaxPool2d(2,2)
126 |         self.conv0 = nn.Conv2d(3, 128, 3, stride=1, padding=0)
127 |         self.conv1 = nn.Conv2d(128, 128, 3, stride=1, padding=0)
128 |         self.conv2 = nn.Conv2d(128, 128, 3, stride=1, padding=0)
129 |         self.conv3 = nn.Conv2d(128, 256, 3, stride=1, padding=0)
130 | 
131 |         self.adaptivepool = nn.AdaptiveAvgPool2d((1,1))
132 |         self.flatten = nn.Flatten()
133 |         self.linear1 = nn.Linear(256, 20)
134 |         self.linear2 = nn.Linear(20, 2)
135 | 
136 |     def forward(self, x):
137 | 
138 | 
139 |         out = self.conv0(x)
140 |         out = self.act(out)
141 |         out = self.maxpool(out)
142 | 
143 |         out = self.conv1(out)
144 |         out = self.act(out)
145 |         out = self.maxpool(out)
146 | 
147 |         out = self.conv2(out)
148 |         out = self.act(out)
149 |         out = self.maxpool(out)
150 | 
151 |         out = self.conv3(out)
152 |         out = self.act(out)
153 | 
154 |         out = self.adaptivepool(out)
155 |         out = self.flatten(out)
156 |         out = self.linear1(out)
157 |         out = self.act(out)
158 |         out = self.linear2(out)
159 | 
160 |         return out
161 | 
162 | 
163 | 
164 | # In[ ]:
165 | 
166 | 
167 | 
168 | 
169 | 
170 | # In[84]:
171 | 
172 | 
173 | class ResBlock(nn.Module):
174 |     def __init__(self, nc):
175 |         super().__init__()
176 | 
177 |         self.conv0 = nn.Conv2d(nc, nc, kernel_size=3, padding=1)
178 |         self.norm0 = nn.BatchNorm2d(nc)
179 |         self.act = nn.LeakyReLU(0.2)
180 |         self.conv1 = nn.Conv2d(nc, nc, kernel_size=3, padding=1)
181 |         self.norm1 = nn.BatchNorm2d(nc)
182 | 
183 |     def forward(self, x):
184 |         out = self.conv0(x)
185 |         out = self.norm0(out)
186 |         out = self.act(out)
187 |         out = self.conv1(out)
188 |         #out = self.norm1(out)
189 | 
190 |         return x + out # self.act(x + out)
191 | 
192 | 
193 | # In[85]:
194 | 
195 | 
196 | class BottleneckBlock(nn.Module):
197 |     def __init__(self, nc):
198 |         super().__init__()
199 |         self.act = nn.LeakyReLU(0.2)
200 | 
201 |         self.conv0 = nn.Conv2d(nc, nc//4, kernel_size=1, padding=0)
202 |         self.norm0 = nn.BatchNorm2d(nc//4)
203 |         self.conv1 = nn.Conv2d(nc//4, nc//4, kernel_size=3, padding=1)
204 |         self.norm1 = nn.BatchNorm2d(nc//4)
205 |         self.conv2 = nn.Conv2d(nc//4, nc, kernel_size=1, padding=0)
206 | 
207 |     def forward(self, x):
208 |         out = self.conv0(x)
209 |         out = self.norm0(out)
210 |         out = self.act(out)
211 |         out = self.conv1(out)
212 |         out = self.norm1(out)
213 |         out = self.act(out)
214 |         out = self.conv2(out)
215 | 
216 |         return x + out # self.act(x + out)
217 | 
218 | 
219 | # In[86]:
220 | 
221 | 
222 | class ResTruck(nn.Module):
223 |     def __init__(self, nc, num_blocks, block_type='classic'):
224 |         super().__init__()
225 | 
226 |         truck = []
227 |         for i in range(num_blocks):
228 |             if block_type == 'classic':
229 |                 truck += [ResBlock(nc)]
230 |             elif block_type == 'bottleneck':
231 |                 truck += [BottleneckBlock(nc)]
232 |             else:
233 |                 raise NotImplementedError(f'{block_type} is not implemented')
234 |         self.truck = nn.Sequential(*truck)
235 | 
236 |     def forward(self, x):
237 |         return


# In[87]:


class PsevoResNet(nn.Module):
    # Simplified ResNet: a strided 7x7 stem, four residual stages with
    # strided 3x3 convolutions for downsampling between them, then
    # global average pooling and a single linear classifier head.
    def __init__(self, in_nc, nc, out_nc, block_type):
        super().__init__()

        self.conv0 = nn.Conv2d(in_nc, nc, kernel_size=7, stride=2)
        #self.norm
        self.act = nn.LeakyReLU(0.2, inplace=True)
        self.maxpool = nn.MaxPool2d(2,2)

        self.layer1 = ResTruck(nc, 3, block_type=block_type)
        self.conv1 = nn.Conv2d(nc, 2*nc, 3, padding=1, stride=2)
        self.layer2 = ResTruck(2*nc, 4, block_type=block_type)
        self.conv2 = nn.Conv2d(2*nc, 4*nc, 3, padding=1, stride=2)
        self.layer3 = ResTruck(4*nc, 6, block_type=block_type)
        self.conv3 = nn.Conv2d(4*nc, 4*nc, 3, padding=1, stride=2)
        self.layer4 = ResTruck(4*nc, 3, block_type=block_type)

        self.avgpool = nn.AdaptiveAvgPool2d((1,1))
        self.flatten = nn.Flatten()
        self.linear = nn.Linear(4*nc, out_nc)

    def forward(self, x):
        out = self.conv0(x)
        out = self.act(out)
        out = self.maxpool(out)
        out = self.layer1(out)
        out = self.conv1(out)
        out = self.layer2(out)
        out = self.conv2(out)
        out = self.layer3(out)
        out = self.conv3(out)
        out = self.layer4(out)

        out = self.avgpool(out)
        out = self.flatten(out)
        out = self.linear(out)

        return out


# In[ ]:




# In[88]:


def count_parameters(model):
    # Number of trainable parameters.
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


# In[89]:


in_nc = option['network']['in_nc']
base_nc = option['network']['base_nc']
out_nc = option['network']['out_nc']
model = PsevoResNet(in_nc, base_nc, out_nc, block_type='classic')


# In[91]:


model


# In[92]:


count_parameters(model)


# In[93]:


# One forward pass on a real batch as a quick shape/smoke test.
for sample in train_loader:
    img = sample['img']
    label = sample['label']
    model(img)
    break


# In[ ]:




# In[94]:


loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))
scheduler = torch.optim.lr_scheduler.ExponentialLR(
    optimizer,
    gamma = 0.6
)


# In[95]:


def accuracy(pred, label):
    # Fraction of samples where the predicted class matches the one-hot label.
    answer = F.softmax(pred.detach(), dim=1).numpy().argmax(1) == label.numpy().argmax(1)
    return answer.mean()


# In[96]:


device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = model.to(device)
loss_fn = loss_fn.to(device)


# In[97]:


use_amp = True
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)  # no-op when AMP is disabled


# In[98]:


torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False


# In[99]:


epochs = 10
loss_epochs_list = []
acc_epochs_list = []
for epoch in range(epochs):
    loss_val = 0
    acc_val = 0
    for sample in (pbar := tqdm(train_loader)):
        img, label = sample['img'], sample['label']
        label = F.one_hot(label, 2).float()
        img = img.to(device)
        label = label.to(device)
        optimizer.zero_grad()

        with autocast(use_amp):
            pred = model(img)
            loss = loss_fn(pred, label)

        scaler.scale(loss).backward()
        loss_item = loss.item()
        loss_val += loss_item

        scaler.step(optimizer)
        scaler.update()

        acc_current = accuracy(pred.cpu().float(), label.cpu().float())
        acc_val += acc_current

        pbar.set_description(f'loss: {loss_item:.5f}\taccuracy: {acc_current:.3f}')
    scheduler.step()
    loss_epochs_list += [loss_val/len(train_loader)]
    acc_epochs_list += [acc_val/len(train_loader)]
    print(loss_epochs_list[-1])
    print(acc_epochs_list[-1])
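
# Evaluation pass on the test set (a minimal sketch, not part of the original
# lesson): test_loader is defined above but never used in this script, so
# something like the following could report held-out accuracy after training.
model.eval()
test_acc = 0
with torch.no_grad():
    for sample in test_loader:
        img = sample['img'].to(device)
        label = F.one_hot(sample['label'], 2).float()
        pred = model(img)
        test_acc += accuracy(pred.cpu().float(), label.float())
print(f'test accuracy: {test_acc/len(test_loader):.3f}')
model.train()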


# In[273]:


# Comparison curves: loss_epochs_list_naive and loss_epochs_list_resnet_v1
# (and the matching accuracy lists below) are kept in notebook memory from
# earlier training runs of the naive ConvNet and the first ResNet variant.
plt.title('Loss: naive ConvNet vs ResNet variants')
plt.plot(loss_epochs_list_naive)
plt.plot(loss_epochs_list_resnet_v1)
plt.plot(loss_epochs_list)


# In[274]:


plt.title('Accuracy: naive ConvNet vs ResNet variants')
plt.plot(acc_epochs_list_naive)
plt.plot(acc_epochs_list_resnet_v1)
plt.plot(acc_epochs_list)
--------------------------------------------------------------------------------
/lesson_9/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_9/1.png
--------------------------------------------------------------------------------
/lesson_9/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_9/2.png
--------------------------------------------------------------------------------
/lesson_9/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_9/3.png
--------------------------------------------------------------------------------
/lesson_9/4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/magorokhoov/youtube_pytorch_lessons/460597d84a3f654691fe7087087115006c10c3d9/lesson_9/4.png
--------------------------------------------------------------------------------