├── AlexNet.ipynb
├── DenseNet.ipynb
├── InceptionV1.ipynb
├── LeNet.ipynb
├── NiN.ipynb
├── README.md
├── ResNet.ipynb
├── VGGNet.ipynb
├── ddp_example.py
├── freeze_unfreeze_network.ipynb
├── mmdetection_create_model.ipynb
├── modify_network.ipynb
├── resnet_weights.pth
└── weights_init_copy.ipynb

/InceptionV1.ipynb:
--------------------------------------------------------------------------------
1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": { 7 | "colab": {}, 8 | "colab_type": "code", 9 | "id": "ikzIVWRna_Cq" 10 | }, 11 | "outputs": [], 12 | "source": [ 13 | "import torch\n", 14 | "import torch.nn as nn\n", 15 | "import torch.nn.functional as F\n", 16 | "import torch.optim as optim\n", 17 | "from torch.utils.data import DataLoader\n", 18 | "from torch.autograd import Variable\n", 19 | "from torchvision import datasets, transforms\n", 20 | "import matplotlib.pyplot as plt" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": null, 26 | "metadata": { 27 | "colab": {}, 28 | "colab_type": "code", 29 | "id": "_ZL2MBjU2T1V" 30 | }, 31 | "outputs": [], 32 | "source": [ 33 | "CUDA = torch.cuda.is_available()\n", 34 | "device = torch.device(\"cuda\" if CUDA else \"cpu\")\n", 35 | "print(device)" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": null, 41 | "metadata": { 42 | "colab": {}, 43 | "colab_type": "code", 44 | "id": "DH5Eniyh2eU0" 45 | }, 46 | "outputs": [], 47 | "source": [ 48 | "class inceptionv1_block(nn.Module):\n", 49 | " def __init__(self, in_channels, out_channels1, out_channels2_step1, out_channels2_step2, out_channels3_step1, out_channels3_step2, out_channels4):\n", 50 | " super(inceptionv1_block, self).__init__()\n", 51 | " self.branch1_conv = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=out_channels1, kernel_size=1),\n", 52 | " nn.ReLU(inplace=True))\n", 53 | " \n", 54 | " self.branch2_conv1 = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=out_channels2_step1, kernel_size=1),\n", 55 | " nn.ReLU(inplace=True))\n", 56 | " self.branch2_conv2 = nn.Sequential(nn.Conv2d(in_channels=out_channels2_step1, out_channels=out_channels2_step2, kernel_size=3, padding=1),\n", 57 | " nn.ReLU(inplace=True))\n", 58 | " \n", 59 | " self.branch3_conv1 = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=out_channels3_step1, kernel_size=1),\n", 60 | " nn.ReLU(inplace=True))\n", 61 | " self.branch3_conv2 = nn.Sequential(nn.Conv2d(in_channels=out_channels3_step1, out_channels=out_channels3_step2, kernel_size=5, padding=2),\n", 62 | " nn.ReLU(inplace=True))\n", 63 | " \n", 64 | " self.branch4_maxpooling = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n", 65 | " self.branch4_conv1 = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=out_channels4, kernel_size=1),\n", 66 | " nn.ReLU(inplace=True))\n", 67 | " \n", 68 | " def forward(self, x):\n", 69 | " out1 = self.branch1_conv(x)\n", 70 | " out2 = self.branch2_conv2(self.branch2_conv1(x))\n", 71 | " out3 = self.branch3_conv2(self.branch3_conv1(x))\n", 72 | " out4 = self.branch4_conv1(self.branch4_maxpooling(x))\n", 73 | " out = torch.cat([out1, out2, out3, out4], dim=1)\n", 74 | "\n", 75 | " return out" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": null, 81 | "metadata": { 82 | "colab": {}, 83 | "colab_type": "code", 84 | "id": "HPJCGTmA2eZT" 85 | }, 86 | "outputs": [], 87 | "source": [ 88 | "class auxiliary_classifiers(nn.Module):\n",
 89 | " def __init__(self, in_channels, out_channels):\n", 90 | " super(auxiliary_classifiers, self).__init__()\n", 91 | " self.avgpooling = nn.AvgPool2d(kernel_size=5, stride=3)\n", 92 | " \n", 93 | " self.conv = nn.Conv2d(in_channels=in_channels, out_channels=128, kernel_size=1)\n", 94 | " \n", 95 | " self.fc1 = nn.Linear(in_features=128*4*4, out_features=1024)\n", 96 | "\n", 97 | " self.fc2 = nn.Linear(in_features=1024, out_features=out_channels)\n", 98 | " \n", 99 | " def forward(self, x):\n", 100 | " x = self.avgpooling(x)\n", 101 | " x = F.relu(self.conv(x))\n", 102 | " x = torch.flatten(x, start_dim=1)\n", 103 | " x = F.relu(self.fc1(x))\n", 104 | " x = F.dropout(x, p=0.5, training=self.training) # F.dropout defaults to training=True, so pass the module flag\n", 105 | " x = self.fc2(x)\n", 106 | "\n", 107 | " return x" 108 | ] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": null, 113 | "metadata": { 114 | "colab": {}, 115 | "colab_type": "code", 116 | "id": "Kx6yCzU6paEn" 117 | }, 118 | "outputs": [], 119 | "source": [ 120 | "class InceptionV1(nn.Module):\n", 121 | " def __init__(self, num_classes, training=True):\n", 122 | " super(InceptionV1, self).__init__()\n", 123 | " self.training = training\n", 124 | " self.conv = nn.Sequential(nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3),\n", 125 | " nn.ReLU(inplace=True),\n", 126 | " nn.MaxPool2d(kernel_size=3, stride=2, padding=1),\n", 127 | " nn.Conv2d(in_channels=64, out_channels=64, kernel_size=1, stride=1),\n", 128 | " nn.ReLU(inplace=True),\n", 129 | " nn.Conv2d(in_channels=64, out_channels=192, kernel_size=3, stride=1, padding=1),\n", 130 | " nn.ReLU(inplace=True),\n", 131 | " nn.MaxPool2d(kernel_size=3, stride=2, padding=1))\n", 132 | " \n", 133 | " self.inception1 = inceptionv1_block(in_channels=192, out_channels1=64, out_channels2_step1=96, out_channels2_step2=128, out_channels3_step1=16, out_channels3_step2=32, out_channels4=32)\n", 134 | " self.inception2 = inceptionv1_block(in_channels=256, out_channels1=128, out_channels2_step1=128, out_channels2_step2=192, out_channels3_step1=32, out_channels3_step2=96, out_channels4=64)\n", 135 | " self.maxpooling1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n", 136 | " self.inception3 = inceptionv1_block(in_channels=480, out_channels1=192, out_channels2_step1=96, out_channels2_step2=208, out_channels3_step1=16, out_channels3_step2=48, out_channels4=64)\n", 137 | "\n", 138 | " if self.training:\n", 139 | " self.auxiliary1 = auxiliary_classifiers(in_channels=512, out_channels=num_classes)\n", 140 | "\n", 141 | " self.inception4 = inceptionv1_block(in_channels=512, out_channels1=160, out_channels2_step1=112, out_channels2_step2=224, out_channels3_step1=24, out_channels3_step2=64, out_channels4=64)\n", 142 | " self.inception5 = inceptionv1_block(in_channels=512, out_channels1=128, out_channels2_step1=128, out_channels2_step2=256, out_channels3_step1=24, out_channels3_step2=64, out_channels4=64)\n", 143 | " self.inception6 = inceptionv1_block(in_channels=512, out_channels1=112, out_channels2_step1=144, out_channels2_step2=288, out_channels3_step1=32, out_channels3_step2=64, out_channels4=64)\n", 144 | "\n", 145 | " if self.training:\n", 146 | " self.auxiliary2 = auxiliary_classifiers(in_channels=528, out_channels=num_classes)\n", 147 | "\n", 148 | " self.inception7 = inceptionv1_block(in_channels=528, out_channels1=256, out_channels2_step1=160, out_channels2_step2=320, out_channels3_step1=32, out_channels3_step2=128, out_channels4=128)\n", 149 | " self.maxpooling2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n", 150 | " self.inception8 = inceptionv1_block(in_channels=832, out_channels1=256, out_channels2_step1=160, out_channels2_step2=320, out_channels3_step1=32, out_channels3_step2=128, out_channels4=128)\n",
 151 | " self.inception9 = inceptionv1_block(in_channels=832, out_channels1=384, out_channels2_step1=192, out_channels2_step2=384, out_channels3_step1=48, out_channels3_step2=128, out_channels4=128)\n", 152 | "\n", 153 | " self.avgpooling = nn.AvgPool2d(kernel_size=7, stride=1)\n", 154 | " self.dropout = nn.Dropout(p=0.4)\n", 155 | " self.fc = nn.Linear(in_features=1024, out_features=num_classes)\n", 156 | "\n", 157 | " def forward(self, x):\n", 158 | " x = self.conv(x)\n", 159 | " x = self.inception1(x)\n", 160 | " x = self.inception2(x)\n", 161 | " x = self.maxpooling1(x)\n", 162 | " x = self.inception3(x)\n", 163 | " aux1 = self.auxiliary1(x) if self.training else None # auxiliary heads only exist (and are only needed) in training mode\n", 164 | " x = self.inception4(x)\n", 165 | " x = self.inception5(x)\n", 166 | " x = self.inception6(x)\n", 167 | " aux2 = self.auxiliary2(x) if self.training else None\n", 168 | " x = self.inception7(x)\n", 169 | " x = self.maxpooling2(x)\n", 170 | " x = self.inception8(x)\n", 171 | " x = self.inception9(x)\n", 172 | " x = self.avgpooling(x)\n", 173 | " x = self.dropout(x)\n", 174 | " x = torch.flatten(x, start_dim=1)\n", 175 | " out = self.fc(x)\n", 176 | "\n", 177 | " if self.training:\n", 178 | " return aux1, aux2, out\n", 179 | "\n", 180 | " else:\n", 181 | " return out\n" 182 | ] 183 | }, 184 | { 185 | "cell_type": "code", 186 | "execution_count": null, 187 | "metadata": { 188 | "colab": {}, 189 | "colab_type": "code", 190 | "id": "cvVoVaeWpaMW" 191 | }, 192 | "outputs": [], 193 | "source": [ 194 | "# Parameters\n", 195 | "batch_size = 128\n", 196 | "num_epochs = 5\n", 197 | "lr = 0.001\n", 198 | "\n", 199 | "num_classes = 2\n", 200 | "model = InceptionV1(num_classes, training=True)\n", 201 | "\n", 202 | "if CUDA:\n", 203 | " model = model.cuda()\n", 204 | "\n", 205 | "optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n", 206 | "criterion = nn.CrossEntropyLoss()" 207 | ] 208 | }, 209 | { 210 | "cell_type": "code", 211 | "execution_count": null, 212 | "metadata": { 213 | "colab": {}, 214 | "colab_type": "code", 215 | "id": "uW2ixXvxpaPG" 216 | }, 217 | "outputs": [], 218 | "source": [ 219 | "import json\n", 220 | "import zipfile\n", 221 | "import os\n", 222 | "\n", 223 | "# kaggle api\n", 224 | "api_token = {\"username\":\"aaa\",\"key\":\"kkk\"}\n", 225 | " \n", 226 | "if not os.path.exists(\"/root/.kaggle\"):\n", 227 | " os.makedirs(\"/root/.kaggle\")\n", 228 | "\n", 229 | "with open('/root/.kaggle/kaggle.json', 'w') as file:\n", 230 | " json.dump(api_token, file)\n", 231 | "!chmod 600 /root/.kaggle/kaggle.json\n", 232 | " \n", 233 | "if not os.path.exists(\"/kaggle\"):\n", 234 | " os.makedirs(\"/kaggle\")\n", 235 | "os.chdir('/kaggle')\n", 236 | "!kaggle datasets download -d chetankv/dogs-cats-images --force\n", 237 | " \n", 238 | "!ls /kaggle" 239 | ] 240 | }, 241 | { 242 | "cell_type": "code", 243 | "execution_count": null, 244 | "metadata": { 245 | "colab": {}, 246 | "colab_type": "code", 247 | "id": "fLBxoQ413vu8" 248 | }, 249 | "outputs": [], 250 | "source": [ 251 | "!unzip dogs-cats-images.zip" 252 | ] 253 | }, 254 | { 255 | "cell_type": "code", 256 | "execution_count": null, 257 | "metadata": { 258 | "colab": {}, 259 | "colab_type": "code", 260 | "id": "wly0Gj1f3vxT" 261 | }, 262 | "outputs": [], 263 | "source": [ 264 | "# Transform\n", 265 | "transform = transforms.Compose(\n", 266 | " [transforms.Resize(size=(227,227)),\n", 267 | " transforms.CenterCrop(224),\n", 268 | " transforms.RandomRotation(20),\n",
 269 | " transforms.RandomHorizontalFlip(),\n", 270 | " transforms.ToTensor(),\n", 271 | " transforms.Normalize((0.5,), (0.5,)),]\n", 272 | " )\n", 273 | "\n", 274 | "# Data\n", 275 | "train_dataset = datasets.ImageFolder(root='/kaggle/dataset/training_set', transform=transform)\n", 276 | "valid_dataset = datasets.ImageFolder(root='/kaggle/dataset/test_set', transform=transform)\n", 277 | "train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n", 278 | "valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)" 279 | ] 280 | }, 281 | { 282 | "cell_type": "code", 283 | "execution_count": null, 284 | "metadata": { 285 | "colab": {}, 286 | "colab_type": "code", 287 | "id": "qfLflX5f3vzy" 288 | }, 289 | "outputs": [], 290 | "source": [ 291 | "def train(train_loader, model, criterion, optimizer, epoch):\n", 292 | " model.train()\n", 293 | " total_train = 0\n", 294 | " correct_train = 0\n", 295 | " train_loss = 0\n", 296 | " \n", 297 | " for batch_idx, (data, target) in enumerate(train_loader):\n", 298 | " data, target = Variable(data), Variable(target) \n", 299 | " \n", 300 | " if CUDA:\n", 301 | " data, target = data.cuda(), target.cuda()\n", 302 | "\n", 303 | " # clear gradient\n", 304 | " optimizer.zero_grad()\n", 305 | "\n", 306 | " # Forward propagation: the model returns (aux1, aux2, out) in training mode\n", 307 | " aux1, aux2, output = model(data) \n", 308 | " loss_output = criterion(output, target) \n", 309 | " loss_aux1 = criterion(aux1, target) \n", 310 | " loss_aux2 = criterion(aux2, target) \n", 311 | " loss = loss_output + loss_aux1 * 0.3 + loss_aux2 * 0.3\n", 312 | "\n", 313 | " # Calculate gradients\n", 314 | " loss.backward()\n", 315 | "\n", 316 | " # Update parameters\n", 317 | " optimizer.step()\n", 318 | "\n", 319 | " predicted = torch.max(output.data, 1)[1]\n", 320 | " total_train += len(target)\n", 321 | " correct_train += sum((predicted == target).float())\n", 322 | " train_loss += loss.item()\n", 323 | "\n", 324 | " if batch_idx % 100 == 0:\n", 325 | " print(\"Train Epoch: {}/{} [iter: {}/{}], acc: {:.6f}, loss: {:.6f}\".format(\n", 326 | " epoch+1, num_epochs, batch_idx+1, len(train_loader),\n", 327 | " correct_train / float((batch_idx + 1) * batch_size),\n", 328 | " train_loss / float((batch_idx + 1) * batch_size)))\n", 329 | " \n", 330 | " train_acc_ = 100 * (correct_train / float(total_train))\n", 331 | " train_loss_ = train_loss / total_train\n", 332 | " \n", 333 | " return train_acc_, train_loss_" 334 | ] 335 | }, 336 | { 337 | "cell_type": "code", 338 | "execution_count": null, 339 | "metadata": { 340 | "colab": {}, 341 | "colab_type": "code", 342 | "id": "Ji_uE-hK3060" 343 | }, 344 | "outputs": [], 345 | "source": [ 346 | "def validate(valid_loader, model, criterion, epoch): \n", 347 | " model.eval()\n", 348 | " total_valid = 0\n", 349 | " correct_valid = 0\n", 350 | " valid_loss = 0\n", 351 | " \n", 352 | " for batch_idx, (data, target) in enumerate(valid_loader):\n", 353 | " data, target = Variable(data), Variable(target) \n", 354 | " \n", 355 | " if CUDA:\n", 356 | " data, target = data.cuda(), target.cuda()\n", 357 | "\n", 358 | " output = model(data)\n", 359 | " loss = criterion(output, target) \n", 360 | "\n", 361 | " predicted = torch.max(output.data, 1)[1]\n", 362 | " total_valid += len(target)\n", 363 | " correct_valid += sum((predicted == target).float())\n", 364 | " valid_loss += loss.item()\n", 365 | "\n", 366 | " if batch_idx % 100 == 0:\n", 367 | " print(\"Valid Epoch: {}/{} [iter: {}/{}], acc: {:.6f}, loss: 
{:.6f}\".format(\n", 368 | " epoch+1, num_epochs, batch_idx+1, len(valid_loader),\n", 369 | " correct_valid / float((batch_idx + 1) * batch_size),\n", 370 | " valid_loss / float((batch_idx + 1) * batch_size)))\n", 371 | " \n", 372 | " valid_acc_ = 100 * (correct_valid / float(total_valid))\n", 373 | " valid_loss_ = valid_loss / total_valid\n", 374 | " \n", 375 | " return valid_acc_, valid_loss_" 376 | ] 377 | }, 378 | { 379 | "cell_type": "code", 380 | "execution_count": null, 381 | "metadata": { 382 | "colab": {}, 383 | "colab_type": "code", 384 | "id": "Ph-MACjR3087" 385 | }, 386 | "outputs": [], 387 | "source": [ 388 | "def training_loop(model, criterion, optimizer, train_loader, valid_loader):\n", 389 | " # set objects for storing metrics\n", 390 | " total_train_loss = []\n", 391 | " total_valid_loss = []\n", 392 | " total_train_accuracy = []\n", 393 | " total_valid_accuracy = []\n", 394 | " \n", 395 | " # Train model\n", 396 | " for epoch in range(num_epochs):\n", 397 | " # training\n", 398 | " train_acc_, train_loss_ = train(train_loader, model, criterion, optimizer, epoch)\n", 399 | " total_train_loss.append(train_loss_)\n", 400 | " total_train_accuracy.append(train_acc_)\n", 401 | "\n", 402 | " # validation\n", 403 | " with torch.no_grad():\n", 404 | " valid_acc_, valid_loss_ = validate(valid_loader, model, criterion, epoch)\n", 405 | " total_valid_loss.append(valid_loss_)\n", 406 | " total_valid_accuracy.append(valid_acc_)\n", 407 | "\n", 408 | " print('==========================================================================')\n", 409 | " print(\"Epoch: {}/{}, Train acc: {:.6f}, Train loss: {:.6f}, Valid acc: {:.6f}, Valid loss: {:.6f}\".format(\n", 410 | " epoch+1, num_epochs, \n", 411 | " train_acc_, train_loss_,\n", 412 | " valid_acc_, valid_loss_))\n", 413 | " print('==========================================================================')\n", 414 | "\n", 415 | " print(\"====== END ==========\")\n", 416 | "\n", 417 | " return total_train_loss, total_valid_loss, total_train_accuracy, total_valid_accuracy" 418 | ] 419 | }, 420 | { 421 | "cell_type": "code", 422 | "execution_count": null, 423 | "metadata": { 424 | "colab": {}, 425 | "colab_type": "code", 426 | "id": "_nGbEbgO3v2z" 427 | }, 428 | "outputs": [], 429 | "source": [ 430 | "total_train_loss, total_valid_loss, total_train_accuracy, total_valid_accuracy = training_loop(model, criterion, optimizer, train_loader, valid_loader)" 431 | ] 432 | }, 433 | { 434 | "cell_type": "code", 435 | "execution_count": null, 436 | "metadata": { 437 | "colab": {}, 438 | "colab_type": "code", 439 | "id": "gfmFEWzGpaRO" 440 | }, 441 | "outputs": [], 442 | "source": [ 443 | "def plot_result(total_train, total_valid, label):\n", 444 | " plt.plot(range(num_epochs), total_train, 'b-', label=f'Training_{label}')\n", 445 | " plt.plot(range(num_epochs), total_valid, 'g-', label=f'validation_{label}')\n", 446 | " plt.title(f'Training & Validation {label}')\n", 447 | " plt.xlabel('Number of epochs')\n", 448 | " plt.ylabel(f'{label}')\n", 449 | " plt.legend()\n", 450 | " plt.show()" 451 | ] 452 | }, 453 | { 454 | "cell_type": "code", 455 | "execution_count": null, 456 | "metadata": { 457 | "colab": {}, 458 | "colab_type": "code", 459 | "id": "8E0RhBgT35is" 460 | }, 461 | "outputs": [], 462 | "source": [ 463 | "plot_result(total_train_loss, total_valid_loss, 'loss')" 464 | ] 465 | }, 466 | { 467 | "cell_type": "code", 468 | "execution_count": null, 469 | "metadata": { 470 | "colab": {}, 471 | "colab_type": "code", 472 | "id": "RwmduxQi35ms" 473 | 
}, 474 | "outputs": [], 475 | "source": [ 476 | "plot_result(total_train_accuracy, total_valid_accuracy, 'accuracy')" 477 | ] 478 | } 479 | ], 480 | "metadata": { 481 | "accelerator": "GPU", 482 | "colab": { 483 | "authorship_tag": "ABX9TyNay6WH/v5dNgKtxAZFLBe+", 484 | "collapsed_sections": [], 485 | "name": "InceptionV1.ipynb", 486 | "private_outputs": true, 487 | "provenance": [] 488 | }, 489 | "kernelspec": { 490 | "display_name": "Python 3", 491 | "language": "python", 492 | "name": "python3" 493 | }, 494 | "language_info": { 495 | "codemirror_mode": { 496 | "name": "ipython", 497 | "version": 3 498 | }, 499 | "file_extension": ".py", 500 | "mimetype": "text/x-python", 501 | "name": "python", 502 | "nbconvert_exporter": "python", 503 | "pygments_lexer": "ipython3", 504 | "version": "3.7.4" 505 | } 506 | }, 507 | "nbformat": 4, 508 | "nbformat_minor": 1 509 | } 510 | -------------------------------------------------------------------------------- /LeNet.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "colab": {}, 8 | "colab_type": "code", 9 | "executionInfo": { 10 | "elapsed": 1889, 11 | "status": "ok", 12 | "timestamp": 1600704570149, 13 | "user": { 14 | "displayName": "ChingI Lee", 15 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgDdMJrUbQatXDhq6lGSnmXrmVgJ5j16hylgay6-zE=s64", 16 | "userId": "03381739445029229035" 17 | }, 18 | "user_tz": -480 19 | }, 20 | "id": "RPMyPuRu0pbc" 21 | }, 22 | "outputs": [], 23 | "source": [ 24 | "import torch\n", 25 | "import torch.nn as nn\n", 26 | "import torch.nn.functional as F\n", 27 | "import torch.optim as optim\n", 28 | "from torch.utils.data import DataLoader\n", 29 | "from torch.autograd import Variable\n", 30 | "from torchvision import datasets, transforms\n", 31 | "import matplotlib.pyplot as plt" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": 2, 37 | "metadata": { 38 | "colab": { 39 | "base_uri": "https://localhost:8080/", 40 | "height": 34 41 | }, 42 | "colab_type": "code", 43 | "executionInfo": { 44 | "elapsed": 1464, 45 | "status": "ok", 46 | "timestamp": 1600704570521, 47 | "user": { 48 | "displayName": "ChingI Lee", 49 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgDdMJrUbQatXDhq6lGSnmXrmVgJ5j16hylgay6-zE=s64", 50 | "userId": "03381739445029229035" 51 | }, 52 | "user_tz": -480 53 | }, 54 | "id": "TxkuvrEvrTpV", 55 | "outputId": "1da0e137-a16e-4e34-e027-d5fc938a5be5" 56 | }, 57 | "outputs": [ 58 | { 59 | "name": "stdout", 60 | "output_type": "stream", 61 | "text": [ 62 | "cuda\n" 63 | ] 64 | } 65 | ], 66 | "source": [ 67 | "CUDA = torch.cuda.is_available()\n", 68 | "device = torch.device(\"cuda\" if CUDA else \"cpu\")\n", 69 | "print(device)" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": 3, 75 | "metadata": { 76 | "colab": {}, 77 | "colab_type": "code", 78 | "executionInfo": { 79 | "elapsed": 855, 80 | "status": "ok", 81 | "timestamp": 1600704570904, 82 | "user": { 83 | "displayName": "ChingI Lee", 84 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgDdMJrUbQatXDhq6lGSnmXrmVgJ5j16hylgay6-zE=s64", 85 | "userId": "03381739445029229035" 86 | }, 87 | "user_tz": -480 88 | }, 89 | "id": "sE3YAOPlmmd6" 90 | }, 91 | "outputs": [], 92 | "source": [ 93 | "class LeNet(nn.Module):\n", 94 | " def __init__(self):\n", 95 | " super(LeNet, self).__init__()\n", 96 | " self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, 
padding=2, stride=1)\n", 97 | " self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5)\n", 98 | "\n", 99 | " self.fc1 = nn.Linear(in_features=16*5*5, out_features=120)\n", 100 | " self.fc2 = nn.Linear(in_features=120, out_features=84)\n", 101 | " self.fc3 = nn.Linear(in_features=84, out_features=10)\n", 102 | "\n", 103 | " def forward(self, x):\n", 104 | " x = F.sigmoid(self.conv1(x))\n", 105 | " x = F.avg_pool2d(x, kernel_size=2, stride=2)\n", 106 | " x = F.sigmoid(self.conv2(x))\n", 107 | " x = F.avg_pool2d(x, kernel_size=2, stride=2)\n", 108 | " # x = x.view(-1, 16*5*5)\n", 109 | " x = torch.flatten(x, 1)\n", 110 | " x = F.sigmoid(self.fc1(x))\n", 111 | " x = F.sigmoid(self.fc2(x))\n", 112 | " x = self.fc3(x)\n", 113 | " return x" 114 | ] 115 | }, 116 | { 117 | "cell_type": "code", 118 | "execution_count": 4, 119 | "metadata": { 120 | "colab": {}, 121 | "colab_type": "code", 122 | "executionInfo": { 123 | "elapsed": 4610, 124 | "status": "ok", 125 | "timestamp": 1600704576604, 126 | "user": { 127 | "displayName": "ChingI Lee", 128 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgDdMJrUbQatXDhq6lGSnmXrmVgJ5j16hylgay6-zE=s64", 129 | "userId": "03381739445029229035" 130 | }, 131 | "user_tz": -480 132 | }, 133 | "id": "nwOhi63DxQ8O" 134 | }, 135 | "outputs": [], 136 | "source": [ 137 | "# Parameters\n", 138 | "batch_size = 256\n", 139 | "num_epochs = 5\n", 140 | "lr = 0.001\n", 141 | "\n", 142 | "model = LeNet()\n", 143 | "\n", 144 | "if CUDA:\n", 145 | " model = model.cuda()\n", 146 | "\n", 147 | "optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n", 148 | "criterion = nn.CrossEntropyLoss()" 149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": 5, 154 | "metadata": { 155 | "colab": {}, 156 | "colab_type": "code", 157 | "executionInfo": { 158 | "elapsed": 3534, 159 | "status": "ok", 160 | "timestamp": 1600704576607, 161 | "user": { 162 | "displayName": "ChingI Lee", 163 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgDdMJrUbQatXDhq6lGSnmXrmVgJ5j16hylgay6-zE=s64", 164 | "userId": "03381739445029229035" 165 | }, 166 | "user_tz": -480 167 | }, 168 | "id": "GMFstrmb1E36" 169 | }, 170 | "outputs": [], 171 | "source": [ 172 | "# Transform\n", 173 | "transform = transforms.Compose(\n", 174 | " [transforms.ToTensor(),\n", 175 | " transforms.Normalize((0.5,), (0.5,)),]\n", 176 | " )\n", 177 | "\n", 178 | "# Data\n", 179 | "train_dataset = datasets.MNIST(root='MNIST', download=True, train=True, transform=transform)\n", 180 | "valid_dataset = datasets.MNIST(root='MNIST', download=True, train=False, transform=transform)\n", 181 | "train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n", 182 | "valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)" 183 | ] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "execution_count": 14, 188 | "metadata": { 189 | "colab": {}, 190 | "colab_type": "code", 191 | "executionInfo": { 192 | "elapsed": 706, 193 | "status": "ok", 194 | "timestamp": 1600705170757, 195 | "user": { 196 | "displayName": "ChingI Lee", 197 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgDdMJrUbQatXDhq6lGSnmXrmVgJ5j16hylgay6-zE=s64", 198 | "userId": "03381739445029229035" 199 | }, 200 | "user_tz": -480 201 | }, 202 | "id": "PunirkRK1YOD" 203 | }, 204 | "outputs": [], 205 | "source": [ 206 | "def train(train_loader, model, criterion, optimizer, epoch):\n", 207 | " model.train()\n", 208 | " total_train = 0\n", 209 | " correct_train = 0\n", 210 | " train_loss = 0\n", 211 | " 
\n", 212 | " for batch_idx, (data, target) in enumerate(train_loader):\n", 213 | " data, target = Variable(data), Variable(target) \n", 214 | " \n", 215 | " if CUDA:\n", 216 | " data, target = data.cuda(), target.cuda()\n", 217 | "\n", 218 | " # clear gradient\n", 219 | " optimizer.zero_grad()\n", 220 | "\n", 221 | " # Forward propagation\n", 222 | " output = model(data) \n", 223 | " loss = criterion(output, target) \n", 224 | "\n", 225 | " # Calculate gradients\n", 226 | " loss.backward()\n", 227 | "\n", 228 | " # Update parameters\n", 229 | " optimizer.step()\n", 230 | "\n", 231 | " predicted = torch.max(output.data, 1)[1]\n", 232 | " total_train += len(target)\n", 233 | " correct_train += sum((predicted == target).float())\n", 234 | " train_loss += loss.item()\n", 235 | "\n", 236 | " # if batch_idx % 100 == 0:\n", 237 | " # print(\"Train Epoch: {}/{} [iter: {}/{}], acc: {:.6f}, loss: {:.6f}\".format(\n", 238 | " # epoch+1, num_epochs, batch_idx+1, len(train_loader),\n", 239 | " # correct_train / float((batch_idx + 1) * batch_size),\n", 240 | " # train_loss / float((batch_idx + 1) * batch_size)))\n", 241 | " \n", 242 | " train_acc_ = 100 * (correct_train / float(total_train))\n", 243 | " train_loss_ = train_loss / total_train\n", 244 | " \n", 245 | " return train_acc_, train_loss_" 246 | ] 247 | }, 248 | { 249 | "cell_type": "code", 250 | "execution_count": 15, 251 | "metadata": { 252 | "colab": {}, 253 | "colab_type": "code", 254 | "executionInfo": { 255 | "elapsed": 858, 256 | "status": "ok", 257 | "timestamp": 1600705173726, 258 | "user": { 259 | "displayName": "ChingI Lee", 260 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgDdMJrUbQatXDhq6lGSnmXrmVgJ5j16hylgay6-zE=s64", 261 | "userId": "03381739445029229035" 262 | }, 263 | "user_tz": -480 264 | }, 265 | "id": "LYW3laXuPeYu" 266 | }, 267 | "outputs": [], 268 | "source": [ 269 | "def validate(valid_loader, model, criterion, epoch): \n", 270 | " model.eval()\n", 271 | " total_valid = 0\n", 272 | " correct_valid = 0\n", 273 | " valid_loss = 0\n", 274 | " \n", 275 | " for batch_idx, (data, target) in enumerate(valid_loader):\n", 276 | " data, target = Variable(data), Variable(target) \n", 277 | " \n", 278 | " if CUDA:\n", 279 | " data, target = data.cuda(), target.cuda()\n", 280 | "\n", 281 | " # Forward propagation\n", 282 | " output = model(data)\n", 283 | " loss = criterion(output, target) \n", 284 | "\n", 285 | " predicted = torch.max(output.data, 1)[1]\n", 286 | " total_valid += len(target)\n", 287 | " correct_valid += sum((predicted == target).float())\n", 288 | " valid_loss += loss.item()\n", 289 | "\n", 290 | " # if batch_idx % 100 == 0:\n", 291 | " # print(\"Valid Epoch: {}/{} [iter: {}/{}], acc: {:.6f}, loss: {:.6f}\".format(\n", 292 | " # epoch+1, num_epochs, batch_idx+1, len(valid_loader),\n", 293 | " # correct_valid / float((batch_idx + 1) * batch_size),\n", 294 | " # valid_loss / float((batch_idx + 1) * batch_size)))\n", 295 | " \n", 296 | " valid_acc_ = 100 * (correct_valid / float(total_valid))\n", 297 | " valid_loss_ = valid_loss / total_valid\n", 298 | " \n", 299 | " return valid_acc_, valid_loss_" 300 | ] 301 | }, 302 | { 303 | "cell_type": "code", 304 | "execution_count": 16, 305 | "metadata": { 306 | "colab": {}, 307 | "colab_type": "code", 308 | "executionInfo": { 309 | "elapsed": 878, 310 | "status": "ok", 311 | "timestamp": 1600705175796, 312 | "user": { 313 | "displayName": "ChingI Lee", 314 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgDdMJrUbQatXDhq6lGSnmXrmVgJ5j16hylgay6-zE=s64", 315 | 
"userId": "03381739445029229035" 316 | }, 317 | "user_tz": -480 318 | }, 319 | "id": "gf7pFjWLPebk" 320 | }, 321 | "outputs": [], 322 | "source": [ 323 | "def training_loop(model, criterion, optimizer, train_loader, valid_loader):\n", 324 | " # set objects for storing metrics\n", 325 | " total_train_loss = []\n", 326 | " total_valid_loss = []\n", 327 | " total_train_accuracy = []\n", 328 | " total_valid_accuracy = []\n", 329 | " \n", 330 | " # Train model\n", 331 | " for epoch in range(num_epochs):\n", 332 | " # training\n", 333 | " train_acc_, train_loss_ = train(train_loader, model, criterion, optimizer, epoch)\n", 334 | " total_train_loss.append(train_loss_)\n", 335 | " total_train_accuracy.append(train_acc_)\n", 336 | "\n", 337 | " # validation\n", 338 | " with torch.no_grad():\n", 339 | " valid_acc_, valid_loss_ = validate(valid_loader, model, criterion, epoch)\n", 340 | " total_valid_loss.append(valid_loss_)\n", 341 | " total_valid_accuracy.append(valid_acc_)\n", 342 | "\n", 343 | " print(\"Epoch: {}/{}, Train acc: {:.6f}, Train loss: {:.6f}, Valid acc: {:.6f}, Valid loss: {:.6f}\".format(\n", 344 | " epoch+1, num_epochs, \n", 345 | " train_acc_, train_loss_,\n", 346 | " valid_acc_, valid_loss_))\n", 347 | "\n", 348 | " print(\"====== END ==========\")\n", 349 | "\n", 350 | " return total_train_loss, total_valid_loss, total_train_accuracy, total_valid_accuracy" 351 | ] 352 | }, 353 | { 354 | "cell_type": "code", 355 | "execution_count": 17, 356 | "metadata": { 357 | "colab": { 358 | "base_uri": "https://localhost:8080/", 359 | "height": 153 360 | }, 361 | "colab_type": "code", 362 | "executionInfo": { 363 | "elapsed": 49508, 364 | "status": "ok", 365 | "timestamp": 1600705226573, 366 | "user": { 367 | "displayName": "ChingI Lee", 368 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgDdMJrUbQatXDhq6lGSnmXrmVgJ5j16hylgay6-zE=s64", 369 | "userId": "03381739445029229035" 370 | }, 371 | "user_tz": -480 372 | }, 373 | "id": "ixjMBk_vyFYq", 374 | "outputId": "886d5948-3f91-4770-c990-652fff15aaba" 375 | }, 376 | "outputs": [ 377 | { 378 | "name": "stderr", 379 | "output_type": "stream", 380 | "text": [ 381 | "/usr/local/lib/python3.6/dist-packages/torch/nn/functional.py:1625: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n", 382 | " warnings.warn(\"nn.functional.sigmoid is deprecated. 
Use torch.sigmoid instead.\")\n" 383 | ] 384 | }, 385 | { 386 | "name": "stdout", 387 | "output_type": "stream", 388 | "text": [ 389 | "Epoch: 1/5, Train acc: 96.315002, Train loss: 0.000491, Valid acc: 96.660004, Valid loss: 0.000428\n", 390 | "Epoch: 2/5, Train acc: 96.720001, Train loss: 0.000428, Valid acc: 97.010002, Valid loss: 0.000391\n", 391 | "Epoch: 3/5, Train acc: 97.061668, Train loss: 0.000384, Valid acc: 97.150002, Valid loss: 0.000349\n", 392 | "Epoch: 4/5, Train acc: 97.366669, Train loss: 0.000347, Valid acc: 97.559998, Valid loss: 0.000304\n", 393 | "Epoch: 5/5, Train acc: 97.566673, Train loss: 0.000321, Valid acc: 97.470001, Valid loss: 0.000311\n", 394 | "====== END ==========\n" 395 | ] 396 | } 397 | ], 398 | "source": [ 399 | "total_train_loss, total_valid_loss, total_train_accuracy, total_valid_accuracy = training_loop(model, criterion, optimizer, train_loader, valid_loader)" 400 | ] 401 | }, 402 | { 403 | "cell_type": "code", 404 | "execution_count": 26, 405 | "metadata": { 406 | "colab": {}, 407 | "colab_type": "code", 408 | "executionInfo": { 409 | "elapsed": 909, 410 | "status": "ok", 411 | "timestamp": 1600705583787, 412 | "user": { 413 | "displayName": "ChingI Lee", 414 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgDdMJrUbQatXDhq6lGSnmXrmVgJ5j16hylgay6-zE=s64", 415 | "userId": "03381739445029229035" 416 | }, 417 | "user_tz": -480 418 | }, 419 | "id": "DECXvdUEegsL" 420 | }, 421 | "outputs": [], 422 | "source": [ 423 | "def plot_result(total_train, total_valid, label):\n", 424 | " plt.plot(range(num_epochs), total_train, 'b-', label=f'Training_{label}')\n", 425 | " plt.plot(range(num_epochs), total_valid, 'g-', label=f'validation_{label}')\n", 426 | " plt.title(f'Training & Validation {label}')\n", 427 | " plt.xlabel('Number of epochs')\n", 428 | " plt.ylabel(f'{label}')\n", 429 | " plt.legend()\n", 430 | " plt.show()" 431 | ] 432 | }, 433 | { 434 | "cell_type": "code", 435 | "execution_count": 27, 436 | "metadata": { 437 | "colab": { 438 | "base_uri": "https://localhost:8080/", 439 | "height": 295 440 | }, 441 | "colab_type": "code", 442 | "executionInfo": { 443 | "elapsed": 867, 444 | "status": "ok", 445 | "timestamp": 1600705584123, 446 | "user": { 447 | "displayName": "ChingI Lee", 448 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgDdMJrUbQatXDhq6lGSnmXrmVgJ5j16hylgay6-zE=s64", 449 | "userId": "03381739445029229035" 450 | }, 451 | "user_tz": -480 452 | }, 453 | "id": "ApqSaWX8evE0", 454 | "outputId": "cc5dde61-7b21-4008-dcd7-53b83a1acc95" 455 | }, 456 | "outputs": [ 457 | { 458 | "data": { 459 | "image/png": "[base64 PNG data omitted: matplotlib figure 'Training & Validation loss' - LeNet training vs. validation loss over 5 epochs]\n", 460 | "text/plain": [ 461 | "<Figure size 432x288 with 1 Axes>" 462 | ] 463 | }, 464 | "metadata": { 465 | "needs_background": "light", 466 | "tags": [] 467 | }, 468 | "output_type": "display_data" 469 | } 470 | ], 471 | "source": [ 472 | "plot_result(total_train_loss, total_valid_loss, 'loss')" 473 | ] 474 | }, 475 | { 476 | "cell_type": "code", 477 | "execution_count": 28, 478 | "metadata": { 479 | "colab": { 480 | "base_uri": "https://localhost:8080/", 481 | "height": 295 482 | }, 483 | "colab_type": "code", 484 | "executionInfo": { 485 | "elapsed": 772, 486 | "status": "ok", 487 | "timestamp": 1600705612868, 488 | "user": { 489 | "displayName": "ChingI Lee", 490 | "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgDdMJrUbQatXDhq6lGSnmXrmVgJ5j16hylgay6-zE=s64", 491 | "userId": "03381739445029229035" 492 | }, 493 | "user_tz": -480 494 | }, 495 | "id": "-QvUpUsTfQL_", 496 | "outputId": "6236a341-8600-496e-a6b7-8d2a599ee6af" 497 | }, 498 | "outputs": [ 499 | { 500 | "data": { 501 | "image/png": "[base64 PNG data omitted: matplotlib figure 'Training & Validation accuracy' - LeNet training vs. validation accuracy over 5 epochs; dump truncated here]
<base64-encoded PNG omitted: matplotlib figure of the training/validation accuracy curves>\n", 502 | "text/plain": [ 503 | "
" 504 | ] 505 | }, 506 | "metadata": { 507 | "needs_background": "light", 508 | "tags": [] 509 | }, 510 | "output_type": "display_data" 511 | } 512 | ], 513 | "source": [ 514 | "plot_result(total_train_accuracy, total_valid_accuracy, 'accuracy')" 515 | ] 516 | }, 517 | { 518 | "cell_type": "code", 519 | "execution_count": null, 520 | "metadata": { 521 | "colab": {}, 522 | "colab_type": "code", 523 | "id": "Xxogh-sMZTa8" 524 | }, 525 | "outputs": [], 526 | "source": [] 527 | }, 528 | { 529 | "cell_type": "code", 530 | "execution_count": null, 531 | "metadata": { 532 | "colab": {}, 533 | "colab_type": "code", 534 | "id": "zPiSk-YCK509" 535 | }, 536 | "outputs": [], 537 | "source": [] 538 | } 539 | ], 540 | "metadata": { 541 | "accelerator": "GPU", 542 | "colab": { 543 | "authorship_tag": "ABX9TyPjMakA5QDuYlkwJvt+M/C7", 544 | "collapsed_sections": [], 545 | "name": "LeNet.ipynb", 546 | "provenance": [] 547 | }, 548 | "kernelspec": { 549 | "display_name": "Python 3", 550 | "language": "python", 551 | "name": "python3" 552 | }, 553 | "language_info": { 554 | "codemirror_mode": { 555 | "name": "ipython", 556 | "version": 3 557 | }, 558 | "file_extension": ".py", 559 | "mimetype": "text/x-python", 560 | "name": "python", 561 | "nbconvert_exporter": "python", 562 | "pygments_lexer": "ipython3", 563 | "version": "3.7.4" 564 | } 565 | }, 566 | "nbformat": 4, 567 | "nbformat_minor": 1 568 | } 569 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Pytorch_note 2 | framework: pytorch 3 | 4 | **分散式訓練** 5 | 6 | * ddp_example.py 7 | 8 | 詳細介紹可以參考我的 Medium: 9 | 10 | [Pytorch 基本介紹與教學](https://medium.com/ching-i/pytorch-%E5%9F%BA%E6%9C%AC%E4%BB%8B%E7%B4%B9%E8%88%87%E6%95%99%E5%AD%B8-ac0e1ebfd7ec) 11 | 12 | [Anaconda 及 Deep learning framework 安裝教學](https://medium.com/ching-i/anaconda-%E5%8F%8A-deep-learning-framework-%E5%AE%89%E8%A3%9D%E6%95%99%E5%AD%B8-749451823aa2) 13 | 14 | [卷積神經網絡 CNN 經典模型 — LeNet、AlexNet、VGG、NiN with Pytorch code](https://medium.com/ching-i/%E5%8D%B7%E7%A9%8D%E7%A5%9E%E7%B6%93%E7%B6%B2%E7%B5%A1-cnn-%E7%B6%93%E5%85%B8%E6%A8%A1%E5%9E%8B-lenet-alexnet-vgg-nin-with-pytorch-code-84462d6cf60c) 15 | 16 | [卷積神經網絡 CNN 經典模型 — GoogleLeNet、ResNet、DenseNet with Pytorch code](https://medium.com/ching-i/%E5%8D%B7%E7%A9%8D%E7%A5%9E%E7%B6%93%E7%B6%B2%E7%B5%A1-cnn-%E7%B6%93%E5%85%B8%E6%A8%A1%E5%9E%8B-googlelenet-resnet-densenet-with-pytorch-code-1688015808d9) 17 | 18 | [Pytorch 分散式訓練 DistributedDataParallel — 概念篇](https://medium.com/ching-i/pytorch-%E5%88%86%E6%95%A3%E5%BC%8F%E8%A8%93%E7%B7%B4-distributeddataparallel-%E6%A6%82%E5%BF%B5%E7%AF%87-8378e0ead77) 19 | 20 | [Pytorch 分散式訓練 DistributedDataParallel — 實作篇](https://medium.com/ching-i/pytorch-%E5%88%86%E6%95%A3%E5%BC%8F%E8%A8%93%E7%B7%B4-distributeddataparallel-%E5%AF%A6%E4%BD%9C%E7%AF%87-35c762cb7e08) 21 | 22 | [Pytorch 修改、新增、刪除 pre-trained model layer](https://medium.com/ching-i/pytorch-%E4%BF%AE%E6%94%B9-%E6%96%B0%E5%A2%9E-%E5%88%AA%E9%99%A4-pre-trained-model-layer-d5556654813a) 23 | 24 | [Pytorch 凍結與解凍模型參數、layer 設置不同 learning rate](https://medium.com/ching-i/pytorch-%E5%87%8D%E7%B5%90%E8%88%87%E8%A7%A3%E5%87%8D%E6%A8%A1%E5%9E%8B%E5%8F%83%E6%95%B8-layer-%E8%A8%AD%E7%BD%AE%E4%B8%8D%E5%90%8C-learning-rate-8af99d2d4c14) 25 | 26 | [Pytorch 模型權重初始化、複製操作](https://medium.com/ching-i/pytorch-%E6%A8%A1%E5%9E%8B%E6%AC%8A%E9%87%8D%E5%88%9D%E5%A7%8B%E5%8C%96-%E8%A4%87%E8%A3%BD%E6%93%8D%E4%BD%9C-194a34fec3ff) 
-------------------------------------------------------------------------------- /ddp_example.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import torch 4 | import torch.nn as nn 5 | from torch.optim import lr_scheduler 6 | import torch.distributed as dist 7 | from torch.utils.data.distributed import DistributedSampler 8 | from torch.utils.data import DataLoader 9 | import torchvision 10 | import torchvision.transforms as transforms 11 | 12 | def train_epoch(train_loader, optimizer, criterion, lr_scheduler, model, world_size): 13 | model.train() 14 | 15 | train_running_loss = 0.0 16 | train_running_acc = 0.0 17 | for batch_idx, (data, target) in enumerate(train_loader): 18 | data = data.to(device, non_blocking=True) 19 | target = target.to(device, non_blocking=True) 20 | 21 | output = model(data) 22 | preds = torch.max(output, 1)[1] 23 | 24 | loss = criterion(output, target) 25 | 26 | optimizer.zero_grad() 27 | loss.backward() 28 | optimizer.step() 29 | 30 | train_running_loss += loss.item() 31 | train_running_acc += torch.eq(preds, target).sum().item() 32 | 33 | lr_scheduler.step() 34 | train_loss_value = train_running_loss/ (len(train_dataset) / world_size) 35 | train_acc_value = train_running_acc/ (len(train_dataset) / world_size) 36 | 37 | return train_loss_value, train_acc_value 38 | 39 | def valid_epoch(valid_loader, criterion, model, world_size): 40 | model.eval() 41 | 42 | valid_running_loss = 0.0 43 | valid_running_acc = 0.0 44 | with torch.no_grad(): 45 | for batch_idx, (data, target) in enumerate(valid_loader): 46 | data = data.to(device, non_blocking=True) 47 | target = target.to(device, non_blocking=True) 48 | 49 | outputs = model(data) 50 | preds = torch.max(outputs, 1)[1] 51 | 52 | loss = criterion(outputs, target) 53 | 54 | valid_running_loss += loss.item() 55 | valid_running_acc += torch.eq(preds, target).sum().item() 56 | 57 | valid_loss_value = valid_running_loss/ (len(valid_dataset) / world_size) 58 | valid_acc_value = valid_running_acc/ (len(valid_dataset) / world_size) 59 | 60 | return valid_loss_value, valid_acc_value 61 | 62 | if __name__ == '__main__': 63 | parser = argparse.ArgumentParser() 64 | parser.add_argument("--local_rank", default=0, type=int) 65 | 66 | args = parser.parse_args() 67 | 68 | dist.init_process_group(backend='nccl') 69 | dist.barrier() 70 | # rank = dist.get_rank() 71 | world_size = dist.get_world_size() 72 | 73 | 74 | train_transform = transforms.Compose([transforms.RandomResizedCrop(224), 75 | transforms.RandomHorizontalFlip(), 76 | transforms.ToTensor(), 77 | transforms.Normalize(mean=[0.485, 0.456, 0.406], 78 | std=[0.229, 0.224, 0.225])]) 79 | 80 | valid_transform = transforms.Compose([transforms.Resize(256), 81 | transforms.CenterCrop(224), 82 | transforms.ToTensor(), 83 | transforms.Normalize(mean=[0.485, 0.456, 0.406], 84 | std=[0.229, 0.224, 0.225])]) 85 | 86 | train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=train_transform) 87 | valid_dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=valid_transform) 88 | 89 | train_sampler = DistributedSampler(train_dataset) 90 | valid_sampler = DistributedSampler(valid_dataset) 91 | 92 | train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=256, 93 | pin_memory=False, prefetch_factor=2, num_workers=4) 94 | 95 | valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=256, 96 | pin_memory=False, 
prefetch_factor=2, num_workers=4) 97 | 98 |     if torch.cuda.is_available(): 99 |         device = torch.device("cuda", args.local_rank) 100 |     else: 101 |         device = torch.device("cpu") 102 | 103 |     model = torch.hub.load('pytorch/vision:v0.10.0', 'resnet18') 104 |     model.fc = nn.Sequential(nn.Linear(in_features=512, out_features=128), nn.LeakyReLU(), 105 |                              nn.Dropout(0.5), nn.Linear(128, 10)) 106 | 107 |     model = model.to(device) 108 |     model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) 109 |     model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], 110 |                                                       output_device=args.local_rank) 111 | 112 |     optimizer = torch.optim.AdamW(model.parameters(), lr=0.001, weight_decay=0.05) 113 |     lr_scheduler_values = lr_scheduler.StepLR(optimizer, step_size = 30, gamma = 0.1) 114 |     criterion = nn.CrossEntropyLoss().to(device) 115 | 116 |     num_epochs = 100 117 |     for epoch in range(num_epochs): 118 |         train_sampler.set_epoch(epoch) 119 |         valid_sampler.set_epoch(epoch) 120 | 121 |         train_loss_value, train_acc_value = train_epoch(train_loader, optimizer, criterion, lr_scheduler_values, model, world_size) 122 | 123 |         valid_loss_value, valid_acc_value = valid_epoch(valid_loader, criterion, model, world_size) 124 | 125 |         print("Train_local_rank: {} Train_Epoch: {}/{} Training_Loss: {} Training_acc: {:.2f}\ 126 |         ".format(args.local_rank, epoch, num_epochs-1, train_loss_value, train_acc_value)) 127 | 128 |         print("Valid_local_rank: {} Valid_Epoch: {}/{} Valid_Loss: {} Valid_acc: {:.2f}\ 129 |         ".format(args.local_rank, epoch, num_epochs-1, valid_loss_value, valid_acc_value)) 130 | 131 |         print('--------------------------------') 132 | 133 |     print("finished.") -------------------------------------------------------------------------------- /freeze_unfreeze_network.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "58dc68d2", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "import torch\n", 11 | "import torch.nn as nn\n", 12 | "import torchvision.models as models" 13 | ] 14 | }, 15 | { 16 | "cell_type": "markdown", 17 | "id": "ae364b92", 18 | "metadata": {}, 19 | "source": [ 20 | "# Freezing pre-trained model weights\n", 21 | "You can use the model weights provided by pytorch's torchvision.models, or use a weight file that you trained or downloaded yourself.\n", 22 | "\n", 23 | "## Using the pre-trained model weights provided by pytorch" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": 2, 29 | "id": "264e9f6f-f8de-47ac-9478-fbe9ce3cfb40", 30 | "metadata": {}, 31 | "outputs": [], 32 | "source": [ 33 | "model_1 = models.resnet18(pretrained=True)" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": 3, 39 | "id": "e41dc6cf", 40 | "metadata": { 41 | "scrolled": true 42 | }, 43 | "outputs": [ 44 | { 45 | "name": "stdout", 46 | "output_type": "stream", 47 | "text": [ 48 | "name: conv1.weight\n", 49 | "requires_grad: True\n", 50 | "name: bn1.weight\n", 51 | "requires_grad: True\n", 52 | "name: bn1.bias\n", 53 | "requires_grad: True\n", 54 | "name: layer1.0.conv1.weight\n", 55 | "requires_grad: True\n", 56 | "name: layer1.0.bn1.weight\n", 57 | "requires_grad: True\n", 58 | "name: fc.weight\n", 59 | "requires_grad: True\n", 60 | "name: fc.bias\n", 61 | "requires_grad: True\n" 62 | ] 63 | } 64 | ], 65 | "source": [ 66 | "cnt = 0\n", 67 | "for name, param in model_1.named_parameters():\n", 68 | "    print(\"name: \", name)\n", 69 | "    print(\"requires_grad: \", param.requires_grad)\n", 70 | "    \n", 71 | "    cnt += 1\n", 72 | "    if 
cnt == 5:\n", 73 | "        break\n", 74 | "\n", 75 | "print(\"name: fc.weight\")\n", 76 | "print(\"requires_grad: \", model_1.fc.weight.requires_grad)\n", 77 | "\n", 78 | "print(\"name: fc.bias\")\n", 79 | "print(\"requires_grad: \", model_1.fc.bias.requires_grad)" 80 | ] 81 | }, 82 | { 83 | "cell_type": "markdown", 84 | "id": "7a72aeec", 85 | "metadata": {}, 86 | "source": [ 87 | "### Freeze all layers except the fully connected layer" 88 | ] 89 | }, 90 | { 91 | "cell_type": "code", 92 | "execution_count": 4, 93 | "id": "7c0f1beb", 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "for name, param in model_1.named_parameters():\n", 98 | "    if name not in ['fc.weight', 'fc.bias']:\n", 99 | "        param.requires_grad = False" 100 | ] 101 | }, 102 | { 103 | "cell_type": "code", 104 | "execution_count": 5, 105 | "id": "ab1542e4", 106 | "metadata": {}, 107 | "outputs": [ 108 | { 109 | "name": "stdout", 110 | "output_type": "stream", 111 | "text": [ 112 | "name: conv1.weight\n", 113 | "requires_grad: False\n", 114 | "name: bn1.weight\n", 115 | "requires_grad: False\n", 116 | "name: bn1.bias\n", 117 | "requires_grad: False\n", 118 | "name: layer1.0.conv1.weight\n", 119 | "requires_grad: False\n", 120 | "name: layer1.0.bn1.weight\n", 121 | "requires_grad: False\n", 122 | "name: fc.weight\n", 123 | "requires_grad: True\n", 124 | "name: fc.bias\n", 125 | "requires_grad: True\n" 126 | ] 127 | } 128 | ], 129 | "source": [ 130 | "cnt = 0\n", 131 | "for name, param in model_1.named_parameters():\n", 132 | "    print(\"name: \", name)\n", 133 | "    print(\"requires_grad: \", param.requires_grad)\n", 134 | "    \n", 135 | "    cnt += 1\n", 136 | "    if cnt == 5:\n", 137 | "        break\n", 138 | "\n", 139 | "print(\"name: fc.weight\")\n", 140 | "print(\"requires_grad: \", model_1.fc.weight.requires_grad)\n", 141 | "\n", 142 | "print(\"name: fc.bias\")\n", 143 | "print(\"requires_grad: \", model_1.fc.bias.requires_grad)" 144 | ] 145 | }, 146 | { 147 | "cell_type": "markdown", 148 | "id": "2570dd74", 149 | "metadata": {}, 150 | "source": [ 151 | "### Filter the parameters passed to the optimizer\n", 152 | "\n", 153 | "filter() is used to drop elements that do not satisfy a condition. Its syntax is filter(function, iterable), where function is the predicate and iterable is the list of elements; the return value is an iterator. During filtering, each value of the iterable is passed to function for evaluation, and only the elements for which it returns True are put into the returned iterator." 154 | ] 155 | }, 156 | { 157 | "cell_type": "code", 158 | "execution_count": 6, 159 | "id": "f693e719", 160 | "metadata": {}, 161 | "outputs": [ 162 | { 163 | "data": { 164 | "text/plain": [ 165 | "[1, 3, 5, 7, 9]" 166 | ] 167 | }, 168 | "execution_count": 6, 169 | "metadata": {}, 170 | "output_type": "execute_result" 171 | } 172 | ], 173 | "source": [ 174 | "### filter example\n", 175 | "\n", 176 | "def is_odd(n):\n", 177 | "    return n % 2 == 1\n", 178 | "    \n", 179 | "newlist = filter(is_odd, [i for i in range(10)])\n", 180 | "\n", 181 | "list(newlist)" 182 | ] 183 | }, 184 | { 185 | "cell_type": "code", 186 | "execution_count": 7, 187 | "id": "76103901", 188 | "metadata": {}, 189 | "outputs": [], 190 | "source": [ 191 | "# apply the filter\n", 192 | "parameters_1 = filter(lambda p: p.requires_grad, model_1.parameters())\n", 193 | "optimizer_1 = torch.optim.Adam(parameters_1, lr=0.001, weight_decay=1e-5)" 194 | ] 195 | }, 196 | { 197 | "cell_type": "markdown", 198 | "id": "908e2bc8", 199 | "metadata": {}, 200 | "source": [ 201 | "## Using a weight file you trained or downloaded yourself" 202 | ] 203 | }, 204 | { 205 | "cell_type": "code", 206 | "execution_count": null, 207 | "id": "2df5254b", 208 | "metadata": {}, 209 | "outputs": [], 210 | "source": [ 211 | "import gdown\n", 212 | "\n", 213 | "resnet_model = 
'https://download.pytorch.org/models/resnet18-5c106cde.pth'\n", 214 | "gdown.download(resnet_model, \"resnet-5c106cde.pth\")" 215 | ] 216 | }, 217 | { 218 | "cell_type": "code", 219 | "execution_count": 8, 220 | "id": "6e44bc14", 221 | "metadata": {}, 222 | "outputs": [], 223 | "source": [ 224 | "checkpoint = torch.load('resnet-5c106cde.pth')" 225 | ] 226 | }, 227 | { 228 | "cell_type": "code", 229 | "execution_count": 9, 230 | "id": "177b71a2", 231 | "metadata": {}, 232 | "outputs": [ 233 | { 234 | "name": "stdout", 235 | "output_type": "stream", 236 | "text": [ 237 | "name: conv1.weight\n", 238 | "requires_grad: True\n", 239 | "name: bn1.running_mean\n", 240 | "requires_grad: False\n", 241 | "name: bn1.running_var\n", 242 | "requires_grad: False\n", 243 | "name: bn1.weight\n", 244 | "requires_grad: True\n", 245 | "name: bn1.bias\n", 246 | "requires_grad: True\n", 247 | "name: fc.weight\n", 248 | "requires_grad: True\n", 249 | "name: fc.bias\n", 250 | "requires_grad: True\n" 251 | ] 252 | } 253 | ], 254 | "source": [ 255 | "cnt = 0\n", 256 | "for k, v in checkpoint.items():\n", 257 | "    print(\"name: \", k)\n", 258 | "    print(\"requires_grad: \", v.requires_grad)\n", 259 | "    \n", 260 | "    cnt += 1\n", 261 | "    if cnt == 5:\n", 262 | "        break\n", 263 | "    \n", 264 | "print(\"name: fc.weight\")\n", 265 | "print(\"requires_grad: \", checkpoint['fc.weight'].requires_grad)\n", 266 | "\n", 267 | "print(\"name: fc.bias\")\n", 268 | "print(\"requires_grad: \", checkpoint['fc.bias'].requires_grad)" 269 | ] 270 | }, 271 | { 272 | "cell_type": "markdown", 273 | "id": "091a6a0c", 274 | "metadata": {}, 275 | "source": [ 276 | "### Freeze all layers except the fully connected layer" 277 | ] 278 | }, 279 | { 280 | "cell_type": "code", 281 | "execution_count": 10, 282 | "id": "1a8a3d51", 283 | "metadata": {}, 284 | "outputs": [], 285 | "source": [ 286 | "for k, v in checkpoint.items():\n", 287 | "    if k not in ['fc.weight', 'fc.bias']:\n", 288 | "        v.requires_grad = False" 289 | ] 290 | }, 291 | { 292 | "cell_type": "code", 293 | "execution_count": 11, 294 | "id": "9e9ffaf9", 295 | "metadata": {}, 296 | "outputs": [ 297 | { 298 | "name": "stdout", 299 | "output_type": "stream", 300 | "text": [ 301 | "name: conv1.weight\n", 302 | "requires_grad: False\n", 303 | "name: bn1.running_mean\n", 304 | "requires_grad: False\n", 305 | "name: bn1.running_var\n", 306 | "requires_grad: False\n", 307 | "name: bn1.weight\n", 308 | "requires_grad: False\n", 309 | "name: bn1.bias\n", 310 | "requires_grad: False\n", 311 | "name: fc.weight\n", 312 | "requires_grad: True\n", 313 | "name: fc.bias\n", 314 | "requires_grad: True\n" 315 | ] 316 | } 317 | ], 318 | "source": [ 319 | "cnt = 0\n", 320 | "for k, v in checkpoint.items():\n", 321 | "    print(\"name: \", k)\n", 322 | "    print(\"requires_grad: \", v.requires_grad)\n", 323 | "    \n", 324 | "    cnt += 1\n", 325 | "    if cnt == 5:\n", 326 | "        break\n", 327 | "    \n", 328 | "print(\"name: fc.weight\")\n", 329 | "print(\"requires_grad: \", checkpoint['fc.weight'].requires_grad)\n", 330 | "\n", 331 | "print(\"name: fc.bias\")\n", 332 | "print(\"requires_grad: \", checkpoint['fc.bias'].requires_grad)" 333 | ] 334 | }, 335 | { 336 | "cell_type": "markdown", 337 | "id": "997f106e", 338 | "metadata": {}, 339 | "source": [ 340 | "### Filter the parameters passed to the optimizer" 341 | ] 342 | }, 343 | { 344 | "cell_type": "code", 345 | "execution_count": 12, 346 | "id": "a28603c2", 347 | "metadata": {}, 348 | "outputs": [ 349 | { 350 | "data": { 351 | "text/plain": [ 352 | "<All keys matched successfully>" 353 | ] 354 | }, 355 | "execution_count": 12, 356 | "metadata": {}, 357 | 
"output_type": "execute_result" 358 | } 359 | ], 360 | "source": [ 361 | "model_2 = models.resnet18()\n", 362 | "model_state = model_2.state_dict()\n", 363 | "\n", 364 | "pretrained_dict = {k: v for k, v in checkpoint.items() if k in model_state}\n", 365 | "model_state.update(pretrained_dict)\n", 366 | "model_2.load_state_dict(model_state)" 367 | ] 368 | }, 369 | { 370 | "cell_type": "code", 371 | "execution_count": 13, 372 | "id": "a0ef378b", 373 | "metadata": {}, 374 | "outputs": [], 375 | "source": [ 376 | "parameters_2 = filter(lambda p: p.requires_grad, model_2.parameters())\n", 377 | "optimizer_2 = torch.optim.Adam(parameters_2, lr=0.001, weight_decay=1e-5)" 378 | ] 379 | }, 380 | { 381 | "cell_type": "markdown", 382 | "id": "db49c021", 383 | "metadata": {}, 384 | "source": [ 385 | "### 將 50 層內的 layer 凍結" 386 | ] 387 | }, 388 | { 389 | "cell_type": "code", 390 | "execution_count": 14, 391 | "id": "90b53d35", 392 | "metadata": {}, 393 | "outputs": [], 394 | "source": [ 395 | "checkpoint = torch.load('resnet-5c106cde.pth')\n", 396 | "for i, (k, v) in enumerate(checkpoint.items()):\n", 397 | " if i < 50:\n", 398 | " v.requires_grad = False" 399 | ] 400 | }, 401 | { 402 | "cell_type": "code", 403 | "execution_count": 15, 404 | "id": "4a96f7a5", 405 | "metadata": {}, 406 | "outputs": [ 407 | { 408 | "data": { 409 | "text/plain": [ 410 | "" 411 | ] 412 | }, 413 | "execution_count": 15, 414 | "metadata": {}, 415 | "output_type": "execute_result" 416 | } 417 | ], 418 | "source": [ 419 | "model_3 = models.resnet18()\n", 420 | "model_state = model_3.state_dict()\n", 421 | "\n", 422 | "pretrained_dict = {k: v for k, v in checkpoint.items() if k in model_state}\n", 423 | "model_state.update(pretrained_dict)\n", 424 | "model_3.load_state_dict(model_state)" 425 | ] 426 | }, 427 | { 428 | "cell_type": "code", 429 | "execution_count": 16, 430 | "id": "128cbef2", 431 | "metadata": {}, 432 | "outputs": [], 433 | "source": [ 434 | "parameters_3 = filter(lambda p: p.requires_grad, model_3.parameters())\n", 435 | "optimizer_3 = torch.optim.Adam(parameters_3, lr=0.001, weight_decay=1e-5)" 436 | ] 437 | }, 438 | { 439 | "cell_type": "markdown", 440 | "id": "6cceac48", 441 | "metadata": {}, 442 | "source": [ 443 | "# 解除凍結\n", 444 | "### 使用 pytorch 提供的 pre-trained model weight" 445 | ] 446 | }, 447 | { 448 | "cell_type": "code", 449 | "execution_count": 17, 450 | "id": "2c93dc11", 451 | "metadata": {}, 452 | "outputs": [ 453 | { 454 | "name": "stdout", 455 | "output_type": "stream", 456 | "text": [ 457 | "name: conv1.weight\n", 458 | "requires_grad: False\n", 459 | "name: bn1.weight\n", 460 | "requires_grad: False\n", 461 | "name: bn1.bias\n", 462 | "requires_grad: False\n", 463 | "name: layer1.0.conv1.weight\n", 464 | "requires_grad: False\n", 465 | "name: layer1.0.bn1.weight\n", 466 | "requires_grad: False\n" 467 | ] 468 | } 469 | ], 470 | "source": [ 471 | "cnt = 0\n", 472 | "for name, param in model_1.named_parameters():\n", 473 | " print(\"name: \", name)\n", 474 | " print(\"requires_grad: \", param.requires_grad)\n", 475 | " \n", 476 | " cnt += 1\n", 477 | " if cnt == 5:\n", 478 | " break" 479 | ] 480 | }, 481 | { 482 | "cell_type": "code", 483 | "execution_count": 18, 484 | "id": "db5df00a", 485 | "metadata": {}, 486 | "outputs": [], 487 | "source": [ 488 | "for name, param in model_1.named_parameters():\n", 489 | " if param.requires_grad == False:\n", 490 | " param.requires_grad = True\n", 491 | " optimizer_1.add_param_group({'params': param})" 492 | ] 493 | }, 494 | { 495 | "cell_type": "code", 496 
| "execution_count": 19, 497 | "id": "5679646a", 498 | "metadata": {}, 499 | "outputs": [ 500 | { 501 | "name": "stdout", 502 | "output_type": "stream", 503 | "text": [ 504 | "name: conv1.weight\n", 505 | "requires_grad: True\n", 506 | "name: bn1.weight\n", 507 | "requires_grad: True\n", 508 | "name: bn1.bias\n", 509 | "requires_grad: True\n", 510 | "name: layer1.0.conv1.weight\n", 511 | "requires_grad: True\n", 512 | "name: layer1.0.bn1.weight\n", 513 | "requires_grad: True\n" 514 | ] 515 | } 516 | ], 517 | "source": [ 518 | "cnt = 0\n", 519 | "for name, param in model_1.named_parameters():\n", 520 | " print(\"name: \", name)\n", 521 | " print(\"requires_grad: \", param.requires_grad)\n", 522 | " \n", 523 | " cnt += 1\n", 524 | " if cnt == 5:\n", 525 | " break" 526 | ] 527 | }, 528 | { 529 | "cell_type": "markdown", 530 | "id": "b1a019b1", 531 | "metadata": {}, 532 | "source": [ 533 | "### 使用自己訓練或下載的權重檔\n" 534 | ] 535 | }, 536 | { 537 | "cell_type": "code", 538 | "execution_count": 20, 539 | "id": "cc0b5650", 540 | "metadata": {}, 541 | "outputs": [ 542 | { 543 | "name": "stdout", 544 | "output_type": "stream", 545 | "text": [ 546 | "name: conv1.weight\n", 547 | "requires_grad: True\n", 548 | "name: bn1.weight\n", 549 | "requires_grad: True\n", 550 | "name: bn1.bias\n", 551 | "requires_grad: True\n", 552 | "name: layer1.0.conv1.weight\n", 553 | "requires_grad: True\n", 554 | "name: layer1.0.bn1.weight\n", 555 | "requires_grad: True\n" 556 | ] 557 | } 558 | ], 559 | "source": [ 560 | "cnt = 0\n", 561 | "for name, param in model_2.named_parameters():\n", 562 | " print(\"name: \", name)\n", 563 | " print(\"requires_grad: \", param.requires_grad)\n", 564 | " \n", 565 | " cnt += 1\n", 566 | " if cnt == 5:\n", 567 | " break" 568 | ] 569 | }, 570 | { 571 | "cell_type": "code", 572 | "execution_count": 21, 573 | "id": "6c87adba", 574 | "metadata": {}, 575 | "outputs": [], 576 | "source": [ 577 | "for name, param in model_2.named_parameters():\n", 578 | " if param.requires_grad == False:\n", 579 | " param.requires_grad = True\n", 580 | " optimizer_2.add_param_group({'params': param})" 581 | ] 582 | }, 583 | { 584 | "cell_type": "code", 585 | "execution_count": 22, 586 | "id": "ebbed267", 587 | "metadata": {}, 588 | "outputs": [ 589 | { 590 | "name": "stdout", 591 | "output_type": "stream", 592 | "text": [ 593 | "name: conv1.weight\n", 594 | "requires_grad: True\n", 595 | "name: bn1.weight\n", 596 | "requires_grad: True\n", 597 | "name: bn1.bias\n", 598 | "requires_grad: True\n", 599 | "name: layer1.0.conv1.weight\n", 600 | "requires_grad: True\n", 601 | "name: layer1.0.bn1.weight\n", 602 | "requires_grad: True\n" 603 | ] 604 | } 605 | ], 606 | "source": [ 607 | "cnt = 0\n", 608 | "for name, param in model_2.named_parameters():\n", 609 | " print(\"name: \", name)\n", 610 | " print(\"requires_grad: \", param.requires_grad)\n", 611 | " \n", 612 | " cnt += 1\n", 613 | " if cnt == 5:\n", 614 | " break" 615 | ] 616 | }, 617 | { 618 | "cell_type": "markdown", 619 | "id": "862df866", 620 | "metadata": {}, 621 | "source": [ 622 | "# 設置不同 learning rate\n", 623 | "\n", 624 | "針對不同層的 layer 設定各自的 learning rate,有三種方法: 直接設定參數名稱、設定某一層的全部參數、將所有 layer 拆分成幾部分後再設定" 625 | ] 626 | }, 627 | { 628 | "cell_type": "code", 629 | "execution_count": null, 630 | "id": "a09285c9", 631 | "metadata": {}, 632 | "outputs": [], 633 | "source": [ 634 | "# 查看 layer 名稱\n", 635 | "# for name, param in model_1.named_parameters():\n", 636 | "# print(name)" 637 | ] 638 | }, 639 | { 640 | "cell_type": "markdown", 641 | "id": "7c7867ed", 642 
| "metadata": {}, 643 | "source": [ 644 | "### 直接設定參數名稱" 645 | ] 646 | }, 647 | { 648 | "cell_type": "code", 649 | "execution_count": 25, 650 | "id": "b1955175", 651 | "metadata": {}, 652 | "outputs": [], 653 | "source": [ 654 | "optimizer_4 = torch.optim.Adam([\n", 655 | " {'params': model_1.fc.weight, 'lr': 1e-5},\n", 656 | " {'params': model_1.fc.bias, 'lr': 1e-3}],\n", 657 | " lr=0.001) # 默認 learning rate 值" 658 | ] 659 | }, 660 | { 661 | "cell_type": "markdown", 662 | "id": "2220971b", 663 | "metadata": {}, 664 | "source": [ 665 | "### 設定某一層的全部參數" 666 | ] 667 | }, 668 | { 669 | "cell_type": "code", 670 | "execution_count": 26, 671 | "id": "a4f9b8b5", 672 | "metadata": {}, 673 | "outputs": [], 674 | "source": [ 675 | "optimizer_5 = torch.optim.Adam([\n", 676 | " {'params': model_1.layer1.parameters(), 'lr': 1e-5},\n", 677 | " {'params': model_1.fc.parameters(), 'lr': 1e-3}],\n", 678 | " lr=0.001) # 默認 learning rate 值" 679 | ] 680 | }, 681 | { 682 | "cell_type": "markdown", 683 | "id": "cc75e057", 684 | "metadata": {}, 685 | "source": [ 686 | "### 將所有layer拆分成幾部分後再設定" 687 | ] 688 | }, 689 | { 690 | "cell_type": "code", 691 | "execution_count": 27, 692 | "id": "5c6f9895", 693 | "metadata": {}, 694 | "outputs": [], 695 | "source": [ 696 | "fc_layer = list(map(id, model_1.fc.parameters()))\n", 697 | "other_layer = filter(lambda p: id(p) not in fc_layer, model_1.parameters())" 698 | ] 699 | }, 700 | { 701 | "cell_type": "code", 702 | "execution_count": 28, 703 | "id": "d1beb57c", 704 | "metadata": {}, 705 | "outputs": [], 706 | "source": [ 707 | "optimizer_6 = torch.optim.Adam([\n", 708 | " {'params': other_layer, 'lr': 1e-5},\n", 709 | " {'params': model_1.fc.parameters(), 'lr': 1e-3}],\n", 710 | " lr=0.001) # 默認 learning rate 值" 711 | ] 712 | } 713 | ], 714 | "metadata": { 715 | "kernelspec": { 716 | "display_name": "pytorch", 717 | "language": "python", 718 | "name": "pytorch" 719 | }, 720 | "language_info": { 721 | "codemirror_mode": { 722 | "name": "ipython", 723 | "version": 3 724 | }, 725 | "file_extension": ".py", 726 | "mimetype": "text/x-python", 727 | "name": "python", 728 | "nbconvert_exporter": "python", 729 | "pygments_lexer": "ipython3", 730 | "version": "3.7.11" 731 | } 732 | }, 733 | "nbformat": 4, 734 | "nbformat_minor": 5 735 | } 736 | -------------------------------------------------------------------------------- /mmdetection_create_model.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": { 7 | "id": "VoiX2aCRCrVL" 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "!pip install -U openmim\n", 12 | "!mim install mmcv-full" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "metadata": { 19 | "id": "oJOA2squDIu_" 20 | }, 21 | "outputs": [], 22 | "source": [ 23 | "!git clone https://github.com/open-mmlab/mmdetection.git" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": null, 29 | "metadata": { 30 | "id": "jTkFzSfoCrX7" 31 | }, 32 | "outputs": [], 33 | "source": [ 34 | "%cd mmdetection\n", 35 | "!pip install -v -e ." 
36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": null, 41 | "metadata": {}, 42 | "outputs": [], 43 | "source": [ 44 | "!curl -L \"https://public.roboflow.com/ds/xcrhGBJ1aB?key=bdAQK4gH2e\" > data_pets.zip" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": null, 50 | "metadata": {}, 51 | "outputs": [], 52 | "source": [ 53 | "!unzip 'data_pets.zip' -d data_pets" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "metadata": { 60 | "id": "z1j3cmyYDTEA" 61 | }, 62 | "outputs": [], 63 | "source": [ 64 | "!mkdir data_pets/annotations\n", 65 | "!mv data_pets/train/_annotations.coco.json data_pets/annotations/instances_train.json\n", 66 | "!mv data_pets/valid/_annotations.coco.json data_pets/annotations/instances_val.json\n", 67 | "!mv data_pets/test/_annotations.coco.json data_pets/annotations/instances_test.json" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": null, 73 | "metadata": { 74 | "id": "wGA0udZNlWYi" 75 | }, 76 | "outputs": [], 77 | "source": [ 78 | "%%writefile mmdet/models/backbones/my_model.py\n", 79 | "import torch.nn as nn\n", 80 | "from mmcv.runner import BaseModule\n", 81 | "from ..builder import BACKBONES\n", 82 | "\n", 83 | "@BACKBONES.register_module()\n", 84 | "class MyModel(BaseModule):\n", 85 | " def __init__(self, in_channels):\n", 86 | " super(MyModel, self).__init__()\n", 87 | "\n", 88 | " self.layers = nn.ModuleList()\n", 89 | "\n", 90 | " for i, out_channels in enumerate([128, 256, 512]):\n", 91 | " block = nn.Sequential(nn.Conv2d(in_channels=in_channels, out_channels=out_channels, \n", 92 | " kernel_size=3, stride=2, padding=1, bias=False),\n", 93 | "\n", 94 | " nn.BatchNorm2d(out_channels),\n", 95 | " nn.ReLU(inplace=True))\n", 96 | "\n", 97 | " in_channels = out_channels\n", 98 | " self.layers.append(block)\n", 99 | "\n", 100 | " def forward(self, x):\n", 101 | " outputs = []\n", 102 | " for module in self.layers:\n", 103 | " x = module(x)\n", 104 | " outputs.append(x)\n", 105 | " return tuple(outputs)" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": null, 111 | "metadata": {}, 112 | "outputs": [], 113 | "source": [ 114 | "%%writefile mmdet/models/backbones/my_model.py\n", 115 | "import torch.nn as nn\n", 116 | "from mmcv.cnn import build_conv_layer, build_norm_layer, build_activation_layer\n", 117 | "from mmcv.runner import BaseModule\n", 118 | "from ..builder import BACKBONES\n", 119 | "\n", 120 | "@BACKBONES.register_module()\n", 121 | "class MyModel(BaseModule):\n", 122 | " def __init__(self, in_channels, stride=2, padding=1, conv_cfg=None, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU')):\n", 123 | " super(MyModel, self).__init__()\n", 124 | "\n", 125 | " self.layers = nn.ModuleList()\n", 126 | "\n", 127 | " for i, out_channels in enumerate([128, 256, 512]):\n", 128 | " block = nn.Sequential(build_conv_layer(conv_cfg, in_channels, out_channels, kernel_size=3, \n", 129 | " stride=stride, padding=padding, bias=False),\n", 130 | " \n", 131 | " build_norm_layer(norm_cfg, out_channels)[1],\n", 132 | " build_activation_layer(act_cfg))\n", 133 | "\n", 134 | " in_channels = out_channels\n", 135 | " self.layers.append(block)\n", 136 | "\n", 137 | " def forward(self, x):\n", 138 | " outputs = []\n", 139 | " for module in self.layers:\n", 140 | " x = module(x)\n", 141 | " outputs.append(x)\n", 142 | " return tuple(outputs)" 143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 147 | "execution_count": null, 148 | "metadata": { 149 | "id": 
"DhN7BCxmnboD" 150 | }, 151 | "outputs": [], 152 | "source": [ 153 | "%%writefile mmdet/models/backbones/__init__.py\n", 154 | "# Copyright (c) OpenMMLab. All rights reserved.\n", 155 | "from .csp_darknet import CSPDarknet\n", 156 | "from .darknet import Darknet\n", 157 | "from .detectors_resnet import DetectoRS_ResNet\n", 158 | "from .detectors_resnext import DetectoRS_ResNeXt\n", 159 | "from .efficientnet import EfficientNet\n", 160 | "from .hourglass import HourglassNet\n", 161 | "from .hrnet import HRNet\n", 162 | "from .mobilenet_v2 import MobileNetV2\n", 163 | "from .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2\n", 164 | "from .regnet import RegNet\n", 165 | "from .res2net import Res2Net\n", 166 | "from .resnest import ResNeSt\n", 167 | "from .resnet import ResNet, ResNetV1d\n", 168 | "from .resnext import ResNeXt\n", 169 | "from .ssd_vgg import SSDVGG\n", 170 | "from .swin import SwinTransformer\n", 171 | "from .trident_resnet import TridentResNet\n", 172 | "from .my_model import MyModel\n", 173 | "\n", 174 | "__all__ = [\n", 175 | " 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',\n", 176 | " 'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',\n", 177 | " 'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet',\n", 178 | " 'SwinTransformer', 'PyramidVisionTransformer',\n", 179 | " 'PyramidVisionTransformerV2', 'EfficientNet', 'MyModel'\n", 180 | "]\n" 181 | ] 182 | }, 183 | { 184 | "cell_type": "code", 185 | "execution_count": null, 186 | "metadata": { 187 | "id": "cVZiebJa63sw" 188 | }, 189 | "outputs": [], 190 | "source": [ 191 | "!mkdir configs/my_model" 192 | ] 193 | }, 194 | { 195 | "cell_type": "code", 196 | "execution_count": null, 197 | "metadata": { 198 | "id": "eCHiYMIVopaD" 199 | }, 200 | "outputs": [], 201 | "source": [ 202 | "%%writefile configs/my_model/my_model_fpn_1x_coco.py\n", 203 | "_base_ = [\n", 204 | " '../_base_/schedules/schedule_1x.py', \n", 205 | " '../_base_/default_runtime.py'\n", 206 | "]\n", 207 | "\n", 208 | "# model settings\n", 209 | "model = dict(\n", 210 | " type='FasterRCNN',\n", 211 | " backbone=dict(\n", 212 | " type='MyModel',\n", 213 | " in_channels=3),\n", 214 | " neck=dict(\n", 215 | " type='FPN',\n", 216 | " in_channels=[128, 256, 512],\n", 217 | " out_channels=256,\n", 218 | " num_outs=5),\n", 219 | " rpn_head=dict(\n", 220 | " type='RPNHead',\n", 221 | " in_channels=256,\n", 222 | " feat_channels=256,\n", 223 | " anchor_generator=dict(\n", 224 | " type='AnchorGenerator',\n", 225 | " scales=[8],\n", 226 | " ratios=[0.5, 1.0, 2.0],\n", 227 | " strides=[4, 8, 16, 32, 64]),\n", 228 | " bbox_coder=dict(\n", 229 | " type='DeltaXYWHBBoxCoder',\n", 230 | " target_means=[.0, .0, .0, .0],\n", 231 | " target_stds=[1.0, 1.0, 1.0, 1.0]),\n", 232 | " loss_cls=dict(\n", 233 | " type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n", 234 | " loss_bbox=dict(type='L1Loss', loss_weight=1.0)),\n", 235 | " roi_head=dict(\n", 236 | " type='StandardRoIHead',\n", 237 | " bbox_roi_extractor=dict(\n", 238 | " type='SingleRoIExtractor',\n", 239 | " roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),\n", 240 | " out_channels=256,\n", 241 | " featmap_strides=[4, 8, 16, 32]),\n", 242 | " bbox_head=dict(\n", 243 | " type='Shared2FCBBoxHead',\n", 244 | " in_channels=256,\n", 245 | " fc_out_channels=1024,\n", 246 | " roi_feat_size=7,\n", 247 | " num_classes=80,\n", 248 | " bbox_coder=dict(\n", 249 | " type='DeltaXYWHBBoxCoder',\n", 250 | " target_means=[0., 0., 0., 0.],\n", 251 | " 
target_stds=[0.1, 0.1, 0.2, 0.2]),\n", 252 | " reg_class_agnostic=False,\n", 253 | " loss_cls=dict(\n", 254 | " type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n", 255 | " loss_bbox=dict(type='L1Loss', loss_weight=1.0))),\n", 256 | "\n", 257 | " # model training and testing settings\n", 258 | " train_cfg=dict(\n", 259 | " rpn=dict(\n", 260 | " assigner=dict(\n", 261 | " type='MaxIoUAssigner',\n", 262 | " pos_iou_thr=0.7,\n", 263 | " neg_iou_thr=0.3,\n", 264 | " min_pos_iou=0.3,\n", 265 | " match_low_quality=True,\n", 266 | " ignore_iof_thr=-1),\n", 267 | " sampler=dict(\n", 268 | " type='RandomSampler',\n", 269 | " num=256,\n", 270 | " pos_fraction=0.5,\n", 271 | " neg_pos_ub=-1,\n", 272 | " add_gt_as_proposals=False),\n", 273 | " allowed_border=-1,\n", 274 | " pos_weight=-1,\n", 275 | " debug=False),\n", 276 | " rpn_proposal=dict(\n", 277 | " nms_pre=2000,\n", 278 | " max_per_img=1000,\n", 279 | " nms=dict(type='nms', iou_threshold=0.7),\n", 280 | " min_bbox_size=0),\n", 281 | " rcnn=dict(\n", 282 | " assigner=dict(\n", 283 | " type='MaxIoUAssigner',\n", 284 | " pos_iou_thr=0.5,\n", 285 | " neg_iou_thr=0.5,\n", 286 | " min_pos_iou=0.5,\n", 287 | " match_low_quality=False,\n", 288 | " ignore_iof_thr=-1),\n", 289 | " sampler=dict(\n", 290 | " type='RandomSampler',\n", 291 | " num=512,\n", 292 | " pos_fraction=0.25,\n", 293 | " neg_pos_ub=-1,\n", 294 | " add_gt_as_proposals=True),\n", 295 | " pos_weight=-1,\n", 296 | " debug=False)),\n", 297 | " test_cfg=dict(\n", 298 | " rpn=dict(\n", 299 | " nms_pre=1000,\n", 300 | " max_per_img=1000,\n", 301 | " nms=dict(type='nms', iou_threshold=0.7),\n", 302 | " min_bbox_size=0),\n", 303 | " rcnn=dict(\n", 304 | " score_thr=0.05,\n", 305 | " nms=dict(type='nms', iou_threshold=0.5),\n", 306 | " max_per_img=100)\n", 307 | " # soft-nms is also supported for rcnn testing\n", 308 | " # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)\n", 309 | " ))\n", 310 | "\n", 311 | "# dataset settings\n", 312 | "dataset_type = 'CocoDataset'\n", 313 | "data_root = 'data_pets/'\n", 314 | "img_norm_cfg = dict(\n", 315 | " mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n", 316 | "train_pipeline = [\n", 317 | " dict(type='LoadImageFromFile'),\n", 318 | " dict(type='LoadAnnotations', with_bbox=True),\n", 319 | " dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),\n", 320 | " dict(type='RandomFlip', flip_ratio=0.5),\n", 321 | " dict(type='Normalize', **img_norm_cfg),\n", 322 | " dict(type='Pad', size_divisor=32),\n", 323 | " dict(type='DefaultFormatBundle'),\n", 324 | " dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n", 325 | "]\n", 326 | "test_pipeline = [\n", 327 | " dict(type='LoadImageFromFile'),\n", 328 | " dict(\n", 329 | " type='MultiScaleFlipAug',\n", 330 | " img_scale=(1333, 800),\n", 331 | " flip=False,\n", 332 | " transforms=[\n", 333 | " dict(type='Resize', keep_ratio=True),\n", 334 | " dict(type='RandomFlip'),\n", 335 | " dict(type='Normalize', **img_norm_cfg),\n", 336 | " dict(type='Pad', size_divisor=32),\n", 337 | " dict(type='ImageToTensor', keys=['img']),\n", 338 | " dict(type='Collect', keys=['img']),\n", 339 | " ])\n", 340 | "]\n", 341 | "data = dict(\n", 342 | " samples_per_gpu=2,\n", 343 | " workers_per_gpu=2,\n", 344 | " train=dict(\n", 345 | " type=dataset_type,\n", 346 | " ann_file=data_root + 'annotations/instances_train.json',\n", 347 | " img_prefix=data_root + 'train/',\n", 348 | " pipeline=train_pipeline),\n", 349 | " val=dict(\n", 350 | " type=dataset_type,\n", 351 
| " ann_file=data_root + 'annotations/instances_val.json',\n", 352 | " img_prefix=data_root + 'valid/',\n", 353 | " pipeline=test_pipeline),\n", 354 | " test=dict(\n", 355 | " type=dataset_type,\n", 356 | " ann_file=data_root + 'annotations/instances_test.json',\n", 357 | " img_prefix=data_root + 'test/',\n", 358 | " pipeline=test_pipeline))\n", 359 | "evaluation = dict(interval=1, metric='bbox')\n" 360 | ] 361 | }, 362 | { 363 | "cell_type": "code", 364 | "execution_count": null, 365 | "metadata": { 366 | "id": "508YcJpAnbqY" 367 | }, 368 | "outputs": [], 369 | "source": [ 370 | "! python tools/train.py configs/my_model/my_model_fpn_1x_coco.py" 371 | ] 372 | }, 373 | { 374 | "cell_type": "code", 375 | "execution_count": null, 376 | "metadata": { 377 | "id": "65IcSYPllPw6" 378 | }, 379 | "outputs": [], 380 | "source": [] 381 | } 382 | ], 383 | "metadata": { 384 | "accelerator": "GPU", 385 | "colab": { 386 | "authorship_tag": "ABX9TyMmlC6I7/xnqudQ7IqJTJmc", 387 | "collapsed_sections": [], 388 | "private_outputs": true, 389 | "provenance": [] 390 | }, 391 | "gpuClass": "standard", 392 | "kernelspec": { 393 | "display_name": "Python 3", 394 | "language": "python", 395 | "name": "python3" 396 | }, 397 | "language_info": { 398 | "codemirror_mode": { 399 | "name": "ipython", 400 | "version": 3 401 | }, 402 | "file_extension": ".py", 403 | "mimetype": "text/x-python", 404 | "name": "python", 405 | "nbconvert_exporter": "python", 406 | "pygments_lexer": "ipython3", 407 | "version": "3.8.8" 408 | } 409 | }, 410 | "nbformat": 4, 411 | "nbformat_minor": 1 412 | } 413 | -------------------------------------------------------------------------------- /modify_network.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "id": "665e1593-5572-466e-9ae4-bd31130f0486", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "import torch\n", 11 | "import torch.nn as nn\n", 12 | "import torchvision.models as models\n", 13 | "from collections import OrderedDict" 14 | ] 15 | }, 16 | { 17 | "cell_type": "markdown", 18 | "id": "c92207ac", 19 | "metadata": {}, 20 | "source": [ 21 | "# pre-trained model weight 讀取並查看\n", 22 | "可使用 pytorch 的 torchvision.models 中所提供的模型權重,也可以使用自己訓練或下載的模型權重檔。\n", 23 | "\n", 24 | "## 使用 pytorch 提供的 pre-trained model weight" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": 2, 30 | "id": "6e034fee", 31 | "metadata": {}, 32 | "outputs": [ 33 | { 34 | "name": "stdout", 35 | "output_type": "stream", 36 | "text": [ 37 | "model_state type: \n" 38 | ] 39 | } 40 | ], 41 | "source": [ 42 | "model = models.resnet18(pretrained=True)\n", 43 | "model_state = model.state_dict()\n", 44 | "\n", 45 | "print(\"model_state type:\", type(model_state))" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": 3, 51 | "id": "d1726f5a", 52 | "metadata": { 53 | "scrolled": false 54 | }, 55 | "outputs": [ 56 | { 57 | "name": "stdout", 58 | "output_type": "stream", 59 | "text": [ 60 | "name: conv1.weight\n", 61 | "value: torch.Size([64, 3, 7, 7])\n", 62 | "name: bn1.weight\n", 63 | "value: torch.Size([64])\n", 64 | "name: bn1.bias\n", 65 | "value: torch.Size([64])\n", 66 | "name: bn1.running_mean\n", 67 | "value: torch.Size([64])\n", 68 | "name: bn1.running_var\n", 69 | "value: torch.Size([64])\n", 70 | "name: bn1.num_batches_tracked\n", 71 | "value: torch.Size([])\n", 72 | "name: layer1.0.conv1.weight\n", 73 | "value: torch.Size([64, 64, 3, 3])\n", 74 | 
"name: layer1.0.bn1.weight\n", 75 | "value: torch.Size([64])\n", 76 | "name: layer1.0.bn1.bias\n", 77 | "value: torch.Size([64])\n", 78 | "name: layer1.0.bn1.running_mean\n", 79 | "value: torch.Size([64])\n" 80 | ] 81 | } 82 | ], 83 | "source": [ 84 | "cnt = 0\n", 85 | "\n", 86 | "for param_tensor in model_state:\n", 87 | " print(\"name:\", param_tensor)\n", 88 | " print(\"value:\", model_state[param_tensor].shape)\n", 89 | " \n", 90 | " cnt += 1\n", 91 | " if cnt == 10:\n", 92 | " break" 93 | ] 94 | }, 95 | { 96 | "cell_type": "markdown", 97 | "id": "375c0ce0", 98 | "metadata": {}, 99 | "source": [ 100 | "## 使用自己訓練或下載的權重檔" 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": 4, 106 | "id": "cc94fa31", 107 | "metadata": { 108 | "scrolled": false 109 | }, 110 | "outputs": [ 111 | { 112 | "name": "stderr", 113 | "output_type": "stream", 114 | "text": [ 115 | "Downloading...\n", 116 | "From: https://download.pytorch.org/models/resnet18-5c106cde.pth\n", 117 | "To: C:\\Users\\joyle\\pythonwork\\torch_work\\resnet-5c106cde.pth\n", 118 | "100%|█████████████████████████████████████████████████████████████████████████████| 46.8M/46.8M [00:05<00:00, 8.63MB/s]" 119 | ] 120 | }, 121 | { 122 | "name": "stdout", 123 | "output_type": "stream", 124 | "text": [ 125 | "model has been downloaded.\n" 126 | ] 127 | }, 128 | { 129 | "name": "stderr", 130 | "output_type": "stream", 131 | "text": [ 132 | "\n" 133 | ] 134 | } 135 | ], 136 | "source": [ 137 | "import gdown\n", 138 | "\n", 139 | "resnet_model = 'https://download.pytorch.org/models/resnet18-5c106cde.pth'\n", 140 | "gdown.download(resnet_model, \"resnet-5c106cde.pth\")" 141 | ] 142 | }, 143 | { 144 | "cell_type": "code", 145 | "execution_count": 4, 146 | "id": "d2178189", 147 | "metadata": {}, 148 | "outputs": [ 149 | { 150 | "name": "stdout", 151 | "output_type": "stream", 152 | "text": [ 153 | "checkpoint type: \n" 154 | ] 155 | } 156 | ], 157 | "source": [ 158 | "checkpoint = torch.load('resnet-5c106cde.pth')\n", 159 | "print(\"checkpoint type:\", type(checkpoint))" 160 | ] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "execution_count": 5, 165 | "id": "e09eb118", 166 | "metadata": {}, 167 | "outputs": [ 168 | { 169 | "name": "stdout", 170 | "output_type": "stream", 171 | "text": [ 172 | "name: conv1.weight\n", 173 | "value: torch.Size([64, 3, 7, 7])\n", 174 | "name: bn1.running_mean\n", 175 | "value: torch.Size([64])\n", 176 | "name: bn1.running_var\n", 177 | "value: torch.Size([64])\n", 178 | "name: bn1.weight\n", 179 | "value: torch.Size([64])\n", 180 | "name: bn1.bias\n", 181 | "value: torch.Size([64])\n", 182 | "name: layer1.0.conv1.weight\n", 183 | "value: torch.Size([64, 64, 3, 3])\n", 184 | "name: layer1.0.bn1.running_mean\n", 185 | "value: torch.Size([64])\n", 186 | "name: layer1.0.bn1.running_var\n", 187 | "value: torch.Size([64])\n", 188 | "name: layer1.0.bn1.weight\n", 189 | "value: torch.Size([64])\n", 190 | "name: layer1.0.bn1.bias\n", 191 | "value: torch.Size([64])\n" 192 | ] 193 | } 194 | ], 195 | "source": [ 196 | "cnt = 0\n", 197 | "for k, v in checkpoint.items():\n", 198 | " print(\"name:\", k)\n", 199 | " print(\"value:\", v.shape)\n", 200 | " \n", 201 | " cnt += 1\n", 202 | " if cnt == 10:\n", 203 | " break" 204 | ] 205 | }, 206 | { 207 | "cell_type": "markdown", 208 | "id": "0d33a136-7fbc-40a8-bf8b-5a35581d1d27", 209 | "metadata": {}, 210 | "source": [ 211 | "# 修改layer\n", 212 | "這部分會分為修改 layer 的參數和名稱\n", 213 | "\n", 214 | "## 參數\n", 215 | "### 最後一層的輸出值" 216 | ] 217 | }, 218 | { 219 | "cell_type": 
"code", 220 | "execution_count": 6, 221 | "id": "a9928ec2-2971-44cf-8881-2913066f899e", 222 | "metadata": {}, 223 | "outputs": [ 224 | { 225 | "data": { 226 | "text/plain": [ 227 | "Linear(in_features=512, out_features=1000, bias=True)" 228 | ] 229 | }, 230 | "execution_count": 6, 231 | "metadata": {}, 232 | "output_type": "execute_result" 233 | } 234 | ], 235 | "source": [ 236 | "model = models.resnet18(pretrained=True)\n", 237 | "model.fc" 238 | ] 239 | }, 240 | { 241 | "cell_type": "code", 242 | "execution_count": 7, 243 | "id": "1bdbd8e7-e969-4b54-b256-6a6ab68179e7", 244 | "metadata": {}, 245 | "outputs": [ 246 | { 247 | "name": "stdout", 248 | "output_type": "stream", 249 | "text": [ 250 | "Linear(in_features=512, out_features=10, bias=True)\n" 251 | ] 252 | } 253 | ], 254 | "source": [ 255 | "in_features = model.fc.in_features\n", 256 | "num_class = 10\n", 257 | "\n", 258 | "model.fc = nn.Linear(in_features, num_class)\n", 259 | "print(model.fc)" 260 | ] 261 | }, 262 | { 263 | "cell_type": "code", 264 | "execution_count": 8, 265 | "id": "30a5a987", 266 | "metadata": {}, 267 | "outputs": [ 268 | { 269 | "name": "stdout", 270 | "output_type": "stream", 271 | "text": [ 272 | "weight: torch.Size([10, 512])\n", 273 | "bias: torch.Size([10])\n" 274 | ] 275 | } 276 | ], 277 | "source": [ 278 | "model_state = model.state_dict()\n", 279 | "print(\"weight: \", model_state['fc.weight'].shape)\n", 280 | "print(\"bias: \", model_state['fc.bias'].shape)" 281 | ] 282 | }, 283 | { 284 | "cell_type": "code", 285 | "execution_count": 9, 286 | "id": "9e126f23", 287 | "metadata": {}, 288 | "outputs": [ 289 | { 290 | "name": "stdout", 291 | "output_type": "stream", 292 | "text": [ 293 | "weight: tensor([[-0.0178, 0.0195, 0.0206, ..., -0.0246, -0.0031, 0.0218],\n", 294 | " [ 0.0218, -0.0324, -0.0375, ..., -0.0089, 0.0187, -0.0119],\n", 295 | " [ 0.0048, -0.0325, -0.0199, ..., -0.0054, -0.0382, -0.0221],\n", 296 | " ...,\n", 297 | " [-0.0324, -0.0263, -0.0365, ..., 0.0139, 0.0190, 0.0053],\n", 298 | " [ 0.0231, -0.0329, 0.0187, ..., 0.0001, -0.0297, 0.0108],\n", 299 | " [ 0.0147, 0.0201, -0.0160, ..., 0.0025, 0.0128, 0.0210]])\n", 300 | "bias: tensor([ 0.0158, 0.0223, 0.0025, -0.0293, 0.0103, 0.0068, -0.0419, 0.0389,\n", 301 | " 0.0371, -0.0370])\n" 302 | ] 303 | } 304 | ], 305 | "source": [ 306 | "print(\"weight: \", model_state['fc.weight'])\n", 307 | "print(\"bias: \", model_state['fc.bias'])" 308 | ] 309 | }, 310 | { 311 | "cell_type": "markdown", 312 | "id": "b4f73cc7-25da-4799-8419-c5e9e16904a0", 313 | "metadata": {}, 314 | "source": [ 315 | "### 某層參數 \n", 316 | "* 使用 pytorch 提供的 pre-trained model 權重" 317 | ] 318 | }, 319 | { 320 | "cell_type": "code", 321 | "execution_count": 10, 322 | "id": "5c18f96b-3352-4c31-9cb6-a89969d93ceb", 323 | "metadata": {}, 324 | "outputs": [], 325 | "source": [ 326 | "model = models.resnet18(pretrained=True)\n", 327 | "model_state = model.state_dict()" 328 | ] 329 | }, 330 | { 331 | "cell_type": "code", 332 | "execution_count": 15, 333 | "id": "3d1fcf24", 334 | "metadata": {}, 335 | "outputs": [ 336 | { 337 | "name": "stdout", 338 | "output_type": "stream", 339 | "text": [ 340 | "name: fc.weight\n", 341 | "name: fc.bias\n" 342 | ] 343 | } 344 | ], 345 | "source": [ 346 | "for param_tensor in model_state:\n", 347 | " if param_tensor.startswith('fc'):\n", 348 | " print(\"name:\", param_tensor)" 349 | ] 350 | }, 351 | { 352 | "cell_type": "code", 353 | "execution_count": 16, 354 | "id": "aa61a9c9", 355 | "metadata": {}, 356 | "outputs": [ 357 | { 358 | "name": "stdout", 
359 | "output_type": "stream", 360 | "text": [ 361 | "org: torch.Size([1000, 512])\n", 362 | "now: torch.Size([10, 512])\n" 363 | ] 364 | } 365 | ], 366 | "source": [ 367 | "print(\"org: \", model_state['fc.weight'].shape)\n", 368 | "model_state['fc.weight'] = torch.rand((10, 512))\n", 369 | "print(\"now: \", model_state['fc.weight'].shape)" 370 | ] 371 | }, 372 | { 373 | "cell_type": "code", 374 | "execution_count": 17, 375 | "id": "e4145ccf", 376 | "metadata": {}, 377 | "outputs": [ 378 | { 379 | "name": "stdout", 380 | "output_type": "stream", 381 | "text": [ 382 | "org: torch.Size([1000])\n", 383 | "now: torch.Size([10])\n" 384 | ] 385 | } 386 | ], 387 | "source": [ 388 | "print(\"org: \", model_state['fc.bias'].shape)\n", 389 | "model_state['fc.bias'] = torch.ones(10)\n", 390 | "print(\"now: \", model_state['fc.bias'].shape)" 391 | ] 392 | }, 393 | { 394 | "cell_type": "code", 395 | "execution_count": 18, 396 | "id": "b5d9ca27", 397 | "metadata": {}, 398 | "outputs": [ 399 | { 400 | "name": "stdout", 401 | "output_type": "stream", 402 | "text": [ 403 | "name: fc.weight\n", 404 | "value: tensor([[0.4836, 0.1596, 0.4073, ..., 0.1078, 0.6504, 0.2826],\n", 405 | " [0.6172, 0.7873, 0.0915, ..., 0.9723, 0.0903, 0.8155],\n", 406 | " [0.1876, 0.0691, 0.0087, ..., 0.6894, 0.4616, 0.0709],\n", 407 | " ...,\n", 408 | " [0.7497, 0.3456, 0.3441, ..., 0.1699, 0.3600, 0.4801],\n", 409 | " [0.1419, 0.5504, 0.0085, ..., 0.6065, 0.9412, 0.6216],\n", 410 | " [0.4666, 0.0663, 0.2911, ..., 0.0243, 0.8549, 0.3349]])\n", 411 | "name: fc.bias\n", 412 | "value: tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])\n" 413 | ] 414 | } 415 | ], 416 | "source": [ 417 | "for param_tensor in model_state:\n", 418 | " if param_tensor in ['fc.weight', 'fc.bias']:\n", 419 | " print(\"name:\", param_tensor)\n", 420 | " print(\"value:\", model_state[param_tensor])" 421 | ] 422 | }, 423 | { 424 | "cell_type": "markdown", 425 | "id": "bfd2221a", 426 | "metadata": {}, 427 | "source": [ 428 | "* 使用自己訓練或下載的權重檔" 429 | ] 430 | }, 431 | { 432 | "cell_type": "code", 433 | "execution_count": 6, 434 | "id": "7de23667-454c-4797-a09e-b3a7a1010c6b", 435 | "metadata": {}, 436 | "outputs": [], 437 | "source": [ 438 | "checkpoint = torch.load('resnet-5c106cde.pth')" 439 | ] 440 | }, 441 | { 442 | "cell_type": "code", 443 | "execution_count": 10, 444 | "id": "1c3767c2", 445 | "metadata": {}, 446 | "outputs": [ 447 | { 448 | "name": "stdout", 449 | "output_type": "stream", 450 | "text": [ 451 | "name: fc.weight\n", 452 | "value: torch.Size([1000, 512])\n", 453 | "name: fc.bias\n", 454 | "value: torch.Size([1000])\n" 455 | ] 456 | } 457 | ], 458 | "source": [ 459 | "for k, v in checkpoint.items():\n", 460 | " if k in ['fc.weight', 'fc.bias']:\n", 461 | " print(\"name:\", k)\n", 462 | " print(\"value:\", v.shape)" 463 | ] 464 | }, 465 | { 466 | "cell_type": "code", 467 | "execution_count": 21, 468 | "id": "bd7b94ef-0e99-4aa7-813a-2d74b69fc4c3", 469 | "metadata": {}, 470 | "outputs": [ 471 | { 472 | "name": "stdout", 473 | "output_type": "stream", 474 | "text": [ 475 | "org: torch.Size([1000, 512])\n", 476 | "now: torch.Size([10, 512])\n" 477 | ] 478 | } 479 | ], 480 | "source": [ 481 | "print(\"org: \", checkpoint['fc.weight'].shape)\n", 482 | "checkpoint['fc.weight'] = torch.rand((10, 512))\n", 483 | "print(\"now: \", checkpoint['fc.weight'].shape)" 484 | ] 485 | }, 486 | { 487 | "cell_type": "code", 488 | "execution_count": 22, 489 | "id": "ad656f33-8137-4c99-8de8-4de226503a5f", 490 | "metadata": {}, 491 | "outputs": [ 492 | { 493 | "name": 
"stdout", 494 | "output_type": "stream", 495 | "text": [ 496 | "org: torch.Size([1000])\n", 497 | "now: torch.Size([10])\n" 498 | ] 499 | } 500 | ], 501 | "source": [ 502 | "print(\"org: \", checkpoint['fc.bias'].shape)\n", 503 | "checkpoint['fc.bias'] = torch.ones(10)\n", 504 | "print(\"now: \", checkpoint['fc.bias'].shape)" 505 | ] 506 | }, 507 | { 508 | "cell_type": "code", 509 | "execution_count": 23, 510 | "id": "86ff395d-f5ef-40d0-a792-064d07b0000f", 511 | "metadata": {}, 512 | "outputs": [ 513 | { 514 | "name": "stdout", 515 | "output_type": "stream", 516 | "text": [ 517 | "name: fc.weight\n", 518 | "value: tensor([[0.2027, 0.7287, 0.3048, ..., 0.4458, 0.6887, 0.3502],\n", 519 | " [0.4577, 0.7108, 0.3471, ..., 0.6735, 0.6224, 0.3283],\n", 520 | " [0.9822, 0.5041, 0.3341, ..., 0.7837, 0.7394, 0.6175],\n", 521 | " ...,\n", 522 | " [0.3742, 0.1213, 0.6600, ..., 0.7086, 0.8760, 0.8814],\n", 523 | " [0.0372, 0.5985, 0.8533, ..., 0.3187, 0.7410, 0.6001],\n", 524 | " [0.6699, 0.5557, 0.1950, ..., 0.1332, 0.1001, 0.1326]])\n", 525 | "name: fc.bias\n", 526 | "value: tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])\n" 527 | ] 528 | } 529 | ], 530 | "source": [ 531 | "for k, v in checkpoint.items():\n", 532 | " if k in ['fc.weight', 'fc.bias']:\n", 533 | " print(\"name:\", k)\n", 534 | " print(\"value:\", v)" 535 | ] 536 | }, 537 | { 538 | "cell_type": "markdown", 539 | "id": "bdfd1d12-8bbc-4eb7-903c-a48dc6b29c40", 540 | "metadata": {}, 541 | "source": [ 542 | "## layer 名稱" 543 | ] 544 | }, 545 | { 546 | "cell_type": "code", 547 | "execution_count": 11, 548 | "id": "5179bf59", 549 | "metadata": {}, 550 | "outputs": [ 551 | { 552 | "name": "stdout", 553 | "output_type": "stream", 554 | "text": [ 555 | "name: conv1.weight\n", 556 | "value: torch.Size([64, 3, 7, 7])\n", 557 | "name: bn1.weight\n", 558 | "value: torch.Size([64])\n", 559 | "name: bn1.bias\n", 560 | "value: torch.Size([64])\n", 561 | "name: bn1.running_mean\n", 562 | "value: torch.Size([64])\n", 563 | "name: bn1.running_var\n", 564 | "value: torch.Size([64])\n", 565 | "name: bn1.num_batches_tracked\n", 566 | "value: torch.Size([])\n", 567 | "name: layer1.0.conv1.weight\n", 568 | "value: torch.Size([64, 64, 3, 3])\n", 569 | "name: layer1.0.bn1.weight\n", 570 | "value: torch.Size([64])\n", 571 | "name: layer1.0.bn1.bias\n", 572 | "value: torch.Size([64])\n", 573 | "name: layer1.0.bn1.running_mean\n", 574 | "value: torch.Size([64])\n" 575 | ] 576 | } 577 | ], 578 | "source": [ 579 | "model = models.resnet18()\n", 580 | "\n", 581 | "cnt = 0\n", 582 | "for param_tensor in model.state_dict():\n", 583 | " print(\"name:\", param_tensor)\n", 584 | " print(\"value:\", model.state_dict()[param_tensor].size())\n", 585 | " \n", 586 | " cnt += 1\n", 587 | " if cnt == 10:\n", 588 | " break" 589 | ] 590 | }, 591 | { 592 | "cell_type": "code", 593 | "execution_count": 12, 594 | "id": "8ffb0d3c", 595 | "metadata": {}, 596 | "outputs": [ 597 | { 598 | "name": "stdout", 599 | "output_type": "stream", 600 | "text": [ 601 | "name: resnet.conv1.weight\n", 602 | "value: torch.Size([64, 3, 7, 7])\n", 603 | "name: resnet.bn1.weight\n", 604 | "value: torch.Size([64])\n", 605 | "name: resnet.bn1.bias\n", 606 | "value: torch.Size([64])\n", 607 | "name: resnet.bn1.running_mean\n", 608 | "value: torch.Size([64])\n", 609 | "name: resnet.bn1.running_var\n", 610 | "value: torch.Size([64])\n", 611 | "name: resnet.bn1.num_batches_tracked\n", 612 | "value: torch.Size([])\n", 613 | "name: resnet.layer1.0.conv1.weight\n", 614 | "value: torch.Size([64, 64, 3, 3])\n", 615 
| "name: resnet.layer1.0.bn1.weight\n", 616 | "value: torch.Size([64])\n", 617 | "name: resnet.layer1.0.bn1.bias\n", 618 | "value: torch.Size([64])\n", 619 | "name: resnet.layer1.0.bn1.running_mean\n", 620 | "value: torch.Size([64])\n" 621 | ] 622 | } 623 | ], 624 | "source": [ 625 | "checkpoint = torch.load('resnet_weights.pth')\n", 626 | "\n", 627 | "cnt = 0\n", 628 | "for k, v in checkpoint.items():\n", 629 | " print(\"name:\", k)\n", 630 | " print(\"value:\", v.size())\n", 631 | " \n", 632 | " cnt += 1\n", 633 | " if cnt == 10:\n", 634 | " break" 635 | ] 636 | }, 637 | { 638 | "cell_type": "code", 639 | "execution_count": 13, 640 | "id": "ccf0ece4", 641 | "metadata": {}, 642 | "outputs": [], 643 | "source": [ 644 | "state_dict = OrderedDict()\n", 645 | "\n", 646 | "for k, v in checkpoint.items():\n", 647 | " state_dict[k[len('resnet.'):]] = v" 648 | ] 649 | }, 650 | { 651 | "cell_type": "code", 652 | "execution_count": 14, 653 | "id": "435241c8", 654 | "metadata": {}, 655 | "outputs": [ 656 | { 657 | "data": { 658 | "text/plain": [ 659 | "odict_keys(['conv1.weight', 'bn1.weight', 'bn1.bias', 'bn1.running_mean', 'bn1.running_var', 'bn1.num_batches_tracked', 'layer1.0.conv1.weight', 'layer1.0.bn1.weight', 'layer1.0.bn1.bias', 'layer1.0.bn1.running_mean', 'layer1.0.bn1.running_var', 'layer1.0.bn1.num_batches_tracked', 'layer1.0.conv2.weight', 'layer1.0.bn2.weight', 'layer1.0.bn2.bias', 'layer1.0.bn2.running_mean', 'layer1.0.bn2.running_var', 'layer1.0.bn2.num_batches_tracked', 'layer1.1.conv1.weight', 'layer1.1.bn1.weight', 'layer1.1.bn1.bias', 'layer1.1.bn1.running_mean', 'layer1.1.bn1.running_var', 'layer1.1.bn1.num_batches_tracked', 'layer1.1.conv2.weight', 'layer1.1.bn2.weight', 'layer1.1.bn2.bias', 'layer1.1.bn2.running_mean', 'layer1.1.bn2.running_var', 'layer1.1.bn2.num_batches_tracked', 'layer2.0.conv1.weight', 'layer2.0.bn1.weight', 'layer2.0.bn1.bias', 'layer2.0.bn1.running_mean', 'layer2.0.bn1.running_var', 'layer2.0.bn1.num_batches_tracked', 'layer2.0.conv2.weight', 'layer2.0.bn2.weight', 'layer2.0.bn2.bias', 'layer2.0.bn2.running_mean', 'layer2.0.bn2.running_var', 'layer2.0.bn2.num_batches_tracked', 'layer2.0.downsample.0.weight', 'layer2.0.downsample.1.weight', 'layer2.0.downsample.1.bias', 'layer2.0.downsample.1.running_mean', 'layer2.0.downsample.1.running_var', 'layer2.0.downsample.1.num_batches_tracked', 'layer2.1.conv1.weight', 'layer2.1.bn1.weight', 'layer2.1.bn1.bias', 'layer2.1.bn1.running_mean', 'layer2.1.bn1.running_var', 'layer2.1.bn1.num_batches_tracked', 'layer2.1.conv2.weight', 'layer2.1.bn2.weight', 'layer2.1.bn2.bias', 'layer2.1.bn2.running_mean', 'layer2.1.bn2.running_var', 'layer2.1.bn2.num_batches_tracked', 'layer3.0.conv1.weight', 'layer3.0.bn1.weight', 'layer3.0.bn1.bias', 'layer3.0.bn1.running_mean', 'layer3.0.bn1.running_var', 'layer3.0.bn1.num_batches_tracked', 'layer3.0.conv2.weight', 'layer3.0.bn2.weight', 'layer3.0.bn2.bias', 'layer3.0.bn2.running_mean', 'layer3.0.bn2.running_var', 'layer3.0.bn2.num_batches_tracked', 'layer3.0.downsample.0.weight', 'layer3.0.downsample.1.weight', 'layer3.0.downsample.1.bias', 'layer3.0.downsample.1.running_mean', 'layer3.0.downsample.1.running_var', 'layer3.0.downsample.1.num_batches_tracked', 'layer3.1.conv1.weight', 'layer3.1.bn1.weight', 'layer3.1.bn1.bias', 'layer3.1.bn1.running_mean', 'layer3.1.bn1.running_var', 'layer3.1.bn1.num_batches_tracked', 'layer3.1.conv2.weight', 'layer3.1.bn2.weight', 'layer3.1.bn2.bias', 'layer3.1.bn2.running_mean', 'layer3.1.bn2.running_var', 'layer3.1.bn2.num_batches_tracked', 
'layer4.0.conv1.weight', 'layer4.0.bn1.weight', 'layer4.0.bn1.bias', 'layer4.0.bn1.running_mean', 'layer4.0.bn1.running_var', 'layer4.0.bn1.num_batches_tracked', 'layer4.0.conv2.weight', 'layer4.0.bn2.weight', 'layer4.0.bn2.bias', 'layer4.0.bn2.running_mean', 'layer4.0.bn2.running_var', 'layer4.0.bn2.num_batches_tracked', 'layer4.0.downsample.0.weight', 'layer4.0.downsample.1.weight', 'layer4.0.downsample.1.bias', 'layer4.0.downsample.1.running_mean', 'layer4.0.downsample.1.running_var', 'layer4.0.downsample.1.num_batches_tracked', 'layer4.1.conv1.weight', 'layer4.1.bn1.weight', 'layer4.1.bn1.bias', 'layer4.1.bn1.running_mean', 'layer4.1.bn1.running_var', 'layer4.1.bn1.num_batches_tracked', 'layer4.1.conv2.weight', 'layer4.1.bn2.weight', 'layer4.1.bn2.bias', 'layer4.1.bn2.running_mean', 'layer4.1.bn2.running_var', 'layer4.1.bn2.num_batches_tracked', 'fc.weight', 'fc.bias'])" 660 | ] 661 | }, 662 | "execution_count": 14, 663 | "metadata": {}, 664 | "output_type": "execute_result" 665 | } 666 | ], 667 | "source": [ 668 | "state_dict.keys()" 669 | ] 670 | }, 671 | { 672 | "cell_type": "code", 673 | "execution_count": 15, 674 | "id": "81d16552", 675 | "metadata": {}, 676 | "outputs": [ 677 | { 678 | "data": { 679 | "text/plain": [ 680 | "tensor([[[[-0.0322, -0.0509, -0.0117, -0.0062, 0.0003, -0.0347, 0.0073],\n", 681 | " [-0.0072, -0.0488, -0.0295, -0.0035, -0.0362, -0.0497, -0.0226],\n", 682 | " [ 0.0087, 0.0136, 0.0176, 0.0150, -0.0127, 0.0358, 0.0585],\n", 683 | " [-0.0243, 0.0452, 0.0083, 0.0163, -0.0355, 0.0162, -0.0159],\n", 684 | " [-0.0291, 0.0263, 0.0014, 0.0211, -0.0300, 0.0307, 0.0133],\n", 685 | " [ 0.0156, -0.0002, 0.0679, 0.0492, -0.0200, -0.0276, 0.0333],\n", 686 | " [-0.0059, -0.0139, 0.0266, -0.0367, -0.0117, 0.0113, -0.0111]],\n", 687 | "\n", 688 | " [[-0.0222, -0.0303, 0.0024, 0.0153, -0.0004, 0.0151, 0.0063],\n", 689 | " [ 0.0276, 0.0107, 0.0152, 0.0082, -0.0362, 0.0104, 0.0325],\n", 690 | " [ 0.0083, -0.0053, -0.0374, -0.0166, 0.0003, -0.0019, 0.0062],\n", 691 | " [ 0.0144, 0.0045, 0.0279, -0.0087, 0.0057, -0.0145, 0.0090],\n", 692 | " [ 0.0215, 0.0047, -0.0268, 0.0387, 0.0050, -0.0034, 0.0031],\n", 693 | " [-0.0269, -0.0255, 0.0234, -0.0368, 0.0168, 0.0030, 0.0048],\n", 694 | " [-0.0396, -0.0137, 0.0259, -0.0556, 0.0247, 0.0295, -0.0559]],\n", 695 | "\n", 696 | " [[ 0.0167, 0.0488, 0.0255, 0.0004, 0.0625, -0.0064, 0.0254],\n", 697 | " [-0.0048, 0.0275, -0.0244, -0.0212, 0.0144, -0.0172, -0.0117],\n", 698 | " [ 0.0123, 0.0283, 0.0245, 0.0116, -0.0126, 0.0103, 0.0128],\n", 699 | " [ 0.0072, -0.0020, 0.0022, 0.0124, -0.0031, -0.0144, 0.0220],\n", 700 | " [-0.0092, 0.0358, 0.0187, 0.0149, 0.0051, -0.0064, -0.0024],\n", 701 | " [ 0.0042, -0.0257, -0.0124, 0.0215, -0.0191, 0.0076, 0.0157],\n", 702 | " [ 0.0128, 0.0378, 0.0356, -0.0197, 0.0204, -0.0041, 0.0058]]]])" 703 | ] 704 | }, 705 | "execution_count": 15, 706 | "metadata": {}, 707 | "output_type": "execute_result" 708 | } 709 | ], 710 | "source": [ 711 | "state_dict['conv1.weight'][:1]" 712 | ] 713 | }, 714 | { 715 | "cell_type": "code", 716 | "execution_count": 28, 717 | "id": "f4a5ecc0-aff3-44d1-9e70-ef9a7996b232", 718 | "metadata": { 719 | "jupyter": { 720 | "outputs_hidden": true 721 | }, 722 | "scrolled": true, 723 | "tags": [] 724 | }, 725 | "outputs": [ 726 | { 727 | "data": { 728 | "text/plain": [ 729 | "" 730 | ] 731 | }, 732 | "execution_count": 28, 733 | "metadata": {}, 734 | "output_type": "execute_result" 735 | } 736 | ], 737 | "source": [ 738 | "model.load_state_dict(state_dict)" 739 | ] 740 | }, 741 | { 742 
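The prefix-stripping loop above assumes every key carries `resnet.`. A slightly more defensive helper that only strips the prefix where it is actually present (a sketch; `strip_prefix` is a local helper, not a torch API):

```python
from collections import OrderedDict

def strip_prefix(state_dict, prefix):
    """Drop `prefix` from the keys that start with it; keep the rest as-is."""
    out = OrderedDict()
    for k, v in state_dict.items():
        out[k[len(prefix):] if k.startswith(prefix) else k] = v
    return out

state_dict = strip_prefix(checkpoint, 'resnet.')
```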
| "cell_type": "markdown", 743 | "id": "60865313-f79d-410b-8b4c-9b0479ef3c46", 744 | "metadata": {}, 745 | "source": [ 746 | "# 新增 layer" 747 | ] 748 | }, 749 | { 750 | "cell_type": "code", 751 | "execution_count": 29, 752 | "id": "a0af96f2", 753 | "metadata": {}, 754 | "outputs": [], 755 | "source": [ 756 | "class MyResNet18(nn.Module):\n", 757 | " def __init__(self, net_block, layers, num_classes=10):\n", 758 | " super(MyResNet18, self).__init__()\n", 759 | " self.in_channels = 64\n", 760 | " self.conv1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3, bias=False)\n", 761 | " self.bn1 = nn.BatchNorm2d(64)\n", 762 | " self.relu = nn.ReLU(inplace=True)\n", 763 | " self.maxpooling = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n", 764 | "\n", 765 | " self.layer1 = self.net_block_layer(net_block, 64, layers[0])\n", 766 | " self.layer2 = self.net_block_layer(net_block, 128, layers[1], stride=2)\n", 767 | " self.layer3 = self.net_block_layer(net_block, 256, layers[2], stride=2)\n", 768 | " self.layer4 = self.net_block_layer(net_block, 512, layers[3], stride=2)\n", 769 | "\n", 770 | " ## ============== 新增的網路層 ============ ##\n", 771 | " self.layer5 = nn.Sequential(nn.Conv2d(layers[3], 128, kernel_size=3, stride=2, padding=1),\n", 772 | " nn.BatchNorm2d(128),\n", 773 | " nn.ReLU(inplace=True))\n", 774 | " \n", 775 | " ## ======================================= ##\n", 776 | " \n", 777 | " self.avgpooling = nn.AvgPool2d(7, stride=1)\n", 778 | " self.fc = nn.Linear(512 * net_block.expansion, num_classes)\n", 779 | "\n", 780 | " # 參數初始化\n", 781 | " for m in self.modules():\n", 782 | " if isinstance(m, nn.Conv2d):\n", 783 | " nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n", 784 | "\n", 785 | " elif isinstance(m, nn.BatchNorm2d):\n", 786 | " nn.init.constant_(m.weight, 1)\n", 787 | " nn.init.constant_(m.bias, 0) \n", 788 | "\n", 789 | " def net_block_layer(self, net_block, out_channels, num_blocks, stride=1):\n", 790 | " downsample = None\n", 791 | "\n", 792 | " # 在 shortcut 時,若維度不一樣,要更改維度\n", 793 | " if stride != 1 or self.in_channels != out_channels * net_block.expansion:\n", 794 | " downsample = nn.Sequential(nn.Conv2d(self.in_channels, out_channels * net_block.expansion, kernel_size=1, stride=stride, bias=False),\n", 795 | " nn.BatchNorm2d(out_channels * net_block.expansion))\n", 796 | "\n", 797 | " layers = []\n", 798 | " layers.append(net_block(self.in_channels, out_channels, stride, downsample))\n", 799 | " if net_block.expansion != 1:\n", 800 | " self.in_channels = out_channels * net_block.expansion\n", 801 | "\n", 802 | " else:\n", 803 | " self.in_channels = out_channels\n", 804 | "\n", 805 | " for i in range(1, num_blocks):\n", 806 | " layers.append(net_block(self.in_channels, out_channels, 1, None))\n", 807 | "\n", 808 | " return nn.Sequential(*layers)\n", 809 | "\n", 810 | " def forward(self, x):\n", 811 | " x = self.conv1(x)\n", 812 | " x = self.bn1(x)\n", 813 | " x = self.relu(x)\n", 814 | " x = self.maxpooling(x)\n", 815 | " x = self.layer1(x)\n", 816 | " x = self.layer2(x)\n", 817 | " x = self.layer3(x)\n", 818 | " x = self.layer4(x)\n", 819 | " \n", 820 | " x = self.layer5(x)\n", 821 | " \n", 822 | " x = self.avgpooling(x)\n", 823 | " x = torch.flatten(x, start_dim=1)\n", 824 | " x = self.fc(x)\n", 825 | "\n", 826 | " return x\n", 827 | " \n", 828 | "class basic_block(nn.Module):\n", 829 | " # 輸出通道乘的倍數\n", 830 | " expansion = 1\n", 831 | "\n", 832 | " def __init__(self, in_channels, out_channels, stride, downsample):\n", 
833 | " super(basic_block, self).__init__() \n", 834 | " self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=1, bias=False)\n", 835 | " self.bn1 = nn.BatchNorm2d(out_channels)\n", 836 | " self.relu = nn.ReLU(inplace=True)\n", 837 | " self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1, bias=False)\n", 838 | " self.bn2 = nn.BatchNorm2d(out_channels)\n", 839 | "\n", 840 | " # 在 shortcut 時,若維度不一樣,要更改維度\n", 841 | " self.downsample = downsample \n", 842 | "\n", 843 | "\n", 844 | " def forward(self, x):\n", 845 | " residual = x\n", 846 | "\n", 847 | " out = self.conv1(x)\n", 848 | " out = self.bn1(out)\n", 849 | " out = self.relu(out)\n", 850 | " out = self.conv2(out)\n", 851 | " out = self.bn2(out)\n", 852 | "\n", 853 | " if self.downsample is not None:\n", 854 | " residual = self.downsample(x)\n", 855 | "\n", 856 | " out += residual\n", 857 | " out = self.relu(out)\n", 858 | "\n", 859 | " return out" 860 | ] 861 | }, 862 | { 863 | "cell_type": "code", 864 | "execution_count": 30, 865 | "id": "ed6ccc21", 866 | "metadata": {}, 867 | "outputs": [], 868 | "source": [ 869 | "num_classes = 10\n", 870 | "\n", 871 | "model = MyResNet18(basic_block, [2, 2, 2, 2], num_classes)\n", 872 | "model_state = model.state_dict()" 873 | ] 874 | }, 875 | { 876 | "cell_type": "code", 877 | "execution_count": 31, 878 | "id": "65a43eb2-875c-4b98-ba8a-2891e49c1c29", 879 | "metadata": {}, 880 | "outputs": [], 881 | "source": [ 882 | "checkpoint = torch.load('resnet-5c106cde.pth')\n", 883 | "\n", 884 | "checkpoint['fc.weight'] = torch.zeros((num_classes, 512))\n", 885 | "checkpoint['fc.bias'] = torch.zeros(num_classes)" 886 | ] 887 | }, 888 | { 889 | "cell_type": "code", 890 | "execution_count": 32, 891 | "id": "cf26d3ae", 892 | "metadata": {}, 893 | "outputs": [], 894 | "source": [ 895 | "pretrained_dict = {k: v for k, v in checkpoint.items() if k in model_state}" 896 | ] 897 | }, 898 | { 899 | "cell_type": "code", 900 | "execution_count": 33, 901 | "id": "4ef0701a", 902 | "metadata": {}, 903 | "outputs": [], 904 | "source": [ 905 | "model_state.update(pretrained_dict)" 906 | ] 907 | }, 908 | { 909 | "cell_type": "code", 910 | "execution_count": 34, 911 | "id": "f94a437d", 912 | "metadata": { 913 | "scrolled": true 914 | }, 915 | "outputs": [ 916 | { 917 | "data": { 918 | "text/plain": [ 919 | "" 920 | ] 921 | }, 922 | "execution_count": 34, 923 | "metadata": {}, 924 | "output_type": "execute_result" 925 | } 926 | ], 927 | "source": [ 928 | "model.load_state_dict(model_state)" 929 | ] 930 | }, 931 | { 932 | "cell_type": "markdown", 933 | "id": "28cf03d4", 934 | "metadata": {}, 935 | "source": [ 936 | "# 刪除 layer" 937 | ] 938 | }, 939 | { 940 | "cell_type": "code", 941 | "execution_count": 33, 942 | "id": "d0aa4840", 943 | "metadata": {}, 944 | "outputs": [], 945 | "source": [ 946 | "checkpoint = torch.load('resnet-5c106cde.pth')" 947 | ] 948 | }, 949 | { 950 | "cell_type": "code", 951 | "execution_count": 34, 952 | "id": "2c8110cf", 953 | "metadata": {}, 954 | "outputs": [ 955 | { 956 | "name": "stdout", 957 | "output_type": "stream", 958 | "text": [ 959 | "layer4.1.conv1.weight\n", 960 | "layer4.1.bn1.running_mean\n", 961 | "layer4.1.bn1.running_var\n", 962 | "layer4.1.bn1.weight\n", 963 | "layer4.1.bn1.bias\n", 964 | "layer4.1.conv2.weight\n", 965 | "layer4.1.bn2.running_mean\n", 966 | "layer4.1.bn2.running_var\n", 967 | "layer4.1.bn2.weight\n", 968 | "layer4.1.bn2.bias\n" 969 | ] 970 | } 971 | ], 
972 | "source": [ 973 | "for k in list(checkpoint.keys()):\n", 974 | " if k.startswith('layer4.1'):\n", 975 | " print(k)" 976 | ] 977 | }, 978 | { 979 | "cell_type": "code", 980 | "execution_count": 35, 981 | "id": "43a5a6f0", 982 | "metadata": { 983 | "scrolled": true 984 | }, 985 | "outputs": [ 986 | { 987 | "name": "stdout", 988 | "output_type": "stream", 989 | "text": [ 990 | "['None']\n" 991 | ] 992 | } 993 | ], 994 | "source": [ 995 | "import numpy as np\n", 996 | "\n", 997 | "for k in list(checkpoint.keys()): \n", 998 | " if k.startswith('layer4.1'):\n", 999 | " del checkpoint[k]\n", 1000 | "\n", 1001 | "# 驗證是否刪除成功\n", 1002 | "for k in list(checkpoint.keys()):\n", 1003 | " a = [\"None\" if not k.startswith('layer4.1') else \"Exists\" for k in list(checkpoint.keys())]\n", 1004 | "\n", 1005 | "print(np.unique(a))" 1006 | ] 1007 | }, 1008 | { 1009 | "cell_type": "code", 1010 | "execution_count": null, 1011 | "id": "bab04fb8", 1012 | "metadata": {}, 1013 | "outputs": [], 1014 | "source": [] 1015 | } 1016 | ], 1017 | "metadata": { 1018 | "kernelspec": { 1019 | "display_name": "pytorch", 1020 | "language": "python", 1021 | "name": "pytorch" 1022 | }, 1023 | "language_info": { 1024 | "codemirror_mode": { 1025 | "name": "ipython", 1026 | "version": 3 1027 | }, 1028 | "file_extension": ".py", 1029 | "mimetype": "text/x-python", 1030 | "name": "python", 1031 | "nbconvert_exporter": "python", 1032 | "pygments_lexer": "ipython3", 1033 | "version": "3.7.11" 1034 | } 1035 | }, 1036 | "nbformat": 4, 1037 | "nbformat_minor": 5 1038 | } 1039 | -------------------------------------------------------------------------------- /resnet_weights.pth: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chingi071/Pytorch_note/ffe1c08285c5cbbda66307e479d5b0cdf160d4e9/resnet_weights.pth -------------------------------------------------------------------------------- /weights_init_copy.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 2, 6 | "id": "adb52f6b", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [ 10 | "import torch\n", 11 | "import torch.nn as nn\n", 12 | "import torchvision.models as models" 13 | ] 14 | }, 15 | { 16 | "cell_type": "markdown", 17 | "id": "7f67ddc7", 18 | "metadata": {}, 19 | "source": [ 20 | "# 權重初始化定義在模型內部" 21 | ] 22 | }, 23 | { 24 | "cell_type": "markdown", 25 | "id": "c0a61e59", 26 | "metadata": {}, 27 | "source": [ 28 | "## torch.nn.Parameter" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 3, 34 | "id": "d277ce25", 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "class MyModel(nn.Module):\n", 39 | " def __init__(self):\n", 40 | " super(MyModel, self).__init__()\n", 41 | " self.net = models.resnet18()\n", 42 | " print(\"weights: \", self.net.fc.weight[0][:10])\n", 43 | " print(\"bias: \", self.net.fc.bias[:10])\n", 44 | " print(\"=====================\")\n", 45 | " \n", 46 | " self.net.fc.weight = torch.nn.Parameter(torch.ones(self.net.fc.weight.shape)*0.9, requires_grad=True)\n", 47 | " self.net.fc.bias = torch.nn.Parameter(torch.zeros(self.net.fc.bias.shape), requires_grad=True)\n", 48 | " print(\"weights: \", self.net.fc.weight[0][:10])\n", 49 | " print(\"bias: \", self.net.fc.bias[:10])\n", 50 | " \n", 51 | " def forward(self, x):\n", 52 | " output = self.net(x)\n", 53 | " return output" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": 
4, 59 | "id": "8f7d5583", 60 | "metadata": {}, 61 | "outputs": [ 62 | { 63 | "name": "stdout", 64 | "output_type": "stream", 65 | "text": [ 66 | "weights: tensor([ 0.0426, 0.0268, -0.0006, -0.0057, -0.0088, -0.0295, -0.0015, 0.0246,\n", 67 | " -0.0041, 0.0095], grad_fn=)\n", 68 | "bias: tensor([ 0.0128, 0.0003, 0.0330, -0.0188, -0.0301, -0.0290, 0.0102, -0.0316,\n", 69 | " 0.0381, -0.0402], grad_fn=)\n", 70 | "=====================\n", 71 | "weights: tensor([0.9000, 0.9000, 0.9000, 0.9000, 0.9000, 0.9000, 0.9000, 0.9000, 0.9000,\n", 72 | " 0.9000], grad_fn=)\n", 73 | "bias: tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], grad_fn=)\n" 74 | ] 75 | } 76 | ], 77 | "source": [ 78 | "model = MyModel()" 79 | ] 80 | }, 81 | { 82 | "cell_type": "markdown", 83 | "id": "100d363a", 84 | "metadata": {}, 85 | "source": [ 86 | "## data.normal_、data.zero_" 87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": 5, 92 | "id": "72968ab8", 93 | "metadata": {}, 94 | "outputs": [], 95 | "source": [ 96 | "class MyModel(nn.Module):\n", 97 | " def __init__(self):\n", 98 | " super(MyModel, self).__init__()\n", 99 | " self.net = models.resnet18()\n", 100 | " print(\"weights: \", self.net.fc.weight[0][:10])\n", 101 | " print(\"bias: \", self.net.fc.bias[:10])\n", 102 | " print(\"=====================\")\n", 103 | " \n", 104 | " self.net.fc.weight.data.normal_(mean=0.0, std=1.0)\n", 105 | " self.net.fc.bias.data.zero_()\n", 106 | " \n", 107 | " print(\"weights: \", self.net.fc.weight[0][:10])\n", 108 | " print(\"bias: \", self.net.fc.bias[:10])\n", 109 | " \n", 110 | " def forward(self, x):\n", 111 | " output = self.net(x)\n", 112 | " return output" 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": 6, 118 | "id": "c7c82a59", 119 | "metadata": {}, 120 | "outputs": [ 121 | { 122 | "name": "stdout", 123 | "output_type": "stream", 124 | "text": [ 125 | "weights: tensor([-0.0244, -0.0371, -0.0281, 0.0006, 0.0299, -0.0292, -0.0032, -0.0422,\n", 126 | " 0.0313, -0.0164], grad_fn=)\n", 127 | "bias: tensor([ 0.0148, -0.0264, -0.0145, 0.0291, -0.0015, 0.0304, -0.0170, 0.0353,\n", 128 | " -0.0319, 0.0003], grad_fn=)\n", 129 | "=====================\n", 130 | "weights: tensor([ 0.2429, -0.5703, -1.5922, 0.3605, 0.5135, 0.3904, -0.4094, -0.3470,\n", 131 | " 0.2323, 0.1666], grad_fn=)\n", 132 | "bias: tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], grad_fn=)\n" 133 | ] 134 | } 135 | ], 136 | "source": [ 137 | "model = MyModel()" 138 | ] 139 | }, 140 | { 141 | "cell_type": "markdown", 142 | "id": "c217139b", 143 | "metadata": {}, 144 | "source": [ 145 | "## nn.init.normal_、nn.init.zeros_" 146 | ] 147 | }, 148 | { 149 | "cell_type": "code", 150 | "execution_count": 7, 151 | "id": "1ad8eb69", 152 | "metadata": {}, 153 | "outputs": [], 154 | "source": [ 155 | "class MyModel(nn.Module):\n", 156 | " def __init__(self):\n", 157 | " super(MyModel, self).__init__()\n", 158 | " self.net = models.resnet18()\n", 159 | " print(\"weights: \", self.net.fc.weight[0][:10])\n", 160 | " print(\"bias: \", self.net.fc.bias[:10])\n", 161 | " print(\"=====================\")\n", 162 | " \n", 163 | " nn.init.normal_(self.net.fc.weight.data, mean=0.0, std=1.0)\n", 164 | " nn.init.zeros_(self.net.fc.bias.data)\n", 165 | " \n", 166 | " print(\"weights: \", self.net.fc.weight[0][:10])\n", 167 | " print(\"bias: \", self.net.fc.bias[:10])\n", 168 | " \n", 169 | " def forward(self, x):\n", 170 | " output = self.net(x)\n", 171 | " return output" 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | 
"execution_count": 8, 177 | "id": "559f7280", 178 | "metadata": {}, 179 | "outputs": [ 180 | { 181 | "name": "stdout", 182 | "output_type": "stream", 183 | "text": [ 184 | "weights: tensor([ 0.0296, 0.0150, 0.0162, 0.0311, 0.0227, 0.0142, 0.0257, 0.0052,\n", 185 | " 0.0267, -0.0273], grad_fn=)\n", 186 | "bias: tensor([-0.0106, -0.0324, 0.0441, -0.0311, -0.0144, -0.0150, 0.0284, 0.0063,\n", 187 | " 0.0424, -0.0204], grad_fn=)\n", 188 | "=====================\n", 189 | "weights: tensor([ 0.1584, 0.3998, 0.8412, -0.6938, -2.1568, -1.2821, -0.7416, -1.8385,\n", 190 | " 1.6660, -0.3524], grad_fn=)\n", 191 | "bias: tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], grad_fn=)\n" 192 | ] 193 | } 194 | ], 195 | "source": [ 196 | "model = MyModel()" 197 | ] 198 | }, 199 | { 200 | "cell_type": "markdown", 201 | "id": "b3779250", 202 | "metadata": {}, 203 | "source": [ 204 | "## nn.init.constant_" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": 9, 210 | "id": "de3a0083", 211 | "metadata": {}, 212 | "outputs": [], 213 | "source": [ 214 | "class MyModel(nn.Module):\n", 215 | " def __init__(self):\n", 216 | " super(MyModel, self).__init__()\n", 217 | " self.net = models.resnet18()\n", 218 | " print(\"weights: \", self.net.fc.weight[0][:10])\n", 219 | " print(\"bias: \", self.net.fc.bias[:10])\n", 220 | " print(\"=====================\")\n", 221 | " \n", 222 | " nn.init.constant_(self.net.fc.weight, 1)\n", 223 | " nn.init.constant_(self.net.fc.bias, 0)\n", 224 | " \n", 225 | " print(\"weights: \", self.net.fc.weight[0][:10])\n", 226 | " print(\"bias: \", self.net.fc.bias[:10])\n", 227 | " \n", 228 | " def forward(self, x):\n", 229 | " output = self.net(x)\n", 230 | " return output" 231 | ] 232 | }, 233 | { 234 | "cell_type": "code", 235 | "execution_count": 10, 236 | "id": "48ccdb1d", 237 | "metadata": {}, 238 | "outputs": [ 239 | { 240 | "name": "stdout", 241 | "output_type": "stream", 242 | "text": [ 243 | "weights: tensor([ 0.0248, 0.0262, 0.0107, 0.0300, -0.0369, 0.0325, 0.0136, -0.0440,\n", 244 | " 0.0023, 0.0258], grad_fn=)\n", 245 | "bias: tensor([ 9.7215e-05, -4.1210e-03, -3.4068e-02, 2.0162e-02, 3.1296e-02,\n", 246 | " -3.7656e-03, 3.4689e-02, -2.4909e-02, 3.8025e-02, 1.4642e-02],\n", 247 | " grad_fn=)\n", 248 | "=====================\n", 249 | "weights: tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], grad_fn=)\n", 250 | "bias: tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], grad_fn=)\n" 251 | ] 252 | } 253 | ], 254 | "source": [ 255 | "model = MyModel()" 256 | ] 257 | }, 258 | { 259 | "cell_type": "markdown", 260 | "id": "1dcbee20", 261 | "metadata": {}, 262 | "source": [ 263 | "## apply\n", 264 | "### nn.init 寫法" 265 | ] 266 | }, 267 | { 268 | "cell_type": "code", 269 | "execution_count": 11, 270 | "id": "578ae06c", 271 | "metadata": {}, 272 | "outputs": [], 273 | "source": [ 274 | "class MyModel(nn.Module):\n", 275 | " def __init__(self):\n", 276 | " super(MyModel, self).__init__()\n", 277 | " self.net = models.resnet18()\n", 278 | " print(\"weights: \", self.net.fc.weight[0][:10])\n", 279 | " print(\"bias: \", self.net.fc.bias[:10])\n", 280 | " print(\"=====================\")\n", 281 | " \n", 282 | " self.apply(self.init_weights)\n", 283 | " print(\"weights: \", self.net.fc.weight[0][:10])\n", 284 | " print(\"bias: \", self.net.fc.bias[:10])\n", 285 | " \n", 286 | " def init_weights(self, module):\n", 287 | " if isinstance(module, nn.Linear):\n", 288 | " nn.init.normal_(module.weight.data, mean=0.0, std=1.0)\n", 289 | " \n", 290 | " if module.bias is not 
None:\n", 291 | " nn.init.zeros_(module.bias.data)\n", 292 | " \n", 293 | " def forward(self, x):\n", 294 | " output = self.net(x)\n", 295 | " return output" 296 | ] 297 | }, 298 | { 299 | "cell_type": "code", 300 | "execution_count": 12, 301 | "id": "e0e38f8a", 302 | "metadata": {}, 303 | "outputs": [ 304 | { 305 | "name": "stdout", 306 | "output_type": "stream", 307 | "text": [ 308 | "weights: tensor([ 0.0124, -0.0300, 0.0275, -0.0354, -0.0005, 0.0229, 0.0324, -0.0056,\n", 309 | " 0.0028, -0.0215], grad_fn=)\n", 310 | "bias: tensor([-0.0225, -0.0149, 0.0258, 0.0410, 0.0232, -0.0307, -0.0196, 0.0391,\n", 311 | " -0.0055, -0.0414], grad_fn=)\n", 312 | "=====================\n", 313 | "weights: tensor([ 0.3663, 1.0643, 0.0746, 0.0075, 0.8304, -0.5193, -0.0839, 0.1247,\n", 314 | " 0.7318, -1.8451], grad_fn=)\n", 315 | "bias: tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], grad_fn=)\n" 316 | ] 317 | } 318 | ], 319 | "source": [ 320 | "model = MyModel()" 321 | ] 322 | }, 323 | { 324 | "cell_type": "markdown", 325 | "id": "5dde5ba8", 326 | "metadata": {}, 327 | "source": [ 328 | "### data.normal_、data.zero_ 寫法" 329 | ] 330 | }, 331 | { 332 | "cell_type": "code", 333 | "execution_count": 13, 334 | "id": "babaa940", 335 | "metadata": {}, 336 | "outputs": [], 337 | "source": [ 338 | "class MyModel(nn.Module):\n", 339 | " def __init__(self):\n", 340 | " super(MyModel, self).__init__()\n", 341 | " self.net = models.resnet18()\n", 342 | " print(\"weights: \", self.net.fc.weight[0][:10])\n", 343 | " print(\"bias: \", self.net.fc.bias[:10])\n", 344 | " print(\"=====================\")\n", 345 | " \n", 346 | " self.apply(self.init_weights)\n", 347 | " print(\"weights: \", self.net.fc.weight[0][:10])\n", 348 | " print(\"bias: \", self.net.fc.bias[:10])\n", 349 | " \n", 350 | " def init_weights(self, module):\n", 351 | " if isinstance(module, nn.Linear):\n", 352 | " module.weight.data.normal_(mean=0.0, std=1.0)\n", 353 | " \n", 354 | " if module.bias is not None:\n", 355 | " module.bias.data.zero_()\n", 356 | " \n", 357 | " def forward(self, x):\n", 358 | " output = self.net(x)\n", 359 | " return output" 360 | ] 361 | }, 362 | { 363 | "cell_type": "code", 364 | "execution_count": 14, 365 | "id": "3a49fc04", 366 | "metadata": {}, 367 | "outputs": [ 368 | { 369 | "name": "stdout", 370 | "output_type": "stream", 371 | "text": [ 372 | "weights: tensor([-0.0099, 0.0295, -0.0219, 0.0023, -0.0422, -0.0371, 0.0238, -0.0077,\n", 373 | " 0.0160, 0.0006], grad_fn=)\n", 374 | "bias: tensor([-0.0103, 0.0023, -0.0279, 0.0155, -0.0325, 0.0441, 0.0082, 0.0163,\n", 375 | " 0.0407, -0.0351], grad_fn=)\n", 376 | "=====================\n", 377 | "weights: tensor([ 0.8214, -1.7671, 0.5230, -0.1822, -0.0327, 0.8022, 0.2237, 0.1050,\n", 378 | " 0.5963, -0.1409], grad_fn=)\n", 379 | "bias: tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], grad_fn=)\n" 380 | ] 381 | } 382 | ], 383 | "source": [ 384 | "model = MyModel()" 385 | ] 386 | }, 387 | { 388 | "cell_type": "markdown", 389 | "id": "cf7d7e3d", 390 | "metadata": {}, 391 | "source": [ 392 | "## 將多個不同 layer 進行初始化設定" 393 | ] 394 | }, 395 | { 396 | "cell_type": "code", 397 | "execution_count": 15, 398 | "id": "de31051a", 399 | "metadata": {}, 400 | "outputs": [], 401 | "source": [ 402 | "class MyModel(nn.Module):\n", 403 | " def __init__(self):\n", 404 | " super(MyModel, self).__init__()\n", 405 | " self.net = models.resnet18()\n", 406 | " print(\"weights: \", self.net.conv1.weight[0][:10])\n", 407 | " print(\"=====================\")\n", 408 | " \n", 409 | " 
self.apply(self.init_weights)\n", 410 | " print(\"weights: \", self.net.conv1.weight[0][:10])\n", 411 | " \n", 412 | " def init_weights(self, module):\n", 413 | " if isinstance(module, (nn.Linear, nn.Conv2d)):\n", 414 | " nn.init.normal_(module.weight.data, mean=0.0, std=1.0)\n", 415 | " \n", 416 | " if module.bias is not None:\n", 417 | " nn.init.zeros_(module.bias.data)\n", 418 | " \n", 419 | " def forward(self, x):\n", 420 | " output = self.net(x)\n", 421 | " return output" 422 | ] 423 | }, 424 | { 425 | "cell_type": "code", 426 | "execution_count": 16, 427 | "id": "77fd07de", 428 | "metadata": { 429 | "scrolled": true 430 | }, 431 | "outputs": [ 432 | { 433 | "name": "stdout", 434 | "output_type": "stream", 435 | "text": [ 436 | "weights: tensor([[[-0.0319, -0.0145, -0.0185, 0.0057, 0.0001, 0.0433, -0.0117],\n", 437 | " [ 0.0405, -0.0316, -0.0135, 0.0019, 0.0007, 0.0032, 0.0305],\n", 438 | " [ 0.0050, -0.0084, -0.0146, 0.0184, -0.0172, -0.0029, 0.0319],\n", 439 | " [ 0.0082, 0.0106, 0.0229, 0.0060, 0.0499, -0.0197, 0.0040],\n", 440 | " [ 0.0133, 0.0116, -0.0512, 0.0193, -0.0222, -0.0523, 0.0177],\n", 441 | " [-0.0311, -0.0272, 0.0080, -0.0590, 0.0138, 0.0333, -0.0300],\n", 442 | " [-0.0417, -0.0587, 0.0343, 0.0311, 0.0013, 0.0216, 0.0217]],\n", 443 | "\n", 444 | " [[ 0.0033, 0.0007, 0.0176, -0.0081, -0.0109, -0.0139, 0.0220],\n", 445 | " [-0.0155, -0.0324, -0.0097, -0.0308, -0.0033, -0.0112, -0.0142],\n", 446 | " [-0.0187, 0.0068, -0.0196, -0.0388, -0.0116, 0.0081, -0.0016],\n", 447 | " [ 0.0434, -0.0057, -0.0240, -0.0335, 0.0303, -0.0025, -0.0194],\n", 448 | " [ 0.0018, 0.0358, -0.0005, 0.0317, -0.0479, 0.0113, 0.0030],\n", 449 | " [ 0.0159, -0.0216, -0.0118, -0.0303, 0.0004, -0.0065, 0.0262],\n", 450 | " [-0.0066, -0.0389, 0.0145, -0.0067, -0.0134, -0.0472, 0.0015]],\n", 451 | "\n", 452 | " [[-0.0014, -0.0159, 0.0558, -0.0046, 0.0112, -0.0291, 0.0169],\n", 453 | " [ 0.0174, 0.0369, -0.0317, 0.0005, -0.0011, -0.0384, -0.0065],\n", 454 | " [ 0.0201, -0.0081, 0.0402, -0.0104, -0.0052, -0.0488, -0.0139],\n", 455 | " [-0.0014, -0.0252, 0.0328, -0.0183, 0.0337, 0.0138, -0.0432],\n", 456 | " [ 0.0007, -0.0179, 0.0056, -0.0030, 0.0191, 0.0150, -0.0236],\n", 457 | " [ 0.0142, -0.0236, -0.0284, -0.0281, -0.0274, 0.0445, 0.0332],\n", 458 | " [-0.0264, 0.0070, -0.0441, 0.0289, -0.0322, 0.0095, -0.0481]]],\n", 459 | " grad_fn=)\n", 460 | "=====================\n", 461 | "weights: tensor([[[-8.8904e-01, -2.0701e+00, 1.0050e-01, 7.2429e-01, -7.4054e-01,\n", 462 | " -9.5613e-02, 1.7815e-01],\n", 463 | " [ 1.3712e-01, 4.1837e-01, 7.3798e-01, 6.6692e-01, 1.8456e+00,\n", 464 | " 8.2059e-01, 1.2366e-01],\n", 465 | " [ 6.5705e-01, -3.4305e-01, -1.6582e-01, 1.0174e+00, -3.1926e-01,\n", 466 | " 6.1580e-01, 7.7755e-04],\n", 467 | " [ 2.7073e-01, -6.9397e-01, 3.0917e-01, -1.0479e+00, -1.0288e+00,\n", 468 | " 5.9288e-01, -1.9933e-01],\n", 469 | " [ 2.0626e-01, 3.0711e-01, -8.5745e-01, 5.9592e-01, -1.2377e+00,\n", 470 | " -4.2610e-01, -1.0987e+00],\n", 471 | " [ 2.4862e-01, 1.3476e+00, 9.5693e-01, 2.0993e-01, -5.2088e-01,\n", 472 | " -5.0817e-02, -4.3623e-01],\n", 473 | " [-1.1765e+00, -9.1132e-01, -7.5022e-01, 8.9937e-02, -6.4415e-01,\n", 474 | " -1.0608e+00, -6.4631e-02]],\n", 475 | "\n", 476 | " [[ 5.9647e-01, 1.0757e+00, 4.2822e-01, 6.6960e-02, -4.3867e-01,\n", 477 | " -1.4799e+00, -1.0366e+00],\n", 478 | " [ 1.2957e+00, -2.0170e+00, 7.3855e-02, 1.2510e+00, -1.2714e+00,\n", 479 | " -1.0453e+00, 6.1350e-01],\n", 480 | " [-1.3531e+00, 1.2225e+00, 4.3358e-01, -2.3015e+00, 8.1318e-01,\n", 481 | " 1.2633e-01, 
4.8070e-01],\n", 482 | " [-9.4836e-01, 2.9620e-02, 1.4548e-01, -6.8729e-02, -4.5828e-02,\n", 483 | " 1.7245e+00, 6.2282e-01],\n", 484 | " [-1.7027e-02, 1.1834e+00, 7.5943e-01, 1.3469e+00, 9.7271e-01,\n", 485 | " -7.7879e-01, 2.5491e-01],\n", 486 | " [-6.4239e-03, 3.6917e-01, 2.9496e+00, -3.9017e-01, -1.5723e+00,\n", 487 | " -7.1125e-01, -3.4555e-01],\n", 488 | " [ 2.5523e-01, -1.1647e+00, 1.6443e-01, -1.1271e+00, -1.6533e-01,\n", 489 | " -4.7834e-01, 5.4907e-01]],\n", 490 | "\n", 491 | " [[ 1.6279e-01, 3.5430e-02, 1.1539e+00, 1.7508e+00, -3.1444e+00,\n", 492 | " 2.9327e-01, -1.3771e+00],\n", 493 | " [-6.8586e-02, 8.4898e-01, -7.8244e-01, -2.2995e+00, -3.1493e-01,\n", 494 | " 8.0619e-01, -1.0681e+00],\n", 495 | " [ 1.1008e-01, 5.9438e-01, -9.7449e-02, 4.8654e-01, 7.0861e-01,\n", 496 | " -1.0497e+00, -7.3548e-01],\n", 497 | " [-5.1572e-03, 1.4781e+00, -1.5989e+00, 5.3535e-01, -1.3887e-01,\n", 498 | " 1.0668e-01, -1.1563e+00],\n", 499 | " [ 2.3267e+00, 9.4164e-01, -1.4906e+00, 1.2799e+00, -2.1437e-01,\n", 500 | " 1.2647e-01, 1.1495e+00],\n", 501 | " [-5.8708e-01, 1.5703e+00, 2.1097e+00, 1.7637e-01, 8.9124e-01,\n", 502 | " 9.4806e-01, 9.7769e-01],\n", 503 | " [ 4.1859e-01, 4.8259e-01, 5.3775e-02, -2.3132e+00, 6.0595e-01,\n", 504 | " 6.1304e-01, 3.1356e-01]]], grad_fn=)\n" 505 | ] 506 | } 507 | ], 508 | "source": [ 509 | "model = MyModel()" 510 | ] 511 | }, 512 | { 513 | "cell_type": "markdown", 514 | "id": "d739a471", 515 | "metadata": {}, 516 | "source": [ 517 | "# 權重初始化定義在模型外部" 518 | ] 519 | }, 520 | { 521 | "cell_type": "markdown", 522 | "id": "13bc3fd6", 523 | "metadata": {}, 524 | "source": [ 525 | "## 直接定義\n", 526 | "### data.normal_、data.zero_" 527 | ] 528 | }, 529 | { 530 | "cell_type": "code", 531 | "execution_count": 38, 532 | "id": "c1a94fbc", 533 | "metadata": {}, 534 | "outputs": [], 535 | "source": [ 536 | "class MyModel(nn.Module):\n", 537 | " def __init__(self):\n", 538 | " super(MyModel, self).__init__()\n", 539 | " self.net = models.resnet18()\n", 540 | " \n", 541 | " def forward(self, x):\n", 542 | " output = self.net(x)\n", 543 | " return output" 544 | ] 545 | }, 546 | { 547 | "cell_type": "code", 548 | "execution_count": 39, 549 | "id": "51ea662d", 550 | "metadata": {}, 551 | "outputs": [], 552 | "source": [ 553 | "model = MyModel()" 554 | ] 555 | }, 556 | { 557 | "cell_type": "code", 558 | "execution_count": 40, 559 | "id": "72ee0d74", 560 | "metadata": {}, 561 | "outputs": [ 562 | { 563 | "name": "stdout", 564 | "output_type": "stream", 565 | "text": [ 566 | "weights: tensor([-0.0177, -0.0383, 0.0188, 0.0390, 0.0256, -0.0224, -0.0312, 0.0102,\n", 567 | " 0.0342, 0.0323], grad_fn=)\n", 568 | "bias: tensor([ 0.0340, 0.0321, 0.0181, 0.0400, 0.0119, 0.0009, -0.0185, -0.0041,\n", 569 | " 0.0240, 0.0040], grad_fn=)\n", 570 | "=====================\n", 571 | "weights: tensor([-0.2647, -0.7330, 0.1211, -0.1159, -0.1106, 0.3854, 0.4741, 1.1223,\n", 572 | " -0.5903, -0.0706], grad_fn=)\n", 573 | "bias: tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], grad_fn=)\n" 574 | ] 575 | } 576 | ], 577 | "source": [ 578 | "print(\"weights: \", model.net.fc.weight[0][:10])\n", 579 | "print(\"bias: \", model.net.fc.bias[:10])\n", 580 | "print(\"=====================\")\n", 581 | "model.net.fc.weight.data.normal_(mean=0.0, std=1.0)\n", 582 | "model.net.fc.bias.data.zero_()\n", 583 | "print(\"weights: \", model.net.fc.weight[0][:10])\n", 584 | "print(\"bias: \", model.net.fc.bias[:10])" 585 | ] 586 | }, 587 | { 588 | "cell_type": "markdown", 589 | "id": "081bda43", 590 | "metadata": {}, 591 | 
"source": [ 592 | "## apply\n", 593 | "### torch.nn.Parameter" 594 | ] 595 | }, 596 | { 597 | "cell_type": "code", 598 | "execution_count": 20, 599 | "id": "53e173d9", 600 | "metadata": {}, 601 | "outputs": [], 602 | "source": [ 603 | "model = MyModel()" 604 | ] 605 | }, 606 | { 607 | "cell_type": "code", 608 | "execution_count": 21, 609 | "id": "5bc5ece8", 610 | "metadata": {}, 611 | "outputs": [], 612 | "source": [ 613 | "def init_weights(module):\n", 614 | " if isinstance(module, nn.Linear):\n", 615 | " module.weight = torch.nn.Parameter(torch.ones(module.weight.shape)*0.9, requires_grad=True)\n", 616 | "\n", 617 | " if module.bias is not None:\n", 618 | " module.bias = torch.nn.Parameter(torch.zeros(module.bias.shape), requires_grad=True)" 619 | ] 620 | }, 621 | { 622 | "cell_type": "code", 623 | "execution_count": 22, 624 | "id": "1e1b8fae", 625 | "metadata": {}, 626 | "outputs": [ 627 | { 628 | "name": "stdout", 629 | "output_type": "stream", 630 | "text": [ 631 | "weights: tensor([ 0.0149, 0.0048, 0.0107, -0.0305, -0.0032, -0.0392, -0.0277, 0.0231,\n", 632 | " 0.0438, 0.0220], grad_fn=)\n", 633 | "bias: tensor([-0.0240, 0.0434, 0.0303, -0.0400, 0.0099, 0.0414, -0.0095, -0.0247,\n", 634 | " 0.0179, -0.0266], grad_fn=)\n", 635 | "=====================\n", 636 | "weights: tensor([0.9000, 0.9000, 0.9000, 0.9000, 0.9000, 0.9000, 0.9000, 0.9000, 0.9000,\n", 637 | " 0.9000], grad_fn=)\n", 638 | "bias: tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], grad_fn=)\n" 639 | ] 640 | } 641 | ], 642 | "source": [ 643 | "print(\"weights: \", model.net.fc.weight[0][:10])\n", 644 | "print(\"bias: \", model.net.fc.bias[:10])\n", 645 | "print(\"=====================\")\n", 646 | "\n", 647 | "model.apply(init_weights)\n", 648 | "print(\"weights: \", model.net.fc.weight[0][:10])\n", 649 | "print(\"bias: \", model.net.fc.bias[:10])" 650 | ] 651 | }, 652 | { 653 | "cell_type": "markdown", 654 | "id": "925f0946", 655 | "metadata": {}, 656 | "source": [ 657 | "### data.normal_、data.zero_" 658 | ] 659 | }, 660 | { 661 | "cell_type": "code", 662 | "execution_count": 23, 663 | "id": "85cf119d", 664 | "metadata": {}, 665 | "outputs": [], 666 | "source": [ 667 | "model = MyModel()" 668 | ] 669 | }, 670 | { 671 | "cell_type": "code", 672 | "execution_count": 24, 673 | "id": "792b1009", 674 | "metadata": {}, 675 | "outputs": [], 676 | "source": [ 677 | "def init_weights(module):\n", 678 | " if isinstance(module, nn.Linear):\n", 679 | " module.weight.data.normal_(mean=0.0, std=1.0)\n", 680 | " \n", 681 | " if module.bias is not None:\n", 682 | " module.bias.data.zero_()" 683 | ] 684 | }, 685 | { 686 | "cell_type": "code", 687 | "execution_count": 25, 688 | "id": "4543457f", 689 | "metadata": {}, 690 | "outputs": [ 691 | { 692 | "name": "stdout", 693 | "output_type": "stream", 694 | "text": [ 695 | "weights: tensor([ 0.0011, 0.0332, 0.0402, -0.0004, -0.0139, 0.0054, -0.0275, -0.0018,\n", 696 | " -0.0239, 0.0356], grad_fn=)\n", 697 | "bias: tensor([ 0.0101, 0.0019, 0.0201, 0.0248, -0.0377, -0.0390, -0.0401, -0.0009,\n", 698 | " -0.0267, -0.0100], grad_fn=)\n", 699 | "=====================\n", 700 | "weights: tensor([ 0.0938, -0.6255, 1.1889, 0.6034, 1.6854, 0.1906, 1.3271, 1.3844,\n", 701 | " -1.7708, -1.0778], grad_fn=)\n", 702 | "bias: tensor([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], grad_fn=)\n" 703 | ] 704 | } 705 | ], 706 | "source": [ 707 | "print(\"weights: \", model.net.fc.weight[0][:10])\n", 708 | "print(\"bias: \", model.net.fc.bias[:10])\n", 709 | "print(\"=====================\")\n", 710 | "\n", 711 | 
"model.apply(init_weights)\n", 712 | "print(\"weights: \", model.net.fc.weight[0][:10])\n", 713 | "print(\"bias: \", model.net.fc.bias[:10])" 714 | ] 715 | }, 716 | { 717 | "cell_type": "markdown", 718 | "id": "1cae27f1", 719 | "metadata": {}, 720 | "source": [ 721 | "# 模型權重複製" 722 | ] 723 | }, 724 | { 725 | "cell_type": "markdown", 726 | "id": "87060900", 727 | "metadata": {}, 728 | "source": [ 729 | "## 複製一個模型權重至另一個模型" 730 | ] 731 | }, 732 | { 733 | "cell_type": "code", 734 | "execution_count": 26, 735 | "id": "bd39e77f", 736 | "metadata": {}, 737 | "outputs": [], 738 | "source": [ 739 | "checkpoint = torch.load('resnet_weights.pth')" 740 | ] 741 | }, 742 | { 743 | "cell_type": "code", 744 | "execution_count": 27, 745 | "id": "5a44e601", 746 | "metadata": {}, 747 | "outputs": [], 748 | "source": [ 749 | "class MyModel(nn.Module):\n", 750 | " def __init__(self):\n", 751 | " super(MyModel, self).__init__()\n", 752 | " self.resnet = models.resnet18()\n", 753 | " self.resnet.fc = nn.Linear(512, 256)\n", 754 | " self.linear = nn.Linear(256, 10)\n", 755 | " \n", 756 | " def forward(self, x):\n", 757 | " x = self.net(x)\n", 758 | " output = self.linear(x)\n", 759 | " \n", 760 | " return output" 761 | ] 762 | }, 763 | { 764 | "cell_type": "code", 765 | "execution_count": 28, 766 | "id": "11f3326c", 767 | "metadata": {}, 768 | "outputs": [], 769 | "source": [ 770 | "model = MyModel()" 771 | ] 772 | }, 773 | { 774 | "cell_type": "code", 775 | "execution_count": 29, 776 | "id": "55b65ae4", 777 | "metadata": {}, 778 | "outputs": [ 779 | { 780 | "name": "stdout", 781 | "output_type": "stream", 782 | "text": [ 783 | "weights: tensor([[-0.0006, -0.0150, 0.0463, -0.0079, -0.0152, 0.0115, 0.0249],\n", 784 | " [-0.0154, 0.0046, 0.0252, 0.0479, 0.0018, -0.0133, -0.0048],\n", 785 | " [ 0.0047, 0.0077, 0.0154, 0.0016, -0.0147, -0.0068, -0.0362],\n", 786 | " [ 0.0425, -0.0370, 0.0107, 0.0094, -0.0246, 0.0161, -0.0203],\n", 787 | " [ 0.0256, -0.0016, -0.0147, 0.0002, 0.0113, 0.0191, 0.0252],\n", 788 | " [ 0.0187, -0.0308, 0.0402, 0.0105, -0.0355, -0.0450, 0.0135],\n", 789 | " [ 0.0285, 0.0155, -0.0073, -0.0131, -0.0070, 0.0490, 0.0098]],\n", 790 | " grad_fn=)\n" 791 | ] 792 | } 793 | ], 794 | "source": [ 795 | "print(\"weights: \", model.resnet.conv1.weight[0][0])" 796 | ] 797 | }, 798 | { 799 | "cell_type": "code", 800 | "execution_count": 30, 801 | "id": "e16e779f", 802 | "metadata": {}, 803 | "outputs": [], 804 | "source": [ 805 | "pretrained_dict = {k: v for k, v in checkpoint.items() if k not in ['resnet.fc.weight', 'resnet.fc.bias']}" 806 | ] 807 | }, 808 | { 809 | "cell_type": "code", 810 | "execution_count": 31, 811 | "id": "21485d1f", 812 | "metadata": {}, 813 | "outputs": [ 814 | { 815 | "data": { 816 | "text/plain": [ 817 | "tensor([[-0.0322, -0.0509, -0.0117, -0.0062, 0.0003, -0.0347, 0.0073],\n", 818 | " [-0.0072, -0.0488, -0.0295, -0.0035, -0.0362, -0.0497, -0.0226],\n", 819 | " [ 0.0087, 0.0136, 0.0176, 0.0150, -0.0127, 0.0358, 0.0585],\n", 820 | " [-0.0243, 0.0452, 0.0083, 0.0163, -0.0355, 0.0162, -0.0159],\n", 821 | " [-0.0291, 0.0263, 0.0014, 0.0211, -0.0300, 0.0307, 0.0133],\n", 822 | " [ 0.0156, -0.0002, 0.0679, 0.0492, -0.0200, -0.0276, 0.0333],\n", 823 | " [-0.0059, -0.0139, 0.0266, -0.0367, -0.0117, 0.0113, -0.0111]])" 824 | ] 825 | }, 826 | "execution_count": 31, 827 | "metadata": {}, 828 | "output_type": "execute_result" 829 | } 830 | ], 831 | "source": [ 832 | "pretrained_dict['resnet.conv1.weight'][0][0]" 833 | ] 834 | }, 835 | { 836 | "cell_type": "code", 837 | "execution_count": 
835 | { 836 | "cell_type": "code", 837 | "execution_count": 32, 838 | "id": "80214bcd", 839 | "metadata": { 840 | "scrolled": true 841 | }, 842 | "outputs": [ 843 | { 844 | "data": { 845 | "text/plain": [ 846 | "<All keys matched successfully>" 847 | ] 848 | }, 849 | "execution_count": 32, 850 | "metadata": {}, 851 | "output_type": "execute_result" 852 | } 853 | ], 854 | "source": [ 855 | "model_state = model.state_dict()\n", 856 | "model_state.update(pretrained_dict)\n", 857 | "model.load_state_dict(model_state, strict=False)" 858 | ] 859 | }, 860 | { 861 | "cell_type": "code", 862 | "execution_count": 33, 863 | "id": "75c6f63a", 864 | "metadata": {}, 865 | "outputs": [ 866 | { 867 | "name": "stdout", 868 | "output_type": "stream", 869 | "text": [ 870 | "weights: tensor([[-0.0322, -0.0509, -0.0117, -0.0062, 0.0003, -0.0347, 0.0073],\n", 871 | " [-0.0072, -0.0488, -0.0295, -0.0035, -0.0362, -0.0497, -0.0226],\n", 872 | " [ 0.0087, 0.0136, 0.0176, 0.0150, -0.0127, 0.0358, 0.0585],\n", 873 | " [-0.0243, 0.0452, 0.0083, 0.0163, -0.0355, 0.0162, -0.0159],\n", 874 | " [-0.0291, 0.0263, 0.0014, 0.0211, -0.0300, 0.0307, 0.0133],\n", 875 | " [ 0.0156, -0.0002, 0.0679, 0.0492, -0.0200, -0.0276, 0.0333],\n", 876 | " [-0.0059, -0.0139, 0.0266, -0.0367, -0.0117, 0.0113, -0.0111]],\n", 877 | " grad_fn=<SelectBackward0>)\n" 878 | ] 879 | } 880 | ], 881 | "source": [ 882 | "print(\"weights: \", model.resnet.conv1.weight[0][0])" 883 | ] 884 | },
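{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "When two networks share an identical submodule structure, the same copy can be written by feeding one module's `state_dict` straight into the other; a minimal sketch with two fresh instances:"
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Sketch: clone weights between two identically-structured submodules.\n",
  "source_model = MyModel()\n",
  "target_model = MyModel()\n",
  "target_model.resnet.load_state_dict(source_model.resnet.state_dict())"
 ]
},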
"org:\n", 976 | "net1 weights: tensor([[-0.0167, 0.0080, -0.0142],\n", 977 | " [-0.0078, -0.0056, -0.0023],\n", 978 | " [ 0.0091, -0.0146, -0.0120]], requires_grad=True)\n", 979 | "net2 weights: tensor([[-0.0045, 0.0046, 0.0058],\n", 980 | " [-0.0091, 0.0138, -0.0193],\n", 981 | " [ 0.0091, 0.0119, -0.0138]], requires_grad=True)\n", 982 | "=====================\n", 983 | "new:\n", 984 | "net1 weights: tensor([[-0.0167, 0.0080, -0.0142],\n", 985 | " [-0.0078, -0.0056, -0.0023],\n", 986 | " [ 0.0091, -0.0146, -0.0120]], requires_grad=True)\n", 987 | "net2 weights: tensor([[-0.0167, 0.0080, -0.0142],\n", 988 | " [-0.0078, -0.0056, -0.0023],\n", 989 | " [ 0.0091, -0.0146, -0.0120]], requires_grad=True)\n", 990 | "========= 2 =========\n", 991 | "org:\n", 992 | "net1 weights: tensor([[-0.0119, 0.0113, -0.0167],\n", 993 | " [-0.0195, 0.0121, -0.0249],\n", 994 | " [-0.0240, 0.0281, -0.0243]], requires_grad=True)\n", 995 | "net2 weights: tensor([[ 0.0269, -0.0043, 0.0169],\n", 996 | " [ 0.0139, 0.0185, -0.0151],\n", 997 | " [ 0.0261, -0.0009, -0.0192]], requires_grad=True)\n", 998 | "=====================\n", 999 | "new:\n", 1000 | "net1 weights: tensor([[-0.0119, 0.0113, -0.0167],\n", 1001 | " [-0.0195, 0.0121, -0.0249],\n", 1002 | " [-0.0240, 0.0281, -0.0243]], requires_grad=True)\n", 1003 | "net2 weights: tensor([[-0.0119, 0.0113, -0.0167],\n", 1004 | " [-0.0195, 0.0121, -0.0249],\n", 1005 | " [-0.0240, 0.0281, -0.0243]], requires_grad=True)\n" 1006 | ] 1007 | } 1008 | ], 1009 | "source": [ 1010 | "with torch.no_grad():\n", 1011 | " for i in range(len(model_1.net1)):\n", 1012 | " if isinstance(model_1.net1[i], nn.Conv2d):\n", 1013 | " print(\"========= {} =========\".format(i))\n", 1014 | " print(\"org:\")\n", 1015 | " print(\"net1 weights:\", model_1.net1[i].weight[0][0])\n", 1016 | " print(\"net2 weights:\", model_1.net2[i].weight[0][0])\n", 1017 | " \n", 1018 | " model_1.net2[i].weight.copy_(model_1.net1[i].weight)\n", 1019 | "\n", 1020 | " print(\"=====================\")\n", 1021 | " print(\"new:\")\n", 1022 | " print(\"net1 weights:\", model_1.net1[i].weight[0][0])\n", 1023 | " print(\"net2 weights:\", model_1.net2[i].weight[0][0])" 1024 | ] 1025 | }, 1026 | { 1027 | "cell_type": "code", 1028 | "execution_count": null, 1029 | "id": "53abba73", 1030 | "metadata": {}, 1031 | "outputs": [], 1032 | "source": [] 1033 | } 1034 | ], 1035 | "metadata": { 1036 | "kernelspec": { 1037 | "display_name": "pytorch_v13", 1038 | "language": "python", 1039 | "name": "pytorch_v13" 1040 | }, 1041 | "language_info": { 1042 | "codemirror_mode": { 1043 | "name": "ipython", 1044 | "version": 3 1045 | }, 1046 | "file_extension": ".py", 1047 | "mimetype": "text/x-python", 1048 | "name": "python", 1049 | "nbconvert_exporter": "python", 1050 | "pygments_lexer": "ipython3", 1051 | "version": "3.9.15" 1052 | } 1053 | }, 1054 | "nbformat": 4, 1055 | "nbformat_minor": 5 1056 | } 1057 | --------------------------------------------------------------------------------