├── 1-cnns ├── images │ ├── conv.png │ ├── lena.jpg │ ├── mlp.png │ ├── tire.jpg │ ├── alexnet.png │ ├── cifar10.png │ ├── dropout.png │ ├── lines.jpg │ ├── resnet.png │ ├── tLKYz.png │ ├── Variable.png │ ├── building.jpg │ ├── cnn-arch.png │ ├── conv-all.png │ ├── conv-formula.png │ ├── conv-process.png │ ├── environment.png │ ├── network_arch.png │ ├── transfer-all.png │ ├── after-dropout.png │ ├── before-dropout.png │ ├── learning-rate.png │ ├── digit-recognition.png │ ├── padding_strides.gif │ ├── resnet-bottleneck.png │ ├── transfer-freeze.png │ ├── no_padding_strides.gif │ ├── padding_strides_odd.gif │ ├── no_padding_no_strides.gif │ ├── 3D_Convolution_Animation.gif │ ├── full_padding_no_strides.gif │ ├── same_padding_no_strides.gif │ ├── arbitrary_padding_no_strides.gif │ └── qwerty.txt ├── 5-ResNet-CNN.ipynb ├── .ipynb_checkpoints │ ├── 5-ResNet-CNN-checkpoint.ipynb │ ├── 2-CNNs-Basics-checkpoint.ipynb │ ├── 4-Optimizer-checkpoint.ipynb │ └── 0-PyTorch-Basics-checkpoint.ipynb ├── 4-Optimizer.ipynb ├── 0-PyTorch-Basics.ipynb ├── 2-CNNs-Basics.ipynb └── 3-Weight_Initialization.ipynb ├── rnn_lab ├── static │ ├── binAdd.png │ ├── diags.jpeg │ ├── logo.png │ ├── nn_embed.png │ ├── binaryinput.jpg │ ├── binarynet.jpg │ ├── prediction.png │ └── KthNo_architecture.jpg ├── weights │ └── shakespeare-e10.weights.checkpoint └── 4-NewsHeadlines_SummerSchool.ipynb └── README.md /1-cnns/images/conv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/conv.png -------------------------------------------------------------------------------- /1-cnns/images/lena.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/lena.jpg -------------------------------------------------------------------------------- /1-cnns/images/mlp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/mlp.png -------------------------------------------------------------------------------- /1-cnns/images/tire.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/tire.jpg -------------------------------------------------------------------------------- /1-cnns/images/alexnet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/alexnet.png -------------------------------------------------------------------------------- /1-cnns/images/cifar10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/cifar10.png -------------------------------------------------------------------------------- /1-cnns/images/dropout.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/dropout.png -------------------------------------------------------------------------------- /1-cnns/images/lines.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/lines.jpg -------------------------------------------------------------------------------- 
/1-cnns/images/resnet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/resnet.png -------------------------------------------------------------------------------- /1-cnns/images/tLKYz.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/tLKYz.png -------------------------------------------------------------------------------- /rnn_lab/static/binAdd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/rnn_lab/static/binAdd.png -------------------------------------------------------------------------------- /rnn_lab/static/diags.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/rnn_lab/static/diags.jpeg -------------------------------------------------------------------------------- /rnn_lab/static/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/rnn_lab/static/logo.png -------------------------------------------------------------------------------- /1-cnns/images/Variable.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/Variable.png -------------------------------------------------------------------------------- /1-cnns/images/building.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/building.jpg -------------------------------------------------------------------------------- /1-cnns/images/cnn-arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/cnn-arch.png -------------------------------------------------------------------------------- /1-cnns/images/conv-all.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/conv-all.png -------------------------------------------------------------------------------- /rnn_lab/static/nn_embed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/rnn_lab/static/nn_embed.png -------------------------------------------------------------------------------- /1-cnns/images/conv-formula.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/conv-formula.png -------------------------------------------------------------------------------- /1-cnns/images/conv-process.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/conv-process.png -------------------------------------------------------------------------------- /1-cnns/images/environment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/environment.png 
-------------------------------------------------------------------------------- /1-cnns/images/network_arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/network_arch.png -------------------------------------------------------------------------------- /1-cnns/images/transfer-all.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/transfer-all.png -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SS19 2 | 3 | Python notebooks for the 4th Computer Vision Summer School, 2019, organised by IIITH. 4 | -------------------------------------------------------------------------------- /rnn_lab/static/binaryinput.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/rnn_lab/static/binaryinput.jpg -------------------------------------------------------------------------------- /rnn_lab/static/binarynet.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/rnn_lab/static/binarynet.jpg -------------------------------------------------------------------------------- /rnn_lab/static/prediction.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/rnn_lab/static/prediction.png -------------------------------------------------------------------------------- /1-cnns/images/after-dropout.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/after-dropout.png -------------------------------------------------------------------------------- /1-cnns/images/before-dropout.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/before-dropout.png -------------------------------------------------------------------------------- /1-cnns/images/learning-rate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/learning-rate.png -------------------------------------------------------------------------------- /1-cnns/images/digit-recognition.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/digit-recognition.png -------------------------------------------------------------------------------- /1-cnns/images/padding_strides.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/padding_strides.gif -------------------------------------------------------------------------------- /1-cnns/images/resnet-bottleneck.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/resnet-bottleneck.png -------------------------------------------------------------------------------- /1-cnns/images/transfer-freeze.png:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/transfer-freeze.png -------------------------------------------------------------------------------- /1-cnns/images/no_padding_strides.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/no_padding_strides.gif -------------------------------------------------------------------------------- /1-cnns/images/padding_strides_odd.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/padding_strides_odd.gif -------------------------------------------------------------------------------- /rnn_lab/static/KthNo_architecture.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/rnn_lab/static/KthNo_architecture.jpg -------------------------------------------------------------------------------- /1-cnns/images/no_padding_no_strides.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/no_padding_no_strides.gif -------------------------------------------------------------------------------- /1-cnns/images/3D_Convolution_Animation.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/3D_Convolution_Animation.gif -------------------------------------------------------------------------------- /1-cnns/images/full_padding_no_strides.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/full_padding_no_strides.gif -------------------------------------------------------------------------------- /1-cnns/images/same_padding_no_strides.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/same_padding_no_strides.gif -------------------------------------------------------------------------------- /1-cnns/images/arbitrary_padding_no_strides.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/1-cnns/images/arbitrary_padding_no_strides.gif -------------------------------------------------------------------------------- /rnn_lab/weights/shakespeare-e10.weights.checkpoint: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dikshant2210/SS19/HEAD/rnn_lab/weights/shakespeare-e10.weights.checkpoint -------------------------------------------------------------------------------- /1-cnns/5-ResNet-CNN.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "collapsed": true 7 | }, 8 | "source": [ 9 | "## Residual Networks\n", 10 | "In this notebook we will implement a ResNet block and use it as one of the building blocks of a CNN trained on the CIFAR-10 dataset.
The residual module is shown below:\n", 11 | "\n", 12 | "![resnet](images/resnet.png)" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "# Import all libraries\n", 22 | "\n", 23 | "import os\n", 24 | "\n", 25 | "import torch\n", 26 | "import torch.utils.data\n", 27 | "import torch.nn as nn\n", 28 | "import torch.optim as optim\n", 29 | "from torchvision import datasets, transforms\n", 30 | "\n", 31 | "import numpy as np\n", 32 | "import matplotlib.pyplot as plt\n", 33 | "%matplotlib notebook\n", 34 | "\n", 35 | "\n", 36 | "print(torch.__version__)\n", 37 | "\n", 38 | "device = torch.device(\"cuda\")\n", 39 | "DATA_ROOT = '/tmp/data/lab1/'" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": null, 45 | "metadata": {}, 46 | "outputs": [], 47 | "source": [ 48 | "# Data loader\n", 49 | "\n", 50 | "train_loader = torch.utils.data.DataLoader(\n", 51 | "    datasets.CIFAR10(DATA_ROOT, train=True, download=False,\n", 52 | "                   transform=transforms.ToTensor()),\n", 53 | "    batch_size=4, shuffle=True)\n", 54 | "\n", 55 | "test_loader = torch.utils.data.DataLoader(\n", 56 | "    datasets.CIFAR10(DATA_ROOT, train=False, download=False,\n", 57 | "                   transform=transforms.ToTensor()),\n", 58 | "    batch_size=4, shuffle=False)" 59 | ] 60 | }, 61 | { 62 | "cell_type": "markdown", 63 | "metadata": {}, 64 | "source": [ 65 | "#### Basic ResNet Module" 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": null, 71 | "metadata": {}, 72 | "outputs": [], 73 | "source": [ 74 | "# Definition of the basic residual block, which will be replicated\n", 75 | "\n", 76 | "class BasicBlock(nn.Module):\n", 77 | "    def __init__(self, inplanes, planes, stride=1):\n", 78 | "        super(BasicBlock, self).__init__()\n", 79 | "        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n", 80 | "        \n", 81 | "        ### Batch normalization\n", 82 | "        self.bn1 = nn.BatchNorm2d(planes)\n", 83 | "        ###\n", 84 | "        \n", 85 | "        self.relu = nn.ReLU(inplace=True)\n", 86 | "        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)  # stride 1 here, so out keeps the shape of the residual\n", 87 | "        self.bn2 = nn.BatchNorm2d(planes)\n", 88 | "        self.stride = stride\n", 89 | "\n", 90 | "    def forward(self, x):\n", 91 | "        residual = x\n", 92 | "\n", 93 | "        out = self.conv1(x)\n", 94 | "        out = self.bn1(out)\n", 95 | "        out = self.relu(out)\n", 96 | "\n", 97 | "        out = self.conv2(out)\n", 98 | "        out = self.bn2(out)\n", 99 | "        \n", 100 | "        # adding the skip connection\n", 101 | "        out += residual\n", 102 | "        out = self.relu(out)\n", 103 | "\n", 104 | "        return out\n" 105 | ] 106 | }, 107 | { 108 | "cell_type": "markdown", 109 | "metadata": {}, 110 | "source": [ 111 | "### Defining the CNN architecture using ResNet modules" 112 | ] 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": null, 117 | "metadata": {}, 118 | "outputs": [], 119 | "source": [ 120 | "# Defining the entire CNN architecture using multiple resnet modules\n", 121 | "\n", 122 | "class ResNet(nn.Module):\n", 123 | "    \n", 124 | "    def __init__(self, block, layers, num_classes=10):\n", 125 | "        self.input_channels = 64\n", 126 | "        super(ResNet, self).__init__()\n", 127 | "        \n", 128 | "        # Initial non-resnet layers\n", 129 | "        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) # input ch = 3, output ch = 64 \n", 130 | "        self.bn1 = nn.BatchNorm2d(64)\n", 131 | "        self.relu = nn.ReLU(inplace=True)\n", 132 | "        self.maxpool = 
nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n", 133 | " \n", 134 | " #Creating the resnet modules\n", 135 | " self.layer1 = self._make_layer(block, 64, layers[0])\n", 136 | " self.fc = nn.Linear(4096, num_classes)\n", 137 | " \n", 138 | " #Duplicating the resnet module\n", 139 | " def _make_layer(self, block, target_output_channels, blocks, stride=1):\n", 140 | " layers = []\n", 141 | " self.input_channels = target_output_channels\n", 142 | " for i in range(0, blocks):\n", 143 | " layers.append(block(self.input_channels, target_output_channels))\n", 144 | "\n", 145 | " return nn.Sequential(*layers)\n", 146 | " \n", 147 | " def forward(self, x):\n", 148 | " x = self.conv1(x)\n", 149 | " x = self.bn1(x)\n", 150 | " x = self.relu(x)\n", 151 | " x = self.maxpool(x)\n", 152 | " \n", 153 | " #attaching the resnet modules\n", 154 | " x = self.layer1(x)\n", 155 | " x = x.view(x.size(0), -1)\n", 156 | " x = self.fc(x)\n", 157 | " \n", 158 | " return x" 159 | ] 160 | }, 161 | { 162 | "cell_type": "markdown", 163 | "metadata": {}, 164 | "source": [ 165 | "### Printing Model" 166 | ] 167 | }, 168 | { 169 | "cell_type": "code", 170 | "execution_count": null, 171 | "metadata": {}, 172 | "outputs": [], 173 | "source": [ 174 | "model = ResNet(BasicBlock, [2]).to(device)\n", 175 | "print(model)" 176 | ] 177 | }, 178 | { 179 | "cell_type": "code", 180 | "execution_count": null, 181 | "metadata": {}, 182 | "outputs": [], 183 | "source": [ 184 | "# Defining the loss function\n", 185 | "criterion = nn.CrossEntropyLoss().cuda()\n", 186 | "\n", 187 | "#Using adam optimizer\n", 188 | "optimizer = optim.Adam(model.parameters(), lr = 0.0001)" 189 | ] 190 | }, 191 | { 192 | "cell_type": "markdown", 193 | "metadata": {}, 194 | "source": [ 195 | "### Training" 196 | ] 197 | }, 198 | { 199 | "cell_type": "code", 200 | "execution_count": null, 201 | "metadata": {}, 202 | "outputs": [], 203 | "source": [ 204 | "plotIter = 2000\n", 205 | "plotIterCntr = 0\n", 206 | "numEpochs = 1\n", 207 | "\n", 208 | "trainLoss = np.zeros((plotIter*numEpochs,1))\n", 209 | "trainIter = np.arange(plotIter*numEpochs)\n", 210 | "\n", 211 | "for epoch in range(numEpochs): # loop over the dataset multiple times\n", 212 | "\n", 213 | " running_loss = 0.0\n", 214 | " for i, data in enumerate(train_loader, 0):\n", 215 | " # get the inputs\n", 216 | " inputs, labels = data\n", 217 | " \n", 218 | " inputs, labels = inputs.to(device), labels.to(device)\n", 219 | "\n", 220 | " # zero the parameter gradients\n", 221 | " optimizer.zero_grad()\n", 222 | "\n", 223 | " # forward + backward + optimize\n", 224 | " outputs = model(inputs)\n", 225 | " loss = criterion(outputs, labels)\n", 226 | " loss.backward()\n", 227 | " optimizer.step()\n", 228 | "\n", 229 | " # print statistics\n", 230 | " running_loss += loss.item()\n", 231 | " if i % plotIter == plotIter-1: # print every plotIter mini-batches\n", 232 | " trainLoss[plotIterCntr] = running_loss / plotIter\n", 233 | " plotIterCntr+=1\n", 234 | " \n", 235 | " print('[%d, %5d] loss: %.3f' %\n", 236 | " (epoch + 1, i + 1, running_loss / plotIter))\n", 237 | " running_loss = 0.0\n", 238 | " \n", 239 | "plt.plot(np.arange(plotIterCntr)*plotIter,trainLoss[0:plotIterCntr], label=\"train\")\n", 240 | "plt.xlabel('Iteration')\n", 241 | "plt.ylabel('Loss')\n", 242 | "plt.show()\n", 243 | "\n", 244 | "print('Finished Training')" 245 | ] 246 | }, 247 | { 248 | "cell_type": "markdown", 249 | "metadata": {}, 250 | "source": [ 251 | "#### Testing" 252 | ] 253 | }, 254 | { 255 | "cell_type": "code", 256 | 
"execution_count": null, 257 | "metadata": {}, 258 | "outputs": [], 259 | "source": [ 260 | "## Prediction of accuracy\n", 261 | "correct = 0\n", 262 | "total = 0\n", 263 | "\n", 264 | "for data in test_loader:\n", 265 | " inputs, labels = data\n", 266 | " inputs, labels = inputs.to(device), labels.to(device)\n", 267 | " outputs = model(inputs)\n", 268 | " _, predicted = torch.max(outputs, 1)\n", 269 | " total += labels.size(0) \n", 270 | " correct += (predicted == labels).sum().item()\n", 271 | "\n", 272 | "print('Accuracy of the network on the 10000 test images: %f %%' % (\n", 273 | " 100 * correct / total))" 274 | ] 275 | }, 276 | { 277 | "cell_type": "markdown", 278 | "metadata": {}, 279 | "source": [ 280 | "### Self Exercise\n", 281 | "1. Add multiple resnet modules and verify the performance in CIFAR dataset\n", 282 | "2. Create a bottleneck based ResNet module as shown below and validate the training performance:-\n", 283 | "![resnet-bottleneck](images/resnet-bottleneck.png)" 284 | ] 285 | }, 286 | { 287 | "cell_type": "markdown", 288 | "metadata": {}, 289 | "source": [ 290 | "### References\n", 291 | "[1] He, Kaiming, et al. \"[Deep residual learning for image recognition.](https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/He_Deep_Residual_Learning_CVPR_2016_paper.pdf)\" CVPR. 2016." 292 | ] 293 | } 294 | ], 295 | "metadata": { 296 | "kernelspec": { 297 | "display_name": "Python 3", 298 | "language": "python", 299 | "name": "python3" 300 | }, 301 | "language_info": { 302 | "codemirror_mode": { 303 | "name": "ipython", 304 | "version": 3 305 | }, 306 | "file_extension": ".py", 307 | "mimetype": "text/x-python", 308 | "name": "python", 309 | "nbconvert_exporter": "python", 310 | "pygments_lexer": "ipython3", 311 | "version": "3.6.5" 312 | } 313 | }, 314 | "nbformat": 4, 315 | "nbformat_minor": 2 316 | } 317 | -------------------------------------------------------------------------------- /1-cnns/.ipynb_checkpoints/5-ResNet-CNN-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "collapsed": true 7 | }, 8 | "source": [ 9 | "## Residual Networks\n", 10 | "In this notebook we will implement a resnet block and use it as one of the block for training a CNN on CIFAR dataset. 
"execution_count": null, 257 | "metadata": {}, 258 | "outputs": [], 259 | "source": [ 260 | "## Prediction of accuracy\n", 261 | "correct = 0\n", 262 | "total = 0\n", 263 | "\n", 264 | "for data in test_loader:\n", 265 | " inputs, labels = data\n", 266 | " inputs, labels = inputs.to(device), labels.to(device)\n", 267 | " outputs = model(inputs)\n", 268 | " _, predicted = torch.max(outputs, 1)\n", 269 | " total += labels.size(0) \n", 270 | " correct += (predicted == labels).sum().item()\n", 271 | "\n", 272 | "print('Accuracy of the network on the 10000 test images: %f %%' % (\n", 273 | " 100 * correct / total))" 274 | ] 275 | }, 276 | { 277 | "cell_type": "markdown", 278 | "metadata": {}, 279 | "source": [ 280 | "### Self Exercise\n", 281 | "1. Add multiple resnet modules and verify the performance in CIFAR dataset\n", 282 | "2. Create a bottleneck based ResNet module as shown below and validate the training performance:-\n", 283 | "![resnet-bottleneck](images/resnet-bottleneck.png)" 284 | ] 285 | }, 286 | { 287 | "cell_type": "markdown", 288 | "metadata": {}, 289 | "source": [ 290 | "### References\n", 291 | "[1] He, Kaiming, et al. \"[Deep residual learning for image recognition.](https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/He_Deep_Residual_Learning_CVPR_2016_paper.pdf)\" CVPR. 2016." 292 | ] 293 | } 294 | ], 295 | "metadata": { 296 | "kernelspec": { 297 | "display_name": "Python 3", 298 | "language": "python", 299 | "name": "python3" 300 | }, 301 | "language_info": { 302 | "codemirror_mode": { 303 | "name": "ipython", 304 | "version": 3 305 | }, 306 | "file_extension": ".py", 307 | "mimetype": "text/x-python", 308 | "name": "python", 309 | "nbconvert_exporter": "python", 310 | "pygments_lexer": "ipython3", 311 | "version": "3.6.5" 312 | } 313 | }, 314 | "nbformat": 4, 315 | "nbformat_minor": 2 316 | } 317 | -------------------------------------------------------------------------------- /1-cnns/.ipynb_checkpoints/2-CNNs-Basics-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Training CNN on CIFAR10 Dataset [1]\n", 8 | "\n", 9 | "We begin with building a CNN architecture for image classification task on CIFAR10 dataset. 
\n", 10 | "\n", 11 | "In this first part of the tutorial, we will understand how to arrange the different architectural components of CNN network, defining the appropriate loss, training the network using backpropagation and finally testing it on the test data.To make data loading simple, we would use the torchvision package created as part of PyTorch which has data loaders for standard datasets such as ImageNet, CIFAR10, MNIST.\n", 12 | "![CIFAR10](images/cifar10.png)" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "metadata": { 19 | "collapsed": true 20 | }, 21 | "outputs": [], 22 | "source": [ 23 | "import torch\n", 24 | "import torch.nn as nn\n", 25 | "import torch.nn.functional as F\n", 26 | "import torch.utils.data as Data\n", 27 | "import torchvision\n", 28 | "import torchvision.transforms as transforms\n", 29 | "\n", 30 | "#To schedule the learning rate\n", 31 | "from torch.optim import lr_scheduler\n", 32 | "\n", 33 | "import matplotlib.pyplot as plt\n", 34 | "import numpy as np\n", 35 | "%matplotlib inline\n", 36 | "device = torch.device(\"cuda\")\n", 37 | "print(\"torch version: %s\"%torch.__version__)" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": null, 43 | "metadata": { 44 | "collapsed": true 45 | }, 46 | "outputs": [], 47 | "source": [ 48 | "DATA_ROOT = '/tmp/data/lab1'\n", 49 | "\n", 50 | "# Hyper Parameters\n", 51 | "num_epoch = 10 # train the training data n times, to save time, we just train 1 epoch\n", 52 | "DOWNLOAD_CIFAR10 = True # set to False if you have downloaded" 53 | ] 54 | }, 55 | { 56 | "cell_type": "markdown", 57 | "metadata": {}, 58 | "source": [ 59 | "#### Dataloader\n", 60 | "\n", 61 | "For efficient training, we need to make sure that our code is efficient enough to send data from RAM to GPU and vice-versa. For some standard datasets like MNIST, CIFAR-10 etc., we already have some well structured dataloaders. In this tutorial, we will be using the CIFAR-10 dataloader.\n", 62 | "\n", 63 | "For more you can visit the following links:\n", 64 | "\n", 65 | "- Existing dataloaders - _\n", 66 | "- How to write dataloaders for your custom dataset - _" 67 | ] 68 | }, 69 | { 70 | "cell_type": "code", 71 | "execution_count": null, 72 | "metadata": { 73 | "collapsed": true 74 | }, 75 | "outputs": [], 76 | "source": [ 77 | "# We can perform different data augmentation\n", 78 | "# techniques to increase the size of the dataset and make your model more robust\n", 79 | "transform = transforms.Compose(\n", 80 | " [transforms.ToTensor(),\n", 81 | " transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n", 82 | "\n", 83 | "# NOTE: PLEASE DON'T CHANGE batch_size and num_workers here. We have limited resources.\n", 84 | "trainset = torchvision.datasets.CIFAR10(root=DATA_ROOT, train=True,\n", 85 | " download=True, transform=transform)\n", 86 | "trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,\n", 87 | " shuffle=True, num_workers=2)\n", 88 | "\n", 89 | "testset = torchvision.datasets.CIFAR10(root=DATA_ROOT, train=False,\n", 90 | " download=True, transform=transform)\n", 91 | "testloader = torch.utils.data.DataLoader(testset, batch_size=4,\n", 92 | " shuffle=False, num_workers=2)\n", 93 | "\n", 94 | "# classes in the CIFAR-10 dataset\n", 95 | "classes = ('plane', 'car', 'bird', 'cat',\n", 96 | " 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')" 97 | ] 98 | }, 99 | { 100 | "cell_type": "markdown", 101 | "metadata": {}, 102 | "source": [ 103 | "Let us show some of the training images, for fun." 
104 | ] 105 | }, 106 | { 107 | "cell_type": "code", 108 | "execution_count": null, 109 | "metadata": { 110 | "collapsed": true 111 | }, 112 | "outputs": [], 113 | "source": [ 114 | "def imshow(img):\n", 115 | "    img = img / 2 + 0.5     # unnormalize\n", 116 | "    npimg = img.numpy()\n", 117 | "    plt.imshow(np.transpose(npimg, (1, 2, 0)))\n", 118 | "\n", 119 | "\n", 120 | "# get some random training images\n", 121 | "dataiter = iter(trainloader)\n", 122 | "images, labels = dataiter.next()\n", 123 | "\n", 124 | "# show images\n", 125 | "imshow(torchvision.utils.make_grid(images))\n", 126 | "# print labels\n", 127 | "print(' '.join('%10s' % classes[labels[j]] for j in range(4)))" 128 | ] 129 | }, 130 | { 131 | "cell_type": "markdown", 132 | "metadata": {}, 133 | "source": [ 134 | "### Convolutional net architecture\n", 135 | "![image](images/tLKYz.png)" 136 | ] 137 | }, 138 | { 139 | "cell_type": "markdown", 140 | "metadata": {}, 141 | "source": [ 142 | "### Define a Convolutional Neural Network" 143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 147 | "execution_count": null, 148 | "metadata": { 149 | "collapsed": true 150 | }, 151 | "outputs": [], 152 | "source": [ 153 | "class Net(nn.Module):\n", 154 | "    def __init__(self):\n", 155 | "        super(Net, self).__init__()\n", 156 | "        self.conv1 = nn.Conv2d(3, 6, 5)\n", 157 | "        self.pool = nn.MaxPool2d(2, 2)\n", 158 | "        self.conv2 = nn.Conv2d(6, 16, 5)\n", 159 | "        self.fc1 = nn.Linear(16 * 5 * 5, 120)\n", 160 | "        self.fc2 = nn.Linear(120, 84)\n", 161 | "        self.fc3 = nn.Linear(84, 10)\n", 162 | "\n", 163 | "    def forward(self, x):\n", 164 | "        x = self.pool(F.relu(self.conv1(x)))\n", 165 | "        x = self.pool(F.relu(self.conv2(x)))\n", 166 | "        x = x.view(-1, 16 * 5 * 5)\n", 167 | "        x = F.relu(self.fc1(x))\n", 168 | "        x = F.relu(self.fc2(x))\n", 169 | "        x = self.fc3(x)\n", 170 | "        return x\n", 171 | "\n", 172 | "\n", 173 | "net = Net().to(device)" 174 | ] 175 | }, 176 | { 177 | "cell_type": "markdown", 178 | "metadata": {}, 179 | "source": [ 180 | "### Define a loss function and an optimizer" 181 | ] 182 | }, 183 | { 184 | "cell_type": "code", 185 | "execution_count": null, 186 | "metadata": { 187 | "collapsed": true 188 | }, 189 | "outputs": [], 190 | "source": [ 191 | "criterion = nn.CrossEntropyLoss()\n", 192 | "optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\n", 193 | "\n", 194 | "# Decay LR by a factor of 0.1 on every epoch\n", 195 | "exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)" 196 | ] 197 | }, 198 | { 199 | "cell_type": "markdown", 200 | "metadata": {}, 201 | "source": [ 202 | "### Train the network\n", 203 | "\n", 204 | "Now, we will be training the network defined above on the CIFAR-10 dataset.\n", 205 | "\n", 206 | "We will train the network for num_epoch epochs (defined above):\n", 207 | "\n", 208 | "- We fetch a batch of images and labels from the dataloader\n", 209 | "- We feed it to the CNN for a forward pass\n", 210 | "- Based on the output of the forward pass, we calculate the loss/error\n", 211 | "- Then we calculate gradients of the loss w.r.t. 
the parameters of the network\n", 212 | "- Finally, we update the parameters based on the gradients using the gradient descent algorithm" 213 | ] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "execution_count": null, 218 | "metadata": { 219 | "collapsed": true 220 | }, 221 | "outputs": [], 222 | "source": [ 223 | "for epoch in range(num_epoch):  # loop over the dataset multiple times\n", 224 | "\n", 225 | "    running_loss = 0.0\n", 226 | "    exp_lr_scheduler.step()\n", 227 | "    for i, data in enumerate(trainloader, 0):\n", 228 | "        # get the inputs\n", 229 | "        inputs, labels = data\n", 230 | "        inputs, labels = inputs.to(device), labels.to(device)\n", 231 | "\n", 232 | "        # zero the parameter gradients\n", 233 | "        optimizer.zero_grad()\n", 234 | "\n", 235 | "        # forward + backward + optimize\n", 236 | "        outputs = net(inputs)\n", 237 | "        loss = criterion(outputs, labels)\n", 238 | "        loss.backward()\n", 239 | "        optimizer.step()\n", 240 | "\n", 241 | "        # print statistics\n", 242 | "        running_loss += loss.item()\n", 243 | "        if i % 2000 == 1999:    # print every 2000 mini-batches\n", 244 | "            print('[%d, %5d] loss: %.3f' %\n", 245 | "                  (epoch + 1, i + 1, running_loss / 2000))\n", 246 | "            running_loss = 0.0\n", 247 | "\n", 248 | "print('Finished Training')" 249 | ] 250 | }, 251 | { 252 | "cell_type": "markdown", 253 | "metadata": {}, 254 | "source": [ 255 | "### Test the network on the test dataset\n", 256 | "\n", 257 | "We will check this by predicting the class label that the neural network outputs, and checking it against the ground-truth. If the prediction is correct, we add the sample to the list of correct predictions." 258 | ] 259 | }, 260 | { 261 | "cell_type": "code", 262 | "execution_count": null, 263 | "metadata": { 264 | "collapsed": true 265 | }, 266 | "outputs": [], 267 | "source": [ 268 | "# Quantitative Analysis\n", 269 | "correct = 0\n", 270 | "total = 0\n", 271 | "with torch.no_grad():\n", 272 | "    for data in testloader:\n", 273 | "        images, labels = data\n", 274 | "        images, labels = images.to(device), labels.to(device)\n", 275 | "        outputs = net(images)\n", 276 | "        _, predicted = torch.max(outputs.data, 1)\n", 277 | "        total += labels.size(0)\n", 278 | "        correct += (predicted == labels).sum().item()\n", 279 | "\n", 280 | "print('Accuracy of the network on the 10000 test images: %d %%' % (\n", 281 | "    100 * correct / total))" 282 | ] 283 | }, 284 | { 285 | "cell_type": "code", 286 | "execution_count": null, 287 | "metadata": { 288 | "collapsed": true 289 | }, 290 | "outputs": [], 291 | "source": [ 292 | "# Qualitative Analysis\n", 293 | "dataiter = iter(testloader)\n", 294 | "images, labels = dataiter.next()\n", 295 | "images, labels = images.to(device), labels.to(device)\n", 296 | "\n", 297 | "# print images\n", 298 | "imshow(torchvision.utils.make_grid(images.cpu()))\n", 299 | "print('GroundTruth: ', ' '.join('%4s' % classes[labels[j]] for j in range(4)))\n", 300 | "\n", 301 | "outputs = net(images)\n", 302 | "_, predicted = torch.max(outputs, 1)\n", 303 | "\n", 304 | "print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]\n", 305 | "                              for j in range(4)))" 306 | ] 307 | } 308 | ], 309 | "metadata": { 310 | "kernelspec": { 311 | "display_name": "Python 3", 312 | "language": "python", 313 | "name": "python3" 314 | }, 315 | "language_info": { 316 | "codemirror_mode": { 317 | "name": "ipython", 318 | "version": 3 319 | }, 320 | "file_extension": ".py", 321 | "mimetype": "text/x-python", 322 | "name": "python", 323 | "nbconvert_exporter": "python", 324 | "pygments_lexer": "ipython3", 325 | 
"version": "3.6.5" 326 | } 327 | }, 328 | "nbformat": 4, 329 | "nbformat_minor": 2 330 | } 331 | -------------------------------------------------------------------------------- /1-cnns/images/qwerty.txt: -------------------------------------------------------------------------------- 1 | 2 | # coding: utf-8 3 | 4 | # ## CNN architecture for Digit Recognition 5 | # ![digit](images/digit-recognition.png) 6 | 7 | # In[1]: 8 | 9 | 10 | # Import all libraries 11 | import os 12 | import numpy as np 13 | from skimage import io, transform 14 | import torch 15 | import torch.utils.data 16 | import torch.nn as nn 17 | import torch.optim as optim 18 | import torch.nn.functional as F 19 | from torch.autograd import Variable 20 | from torchvision import datasets, transforms 21 | import random 22 | import matplotlib.pyplot as plt 23 | get_ipython().magic(u'matplotlib inline') 24 | # Ignore warnings 25 | import warnings 26 | warnings.filterwarnings("ignore") 27 | 28 | plt.ion() # interactive mode 29 | 30 | 31 | # In[2]: 32 | 33 | 34 | def show_samples(image, digit): 35 | plt.imshow(image, cmap = 'gray') 36 | plt.pause(0.001) # pause a bit so that plots are updated 37 | 38 | 39 | # In[4]: 40 | 41 | 42 | class Rescale(object): 43 | """Rescale the image in a sample to a given size. 44 | 45 | Args: 46 | output_size (tuple or tuple): Desired output size. If tuple, output is 47 | matched to output_size. If int, smaller of image edges is matched 48 | to output_size keeping aspect ratio the same. 49 | """ 50 | 51 | def __init__(self, output_size): 52 | assert isinstance(output_size, (int, tuple)) 53 | self.output_size = output_size 54 | 55 | def __call__(self, sample): 56 | image, label = sample['image'], sample['digit'] 57 | h, w = image.shape 58 | if isinstance(self.output_size, int): 59 | if h > w: 60 | new_h, new_w = self.output_size * h / w, self.output_size 61 | else: 62 | new_h, new_w = self.output_size, self.output_size * w / h 63 | else: 64 | new_h, new_w = self.output_size 65 | 66 | new_h, new_w = int(new_h), int(new_w) 67 | 68 | img = transform.resize(image, (new_h, new_w)) 69 | 70 | return {'image': img, 'digit': label} 71 | 72 | class Resize(object): 73 | """Rescale the image in a sample to a given size. 74 | 75 | Args: 76 | output_size (tuple or tuple): Desired output size. If tuple, output is 77 | matched to output_size. If int, smaller of image edges is matched 78 | to output_size keeping aspect ratio the same. 79 | """ 80 | 81 | def __init__(self, output_size): 82 | assert isinstance(output_size, (int, tuple)) 83 | self.output_size = output_size 84 | 85 | def __call__(self, sample): 86 | image, label = sample['image'], sample['digit'] 87 | h, w = image.shape 88 | new_h, new_w = self.output_size 89 | 90 | new_h, new_w = int(new_h), int(new_w) 91 | 92 | img = transform.resize(image, (new_h, new_w)) 93 | return {'image': img, 'digit': label} 94 | 95 | class RandomCrop(object): 96 | """Crop randomly the image in a sample. 97 | 98 | Args: 99 | output_size (tuple or int): Desired output size. If int, square crop 100 | is made. 
101 | """ 102 | 103 | def __init__(self, output_size): 104 | assert isinstance(output_size, (int, tuple)) 105 | if isinstance(output_size, int): 106 | self.output_size = (output_size, output_size) 107 | else: 108 | assert len(output_size) == 2 109 | self.output_size = output_size 110 | 111 | def __call__(self, sample): 112 | image, label = sample['image'], sample['digit'] 113 | h, w = image.shape # changed from [:2] to nothing as the images are gray scale 114 | new_h, new_w = self.output_size 115 | 116 | top = np.random.randint(0, h - new_h) 117 | left = np.random.randint(0, w - new_w) 118 | 119 | image = image[top: top + new_h, 120 | left: left + new_w] 121 | 122 | 123 | 124 | return {'image': image, 'digit': label} 125 | 126 | 127 | class ToTensor(object): 128 | """Convert ndarrays in sample to Tensors.""" 129 | 130 | def __call__(self, sample): 131 | image, label = sample['image'], sample['digit'] 132 | # swap color axis because 133 | # numpy image: H x W x C 134 | # torch image: C X H X W 135 | image = image[np.newaxis,:,:] 136 | return {'image': torch.from_numpy(image).float(), 'digit': (torch.from_numpy(np.array([label])))} 137 | 138 | 139 | # In[6]: 140 | 141 | 142 | class ThreeGramDigitDataset(torch.utils.data.Dataset): 143 | 144 | def __init__(self, gt_file, dataset_root, shuffle = False, transform = None): 145 | 146 | self.dataset_root = dataset_root 147 | self.all_file_names = [x.split(' ')[0] for x in open(gt_file).readlines()] 148 | self.labels = [int(x.split(' ')[1]) for x in open(gt_file).readlines()] 149 | self.shuffle = shuffle 150 | 151 | if self.shuffle: 152 | temp = list(zip(self.all_file_names, self.labels)) 153 | random.shuffle(temp) 154 | self.all_file_names, self.labels = zip(*temp) 155 | 156 | self.transform = transform 157 | 158 | 159 | def __len__(self): 160 | return len(self.all_file_names) 161 | 162 | 163 | def __getitem__(self, idx): 164 | 165 | img_name = os.path.join(self.dataset_root, self.all_file_names[idx]) 166 | image = io.imread(img_name) 167 | sample = {'image': image, 'digit': self.labels[idx]} 168 | 169 | if self.transform: 170 | sample = self.transform(sample) 171 | 172 | return sample 173 | 174 | 175 | 176 | # In[8]: 177 | 178 | 179 | train_dataset = ThreeGramDigitDataset('3Gram_ann_train.txt', '../../data/lab1/imgFolders/3Gram_Digits', shuffle = False) 180 | #printing the no. of training samples 181 | len(train_dataset) 182 | 183 | 184 | # In[9]: 185 | 186 | 187 | fig = plt.figure() 188 | 189 | for i in range(len(train_dataset)): 190 | sample = train_dataset[i] 191 | 192 | print(i, sample['image'].shape) 193 | 194 | ax = plt.subplot(1, 4, i + 1) 195 | plt.tight_layout() 196 | ax.set_title('Sample #{}'.format(str(sample['digit']))) 197 | ax.axis('off') 198 | show_samples(**sample) # way of passing a dictionary 199 | if i == 3: 200 | plt.show() 201 | break 202 | 203 | 204 | # In[10]: 205 | 206 | 207 | scale = Rescale(256) 208 | crop = RandomCrop(64) 209 | composed = transforms.Compose([Rescale(99),Resize((64,128))]) 210 | 211 | # Apply each of the above transforms on sample. 
212 | fig = plt.figure() 213 | sample = train_dataset[1] 214 | 215 | for i, tsfrm in enumerate([scale, composed]): 216 | print(tsfrm) 217 | print(i) 218 | transformed_sample = tsfrm(sample) 219 | print(transformed_sample['image'].shape) 220 | ax = plt.subplot(1, 3, i + 1) 221 | plt.tight_layout() 222 | ax.set_title(type(tsfrm).__name__) 223 | show_samples(**transformed_sample) 224 | 225 | plt.show() 226 | 227 | 228 | # In[11]: 229 | 230 | 231 | train_dataset = ThreeGramDigitDataset('3Gram_ann_train.txt', '../../data/lab1/imgFolders/3Gram_Digits', 232 | shuffle = True, transform=transforms.Compose([ 233 | Resize((64,128)), 234 | ToTensor() 235 | ])) 236 | test_dataset = ThreeGramDigitDataset('3Gram_ann_test.txt', '../../data/lab1/imgFolders/3Gram_Digits', 237 | shuffle = True, transform=transforms.Compose([ 238 | Resize((64,128)), 239 | ToTensor() 240 | ])) 241 | 242 | 243 | # In[12]: 244 | 245 | 246 | # Batch the dataloader 247 | train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=4, 248 | shuffle=True, num_workers=1) 249 | test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=4, 250 | shuffle=False, num_workers=1) 251 | 252 | 253 | # In[14]: 254 | 255 | 256 | # Model definition 257 | import pdb 258 | class Net(nn.Module): 259 | def __init__(self): 260 | super(Net, self).__init__() 261 | self.conv1 = nn.Conv2d(1, 10, kernel_size=5) 262 | self.conv2 = nn.Conv2d(10, 20, kernel_size=5) 263 | self.fc1 = nn.Linear(7540, 50) 264 | self.fc2_1 = nn.Linear(50, 10) 265 | self.fc2_2 = nn.Linear(50, 10) 266 | self.fc2_3 = nn.Linear(50, 10) 267 | 268 | def forward(self, x): 269 | x = F.relu(F.max_pool2d(self.conv1(x), 2)) 270 | x = F.relu(F.max_pool2d(self.conv2(x), 2)) 271 | 272 | x = x.view(-1, 7540) 273 | x = F.relu(self.fc1(x)) 274 | x1 = self.fc2_1(x) 275 | x2 = self.fc2_2(x) 276 | x3 = self.fc2_3(x) 277 | return x1, x2, x3 278 | 279 | model = Net().cuda() 280 | criterion = nn.CrossEntropyLoss() 281 | optimizer = optim.Adam(model.parameters(), lr=0.001) 282 | 283 | 284 | # In[15]: 285 | 286 | 287 | #Training loop 288 | 289 | plotIter = 2000 290 | plotIterCntr = 0 291 | numEpochs = 10 292 | trainLoss = np.zeros((plotIter*numEpochs,1)) 293 | trainIter = np.arange(plotIter*numEpochs) 294 | 295 | for epoch in range(numEpochs): # loop over the dataset multiple times 296 | 297 | running_loss = 0.0 298 | for i, data in enumerate(train_dataloader): 299 | 300 | # get the inputs 301 | inputs, labels = data['image'], data['digit'] 302 | 303 | #get each label separately 304 | labels_1 = torch.div(labels, 100) 305 | labels_2 = torch.div(torch.fmod(labels, 100), 10) 306 | labels_3 = torch.fmod(torch.fmod(labels, 100), 10) 307 | 308 | # wrap them in Variable 309 | inputs, labels_1, labels_2, labels_3 = Variable(inputs.cuda()),Variable(labels_1.cuda()), Variable(labels_2.cuda()), Variable(labels_3.cuda()) 310 | 311 | # zero the parameter gradients 312 | optimizer.zero_grad() 313 | 314 | # forward + backward + optimize 315 | outputs_1, outputs_2, outputs_3 = model(inputs) 316 | loss = criterion(outputs_1, labels_1[:,0])+criterion(outputs_2, labels_2[:,0])+criterion(outputs_3, labels_3[:,0]) 317 | loss.backward() 318 | optimizer.step() 319 | 320 | # print statistics 321 | running_loss += loss.data[0] 322 | 323 | if i % plotIter == plotIter-1: # print every plotIter mini-batches 324 | trainLoss[plotIterCntr] = running_loss / plotIter 325 | plotIterCntr+=1 326 | 327 | print('[%d, %5d] loss: %.3f' % 328 | (epoch + 1, i + 1, running_loss / plotIter)) 329 | running_loss = 0.0 330 | 
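# Note (worked example of the label decomposition in the loop above; the value
# 427 is illustrative): for a 3-digit label of 427,
#   labels_1 = torch.div(427, 100)                  -> 4 (hundreds digit)
#   labels_2 = torch.div(torch.fmod(427, 100), 10)  -> 2 (tens digit)
#   labels_3 = torch.fmod(torch.fmod(427, 100), 10) -> 7 (units digit)
# The three cross-entropy losses are summed, so each head of the network learns
# to predict one digit of the 3-gram independently.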
331 | plt.plot(np.arange(plotIterCntr)*plotIter,trainLoss[0:plotIterCntr], label="train") 332 | plt.xlabel('Iteration') 333 | plt.ylabel('Loss') 334 | plt.show() 335 | 336 | print('Finished Training') 337 | 338 | 339 | # In[17]: 340 | 341 | 342 | ## Prediction of accuracy 343 | 344 | import pdb 345 | correct = 0 346 | total = 0 347 | 348 | # Iterating over the batches returned from testloader 349 | for data in test_dataloader: 350 |     images, labels = data['image'], data['digit'] 351 | 352 |     outputs_1, outputs_2, outputs_3 = model(Variable(images.cuda())) 353 | 354 |     _, predicted_1 = torch.max(outputs_1.data, 1) 355 |     _, predicted_2 = torch.max(outputs_2.data, 1) 356 |     _, predicted_3 = torch.max(outputs_3.data, 1) 357 | 358 |     predicted = predicted_1.mul(100) + predicted_2*10 + predicted_3 359 |     # pdb.set_trace()  # debugging breakpoint -- keep commented out or the loop will halt 360 |     total += labels.size(0) 361 |     labels = labels.cuda() 362 |     correct += (predicted == labels).sum() 363 | 364 | print('Accuracy of the network on the 1350 test images: %d %%' % ( 365 |     100 * correct / total)) 366 | 367 | 368 | # In[ ]: 369 | 370 | 371 | 372 | -------------------------------------------------------------------------------- /1-cnns/4-Optimizer.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Training CNN on CIFAR10 Dataset [1]\n", 8 | "\n", 9 | "We begin with building a CNN architecture for an image classification task on the CIFAR10 dataset. \n", 10 | "\n", 11 | "In this first part of the tutorial, we will understand how to arrange the different architectural components of a CNN, define an appropriate loss, train the network using backpropagation, and finally test it on the test data. To make data loading simple, we use the torchvision package, which is part of PyTorch and provides data loaders for standard datasets such as ImageNet, CIFAR10 and MNIST.\n", 12 | "![CIFAR10](images/cifar10.png)" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "import torch\n", 22 | "import torch.nn as nn\n", 23 | "import torch.nn.functional as F\n", 24 | "import torch.utils.data as Data\n", 25 | "import torchvision\n", 26 | "import torchvision.transforms as transforms\n", 27 | "import torch.optim as optim\n", 28 | "\n", 29 | "#Weight Initialization\n", 30 | "import torch.nn.init as weight_init\n", 31 | "\n", 32 | "import matplotlib.pyplot as plt\n", 33 | "import numpy as np\n", 34 | "%matplotlib inline\n", 35 | "device = torch.device(\"cuda\")\n", 36 | "print(\"torch version: %s\"%torch.__version__)" 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": null, 42 | "metadata": {}, 43 | "outputs": [], 44 | "source": [ 45 | "DATA_ROOT = '/tmp/data/lab1'\n", 46 | "\n", 47 | "# Hyper Parameters\n", 48 | "num_epoch = 10              # number of passes over the training data\n", 49 | "DOWNLOAD_CIFAR10 = True   # set to False if you have already downloaded the dataset" 50 | ] 51 | }, 52 | { 53 | "cell_type": "markdown", 54 | "metadata": {}, 55 | "source": [ 56 | "#### Dataloader\n", 57 | "\n", 58 | "For efficient training, the input pipeline must move data between RAM and the GPU quickly enough to keep the GPU busy. For standard datasets such as MNIST and CIFAR-10, well-structured dataloaders already exist.
In this tutorial, we will be using the CIFAR-10 dataloader.\n", 59 | "\n", 60 | "For more you can visit the following links:\n", 61 | "\n", 62 | "- Existing dataloaders - _\n", 63 | "- How to write dataloaders for your custom dataset - _" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": null, 69 | "metadata": {}, 70 | "outputs": [], 71 | "source": [ 72 | "# We can perform different data augmentation\n", 73 | "# techniques to increase the size of the dataset and make your model more robust\n", 74 | "transform = transforms.Compose(\n", 75 | " [transforms.ToTensor(),\n", 76 | " transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n", 77 | "\n", 78 | "# NOTE: PLEASE DON'T CHANGE batch_size and num_workers here. We have limited resources.\n", 79 | "trainset = torchvision.datasets.CIFAR10(root=DATA_ROOT, train=True,\n", 80 | " download=True, transform=transform)\n", 81 | "trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,\n", 82 | " shuffle=True, num_workers=2)\n", 83 | "\n", 84 | "testset = torchvision.datasets.CIFAR10(root=DATA_ROOT, train=False,\n", 85 | " download=True, transform=transform)\n", 86 | "testloader = torch.utils.data.DataLoader(testset, batch_size=4,\n", 87 | " shuffle=False, num_workers=2)\n", 88 | "\n", 89 | "# classes in the CIFAR-10 dataset\n", 90 | "classes = ('plane', 'car', 'bird', 'cat',\n", 91 | " 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')" 92 | ] 93 | }, 94 | { 95 | "cell_type": "markdown", 96 | "metadata": {}, 97 | "source": [ 98 | "Let us show some of the training images, for fun." 99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": null, 104 | "metadata": {}, 105 | "outputs": [], 106 | "source": [ 107 | "def imshow(img):\n", 108 | " img = img / 2 + 0.5 # unnormalize\n", 109 | " npimg = img.numpy()\n", 110 | " plt.imshow(np.transpose(npimg, (1, 2, 0)))\n", 111 | "\n", 112 | "\n", 113 | "# get some random training images\n", 114 | "dataiter = iter(trainloader)\n", 115 | "images, labels = dataiter.next()\n", 116 | "\n", 117 | "# show images\n", 118 | "imshow(torchvision.utils.make_grid(images))\n", 119 | "# print labels\n", 120 | "print(' '.join('%10s' % classes[labels[j]] for j in range(4)))" 121 | ] 122 | }, 123 | { 124 | "cell_type": "markdown", 125 | "metadata": {}, 126 | "source": [ 127 | "### Define a Convolutional Neural Network" 128 | ] 129 | }, 130 | { 131 | "cell_type": "code", 132 | "execution_count": null, 133 | "metadata": {}, 134 | "outputs": [], 135 | "source": [ 136 | "class Net(nn.Module):\n", 137 | " def __init__(self):\n", 138 | " super(Net, self).__init__()\n", 139 | " self.conv1 = nn.Conv2d(3, 6, 5)\n", 140 | " self.pool = nn.MaxPool2d(2, 2)\n", 141 | " self.conv2 = nn.Conv2d(6, 16, 5)\n", 142 | " self.fc1 = nn.Linear(16 * 5 * 5, 120)\n", 143 | " self.fc2 = nn.Linear(120, 84)\n", 144 | " self.fc3 = nn.Linear(84, 10)\n", 145 | "\n", 146 | " def forward(self, x):\n", 147 | " x = self.pool(F.relu(self.conv1(x)))\n", 148 | " x = self.pool(F.relu(self.conv2(x)))\n", 149 | " x = x.view(-1, 16 * 5 * 5)\n", 150 | " x = F.relu(self.fc1(x))\n", 151 | " x = F.relu(self.fc2(x))\n", 152 | " x = self.fc3(x)\n", 153 | " return x\n", 154 | "\n", 155 | "\n", 156 | "net = Net().to(device)" 157 | ] 158 | }, 159 | { 160 | "cell_type": "markdown", 161 | "metadata": {}, 162 | "source": [ 163 | "### Define a loss function and an optimizer" 164 | ] 165 | }, 166 | { 167 | "cell_type": "code", 168 | "execution_count": null, 169 | "metadata": {}, 170 | "outputs": [], 171 | "source": [ 172 | "# 
optimization scheme can be 'sgd', 'RMSProp', 'Adam', 'Adadelta', 'Adagrad'\n", 173 | "optimization_scheme = \"Adagrad\"\n", 174 | "learning_rate = 0.001\n", 175 | "criterion = nn.CrossEntropyLoss()\n", 176 | "\n", 177 | "if optimization_scheme == 'sgd':\n", 178 | " optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)\n", 179 | "elif optimization_scheme == 'RMSProp':\n", 180 | " optimizer = optim.RMSprop(net.parameters(), lr=learning_rate, weight_decay=0)\n", 181 | "elif optimization_scheme == \"Adadelta\":\n", 182 | " optimizer = optim.Adadelta(net.parameters(), lr=learning_rate, weight_decay=0)\n", 183 | "elif optimization_scheme == \"Adam\":\n", 184 | " optimizer = optim.Adam(net.parameters(), lr=learning_rate, weight_decay=0)\n", 185 | "elif optimization_scheme == \"Adagrad\":\n", 186 | " optimizer = optim.Adagrad(net.parameters(), lr=learning_rate, weight_decay=0)\n", 187 | " " 188 | ] 189 | }, 190 | { 191 | "cell_type": "markdown", 192 | "metadata": {}, 193 | "source": [ 194 | "### Train the network\n", 195 | "\n", 196 | "Now, we will be training the network defined above on CIFAR-10 dataset.\n", 197 | "\n", 198 | "We will train the network for num_epoch times (defined above)\n", 199 | "\n", 200 | "- We fetch a batch of images and labels from dataloader\n", 201 | "- We feed it to CNN network for forward pass\n", 202 | "- Based on the output of forward pass, we calculate loss/error\n", 203 | "- Then we calculate gradients of loss w.r.t. the parameters of the network\n", 204 | "- Finally, we update the parameters based on the gradients using Gradient Descent algorithm" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": null, 210 | "metadata": {}, 211 | "outputs": [], 212 | "source": [ 213 | "for epoch in range(num_epoch): # loop over the dataset multiple times\n", 214 | "\n", 215 | " running_loss = 0.0\n", 216 | " for i, data in enumerate(trainloader, 0):\n", 217 | " # get the inputs\n", 218 | " inputs, labels = data\n", 219 | " inputs, labels = inputs.to(device), labels.to(device)\n", 220 | "\n", 221 | " # zero the parameter gradients\n", 222 | " optimizer.zero_grad()\n", 223 | "\n", 224 | " # forward + backward + optimize\n", 225 | " outputs = net(inputs)\n", 226 | " loss = criterion(outputs, labels)\n", 227 | " loss.backward()\n", 228 | " optimizer.step()\n", 229 | "\n", 230 | " # print statistics\n", 231 | " running_loss += loss.item()\n", 232 | " if i % 2000 == 1999: # print every 2000 mini-batches\n", 233 | " print('[%d, %5d] loss: %.3f' %\n", 234 | " (epoch + 1, i + 1, running_loss / 2000))\n", 235 | " running_loss = 0.0\n", 236 | "\n", 237 | "print('Finished Training')" 238 | ] 239 | }, 240 | { 241 | "cell_type": "markdown", 242 | "metadata": {}, 243 | "source": [ 244 | "### Test the network on the test datset\n", 245 | "\n", 246 | "We will check this by predicting the class label that the neural network outputs, and checking it against the ground-truth. If the prediction is correct, we add the sample to the list of correct predictions." 
247 | ] 248 | }, 249 | { 250 | "cell_type": "code", 251 | "execution_count": null, 252 | "metadata": {}, 253 | "outputs": [], 254 | "source": [ 255 | "# Quantitative Analysis\n", 256 | "correct = 0\n", 257 | "total = 0\n", 258 | "with torch.no_grad():\n", 259 | " for data in testloader:\n", 260 | " images, labels = data\n", 261 | " images, labels = images.to(device), labels.to(device)\n", 262 | " outputs = net(images)\n", 263 | " _, predicted = torch.max(outputs.data, 1)\n", 264 | " total += labels.size(0)\n", 265 | " correct += (predicted == labels).sum().item()\n", 266 | "\n", 267 | "print('Accuracy of the network on the 10000 test images: %d %%' % (\n", 268 | " 100 * correct / total))" 269 | ] 270 | }, 271 | { 272 | "cell_type": "code", 273 | "execution_count": null, 274 | "metadata": {}, 275 | "outputs": [], 276 | "source": [ 277 | "# Qualitative Analysis\n", 278 | "dataiter = iter(testloader)\n", 279 | "images, labels = dataiter.next()\n", 280 | "images, labels = images.to(device), labels.to(device)\n", 281 | "\n", 282 | "# print images\n", 283 | "imshow(torchvision.utils.make_grid(images.cpu()))\n", 284 | "print('GroundTruth: ', ' '.join('%4s' % classes[labels[j]] for j in range(4)))\n", 285 | "\n", 286 | "outputs = net(images)\n", 287 | "_, predicted = torch.max(outputs, 1)\n", 288 | "\n", 289 | "print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]\n", 290 | " for j in range(4)))" 291 | ] 292 | } 293 | ], 294 | "metadata": { 295 | "kernelspec": { 296 | "display_name": "Python 3", 297 | "language": "python", 298 | "name": "python3" 299 | }, 300 | "language_info": { 301 | "codemirror_mode": { 302 | "name": "ipython", 303 | "version": 3 304 | }, 305 | "file_extension": ".py", 306 | "mimetype": "text/x-python", 307 | "name": "python", 308 | "nbconvert_exporter": "python", 309 | "pygments_lexer": "ipython3", 310 | "version": "3.6.5" 311 | } 312 | }, 313 | "nbformat": 4, 314 | "nbformat_minor": 2 315 | } 316 | -------------------------------------------------------------------------------- /1-cnns/.ipynb_checkpoints/4-Optimizer-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Training CNN on CIFAR10 Dataset [1]\n", 8 | "\n", 9 | "We begin with building a CNN architecture for image classification task on CIFAR10 dataset. 
\n", 10 | "\n", 11 | "In this first part of the tutorial, we will understand how to arrange the different architectural components of CNN network, defining the appropriate loss, training the network using backpropagation and finally testing it on the test data.To make data loading simple, we would use the torchvision package created as part of PyTorch which has data loaders for standard datasets such as ImageNet, CIFAR10, MNIST.\n", 12 | "![CIFAR10](images/cifar10.png)" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "import torch\n", 22 | "import torch.nn as nn\n", 23 | "import torch.nn.functional as F\n", 24 | "import torch.utils.data as Data\n", 25 | "import torchvision\n", 26 | "import torchvision.transforms as transforms\n", 27 | "import torch.optim as optim\n", 28 | "\n", 29 | "#Weight Initialization\n", 30 | "import torch.nn.init as weight_init\n", 31 | "\n", 32 | "import matplotlib.pyplot as plt\n", 33 | "import numpy as np\n", 34 | "%matplotlib inline\n", 35 | "device = torch.device(\"cuda\" )\n", 36 | "print(\"torch version: %s\"%torch.__version__)" 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": null, 42 | "metadata": {}, 43 | "outputs": [], 44 | "source": [ 45 | "DATA_ROOT = '/tmp/data/lab1'\n", 46 | "\n", 47 | "# Hyper Parameters\n", 48 | "num_epoch = 10 # train the training data n times, to save time, we just train 1 epoch\n", 49 | "DOWNLOAD_CIFAR10 = True # set to False if you have downloaded" 50 | ] 51 | }, 52 | { 53 | "cell_type": "markdown", 54 | "metadata": {}, 55 | "source": [ 56 | "#### Dataloader\n", 57 | "\n", 58 | "For efficient training, we need to make sure that our code is efficient enough to send data from RAM to GPU and vice-versa. For some standard datasets like MNIST, CIFAR-10 etc., we already have some well structured dataloaders. In this tutorial, we will be using the CIFAR-10 dataloader.\n", 59 | "\n", 60 | "For more you can visit the following links:\n", 61 | "\n", 62 | "- Existing dataloaders - _\n", 63 | "- How to write dataloaders for your custom dataset - _" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": null, 69 | "metadata": {}, 70 | "outputs": [], 71 | "source": [ 72 | "# We can perform different data augmentation\n", 73 | "# techniques to increase the size of the dataset and make your model more robust\n", 74 | "transform = transforms.Compose(\n", 75 | " [transforms.ToTensor(),\n", 76 | " transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n", 77 | "\n", 78 | "# NOTE: PLEASE DON'T CHANGE batch_size and num_workers here. We have limited resources.\n", 79 | "trainset = torchvision.datasets.CIFAR10(root=DATA_ROOT, train=True,\n", 80 | " download=True, transform=transform)\n", 81 | "trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,\n", 82 | " shuffle=True, num_workers=2)\n", 83 | "\n", 84 | "testset = torchvision.datasets.CIFAR10(root=DATA_ROOT, train=False,\n", 85 | " download=True, transform=transform)\n", 86 | "testloader = torch.utils.data.DataLoader(testset, batch_size=4,\n", 87 | " shuffle=False, num_workers=2)\n", 88 | "\n", 89 | "# classes in the CIFAR-10 dataset\n", 90 | "classes = ('plane', 'car', 'bird', 'cat',\n", 91 | " 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')" 92 | ] 93 | }, 94 | { 95 | "cell_type": "markdown", 96 | "metadata": {}, 97 | "source": [ 98 | "Let us show some of the training images, for fun." 
99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": null, 104 | "metadata": {}, 105 | "outputs": [], 106 | "source": [ 107 | "def imshow(img):\n", 108 | " img = img / 2 + 0.5 # unnormalize\n", 109 | " npimg = img.numpy()\n", 110 | " plt.imshow(np.transpose(npimg, (1, 2, 0)))\n", 111 | "\n", 112 | "\n", 113 | "# get some random training images\n", 114 | "dataiter = iter(trainloader)\n", 115 | "images, labels = dataiter.next()\n", 116 | "\n", 117 | "# show images\n", 118 | "imshow(torchvision.utils.make_grid(images))\n", 119 | "# print labels\n", 120 | "print(' '.join('%10s' % classes[labels[j]] for j in range(4)))" 121 | ] 122 | }, 123 | { 124 | "cell_type": "markdown", 125 | "metadata": {}, 126 | "source": [ 127 | "### Define a Convolutional Neural Network" 128 | ] 129 | }, 130 | { 131 | "cell_type": "code", 132 | "execution_count": null, 133 | "metadata": {}, 134 | "outputs": [], 135 | "source": [ 136 | "class Net(nn.Module):\n", 137 | " def __init__(self):\n", 138 | " super(Net, self).__init__()\n", 139 | " self.conv1 = nn.Conv2d(3, 6, 5)\n", 140 | " self.pool = nn.MaxPool2d(2, 2)\n", 141 | " self.conv2 = nn.Conv2d(6, 16, 5)\n", 142 | " self.fc1 = nn.Linear(16 * 5 * 5, 120)\n", 143 | " self.fc2 = nn.Linear(120, 84)\n", 144 | " self.fc3 = nn.Linear(84, 10)\n", 145 | "\n", 146 | " def forward(self, x):\n", 147 | " x = self.pool(F.relu(self.conv1(x)))\n", 148 | " x = self.pool(F.relu(self.conv2(x)))\n", 149 | " x = x.view(-1, 16 * 5 * 5)\n", 150 | " x = F.relu(self.fc1(x))\n", 151 | " x = F.relu(self.fc2(x))\n", 152 | " x = self.fc3(x)\n", 153 | " return x\n", 154 | "\n", 155 | "\n", 156 | "net = Net().to(device)" 157 | ] 158 | }, 159 | { 160 | "cell_type": "markdown", 161 | "metadata": {}, 162 | "source": [ 163 | "### Define a loss function and an optimizer" 164 | ] 165 | }, 166 | { 167 | "cell_type": "code", 168 | "execution_count": null, 169 | "metadata": {}, 170 | "outputs": [], 171 | "source": [ 172 | "# optimization scheme can be 'sgd', 'RMSProp', 'Adam', 'Adadelta', 'Adagrad'\n", 173 | "optimization_scheme = \"Adagrad\"\n", 174 | "learning_rate = 0.001\n", 175 | "criterion = nn.CrossEntropyLoss()\n", 176 | "\n", 177 | "if optimization_scheme == 'sgd':\n", 178 | " optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)\n", 179 | "elif optimization_scheme == 'RMSProp':\n", 180 | " optimizer = optim.RMSprop(net.parameters(), lr=learning_rate, weight_decay=0)\n", 181 | "elif optimization_scheme == \"Adadelta\":\n", 182 | " optimizer = optim.Adadelta(net.parameters(), lr=learning_rate, weight_decay=0)\n", 183 | "elif optimization_scheme == \"Adam\":\n", 184 | " optimizer = optim.Adam(net.parameters(), lr=learning_rate, weight_decay=0)\n", 185 | "elif optimization_scheme == \"Adagrad\":\n", 186 | " optimizer = optim.Adagrad(net.parameters(), lr=learning_rate, weight_decay=0)\n", 187 | " " 188 | ] 189 | }, 190 | { 191 | "cell_type": "markdown", 192 | "metadata": {}, 193 | "source": [ 194 | "### Train the network\n", 195 | "\n", 196 | "Now, we will be training the network defined above on CIFAR-10 dataset.\n", 197 | "\n", 198 | "We will train the network for num_epoch times (defined above)\n", 199 | "\n", 200 | "- We fetch a batch of images and labels from dataloader\n", 201 | "- We feed it to CNN network for forward pass\n", 202 | "- Based on the output of forward pass, we calculate loss/error\n", 203 | "- Then we calculate gradients of loss w.r.t. 
the parameters of the network\n", 204 | "- Finally, we update the parameters based on the gradients using Gradient Descent algorithm" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": null, 210 | "metadata": {}, 211 | "outputs": [], 212 | "source": [ 213 | "for epoch in range(num_epoch): # loop over the dataset multiple times\n", 214 | "\n", 215 | " running_loss = 0.0\n", 216 | " for i, data in enumerate(trainloader, 0):\n", 217 | " # get the inputs\n", 218 | " inputs, labels = data\n", 219 | " inputs, labels = inputs.to(device), labels.to(device)\n", 220 | "\n", 221 | " # zero the parameter gradients\n", 222 | " optimizer.zero_grad()\n", 223 | "\n", 224 | " # forward + backward + optimize\n", 225 | " outputs = net(inputs)\n", 226 | " loss = criterion(outputs, labels)\n", 227 | " loss.backward()\n", 228 | " optimizer.step()\n", 229 | "\n", 230 | " # print statistics\n", 231 | " running_loss += loss.item()\n", 232 | " if i % 2000 == 1999: # print every 2000 mini-batches\n", 233 | " print('[%d, %5d] loss: %.3f' %\n", 234 | " (epoch + 1, i + 1, running_loss / 2000))\n", 235 | " running_loss = 0.0\n", 236 | "\n", 237 | "print('Finished Training')" 238 | ] 239 | }, 240 | { 241 | "cell_type": "markdown", 242 | "metadata": {}, 243 | "source": [ 244 | "### Test the network on the test datset\n", 245 | "\n", 246 | "We will check this by predicting the class label that the neural network outputs, and checking it against the ground-truth. If the prediction is correct, we add the sample to the list of correct predictions." 247 | ] 248 | }, 249 | { 250 | "cell_type": "code", 251 | "execution_count": null, 252 | "metadata": {}, 253 | "outputs": [], 254 | "source": [ 255 | "# Quantitative Analysis\n", 256 | "correct = 0\n", 257 | "total = 0\n", 258 | "with torch.no_grad():\n", 259 | " for data in testloader:\n", 260 | " images, labels = data\n", 261 | " images, labels = images.to(device), labels.to(device)\n", 262 | " outputs = net(images)\n", 263 | " _, predicted = torch.max(outputs.data, 1)\n", 264 | " total += labels.size(0)\n", 265 | " correct += (predicted == labels).sum().item()\n", 266 | "\n", 267 | "print('Accuracy of the network on the 10000 test images: %d %%' % (\n", 268 | " 100 * correct / total))" 269 | ] 270 | }, 271 | { 272 | "cell_type": "code", 273 | "execution_count": null, 274 | "metadata": {}, 275 | "outputs": [], 276 | "source": [ 277 | "# Qualitative Analysis\n", 278 | "dataiter = iter(testloader)\n", 279 | "images, labels = dataiter.next()\n", 280 | "images, labels = images.to(device), labels.to(device)\n", 281 | "\n", 282 | "# print images\n", 283 | "imshow(torchvision.utils.make_grid(images.cpu()))\n", 284 | "print('GroundTruth: ', ' '.join('%4s' % classes[labels[j]] for j in range(4)))\n", 285 | "\n", 286 | "outputs = net(images)\n", 287 | "_, predicted = torch.max(outputs, 1)\n", 288 | "\n", 289 | "print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]\n", 290 | " for j in range(4)))" 291 | ] 292 | } 293 | ], 294 | "metadata": { 295 | "kernelspec": { 296 | "display_name": "Python 3", 297 | "language": "python", 298 | "name": "python3" 299 | }, 300 | "language_info": { 301 | "codemirror_mode": { 302 | "name": "ipython", 303 | "version": 3 304 | }, 305 | "file_extension": ".py", 306 | "mimetype": "text/x-python", 307 | "name": "python", 308 | "nbconvert_exporter": "python", 309 | "pygments_lexer": "ipython3", 310 | "version": "3.6.5" 311 | } 312 | }, 313 | "nbformat": 4, 314 | "nbformat_minor": 2 315 | } 316 | 
-------------------------------------------------------------------------------- /1-cnns/0-PyTorch-Basics.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## What is PyTorch?\n", 8 | "\n", 9 | "It’s a Python based scientific computing package targeted at two sets of\n", 10 | "audiences:\n", 11 | "- A replacement for numpy to use the power of GPUs\n", 12 | "- a deep learning research platform that provides maximum flexibility and speed" 13 | ] 14 | }, 15 | { 16 | "cell_type": "markdown", 17 | "metadata": {}, 18 | "source": [ 19 | "#### Tensors\n", 20 | "Tensors are similar to numpy’s ndarrays, with the addition being that they can also be used on a GPU to accelerate computing." 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 1, 26 | "metadata": {}, 27 | "outputs": [ 28 | { 29 | "name": "stdout", 30 | "output_type": "stream", 31 | "text": [ 32 | "0.4.0\n" 33 | ] 34 | } 35 | ], 36 | "source": [ 37 | "# import Pytorch\n", 38 | "import torch\n", 39 | "print(torch.__version__)" 40 | ] 41 | }, 42 | { 43 | "cell_type": "markdown", 44 | "metadata": {}, 45 | "source": [ 46 | " Construct a 5x3 matrix, uninitialized:" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": 2, 52 | "metadata": {}, 53 | "outputs": [ 54 | { 55 | "name": "stdout", 56 | "output_type": "stream", 57 | "text": [ 58 | "tensor([[ 1.2303e-37, 0.0000e+00, 5.7453e-44],\n", 59 | " [ 0.0000e+00, nan, 6.4893e-07],\n", 60 | " [ 1.3733e-14, 6.4076e+07, 2.0706e-19],\n", 61 | " [ 7.3909e+22, 2.4176e-12, 1.1625e+33],\n", 62 | " [ 8.9605e-01, 1.1632e+33, 5.6003e-02]])\n" 63 | ] 64 | } 65 | ], 66 | "source": [ 67 | "x = torch.Tensor(5, 3)\n", 68 | "print(x)" 69 | ] 70 | }, 71 | { 72 | "cell_type": "markdown", 73 | "metadata": {}, 74 | "source": [ 75 | " Construct a randomly initialized matrix" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": 3, 81 | "metadata": {}, 82 | "outputs": [ 83 | { 84 | "name": "stdout", 85 | "output_type": "stream", 86 | "text": [ 87 | "tensor([[ 0.9453, 0.5786, 0.8760],\n", 88 | " [ 0.6387, 0.5484, 0.3137],\n", 89 | " [ 0.2800, 0.0729, 0.2273],\n", 90 | " [ 0.0281, 0.9334, 0.3736],\n", 91 | " [ 0.7685, 0.6841, 0.3241]])\n" 92 | ] 93 | } 94 | ], 95 | "source": [ 96 | "x = torch.rand(5, 3)\n", 97 | "print(x)" 98 | ] 99 | }, 100 | { 101 | "cell_type": "markdown", 102 | "metadata": {}, 103 | "source": [ 104 | " Get its size" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": 4, 110 | "metadata": {}, 111 | "outputs": [ 112 | { 113 | "data": { 114 | "text/plain": [ 115 | "torch.Size([5, 3])" 116 | ] 117 | }, 118 | "execution_count": 4, 119 | "metadata": {}, 120 | "output_type": "execute_result" 121 | } 122 | ], 123 | "source": [ 124 | "x.size()" 125 | ] 126 | }, 127 | { 128 | "cell_type": "markdown", 129 | "metadata": {}, 130 | "source": [ 131 | "Operations supported:" 132 | ] 133 | }, 134 | { 135 | "cell_type": "code", 136 | "execution_count": 5, 137 | "metadata": {}, 138 | "outputs": [ 139 | { 140 | "name": "stdout", 141 | "output_type": "stream", 142 | "text": [ 143 | "tensor([[ 0.9513, 1.1290, 0.9448],\n", 144 | " [ 0.7734, 1.2357, 1.0777],\n", 145 | " [ 0.4542, 0.3850, 0.9336],\n", 146 | " [ 0.1764, 0.9566, 1.2332],\n", 147 | " [ 1.0841, 0.6899, 0.8097]])\n", 148 | "tensor([[ 0.9513, 1.1290, 0.9448],\n", 149 | " [ 0.7734, 1.2357, 1.0777],\n", 150 | " [ 0.4542, 0.3850, 0.9336],\n", 151 | " [ 
0.1764, 0.9566, 1.2332],\n", 152 | " [ 1.0841, 0.6899, 0.8097]])\n" 153 | ] 154 | } 155 | ], 156 | "source": [ 157 | "y = torch.rand(5, 3)\n", 158 | "print(x + y)\n", 159 | "print(torch.add(x,y))" 160 | ] 161 | }, 162 | { 163 | "cell_type": "markdown", 164 | "metadata": {}, 165 | "source": [ 166 | "In-place operation:" 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": 6, 172 | "metadata": {}, 173 | "outputs": [ 174 | { 175 | "name": "stdout", 176 | "output_type": "stream", 177 | "text": [ 178 | "tensor([[ 0.9513, 1.1290, 0.9448],\n", 179 | " [ 0.7734, 1.2357, 1.0777],\n", 180 | " [ 0.4542, 0.3850, 0.9336],\n", 181 | " [ 0.1764, 0.9566, 1.2332],\n", 182 | " [ 1.0841, 0.6899, 0.8097]])\n" 183 | ] 184 | } 185 | ], 186 | "source": [ 187 | "y.add_(x)\n", 188 | "print(y)" 189 | ] 190 | }, 191 | { 192 | "cell_type": "markdown", 193 | "metadata": {}, 194 | "source": [ 195 | "**Note:** Any operation that mutates a tensor in-place is post-fixed with a _." 196 | ] 197 | }, 198 | { 199 | "cell_type": "markdown", 200 | "metadata": {}, 201 | "source": [ 202 | "100+ Tensor operations, including transposing, indexing, slicing, mathematical operations, linear algebra, random numbers, etc are described here _" 203 | ] 204 | }, 205 | { 206 | "cell_type": "markdown", 207 | "metadata": {}, 208 | "source": [ 209 | "#### Numpy Bridge\n", 210 | "\n", 211 | "Converting a torch Tensor to a numpy array and vice versa is a breeze.\n", 212 | "**Note:** The torch Tensor and numpy array will share their underlying memory locations, and changing one will change the other." 213 | ] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "execution_count": 7, 218 | "metadata": {}, 219 | "outputs": [ 220 | { 221 | "name": "stdout", 222 | "output_type": "stream", 223 | "text": [ 224 | "tensor([ 1., 1., 1., 1., 1.])\n" 225 | ] 226 | } 227 | ], 228 | "source": [ 229 | "# Converting torch Tensor to numpy Array\n", 230 | "a = torch.ones(5)\n", 231 | "print(a)" 232 | ] 233 | }, 234 | { 235 | "cell_type": "code", 236 | "execution_count": 8, 237 | "metadata": {}, 238 | "outputs": [ 239 | { 240 | "name": "stdout", 241 | "output_type": "stream", 242 | "text": [ 243 | "[1. 1. 1. 1. 1.]\n" 244 | ] 245 | } 246 | ], 247 | "source": [ 248 | "b = a.numpy()\n", 249 | "print(b)" 250 | ] 251 | }, 252 | { 253 | "cell_type": "markdown", 254 | "metadata": {}, 255 | "source": [ 256 | "Now, Try changing the values of b and then print a" 257 | ] 258 | }, 259 | { 260 | "cell_type": "code", 261 | "execution_count": 9, 262 | "metadata": {}, 263 | "outputs": [ 264 | { 265 | "name": "stdout", 266 | "output_type": "stream", 267 | "text": [ 268 | "tensor([ 2., 2., 2., 2., 2.])\n", 269 | "[2. 2. 2. 2. 2.]\n" 270 | ] 271 | } 272 | ], 273 | "source": [ 274 | " a.add_(1)\n", 275 | "print(a)\n", 276 | "print(b)" 277 | ] 278 | }, 279 | { 280 | "cell_type": "markdown", 281 | "metadata": {}, 282 | "source": [ 283 | "Converting numpy arrays to torch tensors" 284 | ] 285 | }, 286 | { 287 | "cell_type": "code", 288 | "execution_count": 10, 289 | "metadata": {}, 290 | "outputs": [ 291 | { 292 | "name": "stdout", 293 | "output_type": "stream", 294 | "text": [ 295 | "[2. 2. 2. 2. 
2.]\n", 296 | "tensor([ 2., 2., 2., 2., 2.], dtype=torch.float64)\n" 297 | ] 298 | } 299 | ], 300 | "source": [ 301 | "import numpy as np\n", 302 | "a = np.ones(5)\n", 303 | "b = torch.from_numpy(a)\n", 304 | "np.add(a, 1, out=a)\n", 305 | "print(a)\n", 306 | "print(b)" 307 | ] 308 | }, 309 | { 310 | "cell_type": "markdown", 311 | "metadata": {}, 312 | "source": [ 313 | "#### CUDA Tensors (Using GPU)" 314 | ] 315 | }, 316 | { 317 | "cell_type": "code", 318 | "execution_count": 11, 319 | "metadata": {}, 320 | "outputs": [], 321 | "source": [ 322 | " # let us run this cell only if CUDA is available\n", 323 | "if torch.cuda.is_available():\n", 324 | " x = x.to('cuda')\n", 325 | " y = y.to('cuda')\n", 326 | " x + y" 327 | ] 328 | }, 329 | { 330 | "cell_type": "markdown", 331 | "metadata": {}, 332 | "source": [ 333 | "## Autograd: automatic differentiation\n", 334 | "Central to all neural networks in PyTorch is the autograd package. The autograd package provides automatic differentiation for all operations on Tensors. It is a define-by-run framework, which means that your backprop is defined by how your code is run, and that every single iteration can be different.\n", 335 | "\n", 336 | "### requires_grad parameter\n", 337 | "If you set its attribute ```.requires_grad``` as ```True```, it starts to track all operations on it. When you finish your computation you can call ```.backward()``` and have all the gradients computed automatically. The gradient for this tensor will be accumulated into ```.grad``` attribute." 338 | ] 339 | }, 340 | { 341 | "cell_type": "code", 342 | "execution_count": 12, 343 | "metadata": {}, 344 | "outputs": [ 345 | { 346 | "name": "stdout", 347 | "output_type": "stream", 348 | "text": [ 349 | "tensor([[ 1., 1.],\n", 350 | " [ 1., 1.]])\n" 351 | ] 352 | } 353 | ], 354 | "source": [ 355 | "x = torch.ones(2, 2, requires_grad=True)\n", 356 | "print(x)" 357 | ] 358 | }, 359 | { 360 | "cell_type": "code", 361 | "execution_count": 13, 362 | "metadata": {}, 363 | "outputs": [ 364 | { 365 | "name": "stdout", 366 | "output_type": "stream", 367 | "text": [ 368 | "tensor([[ 3., 3.],\n", 369 | " [ 3., 3.]])\n" 370 | ] 371 | } 372 | ], 373 | "source": [ 374 | "y = x + 2\n", 375 | "print(y)" 376 | ] 377 | }, 378 | { 379 | "cell_type": "markdown", 380 | "metadata": {}, 381 | "source": [ 382 | "```y``` was created as a result of an operation, so it has a ```grad_fn```." 383 | ] 384 | }, 385 | { 386 | "cell_type": "code", 387 | "execution_count": 14, 388 | "metadata": {}, 389 | "outputs": [ 390 | { 391 | "name": "stdout", 392 | "output_type": "stream", 393 | "text": [ 394 | "\n" 395 | ] 396 | } 397 | ], 398 | "source": [ 399 | "print(y.grad_fn)" 400 | ] 401 | }, 402 | { 403 | "cell_type": "code", 404 | "execution_count": 15, 405 | "metadata": {}, 406 | "outputs": [ 407 | { 408 | "name": "stdout", 409 | "output_type": "stream", 410 | "text": [ 411 | "tensor([[ 27., 27.],\n", 412 | " [ 27., 27.]]) tensor(27.)\n" 413 | ] 414 | } 415 | ], 416 | "source": [ 417 | "z = y * y * 3\n", 418 | "out = z.mean()\n", 419 | "print(z,out)" 420 | ] 421 | }, 422 | { 423 | "cell_type": "markdown", 424 | "metadata": {}, 425 | "source": [ 426 | "### Gradients\n", 427 | "Let’s backprop now Because out contains a single scalar, ```out.backward()``` is equivalent to ```out.backward(torch.tensor(1))```." 
428 | ] 429 | }, 430 | { 431 | "cell_type": "code", 432 | "execution_count": 16, 433 | "metadata": {}, 434 | "outputs": [ 435 | { 436 | "name": "stdout", 437 | "output_type": "stream", 438 | "text": [ 439 | "tensor([[ 4.5000, 4.5000],\n", 440 | " [ 4.5000, 4.5000]])\n" 441 | ] 442 | } 443 | ], 444 | "source": [ 445 | "out.backward()\n", 446 | "print(x.grad)" 447 | ] 448 | }, 449 | { 450 | "cell_type": "markdown", 451 | "metadata": {}, 452 | "source": [ 453 | "We got the tensor matrix of ```4.5```. Lets call the ```out``` tensor \"O\". so we have that $O = \\frac{1}{4} \\sum_i z_i\\ \\ \\ z_i=3(x_i+2)^2\\ and\\ z_i |_{x_i=1} = 27.$ Therefore $ \\frac{\\partial o}{\\partial x_i} = 4.5$ " 454 | ] 455 | }, 456 | { 457 | "cell_type": "markdown", 458 | "metadata": {}, 459 | "source": [ 460 | "[For the documentation, read here.](https://pytorch.org/docs/stable/autograd.html)" 461 | ] 462 | } 463 | ], 464 | "metadata": { 465 | "kernelspec": { 466 | "display_name": "Python 3", 467 | "language": "python", 468 | "name": "python3" 469 | }, 470 | "language_info": { 471 | "codemirror_mode": { 472 | "name": "ipython", 473 | "version": 3 474 | }, 475 | "file_extension": ".py", 476 | "mimetype": "text/x-python", 477 | "name": "python", 478 | "nbconvert_exporter": "python", 479 | "pygments_lexer": "ipython3", 480 | "version": "3.6.5" 481 | } 482 | }, 483 | "nbformat": 4, 484 | "nbformat_minor": 2 485 | } 486 | -------------------------------------------------------------------------------- /1-cnns/.ipynb_checkpoints/0-PyTorch-Basics-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## What is PyTorch?\n", 8 | "\n", 9 | "It’s a Python based scientific computing package targeted at two sets of\n", 10 | "audiences:\n", 11 | "- A replacement for numpy to use the power of GPUs\n", 12 | "- a deep learning research platform that provides maximum flexibility and speed" 13 | ] 14 | }, 15 | { 16 | "cell_type": "markdown", 17 | "metadata": {}, 18 | "source": [ 19 | "#### Tensors\n", 20 | "Tensors are similar to numpy’s ndarrays, with the addition being that they can also be used on a GPU to accelerate computing." 
21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 1, 26 | "metadata": {}, 27 | "outputs": [ 28 | { 29 | "name": "stdout", 30 | "output_type": "stream", 31 | "text": [ 32 | "0.4.0\n" 33 | ] 34 | } 35 | ], 36 | "source": [ 37 | "# import Pytorch\n", 38 | "import torch\n", 39 | "print(torch.__version__)" 40 | ] 41 | }, 42 | { 43 | "cell_type": "markdown", 44 | "metadata": {}, 45 | "source": [ 46 | " Construct a 5x3 matrix, uninitialized:" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": 2, 52 | "metadata": {}, 53 | "outputs": [ 54 | { 55 | "name": "stdout", 56 | "output_type": "stream", 57 | "text": [ 58 | "tensor([[ 1.8158e-37, 0.0000e+00, 5.7453e-44],\n", 59 | " [ 0.0000e+00, nan, 1.9433e-19],\n", 60 | " [ 1.3733e-14, 6.4076e+07, 2.0706e-19],\n", 61 | " [ 7.3909e+22, 2.4176e-12, 1.1625e+33],\n", 62 | " [ 8.9605e-01, 1.1632e+33, 5.6003e-02]])\n" 63 | ] 64 | } 65 | ], 66 | "source": [ 67 | "x = torch.Tensor(5, 3)\n", 68 | "print(x)" 69 | ] 70 | }, 71 | { 72 | "cell_type": "markdown", 73 | "metadata": {}, 74 | "source": [ 75 | " Construct a randomly initialized matrix" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": 3, 81 | "metadata": {}, 82 | "outputs": [ 83 | { 84 | "name": "stdout", 85 | "output_type": "stream", 86 | "text": [ 87 | "tensor([[ 0.6684, 0.3686, 0.0791],\n", 88 | " [ 0.1594, 0.5871, 0.4040],\n", 89 | " [ 0.0545, 0.8134, 0.0757],\n", 90 | " [ 0.8284, 0.3479, 0.0614],\n", 91 | " [ 0.5253, 0.2756, 0.3560]])\n" 92 | ] 93 | } 94 | ], 95 | "source": [ 96 | "x = torch.rand(5, 3)\n", 97 | "print(x)" 98 | ] 99 | }, 100 | { 101 | "cell_type": "markdown", 102 | "metadata": {}, 103 | "source": [ 104 | " Get its size" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": 4, 110 | "metadata": {}, 111 | "outputs": [ 112 | { 113 | "data": { 114 | "text/plain": [ 115 | "torch.Size([5, 3])" 116 | ] 117 | }, 118 | "execution_count": 4, 119 | "metadata": {}, 120 | "output_type": "execute_result" 121 | } 122 | ], 123 | "source": [ 124 | "x.size()" 125 | ] 126 | }, 127 | { 128 | "cell_type": "markdown", 129 | "metadata": {}, 130 | "source": [ 131 | "Operations supported:" 132 | ] 133 | }, 134 | { 135 | "cell_type": "code", 136 | "execution_count": 5, 137 | "metadata": {}, 138 | "outputs": [ 139 | { 140 | "name": "stdout", 141 | "output_type": "stream", 142 | "text": [ 143 | "tensor([[ 1.5397, 0.4976, 0.9709],\n", 144 | " [ 0.9179, 0.8417, 0.9053],\n", 145 | " [ 0.7814, 1.1443, 0.8242],\n", 146 | " [ 1.5619, 0.9236, 0.5042],\n", 147 | " [ 1.3648, 0.4559, 1.1128]])\n", 148 | "tensor([[ 1.5397, 0.4976, 0.9709],\n", 149 | " [ 0.9179, 0.8417, 0.9053],\n", 150 | " [ 0.7814, 1.1443, 0.8242],\n", 151 | " [ 1.5619, 0.9236, 0.5042],\n", 152 | " [ 1.3648, 0.4559, 1.1128]])\n" 153 | ] 154 | } 155 | ], 156 | "source": [ 157 | "y = torch.rand(5, 3)\n", 158 | "print(x + y)\n", 159 | "print(torch.add(x,y))" 160 | ] 161 | }, 162 | { 163 | "cell_type": "markdown", 164 | "metadata": {}, 165 | "source": [ 166 | "In-place operation:" 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": 6, 172 | "metadata": {}, 173 | "outputs": [ 174 | { 175 | "name": "stdout", 176 | "output_type": "stream", 177 | "text": [ 178 | "tensor([[ 1.5397, 0.4976, 0.9709],\n", 179 | " [ 0.9179, 0.8417, 0.9053],\n", 180 | " [ 0.7814, 1.1443, 0.8242],\n", 181 | " [ 1.5619, 0.9236, 0.5042],\n", 182 | " [ 1.3648, 0.4559, 1.1128]])\n" 183 | ] 184 | } 185 | ], 186 | "source": [ 187 | "y.add_(x)\n", 188 | "print(y)" 189 | ] 190 | }, 191 
| { 192 | "cell_type": "markdown", 193 | "metadata": {}, 194 | "source": [ 195 | "**Note:** Any operation that mutates a tensor in-place is post-fixed with a _." 196 | ] 197 | }, 198 | { 199 | "cell_type": "markdown", 200 | "metadata": {}, 201 | "source": [ 202 | "100+ Tensor operations, including transposing, indexing, slicing, mathematical operations, linear algebra, random numbers, etc are described here _" 203 | ] 204 | }, 205 | { 206 | "cell_type": "markdown", 207 | "metadata": {}, 208 | "source": [ 209 | "#### Numpy Bridge\n", 210 | "\n", 211 | "Converting a torch Tensor to a numpy array and vice versa is a breeze.\n", 212 | "**Note:** The torch Tensor and numpy array will share their underlying memory locations, and changing one will change the other." 213 | ] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "execution_count": 7, 218 | "metadata": {}, 219 | "outputs": [ 220 | { 221 | "name": "stdout", 222 | "output_type": "stream", 223 | "text": [ 224 | "tensor([ 1., 1., 1., 1., 1.])\n" 225 | ] 226 | } 227 | ], 228 | "source": [ 229 | "# Converting torch Tensor to numpy Array\n", 230 | "a = torch.ones(5)\n", 231 | "print(a)" 232 | ] 233 | }, 234 | { 235 | "cell_type": "code", 236 | "execution_count": 8, 237 | "metadata": {}, 238 | "outputs": [ 239 | { 240 | "name": "stdout", 241 | "output_type": "stream", 242 | "text": [ 243 | "[ 1. 1. 1. 1. 1.]\n" 244 | ] 245 | } 246 | ], 247 | "source": [ 248 | "b = a.numpy()\n", 249 | "print(b)" 250 | ] 251 | }, 252 | { 253 | "cell_type": "markdown", 254 | "metadata": {}, 255 | "source": [ 256 | "Now, Try changing the values of b and then print a" 257 | ] 258 | }, 259 | { 260 | "cell_type": "code", 261 | "execution_count": 9, 262 | "metadata": {}, 263 | "outputs": [ 264 | { 265 | "name": "stdout", 266 | "output_type": "stream", 267 | "text": [ 268 | "tensor([ 2., 2., 2., 2., 2.])\n", 269 | "[ 2. 2. 2. 2. 2.]\n" 270 | ] 271 | } 272 | ], 273 | "source": [ 274 | " a.add_(1)\n", 275 | "print(a)\n", 276 | "print(b)" 277 | ] 278 | }, 279 | { 280 | "cell_type": "markdown", 281 | "metadata": {}, 282 | "source": [ 283 | "Converting numpy arrays to torch tensors" 284 | ] 285 | }, 286 | { 287 | "cell_type": "code", 288 | "execution_count": 10, 289 | "metadata": {}, 290 | "outputs": [ 291 | { 292 | "name": "stdout", 293 | "output_type": "stream", 294 | "text": [ 295 | "[ 2. 2. 2. 2. 2.]\n", 296 | "tensor([ 2., 2., 2., 2., 2.], dtype=torch.float64)\n" 297 | ] 298 | } 299 | ], 300 | "source": [ 301 | "import numpy as np\n", 302 | "a = np.ones(5)\n", 303 | "b = torch.from_numpy(a)\n", 304 | "np.add(a, 1, out=a)\n", 305 | "print(a)\n", 306 | "print(b)" 307 | ] 308 | }, 309 | { 310 | "cell_type": "markdown", 311 | "metadata": {}, 312 | "source": [ 313 | "#### CUDA Tensors (Using GPU)" 314 | ] 315 | }, 316 | { 317 | "cell_type": "code", 318 | "execution_count": 11, 319 | "metadata": {}, 320 | "outputs": [], 321 | "source": [ 322 | " # let us run this cell only if CUDA is available\n", 323 | "if torch.cuda.is_available():\n", 324 | " x = x.to('cuda')\n", 325 | " y = y.to('cuda')\n", 326 | " x + y" 327 | ] 328 | }, 329 | { 330 | "cell_type": "markdown", 331 | "metadata": {}, 332 | "source": [ 333 | "## Autograd: automatic differentiation\n", 334 | "Central to all neural networks in PyTorch is the autograd package. The autograd package provides automatic differentiation for all operations on Tensors. 
It is a define-by-run framework, which means that your backprop is defined by how your code is run, and that every single iteration can be different.\n", 335 | "\n", 336 | "### requires_grad parameter\n", 337 | "If you set its attribute ```.requires_grad``` as ```True```, it starts to track all operations on it. When you finish your computation you can call ```.backward()``` and have all the gradients computed automatically. The gradient for this tensor will be accumulated into ```.grad``` attribute." 338 | ] 339 | }, 340 | { 341 | "cell_type": "code", 342 | "execution_count": 12, 343 | "metadata": {}, 344 | "outputs": [ 345 | { 346 | "name": "stdout", 347 | "output_type": "stream", 348 | "text": [ 349 | "tensor([[ 1., 1.],\n", 350 | " [ 1., 1.]])\n" 351 | ] 352 | } 353 | ], 354 | "source": [ 355 | "x = torch.ones(2, 2, requires_grad=True)\n", 356 | "print(x)" 357 | ] 358 | }, 359 | { 360 | "cell_type": "code", 361 | "execution_count": 13, 362 | "metadata": {}, 363 | "outputs": [ 364 | { 365 | "name": "stdout", 366 | "output_type": "stream", 367 | "text": [ 368 | "tensor([[ 3., 3.],\n", 369 | " [ 3., 3.]])\n" 370 | ] 371 | } 372 | ], 373 | "source": [ 374 | "y = x + 2\n", 375 | "print(y)" 376 | ] 377 | }, 378 | { 379 | "cell_type": "markdown", 380 | "metadata": {}, 381 | "source": [ 382 | "```y``` was created as a result of an operation, so it has a ```grad_fn```." 383 | ] 384 | }, 385 | { 386 | "cell_type": "code", 387 | "execution_count": 14, 388 | "metadata": {}, 389 | "outputs": [ 390 | { 391 | "name": "stdout", 392 | "output_type": "stream", 393 | "text": [ 394 | "\n" 395 | ] 396 | } 397 | ], 398 | "source": [ 399 | "print(y.grad_fn)" 400 | ] 401 | }, 402 | { 403 | "cell_type": "code", 404 | "execution_count": 15, 405 | "metadata": {}, 406 | "outputs": [ 407 | { 408 | "name": "stdout", 409 | "output_type": "stream", 410 | "text": [ 411 | "tensor([[ 27., 27.],\n", 412 | " [ 27., 27.]]) tensor(27.)\n" 413 | ] 414 | } 415 | ], 416 | "source": [ 417 | "z = y * y * 3\n", 418 | "out = z.mean()\n", 419 | "print(z,out)" 420 | ] 421 | }, 422 | { 423 | "cell_type": "markdown", 424 | "metadata": {}, 425 | "source": [ 426 | "### Gradients\n", 427 | "Let’s backprop now Because out contains a single scalar, ```out.backward()``` is equivalent to ```out.backward(torch.tensor(1))```." 428 | ] 429 | }, 430 | { 431 | "cell_type": "code", 432 | "execution_count": 16, 433 | "metadata": {}, 434 | "outputs": [ 435 | { 436 | "name": "stdout", 437 | "output_type": "stream", 438 | "text": [ 439 | "tensor([[ 4.5000, 4.5000],\n", 440 | " [ 4.5000, 4.5000]])\n" 441 | ] 442 | } 443 | ], 444 | "source": [ 445 | "out.backward()\n", 446 | "print(x.grad)" 447 | ] 448 | }, 449 | { 450 | "cell_type": "markdown", 451 | "metadata": {}, 452 | "source": [ 453 | "We got the tensor matrix of ```4.5```. Lets call the ```out``` tensor \"O\". 
so we have that $O = \\frac{1}{4} \\sum_i z_i\\ \\ \\ z_i=3(x_i+2)^2\\ and\\ z_i |_{x_i=1} = 27.$ Therefore $ \\frac{\\partial o}{\\partial x_i} = 4.5$ " 454 | ] 455 | }, 456 | { 457 | "cell_type": "markdown", 458 | "metadata": {}, 459 | "source": [ 460 | "[For the documentation, read here.](https://pytorch.org/docs/stable/autograd.html)" 461 | ] 462 | } 463 | ], 464 | "metadata": { 465 | "kernelspec": { 466 | "display_name": "Python 3", 467 | "language": "python", 468 | "name": "python3" 469 | }, 470 | "language_info": { 471 | "codemirror_mode": { 472 | "name": "ipython", 473 | "version": 3 474 | }, 475 | "file_extension": ".py", 476 | "mimetype": "text/x-python", 477 | "name": "python", 478 | "nbconvert_exporter": "python", 479 | "pygments_lexer": "ipython3", 480 | "version": "3.6.5" 481 | } 482 | }, 483 | "nbformat": 4, 484 | "nbformat_minor": 2 485 | } 486 | -------------------------------------------------------------------------------- /rnn_lab/4-NewsHeadlines_SummerSchool.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Generate News Headlines using RNN\n", 8 | "We will use the kaggle Indian news headline dataset (https://www.kaggle.com/therohk/india-headlines-news-dataset/downloads/india-headlines-news-dataset.zip/5)
\n", 9 | "A cleaned dataset of 100,000 is produced from this. We want to generate new headlines." 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 1, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "import torch\n", 19 | "import torch.nn as nn\n", 20 | "import torch.nn.functional as F\n", 21 | "import torch.optim as optim" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": 2, 27 | "metadata": {}, 28 | "outputs": [ 29 | { 30 | "name": "stdout", 31 | "output_type": "stream", 32 | "text": [ 33 | "Yes! GPU!\n" 34 | ] 35 | } 36 | ], 37 | "source": [ 38 | "use_cuda = torch.cuda.is_available()\n", 39 | "device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n", 40 | "if use_cuda:\n", 41 | " print('Yes! GPU!')" 42 | ] 43 | }, 44 | { 45 | "cell_type": "markdown", 46 | "metadata": {}, 47 | "source": [ 48 | "## Read Data" 49 | ] 50 | }, 51 | { 52 | "cell_type": "code", 53 | "execution_count": 5, 54 | "metadata": {}, 55 | "outputs": [ 56 | { 57 | "name": "stdout", 58 | "output_type": "stream", 59 | "text": [ 60 | "10000\n" 61 | ] 62 | } 63 | ], 64 | "source": [ 65 | "with open('data/news-headlines-trimmed.txt') as f:\n", 66 | " data = f.read()\n", 67 | "\n", 68 | "data = data.split('\\n')[:10000] # fast training\n", 69 | "print(len(data))" 70 | ] 71 | }, 72 | { 73 | "cell_type": "markdown", 74 | "metadata": {}, 75 | "source": [ 76 | "Start of Sentence (SOS) is added to the begining of every headline.
\n", 77 | "End of Sentence (EOS) is to indicate when to stop generating characters." 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": 6, 83 | "metadata": {}, 84 | "outputs": [], 85 | "source": [ 86 | "SOS = 0\n", 87 | "EOS = 127" 88 | ] 89 | }, 90 | { 91 | "cell_type": "markdown", 92 | "metadata": {}, 93 | "source": [ 94 | "### Encode sentence as sequence of one-hot vectors" 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": 7, 100 | "metadata": {}, 101 | "outputs": [], 102 | "source": [ 103 | "# One hot encoding\n", 104 | "def one_hotter(c):\n", 105 | " vec = torch.zeros(128)\n", 106 | " vec[ord(c)] = 1.0\n", 107 | " return vec\n", 108 | "\n", 109 | "def encode_sentence(s):\n", 110 | " v = torch.zeros(1, len(s)+1, 128)\n", 111 | " \n", 112 | " # append SOS\n", 113 | " vec = torch.zeros(128)\n", 114 | " vec[SOS] = 1.0\n", 115 | " v[0, 0, :] = vec\n", 116 | " \n", 117 | " for i in range(len(s)):\n", 118 | " v[0, i+1, :] = one_hotter(s[i])\n", 119 | " \n", 120 | " # append EOS\n", 121 | " # vec = torch.zeros(128)\n", 122 | " # vec[EOS] = 1.0\n", 123 | " # v[0, len(s)+1, :] = vec\n", 124 | " \n", 125 | " return v.to(device)" 126 | ] 127 | }, 128 | { 129 | "cell_type": "code", 130 | "execution_count": 8, 131 | "metadata": {}, 132 | "outputs": [], 133 | "source": [ 134 | "e = encode_sentence('ab')" 135 | ] 136 | }, 137 | { 138 | "cell_type": "markdown", 139 | "metadata": {}, 140 | "source": [ 141 | "## Model" 142 | ] 143 | }, 144 | { 145 | "cell_type": "code", 146 | "execution_count": 9, 147 | "metadata": {}, 148 | "outputs": [], 149 | "source": [ 150 | "class RnnNet(nn.Module):\n", 151 | " def __init__(self):\n", 152 | " \n", 153 | " super(RnnNet, self).__init__()\n", 154 | " self.input_dim = 128 # one-hot encoding of ascii \n", 155 | " # self.seq_len = 28\n", 156 | " self.hidden_dim = 100\n", 157 | " self.batch_size = 1 # sorry! variable length sentences. \n", 158 | " # We can pad and make batches though. 
But let's stick to simplicity\n", 159 | " self.num_class = self.input_dim\n", 160 | " \n", 161 | " self.rnn = nn.GRU(self.input_dim, self.hidden_dim, batch_first=True)\n", 162 | " self.fc = nn.Linear(self.hidden_dim, self.num_class)\n", 163 | "\n", 164 | " def forward(self, x, h0):\n", 165 | " \n", 166 | " # h0 = torch.randn(1, self.batch_size, self.hidden_dim).to(device)\n", 167 | " # run the LSTM along the sequences of length seq_len\n", 168 | " \n", 169 | " x, h = self.rnn(x, h0) # dim: batch_size x seq_len x hidden_dim\n", 170 | " \n", 171 | " # make the Variable contiguous in memory (a PyTorch artefact)\n", 172 | " x = x.contiguous()\n", 173 | "\n", 174 | " # reshape the Variable so that each row contains one token\n", 175 | " x = x.view(-1, x.shape[2]) # dim: batch_size*seq_len x hidden_dim (note batch_size=1)\n", 176 | "\n", 177 | " # apply the fully connected layer and obtain the output (before softmax) for each token\n", 178 | " x = self.fc(x) # dim: batch_size*seq_len x num_class\n", 179 | "\n", 180 | " # apply log softmax on each token's output (this is recommended over applying softmax\n", 181 | " # since it is numerically more stable)\n", 182 | " return F.log_softmax(x, dim=1), h # dim: batch_size*seq_len x num_class & dim(h): 1 x 1(batch) x hidden_dim\n", 183 | " \n", 184 | " def genh(self):\n", 185 | " return torch.randn(1, self.batch_size, self.hidden_dim).to(device) " 186 | ] 187 | }, 188 | { 189 | "cell_type": "code", 190 | "execution_count": 10, 191 | "metadata": {}, 192 | "outputs": [], 193 | "source": [ 194 | "model = RnnNet().to(device)\n", 195 | "optimizer = optim.Adam(model.parameters(), lr=0.001)" 196 | ] 197 | }, 198 | { 199 | "cell_type": "code", 200 | "execution_count": 11, 201 | "metadata": {}, 202 | "outputs": [ 203 | { 204 | "data": { 205 | "text/plain": [ 206 | "10000" 207 | ] 208 | }, 209 | "execution_count": 11, 210 | "metadata": {}, 211 | "output_type": "execute_result" 212 | } 213 | ], 214 | "source": [ 215 | "len(data)" 216 | ] 217 | }, 218 | { 219 | "cell_type": "markdown", 220 | "metadata": {}, 221 | "source": [ 222 | "## Train" 223 | ] 224 | }, 225 | { 226 | "cell_type": "code", 227 | "execution_count": 12, 228 | "metadata": {}, 229 | "outputs": [], 230 | "source": [ 231 | "from tqdm import trange\n", 232 | "import logging\n", 233 | "\n", 234 | "# logging.basicConfig(format='%(asctime)s [%(levelname)-8s] %(message)s')\n", 235 | "# logger = logging.getLogger()\n", 236 | "# logger.setLevel(logging.INFO)" 237 | ] 238 | }, 239 | { 240 | "cell_type": "markdown", 241 | "metadata": {}, 242 | "source": [ 243 | "### Generate Heading" 244 | ] 245 | }, 246 | { 247 | "cell_type": "code", 248 | "execution_count": 13, 249 | "metadata": {}, 250 | "outputs": [], 251 | "source": [ 252 | "def gen_headlines(num=5):\n", 253 | " model.eval()\n", 254 | " \n", 255 | " for i in range(num):\n", 256 | " gen= ''\n", 257 | " h = model.genh()\n", 258 | " i = 0\n", 259 | " prev = torch.zeros(1, 1, 128).to(device)\n", 260 | " prev[0,0,0] = 1.0\n", 261 | " \n", 262 | " while(True):\n", 263 | " output, h = model(prev, h)\n", 264 | " s = torch.argmax(output, dim=1)\n", 265 | "\n", 266 | " # Stop if EOS is generated\n", 267 | " if s == 127:\n", 268 | " break\n", 269 | "\n", 270 | " # update generated sentence\n", 271 | " gen += chr(s) \n", 272 | " prev = torch.zeros(1, 1, 128).to(device)\n", 273 | " prev[0,0,s] = 1.0\n", 274 | "\n", 275 | " i += 1\n", 276 | " if i > 200:\n", 277 | " break\n", 278 | "\n", 279 | " print(gen)" 280 | ] 281 | }, 282 | { 283 | "cell_type": "markdown", 284 | 
"metadata": {}, 285 | "source": [ 286 | "### Start Training" 287 | ] 288 | }, 289 | { 290 | "cell_type": "code", 291 | "execution_count": 14, 292 | "metadata": {}, 293 | "outputs": [ 294 | { 295 | "name": "stderr", 296 | "output_type": "stream", 297 | "text": [ 298 | " 0%| | 35/10000 [00:00<00:29, 343.38it/s, loss=4.862]" 299 | ] 300 | }, 301 | { 302 | "name": "stdout", 303 | "output_type": "stream", 304 | "text": [ 305 | "\n", 306 | "epoch 1/10\n" 307 | ] 308 | }, 309 | { 310 | "name": "stderr", 311 | "output_type": "stream", 312 | "text": [ 313 | "100%|██████████| 10000/10000 [00:27<00:00, 368.94it/s, loss=1.922]\n" 314 | ] 315 | }, 316 | { 317 | "name": "stdout", 318 | "output_type": "stream", 319 | "text": [ 320 | "proment on to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to b\n", 321 | "for to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be to be\n" 322 | ] 323 | }, 324 | { 325 | "name": "stderr", 326 | "output_type": "stream", 327 | "text": [ 328 | " 0%| | 0/10000 [00:00_\n", 70 | "- How to write dataloaders for your custom dataset - _" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": 3, 76 | "metadata": {}, 77 | "outputs": [ 78 | { 79 | "name": "stdout", 80 | "output_type": "stream", 81 | "text": [ 82 | "Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to data/cifar-10-python.tar.gz\n", 83 | "Files already downloaded and verified\n" 84 | ] 85 | } 86 | ], 87 | "source": [ 88 | "# We can perform different data augmentation\n", 89 | "# techniques to increase the size of the dataset and make your model more robust\n", 90 | "transform = transforms.Compose(\n", 91 | " [transforms.ToTensor(),\n", 92 | " transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n", 93 | "\n", 94 | "# NOTE: PLEASE DON'T CHANGE batch_size and num_workers here. We have limited resources.\n", 95 | "trainset = torchvision.datasets.CIFAR10(root=DATA_ROOT, train=True,\n", 96 | " download=True, transform=transform)\n", 97 | "trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,\n", 98 | " shuffle=True, num_workers=2)\n", 99 | "\n", 100 | "testset = torchvision.datasets.CIFAR10(root=DATA_ROOT, train=False,\n", 101 | " download=True, transform=transform)\n", 102 | "testloader = torch.utils.data.DataLoader(testset, batch_size=4,\n", 103 | " shuffle=False, num_workers=2)\n", 104 | "\n", 105 | "# classes in the CIFAR-10 dataset\n", 106 | "classes = ('plane', 'car', 'bird', 'cat',\n", 107 | " 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')" 108 | ] 109 | }, 110 | { 111 | "cell_type": "markdown", 112 | "metadata": {}, 113 | "source": [ 114 | "Let us show some of the training images, for fun." 
115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": 4, 120 | "metadata": {}, 121 | "outputs": [ 122 | { 123 | "name": "stdout", 124 | "output_type": "stream", 125 | "text": [ 126 | " ship frog ship ship\n" 127 | ] 128 | }, 129 | { 130 | "data": { 131 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAB6CAYAAACvHqiXAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4xLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvAOZPmwAAIABJREFUeJztfWmMZNd13ndrX3tfppfZF3K4DSmTFEXKkkxJkWTJlhHYghzZIRIB/GMndmAgluMfjoD8sJHAjgM4DgTbsewIlmVZsQTBsWTREimJ4jLclyFnetaemd6Xqu7aq97Nj3PuO6e6q2d6Zqjp6fb9ALJr7nt137333ffqnPOdxVhr4eHh4eGx/RHZ6gF4eHh4eLwz8C90Dw8Pjx0C/0L38PDw2CHwL3QPDw+PHQL/Qvfw8PDYIfAvdA8PD48dAv9C9/Dw8NghuKEXujHmo8aYt40xE8aYz71Tg/Lw8PDwuHaY6w0sMsZEAZwE8GEAFwE8D+AXrbVvvnPD8/Dw8PDYLGI38N0HAUxYa88AgDHmywA+CWDDF3omk7E9PT03cEkPDw+Pf36Ympqat9YOXu28G3mhjwGYVP++CODdV/pCT08PHn/88Ru4pIeHh8c/P3z+858/v5nzbsSGbjq0rbPfGGMeN8YcN8YcL5fLN3A5Dw8PD48r4UZe6BcB7Fb/Hgdwee1J1tovWGvvt9ben8lkbuByHh4eHh5Xwo280J8HcNgYs98YkwDwaQDfeGeG5eHh4eFxrbhuG7q1tmmM+VUA3wIQBfBn1to3rrWfX/qlf7OuLRKh3xkTEauODagtGnVDlmPGGDcmOR8BAGD+9Ath29DQXgBAYNIAgFQsCI8tG+q3sTQTtuV7R6hNGZICXrIm/xa2rIyjFVgea0vG0WrQsXpV+mjRcZtIUUMjIX1UiwCASkPG0UASAJDO7grbnvzWX0Jj7M4l+dx/EACQSw+EbX3jwwCAc3U5b664AAA49dIrAIBXvvdceOwgn3/s8L6wbRFZAMCeu46GbQP776YxVmlOQVTGnUoU6FihFLbVa7R+1Zl62Fbk48WlRQBArbYg/e/tAgDcdu+DYdvoIF3z7MTpsG05Okfz3HMHAKAZVMJjQZXGcerbsj1fe/n71FdiPe2T6DlE3wu0VTEKAIiYaNhi2MJo0QQAVGqN8FihRPe71lB7wdCeKa8uh22LC3M8SOojFY+Hx9LpBM9FxlEs16it2ZTx8ncSCTo/Hpf9VK/S+aXSatjWCmjtrZU+Ast7MojyMZlnJ0+4eIyOJ9V4H7qrr/2cgUfVGKNtYwWAaIzWIxYVuTLCbU7UdO8CAIjFaO8kk0m5BvfnxgMAp99+CQCw/9A9AIBUNhsecyupuoV199HKvQqP8fNtA1mDVos+N5vy/gj4mddrFfB3mvy31ZL+3edGQ+5Biz9Pn/jaunFsFjdCisJa+/cA/v5G+vDw8PDweGdwQy/0dwIB/2Y6KRsAWtwWqF/AFkskiQT9tAZKCpZfcSVRBSQtrRYWw6Z83x4AwDxLOQN5+Zm2vBI1JWXF6nR9q37OA/41b/LfQP0ii4Qu4wZ/bqlf+MAGPERua0n/tRofM3JrWqydNOobxwxcPCsORyvMZAxkZsO2QpE+z9SVFFkmaX3q1TP0vQXRImZTJMVFDsg44jUitadeE60nlaQxrVRJym6U5ZqN0goAYPLti2HbaO+9AIDuaH/YtrxA0uPMHN2r0WN3hsfSfeSpdeHUdNjW1TNExwZEUpuapLG3+N6W60qCTdGajtwzHraduTzMF8c6mAjNWcvnTkO0SiNr8L2v8X0srtbCY+Uqr3NU1q9SoTHOz86FbdUKzT3C+6kGcRyolEnqbKrbXmdpPar2ZLVFc61WRStxcM+Qfr5cd82W0mjdPuY/Rj9KvIcDta+dZFmtyZyBdgk9auR8wyK3lrijUZ6LegtFo+3XN0bGGI/Q50RUBpdgiV4p80gZejckDI0tlVjP3cViWgOhcTab6y3QfAhaWYvyIkW0NB5aCeS8JlsJTIfzDa+lVWvUwafkmuFD/z08PDx2CPwL3cPDw2OHYMtNLi3WZSJtBCibM5SZolYjE0AsRiSI9ml3ZFBM6W6OjKwok0u1QW1lJn5qiqSIBKw6KtWq7tRrpRU5AqXlzCaaLAn1M+nXqVZtupibn9Nym3JRR8SZiKiEbpjNxnrSxuH405fCz1E2e2RS0kcQJxU5m5NI3WbLjZGuOZYXlXlhkkwBtWNCKO3q6QUATF+aD9vmXrtAY+ylzqKByAiFS3Q/Zs+LaWQgSf0lu6TfTJAHAPRkaLzDRw6Fx3rGyDTz7Ld+ELa14mQn2XvkvrDNsPng5AtE7K6sivko2UVjGx4+GLYNDhBBXu9gcmmGpjO5L01erFpNzBpVvh+lOu3NujJnORNGvSr7dG6O7stKsRC2RXlzOZOLsugg4swNRpGGfEJEOwWEY2zw+GXDhmSesqG4j9qE4kxJ0ch6E6g7r9GQ+UlfncJR3LiUicE404965vj1E43Ic+uuH5pjorKHnZlEE6CJOK1NrbIStnXlyNmgK+sIUz0O15d6Hrmxqa7lnuuA73tLvZ+afF+MMpc44tpqC4ozL4X3QL9ImHxW7yDEbly+9hK6h4eHxw7BlkvoAZOdtk2C4M/aDdG5afHfZk1JYPyL3SYtMFEU1ERCilgn5Sf42tKH+6XXv7COk9WShkW7e1KbqyT/2kbQgfxoKumG3aNMZL1EH+nQbyiia7J1DWxEJO/CCknXsWg6bGuwZJlNixTemyOXwK4ukmj6uqW/xSVay9v3vzds685SfwNxJdYm+b6wB2YsEJe52hAdO5gT4qyrj64fU9pGi10e4ymS2lPqPl46MQEAOP3yRNhWvECsb3NVyK6eESJPC4vk9tlbFQ2gvEpaWr0umsXRLnJJfQVCtobns6ufui2o1+kf1YrMpdai++G46pZyJaxWaN8Fqq1WKXG/ypWRJfSYm7Jy4Yvyvo4oCbbOxGrb4BitwJFwgsCpl0r0d/3pve4k9Fa475Q7XWu9S558b2MiL6YGEuU9H1Guku551fOL8vxDaTwux2LOzVF1HGOiFAlpGxkj994ca6jxqLjNmhjt4bRs09AVtdX27DtXQ+q31pL+q+xxW2+odxbfDqsk+WjQ/reh1S9ua4lSgEiw8VpuFl5C9/Dw8Ngh8C90Dw8Pjx2CLTe5GDaNtJQ5IYwmUxpKktWsuKHzjTKXLM6QCaC3T8wJUVb
tupRuFaxSFKMJ6PxETqbfqNNvW70mqmyGSY2gKVGNzvwSZXWxrEivOBO2LWVeceSs86EFgCaTZ60WfdfWZO5xNgs1W0K+RdlP3c29E7oSveHn7nFSOfOZVNjW05MDAPQOjUlbnkwWDTZLDfcJefkTd5HP/nBeok1tk8wN/d25sC3fS32USzSnUlnWKs3E01BvPmxrhKYWkSUOjo8CABbKZCqqXpaoxvQC9XcgcUfYdmniHADg6annw7Zj76WowO6AxpZoiv1oOENr05oR4rg3of2n27G4TKSltmq0Qouf3Ksqk6ENtl3UKsqHfJX6SMZFp462aM9GlG91hMlvZ2nRMQzRiDMlyjiaDRq3NnU43+4kOwdEFWnY4AjUluYAXZyHMjG0Wo60DNb170wjbUQpk4VXshLoPe/mHFXRmI4MjajzolH6zEGh4b8BIMKfI1B9OItLSp7zfJ5MiYkGEaWlU38THkuPUWRwbFgI9QSbSbTZ15lfXERutKHNovwXKmrYOCI9bEKL19LtD6vue4vNrXrukag3uXh4eHh4MLZcQp+5RG5vvT0iYc7NsYSmfjHfeuNlAEB/D0l7qZRIn5ksSYmrVqSulQJFQRanJUrx1VePAwAClmS61K96kvOfDIweCduiZXaFaolkvMS5Ny5dpHGnVF6JXi7e0WqJhB7lX+eo+iUuLNHYlsvU73Df3vCYaZKUVw8kmnC5RNJBMi+uWWvRnxVJerCP1iii3N2SKRqnzr2RTtIajvaTNJtJKAIKdN7CnIwjzVG6WgJ0ZK+1zmVOkWks3bRUZJ9hyS+qyKMU34/BOElWJeVy2JumcY/f/76w7eLUYQDA+akpaXuR8r8sWJLCM8oFbXyU1qYnIesXC9xcZR85lEukHdWUG6LT3LTraK3OeVVcvh6lyaHF0rvS+GLMhCVjss4uUtoR5VoyrrMGoIlVt36RNndB23Ysqh0MmFRsNGUc9VBDlDanjbh721J73uVUaqk+QqeAjlm0CTpfiiM740pjicdcfiY5L8V7JZd0e006cXmTdJuN0JimLp8L2/K3kdtrZZWe/ZnjXw6PjTlPwsFj0q/TFPS68V/njqijdcPocKVtxJyUryZtWi56lDV9RazGnUbWJlJ7Cd3Dw8PDg+Ff6B4eHh47BFtucnn7NUp12acIzWp1PfHz1iuUEMqRTaOjo+GxI4dJBc/lhKwrrpA/cmNZ0sXWZk8CANKcUKqiEmBF0uTHvFKQKL5mkvyWZy5L4qsf/eBJ6n+Jjh27+y65Jl9/aVHSv+Yz5PdqlRmmwuTf2+dIJRzokblEOWI13yvsymqVxnvorvXmAYdUQnzO+/vIdNFUfq/TM7Qe0VVJnnVglMjT3jSnK1aMTsYlQVPOuY3QBCBywGWOejRRR2TrLcXqqmLkXGIjLUq4KGBHEFpl6nCEdFeXzO/2vVRXZbRffO8nJsk3vbL4Fp2fFnW4j/3b82npI5txe2V9Qqsam9pqVeVzzknb2qMr2ZzGf2MqpNhFPusoYKdft5T5LWATldvrbaEULdv2FwCSMTJB6aRwdTZ7VdkRQJtBYky0JXQyLyZWg7pifTnK2vKN0QRhaAlQPvK2td7ssBaa8BMfchX1apwPvqxbjE0uzj9fW3Si7C8eU+RhKyDz3EpB9nXQIkLfxOkex0beI5303A4AqCt/eDcXHeXsYmBaYYrk9fcsogleXr9AEbZh9K+LOo3oVMq27e/az9cLL6F7eHh47BBcVUI3xvwZgE8AmLXW3sVtfQD+GsA+AOcAfMpau7RRH1fC4jQRWzOTUgN1/4EDAIRkBIB33UMpVc9NvNX2bwAYGCTSq79PUrIWiiSRnl06GbYd3U3H+7P0C1tryvSbQ3SskdZSMEs8TRXpmCECrztFEn1vTqS+/ftIclzMSx8XL5wDANQrIgk6KcU0KIKtMC/udPk0jSk73BW2pZJ0jcuTUtABSSE3ASCn3DMtuyEuLikXuCTNry+jIlY5/0WJ16GqJONUmojVuCJKGywJVhrinldcpc+ROM05EVfrZ51EpcgmJ12pQhGlCrui8novqfw7aXZXtSqkzjDBm1ZumQf3UORneg/n9iiJxNaVp7lksuLK2ArztKyX0BvlZXdS2Ba622nSl6fSZMk7qkhoNz8dOeh8/CKBkL55vo+OwNaFK1JM2keM7DGnvTY0UbqGmGzLucJpbjOqyEOGifF6VUhcJ2FGeb11XiRRG9YXb2jLK7sGkTaXQ9eXWlPuV2s2rtBGndde8YiIh2NSZD+7Ch8+dESdx89XN7no7nn/r0v/rOG02vwtmfBW4whn7LTWtnHT51RKnkHndh2oNXL92ZZLw620GdZebERpfJGbI6H/OYCPrmn7HIAnrLWHATzB//bw8PDw2EJcVUK31j5ljNm3pvmTAD7An78I4HsAfvN6BnDm9CkAQKByZJSWSUI6cq/Yp7PsmtiIkNSya++B8NjQEBU8SCgXQsPSzWpLXNVWas4dkqWAsgSwpCLfBQD07Hk4bDs7RYUfurtFWv7AT1FgwtIyaQCFouSJeOL5pwEAaeWGNZgnySiTUfkqOJfMXUnSMvp6B+VYnF3EIFLc8ipJkaO9Q2Hb5Hy7QrRvjxzriZDUfHpCtJ4IuzIO7hKptlmn82pcFq67VzSc4irnROkXd9IY252DqkywL8X5VFgEa5TlPja57F4kKtpDg+22cSVVunwdqxyYE0vIGJ02U1dSJ5vcJU8JgGwXjcNUSatrFCRviyunls/JXOqhbVvcMmWQtN5G2csj7rNy9WuyFN5g3kCn6mhZF3ijXNvYRhqDSMZJlspcVsGqCpgbYc2zqgLPVphDiutSbhFX5GF95tKAtaq6zmnE9yqttDqXwyXCwU9W8SlhzhWlgYRlH68QDGPaPgdtf9tO0KZrl3qG3UqNLoUXuHw6Mpdcxmlrwp8ZltAbvJaxuAS2uTJzsbYCOXSNtmI1YeEd+rd6pMOsqlbZywOzPkOsCxKTtVRr5Wz4moMwG/MRm8X12tCHrbVTAMB/h65yvoeHh4fHjxk/dlLUGPO4Mea4Mea4zmHu4eHh4fHO4nrdFmeMMSPW2iljzAiA2Y1OtNZ+AcAXAGB0dHSdfnb+/FkAwGBSVKZ5VnM+fEjMKrE4qVYTZ8iFsBVR5pUEmTWCqCbw6FKlVVFvbYVU+QYTpnZFXBQHilQRPp/aJX2sMMHWEDItbagu5cVZmnKXMoNE8nReX48QUH1MmpYKYvoZH9sPAFjgPioqH8zsPJmbkllJDXt+ilK85ooqv0tWzAcA8MH3SeRbaZFIxRMXZblnOdo0UCFvsRwTchwVGlUqX5mr1i8WZf2yGRpTd1ZMIglWoV0647ziauvsqtlUhLAju2IQos/wvcwxIW2yciwa5tORNUpVyVS2oApF1Bo0thirzSvKLGTK1EeqJOp+LidmtLVI8TbSvJnT0COKJE6Gf536rPIRsdqf7hGzRoajmzMJMW25SOOQVFMmnf4BOu/8pRmZC2hNs8oFM8kpYVeLvMd0SueYcx3VeYBcUQ1NRrILZmgnUep/6GmqUv
C6fDBX4PEibfUyO/QbXlu5Q7JLZdS58GmikD/Wq2KWskmOHk2ojceEcZiONqIjbd06q7mH9UvNuvPC3DpqGI06rXPEyDvLmdtayoTiCo24iFwdfes+W7t+bDeC65XQvwHgMf78GICv3/BIPDw8PDxuCJtxW/wrEAE6YIy5COB3APwugK8YYz4L4AKAX7jeAfT0EonVn5PAot0H9wGQ7HEAkOWgnRxLiTX1K+1+sRNJlVmRg1+6c1J2LJYg1zZbIsJsbk7laOEq8YXTIg21+qgyfGlVpMPnT3wfAHDHPfcDAMbH9oTH4jyH5qoUgCixpHvhkpBv9RZJZXfeRmObnZGcJIUykaw6qOrcWSJnL5yX837mU5+BRmVRXB+/8xTlvVlekd/rOw+QC1cirnKR8PSrDQ7OWBWSuMxVGy7Ni2Zx9ACtX29etJiAXTpj7M6XUa6ESXa3qwTSR4WrxJumSFQBu8/FODhKS5PxHGk7xYpoCtMssUaj0kesQcfTLF73DUhWyRYHRM0vyvwCXVlgDVrB+iAiR0JqMhJMNEaDCvcpxO3YIBHd9x07GraNjlAAWSIt2lWZ5+6uFVEBQAXO+lgoybh7i7Qew/3yvGT4OZlgd89aVTQil7KmLdsiu/qllLbhcpbEXUCU+oJzpdRjc0R2vbkxkWeUdB2WnmvzcmQpX93vKs81yvc9amS9TcyRl7LOE6dOAAAO7JfnMNpD2lelRJp4XAcupZggjYuG4yT6qBZvXaCXWw9Foq4U6Fm2WSXR87unpcbWaq4pNamKhrjPQaCC6Fo3TopuxsvlFzc49MEbvrqHh4eHxzsGHynq4eHhsUOw5blcHnnvTwIAkkrbcIRgW4LQkMSgE7UPrUunWVZ+5Q1Wcx78+KfCtsVZyvfRFaeD5YXL4bFWmdSzmXnxX05wfpnsLsm1MnKIrrHvIOWEWC6I6efOw0SY9imf88UpytfyxiuvhW3pFJmPnnmOCjTcc4/42y+8/joAoFgS//ZDB28DABw8cDs2wle+8nfh5/NzZOKoQ/zbB7poTPFBIXgDS2NPZ0lFtUr1DTiCsrAkZp7iEqm/l2Pia57mYgL9XfS3qkwj6SSpyD19EvEbDYuXqGICzCjVnfoZE8K7yf7nFy/LvapWaS/kkspvmEnTeIz3Tlr6qHE91VRU5dNZkXmtRZmjK9vqWTLhbuLSb1gns+F8zmU8eY5OHR0R81T/AJlaFgqyfhNnTvOc6F6Mj4upqLhK9zGVknGMjdI9zaeFNLdsO+viyMVSS/pfZgK5rtjLWIJ9t7Pin+3SKzsSGlaeL5fGV5cPTbKJIdrcmMjTEazuk9EEqKvF2lAFZMJ8NNRvaVFMlS5FbbEkZPip1+m5yrTkeakOkql06k16vroicn5mLzkP2PSwtDEZPzgibUUm8utsxurdJSadi5PnAAA93WLa6huke1uqyDhcyuKZedq7iwsSAT00QCmzsyp62fr0uR4eHh4eDlsuoa+skBSS7hKiKMvSTU+X/Hq5SDpX3k0XSKix29/CgmQ5NOwOthCoCE3O11LknAq5XUIsRViqGVH5TLq76fhCRRVcGHdJ+YlU+dbfPxEe+/hHPgIAGMpJsYmRg5Tf5e3DUsRiZJw+v/IW5ZmZvCzS4tE776Y1UJkjc3n6bJWr1dOvvAGNN6flWI2jNXvyqhQZk5yFJSFsHRk1OkZrpHPhOMIsp6TDEydeBQAslUXqHBwiqcaypJZX1dRNmoithHInTXSTZFmpyjq7snRVRypakdhWVykidmVF7m06Qn1kdT4bvkatSVJTuSlzPztNJOrzz/8wbBsboDWN738Ia7HMJLiW0ONxltpV/hNw5KQNSMILVF6TicskFbaefz1sO3SQ3FXzymUyyoRmnTWymiIZ4+zm2Dco+3R8D43bKMm4MEtaZRcXdtCq7WXOClpRUnCS70ug5Llldqtt8B6LqKoTiRRnDNVl7/h4IriSq53MxbkENlURkCbPtaVyAzVZGzAcsbqoCtSsLJGEW1fRtOkEjePMSVlne5KejWaZ9kIypkj21TcBAEeOyNxzWdrPjTnxvi7zc3L2macAAHd+9OPhsWqRxvHSiRfDtmiC1maeI8gBwN2i6Rnaf0tLcuwAa91d3TpnE7veRsXt+VrhJXQPDw+PHQL/Qvfw8PDYIdhyk8v4OBGJDVVMoMk+nzoyzaXazHUoGOFqExaWlFrO5prpOfErz/B3s0ma9pJKWNTfTWaeXX1CjLgkWicnToRtC1xvcs8eimK97y5J49tiAvHM9KmwbWmOCJFEU9StRpHG9OF/8SEAQF2l/tnFJgxdX+Dpp38EAPjOt/8hbBu/XYhUACjV5be5GdA8IzGVFIu1ZVXiEqsFUvMjcYpEzWSE8Etx5N2uAVEJX50h9ffFN6Xgx6EimUQux8lXfvewkEd3cDyBjYtaXmUzT0OZJ2Ic/bgwR+aBckP5rddpjItFuVf3HaV+W0r1XuYoSWem0BGxl+dojD969YyMew/18cB+rMPcNPn0O3MgAGQ4/kEXUenuoyjhAtdAbdSFJLvAKXLnloQIc2Tv/j0y91EmQXuYgI+peq1xNscMRCQaOc/kc12ZAXu6aUwZTmqWVimgaxx9WK2LqcNFXddU2//50pcAAOcuMPmsyEuXWtcoM6ck7JK2owfEFEfXkbm4yMiXX/1B2FZYJsIzqMk4+mM03oEc9VtLCbH/5slzAICScn7YvZfMlzNMVALArmEyeZY5AdvU5HR4bICLoozfcV/YNnGBTJ9R5QeezNB5Rx+hZH1n3v5ReOwCm3d0YGeN7/3J07LHqhWOVq9w1KkR0885vA0AiCfVg86HH3jPx3C98BK6h4eHxw7BlkvogxxRV1WRn9//PkVjzqlSbgcO7AMA9HNk6fKiknyYrBlTZenqTIgcVtGENSYl6hxJd/rtt8JjsyzZnUxJBNllvsa8Std5x7veBQAoc/+DKr3sS09+CwCwMHUubOvK0vVXlUS1xOXxjv4kSejzKkfL6XOU26a0Iu5Ply+x25Naj3G0Y0Cl4LUtIjd7e8Utbdc4rU02JVL72SppErPzRAb19gsp2p2j9cjlpY/hEbrqK08KubgwRVLYex6+BwBQXJVxVxzBrMqOVditK5oSt7sWE3HLfH9miyJRvXnqAgAgkxSp88OPUArj6XnR0lbKJOWN9g/xtUXaKlVJCrr9trvDtmRCrr8Whon3ckEI5ARLp927RFo+tJfWo1AkiTGtNJw4l3VPqfVucJTsW2+9Ldc6SuddukRawdysuOnFWUtqNESyO3KECjnUa6LRzi+QZuPcBMfHxPVx917SmM5yoRUaE+3xqCI+Y5zzZXGO1r5YFCnYRcd2KScFp0lo8nQtSktyzRpHYl88LWS+4XKL2aT0m3E5jBq0F5IZ0ZgHOb3z3rHdYVuUXTDPLr4ctuXHSVNwXrAuhw4grpJTk+KIUJqmZ27Xbrm3F14kB4BHP/YzNM+kPAfPHyeJfmz//WFbknNmx9W1MpzYKLlCz3ckIsdMxDl5KM26fuORol5C9/Dw8Ngh8C90Dw8Pj
x2CLTe5lDki68LkhbDtMKuVM9Oiek+cJPNIU/nTOqQ4lahOpgRWSXuroq422Ye4wYmk6krFMRwhV1ZpOBNsTnn3ox8I2x5+mCJbY2zKaaoc7/lBSl6V7RI/0iyrYmVVcafOy/7UM8/Rv5Wm1WSCKJ+WPnq6aRwP3v9A2CYGHMKRQ5KErMTpgfPd0keOSd9qScjZni5SASfO0zqfnZQEX8NDvTweuUaU07SOdItZaoljALq7yBQ23Cek4SpXLIqpVMCuDmhdqeorNTLD9PaReWKxJSr4+TNkbnrvA2IuKbEJLK1SyLpo0xaT5dWWbO2BXSM8Abm38/OyDmtx7Bit86FDYj6KRF38gYy7sOR8tzmlrUpM5nz6BwfFjNXHEbPaNOOqbY2NkfmmWJRxuf2sydkuJkX1XncRqi5Na0Kp/Qk2SVSV00EmQ/tCr19fSMpyKmUVO+DaXF8AkOLI0lhs41fIxTPPhp9dhPDRfWJCGef1eO51qaz17WfIJJPmPL6HD4vJarVEz+258/KuGB4l81KgkomdnaDo25E99B7Jd8uebLCpdHVZKn5FWa59400ZR32K9t3yLDkwnHpDzGR9OdrPvSlZ07kleoaSCVXvlCN3WwkmPlVJq7k5MmmVVFyDLj97vfASuoeHh8cOwZZL6M88+wwA4OQpcfU7epRSjh45eChsO8SkqCPVdFVylwOhqaLsHImVV3UQk0ysxZIkmWRUxJ6T8Kw6v8VkXkRJmK+BmsccAAAgAElEQVS8SrkjmjWu1ahyl5Q5XawqoYk65/vI9AvhkozTOBJpkkRzKqdGjImtjMoZ8vwzJOlMXhB3wcFDt0Hj7Plz4ecRJu60dBhw2JrmsNJddI0Iz3NWEc3jI9THSlkkGSdA5NIi6aayJGVlOcotqlKmXrrExFNKJO6+XpL8GyWROuc4x8VoL0mO/XWRPg8fIlLP3X8AKDHxmsyIBpLivDHlMvWbSMma7h1n98JFITkXFmhessMEDz38AQDtdTWDsD6kzG+a0x5PTJD0VlBFTJpOU1CpbFc5PXEuL+OemyOi27kappR0HY9z/dzhkbDNXV2f199PhHiC3Rx12ukoS9Dve98HwjZ3npPKaZx1/i6tm8oWG+ZyaSq3PldfVBeFWAubvif83ODo7GyX7Ot0jvqYXjwZti3MECn87nvILXi4T+5jrUbRt3MqerSfa8nuUjlz8pzLZfzgYQDA0quvhMcqy6xVrQjpu3c/re+Jf/iWzC9G61tlbf7cSdEKhvK0h+8akfs4FaW1XK4oApZTBk+wa6+uDVsu0v6YW5A8M3Fz4/K1l9A9PDw8dgg2U+BiN4C/ALALVLvqC9baPzTG9AH4awD7AJwD8Clr7dJG/WyE554nO3JGScHPPkdS+8y0uBa96957AQD97Fqn3RwdIlI/C012q5qNaCmLJI06B4IEq/Lr2GAJNqGkwxxXEo+ozH2WJX8X9JTPSSbBYZYScmltfzTug+qDxuby7keNHCtz4FJD0QFOAykWRcIUJ0XCD58VV8J3P/AIAODO26WEX5U1G63FFEtcWIJzoiwou3LAlelLqrhCkvmF0VGxg+b7aCQpXvuK4hQWCuyuNacCvrhwwUpZpFl3jWqCxtaVlPU4dgdlmMxlRCuwHLRTVvb9GEu4dQ4oysVEks6l6Jq5vNiMdWbEtYiydmR1gYuw+oHssV276PPsDNlPzynXwBUuLqJt4vPztA66CEg2S2NzEndWPQdOo3T2ampLcpvMxUncTqNoqXE7TTapir+4QhWppPQxOEBazKGDHNSn9mSj6cqlrV8z3VYqXG47Vm8p6Z0Dycoqh89yk9wtjx2WYLTxYXKv7c1Rv65wCgAcHSHJ+Oie94dtztaf6BEtMNvPfBEH273vp34qPFZbpT2ZUzzXwdv3AQD23y5BggnW3oscVDiUU5oCP0ItNRfnlqxL0A3xOKZX+b7UZS7pKLWlI7Kvk3Gl2l8nNiOhNwH8hrX2KICHAPyKMeYOAJ8D8IS19jCAJ/jfHh4eHh5bhKu+0K21U9baF/nzCoATAMYAfBLAF/m0LwL4uR/XID08PDw8ro5rIkWNMfsA3AfgWQDD1topgF76xpihK3x1Q/y7X/tVAMB3vvOdsO3MaXI7euuURHJOc4rQ3ZxD5ciRw+Gx4WEyAbh8GwCQZJesWlTUT0fqOJUzEhGGMGAWKBrIb1yaVdJ4Yr266tzXNI9hw9oNomqKiUbVV2Q1knkiWKXeNg1dq6IKEjS4TmG5Km50azE6KGaQvbuJ5GkpV66BLup3oShq3ySniZ3kXBdlFRH78ksUedeTF+I4nyfi5/BtQsjm2WSQ5jVqquIKGc5lUS5I9OPiHK1bvSYmlzybtJIciZiArHcy4opiqhqyDVKbF5albbbArqjsnqdJ864I9Te+W8jFA3NiblsLy8SuLjjgUps06jK/GruidnHq58EBWT+XOjYa1aQhE+/Kfa3Jpj4XeVypSP9uj2lXSZfS15leAHEddHtTuxKG+1TtSed+mFLRuq72bcD7zqhxO7dFXVPUdaetMKU1S+qiZek8mqeubF/n/RlRuWci/Eqq8nNbqcqaDvTS/usZGVHnE9LKJIcGDWSRycj7jsl+dSZbvUbDXNiiNy/ujZbnOrFKe7c7Ls9eme2hvYNi+Hz4GOV8mVEpvE+fIfJ2/50UXa5S1mBpkcbWNyWu2UvLYlK9XmyaFDXG5AD8LYBft9Zu7MS7/nuPG2OOG2OOl8vlq3/Bw8PDw+O6sCkJ3RgTB73Mv2St/Ro3zxhjRlg6HwEw2+m71tovAPgCAIyOjq5jVZykMaJ+dec40bz+FV1gl7rJSSJejj//fHhsaJCyqx06LFL7nn37AABjnPEPAAYGpPAEAAQ6XRpDE5QuWEFLJiLdc3EDVVHcWCeNq/Ndm9U5L9qXoaWkcZfjQZO+CSb1unrXUqGC3eOS3eXAPvqcTYmU1WDt5OKsiFET5ykz3MnTlMsiq/KlBByAdOSIEKv9IAI4rohmJ8kUGzTurqxI9EMDJDXNKHfBef6cSQl5lIm5oCCukg5x9YsYkowagYg3JRZ1TpwS6eaFVykg5W52eb3toOQzSbCk2NsnYztwkNao1FwfqJZMcZ4NlRHSuenFFHGV4OIK+QxJgIcPixNkJMpz0f5/XFbNBZwAIkE7t0i91zqRkE5CN9q9lu+tk8K1RO/6a6jMik7jNGqf1mqO4FtP/kaclqnV0VDi35hcbjZk7i1OrKL4bpQi9DyOHZZn/152uU3zPGMdRM6kctl0OVS6stJWYceCY6xpDStJuspaaL0uz1cfBx5pIjjKWklulPLGTMyJDFvmLKX3feSRsG3XXrr36QF5XjL9tBdP8/NlMnJfBhNDPH4Zd9/yxlrjZnFVCd3QLvlTACestb+vDn0DwGP8+TEAX7/h0Xh4eHh4XDc2I6E/AuCXAbxmjHEpzf4TgN8F8BVjzGcBXADwCz+eIXp4eHh4bAZXfaFba3+AtiqFbfjgjQ7ARc/t3Ss1N8+fp5wKOl/FOKd/rXNulslLEi12nv1/L09JLpL4
M6RW7jskKtA9x461Xau3V1LfusIFEZ1Qgc0fVvnCGkNtzbAyvPKPZr/btoAvR4B10ExdDcqWOhjnIgjlppAwNkptuw8cXd8JY+9eMSdVWK3MJLXvNtftVCaGyUvsP81r6UgnAGjm6Jon3p4I21pcQb48IL73sSxdN4iSOePofolOzXeTOllpCdm0vERkUColfsAZ9g9vVEjlLBYlnKHKpFhxWUjUuiEy74zKPfPS61R0IMU+5Ef2im9zV57GUVP5dGA2NhU0OH1uoGq4OvNHW2Sku7ch8a1VdmqL6e3E5yVU4YdozO0xF8m7Ph1tS0VoushPHcVq4Ah6JuzbyEsegCIo3TVWS2J2cERt0vmmm/Xj0P12ZEXXwMVU8CwAADFlBqxwgZKhAdl3+3aTKSxgU2ZKmbiSbK/p6xXTWY7zqpw/JUVozp54CYCs90DXe8Njk6c5ZfSMFL4ZGCCTzPAuMdM5c1GKnRQGuuVdcbFM415QlOBolProzcgem16m99ilSfqbHhC/kYBr/OrlSytz5fXCR4p6eHh47BBseS4X52qoc1M4yblYFJKgxpFYzh3t4AGR6Fc4h8bcvLjH1djF77WXXwrb3niFcjo4cnRoSMiSffuoFtnYbul3hDPg5VWRByfJuwg//QsbWFcFXjXyx8DoiFXOjuf+rbQCy+SfJr2WV0gUsFGJ7AOUtAmgWBbJe9iV51MZB0OJzoq24darzGu7GJFcLqVVktAWFWkzs0Dnj6p8HIOcVTDBuVzmJ1TRhDS1GZXfpcRFIxoHRIJe5fW1rAGUyiI5Ts3TvS2XZL5FJvBeffN02NZgV7KFArm3dnfLuF1WwWJFol4T7JZXal9GAIrAVvfAwq2fnOfuvXNvDNRBR6jr+xhxUj5E4nbHnTKqFQA5psj7psusqNxxOZIzJC87SOhayo/x2JoNGW+DU342A+eyqTUAQquDMG6uQIrWqjpbJa1pUZGRMfbbXSnIvmvWKdraRcm2rbdbI2UwaPHcT7wuhTMuTJwDAMTZPXPvfvHXWOXMlbWKaA/O+66uXVJdZlZ3/1KiZU4u0D6KQDTgzBJJ/E//4Jmw7Vs/pM8Rjn49OCJ9VJscRR1XUeU+26KHh4eHh4N/oXt4eHjsEGy5yeWFF14AILVFAWD/fjJ/aD/csxNEzllXKFCpnAMDFGHY1S2Rb8vs71xV4VnlEqlWi+znPq+Sf731BqlsRpEwOS7aMKTG5sw1Ljp1QBEdAwPD6+biTEkxRZ6mWJ1McltLqZB1JmPSShVz5KkqLYnomjtXrYpKfXmG5r6iKs6zhQjl4nzYdt8d5D89Nkgmrmdfej08ZtkkEVPmktUyqdDT8+JXnrxEkXE5JvpSOqqRibV8WtoynODr0gUhNFcbfC0uDpBMqwjGiovglcm7Ag57R4SA/cy//CgA4MXjNIfnX5Qakw1nAlMpiSPRjfVbFxmp99+VElOt/as/dzJ/mA7mt07XsR3iJDql8V3fl4prMOuv48w1ts18ZNvO0yYX11/QVBtwzbFOaAZiz4pw0qq8Ku5x9x3ku51UJPH8LBH1Lpo1p5Ji9XBBDO3b79znx5RTRZQPJzmp2cCgPKPODz2mIm3jfP1l7QfOa1Sen143zXSe9t1rTz0Vtj37TTLlvHxWYiMW2Pf/2AiNLVDJylo2xnPRpq0bt7l4Cd3Dw8Njh2DLJfQZ5T7ksI+jPB3xCABDw/QrO32RijzUVW00RzLFlFQ7PEy/ohby6++kkGKBor4KKrVpoUC/zrogwTS7VF5UaVGdBOOqnufzqmJ5mkiPnm5p6+khqUJHqbqyYz391Natzu/i/tJd4iaVZmlifkbyROhq8gBgm0I21XhtevJCDDbZZU+7Xz36fiqn98STTwAAnn5eCgF0d5MLVUO5OVZYRWgqqWKVSwIWOL1sQkmkjuTJVhV5lKbvFhUp1WANJMUpXqMR8Qcrcb6ZR+4R99ODeyl6b6Ei47iL82WsrlJf589KHqBMnooT9ClJLZyXKoThINKqkp6MaTum4Y51ivLUbo4Rd572fAylcMPX1P1fXSvYLNryC7k+1LWcK2PcSfT2ytKi+6bBei3CIZlQ+4/Xu1wWotRpfOmskP3FlSUeG++TJdk7hXnqb2VSnvMefgyGVJTzwAFyZnARsTGVN2hPD71TIkpTWGJnibOnpchOitfrJOeTmpkWYrXBe/6Hr4tLb4VT4yqeGTlOk1zlVNE1ZS0IXU2VVH6llM6bhZfQPTw8PHYI/Avdw8PDY4dgy00uLjnX3Jz4kDuVUCfsQotVthUi+mZnRAWy7HedUESHq2QSy4jvp/v96uVkXj0DuqYimSxKK6LuL7NJxqU2BYASm2Gc73FJVT0qLpFJZG5apzsllVGrvC6iL8EkXZcyLXVzCs9kXkwjR+57iMbbLRGagCrXA2DyglQsf3CQ+shlhTR0EXcDCVFh85wa9/I0E6VG1q+ba4VOzUkVGmNcilxRm1stTlvLqmNVpxNmDXKhpJNt0d+4SrwW4/UocerYpjIFpDhytqdL7qMjmBcuytimp4jgvv0IE+rKBLVUoM/VuhDCtQqp9uO334W1CDqYNa5kVrmS+UMfc37q2nc7sqYmZ6drXo2cXTf+YGMzSFu/WN9v+FW9X69kUroCkddqKR98Tny1UhBC/XtPUMrsXE5MLjE23cVdyt627mmvpRbkPg65GrJqX2fY9GoD+qtTGNfY17yqCN553p+XVDRykc2Ak6vUVqipClhcE7ZbVZeybNJMqADbHk761dXlzHodTGhqr0s1sfVRupuFl9A9PDw8dgi2XEJ30JGirg6jTp87NkbRVssLRKLqtBKrRfoVrSjCJcE1F0sr8qvr0m5Gg/Z0owAQYZKzq0fyKXT3EkFZq4o07KLKykzqlVflms0aSYI1FXHWYs1CC1YuRa8rcBFURaKq2lpb/wCQZKmlV0mpxUq7hH7+orhg7h4l6b63T9wnXVX5WFzmvMC1MO8+fBAA8MbBc+ExlzNH5/RwWo9piBYj0iCTeh3JNOWuxVJKBCIhuShC54kaU7Uuu7tIizp/SfK7vHaWtLkx5bboyrjWK3S/kypP6yrniJlVxNZykebQSUJHB8lbR1rKaddKYvH5Ko+MK4AiLo3r16+ThL5Zqf1KZG6HkYXQUn4nF0z3ObDr1yXso6k0ET4/kxESenQXPV+a30+xC2GRNeXuLnEYiLi6rpzSFgBsnSTjulq2k+eIBH/yez9aN6ZwREqJCdg1t6V8gV3a6yS/RzIqSDvKvSwqt+AUp0QeHFbOD8MjPAfSdvUtaLJm2x5V7klRDw8PDw/GlkvoYaGIDrZD50oIAAmu4n47FzCYnRZ3xyKfd2FS7Mgldj+MpETCXFmh81yVdJ0oPyzfpSTSCEuWGRXo4j4723GgAl6chK6LUzj7e7UmEnXVVQiv0XfLqyrrXYPGduyBu8O2LhYP3nrztbBtdP8oNHI50SxchsS4qg5w7B7KNBmF2MnfPkPn3X14HwDgEx+UrHTf/CeSblyif41O96pTQs5OBRfCUmTKOOps5nX+26Oyzg3
0kcQzvSouX022j3/wkOSDefMU5XV5/cQJ7l/mXiiSFlVU6xxPieS3Edql2vXZ8Tqfd8UeAQBBW6bHa5PQHbQEfSWbuZaq156v+3X5YOouV0ywXgPoFCRlgytI6HU55jSRlnYFjdBeNKps4eoqSb3lCo1naEi0sARzJ82GjKPBonZFPV+X56mPTC9pA5mUiNezs6Td6Xozbj1MUwKhoiH3BR6PaKVlvmZvj9Y2aJ/uGpeMjelMF8/TvWa16yj1oYvsdAoku1Z4Cd3Dw8Njh8C/0D08PDx2CK5qcjHGpAA8BSDJ53/VWvs7xpj9AL4MoA/AiwB+2Vq7vkDjVRCqOx1UTU2KFjlisNWk83aPHgmPzUaJ3GsojeXSFEWUrhQkGjTHaVrrnGOhrNS0dI4jyKLC0LSY8GlzzVozTBOV38QEm0aS2g2xz6VAFRW2zqaWKqeJNerY3XeTaWRkTIifl16iSLa6IqDWmlzGxkTVe/GVZwEA80sSWeqqyT/6/ofDtgKTOgsrpNbu3yORlNkMETnJpHLjXFnguShV2vF8rqEt/WsHMwyf0Gh1MGe48yNy3+ug+6HNFIfGSb11dWYB4AcvUn3UizPk0haLquIenAI1EUuoto23qnMrbHeZYxODNpfYtj+bgDOrqBZzdZOLRsdCG2uOXQ2dznOmkIDJX31KWEdXXVJMjhubXGo1MXG51L6637OnyUTao+rLRticVmXzx3cmzkp/TICurEga5MIyfdYR3lWO2gyjswN5zuP8fDfUXatyv01NfLMbYrLJ+YUSsidzHM29e3xX2DY4QI4ILh04IKbGBq9tU7lKOstTu9tip1w514bNSOg1AI9aa48BuBfAR40xDwH4PQB/YK09DGAJwGdveDQeHh4eHteNzZSgswDcT2Kc/7MAHgXwr7j9iwD+M4A/vtYBtEKJQH6pXLCR/sVqMFlTrnGwT1PEhV0j9Es5JBwnpucp6CSvfjErJSLHDLsnuVwPgLg+uhJwNA52c4wJg+IkTLtONNUfO5EbIjHGOWdJ/yC5NR06eDg8tszBDc8cl8IcNqDf3e5+HVjUjnMXpSTfIpPEl5S7ZSpL0tC7fuLBsO3goTtptCx1zs1KoE4iQmNMqwIXs0vtrpI8OvX/zoEmzQ4l1LQk4YoUOBI6oTJeuqyTU4vicrjSTZLX5KzKwJggqWnvKBFRTcV6xZOkmZXKItlFrqBMuvwkEZUV0YRumYosdIFCHcrZdQq8Eal6c6ToZl0UNyvdrx+HwBGOsdCFVLscmnVzCVyiHiuvEJUyhQ4Fcn6VJd64urczc/SFly5IcYoYr2WUJeLpadHCYhEeo8rZ5DSLptL4rKHz5pboXRG0hOx0eytQRUacW3JcVZjIsrbd3UV/08q3Mp93AUPiRpzlwjfan1qKhfBY1fvMdpClYx3KD14rNmVDN8ZEuUD0LIB/BHAawLK1YfmbiwDGNvju48aY48aY486H28PDw8PjncemXujW2pa19l4A4wAeBNCpWnFHA5619gvW2vuttfdr+5KHh4eHxzuLa/JDt9YuG2O+B+AhAD3GmBhL6eMALl/xyxsNIObyLSizBquE2uQSYZ/qKqvBtZQiEwxJ/kPDkv/kwfsfAACcP3shbFvkivN19o8tlkRjiDJZ4ggSAGhxboVUWvxY44k1xQ/aVPBwBusnqmp5OlW0ycSPjjgrrbL/vFLdXOL9llIT9+BgW/cnT58MP7ucEIMDQpx299HnmXmZXz/76cYsXfPsBYk2LTABWlAFMfQcHGScG+cdaTPDsBraUGaYvi4ikZ2/vVFE2z6OvAtiYvopNeg+ZiKi8ubydF/qPPdWXUiyLMcOVFVbxLanH9ZwvtVt5o3Q73p9BGUndPIN7+QTvtax/Woml834w3fq42pjdN+QMa5PHawRC8/bWCbUS+DiGaLKXJLn2ImZOSHv62ya6cSyR6NkJos0lGnCWX7a7os7P7puhAFHYifi0prLcr6gnDgzZDj0OM1pdrNpdYxzuGgTSYwJ90AR+s2WS4nMe0e9z+qcgjdQhXqiEWeWvX7B96oSujFm0BjTw5/TAD4E4ASA7wL4eT7tMQBfv+5ReHh4eHjcMDYjoY8A+KIxJgr6AfiKtfabxpg3AXzZGPNfALwE4E+vZwCd3BbDPBH6J54/NjJMjIxLBNlihA5Wp0WavJddlmLqF7N7kSS7wFL/Zy8IkXjxEkWeavckF224WlI5Yup8fUfuKbfFyBWiJmG11MSZCZmMWVkVss4JpzqKdXGZCM1LMyJB3/uTQm4CwMgucTl06Wve/cD90scczfWpHzwZtnUnSWIYYVJ5YVmk98UlUrh0Rj4nwSyqCN6AXQJdhfquvETPVTi6TmdWvO0ARXe2VOGMPUN0rxK8lk++IoUDymDpvUcI4TOnaC4DgxJRmu2ma6xOkZtbsSC5X1aqtN7dPZJdM5taHwG7Fp32pMZmsyyuRaecKJsnNN350nYl198rjUNrxc45ISSoO2kT7SPh/jeObrRKo3RXqlfkvtc5X1FMRSOHBLortKHI2bA8nlWl7Xh/tmXv5P7irA0kVIm7NJel0xK6c1NNq2eulwthdHNuJ71Wbkh6bDV2gW5CxhYwORtV2mU47g7r24Fbv2ZsxsvlVQD3dWg/A7Kne3h4eHjcAvCRoh4eHh47BFuenOtK/rcaDbZFLDHptbQqBTG6+8kH+WCfqPtOHdq1S6K58j1Emp6fJNPF+O79Mo4YERFnFbkY58T4ukaoS2tb42hTreol2b/cqcVt6GBycT60aZUov1Fnv9pACDyn1F6p5uDPfexj4eez5ylyNq1SlZY4cnJuQaqST7NPf7KL0uwuFoUkTqSIcLxz/51hW2GRom9toJKgsarp6qnuHd8rx7j4QGlFSN8PPXgPAOCRh46FbT98jvyQJy9T/7cdkPtS5Ui9S1Nihjl/gRJxdfWoSL1dVEHecmriZkPMZMUKrVssJuv3nne/BxuhExnZKaHVlSBpa9eTdbrNWRU7+aiLiWF9GtpO6JykyxWukD6cubDTM+ccEaI6EVcn88AmTESFgjyjzrc/plLUVqtkaoxEZGxxrhAh90Dm5Eya2vyRZHNKUsWUJNjUIg4XMqZoxCXcUwUxXO1PlbI6wt9JMSmql9a03HXi6nxao1ZDzEwtNu22OPlYEKh15HdEs6GcJZS//PXCS+geHh4eOwTm2pP0Xz9GR0ft448/ftOu5+Hh4bET8PnPf/4Fa+39VzvPS+geHh4eOwT+he7h4eGxQ+Bf6B4eHh47BP6F7uHh4bFDcFNJUWPMHIASgPmrnXuLYwDbew7bffzA9p/Ddh8/sP3nsJ3Gv9daO3i1k27qCx0AjDHHN8PW3srY7nPY7uMHtv8ctvv4ge0/h+0+/k7wJhcPDw+PHQL/Qvfw8PDYIdiKF/oXtuCa7zS2+xy2+/iB7T+H7T5+YPvPYbuPfx1uug3dw8PDw+PHA29y8fDw8NghuKkvdGPMR40xbxtjJowxn7uZ174eGGN2G2O+a4w5YYx5wxjza9zeZ4z5R2PMKf7be7W+thJc5P
slY8w3+d/7jTHP8vj/2hizcT22WwDGmB5jzFeNMW/xvXjPNrwH/4H30OvGmL8yxqRu5ftgjPkzY8ysMeZ11dZxzQ3hf/Bz/aox5l1bN3LBBnP4r7yPXjXG/F9XjY2P/RbP4W1jzEe2ZtQ3hpv2QueKR38E4GMA7gDwi8aYO27W9a8TTQC/Ya09Cqqj+is85s8BeMJaexjAE/zvWxm/Biob6PB7AP6Ax78E4LNbMqrN4w8B/IO19nYAx0Bz2Tb3wBgzBuDfA7jfWnsXqIDPp3Fr34c/B/DRNW0brfnHABzm/x4H8Mc3aYxXw59j/Rz+EcBd1tp7AJwE8FsAwM/1pwHcyd/5n/zO2la4mRL6gwAmrLVnrLV1AF8G8MmbeP1rhrV2ylr7In9eAb1IxkDj/iKf9kUAP7c1I7w6jDHjAD4O4E/43wbAowC+yqfc6uPvAvA+cIlDa23dWruMbXQPGDEAaWNMDFQFeAq38H2w1j4FYHFN80Zr/kkAf2EJz4AKyI/cnJFujE5zsNZ+mwvbA8AzoAL3AM3hy9bamrX2LIAJbMOKbDfzhT4GYFL9+yK3bQsYY/aBSvE9C2DYWjsF0EsfwNDG39xy/HcA/xFSJ6MfwLLa1Lf6fTgAYA7A/2az0Z8YY7LYRvfAWnsJwH8DcAH0Ii8AeAHb6z4AG6/5dn22/y2A/8eft+sc2nAzX+idSpxsCxcbY0wOwN8C+HVrbXGrx7NZGGM+AWDWWvuCbu5w6q18H2IA3gXgj62194FSR9yy5pVOYFvzJwHsBzAKIAsyU6zFrXwfroTttqdgjPltkEn1S66pw2m39Bw64Wa+0C8C2K3+PQ7g8k28/nXBGBMHvcy/ZK39GjfPOJWS/85u1fiugkcA/Kwx5hzIxPUoSGLvYdUfuPXvw0UAF621z/K/vwp6wW+XewAAHwJw1lo7Z6lk/dcAPIztdR+Ajdd8Wz3bxpjHAHwCwGes+G1vqzlshJv5Qn8ewGFm9hMgAuIbN/H61wy2N/8pgBPW2moZthUAAAFfSURBVN9Xh74B4DH+/BiAr9/ssW0G1trfstaOW2v3gdb7n6y1nwHwXQA/z6fdsuMHAGvtNIBJY8xt3PRBAG9im9wDxgUADxljMryn3By2zX1gbLTm3wDwr9nb5SEABWeaudVgjPkogN8E8LPW2rI69A0AnzbGJI0x+0EE73NbMcYbgrX2pv0H4KdBzPJpAL99M699neN9L0jtehXAy/zfT4Ps0E8AOMV/+7Z6rJuYywcAfJM/HwBt1gkAfwMgudXju8rY7wVwnO/D3wHo3W73AMDnAbwF4HUAfwkgeSvfBwB/BbL3N0DS62c3WnOQueKP+Ll+DeTNc6vOYQJkK3fP8/9S5/82z+FtAB/b6vFfz38+UtTDw8Njh8BHinp4eHjsEPgXuoeHh8cOgX+he3h4eOwQ+Be6h4eHxw6Bf6F7eHh47BD4F7qHh4fHDoF/oXt4eHjsEPgXuoeHh8cOwf8Hjr7i8zYtPj0AAAAASUVORK5CYII=\n", 132 | "text/plain": [ 133 | "" 134 | ] 135 | }, 136 | "metadata": {}, 137 | "output_type": "display_data" 138 | } 139 | ], 140 | "source": [ 141 | "def imshow(img):\n", 142 | " img = img / 2 + 0.5 # unnormalize\n", 143 | " npimg = img.numpy()\n", 144 | " plt.imshow(np.transpose(npimg, (1, 2, 0)))\n", 145 | "\n", 146 | "\n", 147 | "# get some random training images\n", 148 | "dataiter = iter(trainloader)\n", 149 | "images, labels = dataiter.next()\n", 150 | "\n", 151 | "# show images\n", 152 | "imshow(torchvision.utils.make_grid(images))\n", 153 | "# print labels\n", 154 | "print(' '.join('%10s' % classes[labels[j]] for j in range(4)))" 155 | ] 156 | }, 157 | { 158 | "cell_type": "markdown", 159 | "metadata": {}, 160 | "source": [ 161 | "### Convolutional net architecture\n", 162 | "![image](images/tLKYz.png)" 163 | ] 164 | }, 165 | { 166 | "cell_type": "markdown", 167 | "metadata": {}, 168 | "source": [ 169 | "### Define a Convolutional Neural Network" 170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "execution_count": null, 175 | "metadata": { 176 | "collapsed": true 177 | }, 178 | "outputs": [], 179 | "source": [ 180 | "class Net(nn.Module):\n", 181 | " def __init__(self):\n", 182 | " super(Net, self).__init__()\n", 183 | " self.conv1 = nn.Conv2d(3, 6, 5)\n", 184 | " self.pool = nn.MaxPool2d(2, 2)\n", 185 | " self.conv2 = nn.Conv2d(6, 16, 5)\n", 186 | " self.fc1 = nn.Linear(16 * 5 * 5, 120)\n", 187 | " self.fc2 = nn.Linear(120, 84)\n", 188 | " self.fc3 = nn.Linear(84, 10)\n", 189 | "\n", 190 | " def forward(self, x):\n", 191 | " x = self.pool(F.relu(self.conv1(x)))\n", 192 | " x = self.pool(F.relu(self.conv2(x)))\n", 193 | " x = x.view(-1, 16 * 5 * 5)\n", 194 | " x = F.relu(self.fc1(x))\n", 195 | " x = F.relu(self.fc2(x))\n", 196 | " x = self.fc3(x)\n", 197 | " return x\n", 198 | "\n", 199 | "\n", 200 | "net = Net().to(device)" 201 | ] 202 | }, 203 | { 204 | "cell_type": "markdown", 205 | "metadata": {}, 206 | "source": [ 207 | "### Define a loss function and an optimizer" 208 
| ] 209 | }, 210 | { 211 | "cell_type": "code", 212 | "execution_count": null, 213 | "metadata": { 214 | "collapsed": true 215 | }, 216 | "outputs": [], 217 | "source": [ 218 | "criterion = nn.CrossEntropyLoss()\n", 219 | "optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9)\n", 220 | "\n", 221 | "# Decay LR by a factor of 0.1 on every epoch\n", 222 | "exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)" 223 | ] 224 | }, 225 | { 226 | "cell_type": "markdown", 227 | "metadata": {}, 228 | "source": [ 229 | "### Train the network\n", 230 | "\n", 231 | "Now, we will be training the network defined above on CIFAR-10 dataset.\n", 232 | "\n", 233 | "We will train the network for num_epoch times (defined above)\n", 234 | "\n", 235 | "- We fetch a batch of images and labels from dataloader\n", 236 | "- We feed it to CNN network for forward pass\n", 237 | "- Based on the output of forward pass, we calculate loss/error\n", 238 | "- Then we calculate gradients of loss w.r.t. the parameters of the network\n", 239 | "- Finally, we update the parameters based on the gradients using Gradient Descent algorithm" 240 | ] 241 | }, 242 | { 243 | "cell_type": "code", 244 | "execution_count": null, 245 | "metadata": { 246 | "collapsed": true 247 | }, 248 | "outputs": [], 249 | "source": [ 250 | "for epoch in range(num_epoch): # loop over the dataset multiple times\n", 251 | "\n", 252 | " running_loss = 0.0\n", 253 | " exp_lr_scheduler.step()\n", 254 | " for i, data in enumerate(trainloader, 0):\n", 255 | " # get the inputs\n", 256 | " inputs, labels = data\n", 257 | " inputs, labels = inputs.to(device), labels.to(device)\n", 258 | "\n", 259 | " # zero the parameter gradients\n", 260 | " optimizer.zero_grad()\n", 261 | "\n", 262 | " # forward + backward + optimize\n", 263 | " outputs = net(inputs)\n", 264 | " loss = criterion(outputs, labels)\n", 265 | " loss.backward()\n", 266 | " optimizer.step()\n", 267 | "\n", 268 | " # print statistics\n", 269 | " running_loss += loss.item()\n", 270 | " if i % 2000 == 1999: # print every 2000 mini-batches\n", 271 | " print('[%d, %5d] loss: %.3f' %\n", 272 | " (epoch + 1, i + 1, running_loss / 2000))\n", 273 | " running_loss = 0.0\n", 274 | "\n", 275 | "print('Finished Training')" 276 | ] 277 | }, 278 | { 279 | "cell_type": "markdown", 280 | "metadata": {}, 281 | "source": [ 282 | "### Test the network on the test datset\n", 283 | "\n", 284 | "We will check this by predicting the class label that the neural network outputs, and checking it against the ground-truth. If the prediction is correct, we add the sample to the list of correct predictions." 
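One caveat about the training loop above, worth keeping in mind when reading the accuracy numbers that follow: `StepLR(optimizer, step_size=1, gamma=0.1)` divides the learning rate by 10 after every single epoch, so over ten epochs it falls from 1e-3 to about 1e-12 and the weights effectively stop moving. The standalone sketch below is an addition for illustration (it borrows only the hyperparameters above, and uses the 0.4-era convention of calling `scheduler.step()` at the start of each epoch); printing the schedule before a long run makes the decay obvious. For a ten-epoch job, a larger `step_size` or a milder `gamma` is usually the safer choice.

```python
# Added sketch: print the per-epoch learning rate produced by the scheduler
# settings used above. The dummy parameter stands in for net.parameters().
import torch
import torch.nn as nn
from torch.optim import lr_scheduler

dummy = [nn.Parameter(torch.zeros(1))]
optimizer = torch.optim.SGD(dummy, lr=0.001, momentum=0.9)
scheduler = lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)

for epoch in range(10):
    scheduler.step()
    print('epoch %2d  lr = %.0e' % (epoch + 1, optimizer.param_groups[0]['lr']))
# epoch  1  lr = 1e-03  ...  epoch 10  lr = 1e-12
```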
285 | ] 286 | }, 287 | { 288 | "cell_type": "code", 289 | "execution_count": null, 290 | "metadata": { 291 | "collapsed": true 292 | }, 293 | "outputs": [], 294 | "source": [ 295 | "# Quantitative Analysis\n", 296 | "correct = 0\n", 297 | "total = 0\n", 298 | "with torch.no_grad():\n", 299 | " for data in testloader:\n", 300 | " images, labels = data\n", 301 | " images, labels = images.to(device), labels.to(device)\n", 302 | " outputs = net(images)\n", 303 | " _, predicted = torch.max(outputs.data, 1)\n", 304 | " total += labels.size(0)\n", 305 | " correct += (predicted == labels).sum().item()\n", 306 | "\n", 307 | "print('Accuracy of the network on the 10000 test images: %d %%' % (\n", 308 | " 100 * correct / total))" 309 | ] 310 | }, 311 | { 312 | "cell_type": "code", 313 | "execution_count": null, 314 | "metadata": { 315 | "collapsed": true 316 | }, 317 | "outputs": [], 318 | "source": [ 319 | "# Qualitative Analysis\n", 320 | "dataiter = iter(testloader)\n", 321 | "images, labels = dataiter.next()\n", 322 | "images, labels = images.to(device), labels.to(device)\n", 323 | "\n", 324 | "# print images\n", 325 | "imshow(torchvision.utils.make_grid(images.cpu()))\n", 326 | "print('GroundTruth: ', ' '.join('%4s' % classes[labels[j]] for j in range(4)))\n", 327 | "\n", 328 | "outputs = net(images)\n", 329 | "_, predicted = torch.max(outputs, 1)\n", 330 | "\n", 331 | "print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]\n", 332 | " for j in range(4)))" 333 | ] 334 | } 335 | ], 336 | "metadata": { 337 | "kernelspec": { 338 | "display_name": "Python 3", 339 | "language": "python", 340 | "name": "python3" 341 | }, 342 | "language_info": { 343 | "codemirror_mode": { 344 | "name": "ipython", 345 | "version": 3 346 | }, 347 | "file_extension": ".py", 348 | "mimetype": "text/x-python", 349 | "name": "python", 350 | "nbconvert_exporter": "python", 351 | "pygments_lexer": "ipython3", 352 | "version": "3.6.5" 353 | } 354 | }, 355 | "nbformat": 4, 356 | "nbformat_minor": 2 357 | } 358 | -------------------------------------------------------------------------------- /1-cnns/3-Weight_Initialization.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Training CNN on CIFAR10 Dataset [1]\n", 8 | "\n", 9 | "We begin with building a CNN architecture for image classification task on CIFAR10 dataset. 
\n", 10 | "\n", 11 | "In this first part of the tutorial, we will understand how to arrange the different architectural components of CNN network, defining the appropriate loss, training the network using backpropagation and finally testing it on the test data.To make data loading simple, we would use the torchvision package created as part of PyTorch which has data loaders for standard datasets such as ImageNet, CIFAR10, MNIST.\n", 12 | "![CIFAR10](images/cifar10.png)" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 11, 18 | "metadata": {}, 19 | "outputs": [ 20 | { 21 | "name": "stdout", 22 | "output_type": "stream", 23 | "text": [ 24 | "torch version: 0.4.0\n" 25 | ] 26 | } 27 | ], 28 | "source": [ 29 | "import torch\n", 30 | "import torch.nn as nn\n", 31 | "import torch.nn.functional as F\n", 32 | "import torch.utils.data as Data\n", 33 | "import torchvision\n", 34 | "import torchvision.transforms as transforms\n", 35 | "import torch.nn.init as weight_init\n", 36 | "\n", 37 | "import matplotlib.pyplot as plt\n", 38 | "import numpy as np\n", 39 | "%matplotlib inline\n", 40 | "device = torch.device(\"cuda\" )\n", 41 | "print(\"torch version: %s\"%torch.__version__)" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": 7, 47 | "metadata": {}, 48 | "outputs": [], 49 | "source": [ 50 | "DATA_ROOT = '/tmp/data/lab1'\n", 51 | "\n", 52 | "# Hyper Parameters\n", 53 | "num_epoch = 10 # train the training data n times, to save time, we just train 1 epoch\n", 54 | "DOWNLOAD_CIFAR10 = True # set to False if you have downloaded" 55 | ] 56 | }, 57 | { 58 | "cell_type": "markdown", 59 | "metadata": {}, 60 | "source": [ 61 | "#### Dataloader\n", 62 | "\n", 63 | "For efficient training, we need to make sure that our code is efficient enough to send data from RAM to GPU and vice-versa. For some standard datasets like MNIST, CIFAR-10 etc., we already have some well structured dataloaders. In this tutorial, we will be using the CIFAR-10 dataloader.\n", 64 | "\n", 65 | "For more you can visit the following links:\n", 66 | "\n", 67 | "- Existing dataloaders - _\n", 68 | "- How to write dataloaders for your custom dataset - _" 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": 8, 74 | "metadata": {}, 75 | "outputs": [ 76 | { 77 | "name": "stdout", 78 | "output_type": "stream", 79 | "text": [ 80 | "Files already downloaded and verified\n", 81 | "Files already downloaded and verified\n" 82 | ] 83 | } 84 | ], 85 | "source": [ 86 | "# We can perform different data augmentation\n", 87 | "# techniques to increase the size of the dataset and make your model more robust\n", 88 | "transform = transforms.Compose(\n", 89 | " [transforms.ToTensor(),\n", 90 | " transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n", 91 | "\n", 92 | "# NOTE: PLEASE DON'T CHANGE batch_size and num_workers here. 
We have limited resources.\n", 93 | "trainset = torchvision.datasets.CIFAR10(root=DATA_ROOT, train=True,\n", 94 | " download=True, transform=transform)\n", 95 | "trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,\n", 96 | " shuffle=True, num_workers=2)\n", 97 | "\n", 98 | "testset = torchvision.datasets.CIFAR10(root=DATA_ROOT, train=False,\n", 99 | " download=True, transform=transform)\n", 100 | "testloader = torch.utils.data.DataLoader(testset, batch_size=4,\n", 101 | " shuffle=False, num_workers=2)\n", 102 | "\n", 103 | "# classes in the CIFAR-10 dataset\n", 104 | "classes = ('plane', 'car', 'bird', 'cat',\n", 105 | " 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')" 106 | ] 107 | }, 108 | { 109 | "cell_type": "markdown", 110 | "metadata": {}, 111 | "source": [ 112 | "Let us show some of the training images, for fun." 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": 9, 118 | "metadata": {}, 119 | "outputs": [ 120 | { 121 | "name": "stdout", 122 | "output_type": "stream", 123 | "text": [ 124 | " dog plane frog horse\n" 125 | ] 126 | }, 127 | { 128 | "data": { 129 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAB6CAYAAACvHqiXAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJztfWmQXNd13nf79TY9+wZgZrAMQAAkQZAgKYqrFou0TGqxKFsyo8W2UlEVnSqnZLtcieX4h6OqpMqupJxYVY5ilu2YShzJiqxIsi1ro6idoghuAAkSBAgMgAFmMINZe6bX9/rmxzn3ndPTPYMBQGEw4/tVkdO47/V7d3uvzznfWYy1Fh4eHh4e6x+Jte6Ah4eHh8cbA/9C9/Dw8Ngg8C90Dw8Pjw0C/0L38PDw2CDwL3QPDw+PDQL/Qvfw8PDYIPAvdA8PD48Ngit6oRtjHjLGHDXGHDfGfPKN6pSHh4eHx6XDXG5gkTEmAPAagHcCGAXwDIAPW2uPvHHd8/Dw8PBYLZJX8N07ARy31p4AAGPM5wE8DGDZF3oul7NdXV1XcEsPDw+Pf34YGxu7YK3tv9h5V/JCHwJwRv17FMBdK32hq6sLjz766BXc0sPDw+OfHz71qU+dWs15P3NS1BjzqDHmoDHmYKFQ+FnfzsPDw+OfLa7khX4WwDb1763cVgdr7WPW2justXfkcrkruJ2Hh4eHx0q4khf6MwD2GGN2GmPSAD4E4KtvTLc8PDw8PC4Vl21Dt9aGxph/A+AbAAIAf2WtfflSr/Nn//jnAIDAmrgtm21xd1H3qwEAEgk6L92alYsE1GaCWtyUZO+dRJCJ2xKpNACgu42I2VyqJT42s7gAAJjLz8RtlWK0tBuoGWoLK/S3Gsk9Df881iol6VrOXUPGZ0vcX25KtsixRMJdKyX3jKoAgFK+Grf9xrv+NTROvP6aukbAF5HzA0tjN9qriW8bRdxHI/2I0Oj95DyiAnd9AAFfJAyr3G+RERI8IUamCMbwdXkeeYT0f3dLdX13T+2NZXgvGNVHd4tw6bUAJFluCfSc1ui8nXtvbBjnJz7xCQBAtSrz16wfQUD9TPCYm3mMufvozwk9R4nlZSp3vr5GPKakPLqG161ZH913V+p3s+/qe7o2fb67p+7Hpz/96bo+zmAw/lwJ6ZnIdIhTxPatw3SvajlumxgfBQDk5y4AAErFYnysVKZrhJB1yWRaAQA9fUNx29D2nQCAvr5NAIBUUt4B5UoIAJhdyMdti4v0uTQvbQuz9B4Iy4s0XjSugYbb/4WSjGW+QP0thTRXPZsG4mN9W3YAANK5trgt4Hu0nf/eivdaCVdCisJa+zUAX7uSa3h4eHh4vDG4ohf6GwHLEm6kulKpUFsgghpSbWluY2koUNIFS2BpK1J7RwtJ37uHd8dtN15H0tj2ge0AgFyL2PQPHTsMAPjmj78dt83UFriPIt2EtQoAIJmiPpZLofQxR+dlO9T4kjSIakUk0iBLkqJJUVuL/EgjmWRpK1JScEDXzbQsv1ym1h5/bm+jDlglyYCvVwtDLIVx91QSb5XPq2nJjseSVAsTsBQeJWksRkn51kl5SrgJkiy1q8aaDfn+rLk0kdBrNS2hW3eDuC1kLcPUIr6W7I/A0LxlM7I/JiYa6J4YLbx3UimR6JtJvw7NpOylUjONoVHKc+ct/au/u9L3lvvu0ntear+bSejNoCX0pQgjkVZrbn2gtEB+9quViupvVHd/9706qP5Ebr2bzHOzeUOTvWPdPWyo2ip8uru/nO/mLVDPQXwJdV2T4Lnh58Yqa0HNNlmXxuW7ZPjQfw8PD48NAv9C9/Dw8NggWHOTSxCQWpswot4aVmVMWlSakDWTqMoqjdLEnIZ+/dCeuO0X738QAHD3rbfFbTsGiTjJMpGSSso933zLHQCAzrbOuO3vv0NOO+MTE3GbM7+kUvQ31yZ6UiZLbSYpqlsEUt8zSjWNoiQfo7GksvK72tJST5gCQNVSP1Ntyga1BMz3AgCS3Lcwkn44M0UipVVp+iuqo9w0nUryOcrUEZsRlPrJY0gmGo/VnJoayHWd+ciqBawxoZQM6J6adI3PUdpzLXSkqCaTHQHLxKNaW3eenqNEcnmS61LNFCtdQ2Ml08hqvrdc20rmINfWzETT7Lxm12jW76bmjCWIIjGluKsGgTwHzUxKS80l9b1x/2ocS90+5eNurRLKNJJI0L5TWxKBI+prsifdKidiEl+N1x1U85FgE3AdccwmnBTv56RyBFjaRwAI3gDx2kvoHh4eHhsEay6hwxGOShq37lcrqX5Z+cewxu5/USi/dkP95B71wfd/NG57880HAACVc4fjttMzxwAA3QMUD9XZszk+1pEjSfqRh94ft6VY0v7slz8vfSt
Qf9M5InyyVqYwzRJgMim/k5UKSYqZjPzCF8t0vFwikqSzQ67R1sGkb0LEyXzBEUriDrkUNSVdhI7QVPJNgj9r2cyRoLFbmpbi4g9NpEmj5IClkp2Sttw36yXCRkkwlqSctNVMSq37bOr+0nfpGkl2TdSEFYKQ+yFzZGuXxkA1la6Nu5at+6uPNSUvVb/dGqws0Wu5q3H+lkrVzdwLmxOmy5OdF5PoV9Iy4utbIeUNE9O6b+7+euwhP9dR0z3QqL26uUkorc4k6vtmm7pgKq3RrZUmSmOy1ZH9WgNwWk+dPy71XzfVaN+1BKwVlGblngFZC5JKY0kkLi9RooaX0D08PDw2CPwL3cPDw2ODYO1NLqyihFUhUBKsDie0A3OGumqY2WpPtcaH3vXzHwAABJ2SWubx//MF+jD2TNy2pYfU8VtvvwUAcOOtt8fHsq1b6TZpue7b7/kFAMDzr7wUt7188hAAoIXdvpM1pTKx7pbIibkkYJUt0yq/ncV58ocOS3QsqzIKZ7vJlDM9L+aVbJrukU6voObqyEjjCE19gru/9qddEu2n1fgm6rhTkU0z04wzBWgzj4t6Vd12ZpW0Ii0t98n5JetrOH9dU0dAcX9r2uRS3x9t1qgxOWzroljl/g1wqq8i3g3PX7UsRLNb71Ta7U053zbxhXbjrNXNH0ffssVAr1kzH3xn7tI7IY7NiNe27iJ0jbrN4NZRk8/LR5su7Y/GagleN/V6X7kIaKvI+9jEITeQ82MziTYpNZKRgfP/Nks2hepHvXmRfe91P9hcEpPKdfd0i6WvwbEzVjZNFz+vC1PkVFFVJHGazcmJlJKpo8YYkUuFl9A9PDw8NgjWXEK3jghDo3Sof83dL2rERMt1uw7Ex7btpjTsTz8n+UwO/fAJAEBHUnJBzBW6AQC1LEUJHh+TX8y911NE6d69N8Vt7b1EXDz84AfjtvDrlONhungOABAoKTEmSbISEdbeQ/dPKwmir62HrhVx7ogWIY8WWSzMJJWLU5LmoVxZ3lXMKAIqaEaAOlJHc4VOsrMuulLNNxO7tUhLZc6dT9/YieFO6msk6xRHjFR8XeUiFie1oT/lUOVQifukSCmWwDRZuFR41HtHXOZkRpIrEFC1eJwiMb10mDSzkydOxG27du8CAPRvoroDCXV9lz8knZLo1GwrRSZHdWI4u67G5Kgi65gYT6ZF4wtZyqsnDV10p/uXnivT2Kb0mHjM7pmLNaLlo0gv1hbD6mvwmumxM3kfhRJRajm61F3VKim4GjnCVNbFTU2iidTuJPpIrbXlZ9Qq7cRt8YpytHDajpPGa2osNdbSCgvzcVuec7/MTI3FbT3czy05itzOFyVXTNZQKnEbSIR3GDaJir1EeAndw8PDY4PAv9A9PDw8NgjW3OTiSC+d2tQlqMq2SfKsRJrJD1YrBwYlKjRfofPm5sXP07BqOqeSYqUXSAXKH50EANhAVKaDR04DAN73gFRVuvU+MtEcOPC2uK1cmgYAfPm7n6V/R4syGON8oDWJRR0OlYrc30ssaCZLBOxiMBIfK50lFb0vK6l9wwypoTMTy5sJdASeS57VNLFRE//iZkSYM8No//Yg2egXHccDGBf5Jrdybr1GzYczZ1Rr2kd5SUKoWqM5pm7kjrCq6QRtzszUaE5wppD66EosiwC0jlFN9sKTT/49t4kJrxqdBwAcepHGEqq9lkrSnnRrDAAd3bTukbJS5Ofn6J5xamJ5DnI5Usd7envitm07Keait2eL9NfwXnGhALaRXLNWR0tyJLbRk+D6vnySsJUiTJtBm71cArhAp2jm5a4q5+0a7yfLaWgzah1b2L4Sqf3niMZaRdbF+X+7+QiVnTEydA2bqKg2vpZyGKi45Fkcop5QPuoz5ynF7/iZkbhtlk0tczMSVd46SGs0tJniXX585IX4WPdmikjfcte747YLJe+H7uHh4eHBuKiEboz5KwDvBTBhrd3PbT0A/hbAMIARAI9Ya2eWu8ZKcBJ6Uv8St5N0k1DJDSpV+uVub6VftrAmOWdn5khKzmSEjCyx9BapDLIV/rU1LBHUFPm2UKBf+H984ifqupQ+d99bJXr0jjt/EQAwMv46AOBHr3wzPpbLkXQdQcgPl3ckqyLZXKrXWpXGXFFSS3cnjaEjI+ObXKR+zmIByyFQc9U8F4k73phmtJnkFfHEpdOyRfr6SWNJpaRtfHwKAFAuVbgfMk4njeuI1bBJWl7n4ucKbejuNIugTDBRldBSp3PnY6kzrCNF2b1wlfqoc/s8MyY10Ls6HUEp8zw5SQTp0CaSmhMZOTY7R6T55Lys2ThrWNk2kdorFZJE56bp8dFpllNMqIaKmO7pI6lvcGBX3HbTjW8CAOzadQMAIJ1Ra8DTp7UZYcZVrp9Yw2os1rFSPpiVSFFbR8669VFELFzUt6xxirWzzT1EJA72SaH7wa1UIGLLdikUcfYCzdvkrGhTljWyJI+vjojlWyVCkdDLedLsF2ZUcZsSXS9cKPD58q6YYcm8OisEaKpCz3xgheBNsFYyxvsomhmPj5178Uka5+6b47ZkUsZ6uViNhP7XAB5a0vZJAE9Ya/cAeIL/7eHh4eGxhriozGKt/b4xZnhJ88MAfo4/Pw7guwB+73I6kM2RtJJMimtWpUi/hpmcdM+5c23bvp9bxMZ88tirAIBUKNJQlctaVVQBihpL6CGXFqsoR/8Kl6Y6qVwDSyFV1Cv33RC33XjnOwAA99/3KwCAhcp0fOz4JOWNMWpaDUtt7eKdhOoi/VKHJRpDxyaxkZYNl9myEljE9RbQ26siV5TpHqgPPmlWMsy5F4bKdu20hyKX+dL5LZy2sX2HlPZKZVijUCX2Nm/uBQCcP0+2w3JZuaA1tc2ztKfu5aRICQ7SUnmTjIexvVydFxcMcOfoaBKatzqtoMl1Hc6PkY30u088Ebe9+vIRAMBtd9wSt+3YugMAMHue3GADtZ+qXLoso3IU9W/ZxN2R/VGu0vFWzvWjc8wUC2ybVxJ6qUBS5GtHn1X9HQEAHD++E0B9Wb1cjjTa1pbuuK2tjTUtlStJeIx690X6vPw6rjb7ZOya2qQAio6Xu2H3MABgaytrqjnRZnq3kWTeP7QpbhvY3EfXCoRvy5dpHeZZaywUlAswb/9iWh7Icy/Q81g4KxqZ20iVeeLZSnPynJfmiYOrFefitlqVnolyVd43r43TefPJeb6kzOmFY8cBAO0/lj226y4KZLx4Hsvlcbk29M3WWqdvjAPYvNLJHh4eHh4/e1wxKWrpp3pZcccY86gx5qAx5mChUFjuNA8PDw+PK8Tlui2eN8YMWGvHjDEDACaWO9Fa+xiAxwBgcHCw4cXvilkktCsS5zgpRPIDsHcfFap46889AgB45ifPx8eOHvohAODGvZLLBUxsmYSYGBzRFxapG1Xl1lcokqqUEUtOnE9l8pxyRZog9emmLdcDAN7zll+Jj33uG6R6zxbFHTLI0hhae0WRWqiQOaONCbxiUfpYnmM3vawmV8gk0tcjxTcaTS5y/ZiYVLNdYTOTVQqdIyjn2XVOX2
NwkMxMu67bGbfNMKmzoMi0NJvKNm0i1Xd0VNTWmORUhG2zvjk3uziqsS5ast61EtDpUHXEJeoQ13MEYjW/jr5bwUPs6FHK3fPyy5LD59mDtN8WC2JueutbKEL51MgIAODADdvjY+kUqfTzRZnT0ZNEIF+YFfU9ze6pzjTY2ioFaYsF2gMpFSnat4WOJ9WcJjg969wc9ePQIbn+5CTtxWpFzn/gfnKV23WduP62xPV1G+d2pdqmK7kt1uXfWZIiGQBqbBYdHuiL29528zAAoDBCUd89fWIaMR1khpm6IGRkgqNp21QK6s183uZOes5KeXmPnHjpKABg4dS5uC03R6TokDIfzS+Q+bbI0Z2LitAs5cnsFamavWU22XZtkndQ2wCR5YVpWvfZSTEJR0Xq7/mX5T22bYDMm0H75XuTX66E/lUAH+PPHwPwlcvugYeHh4fHG4LVuC1+DkSA9hljRgH8IYA/AvAFY8zHAZwC8MjldsByIIFRrlbZViJC9u6UvCrveJCKVyS6rwMApLsvxMe6+0mC1sRIXy/96hfn5bzQEaUsYZZV5rxqlUke5XZXKpAYPH9Bfp2jBfo1PzlHfdyVFdJwext9LpSEHOvpoWCSQk2kphpIqujoJGmlIKejlqVf/URaCNCWDrpXLiPLVVX8DQAkVM4LJzRVdU4Ud1wFk7Rx4NbsLGsKJXG3LJdYmtCBRRyUkU6KGlNY4Fw1TLB1tAqJlWcpJ1DkWyrVSFBOzJIE4xSmXKu4bMauqFpCd8EniiyMw4maFIVo5loXrlDcYWKa9sz0gpBe3QPkUnaWCVAAmJ0jSS3JxN0LR1+X8ztp3Ws1yeUyv0j9Vp6MqM7SfupoJUm0UhYpeH6e9p8m1Gen6PyWFrludw+RnBETcqXi+fhYB583UZCxfP8HXwMAFEr3xm37biT3uVyO5l6Ts0J8XryohYYOa7Mu8E1nRSzR3tneJUFSWzgvzugMSdC1lCKJF2i+bVXU0662fu6vPMshP39OkytPyjvg0BP/BAA4e0qe6XQLrV+QF83azNLndEjPREoVl3EaZUXds7WfxrDppjfLmHkus8M09oFQrnHuMEnm0xOn4rbXDhHRfeN9d+FysRovlw8vc+iBy76rh4eHh8cbDh8p6uHh4bFBsOa5XNxPilX5Pm64/lYAwL33StraRDf51o7NkTrVu313fKyzg9RbU56K23IBqXMzI0KIzE6TaabCJg/lMh2rZ9Wi6MMFjvTKT4l6BvYDnpsmVfbsS5Kf4Y4MmYMye8WvfCagtKszVelH6NLhso0h1y6mkQJHDnZ0q3wprhhE6/I1RXVqWGsbI0VdutX2djGX9PaRqj4xyX7UJbnGyCmKgqxU5Z5DQ2RSSiuSrsJ+7Y4Abe8WUq+Lr69zyuTnSYU9e1ZMF+fHieS6MEVmgZ27ZG1376bPOmq4kCeVu1qSBVxaLV5HzlpLe8bURY8uX0xg61aKwty67bq4bZqJs+t2C0mcYELYBXemszL2fInmJalys0xyNKhRJqiA17bkcpcoVn6hQHtxZk729SaOWdARuUlO1VtgE+HYOZnb1nZ6Nno3bY3bToxQ3qJsi9hydvGct7XxGJQJT2rI6tiBi3tL13SsgSugob63yObQbELIZMN++y1DZEopT8rYp3lcxVlxUoj6hwEAm/bL3M8yCVl0Jr8FefbmzpGtsjwvJqiFWfIXn5uVXFDVCjsRpKi/LbLlkWJHizAjZq9tN1E676hNCN5ilVMj95L//HXDEt279waKZzj4jS/Hbbb36kSKenh4eHisA6y5hG751z9l5NcuYUlyOHZeyI/eVmYOA5JodN6WJOfGqCVF0m3bMgwAmBuTohdVJtNqLNzUFRpggkMXdHCZBBdVIvuFSZImg+Mk5Ux/89vxsX2//E4AwI63SQa17x1nAlb9+lc4N4sBjaGnU0jAckiahW0XybgtQVJZVSemWQItkbp8JrqKuCvvllKRi2mOWGxrp/v39UktvO5ucpHMtYjEOMS5NHS0X5H71MbMXV9fb3xsbpakoCMviPvf+DhpO5OTk3Gb0yQ6+Rpakh4/y4VEgsa8LcmgMdJRohp1FCkTzarSRjqzPMF34JY7AABTU0Jku6jNu+68NW6rlknbKHNkYlYF8la4aMPEpEiCnI4IC3m5bksL7YFayNkIVUGMzi6ay3NjInGf5vnYtlUk7tExduPjaTPqWTp3lqTVOdnCyLbSOk9dUH2rOq3OlbPT5f0a5b7YDXGFSNEgofck/dXFLI6NUPRt5W5xn4yq1KfCFI3zyPeejI8lORI3r7KqHi+QBvzwNpHyo0WSyDNVepYmzkpRkomxEQDAzKxo4o4gr3unZKnDM4t0z1Qg+2mojddsu0SQdw4MAwAm51TWx6Xl8VRRjdZBcnu+/Zd+VfrBGhlKp3G58BK6h4eHxwaBf6F7eHh4bBCsucnFRcEtzomJ4cVnXwQA7GsVAqpvmEiEIMF1CFWSKcs+2wmIH3rKkpki2SJkifOBTTCJpFNi1lytRmWGcWp+pKLmZl8gX9GWl8h/1ED07MUB6u+N3ULq3bH9PQCAuZqKoCzS50yNvtuiUtT2czEDK9of2gPq52JVGpeG5lqrvX45HW2tkfibm5UUoSH7xQ4NkSll/35RIZ05xkWTAkBbG5lmEorUQ4bGMMQmgHblNH3sNTJ39fcroojVYe0bPj1NJohshkxnKZWoTcpVaBPA8rUt4+uqU5w5Sn+t2dw4pHk/ve1t74jbBjjq79AhSYoVcbK3fTdR+tp5VdxgrsARmpPKZ5pJr+5e2ZPz7Ms+z2abKWUb6emmvbBz9964LcVT096m0kdz2teRk2RaSKq0wm1cv7akYiMQkNkjSMozV+GYjLi6vY5riCfz0tJGBWoRUkzau5qhADDHZHxVkZznj9Kc/ujrFKuYVeRlkKK+vfzyK3FbcYGud/tr98RtHVtoL06fPQkA+NqXPhcfGz1Hz22o0gkn2Ec+mZI5rTApWqpw4ZFITCm79w0DAGp79sdtF1K0d43yV3fJ7pzZMpWU59wV/whU3Ea1tCT8+zLgJXQPDw+PDYI1l9CNi0RMi4TpqmoHSlouzJKLU44rp+uIRyddGxWJlWBpL9cruRUyZyiPQ8S/lNrzKmTiTP2IIsGSc1CW65afPkjdDenXPPrAB+JjMzuJ6Cgq6W97N0Xg3V58X9z2akiRetMzRO6YrEgy/Z00vkpFfq0ri/xrXlmeyAsVYeo+R5H0I+DcJloyXVgkaXD3nmEAQJsqvBCx9pJQOVGKRa5UrrrRylXLqxz1d1qRnRVOy9vZKTloBgZIG6hWpb/ZLJF4rnJanbLBqMvl0qRtqRtdUpXkc2ep6aiLhFyKGkjq03vh5pv3c5tIv3/+2GcAAGdHKd3uO94ukZdzp44BABZV+uYiu1tuHZBUtjuGhwEAx04QCTg1JW56i5zLxRHUANDRRfujWBaJO8d5WPo309wOqJJ1w8O7uR8yPy9yPpOuLiHBe3tIi4oil/dGp66mMdRlY15CQjdDXUkNLsVolLY7fZ7I3MkzQgIOd
5M0aziq26hUw8+wxnf0pORh6eD01GfHj8Zt5Qw9ryeOPAMAKM7LnOZLdCwyqmSjc4gQIRzFIp1XYscInXZ6nknn7laZv0SlcT5chLSzQmjX4iTngUkp3+moqrSoy4SX0D08PDw2CPwL3cPDw2ODYM1NLi6pTq5TyMWOHEUktrWL6lhdJOIs4gi/lKpkYjgS0So13nIEnks2BEiK0CKr9NmM3DNMMFGqCaUMnZ+eEL/h6iITbL/0y3T+PW+Pj41zBOoxpTntZr/iWwd/IW7r5GRAh099BwBQSwthGoL0vpKK2sxz9OjUmCTPWrpwkYrGlGhJXZyTVMGpaRnLbJ4IpyKbvUbOiiq7mZOb7b5uh7oLXXd2RiUaY9NXfpquNaFSDc/PkElnIS/moxKrmBWVhjbJNS5Dl6a1GflWl4qVPocqcZgTTdKc/KuOOOU11T7TwQr+0+672v3ama8WCjL2LFv/Tp6hpFxvhSRV2nMDEZkjKp2wq75Ti2R8hQXaLK2cRrekok3b2+n8lhZZ7fxcno/Jeble2uMBmxG6esWk08JmtKQy63VxZPWdd7xZtZFZxz1CRsl6JuZJZb7jZG8ryITaqBWvWVXsGjPzROb+8OBP47YDt5PZcjFBc/Tlb30rPjZWpmtkMjL2dJbOm1Lrki7QPFQiMrX0bRKi/ugo7dOqqmPq9lGozB+u7m/VkpmxpFPrJmm+2yHviqhM46pU5OFPBc7UQucZFTcRuf2potXNCmbA1cJL6B4eHh4bBGsuoaeZ5AxSQh7ZFEeCGSXqch3L0AVd6RLuzKJZFWEYLBI51xaK21PSkROc7D+joitD/gWuKdKrg/uWtCqd6/tI0k7eeR+dX5OfWE5TghMJia4M2qlPO7MSvbe3n9zhNrdReuDRuR/Ex54//XUAwOKMEDlhwbkrKnfBJYhUhGs1JjRVLUWWl7ZsllSlJ1l6PPRtqmuoeGbs20V5TH7nt38zbutmEi3bIppNMkVz9OJBcjWdOCOpW13+/5KqtRnF5KVIIxG7RkaxhC6Ia4/qoF4nmRs5M5GgubHcFiopGHGhDdVWWz7qFi5aUklMLsL1/Pho3LZ1K9W2zOVoPrKt4jabydBey2WlbaHMleQrso4trCUuzI3zkNQa8yORScl8t7UT8alrt05foOfFEXLz85K7pLWdpPW0ikC9bicRpbfecpsMmecoZHe9qkpRCy6gkc7KM5cw1KdmqYkdjIqMhE3ydWXeK9zf189LetuvfOXvAQDP/ISK1rw8If3o2Un5nLKbh+O2WprW6sSUjK+9jyYu2UttvaFoLL3HeB2nVKpo59ZaVUVi2K0xxfs0DOT6mQ6K4C2r12exQnNeVvvOZf5N8fXV6wYVfg4qkU7Lu8KeXCW8hO7h4eGxQbCaAhfbAHwWVAjaAnjMWvunxpgeAH8LYBjACIBHrLUzy11nOUT8K56wIqGXCuTONHHuJ3FbeoDya2QDTuZfVNn02P6dCuXXvMPQL3GtTdpac+wSyG59STTmNWnvFImqo5NqX7fefEDOu57c16KQfpG1LdgYLhihpM8XQpLUJhPSj80VkkjynN8ll7s+PranjySfQ1NmJjohAAAfVklEQVSSIyZcpACMFuXaufS3XEtKoas8buSsgS0U1LLnegl6ynWRbfHr3/omAOD0KUm275L4t6rAB8dBaPvzkSPkLjY2xhKmzqHCXQpVqYMa55fRNv+IbbMuwCOl5IyIpfG6nCEsjaNOAuQcLiwhJdQxuzToCFpTaIQrvqH76PJ83Hij5HLZuZPs5C5zZEeb1Ep3LmtbByVPyY9PPAUAKM2LHfmee8mOPcAV7V995Xh8rMC5Pcpqsd/M58/Ni4TpMhjOc9uh51+Mj505TbxIf79k8qtFtCf1FIxzxssJzlWUycr8TXNOnj1798Vt7e2ugMfy81iXfydeM1nHZEBz2tYpgWcvv0j5XcbOkYaa6tgk9+wnPifVNSjXyJE2HLXK+KrtFOCX7iANuNI2Eh/rOkP7On9E9nqJ5zml/HE7mINb5H1nWsRFMZuj58aqsTj+J1R7xvE+LlOoVcdcsZ2opjOoXh0JPQTwu9bafQDuBvCbxph9AD4J4Alr7R4AT/C/PTw8PDzWCBd9oVtrx6y1z/HnPIBXAAwBeBjA43za4wDe/7PqpIeHh4fHxXFJpKgxZhjAbQCeBrDZWuvKb4+DTDKXjCpHvNVUlGcqRerZ1MRzcVsXuye1BHSbKK/cqgIiLTdlxGzTv4nMGZUeIRfbz9NwyxGr9koFj0msdqVa9VBOiFSfDC1kU0SC3dhsSvKrJByBorSuqREiCU+clUIYtfOUTvbsGZq+oS3innnjHiJ+9g//Ytw20UbRqSemJA1tg8lF5zphNTHXKu5aO3cO072Vq9/QZlJnP/zIrwAAfvSjH8bHdl9HyfiTinx26UBHz4h744ljpLo6Qlpfv+JquNa5FzbmWnGaa5KPJZUqa9n1UqdwdURpVBcpSveIXcSUyaXIRF+zKvTN4M5LqlDR3l4iwrq6uhvO7+sd4mOyd5wpovedA3Hb9ASZRJ575pm4bZZrV153A833+z8g+UEintNqVe9rWrMRZR5zkbajo8/wNYUUPXyY8p5s3y4mmhybD44dk9TSP37qRwCAjg661vAuqZV7+CW6RiKQvX7LzUyoLs/TI0jqvUN/EypCM8HFP9JpcSJIJOn+IedIau9R5pVWek7Kkaxj2jI52yZkf9hD5lnnQji7ICmd2/fTd3d0SvGS4AzNZXl0RPrLThiW8z4N7JE8Ry3s9lnR5ibef9pMtzTFcF1KZ04jbGqN7sZXglWTosaYNgB/B+C3rbXz+pgler1pDLAx5lFjzEFjzMFCodDsFA8PDw+PNwCrktCNMSnQy/xvrLVf4ubzxpgBa+2YMWYAjQkAAQDW2scAPAYAg4ODDS99wxJvEtoliiXdspBHo6NEkC4WKaigtUV+izpa6Vc9kRVyJZnlYgmRJLLP5Oi6rVwmKqV/zpxkkFZSlisnpSQIV0rL5YIwKSFRK4bOmzwlwSSHnvxHAEC/FXe3M2dGAAAtTLQFijQ8cZKkhfc8+K647c13/xIAoHRYxKFjK5RQcxJmZ5fkAEmxe9y5c5I3Y26OyK4HH3wQAHDvXXfIRVgybkmLVHZ2lDSKF54/HLdVi6ypONdRXeudywpmAi3G0XW1JO+CWBIsEwSqMIfTCvT5liW0tCpIkDD0ucx5d6rKVdJpGcmMZHHUZfSWwklKOi+Hm1OnPdJ5dLyvz+07lW+G3fRaVIGQvUxIv3rk+bjt4HOkfSW4xtmOXVKoocoVMfILIl3P5Wl8m7ZIgQuXbfHOu8mVtrQgc/XCCyR579kjZLjLR/Pii9KPp3/yNADJtXPmtJRdPDtOpO/gVimhdj0TpJmMkOZLEROhEOk0pdwn9+0nZ4NsuzxDE6OUIbGWoec31y+aQrnGOZhCVbiFg8asytBp2QGhXKXzSmVZgxK7MJY75RqtrOHM58WnY/4sfU61kUbUOSgBdhHnN9ISt3Uae0270i4pcKHz3kTOMqHLKPK6
XYHv4UW/amgn/yWAV6y1f6IOfRXAx/jzxwB85fK74eHh4eFxpViNhH4fgF8DcNgY4wzB/x7AHwH4gjHm4wBOAXjkZ9NFDw8PD4/V4KIvdGvtD1GflkHjgSvtgCtY0WJEfa5y9KXRKkpIqtXiAqmcnVlRo7oyXIiiIjb6hQXqclXlTHVadlcHnV+OhDQs1OhgiyJXku1MpgQSqRe5xPgcTVgLRcmZPU9mlYNf/7u47cRB8vHu7RCVcC5PFMTgJlLVSwXxUXeq/cSkqH9D3UTIvOOAqKbHfvC/oaHNA85kVVUpVkdOjgAARs8KmdbGPuYBn79JFaIosLnrgoriO3iQSLe8KsLQ6hL7u5TEdelDuU6mLpbAKmmTDLlxIQVNXgZMgOqiF7UmbE3IqUddBGVWVWRPs6lFm1wyK5hcnIrcjKSKNMHLMa0Li2QSyeXknhcuUG6R8XNjcduLL5B5ZWZa5tRxaLdw1Oa27aLa5+dpXwwpX3ZnCtP1L1PsM93VSebC1rTs67ExWu9SSWj0XI7NQaq/e68nn3pnapmcFFPl4FZyCpibUSaJ+Wm+ZyNJ7FBXspdNcsmEPEs3HrgdADBzQUj2CY7rSHRxgZo2uX6x3BghXOU8R1YZGxI12gsh1yctlS6o8+l9UI3k1RdxFGhRPcshm3fae2jslZQibm29iRAAqmzqC9X7xu1jl8fGqtTVtsppfCviDBL7qf8sTS4eHh4eHusDa5/LJa5QL7+6LUw6JFRWjyy7TCWdhFyRX/qeNP2K6yx9c9N03YL6zQqTLEl1ckEFCFFkDUls2Q6JOEuk3HkCx8e43CkLo0IyHvrBNwAAx56WKM9yntwmC3NKguYujbA0kmkRaaurm6SsUkUkwfwiSWq9nVKsYyl09GaaXfZmL4jL5vkq5bapqnwV6SD2JQMAJBVLvHCeJLQXnxN3yzxnT9REqVu2OA+M9uRi0qhmm7hjaXct6/ZAmv8KmRYEjoDSGgj91QU8nKSWYyk8m1V9dG6R6hqpFXKQONQV1eDPgSqc4Qo/FIs0VxemZC9McvGGlxSB/DxrOBNjIul+5NeIhrr7LiqOkVblCNtynDVTFRmxbjeqTekiEctclOHAbRLZ/MAvELn+5JPfkWtwRPOe60X6LXM+HZeB8+Mf/434WA9nYnzp8I/jtrkpIv5rm5b3VrYqH0yNc79Y5bYYcUWJ2XmZjwV2Me3oIM2vVBVNwflIaJfTKu/FhMq7UyvT/lmYo/0/NSNaablEezhRVcUmWLsLC2pPcubUXA9p6VXtNsv7TrscLhaoc04DALSGwsVllHIX8TijqnYOuHJ4Cd3Dw8Njg8C/0D08PDw2CNbc5BJwqJlWb1uzTFiplLCFRVJNWkGEz8y8qDanzpMppbdVSJ6AEz6FKmKwZzNFh5kMkUyFqhCgKSY+E0lV9KLqCBcxGdScqhSR6nbisCTnP/ZTSkNbzgsJE4aNdQIdWeLMNirXPjJZOv/5w5JgyY3lwH6JIlwJIavPOl9+jVVGq3LkljmV6dQUqbwd7eJT/NLzhwAAY6fPxm1pF4WpM9O65FnuPto31yXM0jYrvr9VCqYzI7jv6gRfzq9cX9fVC9WpWJPcN+f3rRNDOR/5Wijn18JmtCwfYzJUk6KyZnXJfQEIQTlyStb9pcMvA5C5BYD2djJdHLjl7rjtg4/8i7pr6DHF86fJNG5LqZS6rmCGi1JMqejehx4ik8ucIrIPHyYz0FNPHYzb8uzf/pGP/DoA4N573xof+9H3vgcA6O+XRFnz83S9Ykknl6rH5h5JpRwThMp0UePkcWOvz8qXLD1zN91EppyWTjm/VHJ1SQWtrdTWnVVFYvL0PkhVxvmY9KOaINNIMhRSvMDphstWxlLluIB2TpbXrpL8WSZdKyXZ2B3ZYt0xAGhjE2/G0B6w6kEPKkQqZxJq/uIH6/KNL15C9/Dw8NggWHMJ3XAWeGNF4igtcPpSlcPiwO1UFOLN99wPAJiZEnewsTGSJlXRegx2Uw6ILYOSg+Gmmyn16bMnSbp48lUpW+WIp7CqfqVLZXcwbpsZJxer86cpD8b4mRPxsfI8SWi1SElZTsqrc7Wj60UsORrl1ldicuXkyMm4rVKkX/q+3uVdxJxUDgBIZxv67dw3I1X0olQlyeTZZylnzmtHXo2PTZzj9MMVkSqiwLkQKumQu15lKVJrM7YJGerGqt1JnYTp+EbtGuhcMJNKInUEsElkGs5rJkk718dAFS+xzTNV0LWWRvgBTQs52JjMpb7t2CHuheUCja+jXQKo733rOwEAtx6QiNz2DspDFNr6XDT6+vre1jb22x1339XntLeTRvvBD34wbrv55psB1JN027YOAwA2byKt9bvflbw+F7hs4f79O+O2iF2LUxlxpV2KHTvEjdildNZ9C9gV8MKQjPn1TpJwb7mVJPTubrXuNZKSdeERY4r894i0MeG5uY3u2ZdT0ZguTXdCJPTJSX42jGgxVb7H3u20Bzb1y7GIXahrkeyP/duY9LVy3USKvpNOk7Zdl253s5sP9dyyhH5hXNyHLxVeQvfw8PDYIPAvdA8PD48NgjU3ucxPk1rW1y3qc4nNB7mspM58xzt/FQDwwFveRMeUz/TUDKm1kTJdZJKkziVU7ciOFKk0WSYr4nqVEFUsUiRmlaMOF/Nimjn2DBGf02de43uqZE1MtIXKVOTUfU2w1ThO0pkJdBrYUpFUyLIimyK+rlmBLNFpO51GulgVVTNfqvA15LxkitTD8XHyUZ9Sv+/JOKGQ3CMRuUo+ilxcUhE+oUhXR2TWWQmcG7Vaq2yObWVxbVhVl5FNLUGgzR/0Odkkd6uNqw01krNGbfd6cvPiaBY1GidB66Coxu4uSdM6uIUiPmtqXQIm3nU6XFc9a6XanNqc1uy8lb7rUsi2tUld3PvuoyRe2t8/5ERWIyNELv70mafjY2960y0AgIKKNh3aSsnBsipF81J0ZWU+yolG5wDw87hzh/iyT+8js043R1Zn04E6vaPhEs6sZ3RaXuvMH40J41zdTpOWOZ1JuuhR6WMXpxEe2kTXbVNmG2sbUzQH/GxqCdmCU/DW6B1XZ8Ljr6oliOMlLkhetEuGl9A9PDw8NgjWXELfuYMKOmiXuQWWiFNp+eWemqaIsaefIsmhp13l9mBCM50T0jCbpF/H9pSSDDitZppdhbJKQs8v0K9oYUFcqGanyd1p9Ii4Jk6PHgMARC66TFWaiNgtqaak/DTny0gqSaNY4F97JlW01JzmdLwDm6XoRSZLEsdcXtKoLoXOIbFQpLHkVY3VUuhS0ypSlAnPgPNWBCpfhUveb5Uk6+qBagnaEY0x2aTGkuKox2Qga+W0l6SKuHRkkXH5PpIiZwQumlWpCi7NaFIRiE5qdxJppaJcGpNOetLk4uUVE6j/niNgmeSONFnHmpnS4OLcOkqLkQjfZlJ2IwEqrqCNqX2bIe6HWrMi7w/dD+dOOD1De37bdnn20pwrqatHJO7+ftKew3D5eUylldunW0fVbxckeUDVad07RPV1F0OuUZvR0Zu0pnr/Oek70KJ
u6ApKuHlRtU05rXHSyJ506YZTKdFihoboOezo5Lam+0Wvt2tR2mXCuWq6Xqhjhp4DnVnamMvbkxpeQvfw8PDYIFhzCf33/u1/AgBYVd4qXOAcDBMSqOGS7Rl2yM9PqAIGnJGtmJfcJacvkFtji7bBJUgyDrMk/WZVsv1ZFrXLBZGCL7xO7kb585ILwmVVcy6Vocpo6ASHbFZPq5Ncxf3KCbERnGQgv/Q7tpHk09ct2sYUZ7lLZkSCXgrn9gZI0vxIuUS5cmBGuU6VWapPGDq/piTCRM3ZnaVvVRZDdKbCBEvtVZaIc63ixpblYBnthhhyxjxdsKIlQevQ0kLfjZTak2oSRORcIyNViC/BUljI7pNBUtvcazxeWSs9X0vRzF1QPi/vyqjt5XaJGyX1ydleG++1ko0+qNNEGqXxpd+9WKk9EwvLjTxDmp+X/TeJu+/g0GDDdV3RkJXcP22d9Ok0FqVZ8B7ThWa2bCfb/PFR4nUqNeGSAuM0HL12jndROVGE2KF/JxqDzAIje2d2lu5VVlpde6vj4Nx+1QVleAymoQU20bhnnOapszPaWLtT2toK5fxWCy+he3h4eGwQ+Be6h4eHxwbBRU0uxpgsgO8DyPD5X7TW/qExZieAzwPoBfAsgF+z1jbxTVoZhYCioqqqK70DpHbt2y5qTlQiIqfKpNdiUdJqVrmwRVbpbvkiuRW+flqS588wyXnj/nsAACkjqXKdSpVUNUJLnGpTuzIicvkk+K8iGaMyqWy1pJhyMi1clVz9dJbYFBLfMyv3zHOdz5fmxfTjiL5TOq8K6qHztsSqnmp07pO27jc8UffdpKqXmeZGbS5xxJPW4l1UbCubSzIqtW5hkdYljLRphM02KRlBS5rdzNg906o8K9Wo0fXRzb1VZhPLfTNRMxdPvq5yfcQKROJKJpdmkZpxDVI0koyJRKNe3tyNs/GezcwlzUjRpWjWb22WsfE9TUNbhqOMd+wYjo/lcrS2JeVKG5s47PIyYa0mjg61eK2UySWg680tTsZtLfwMZ9hEWMjLsye1ZnW/XfEINW9uPeIxq33iXGmTUq+4t4ujkdU1pmfpvrH5T6UCdiY/o0hlYT61eSdR14+a3gtsStTvBf28Xi5WI6GXAdxvrT0A4FYADxlj7gbwxwD+q7V2N4AZAB+/4t54eHh4eFw2VlOCzgJw4nCK/7MA7gfwEW5/HMB/APCZS+3AkVcoB0NSVQ8fcYUldO6DosulQGRFsSy5FSZOPwsAuP66fXHb0KZhAEBXn7haVQsUtDC4jVwlT7+iEt/z9SPlzhS0kQRfKh2TDnOX0jmSKuOCGwAiJu4W55R7YUDj6tkukmtUZOljwUl2oomMj5MWESoXOFeh/qmnnorb3n675AMBIBE+AAyzUQm1vJWKu5dyOeRAl4CLD2ivKed6mVWlzhwcMQwAaZak0ixxVAoqIIpJpmxGk6gsDam1rXLQU5wVUZGLTkp1pC71zUmHOmCECWaO2LC6Tl0c4NRYCm8lrCQFazSTpFd7jWbE52q+25ywXf7+K0n7Gj297DCQFS3TSffpOjI8sWy/HaI6IpHXVpPRrH2VC+IqXOIcK50B53lZkHww5XTjmsVjUARvSihKAPXzU2MS0qpCL7k07dN25XMwn6e22XmS5DvVvd0z5Eos1vWjrnOsKcfPoV4zXhf1HKxyu62IVdnQjTEBF4ieAPAtAK8DmLWSWWYUwNAy333UGHPQGHOwUCg0O8XDw8PD4w3Aql7o1trIWnsrgK0A7gRww0W+or/7mLX2DmvtHc4W5+Hh4eHxxuOS/NCttbPGmCcB3AOgyxiTZCl9K4CzK3+7OUZf+zoAoCWr/LQTFJ0VJCRPxNQi+Z+nWwe4M6K6nR4hf/HZWSFAt/ST6tieET1m19Awncd5K+byQnRMXyDzSxWKyOFq4Eb5q4chmVOS7FOcUBGPLb3sR10Vk0vJkTqBmCJ23EB9K41TP2ZKonJ29NDYy3OqpmiR1L+RU5LEf6nJJVBkU0uGTSnKdBGVnG+6jlKkv+Ifq4pIsD+yzm/hcoqklPkj4GjQMpto6tL4sqobKnU4zTk3tM977BftiiAEshcc0RjpHDFSTSNuSyyxOsSRiRDCrKpMABXl174Uzpyg8+OsJhqzWQ3SZuet9N1m39PmEmf+uBjxuVLfVhpLZ2fnsscuFfouzv9ct6WYAE3L44XiLJkcL5wdpX8byaGS6KVnU/ff7bdApxgO6u+vx+7WtKb2DtdEwVvfJumPn32OnrWjR14HANx127DqeaJhgBIp2mSNE40EebOoUL3fLhcXldCNMf3GmC7+3ALgnQBeAfAkAJdk+WMAvnLFvfHw8PDwuGysRkIfAPC4odRiCQBfsNb+g6GM8p83xvxHAM8D+MvL6cBrrz8JAOhoF6ksF5Ckm1QSerVC5EQ+TT+nUVXlZ6hSJOXkuESKTk/Tz35kxb3x1CSRmz1dJMnP5mX48/OUsXEhL5JbmXOupNsky5stc4QoOLG+ynWS7Kbr9ea64rbZUS6H1SoSemoTXXfmBF1r06DKWOfIVkWKFphcXJyXsSzF3Iy4fs3MXOA+inRdZCJRE1WOmMywlqErzrtMlLUmUqomwhLWkZDsmqUlTP4cKHdIF/kJXcTCZaoLlohWAKI4R42Sbth9rabd0ZzLF7uKaQksZnuVFDczQ3zO9behAbEbYhOJW7c5ybmZhOw+a+l6JelbskSurBW46yaVlrT0uxcjQJsdXyodNuv3aqJUNXRBERPPqRyvOndSRTgGGdrr6VgaV/mFko1RtW4v1mto9X2qj7QNGq7RnSOt+JZbxQFgbpbzPnHEcUJdw+UX0qSoid2CBa6YixDIMqeJoDGSeCXNabVYjZfLIQAN295aewJkT/fw8PDwuAbgI0U9PDw8NgjWPDlXtUCqT16RVLV2MmdUw7m4LRmSOlKoOt9VUZlcGs4IKkqRG0vKVXL2PBGrPV30t1hVUaHsW52fFbNNVHFJeJS6uiQxT6hUtyhP5oG2HmF5Ovvo88K0nJftJJ/3qQKZRPo3SSKuRe5vrlfIqeFuMsmcOq6qoy/Bhz767vjzK0df484q31mnampyjMfikvNbHYEnJ8n5McGm2pxv9wo+tDoazn3Z1PUjZpSoj00uVkew8XGriCXxMW+Sjta42q3SlA4a/esdnIqu1fI4+naF2qJ1ibhWmWlpqRlGm1KW3ns5uOMrffdi/vDuu5dax3RFH3z9bETOVKTr3DpTm4qhyHE/nGOEunU24uR6TQp+hFUxL7rEcq44Sp2JJuniIPT6UJvK4I33vpcK6aScz7lRUbKuFrDaf7H5TZt3uB/OpFhPzjqT3KXvmZXgJXQPDw+PDQKz2mi4NwKDg4P20UcfvWr38/Dw8NgI+NSnPvWstfaOi53nJXQPDw+PDQL/Qvfw8PDYIPAvdA8PD48NAv9C9/Dw8NgguKqkqDFmEsAigAsXO/caRx/W9xjWe/+B9T+G9d5/YP2PYT31f4e1tv9iJ13VFzoAGGMOroatvZax3sew3vsPrP8xrPf+A+t/DOu9/8
3gTS4eHh4eGwT+he7h4eGxQbAWL/TH1uCebzTW+xjWe/+B9T+G9d5/YP2PYb33vwFX3Ybu4eHh4fGzgTe5eHh4eGwQXNUXujHmIWPMUWPMcWPMJ6/mvS8HxphtxpgnjTFHjDEvG2N+i9t7jDHfMsYc47/dF7vWWoKLfD9vjPkH/vdOY8zTvA5/a4xJX+waawljTJcx5ovGmFeNMa8YY+5Zh2vwO7yHXjLGfM4Yk72W18EY81fGmAljzEuqremcG8KneRyHjDG3r13PBcuM4T/zPjpkjPl/rhobH/t9HsNRY8yDa9PrK8NVe6FzxaM/A/AuAPsAfNgYs+9q3f8yEQL4XWvtPgB3A/hN7vMnATxhrd0D4An+97WM3wKVDXT4YwD/1Vq7G8AMgI+vSa9Wjz8F8HVr7Q0ADoDGsm7WwBgzBOATAO6w1u4HEAD4EK7tdfhrAA8taVtuzt8FYA//9yiAz1ylPl4Mf43GMXwLwH5r7S0AXgPw+wDAz/WHANzE3/nvxpgrz2d7lXE1JfQ7ARy31p6w1lYAfB7Aw1fx/pcMa+2YtfY5/pwHvUiGQP1+nE97HMD716aHF4cxZiuA9wD4C/63AXA/gC/yKdd6/zsBvA1c4tBaW7HWzmIdrQEjCaDFGJMEkAMwhmt4Hay13wcwvaR5uTl/GMBnLeEnoALyA1enp8uj2Ristd/kwvYA8BNQgXuAxvB5a23ZWnsSwHGsw4psV/OFPgTgjPr3KLetCxhjhkGl+J4GsNlaO8aHxgFsXqNurQb/DcC/gxRa7AUwqzb1tb4OOwFMAvifbDb6C2NMK9bRGlhrzwL4LwBOg17kcwCexfpaB2D5OV+vz/a/AvBP/Hm9jqEOnhRdBYwxbQD+DsBvW2vn9TFLbkLXpKuQMea9ACastc+udV+uAEkAtwP4jLX2NlDqiDrzyrW8BgDAtuaHQT9OgwBa0WgKWFe41uf8YjDG/AHIpPo3a92XNxJX84V+FsA29e+t3HZNwxiTAr3M/8Za+yVuPu9USv47sVb9uwjuA/A+Y8wIyMR1P8ge3cWqP3Dtr8MogFFr7dP87y+CXvDrZQ0A4OcBnLTWTlprqwC+BFqb9bQOwPJzvq6ebWPMvwTwXgAfteK3va7GsByu5gv9GQB7mNlPgwiIr17F+18y2N78lwBesdb+iTr0VQAf488fA/CVq9231cBa+/vW2q3W2mHQfH/HWvtRAE8C+CCfds32HwCsteMAzhhjruemBwAcwTpZA8ZpAHcbY3K8p9wY1s06MJab868C+HX2drkbwJwyzVxTMMY8BDJBvs9aW1CHvgrgQ8aYjDFmJ4jg/ela9PGKYK29av8BeDeIWX4dwB9czXtfZn/fAlIrDwF4gf97N8gO/QSAYwC+DaBnrfu6irH8HIB/4M+7QJv1OID/CyCz1v27SN9vBXCQ1+HLALrX2xoA+BSAVwG8BOB/Achcy+sA4HMge38VpCV9fLk5B1VZ/jN+rg+DvHmu1TEcB9nK3fP8P9T5f8BjOArgXWvd/8v5z0eKenh4eGwQeFLUw8PDY4PAv9A9PDw8Ngj8C93Dw8Njg8C/0D08PDw2CPwL3cPDw2ODwL/QPTw8PDYI/Avdw8PDY4PAv9A9PDw8Ngj+Pzl0fN2dVqJJAAAAAElFTkSuQmCC\n", 130 | "text/plain": [ 131 | "
" 132 | ] 133 | }, 134 | "metadata": {}, 135 | "output_type": "display_data" 136 | }, 137 | { 138 | "name": "stderr", 139 | "output_type": "stream", 140 | "text": [ 141 | "Process Process-3:\n", 142 | "Process Process-4:\n", 143 | "Traceback (most recent call last):\n", 144 | "Traceback (most recent call last):\n", 145 | " File \"/opt/packages/school/18/lib/python3.6/multiprocessing/process.py\", line 249, in _bootstrap\n", 146 | " self.run()\n", 147 | " File \"/opt/packages/school/18/lib/python3.6/multiprocessing/process.py\", line 93, in run\n", 148 | " self._target(*self._args, **self._kwargs)\n", 149 | " File \"/opt/packages/school/18/lib/python3.6/multiprocessing/process.py\", line 249, in _bootstrap\n", 150 | " self.run()\n", 151 | " File \"/opt/packages/school/18/lib/python3.6/site-packages/torch/utils/data/dataloader.py\", line 52, in _worker_loop\n", 152 | " r = index_queue.get()\n", 153 | " File \"/opt/packages/school/18/lib/python3.6/multiprocessing/queues.py\", line 343, in get\n", 154 | " res = self._reader.recv_bytes()\n", 155 | " File \"/opt/packages/school/18/lib/python3.6/multiprocessing/process.py\", line 93, in run\n", 156 | " self._target(*self._args, **self._kwargs)\n", 157 | " File \"/opt/packages/school/18/lib/python3.6/multiprocessing/connection.py\", line 216, in recv_bytes\n", 158 | " buf = self._recv_bytes(maxlength)\n", 159 | " File \"/opt/packages/school/18/lib/python3.6/site-packages/torch/utils/data/dataloader.py\", line 52, in _worker_loop\n", 160 | " r = index_queue.get()\n", 161 | " File \"/opt/packages/school/18/lib/python3.6/multiprocessing/connection.py\", line 407, in _recv_bytes\n", 162 | " buf = self._recv(4)\n", 163 | " File \"/opt/packages/school/18/lib/python3.6/multiprocessing/connection.py\", line 379, in _recv\n", 164 | " chunk = read(handle, remaining)\n", 165 | " File \"/opt/packages/school/18/lib/python3.6/multiprocessing/queues.py\", line 343, in get\n", 166 | " res = self._reader.recv_bytes()\n", 167 | "KeyboardInterrupt\n", 168 | " File \"/opt/packages/school/18/lib/python3.6/multiprocessing/connection.py\", line 216, in recv_bytes\n", 169 | " buf = self._recv_bytes(maxlength)\n", 170 | " File \"/opt/packages/school/18/lib/python3.6/multiprocessing/connection.py\", line 407, in _recv_bytes\n", 171 | " buf = self._recv(4)\n", 172 | " File \"/opt/packages/school/18/lib/python3.6/multiprocessing/connection.py\", line 379, in _recv\n", 173 | " chunk = read(handle, remaining)\n", 174 | "KeyboardInterrupt\n" 175 | ] 176 | } 177 | ], 178 | "source": [ 179 | "def imshow(img):\n", 180 | " img = img / 2 + 0.5 # unnormalize\n", 181 | " npimg = img.numpy()\n", 182 | " plt.imshow(np.transpose(npimg, (1, 2, 0)))\n", 183 | "\n", 184 | "\n", 185 | "# get some random training images\n", 186 | "dataiter = iter(trainloader)\n", 187 | "images, labels = dataiter.next()\n", 188 | "\n", 189 | "# show images\n", 190 | "imshow(torchvision.utils.make_grid(images))\n", 191 | "# print labels\n", 192 | "print(' '.join('%10s' % classes[labels[j]] for j in range(4)))" 193 | ] 194 | }, 195 | { 196 | "cell_type": "markdown", 197 | "metadata": {}, 198 | "source": [ 199 | "### Define a Convolutional Neural Network" 200 | ] 201 | }, 202 | { 203 | "cell_type": "code", 204 | "execution_count": 15, 205 | "metadata": {}, 206 | "outputs": [ 207 | { 208 | "name": "stderr", 209 | "output_type": "stream", 210 | "text": [ 211 | "/opt/packages/school/18/lib/python3.6/site-packages/ipykernel_launcher.py:52: UserWarning: nn.init.xavier_normal is now deprecated in favor of 
nn.init.xavier_normal_.\n", 212 | "/opt/packages/school/18/lib/python3.6/site-packages/ipykernel_launcher.py:53: UserWarning: nn.init.xavier_normal is now deprecated in favor of nn.init.xavier_normal_.\n", 213 | "/opt/packages/school/18/lib/python3.6/site-packages/ipykernel_launcher.py:54: UserWarning: nn.init.xavier_normal is now deprecated in favor of nn.init.xavier_normal_.\n", 214 | "/opt/packages/school/18/lib/python3.6/site-packages/ipykernel_launcher.py:55: UserWarning: nn.init.xavier_normal is now deprecated in favor of nn.init.xavier_normal_.\n", 215 | "/opt/packages/school/18/lib/python3.6/site-packages/ipykernel_launcher.py:56: UserWarning: nn.init.xavier_normal is now deprecated in favor of nn.init.xavier_normal_.\n" 216 | ] 217 | } 218 | ], 219 | "source": [ 220 | "# This scheme can be one of 'uniform', 'normal', 'constant' 'Xavier' and 'custom'\n", 221 | "\n", 222 | "weight_initialization_scheme = 'Xavier'\n", 223 | "\n", 224 | "\n", 225 | "def weight_init_custom_conv(module):\n", 226 | " import math\n", 227 | " n = module.kernel_size[0] * module.kernel_size[1] * module.out_channels\n", 228 | " module.weight.data.normal_(0, math.sqrt(2. / n))\n", 229 | "\n", 230 | "def weight_init_custom_linear(module):\n", 231 | " import math\n", 232 | " n = module.in_features * module.out_features\n", 233 | " module.weight.data.normal_(0, math.sqrt(2. / n))\n", 234 | "\n", 235 | " \n", 236 | "class Net(nn.Module):\n", 237 | " def __init__(self):\n", 238 | " super(Net, self).__init__()\n", 239 | " self.conv1 = nn.Conv2d(3, 6, 5)\n", 240 | " self.pool = nn.MaxPool2d(2, 2)\n", 241 | " self.conv2 = nn.Conv2d(6, 16, 5)\n", 242 | " self.fc1 = nn.Linear(16 * 5 * 5, 120)\n", 243 | " self.fc2 = nn.Linear(120, 84)\n", 244 | " self.fc3 = nn.Linear(84, 10) \n", 245 | " \n", 246 | " # Setting the weights for the conv1 layer\n", 247 | " for m in self.modules():\n", 248 | " if weight_initialization_scheme == 'uniform':\n", 249 | "# print('Initializating with uniform scheme')\n", 250 | " weight_init.uniform(self.conv1.weight)\n", 251 | " weight_init.uniform(self.conv2.weight)\n", 252 | " weight_init.uniform(self.fc1.weight)\n", 253 | " weight_init.uniform(self.fc2.weight)\n", 254 | " weight_init.uniform(self.fc3.weight)\n", 255 | " if weight_initialization_scheme == 'normal':\n", 256 | "# print('Initializating with normal scheme')\n", 257 | " weight_init.normal(self.conv1.weight)\n", 258 | " weight_init.normal(self.conv2.weight)\n", 259 | " weight_init.normal(self.fc1.weight)\n", 260 | " weight_init.normal(self.fc2.weight)\n", 261 | " weight_init.normal(self.fc3.weight)\n", 262 | " if weight_initialization_scheme == 'constant':\n", 263 | "# print('Initializating with constant scheme')\n", 264 | " weight_init.constant(self.conv1.weight, 0.1)\n", 265 | " weight_init.constant(self.conv2.weight, 0.1)\n", 266 | " weight_init.constant(self.fc1.weight, 0.1)\n", 267 | " weight_init.constant(self.fc2.weight, 0.1)\n", 268 | " weight_init.constant(self.fc3.weight, 0.1)\n", 269 | " if weight_initialization_scheme == 'Xavier':\n", 270 | "# print('Initializating with Xavier scheme')\n", 271 | " weight_init.xavier_normal(self.conv1.weight)\n", 272 | " weight_init.xavier_normal(self.conv2.weight)\n", 273 | " weight_init.xavier_normal(self.fc1.weight)\n", 274 | " weight_init.xavier_normal(self.fc2.weight)\n", 275 | " weight_init.xavier_normal(self.fc3.weight)\n", 276 | " if weight_initialization_scheme == 'custom':\n", 277 | "# print('Initializating with custom scheme')\n", 278 | " weight_init_custom_conv(self.conv1)\n", 
279 | " weight_init_custom_conv(self.conv2)\n", 280 | " weight_init_custom_linear(self.fc1)\n", 281 | " weight_init_custom_linear(self.fc2)\n", 282 | " weight_init_custom_linear(self.fc3)\n", 283 | "\n", 284 | " def forward(self, x):\n", 285 | " x = self.pool(F.relu(self.conv1(x)))\n", 286 | " x = self.pool(F.relu(self.conv2(x)))\n", 287 | " x = x.view(-1, 16 * 5 * 5)\n", 288 | " x = F.relu(self.fc1(x))\n", 289 | " x = F.relu(self.fc2(x))\n", 290 | " x = self.fc3(x)\n", 291 | " return x\n", 292 | "\n", 293 | "\n", 294 | "net = Net().to(device)" 295 | ] 296 | }, 297 | { 298 | "cell_type": "markdown", 299 | "metadata": {}, 300 | "source": [ 301 | "### Define a loss function and an optimizer" 302 | ] 303 | }, 304 | { 305 | "cell_type": "code", 306 | "execution_count": 16, 307 | "metadata": {}, 308 | "outputs": [], 309 | "source": [ 310 | "criterion = nn.CrossEntropyLoss()\n", 311 | "optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9)" 312 | ] 313 | }, 314 | { 315 | "cell_type": "markdown", 316 | "metadata": {}, 317 | "source": [ 318 | "### Train the network\n", 319 | "\n", 320 | "Now, we will be training the network defined above on CIFAR-10 dataset.\n", 321 | "\n", 322 | "We will train the network for num_epoch times (defined above)\n", 323 | "\n", 324 | "- We fetch a batch of images and labels from dataloader\n", 325 | "- We feed it to CNN network for forward pass\n", 326 | "- Based on the output of forward pass, we calculate loss/error\n", 327 | "- Then we calculate gradients of loss w.r.t. the parameters of the network\n", 328 | "- Finally, we update the parameters based on the gradients using Gradient Descent algorithm" 329 | ] 330 | }, 331 | { 332 | "cell_type": "code", 333 | "execution_count": null, 334 | "metadata": {}, 335 | "outputs": [ 336 | { 337 | "name": "stdout", 338 | "output_type": "stream", 339 | "text": [ 340 | "[1, 2000] loss: 1.904\n", 341 | "[1, 4000] loss: 1.648\n", 342 | "[1, 6000] loss: 1.546\n", 343 | "[1, 8000] loss: 1.521\n" 344 | ] 345 | } 346 | ], 347 | "source": [ 348 | "for epoch in range(num_epoch): # loop over the dataset multiple times\n", 349 | "\n", 350 | " running_loss = 0.0\n", 351 | " for i, data in enumerate(trainloader, 0):\n", 352 | " # get the inputs\n", 353 | " inputs, labels = data\n", 354 | " inputs, labels = inputs.to(device), labels.to(device)\n", 355 | "\n", 356 | " # zero the parameter gradients\n", 357 | " optimizer.zero_grad()\n", 358 | "\n", 359 | " # forward + backward + optimize\n", 360 | " outputs = net(inputs)\n", 361 | " loss = criterion(outputs, labels)\n", 362 | " loss.backward()\n", 363 | " optimizer.step()\n", 364 | "\n", 365 | " # print statistics\n", 366 | " running_loss += loss.item()\n", 367 | " if i % 2000 == 1999: # print every 2000 mini-batches\n", 368 | " print('[%d, %5d] loss: %.3f' %\n", 369 | " (epoch + 1, i + 1, running_loss / 2000))\n", 370 | " running_loss = 0.0\n", 371 | "\n", 372 | "print('Finished Training')" 373 | ] 374 | }, 375 | { 376 | "cell_type": "markdown", 377 | "metadata": {}, 378 | "source": [ 379 | "### Test the network on the test datset\n", 380 | "\n", 381 | "We will check this by predicting the class label that the neural network outputs, and checking it against the ground-truth. If the prediction is correct, we add the sample to the list of correct predictions." 
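The single accuracy figure computed below hides how the errors are distributed across classes. A common follow-up, added here as a sketch rather than taken from the original notebook, is a per-class breakdown; it reuses `net`, `testloader`, `device` and `classes` from the cells above, and it often shows visually similar classes (for example `cat` and `dog`) lagging well behind the rest.

```python
# Added sketch: per-class accuracy on the test set, reusing objects defined
# earlier in this notebook (net, testloader, device, classes).
class_correct = [0.0] * 10
class_total = [0.0] * 10
with torch.no_grad():
    for data in testloader:
        images, labels = data
        images, labels = images.to(device), labels.to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        correct = (predicted == labels).squeeze()
        for i in range(labels.size(0)):
            label = labels[i]
            class_correct[label] += correct[i].item()
            class_total[label] += 1

for i in range(10):
    print('Accuracy of %5s : %2d %%' %
          (classes[i], 100 * class_correct[i] / class_total[i]))
```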
382 | ] 383 | }, 384 | { 385 | "cell_type": "code", 386 | "execution_count": null, 387 | "metadata": {}, 388 | "outputs": [], 389 | "source": [ 390 | "# Quantitative Analysis\n", 391 | "correct = 0\n", 392 | "total = 0\n", 393 | "with torch.no_grad():\n", 394 | " for data in testloader:\n", 395 | " images, labels = data\n", 396 | " images, labels = images.to(device), labels.to(device)\n", 397 | " outputs = net(images)\n", 398 | " _, predicted = torch.max(outputs.data, 1)\n", 399 | " total += labels.size(0)\n", 400 | " correct += (predicted == labels).sum().item()\n", 401 | "\n", 402 | "print('Accuracy of the network on the 10000 test images: %d %%' % (\n", 403 | " 100 * correct / total))" 404 | ] 405 | }, 406 | { 407 | "cell_type": "code", 408 | "execution_count": null, 409 | "metadata": {}, 410 | "outputs": [], 411 | "source": [ 412 | "# Qualitative Analysis\n", 413 | "dataiter = iter(testloader)\n", 414 | "images, labels = dataiter.next()\n", 415 | "images, labels = images.to(device), labels.to(device)\n", 416 | "\n", 417 | "# print images\n", 418 | "imshow(torchvision.utils.make_grid(images.cpu()))\n", 419 | "print('GroundTruth: ', ' '.join('%4s' % classes[labels[j]] for j in range(4)))\n", 420 | "\n", 421 | "outputs = net(images)\n", 422 | "_, predicted = torch.max(outputs, 1)\n", 423 | "\n", 424 | "print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]\n", 425 | " for j in range(4)))" 426 | ] 427 | } 428 | ], 429 | "metadata": { 430 | "kernelspec": { 431 | "display_name": "Python 3", 432 | "language": "python", 433 | "name": "python3" 434 | }, 435 | "language_info": { 436 | "codemirror_mode": { 437 | "name": "ipython", 438 | "version": 3 439 | }, 440 | "file_extension": ".py", 441 | "mimetype": "text/x-python", 442 | "name": "python", 443 | "nbconvert_exporter": "python", 444 | "pygments_lexer": "ipython3", 445 | "version": "3.6.5" 446 | } 447 | }, 448 | "nbformat": 4, 449 | "nbformat_minor": 2 450 | } 451 | --------------------------------------------------------------------------------
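A natural closing experiment for the weight-initialization notebook, sketched below as an addition rather than part of the original: rebuild the network under each scheme and compare the average loss over a short probe of training. The sketch reuses `Net`, `trainloader`, `criterion` and `device` from the cells above; note that `weight_initialization_scheme` is the module-level name read inside `Net.__init__`, so reassigning it changes which branch runs. One would expect the constant scheme to start worst, since every weight in a layer begins identical and the units stay nearly symmetric, while the Xavier and the custom He-style schemes usually descend fastest.

```python
# Added sketch: probe each initialization scheme defined above for a few
# hundred mini-batches and report the mean training loss.
probe_batches = 500

for scheme in ['uniform', 'normal', 'constant', 'Xavier', 'custom']:
    weight_initialization_scheme = scheme   # read by Net.__init__
    net = Net().to(device)
    optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    running_loss = 0.0
    for i, (inputs, labels) in enumerate(trainloader):
        if i == probe_batches:
            break
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        loss = criterion(net(inputs), labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print('%-9s mean loss over first %d batches: %.3f'
          % (scheme, probe_batches, running_loss / probe_batches))
```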