└── tutorials
    ├── Auto_Encoder_MNIST.ipynb
    ├── Learning_PyTorch_with_Examples.ipynb
    ├── PyTorch_NLP.ipynb
    ├── Pytorch_Tutorials-Teaching.ipynb
    ├── Pytorch_Tutorials.ipynb
    ├── Transfer_Learning_tutorial.ipynb
    └── readme.md

/tutorials/Learning_PyTorch_with_Examples.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## Writing a custom activation function"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "Implement the ReLU activation function from scratch and wire it into a network"
15 | ]
16 | },
17 | {
18 | "cell_type": "code",
19 | "execution_count": 1,
20 | "metadata": {
21 | "collapsed": false
22 | },
23 | "outputs": [
24 | {
25 | "name": "stdout",
26 | "output_type": "stream",
27 | "text": [
28 | "(0, 35215792.0)\n",
29 | "(100, 463.51239013671875)\n",
30 | "(200, 1.6253066062927246)\n",
31 | "(300, 0.00940221268683672)\n",
32 | "(400, 0.00022180218365974724)\n"
33 | ]
34 | }
35 | ],
36 | "source": [
37 | "import torch\n",
38 | "from torch.autograd import Variable\n",
39 | "\n",
40 | "# Define a custom ReLU class\n",
41 | "class MyReLU(torch.autograd.Function):\n",
42 | " \n",
43 | " # Only the forward activation and the backward gradient computation need to be written\n",
44 | " def forward(self, input):\n",
45 | " \n",
46 | " # Save the input for the backward pass\n",
47 | " self.save_for_backward(input)\n",
48 | " \n",
49 | " # The ReLU itself\n",
50 | " # x.clamp(min=0) <=> max(x, 0)\n",
51 | " return input.clamp(min=0)\n",
52 | "\n",
53 | " # Backpropagation\n",
54 | " # It only needs to return the gradient\n",
55 | " def backward(self, grad_output):\n",
56 | "\n",
57 | " # Retrieve the saved Tensor\n",
58 | " input, = self.saved_tensors\n",
59 | " \n",
60 | " # Clone so grad_output is not modified in place\n",
61 | " grad_input = grad_output.clone()\n",
62 | " \n",
63 | " # input < 0 => 0, else keep the gradient\n",
64 | " grad_input[input < 0] = 0\n",
65 | " return grad_input\n",
66 | "\n",
67 | "# Make the dtype explicit; on GPU use the cuda type instead\n",
68 | "dtype = torch.FloatTensor\n",
69 | "# dtype = torch.cuda.FloatTensor # Uncomment this to run on GPU\n",
70 | "\n",
71 | "# N is batch size; D_in is input dimension;\n",
72 | "# H is hidden dimension; D_out is output dimension.\n",
73 | "N, D_in, H, D_out = 64, 1000, 100, 10\n",
74 | "\n",
75 | "# x and y are dummy data. Their values never change, so requires_grad=False skips gradient updates for them\n",
76 | "# .type(dtype) casts them to FloatTensor\n",
77 | "# Create random Tensors to hold input and outputs, and wrap them in Variables.\n",
78 | "x = Variable(torch.randn(N, D_in).type(dtype), requires_grad=False)\n",
79 | "y = Variable(torch.randn(N, D_out).type(dtype), requires_grad=False)\n",
80 | "\n",
81 | "# Create random Tensors for weights, and wrap them in Variables.\n",
82 | "w1 = Variable(torch.randn(D_in, H).type(dtype), requires_grad=True)\n",
83 | "w2 = Variable(torch.randn(H, D_out).type(dtype), requires_grad=True)\n",
84 | "\n",
85 | "learning_rate = 1e-6\n",
86 | "for t in range(500):\n",
87 | " # Construct an instance of our MyReLU class to use in our network\n",
88 | " relu = MyReLU()\n",
89 | "\n",
90 | " # Forward pass: compute predicted y using operations on Variables; we compute\n",
91 | " # ReLU using our custom autograd operation.\n",
92 | " y_pred = relu(x.mm(w1)).mm(w2)\n",
93 | "\n",
94 | " # Compute and print loss\n",
95 | " loss = (y_pred - y).pow(2).sum()\n",
96 | " if t % 100 == 0:\n",
97 | " print(t, loss.data[0])\n",
98 | "\n",
99 | " # Because backward was defined above, .backward() backpropagates the error\n",
100 | " # Use autograd to compute the backward pass.\n",
101 | " loss.backward()\n",
102 | "\n",
103 | " # Update weights using gradient descent\n",
104 | " w1.data -= learning_rate * w1.grad.data\n",
105 | " 
w2.data -= learning_rate * w2.grad.data\n",
106 | "\n",
107 | " # Manually zero the gradients after updating weights\n",
108 | " w1.grad.data.zero_()\n",
109 | " w2.grad.data.zero_()"
110 | ]
111 | },
112 | {
113 | "cell_type": "markdown",
114 | "metadata": {},
115 | "source": [
116 | "Using PyTorch's built-in modules, the same network can be written as below"
117 | ]
118 | },
119 | {
120 | "cell_type": "code",
121 | "execution_count": 2,
122 | "metadata": {
123 | "collapsed": false
124 | },
125 | "outputs": [
126 | {
127 | "name": "stdout",
128 | "output_type": "stream",
129 | "text": [
130 | "(0, 660.4030151367188)\n",
131 | "(100, 43.92659378051758)\n",
132 | "(200, 0.6924678087234497)\n",
133 | "(300, 0.015051346272230148)\n",
134 | "(400, 0.0003239525540266186)\n"
135 | ]
136 | }
137 | ],
138 | "source": [
139 | "import torch\n",
140 | "from torch.autograd import Variable\n",
141 | "\n",
142 | "# N is batch size; D_in is input dimension;\n",
143 | "# H is hidden dimension; D_out is output dimension.\n",
144 | "N, D_in, H, D_out = 64, 1000, 100, 10\n",
145 | "\n",
146 | "# Create random Tensors to hold inputs and outputs, and wrap them in Variables.\n",
147 | "x = Variable(torch.randn(N, D_in))\n",
148 | "y = Variable(torch.randn(N, D_out), requires_grad=False)\n",
149 | "\n",
150 | "# Use the nn package to define our model and loss function.\n",
151 | "model = torch.nn.Sequential(\n",
152 | " torch.nn.Linear(D_in, H),\n",
153 | " torch.nn.ReLU(),\n",
154 | " torch.nn.Linear(H, D_out),\n",
155 | ")\n",
156 | "loss_fn = torch.nn.MSELoss(size_average=False)\n",
157 | "\n",
158 | "# Use the optim package to define an Optimizer that will update the weights of\n",
159 | "# the model for us. Here we will use Adam; the optim package contains many other\n",
160 | "# optimization algorithms. The first argument to the Adam constructor tells the\n",
161 | "# optimizer which Variables it should update.\n",
162 | "learning_rate = 1e-4\n",
163 | "optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n",
164 | "for t in range(500):\n",
165 | " # Forward pass: compute predicted y by passing x to the model.\n",
166 | " y_pred = model(x)\n",
167 | "\n",
168 | " # Compute and print loss.\n",
169 | " loss = loss_fn(y_pred, y)\n",
170 | " if t % 100 == 0:\n",
171 | " print(t, loss.data[0])\n",
172 | "\n",
173 | " # Before the backward pass, use the optimizer object to zero all of the\n",
174 | " # gradients for the variables it will update (which are the learnable weights\n",
175 | " # of the model)\n",
176 | " optimizer.zero_grad()\n",
177 | "\n",
178 | " # Backward pass: compute gradient of the loss with respect to model\n",
179 | " # parameters\n",
180 | " loss.backward()\n",
181 | "\n",
182 | " # Calling the step function on an Optimizer makes an update to its\n",
183 | " # parameters\n",
184 | " optimizer.step()"
185 | ]
186 | },
187 | {
188 | "cell_type": "markdown",
189 | "metadata": {},
190 | "source": [
191 | "## A two-layer network that incorporates custom components"
192 | ]
193 | },
194 | {
195 | "cell_type": "code",
196 | "execution_count": 3,
197 | "metadata": {
198 | "collapsed": false
199 | },
200 | "outputs": [
201 | {
202 | "name": "stdout",
203 | "output_type": "stream",
204 | "text": [
205 | "(0, 725.130126953125)\n",
206 | "(100, 1.8491665124893188)\n",
207 | "(200, 0.04270491003990173)\n",
208 | "(300, 0.0031812286470085382)\n",
209 | "(400, 0.00032688092323951423)\n"
210 | ]
211 | }
212 | ],
213 | "source": [
214 | "import torch\n",
215 | "from torch.autograd import Variable\n",
216 | "\n",
217 | "\n",
218 | "class TwoLayerNet(torch.nn.Module):\n",
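[Editor's sketch] The custom-ReLU cell above uses the legacy instance-method `torch.autograd.Function` API and the `Variable` wrapper from PyTorch 0.x (the notebook kernel is Python 2). For comparison, here is a minimal sketch of the same operator in the modern API (PyTorch >= 1.0), where `Function` subclasses use static methods with a `ctx` object and plain tensors replace `Variable`; this is an illustration, not part of the original notebook.

```python
import torch

class MyReLU(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input):
        # Save the input so backward() knows where to zero the gradient
        ctx.save_for_backward(input)
        return input.clamp(min=0)

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        grad_input = grad_output.clone()
        grad_input[input < 0] = 0   # no gradient flows where the input was negative
        return grad_input

# Plain tensors replace Variable; custom Functions are invoked via .apply
x = torch.randn(64, 1000)
w1 = torch.randn(1000, 100, requires_grad=True)
w2 = torch.randn(100, 10, requires_grad=True)
y_pred = MyReLU.apply(x.mm(w1)).mm(w2)
```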
219 | " \n", 220 | " #__init__に引数をとることで、外部からモデルレイヤーの層数の定義が可能\n", 221 | " def __init__(self, D_in, H, D_out):\n", 222 | " \"\"\"\n", 223 | " In the constructor we instantiate two nn.Linear modules and assign them as\n", 224 | " member variables.\n", 225 | " \"\"\"\n", 226 | " super(TwoLayerNet, self).__init__()\n", 227 | " self.linear1 = torch.nn.Linear(D_in, H)\n", 228 | " self.linear2 = torch.nn.Linear(H, D_out)\n", 229 | "\n", 230 | " #入出力はVariableにより行われる。よって、Variable内の関数を用いることで内部的に自由に演算ができる\n", 231 | " def forward(self, x):\n", 232 | " \"\"\"\n", 233 | " In the forward function we accept a Variable of input data and we must return\n", 234 | " a Variable of output data. We can use Modules defined in the constructor as\n", 235 | " well as arbitrary operators on Variables.\n", 236 | " \"\"\"\n", 237 | " h_relu = self.linear1(x).clamp(min=0)\n", 238 | " y_pred = self.linear2(h_relu)\n", 239 | " return y_pred\n", 240 | "\n", 241 | "#層の入出力サイズの定義\n", 242 | "# N is batch size; D_in is input dimension;\n", 243 | "# H is hidden dimension; D_out is output dimension.\n", 244 | "N, D_in, H, D_out = 64, 1000, 100, 10\n", 245 | "\n", 246 | "# 入力データ作成\n", 247 | "# Create random Tensors to hold inputs and outputs, and wrap them in Variables\n", 248 | "x = Variable(torch.randn(N, D_in))\n", 249 | "y = Variable(torch.randn(N, D_out), requires_grad=False)\n", 250 | "\n", 251 | "# ネットワーク定義\n", 252 | "# Construct our model by instantiating the class defined above\n", 253 | "model = TwoLayerNet(D_in, H, D_out)\n", 254 | "\n", 255 | "# Loss関数・optimizerの設定\n", 256 | "# Construct our loss function and an Optimizer. The call to model.parameters()\n", 257 | "# in the SGD constructor will contain the learnable parameters of the two\n", 258 | "# nn.Linear modules which are members of the model.\n", 259 | "criterion = torch.nn.MSELoss(size_average=False)\n", 260 | "optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)\n", 261 | "\n", 262 | "#トレーニング\n", 263 | "for t in range(500):\n", 264 | " # Forward pass: Compute predicted y by passing x to the model\n", 265 | " y_pred = model(x)\n", 266 | "\n", 267 | " # Compute and print loss\n", 268 | " loss = criterion(y_pred, y)\n", 269 | " if t % 100 == 0:\n", 270 | " print(t, loss.data[0])\n", 271 | "\n", 272 | " # Zero gradients, perform a backward pass, and update the weights.\n", 273 | " optimizer.zero_grad()\n", 274 | " loss.backward()\n", 275 | " optimizer.step()" 276 | ] 277 | }, 278 | { 279 | "cell_type": "markdown", 280 | "metadata": {}, 281 | "source": [ 282 | "## ダイナミックなネットワーク構造" 283 | ] 284 | }, 285 | { 286 | "cell_type": "markdown", 287 | "metadata": {}, 288 | "source": [ 289 | "ループごとに構造を変化させるようなネットワークの構築" 290 | ] 291 | }, 292 | { 293 | "cell_type": "code", 294 | "execution_count": 4, 295 | "metadata": { 296 | "collapsed": false 297 | }, 298 | "outputs": [ 299 | { 300 | "name": "stdout", 301 | "output_type": "stream", 302 | "text": [ 303 | "(0, 681.4361572265625)\n", 304 | "(100, 319.8040466308594)\n", 305 | "(200, 3.017669677734375)\n", 306 | "(300, 1.0424047708511353)\n", 307 | "(400, 0.20460940897464752)\n" 308 | ] 309 | } 310 | ], 311 | "source": [ 312 | "import random\n", 313 | "import torch\n", 314 | "from torch.autograd import Variable\n", 315 | "\n", 316 | "\n", 317 | "class DynamicNet(torch.nn.Module):\n", 318 | " \n", 319 | " #層の定義\n", 320 | " def __init__(self, D_in, H, D_out):\n", 321 | " \"\"\"\n", 322 | " In the constructor we construct three nn.Linear instances that we will use\n", 323 | " in the forward pass.\n", 324 | " \"\"\"\n", 325 | " 
super(DynamicNet, self).__init__()\n", 326 | " self.input_linear = torch.nn.Linear(D_in, H)\n", 327 | " self.middle_linear = torch.nn.Linear(H, H)\n", 328 | " self.output_linear = torch.nn.Linear(H, D_out)\n", 329 | "\n", 330 | " #フォワードステップではランダムに中間層を0~3に変更する\n", 331 | " #middle_linearはHxHなので層が変化しても問題ない\n", 332 | " #同じモジュールを繰り返し使ってもOK\n", 333 | " def forward(self, x):\n", 334 | " \"\"\"\n", 335 | " For the forward pass of the model, we randomly choose either 0, 1, 2, or 3\n", 336 | " and reuse the middle_linear Module that many times to compute hidden layer\n", 337 | " representations.\n", 338 | "\n", 339 | " Since each forward pass builds a dynamic computation graph, we can use normal\n", 340 | " Python control-flow operators like loops or conditional statements when\n", 341 | " defining the forward pass of the model.\n", 342 | "\n", 343 | " Here we also see that it is perfectly safe to reuse the same Module many\n", 344 | " times when defining a computational graph. This is a big improvement from Lua\n", 345 | " Torch, where each Module could be used only once.\n", 346 | " \"\"\"\n", 347 | " h_relu = self.input_linear(x).clamp(min=0)\n", 348 | " for _ in range(random.randint(0, 3)):\n", 349 | " h_relu = self.middle_linear(h_relu).clamp(min=0)\n", 350 | " y_pred = self.output_linear(h_relu)\n", 351 | " return y_pred\n", 352 | "\n", 353 | "\n", 354 | "# N is batch size; D_in is input dimension;\n", 355 | "# H is hidden dimension; D_out is output dimension.\n", 356 | "N, D_in, H, D_out = 64, 1000, 100, 10\n", 357 | "\n", 358 | "# Create random Tensors to hold inputs and outputs, and wrap them in Variables\n", 359 | "x = Variable(torch.randn(N, D_in))\n", 360 | "y = Variable(torch.randn(N, D_out), requires_grad=False)\n", 361 | "\n", 362 | "# Construct our model by instantiating the class defined above\n", 363 | "model = DynamicNet(D_in, H, D_out)\n", 364 | "\n", 365 | "# Construct our loss function and an Optimizer. 
Training this strange model with\n", 366 | "# vanilla stochastic gradient descent is tough, so we use momentum\n", 367 | "criterion = torch.nn.MSELoss(size_average=False)\n", 368 | "optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)\n", 369 | "for t in range(500):\n", 370 | " # Forward pass: Compute predicted y by passing x to the model\n", 371 | " y_pred = model(x)\n", 372 | "\n", 373 | " # Compute and print loss\n", 374 | " loss = criterion(y_pred, y)\n", 375 | " if t % 100 == 0:\n", 376 | " print(t, loss.data[0])\n", 377 | "\n", 378 | " # Zero gradients, perform a backward pass, and update the weights.\n", 379 | " optimizer.zero_grad()\n", 380 | " loss.backward()\n", 381 | " optimizer.step()" 382 | ] 383 | } 384 | ], 385 | "metadata": { 386 | "kernelspec": { 387 | "display_name": "Python 2", 388 | "language": "python", 389 | "name": "python2" 390 | }, 391 | "language_info": { 392 | "codemirror_mode": { 393 | "name": "ipython", 394 | "version": 2 395 | }, 396 | "file_extension": ".py", 397 | "mimetype": "text/x-python", 398 | "name": "python", 399 | "nbconvert_exporter": "python", 400 | "pygments_lexer": "ipython2", 401 | "version": "2.7.11" 402 | } 403 | }, 404 | "nbformat": 4, 405 | "nbformat_minor": 2 406 | } 407 | -------------------------------------------------------------------------------- /tutorials/PyTorch_NLP.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## pytorchの演算の扱い" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 50, 13 | "metadata": { 14 | "collapsed": false 15 | }, 16 | "outputs": [ 17 | { 18 | "data": { 19 | "text/plain": [ 20 | "" 21 | ] 22 | }, 23 | "execution_count": 50, 24 | "metadata": {}, 25 | "output_type": "execute_result" 26 | } 27 | ], 28 | "source": [ 29 | "import torch\n", 30 | "from torch.autograd import Variable\n", 31 | "import torch.nn as nn\n", 32 | "import torch.nn.functional as F\n", 33 | "import torch.optim as optim\n", 34 | "\n", 35 | "torch.manual_seed(1)" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": 51, 41 | "metadata": { 42 | "collapsed": false 43 | }, 44 | "outputs": [ 45 | { 46 | "name": "stdout", 47 | "output_type": "stream", 48 | "text": [ 49 | "\n", 50 | "-2.9718 1.7070 -0.4305 -2.2820 0.5237\n", 51 | " 0.0004 -1.2039 3.5283 0.4434 0.5848\n", 52 | " 0.8407 0.5510 0.3863 0.9124 -0.8410\n", 53 | " 1.2282 -1.8661 1.4146 -1.8781 -0.4674\n", 54 | "-0.7576 0.4215 -0.4827 -1.1198 0.3056\n", 55 | "[torch.FloatTensor of size 5x5]\n", 56 | "\n", 57 | "\n", 58 | " 1.0386 0.5206 -0.5006 -1.9441 -0.9596 0.5489 -0.9901 -0.3826\n", 59 | " 1.2182 0.2117 -1.0613 1.5037 1.8267 0.5561 1.6445 0.4973\n", 60 | "[torch.FloatTensor of size 2x8]\n", 61 | "\n" 62 | ] 63 | } 64 | ], 65 | "source": [ 66 | "#行を連結\n", 67 | "# By default, it concatenates along the first axis (concatenates rows)\n", 68 | "x_1 = torch.randn(2, 5)\n", 69 | "y_1 = torch.randn(3, 5)\n", 70 | "z_1 = torch.cat([x_1, y_1])\n", 71 | "print(z_1)\n", 72 | "\n", 73 | "#列を連結\n", 74 | "# Concatenate columns:\n", 75 | "x_2 = torch.randn(2, 3)\n", 76 | "y_2 = torch.randn(2, 5)\n", 77 | "# second arg specifies which axis to concat along\n", 78 | "z_2 = torch.cat([x_2, y_2], 1)\n", 79 | "print(z_2)" 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": 52, 85 | "metadata": { 86 | "collapsed": false 87 | }, 88 | "outputs": [ 89 | { 90 | "name": "stdout", 91 | "output_type": "stream", 
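[Editor's sketch] The concatenation cell above passes the axis positionally; spelling out `dim=` makes the row/column distinction explicit. A small sketch with the same shapes as the cell above:

```python
import torch

x_1, y_1 = torch.randn(2, 5), torch.randn(3, 5)
z_1 = torch.cat([x_1, y_1], dim=0)   # rows stacked: (2+3) x 5
print(z_1.size())                    # torch.Size([5, 5])

x_2, y_2 = torch.randn(2, 3), torch.randn(2, 5)
z_2 = torch.cat([x_2, y_2], dim=1)   # columns joined: 2 x (3+5)
print(z_2.size())                    # torch.Size([2, 8])
```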
92 | "text": [ 93 | "\n", 94 | "(0 ,.,.) = \n", 95 | " -1.5067 1.7661 -0.3569 -0.1713\n", 96 | " 0.4068 -0.4284 -1.1299 1.4274\n", 97 | " -1.4027 1.4825 -1.1559 1.6190\n", 98 | "\n", 99 | "(1 ,.,.) = \n", 100 | " 0.9581 0.7747 0.1940 0.1687\n", 101 | " 0.3061 1.0743 -1.0327 1.0930\n", 102 | " 0.7769 -1.3128 0.7099 0.9944\n", 103 | "[torch.FloatTensor of size 2x3x4]\n", 104 | "\n", 105 | "\n", 106 | "\n", 107 | "Columns 0 to 9 \n", 108 | "-1.5067 1.7661 -0.3569 -0.1713 0.4068 -0.4284 -1.1299 1.4274 -1.4027 1.4825\n", 109 | " 0.9581 0.7747 0.1940 0.1687 0.3061 1.0743 -1.0327 1.0930 0.7769 -1.3128\n", 110 | "\n", 111 | "Columns 10 to 11 \n", 112 | "-1.1559 1.6190\n", 113 | " 0.7099 0.9944\n", 114 | "[torch.FloatTensor of size 2x12]\n", 115 | "\n", 116 | "\n", 117 | "\n", 118 | "Columns 0 to 9 \n", 119 | "-1.5067 1.7661 -0.3569 -0.1713 0.4068 -0.4284 -1.1299 1.4274 -1.4027 1.4825\n", 120 | " 0.9581 0.7747 0.1940 0.1687 0.3061 1.0743 -1.0327 1.0930 0.7769 -1.3128\n", 121 | "\n", 122 | "Columns 10 to 11 \n", 123 | "-1.1559 1.6190\n", 124 | " 0.7099 0.9944\n", 125 | "[torch.FloatTensor of size 2x12]\n", 126 | "\n" 127 | ] 128 | } 129 | ], 130 | "source": [ 131 | "#.view:reshape関数\n", 132 | "x = torch.randn(2, 3, 4)\n", 133 | "print(x)\n", 134 | "\n", 135 | "#2*12にreshape\n", 136 | "print(x.view(2, 12)) # Reshape to 2 rows, 12 columns\n", 137 | "\n", 138 | "#上記と同じ。最終層を一つ減らす。サイズは推論される\n", 139 | "# Same as above. If one of the dimensions is -1, its size can be inferred\n", 140 | "print(x.view(2, -1))" 141 | ] 142 | }, 143 | { 144 | "cell_type": "markdown", 145 | "metadata": {}, 146 | "source": [ 147 | "## NLP導入" 148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": 53, 153 | "metadata": { 154 | "collapsed": true 155 | }, 156 | "outputs": [], 157 | "source": [ 158 | "#作成したデータ。文章を単語ごとに分けて格納する\n", 159 | "data = [(\"me gusta comer en la cafeteria\".split(), \"SPANISH\"),\n", 160 | " (\"Give it to me\".split(), \"ENGLISH\"),\n", 161 | " (\"No creo que sea una buena idea\".split(), \"SPANISH\"),\n", 162 | " (\"No it is not a good idea to get lost at sea\".split(), \"ENGLISH\")]\n", 163 | "\n", 164 | "test_data = [(\"Yo creo que si\".split(), \"SPANISH\"),\n", 165 | " (\"it is lost on me\".split(), \"ENGLISH\")]" 166 | ] 167 | }, 168 | { 169 | "cell_type": "code", 170 | "execution_count": 54, 171 | "metadata": { 172 | "collapsed": false 173 | }, 174 | "outputs": [ 175 | { 176 | "name": "stdout", 177 | "output_type": "stream", 178 | "text": [ 179 | "{'en': 3, 'No': 9, 'buena': 14, 'it': 7, 'at': 22, 'sea': 12, 'cafeteria': 5, 'Yo': 23, 'la': 4, 'to': 8, 'creo': 10, 'is': 16, 'a': 18, 'good': 19, 'get': 20, 'idea': 15, 'que': 11, 'not': 17, 'me': 0, 'on': 25, 'gusta': 1, 'lost': 21, 'Give': 6, 'una': 13, 'si': 24, 'comer': 2}\n", 180 | "\n", 181 | "Num of vocab size: 26\n" 182 | ] 183 | } 184 | ], 185 | "source": [ 186 | "#出てくる単語にidを割り振る\n", 187 | "# word_to_ix maps each word in the vocab to a unique integer, which will be its\n", 188 | "# index into the Bag of words vector\n", 189 | "word_to_ix = {}\n", 190 | "for sent, _ in data + test_data:\n", 191 | " for word in sent:\n", 192 | " if word not in word_to_ix:\n", 193 | " #長さは1ずつ伸びるのでこれをidとして利用\n", 194 | " word_to_ix[word] = len(word_to_ix)\n", 195 | "print(word_to_ix)\n", 196 | "\n", 197 | "VOCAB_SIZE = len(word_to_ix)\n", 198 | "NUM_LABELS = 2\n", 199 | "\n", 200 | "print\n", 201 | "print (('Num of vocab size: ') + str(VOCAB_SIZE))" 202 | ] 203 | }, 204 | { 205 | "cell_type": "markdown", 206 | "metadata": {}, 207 | "source": [ 208 | 
"### Bag of Wordsでのロジスティック回帰をpytorchで実装する" 209 | ] 210 | }, 211 | { 212 | "cell_type": "markdown", 213 | "metadata": {}, 214 | "source": [ 215 | "$$ output = Log\\_Softmax(Logistic(input)) $$" 216 | ] 217 | }, 218 | { 219 | "cell_type": "markdown", 220 | "metadata": {}, 221 | "source": [ 222 | "$$ Logistic(x) = \\frac{1}{1+e^{-(ax+b)}} $$
\n", 223 | "$$ Log\\_Softmax(x_i) = log(\\frac{e^x_i}{\\sum_j(e^{x_j})}) $$ " 224 | ] 225 | }, 226 | { 227 | "cell_type": "code", 228 | "execution_count": 61, 229 | "metadata": { 230 | "collapsed": false 231 | }, 232 | "outputs": [], 233 | "source": [ 234 | "#BoW識別器の実装\n", 235 | "class BoWClassifier(nn.Module): # inheriting from nn.Module!\n", 236 | "\n", 237 | " def __init__(self, num_labels, vocab_size):\n", 238 | " #必ず継承の必要がある\n", 239 | " super(BoWClassifier, self).__init__()\n", 240 | " \n", 241 | " #input: vocabraryの種類の数\n", 242 | " #output: labelの数\n", 243 | " self.linear = nn.Linear(vocab_size, num_labels)\n", 244 | "\n", 245 | " def forward(self, bow_vec):\n", 246 | " return F.log_softmax(self.linear(bow_vec))\n", 247 | "\n", 248 | "#文章をBoW表現のvectorに変換\n", 249 | "def make_bow_vector(sentence, word_to_ix):\n", 250 | " #vocabraryの種類の長さのvector\n", 251 | " vec = torch.zeros(len(word_to_ix))\n", 252 | " \n", 253 | " #文章中の単語をそれぞれのvectorに+1ずつ行う\n", 254 | " for word in sentence:\n", 255 | " vec[word_to_ix[word]] += 1\n", 256 | " return vec.view(1, -1)\n", 257 | "\n", 258 | "#labelをTensor型に変換\n", 259 | "def make_target(label, label_to_ix):\n", 260 | " return torch.LongTensor([label_to_ix[label]])\n", 261 | "\n", 262 | "#モデル定義\n", 263 | "model = BoWClassifier(NUM_LABELS, VOCAB_SIZE)" 264 | ] 265 | }, 266 | { 267 | "cell_type": "code", 268 | "execution_count": 62, 269 | "metadata": { 270 | "collapsed": false 271 | }, 272 | "outputs": [ 273 | { 274 | "name": "stdout", 275 | "output_type": "stream", 276 | "text": [ 277 | "Parameter containing:\n", 278 | "\n", 279 | "Columns 0 to 9 \n", 280 | "-0.1560 0.0131 -0.0337 0.1765 0.0763 -0.0027 -0.0337 0.0159 -0.1765 0.1041\n", 281 | " 0.1206 -0.0480 -0.0401 0.0151 -0.1313 0.0597 0.1677 -0.0544 -0.0597 0.0279\n", 282 | "\n", 283 | "Columns 10 to 19 \n", 284 | " 0.0141 -0.1783 0.0642 -0.1412 0.0058 0.1147 0.1744 -0.1844 0.0339 0.1503\n", 285 | " 0.0984 0.0541 0.0886 -0.1466 0.1503 0.0746 0.0485 0.0580 0.0984 -0.0573\n", 286 | "\n", 287 | "Columns 20 to 25 \n", 288 | " 0.1582 0.0160 -0.1422 -0.0204 -0.1415 0.1538\n", 289 | "-0.0593 0.1032 -0.0902 -0.0563 0.1553 0.0992\n", 290 | "[torch.FloatTensor of size 2x26]\n", 291 | "\n", 292 | "Parameter containing:\n", 293 | "-0.0282\n", 294 | " 0.1496\n", 295 | "[torch.FloatTensor of size 2]\n", 296 | "\n" 297 | ] 298 | } 299 | ], 300 | "source": [ 301 | "#モデルのパラメータ数の確認\n", 302 | "#単語の種類が26, English, Spanishでラベルが2種類\n", 303 | "for param in model.parameters():\n", 304 | " print(param)" 305 | ] 306 | }, 307 | { 308 | "cell_type": "code", 309 | "execution_count": 63, 310 | "metadata": { 311 | "collapsed": false 312 | }, 313 | "outputs": [ 314 | { 315 | "name": "stdout", 316 | "output_type": "stream", 317 | "text": [ 318 | "データ\n", 319 | "(['me', 'gusta', 'comer', 'en', 'la', 'cafeteria'], 'SPANISH')\n", 320 | "入力用ベクトル\n", 321 | "\n", 322 | "\n", 323 | "Columns 0 to 12 \n", 324 | " 1 1 1 1 1 1 0 0 0 0 0 0 0\n", 325 | "\n", 326 | "Columns 13 to 25 \n", 327 | " 0 0 0 0 0 0 0 0 0 0 0 0 0\n", 328 | "[torch.FloatTensor of size 1x26]\n", 329 | "\n", 330 | "スペイン語, 英語の対数確率\n", 331 | "Variable containing:\n", 332 | "-0.7341 -0.6538\n", 333 | "[torch.FloatTensor of size 1x2]\n", 334 | "\n" 335 | ] 336 | } 337 | ], 338 | "source": [ 339 | "#データ・入力ベクトル・対数確率の確認\n", 340 | "sample = data[0]\n", 341 | "print (\"データ\")\n", 342 | "print (sample)\n", 343 | "\n", 344 | "bow_vector = make_bow_vector(sample[0], word_to_ix)\n", 345 | "print (\"入力用ベクトル\")\n", 346 | "print (bow_vector)\n", 347 | "\n", 348 | "log_probs = 
model(Variable(bow_vector))\n",
349 | "print (\"Log probabilities for Spanish and English\")\n",
350 | "print(log_probs)"
351 | ]
352 | },
353 | {
354 | "cell_type": "code",
355 | "execution_count": 64,
356 | "metadata": {
357 | "collapsed": false
358 | },
359 | "outputs": [
360 | {
361 | "name": "stdout",
362 | "output_type": "stream",
363 | "text": [
364 | "Variable containing:\n",
365 | "-1.1405 -0.3852\n",
366 | "[torch.FloatTensor of size 1x2]\n",
367 | "\n",
368 | "Variable containing:\n",
369 | "-0.8490 -0.5583\n",
370 | "[torch.FloatTensor of size 1x2]\n",
371 | "\n",
372 | "What is the parameter of the layer of spanish word 'creo'\n",
373 | "Variable containing:\n",
374 | "1.00000e-02 *\n",
375 | " 1.4080\n",
376 | " 9.8377\n",
377 | "[torch.FloatTensor of size 2]\n",
378 | "\n",
379 | "SPANISH\n",
380 | "probability: Variable containing:\n",
381 | "-0.2095 -1.6658\n",
382 | "[torch.FloatTensor of size 1x2]\n",
383 | "\n",
384 | "ENGLISH\n",
385 | "probability: Variable containing:\n",
386 | "-2.6878 -0.0705\n",
387 | "[torch.FloatTensor of size 1x2]\n",
388 | "\n",
389 | "Check the spanish word 'creo' goes up\n",
390 | "Variable containing:\n",
391 | " 0.4846\n",
392 | "-0.3721\n",
393 | "[torch.FloatTensor of size 2]\n",
394 | "\n"
395 | ]
396 | }
397 | ],
398 | "source": [
399 | "label_to_ix = {\"SPANISH\": 0, \"ENGLISH\": 1}\n",
400 | "\n",
401 | "# forward pass only\n",
402 | "for instance, label in test_data:\n",
403 | " bow_vec = Variable(make_bow_vector(instance, word_to_ix))\n",
404 | " log_probs = model(bow_vec)\n",
405 | " print(log_probs)\n",
406 | "\n",
407 | "\n",
408 | "# Print the matrix column corresponding to \"creo\"\n",
409 | "print(\"What is the parameter of the layer of spanish word 'creo'\")\n",
410 | "print(next(model.parameters())[:, word_to_ix[\"creo\"]])\n",
411 | "\n",
412 | "# Define the loss function\n",
413 | "loss_function = nn.NLLLoss()\n",
414 | "\n",
415 | "# Set up the optimizer\n",
416 | "optimizer = optim.SGD(model.parameters(), lr=0.1)\n",
417 | "\n",
418 | "# Usually 5-30 epochs are enough\n",
419 | "# This time with backpropagation\n",
420 | "for epoch in range(100):\n",
421 | " for instance, label in data:\n",
422 | " \n",
423 | " # Reset the gradient information from the previous step\n",
424 | " model.zero_grad()\n",
425 | "\n",
426 | " # Convert to the form the PyTorch model expects\n",
427 | " bow_vec = Variable(make_bow_vector(instance, word_to_ix))\n",
428 | " target = Variable(make_target(label, label_to_ix))\n",
429 | "\n",
430 | " # forward step\n",
431 | " log_probs = model(bow_vec)\n",
432 | "\n",
433 | " # Compute the loss and backpropagate\n",
434 | " # Optimization step\n",
435 | " loss = loss_function(log_probs, target)\n",
436 | " loss.backward()\n",
437 | " optimizer.step()\n",
438 | "\n",
439 | "for instance, label in test_data:\n",
440 | " bow_vec = Variable(make_bow_vector(instance, word_to_ix))\n",
441 | " log_probs = model(bow_vec)\n",
442 | " print (label)\n",
443 | " print((\"probability: \") + str(log_probs))\n",
444 | "\n",
445 | "# Index corresponding to Spanish goes up, English goes down!\n",
446 | "print(\"Check the spanish word 'creo' goes up\")\n",
447 | "print(next(model.parameters())[:, word_to_ix[\"creo\"]])"
448 | ]
449 | },
450 | {
451 | "cell_type": "markdown",
452 | "metadata": {
453 | "collapsed": true
454 | },
455 | "source": [
456 | "The model has indeed learned to give sentence 1 a high probability of Spanish and sentence 2 a high probability of English
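[Editor's sketch] The model above returns `F.log_softmax` outputs and trains with `nn.NLLLoss`; that pairing is numerically the same as feeding the raw linear scores to `nn.CrossEntropyLoss`. A sketch of the equivalence (modern tensor API assumed):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

logits = torch.randn(1, 2)        # raw scores for SPANISH / ENGLISH
target = torch.tensor([0])        # index of the true label

nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), target)
ce = nn.CrossEntropyLoss()(logits, target)
print(nll.item(), ce.item())      # same value up to floating point
```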
\n", 457 | "\"creo\"というワードもスペイン語の可能性が高いようにレイヤー学習が進んでいる" 458 | ] 459 | }, 460 | { 461 | "cell_type": "markdown", 462 | "metadata": {}, 463 | "source": [ 464 | "## Word Embeddings" 465 | ] 466 | }, 467 | { 468 | "cell_type": "markdown", 469 | "metadata": {}, 470 | "source": [ 471 | "Bag of Words形式は単語にそれぞれidを振るが、\n", 472 | "\n", 476 | "といった問題点がある" 477 | ] 478 | }, 479 | { 480 | "cell_type": "markdown", 481 | "metadata": {}, 482 | "source": [ 483 | "### Word Embeddings in Pytorch" 484 | ] 485 | }, 486 | { 487 | "cell_type": "code", 488 | "execution_count": 65, 489 | "metadata": { 490 | "collapsed": false 491 | }, 492 | "outputs": [ 493 | { 494 | "name": "stdout", 495 | "output_type": "stream", 496 | "text": [ 497 | "\n", 498 | " 0\n", 499 | "[torch.LongTensor of size 1]\n", 500 | "\n", 501 | "Variable containing:\n", 502 | "-0.2694 0.1495 -0.0336 -0.6076 -1.0048\n", 503 | "[torch.FloatTensor of size 1x5]\n", 504 | "\n" 505 | ] 506 | } 507 | ], 508 | "source": [ 509 | "#各単語をインデックス化\n", 510 | "word_to_ix = {\"hello\": 0, \"world\": 1}\n", 511 | "\n", 512 | "#合計二つの単語を5次元ベクトルに変換\n", 513 | "embeds = nn.Embedding(2, 5) # 2 words in vocab, 5 dimensional embeddings\n", 514 | "\n", 515 | "#Tensorに型変換\n", 516 | "lookup_tensor = torch.LongTensor([word_to_ix[\"hello\"]])\n", 517 | "print(lookup_tensor)\n", 518 | "\n", 519 | "#\"hello\"をベクトル化(ここでは最適化されていない)\n", 520 | "hello_embed = embeds(Variable(lookup_tensor))\n", 521 | "print(hello_embed)" 522 | ] 523 | }, 524 | { 525 | "cell_type": "markdown", 526 | "metadata": {}, 527 | "source": [ 528 | "### N-Gram Language Modeling" 529 | ] 530 | }, 531 | { 532 | "cell_type": "markdown", 533 | "metadata": {}, 534 | "source": [ 535 | "N-Gramを実装する。N-gramは単語$wi$前のN単語から次の単語を推測する
\n", 536 | "$P(w_i|w_{i−1},w_{i−2},…,w_{i−n+1})$" 537 | ] 538 | }, 539 | { 540 | "cell_type": "code", 541 | "execution_count": 66, 542 | "metadata": { 543 | "collapsed": true 544 | }, 545 | "outputs": [], 546 | "source": [ 547 | "#前の前後関係:前2単語のみ見る\n", 548 | "CONTEXT_SIZE = 2\n", 549 | "\n", 550 | "#埋め込んだ次元\n", 551 | "EMBEDDING_DIM = 10\n", 552 | "\n", 553 | "# 文章サンプル\n", 554 | "# We will use Shakespeare Sonnet 2\n", 555 | "test_sentence = \"\"\"When forty winters shall besiege thy brow,\n", 556 | "And dig deep trenches in thy beauty's field,\n", 557 | "Thy youth's proud livery so gazed on now,\n", 558 | "Will be a totter'd weed of small worth held:\n", 559 | "Then being asked, where all thy beauty lies,\n", 560 | "Where all the treasure of thy lusty days;\n", 561 | "To say, within thine own deep sunken eyes,\n", 562 | "Were an all-eating shame, and thriftless praise.\n", 563 | "How much more praise deserv'd thy beauty's use,\n", 564 | "If thou couldst answer 'This fair child of mine\n", 565 | "Shall sum my count, and make my old excuse,'\n", 566 | "Proving his beauty by succession thine!\n", 567 | "This were to be new made when thou art old,\n", 568 | "And see thy blood warm when thou feel'st it cold.\"\"\".split()" 569 | ] 570 | }, 571 | { 572 | "cell_type": "code", 573 | "execution_count": 99, 574 | "metadata": { 575 | "collapsed": false 576 | }, 577 | "outputs": [ 578 | { 579 | "name": "stdout", 580 | "output_type": "stream", 581 | "text": [ 582 | "[(['When', 'forty'], 'winters'), (['forty', 'winters'], 'shall'), (['winters', 'shall'], 'besiege')]\n" 583 | ] 584 | } 585 | ], 586 | "source": [ 587 | "# tri-gramの例\n", 588 | "# we should tokenize the input, but we will ignore that for now\n", 589 | "# build a list of tuples. Each tuple is ([ word_i-2, word_i-1 ], target word)\n", 590 | "trigrams = [([test_sentence[i], test_sentence[i + 1]], test_sentence[i + 2])\n", 591 | " for i in range(len(test_sentence) - 2)]\n", 592 | "\n", 593 | "#中身を確認\n", 594 | "#when fortyときたら次はwintersを予測する\n", 595 | "# print the first 3, just so you can see what they look like\n", 596 | "print(trigrams[:3])\n", 597 | "\n", 598 | "#登場する単語にインデックスを割り振る\n", 599 | "vocab = set(test_sentence)\n", 600 | "word_to_ix = {word: i for i, word in enumerate(vocab)}" 601 | ] 602 | }, 603 | { 604 | "cell_type": "code", 605 | "execution_count": 100, 606 | "metadata": { 607 | "collapsed": false 608 | }, 609 | "outputs": [ 610 | { 611 | "name": "stdout", 612 | "output_type": "stream", 613 | "text": [ 614 | "['When', 'forty']\n", 615 | "[41, 25]\n", 616 | "vocabrary size: 97\n", 617 | "vocabrary size: 10\n", 618 | "Variable containing:\n", 619 | "-0.8429 -0.2405 -0.5897 1.4151 0.3399 0.0112 1.1586 -0.2975 -1.7608 1.7539\n", 620 | "-0.1815 -0.6644 0.4104 0.8742 -0.1276 0.6828 -1.9091 1.4970 -1.2883 -0.0708\n", 621 | "[torch.FloatTensor of size 2x10]\n", 622 | "\n" 623 | ] 624 | } 625 | ], 626 | "source": [ 627 | "context, target = trigrams[0]\n", 628 | "context_idxs = [word_to_ix[w] for w in context]\n", 629 | "\n", 630 | "# 変換元の単語\n", 631 | "print context\n", 632 | "\n", 633 | "# 単語のid\n", 634 | "print context_idxs\n", 635 | "\n", 636 | "# Tensor変換\n", 637 | "context_var = Variable(torch.LongTensor(context_idxs))\n", 638 | "\n", 639 | "# 単語id -> vector変換\n", 640 | "# 2単語 × vector変換 : (embedding層 × 2) のサイズ\n", 641 | "embeddings = nn.Embedding(len(vocab), EMBEDDING_DIM)\n", 642 | "\n", 643 | "print \"vocabrary size: \" + str(len(vocab))\n", 644 | "print \"vocabrary size: \" + str(EMBEDDING_DIM)\n", 645 | "print embeddings(context_var)" 
646 | ] 647 | }, 648 | { 649 | "cell_type": "code", 650 | "execution_count": 101, 651 | "metadata": { 652 | "collapsed": true 653 | }, 654 | "outputs": [], 655 | "source": [ 656 | "#N-Gramモデルの生成\n", 657 | "class NGramLanguageModeler(nn.Module):\n", 658 | "\n", 659 | " def __init__(self, vocab_size, embedding_dim, context_size):\n", 660 | " super(NGramLanguageModeler, self).__init__()\n", 661 | " \n", 662 | " # 合計の単語の種類を潜在次元に落とし込む\n", 663 | " self.embeddings = nn.Embedding(vocab_size, embedding_dim)\n", 664 | " \n", 665 | " self.linear1 = nn.Linear(context_size * embedding_dim, 128)\n", 666 | " self.linear2 = nn.Linear(128, vocab_size)\n", 667 | "\n", 668 | " def forward(self, inputs):\n", 669 | " embeds = self.embeddings(inputs).view((1, -1))\n", 670 | " out = F.relu(self.linear1(embeds))\n", 671 | " out = self.linear2(out)\n", 672 | " log_probs = F.log_softmax(out)\n", 673 | " return log_probs" 674 | ] 675 | }, 676 | { 677 | "cell_type": "code", 678 | "execution_count": 102, 679 | "metadata": { 680 | "collapsed": true 681 | }, 682 | "outputs": [], 683 | "source": [ 684 | "#ロスの確認\n", 685 | "losses = []\n", 686 | "\n", 687 | "#ロス関数の定義\n", 688 | "#The negative log likelihood loss\n", 689 | "loss_function = nn.NLLLoss()\n", 690 | "\n", 691 | "#モデル定義\n", 692 | "model = NGramLanguageModeler(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE)\n", 693 | "\n", 694 | "#optimizerの定義\n", 695 | "optimizer = optim.SGD(model.parameters(), lr=0.001)" 696 | ] 697 | }, 698 | { 699 | "cell_type": "code", 700 | "execution_count": 103, 701 | "metadata": { 702 | "collapsed": false 703 | }, 704 | "outputs": [ 705 | { 706 | "name": "stdout", 707 | "output_type": "stream", 708 | "text": [ 709 | " 128)\n", 712 | " (linear2): Linear (128 -> 97)\n", 713 | ")>\n" 714 | ] 715 | } 716 | ], 717 | "source": [ 718 | "print model.parameters" 719 | ] 720 | }, 721 | { 722 | "cell_type": "code", 723 | "execution_count": 104, 724 | "metadata": { 725 | "collapsed": false 726 | }, 727 | "outputs": [ 728 | { 729 | "name": "stdout", 730 | "output_type": "stream", 731 | "text": [ 732 | "[\n", 733 | " 519.7821\n", 734 | "[torch.FloatTensor of size 1]\n", 735 | ", \n", 736 | " 517.4017\n", 737 | "[torch.FloatTensor of size 1]\n", 738 | ", \n", 739 | " 515.0360\n", 740 | "[torch.FloatTensor of size 1]\n", 741 | ", \n", 742 | " 512.6841\n", 743 | "[torch.FloatTensor of size 1]\n", 744 | ", \n", 745 | " 510.3459\n", 746 | "[torch.FloatTensor of size 1]\n", 747 | ", \n", 748 | " 508.0204\n", 749 | "[torch.FloatTensor of size 1]\n", 750 | ", \n", 751 | " 505.7052\n", 752 | "[torch.FloatTensor of size 1]\n", 753 | ", \n", 754 | " 503.4016\n", 755 | "[torch.FloatTensor of size 1]\n", 756 | ", \n", 757 | " 501.1090\n", 758 | "[torch.FloatTensor of size 1]\n", 759 | ", \n", 760 | " 498.8246\n", 761 | "[torch.FloatTensor of size 1]\n", 762 | "]\n" 763 | ] 764 | } 765 | ], 766 | "source": [ 767 | "#トレーニング\n", 768 | "for epoch in range(10):\n", 769 | " total_loss = torch.Tensor([0])\n", 770 | " \n", 771 | " # 各単語の予測\n", 772 | " # 前のN単語を見て次の単語を予測する\n", 773 | " for context, target in trigrams:\n", 774 | " \n", 775 | " # 文章を読み込める形式に変える\n", 776 | " # Step 1. Prepare the inputs to be passed to the model (i.e, turn the words\n", 777 | " # into integer indices and wrap them in variables)\n", 778 | " context_idxs = [word_to_ix[w] for w in context]\n", 779 | " context_var = Variable(torch.LongTensor(context_idxs))\n", 780 | "\n", 781 | " # 勾配初期化\n", 782 | " # Step 2. Recall that torch *accumulates* gradients. 
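[Editor's sketch] Once this training loop finishes, the tri-gram model can be queried for its most likely next word. A sketch in this notebook's Variable-era style; `predict_next` and `ix_to_word` are hypothetical helpers, not part of the original notebook:

```python
ix_to_word = {i: w for w, i in word_to_ix.items()}   # invert the vocab index

def predict_next(context, model, word_to_ix):
    context_var = Variable(torch.LongTensor([word_to_ix[w] for w in context]))
    log_probs = model(context_var)
    _, best = torch.max(log_probs, 1)                # argmax over the vocabulary
    return ix_to_word[int(best.data.view(-1)[0])]

print(predict_next(['When', 'forty'], model, word_to_ix))  # ideally 'winters'
```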
Before passing in a\n",
783 | " # new instance, you need to zero out the gradients from the old\n",
784 | " # instance\n",
785 | " model.zero_grad()\n",
786 | "\n",
787 | " # Model\n",
788 | " # Step 3. Run the forward pass, getting log probabilities over next\n",
789 | " # words\n",
790 | " log_probs = model(context_var)\n",
791 | "\n",
792 | " # Compute the loss\n",
793 | " # Step 4. Compute your loss function. (Again, Torch wants the target\n",
794 | " # word wrapped in a variable)\n",
795 | " loss = loss_function(log_probs, Variable(\n",
796 | " torch.LongTensor([word_to_ix[target]])))\n",
797 | "\n",
798 | " # Backpropagation\n",
799 | " # Step 5. Do the backward pass and update the gradient\n",
800 | " loss.backward()\n",
801 | " optimizer.step()\n",
802 | "\n",
803 | " total_loss += loss.data\n",
804 | " losses.append(total_loss)\n",
805 | "print(losses) # The loss decreased every iteration over the training data!"
806 | ]
807 | },
808 | {
809 | "cell_type": "markdown",
810 | "metadata": {},
811 | "source": [
812 | "## Computing Word Embeddings: Continuous Bag-of-Words"
813 | ]
814 | },
815 | {
816 | "cell_type": "markdown",
817 | "metadata": {},
818 | "source": [
819 | "CBoW: predict the middle word from the several words on either side of it\n",
820 | "It is commonly used to initialize embeddings"
821 | ]
822 | },
823 | {
824 | "cell_type": "markdown",
825 | "metadata": {},
826 | "source": [
827 | "Given a word $w_i$ and the N keywords before and after it, $w_{i-1},…,w_{i-N}$ and $w_{i+1},…,w_{i+N}$,\n",
828 | "and writing $q_w$ for the embedding of word $w$, we consider minimizing the following"
829 | ]
830 | },
831 | {
832 | "cell_type": "markdown",
833 | "metadata": {},
834 | "source": [
835 | "$-log p(w_i|C) = -log Softmax(A(\sum_{w∈C}q_w)+b)$"
836 | ]
837 | },
838 | {
839 | "cell_type": "markdown",
840 | "metadata": {},
841 | "source": [
842 | "Implement this model in Pytorch by filling in the class below. Some tips:"
843 | ]
844 | },
845 | {
846 | "cell_type": "markdown",
847 | "metadata": {},
848 | "source": [
849 | "Think about which parameters you need to define.
\n", 850 | "Make sure you know what shape each operation expects. Use .view() if you need to reshape." 851 | ] 852 | }, 853 | { 854 | "cell_type": "code", 855 | "execution_count": 114, 856 | "metadata": { 857 | "collapsed": false 858 | }, 859 | "outputs": [ 860 | { 861 | "name": "stdout", 862 | "output_type": "stream", 863 | "text": [ 864 | "[(['We', 'are', 'to', 'study'], 'about'), (['are', 'about', 'study', 'the'], 'to'), (['about', 'to', 'the', 'idea'], 'study'), (['to', 'study', 'idea', 'of'], 'the'), (['study', 'the', 'of', 'a'], 'idea')]\n" 865 | ] 866 | } 867 | ], 868 | "source": [ 869 | "# 前後2単語ずつを推測に用いる\n", 870 | "CONTEXT_SIZE = 2 # 2 words to the left, 2 to the right\n", 871 | "\n", 872 | "#埋め込んだ次元\n", 873 | "EMBEDDING_DIM = 10\n", 874 | "\n", 875 | "# 原文\n", 876 | "raw_text = \"\"\"We are about to study the idea of a computational process.\n", 877 | "Computational processes are abstract beings that inhabit computers.\n", 878 | "As they evolve, processes manipulate other abstract things called data.\n", 879 | "The evolution of a process is directed by a pattern of rules\n", 880 | "called a program. People create programs to direct processes. In effect,\n", 881 | "we conjure the spirits of the computer with our spells.\"\"\".split()\n", 882 | "\n", 883 | "# インデックス化\n", 884 | "word_to_ix = {word: i for i, word in enumerate(raw_text)}\n", 885 | "\n", 886 | "# 前後2単語ずつと予測単語の組み合わせを作成する\n", 887 | "data = []\n", 888 | "for i in range(2, len(raw_text) - 2):\n", 889 | " context = [raw_text[i - 2], raw_text[i - 1],\n", 890 | " raw_text[i + 1], raw_text[i + 2]]\n", 891 | " target = raw_text[i]\n", 892 | " data.append((context, target))\n", 893 | "print(data[:5])" 894 | ] 895 | }, 896 | { 897 | "cell_type": "code", 898 | "execution_count": 141, 899 | "metadata": { 900 | "collapsed": false 901 | }, 902 | "outputs": [ 903 | { 904 | "data": { 905 | "text/plain": [ 906 | "Variable containing:\n", 907 | " 0\n", 908 | " 13\n", 909 | " 47\n", 910 | " 4\n", 911 | "[torch.LongTensor of size 4]" 912 | ] 913 | }, 914 | "execution_count": 141, 915 | "metadata": {}, 916 | "output_type": "execute_result" 917 | } 918 | ], 919 | "source": [ 920 | "#CBOWのクラス定義\n", 921 | "class CBOW(nn.Module):\n", 922 | "\n", 923 | " def __init__(self, vocab_size, embedding_dim, context_size):\n", 924 | " super(CBOW, self).__init__()\n", 925 | " self.embeddings = nn.Embedding(vocab_size, embedding_dim)\n", 926 | " self.linear1 = nn.Linear(2 * context_size * embedding_dim, 128)\n", 927 | " self.linear2 = nn.Linear(128, vocab_size)\n", 928 | "\n", 929 | " def forward(self, inputs):\n", 930 | " embeds = self.embeddings(inputs).view((1, -1))\n", 931 | " out = F.relu(self.linear1(embeds))\n", 932 | " out = self.linear2(out)\n", 933 | " log_probs = F.log_softmax(out)\n", 934 | " return log_probs\n", 935 | "\n", 936 | "# create your model and train. 
here are some functions to help you make\n", 937 | "# the data ready for use by your module\n", 938 | "\n", 939 | "\n", 940 | "def make_context_vector(context, word_to_ix):\n", 941 | " idxs = [word_to_ix[w] for w in context]\n", 942 | " tensor = torch.LongTensor(idxs)\n", 943 | " return Variable(tensor)\n", 944 | "\n", 945 | "\n", 946 | "make_context_vector(data[0][0], word_to_ix) # example" 947 | ] 948 | }, 949 | { 950 | "cell_type": "code", 951 | "execution_count": 148, 952 | "metadata": { 953 | "collapsed": false 954 | }, 955 | "outputs": [], 956 | "source": [ 957 | "#ロスの確認\n", 958 | "losses = []\n", 959 | "\n", 960 | "#ロス関数の定義\n", 961 | "#The negative log likelihood loss\n", 962 | "loss_function = nn.NLLLoss()\n", 963 | "\n", 964 | "#モデル定義\n", 965 | "model = CBOW(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE)\n", 966 | "\n", 967 | "#optimizerの定義\n", 968 | "optimizer = optim.SGD(model.parameters(), lr=0.001)" 969 | ] 970 | }, 971 | { 972 | "cell_type": "code", 973 | "execution_count": 149, 974 | "metadata": { 975 | "collapsed": false 976 | }, 977 | "outputs": [ 978 | { 979 | "data": { 980 | "text/plain": [ 981 | " 128)\n", 984 | " (linear2): Linear (128 -> 97)\n", 985 | ")>" 986 | ] 987 | }, 988 | "execution_count": 149, 989 | "metadata": {}, 990 | "output_type": "execute_result" 991 | } 992 | ], 993 | "source": [ 994 | "model.parameters" 995 | ] 996 | }, 997 | { 998 | "cell_type": "code", 999 | "execution_count": 150, 1000 | "metadata": { 1001 | "collapsed": false 1002 | }, 1003 | "outputs": [ 1004 | { 1005 | "name": "stdout", 1006 | "output_type": "stream", 1007 | "text": [ 1008 | "[\n", 1009 | " 266.7071\n", 1010 | "[torch.FloatTensor of size 1]\n", 1011 | ", \n", 1012 | " 264.9733\n", 1013 | "[torch.FloatTensor of size 1]\n", 1014 | ", \n", 1015 | " 263.2492\n", 1016 | "[torch.FloatTensor of size 1]\n", 1017 | ", \n", 1018 | " 261.5334\n", 1019 | "[torch.FloatTensor of size 1]\n", 1020 | ", \n", 1021 | " 259.8259\n", 1022 | "[torch.FloatTensor of size 1]\n", 1023 | ", \n", 1024 | " 258.1265\n", 1025 | "[torch.FloatTensor of size 1]\n", 1026 | ", \n", 1027 | " 256.4332\n", 1028 | "[torch.FloatTensor of size 1]\n", 1029 | ", \n", 1030 | " 254.7435\n", 1031 | "[torch.FloatTensor of size 1]\n", 1032 | ", \n", 1033 | " 253.0574\n", 1034 | "[torch.FloatTensor of size 1]\n", 1035 | ", \n", 1036 | " 251.3734\n", 1037 | "[torch.FloatTensor of size 1]\n", 1038 | "]\n" 1039 | ] 1040 | } 1041 | ], 1042 | "source": [ 1043 | "#トレーニング\n", 1044 | "for epoch in range(10):\n", 1045 | " total_loss = torch.Tensor([0])\n", 1046 | " \n", 1047 | " # 各単語の予測\n", 1048 | " # 前のN単語を見て次の単語を予測する\n", 1049 | " for context, target in data:\n", 1050 | " \n", 1051 | " # 文章を読み込める形式に変える\n", 1052 | " context_idxs = [word_to_ix[w] for w in context]\n", 1053 | " context_var = Variable(torch.LongTensor(context_idxs))\n", 1054 | "\n", 1055 | " # 勾配初期化\n", 1056 | " model.zero_grad()\n", 1057 | "\n", 1058 | " # モデル\n", 1059 | " log_probs = model(context_var)\n", 1060 | "\n", 1061 | " #ロス関数の計算\n", 1062 | " loss = loss_function(log_probs, Variable(\n", 1063 | " torch.LongTensor([word_to_ix[target]])))\n", 1064 | "\n", 1065 | " #誤差逆伝播法\n", 1066 | " loss.backward()\n", 1067 | " optimizer.step()\n", 1068 | "\n", 1069 | " total_loss += loss.data\n", 1070 | " losses.append(total_loss)\n", 1071 | "print(losses) # The loss decreased every iteration over the training data!" 
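[Editor's sketch] After the CBOW training loop above, the learned vectors can be inspected by ranking words by cosine similarity. `nearest_words` is a hypothetical helper, not part of the original notebook; note that the `word_to_ix` built above uses `enumerate(raw_text)`, so repeated words keep only their last index — building it from `set(raw_text)` (as the earlier N-gram cell does) gives one dense id per word.

```python
import torch

def nearest_words(word, k=3):
    W = model.embeddings.weight.data          # one row per vocabulary id
    v = W[word_to_ix[word]]
    scored = []
    for other, i in word_to_ix.items():
        if other == word:
            continue
        u = W[i]
        cos = float(torch.dot(u, v)) / float(u.norm() * v.norm() + 1e-8)
        scored.append((cos, other))
    return sorted(scored, reverse=True)[:k]

print(nearest_words('processes'))
```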
1072 | ] 1073 | }, 1074 | { 1075 | "cell_type": "markdown", 1076 | "metadata": {}, 1077 | "source": [ 1078 | "## Sequence Models and Long-Short Term Memory Networks" 1079 | ] 1080 | }, 1081 | { 1082 | "cell_type": "markdown", 1083 | "metadata": {}, 1084 | "source": [ 1085 | "### LSTMの挙動をチェック" 1086 | ] 1087 | }, 1088 | { 1089 | "cell_type": "code", 1090 | "execution_count": 152, 1091 | "metadata": { 1092 | "collapsed": false 1093 | }, 1094 | "outputs": [ 1095 | { 1096 | "data": { 1097 | "text/plain": [ 1098 | "[Variable containing:\n", 1099 | " -1.5859 -1.4814 0.4191\n", 1100 | " [torch.FloatTensor of size 1x3], Variable containing:\n", 1101 | " -0.9734 0.4680 1.6193\n", 1102 | " [torch.FloatTensor of size 1x3], Variable containing:\n", 1103 | " -0.8317 1.1417 0.2224\n", 1104 | " [torch.FloatTensor of size 1x3], Variable containing:\n", 1105 | " 1.3163 1.7850 1.3064\n", 1106 | " [torch.FloatTensor of size 1x3], Variable containing:\n", 1107 | " 0.3383 -0.6922 0.9433\n", 1108 | " [torch.FloatTensor of size 1x3]]" 1109 | ] 1110 | }, 1111 | "execution_count": 152, 1112 | "metadata": {}, 1113 | "output_type": "execute_result" 1114 | } 1115 | ], 1116 | "source": [ 1117 | "#LSTM層の定義\n", 1118 | "lstm = nn.LSTM(3, 3) # Input dim is 3, output dim is 3\n", 1119 | "inputs = [Variable(torch.randn((1, 3)))\n", 1120 | " for _ in range(5)] # make a sequence of length 5\n", 1121 | "inputs" 1122 | ] 1123 | }, 1124 | { 1125 | "cell_type": "code", 1126 | "execution_count": 154, 1127 | "metadata": { 1128 | "collapsed": false 1129 | }, 1130 | "outputs": [ 1131 | { 1132 | "data": { 1133 | "text/plain": [ 1134 | "(Variable containing:\n", 1135 | " (0 ,.,.) = \n", 1136 | " 0.7190 0.9216 -1.0710\n", 1137 | " [torch.FloatTensor of size 1x1x3], Variable containing:\n", 1138 | " (0 ,.,.) = \n", 1139 | " -0.2065 1.0174 -0.3371\n", 1140 | " [torch.FloatTensor of size 1x1x3])" 1141 | ] 1142 | }, 1143 | "execution_count": 154, 1144 | "metadata": {}, 1145 | "output_type": "execute_result" 1146 | } 1147 | ], 1148 | "source": [ 1149 | "#隠れ層の初期化\n", 1150 | "# initialize the hidden state.\n", 1151 | "hidden = (Variable(torch.randn(1, 1, 3)),\n", 1152 | " Variable(torch.randn((1, 1, 3))))\n", 1153 | "hidden" 1154 | ] 1155 | }, 1156 | { 1157 | "cell_type": "code", 1158 | "execution_count": 160, 1159 | "metadata": { 1160 | "collapsed": false 1161 | }, 1162 | "outputs": [ 1163 | { 1164 | "name": "stdout", 1165 | "output_type": "stream", 1166 | "text": [ 1167 | "Variable containing:\n", 1168 | "-1.5859 -1.4814 0.4191\n", 1169 | "[torch.FloatTensor of size 1x3]\n", 1170 | "\n", 1171 | "Variable containing:\n", 1172 | "(0 ,.,.) = \n", 1173 | " -1.5859 -1.4814 0.4191\n", 1174 | "[torch.FloatTensor of size 1x1x3]\n", 1175 | "\n", 1176 | "\n" 1177 | ] 1178 | } 1179 | ], 1180 | "source": [ 1181 | "#1x3を3次元にするために1x1x3(バッチサイズ1に等しい)に変換\n", 1182 | "print(inputs[0])\n", 1183 | "print(inputs[0].view(1, 1, -1))\n", 1184 | "print " 1185 | ] 1186 | }, 1187 | { 1188 | "cell_type": "code", 1189 | "execution_count": 159, 1190 | "metadata": { 1191 | "collapsed": false 1192 | }, 1193 | "outputs": [ 1194 | { 1195 | "name": "stdout", 1196 | "output_type": "stream", 1197 | "text": [ 1198 | "Variable containing:\n", 1199 | "-1.5859 -1.4814 0.4191\n", 1200 | "[torch.FloatTensor of size 1x3]\n", 1201 | "\n", 1202 | "Variable containing:\n", 1203 | "(0 ,.,.) = \n", 1204 | " -1.5859 -1.4814 0.4191\n", 1205 | "[torch.FloatTensor of size 1x1x3]\n", 1206 | "\n", 1207 | "\n", 1208 | "Variable containing:\n", 1209 | "(0 ,.,.) 
= \n", 1210 | " 0.1221 0.2631 0.1254\n", 1211 | "[torch.FloatTensor of size 1x1x3]\n", 1212 | "\n", 1213 | "(Variable containing:\n", 1214 | "(0 ,.,.) = \n", 1215 | " 0.1221 0.2631 0.1254\n", 1216 | "[torch.FloatTensor of size 1x1x3]\n", 1217 | ", Variable containing:\n", 1218 | "(0 ,.,.) = \n", 1219 | " 0.1675 0.5136 0.3686\n", 1220 | "[torch.FloatTensor of size 1x1x3]\n", 1221 | ")\n" 1222 | ] 1223 | } 1224 | ], 1225 | "source": [ 1226 | "#inputs内の各値に対してLSTMを通す\n", 1227 | "for i in inputs:\n", 1228 | " # Step through the sequence one element at a time.\n", 1229 | " # after each step, hidden contains the hidden state.\n", 1230 | " out, hidden = lstm(i.view(1, 1, -1), hidden)\n", 1231 | "print(out)\n", 1232 | "print(hidden)" 1233 | ] 1234 | }, 1235 | { 1236 | "cell_type": "code", 1237 | "execution_count": 163, 1238 | "metadata": { 1239 | "collapsed": false 1240 | }, 1241 | "outputs": [ 1242 | { 1243 | "name": "stdout", 1244 | "output_type": "stream", 1245 | "text": [ 1246 | "[Variable containing:\n", 1247 | "-1.5859 -1.4814 0.4191\n", 1248 | "[torch.FloatTensor of size 1x3]\n", 1249 | ", Variable containing:\n", 1250 | "-0.9734 0.4680 1.6193\n", 1251 | "[torch.FloatTensor of size 1x3]\n", 1252 | ", Variable containing:\n", 1253 | "-0.8317 1.1417 0.2224\n", 1254 | "[torch.FloatTensor of size 1x3]\n", 1255 | ", Variable containing:\n", 1256 | " 1.3163 1.7850 1.3064\n", 1257 | "[torch.FloatTensor of size 1x3]\n", 1258 | ", Variable containing:\n", 1259 | " 0.3383 -0.6922 0.9433\n", 1260 | "[torch.FloatTensor of size 1x3]\n", 1261 | "]\n", 1262 | "Variable containing:\n", 1263 | "-1.5859 -1.4814 0.4191\n", 1264 | "-0.9734 0.4680 1.6193\n", 1265 | "-0.8317 1.1417 0.2224\n", 1266 | " 1.3163 1.7850 1.3064\n", 1267 | " 0.3383 -0.6922 0.9433\n", 1268 | "[torch.FloatTensor of size 5x3]\n", 1269 | "\n", 1270 | "Variable containing:\n", 1271 | "(0 ,.,.) = \n", 1272 | " -1.5859 -1.4814 0.4191\n", 1273 | "\n", 1274 | "(1 ,.,.) = \n", 1275 | " -0.9734 0.4680 1.6193\n", 1276 | "\n", 1277 | "(2 ,.,.) = \n", 1278 | " -0.8317 1.1417 0.2224\n", 1279 | "\n", 1280 | "(3 ,.,.) = \n", 1281 | " 1.3163 1.7850 1.3064\n", 1282 | "\n", 1283 | "(4 ,.,.) = \n", 1284 | " 0.3383 -0.6922 0.9433\n", 1285 | "[torch.FloatTensor of size 5x1x3]\n", 1286 | "\n" 1287 | ] 1288 | } 1289 | ], 1290 | "source": [ 1291 | "#for文を使わないための変換\n", 1292 | "#元のinputs\n", 1293 | "print(inputs)\n", 1294 | "\n", 1295 | "#1x3が5個あるやつをくっつける\n", 1296 | "print(torch.cat(inputs))\n", 1297 | "\n", 1298 | "#5x1x3に変換\n", 1299 | "print(torch.cat(inputs).view(len(inputs), 1, -1))" 1300 | ] 1301 | }, 1302 | { 1303 | "cell_type": "code", 1304 | "execution_count": 41, 1305 | "metadata": { 1306 | "collapsed": false 1307 | }, 1308 | "outputs": [ 1309 | { 1310 | "name": "stdout", 1311 | "output_type": "stream", 1312 | "text": [ 1313 | "Variable containing:\n", 1314 | "(0 ,.,.) = \n", 1315 | " -0.5690 0.2984 -0.2307\n", 1316 | "\n", 1317 | "(1 ,.,.) = \n", 1318 | " -0.4488 0.2394 0.0703\n", 1319 | "\n", 1320 | "(2 ,.,.) = \n", 1321 | " -0.3698 0.0887 0.3511\n", 1322 | "\n", 1323 | "(3 ,.,.) = \n", 1324 | " -0.4498 0.3913 0.2789\n", 1325 | "\n", 1326 | "(4 ,.,.) = \n", 1327 | " -0.4714 0.1113 0.4174\n", 1328 | "[torch.FloatTensor of size 5x1x3]\n", 1329 | "\n", 1330 | "(Variable containing:\n", 1331 | "(0 ,.,.) = \n", 1332 | " -0.4714 0.1113 0.4174\n", 1333 | "[torch.FloatTensor of size 1x1x3]\n", 1334 | ", Variable containing:\n", 1335 | "(0 ,.,.) 
= \n", 1336 | " -0.6453 0.1624 0.5910\n", 1337 | "[torch.FloatTensor of size 1x1x3]\n", 1338 | ")\n" 1339 | ] 1340 | } 1341 | ], 1342 | "source": [ 1343 | "#for文なしでまとめて実行\n", 1344 | "# alternatively, we can do the entire sequence all at once.\n", 1345 | "# the first value returned by LSTM is all of the hidden states throughout\n", 1346 | "# the sequence. the second is just the most recent hidden state\n", 1347 | "# (compare the last slice of \"out\" with \"hidden\" below, they are the same)\n", 1348 | "# The reason for this is that:\n", 1349 | "# \"out\" will give you access to all hidden states in the sequence\n", 1350 | "# \"hidden\" will allow you to continue the sequence and backpropogate,\n", 1351 | "# by passing it as an argument to the lstm at a later time\n", 1352 | "# Add the extra 2nd dimension\n", 1353 | "inputs = torch.cat(inputs).view(len(inputs), 1, -1)\n", 1354 | "hidden = (Variable(torch.randn(1, 1, 3)), Variable(\n", 1355 | " torch.randn((1, 1, 3)))) # clean out hidden state\n", 1356 | "out, hidden = lstm(inputs, hidden)\n", 1357 | "print(out)\n", 1358 | "print(hidden)" 1359 | ] 1360 | }, 1361 | { 1362 | "cell_type": "markdown", 1363 | "metadata": {}, 1364 | "source": [ 1365 | "### Example: An LSTM for Part-of-Speech Tagging" 1366 | ] 1367 | }, 1368 | { 1369 | "cell_type": "code", 1370 | "execution_count": 166, 1371 | "metadata": { 1372 | "collapsed": false 1373 | }, 1374 | "outputs": [ 1375 | { 1376 | "name": "stdout", 1377 | "output_type": "stream", 1378 | "text": [ 1379 | "{'Everybody': 5, 'ate': 2, 'apple': 4, 'that': 7, 'read': 6, 'dog': 1, 'book': 8, 'the': 3, 'The': 0}\n" 1380 | ] 1381 | } 1382 | ], 1383 | "source": [ 1384 | "def prepare_sequence(seq, to_ix):\n", 1385 | " idxs = [to_ix[w] for w in seq]\n", 1386 | " tensor = torch.LongTensor(idxs)\n", 1387 | " return Variable(tensor)\n", 1388 | "\n", 1389 | "#トレーニングデータ\n", 1390 | "#単語と品詞の対応\n", 1391 | "training_data = [\n", 1392 | " (\"The dog ate the apple\".split(), [\"DET\", \"NN\", \"V\", \"DET\", \"NN\"]),\n", 1393 | " (\"Everybody read that book\".split(), [\"NN\", \"V\", \"DET\", \"NN\"])\n", 1394 | "]\n", 1395 | "\n", 1396 | "#単語のインデックス\n", 1397 | "word_to_ix = {}\n", 1398 | "for sent, tags in training_data:\n", 1399 | " for word in sent:\n", 1400 | " if word not in word_to_ix:\n", 1401 | " word_to_ix[word] = len(word_to_ix)\n", 1402 | "print(word_to_ix)\n", 1403 | "\n", 1404 | "#品詞のインデックス\n", 1405 | "tag_to_ix = {\"DET\": 0, \"NN\": 1, \"V\": 2}\n", 1406 | "\n", 1407 | "#通常は32か64次元\n", 1408 | "# These will usually be more like 32 or 64 dimensional.\n", 1409 | "# We will keep them small, so we can see how the weights change as we train.\n", 1410 | "EMBEDDING_DIM = 6\n", 1411 | "HIDDEN_DIM = 6" 1412 | ] 1413 | }, 1414 | { 1415 | "cell_type": "code", 1416 | "execution_count": 167, 1417 | "metadata": { 1418 | "collapsed": true 1419 | }, 1420 | "outputs": [], 1421 | "source": [ 1422 | "class LSTMTagger(nn.Module):\n", 1423 | "\n", 1424 | " def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):\n", 1425 | " super(LSTMTagger, self).__init__()\n", 1426 | " \n", 1427 | " # 隠れ層の次元\n", 1428 | " self.hidden_dim = hidden_dim\n", 1429 | " \n", 1430 | " # embedding層\n", 1431 | " self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)\n", 1432 | "\n", 1433 | " # LSTMの定義\n", 1434 | " # The LSTM takes word embeddings as inputs, and outputs hidden states\n", 1435 | " # with dimensionality hidden_dim.\n", 1436 | " self.lstm = nn.LSTM(embedding_dim, hidden_dim)\n", 1437 | "\n", 1438 | " # 潜在空間\n", 1439 | 
" # The linear layer that maps from hidden state space to tag space\n", 1440 | " self.hidden2tag = nn.Linear(hidden_dim, tagset_size)\n", 1441 | " \n", 1442 | " #LSTMの隠れ層の定義\n", 1443 | " self.hidden = self.init_hidden()\n", 1444 | " \n", 1445 | " #隠れ層の定義\n", 1446 | " def init_hidden(self):\n", 1447 | " # 隠れ層は(num_layers, minibatch_size, hidden_dim)のように定義する必要がある\n", 1448 | " # Before we've done anything, we dont have any hidden state.\n", 1449 | " # Refer to the Pytorch documentation to see exactly\n", 1450 | " # why they have this dimensionality.\n", 1451 | " # The axes semantics are (num_layers, minibatch_size, hidden_dim)\n", 1452 | " return (Variable(torch.zeros(1, 1, self.hidden_dim)),\n", 1453 | " Variable(torch.zeros(1, 1, self.hidden_dim)))\n", 1454 | " \n", 1455 | " #順伝播\n", 1456 | " def forward(self, sentence):\n", 1457 | " \n", 1458 | " #embedding\n", 1459 | " embeds = self.word_embeddings(sentence)\n", 1460 | " \n", 1461 | " #LSTM\n", 1462 | " lstm_out, self.hidden = self.lstm(\n", 1463 | " embeds.view(len(sentence), 1, -1), self.hidden)\n", 1464 | " \n", 1465 | " #Linear\n", 1466 | " tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))\n", 1467 | " \n", 1468 | " #Log_Softmax\n", 1469 | " tag_scores = F.log_softmax(tag_space)\n", 1470 | " return tag_scores" 1471 | ] 1472 | }, 1473 | { 1474 | "cell_type": "code", 1475 | "execution_count": 168, 1476 | "metadata": { 1477 | "collapsed": false 1478 | }, 1479 | "outputs": [], 1480 | "source": [ 1481 | "#モデル定義諸々\n", 1482 | "model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix), len(tag_to_ix))\n", 1483 | "loss_function = nn.NLLLoss()\n", 1484 | "optimizer = optim.SGD(model.parameters(), lr=0.1)" 1485 | ] 1486 | }, 1487 | { 1488 | "cell_type": "code", 1489 | "execution_count": 169, 1490 | "metadata": { 1491 | "collapsed": false 1492 | }, 1493 | "outputs": [ 1494 | { 1495 | "name": "stdout", 1496 | "output_type": "stream", 1497 | "text": [ 1498 | "Variable containing:\n", 1499 | "-0.9500 -1.1105 -1.2593\n", 1500 | "-0.9220 -1.1113 -1.2978\n", 1501 | "-1.0782 -1.0665 -1.1534\n", 1502 | "-0.9738 -1.0909 -1.2502\n", 1503 | "-1.0779 -1.0665 -1.1537\n", 1504 | "[torch.FloatTensor of size 5x3]\n", 1505 | "\n" 1506 | ] 1507 | } 1508 | ], 1509 | "source": [ 1510 | "# LSTMに通してみる\n", 1511 | "# See what the scores are before training\n", 1512 | "# Note that element i,j of the output is the score for tag j for word i.\n", 1513 | "inputs = prepare_sequence(training_data[0][0], word_to_ix)\n", 1514 | "tag_scores = model(inputs)\n", 1515 | "print(tag_scores)" 1516 | ] 1517 | }, 1518 | { 1519 | "cell_type": "code", 1520 | "execution_count": 171, 1521 | "metadata": { 1522 | "collapsed": false 1523 | }, 1524 | "outputs": [], 1525 | "source": [ 1526 | "#トレーニング\n", 1527 | "for epoch in range(300): # again, normally you would NOT do 300 epochs, it is toy data\n", 1528 | " \n", 1529 | " #文章ごとの処理\n", 1530 | " for sentence, tags in training_data:\n", 1531 | " \n", 1532 | " #勾配初期化\n", 1533 | " # Step 1. Remember that Pytorch accumulates gradients.\n", 1534 | " # We need to clear them out before each instance\n", 1535 | " model.zero_grad()\n", 1536 | "\n", 1537 | " #隠れ層(LSTMの記憶部分)の状態を初期化\n", 1538 | " # Also, we need to clear out the hidden state of the LSTM,\n", 1539 | " # detaching it from its history on the last instance.\n", 1540 | " model.hidden = model.init_hidden()\n", 1541 | "\n", 1542 | " #文章変換\n", 1543 | " # Step 2. 
Get our inputs ready for the network, that is, turn them into\n", 1544 | " # Variables of word indices.\n", 1545 | " sentence_in = prepare_sequence(sentence, word_to_ix)\n", 1546 | " targets = prepare_sequence(tags, tag_to_ix)\n", 1547 | "\n", 1548 | " # Step 3. Run our forward pass.\n", 1549 | " tag_scores = model(sentence_in)\n", 1550 | "\n", 1551 | " # Step 4. Compute the loss, gradients, and update the parameters by\n", 1552 | " # calling optimizer.step()\n", 1553 | " loss = loss_function(tag_scores, targets)\n", 1554 | " loss.backward()\n", 1555 | " optimizer.step()" 1556 | ] 1557 | }, 1558 | { 1559 | "cell_type": "code", 1560 | "execution_count": 203, 1561 | "metadata": { 1562 | "collapsed": false, 1563 | "scrolled": true 1564 | }, 1565 | "outputs": [ 1566 | { 1567 | "name": "stdout", 1568 | "output_type": "stream", 1569 | "text": [ 1570 | "Variable containing:\n", 1571 | "-0.1564 -1.9627 -5.4380\n", 1572 | "-4.4316 -0.0133 -6.6546\n", 1573 | "-5.4240 -4.4229 -0.0165\n", 1574 | "-0.0224 -4.0915 -5.2090\n", 1575 | "-5.2533 -0.0105 -5.2639\n", 1576 | "[torch.FloatTensor of size 5x3]\n", 1577 | "\n" 1578 | ] 1579 | } 1580 | ], 1581 | "source": [ 1582 | "# See what the scores are after training\n", 1583 | "inputs = prepare_sequence(training_data[0][0], word_to_ix)\n", 1584 | "# score check\n", 1585 | "tag_scores = model(inputs)\n", 1586 | "# The sentence is \"the dog ate the apple\". i,j corresponds to score for tag j\n", 1587 | "# for word i. The predicted tag is the maximum scoring tag.\n", 1588 | "# Here, we can see the predicted sequence below is 0 1 2 0 1\n", 1589 | "# since 0 is index of the maximum value of row 1,\n", 1590 | "# 1 is the index of maximum value of row 2, etc.\n", 1591 | "# Which is DET NOUN VERB DET NOUN, the correct sequence!\n", 1592 | "print(tag_scores)" 1593 | ] 1594 | }, 1595 | { 1596 | "cell_type": "code", 1597 | "execution_count": 204, 1598 | "metadata": { 1599 | "collapsed": false 1600 | }, 1601 | "outputs": [ 1602 | { 1603 | "name": "stdout", 1604 | "output_type": "stream", 1605 | "text": [ 1606 | "raw: ['The', 'dog', 'ate', 'the', 'apple']\n", 1607 | "target: ['DET', 'NN', 'V', 'DET', 'NN']\n", 1608 | "predict: ['DET', 'NN', 'V', 'DET', 'NN']\n" 1609 | ] 1610 | } 1611 | ], 1612 | "source": [ 1613 | "# check the predictions against the gold tags\n", "ix_to_tag = {v: k for k, v in tag_to_ix.items()} # dict.keys() order is not guaranteed\n", 1614 | "print(\"raw: \" + str(training_data[0][0]))\n", 1615 | "print(\"target: \" + str(training_data[0][1]))\n", 1616 | "print(\"predict: \" + str([ix_to_tag[i] for i in torch.max(tag_scores, 1)[1].data.numpy().T[0]]))" 1617 | ] 1618 | }, 1619 | { 1620 | "cell_type": "markdown", 1621 | "metadata": { 1622 | "collapsed": true 1623 | }, 1624 | "source": [ 1625 | "## Exercise: Augmenting the LSTM part-of-speech tagger with character-level features" 1626 | ] 1627 | }, 1628 | { 1629 | "cell_type": "markdown", 1630 | "metadata": {}, 1631 | "source": [ 1632 | "Add character-level features to the LSTM part-of-speech tagger" 1633 | ] 1634 | }, 1635 | { 1636 | "cell_type": "code", 1637 | "execution_count": null, 1638 | "metadata": { 1639 | "collapsed": true 1640 | }, 1641 | "outputs": [], 1642 | "source": [] 1643 | }, 1644 | { 1645 | "cell_type": "markdown", 1646 | "metadata": {}, 1647 | "source": [ 1648 | "In the example above, each word had an embedding, which served as the inputs to our sequence model. Let’s augment the word embeddings with a representation derived from the characters of the word. We expect that this should help significantly, since character-level information like affixes has a large bearing on part-of-speech. 
For example, words with the affix -ly are almost always tagged as adverbs in English.\n", 1649 | "\n", 1650 | "To do this, let $c_w$ be the character-level representation of word $w$. Let $x_w$ be the word embedding as before. Then the input to our sequence model is the concatenation of $x_w$ and $c_w$. So if $x_w$ has dimension 5, and $c_w$ dimension 3, then our LSTM should accept an input of dimension 8.\n", 1651 | "\n", 1652 | "To get the character level representation, do an LSTM over the characters of a word, and let $c_w$ be the final hidden state of this LSTM. Hints:\n", 1653 | "\n", 1654 | "There are going to be two LSTMs in your new model. The original one that outputs POS tag scores, and the new one that outputs a character-level representation of each word.\n", 1655 | "To do a sequence model over characters, you will have to embed characters. The character embeddings will be the input to the character LSTM." 1656 | ] 1657 | }, 1658 | { 1659 | "cell_type": "code", 1660 | "execution_count": null, 1661 | "metadata": { 1662 | "collapsed": true 1663 | }, 1664 | "outputs": [], 1665 | "source": [] 1666 | }, 1674 | { 1675 | "cell_type": "code", 1676 | "execution_count": null, 1677 | "metadata": { 1678 | "collapsed": true 1679 | }, 1680 | "outputs": [], 1681 | "source": [] 1682 | }, 1683 | { 1684 | "cell_type": "code", 1685 | "execution_count": null, 1686 | "metadata": { 1687 | "collapsed": true 1688 | }, 1689 | "outputs": [], 1690 | "source": [] 1691 | }, 1692 | { 1693 | "cell_type": "markdown", 1694 | "metadata": {}, 1695 | "source": [ 1696 | "## Advanced: Making Dynamic Decisions and the Bi-LSTM CRF" 1697 | ] 1698 | }, 1699 | { 1700 | "cell_type": "markdown", 1701 | "metadata": {}, 1702 | "source": [ 1703 | "### Dynamic versus Static Deep Learning Toolkits" 1704 | ] 1705 | }, 1706 | { 1707 | "cell_type": "markdown", 1708 | "metadata": {}, 1709 | "source": [ 1710 | "Keras and Theano: static<br>
\n", 1711 | "Pytorch:Dynamic
\n", 1712 | "なので、毎ループの度にグラフが計算される" 1713 | ] 1714 | }, 1715 | { 1716 | "cell_type": "markdown", 1717 | "metadata": {}, 1718 | "source": [ 1719 | "この違いを考えるために、木構造のグラフを考える" 1720 | ] 1721 | }, 1722 | { 1723 | "cell_type": "markdown", 1724 | "metadata": {}, 1725 | "source": [ 1726 | "
    \n", 1727 | "
  • Build the tree bottom-up\n", 1728 | "
  • Attach word and sentence nodes to the roots\n", 1729 | "
  • Use a neural network and embeddings to determine this tree structure\n", 1730 | "
" 1731 | ] 1732 | }, 1733 | { 1734 | "cell_type": "markdown", 1735 | "metadata": {}, 1736 | "source": [ 1737 | "このような場合、KerasのようなStaticなネットワークは難しい
\n", 1738 | "またTensorFlowなどと比較するとより通常のpythonに近い(class定義でモデルを指定するなど)" 1739 | ] 1740 | }, 1741 | { 1742 | "cell_type": "markdown", 1743 | "metadata": {}, 1744 | "source": [ 1745 | "### Bi-LSTM Conditional Random Field Discussion" 1746 | ] 1747 | }, 1748 | { 1749 | "cell_type": "markdown", 1750 | "metadata": {}, 1751 | "source": [ 1752 | "固有名詞の抽出" 1753 | ] 1754 | }, 1755 | { 1756 | "cell_type": "markdown", 1757 | "metadata": {}, 1758 | "source": [ 1759 | "xを入力の単語のシーケンス、yを単語についての品詞のシーケンスだとすると、これらの事象の確率は以下のように表されるm" 1760 | ] 1761 | }, 1762 | { 1763 | "cell_type": "markdown", 1764 | "metadata": {}, 1765 | "source": [ 1766 | "\\begin{align}P(y|x) = \\frac{\\exp{(\\text{Score}(x, y)})}{\\sum_{y'} \\exp{(\\text{Score}(x, y')})}\\end{align}" 1767 | ] 1768 | }, 1769 | { 1770 | "cell_type": "markdown", 1771 | "metadata": {}, 1772 | "source": [ 1773 | "$\\log \\psi_i(x,y)$をLogポテンシャルとすると、上記のスコアは以下のように表される" 1774 | ] 1775 | }, 1776 | { 1777 | "cell_type": "markdown", 1778 | "metadata": {}, 1779 | "source": [ 1780 | "\\begin{align}\\text{Score}(x,y) = \\sum_i \\log \\psi_i(x,y)\\end{align}" 1781 | ] 1782 | }, 1783 | { 1784 | "cell_type": "markdown", 1785 | "metadata": {}, 1786 | "source": [ 1787 | "Bi-LSTM CRFにおいて、二つのポテンシャル(emissionとtransition)を定義する" 1788 | ] 1789 | }, 1790 | { 1791 | "cell_type": "markdown", 1792 | "metadata": {}, 1793 | "source": [ 1794 | "index iの単語のBiLSTMのTimestep i における隠れ状態とする" 1795 | ] 1796 | }, 1797 | { 1798 | "cell_type": "markdown", 1799 | "metadata": {}, 1800 | "source": [ 1801 | "\n", 1802 | "In the Bi-LSTM CRF, we define two kinds of potentials: emission and\n", 1803 | "transition. The emission potential for the word at index $i$ comes\n", 1804 | "from the hidden state of the Bi-LSTM at timestep $i$. The\n", 1805 | "transition scores are stored in a $|T|x|T|$ matrix\n", 1806 | "$\\textbf{P}$, where $T$ is the tag set. In my\n", 1807 | "implementation, $\\textbf{P}_{j,k}$ is the score of transitioning\n", 1808 | "to tag $j$ from tag $k$. 
So:\n", 1809 | "\n", 1810 | "\\begin{align}\\text{Score}(x,y) = \\sum_i \\log \\psi_\\text{EMIT}(y_i \\rightarrow x_i) + \\log \\psi_\\text{TRANS}(y_{i-1} \\rightarrow y_i)\\end{align}\n", 1811 | "\n", 1812 | "\\begin{align}= \\sum_i h_i[y_i] + \\textbf{P}_{y_i, y_{i-1}}\\end{align}\n" 1813 | ] 1814 | }, 1815 | { 1816 | "cell_type": "markdown", 1817 | "metadata": {}, 1818 | "source": [ 1819 | "### Implementation Notes" 1820 | ] 1821 | }, 1822 | { 1823 | "cell_type": "code", 1824 | "execution_count": 208, 1825 | "metadata": { 1826 | "collapsed": true 1827 | }, 1828 | "outputs": [], 1829 | "source": [ 1830 | "#ヘルパー関数\n", 1831 | "def to_scalar(var):\n", 1832 | " # returns a python float\n", 1833 | " return var.view(-1).data.tolist()[0]\n", 1834 | "\n", 1835 | "\n", 1836 | "def argmax(vec):\n", 1837 | " # return the argmax as a python int\n", 1838 | " _, idx = torch.max(vec, 1)\n", 1839 | " return to_scalar(idx)\n", 1840 | "\n", 1841 | "\n", 1842 | "def prepare_sequence(seq, to_ix):\n", 1843 | " idxs = [to_ix[w] for w in seq]\n", 1844 | " tensor = torch.LongTensor(idxs)\n", 1845 | " return Variable(tensor)\n", 1846 | "\n", 1847 | "\n", 1848 | "# Compute log sum exp in a numerically stable way for the forward algorithm\n", 1849 | "def log_sum_exp(vec):\n", 1850 | " max_score = vec[0, argmax(vec)]\n", 1851 | " max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])\n", 1852 | " return max_score + \\\n", 1853 | " torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))" 1854 | ] 1855 | }, 1856 | { 1857 | "cell_type": "code", 1858 | "execution_count": 209, 1859 | "metadata": { 1860 | "collapsed": true 1861 | }, 1862 | "outputs": [], 1863 | "source": [ 1864 | "#モデル定義\n", 1865 | "class BiLSTM_CRF(nn.Module):\n", 1866 | "\n", 1867 | " def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):\n", 1868 | " super(BiLSTM_CRF, self).__init__()\n", 1869 | " self.embedding_dim = embedding_dim\n", 1870 | " self.hidden_dim = hidden_dim\n", 1871 | " self.vocab_size = vocab_size\n", 1872 | " self.tag_to_ix = tag_to_ix\n", 1873 | " self.tagset_size = len(tag_to_ix)\n", 1874 | "\n", 1875 | " self.word_embeds = nn.Embedding(vocab_size, embedding_dim)\n", 1876 | " self.lstm = nn.LSTM(embedding_dim, hidden_dim // 2,\n", 1877 | " num_layers=1, bidirectional=True)\n", 1878 | " \n", 1879 | " # LSTMをtagに変換するレイヤー\n", 1880 | " # Maps the output of the LSTM into tag space.\n", 1881 | " self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)\n", 1882 | "\n", 1883 | " \n", 1884 | " # Matrix of transition parameters. 
Entry i,j is the score of\n", 1885 | " # transitioning *to* i *from* j.\n", 1886 | " self.transitions = nn.Parameter(\n", 1887 | " torch.randn(self.tagset_size, self.tagset_size))\n", 1888 | "\n", 1889 | " self.hidden = self.init_hidden()\n", 1890 | " \n", 1891 | " #隠れ層の初期化\n", 1892 | " def init_hidden(self):\n", 1893 | " return (Variable(torch.randn(2, 1, self.hidden_dim)),\n", 1894 | " Variable(torch.randn(2, 1, self.hidden_dim)))\n", 1895 | "\n", 1896 | " def _forward_alg(self, feats):\n", 1897 | " # Do the forward algorithm to compute the partition function\n", 1898 | " init_alphas = torch.Tensor(1, self.tagset_size).fill_(-10000.)\n", 1899 | " # START_TAG has all of the score.\n", 1900 | " init_alphas[0][self.tag_to_ix[START_TAG]] = 0.\n", 1901 | "\n", 1902 | " # Wrap in a variable so that we will get automatic backprop\n", 1903 | " forward_var = Variable(init_alphas)\n", 1904 | "\n", 1905 | " # Iterate through the sentence\n", 1906 | " for feat in feats:\n", 1907 | " alphas_t = [] # The forward variables at this timestep\n", 1908 | " for next_tag in range(self.tagset_size):\n", 1909 | " # broadcast the emission score: it is the same regardless of\n", 1910 | " # the previous tag\n", 1911 | " emit_score = feat[next_tag].view(\n", 1912 | " 1, -1).expand(1, self.tagset_size)\n", 1913 | " # the ith entry of trans_score is the score of transitioning to\n", 1914 | " # next_tag from i\n", 1915 | " trans_score = self.transitions[next_tag].view(1, -1)\n", 1916 | " # The ith entry of next_tag_var is the value for the\n", 1917 | " # edge (i -> next_tag) before we do log-sum-exp\n", 1918 | " next_tag_var = forward_var + trans_score + emit_score\n", 1919 | " # The forward variable for this tag is log-sum-exp of all the\n", 1920 | " # scores.\n", 1921 | " alphas_t.append(log_sum_exp(next_tag_var))\n", 1922 | " forward_var = torch.cat(alphas_t).view(1, -1)\n", 1923 | " terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]\n", 1924 | " alpha = log_sum_exp(terminal_var)\n", 1925 | " return alpha\n", 1926 | "\n", 1927 | " def _get_lstm_features(self, sentence):\n", 1928 | " self.hidden = self.init_hidden()\n", 1929 | " embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)\n", 1930 | " lstm_out, self.hidden = self.lstm(embeds)\n", 1931 | " lstm_out = lstm_out.view(len(sentence), self.hidden_dim)\n", 1932 | " lstm_feats = self.hidden2tag(lstm_out)\n", 1933 | " return lstm_feats\n", 1934 | "\n", 1935 | " def _score_sentence(self, feats, tags):\n", 1936 | " # Gives the score of a provided tag sequence\n", 1937 | " score = Variable(torch.Tensor([0]))\n", 1938 | " tags = torch.cat([torch.LongTensor([self.tag_to_ix[START_TAG]]), tags])\n", 1939 | " for i, feat in enumerate(feats):\n", 1940 | " score = score + \\\n", 1941 | " self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]]\n", 1942 | " score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]\n", 1943 | " return score\n", 1944 | "\n", 1945 | " def _viterbi_decode(self, feats):\n", 1946 | " backpointers = []\n", 1947 | "\n", 1948 | " # Initialize the viterbi variables in log space\n", 1949 | " init_vvars = torch.Tensor(1, self.tagset_size).fill_(-10000.)\n", 1950 | " init_vvars[0][self.tag_to_ix[START_TAG]] = 0\n", 1951 | "\n", 1952 | " # forward_var at step i holds the viterbi variables for step i-1\n", 1953 | " forward_var = Variable(init_vvars)\n", 1954 | " for feat in feats:\n", 1955 | " bptrs_t = [] # holds the backpointers for this step\n", 1956 | " viterbivars_t = [] # holds the viterbi variables for this 
step\n", 1957 | "\n", 1958 | " for next_tag in range(self.tagset_size):\n", 1959 | " # next_tag_var[i] holds the viterbi variable for tag i at the\n", 1960 | " # previous step, plus the score of transitioning\n", 1961 | " # from tag i to next_tag.\n", 1962 | " # We don't include the emission scores here because the max\n", 1963 | " # does not depend on them (we add them in below)\n", 1964 | " next_tag_var = forward_var + self.transitions[next_tag]\n", 1965 | " best_tag_id = argmax(next_tag_var)\n", 1966 | " bptrs_t.append(best_tag_id)\n", 1967 | " viterbivars_t.append(next_tag_var[0][best_tag_id])\n", 1968 | " # Now add in the emission scores, and assign forward_var to the set\n", 1969 | " # of viterbi variables we just computed\n", 1970 | " forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)\n", 1971 | " backpointers.append(bptrs_t)\n", 1972 | "\n", 1973 | " # Transition to STOP_TAG\n", 1974 | " terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]\n", 1975 | " best_tag_id = argmax(terminal_var)\n", 1976 | " path_score = terminal_var[0][best_tag_id]\n", 1977 | "\n", 1978 | " # Follow the back pointers to decode the best path.\n", 1979 | " best_path = [best_tag_id]\n", 1980 | " for bptrs_t in reversed(backpointers):\n", 1981 | " best_tag_id = bptrs_t[best_tag_id]\n", 1982 | " best_path.append(best_tag_id)\n", 1983 | " # Pop off the start tag (we dont want to return that to the caller)\n", 1984 | " start = best_path.pop()\n", 1985 | " assert start == self.tag_to_ix[START_TAG] # Sanity check\n", 1986 | " best_path.reverse()\n", 1987 | " return path_score, best_path\n", 1988 | "\n", 1989 | " def neg_log_likelihood(self, sentence, tags):\n", 1990 | " self.hidden = self.init_hidden()\n", 1991 | " feats = self._get_lstm_features(sentence)\n", 1992 | " forward_score = self._forward_alg(feats)\n", 1993 | " gold_score = self._score_sentence(feats, tags)\n", 1994 | " return forward_score - gold_score\n", 1995 | "\n", 1996 | " def forward(self, sentence): # dont confuse this with _forward_alg above.\n", 1997 | " self.hidden = self.init_hidden()\n", 1998 | " # Get the emission scores from the BiLSTM\n", 1999 | " lstm_feats = self._get_lstm_features(sentence)\n", 2000 | "\n", 2001 | " # Find the best path, given the features.\n", 2002 | " score, tag_seq = self._viterbi_decode(lstm_feats)\n", 2003 | " return score, tag_seq" 2004 | ] 2005 | }, 2006 | { 2007 | "cell_type": "code", 2008 | "execution_count": 210, 2009 | "metadata": { 2010 | "collapsed": false 2011 | }, 2012 | "outputs": [ 2013 | { 2014 | "name": "stdout", 2015 | "output_type": "stream", 2016 | "text": [ 2017 | "(Variable containing:\n", 2018 | " 15.9463\n", 2019 | "[torch.FloatTensor of size 1]\n", 2020 | ", [2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2])\n", 2021 | "(Variable containing:\n", 2022 | " 39.0223\n", 2023 | "[torch.FloatTensor of size 1]\n", 2024 | ", [0, 1, 1, 1, 2, 2, 2, 0, 1, 2, 2])\n" 2025 | ] 2026 | } 2027 | ], 2028 | "source": [ 2029 | "START_TAG = \"\"\n", 2030 | "STOP_TAG = \"\"\n", 2031 | "EMBEDDING_DIM = 5\n", 2032 | "HIDDEN_DIM = 4\n", 2033 | "\n", 2034 | "# Make up some training data\n", 2035 | "training_data = [(\n", 2036 | " \"the wall street journal reported today that apple corporation made money\".split(),\n", 2037 | " \"B I I I O O O B I O O\".split()\n", 2038 | "), (\n", 2039 | " \"georgia tech is a university in georgia\".split(),\n", 2040 | " \"B I O O O O B\".split()\n", 2041 | ")]\n", 2042 | "\n", 2043 | "word_to_ix = {}\n", 2044 | "for sentence, tags in training_data:\n", 2045 | " for word 
in sentence:\n", 2046 | " if word not in word_to_ix:\n", 2047 | " word_to_ix[word] = len(word_to_ix)\n", 2048 | "\n", 2049 | "tag_to_ix = {\"B\": 0, \"I\": 1, \"O\": 2, START_TAG: 3, STOP_TAG: 4}\n", 2050 | "\n", 2051 | "model = BiLSTM_CRF(len(word_to_ix), tag_to_ix, EMBEDDING_DIM, HIDDEN_DIM)\n", 2052 | "optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)\n", 2053 | "\n", 2054 | "# Check predictions before training\n", 2055 | "precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)\n", 2056 | "precheck_tags = torch.LongTensor([tag_to_ix[t] for t in training_data[0][1]])\n", 2057 | "print(model(precheck_sent))\n", 2058 | "\n", 2059 | "# Make sure prepare_sequence from earlier in the LSTM section is loaded\n", 2060 | "for epoch in range(\n", 2061 | " 300): # again, normally you would NOT do 300 epochs, it is toy data\n", 2062 | " for sentence, tags in training_data:\n", 2063 | " # Step 1. Remember that Pytorch accumulates gradients.\n", 2064 | " # We need to clear them out before each instance\n", 2065 | " model.zero_grad()\n", 2066 | "\n", 2067 | " # Step 2. Get our inputs ready for the network, that is,\n", 2068 | " # turn them into Variables of word indices.\n", 2069 | " sentence_in = prepare_sequence(sentence, word_to_ix)\n", 2070 | " targets = torch.LongTensor([tag_to_ix[t] for t in tags])\n", 2071 | "\n", 2072 | " # Step 3. Run our forward pass.\n", 2073 | " neg_log_likelihood = model.neg_log_likelihood(sentence_in, targets)\n", 2074 | "\n", 2075 | " # Step 4. Compute the loss, gradients, and update the parameters by\n", 2076 | " # calling optimizer.step()\n", 2077 | " neg_log_likelihood.backward()\n", 2078 | " optimizer.step()\n", 2079 | "\n", 2080 | "# Check predictions after training\n", 2081 | "precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)\n", 2082 | "print(model(precheck_sent))\n", 2083 | "# We got it!" 2084 | ] 2085 | }, 2086 | { 2087 | "cell_type": "code", 2088 | "execution_count": null, 2089 | "metadata": { 2090 | "collapsed": true 2091 | }, 2092 | "outputs": [], 2093 | "source": [] 2094 | } 2095 | ], 2096 | "metadata": { 2097 | "kernelspec": { 2098 | "display_name": "Python 2", 2099 | "language": "python", 2100 | "name": "python2" 2101 | }, 2102 | "language_info": { 2103 | "codemirror_mode": { 2104 | "name": "ipython", 2105 | "version": 2 2106 | }, 2107 | "file_extension": ".py", 2108 | "mimetype": "text/x-python", 2109 | "name": "python", 2110 | "nbconvert_exporter": "python", 2111 | "pygments_lexer": "ipython2", 2112 | "version": "2.7.11" 2113 | } 2114 | }, 2115 | "nbformat": 4, 2116 | "nbformat_minor": 2 2117 | } 2118 | -------------------------------------------------------------------------------- /tutorials/Pytorch_Tutorials-Teaching.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# pytorchの基本演算" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "pytorchはTensorという型で演算を行う。pytorchを利用するには全ての変数はTensorに変換する必要がある
\n", 15 | "テンソルはnumpyのような書き方が可能" 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": 1, 21 | "metadata": { 22 | "collapsed": true 23 | }, 24 | "outputs": [], 25 | "source": [ 26 | "from __future__ import print_function\n", 27 | "import torch" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": 2, 33 | "metadata": { 34 | "collapsed": false 35 | }, 36 | "outputs": [ 37 | { 38 | "name": "stdout", 39 | "output_type": "stream", 40 | "text": [ 41 | "\n", 42 | " 0.0000e+00 0.0000e+00 0.0000e+00\n", 43 | " 0.0000e+00 5.6052e-45 0.0000e+00\n", 44 | " 2.5223e-44 0.0000e+00 0.0000e+00\n", 45 | " 2.9427e-44 0.0000e+00 0.0000e+00\n", 46 | " 2.6415e-29 -1.5846e+29 3.9690e-29\n", 47 | "[torch.FloatTensor of size 5x3]\n", 48 | "\n" 49 | ] 50 | } 51 | ], 52 | "source": [ 53 | "#torchのTensor定義\n", 54 | "#初期化されていないので完全に0にはならない\n", 55 | "x = torch.Tensor(5, 3)\n", 56 | "print(x)" 57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": 3, 62 | "metadata": { 63 | "collapsed": false 64 | }, 65 | "outputs": [ 66 | { 67 | "name": "stdout", 68 | "output_type": "stream", 69 | "text": [ 70 | "\n", 71 | " 0.0276 0.9555 0.3755\n", 72 | " 0.0343 0.2881 0.0430\n", 73 | " 0.5422 0.1671 0.7017\n", 74 | " 0.3169 0.7044 0.3255\n", 75 | " 0.2512 0.7634 0.2316\n", 76 | "[torch.FloatTensor of size 5x3]\n", 77 | "\n" 78 | ] 79 | } 80 | ], 81 | "source": [ 82 | "#乱数で初期化したTensor\n", 83 | "x = torch.rand(5, 3)\n", 84 | "print(x)" 85 | ] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "execution_count": 4, 90 | "metadata": { 91 | "collapsed": false 92 | }, 93 | "outputs": [ 94 | { 95 | "name": "stdout", 96 | "output_type": "stream", 97 | "text": [ 98 | "torch.Size([5, 3])\n" 99 | ] 100 | } 101 | ], 102 | "source": [ 103 | "#Tensorのサイズの出力\n", 104 | "print(x.size())" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": 5, 110 | "metadata": { 111 | "collapsed": false 112 | }, 113 | "outputs": [ 114 | { 115 | "name": "stdout", 116 | "output_type": "stream", 117 | "text": [ 118 | "\n", 119 | " 0.2646 1.0537 1.3565\n", 120 | " 0.6482 0.6579 0.2776\n", 121 | " 1.3476 0.7363 0.7927\n", 122 | " 0.7810 1.4644 0.5566\n", 123 | " 1.0113 0.9607 0.9306\n", 124 | "[torch.FloatTensor of size 5x3]\n", 125 | "\n" 126 | ] 127 | } 128 | ], 129 | "source": [ 130 | "#足し算可能\n", 131 | "y = torch.rand(5, 3)\n", 132 | "print(x + y)" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": 6, 138 | "metadata": { 139 | "collapsed": false 140 | }, 141 | "outputs": [ 142 | { 143 | "name": "stdout", 144 | "output_type": "stream", 145 | "text": [ 146 | "\n", 147 | " 0.2646 1.0537 1.3565\n", 148 | " 0.6482 0.6579 0.2776\n", 149 | " 1.3476 0.7363 0.7927\n", 150 | " 0.7810 1.4644 0.5566\n", 151 | " 1.0113 0.9607 0.9306\n", 152 | "[torch.FloatTensor of size 5x3]\n", 153 | "\n" 154 | ] 155 | } 156 | ], 157 | "source": [ 158 | "#足し算のやり方その2\n", 159 | "result = torch.Tensor(5, 3)\n", 160 | "torch.add(x, y, out=result)\n", 161 | "print(result)" 162 | ] 163 | }, 164 | { 165 | "cell_type": "code", 166 | "execution_count": 7, 167 | "metadata": { 168 | "collapsed": false 169 | }, 170 | "outputs": [ 171 | { 172 | "name": "stdout", 173 | "output_type": "stream", 174 | "text": [ 175 | "\n", 176 | " 0.2646 1.0537 1.3565\n", 177 | " 0.6482 0.6579 0.2776\n", 178 | " 1.3476 0.7363 0.7927\n", 179 | " 0.7810 1.4644 0.5566\n", 180 | " 1.0113 0.9607 0.9306\n", 181 | "[torch.FloatTensor of size 5x3]\n", 182 | "\n" 183 | ] 184 | } 185 | ], 186 | "source": [ 187 | "#足し算のやり方その3:破壊的な足し算のメソッド\n", 
188 | "y.add_(x)\n", 189 | "print(y)" 190 | ] 191 | }, 192 | { 193 | "cell_type": "code", 194 | "execution_count": 8, 195 | "metadata": { 196 | "collapsed": false 197 | }, 198 | "outputs": [ 199 | { 200 | "name": "stdout", 201 | "output_type": "stream", 202 | "text": [ 203 | "\n", 204 | " 0.9555\n", 205 | " 0.2881\n", 206 | " 0.1671\n", 207 | " 0.7044\n", 208 | " 0.7634\n", 209 | "[torch.FloatTensor of size 5]\n", 210 | "\n" 211 | ] 212 | } 213 | ], 214 | "source": [ 215 | "#numpyのように特定行・列だけスライスして取り出すことも可能\n", 216 | "print(x[:, 1])" 217 | ] 218 | }, 219 | { 220 | "cell_type": "code", 221 | "execution_count": 9, 222 | "metadata": { 223 | "collapsed": false 224 | }, 225 | "outputs": [ 226 | { 227 | "name": "stdout", 228 | "output_type": "stream", 229 | "text": [ 230 | "\n", 231 | " 1\n", 232 | " 1\n", 233 | " 1\n", 234 | " 1\n", 235 | " 1\n", 236 | "[torch.FloatTensor of size 5]\n", 237 | "\n" 238 | ] 239 | } 240 | ], 241 | "source": [ 242 | "#numpy.onesのようなtorchの関数\n", 243 | "a = torch.ones(5)\n", 244 | "print(a)" 245 | ] 246 | }, 247 | { 248 | "cell_type": "code", 249 | "execution_count": 10, 250 | "metadata": { 251 | "collapsed": false 252 | }, 253 | "outputs": [ 254 | { 255 | "name": "stdout", 256 | "output_type": "stream", 257 | "text": [ 258 | "[ 1. 1. 1. 1. 1.]\n" 259 | ] 260 | } 261 | ], 262 | "source": [ 263 | "#Tensor -> numpy変換\n", 264 | "b = a.numpy()\n", 265 | "print(b)" 266 | ] 267 | }, 268 | { 269 | "cell_type": "code", 270 | "execution_count": 11, 271 | "metadata": { 272 | "collapsed": false 273 | }, 274 | "outputs": [ 275 | { 276 | "name": "stdout", 277 | "output_type": "stream", 278 | "text": [ 279 | "\n", 280 | " 2\n", 281 | " 2\n", 282 | " 2\n", 283 | " 2\n", 284 | " 2\n", 285 | "[torch.FloatTensor of size 5]\n", 286 | "\n", 287 | "[ 2. 2. 2. 2. 2.]\n" 288 | ] 289 | } 290 | ], 291 | "source": [ 292 | "#add_の場合、参照先にも影響を与える\n", 293 | "a.add_(1)\n", 294 | "print(a)\n", 295 | "print(b)" 296 | ] 297 | }, 298 | { 299 | "cell_type": "code", 300 | "execution_count": 12, 301 | "metadata": { 302 | "collapsed": false 303 | }, 304 | "outputs": [ 305 | { 306 | "name": "stdout", 307 | "output_type": "stream", 308 | "text": [ 309 | "\n", 310 | " 3\n", 311 | " 3\n", 312 | " 3\n", 313 | " 3\n", 314 | " 3\n", 315 | "[torch.FloatTensor of size 5]\n", 316 | "\n", 317 | "[ 2. 2. 2. 2. 2.]\n" 318 | ] 319 | } 320 | ], 321 | "source": [ 322 | "#普通の演算は問題ない\n", 323 | "a=a+1\n", 324 | "print(a)\n", 325 | "print(b)" 326 | ] 327 | }, 328 | { 329 | "cell_type": "code", 330 | "execution_count": 13, 331 | "metadata": { 332 | "collapsed": false 333 | }, 334 | "outputs": [ 335 | { 336 | "name": "stdout", 337 | "output_type": "stream", 338 | "text": [ 339 | "[ 2. 2. 2. 2. 2.]\n", 340 | "\n", 341 | " 2\n", 342 | " 2\n", 343 | " 2\n", 344 | " 2\n", 345 | " 2\n", 346 | "[torch.DoubleTensor of size 5]\n", 347 | "\n" 348 | ] 349 | } 350 | ], 351 | "source": [ 352 | "#numpy -> Tensor\n", 353 | "\n", 354 | "import numpy as np\n", 355 | "a = np.ones(5)\n", 356 | "b = torch.from_numpy(a)\n", 357 | "np.add(a, 1, out=a)\n", 358 | "print(a)\n", 359 | "print(b)" 360 | ] 361 | }, 362 | { 363 | "cell_type": "markdown", 364 | "metadata": {}, 365 | "source": [ 366 | "# pytorchの自動微分機能" 367 | ] 368 | }, 369 | { 370 | "cell_type": "markdown", 371 | "metadata": {}, 372 | "source": [ 373 | "pytorchはautogradというTensorを自動微分機能を備えている
\n", 374 | "この機能はdefined-by-run形式で、要するにどのようにコードが走ったかで微分方向が変わる
\n", 375 | "そのため、ループごとに構造を変えるなどの柔軟な記述もできる" 376 | ] 377 | }, 378 | { 379 | "cell_type": "code", 380 | "execution_count": 14, 381 | "metadata": { 382 | "collapsed": true 383 | }, 384 | "outputs": [], 385 | "source": [ 386 | "import torch\n", 387 | "from torch.autograd import Variable #自動微分機能のimport" 388 | ] 389 | }, 390 | { 391 | "cell_type": "markdown", 392 | "metadata": {}, 393 | "source": [ 394 | "Variable変数はTensorのラッパー
\n", 395 | "最後に.backward()を呼ぶと、それまでの経路を遡って自動微分してくれる" 396 | ] 397 | }, 398 | { 399 | "cell_type": "code", 400 | "execution_count": 15, 401 | "metadata": { 402 | "collapsed": false 403 | }, 404 | "outputs": [ 405 | { 406 | "name": "stdout", 407 | "output_type": "stream", 408 | "text": [ 409 | "Variable containing:\n", 410 | " 1 1\n", 411 | " 1 1\n", 412 | "[torch.FloatTensor of size 2x2]\n", 413 | "\n" 414 | ] 415 | } 416 | ], 417 | "source": [ 418 | "#requires_gradで微分を行うかどうかを明示する。デフォルトTrue\n", 419 | "x = Variable(torch.ones(2, 2), requires_grad=True)\n", 420 | "print(x)" 421 | ] 422 | }, 423 | { 424 | "cell_type": "code", 425 | "execution_count": 16, 426 | "metadata": { 427 | "collapsed": false 428 | }, 429 | "outputs": [ 430 | { 431 | "name": "stdout", 432 | "output_type": "stream", 433 | "text": [ 434 | "Variable containing:\n", 435 | " 3 3\n", 436 | " 3 3\n", 437 | "[torch.FloatTensor of size 2x2]\n", 438 | "\n" 439 | ] 440 | } 441 | ], 442 | "source": [ 443 | "#VariableはTensorのラッパーなので通常の演算も可能\n", 444 | "y = x + 2\n", 445 | "print(y)" 446 | ] 447 | }, 448 | { 449 | "cell_type": "code", 450 | "execution_count": 17, 451 | "metadata": { 452 | "collapsed": false 453 | }, 454 | "outputs": [ 455 | { 456 | "name": "stdout", 457 | "output_type": "stream", 458 | "text": [ 459 | "\n" 460 | ] 461 | } 462 | ], 463 | "source": [ 464 | "#呼び出される関数を確認できる\n", 465 | "#後のDNNでのNetwork作成時に再度説明する\n", 466 | "print(y.creator)" 467 | ] 468 | }, 469 | { 470 | "cell_type": "code", 471 | "execution_count": 18, 472 | "metadata": { 473 | "collapsed": false 474 | }, 475 | "outputs": [ 476 | { 477 | "name": "stdout", 478 | "output_type": "stream", 479 | "text": [ 480 | "Variable containing:\n", 481 | " 27 27\n", 482 | " 27 27\n", 483 | "[torch.FloatTensor of size 2x2]\n", 484 | " Variable containing:\n", 485 | " 27\n", 486 | "[torch.FloatTensor of size 1]\n", 487 | "\n" 488 | ] 489 | } 490 | ], 491 | "source": [ 492 | "#掛け算もできるし平均もできる\n", 493 | "z = y * y * 3\n", 494 | "out = z.mean()\n", 495 | "\n", 496 | "print(z, out)" 497 | ] 498 | }, 499 | { 500 | "cell_type": "code", 501 | "execution_count": 19, 502 | "metadata": { 503 | "collapsed": true 504 | }, 505 | "outputs": [], 506 | "source": [ 507 | "#自動微分する\n", 508 | "out.backward()" 509 | ] 510 | }, 511 | { 512 | "cell_type": "code", 513 | "execution_count": 20, 514 | "metadata": { 515 | "collapsed": false 516 | }, 517 | "outputs": [ 518 | { 519 | "name": "stdout", 520 | "output_type": "stream", 521 | "text": [ 522 | "Variable containing:\n", 523 | " 4.5000 4.5000\n", 524 | " 4.5000 4.5000\n", 525 | "[torch.FloatTensor of size 2x2]\n", 526 | "\n" 527 | ] 528 | } 529 | ], 530 | "source": [ 531 | "#d(out)/dxを出力する\n", 532 | "print(x.grad)" 533 | ] 534 | }, 535 | { 536 | "cell_type": "markdown", 537 | "metadata": {}, 538 | "source": [ 539 | "さて、ここで行われた計算についてもう少し詳しく説明する
\n", 540 | "そもそもout(=oと略記)がどのように計算されたかというと
\n", 541 | "$o = \\frac{1}{4}\\sum_iz_i$
\n", 542 | "$z_i = y_i \\cdot y_i \\cdot 3 = 3 \\cdot (x_i + 2)^2$" 543 | ] 544 | }, 545 | { 546 | "cell_type": "markdown", 547 | "metadata": {}, 548 | "source": [ 549 | "よって、
\n", 550 | "$\\frac{\\partial o}{\\partial x_i} = \\frac{1}{4} \\cdot 3 \\cdot 2 \\cdot (x_i + 2) = 4.5$
\n", 551 | "これが各成分となる" 552 | ] 553 | }, 554 | { 555 | "cell_type": "markdown", 556 | "metadata": {}, 557 | "source": [ 558 | "pytorchのこの自動微分機能は様々なことができる" 559 | ] 560 | }, 561 | { 562 | "cell_type": "code", 563 | "execution_count": 58, 564 | "metadata": { 565 | "collapsed": false 566 | }, 567 | "outputs": [ 568 | { 569 | "name": "stdout", 570 | "output_type": "stream", 571 | "text": [ 572 | "Variable containing:\n", 573 | " -766.0985\n", 574 | " 1223.3398\n", 575 | " 31.8218\n", 576 | "[torch.FloatTensor of size 3]\n", 577 | "\n" 578 | ] 579 | } 580 | ], 581 | "source": [ 582 | "x = torch.randn(3)\n", 583 | "x = Variable(x, requires_grad=True)\n", 584 | "\n", 585 | "y = x * 2\n", 586 | "while y.data.norm() < 1000:\n", 587 | " y = y * 2\n", 588 | "\n", 589 | "print(y)" 590 | ] 591 | }, 592 | { 593 | "cell_type": "code", 594 | "execution_count": 59, 595 | "metadata": { 596 | "collapsed": false 597 | }, 598 | "outputs": [ 599 | { 600 | "name": "stdout", 601 | "output_type": "stream", 602 | "text": [ 603 | "Variable containing:\n", 604 | " 102.4000\n", 605 | " 1024.0000\n", 606 | " 0.1024\n", 607 | "[torch.FloatTensor of size 3]\n", 608 | "\n" 609 | ] 610 | } 611 | ], 612 | "source": [ 613 | "gradients = torch.FloatTensor([0.1, 1.0, 0.0001])\n", 614 | "y.backward(gradients)\n", 615 | "\n", 616 | "print(x.grad)" 617 | ] 618 | }, 619 | { 620 | "cell_type": "markdown", 621 | "metadata": {}, 622 | "source": [ 623 | "# pytorchによるDNN入門" 624 | ] 625 | }, 626 | { 627 | "cell_type": "code", 628 | "execution_count": 10, 629 | "metadata": { 630 | "collapsed": true 631 | }, 632 | "outputs": [], 633 | "source": [ 634 | "import torch\n", 635 | "import torch.nn as nn\n", 636 | "import torch.optim as optim\n", 637 | "from torch.autograd import Variable\n", 638 | "import torch.nn.functional as F\n", 639 | "import torch.utils.data" 640 | ] 641 | }, 642 | { 643 | "cell_type": "code", 644 | "execution_count": 11, 645 | "metadata": { 646 | "collapsed": true 647 | }, 648 | "outputs": [], 649 | "source": [ 650 | "import numpy as np\n", 651 | "import matplotlib.pyplot as plt\n", 652 | "from sklearn.datasets import fetch_mldata\n", 653 | "from sklearn.model_selection import train_test_split\n", 654 | "%matplotlib inline" 655 | ] 656 | }, 657 | { 658 | "cell_type": "code", 659 | "execution_count": 12, 660 | "metadata": { 661 | "collapsed": true 662 | }, 663 | "outputs": [], 664 | "source": [ 665 | "# load MNIST data\n", 666 | "mnist = fetch_mldata('MNIST original')\n", 667 | "\n", 668 | "X_train, X_test, y_train, y_test = train_test_split(\n", 669 | " mnist.data.astype('float32'), mnist.target.astype('uint8'), test_size=0.25, random_state=42)\n", 670 | "\n", 671 | "X_train /= 255.0\n", 672 | "X_test /= 255.0" 673 | ] 674 | }, 675 | { 676 | "cell_type": "code", 677 | "execution_count": 13, 678 | "metadata": { 679 | "collapsed": false 680 | }, 681 | "outputs": [], 682 | "source": [ 683 | "#データセットのDataLoader化\n", 684 | "train = torch.utils.data.TensorDataset(torch.from_numpy(X_train), torch.from_numpy(y_train))\n", 685 | "train_loader = torch.utils.data.DataLoader(train, batch_size=100, shuffle=True)\n", 686 | "test = torch.utils.data.TensorDataset(torch.from_numpy(X_test), torch.from_numpy(y_test))\n", 687 | "test_loader = torch.utils.data.DataLoader(train, batch_size=100, shuffle=True)" 688 | ] 689 | }, 690 | { 691 | "cell_type": "code", 692 | "execution_count": 14, 693 | "metadata": { 694 | "collapsed": true 695 | }, 696 | "outputs": [], 697 | "source": [ 698 | "class Net(nn.Module):\n", 699 | " def __init__(self):\n", 
700 | " super(Net, self).__init__()\n", 701 | " self.fc1 = nn.Linear(784,500)\n", 702 | " self.fc2 = nn.Linear(500, 84)\n", 703 | " self.fc3 = nn.Linear(84, 10)\n", 704 | "\n", 705 | " def forward(self, x):\n", 706 | " x = F.relu(self.fc1(x))\n", 707 | " x = F.relu(self.fc2(x))\n", 708 | " x = F.log_softmax(self.fc3(x))\n", 709 | " return x\n", 710 | "\n", 711 | "model = Net()" 712 | ] 713 | }, 714 | { 715 | "cell_type": "code", 716 | "execution_count": 15, 717 | "metadata": { 718 | "collapsed": false 719 | }, 720 | "outputs": [], 721 | "source": [ 722 | "#最適化手法\n", 723 | "optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)" 724 | ] 725 | }, 726 | { 727 | "cell_type": "code", 728 | "execution_count": 16, 729 | "metadata": { 730 | "collapsed": true 731 | }, 732 | "outputs": [], 733 | "source": [ 734 | "def train(epoch, model):\n", 735 | " model.train()\n", 736 | " loss_history = []\n", 737 | " for _ in range(epoch):\n", 738 | " running_loss = 0.0\n", 739 | " for i, data in enumerate(train_loader):\n", 740 | " \n", 741 | " inputs, labels = data\n", 742 | " inputs, labels = Variable(inputs), Variable(labels)\n", 743 | " optimizer.zero_grad()\n", 744 | " output = model(inputs)\n", 745 | "\n", 746 | " #negative log-likelihood loss\n", 747 | " loss = F.nll_loss(output, labels)\n", 748 | " running_loss += loss.data[0]\n", 749 | "\n", 750 | " #backpropagation\n", 751 | " loss.backward()\n", 752 | " optimizer.step()\n", 753 | " \n", 754 | " print('[epoch %d] loss: %.3f' % (_ + 1, running_loss / len(train_loader)))\n", 755 | " loss_history.append(running_loss / len(train_loader))\n", 756 | "\n", 757 | " return loss_history" 758 | ] 759 | }, 760 | { 761 | "cell_type": "markdown", 762 | "metadata": {}, 763 | "source": [ 764 | "networkの定義は構造部分init部分とforward部分のみで良い
\n", 765 | "backword部分はautogradが実行する" 766 | ] 767 | }, 768 | { 769 | "cell_type": "code", 770 | "execution_count": 17, 771 | "metadata": { 772 | "collapsed": false 773 | }, 774 | "outputs": [ 775 | { 776 | "name": "stdout", 777 | "output_type": "stream", 778 | "text": [ 779 | "Net (\n", 780 | " (fc1): Linear (784 -> 500)\n", 781 | " (fc2): Linear (500 -> 84)\n", 782 | " (fc3): Linear (84 -> 10)\n", 783 | ")\n" 784 | ] 785 | } 786 | ], 787 | "source": [ 788 | "print(model)" 789 | ] 790 | }, 791 | { 792 | "cell_type": "code", 793 | "execution_count": 18, 794 | "metadata": { 795 | "collapsed": false 796 | }, 797 | "outputs": [ 798 | { 799 | "name": "stdout", 800 | "output_type": "stream", 801 | "text": [ 802 | "[epoch 1] loss: 2.096\n", 803 | "[epoch 2] loss: 1.042\n", 804 | "[epoch 3] loss: 0.542\n", 805 | "[epoch 4] loss: 0.429\n", 806 | "[epoch 5] loss: 0.379\n", 807 | "[epoch 6] loss: 0.350\n", 808 | "[epoch 7] loss: 0.329\n", 809 | "[epoch 8] loss: 0.312\n", 810 | "[epoch 9] loss: 0.298\n", 811 | "[epoch 10] loss: 0.285\n", 812 | "[epoch 11] loss: 0.275\n", 813 | "[epoch 12] loss: 0.264\n", 814 | "[epoch 13] loss: 0.254\n", 815 | "[epoch 14] loss: 0.244\n", 816 | "[epoch 15] loss: 0.236\n", 817 | "[epoch 16] loss: 0.227\n", 818 | "[epoch 17] loss: 0.219\n", 819 | "[epoch 18] loss: 0.212\n", 820 | "[epoch 19] loss: 0.205\n", 821 | "[epoch 20] loss: 0.198\n" 822 | ] 823 | } 824 | ], 825 | "source": [ 826 | "history = train(20, model)" 827 | ] 828 | }, 829 | { 830 | "cell_type": "code", 831 | "execution_count": 19, 832 | "metadata": { 833 | "collapsed": false 834 | }, 835 | "outputs": [ 836 | { 837 | "data": { 838 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAEICAYAAABYoZ8gAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3Xl0nHd97/H3VzMaLaPFkkaWd0tOXLAIxAm6IZQAKYmD\nnUuTtrendUohpXB82ktaaHuX9NISbuD00HLaW6ApkAu56QJJWtZAExKnBEILDlFSJ16yOd53WbK1\nbyN97x/zSB7LWsbSSDOa5/M6Z46e5fdovhqPP79nnuc3z2PujoiIhEdRrgsQEZGFpeAXEQkZBb+I\nSMgo+EVEQkbBLyISMgp+EZGQUfBL6JnZQTO7Mdd1iCwUBb+ISMgo+EVEQkbBLxIwsxIz+2szOx48\n/trMSoJ1CTP7npmdM7MOM/uxmRUF6/6nmR0zs24ze9nMbsjtXyIyvWiuCxDJIx8DrgU2Ag58B/gT\n4E+BPwKOAvVB22sBN7PXAXcA/8ndj5tZIxBZ2LJFLo32+EXOey9wt7ufdvc24H8D7wvWDQPLgbXu\nPuzuP/bUha5GgBKg2cyK3f2gu7+Wk+pFMqTgFzlvBXAobf5QsAzgM8A+4HEz229mdwK4+z7go8An\ngNNm9qCZrUAkjyn4Rc47DqxNm18TLMPdu939j9x9HXAL8Idjx/Ld/Wvufl2wrQN/vrBli1waBb/I\neQ8Af2Jm9WaWAD4O/COAmb3HzC43MwM6SR3iGTWz15nZu4KTwANAPzCao/pFMqLgFznvU0Ar8AKw\nC3guWAawHngC6AF+Cvytuz9J6vj+p4EzwElgKfDHC1u2yKUx3YhFRCRctMcvIhIyCn4RkZBR8IuI\nhIyCX0QkZPLykg2JRMIbGxtzXYaIyKLx7LPPnnH3+plb5mnwNzY20tramusyREQWDTM7NHOrFB3q\nEREJGQW/iEjIKPhFREJGwS8iEjIKfhGRkFHwi4iEjIJfRCRkCib4kyOj3PPkPp56pS3XpYiI5LWC\nCf5IkfGlH73G43tP5roUEZG8VjDBb2Y01Vdw8ExfrksREclrBRP8AE115Rw405vrMkRE8lpBBX9j\nIs7xzn4GhkdyXYqISN4qqOBvSsRxh8MdOtwjIjKVggt+gP1tOtwjIjKVggr+xiD4D7Yr+EVEpjJj\n8JvZajN70sz2mtkeM/vIJG3MzD5nZvvM7AUzuzpt3e1m9mrwuD3bf0C6qtJiEhUxDuoEr4jIlDK5\nEUsS+CN3f87MKoFnzWy7u+9Na7MFWB883gJ8AXiLmdUCdwEtgAfbPuzuZ7P6V6RprIuzX8EvIjKl\nGff43f2Euz8XTHcDLwIrJzS7Ffh7T9kBLDGz5cC7ge3u3hGE/XZgc1b/ggkaE3Ht8YuITOOSjvGb\nWSNwFfD0hFUrgSNp80eDZVMtn+x3bzOzVjNrbWub/WUXmhJxTncP0juYnPXvEBEpZBkHv5lVAN8A\nPuruXdkuxN3vdfcWd2+pr8/ofsGTGhvZoy9yiYhMLqPgN7NiUqH/VXf/5iRNjgGr0+ZXBcumWj5v\nGus0skdEZDqZjOox4CvAi+7+V1M0exh4fzC651qg091PAI8BN5lZjZnVADcFy+ZNY6IcQMf5RUSm\nkMmonrcB7wN2mdnOYNn/AtYAuPsXgUeAm4F9QB/wgWBdh5l9Engm2O5ud+/IXvkXK49FWVZVqpE9\nIiJTmDH43f3fAJuhjQMfnmLdfcB9s6pulhoT5drjFxGZQkF9c3dMU6KCg+26Xo+IyGQKNPjL6egd\norNvONeliIjknYIM/rGRPQc0skdE5CIFGfzr6sfG8vfkuBIRkfxTkMG/uracIoMDug2jiMhFCjL4\nS6IRViwp08geEZFJFGTwQ+rSDb
psg4jIxQo6+A+e6SX1FQMRERlTsMHfWBenezBJe+9QrksREckr\nBRv8TfW6SqeIyGQKN/jrFPwiIpMp2OBfVVNGtMg0skdEZIKCDf5opIg1teXa4xcRmaBggx9S999V\n8IuIXKiwg78uzqH2PkZHNaRTRGRMQQd/U32c/uERTnUP5LoUEZG8UdjBr5E9IiIXyeSeu/eZ2Wkz\n2z3F+v9uZjuDx24zGzGz2mDdQTPbFaxrzXbxMzl//11drE1EZEwme/z3A5unWunun3H3je6+Efhj\n4EcT7qv7C8H6lrmVeulWVJcRixbp8swiImlmDH53fwrI9AbptwEPzKmiLCoqMhrrynV5ZhGRNFk7\nxm9m5aQ+GXwjbbEDj5vZs2a2bYbtt5lZq5m1trW1ZassGuviHNSduERExmXz5O4vAv8+4TDPde5+\nNbAF+LCZvWOqjd39XndvcfeW+vr6rBXVVB/ncHsfIxrSKSICZDf4tzLhMI+7Hwt+nga+BVyTxefL\nSFNdnKGRUY6f61/opxYRyUtZCX4zqwbeCXwnbVnczCrHpoGbgElHBs2nxoSGdIqIpIvO1MDMHgCu\nBxJmdhS4CygGcPcvBs1+GXjc3dPTtQH4lpmNPc/X3P372Ss9M+vSgv8dP5e9Q0giIovVjMHv7rdl\n0OZ+UsM+05ftB66cbWHZUl9ZQjwW0R6/iEigoL+5C2BmrNXIHhGRcQUf/JAa2aM9fhGRlHAEf12c\no2f7GR4ZzXUpIiI5F4rgb0zEGRl1jnToG7wiIqEI/iYN6RQRGafgFxEJmVAEf015MVWlUY3sEREh\nJMFvZjTVV2iPX0SEkAQ/QFNduW7IIiJCiIK/MRHneGc/A8MjuS5FRCSnQhP8TYk47nCoXXv9IhJu\noQp+0MgeEZHQBP/Y5Zk1skdEwi40wV9VWkyiIsaBNgW/iIRbaIIfUvffPaA9fhEJuVAFf1MizkEd\n4xeRkAtV8Dcm4pzuHqRnMJnrUkREcmbG4Dez+8zstJlNer9cM7vezDrNbGfw+Hjaus1m9rKZ7TOz\nO7NZ+GyMjezRXr+IhFkme/z3A5tnaPNjd98YPO4GMLMIcA+wBWgGbjOz5rkUO1dNGtkjIjJz8Lv7\nU0DHLH73NcA+d9/v7kPAg8Cts/g9WdNYF4zl18geEQmxbB3jf6uZPW9mj5rZG4JlK4EjaW2OBssm\nZWbbzKzVzFrb2tqyVNaFymIRllWVamSPiIRaNoL/OWCtu18JfB749mx+ibvf6+4t7t5SX1+fhbIm\np5E9IhJ2cw5+d+9y955g+hGg2MwSwDFgdVrTVcGynGpM6MbrIhJucw5+M1tmZhZMXxP8znbgGWC9\nmTWZWQzYCjw81+ebq6ZEOWf7hunsG851KSIiORGdqYGZPQBcDyTM7ChwF1AM4O5fBH4V+F0zSwL9\nwFZ3dyBpZncAjwER4D533zMvf8UlaEpUAHCgvZeN5UtyXI2IyMKbMfjd/bYZ1v8N8DdTrHsEeGR2\npc2PpkQ5AAfO9LBxtYJfRMInVN/cBVhdW06RwQHdjUtEQip0wV8SjbCypkwje0QktEIX/BBcpVPB\nLyIhFcrgHxvLnzoHLSISLqEN/u7BJO29Q7kuRURkwYUy+Bt1/10RCbFQBn9TnYJfRMIrlMG/qqaM\naJFpZI+IhFIogz8aKWJNbbn2+EUklEIZ/KCLtYlIeIU2+JsScQ619zE6qiGdIhIuoQ3+xkSc/uER\nTnUP5LoUEZEFFdrg18geEQmr8AZ/fXDjdV2sTURCJrTBv7yqlJJoEQfO9OS6FBGRBRXa4C8qMtbW\nlevyzCISOqENfggu1tauY/wiEi4zBr+Z3Wdmp81s9xTr32tmL5jZLjP7iZldmbbuYLB8p5m1ZrPw\nbGhMxDnc3seIhnSKSIhkssd/P7B5mvUHgHe6+xuBTwL3Tlj/C+6+0d1bZlfi/GmqizM0Msrxc/25\nLkVEZMHMGPzu/hTQMc36n7j72WB2B7AqS7XNuyZdpVNEQijbx/g/CDyaNu/A42b2rJltm25DM9tm\nZq1m1trW1pblsian4BeRMIpm6xeZ2S+QCv7r0hZf5+7HzGwpsN3MXgo+QVzE3e8lOEzU0tKyIAfd\n6ytLiMciCn4RCZWs7PGb2ZuALwO3unv72HJ3Pxb8PA18C7gmG8+XLWZGo0b2iEjIzDn4zWwN8E3g\nfe7+StryuJlVjk0DNwGTjgzKJV2lU0TCZsZDPWb2AHA9kDCzo8BdQDGAu38R+DhQB/ytmQEkgxE8\nDcC3gmVR4Gvu/v15+BvmpKkuzvd3n2R4ZJTiSKi/1iAiITFj8Lv7bTOs/xDwoUmW7weuvHiL/NKU\niDMy6hzp6GNdfUWuyxERmXeh38XVjddFJGxCH/wa0ikiYRP64K8pL6a6rFgje0QkNEIf/GNDOrXH\nLyJhEfrgB2iqK9cNWUQkNBT8QFOiguOd/QwMj+S6FBGReafgBxoT5bjDoXbt9YtI4VPwo5E9IhIu\nCn7Oj+XXyB4RCQMFP1BVWkyiIsaBNgW/iBQ+BX+gsS7OAe3xi0gIKPgDTYk4B3WMX0RCQMEfaEzE\nOd09SM9gMteliIjMKwV/YGxkj/b6RaTQKfgDTRrZIyIhoeAPNNYFY/k1skdECpyCP1AWi7AuEedn\nBztyXYqIyLzKKPjN7D4zO21mk94z11I+Z2b7zOwFM7s6bd3tZvZq8Lg9W4XPhxs2LGXH/na6BoZz\nXYqIyLzJdI//fmDzNOu3AOuDxzbgCwBmVkvqHr1vAa4B7jKzmtkWO982NS9jeMT50cttuS5FRGTe\nZBT87v4UMN0xkFuBv/eUHcASM1sOvBvY7u4d7n4W2M70HUhOvXltDbXxGNv3nsp1KSIi8yZbx/hX\nAkfS5o8Gy6ZafhEz22ZmrWbW2taWmz3uSJHxrtcv5cmXTzM8MpqTGkRE5lvenNx193vdvcXdW+rr\n63NWx6bmBroHkvzsgE7yikhhylbwHwNWp82vCpZNtTxvvX19gpJokQ73iEjBylbwPwy8Pxjdcy3Q\n6e4ngMeAm8ysJjipe1OwLG+Vx6K8fX2C7XtP4e65LkdEJOuimTQysweA64GEmR0lNVKnGMDdvwg8\nAtwM7AP6gA8E6zrM7JPAM8Gvutvd8/4YyqbmBp548TR7T3TxhhXVuS5HRCSrMgp+d79thvUOfHiK\ndfcB9116abnzrtc3YLaL7XtPKfhFpODkzcndfFJfWcLVa2p0nF9ECpKCfwo3bmhgz/Eujp3rz3Up\nIiJZpeCfwqbmBgCe0F6/iBQYBf8ULl9awbpEXId7RKTgKPinsam5gR372+ns10XbRKRwKPinsam5\ngeSo86NXdNE2ESkcCv5pXLWmhjpdtE1ECoyCfxqRIuOGDUv54UunGUrqom0iUhgU/DPY1LyM7sEk\nTx9oz3UpIiJZoeCfwXWXJygt1kXbRKRwKPhnUBaL8Pb19Tyhi7aJSIFQ8Gdg04YGjncOsOd
4V65L\nERGZMwV/Bt61YSlm8LgO94hIAVDwZyBRUcKbddE2ESkQCv4MbWpu4MUTXRw925frUkRE5kTBnyFd\ntE1ECoWCP0Pr6iu4rD7O9hcV/CKyuGUU/Ga22cxeNrN9ZnbnJOv/j5ntDB6vmNm5tHUjaesezmbx\nC21T8zKe3t+hi7aJyKI2Y/CbWQS4B9gCNAO3mVlzeht3/wN33+juG4HPA99MW90/ts7db8li7Qtu\n7KJtP3z5dK5LERGZtUz2+K8B9rn7fncfAh4Ebp2m/W3AA9koLt9ctXoJiYoSDesUkUUtk+BfCRxJ\nmz8aLLuIma0FmoAfpC0uNbNWM9thZr801ZOY2bagXWtbW35eBrmoyLhxw1J+9HIbg8mRXJcjIjIr\n2T65uxX4urunp+Jad28BfgP4azO7bLIN3f1ed29x95b6+vosl5U9N25ooGcwyY79HbkuRURkVjIJ\n/mPA6rT5VcGyyWxlwmEedz8W/NwP/BC46pKrzCPXrU9QVhxh+96TuS5FRGRWMgn+Z4D1ZtZkZjFS\n4X7R6Bwzez1QA/w0bVmNmZUE0wngbcDebBSeK6XFEd6+PsETe0/rom0isijNGPzungTuAB4DXgT+\nyd33mNndZpY+Smcr8KBfmIYbgFYzex54Evi0uy/q4IfU6J6TXQPsPqaLtonI4hPNpJG7PwI8MmHZ\nxyfMf2KS7X4CvHEO9eWlGzY0UGSwfe9J3riqOtfliIhcEn1zdxZq4zFa1tZqWKeILEoK/lna1NzA\nSye7OdKhi7aJyOKi4J+lsYu26VLNIrLYKPhnqTERZ/3SCgW/iCw6Cv452NTcwM8OdnCubyjXpYiI\nZEzBPwc3NjcwMuo8qYu2icgiouCfg42rllBfWaLDPSKyqCj450AXbRORxUjBP0ebmhvoHRrhp6+1\n57oUEZGMKPjn6OcvS1Aei+hwj4gsGgr+OSotjvCO9fU88eIpRkd10TYRyX8K/izY1NzAqa5Bdh3r\nzHUpIiIzUvBnwbtev5RIkelwj4gsCgr+LKiJx2hZW6PgF5FFQcGfJZuaG3j5VDeH23XRNhHJbwr+\nLBm7aNtDrYdzXImIyPQU/Fmyti7OjRsauOfJ1/jDh3bSO5jMdUkiIpPKKPjNbLOZvWxm+8zszknW\n/5aZtZnZzuDxobR1t5vZq8Hj9mwWn2++9L4385Eb1vPtncd4z+f/jd0a5SMieWjG4DezCHAPsAVo\nBm4zs+ZJmj7k7huDx5eDbWuBu4C3ANcAd5lZTdaqzzORIuMPNv0cX/3QtfQNJfmVv/0J9//7Ad2U\nXUTySiZ7/NcA+9x9v7sPAQ8Ct2b4+98NbHf3Dnc/C2wHNs+u1MXjrZfV8ehH3sF16xN84rt72fYP\nz+rSzSKSNzIJ/pXAkbT5o8Gyif6Lmb1gZl83s9WXuC1mts3MWs2sta2tLYOy8lttPMZXbm/hT9/T\nzA9fPs3Nn/0xzxzsyHVZIiJZO7n7XaDR3d9Eaq/+7y71F7j7ve7e4u4t9fX1WSort8yMD17XxDd+\n9+cpjhbx61/6KZ//11cZ0aUdRCSHMgn+Y8DqtPlVwbJx7t7u7oPB7JeBN2e6bRi8adUSvvd71/Ge\nN63gL7e/wvu+8jSnugZyXZaIhFQmwf8MsN7MmswsBmwFHk5vYGbL02ZvAV4Mph8DbjKzmuCk7k3B\nstCpLC3ms1s38he/+ib+4/A5bv7sj3XnLhHJiRmD392TwB2kAvtF4J/cfY+Z3W1mtwTNft/M9pjZ\n88DvA78VbNsBfJJU5/EMcHewLJTMjF9rWc13f+9t1FeW8IH/9wx/9siLDCVHc12aiISI5eNQw5aW\nFm9tbc11GfNqYHiET/3LXv5xx2GuXL2Ez2+9ijV15bkuS0QWKTN71t1bMmmrb+7mSGlxhE/90hv5\nwnuvZn9bD//5cz/mey8cz3VZIhICCv4c2/LG5Tzy+2/n8oYK7vjaf/DeL+/ga08f5kzP4Mwbi4jM\ngg715InhkVHufWo//9x6hIPtfRQZXNNUy5YrlrP5imU0VJXmukQRyWOXcqhHwZ9n3J2XTnbz6K4T\nPLr7JK+e7gHgzWtr2HLFMjZfsYxVNToXICIXUvAXkH2nu3l010ke3X2SvSe6AHjTqmq2XLGcLVcs\nozERz3GFIpIPFPwF6uCZXr6/5ySP7jrB80dTV/7csLyKLVcsY8sVy1jfUJnjCkUkVxT8IXD0bB/f\n332S7+8+SeuhswBcvrSC6y5P0Ly8iuYVVfxcQyWxqM7fi4SBgj9kTnUN8NieVCew88g5+oZGACiO\nGOuXVvKGFVWpx8pqNiyvoqIkmuOKRSTbFPwhNjLqHGrvZc/xruDRyd7jXbT3nr8sdGNdOW9YUU3z\nWIewopr6ypIcVi0ic3Upwa9dvwITKTLW1Vewrr6CX7xyBZAaKXSqa5C9JzrZcyzVIbxw7Bz/suvE\n+Hb1lSW8YUUV65dWsLYuztq6ctbWxlmxpJRoRIeLRAqJgj8EzIxl1aUsqy7lXa9vGF/e2T/M3uNd\n7D1x/pPBT15rv+DaQdEiY2VNWaozqC1nbV05a2rLWVsXZ01tOWWxSC7+JBGZAwV/iFWXFfPWy+p4\n62V148tGR51T3QMcau/jcHsfB9t7OdSRmt55+CxdAxfeRL6hqoS1tXHW1JXTWFfOqppyllWXsry6\nlIaqUkqL1TGI5BsFv1ygqMhYXl3G8uoyrl1Xd9H6c31DHGzv41B7L4fb+8Y7hadeaePr3RdfZqI2\nHmN50BGkOoQyllWl5pcvSU3rU4PIwlLwyyVZUh5jY3mMjauXXLSuf2iE4539nOwc4ETnACc7+zne\nOcDJzgGOnRvg2UNnOds3PMnvLB7vDBqqSqmriJGoKBl/1Fem5qvLijGzhfgzRQqagl+ypiwW4bL6\nCi6rr5iyzcDwCCc6BzhxQQeRmj9+boDdx7vo6B2a9PaUxRGjLl5CovLCjiFREaO+MjVdVxGjNh6j\ntjymk9IiU1Dwy4IqLY7QlIjTNM2lJkZHnbN9Q5zpGeJMzyBnegZp6x68YP5MzyAvneimvXeQ4ZHJ\nhyRXlxVTFw86gnjsfKcQL5l0eUlUh5wkHBT8kneKioy6ihLqKkp4HdNfhsLd6ewfDjqHIdp7B+no\nHaK9Z4iO3tSjvXeQg+29PHf4LB29Q0x1r/uKkig18WKWlMVYUl5MdVkxS8onzseoKS8O5mNUlxXr\n29Gy6GQU/Ga2GfgsEAG+7O6fnrD+D4EPAUmgDfhtdz8UrBsBdgVND7v7LYhkiZmxpDzGkvIYly+d\nuf3oaKqjaO8d6xgG6egdpqN3kPbeIc71DXOub4hz/cMcO9vPuf7U/FSdBUA8FmFJeex8RxF0CqlO\nY8J8WkeiEU+SKzMGv5lFgHuATcBR4Bkze9jd96Y1+w+gxd37zOx3gb8Afj1Y1+/uG7Nct8isFBUZ\nNfEYNfFYxtuMjjo9Q0k6+4Y51zfM2aBj6OwLOo
r+4fEOo7N/mFdO9XCub5jO/qEpD0MBlESLxjuC\n6rROoqo09emiqiz1MzUdHV9WVapOQ+Ymkz3+a4B97r4fwMweBG4FxoPf3Z9Ma78D+M1sFimSS0VF\nRlVpKnBX12a+nbvTNzQy/qmhM72T6B8a70jO9ac6kEPtfbxwdJjO/mH6h0em/d0l0aILO4bS8x1D\nZWmUqtJiKktTHUZl6fllVaVRqsqKKYkWaYRUiGUS/CuBI2nzR4G3TNP+g8CjafOlZtZK6jDQp939\n25dcpcgiZGbES6LES6KsXFJ2SdsOJUfpGhimqz/VEXT2D9M1kEz9HHsMnF93pmeI19p66RoYpnsg\nOemoqHTFEUt1DKXR8x1ESaqDqCwtpqI0SlVplIqSKBVjy0qCZcHyeCxKUZE6j8Uoqyd3zew3gRbg\nnWmL17r7MTNbB/zAzHa5+2uTbLsN2AawZs2abJYlsujEokXjw1Uv1dgnje6BZNARDNPVn5ruGkiO\nz3dfMD/M6a5BugeS9AymHjMxg4pYlMrSCzuHitIolSXnO42KkqBNSfGE+dT6eCxKRB3Igsok+I8B\nq9PmVwXLLmBmNwIfA97p7uNf4XT3Y8HP/Wb2Q+Aq4KLgd/d7gXshdXXOzP8EEUmX/kljWfXs7tU8\nMur0DiVTHcFAkp7BVCfRMxAsG0x9shjrKLoHhukZTHK2b4gjZ/uCbZLjlwifSTwWSXUCYx1Gyfnp\neEkk1WmURMb/rsrgZ3qbypJi4iURfX8jA5kE/zPAejNrIhX4W4HfSG9gZlcBXwI2u/vptOU1QJ+7\nD5pZAngbqRO/IpLHImnnNeYiOTJK79BI6lNEWodxfj550XzPYJLewSQdvX3j0z2DyWlPlKeLRYuo\nKIlSHouMdyBj0+WxKBUlEcqDDqM8FnQmsVTnMda2PBYlHku1KyuOFNwnkhmD392TZnYH8Bip4Zz3\nufseM7sbaHX3h4HPABXAPwcnjMaGbW4AvmRmo0ARqWP8eyd9IhEpONFIEdVlRVSXza0DARhMjtA7\nODLeEaR3EqllI/QMJOkbStI7lBxvO/bJ5WTnAH1BJ9Q7mCQ5w3mQdKXFRZTHxjqFSNr0WOdxfros\nFqG8ODVfFrQvS2tbVnz+d5QW5+Yku27EIiKhNJgcoW9wZPyQVKqzSE33DQU/B0cumO8dStI/NELv\n0Ah9E9sG05fQn2DGeEdQFouwvKqMf/qdt87q79GNWEREZlASjVASjVzSdzpm4u4MJkfHO4H+8Q5h\nhP7h8x1E//jPZKoTCaYX6vsZCn4RkSwxM0qLI5QWR6jNYoeSbTr9LSISMgp+EZGQUfCLiISMgl9E\nJGQU/CIiIaPgFxEJGQW/iEjIKPhFREImLy/ZYGZtwKFZbp4AzmSxnGxTfXOj+uZG9c1NPte31t3r\nM2mYl8E/F2bWmun1KnJB9c2N6psb1Tc3+V5fpnSoR0QkZBT8IiIhU4jBf2+uC5iB6psb1Tc3qm9u\n8r2+jBTcMX4REZleIe7xi4jINBT8IiIhs2iD38w2m9nLZrbPzO6cZH2JmT0UrH/azBoXsLbVZvak\nme01sz1m9pFJ2lxvZp1mtjN4fHyh6gue/6CZ7Qqe+6L7XFrK54LX7wUzu3oBa3td2uuy08y6zOyj\nE9os6OtnZveZ2Wkz2522rNbMtpvZq8HPmim2vT1o86qZ3b6A9X3GzF4K/v2+ZWZLpth22vfCPNb3\nCTM7lvZvePMU2077f30e63sorbaDZrZzim3n/fXLOndfdA9SN31/DVgHxIDngeYJbf4r8MVgeivw\n0ALWtxy4OpiuBF6ZpL7rge/l8DU8CCSmWX8z8ChgwLXA0zn8tz5J6sspOXv9gHcAVwO705b9BXBn\nMH0n8OeTbFcL7A9+1gTTNQtU301ANJj+88nqy+S9MI/1fQL4bxn8+0/7f32+6puw/i+Bj+fq9cv2\nY7Hu8V8D7HP3/e4+BDwI3Dqhza3A3wXTXwdusAW6nb27n3D354LpbuBFYOVCPHcW3Qr8vafsAJaY\n2fIc1HED8Jq7z/ab3Fnh7k8BHRMWp7/H/g74pUk2fTew3d073P0ssB3YvBD1ufvj7p4MZncAq7L9\nvJma4vXLRCb/1+dsuvqC3Pg14IFsP2+uLNbgXwkcSZs/ysXBOt4mePN3AnULUl2a4BDTVcDTk6x+\nq5k9b2aPmtkbFrQwcOBxM3vWzLZNsj6T13ghbGXq/3C5fP0AGtz9RDB9EmiYpE2+vI6/TeoT3GRm\nei/Mpzs1GmyuAAACjElEQVSCQ1H3TXGoLB9ev7cDp9z91SnW5/L1m5XFGvyLgplVAN8APuruXRNW\nP0fq8MWVwOeBby9wede5+9XAFuDDZvaOBX7+GZlZDLgF+OdJVuf69buApz7z5+XYaDP7GJAEvjpF\nk1y9F74AXAZsBE6QOpySj25j+r39vP+/NNFiDf5jwOq0+VXBsknbmFkUqAbaF6S61HMWkwr9r7r7\nNyeud/cud+8Jph8Bis0ssVD1ufux4Odp4FukPlKny+Q1nm9bgOfc/dTEFbl+/QKnxg5/BT9PT9Im\np6+jmf0W8B7gvUHndJEM3gvzwt1PufuIu48C/3eK58316xcFfgV4aKo2uXr95mKxBv8zwHozawr2\nCrcCD09o8zAwNoLiV4EfTPXGz7bgmOBXgBfd/a+maLNs7JyDmV1D6t9iQTomM4ubWeXYNKmTgLsn\nNHsYeH8wuudaoDPtsMZCmXJPK5evX5r099jtwHcmafMYcJOZ1QSHMm4Kls07M9sM/A/gFnfvm6JN\nJu+F+aov/ZzRL0/xvJn8X59PNwIvufvRyVbm8vWbk1yfXZ7tg9Sok1dInfH/WLDsblJvcoBSUocI\n9gE/A9YtYG3XkfrY/wKwM3jcDPwO8DtBmzuAPaRGKewAfn4B61sXPO/zQQ1jr196fQbcE7y+u4CW\nBf73jZMK8uq0ZTl7/Uh1QCeAYVLHmT9I6pzRvwKvAk8AtUHbFuDLadv+dvA+3Ad8YAHr20fq+PjY\ne3BslNsK4JHp3gsLVN8/BO+tF0iF+fKJ9QXzF/1fX4j6guX3j73n0tou+OuX7Ycu2SAiEjKL9VCP\niIjMkoJfRCRkFPwiIiGj4BcRCRkFv4hIyCj4RURCRsEvIhIy/x+9idcg4N1fLgAAAABJRU5ErkJg\ngg==\n", 839 | "text/plain": [ 840 | "" 841 | ] 842 | }, 843 | "metadata": {}, 844 | "output_type": "display_data" 845 | } 846 | ], 847 | "source": [ 848 | "plt.plot(history)\n", 849 | "plt.title(\"loss\")\n", 850 | "plt.show()" 851 | ] 852 | }, 853 | { 854 | "cell_type": "code", 855 | "execution_count": 20, 856 | 
"metadata": { 857 | "collapsed": false 858 | }, 859 | "outputs": [ 860 | { 861 | "name": "stdout", 862 | "output_type": "stream", 863 | "text": [ 864 | "Accuracy of the network on the 10000 test images: 94 %\n" 865 | ] 866 | } 867 | ], 868 | "source": [ 869 | "#Accuracy\n", 870 | "correct = 0\n", 871 | "total = 0\n", 872 | "for data in test_loader:\n", 873 | " images, labels = data\n", 874 | " outputs = model(Variable(images))\n", 875 | " _, predicted = torch.max(outputs.data, 1)\n", 876 | " total += labels.size(0)\n", 877 | " correct += (predicted == labels).sum()\n", 878 | "\n", 879 | "print('Accuracy of the network on the 10000 test images: %d %%' % (\n", 880 | " 100 * correct / total))" 881 | ] 882 | }, 883 | { 884 | "cell_type": "markdown", 885 | "metadata": {}, 886 | "source": [ 887 | "## Denoising Auto Encoder" 888 | ] 889 | }, 890 | { 891 | "cell_type": "code", 892 | "execution_count": 24, 893 | "metadata": { 894 | "collapsed": true 895 | }, 896 | "outputs": [], 897 | "source": [ 898 | "class DAE(nn.Module):\n", 899 | " def __init__(self):\n", 900 | " super(DAE, self).__init__()\n", 901 | " self.fc1 = nn.Linear(784,500)\n", 902 | " self.fc2 = nn.Linear(500, 84)\n", 903 | " self.fc3 = nn.Linear(84, 500)\n", 904 | " self.fc4 = nn.Linear(500, 784)\n", 905 | "\n", 906 | " def forward(self, x):\n", 907 | " x = F.relu(self.fc1(x))\n", 908 | " x = F.relu(self.fc2(x))\n", 909 | " x = F.relu(self.fc3(x))\n", 910 | " x = F.sigmoid(self.fc4(x))\n", 911 | " return x" 912 | ] 913 | }, 914 | { 915 | "cell_type": "code", 916 | "execution_count": 25, 917 | "metadata": { 918 | "collapsed": true 919 | }, 920 | "outputs": [], 921 | "source": [ 922 | "#ネットワーク定義\n", 923 | "model_dae = DAE()\n", 924 | "#最適化手法\n", 925 | "optimizer = optim.Adam(model_dae.parameters(), lr=1e-3)\n", 926 | "#loss関数の定義\n", 927 | "criterion = nn.BCELoss()" 928 | ] 929 | }, 930 | { 931 | "cell_type": "code", 932 | "execution_count": 26, 933 | "metadata": { 934 | "collapsed": true 935 | }, 936 | "outputs": [], 937 | "source": [ 938 | "def train(epoch, model):\n", 939 | " model.train()\n", 940 | " loss_history = []\n", 941 | " for loop in range(epoch):\n", 942 | " running_loss = 0.0\n", 943 | " for i, data in enumerate(train_loader):\n", 944 | " \n", 945 | " inputs, _ = data\n", 946 | " inputs = Variable(inputs)\n", 947 | " inputs_noise = inputs + Variable(0.5 * torch.randn(inputs.size()))\n", 948 | " optimizer.zero_grad()\n", 949 | " output = model(inputs_noise)\n", 950 | "\n", 951 | " loss = criterion(output, inputs)\n", 952 | " running_loss += loss.data[0]\n", 953 | "\n", 954 | " #backpropagation\n", 955 | " loss.backward()\n", 956 | " optimizer.step()\n", 957 | " \n", 958 | " print('[epoch %d] loss: %.3f' % (loop + 1, running_loss / len(train_loader)))\n", 959 | " loss_history.append(running_loss / len(train_loader))\n", 960 | "\n", 961 | " return loss_history" 962 | ] 963 | }, 964 | { 965 | "cell_type": "code", 966 | "execution_count": 27, 967 | "metadata": { 968 | "collapsed": false 969 | }, 970 | "outputs": [ 971 | { 972 | "name": "stdout", 973 | "output_type": "stream", 974 | "text": [ 975 | "[epoch 1] loss: 0.179\n", 976 | "[epoch 2] loss: 0.123\n", 977 | "[epoch 3] loss: 0.115\n", 978 | "[epoch 4] loss: 0.111\n", 979 | "[epoch 5] loss: 0.108\n", 980 | "[epoch 6] loss: 0.107\n", 981 | "[epoch 7] loss: 0.105\n", 982 | "[epoch 8] loss: 0.104\n", 983 | "[epoch 9] loss: 0.104\n", 984 | "[epoch 10] loss: 0.103\n" 985 | ] 986 | } 987 | ], 988 | "source": [ 989 | "history = train(10,model_dae)" 990 | ] 991 | }, 992 | { 993 | 
"cell_type": "code", 994 | "execution_count": 28, 995 | "metadata": { 996 | "collapsed": true 997 | }, 998 | "outputs": [], 999 | "source": [ 1000 | "#1バッチ分の画像とラベルの読み込み\n", 1001 | "dataiter = iter(train_loader)\n", 1002 | "images, labels = dataiter.next()\n", 1003 | "images_noise = images + 0.5 * torch.randn(images.size())\n", 1004 | "outputs = model_dae(Variable(images_noise))" 1005 | ] 1006 | }, 1007 | { 1008 | "cell_type": "code", 1009 | "execution_count": 29, 1010 | "metadata": { 1011 | "collapsed": false 1012 | }, 1013 | "outputs": [ 1014 | { 1015 | "data": { 1016 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAACFCAYAAABL2gNbAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAGRxJREFUeJztnXuMldXVxp9V5FIVhZFLKSJoRBG8IiCIIl5QsV5oatpK\nQunVNoVWozbiZ2tM+o+hrUlrS5W2BDRG1IJK1aplvACWgFAoCIoDWuUql0EZQCrQ/f3BYbPWmjln\nhpkz57LP80uMa886c949Z73v5j3Pu/ZaEkIAIYSQ8ucLxZ4AIYSQ/MAFnRBCEoELOiGEJAIXdEII\nSQQu6IQQkghc0AkhJBG4oBNCSCK0aEEXkWtFZI2IrBWRSfmaFCkujGu6MLZpI83dWCQibQC8B2AU\ngA0A3gJwSwhhdf6mRwoN45oujG36HNOC3x0CYG0I4X0AEJGZAG4CkPXkEBFuSy0RQgiSxXXUcW3X\nrl049thj4/gLX8j+xW///v3R1r8DAAcOHNDvaXwHDx6M9p49e4yvY8eO0f7vf//r59bgsf3xvc+j\n5+Zf27lz52hv377d+I455sgl5j+XXDdTIkfC07ZtW+P73//+F+0vfvGL0a6trcWePXuyxRU4ytjy\nei0ptocQujb2opYs6D0BrFfjDQAuasH7kdLgqON67LHH4tJLLzXjw+iFCQA2btwY7QsuuMD4du7c\nGe1evXpl9f3rX/8yvssuuyzaa9euNb4+ffo0eGx//E2bNhmfn7c+/pYtW4zvq1/9arSnTZtmfF26\ndIl2hw4djE//I+WPpxf/L33pS8a3d+/eaJ9zzjnR/u1vf4tG4DVbvnzYlBe1ZEFvEiJyK4BbW/s4\npLDouOq7RFLe8Hotb1ryUHQjAH0bdXLmZ4YQwtQQwqAQwqAWHIsUjqOOq5dHSMnSaGx5vZY3LblD\nfwtAXxE5FYdOim8CGJuXWZFictRxbdOmDTp16hTH7777brTr6urMa0ePHh3t+fPnG1/Pnj2z/p6W\nWbwc8+9//zvaQ4cONb6amppoe4ln9+7d0d61a5fx6b8BAMaMGRNtr5MvWrQo2lVVVcZ3+eWXR9vL\nOm+//Xa0BwwYYHwnnXRStP3nNHjw4GgvWLAg2v4zawBes4nT7AU9hHBARCYCeBlAGwDTQgir8jYz\nUhQY13RhbNOnRRp6COFFAC/maS6kRGBc04WxTZtWfyhK0qd9+/Y4/fTT41inDvoUPz3WGSgA0Ldv\n32j/8Y9/NL6LLjqSjOHfU0s1Dz30kPFdc8010X7nnXeMT2fE6FRAALjxxhvN+L333ov2sGHDjE9n\nz3z66afGp6UinV4JWOnIZ7ls3bo12v369TO+7t27R1tn3OgUSVKZcOs/IYQkAhd0QghJBC7ohBCS\nCBTdSIs5ePAgduzYEcerVh1JnJg4caJ57ezZs6P9ySefGJ/WyfXOU8Dq5n4r/H/+859oT5pk601N\nmTIl2nrXJmA17G3bthnfkiVLzFjv1vS7Ua+44opoP/nkk8an5+p1+ltuuSXaK1asMD6tjft5a3r3\n7h1t7gcgvEMnhJBE4IJOCCGJQMmFtJjPPvvM7Hrct29ftL2sondgDhpkd5d/9NFH0fZpfHpXp5cg\ndFqf3+GpU/x0ES0AWLp0abRPOOEE47vkkkvMePny5dH+8ENbJ2nGjBnRvv76641P70DV6ZUA8Mgj\nj0T7wgsvND5fRTHbe+oCX4TwDp0QQhKBCzohhCQCF3RCCEkEauikxbRt2xYnn3xyHB933HHR9umA\nutqibxShG1XMnDnT+M4999xo6zRFADjllFOirbVuwKY/PvPMM8bXteuRBjC6WiQAPP3002asdfqR\nI0can07Z9Fq8bvbhj3/HHXdEe9asWcanyyf46pKLFy9u0Ke7KpHKhHfohBCSCFzQCSEkESi5kBZz\n8OBBk1p3/PHHR1s3UAZsg2effvjPf/4z2rp6IwBs3rw52m3atDG+efPmNXhsAJg7d260TzvtNOPT\nMsv69euNTzeYAKy0oRtqALZxxptvvml8ukqjT4X84IMPou37hlZXV0fbp3fqPqI6ZVKndpYSugqk\nbwzev3//aPsm2p9//nm0dfwBW43SS025mm/nIldfV/+eftdvqcA7dEIISQQu6IQQkghc0AkhJBGk\nuXpTsw4mkpeDffnLX462bpIL2Opznr1790bbd8TR27n91m5fea+pfPbZZ2bsu9lko3379mas9Trf\nrae5hBCk8Vc1jY4dO4bzzz8/jnWFQR8P/dnq1DwAOPHEE6M9ZMgQ49Nb3HWaIgCcd9550X7llVeM\nT6cRaj0dsJ/lqFGjjE83lwZsQ+e77rrL+HT6pT939FxfffVV49Png0+b1Nq719evvvrqaK9evTra\nzz77LLZt25a3uDb3evXPOPTziDPOOMP4br755mh/7WtfMz4dO69Z62c2vtzDunXroq2fUwD1K1Lq\nZy4+Bvo89ueDfsahYwBY7T+PLA0hDGrsRbxDJ4SQROCCTgghiVCWaYtayrjvvvuMb9y4cdHWKVGA\nlWr0Lr2jwac25ZKs/Nc9nZY3f/5849Opd1VVVca3ffv2aPvmCqXAvn37sGbNmjjWcon/vLQE4dMI\ndUNlHzv9GejdpkB9mUVz6qmnRts3zdBz8XJYt27dzHjChAnR9imOuqLj3/72N+PTu0h9k+qBAwdG\nW++uBWxqov+6rxuI6HRK3/ijWHh5RKeq+uqbWgLxTbR1iqO/znQ1Sp9iOmLEiGj7VEh/Pmopz79W\np1t6efCqq66K9re//W3j+/jjj1EseIdOCCGJwAWdEEISgQs6IYQkQlmmLTaVPn36mLFOb/vRj35k\nfG+99Va0Bw8enPU9dcoYkL9txnV1ddH+zne+Y3y+Sl8+yGfaYqdOnYKuQDh06NBo6xQywOrdAwYM\
nMD6dnud1Y50O6DV07fOVELVm68sC6JRKrbUD9StBan3flyXQWrx+TgPYypD6PQDbMcnrzjoVTjeh\nBqxOr7XcF154ATt27Ch62mID7xNtn9KoUwV9Q3H9HMVX7dRppP56HTZsWLR1FdCGjq+fx/mURq3h\n++tVP9PxKba+GmieYNoiIYRUEo0u6CIyTUS2isjb6mdVIvIPEanJ/L9zrvcgpQfjmi6MbeXSqOQi\nIiMA7AbwaAjh7MzPJgOoDSE8ICKTAHQOIdzd6MEKLLm0BrrRAlD/q7L+6u53vumxrzqn46BTLwHg\niSeeaN5kc3MZ8hTXqqqqoHdabty4MdpeLtDSlq+Sp1PIdBokAHzjG9+Itk4ny8w72v4r9cqVK6O9\naNEi49Npg17G0Y2YAVvJ0O8Gveaaa6Ltq0u+9NJL0dYpjID9e7XkBljJ5/777ze+hx56KNq6SUd1\ndTVqa2slX9dsIa5XHbsOHToYn5ZA9C5v/3s+3VFfo7ppClA/JVjLXhdffLHx6evQ797etGlTtHW1\nTcDKMXkkP5JLCGEegFr345sAHK7bOQPAmKOeHikqjGu6MLaVS3M3FnUPIRwuULwFQPdsLxSRWwHc\n2szjkMLSrLj6bxukJGlSbHm9ljctfigaDmkFWb+ahRCmhhAGNeXrAikdjiau/usoKW1yxZbXa3nT\n3Dv0j0WkRwhhs4j0ALC10d9IhBUrVuT063Qqvw1cpzz6u1qdlqV15gLTrLiKiNEY9TZpn6qnqzL6\nMgbvv/9+tL32qTvW+IqTOjVN6+IA8Otf/zqrT6c4eg3bd8jRZRx82qLWwn3XoIcffjjakydPNr7n\nnnsu63vqLf1z5swxPr21XJcI8M8WHCV5zepnR357va5amOtZny8noCuw+ucm/pmXPm/95zd+/Pis\nx9fXuT93iklz79DnADj8144H8FyO15LygXFNF8a2AmhK2uITABYCOFNENojI9wA8AGCUiNQAuCoz\nJmUE45oujG3l0qjkEkK4JYvryjzPJQl0Af/f/OY3xqer8vmvcNOnT492ISoq5juu+qus3rmn5QjA\nVt7TNgCcddZZ0faNmMeOHRtt3SAYsCmO/j2///3vR9unIi5btizaWroA6n+N1+mIfmeirn6oJSXA\npr75nar6fPDVFnWanpfu9E5cLT8dPqfK9Zr114ROQfVSiX6tP8f02MtzuXaq+gqsunrlzp07je/e\ne++Ndis1tGgW3ClKCCGJwAWdEEISgQs6IYQkQll2LColdOcUAPjZz34Wbb+dXOt+M2bMML577rmn\nFWZXGEIIJuVM69g+/VA3P/ZNtHVHH60vA7ZKo99qrXVSX11PV77zOrVOTfQV8ryGrrf3e84888xo\n665UgNVhfScbXfrAa7s6ddU3pV6+fHm09+3bF+1CVk5tDfz89fZ+X+3Qa+pNxZcX+OlPfxrtfv36\nZT3G1KlTjU8/Nymlz5136IQQkghc0AkhJBEoubQQL5X45hQaXaXvl7/8ZavNqdC0b98effv2jeNH\nHnkk2n6n6PDhw6OtGxEAVvbwcoyWQHyz5SuvPJKNp3fwAfZr9MyZM41Ppx/W1tpaVhdeeKEZ60bE\nfm66iYeXfN54441o+ybO+hgLFy40Pt2U2jf70OmPXbp0iXYjO0XLDt20OVfaYlPfA6jfoOb222/P\n+lpdUXHKlCnGV0qpihreoRNCSCJwQSeEkETggk4IIYmQluhWALSWCgA//vGPm/y7N954Y7RbqZFs\nUWjXrp1pyK1T6XRKH2DLGvjGzLrBsk8x1FUuN2zYYHy5ut7oLfS+25R+puG35WttGrCVIC+//HLj\n+/Of/9zg3wDYFM7vfve7xqefo1x//fXG99e//jXr3Hr06BFt/fc1N5WvVPCpibl8epxLT/fdjH7x\ni1+YsT5f9HkLAPfdd1+0tZ7e2DGLCe/QCSEkEbigE0JIInBBJ4SQRKCGfpSsXr3ajHX5Tc+kSZPM\n2OdPp8Inn3yCZ555Jo61NuxLm+rt9n57vc7t9b+nO8tce+21xqe3/vvStq+//nq0ff74CSecEG1d\nZheoH+cbbrgh2s8++6zx6dK7vju91rXXr19vfLo7/JIlS4xP/x3+mUG3bt2irTXidu3aIVV8jr3+\nXP2zA53v7/d7nH322Wasz53HHnvM+J566qmsxyhVeIdOCCGJwAWdEEISgZJLA/gKinfffXe09ddd\noP5XsZ///OfR1g2KU+aYY44xaX5z5841Po3u/OMbMevG2bm2sWt5B7AVLqurq40vW4ofYL9++y5I\nOg0TAGbPnp3Vp7/i+y3huhSAr8SoZR4/N92xqXfv3sanpRVd9c+n3ZUbPhVQV/D02/L12Ddc/9Wv\nfhVtnyrq0x/1OTht2rSsxy8XeIdOCCGJwAWdEEISgQs6IYQkAjX0Brj55pvNWOviXjN/+OGHzXj6\n9OmtNq9SZdeuXUY315qvLxn70ksvRbtnz57Gd+mll0bba8q6PK/XitesWRPtM844w/i2bdsW7YED\nBxqf1l59ZyWfwqb/Jq/Tf/DBB9H25Qz0swCd3ghYHdj/TVdccUW0dZcnwJYI1l2QylHzzYXW1H0a\nq8annP7gBz+Ittfed+zYYcZ33HFHtH3p5VzHLFV4h04IIYnABZ0QQhKBkksGXUVx/PjxWV/n09J0\nOhtQPxWvEujUqRO+8pWvxLHeEenTAR988MFo62qHgK2o6KWt559/Ptq+afLLL78cbb/7VMdD7woE\ngBNPPDHau3fvNj4f508//TTavqLj+eef3+DrAFu1cenSpcanZSS/M1VLTu+++67x6YqOei5eXkgZ\nfb3ef//9xqc/B787eOLEiWb897//PetrS7WiYi4q5wwghJDEaXRBF5FeIvKaiKwWkVUiclvm51Ui\n8g8Rqcn8v3Nj70VKB8Y1TRjXyqYpd+gHANwZQugPYCiACSLSH8AkANUhhL4AqjNjUj4wrmnCuFYw\njWroIYTNADZn7DoReQdATwA3ARiZedkMAK8DuLuBtygLxo0bF+2RI0dmfd0Pf/hDM/YpbOVCPuO6\nf/9+bNmyJY719upBgwaZ1y5evDjavjqgTilbtmyZ8d10003R9iUVdLrZqFGjjE93AhowYIDxnXfe\nedHu37+/8fn3Oe2006JdVVVlfAsWLIi231r+0UcfRdunFb744osNzgU4lAp6GK21AzY1cv78+dE+\nePBgster/1ynTJkSbV01E7Dat34dUL9SZrmXS/Ac1UNREekD4AIAiwB0z5w8ALAFQPcsv3MrgFub\nP0XS2rQ0rr72DSkNeL1WHk1+KCoixwOYBeD2EMIu7QuH/kls8JFwCGFqCGFQCGFQQ35SXPIR15Tr\ncJcrvF4rE2lKao6ItAXwPICXQwgPZn62BsDIEMJmEekB4PUQwpmNvE/J5AH5qolvvPFGtP1XXE2u\nKoDlRAhB8hXXnj17hgkTJsSx3rm5ceNG89ohQ4ZE+803
3zQ+narnKwyecsop0fax000sdDVHAPjW\nt74V7Ysvvtj4nnzyyWj7lD+/U/Siiy6KtpduZsyYEe2dO3can27w3LVrV+PT55I/vj7ewoULjU9/\nvpqamhrs3bs3b3EtpevV3zTodFidfgrYVFUvpfm01jJiaVP+kW1KlosA+AuAdw6fHBnmADicsD0e\nwHPNmSUpDoxrmjCulU1TbjeHAxgHYKWILM/87P8APADgKRH5HoAPAXy9daZIWgnGNU0Y1wqmKVku\nCwBIFveV+Z0OKRSMa5owrpVNGoJwE9ENdX26oa/Sp/GNZomlrq4Or732WhzryoX+GU1tbW209RZ2\nwDaC9jqxrtrodXmd7uh1ct35Z968ecantXdfasCnEeoyBf599DF1Y2EAGDZsWLRzpchprR2wzxf0\neQsA1113XbR12mLKW/9HjBhhxvoz8bH705/+FG2d/lkJpHsGEEJIhcEFnRBCEqGiJJfjjjsu2med\ndZbxaWnAp5797ne/a92JlTkdO3Y0MouWRHwDX13FcOzYscanP+ehQ4caX69evaKtm2l4fANn3aTA\n7zbUDZz9+eAlOZ2q6HeK6vHVV1+ddW6+gqNuTqH/PsCmcK5bt874dAqnlnvWrl2b9djliK4kqVND\nASsv+UqZjz/+eLTLsWJiS+AdOiGEJAIXdEIISQQu6IQQkggVpaH7yn/ZeOGFF8xYp9qR+oQQTDcg\nnUboU8q01u4r4Wmfb4x84MCBaPfr18/4tI68atUq4xs8eHC0t2/fbnxaF9+zZ4/x3XDDDWasUxV9\nNU6t0/rm1jU1NdEePXq08ekSBjr9ELBdkFauXGl8PXr0iLbWkv0zgnLDp13qyqa+3IM+r/zn4+Nc\nSfAOnRBCEoELOiGEJEJFSS5Llixp0ut0UwTANqTVcgI5xOeff24aQ+v66F6u0o2SfeVK3SjCN5hY\nvnx5tHUqIgC8+uqr0b7tttuMb/jw4dH28s/kyZOj7ZsknHvuuWas5bpHH33U+HT6o9/VOWbMmGh7\nKS9X5U4tAf3kJz8xPi3j6J2Q/nMpN3wM9Gfu/zadAuqva53GyLRFQgghZQkXdEIISQQu6IQQkggV\npaE3lQceeMCMfSkAYunQoYOpVrl69epoe0154MCB0dZdZwCrfdbV1RmfbgTtKxPqrfdes9db+H1K\nodZlfXVFXSYCsB1z7rrrLuN75ZVXou11cZ1Sp7snAcCsWbOi7VMhtS7sK4Hquepj+9IC5YBOVfRp\ni3PmzIn26aefbnw6lr///e+Nrxw/h3zBO3RCCEkELuiEEJIITWoSnbeDlVDT2UonhJC3bYXdunUL\nX//6kY5muSoj6kbJixcvNj4tV3h5YtOmTdHWDb0B4M4774z2zJkzjU+f37qBBmB3nOrUQ6B+aqKW\nipYtW2Z8usLjli1bjE//vV5GOuecc6LtKwbqnbI6DRSwzT50RcK5c+eitrY2b3Hl9VpS5KdJNCGE\nkPKACzohhCQCF3RCCEmEQmvo2wB8CKALgFIpiVaJc+kdQuja+MuaBuPaKIxr/qjUuTQptgVd0ONB\nRZY0ReAvBJxL/iil+XMu+aOU5s+55IaSCyGEJAIXdEIISYRiLehTi3TchuBc8kcpzZ9zyR+lNH/O\nJQdF0dAJIYTkH0ouhBCSCAVd0EXkWhFZIyJrRWRSIY+dOf40EdkqIm+rn1WJyD9EpCbz/84FmEcv\nEXlNRFaLyCoRua1Yc8kHjKuZSzKxZVzNXMoirgVb0EWkDYA/ABgNoD+AW0Skf6GOn2E6gGvdzyYB\nqA4h9AVQnRm3NgcA3BlC6A9gKIAJmc+iGHNpEYxrPZKILeNaj/KIawihIP8BGAbgZTW+B8A9hTq+\nOm4fAG+r8RoAPTJ2DwBrijCn5wCMKoW5MK6MLeNavnEtpOTSE8B6Nd6Q+Vmx6R5C2JyxtwDoXsiD\ni0gfABcAWFTsuTQTxjULZR5bxjULpRxXPhRVhEP/zBYs7UdEjgcwC8DtIYRd2lfouaRMMT5Lxrb1\nYVzrU8gFfSOAXmp8cuZnxeZjEekBAJn/b23k9XlBRNri0InxeAhhdjHn0kIYV0cisWVcHeUQ10Iu\n6G8B6Csip4pIOwDfBDCnkd8pBHMAjM/Y43FIG2tVREQA/AXAOyGEB4s5lzzAuCoSii3jqiibuBb4\nQcJ1AN4DsA7AvUV4kPEEgM0A9uOQJvg9ACfh0NPpGgBzAVQVYB6X4NBXsxUAlmf+u64Yc2FcGVvG\nNZ24cqcoIYQkAh+KEkJIInBBJ4SQROCCTgghicAFnRBCEoELOiGEJAIXdEIISQQu6IQQkghc0Akh\nJBH+H9cQs5BUCCDkAAAAAElFTkSuQmCC\n", 1017 | "text/plain": [ 1018 | "" 1019 | ] 1020 | }, 1021 | "metadata": {}, 1022 | "output_type": "display_data" 1023 | }, 1024 | { 1025 | "data": { 1026 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXQAAACFCAYAAABL2gNbAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAF8FJREFUeJztnXmQVNX1x78nAoqKyjIsigoKohgXIhoJiqiYCC641U9c\nCGURMFHKmA1Fqyz9w4SUkVLBmFjBIkYU1Liggogj4BJcIKIIyCIqYJgBhAQBBca5vz9oL+cc6FmY\nnp7p299PFcW5fV73u/3Oe3den3cWCSGAEEJI4fO9hp4AIYSQ3MAFnRBCEoELOiGEJAIXdEIISQQu\n6IQQkghc0AkhJBG4oBNCSCLUaUEXkfNFZImILBeRW3M1KdKw0K7pQtumjextYpGI7ANgKYDzAKwG\n8B6Aq0IIi3I3PZJvaNd0oW3Tp0kd3nsagOUhhBUAICKTAAwEkPXkEBGmpTYSQgiSRVVruzZv3jy0\naNEijisqKqLctGlTs63WVVZWGt3mzZuzvq9169ZR3rp1q9Hts88+UfY3KN/73q4fof597du3z6rb\ntGmTGe+3335R3rJli9E1b948ygcccIDR7dixY4/z9HzzzTdmrI9Ns2bNjE5/R308N2/ejG3btmWz\nK1BL2/J6bVSsDyGUVLdRXRb0wwCsUuPVAH5Yh88jjYNa27VFixa44oor4njjxo1RLimx56DW6QUc\nAObMmRPlDh06GN3gwYOjPHfuXKM7+OCDo6wXOMAusPPmzTO6UaNGRfnf//630c2YMcOMjznmmCi/\n9957Rte9e/co//CH9lCtW7cuygceeKDR6YX5448/Nrpt27ZF+fDDDzc6/Udi7dq1UZ4+fTqqgdds\n4fJ5TTaqy4JeI0RkOIDh9b0fkl+0Xf1CRQoXXq+FTV0ein4BQN86dMy8ZgghPBxC6BlC6FmHfZH8\nUWu7apcDadRUa1ter4VNXe7Q3wPQVUQ6Y+dJMQjA1TmZFWlIam3XiooKfPnll3Gs3S9Tp04127Zs\n2dK8T/ODH/wgyr169TK69evXR3nkyJFGN3v27Ci/8847Rjdr1qwoN2liT3f9mUcddZTRjRkzxown\nT54c5Q8++MDounTpEuXbb7/d6H7/+99H+a233jI6/VzA/1H84otd66z3ob/77rtR7ty5c5T98dwD\nvGYTZ68X9BBChYiMADAdwD4AHgkhLMzZzEiDQLumC22bPnXyoYcQpgKYWu2GpKCgXdOFtk2ben8o\nStJHREyY4eOPPx5l7+b4+c9/HmUfLbJo0a7oOR0GCQAbNmyIsneHHHbYYVG+4YYbjO6OO+6I8qmn\nnmp0Cxfuujlt27at0Wm3BgB8++23UT7rrLOMbvXq1VG+8cYbje6zzz6Lso6UAYC33347yt///veN\nTocxnnbaaUb3/vvvR/nKK6+Mso+UIcUHU/8JISQRuKATQkgicEEnhJBEoA+d1JmKigrj49a+cO//\nveeee6J8/PHHG91///vfKOtwPwC49NJLo3z22Wcb3QsvvBDlsrIyozv22GOjrP3ZAPDJJ59E+cc/\n/rHR6VBEwGaS+hBHnSk6fvx4o+vfv3+UO3XqZHSvvvpqlFetWmV0+rjprFEA6Nq1a5SXLFkSZV8+\ngBQfvEMnhJBE4IJOCCGJsNflc/dqZ0VQve2vf/1rlHXGJAAMGDAgyj6jMd9UUW2x1rRv3z789Kc/\njWNdmVC7QwDryvj666+N7ogjjohyVa6T+fPnG53+nJNOOsnotJvlxBNPNDqdZalDDwGgtLTUjHV2\n6kEHHWR048aNi7IP0ywvL4+yd+vobFBdZAuw39+7eHSo4rRp06I8ceJElJeX58yuKVyvvsKlPz90\nZu8hhxxidEOHDo3yypUrjc5XCs0D82pSjoF36IQQkghc0AkhJBG4oBNCSCLQh15HTjnlFDPW6ez+\n2M6cOTPK/fr1q9+JVUMufehHHHFE+N3vfhfHS5cujbJuPgEARx99dJRXrFhhdMuXL4/yySefbHSP\nPvpolP2zCV3psV27dkan/dQ+9f6+++6Lsg6LBIBzzz3XjLUv3NtVN+rwvvCPPvooyt7vqpt9+MYY\nOgzUV1HUZQh084vHHnsMZWVl9KEr/DONyy+/3IwfeuihKOvuVoAtMfHHP/7R6HwoaR6gD50QQooJ\nLuiEEJIIzBStI7opQ3XojMKU2LFjB9asWRPHOvzLuyd0r07fuOF///tflH0TCe2S8G4N3Rzi9ddf\nN7qOHTtG2Vcj1BUOfXPnl19+2YxvuummKOtqh4BtwecbeujQSB3OCdgQOv99daboSy+9ZHT6577+\nfJGceVuSwbtRevToYcb63PHHz9urEOAdOiGEJAIXdEIISQQu6IQQkgj0odcS38x3xIgRNX6vru6X\nEr5J9CWXXBLlgQMHmm11pUQdCgjYkDIf4qdDA3WJAAB45JFH9vgZfttPP/3U6PQ+9DMAwIZXArbZ\ntEdXUdTfHbBVGrWvHbDlH3xo4qZNm6Lswy21Tj9P2LFjR9Y5Fis+bHbQoEFmvO+++0bZV6vUz3tq\n0IC7UcA7dEIISQQu6IQQkgh0udQSXTERAE444YSs206aNMmMa+OeKSSaNWtmGjXrxszXXnut2VZv\np8MUAeCZZ56J8nnnnWd0Tz/9dJR9Ywxd/dB/pq6iqBtoAMCWLVuirJtGALs3o9DZqXPnzjW6sWPH\nRllnf/r3abcRALz44otR1i4rAGjZsmWUfeXJbt267fEzfWXBYkWHH/qM7EMPPTTr+3RWLwD861//\nirLOzm3M8A6dEEISgQs6IYQkAhd0QghJBPrQa4kPi6uq8p6u5Abs7l9NhY0bN+LZZ5+NY+3/9Q2d\ndYq7P3Y6rM+n4g8ZMiTK3heuw/i8H1n713XzasCGVHqdLhkAAHPmzImy78Kk56PLCfj9v/HGG0an\n/eb6+wH2OYE+ngAwe/bsKJeUlESZYYs70efRnXfeaXS++qIOR7z55puNrgEqKtYZ3qETQkgiVLug\ni8gjIrJWRD5Sr7USkRkisizzf8uqPoM0PmjXdKFti5eauFwmABgH4FH12q0ASkMIo0Xk1sz4ltxP\nr3Ggmx/4zEfP+PHjo/zmm2/W25xywATkyK7777+/qWLXtm3bKD/11FNmW92AYtiwYUb3ox/9KMrr\n1q0zOv2ZPjRRh5T55tu6wbKvdqndM96VNmXKFDPWIY7ePaPn5vev3UH+3NGZq7oxCmC/42WXXWZ0\n2nWnK0iq4zABRXTN+oqKZ5xxRpR1A5A9oW3pw1ELkWrv0EMIrwPY4F4eCODvGfnvAC4BKSho13Sh\nbYuXvX0o2i6E8F3xizIA7bJtKCLDAQzfy/2Q/LJXdvUPMEmjpEa25fVa2NT5oWjYGaqQtfdgCOHh\nEELPmvTDI42H2ti1EBsBFDNV2ZbXa2Gzt3fo5SLSIYSwRkQ6AFhb7TsKmF69ekW5usXLd7MpMPba\nrtqPvX379ij7MDHtU/ZdgXQHI3/Xr0P1fCq89sv7DlJ6
Hz6dX6fQt2/f3uiOPfZYM9bVF70P/6uv\nvoqyPlcAYOvWrVHW3x2w3XMWLlxodDoU0ndB0n7fK6+8co+v74Fkr1nfaUin++tqisDuobL3339/\nlAulomJV7O0d+hQA3wXODgHwfG6mQxoY2jVdaNsioCZhi08AmAOgm4isFpGhAEYDOE9ElgHolxmT\nAoJ2TRfatnip1uUSQrgqi+rcHM+l0TJ48OAo+593b731lhn/4x//yMuc6kou7bp9+3asWrUqjrUL\n4ic/+YnZtmnTplH2IY1XXbVrSr7hRJcuXaJ81llnGZ12x+gQQv85vim11nl3xeeff27GOju0devW\nRqdDI31jDt004bHHHjO6kSNHRtlX87vwwgujPGHChKz7Ky0tjfJ3x73YrlmfHdy3b98o++vVN7HQ\ndvfhjxpv18YKM0UJISQRuKATQkgicEEnhJBEYLXFPaBT/QGgTZs2UfZhT6k2fq4NTZo0McdIVwr0\n5Q/233//KPsQUN3weMWKFUanffQ+nVv7N30Kvbblk08+aXR/+tOfojxv3jyj01UMAVvBz1c/nDZt\nWpRPPvlko9NNon0o5KOP7srM16GXAHDfffdFWYeBAsDXX38dZd1pyYeIFgv+2OnG4N6Hrs8xwIag\n+m39tV4I8A6dEEISgQs6IYQkQnH+RquGSy6xdYuqCmd66aWX6ns6jR4R2S0j7zt85qR2pejwRsA2\nfPChaKeeemqUfUVDneX54YcfZv3Mn/3sZ0b33HPPRfnMM880Ou1G8Z/jG2xoV4c/V3SIpXcx6c/R\nDYkB4J577omyr/yoG19rnc9gLRZ843bvEtPMmDHDjHXFyxQahPAOnRBCEoELOiGEJAIXdEIISQT6\n0PeATq32+PC21157rb6n0+ipqKgwPmadXq3D/QAbDujDwnTFwVatWhmd7hh0yy220c4dd9wRZR0y\nCdj0fl35EAB+/etfR/m2224zOh1eCVgf/tKlS41Of3dfMuCQQw6Jsg43BOxzgquvvtroHnjggSj7\n46QrUeqwRd/lKWX0s4pf/epXRqePqy+p8OCDD5qxDwktdHiHTgghicAFnRBCEoELOiGEJAJ96Bl0\np/DevXtn3W7WrFlmrP2nxUqzZs1w6KGHxrHu7vPBBx+YbbVvvKpUa1/qdv78+VGePHmy0V1//fVR\n1uVkAWtL71/X5Wy9D9vnF6xcuTLKixcvNrrrrrsuygsWLDC69evXR1mnpAPA5ZdfHuWnn37a6PSx\n8O/Tx1d3ZKoqXyI1dPepqp55+TLMH3/8sRkXYnp/VRTPGUAIIYnDBZ0QQhKBLpcMv/3tb7Pq9M8y\nXT2P7EKHiumftRs3bjTbvfLKK1H2TZvPP//8KP/lL38xOt0l6IorrjA6ne7v3SraXeFTu88555wo\njxs3zuh8CKB+b/PmzY1Ouz2WLFlidGeffXaUly9fbnQ63d+77nQnLF9qQDci79OnT5R9SYSUufvu\nu6Osu2AB9nodNmyY0W3btq1+J9bA8A6dEEISgQs6IYQkAhd0QghJhKL1oev0aQDo1q1b1m116N3z\nzz9fb3MqVCorK01avT6WLVq0MNvq1PipU6canfYV+5Rt7W+fO3eu0ekSvTrVHgAGDx4c5b/97W9G\np1P4fYck311IlzPwHZO0n177+gFbMteH0OkuTBMnTjQ6Hd5ZUVGRdd66BHHK5XN9eeYLLrgg67a6\nTIR+FgGkF6bo4R06IYQkAhd0QghJhKJ1uZxwwglmfMwxx2TdVv8c9hXzyM7KhLo5sv7p77sS6ePe\no0cPo9PumTFjxhhdeXl5lHWVRMCGkmrXCGCbePvM1C5dukTZu2p0eCVgO9voUETAhk36TFHtOvGV\nJ3V2sv5+gHXH+CqROsO0c+fOUdZdjlLjoosuMmNfDVPzn//8J8q+mmIKjaCrgnfohBCSCNUu6CJy\nuIjMFJFFIrJQRH6Zeb2ViMwQkWWZ/7M38iONDto1TWjX4qYmd+gVAH4TQugO4HQAN4pIdwC3AigN\nIXQFUJoZk8KBdk0T2rWIqdaHHkJYA2BNRv5KRBYDOAzAQAB9M5v9HcAsALfs4SMaJdp/CVjfmvez\npUgu7VpZWWlCxTZs2BBln/qvK+P5MD7tm+7fv7/R6dICuvIiYFO/tT8bsKGQAwcONLqxY8dGec6c\nOUbnQxPPPPPMKPvnLTqlX4dQAvZ5gk7ZB4C33347yqecckpW3XHHHWd0ZWVlUdYVFrdv357U9apt\nPmLECKPT39v7wWfOnBnlysrKeppd46RWD0VFpBOAHgDeAdAuc/IAQBmAdlneMxzA8L2fIqlv6mrX\ngw8+uP4nSWoNr9fio8YPRUXkQAD/BHBzCGGT1oWdfyL3+Lg4hPBwCKFnCKFnnWZK6oVc2NUnaZGG\nh9drcVKjO3QRaYqdJ8fEEMIzmZfLRaRDCGGNiHQAsLa+Jlkf6NAvoOrwJd98IBVyZdeKigrjWtHh\neTfccIPZdsCAAVHWjZAB4K677oqyb9bQr1+/KD/77LNZ5/LEE0+YsQ7/++yzz4xO78OHIvbsadez\nKVOmRHn27NlGpyss+gxTnSl65JFHGp0Ot/zFL35hdDqMUn93wGaKrl27yzzfVYRM5XrVIZlVhRX7\nCorePsVETaJcBMB4AItDCDo4eAqAIRl5CADmxBcQtGua0K7FTU3u0HsDGAxggYh89zTqNgCjATwp\nIkMBfA7g/+pniqSeoF3ThHYtYmoS5fImgGxhH+fmdjokX9CuaUK7FjdFlfqvU8t9GrbG+1p1GB7Z\nnW+++QaLFi2KYx0S+uKLL5pttS/Up+lfe+21Uda+YcCWEBg6dKjR6dDAzZs3G50OQfW+VX0OaF87\nYMMGAXtO+AbWuhKgD3fU1f708wPAhlj6Ztr6XPWdjnRlSN3pqNC78fhw4csuuyzKvmqnrkDpw1jf\neOONKPtnY6ml+nuY+k8IIYnABZ0QQhKhqFwu+id39+7ds27nGxT7ioHEcsABB+D000+PY+0S8FUE\nBw0aFGVdCREA1q9fH2Xf4EK7R7xbQ9uytLTU6PS2unk1YKst+jBJH/6oK0PqUEQAGDVqVJR1liJg\n3THvvvuu0Q0fvit/x7uRrr/++ijfe++9RnfNNddEuW/fvlFevXo1ChmdGQpU7UrTGcC+Mqd2Q/nz\nKHV4h04IIYnABZ0QQhKBCzohhCSC5DOMR0TSjhkqIEIIOSspWVJSEnQlQ50Kr33rgO38o/2/gA1F\n058BAG3atImyP2e1T/uhhx4yOh36dvHFFxudfqaim1cDu6f+6+cCvoOQ5qSTTjJj7cP985//bHR6\nPr4qpX72MGzYMKPTYXr6ucPkyZNRXl6eM7vm+3r1YYvap+4LwOkQTV3pE0g2NHFeTerr8A6dEEIS\ngQs6IYQkAl0uRUouXS4HHXRQ0C6KkpKSKPfu3dtsqysl6vA7AJg1a1aUfXU9nampPx+wlQk7duxo\ndK+99lqUdSM
MAOjUqVOUfcjc0UcfbcY6xFKH0wHWBdK2bdus7/PZyboRtW4mDVg3y6ZNpvqtaXys\nw/mmT5+OL7/8smBdLqRK6HIhhJBiggs6IYQkAhd0QghJhKJK/Sf1Q5MmTUxYoQ7B8+n9y5Yti7IP\n/9P+dh+22KdPnyiPHj3a6HQD4WnTphmd7hI0adIko9ONn31T6j/84Q9m3K1btyjr8ErA+tt9CYGW\nLVtGuV0728ZT+/t9eOeJJ54Y5eeee87otO9f+959FUhSfPAOnRBCEoELOiGEJAJdLqTO7LfffsYl\noUMFfeMGXX1w3bp1RqdDB7XLAbAZkb4y4YIFC6JcVlZmdCtXrozycccdZ3StW7eO8sSJE41OVzsE\nbDOOxYsXG51uhqGrSQK2MYPeH2CrP/oMU12Z0TeX1lmr2m1TWVkJUtzwDp0QQhKBCzohhCQCF3RC\nCEmEfKf+rwPwOYA2ANZXs3m+KMa5HBlCKKl+s5pBu1YL7Zo7inUuNbJtXhf0uFORuTWpS5APOJfc\n0Zjmz7nkjsY0f86lauhyIYSQROCCTgghidBQC/rDDbTfPcG55I7GNH/OJXc0pvlzLlXQID50Qggh\nuYcuF0IISYS8Lugicr6ILBGR5SJyaz73ndn/IyKyVkQ+Uq+1EpEZIrIs83/Lqj4jR/M4XERmisgi\nEVkoIr9sqLnkAtrVzCUZ29KuZi4FYde8Legisg+ABwH0B9AdwFUi0j1f+88wAcD57rVbAZSGELoC\nKM2M65sKAL8JIXQHcDqAGzPHoiHmUido191Iwra0624Uhl1DCHn5B6AXgOlqPArAqHztX+23E4CP\n1HgJgA4ZuQOAJQ0wp+cBnNcY5kK70ra0a+HaNZ8ul8MArFLj1ZnXGpp2IYQ1GbkMQLuqNs41ItIJ\nQA8A7zT0XPYS2jULBW5b2jULjdmufCiqCDv/zOYt7EdEDgTwTwA3hxBMa/d8zyVlGuJY0rb1D+26\nO/lc0L8AcLgad8y81tCUi0gHAMj8vzYfOxWRpth5YkwMITzTkHOpI7SrIxHb0q6OQrBrPhf09wB0\nFZHOItIMwCAAU/K4/2xMATAkIw/BTt9YvSIiAmA8gMUhhDENOZccQLsqErIt7aooGLvm+UHCAABL\nAXwC4PYGeJDxBIA1AHZgp09wKIDW2Pl0ehmAVwG0ysM8zsDOn2YfApif+TegIeZCu9K2tGs6dmWm\nKCGEJAIfihJCSCJwQSeEkETggk4IIYnABZ0QQhKBCzohhCQCF3RCCEkELuiEEJIIXNAJISQR/h9E\nh87G85drhQAAAABJRU5ErkJggg==\n", 1027 | "text/plain": [ 1028 | "" 1029 | ] 1030 | }, 1031 | "metadata": {}, 1032 | "output_type": "display_data" 1033 | }, 1034 | { 1035 | "data": { 1036 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAACFCAYAAABL2gNbAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAGvNJREFUeJztnXuwlVX5x79L5SIXjZsEYqCJFGmIEoJgmoACmlZOFr8k\nZmI0LUsnp1JxsizHakpzlMkolVLGxMRApQiPF0RRAUMQEEEKwZBLcgA1TGj9/mC7+D4P7H1u++y9\nz7u/nxnH5z3Pe953nb32Xuz3u55LiDFCCCFEy+egcg9ACCFEcdCCLoQQGUELuhBCZAQt6EIIkRG0\noAshREbQgi6EEBlBC7oQQmSEJi3oIYTRIYRVIYQ1IYSrizUoUV40r9lFc5ttQmMTi0IIBwN4FcAo\nABsALAQwLsa4onjDE6VG85pdNLfZ55Am/O5gAGtijGsBIITwRwDnA8j75gghKC21QogxhjyuBs9r\nq1atYtu2bdNxx44dk33ooYeac995551kv/vuu/46yf7Pf/5jfIcffniyW7dubXy1tbV5fXv27En2\n+++/b3xt2rRJNo8fALZt25b3/u3btze+f/3rX8k+7LDDjO9///tfsg86yD4Qh7BvCvy4+e/ftWuX\n8e3evTvZ7dq1S/bOnTuxa9eufPMKNHBu9XmtKLbGGLvVdVJTFvQjAayn4w0ATmnC9URl0OB5bdu2\nLU466aR0fPrppyf7E5/4hDl34cKFyV66dKnxffjDH072smXLjG/MmDHJPuqoo4xv1qxZyf7IRz5i\nfLwwb9y40fj69u2b7OOOO874HnzwQXM8duzYZA8aNMj4brjhhmSPHDnS+N57771k8+ILAAcffHDe\ncb/00kvJXrNmjfFt2bIl2SeffHLeMR8AfWZbLuvqc1JTFvR6EUK4BMAlzX0fUVp4XvmbrmjZ6PPa\nsmnKpugbAPirUq/czwwxxikxxkExxkHeJyqSBs8rSyWioqlzbvV5bdk05Rv6QgB9QwhHY++b4ssA\n/q8ooxLlpMHzevjhh+Pcc89Nx/PmzUv2/PnzzblnnnlmslnGAIBXXnkl2eeff77x/fOf/0z2qaee\nanwf+9jHks36PbBXV873eytXrkz2kCFDjG/UqFHm+P7770+2l3xYYuJxAgAHHUyYMMH4Fi1alOzp\n06cb36ZNm5LNfx9gpam333472azX50Gf2YzT6AU9xrg7hHA5gDkADgZwV4xxedFGJsqC5jW7aG6z\nT5M09BjjbACzizQWUSFoXrOL5jbbNPumqMg+7777Ll544YV0fNlllyX7u9/9rjm3pqYm2V66YGmh\nZ8+exnf00Ucn++mnnzY+Dik85BD7lubr8HkAsH379mQvX26/qPpzjz322GRzpA5gQxWHDh1qfCwj\nPfvss8bHm8kDBw40vgEDBiT7nnvuMb5u3fZFr7H84kMfRfWh1H8hhMgIWtCFECIjaEEXQoiMIA1d\nNJlWrVoZrZozMjmDFLDp7j4VnlP4TzzxRONbsWJfdrrP6ly/fl/y41NPPWV8HP7IGZaA1bu7dOli\nfHfccYc57tWrV95zuYQB7xEANhuVsz8BmynqQw6fe+65ZL/22mvG95nPfCbZgwcPTnY9MkVFxtE3\ndCGEyAha0IUQIiNkWnL50Ic+ZI4ffvjhZHNYGAD069cv2b6IkyjMrl278Oqrr6bjxYsXJ/v66683\n5950003J9iGNv/71r5PtMz7Xrl2bbB+a2L1792T/8pe/NL6JEycm20slfB1fFfKss84yx3/729+S\n7eWRt956K9m+SiTfw0s+I0aMSPaLL75ofFzIa/To0cbHWav8+vqqjC0BluDYrotCZb8bWxLcS4Ac\nBlqosJqvvskVRdkG7BzVI7O3wegbuhBCZAQt6EIIkRG0oAshREbItIZ+2223meNhw4Yl+y9/+Yvx\n1Vc39xX7OCzN3+P444+v1zU9GzZsMMc///nPk80V+iqF1q1bmwYNrBWzng7YcLxnnnnG+M4555xk\n/+53vzM+ru
bITR0AYPPmzclesGCB8fG5XnvntHx/Tc/48eOTzdUVAauF+hICfHzLLbcYH+87sA4P\n2P0frhgJAB/96EeTzbozh3ZWEjxGPwddu3ZNti+3wK9Bp06djK9Q4xIu6eD1dK/Td+7cOdlf+tKX\njI/3Lnr37m18HCrrO2HxfohfZ7hEhp/XYqBv6EIIkRG0oAshREbItOTSo0ePvL6Pf/zj5vjxxx9P\ntn8U4mxHrm4H2PClpsCPgv4x8cgjj0w2SzqVQm1tLWbMmJGOx40bl/fcSy+9NNk7duwwPg7H41BE\nwIZ/+fA87rl5yim2ReZ///vfZPusVW5i4R+NOTMUsJUguU8oYJtE+x6q/H7xEhNngPqGGnPmzEl2\n//79jY//fn6dVq9ejUrAyxrc0co30eaM4E9/+tPGx1mwRxxxhPFxo2zfApFDDPk8YP8wQpZc2Pbj\n9tfhe/qG4r4/LMPNyCW5CCGEyIsWdCGEyAha0IUQIiNkWkMvRJ8+fQoelxoOOfPhZ5We0t2lSxdc\ndNFF6Zh1Sg4pBGxY4aBBtrE869RcBgCwYWs+vf7yyy9P9vPPP298PC6/N8HhlSNHjjQ+1k8B2zHJ\nN4Lmc30IHYe0+XIGX/ziF5PNjbUBGwq5bNky4+N9FH49/ZgrBX7dfYgf69Zf+MIXjI/3wFh7Bmya\nfqGSAXv27DHH/v78Ovv9MK6iyQ3FAeCJJ55INpeFAIA333wz2byHAzT/HOkbuhBCZAQt6EIIkRGq\nVnJpCPxo7h+vfIU2zv576KGHjI/D2zwsI/jQqkqndevWpuEzh3T5xsgsF3iZi7NDzz77bOPj7EwO\n4wSARx99NNk+dO8nP/lJsjlLD7CPw/yYDOyfucmNoV9//XXj47C1MWPG5P29devWGR+/dz71qU8Z\nH4df+qYZU6dOTTaH9hUrhLapeGmLZQ/fyJqblRQKG/Shojx3/nPFmdbTp083Pt9khLNTC1X49DIb\nj8dXTeRjvz40N/qGLoQQGUELuhBCZAQt6EIIkRGqVkO/8cYbzTFrnR4OUWqOdN2Wzo4dOzB37tx0\nzKF7vkoda9z+teQ0+WOOOcb4uIKdr2g4adKkZN97773Gd9111yXbV1T8+te/nmyvfXu9m9PSr7zy\nSuPjLkw+vI2v4/8mriboQ1U5pd+XBeAwRi6f4EP0KhGvhW/dujXZ/v3A+0q++fftt9+ebK58CNhq\niz5s0Ovd9Q15bGwXpFLPib6hCyFERqhzQQ8h3BVC2BxCeJl+1jmEMDeEsDr3/06FriEqD81rdtHc\nVi/1kVymArgdwB/oZ1cDqIkx/jSEcHXu+PvFH17D4TAkH97G+Opps2bNarYxVShTUaR5bdu2rcny\n5BA8H6a2dOnSZPusuWOPPTbZnKUH2OqYHTp0ML7Zs2cn22dqspTBFQwB4LLLLku2b/TrpaJNmzYl\n+zvf+Y7xcWMOHybH78d///vfxsfZj76i4uTJk5M9fPhw4+MGFz179jzQ9aaigj6zheQKnq833njD\n+DhUkMNPARuq6GUVDhX0DTUKNbzwclBjZZZyUuc39BjjPABvuR+fD+D3Ofv3AD5X5HGJZkbzml00\nt9VLYzdFu8cYP/in9U0A3fOdGEK4BMAljbyPKC2NmlefkCEqknrNrT6vLZsmb4rGvc8leZ9NYoxT\nYoyDYoyD8p0jKo+GzCs3FBCVT6G51ee1ZdPYb+ibQgg9YowbQwg9AGyu8zdKBOuLHBYG2BTtJ598\nMu81vJ7ZrVu3ZJ9xxhnGx9pxXbAuOn/+/Hr/Xglp1LwedNBBOPTQQ9Mxp1f7Mgac7u/3MTj12+up\nL7+c9vf22xvh0D2vg/Jc+mp+HArpO0H57kKf+9w+heKBBx4wPtbmuQQCYMPkfDcj1tA5zRywDcZ9\nBxx+bVhn9h2gHBXxmfW6ND/d1dbWGh/Puf8ss/Y9YsQI4+MqnlxaANj/PcfrwJQpU4yP903871Uq\njf2GPgvAhJw9AcDM4gxHlBnNa3bR3FYB9QlbvA/AAgD9QggbQggTAfwUwKgQwmoAI3PHogWhec0u\nmtvqpU7JJcaYr+PviDw/Lyvjx49Ptm80zI9N3BQBsA0VOBQM2L+xbWPhR35uEAzYx71f/epXRblf\nIYo5r3v27DHSyttvv51s36ibH5U5S9Cf269fP+PjUDSWdwD7uvpqeuedd16y//rXvxofP46fcMIJ\nxucbIXAmLFd+BOx76Rvf+IbxcYMPbpLhr+kzGHls3DwbsFUVOdv1gwzaSvvM8pz7ioosdfk559fA\nv+bcNNpLpFzt02eCesmHQ2y9JMdy1l133WV83FS8khrQKFNUCCEyghZ0IYTICFrQhRAiI4RSpreG\nEJr9Zqzleq21OfA6MIc6+TC1QrAOd8EFFxgf63XFIsaYv8xcA+ncuXPk0DHeg1i0aJE5l0MOvZ7K\nexU+ZZvDBn0XmH/84x8HvIb3cRo+YEMMfUcanyzFzZg///nPGx/Pne98xL67777b+Licga8YyOns\nXiPmRtj8etbU1GDbtm1Fm9difV55Lv3fMm3atGRz6Qf/e35Pg32FugL5EhI+/JCv40tRcNkKf507\n77wz2T/4wQ+Mr47w0cayuD65AfqGLoQQGUELuhBCZISqbXBRCP/YzE2IfaF9n/FZSHL51re+lWwO\nrwRs1uAvfvEL4+OsxWZ6nGsSrVq1Mo/+nPHHmbuAzYj0j9j8ez5rkB9//fxwJiW/joANG/QNvk89\n9dRk+8YU3OwbsPLQn//8Z+PjkDpuUAzYcMivfvWrxseVGTnUEwBGjhyZ7MWLFxvf2LFjkz1z5r78\nIF91sBLx8giHYPom13yu9/HnwGfZcoghN6cBbNMMwMosvqrlj370o2Rz8xUAuPjii5PNIZSAbYDi\nm40Xo2lGIfQNXQghMoIWdCGEyAha0IUQIiNkLmyRm/n66mmdOu3ruuW1cA4Fu/XWW42vWJXWOA35\niiuuML5rrrkm2VzdEbCa+ve+972ijKWYYYvdunWLHGp51llnJfvmm28253J1Sq9319TUJJu7CQG2\n288nP/lJ4+OQNt8hiTXULl26GB93T/KdjnwVTQ6B5VBIwKbt+3llvZ3T9AFbpfGFF14wPt4X8NUl\nOcSS7z1jxgxs2bKlosMWfSco7kTEewOA1Zufe+454+PP6Lx584yPK276kgoNgcNcfcckDqP13HPP\nPcn2ZTw4zLmBY1PYohBCVBNa0IUQIiNoQRdCiIyQOQ2d8Vrr9u3bk71u3bpSDqVOOH34hz/8ofFx\nqd2+ffsW5X7F1NB79+4deQ+A9yqefvppcy532/Ea+jnnnJPs119/3fj+8Id9Dey55Clg9ya4dCpg\nyxBwp3gA6NWrV7J92rfvaNWhQ4cDXhOwMes+94D1b59DwOf6OPS///3vyeY8CMDuUXDXpRUrVuCd\nd96pOA2d48l95yEuGXHSSScZH+cNPPvss8bHMfelWMP4PQ3YGPWvfOU
rxsdlHLxv1apVyW7g3pw0\ndCGEqCa0oAshREbItOTSkuDmyRy+530+XOrhhx9u1P2KKbm0adMmsgzBoWleVuEUat/Bh8sm+C4w\np5xySrJ9+jaHrXnJg7sUnXbaacbH4Yft27c3Pl9tkavt+fDHY445Jtk+vI5LBvi/l0MjH3/8cePj\n18lX9OTSBxz69uijj2Lr1q0VJ7mwJOYrXnKYqV+L+O8u5Tp1IHz1Tw6rveGGG4yPy038+Mc/Nr4/\n/elPyW5gpyNJLkIIUU1oQRdCiIygBV0IITKCyudWCFxa1Wut3IWpsZp5c9K+fXujcS9btizZPt2d\nw/gmTZpkfLw/4MMPubuP1yw5jNGXtuWyAD4Uko99yQAfHspz4vc4GF8umMPrvE7PIXwcFgkU1vc5\n9PP4449Pti8xWynwuLwWzT4OKwbKr5sX0v7HjRuXbD9O1tC5ZAVgyxk0B/qGLoQQGUELuhBCZIQW\nL7kMGDDAHPPjjs/E4tAzljFKBT9e+o5Fvrogw9mAlUibNm1M6B53IvIZmNwU+NprrzW+66+/PtkX\nXXSR8d1xxx3J9lXq+DHeZ1yefPLJyeam0ABw3HHHJdvLMVu2bDHHLLlwGKm/rv97OftxxowZxseP\n31whELDVP/k9DQATJkxINodwskRQSXDoqs/yZTnJd6LibFnfpLkQDZFq+DXz3ZQ4dNTP3Yknnphs\nnwHMkiNneQOSXIQQQtSTOhf0EMJRIYQnQggrQgjLQwhX5H7eOYQwN4SwOvf/TnVdS1QOmtdsonmt\nburzDX03gKtijP0BDAHwzRBCfwBXA6iJMfYFUJM7Fi0HzWs20bxWMXVq6DHGjQA25uydIYSVAI4E\ncD6AM3Kn/R7AkwC+3yyjLABrWQBw99135z13+fLlyeaqZ57HHnvMHK9Zs6be4+FU7wsvvND42rVr\nl+zRo0fX+5o+FK8YFHNe33vvPaMVsh7sO0Nxar5Pd584cWKyfdcmfl291sk6MqdWA7YSoq/Yx9U4\nvS7P9wNsmr7vAM/7B770AHd993s6CxcuTLYP0+SwxTPPPNP4unbtmuzf/OY3yd6+fXtFfl45JPPs\ns882vmHDhiXb6+Rr165N9oIFC4yP9zi8Zs5z6csmHHbYYeaYNfzzzjvP+PjYV4nke65evdr47r//\n/mT7vZnmDsVs0KZoCKEPgIEAngfQPffmAYA3AXTP8zuXALik8UMUzU1T55X/oRKVgz6v1Ue9N0VD\nCB0APAjgyhij2daNe//ZOeA/PTHGKTHGQfUpLCNKTzHm1X+7FOVHn9fqpF7VFkMIrQA8AmBOjPHm\n3M9WATgjxrgxhNADwJMxxn51XKfozxv+EYpDn3ylv0pm0aJFyfZ/0/Dhw5PtHyEbS4wxFGteO3bs\nGAcN2vf552a/XtrihhcnnHCC8fGjspc1OOOUmyQDNkzsa1/7mvEtWbIk2f6RnrP4evToYXwc0gjY\nMEIf/shVIkeNGmV8LCudfvrpxnfjjTcme/HixcbHzc5ZevD346Ygt956KzZs2FC0eS3W55Xlq4sv\nvtj4OEPav67cmNtLYiy7cTYwYEMRvczls2n5XB9yWijzdtu2bcm+9NJLjY8bkvj3XBMkl+JUWwx7\n/+I7Aaz84M2RYxaADwJiJwCY2ZhRivKgec0mmtfqpj4a+jAA4wEsCyF88HXnWgA/BTA9hDARwDoA\nF+b5fVGZaF6ziea1iqlPlMt8APlS0Ebk+bmocDSv2UTzWt1krmMRV0Hr189KhKyvcoPgUsHhfD6V\nmLXFhqQ5N5Zidiw65JBDIuv+3/72t5PtSxxcddVVya6trTU+1tT79+9vfKyF+sp3nPrvK/ZxyJyv\naMjhjr5puN/o5SqKXs/duHFjsjkUEbAlJrp3t4ElPB5/f34tuNIkYFPid+7cmexp06Zh06ZNFdex\niCtZ+n2TCy64INm8DwMAQ4YMOeA1cmNLdiFd3JdD8Osdp+J7H3cUWrlypfFdd911yX7mmWeMz3fU\nKhLqWCSEENWEFnQhhMgImZNcCsGhcP7xbuTIkclev3698XHjBc5sA4BXXnnFHD/00EPJ5owxwDY7\n8L9XaoopuXTo0CFyxi4/jr700kvm3COOOCLZHAoI2Kp1Xh7hUEWfjTlw4MBk++xTni//2Mxj4aqM\nwP6SC99/6dKlxsdhpffdd5/xcaXEoUOHGh+/B3yILTei9o2nP/vZzyZ77ty5yV6yZAl27txZcZIL\n4xtcsHzGVToBm53JrzFgG3t4CYzDHf3r6uUZllVWrFhhfLNnz062D7/lJiPNXUExhyQXIYSoJrSg\nCyFERtCCLoQQGaGqNHSxj2Jq6H369IncbYi7C3FTaMA2X2YdFADuvffeZHtdlENQfbgjlxrwGjaH\nyfnfY/2ZdWlg/z2Wn/3sZ8n2Gj7rqRyGBwDz5s1Lti8nwHr/4MGDjY9DITk0EbAhjnzNp556CrW1\ntRWtoRfCV9Es1E2IdetCnZp8Or+/DofD+vccH3tfGZCGLoQQ1YQWdCGEyAgtvkm0KD+7d+82FQBP\nO+20ZHsJhCvj+eL/XFfdhy0yvtnAAw88kGyWdADbpNc3KJ48eXKyf/vb3xofN6YAbBihb2jAj+O+\nEQNXEPShqueee26yCzXt8NmvLG+xTNXSKSRrNDY0sEQhhRWDvqELIURG0IIuhBAZQQu6EEJkBIUt\nVinFDFts165d5LBCDsHzqdasoXt9s2PHjsn2afrcXJhLKHhuueUWc8zdZLyGzSUDfAXHRx55xBxz\nhxofYsjNx7nRNWCbOHN5CQCYM2dOskeMsJVteZ/Aa+jcQJq7AVVqtUVRFBS2KIQQ1YQWdCGEyAgK\nWxRNpl27dka+4IYMPhyPM/dmzZplfCylcMVGALjtttuSzeF+gK2OyU0RANtwwmdqzpy5r60mV4sE\n9q/8x2GEAwYMML6amppks4wCAD179kz2iy++aHydOnVKNldlBKzMwk0y/DX59SzU1FhUB/qGLoQQ\nGUELuhBCZAQt6EIIkRFKHba4BcA6AF0BbC3ZjQtTjWPpHWPsVqyLaV7rRPNaPKp1LPWa25Iu6Omm\nISyqT0xlKdBYikcljV9jKR6VNH6NpTCSXIQQIiNoQRdCiIxQrgV9SpnueyA0luJRSePXWIpHJY1f\nYylAWTR0IYQQxUeSixBCZISSLughhNEhhFUhhDUhhKtLee/c/e8KIWwOIbxMP+scQpgbQlid+3+n\nQtco0jiOCiE8EUJYEUJYHkK4olxjKQaaVzOWzMyt5tWMpUXMa8kW9BDCwQAmAxgDoD+AcSGE/qW6\nf46pAEa7n10NoCbG2BdATe64udkN4KoYY38AQwB8M/dalGMsTULzuh+ZmFvN6360jHmNMZbkPwBD\nAcyh42sAXFOq+9N9+wB4mY5XAeiRs3sAWFWGMc0EMKoSxqJ51dxqXlvuvJZScjkSwHo63pD7Wbnp\nHmPcmLPfBNC9lDcPIfQBMB
DA8+UeSyPRvOahhc+t5jUPlTyv2hQl4t5/ZksW9hNC6ADgQQBXxhh3\nlHMsWaYcr6XmtvnRvO5PKRf0NwAcRce9cj8rN5tCCD0AIPf/zXWcXxRCCK2w940xLcY4o5xjaSKa\nV0dG5lbz6mgJ81rKBX0hgL4hhKNDCK0BfBnArDp+pxTMAjAhZ0/AXm2sWQkhBAB3AlgZY7y5nGMp\nAppXIkNzq3klWsy8lngjYSyAVwG8BmBSGTYy7gOwEcD72KsJTgTQBXt3p1cDeAxA5xKMYzj2Ppot\nBbAk99/YcoxF86q51bxmZ16VKSqEEBlBm6JCCJERtKALIURG0IIuhBAZQQu6EEJkBC3oQgiREbSg\nCyFERtCCLoQQGUELuhBCZIT/B4JymSDGTUrtAAAAAElFTkSuQmCC\n", 1037 | "text/plain": [ 1038 | "" 1039 | ] 1040 | }, 1041 | "metadata": {}, 1042 | "output_type": "display_data" 1043 | }, 1044 | { 1045 | "data": { 1046 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAACFCAYAAABL2gNbAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAG79JREFUeJztnXu0lFX5x787Q4qbekARRYUQb2hGouKlIhHRpAUSGViK\npuJa+iszLFEzXLhcmSXmLdDE1MALecXQBAFTw0wwvIHIReUiyEUSvOVB9+8PDtvv88CcM5wzZy7v\n+X7WYvG888zMu2f2zD7zfvdzCTFGCCGEqHy+UOoBCCGEKAxa0IUQIiNoQRdCiIygBV0IITKCFnQh\nhMgIWtCFECIjaEEXQoiM0KAFPYRwfAhhfghhYQhhRKEGJUqL5jW7aG6zTahvYlEIYTsArwPoA2AZ\ngOcBDIkxzi3c8ESx0bxmF81t9vliAx57GICFMcbFABBCuAdAfwA5PxwhBKWllgkxxpDDtc3z2rp1\n69i2bdt0vGTJkmS3a9cu5xg++eQTc9ysWbNkN2/e3PjWr1+f7DZt2uR8zo0bN5rjDz/8MNk77LCD\n8b3//vvJDsG+HV/60pdynoMf5/nCF+xF73bbbZfs1q1bG9+6deuS7V/v9ttvn+xVq1YZX8eOHZO9\nevXqZH/88ceorq7ONa/ANs6tvq9lxZoY48513akhC/ruAJbS8TIAhzfg+UR5sM3z2rZtW4wcOTId\nn3feeckeNGiQuS8vcEuXLjW+XXbZJdl777238U2dOjXZvXv3Nj5eRHmBA4DZs2cnu1+/fsY3c+bM\nZPMfEwDo2rVrznE//fTTOc//5S9/2fj4j8g3vvEN43vwwQeT3alTJ+Pr3Llzsq+//nrjGzVqVLJv\nvvnmZM+ZMwd1oO9s5fJWPndqyIKeFyGEYQCGNfZ5RHHheeVf56Ky0fe1smnIpuhyAHvQccea2wwx\nxltijD1ijD0acC5RPLZ5Xlu1alW0wYkGUefc6vta2TTkF/rzALqGEDpj04diMIBTCjIqUUq2eV4/\n++wzo3Hvvvvuyfaa9o477pjTx38YFi5caHzLli1L9oQJE4zvK1/5SrIPPvhg4xswYECy582bZ3ys\nafOYAWDatGnm+Oijj072IYccYnz8mvh9AIDlyz9fL2+99Vbjq6qqSvabb75pfCwB7bbbbjnH8vjj\njyd77tw69zb1nc049V7QY4wbQwj/B+BxANsBuC3G+GrBRiZKguY1u2hus0+DNPQY46MAHi3QWESZ\noHnNLprbbNPom6Ii+6xduxbjx49Px4ceemiyWVYAgA0bNiTbR318/PHHyZ48ebLxdejQIdk//elP\nje++++5L9uLFi42PZZz58+cbH0fZnHvuucZ3+OE2+OOjjz5Kts/d6N69e7J/97vfGV/fvn23OhbA\nRu4cdNBBxvfiiy8m20tT//rXv5J91FFHJfupp56CaNoo9V8IITKCFnQhhMgIWtCFECIjSEMXDaZl\ny5bo2bNnOmat+o033jD3Pfvss5PN2jdgNfSTTjrJ+Fg3vvbaa43vjDPOSPbbb79tfHvuuWeyfeo/\nhx/26GHDri+88EJzfMkllySbMzwBoFu3bsn2Gadr165NNmd/AnZ/wZcaOOecc5Lt9xPeeeedZHM4\n5//+9z+Ipo1+oQshREbQgi6EEBlBkotoMJ988onJdORwwAsuuMDct7aCWFx98NNPPzU+LoA1ZMgQ\n47vnnnuSzWGRgA13/O9//2t8LMdceeWVxnfZZZeZ4zvuuCPZXjp54YUXkn3ssccaH4cfsg0AO+20\nU7K5+Bdg36fPPvvM+FiaYpmlvqWwRXbQL3QhhMgIWtCFECIjaEEXQoiM0KQ0dA5b4/R0wGqfxxxz\njPH5kLba4M43N9xwg/FNmTIl2ayRAlYzfu+99/I+XznQpk0b9OnTJx1zOjq/ZsB24uFwPwA48sgj\nk81VBAGgS5cuyeZGGIBNqef7AcDgwYOT7cP6/v3vfyf78ssvN76xY8ea42uuuSbZixYtMr7HHnss\n2b4yIld/fPfdd42P9X7fzYjDEX2HpkmTJiWbq0RWV1ejkvFdo774xc+XJ95vAIATTjgh2X7fYv/9\n9082f94A28EKsPsqvhMVz4//HHM1Tg5NBbbc/ykm+oUuhBAZQQu6EEJkhFDMUKdiNJ3ljDtfze+u\nu+5Ktq8CWGr4cvy6664zPm6MsHLlyoKcr5Ym0dtMVVVV5Mterg7oZRXuuclhioBthuwvjXleW7Zs\naXwDBw5Mtm9w8fDDDye7f//+xsf9R1955RXj82PjS3yf4brzzp/37l2wYIHx7bPPPsn2zS+4guP0\n6dONj2UkPjcAPP/888neb7/9kn3NNddgyZIlBZvXYn9fjzjiCOMbMWJEsrmiJWAlKi+rcIirD/n0\nshT7fYNvPvYyyltvfd7i039fH3jggWT7zxGffxvX3tn5dJHSL3QhhMgIWtCFECIjaEEXQoiMUPEa\neqdOnczxL3/5y2RzxbqGwKnWL7/8cq335c4zvoJefZkxY0ay+/Xrl3Ns20IhNfRdd901/uhHP0rH\nrGP7xsyM15Q5XNPrqRwO6LsSsU7PmjkAfPe73022Dy/jz74Pi/vggw/M8axZs5LtU/9Zh+WG1YDt\nfOS1d/4s+feCm1v/6U9/Mj4urcAa+rRp07Bu3bqy1tBbtGhhjocPH55sX+GS7+u1cJ47Dm8ENpWi\n2MyKFSuMz1f/5P0ev5ZwSCjv/fhz8N4PAPzlL39J9kMPPWR8XInU7xP51+iQhi6EEE0JLehCCJER\nKj5TlBsPAMCZZ55Zr+fhSzjfwIAb/3J24dY47LDDkv2tb33L+IYNG5Zsf2leG9/+9reTzZeBwJZZ\ni6Vg7dq1mDBhQjqeM2dOsl9//XVz35NPPjnZPnPym9/8ZrL9+8MyzpIlS4yPMwz5+QF7ST137lz
j\n40vjq6++2vi4giNgQyX9JT5XbVyzZo3x8SU+hykCwPHHH5/s0aNHGx+/hz7DdejQocnmcFH/+soF\nnh8vGf7kJz9Jtm+izRLE8uXLjY+re3LmLGDDBv18eImZQxO9zMffV589zmGTPgSa5TIObwRsAxYv\nl9YhueSFfqELIURG0IIuhBAZQQu6EEJkhIrX0LcF1rNuuukm45s6dWqyX3rppXqfgzV2r7fff//9\nyfYp4vly+umnm2PfWacUtG7dGkcffXQ6Zn3zj3/8o7kvhxH6bkbjxo1Ltq+Guffeeyfba5asiz76\n6KM5fV5PPffcc5PNoYDAliGnXInvySefNL4f/vCHyeYwRX9+HxrJVSl9eB3vjfjqklymgD+r3Dy6\nnODwQz/nPJe+2iLrzfy5AezeDO+TAPXv3PSPf/zDHC9cuDDZ/FkBgLPOOivZvtsU77H4zzE3O2+M\n+dIvdCGEyAh1LughhNtCCKtCCK/QbVUhhKkhhAU1/+9U23OI8kPzml00t02XfCSX2wHcCOBOum0E\ngGkxxqtCCCNqji8q/PAKy2mnnZbsZ555pujnZymCQ+YA4NRTT83rObiyXwO5HQWa1xijuezlLDqW\nsgAbZuclKW7ozA0zACuB9OrVy/gmTpyYbJ/Jy9Xudt11V+O79957k81NEYAtKzoyPqPx+uuvT7Zv\nRN22bdtks8QC2DDG3r17Gx+HKvpwRx7b3XffnWyShW5HGX1neV47duyY835eOhkzZkyyX3vtNeNr\njCYS/vycAerDg3l+uHEOYBuScCMMwFZLbYws/Tp/occYnwLwrru5P4DNbdDvADAAoqLQvGYXzW3T\npb6bou1jjJt3cVYCaJ/rjiGEYQCG5fKLsqJe8+rrXIiyJK+51fe1smnwpmjcdN2Q89ohxnhLjLFH\nPoVlRPmwLfPavHnzIo5MNJTa5lbf18qmvr/Q3wkhdIgxrgghdACwqs5HFBAOb/JdRmqDKyEWQ0P3\nnVRYJx0yZEi9npND5ADb1cXrt/WgXvNaXV1ttEEO1fLNj88444xke329a9euyX7kkUeMj7v0+JIK\n3FDah7dxqOiOO+5ofBwK6TVabi4N2IqHvvQAh2z6sFLWuH3ja94X8GPj97O2LkhcEsF3x3GU7DvL\nIZ+8vwLYdHffTYhDOf13idPmC6VF+7BJLunAVVwBG27p9XwOK33hhReMj/dD/PkK8Trq+wt9EoDN\nBSWGAni4lvuKykHzml00t02AfMIW7wbwLIB9QwjLQghnArgKQJ8QwgIAx9YciwpC85pdNLdNlzol\nlxhjLm2gd47bGx3WbH31QQ4n8touV3bz4UR8WduQSx+ujOirwPni/vnC2YD8/ED9ZZZCzmuzZs1M\naBrLYL6IP2ff+VBBbvLAUhJgL7H9+8pZe1OmTDG+Ll26JPtrX/ua8bGM4qUaXwmPpaNf/OIXxsfS\niW92wNKNb77Bmcsc3gjYUDjf3PrVV19NNjeQ/s9//gOg/L6z3CyEpTPAhpJ6+fTiiy9Otg8pZElu\nWxpF+O82Z3n6pvL8OfPfXT6Hb5xyxRVXJNtnILPkVJKwRSGEEJWBFnQhhMgIWtCFECIjVHyTaM/Y\nsWOTffbZZ+f9uPHjxyf7yiuvND6u7MahTMCWuuztt9+ebJ8SnC8+pHLkyJHJ9pX+6kshm0S3a9cu\ncicaDsHz6e4c0sXVFQGrPfpmvhySxw2pPTNnzjTHHDLnU/+bNWu21XEBWzaJ3rBhQ7K91sqhmL7T\nEWutX/3qV42P9204vBKwIXt+32T16tXJZg144sSJWLVqVdk1iWZtnDt6ATCdrrgROGBfm98r4qqF\nHBoK2KqJPG/+OQHgxBNPTPZtt91mfBwq6ddJ/jxefvnlxvfnP/852dvYCLo21CRaCCGaElrQhRAi\nI2ROcuHLfb6cA2xT3trwTXk5o9FfMvrmA/Xl6aefTvb3v/994+NL7EJRSMmlc+fOkWUhDs/zdV7a\ntWuXbJ9xyZmUw4cPNz4ODeRwNsC+dwceeKDxsQTi5bLf/OY3yfYyG4dXAjYb1DeJ5mbXvpkxv15f\nUXPgwIHJ9uG3LPn4cEduosGZh5MnT8aaNWvKTnJhWOYCbNNo33SG3zu/TnF2Joe7AjY7c/r06cbn\nm49/73vfS3ZtlUx9Q/Nbb7012b/97W+Nj+WhAq6vklyEEKIpoQVdCCEyghZ0IYTICJnT0BnflJc1\n9b59+xZzKACABx98MNnXXnut8XFoZGNo5p5CauhVVVXxuOOOS8esVfvGzHvttVeyN6eqb4b1TR+K\nxl2KfJo8H3uNlMPSHnroIePj97lz587Gx+GOgE399qFnvI/iww9ZM/YaPqfB+6qUHDLn94J+/etf\nJ5vT0+fMmYMNGzaUtYbu4dfpK4ny62Q9HbD7GL5qIevrvoSDr9rIz+NDDOfOnZtsv6fD5Tj8nlsj\nranS0IUQoimhBV0IITKCFnQhhMgImdbQPZyKz/oYsGVaeCHwXU5Gjx6d7GK+71ujkBr6DjvsEI88\n8sh0zJ2HfJf3ZcuWJdvPAWvhe+yxh/Hx83NqtX8eLv0AWN3ap/eznr948WLj43LAgNVpOe4dsFr4\nSSedZHwcv+w7CnH5XB+vzynqXiNu1apVsnnvZdq0aVi3bl1FaegM5xoAtmzE+eefb3xHHHFEsn23\np9o6mvn9j+eeey7ZY8aMMT4uwcGfW8Dq9EX6LktDF0KIpoQWdCGEyAj1bRJdkbAEUqiU/dqYP3++\nOS61zNJYtGjRAt27d0/HnPrsQxPffPPNZB9yyCHGx2n6PoyQQ8MGDRpkfJwaz817ARtG6MPLuEm0\nP9+9996b876TJ082Pq646VPE+RKf5REAmDdvXrKHDRtmfFyJ8e233zY+fr0/+MEPkl2MxueNiW+2\nvGjRomSzNALYypU+5NOXF2D8Z4Crlz777LPG99577yW7tu9uYzR7ri/6hS6EEBlBC7oQQmQELehC\nCJERMqehc7jXqFGjjO+iiy5Ktte9GO7MDdgwNQ6XArbsBs5wqj9gO8v//e9/z/m4SiOEYFKoOfys\nurra3JdLGHP3egAYMuTzZvW+KxDr1j7c8aCDDko2d6sBbFlaH5rKZVZ96dRTTjnFHM+ePTvZvhPW\njBkzku11YNbiu3XrZnxcutWHuPL75EvrcnkDTmWv7TNdLvgx8veVw10B4Kyzzkr217/+9ZyP82Ua\nWEP3naf++c9/mmPujNWpUyfj41BS/zyskzegC1HB0S90IYTICFrQhRAiI1S85OJDlLhC24gRI/J+\nHpZALrnkEuN78cUXk+2rrl199dU5n9NnqfElZJYkl40bN5qqipwR+dhjj5n7snzAFRQB4Iknnki2\nl1X23XffZJ9zzjnGx9Laj3/8Y+O77777ku0/K9zNxlfl812R+vTpk2yWagBgv/32S7bPYuVsR9/c\nmjOHffVNDgPt3bu38XGII382uXtROcEyi6+aOHTo0G
T7jk6c5evDDVla89Imd5tauXKl8fluU3xO\nL/NxyKvvROWbVpcL+oUuhBAZoc4FPYSwRwhhRghhbgjh1RDC+TW3V4UQpoYQFtT8v1NdzyXKB81r\nNtG8Nm3y+YW+EcDwGOMBAHoCOC+EcACAEQCmxRi7AphWcywqB81rNtG8NmHq1NBjjCsArKixN4QQ\n5gHYHUB/AL1q7nYHgCcBXLSVp2hUDj30UHPs9e9c+E46l112WbJZl/Tceeed5rh9+/bm2GvsjK88\nWEoKOa8tWrRAjx6fF4LjVHVfYZDDPLlKIQDstttuye7fv7/xcVq7TwPnCodLliwxPj7/3/72N+Nb\ntWpVstu0aWN8/hwcyur1Uw7N9KGRHFbo901Yz501a5bxceVJvy/Aj+OxfPrpp2X5feX34Oc//7nx\nnXbaacn2Kfycej916lTj+9WvfpVsnkfAhhH6MElfxZPXDx+6esIJJyTbfx74ecuppMc2bYqGEDoB\n6A7gOQDtaz48ALASQPscjxkGYNjWfKI8aOi8+pZwojzQ97XpkfemaAihFYD7AfwsxriefXHTn6it\n/pmKMd4SY+yRTy1fUXwKMa9cn1uUB/q+Nk3y+oUeQmiGTR+OCTHGB2pufieE0CHGuCKE0AHAqtzP\n0Hicfvrp9Xrc73//e3PsQ9Fy4TMfOZyuLvbff/+871sMCjWv77//vgkj44y/DRs2mPs+8sgjyeYM\nTwAYMGBAsv/whz8YH9+3efPmxvfaa68l24eKchMJ3zScL5u9VOKzWFu2bLnVcQLAXXfdlezDDz/c\n+PhSnTOFARuK5xtYs3TkG18vXbo02dz4Y3OFxnL7vnKoon/vWGbx0gXLZTfffLPxrV27Ntk+U5M/\nAz5M0Vf43GeffZLtm4zwHHiphsfm14TaaGx5Jp8olwBgHIB5McbR5JoEYHMQ6VAAD/vHivJF85pN\nNK9Nm3x+oR8F4FQAL4cQ5tTcdgmAqwBMDCGcCeAtACc3zhBFI6F5zSaa1yZMPlEuzwDIVfWnd47b\nRZmjec0mmtemTcWn/m8LN9xwQ7Kvu+66vB/HIW0+bPHEE0/M+3k41CpLhBCMrs2att834E483KQZ\nsGF9Xu9mrZg1ZMCGNPqKfZyy7avpcdcb3wTYb/TyPLN+6885Z84c4+NuRj68jjsYDR482Phefvnl\nZHPYH2A76/BzrF9v9j5Lhg8V5Oqbfv+D7+srVbKm7cOTufOVfxyHxnrNfOTIkebYN5hmOIzWV2Dl\nsfnzs6Zf7JBGpf4LIURG0IIuhBAZoUlJLlwJz19CMT179jTHnEXKjQfq4oILLjDHN954Y96PrSR8\npihLLj6TlmUPbhoBAAMHDky2zzBlycWHmLKs4c/H8oTPDubGz1z5ELCNmAEbmuYbTnCVSH+JzZmc\n/B4BtjGDl5E49O6vf/2r8fXr1y/ZnG1bygYXfG4fOsqyhs+IPeaYY5LNoaGATVi78MILjY8bkPhK\niByq6GU2H37I4/YhtjyXXbp0MT6Wtz788EPjK2XDC/1CF0KIjKAFXQghMoIWdCGEyAhNSkMfM2ZM\nsr1Gys2fjzvuOOPzoVa1wbq518zLqSpbIamurjYa8JQpU5Ltu0ZxxUMfGsgNfPv27Wt83EFol112\nMT7uCjR27Fjj45BCH8JWVVWV7Kuuusr4OKQRsBU4OVQQAFasWJFsHzbJnZBmzpxpfFw2YtGiRcbH\newg+tI4rUXIYre+6VExqa5rMeyq+gxWn3nsNncNDV69ebXxcMsCHuLKG7nVxH57K5Re40xFg58vP\nOT9vbZq539coeeq/EEKIykALuhBCZIRQTBkghJBNzaECiTEWLMatXbt2kTMpOYzLNw3gy9POnTsb\nH2eO+qxKDnfzYXH8OJ+pyXKZr1rIY/ENLri5AmCzAVliAWy45eTJk42PX4cPoWOZystBHJrpGyRz\n5Um+36xZs7B+/fqCzWsxvq8sSdQWdunnnI/943iu/PpW2/N4OFS1lKGINczOp6SxfqELIURG0IIu\nhBAZQQu6EEJkBGnoTZRCaujNmjWLHFo3atSoZPvOUByq58MPn3zyyWSfeuqpxsfp3R988IHxsWZ6\n8MEHG9/EiROT7cMku3XrlmzfoahXr17mmBtf+zDCPffcM9lekx03blyyOTQWsBX7Fi9ebHz8Gv35\nOEyPyxfcdNNNWL58eUVp6CJvpKELIURTQgu6EEJkBEkuTZRCSi5VVVWRs2t9yB9z4IEHJvuNN94w\nPm7o7JtfcGVCnxHJGX6XXnppzsf5bEOumOczGL08w00TfEPpjz76KNmDBg0yvpdeeinZvhECNyFm\n2QawGac+9JOPp0+fnuzx48dj5cqVklyyiSQXIYRoSmhBF0KIjKAFXQghMkKxNfTVAN4C0A7Amjru\nXiya4lj2ijHuXPfd8kPzWiea18LRVMeS19wWdUFPJw1hVj4CfzHQWApHOY1fYykc5TR+jaV2JLkI\nIURG0IIuhBAZoVQL+i0lOu/W0FgKRzmNX2MpHOU0fo2lFkqioQshhCg8klyEECIjFHVBDyEcH0KY\nH0JYGEIYUfcjCn7+20IIq0IIr9BtVSGEqSGEBTX/71TbcxRoHHuEEGaEEOaGEF4NIZxfqrEUAs2r\nGUtm5lbzasZSEfNatAU9hLAdgJsAnADgAABDQggHFOv8NdwO4Hh32wgA02KMXQFMqzlubDYCGB5j\nPABATwDn1bwXpRhLg9C8bkEm5lbzugWVMa8xxqL8A3AEgMfp+GIAFxfr/HTeTgBeoeP5ADrU2B0A\nzC/BmB4G0KccxqJ51dxqXit3XospuewOYCkdL6u5rdS0jzFu7vq7EkD7Yp48hNAJQHcAz5V6LPVE\n85qDCp9bzWsOynletSlKxE1/ZosW9hNCaAXgfgA/izGuZ1+xx5JlSvFeam4bH83rlhRzQV8OYA86\n7lhzW6l5J4TQAQBq/l9VjJOGEJph0wdjQozxgVKOpYFoXh0ZmVvNq6MS5rWYC/rzALqGEDqHELYH\nMBjApCKePxeTAAytsYdikzbWqIQQAoBxAObFGEeXciwFQPNKZGhuNa9ExcxrkTcSvgPgdQCLAFxa\ngo2MuwGsAFCNTZrgmQDaYtPu9AIATwCoKsI4jsamS7OXAMyp+fedUoxF86q51bxmZ16VKSqEEBlB\nm6JCCJERtKALIURG0IIuhBAZQQu6EEJkBC3oQgiREbSgCyFERtCCLoQQGUELuhBCZIT/B7aABXSh\n5/STAAAAAElFTkSuQmCC\n", 1047 | "text/plain": [ 1048 | "" 1049 | ] 1050 | }, 1051 | "metadata": {}, 1052 | "output_type": "display_data" 1053 | }, 1054 | { 1055 | "data": { 1056 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXQAAACFCAYAAABL2gNbAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAGrNJREFUeJztnXuwVWX5x79vCuIFS+R45KaSkgp4QQ1RTDQuCZaKmZef\n/YYmC22Q6WKNoDPWpE2NhVb2i6JB0TLACRUrplRM8AYqSlwDROXm0QOCeQHSQ+/vD46v3+eBvc8+\n5+yzL+t8PzOOzzrPPmuts5+9Xvb6rucSYowQQghR/Xys3CcghBCiOGhBF0KIjKAFXQghMoIWdCGE\nyAha0IUQIiNoQRdCiIygBV0IITJCqxb0EMJ5IYRVIYSXQggTinVSorwortlFsc02oaWFRSGEfQCs\nBjAcwEYAzwG4Isa4oninJ0qN4ppdFNvss28rfncggJdijC8DQAhhBoALAeT8cIQQVJZaIcQYQw5X\ns+O6//77x86dO+/Vt337drP98Y9/PNm7du0yvvfffz/ZIdjTa2hoSHanTp2Mj/fzsY/Zm07ej//y\nsnPnzpz79K/dZ599kv3uu+8anz8m07Fjx2Tvt99+xsd/kz/ejh07kn3YYYcZ33vvvZdsfn937tyJ\n999/P1dcgWbGVtdrRbElxljT1Itas6D3ALCBtjcCOL0V+xOVQbPj2rlzZ1xyySV79b344otme+TI\nkcnmhQkAXn311WTzAgoAW7duTfaxxx5rfG+//XayDzzwQOPj/fh/QFas+Ggd69evn/H95z//Mdv8\nD9EzzzxjfLxoe4444ohkf/KTnzS+N998M9n//e9/jW/JkiXJHjdunPE9//zzTdo50DVbvawr5EWt\nWdALIoQwFsDYtj6OKC0c14MOOqjMZyOKha7X6qY1D0U3AehF2z0bf2aIMU6JMZ4WYzytFccSpaPZ\ncd1///1LdnKiVTQZW12v1U1rHorui90PWIZi94fiOQD/E2Ncnud3pMlVCLk09JbEtaamJo4ePTpt\ns8zgtemXX3455zn16dMn2WvXrjW+f//738nevHmz8Y0YMSLZy5Yty7l/lk0Aq4V7XXzw4MFme/78\n+cnu3r278R166KHJrq2tNb4nn3wy2f5vOvPMM5P9yiuvGF/fvn2TvW6dvds+5JBDkn3UUUcle9Kk\nSdiwYUNODb25sdX1WlEsKuQf2RZLLjHGhhDCtQD+DmAfAHfmu+hFdaC4ZhfFNvu0SkOPMc4BMKdI\n5yIqBMU1uyi22abNH4qK7NOxY0f07NkzbXP2htfXWZJ47LHHjI8zPXwqIGfEXH311cb3wx/+MNkX\nX3yx8dXX1yf7hBNOML6pU6cm+4wzzjA+n2LYpUuXZHfo0MH4WK754IMPjI9TE7/4xS8a38KFC5N9\n8MEHG9/ixYuTPXDgQONbv359srds2bLXY4n2iUr/hRAiI2hBF0KIjKAFXQghMoI0dNFqdu7ciX/9\n619pm1PpWE8HrBZ+6qmnGl/v3r2TvXy5Tb645pprkv3ggw8a3ze+8Y1k19XVGd8nPvGJZPtKSk61\nfOqpp4zvnXfeMdv8LGDBggXGN2DAgGT7Ctc1a9Yku0ePHsbH6Z1Lly41Pq4qfemll4zvuOOOSzb/\nvb7atL3g20QwLU3Lrlb0DV0IITKCFnQhhMgI7UpyOeCAA5L9m9/8xviuvPLKnL/HKXRN3dY+8MAD\nyb755puNb/Xq1cnmbnrVTocOHUza4uuvv55s/3f26tVrr68DbNqd7954xx13JPvII480Pj6Glzy4\nqvPss882vuHDhyf76KOPNj6WagCbYujTD7mR1/333298p5/+Ue+rrl27Gh9/loYMGWJ8LBWwNAPY\nzxF3qKx2ecFLJ5zK+dWvftX4rr322mT7bpT8GfCpnL4ieO7cucmeM8em53PMX3vtNePL976XMw76\nhi6EEBlBC7oQQmQELehCCJERWtxtsUUHK3H3Nu5YB9hSc69n5iPf1JvmwF3z/vKXvxjf7bffnmwe\n9NBW5JlY1Gy6du0aL7jggrTNZfNvvPGGeS3H5OmnnzY+TtXzmiVr2ieeeKLxsRbudViOOb8OsIMx\nfLojTzMCrE7uy/t5cIZPMeRhHN26dTO+2bNnJ5uf7wDAvvt+9HjL95ufN29esi+88MJk//KXv8TG\njRuLFtdSXK8cLx/XmTNnJtsPB+H3J981mS+l0eP1do6574Z50003JdunsbZRO4aCui3qG7oQQmQE\nLehCCJERMi25cKobYCsK8+EHETD+/fKd93xKXaHMmjUr2ZySBew50KEYFFNy6d69e+QOiFx16bsI\ncjriSSedZHx/+tOfku1TDLnC9PDDDzc+HqLhb9t5NicP0ADsrTF3MASAc88912zzMf1MUR6+cf75\n5xvf73//+2R7WYXT63gQBmDfGy/BccdKfl9mzpyJ+vr6qpJc+HrxqcQcA3+dsZTh5ZBVq1Yl2w9Y\n+dSnPmW2OeXRH4OvdU5TBGwl8cMPP2x8t9xyS7L9QJdWVPNKchFCiPaEFnQhhMgIWtCFECIjZLr0\n3+up+Zg2bVqyubMfkD/1yKeUnXfeecm+9NJLje9zn/tcsg888EDj43Jyry2ffPLJyfbpdJXAjh07\nTLdATgn1WvTKlSuTfffddxsfv3crVqwwPk4h8ymN48ePT7bXU7lLIh8bsB0deSIRANx3331mmzVU\nr8Xz3/vHP/7R+HigM6cwAsDvfve7ZHOHSgDo169fsn1KJT/j4dYGzUnRKxd+EhWncvpnI/x8gLtW\nAsDEiROT7dNfOa3Uvyf+euX3/ZRTTjE+nn7Vv39/4+OB4/4z/re//S3Z/vPY1ugbuhBCZAQt6EII\nkREyLbk0hxtvvDHZzanu8t3bOPWObcCmsF1xxRU593nMMceYbb4t5EESlcLBBx+MYcOGpe1nn302\n2ZyOCQBDhw5NtpcgWE5iKQGwKX5+UARX4G7fvt347rzzzmRzNStg09vOOuss4/OponxML/lwOqI/\nNz5vXzU7aNCgZPvqU5YRnnjiCePjW3zuxNixY0dUOl5y4Wrd+fPnG9+mTZuS/dvf/tb4OFW0Ofjf\n++c//5lsP1SF3/d77rnH+Lw8xHC6Y6k7L+obuhBCZAQt6EIIkRG0oAshREbItIb+85//3GxzGiOn\nKQLAtm3b2vx8eAJKPg292mhoaEB9fX3a5hQ/r4Wzxu07Xg4cODDZfvIQP6vg7oaA1aZ/8YtfGB9P\nAvJl16z7cxsAfzzApj9yOh1gnxl8+ctfNj7u9jhy5Ejj446Bjz/+uPFxmqbfJ/uWLVuW7EqdgsWx\n9Dr/1q1bkz1p0iTj44lWvvS+FPCzK9+5lTuK+mdu/PmQhi6EEKJFNLmghxDuDCHUhxCW0c+6hBAe\nCSGsafz/Ifn2ISoPxTW7KLbtlya7LYYQzgbwLoB7Yoz9G392K4CtMcafhBAmADgkxnh9kwcrcbdF\nD6fJ+Q52fBtbLHwqHKde+ZQ9Zvr06Wb7K1/5SrK93NAK
hqBIcT388MMjywL83vpUPR7IwHIEYIdY\n+MpNHhzhO1r27t075/G4K55PNVu0aFGyfaooyyGATXfzrz3++OOT7T9HnNL4hz/8wfi405/vAsi3\n9HyegJWHjjvuuGTfddddqKurC8W6Zot1vXKqYnOqWYv4Wc8Jd1g855xzjG/GjBnJ5opfj08l5k6h\nXsprBcXpthhjnA9gq/vxhQA+rNu+G8BFzT49UVYU1+yi2LZfWvpQtDbG+GGDidcB1OZ6YQhhLICx\nLTyOKC0tiiv3OBcVS0Gx1fVa3bT6oWjcrdnkvDWLMU6JMZ5WyO2CqByaE1c/uEFUNvliq+u1umnp\nN/Q3QgjdYox1IYRuAOqb/I0KgEu924rLL7882VOmTDG+Qhe+H/3oR2a7FFpiIy2K665du/DWW2+l\nbdYbfRdBTlPzXQtZa62pqTE+1s1ZFwesbu41dC4f9/o269Re69y4caPZHjFiRLJ79eplfLxffg4A\n2FRJP+iYU/G8RsvdJk844QTje+GFF5LNaaBNTMMp2zXbiik9BeF1ed72z0K4SyJgU0J58DOwZywZ\nblnw61//2vjefPPNJs647WjpN/SHAIxptMcAmJ3ntaJ6UFyzi2LbDigkbXE6gGcAHBtC2BhCuArA\nTwAMDyGsATCscVtUEYprdlFs2y9NSi4xxlwljUNz/LzqOe00Kx9ydduECROM79RTTzXbPBSZqwub\ngru5+W5+bUEx47pjxw7TqY7lCT/Ig7sIcrofYAcle5nhzDPP3Os+ADscxA/s5UERXIkK2LRBP0CB\nBy8AtqOjH3x96KGHJtunAd9+++3J9tXBkydPTjZLUYCV57ykwFLVhg0b9jh2Vq5Z/rv9tcTSif+M\nscwyYMAA4xs9erTZ/uxnP5tsH1fGV6qyzDJ16lTjK3V1KKNKUSGEyAha0IUQIiNoQRdCiIzQZOl/\nUQ9W5tJ/LpP+/ve/b3yjRo1KNpcDAzadzqdBtZQHHnjAbF922WXJbus0LwCIMRZtonD37t3j2LEf\n1aKsXr062X5ILpf+c4dGwJbU8yQZv09fJs96sy/v55h36tTJ+LgLIHdM3Nu5ffe73022L/Xmc/MD\nnTmFzQ/45tS3iy6yhZs8QJufSQC2aycXdf35z3/Gli1bihbXcl+vPND56quvNj4e5M7PMACrYfuu\nnf7a5m3/rII7bt5www3Gx8PAOY7++EWkOKX/QgghqgMt6EIIkRHaleTCFXYnnXRSwb/HkktTcgj7\n/a35LbfckuyZM2cWfPy2oJiSi4/rt7/97WR76YLlA66GBPYcKsFw6qivuGR8J0auDvYpptzF0A/b\n8OfCw1G4KhawnRg5jRCwt/S+UnXevHnJ9mmaXCnr97l27dpk8wCRapdcvOTB16gfuM6fgXwdHP36\nlm/bDy555JFHks2SGwCsX78+2aWQSCHJRQgh2hda0IUQIiNoQRdCiIyQ6SHRHu7S15xnB6yRNfV7\n3LWPy/mBPVMVs0JNTQ0uueSStM3Dff2zCtbN/cBg1pF9Z0x+31nbBGwKm+/E+IUvfCHZPoVy0KBB\nyeaJSMCenfY45THfVCTfQmDz5s3J5s6IAHDyyScn2w94Zl3eDyHmDpZcEu9T9KoNn1LIQ7V9z/18\n12Fzrm3W370WzuX+HI/mHqOU6Bu6EEJkBC3oQgiREbSgCyFERmhXGvqYMWOSPX78eOPjiTHcqhXY\nc7JOPjh/+Mc//rHx9ejRI9nXX28Hrvsc5WqiU6dO6Nu3b9pmPdjnmvfv3z/ZDz30kPFxy1r/fvD7\n6ku9Z82alezhw4cb32233ZZsH0duk7xgwQLju+qqq8w255779sq33nprso844gjj4+cE/pkBtyl4\n/vnnjY814y5duhgfv0+co54vH7tS4XP2E71effXVZD/66KPGx+8z14kAthSf20kAe7aGyFdjws88\nfNuISkXf0IUQIiNoQRdCiIzQrkr/WwpPmrnuuuuMz99i+9vjXPzqV78y2yzBlEJ+actui1wWPW3a\nNPPaH/zgB8n26Yc9e/ZMtk/xO/3005Pdu3dv4/OyDsPpZj42+Y7nWwFw5z/fCZIlOt8WgAeF+1RI\n3vbplrwfLxssXbp0r69btWoVtm/fXtGl/14WYsnDTwyqra1Ntk/rZFnFd7HkCUb+vZs4caLZ5hYC\nfoD0yy+/nOyf/exnxjd37txk++u1jVoBqPRfCCHaE1rQhRAiI2hBF0KIjCANvZVwGh4AfOYzn0n2\nHXfcUfB+eLKOL0NvC4qpoXfu3DlyGXuvXr2S7bVonsJ+7733Gh8/j/B6N7cF8Fona/ZeC3/ssceS\n7UvjOYXt6KOPNj6/H9a7vdbLbZI5/gCwcePGZC9evNj4OP2QWzsDNmXPp0nyZ+W5555L9rx58/DW\nW2+VXUPPlz7pJ35xeianpgL2+Qe31ACsTu33ybHq16+f8X3+858329xSmVsqAFabf/rpp42Pn4Gt\nXLnS+LzeXySkoQshRHtCC7oQQmSEiq0U5dsoX/3HaVt8S1sOfJWarzJtD9TU1JghvsuWLUu2l/Sm\nT5+ebB/XdevWJdtLNcuXL0+2l0M2bdqUbK74BayU4rsWspTRlMzFg6n322+/nMf3ctCLL76YbE7D\nA4DDDjss2dw10Z+bl5+4UnbcuHHJ9rf+5cJfE/x++UpNlq98JS3H2cs4XLnpU4fPP//8ZPvpVpx+\nCtj30leDcrfFM844w/i4QtevQW0kuRSEvqELIURGaHJBDyH0CiH8I4SwIoSwPITwzcafdwkhPBJC\nWNP4/0Oa2peoHBTXbKK4tm8K+YbeAOC6GGNfAIMAjAsh9AUwAcDcGGMfAHMbt0X1oLhmE8W1HdOk\nhh5jrANQ12i/E0JYCaAHgAsBnNP4srsBPA7g+r3sokWwNjhp0iTj49LrCRPs55L1K1+Gffzxxxd0\nbK+XsS7uy75Hjx5ttgst/fe6W76J921BMePa0NCArVu3pm3WPn0Mvv71r/M5GB+X9D/xxBPGx5OP\n/D5PPPHEZLPuCdhnMb5Em8vtvfbt4ecCXgtn/dtr8Xw+XgfmDoteP2bq6urMNv8d/Hnctm1b2a5X\n1s39cwxOOR04cKDx8fW6bdu2gvd55ZVXJvuyyy4zPn7trl27jM+X5fNn0GvfnObKrQYAG1d/jHwd\nHNuaZj0UDSEcBWAAgIUAahs/PADwOoDaHL8zFsDYvflEZdDauPqLTVQGul7bHwU/FA0hHARgFoBv\nxRjNP1dx9z9zey1CiDFOiTGeVkhSvCg9xYirzxwQ5UfXa/ukoG/oIYQO2P3huDfGeH/jj98IIXSL\nMdaFELoBqC/mid100005fXz7/eCDDxof3/74W+yWfpPkW2VftdccWGa54IILjI8HK5eKYsV18+bN\nmDx5ctrm6juueARsVSd3UPQ+ljgAO3zZD5/gCsOHH37Y+Fhm4/MCrHTjux1ypz0A+PSnP53s1157\nzfi4a2T37t2
Njz+Pa9euNT6WeXwqJG9zeiNgb+P5dR9KBOW4XjnNdOjQocbHg7r5fQSs1ORlDZbL\n/LXL6Ya+UpRlFC+H+JRXTpX1Uh4PA/cxZ7nMd3ssJ4VkuQQAUwGsjDHeRq6HAHw4AmgMgNnFPz3R\nViiu2URxbd8U8g19MID/BbA0hPBhM4obAPwEwH0hhKsArANwaducomgjFNdsori2YwrJcnkSQK5u\nO0Nz/FxUOIprNlFc2zcVW/rP6V9e+2S89sj4obOlYPPmzcmeMWOG8U2dOjXZXiOuZmpra/Gd73wn\nbbO+yMOjAZua6LsPdujQIdk80BuwsfQpjRdffHGyfRorD6L2pfGcYuq190svtV9gZ86cmWyeiAPY\ncnLuhAgACxcuTLYve+eulL4TJOuy3D4AsO0MWCMudYocw1q4vybzTQXi0n/uPgnY98S3E8gHvye+\nS+O8efNybnPXTMC+nz6tmDtB+nMr57Bulf4LIURG0IIuhBAZoWIHXPBt7M0332x848ePL95JFQB3\n6fvpT39qfH/961/NNlfulbPrWlMUc8BFly5d4ogRI9I23/KuXr3avPZLX/pSsv2tOe+DUxgBm1Lm\nU9FYyvDSBacK+ltjTnn1Fb4+vY0rgn3MuRPjnDlzjG/YsGHJ9vn6POCjT58+xsefo3POOcf4lixZ\nkmxO4bz11luxfv36sgy4YMnFdzjkoRLnnnuu8bF85FMTWYLzMgZLHrNn24Qdljbr6212ppdOWFbx\nnyteG31qZL5K0TZaUzXgQggh2hNa0IUQIiNoQRdCiIxQsRq6aFuKqaH36NEjXnPNNWmbdUnfRZC1\nYT9gm8uwvfbO+/G/99577yW7Z8+exsepcFyi71/rtVWeigXY4dZPPfWU8V1//UdNC++66y7j406d\nvrSd9WPfwoL/Jq8t87MHTumdPHkyNm3aVPYh0U3sM6evlGtRFSINXQgh2hNa0IUQIiNUbKWoqB4a\nGhrMcALuHMnSAWArKX03TJYguKIUsCmHK1asMD4eoOCrKhcsWJBsX537ta99Ldk8hBrYcxgKVxGe\ncsopxseVsX64NXfs8wMcODXSS0X5qpz931hNSFZpW/QNXQghMoIWdCGEyAha0IUQIiNIQxetZufO\nnUZj5qk0nTp1Mq/lyUM8aQiwWjSn+wG2FYBPMXzhhReS7dMduWSfS8n9a4cMGWJ8fmIRtyzg4wG2\n++LgwYONj1MVfTdEnpL0yiuvGB+3QfDdJb/3ve8l+9lnn825f9H+0Dd0IYTICFrQhRAiI0hyEa3m\ngAMOMIO7N2zYkGzuSgfYalAvgRxzzDHJ9kObOY1w+PDhxjdr1qxke+mEJQk/KIU7LPoOihdddJHZ\nXrVqVbK95MMdHRctWmR8PMzYD1AYNWpUsr1cwvv0lbEsFXGXxnzDXkT7QN/QhRAiI2hBF0KIjKAF\nXQghMkKpuy1uBrAOQFcAW5p4ealoj+dyZIyxpumXFYbi2iSKa/For+dSUGxLuqCng4bwfCGtIEuB\nzqV4VNL561yKRyWdv84lP5JchBAiI2hBF0KIjFCuBX1KmY67N3QuxaOSzl/nUjwq6fx1Lnkoi4Yu\nhBCi+EhyEUKIjFDSBT2EcF4IYVUI4aUQwoRSHrvx+HeGEOpDCMvoZ11CCI+EENY0/v+QfPso0nn0\nCiH8I4SwIoSwPITwzXKdSzFQXM25ZCa2iqs5l6qIa8kW9BDCPgD+D8BIAH0BXBFC6Fuq4zcyDcB5\n7mcTAMyNMfYBMLdxu61pAHBdjLEvgEEAxjW+F+U4l1ahuO5BJmKruO5BdcQ1xliS/wCcAeDvtD0R\nwMRSHZ+OexSAZbS9CkC3RrsbgFVlOKfZAIZXwrkoroqt4lq9cS2l5NIDwAba3tj4s3JTG2Osa7Rf\nB1BbyoOHEI4CMADAwnKfSwtRXHNQ5bFVXHNQyXHVQ1Ei7v5ntmRpPyGEgwDMAvCtGOPb7Cv1uWSZ\ncryXim3bo7juSSkX9E0AetF2z8aflZs3QgjdAKDx//WlOGgIoQN2fzDujTHeX85zaSWKqyMjsVVc\nHdUQ11Iu6M8B6BNC6B1C6AjgcgAPlfD4uXgIwJhGewx2a2NtSgghAJgKYGWM8bZynksRUFyJDMVW\ncSWqJq4lfpAwCsBqAGsB3FiGBxnTAdQB+AC7NcGrAByK3U+n1wB4FECXEpzHWdh9a7YEwOLG/0aV\n41wUV8VWcc1OXFUpKoQQGUEPRYUQIiNoQRdCiIygBV0IITKCFnQhhMgIWtCFECIjaEEXQoiMoAVd\nCCEyghZ0IYTICP8PqD6o6olwkwcAAAAASUVORK5CYII=\n", 1057 | "text/plain": [ 1058 | "" 1059 | ] 1060 | }, 1061 | "metadata": {}, 1062 | "output_type": "display_data" 1063 | } 1064 | ], 1065 | "source": [ 1066 | "for i in range(5):\n", 1067 | " plt.subplot(131)\n", 1068 | " plt.imshow(images.numpy()[i].reshape(28,28),cmap='gray')\n", 1069 | " plt.subplot(132)\n", 1070 | " plt.imshow(images_noise.numpy()[i].reshape(28,28),cmap='gray')\n", 1071 | " plt.subplot(133)\n", 1072 | " plt.imshow(outputs.data.numpy()[i].reshape(28,28),cmap='gray')\n", 1073 | " plt.show()" 1074 | ] 1075 | }, 1076 | { 1077 | "cell_type": "code", 1078 | "execution_count": null, 1079 | "metadata": { 1080 | "collapsed": true 1081 | }, 1082 | "outputs": [], 1083 | "source": [] 1084 | } 1085 | ], 1086 | "metadata": { 1087 | "kernelspec": { 1088 | "display_name": "Python 2", 1089 | "language": "python", 1090 | "name": "python2" 1091 | }, 1092 | "language_info": { 1093 | "codemirror_mode": { 1094 | "name": "ipython", 1095 | "version": 2 1096 | }, 1097 | "file_extension": ".py", 1098 | "mimetype": "text/x-python", 1099 | "name": "python", 1100 | "nbconvert_exporter": "python", 1101 | "pygments_lexer": "ipython2", 1102 | "version": "2.7.11" 1103 | } 
1104 | }, 1105 | "nbformat": 4, 1106 | "nbformat_minor": 2 1107 | } 1108 | -------------------------------------------------------------------------------- /tutorials/readme.md: -------------------------------------------------------------------------------- 1 | Pytorch日本語チュートリアル 2 | ==== 3 | 4 | Pytorchのチュートリアル( http://pytorch.org/tutorials/ )の日本語化と補完。自分が躓いた部分も適当にノートチックにまとめたのでよしなに活用してください。初めての人は下の順番でやると良いと思います。 5 | 6 | ## Description 7 | 8 | ### Pytorch_Tutorials 9 | 10 | pytorchのチュートリアルから 11 | 12 | ### Learning_PyTorch_with_Examples 13 | 14 | pytorchのチュートリアルから 15 | 16 | ### Transfer_Learning_tutorial 17 | 18 | pytorchのチュートリアルから 19 | 20 | ### PyTorch_NLP 21 | 22 | pytorchのチュートリアルから(未完) 23 | 24 | ### Auto_Encoder_MNIST_from_Keras 25 | 26 | 自作 27 | 28 | 29 | ## Author 30 | 31 | [sh-tatsuno](https://github.com/sh-tatsuno) 32 | --------------------------------------------------------------------------------
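The last cells of the autoencoder notebook above corrupt one MNIST batch with Gaussian noise and display the original, noisy, and reconstructed digits side by side. The definition and training of `model_dae` sit in an earlier part of that notebook, so what follows is only a minimal standalone sketch of the same flow: the 784 -> 64 -> 784 encoder/decoder shape is an assumption chosen for illustration, and the idioms are updated to Python 3 and PyTorch >= 1.0 (`next(iter(...))` instead of `dataiter.next()`, no `Variable` wrapper).

```python
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import matplotlib.pyplot as plt


class DenoisingAutoEncoder(nn.Module):
    """Fully connected 784 -> 64 -> 784 autoencoder (assumed architecture)."""

    def __init__(self):
        super().__init__()
        self.encoder = nn.Sequential(nn.Linear(28 * 28, 64), nn.ReLU())
        self.decoder = nn.Sequential(nn.Linear(64, 28 * 28), nn.Sigmoid())

    def forward(self, x):
        # Flatten, encode, decode, then restore the (N, 1, 28, 28) image shape
        n = x.size(0)
        return self.decoder(self.encoder(x.view(n, -1))).view(n, 1, 28, 28)


train_loader = DataLoader(
    datasets.MNIST('data', train=True, download=True,
                   transform=transforms.ToTensor()),
    batch_size=64, shuffle=True)

model_dae = DenoisingAutoEncoder()  # in the notebook, this model is trained before this point

# Load one batch, corrupt it with Gaussian noise, and reconstruct it
images, labels = next(iter(train_loader))
images_noise = images + 0.5 * torch.randn(images.size())
with torch.no_grad():
    outputs = model_dae(images_noise)

# Original / noisy / reconstructed digits side by side for five samples
for i in range(5):
    for col, batch in enumerate((images, images_noise, outputs)):
        plt.subplot(1, 3, col + 1)
        plt.imshow(batch[i].numpy().reshape(28, 28), cmap='gray')
    plt.show()
```

The `Sigmoid` on the final layer keeps reconstructions in the `[0, 1]` pixel range produced by `ToTensor`; with an untrained model the third panel is noise, and it only becomes a clean digit after a training loop like the one defined earlier in the notebook.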