├── Neural_networks_with_keras.ipynb ├── Neural_networks_with_keras_solved.ipynb ├── README.md ├── convolution.ipynb ├── figures ├── activation.png ├── chainrule.png ├── deeper.png ├── descent.png ├── dropout.png ├── filter.png ├── graph.png ├── landscape.png ├── lasso.png ├── local.png ├── logistic.png ├── neuralnet.png ├── overfitting.png ├── perceptron.png ├── power.png └── universal.png ├── images ├── cnn.jpeg ├── conv_layer.gif ├── maxpool.jpeg ├── neural_net2.jpeg └── pool.jpeg ├── keras.ipynb ├── logistic.ipynb ├── perceptron.ipynb └── recurrent.ipynb /Neural_networks_with_keras.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 2, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": "stderr", 10 | "output_type": "stream", 11 | "text": [ 12 | "Using TensorFlow backend.\n" 13 | ] 14 | } 15 | ], 16 | "source": [ 17 | "from __future__ import division, print_function, generators\n", 18 | "\n", 19 | "import tensorflow as tf\n", 20 | "import keras\n", 21 | "\n", 22 | "import numpy as np\n", 23 | "import keras.datasets.mnist as mnist\n", 24 | "import keras.datasets.cifar10 as cifar10\n", 25 | "import matplotlib.pyplot as plt\n", 26 | "%matplotlib inline " 27 | ] 28 | }, 29 | { 30 | "cell_type": "markdown", 31 | "metadata": {}, 32 | "source": [ 33 | "## Schedule\n", 34 | "\n", 35 | "- What is MNIST? What is CIFAR? (10 minutes)\n", 36 | "\n", 37 | "- Loading images and converting them to numpy (5 minutes)\n", 38 | "\n", 39 | "- What is Keras? Creating a basic model, Callbacks, saving/loading a model, changing optimizer, loss (30 minutes)\n", 40 | "\n", 41 | "- Mini-Project: Train a feed forward NN on MNIST/CIFAR10 with Keras (No scaling) (15 minutes)\n", 42 | "\n", 43 | "- Preprocessing images: Scaling (5 minutes)\n", 44 | "\n", 45 | "- Mini-Project: Train a feed forward NN on MNIST/CIFAR10 with Keras (10 minutes)\n", 46 | "\n", 47 | "- Preprocessing images: Data augmentation and generators with Keras (10 minutes)\n", 48 | "\n", 49 | "- Mini-Project: Train a feed forward NN on MNIST/CIFAR10 using data augmentation with Keras (15 minutes)\n", 50 | "\n", 51 | "- Break (20 minutes)\n", 52 | "\n", 53 | "- Convolutional NN, Pooling, Flattening, etc (20 minutes)\n", 54 | "\n", 55 | "- How to build a CNN in Keras (10 minutes)\n", 56 | "\n", 57 | "- Mini-Project: Train CNN on MNIST/CIFAR10 (30 minutes)" 58 | ] 59 | }, 60 | { 61 | "cell_type": "markdown", 62 | "metadata": {}, 63 | "source": [ 64 | "## What is CIFAR10?\n", 65 | "\n", 66 | "More information at\n", 67 | "https://www.cs.toronto.edu/~kriz/cifar.html" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": null, 73 | "metadata": { 74 | "collapsed": true 75 | }, 76 | "outputs": [], 77 | "source": [ 78 | "# load dataset\n", 79 | "(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n", 80 | "\n", 81 | "# This is needed to know what each class means\n", 82 | "label_id_to_class_name = {0:'airplane', 1:'automovile', 2:'bird', 3:'cat', 4:'deer', 5:'dog', \n", 83 | " 6:'frog', 7:'horse', 8:'ship', 9:'truck'}\n", 84 | "\n", 85 | "print(x_train.shape[0], 'Number of train samples')\n", 86 | "print(x_test.shape[0], 'Number of test samples')\n", 87 | "print('x_train shape:', x_train.shape)\n", 88 | "\n", 89 | "# plot images\n", 90 | "for image_id in range(0, 5):\n", 91 | " plt.imshow(x_train[image_id])\n", 92 | " plt.title(\"The true label is %s\" % label_id_to_class_name[int(y_train[image_id])])\n", 93 | " 
plt.show()\n", 94 | " \n", 95 | "# This makes sure the image has the correct order in the axis for Tensorflow, it would be different for Theano backend\n", 96 | "x_train = x_train.reshape(x_train.shape[0], 32, 32, 3)\n", 97 | "x_test = x_test.reshape(x_test.shape[0], 32, 32, 3)\n", 98 | "\n", 99 | "# Convert values to floats, originally they are integers\n", 100 | "x_train = x_train.astype('float32')\n", 101 | "x_test = x_test.astype('float32')\n", 102 | "\n", 103 | "# Convert values of labels from 0 to 9 to categorical (one_hot encoding)\n", 104 | "y_train = keras.utils.to_categorical(y_train, 10)\n", 105 | "y_test = keras.utils.to_categorical(y_test, 10)" 106 | ] 107 | }, 108 | { 109 | "cell_type": "markdown", 110 | "metadata": {}, 111 | "source": [ 112 | "## What is MNIST?\n", 113 | "\n", 114 | "More information at http://yann.lecun.com/exdb/mnist/" 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": null, 120 | "metadata": { 121 | "collapsed": true, 122 | "scrolled": true 123 | }, 124 | "outputs": [], 125 | "source": [ 126 | "# load dataset\n", 127 | "(x_train, y_train), (x_test, y_test) = mnist.load_data()\n", 128 | "\n", 129 | "print(x_train.shape[0], 'Number of train samples')\n", 130 | "print(x_test.shape[0], 'Number of test samples')\n", 131 | "print('x_train shape:', x_train.shape)\n", 132 | "\n", 133 | "# plot images as gray scale\n", 134 | "for image_id in range(0, 5):\n", 135 | " plt.imshow(x_train[image_id], cmap=plt.get_cmap('gray'))\n", 136 | " plt.title(\"The true label is %s\" % str(y_train[image_id]))\n", 137 | " plt.show()\n", 138 | " \n", 139 | "# This makes sure the image has the correct order in the axis for Tensorflow, it would be different for Theano backend\n", 140 | "x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)\n", 141 | "x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)\n", 142 | "\n", 143 | "# Convert values to floats, originally they are integers\n", 144 | "x_train = x_train.astype('float32')\n", 145 | "x_test = x_test.astype('float32')\n", 146 | "\n", 147 | "# Convert values of labels from 0 to 9 to categorical (one_hot encoding)\n", 148 | "y_train = keras.utils.to_categorical(y_train, 10)\n", 149 | "y_test = keras.utils.to_categorical(y_test, 10)" 150 | ] 151 | }, 152 | { 153 | "cell_type": "code", 154 | "execution_count": null, 155 | "metadata": { 156 | "collapsed": true 157 | }, 158 | "outputs": [], 159 | "source": [ 160 | "print(x_train.shape, y_train.shape)" 161 | ] 162 | }, 163 | { 164 | "cell_type": "markdown", 165 | "metadata": {}, 166 | "source": [ 167 | "## Why use Keras?\n", 168 | "\n", 169 | "\"Keras is a high-level neural networks API, written in Python and capable of running on top of TensorFlow, CNTK, or Theano. \n", 170 | "It was developed with a focus on enabling fast experimentation. 
Being able to go from idea to result with the least possible delay is key to doing good research.\n", 171 | "\n", 172 | "Use Keras if you need a deep learning library that:\n", 173 | "\n", 174 | "- Allows for easy and fast prototyping (through user friendliness, modularity, and extensibility).\n", 175 | "\n", 176 | "- Supports both convolutional networks and recurrent networks, as well as combinations of the two.\n", 177 | "\n", 178 | "- Runs seamlessly on CPU and GPU.\" (Chollet, F.)\n", 179 | "\n", 180 | "More information at https://keras.io Descriptions about the functions and documentation are also taken from this website \n", 181 | "\n", 182 | "In summary, it makes your life way easier if you don't require to go to the level of granularity of Tensorflow" 183 | ] 184 | }, 185 | { 186 | "cell_type": "markdown", 187 | "metadata": {}, 188 | "source": [ 189 | "## How does it work?\n", 190 | "\n" 191 | ] 192 | }, 193 | { 194 | "cell_type": "markdown", 195 | "metadata": {}, 196 | "source": [ 197 | "To train a model we have 3 main steps:\n", 198 | "\n", 199 | " - Define your architecture (number of layers, type of layers, activations, etc)\n", 200 | " \n", 201 | " - Compile your model (Define optimizer, callbacks, etc)\n", 202 | " \n", 203 | " - Train your model (Fit model to your data)" 204 | ] 205 | }, 206 | { 207 | "cell_type": "markdown", 208 | "metadata": {}, 209 | "source": [ 210 | "### Define your architecture" 211 | ] 212 | }, 213 | { 214 | "cell_type": "code", 215 | "execution_count": 7, 216 | "metadata": { 217 | "collapsed": true 218 | }, 219 | "outputs": [], 220 | "source": [ 221 | "from keras.models import Sequential\n", 222 | "from keras.layers import Dense, Flatten, Dropout, Activation\n", 223 | "\n", 224 | "model = Sequential()\n", 225 | "model.add(Flatten(input_shape=(28, 28, 1))) # Images are a 3D matrix, we have to flatten them to be 1D\n", 226 | "model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n", 227 | "model.add(Dropout(0.5)) # drop a unit with 50% probability.\n", 228 | "model.add(Dense(150, kernel_initializer='orthogonal'))\n", 229 | "model.add(Activation('sigmoid'))\n", 230 | "model.add(Dense(10, kernel_initializer='normal', activation='softmax')) # last layer, this has a softmax to do the classification" 231 | ] 232 | }, 233 | { 234 | "cell_type": "markdown", 235 | "metadata": {}, 236 | "source": [ 237 | "The sequential model is one layer after the other, no fancy connections. You can also use the Functional API if you want to have connections which skip layers and so on.\n", 238 | "\n", 239 | "To add a layer all we have to do is call the add method and then add a layer object.\n", 240 | "\n", 241 | "The first layer needs to have the input shape specified. After this the sizes are inferred automatically except for the output layer.\n", 242 | "\n", 243 | "We need to use the flatten layer to flatten the input since it is an image, which in this case is a 3 dimensional matrix. It has height, width and depth. In the case of a grayscale image like the ones from MNIST it has 1 dimensional depth. If it is a color image it has 3 layers of depth (Red, Green, Blue).\n", 244 | "\n", 245 | "A dense layer is a layer in which all units are connected to all units in the next layer. This is the most usual type of layer. You can specify things like the number of units and how the weights in the units are initialized (kernel_initializer). 
You can also specify an activation function, by default a linear function is used (No activation).\n", 246 | "\n", 247 | "If you want to use dropout in your model you can just add it as an extra layer in between layers or activations. Same thing for batch normalization.\n", 248 | "\n", 249 | "At the end we create a layer which will help us with the classification. For this we use a softmax layer in which the number of units will match the number of classes we have in our data.\n", 250 | "\n" 251 | ] 252 | }, 253 | { 254 | "cell_type": "markdown", 255 | "metadata": {}, 256 | "source": [ 257 | "#### Dense Layer\n", 258 | "\n", 259 | "keras.layers.Dense(units, activation=None, use_bias=True, \n", 260 | " kernel_initializer='glorot_uniform', bias_initializer='zeros', \n", 261 | " kernel_regularizer=None, bias_regularizer=None, \n", 262 | " activity_regularizer=None, \n", 263 | " kernel_constraint=None, bias_constraint=None)\n", 264 | " \n", 265 | "Arguments\n", 266 | "\n", 267 | "- units: Positive integer, dimensionality of the output space.\n", 268 | "- activation: Activation function to use (see activations). If you don't specify anything, no activation is applied (ie. \"linear\" activation: a(x) = x).\n", 269 | "- use_bias: Boolean, whether the layer uses a bias vector.\n", 270 | "- kernel_initializer: Initializer for the kernel weights matrix.\n", 271 | "- bias_initializer: Initializer for the bias vector.\n", 272 | "- kernel_regularizer: Regularizer function applied to the kernel weights matrix. (L1, L2)\n", 273 | "- bias_regularizer: Regularizer function applied to the bias vector.\n", 274 | "- activity_regularizer: Regularizer function applied to the output of the layer (its \"activation\").\n", 275 | "- kernel_constraint: Constraint function applied to the kernel weights matrix. (non-negative, etc)\n", 276 | "- bias_constraint: Constraint function applied to the bias vector." 277 | ] 278 | }, 279 | { 280 | "cell_type": "markdown", 281 | "metadata": {}, 282 | "source": [ 283 | "### Compile the model\n", 284 | "\n", 285 | "Here we specify the loss, in our case categorical crossentropy. We can add an extra metric we want to measure, like accuracy\n", 286 | "\n", 287 | "We also specify the optimizer. Some examples are Adam and SGD" 288 | ] 289 | }, 290 | { 291 | "cell_type": "code", 292 | "execution_count": null, 293 | "metadata": { 294 | "collapsed": true 295 | }, 296 | "outputs": [], 297 | "source": [ 298 | "sgd = keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9)\n", 299 | "\n", 300 | "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])" 301 | ] 302 | }, 303 | { 304 | "cell_type": "markdown", 305 | "metadata": {}, 306 | "source": [ 307 | "#### Compile function\n", 308 | "\n", 309 | "Arguments\n", 310 | "\n", 311 | "- optimizer: String (name of optimizer) or optimizer object. (SGD, RMSprop, Adam, Adagrad, etc)\n", 312 | "- loss: String (name of objective function) or objective function. You can also have multiple loss functions\n", 313 | "- metrics: List of metrics to be evaluated by the model during training and testing. \n", 314 | "- sample_weight_mode: If you need to do timestep-wise sample weighting (2D weights), set this to \"temporal\". None defaults to sample-wise weights (1D).\n", 315 | "- weighted_metrics: List of metrics to be evaluated and weighted by sample_weight or class_weight during training and testing." 
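As a concrete illustration (a minimal sketch, not part of the original notebook; the regularizers module is standard Keras, and reg_model is just an illustrative name), here is a model that uses one of the regularizer arguments listed above and is compiled with the SGD optimizer object defined earlier instead of a string name:

    from keras import regularizers

    # hypothetical model for illustration only
    reg_model = Sequential()
    reg_model.add(Flatten(input_shape=(28, 28, 1)))
    # kernel_regularizer adds an L2 penalty on this layer's weight matrix to the loss
    reg_model.add(Dense(200, activation='relu', kernel_regularizer=regularizers.l2(0.01)))
    reg_model.add(Dense(10, activation='softmax'))
    # passing the optimizer object (rather than the string 'adam') lets us control lr, momentum and decay
    reg_model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])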
316 | ] 317 | }, 318 | { 319 | "cell_type": "markdown", 320 | "metadata": {}, 321 | "source": [ 322 | "### Callbacks\n", 323 | "\n", 324 | "A callback is a set of functions to be applied at given stages of the training procedure. You can use callbacks to get a view on internal states and statistics of the model during training.\n", 325 | "\n", 326 | "Some examples:" 327 | ] 328 | }, 329 | { 330 | "cell_type": "code", 331 | "execution_count": null, 332 | "metadata": { 333 | "collapsed": true 334 | }, 335 | "outputs": [], 336 | "source": [ 337 | "from keras.callbacks import EarlyStopping\n", 338 | "\n", 339 | "early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto')\n", 340 | "my_callbacks = [early_stopping]" 341 | ] 342 | }, 343 | { 344 | "cell_type": "markdown", 345 | "metadata": {}, 346 | "source": [ 347 | "### Train the model" 348 | ] 349 | }, 350 | { 351 | "cell_type": "code", 352 | "execution_count": null, 353 | "metadata": { 354 | "collapsed": true 355 | }, 356 | "outputs": [], 357 | "source": [ 358 | "history = model.fit(x_train, y_train, validation_split=0.2, epochs=10, batch_size=100, verbose=1, callbacks=my_callbacks)" 359 | ] 360 | }, 361 | { 362 | "cell_type": "markdown", 363 | "metadata": {}, 364 | "source": [ 365 | "#### Fit function\n", 366 | "\n", 367 | "Arguments\n", 368 | "\n", 369 | "- x: Numpy array of training data. \n", 370 | "- y: Numpy array of target (label) data.\n", 371 | "- batch_size: Integer or None. Number of samples per gradient update. If unspecified, it will default to 32.\n", 372 | "- epochs: Integer. Number of epochs to train the model. An epoch is an iteration over the entire x and y data provided.\n", 373 | "- verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch.\n", 374 | "- callbacks: List of keras.callbacks.Callback instances. List of callbacks to apply during training.\n", 375 | "- validation_split: Float between 0 and 1. Fraction of the training data to be used as validation data.\n", 376 | "- validation_data: tuple (x_val, y_val) or tuple (x_val, y_val, val_sample_weights) on which to evaluate the loss and any model metrics at the end of each epoch.\n", 377 | "- shuffle: Boolean (whether to shuffle the training data before each epoch)\n", 378 | "- class_weight: Optional dictionary mapping class indices (integers) to a weight (float) value, used for weighting the loss function (during training only).\n", 379 | "- sample_weight: Optional Numpy array of weights for the training samples, used for weighting the loss function (during training only).\n", 380 | "- initial_epoch: Epoch at which to start training (useful for resuming a previous training run).\n", 381 | "- steps_per_epoch: Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. When training with input tensors such as TensorFlow data tensors, the default None is equal to the number of samples in your dataset divided by the batch size, or 1 if that cannot be determined.\n", 382 | "- validation_steps: Only relevant if steps_per_epoch is specified. Total number of steps (batches of samples) to validate before stopping." 383 | ] 384 | }, 385 | { 386 | "cell_type": "markdown", 387 | "metadata": {}, 388 | "source": [ 389 | "### History object\n", 390 | "\n", 391 | "Fit function returns a history object. This object stores information about your model while it was training. 
For example if you would like to plot the training loss and validation loss across the number of epochs you could do this:" 392 | ] 393 | }, 394 | { 395 | "cell_type": "code", 396 | "execution_count": null, 397 | "metadata": { 398 | "collapsed": true 399 | }, 400 | "outputs": [], 401 | "source": [ 402 | "print(\"Keys for history object\", history.history.keys())\n", 403 | "\n", 404 | "train_loss = history.history['loss']\n", 405 | "valid_loss = history.history['val_loss']\n", 406 | "epochs = list(range(1, len(train_loss)+1))\n", 407 | "\n", 408 | "plt.plot(epochs, train_loss, label=\"train loss\")\n", 409 | "plt.plot(epochs, valid_loss, label=\"validation loss\")\n", 410 | "plt.xlabel('Epochs')\n", 411 | "plt.ylabel('Loss')\n", 412 | "plt.legend(loc='upper right')\n", 413 | "plt.show()" 414 | ] 415 | }, 416 | { 417 | "cell_type": "code", 418 | "execution_count": null, 419 | "metadata": { 420 | "collapsed": true 421 | }, 422 | "outputs": [], 423 | "source": [ 424 | "train_accuracy = history.history['acc']\n", 425 | "valid_accuracy = history.history['val_acc']\n", 426 | "\n", 427 | "epochs = list(range(1, len(train_accuracy)+1))\n", 428 | "\n", 429 | "plt.plot(epochs, train_accuracy, label=\"train accuracy\")\n", 430 | "plt.plot(epochs, valid_accuracy, label=\"validation accuracy\")\n", 431 | "\n", 432 | "plt.xlabel('Epochs')\n", 433 | "plt.ylabel('Accuracy')\n", 434 | "plt.legend(loc='lower right')\n", 435 | "plt.show()" 436 | ] 437 | }, 438 | { 439 | "cell_type": "markdown", 440 | "metadata": {}, 441 | "source": [ 442 | "### Making predictions" 443 | ] 444 | }, 445 | { 446 | "cell_type": "code", 447 | "execution_count": null, 448 | "metadata": { 449 | "collapsed": true 450 | }, 451 | "outputs": [], 452 | "source": [ 453 | "predictions = model.predict(x_test)\n", 454 | "\n", 455 | "print(predictions.shape)\n", 456 | "print(np.argmax(predictions, axis=1)[0:10])\n", 457 | "print(np.argmax(y_test, axis=1)[0:10])" 458 | ] 459 | }, 460 | { 461 | "cell_type": "markdown", 462 | "metadata": {}, 463 | "source": [ 464 | "Arguments\n", 465 | "\n", 466 | "- x: the input data, as a Numpy array.\n", 467 | "- batch_size: Integer. If unspecified, it will default to 32.\n", 468 | "- verbose: verbosity mode, 0 or 1.\n", 469 | "- steps: Total number of steps (batches of samples) before declaring the prediction round finished. Ignored with the default value of None.\n", 470 | "\n", 471 | "Returns\n", 472 | "\n", 473 | "A Numpy array of predictions." 474 | ] 475 | }, 476 | { 477 | "cell_type": "markdown", 478 | "metadata": {}, 479 | "source": [ 480 | "### Loading and saving a model" 481 | ] 482 | }, 483 | { 484 | "cell_type": "code", 485 | "execution_count": null, 486 | "metadata": { 487 | "collapsed": true 488 | }, 489 | "outputs": [], 490 | "source": [ 491 | "from keras.models import load_model\n", 492 | "\n", 493 | "model_path = \"my_new_model.h5\"\n", 494 | "model.save(model_path)\n", 495 | "del model # deletes the existing model\n", 496 | "model = load_model(model_path)" 497 | ] 498 | }, 499 | { 500 | "cell_type": "markdown", 501 | "metadata": {}, 502 | "source": [ 503 | "# Mini-Project: Train you own fully connected neural network to classify handwritten digits\n", 504 | "\n", 505 | "- Train a logistic regression to create a benchmark\n", 506 | "- Train a neural network and compare. Start with a few layers, then experiment with more layers and different parameters.\n", 507 | "- Are you overfitting? Underfitting? How can you improve your model? Try other hyperparameters or adding regularization. 
Make some plots to understand the behaviour of your model.\n", 508 | "- Take a look at samples for which your model is predicting an incorrect label, what do you think is happening?" 509 | ] 510 | }, 511 | { 512 | "cell_type": "code", 513 | "execution_count": null, 514 | "metadata": { 515 | "collapsed": true 516 | }, 517 | "outputs": [], 518 | "source": [ 519 | "from keras.models import Sequential\n", 520 | "from keras.layers import Dense, Flatten, Dropout" 521 | ] 522 | }, 523 | { 524 | "cell_type": "markdown", 525 | "metadata": {}, 526 | "source": [ 527 | "## Logistic regression (Benchmark)" 528 | ] 529 | }, 530 | { 531 | "cell_type": "code", 532 | "execution_count": null, 533 | "metadata": { 534 | "collapsed": true 535 | }, 536 | "outputs": [], 537 | "source": [] 538 | }, 539 | { 540 | "cell_type": "markdown", 541 | "metadata": {}, 542 | "source": [ 543 | "## Shallow Neural Network" 544 | ] 545 | }, 546 | { 547 | "cell_type": "code", 548 | "execution_count": null, 549 | "metadata": { 550 | "collapsed": true 551 | }, 552 | "outputs": [], 553 | "source": [] 554 | }, 555 | { 556 | "cell_type": "markdown", 557 | "metadata": {}, 558 | "source": [ 559 | "### Visualize training" 560 | ] 561 | }, 562 | { 563 | "cell_type": "code", 564 | "execution_count": null, 565 | "metadata": { 566 | "collapsed": true 567 | }, 568 | "outputs": [], 569 | "source": [] 570 | }, 571 | { 572 | "cell_type": "markdown", 573 | "metadata": {}, 574 | "source": [ 575 | "### Visualize errors" 576 | ] 577 | }, 578 | { 579 | "cell_type": "code", 580 | "execution_count": null, 581 | "metadata": { 582 | "collapsed": true 583 | }, 584 | "outputs": [], 585 | "source": [ 586 | "predictions = model.predict(x_test)\n", 587 | "\n", 588 | "predicted_labels = np.argmax(predictions, axis=1) # transform back from one_hot encoding\n", 589 | "true_labels = np.argmax(y_test, axis=1)\n", 590 | "misclassified_samples_positions = np.where(predicted_labels != true_labels)[0]\n", 591 | "\n", 592 | "for image_id in misclassified_samples_positions[0:5]:\n", 593 | " print(image_id)\n", 594 | " print(x_test[image_id].shape)\n", 595 | " plt.imshow(x_test[image_id].reshape(28, 28), cmap=plt.get_cmap('gray'))\n", 596 | " plt.title(\"The true label is %s but was classified as %s\" % (str(true_labels[image_id]), \n", 597 | " str(predicted_labels[image_id])\n", 598 | " ))\n", 599 | " plt.show()" 600 | ] 601 | }, 602 | { 603 | "cell_type": "markdown", 604 | "metadata": {}, 605 | "source": [ 606 | "## Scaling matters" 607 | ] 608 | }, 609 | { 610 | "cell_type": "code", 611 | "execution_count": null, 612 | "metadata": { 613 | "collapsed": true 614 | }, 615 | "outputs": [], 616 | "source": [ 617 | "# Scale the inputs\n", 618 | "x_train /= 255.0 # The image is in grayscale and has values between 0 and 255\n", 619 | "x_test /= 255.0" 620 | ] 621 | }, 622 | { 623 | "cell_type": "markdown", 624 | "metadata": {}, 625 | "source": [ 626 | "# Mini-Project: Train you own fully connected neural network to classify handwritten digits (Now with scaled data)" 627 | ] 628 | }, 629 | { 630 | "cell_type": "markdown", 631 | "metadata": {}, 632 | "source": [ 633 | "- Train a logistic regression to create a benchmark\n", 634 | "- Train a neural network and compare. Start with a few layers, then experiment with more layers and different parameters.\n", 635 | "- Are you overfitting? Underfitting? How can you improve your model? Try other hyperparameters or adding regularization. Make some plots to understand the behaviour of your model." 
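If you want to go beyond dividing by 255, here is a small sketch of an alternative preprocessing step (plain NumPy, not part of the exercise; the statistics are computed on the training set only and then applied to the test set):

    # standardize pixels instead of only rescaling them to [0, 1]
    mean = x_train.mean()
    std = x_train.std()
    x_train_standardized = (x_train - mean) / (std + 1e-7)  # small epsilon guards against division by zero
    x_test_standardized = (x_test - mean) / (std + 1e-7)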
636 | ] 637 | }, 638 | { 639 | "cell_type": "markdown", 640 | "metadata": {}, 641 | "source": [ 642 | "## Extra credit\n", 643 | "\n", 644 | "- Try shuffling the order of the features before training (move the 5th pixel to a new random position) but do the same shuffling for all samples and retrain you model. How does it perform compared to the original model?" 645 | ] 646 | }, 647 | { 648 | "cell_type": "markdown", 649 | "metadata": {}, 650 | "source": [ 651 | "## Logistic regression" 652 | ] 653 | }, 654 | { 655 | "cell_type": "code", 656 | "execution_count": null, 657 | "metadata": { 658 | "collapsed": true 659 | }, 660 | "outputs": [], 661 | "source": [ 662 | "# Define model architecture\n", 663 | "model = Sequential()\n", 664 | "model.add(Flatten(input_shape=(28, 28, 1))) # Images are a 3D matrix, we have to flatten them to be 1D\n", 665 | "model.add(Dense(10, kernel_initializer='normal', activation='softmax'))\n", 666 | "\n", 667 | "# Compile model\n", 668 | "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])" 669 | ] 670 | }, 671 | { 672 | "cell_type": "code", 673 | "execution_count": null, 674 | "metadata": { 675 | "collapsed": true, 676 | "scrolled": true 677 | }, 678 | "outputs": [], 679 | "source": [ 680 | "# Train the model\n", 681 | "log_history = model.fit(x_train, y_train, validation_split=0.2, epochs=10, batch_size=100, verbose=2)" 682 | ] 683 | }, 684 | { 685 | "cell_type": "markdown", 686 | "metadata": {}, 687 | "source": [ 688 | "## Shallow neural network" 689 | ] 690 | }, 691 | { 692 | "cell_type": "code", 693 | "execution_count": null, 694 | "metadata": { 695 | "collapsed": true 696 | }, 697 | "outputs": [], 698 | "source": [ 699 | "# Define model architecture\n", 700 | "model = Sequential()\n", 701 | "model.add(Flatten(input_shape=(28, 28, 1))) # Images are a 3D matrix, we have to flatten them to be 1D\n", 702 | "model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n", 703 | "model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n", 704 | "model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n", 705 | "model.add(Dense(10, kernel_initializer='normal', activation='softmax'))\n", 706 | "\n", 707 | "# Compile model\n", 708 | "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])" 709 | ] 710 | }, 711 | { 712 | "cell_type": "code", 713 | "execution_count": null, 714 | "metadata": { 715 | "collapsed": true 716 | }, 717 | "outputs": [], 718 | "source": [ 719 | "# Train the model\n", 720 | "nn_history = model.fit(x_train, y_train, validation_split=0.2, epochs=10, batch_size=100, verbose=2)" 721 | ] 722 | }, 723 | { 724 | "cell_type": "markdown", 725 | "metadata": {}, 726 | "source": [ 727 | "### Visualize training" 728 | ] 729 | }, 730 | { 731 | "cell_type": "code", 732 | "execution_count": null, 733 | "metadata": { 734 | "collapsed": true 735 | }, 736 | "outputs": [], 737 | "source": [ 738 | "log_train_loss = log_history.history['loss']\n", 739 | "log_valid_loss = log_history.history['val_loss']\n", 740 | "nn_train_loss = nn_history.history['loss']\n", 741 | "nn_valid_loss = nn_history.history['val_loss']\n", 742 | "epochs = list(range(1, len(log_train_loss)+1))\n", 743 | "\n", 744 | "plt.plot(epochs, log_train_loss, label=\"log reg train loss\")\n", 745 | "plt.plot(epochs, log_valid_loss, label=\"log reg validation loss\")\n", 746 | "plt.plot(epochs, nn_train_loss, label=\"NN train loss\")\n", 747 | "plt.plot(epochs, nn_valid_loss, label=\"NN validation 
loss\")\n", 748 | "plt.xlabel('Epochs')\n", 749 | "plt.ylabel('Loss')\n", 750 | "plt.legend(loc='upper right')\n", 751 | "plt.show()\n", 752 | "\n", 753 | "log_train_accuracy = log_history.history['acc']\n", 754 | "log_valid_accuracy = log_history.history['val_acc']\n", 755 | "nn_train_accuracy = nn_history.history['acc']\n", 756 | "nn_valid_accuracy = nn_history.history['val_acc']\n", 757 | "epochs = list(range(1, len(log_train_accuracy)+1))\n", 758 | "\n", 759 | "plt.plot(epochs, log_train_accuracy, label=\"log reg train accuracy\")\n", 760 | "plt.plot(epochs, log_valid_accuracy, label=\"log reg validation accuracy\")\n", 761 | "plt.plot(epochs, nn_train_accuracy, label=\"NN train accuracy\")\n", 762 | "plt.plot(epochs, nn_valid_accuracy, label=\"NN validation accuracy\")\n", 763 | "plt.xlabel('Epochs')\n", 764 | "plt.ylabel('Accuracy')\n", 765 | "plt.legend(loc='lower right')\n", 766 | "plt.show()" 767 | ] 768 | }, 769 | { 770 | "cell_type": "markdown", 771 | "metadata": {}, 772 | "source": [ 773 | "### Visualize errors" 774 | ] 775 | }, 776 | { 777 | "cell_type": "code", 778 | "execution_count": null, 779 | "metadata": { 780 | "collapsed": true 781 | }, 782 | "outputs": [], 783 | "source": [ 784 | "predictions = model.predict(x_test)\n", 785 | "\n", 786 | "predicted_labels = np.argmax(predictions, axis=1) # transform back from one_hot encoding\n", 787 | "true_labels = np.argmax(y_test, axis=1)\n", 788 | "misclassified_samples_positions = np.where(predicted_labels != true_labels)[0]\n", 789 | "\n", 790 | "for image_id in misclassified_samples_positions[0:5]:\n", 791 | " print(image_id)\n", 792 | " print(x_test[image_id].shape)\n", 793 | " plt.imshow(x_test[image_id].reshape(28, 28), cmap=plt.get_cmap('gray'))\n", 794 | " plt.title(\"The true label is %s but was classified as %s\" % (str(true_labels[image_id]), \n", 795 | " str(predicted_labels[image_id])\n", 796 | " ))\n", 797 | " plt.show()" 798 | ] 799 | }, 800 | { 801 | "cell_type": "markdown", 802 | "metadata": {}, 803 | "source": [ 804 | "### Reload data before procedding" 805 | ] 806 | }, 807 | { 808 | "cell_type": "code", 809 | "execution_count": 3, 810 | "metadata": { 811 | "collapsed": true 812 | }, 813 | "outputs": [], 814 | "source": [ 815 | "# load dataset\n", 816 | "(x_train, y_train), (x_test, y_test) = mnist.load_data()" 817 | ] 818 | }, 819 | { 820 | "cell_type": "code", 821 | "execution_count": 4, 822 | "metadata": { 823 | "collapsed": true 824 | }, 825 | "outputs": [], 826 | "source": [ 827 | "# This makes sure the image has the correct order in the axis for Tensorflow, it would be different for Theano backend\n", 828 | "x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)\n", 829 | "x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)\n", 830 | "\n", 831 | "# Convert values to floats, originally they are integers\n", 832 | "x_train = x_train.astype('float32')\n", 833 | "x_test = x_test.astype('float32')\n", 834 | "\n", 835 | "# Convert values of labels from 0 to 9 to categorical (one_hot encoding)\n", 836 | "y_train = keras.utils.to_categorical(y_train, 10)\n", 837 | "y_test = keras.utils.to_categorical(y_test, 10)" 838 | ] 839 | }, 840 | { 841 | "cell_type": "markdown", 842 | "metadata": {}, 843 | "source": [ 844 | "## Data augmentation and generators\n", 845 | "\n", 846 | "Augmenting the data allows us to use more data for training by taking artificial modifications from the samples we already have. One example of this is rotating the image. 
This allows the model to learn some rotational invariance and helps prevent overfitting.\n", 847 | "\n", 848 | "To train a model on a dataset that is too large to load into RAM we can use generators. Generators allow us to pass only a small portion of the data at a time, so the whole dataset never has to sit in memory. In addition, once a generator has looped through all the data we can have it start again from the beginning, which lets us loop over the data indefinitely.\n", 849 | "\n", 850 | "Keras gives us a nice tool for augmenting data which uses a generator. It uses a generator because the modifications are done on the fly and it would take too much memory to save the augmented dataset. Keras also provides a method to fit the model when we use a generator, fit_generator(). This method requires us to specify how many batches count as one epoch, so Keras knows when an epoch ends; otherwise, since the generator loops through the data, training would go on indefinitely." 851 | ] 852 | }, 853 | { 854 | "cell_type": "code", 855 | "execution_count": 5, 856 | "metadata": { 857 | "collapsed": true 858 | }, 859 | "outputs": [], 860 | "source": [ 861 | "from keras.preprocessing.image import ImageDataGenerator\n", 862 | "\n", 863 | "data_generator = ImageDataGenerator(rescale=1./255,\n", 864 | " #featurewise_center=True,\n", 865 | " #featurewise_std_normalization=True,\n", 866 | " rotation_range=10,\n", 867 | " width_shift_range=0.1,\n", 868 | " height_shift_range=0.1,\n", 869 | " #horizontal_flip=True,\n", 870 | " zoom_range=0.1\n", 871 | " )\n", 872 | "\n", 873 | "# we fit the generator to the data. This is needed for calculating the mean and std.\n", 874 | "data_generator.fit(x_train)" 875 | ] 876 | }, 877 | { 878 | "cell_type": "markdown", 879 | "metadata": {}, 880 | "source": [ 881 | "### What does this look like?" 882 | ] 883 | }, 884 | { 885 | "cell_type": "code", 886 | "execution_count": null, 887 | "metadata": { 888 | "collapsed": true 889 | }, 890 | "outputs": [], 891 | "source": [ 892 | "augmented_data_generator = data_generator.flow(x_train[0:40000], y_train[0:40000], batch_size=100)\n", 893 | "augmented_batch, labels = augmented_data_generator.next()\n", 894 | "\n", 895 | "print(augmented_batch.shape)\n", 896 | "print(augmented_batch[0].shape)\n", 897 | "print(labels[0])\n", 898 | "\n", 899 | "# plot images as gray scale\n", 900 | "for image_id in range(0, 5):\n", 901 | " plt.imshow(augmented_batch[image_id].reshape(28, 28), cmap=plt.get_cmap('gray'))\n", 902 | " plt.title(\"The true label is %s\" % str(np.argmax(labels[image_id])))\n", 903 | " plt.show()" 904 | ] 905 | }, 906 | { 907 | "cell_type": "markdown", 908 | "metadata": {}, 909 | "source": [ 910 | "### Training a model using a generator" 911 | ] 912 | }, 913 | { 914 | "cell_type": "code", 915 | "execution_count": null, 916 | "metadata": { 917 | "collapsed": true 918 | }, 919 | "outputs": [], 920 | "source": [ 921 | "history = model.fit_generator(data_generator.flow(x_train[0:40000], y_train[0:40000], batch_size=100), \n", 922 | " steps_per_epoch=40000/100,\n", 923 | " validation_data=(x_train[40000:], y_train[40000:]), \n", 924 | " epochs=10, verbose=1)" 925 | ] 926 | }, 927 | { 928 | "cell_type": "markdown", 929 | "metadata": {}, 930 | "source": [ 931 | "Arguments\n", 932 | "\n", 933 | "- generator: A generator. The output of the generator must be either\n", 934 | "a tuple (inputs, targets)\n", 935 | "a tuple (inputs, targets, sample_weights). 
All arrays should contain the same number of samples. The generator is expected to loop over its data indefinitely. An epoch finishes when steps_per_epoch batches have been seen by the model.\n", 936 | "- steps_per_epoch: Total number of steps (batches of samples) to yield from generator before declaring one epoch finished and starting the next epoch. It should typically be equal to the number of samples of your dataset divided by the batch size.\n", 937 | "- epochs: Integer, total number of iterations on the data. \n", 938 | "- verbose: Verbosity mode, 0, 1, or 2.\n", 939 | "- callbacks: List of callbacks to be called during training.\n", 940 | "- validation_data: This can be either\n", 941 | "A generator for the validation data\n", 942 | "A tuple (inputs, targets)\n", 943 | "A tuple (inputs, targets, sample_weights).\n", 944 | "- validation_steps: Only relevant if validation_data is a generator. Number of steps to yield from validation generator at the end of every epoch. It should typically be equal to the number of samples of your validation dataset divided by the batch size. Optional for Sequence: if unspecified, will use the len(validation_data) as a number of steps.\n", 945 | "- class_weight: Dictionary mapping class indices to a weight for the class.\n", 946 | "- max_queue_size: Maximum size for the generator queue\n", 947 | "- workers: Maximum number of processes to spin up\n", 948 | "- use_multiprocessing: if True, use process based threading.\n", 949 | "- initial_epoch: Epoch at which to start training (useful for resuming a previous training run).\n", 950 | "\n", 951 | "Returns\n", 952 | "\n", 953 | "A History object." 954 | ] 955 | }, 956 | { 957 | "cell_type": "markdown", 958 | "metadata": {}, 959 | "source": [ 960 | "# Mini-Project: Train you own fully connected neural network to classify handwritten digits (Now with augmented data)\n", 961 | "\n", 962 | "- Train a logistic regression to create a benchmark\n", 963 | "- Train a neural network and compare. Start with a few layers, then experiment with more layers and different parameters.\n", 964 | "- Are you overfitting? Underfitting? How can you improve your model? Try other hyperparameters or adding regularization. Make some plots to understand the behaviour of your model." 
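One possible starting point (a sketch only; EarlyStopping and ModelCheckpoint come from keras.callbacks, and the checkpoint filename is just an illustration) is to reuse the callbacks introduced earlier together with fit_generator, so the best weights seen on the held-out split are kept:

    from keras.callbacks import EarlyStopping, ModelCheckpoint

    my_callbacks = [EarlyStopping(monitor='val_loss', patience=3),
                    ModelCheckpoint('best_augmented_model.h5', monitor='val_loss', save_best_only=True)]

    history = model.fit_generator(data_generator.flow(x_train[0:40000], y_train[0:40000], batch_size=100),
                                  steps_per_epoch=40000 // 100,
                                  validation_data=(x_train[40000:], y_train[40000:]),
                                  epochs=10, verbose=2, callbacks=my_callbacks)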
965 | ] 966 | }, 967 | { 968 | "cell_type": "markdown", 969 | "metadata": {}, 970 | "source": [ 971 | "## Logistic regression" 972 | ] 973 | }, 974 | { 975 | "cell_type": "code", 976 | "execution_count": null, 977 | "metadata": { 978 | "collapsed": true 979 | }, 980 | "outputs": [], 981 | "source": [ 982 | "# Define model architecture\n", 983 | "model = Sequential()\n", 984 | "model.add(Flatten(input_shape=(28, 28, 1))) # Images are a 3D matrix, we have to flatten them to be 1D\n", 985 | "model.add(Dense(10, kernel_initializer='normal', activation='softmax'))\n", 986 | "\n", 987 | "# Compile model\n", 988 | "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])" 989 | ] 990 | }, 991 | { 992 | "cell_type": "code", 993 | "execution_count": null, 994 | "metadata": { 995 | "collapsed": true, 996 | "scrolled": true 997 | }, 998 | "outputs": [], 999 | "source": [ 1000 | "# Train the model\n", 1001 | "log_history = model.fit_generator(data_generator.flow(x_train[0:40000], y_train[0:40000], batch_size=100), \n", 1002 | " steps_per_epoch=40000/100,\n", 1003 | " validation_data=(x_train[40000:], y_train[40000:]), \n", 1004 | " epochs=10, verbose=2)" 1005 | ] 1006 | }, 1007 | { 1008 | "cell_type": "markdown", 1009 | "metadata": {}, 1010 | "source": [ 1011 | "## Shallow neural network" 1012 | ] 1013 | }, 1014 | { 1015 | "cell_type": "code", 1016 | "execution_count": null, 1017 | "metadata": { 1018 | "collapsed": true 1019 | }, 1020 | "outputs": [], 1021 | "source": [ 1022 | "# Define model architecture\n", 1023 | "model = Sequential()\n", 1024 | "model.add(Flatten(input_shape=(28, 28, 1))) # Images are a 3D matrix, we have to flatten them to be 1D\n", 1025 | "model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n", 1026 | "model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n", 1027 | "model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n", 1028 | "model.add(Dense(10, kernel_initializer='normal', activation='softmax'))\n", 1029 | "\n", 1030 | "# Compile model\n", 1031 | "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])" 1032 | ] 1033 | }, 1034 | { 1035 | "cell_type": "code", 1036 | "execution_count": null, 1037 | "metadata": { 1038 | "collapsed": true 1039 | }, 1040 | "outputs": [], 1041 | "source": [ 1042 | "# Train the model\n", 1043 | "nn_history = model.fit_generator(data_generator.flow(x_train[0:40000], y_train[0:40000], batch_size=100), \n", 1044 | " steps_per_epoch=40000/100,\n", 1045 | " validation_data=(x_train[40000:], y_train[40000:]), \n", 1046 | " epochs=10, verbose=2)" 1047 | ] 1048 | }, 1049 | { 1050 | "cell_type": "markdown", 1051 | "metadata": {}, 1052 | "source": [ 1053 | "### Visualize training" 1054 | ] 1055 | }, 1056 | { 1057 | "cell_type": "code", 1058 | "execution_count": null, 1059 | "metadata": { 1060 | "collapsed": true 1061 | }, 1062 | "outputs": [], 1063 | "source": [ 1064 | "log_train_loss = log_history.history['loss']\n", 1065 | "log_valid_loss = log_history.history['val_loss']\n", 1066 | "nn_train_loss = nn_history.history['loss']\n", 1067 | "nn_valid_loss = nn_history.history['val_loss']\n", 1068 | "epochs = list(range(1, len(log_train_loss)+1))\n", 1069 | "\n", 1070 | "plt.plot(epochs, log_train_loss, label=\"log reg train loss\")\n", 1071 | "plt.plot(epochs, log_valid_loss, label=\"log reg validation loss\")\n", 1072 | "plt.plot(epochs, nn_train_loss, label=\"NN train loss\")\n", 1073 | "plt.plot(epochs, nn_valid_loss, label=\"NN validation 
loss\")\n", 1074 | "plt.xlabel('Epochs')\n", 1075 | "plt.ylabel('Loss')\n", 1076 | "plt.legend(loc='upper right')\n", 1077 | "plt.show()\n", 1078 | "\n", 1079 | "log_train_accuracy = log_history.history['acc']\n", 1080 | "log_valid_accuracy = log_history.history['val_acc']\n", 1081 | "nn_train_accuracy = nn_history.history['acc']\n", 1082 | "nn_valid_accuracy = nn_history.history['val_acc']\n", 1083 | "epochs = list(range(1, len(log_train_accuracy)+1))\n", 1084 | "\n", 1085 | "plt.plot(epochs, log_train_accuracy, label=\"log reg train accuracy\")\n", 1086 | "plt.plot(epochs, log_valid_accuracy, label=\"log reg validation accuracy\")\n", 1087 | "plt.plot(epochs, nn_train_accuracy, label=\"NN train accuracy\")\n", 1088 | "plt.plot(epochs, nn_valid_accuracy, label=\"NN validation accuracy\")\n", 1089 | "plt.xlabel('Epochs')\n", 1090 | "plt.ylabel('Accuracy')\n", 1091 | "plt.legend(loc='lower right')\n", 1092 | "plt.show()" 1093 | ] 1094 | }, 1095 | { 1096 | "cell_type": "markdown", 1097 | "metadata": {}, 1098 | "source": [ 1099 | "# Convolutional Neural Networks" 1100 | ] 1101 | }, 1102 | { 1103 | "cell_type": "markdown", 1104 | "metadata": { 1105 | "collapsed": true 1106 | }, 1107 | "source": [ 1108 | "Convolutional neural networks are similar to the networks we have seen so far, with one main difference: they assume the input will be an image and optimize the architecture for that assumption. They are now used in many other contexts besides images, but this is what they were created for.\n", 1109 | "\n", 1110 | "A convolutional layer defines a kernel (weight matrix) which is multiplied element by element with a same-sized section of the input, and the resulting values are summed. The kernel is then moved by a number of pixels equal to the stride, and the process is repeated until the whole image has been covered. Afterwards an activation function is applied to each of the resulting values. All of this is repeated for each filter in the layer.\n", 1111 | "\n", 1112 | "[Animation of a convolutional layer]\n", 1113 | "Animation from Karpathy (http://cs231n.github.io/convolutional-networks/)\n", 1114 | "\n", 1115 | "A key aspect is that, for a given filter, the same kernel weights are applied at every position of the input. This means a convolutional layer has far fewer free parameters than a fully connected layer. It also gives the model location invariance, since a filter activates in the same way whether the object is in the top left of the image or the bottom right.\n", 1116 | "\n", 1117 | "Images are 3D objects with height, width and depth. Grayscale images have a depth of 1, while color images have 3 channels: red, green and blue. So convolutional layers take a 3D tensor as input and output a 3D tensor.\n", 1118 | "\n", 1119 | "[Illustration of convolution over a 3D input volume]\n", 1120 | "Image from Karpathy (http://cs231n.github.io/convolutional-networks/)\n", 1121 | "\n", 1122 | "Notice that the convolution operation reduces the size of the input. Depending on your architecture you might want to keep the size constant; you can do this by adding padding at the borders of the input. " 1123 | ] 1124 | }, 1125 | { 1126 | "cell_type": "markdown", 1127 | "metadata": {}, 1128 | "source": [ 1129 | "## Pooling layers\n", 1130 | "\n", 1131 | "The idea of the pooling layer is to reduce the size of the input and also help regularize the model. The most common type of pooling is max pooling. 
In this case you select an area which is the size of the filter and from it you will only pass through the maximum values among the inputs. Then move the filter a specific stride size and repeat.\n", 1132 | "\n", 1133 | "\"convolutional\n", 1134 | "Image from Karpathy (http://cs231n.github.io/convolutional-networks/)" 1135 | ] 1136 | }, 1137 | { 1138 | "cell_type": "markdown", 1139 | "metadata": {}, 1140 | "source": [ 1141 | "## Best Practices" 1142 | ] 1143 | }, 1144 | { 1145 | "cell_type": "markdown", 1146 | "metadata": {}, 1147 | "source": [ 1148 | "In practice it is best to use blocks of stacked convolutional layers with rectified linear units followed by a max pooling operation. Then at the end have a small number of fully connected layers and the output layer.\n", 1149 | "\n", 1150 | "Each convolutional layer usually has a small filter size (2, 2), (3, 3) and use max pooling with stride (2, 2)" 1151 | ] 1152 | }, 1153 | { 1154 | "cell_type": "markdown", 1155 | "metadata": {}, 1156 | "source": [ 1157 | "# Convolutional Neural Networks in Keras" 1158 | ] 1159 | }, 1160 | { 1161 | "cell_type": "code", 1162 | "execution_count": 6, 1163 | "metadata": { 1164 | "collapsed": true 1165 | }, 1166 | "outputs": [], 1167 | "source": [ 1168 | "from keras.layers import Conv2D, MaxPooling2D" 1169 | ] 1170 | }, 1171 | { 1172 | "cell_type": "markdown", 1173 | "metadata": {}, 1174 | "source": [ 1175 | "### Convolutional layer" 1176 | ] 1177 | }, 1178 | { 1179 | "cell_type": "code", 1180 | "execution_count": null, 1181 | "metadata": { 1182 | "collapsed": true 1183 | }, 1184 | "outputs": [], 1185 | "source": [ 1186 | "Conv2D(filters, kernel_size, strides=(1, 1), padding='valid', \n", 1187 | " activation=None, use_bias=True, \n", 1188 | " kernel_initializer='glorot_uniform', bias_initializer='zeros', \n", 1189 | " kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, \n", 1190 | " kernel_constraint=None, bias_constraint=None)" 1191 | ] 1192 | }, 1193 | { 1194 | "cell_type": "code", 1195 | "execution_count": null, 1196 | "metadata": { 1197 | "collapsed": true 1198 | }, 1199 | "outputs": [], 1200 | "source": [ 1201 | "Conv2D(32, (3, 3), padding='same') # Keeps output of the same size as input\n", 1202 | "Conv2D(32, (3, 3)) # Reduces the size of the output" 1203 | ] 1204 | }, 1205 | { 1206 | "cell_type": "markdown", 1207 | "metadata": {}, 1208 | "source": [ 1209 | "Arguments\n", 1210 | "\n", 1211 | "- filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution).\n", 1212 | "- kernel_size: An integer or tuple/list of 2 integers, specifying the width and height of the 2D convolution window. Can be a single integer to specify the same value for all spatial dimensions.\n", 1213 | "- strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the width and height. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1.\n", 1214 | "- padding: one of \"valid\" or \"same\" (case-insensitive).\n", 1215 | "- activation: Activation function to use (see activations). If you don't specify anything, no activation is applied (ie. 
\"linear\" activation: a(x) = x).\n", 1216 | "- use_bias: Boolean, whether the layer uses a bias vector.\n", 1217 | "- kernel_initializer: Initializer for the kernel weights matrix (see initializers).\n", 1218 | "- bias_initializer: Initializer for the bias vector (see initializers).\n", 1219 | "- kernel_regularizer: Regularizer function applied to the kernel weights matrix (see regularizer).\n", 1220 | "- bias_regularizer: Regularizer function applied to the bias vector (see regularizer).\n", 1221 | "- activity_regularizer: Regularizer function applied to the output of the layer (its \"activation\"). (see regularizer).\n", 1222 | "- kernel_constraint: Constraint function applied to the kernel matrix (see constraints).\n", 1223 | "- bias_constraint: Constraint function applied to the bias vector (see constraints).\n", 1224 | "\n", 1225 | "Input shape\n", 1226 | "\n", 1227 | "4D tensor with shape: (samples, channels, rows, cols) if data_format='channels_first' or 4D tensor with shape: (samples, rows, cols, channels) if data_format='channels_last'.\n", 1228 | "\n", 1229 | "Output shape\n", 1230 | "\n", 1231 | "4D tensor with shape: (samples, filters, new_rows, new_cols) if data_format='channels_first' or 4D tensor with shape: (samples, new_rows, new_cols, filters) if data_format='channels_last'. rows and cols values might have changed due to padding." 1232 | ] 1233 | }, 1234 | { 1235 | "cell_type": "markdown", 1236 | "metadata": { 1237 | "collapsed": true 1238 | }, 1239 | "source": [ 1240 | "### Pooling layer" 1241 | ] 1242 | }, 1243 | { 1244 | "cell_type": "code", 1245 | "execution_count": null, 1246 | "metadata": { 1247 | "collapsed": true 1248 | }, 1249 | "outputs": [], 1250 | "source": [ 1251 | "MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid') # This will reduce the size of the output" 1252 | ] 1253 | }, 1254 | { 1255 | "cell_type": "markdown", 1256 | "metadata": {}, 1257 | "source": [ 1258 | "Arguments\n", 1259 | "\n", 1260 | "- pool_size: integer or tuple of 2 integers, factors by which to downscale (vertical, horizontal). (2, 2) will halve the input in both spatial dimension. If only one integer is specified, the same window length will be used for both dimensions.\n", 1261 | "- strides: Integer, tuple of 2 integers, or None. Strides values. If None, it will default to pool_size.\n", 1262 | "- padding: One of \"valid\" or \"same\" (case-insensitive).\n", 1263 | "\n", 1264 | "Input shape\n", 1265 | "\n", 1266 | "4D tensor with shape: (batch_size, rows, cols, channels)\n", 1267 | "\n", 1268 | "Output shape\n", 1269 | "\n", 1270 | "4D tensor with shape: (batch_size, pooled_rows, pooled_cols, channels)" 1271 | ] 1272 | }, 1273 | { 1274 | "cell_type": "markdown", 1275 | "metadata": {}, 1276 | "source": [ 1277 | "# Mini-Project: Train you own convolutional neural network to classify handwritten digits (With or without augmented data)\n", 1278 | "\n", 1279 | "- Train a logistic regression to create a benchmark\n", 1280 | "- Train a convolutional neural network and compare it with the logistic regression and fully connected neural network. Start with a few layers, then experiment with more layers and different parameters.\n", 1281 | "- Are you overfitting? Underfitting? How can you improve your model? Try other hyperparameters or adding regularization. Make some plots to understand the behaviour of your model." 
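Before committing to a full training run, it can help to check how each layer changes the tensor shape. A short sketch (the tiny model here is hypothetical; model.summary() is the stock Keras call) that prints output shapes and parameter counts:

    # tiny CNN built only to inspect shapes, not to get good accuracy
    shape_check = Sequential()
    shape_check.add(Conv2D(16, (3, 3), padding='same', activation='relu', input_shape=(28, 28, 1)))
    shape_check.add(MaxPooling2D(pool_size=(2, 2)))  # 28x28 -> 14x14
    shape_check.add(Flatten())
    shape_check.add(Dense(10, activation='softmax'))
    shape_check.summary()  # prints each layer's output shape and number of parameters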
1282 | ] 1283 | }, 1284 | { 1285 | "cell_type": "markdown", 1286 | "metadata": {}, 1287 | "source": [ 1288 | "## Convolutional neural network" 1289 | ] 1290 | }, 1291 | { 1292 | "cell_type": "code", 1293 | "execution_count": 8, 1294 | "metadata": { 1295 | "collapsed": true 1296 | }, 1297 | "outputs": [], 1298 | "source": [ 1299 | "from keras.layers import Conv2D, MaxPooling2D, Activation\n", 1300 | "# Define model architecture\n", 1301 | "model = Sequential()\n", 1302 | "model.add(Conv2D(32, (3, 3), padding='same', input_shape=(28, 28, 1), activation='relu'))\n", 1303 | "model.add(Conv2D(32, (3, 3), activation='relu'))\n", 1304 | "model.add(MaxPooling2D(pool_size=(2, 2)))\n", 1305 | "model.add(Dropout(0.25))\n", 1306 | "\n", 1307 | "model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))\n", 1308 | "model.add(Conv2D(64, (3, 3), activation='relu'))\n", 1309 | "model.add(MaxPooling2D(pool_size=(2, 2)))\n", 1310 | "model.add(Dropout(0.25))\n", 1311 | "\n", 1312 | "model.add(Flatten())\n", 1313 | "model.add(Dense(512, activation='relu'))\n", 1314 | "model.add(Dropout(0.5))\n", 1315 | "model.add(Dense(10, activation='softmax'))\n", 1316 | "\n", 1317 | "# Compile model\n", 1318 | "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])" 1319 | ] 1320 | }, 1321 | { 1322 | "cell_type": "code", 1323 | "execution_count": 9, 1324 | "metadata": {}, 1325 | "outputs": [ 1326 | { 1327 | "name": "stdout", 1328 | "output_type": "stream", 1329 | "text": [ 1330 | "Epoch 1/10\n", 1331 | " - 158s - loss: 0.4987 - acc: 0.8372 - val_loss: 0.3219 - val_acc: 0.9799\n", 1332 | "Epoch 2/10\n", 1333 | " - 156s - loss: 0.1578 - acc: 0.9515 - val_loss: 0.2249 - val_acc: 0.9859\n", 1334 | "Epoch 3/10\n", 1335 | " - 156s - loss: 0.1122 - acc: 0.9653 - val_loss: 0.1746 - val_acc: 0.9890\n", 1336 | "Epoch 4/10\n", 1337 | " - 156s - loss: 0.0967 - acc: 0.9705 - val_loss: 0.1823 - val_acc: 0.9886\n", 1338 | "Epoch 5/10\n", 1339 | " - 170s - loss: 0.0834 - acc: 0.9739 - val_loss: 0.1648 - val_acc: 0.9897\n", 1340 | "Epoch 6/10\n", 1341 | " - 161s - loss: 0.0759 - acc: 0.9765 - val_loss: 0.1394 - val_acc: 0.9913\n", 1342 | "Epoch 7/10\n", 1343 | " - 155s - loss: 0.0692 - acc: 0.9783 - val_loss: 0.1536 - val_acc: 0.9904\n", 1344 | "Epoch 8/10\n", 1345 | " - 155s - loss: 0.0662 - acc: 0.9793 - val_loss: 0.1256 - val_acc: 0.9920\n", 1346 | "Epoch 9/10\n", 1347 | " - 155s - loss: 0.0613 - acc: 0.9815 - val_loss: 0.1355 - val_acc: 0.9916\n", 1348 | "Epoch 10/10\n", 1349 | " - 155s - loss: 0.0590 - acc: 0.9822 - val_loss: 0.1201 - val_acc: 0.9925\n" 1350 | ] 1351 | } 1352 | ], 1353 | "source": [ 1354 | "# Train the model\n", 1355 | "cnn_history = model.fit_generator(data_generator.flow(x_train[0:40000], y_train[0:40000], batch_size=100), \n", 1356 | " steps_per_epoch=40000/100,\n", 1357 | " validation_data=(x_train[40000:], y_train[40000:]), \n", 1358 | " epochs=10, verbose=2)" 1359 | ] 1360 | }, 1361 | { 1362 | "cell_type": "markdown", 1363 | "metadata": {}, 1364 | "source": [ 1365 | "### Visualize training" 1366 | ] 1367 | }, 1368 | { 1369 | "cell_type": "code", 1370 | "execution_count": null, 1371 | "metadata": { 1372 | "collapsed": true 1373 | }, 1374 | "outputs": [], 1375 | "source": [ 1376 | "log_train_loss = log_history.history['loss']\n", 1377 | "log_valid_loss = log_history.history['val_loss']\n", 1378 | "nn_train_loss = nn_history.history['loss']\n", 1379 | "nn_valid_loss = nn_history.history['val_loss']\n", 1380 | "cnn_train_loss = cnn_history.history['loss']\n", 1381 | 
"cnn_valid_loss = cnn_history.history['val_loss']\n", 1382 | "epochs = list(range(1, len(log_train_loss)+1))\n", 1383 | "\n", 1384 | "plt.plot(epochs, log_train_loss, label=\"log reg train loss\")\n", 1385 | "plt.plot(epochs, log_valid_loss, label=\"log reg validation loss\")\n", 1386 | "plt.plot(epochs, nn_train_loss, label=\"NN train loss\")\n", 1387 | "plt.plot(epochs, nn_valid_loss, label=\"NN validation loss\")\n", 1388 | "plt.plot(epochs, cnn_train_loss, label=\"CNN train loss\")\n", 1389 | "plt.plot(epochs, cnn_valid_loss, label=\"CNN validation loss\")\n", 1390 | "plt.xlabel('Epochs')\n", 1391 | "plt.ylabel('Loss')\n", 1392 | "plt.legend(loc='upper right')\n", 1393 | "plt.show()\n", 1394 | "\n", 1395 | "log_train_accuracy = log_history.history['acc']\n", 1396 | "log_valid_accuracy = log_history.history['val_acc']\n", 1397 | "nn_train_accuracy = nn_history.history['acc']\n", 1398 | "nn_valid_accuracy = nn_history.history['val_acc']\n", 1399 | "cnn_train_accuracy = cnn_history.history['acc']\n", 1400 | "cnn_valid_accuracy = cnn_history.history['val_acc']\n", 1401 | "epochs = list(range(1, len(log_train_accuracy)+1))\n", 1402 | "\n", 1403 | "plt.plot(epochs, log_train_accuracy, label=\"log reg train accuracy\")\n", 1404 | "plt.plot(epochs, log_valid_accuracy, label=\"log reg validation accuracy\")\n", 1405 | "plt.plot(epochs, nn_train_accuracy, label=\"NN train accuracy\")\n", 1406 | "plt.plot(epochs, nn_valid_accuracy, label=\"NN validation accuracy\")\n", 1407 | "plt.plot(epochs, cnn_train_accuracy, label=\"CNN train accuracy\")\n", 1408 | "plt.plot(epochs, cnn_valid_accuracy, label=\"CNN validation accuracy\")\n", 1409 | "plt.xlabel('Epochs')\n", 1410 | "plt.ylabel('Accuracy')\n", 1411 | "plt.legend(loc='lower right')\n", 1412 | "plt.show()" 1413 | ] 1414 | }, 1415 | { 1416 | "cell_type": "code", 1417 | "execution_count": null, 1418 | "metadata": { 1419 | "collapsed": true 1420 | }, 1421 | "outputs": [], 1422 | "source": [] 1423 | } 1424 | ], 1425 | "metadata": { 1426 | "anaconda-cloud": {}, 1427 | "kernelspec": { 1428 | "display_name": "Python [py3Keras2_env]", 1429 | "language": "python", 1430 | "name": "Python [py3Keras2_env]" 1431 | }, 1432 | "language_info": { 1433 | "codemirror_mode": { 1434 | "name": "ipython", 1435 | "version": 3 1436 | }, 1437 | "file_extension": ".py", 1438 | "mimetype": "text/x-python", 1439 | "name": "python", 1440 | "nbconvert_exporter": "python", 1441 | "pygments_lexer": "ipython3", 1442 | "version": "3.6.0" 1443 | } 1444 | }, 1445 | "nbformat": 4, 1446 | "nbformat_minor": 1 1447 | } 1448 | -------------------------------------------------------------------------------- /Neural_networks_with_keras_solved.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 3, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "from __future__ import division, print_function, generators\n", 10 | "\n", 11 | "import tensorflow as tf\n", 12 | "import keras\n", 13 | "\n", 14 | "import numpy as np\n", 15 | "import keras.datasets.mnist as mnist\n", 16 | "import keras.datasets.cifar10 as cifar10\n", 17 | "import matplotlib.pyplot as plt\n", 18 | "%matplotlib inline " 19 | ] 20 | }, 21 | { 22 | "cell_type": "markdown", 23 | "metadata": {}, 24 | "source": [ 25 | "## Schedule\n", 26 | "\n", 27 | "- What is MNIST? What is CIFAR? (10 minutes)\n", 28 | "\n", 29 | "- Loading images and converting them to numpy (5 minutes)\n", 30 | "\n", 31 | "- What is Keras? 
Creating a basic model, Callbacks, saving/loading a model, changing optimizer, loss (30 minutes)\n", 32 | "\n", 33 | "- Mini-Project: Train a feed forward NN on MNIST/CIFAR10 with Keras (No scaling) (15 minutes)\n", 34 | "\n", 35 | "- Preprocessing images: Scaling (5 minutes)\n", 36 | "\n", 37 | "- Mini-Project: Train a feed forward NN on MNIST/CIFAR10 with Keras (10 minutes)\n", 38 | "\n", 39 | "- Preprocessing images: Data augmentation and generators with Keras (10 minutes)\n", 40 | "\n", 41 | "- Mini-Project: Train a feed forward NN on MNIST/CIFAR10 using data augmentation with Keras (15 minutes)\n", 42 | "\n", 43 | "- Break (20 minutes)\n", 44 | "\n", 45 | "- Convolutional NN, Pooling, Flattening, etc (20 minutes)\n", 46 | "\n", 47 | "- How to build a CNN in Keras (10 minutes)\n", 48 | "\n", 49 | "- Mini-Project: Train CNN on MNIST/CIFAR10 (30 minutes)" 50 | ] 51 | }, 52 | { 53 | "cell_type": "markdown", 54 | "metadata": {}, 55 | "source": [ 56 | "## What is CIFAR10?\n", 57 | "\n", 58 | "More information at\n", 59 | "https://www.cs.toronto.edu/~kriz/cifar.html" 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": null, 65 | "metadata": { 66 | "collapsed": true 67 | }, 68 | "outputs": [], 69 | "source": [ 70 | "# load dataset\n", 71 | "(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n", 72 | "\n", 73 | "# This is needed to know what each class means\n", 74 | "label_id_to_class_name = {0:'airplane', 1:'automobile', 2:'bird', 3:'cat', 4:'deer', 5:'dog', \n", 75 | " 6:'frog', 7:'horse', 8:'ship', 9:'truck'}\n", 76 | "\n", 77 | "print(x_train.shape[0], 'Number of train samples')\n", 78 | "print(x_test.shape[0], 'Number of test samples')\n", 79 | "print('x_train shape:', x_train.shape)\n", 80 | "\n", 81 | "# plot images\n", 82 | "for image_id in range(0, 5):\n", 83 | " plt.imshow(x_train[image_id])\n", 84 | " plt.title(\"The true label is %s\" % label_id_to_class_name[int(y_train[image_id])])\n", 85 | " plt.show()\n", 86 | " \n", 87 | "# This makes sure the image has the correct order in the axis for Tensorflow, it would be different for Theano backend\n", 88 | "x_train = x_train.reshape(x_train.shape[0], 32, 32, 3)\n", 89 | "x_test = x_test.reshape(x_test.shape[0], 32, 32, 3)\n", 90 | "\n", 91 | "# Convert values to floats, originally they are integers\n", 92 | "x_train = x_train.astype('float32')\n", 93 | "x_test = x_test.astype('float32')\n", 94 | "\n", 95 | "# Convert values of labels from 0 to 9 to categorical (one_hot encoding)\n", 96 | "y_train = keras.utils.to_categorical(y_train, 10)\n", 97 | "y_test = keras.utils.to_categorical(y_test, 10)" 98 | ] 99 | }, 100 | { 101 | "cell_type": "markdown", 102 | "metadata": {}, 103 | "source": [ 104 | "## What is MNIST?\n", 105 | "\n", 106 | "More information at http://yann.lecun.com/exdb/mnist/" 107 | ] 108 | }, 109 | { 110 | "cell_type": "code", 111 | "execution_count": 5, 112 | "metadata": { 113 | "scrolled": true 114 | }, 115 | "outputs": [ 116 | { 117 | "name": "stdout", 118 | "output_type": "stream", 119 | "text": [ 120 | "60000 Number of train samples\n", 121 | "10000 Number of test samples\n", 122 | "x_train shape: (60000, 28, 28)\n" 123 | ] 124 | }, 125 | { 126 | "data": { 127 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAP8AAAEICAYAAACQ6CLfAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAEg5JREFUeJzt3X2QXXV9x/H3xyC1hECIaIgxEIM0DjAYpyE4TqbI0PBM\nIco4RplioURbYnHGUpk41mAbhyrQIQOjCQVMrEaYIiUwjiCP0WFMWSFACCKIYUhcEmlYSMKTyX77\nxz2hN8ve3717n87N/j6vmZ2993zPwzc3+9lz7jn37E8RgZnl5x1lN2Bm5XD4zTLl8JtlyuE3y5TD\nb5Yph98sUw5/F0haJOk/y+6jWZJC0gcbmG9qMe8+TWwjuaykJyR9fKTrtdoc/jaQtL3qa1DSa1XP\nP9vmbX1P0r+2c517g4g4KiLuH8kyVb9Qqv9/vtahFvc6I/4NbW8XEfvvfixpA/C3EXF31bRF3epF\n0j4RsbNb29tLjPdr8nbe83fPvpJWSNpWHMLO3F2Q9D5Jt0j6g6TfSfqH4VYgaT7wWeCfir3Y7cX0\nDZK+IukxYIekfYYeqg89YpB0hqS1kgYkPSjpmEb+EZJOl/SIpFckPV/jF9v5kn4vqV/SP1Yt+w5J\nl0r6raT/lXSzpAkNbneDpL8sHs+S1Ff0sFnSVY2sw/bk8HfPXwE/AsYDq4BroBII4HbgUWAycCLw\nJUknD11BRCwDfgB8KyL2j4gzq8rzgNNpYC8n6SPADcDngXcDS4FVkv6kgX/HDuCvi3/H6cDfSTp7\nyDwnAEcAJwFf2R1a4IvA2cDxwPuAl4BrG9jmUFcDV0fEAcDhwM115n9O0kZJN0o6uIntjUoOf/f8\nIiJ+EhG7gO8DHy6mHwu8JyK+ERFvRsSzwHXAp0e4/iUR8XxEvNbAvPOBpRGxJiJ2RcRy4A3go/UW\njIj7I+LxiBiMiMeAlVTCXO2yiNgREY8DN1L5xQTwBeCrEbExIt4AFgHnNHGC8I/AByUdHBHbI+KX\nNeZ7kcrrexjw58A4Kr88DYe/m16oevwq8K7ih/4w4H3F4feApAFgITBxhOt/fgTzHgZ8ecg2p1DZ\nGydJOk7SfcVblJepBHro3rS6l+eq1nsYcGvVNp8EdjHyf+sFwJ8Bv5b0kKQzhpup+MXQFxE7I2Iz\nsAA4SdK4EW5vVHL4y/c88LuIGF/1NS4iTqsxf63bMIdOfxXYr+r5IUO2uXjINveLiJUN9PtDKm9b\npkTEgcB3AQ2ZZ0rV40OB31dt99Qh231XRGxqYLtviYinI2Ie8F7g34D/kjS2kUWL7/65xy9CL/gf\nYFtxwu5PJY2RdLSkY2vMvxmY1sB61wKfKdZ3Cnseml8HfKHYi0vS2OJEXiN7xHHA1oh4XdIs4DPD\nzPM1SftJOgr4G+CmYvp3gcWSDgOQ9B5JZzWwzT1IOlfSeyJiEBgoJg8OM99xkqYXJxrfDSwB7o+I\nl0e6zdHI4S9ZcQ7gDGAG8Dsq71P/AziwxiLXA0cWh87/nVj1xcCZVMLxWeCteSOiD7iQyknHl4Bn\ngM812PLfA9+QtA34Z4Y/2fZAsc57gCsi4q5i+tVUjhruKpb/JXBcg9utdgrwhKTtxTo/XeNcxzTg\np8A2YB2V8xrzhpkvS/If8zDLk/f8Zply+M0y5fCbZcrhN8tUV2/skeSzi2YdFhFDP3cxrJb2/JJO\nkfSUpGckXdrKusysu5q+1CdpDPAbYA6wEXgImBcR6xPLeM9v1mHd2PPPAp6JiGcj4k0qd6yN+NNa\nZlaOVsI/mT1v4NhYTNuDpPnFvdd9LWzLzNqs4yf8invQl4EP+816SSt7/k3seffW+4tpZrYXaCX8\nDwFHSPqApH2p/PGJVe1py8w6renD/ojYKWkBcCcwBrghIp5oW2dm1lFdvavP7/nNOq8rH/Ixs72X\nw2+WKYffLFMOv1mmHH6zTDn8Zply+M0y5fCbZcrhN8uUw2+WKYffLFMOv1mmHH6zTDn8Zply+M0y\n5fCbZcrhN8uUw2+WKYffLFMOv1mmHH6zTDn8Zply+M0y5fCbZcrhN8uUw2+WKYffLFMOv1mmHH6z\nTDU9RLftHcaMGZOsH3jggR3d/oIFC2rW9ttvv+Sy06dPT9YvuuiiZP2KK66oWZs3b15y2ddffz1Z\nv/zyy5P1yy67LFnvBS2FX9IGYBuwC9gZETPb0ZSZdV479vwnRMSLbViPmXWR3/ObZarV8Adwt6Rf\nSZo/3AyS5kvqk9TX4rbMrI1aPeyfHRGbJL0X+JmkX0fE6uoZImIZsAxAUrS4PTNrk5b2/BGxqfi+\nBbgVmNWOpsys85oOv6SxksbtfgycBKxrV2Nm1lmtHPZPBG6VtHs9P4yIn7alq1Hm0EMPTdb33Xff\nZP1jH/tYsj579uyatfHjxyeX/eQnP5msl2njxo3J+pIlS5L1uXPn1qxt27Ytueyjjz6arD/wwAPJ\n+t6g6fBHxLPAh9vYi5l1kS/1mWXK4TfLlMNvlimH3yxTDr9ZphTRvQ/djdZP+M2YMSNZv/fee5P1\nTt9W26sGBweT9fPPPz9Z3759e9Pb7u/vT9ZfeumlZP2pp55qetudFhFqZD7v+c0y5fCbZcrhN8uU\nw2+WKYffLFMOv1mmHH6zTPk6fxtMmDAhWV+zZk2yPm3atHa201b1eh8YGEjWTzjhhJq1N998M7ls\nrp9/aJWv85tZksNvlimH3yxTDr9Zphx+s0w5/GaZcvjNMuUhuttg69atyfoll1ySrJ9xxhnJ+iOP\nPJKs1/sT1ilr165N1ufMmZOs79ixI1k/6qijatYuvvji5LLWWd7zm2XK4TfLlMNvlimH3yxTDr9Z\nphx+s0w5/GaZ8v38PeCAAw5I1usNJ7106dKatQsuuCC57Lnnnpusr1y5Mlm33tO2+/kl3SBpi6R1\nVdMmSPqZpKeL7we10qyZdV8jh/3fA04ZMu1S4J6IOAK4p3huZnuRuuGPiNXA0M+vngUsLx4vB85u\nc19m1mHNfrZ/YkTsHuzsBWBirRklzQfmN7kdM+uQlm/siYhInciLiGXAMvAJP7Ne0uylvs2SJgEU\n37e0ryUz64Zmw78KOK94fB5wW3vaMbNuqXvYL2kl8HHgYEkbga8DlwM3S7oAeA74VCebHO1eeeWV\nlpZ/+eWXm172wgsvTNZvuummZH1wcLDpbVu56oY/IubVKJ3Y5l7MrIv88V6zTDn8Zply+M0y5fCb\nZcrhN8uUb+kdBcaOHVuzdvvttyeXPf7445P1U089NVm/6667knXrPg/RbWZJDr9Zphx+s0w5/GaZ\ncvjNMuXwm2XK4TfLlK/zj3KHH354sv7www8n6wMDA8n6fffdl6z39fXVrF177bXJZbv5szma+Dq/\nmSU5/GaZcvjNMuXwm2XK4TfLlMNvlimH3yxTvs6fublz5ybrN954Y7I+bty4pre9cOHCZH3FihXJ\nen9/f7KeK1/nN7Mkh98sUw6/WaYcfrNMOfxmmXL4zTLl
8Jtlytf5Lenoo49O1q+66qpk/cQTmx/M\neenSpcn64sWLk/VNmzY1ve29Wduu80u6QdIWSeuqpi2StEnS2uLrtFaaNbPua+Sw/3vAKcNM//eI\nmFF8/aS9bZlZp9UNf0SsBrZ2oRcz66JWTvh9UdJjxduCg2rNJGm+pD5Jtf+Ym5l1XbPh/w4wDZgB\n9ANX1poxIpZFxMyImNnktsysA5oKf0RsjohdETEIXAfMam9bZtZpTYVf0qSqp3OBdbXmNbPeVPc6\nv6SVwMeBg4HNwNeL5zOAADYAn4+IujdX+zr/6DN+/Phk/cwzz6xZq/e3AqT05ep77703WZ8zZ06y\nPlo1ep1/nwZWNG+YydePuCMz6yn+eK9Zphx+s0w5/GaZcvjNMuXwm2XKt/Raad54441kfZ990hej\ndu7cmayffPLJNWv3339/ctm9mf90t5klOfxmmXL4zTLl8JtlyuE3y5TDb5Yph98sU3Xv6rO8HXPM\nMcn6Oeeck6wfe+yxNWv1ruPXs379+mR99erVLa1/tPOe3yxTDr9Zphx+s0w5/GaZcvjNMuXwm2XK\n4TfLlK/zj3LTp09P1hcsWJCsf+ITn0jWDznkkBH31Khdu3Yl6/396b8WPzg42M52Rh3v+c0y5fCb\nZcrhN8uUw2+WKYffLFMOv1mmHH6zTNW9zi9pCrACmEhlSO5lEXG1pAnATcBUKsN0fyoiXupcq/mq\ndy193rzhBlKuqHcdf+rUqc201BZ9fX3J+uLFi5P1VatWtbOd7DSy598JfDkijgQ+Clwk6UjgUuCe\niDgCuKd4bmZ7ibrhj4j+iHi4eLwNeBKYDJwFLC9mWw6c3akmzaz9RvSeX9JU4CPAGmBiROz+fOUL\nVN4WmNleouHP9kvaH7gF+FJEvCL9/3BgERG1xuGTNB+Y32qjZtZeDe35Jb2TSvB/EBE/LiZvljSp\nqE8Ctgy3bEQsi4iZETGzHQ2bWXvUDb8qu/jrgScj4qqq0irgvOLxecBt7W/PzDql7hDdkmYDPwce\nB3bfI7mQyvv+m4FDgeeoXOrbWmddWQ7RPXFi+nTIkUcemaxfc801yfqHPvShEffULmvWrEnWv/3t\nb9es3XZben/hW3Kb0+gQ3XXf80fEL4BaKztxJE2ZWe/wJ/zMMuXwm2XK4TfLlMNvlimH3yxTDr9Z\npvynuxs0YcKEmrWlS5cml50xY0ayPm3atKZ6aocHH3wwWb/yyiuT9TvvvDNZf+2110bck3WH9/xm\nmXL4zTLl8JtlyuE3y5TDb5Yph98sUw6/Waayuc5/3HHHJeuXXHJJsj5r1qyatcmTJzfVU7u8+uqr\nNWtLlixJLvvNb34zWd+xY0dTPVnv857fLFMOv1mmHH6zTDn8Zply+M0y5fCbZcrhN8tUNtf5586d\n21K9FevXr0/W77jjjmR9586dyXrqnvuBgYHkspYv7/nNMuXwm2XK4TfLlMNvlimH3yxTDr9Zphx+\ns0wpItIzSFOAFcBEIIBlEXG1pEXAhcAfilkXRsRP6qwrvTEza1lEqJH5Ggn/JGBSRDwsaRzwK+Bs\n4FPA9oi4otGmHH6zzms0/HU/4RcR/UB/8XibpCeBcv90jZm1bETv+SVNBT4CrCkmfVHSY5JukHRQ\njWXmS+qT1NdSp2bWVnUP+9+aUdofeABYHBE/ljQReJHKeYB/ofLW4Pw66/Bhv1mHte09P4CkdwJ3\nAHdGxFXD1KcCd0TE0XXW4/CbdVij4a972C9JwPXAk9XBL04E7jYXWDfSJs2sPI2c7Z8N/Bx4HBgs\nJi8E5gEzqBz2bwA+X5wcTK3Le36zDmvrYX+7OPxmnde2w34zG50cfrNMOfxmmXL4zTLl8JtlyuE3\ny5TDb5Yph98sUw6/WaYcfrNMOfxmmXL4zTLl8JtlyuE3y1S3h+h+EXiu6vnBxbRe1Ku99Wpf4N6a\n1c7eDmt0xq7ez/+2jUt9ETGztAYSerW3Xu0L3FuzyurNh/1mmXL4zTJVdviXlbz9lF7trVf7AvfW\nrFJ6K/U9v5mVp+w9v5mVxOE3y1Qp4Zd0iqSnJD0j6dIyeqhF0gZJj0taW/b4gsUYiFskrauaNkHS\nzyQ9XXwfdozEknpbJGlT8dqtlXRaSb1NkXSfpPWSnpB0cTG91Ncu0Vcpr1vX3/NLGgP8BpgDbAQe\nAuZFxPquNlKDpA3AzIgo/QMhkv4C2A6s2D0UmqRvAVsj4vLiF+dBEfGVHultESMctr1DvdUaVv5z\nlPjatXO4+3YoY88/C3gmIp6NiDeBHwFnldBHz4uI1cDWIZPPApYXj5dT+eHpuhq99YSI6I+Ih4vH\n24Ddw8qX+tol+ipFGeGfDDxf9XwjJb4Awwjgbkm/kjS/7GaGMbFqWLQXgIllNjOMusO2d9OQYeV7\n5rVrZrj7dvMJv7ebHREzgFOBi4rD254UlfdsvXSt9jvANCpjOPYDV5bZTDGs/C3AlyLilepama/d\nMH2V8rqVEf5NwJSq5+8vpvWEiNhUfN8C3ErlbUov2bx7hOTi+5aS+3lLRGyOiF0RMQhcR4mvXTGs\n/C3ADyLix8Xk0l+74foq63UrI/wPAUdI+oCkfYFPA6tK6ONtJI0tTsQgaSxwEr039Pgq4Lzi8XnA\nbSX2sodeGba91rDylPza9dxw9xHR9S/gNCpn/H8LfLWMHmr0NQ14tPh6ouzegJVUDgP/SOXcyAXA\nu4F7gKeBu4EJPdTb96kM5f4YlaBNKqm32VQO6R8D1hZfp5X92iX6KuV188d7zTLlE35mmXL4zTLl\n8JtlyuE3y5TDb5Yph98sUw6/Wab+D/SyD4qiDGDaAAAAAElFTkSuQmCC\n", 128 | "text/plain": [ 129 | "" 130 | ] 131 | }, 132 | "metadata": {}, 133 | "output_type": "display_data" 134 | }, 135 | { 136 | "data": { 137 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAP8AAAEICAYAAACQ6CLfAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAEk1JREFUeJzt3XuwXXV5xvHvA0oEQiIXDZFwEQ3TUQcOEmL+YEqUSjFg\nE0tBIjVYnQZbsehYK2qA1MJYGaATbEUCpAaJSYCABEeLShB0qpYjRC6CEjFAwiEh3HICCpK8/WOv\n6M7x7N/eZ9/Wzvk9n5k9Z5/1rst7NnlY970UEZhZfnYpuwEzK4fDb5Yph98sUw6/WaYcfrNMOfxm\nmXL4u0DSfEnXlt1HsySFpDc3MN4hxbivamIZyWklPSBp+kjna7U5/G0gaUvVa5uk31b9fnqbl/U1\nSRe0c547g4h4a0T8YKTTSTpO0kOSXpR0u6SDO9DeTsnhb4OIGLv9BTwGvLdq2JJu9tLMWne0krQf\ncCNwLrAP0A8sL7WpHuLwd89ukq6RNFhswk7ZXpD0BkkrJD0l6TeS/mm4GUiaC5wO/EuxVXFLMXyt\npM9Iuhd4QdKrhm6qD91ikHSSpNWSnpP0v5IOb+SPkHSipHskbZb0uKT5w4z2YUlPSBqQ9M9V0+4i\n6RxJv5b0tKTrJO3T4HLXSvqL4v1USf1FDxskXVpjsr8GHoiI6yPid8B84AhJf9bIMkc7h797/gpY\nBrwWWAn8J1QCAdwC/Bw4ADgO+ISkvxw6g4hYCCwBLiq2Kt5bVZ4NnAi8NiJeSTUi6UhgEXAmsC9w\nBbBS0pgG/o4XgDnF33Ei8A+SZg0Z553AZOB44DPbQwt8HJgFHAu8AXgW+K8GljnUAmBBRIwD3gRc\nV2O8t1L5XAGIiBeANcXw7Dn83fOjiPh2RGwFvg4cUQw/GnhdRHwhIl6OiEeAK4HTRjj/yyLi8Yj4\nbQPjzgWuiIifRsTWiFgMvARMqzdhRPwgIu6LiG0RcS+wlEqYq/1rRLwQEfcB/03lf0wAHwU+HxHr\nIuIlKmviv2liV+X3wJsl7RcRWyLiJzXGGws8P2TYZmCvES5vVHL4u+fJqvcvAq8p/tEfDLyh2Px+\nTtJzwOeACSOc/+MjGPdg4FNDlnkglbVxkqR3FAfOnpL0PJVA75fo5dGq+R4M3FS1zAeBrYz8b/0I\ncBjwkKS7JJ1UY7wtwLghw8YDgyNc3qjk8JfvceA3EfHaqtdeETGjxvi1bsMcOvxFYI+q3/cfsswL\nhyxzj4hY2kC/36Cy23JgRIwHvgpoyDgHVr0/CHiiarnvGbLc10TE+gaW+wcR8XBEzAZeD3wJuEHS\nnsOM+gB/3MKiGOdNxfDsOfzl+z9gsDhgt7ukXSW9TdLRNcbfABzawHxXAx8o5ncCO26aXwl8tFiL\nS9KexYG8RjaH9wKeiYjfSZoKfGCYcc6VtIektwJ/xx+PsH8VuHD76TZJr5M0s4Fl7kDS30p6XURs\nA54rBm8bZtSbgLdJOlnSa4DzgZ9HxEMjXeZo5PCXrDgGcBLQB/wG2ARcRWXzdDhXA28pNp2/mZj1\n2cB7qYTjdOAP40ZEP/D3VA46PkvlINiHGmz5H4EvSBoEzmP4g213FPO8Dbg4Ir5bDF9AZavhu8X0\nPwHe0eByq50APCBpSzHP04Y71hERTwEnAxdS+TunMvJjKaOW/GUeZnnymt8sUw6/WaYcfrNMOfxm\nmerqTSCSfHTRrMMiYuh1F8Nqac0v6QRJv5S0RtI5rczLzLqr6VN9knYFfgW8G1gH3AXMjohfJKbx\nmt+sw7qx5p8KrImIRyLiZSp3rI34ai0zK0cr4T+AHW/gWFcM24GkucW91/0tLMvM2qzjB/yKe9AX\ngjf7zXpJK2v+9ex499akYpiZ7QRaCf9dwGRJb5S0G5UbJla2py0z67SmN/sj4hVJZwG3ArsCiyLC\n90mb7SS6elef9/nNOq8rF/mY2c7L4TfLlMNvlimH3yxTDr9Zphx+s0w5/GaZcvjNMuXwm2XK4TfL\nlMNvlimH3yxTDr9Zphx+s0w5/GaZcvjNMuXwm2XK4TfLlMNvlimH3yxTDr9Zprr6iG4bfY466qhk\n/ayzzqpZmzNnTnLaa665Jln/8pe/nKzffffdyXruvOY3y5TDb5Yph98sUw6/WaYcfrNMOfxmmXL4\nzTLlp/RaUl9fX7K+atWqZH3cuHHtbGcHzz//fLK+7777dmzZvazRp/S2dJGPpLXAILAVeCUiprQy\nPzPrnnZc4ffOiNjUhvmYWRd5n98sU62GP4DvS/qZpLnDjSBprqR+Sf0tLsvM2qjVzf5jImK9pNcD\n35P0UETcWT1CRCwEFoIP+Jn1kpbW/BGxvvi5EbgJmNqOpsys85oOv6Q9Je21/T1wPHB/uxozs85q\nZbN/AnCTpO3z+UZE/E9burKumTo1vbG2YsWKZH38+PHJeuo6ksHBweS0L7/8crJe7zz+tGnTatbq\n3etfb9mjQdPhj4hHgCPa2IuZdZFP9ZllyuE3y5TDb5Yph98sUw6/WaZ8S+8osMcee9Ssvf3tb09O\ne+211ybrkyZNStaLU701pf591TvddtFFFyXry5YtS9ZTvc2bNy857Re/+MVkvZc1ekuv1/xmmXL4\nzTLl8JtlyuE3y5TDb5Yph98sUw6/Wab8iO5R4IorrqhZmz17dhc7GZl61yCMHTs2Wb/jjjuS9enT\np9esHX744clpc+A1v1mmHH6zTDn8Zply+M0y5fCbZcrhN8uUw2+WKZ/n3wkcddRRyfqJJ55Ys1bv\nfvt66p1Lv+WWW5L1iy++uGbtiSeeSE57zz33JOvPPvtssv6ud72rZq3Vz2U08JrfLFMOv1mmHH6z\nTDn8Zply+M0y5fCbZcrhN8uUv7e/B/T19SXrq1atStbHjRvX9LK/853vJOv1vg/g2GOPTdZT981f\nddVVyWmfeuqpZL2erVu31qy9+OKLyWnr/V31njlQprZ9b7+kRZI2Srq/atg+kr4n6eHi596tNGtm\n3dfIZv/XgBOGDDsHuC0iJgO3Fb+b2U6kbvgj4k7gmSGDZwKLi/eLgVlt7svMOqzZa/snRMRA8f5J\nYEKtESXNBeY2uRwz65CWb+yJiEgdyIuIhcBC8AE/s17S7Km+DZImAhQ/N7avJTPrhmbDvxI4o3h/\nBnBze9oxs26pe55f0lJgOrAfsAE4H/gmcB1wEPAocGpEDD0oONy8stzsP+yww5L1888/P1k/7bTT\nkvVNmzbVrA0MDNSsAVxwwQXJ+g033JCs97LUef56/+6XL1+erJ9++ulN9dQNjZ7nr7vPHxG1rvI4\nbkQdmVlP8eW9Zply+M0y5fCbZcrhN8uUw2+WKX91dxuMGTMmWU99fTXAjBkzkvXBwcFkfc6cOTVr\n/f39yWl33333ZD1XBx10UNktdJzX/GaZcvjNMuXwm2XK4TfLlMNvlimH3yxTDr9Zpnyevw2OPPLI\nZL3eefx6Zs6cmazXe4y22XC85jfLlMNvlimH3yxTDr9Z
phx+s0w5/GaZcvjNMuXz/G1w6aWXJutS\n+puU652n93n85uyyS+1127Zt27rYSW/ymt8sUw6/WaYcfrNMOfxmmXL4zTLl8JtlyuE3y5TP8zfo\npJNOqlnr6+tLTlvvcdArV65sqidLS53Lr/ffZPXq1e1up+fUXfNLWiRpo6T7q4bNl7Re0uri1dq3\nVZhZ1zWy2f814IRhhv9HRPQVr2+3ty0z67S64Y+IO4FnutCLmXVRKwf8Pi7p3mK3YO9aI0maK6lf\nUvqhcWbWVc2G/3LgUKAPGAAuqTViRCyMiCkRMaXJZZlZBzQV/ojYEBFbI2IbcCUwtb1tmVmnNRV+\nSROrfn0fcH+tcc2sN9U9zy9pKTAd2E/SOuB8YLqkPiCAtcCZHeyxJ6SeY7/bbrslp924cWOyvnz5\n8qZ6Gu3GjBmTrM+fP7/pea9atSpZ/+xnP9v0vHcWdcMfEbOHGXx1B3oxsy7y5b1mmXL4zTLl8Jtl\nyuE3y5TDb5Yp39LbBS+99FKyPjAw0KVOeku9U3nz5s1L1j/96U8n6+vWratZu+SSmhelArBly5Zk\nfTTwmt8sUw6/WaYcfrNMOfxmmXL4zTLl8JtlyuE3y5TP83dBzl/Nnfpa83rn6d///vcn6zfffHOy\nfvLJJyfrufOa3yxTDr9Zphx+s0w5/GaZcvjNMuXwm2XK4TfLlM/zN0hSUzWAWbNmJetnn312Uz31\ngk9+8pPJ+rnnnluzNn78+OS0S5YsSdbnzJmTrFua1/xmmXL4zTLl8JtlyuE3y5TDb5Yph98sUw6/\nWaYaeUT3gcA1wAQqj+ReGBELJO0DLAcOofKY7lMj4tnOtVquiGiqBrD//vsn65dddlmyvmjRomT9\n6aefrlmbNm1actoPfvCDyfoRRxyRrE+aNClZf+yxx2rWbr311uS0X/nKV5J1a00ja/5XgE9FxFuA\nacDHJL0FOAe4LSImA7cVv5vZTqJu+CNiICLuLt4PAg8CBwAzgcXFaIuB9GVsZtZTRrTPL+kQ4Ejg\np8CEiNj+nKknqewWmNlOouFr+yWNBVYAn4iIzdXXs0dESBp2x1fSXGBuq42aWXs1tOaX9GoqwV8S\nETcWgzdImljUJwIbh5s2IhZGxJSImNKOhs2sPeqGX5VV/NXAgxFxaVVpJXBG8f4MIP1VqmbWU1Tv\nNJWkY4AfAvcB24rBn6Oy338dcBDwKJVTfc/UmVd6YT3slFNOqVlbunRpR5e9YcOGZH3z5s01a5Mn\nT253Ozv48Y9/nKzffvvtNWvnnXdeu9sxICLS95gX6u7zR8SPgFozO24kTZlZ7/AVfmaZcvjNMuXw\nm2XK4TfLlMNvlimH3yxTdc/zt3VhO/F5/tStq9dff31y2qOPPrqlZdf7avBW/humbgcGWLZsWbK+\nM3/t+GjV6Hl+r/nNMuXwm2XK4TfLlMNvlimH3yxTDr9Zphx+s0z5PH8bTJw4MVk/88wzk/V58+Yl\n662c51+wYEFy2ssvvzxZX7NmTbJuvcfn+c0syeE3y5TDb5Yph98sUw6/WaYcfrNMOfxmmfJ5frNR\nxuf5zSzJ4TfLlMNvlimH3yxTDr9Zphx+s0w5/GaZqht+SQdKul3SLyQ9IOnsYvh8SeslrS5eMzrf\nrpm1S92LfCRNBCZGxN2S9gJ+BswCTgW2RMTFDS/MF/mYdVyjF/m8qoEZDQADxftBSQ8CB7TWnpmV\nbUT7/JIOAY4EfloM+rikeyUtkrR3jWnmSuqX1N9Sp2bWVg1f2y9pLHAHcGFE3ChpArAJCODfqOwa\nfLjOPLzZb9ZhjW72NxR+Sa8GvgXcGhGXDlM/BPhWRLytznwcfrMOa9uNPap8dezVwIPVwS8OBG73\nPuD+kTZpZuVp5Gj/McAPgfuAbcXgzwGzgT4qm/1rgTOLg4OpeXnNb9Zhbd3sbxeH36zzfD+/mSU5\n/GaZcvjNMuXwm2XK4TfLlMNvlimH3yxTDr9Zphx+s0w5/GaZcvjNMuXwm2XK4TfLlMNvlqm6X+DZ\nZpuAR6t+368Y1ot6tbde7QvcW7Pa2dvBjY7Y1fv5/2ThUn9ETCmtgYRe7a1X+wL31qyyevNmv1mm\nHH6zTJUd/oUlLz+lV3vr1b7AvTWrlN5K3ec3s/KUveY3s5I4/GaZKiX8kk6Q9EtJaySdU0YPtUha\nK+m+4rHjpT5fsHgG4kZJ91cN20fS9yQ9XPwc9hmJJfXWE49tTzxWvtTPrtced9/1fX5JuwK/At4N\nrAPuAmZHxC+62kgNktYCUyKi9AtCJP05sAW4Zvuj0CRdBDwTEf9e/I9z74j4TI/0Np8RPra9Q73V\neqz8hyjxs2vn4+7boYw1/1RgTUQ8EhEvA8uAmSX00fMi4k7gmSGDZwKLi/eLqfzj6boavfWEiBiI\niLuL94PA9sfKl/rZJfoqRRnhPwB4vOr3dZT4AQwjgO9L+pmkuWU3M4wJVY9FexKYUGYzw6j72PZu\nGvJY+Z757Jp53H27+YDfnzomIvqA9wAfKzZve1JU9tl66Vzt5cChVJ7hOABcUmYzxWPlVwCfiIjN\n1bUyP7th+irlcysj/OuBA6t+n1QM6wkRsb74uRG4icpuSi/ZsP0JycXPjSX38wcRsSEitkbENuBK\nSvzsisfKrwCWRMSNxeDSP7vh+irrcysj/HcBkyW9UdJuwGnAyhL6+BOS9iwOxCBpT+B4eu/R4yuB\nM4r3ZwA3l9jLDnrlse21HitPyZ9dzz3uPiK6/gJmUDni/2vg82X0UKOvQ4GfF68Hyu4NWEplM/D3\nVI6NfATYF7gNeBj4PrBPD/X2dSqPcr+XStAmltTbMVQ26e8FVhevGWV/dom+SvncfHmvWaZ8wM8s\nUw6/WaYcfrNMOfxmmXL4zTLl8JtlyuE3y9T/A/k0FxVjH2hNAAAAAElFTkSuQmCC\n", 138 | "text/plain": [ 139 | "" 140 | ] 141 | }, 142 | "metadata": {}, 143 | "output_type": "display_data" 144 | }, 145 | { 146 | "data": { 147 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAP8AAAEICAYAAACQ6CLfAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAEQBJREFUeJzt3X2wVPV9x/H3B42ND6ighKJSxASdMQ7iDJJoHSWDsQS1\nmpnqSLCSxvGamhqdSac6ZprQWDuaMenYaU2CDxWVmDhFlGS0CTKJNlOlgCXIg4pBUJCHGKWCWgzw\n7R97sMv17tm9u2f37L2/z2tm5+6e33n43jP3c3/nYXd/igjMLD1Dyi7AzMrh8JslyuE3S5TDb5Yo\nh98sUQ6/WaIc/g6QNEvSg2XX0SxJIekTDcx3fDbvgU1sI3dZSaskTe7veq02h78AknZWPfZKeq/q\n9YyCt3WfpL8vcp0DQUR8MiJ+2ezykr6R/XM5t8CyBjSHvwARcdi+B/AqcGHVtLmdrKWZXnewk/Rx\n4BJgc9m1dBOHv3MOknS/pB3ZIezEfQ2SjpE0T9JvJb0i6at9rUBSDzAD+JvsqOIn2fT1km6QtAJ4\nR9KBvQ/Vex8xSLpA0nJJ2yX9p6TxjfwSks6X9N+S3pb0mqRZfcz2JUmvS9os6a+rlh0i6UZJv5H0\nO0kPSxre4HbX7+u1JU2StDSrYauk79ZZ/F+AG4D3G9lWKhz+zvlT4EfAkcAC4J+hEgjgJ8CvgWOB\nKcD1kv6k9woiYjYwF/h2dlRxYVXzdOB84MiI2J1XiKTTgHuBq4GjgB8ACyT9QQO/xzvAFdnvcT7w\nl5Iu7jXPZ4BxwHnADVWH2tcCFwPnAMcAb1EJZn/dAdwREYcDHwcerjWjpEuAXRHxeBPbGdQc/s75\nVUQ8HhF7gAeAU7PppwMjIuJbEfF+RKwD7gIu6+f6/ykiXouI9xqYtwf4QUQsjog9ETEH2AV8ut6C\nEfHLiHg+IvZGxArgISphrvZ3EfFORDwP/CuVf0wAXwa+HhEbI2IXMAv4syZOVX4PfELS0RGxMyKe\n7WsmSUOBfwCu6+f6k+Dwd86WqufvAh/N/ujHAMdkh9/bJW0HbgJG9nP9r/Vj3jHA13ptczSV3jiX\npE9J+kV2ivI/VAJ9dE4tG6rWOwaYX7XNNcAe+v+7XgmcCLwgaYmkC2rMNwt4ICLW93P9SXD4y/ca\n8EpEHFn1GBoR02rMX+tjmL2nvwscUvX6D3tt85Ze2zwkIh5qoN4fUjltGR0RRwDfB9RrntFVz/8I\neL1qu5/rtd2PRsSmBrb7gYhYGxHTgY8BtwH/JunQPmadAnxV0hZJW7K6HpZ0Q3+2N1g5/OX7L2BH\ndsHuYEkHSDpF0uk15t8KnNDAepcDX8jWN5X9D83vAr6c9eKSdGh2IW9oA+sdCrwZEf8raRLwhT7m\n+VtJh0j6JPAXwI+z6d8HbpE0BkDSCEkXNbDN/Ui6XNKIiNgLbM8m7+1j1inAKcCE7PE6lesczVxn\nGHQc/pJl1wAuoPLH+QrwBnA3cESNRe4BTs4OnR/NWfV1wIVUwjED+GDeiFgKXEXlouNbwMvAFxss\n+RrgW5J2AN+g74ttT2XrXATcHhE/z6bfQeWo4efZ8s8Cn2pwu9WmAqsk7czWeVlf1zoi4ncRsWXf\ng8opxlsRsbOJbQ468pd5mKXJPb9Zohx+s0Q5/GaJcvjNEtXRD4FI8tVFszaLiN7vu+hTSz2/pKmS\nXpT0sqQbW1mXmXVW07f6JB0AvAR8FtgILAGmR8TqnGXc85u1WSd6/knAyxGxLiLep/KJtX6/W8vM\nytFK+I9l/w9wbMym7UdST/bZ66UtbMvMCtb2C37ZZ9Bngw/7zbpJKz3/Jvb/9NZx2TQzGwBaCf8S\nYJyksZIOovLlEwuKKcvM2q3pw/6I2C3pr4CfAQcA90bEqsIqM7O26uin+nzOb9Z+HXmTj5kNXA6/\nWaIcfrNEOfxmiXL4zRLl8JslyuE3S5TDb5Yoh98sUQ6/WaIcfrNEOfxmiXL4zRLl8JslyuE3S5TD\nb5Yoh98sUQ6/WaIcfrNEOfxmiXL4zRLl8JslyuE3S5TDb5Yoh98sUQ6/WaIcfrNEOfxmiXL4zRLV\n9BDdZt1uypQpNdvmzp2bu+w555yT2/7iiy82VVM3aSn8ktYDO4A9wO6ImFhEUWbWfkX0/J+JiDcK\nWI+ZdZDP+c0S1Wr4A3hS0jJJPX3NIKlH0lJJS1vclpkVqNXD/rMiYpOkjwELJb0QEU9XzxARs4HZ\nAJKixe2ZWUFa6vkjYlP2cxswH5hURFFm1n5Nh1/SoZKG7nsOnAesLKowM2uvVg77RwLzJe1bzw8j\n4t8LqaoNzj777Nz2o446Krd9/vz5RZZjHXD66afXbFuyZEkHK+lOTYc/ItYBpxZYi5l1kG/1mSXK\n4TdLlMNvliiH3yxRDr9ZopL5SO/kyZNz28eNG5fb7lt93WfIkPy+a+zYsTXbxowZk7tsdgt7UHPP\nb5Yoh98sUQ6/WaIcfrNEOfxmiXL4zRLl8JslKpn7/FdccUVu+zPPPNOhSqwoo0aNym2/6qqrarY9\n+OCDucu+8MILTdU0kLjnN0uUw2+WKIffLFEOv1miHH6zRDn8Zoly+M0Slcx9/nqf/baB5+677256\n2bVr1xZYycDkRJglyuE3S5TDb5Yoh98sUQ6/WaIcfrNEOfxmiRo09/nHjx+f2z5y5MgOVWKdcsQR\nRzS97MKFCwusZGCq2/NLulfSNkkrq6YNl7RQ0trs57D2lmlmRWvksP8+YGqvaTcCiyJiHLAoe21m\nA0jd8EfE08CbvSZfBMzJns8BLi64LjNrs2bP+UdGxObs+Rag5gm1pB6gp8ntmFmbtHzBLyJCUuS0\nzwZmA+TNZ2ad1eytvq2SRgFkP7cVV5KZdUKz4V8AzMyezwQeK6YcM+uUuof9kh4CJgNHS9oIfBO4\nFXhY0pXABuDSdhbZiGnTpuW2H3zwwR2qxIpS770ZY8eObXrdmzZtanrZwaJu+CNieo2mKQXXYmYd\n5Lf3miXK4TdLlMNvliiH3yxRDr9ZogbNR3pPOumklpZftWpVQZVYUW6//fbc9nq3Al966aWabTt2\n7GiqpsHEPb9Zohx+s0Q5/GaJcvjNEuXwmyXK4TdLlMNvlqhBc5+/VUuWLCm7hAHp8MMPz22fOrX3\nd7/+v8svvzx32fPOO6+pmva5+eaba7Zt3769pXUPBu75zRLl8JslyuE3S5TDb5Yoh98sUQ6/WaIc\nfrNE+T5/Zvjw4aVt+9RTT81tl5Tbfu6559ZsO+6443KXPeigg3LbZ8yYkds+ZEh+//Hee+/VbFu8\neHHusrt27cptP/DA/D/fZcuW5banzj2/WaIcfrNEOfxmiXL4zRLl8JslyuE3S5TDb5YoRUTnNia1\nbWN33nlnbvvVV1+d217v892vvvpqv2tq1Pjx43Pb693n3717d822d999N3fZ1atX57bXuxe/dOnS\n3PannnqqZtvWrVtzl924cWNu+7Bhw3Lb672HYbCKiPw/
mEzdnl/SvZK2SVpZNW2WpE2SlmePaa0U\na2ad18hh/31AX1/H8o8RMSF7PF5sWWbWbnXDHxFPA292oBYz66BWLvhdK2lFdlpQ8+RLUo+kpZLy\nTw7NrKOaDf/3gBOACcBm4Du1ZoyI2RExMSImNrktM2uDpsIfEVsjYk9E7AXuAiYVW5aZtVtT4Zc0\nqurl54GVteY1s+5U9/P8kh4CJgNHS9oIfBOYLGkCEMB6IP8megdcc801ue0bNmzIbT/zzDOLLKdf\n6r2H4NFHH81tX7NmTc22Z599tqmaOqGnpye3fcSIEbnt69atK7Kc5NQNf0RM72PyPW2oxcw6yG/v\nNUuUw2+WKIffLFEOv1miHH6zRCXz1d233XZb2SVYL1OmTGlp+Xnz5hVUSZrc85slyuE3S5TDb5Yo\nh98sUQ6/WaIcfrNEOfxmiUrmPr8NPvPnzy+7hAHNPb9Zohx+s0Q5/GaJcvjNEuXwmyXK4TdLlMNv\nliiH3yxRDr9Zohx+s0Q5/GaJcvjNEuXwmyXK4TdLlMNvlqhGhugeDdwPjKQyJPfsiLhD0nDgx8Dx\nVIbpvjQi3mpfqZYaSbntJ554Ym57Nw9P3g0a6fl3A1+LiJOBTwNfkXQycCOwKCLGAYuy12Y2QNQN\nf0Rsjojnsuc7gDXAscBFwJxstjnAxe0q0syK169zfknHA6cBi4GREbE5a9pC5bTAzAaIhr/DT9Jh\nwDzg+oh4u/p8LCJCUtRYrgfoabVQMytWQz2/pI9QCf7ciHgkm7xV0qisfRSwra9lI2J2REyMiIlF\nFGxmxagbflW6+HuANRHx3aqmBcDM7PlM4LHiyzOzdmnksP+PgT8Hnpe0PJt2E3Ar8LCkK4ENwKXt\nKdFSFdHnmeQHhgzx21RaUTf8EfEroNYN19YGWDez0vhfp1miHH6zRDn8Zoly+M0S5fCbJcrhN0uU\nh+i2AeuMM87Ibb/vvvs6U8gA5Z7fLFEOv1miHH6zRDn8Zoly+M0S5fCbJcrhN0uU7/Nb16r31d3W\nGvf8Zoly+M0S5fCbJcrhN0uUw2+WKIffLFEOv1mifJ/fSvPEE0/ktl9yySUdqiRN7vnNEuXwmyXK\n4TdLlMNvliiH3yxRDr9Zohx+s0Sp3hjokkYD9wMjgQBmR8QdkmYBVwG/zWa9KSIer7Ou/I2ZWcsi\noqEvQmgk/KOAURHxnKShwDLgYuBSYGdE3N5oUQ6/Wfs1Gv667/CLiM3A5uz5DklrgGNbK8/Mytav\nc35JxwOnAYuzSddKWiHpXknDaizTI2mppKUtVWpmhap72P/BjNJhwFPALRHxiKSRwBtUrgPcTOXU\n4Et11uHDfrM2K+ycH0DSR4CfAj+LiO/20X488NOIOKXOehx+szZrNPx1D/tV+QrVe4A11cHPLgTu\n83lgZX+LNLPyNHK1/yzgP4Dngb3Z5JuA6cAEKof964Grs4uDeetyz2/WZoUe9hfF4Tdrv8IO+81s\ncHL4zRLl8JslyuE3S5TDb5Yoh98sUQ6/WaIcfrNEOfxmiXL4zRLl8JslyuE3S5TDb5Yoh98sUZ0e\novsNYEPV66Ozad2oW2vr1rrAtTWryNrGNDpjRz/P/6GNS0sjYmJpBeTo1tq6tS5wbc0qqzYf9psl\nyuE3S1TZ4Z9d8vbzdGtt3VoXuLZmlVJbqef8Zlaesnt+MyuJw2+WqFLCL2mqpBclvSzpxjJqqEXS\neknPS1pe9viC2RiI2yStrJo2XNJCSWuzn32OkVhSbbMkbcr23XJJ00qqbbSkX0haLWmVpOuy6aXu\nu5y6StlvHT/nl3QA8BLwWWAjsASYHhGrO1pIDZLWAxMjovQ3hEg6G9gJ3L9vKDRJ3wbejIhbs3+c\nwyLihi6pbRb9HLa9TbXVGlb+i5S474oc7r4IZfT8k4CXI2JdRLwP/Ai4qIQ6ul5EPA282WvyRcCc\n7PkcKn88HVejtq4QEZsj4rns+Q5g37Dype67nLpKUUb4jwVeq3q9kRJ3QB8CeFLSMkk9ZRfTh5FV\nw6JtAUaWWUwf6g7b3km9hpXvmn3XzHD3RfMFvw87KyImAJ8DvpId3nalqJyzddO92u8BJ1AZw3Ez\n8J0yi8mGlZ8HXB8Rb1e3lbnv+qirlP1WRvg3AaOrXh+XTesKEbEp+7kNmE/lNKWbbN03QnL2c1vJ\n9XwgIrZGxJ6I2AvcRYn7LhtWfh4wNyIeySaXvu/6qqus/VZG+JcA4ySNlXQQcBmwoIQ6PkTSodmF\nGCQdCpxH9w09vgCYmT2fCTxWYi376ZZh22sNK0/J+67rhruPiI4/gGlUrvj/Bvh6GTXUqOsE4NfZ\nY1XZtQEPUTkM/D2VayNXAkcBi4C1wJPA8C6q7QEqQ7mvoBK0USXVdhaVQ/oVwPLsMa3sfZdTVyn7\nzW/vNUuUL/iZJcrhN0uUw2+WKIffLFEOv1miHH6zRDn8Zon6P7b2eWhzcmVkAAAAAElFTkSuQmCC\n", 148 | "text/plain": [ 149 | "" 150 | ] 151 | }, 152 | "metadata": {}, 153 | "output_type": "display_data" 154 | }, 155 | { 156 | "data": { 157 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAP8AAAEICAYAAACQ6CLfAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAEGZJREFUeJzt3X+wVOV9x/H3BxNDRSsgBsEQiI39w2QMzhCSmTItGQlF\nxUJmOozEtNg4uaZNaZyJUxxjG0zrjI2JLTWa5FrlR2pInKCBZJwmyvijDqMVLUFUiAahcoOXWKWC\nsSFyv/1jD3a53j27d3fPnr33+bxm7tzd85x9ni87fO75sXvOo4jAzNIzpuwCzKwcDr9Zohx+s0Q5\n/GaJcvjNEuXwmyXK4e8ASSsl/WvZdTRLUkh6fwPrzcjWfUcTY+S+VtLTkuYOt1+rzeFvA0mHq34G\nJL1R9fzSNo+1RtLft7PPkSAiPhARDw7nNZJOlPR9SXuyPyxzi6luZHL42yAiTj72A/wXcHHVsjs7\nWUszW91R7hHgU8BLZRfSbRz+zjlR0jpJh7Jd2FnHGiRNlbRB0i8lvSDpr4bqQFIPcCnw19lexQ+z\n5XskrZC0HXhd0jsG76oP3mOQtFDSNkkHJW2RdG4j/whJF0n6T0mvSXpR0sohVvu0pF9I2i/pqqrX\njpF0taSfS/pvSXdJmtjguHskzcsez5a0NauhX9JNQ70mIo5ExD9FxCPA0UbGSYnD3zl/BHwXGA9s\nAr4OlUAAPwR+CpwJnA9cKekPB3cQEb3AncBXsr2Ki6ualwIXAeMj4s28QiSdB9wBXAGcBnwL2CTp\nXQ38O14H/jT7d1wE/LmkxYPW+RhwNjAfWHEstMByYDHwB8BU4FXglgbGHGwVsCoifhv4HeCuJvpI\nnsPfOY9ExL0RcRT4NvChbPmHgdMj4svZlmo3cBtwyTD7/+eIeDEi3mhg3R7gWxHxWEQcjYi1wK+B\nj9Z7YUQ8GBFPRcRARGwH1lMJc7XrIuL1iHgKWE3lDxPAZ4EvRsS+iPg1sBL44yYOVX4DvF/SpIg4\nHBGPDvP1hsPfSdXHnL8Cxmb/6acDU7Pd74OSDgLXAJOH2f+Lw1h3OvCFQWNOo7I1ziXpI5IeyA5R\n/odKoCfl1LK3qt/pwD1VYz5LZXd8uP/Wy4HfBXZKelzSwmG+3nD4u8GLwAsRMb7q55SIuLDG+rUu\nwxy8/FfASVXPzxg05vWDxjwpItY3UO93qBy2TIuIU4FvAhq0zrSqx+8FflE17gWDxh0bEX0NjPuW\niHguIpYC7wb+Afi+pHHD6cMc/m7wH8Ch7ITdb0k6QdIHJX24xvr9wFkN9LsN+GTW3wKO3zW/Dfhs\nthWXpHHZibxTGuj3FOCViPhfSbOBTw6xzt9IOknSB4A/A76XLf8mcL2k6QCSTpe0qIExjyPpU5JO\nj4gB4GC2eKDGuu+SNDZ7eqKksZIG/7FKksNfsuwcwEJgJvAC8DLwL8CpNV5yO3BOtuv8g5yuPw9c\nTCUclwJvrRsRW4HPUDnp+CrwPHBZgyX/BfBlSYeAv2Xok20PZX1uBr4aET/Jlq+istfwk+z1jwIf\naXDcaguApyUdzvq8JOdcxy7gDSonU3+cPZ7exJijjnwzD7M0ectvliiH3yxRDr9Zohx+s0R19CIQ\nST67aFawiGjoo8yWtvySFkjaJel5SVe30peZdVbTH/VJOgH4GfBxYB/wOLA0Ip7JeY23/GYF68SW\nfzbwfETsjogjVK5YG/a3tcysHK2E/0yOv4BjX7bsOJJ6smuvt7Ywlpm1WeEn/LJr0HvBu/1m3aSV\nLX8fx1+99Z5smZmNAK2E/3HgbEnvk3QilZtPbGpPWWZWtKZ3+yPiTUl/SeVKqROAOyLi6bZVZmaF\n6uhVfT7mNyteR77kY2Yjl8NvliiH3yxRDr9Zohx+s0Q5/GaJcvjNEuXwmyXK4TdLlMNvliiH3yxR\nDr9Zohx+s0Q5/GaJcvjNEuXwmyXK4TdLlMNvliiH3yxRDr9Zohx+s0Q5/GaJcvjNEuXwmyXK4TdL\nlMNvliiH3yxRDr9Zohx+s0Q1PUW3WdGuvfba3Pbrrrsut33MmNrbtrlz5+a+9qGHHsptHw1aCr+k\nPcAh4CjwZkTMakdRZla8dmz5PxYRL7ehHzPrIB/zmyWq1fAHcL+kJyT1DLWCpB5JWyVtbXEsM2uj\nVnf750REn6R3A/dJ2hkRD1evEBG9QC+ApGhxPDNrk5a2/BHRl/0+ANwDzG5HUWZWvKbDL2mcpFOO\nPQbmAzvaVZiZFauV3f7JwD2SjvXznYj4t7ZUZUm47LLLcttXrFiR2z4wMND02BE+Am06/BGxG/hQ\nG2sxsw7yR31miXL4zRLl8JslyuE3S5TDb5YoX9JrpZk+fXpu+9ixYztUSZq85TdLlMNvliiH3yxR\nDr9Zohx+s0Q5/GaJcvjNEuXP+a1Q8+bNq9m2fPnylvreuXNnbvvChQtrtvX397c09mjgLb9Zohx+\ns0Q5/GaJcvjNEuXwmyXK4TdLlMNvlih/zm8tmTNnTm776tWra7adeuqpLY1944035rbv3bu3pf5H\nO2/5zRLl8JslyuE3S5TDb5Yoh98sUQ6/WaIcfrNE+XN+a8myZcty26dOndp03w8++GBu+7p165ru\n2xrY8ku6Q9IBSTuqlk2UdJ+k57LfE4ot08zarZHd/jXAgkHLrgY2R8TZwObsuZmNIHXDHxEPA68M\nWrwIWJs9XgssbnNdZlawZo/5J0fE/uzxS8DkWitK6gF6mhzHzArS8gm/iAhJkdPeC/QC5K1nZp3V\n7Ed9/ZKmAGS/D7SvJDPrhGbDvwk49hnPMmBje8oxs05RRP6euKT1wFxgEtAPfAn4AXAX8F5gL7Ak\nIgafFByqL+/2jzCTJk3Kba93//uBgYGabQcPHsx97ZIlS3LbH3jggdz2VEWEGlmv7jF/RCyt0XT+\nsCoys67ir/eaJcrhN0uUw2+WKIffLFEOv1mifElv4mbMmJHbvmHDhsLGvvnmm3Pb/VFesbzlN0uU\nw2+WKIffLFEOv1miHH6zRDn8Zoly+M0S5c/5E7dgweB7sx7v3HPPban/zZs312xbtWpVS31ba7zl\nN0uUw2+WKIffLFEOv1miHH6zRDn8Zoly+M0SVffW3W0dzLfu7rjFi/OnUVyzZk1u+7hx43Lbt2zZ\nktued/vterf9tuY0eutub/nNEuXwmyXK4TdLlMNvliiH3yxRDr9Zohx+s0T5ev5RIO/e+0Xedx9g\n9+7due3+LL971d3yS7pD0gFJO6qWrZTUJ2lb9nNhsWWaWbs1stu/Bhjqdi//GBEzs59721uWmRWt\nbvgj4mHglQ7UYmYd1MoJv+WStmeHBRNqrSSpR9JWSVtbGMvM2qzZ8H8DOAuYCewHvlZrxYjojYhZ\nETGrybHMrABNhT8i+iPiaEQMALcBs9tblpkVranwS5pS9fQTwI5a65pZd6r7Ob+k9cBcYJKkfcCX\ngLmSZgIB7AGuKLBGq2PFihU12wYGBgod+4Ybbii0fytO
3fBHxNIhFt9eQC1m1kH+eq9Zohx+s0Q5\n/GaJcvjNEuXwmyXKl/SOADNnzsxtnz9/fmFjb9y4Mbd9165dhY1txfKW3yxRDr9Zohx+s0Q5/GaJ\ncvjNEuXwmyXK4TdLlKfoHgEOHDiQ2z5hQs27qNX16KOP5rZfcMEFue2HDx9uemwrhqfoNrNcDr9Z\nohx+s0Q5/GaJcvjNEuXwmyXK4TdLlK/nHwFOO+203PZWbs9966235rb7c/zRy1t+s0Q5/GaJcvjN\nEuXwmyXK4TdLlMNvliiH3yxRjUzRPQ1YB0ymMiV3b0SskjQR+B4wg8o03Usi4tXiSh29Vq9ends+\nZkxxf6O3bNlSWN/W3Rr5X/Um8IWIOAf4KPA5SecAVwObI+JsYHP23MxGiLrhj4j9EfFk9vgQ8Cxw\nJrAIWJutthZYXFSRZtZ+w9qflDQDOA94DJgcEfuzppeoHBaY2QjR8Hf7JZ0MbACujIjXpP+/TVhE\nRK3780nqAXpaLdTM2quhLb+kd1IJ/p0RcXe2uF/SlKx9CjDkXSYjojciZkXErHYUbGbtUTf8qmzi\nbweejYibqpo2Acuyx8uA/OlczayrNLLb/3vAnwBPSdqWLbsGuAG4S9LlwF5gSTEljnz1ptieN29e\nbnu9S3aPHDlSs+2WW27JfW1/f39uu41edcMfEY8Ate4Dfn57yzGzTvE3/MwS5fCbJcrhN0uUw2+W\nKIffLFEOv1mifOvuDhg/fnxu+xlnnNFS/319fTXbrrrqqpb6ttHLW36zRDn8Zoly+M0S5fCbJcrh\nN0uUw2+WKIffLFEOv1miHH6zRDn8Zoly+M0S5fCbJcrhN0uUw2+WKIffLFG+nr8Ddu7cmdteb5rs\nOXPmtLMcM8BbfrNkOfxmiXL4zRLl8JslyuE3S5TDb5Yoh98sUYqI/BWkacA6YDIQQG9ErJK0EvgM\n8Mts1Wsi4t46feUPZmYtiwg1sl4j4Z8CTImIJyWdAjwBLAaWAIcj4quNFuXwmxWv0fDX/YZfROwH\n9mePD0l6FjiztfLMrGzDOuaXNAM4D3gsW7Rc0nZJd0iaUOM1PZK2StraUqVm1lZ1d/vfWlE6GXgI\nuD4i7pY0GXiZynmAv6NyaPDpOn14t9+sYG075geQ9E7gR8CPI+KmIdpnAD+KiA/W6cfhNytYo+Gv\nu9svScDtwLPVwc9OBB7zCWDHcIs0s/I0crZ/DvDvwFPAQLb4GmApMJPKbv8e4Irs5GBeX97ymxWs\nrbv97eLwmxWvbbv9ZjY6OfxmiXL4zRLl8JslyuE3S5TDb5Yoh98sUQ6/WaIcfrNEOfxmiXL4zRLl\n8JslyuE3S5TDb5aoTk/R/TKwt+r5pGxZN+rW2rq1LnBtzWpnbdMbXbGj1/O/bXBpa0TMKq2AHN1a\nW7fWBa6tWWXV5t1+s0Q5/GaJKjv8vSWPn6dba+vWusC1NauU2ko95jez8pS95Tezkjj8ZokqJfyS\nFkjaJel5SVeXUUMtkvZIekrStrLnF8zmQDwgaUfVsomS7pP0XPZ7yDkSS6ptpaS+7L3bJunCkmqb\nJukBSc9IelrS57Plpb53OXWV8r51/Jhf0gnAz4CPA/uAx4GlEfFMRwupQdIeYFZElP6FEEm/DxwG\n1h2bCk3SV4BXIuKG7A/nhIhY0SW1rWSY07YXVFutaeUvo8T3rp3T3bdDGVv+2cDzEbE7Io4A3wUW\nlVBH14uIh4FXBi1eBKzNHq+l8p+n42rU1hUiYn9EPJk9PgQcm1a+1Pcup65SlBH+M4EXq57vo8Q3\nYAgB3C/pCUk9ZRczhMlV06K9BEwus5gh1J22vZMGTSvfNe9dM9Pdt5tP+L3dnIiYCVwAfC7bve1K\nUTlm66bPar8BnEVlDsf9wNfKLCabVn4DcGVEvFbdVuZ7N0RdpbxvZYS/D5hW9fw92bKuEBF92e8D\nwD1UDlO6Sf+xGZKz3wdKructEdEfEUcjYgC4jRLfu2xa+Q3AnRFxd7a49PduqLrKet/KCP/jwNmS\n3ifpROASYFMJdbyNpHHZiRgkjQPm031Tj28ClmWPlwEbS6zlON0ybXutaeUp+b3ruunuI6LjP8CF\nVM74/xz4Yhk11KjrLOCn2c/TZdcGrKeyG/gbKudGLgdOAzYDzwH3AxO7qLZvU5nKfTuVoE0pqbY5\nVHbptwPbsp8Ly37vcuoq5X3z13vNEuUTfmaJcvjNEuXwmyXK4TdLlMNvliiH3yxRDr9Zov4PJI5t\n+G9VwgkAAAAASUVORK5CYII=\n", 158 | "text/plain": [ 159 | "" 160 | ] 161 | }, 162 | "metadata": {}, 163 | "output_type": "display_data" 164 | }, 165 | { 166 | "data": { 167 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAP8AAAEICAYAAACQ6CLfAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAEddJREFUeJzt3XuwVfV5xvHvo0TxGiUgRYMalP6hjtWRYGbqiBmNGq9E\nW5Voq2iDsSmNMzrVIcbQqJ00k5gh05gE0IKRSDBoRMfRoBNv04aKliiKCV6wchG0SEWiIcLbP/bC\nHI5n//Y++7Y25/d8Zs6cc9a7Li8bHtbaa+21fooIzCw/O5XdgJmVw+E3y5TDb5Yph98sUw6/WaYc\nfrNMOfwdIGmqpDvK7qNRkkLSoXXMd3Ax76AGtpFcVtLzkk7o73qtOoe/BSS92+Nrq6T3evx+YYu3\nNUvSja1c544gIg6PiEf7u5ykv5P0UvF38aCk/dvQ3g7J4W+BiNhz2xfwP8CZPabN6WQvjex1B6ri\nSOFfgLOBIcCrwJ1l9tRNHP7O2UXS7ZI2FoewY7YVJO0vab6kNyW9Kukf+1qBpEnAhcA/FXuy+4rp\nKyRdI+lZYJOkQb0P1XsfMUg6Q9ISSRsk/YekI+v5Q0g6XdJ/S3pH0uuSpvYx26WSVktaI+nqHsvu\nJOlaSS9L+l9J8yQNqXO7KySdVPw8VtLiooe1km6ustgZwM8j4vmI2AzcABwv6ZB6tjnQOfydcxYw\nF9gHWAD8G1QCAdwH/AY4ADgRuFLSKb1XEBHTgTnAt4ujijN7lCcApwP7RMQHqUYkHQ3cBlwOfAL4\nMbBA0q51/Dk2AX9b/DlOB66QNL7XPJ8FRgMnA9dsCy0wGRgPjAP2B94GflDHNnubBkyLiL2BQ4B5\ndS6n4vsRDWxzwHH4O+fJiHggIrYAPwH+opj+aWBYRHwzIjZHxCvADOCCfq7/+xHxekS8V8e8k4Af\nR8SiiNgSEbOBPwCfqbVgRDwaEc9FxNaIeJbKYfS4XrP9c0RsiojngH+n8h8TwJeBr0XEyoj4AzAV\n+KsG3qr8EThU0tCIeDcifl1lvgeBv5Z0pKTdgOuBAHbv5/YGJIe/c97o8fPvgcHFP/qDgP2Lw+8N\nkjYAU4Dh/Vz/6/2Y9yDgql7bHEllb5wk6VhJvyreovwflUAPTfTyWo/1HgTc02Oby4At9P/Pehnw\n58CLkp6SdEZfM0XEw1T+g5kPrCi+NgIr+7m9AcnhL9/rwKsRsU+Pr70i4rQq81e7DbP39N+z/R7u\nz3pt86Ze29w9Iuo5GfZTKm9bRkbEx4Ef8afD6W1G9vj5QGB1j+1+vtd2B0fEqjq2+6GIWB4RE4D9\ngH8Ffi5pjyrz/iAiRkfEcCr/CQwClvZnewOVw1++/wI2FifsdpO0s6QjJH26yvxrgVF1rHcJ8MVi\nfaey/aH5DODLxV5ckvYoTuTtVcd69wLWR8T7ksYCX+xjnq9L2l3S4cBE4GfF9B8BN0k6CEDSMEln\n17HN7Ui6SNKwiNgKbCgmb+1jvsHFaylJBwLTqZwreLu/2xyIHP6SFecAzgCOonIp6i1gJvDxKovc\nChxWHDr/IrHqrwJnUgnHhcCH80bEYuBLVE46vg28BFxSZ8t/D3xT0kYq76H7Otn2WLHOR4DvRMQv\ni+nTqBw1/LJY/tfAsXVut6dTgeclvVus84Iq5zoGUzlSeZfKf7L/CXy9ge0NSPLDPMzy5D2/WaYc\nfrNMOfxmmXL4zTLV0ZtAJPnsolmbRUTvz130qak9v6RTJf22uGXy2mbWZWad1fClPkk7A78DPkfl\n45JPARMi4oXEMt7zm7VZJ/b8Y4GXIuKV4nbJuVTumzazHUAz4T+A7W/gWFlM246kScW914ub2JaZ\ntVjbT/gV96BPBx/2m3WTZvb8q9j+7q1PFtPMbAfQTPifAkZL+pSkXag8fGJBa9oys3Zr+LA/Ij6Q\n9A/AQ8DOwG0R8XzLOjOzturoXX1+z2/Wfh35kI+Z7bgcfrNMOfxmmXL4zTLl8JtlyuE3y5TDb5Yp\nh98sUw6/WaYcfrNMOfxmmXL4zTLl8JtlyuE3y5TDb5Yph98sUw6/WaYcfrNMOfxmmXL4zTLl8Jtl\nyuE3y5TDb5Yph98sUw6/WaYcfrNMOfxmmXL4zTLl8JtlyuE3y9SgZhaWtALYCGwBPoiIMa1oysza\nr6nwFz4bEW+1YD1m1kE+7DfLVLPhD+BhSU9LmtTXDJImSVosaXGT2zKzFlJENL6wdEBErJK0H7AQ\nmBwRjyfmb3xjZlaXiFA98zW154+IVcX3dcA9wNhm1mdmndNw+CXtIWmvbT8DJwNLW9WYmbVXM2f7\nhwP3SNq2np9GxIMt6crM2q6p9/z93pjf85u1XUfe85vZjsvhN8uUw2+WKYffLFMOv1mmWnFjj3Wx\nY489Nlm/6KKLkvVx48Yl64cffni/e9rm6quvTtZXr16drB933HHJ+h133FG1tmjRouSyOfCe3yxT\nDr9Zphx+s0w5/GaZcvjNMuXwm2XK4TfLlO/qGwDOP//8qrVp06Yllx06dGiyXtyyXdWjjz6arA8b\nNqxq7bDDDksuW0ut3u66666qtQsuuKCpbXcz39VnZkkOv1mmHH6zTDn8Zply+M0y5fCbZcrhN8uU\n7+fvAoMGpf8axoxJD348Y8aMqrXdd989uezjj1cdYAmAG264IVl/8sknk/Vdd921am3evHnJZU8+\n+eRkvZbFiz1CXIr3/GaZcvjNMuXwm2XK4TfLlMNvlimH3yxTDr9ZpnydvwvUenb+zJkzG173woUL\nk/XUswAA3nnnnYa3XWv9zV7HX7lyZbI+e/bsptY/0NXc80u6TdI6SUt7TBsiaaGk5cX3fdvbppm1\nWj2H/bOAU3tNuxZ4JCJGA48Uv5vZDqRm+CPicWB9r8lnA9uOqWYD41vcl5m1WaPv+YdHxJri5zeA\n4dVmlDQJmNTgdsysTZo+4RcRkXowZ0RMB6aDH+Bp1k0avdS3VtIIgOL7uta1ZGad0Gj4FwAXFz9f\nDNzbmnbMrFNqPrdf0p3ACcBQYC3wDeAXwDzgQOA14LyI6H1SsK91ZXnYX+ue+ClTpiTrtf6Obrnl\nlqq16667Lrlss9fxa1m2bFnV2ujRo5ta97nnnpus33tvnvukep/bX/M9f0RMqFI6sV8dmVlX8cd7\nzTLl8JtlyuE3y5TDb5Yph98sU76ltwWuv/76ZL3WpbzNmzcn6w899FCyfs0111Stvffee8llaxk8\neHCyXuu23AMPPLBqrdYQ2zfeeGOynuulvFbxnt8sUw6/WaYcfrNMOfxmmXL4zTLl8JtlyuE3y1TN\nW3pburEd+JbeffbZp2rtxRdfTC47dOjQZP3+++9P1sePb98jEg899NBkfc6cOcn6Mccc0/C258+f\nn6xfeumlyfqmTZsa3vZAVu8tvd7zm2XK4TfLlMNvlimH3yxTDr9Zphx+s0w5/GaZ8nX+Ou23335V\na6tXr25q3aNGjUrW33///WR94sSJVWtnnXVWctkjjjgi
Wd9zzz2T9Vr/flL1c845J7nsfffdl6xb\n33yd38ySHH6zTDn8Zply+M0y5fCbZcrhN8uUw2+WKV/nr1Pqfv7UMNQAw4YNS9ZrPb++nX9HtT6j\nUKu3ESNGJOtvvvlmw8taY1p2nV/SbZLWSVraY9pUSaskLSm+TmumWTPrvHoO+2cBp/Yx/XsRcVTx\n9UBr2zKzdqsZ/oh4HFjfgV7MrIOaOeE3WdKzxduCfavNJGmSpMWSFjexLTNrsUbD/0NgFHAUsAb4\nbrUZI2J6RIyJiDENbsvM2qCh8EfE2ojYEhFbgRnA2Na2ZWbt1lD4JfW8RvMFYGm1ec2sOw2qNYOk\nO4ETgKGSVgLfAE6QdBQQwArg8jb22BU2bNhQtVbrufq1nss/ZMiQZP3ll19O1lPj1M+aNSu57Pr1\n6XO5c+fOTdZrXauvtbyVp2b4I2JCH5NvbUMvZtZB/nivWaYcfrNMOfxmmXL4zTLl8JtlqubZfqtt\n0aJFyXqtW3rLdPzxxyfr48aNS9a3bt2arL/yyiv97sk6w3t+s0w5/GaZcvjNMuXwm2XK4TfLlMNv\nlimH3yxTvs6fud122y1Zr3Udv9ZjxX1Lb/fynt8sUw6/WaYcfrNMOfxmmXL4zTLl8JtlyuE3y5SH\n6LakLVu2JOu1/v2kHu2dGr7bGteyIbrNbGBy+M0y5fCbZcrhN8uUw2+WKYffLFMOv1mm6hmieyRw\nOzCcypDc0yNimqQhwM+Ag6kM031eRLzdvlatHU455ZSyW7CS1LPn/wC4KiIOAz4DfEXSYcC1wCMR\nMRp4pPjdzHYQNcMfEWsi4pni543AMuAA4GxgdjHbbGB8u5o0s9br13t+SQcDRwOLgOERsaYovUHl\nbYGZ7SDqfoafpD2B+cCVEfGO9KePD0dEVPvcvqRJwKRmGzWz1qprzy/pY1SCPyci7i4mr5U0oqiP\nANb1tWxETI+IMRExphUNm1lr1Ay/Krv4W4FlEXFzj9IC4OLi54uBe1vfnpm1Sz2H/X8J/A3wnKQl\nxbQpwLeAeZIuA14DzmtPi9ZOo0aNKrsFK0nN8EfEk0C1+4NPbG07ZtYp/oSfWaYcfrNMOfxmmXL4\nzTLl8JtlyuE3y5SH6M7cE088kazvtFN6/1BrCG/rXt7zm2XK4TfLlMNvlimH3yxTDr9Zphx+s0w5\n/GaZ8nX+zC1dujRZX758ebJe63kAhxxySNWah+gul/f8Zply+M0y5fCbZcrhN8uUw2+WKYffLFMO\nv1mmFNHnKFvt2ViVIb2se11yySXJ+syZM5P1xx57rGpt8uTJyWVfeOGFZN36FhHVHrW/He/5zTLl\n8JtlyuE3y5TDb5Yph98sUw6/WaYcfrNM1bzOL2kkcDswHAhgekRMkzQV+BKw7absKRHxQI11+Tr/\nDmbvvfdO1ufNm5esn3TSSVVrd999d3LZiRMnJuubNm1K1nNV73X+eh7m8QFwVUQ8I2kv4GlJC4va\n9yLiO402aWblqRn+iFgDrCl+3ihpGXBAuxszs/bq13t+SQcDRwOLikmTJT0r6TZJ+1ZZZpKkxZIW\nN9WpmbVU3eGXtCcwH7gyIt4BfgiMAo6icmTw3b6Wi4jpETEmIsa0oF8za5G6wi/pY1SCPyci7gaI\niLURsSUitgIzgLHta9PMWq1m+CUJuBVYFhE395g+osdsXwDSj4E1s65Sz6W+44AngOeAbeMxTwEm\nUDnkD2AFcHlxcjC1Ll/qG2BqXQq86aabqtauuOKK5LJHHnlksu5bfvvWskt9EfEk0NfKktf0zay7\n+RN+Zply+M0y5fCbZcrhN8uUw2+WKYffLFN+dLfZAONHd5tZksNvlimH3yxTDr9Zphx+s0w5/GaZ\ncvjNMlXP03tb6S3gtR6/Dy2mdaNu7a1b+wL31qhW9nZQvTN29EM+H9m4tLhbn+3Xrb11a1/g3hpV\nVm8+7DfLlMNvlqmywz+95O2ndGtv3doXuLdGldJbqe/5zaw8Ze/5zawkDr9ZpkoJv6RTJf1W0kuS\nri2jh2okrZD0nKQlZY8vWIyBuE7S0h7ThkhaKGl58b3PMRJL6m2qpFXFa7dE0mkl9TZS0q8kvSDp\neUlfLaaX+tol+irldev4e35JOwO/Az4HrASeAiZERFeMwCBpBTAmIkr/QIik44F3gdsj4ohi2reB\n9RHxreI/zn0j4pou6W0q8G7Zw7YXo0mN6DmsPDAeuIQSX7tEX+dRwutWxp5/LPBSRLwSEZuBucDZ\nJfTR9SLicWB9r8lnA7OLn2dT+cfTcVV66woRsSYinil+3ghsG1a+1Ncu0Vcpygj/AcDrPX5fSYkv\nQB8CeFjS05Imld1MH4b3GBbtDWB4mc30oeaw7Z3Ua1j5rnntGhnuvtV8wu+jjouIo4DPA18pDm+7\nUlTes3XTtdq6hm3vlD6Glf9Qma9do8Pdt1oZ4V8FjOzx+yeLaV0hIlYV39cB99B9Q4+v3TZCcvF9\nXcn9fKibhm3va1h5uuC166bh7ssI/1PAaEmfkrQLcAGwoIQ+PkLSHsWJGCTtAZxM9w09vgC4uPj5\nYuDeEnvZTrcM215tWHlKfu26brj7iOj4F3AalTP+LwNfK6OHKn2NAn5TfD1fdm/AnVQOA/9I5dzI\nZcAngEeA5cDDwJAu6u0nVIZyf5ZK0EaU1NtxVA7pnwWWFF+nlf3aJfoq5XXzx3vNMuUTfmaZcvjN\nMuXwm2XK4TfLlMNvlimH3yxTDr9Zpv4fb4v8973Rs5MAAAAASUVORK5CYII=\n", 168 | "text/plain": [ 169 | "" 170 | ] 171 | }, 172 | "metadata": {}, 173 | "output_type": "display_data" 174 | } 175 | ], 176 | "source": [ 177 | "# load dataset\n", 178 | "(x_train, y_train), (x_test, y_test) = mnist.load_data()\n", 179 | "\n", 180 | "print(x_train.shape[0], 'Number of train samples')\n", 181 | "print(x_test.shape[0], 'Number of test samples')\n", 182 | "print('x_train shape:', x_train.shape)\n", 183 | "\n", 184 | "# plot images as gray scale\n", 185 | "for image_id in range(0, 5):\n", 186 | " plt.imshow(x_train[image_id], cmap=plt.get_cmap('gray'))\n", 187 | " plt.title(\"The true label is %s\" % str(y_train[image_id]))\n", 188 | " plt.show()\n", 189 | " \n", 190 | "# This makes sure the image 
has the correct order in the axis for Tensorflow, it would be different for Theano backend\n", 191 | "x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)\n", 192 | "x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)\n", 193 | "\n", 194 | "# Convert values to floats, originally they are integers\n", 195 | "x_train = x_train.astype('float32')\n", 196 | "x_test = x_test.astype('float32')\n", 197 | "\n", 198 | "# Convert values of labels from 0 to 9 to categorical (one_hot encoding)\n", 199 | "y_train = keras.utils.to_categorical(y_train, 10)\n", 200 | "y_test = keras.utils.to_categorical(y_test, 10)" 201 | ] 202 | }, 203 | { 204 | "cell_type": "code", 205 | "execution_count": null, 206 | "metadata": { 207 | "collapsed": true 208 | }, 209 | "outputs": [], 210 | "source": [ 211 | "print(x_train.shape, y_train.shape)" 212 | ] 213 | }, 214 | { 215 | "cell_type": "markdown", 216 | "metadata": {}, 217 | "source": [ 218 | "## Why use Keras?\n", 219 | "\n", 220 | "\"Keras is a high-level neural networks API, written in Python and capable of running on top of TensorFlow, CNTK, or Theano. \n", 221 | "It was developed with a focus on enabling fast experimentation. Being able to go from idea to result with the least possible delay is key to doing good research.\n", 222 | "\n", 223 | "Use Keras if you need a deep learning library that:\n", 224 | "\n", 225 | "- Allows for easy and fast prototyping (through user friendliness, modularity, and extensibility).\n", 226 | "\n", 227 | "- Supports both convolutional networks and recurrent networks, as well as combinations of the two.\n", 228 | "\n", 229 | "- Runs seamlessly on CPU and GPU.\" (Chollet, F.)\n", 230 | "\n", 231 | "More information at https://keras.io. The descriptions of the functions and the documentation shown here are also taken from this website.\n", 232 | "\n", 233 | "In summary, Keras makes your life much easier if you don't need to work at the level of granularity that TensorFlow exposes." 234 | ] 235 | }, 236 | { 237 | "cell_type": "markdown", 238 | "metadata": {}, 239 | "source": [ 240 | "## How does it work?\n", 241 | "\n" 242 | ] 243 | }, 244 | { 245 | "cell_type": "markdown", 246 | "metadata": {}, 247 | "source": [ 248 | "To train a model we have 3 main steps:\n", 249 | "\n", 250 | " - Define your architecture (number of layers, type of layers, activations, etc)\n", 251 | " \n", 252 | " - Compile your model (Define optimizer, callbacks, etc)\n", 253 | " \n", 254 | " - Train your model (Fit model to your data)" 255 | ] 256 | }, 257 | { 258 | "cell_type": "markdown", 259 | "metadata": {}, 260 | "source": [ 261 | "### Define your architecture" 262 | ] 263 | }, 264 | { 265 | "cell_type": "code", 266 | "execution_count": 4, 267 | "metadata": { 268 | "collapsed": true 269 | }, 270 | "outputs": [], 271 | "source": [ 272 | "from keras.models import Sequential\n", 273 | "from keras.layers import Dense, Flatten, Dropout, Activation\n", 274 | "\n", 275 | "model = Sequential()\n", 276 | "model.add(Flatten(input_shape=(28, 28, 1))) # Images are a 3D matrix, we have to flatten them to be 1D\n", 277 | "model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n", 278 | "model.add(Dropout(0.5)) # drop a unit with 50% probability.\n", 279 | "model.add(Dense(150, kernel_initializer='orthogonal'))\n", 280 | "model.add(Activation('sigmoid'))\n", 281 | "model.add(Dense(10, kernel_initializer='normal', activation='softmax')) # last layer, this has a softmax to do the classification" 282 | ] 283 | }, 284 | { 285 | "cell_type": "markdown", 286 | "metadata": {}, 287 | "source": 
[ 288 | "The sequential model is one layer after the other, no fancy connections. You can also use the Functional API if you want to have connections which skip layers and so on.\n", 289 | "\n", 290 | "To add a layer all we have to do is call the add method and then add a layer object.\n", 291 | "\n", 292 | "The first layer needs to have the input shape specified. After this the sizes are inferred automatically except for the output layer.\n", 293 | "\n", 294 | "We need to use the flatten layer to flatten the input since it is an image, which in this case is a 3 dimensional matrix. It has height, width and depth. In the case of a grayscale image like the ones from MNIST it has 1 dimensional depth. If it is a color image it has 3 layers of depth (Red, Green, Blue).\n", 295 | "\n", 296 | "A dense layer is a layer in which all units are connected to all units in the next layer. This is the most usual type of layer. You can specify things like the number of units and how the weights in the units are initialized (kernel_initializer). You can also specify an activation function, by default a linear function is used (No activation).\n", 297 | "\n", 298 | "If you want to use dropout in your model you can just add it as an extra layer in between layers or activations. Same thing for batch normalization.\n", 299 | "\n", 300 | "At the end we create a layer which will help us with the classification. For this we use a softmax layer in which the number of units will match the number of classes we have in our data.\n", 301 | "\n" 302 | ] 303 | }, 304 | { 305 | "cell_type": "markdown", 306 | "metadata": {}, 307 | "source": [ 308 | "#### Dense Layer\n", 309 | "\n", 310 | "keras.layers.Dense(units, activation=None, use_bias=True, \n", 311 | " kernel_initializer='glorot_uniform', bias_initializer='zeros', \n", 312 | " kernel_regularizer=None, bias_regularizer=None, \n", 313 | " activity_regularizer=None, \n", 314 | " kernel_constraint=None, bias_constraint=None)\n", 315 | " \n", 316 | "Arguments\n", 317 | "\n", 318 | "- units: Positive integer, dimensionality of the output space.\n", 319 | "- activation: Activation function to use (see activations). If you don't specify anything, no activation is applied (ie. \"linear\" activation: a(x) = x).\n", 320 | "- use_bias: Boolean, whether the layer uses a bias vector.\n", 321 | "- kernel_initializer: Initializer for the kernel weights matrix.\n", 322 | "- bias_initializer: Initializer for the bias vector.\n", 323 | "- kernel_regularizer: Regularizer function applied to the kernel weights matrix. (L1, L2)\n", 324 | "- bias_regularizer: Regularizer function applied to the bias vector.\n", 325 | "- activity_regularizer: Regularizer function applied to the output of the layer (its \"activation\").\n", 326 | "- kernel_constraint: Constraint function applied to the kernel weights matrix. (non-negative, etc)\n", 327 | "- bias_constraint: Constraint function applied to the bias vector." 328 | ] 329 | }, 330 | { 331 | "cell_type": "markdown", 332 | "metadata": {}, 333 | "source": [ 334 | "### Compile the model\n", 335 | "\n", 336 | "Here we specify the loss, in our case categorical crossentropy. We can add an extra metric we want to measure, like accuracy\n", 337 | "\n", 338 | "We also specify the optimizer. 
Some examples are Adam and SGD" 339 | ] 340 | }, 341 | { 342 | "cell_type": "code", 343 | "execution_count": null, 344 | "metadata": { 345 | "collapsed": true 346 | }, 347 | "outputs": [], 348 | "source": [ 349 | "sgd = keras.optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9)\n", 350 | "\n", 351 | "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])" 352 | ] 353 | }, 354 | { 355 | "cell_type": "markdown", 356 | "metadata": {}, 357 | "source": [ 358 | "#### Compile function\n", 359 | "\n", 360 | "Arguments\n", 361 | "\n", 362 | "- optimizer: String (name of optimizer) or optimizer object. (SGD, RMSprop, Adam, Adagrad, etc)\n", 363 | "- loss: String (name of objective function) or objective function. You can also have multiple loss functions\n", 364 | "- metrics: List of metrics to be evaluated by the model during training and testing. \n", 365 | "- sample_weight_mode: If you need to do timestep-wise sample weighting (2D weights), set this to \"temporal\". None defaults to sample-wise weights (1D).\n", 366 | "- weighted_metrics: List of metrics to be evaluated and weighted by sample_weight or class_weight during training and testing." 367 | ] 368 | }, 369 | { 370 | "cell_type": "markdown", 371 | "metadata": {}, 372 | "source": [ 373 | "### Callbacks\n", 374 | "\n", 375 | "A callback is a set of functions to be applied at given stages of the training procedure. You can use callbacks to get a view on internal states and statistics of the model during training.\n", 376 | "\n", 377 | "Some examples:" 378 | ] 379 | }, 380 | { 381 | "cell_type": "code", 382 | "execution_count": null, 383 | "metadata": { 384 | "collapsed": true 385 | }, 386 | "outputs": [], 387 | "source": [ 388 | "from keras.callbacks import EarlyStopping\n", 389 | "\n", 390 | "early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto')\n", 391 | "my_callbacks = [early_stopping]" 392 | ] 393 | }, 394 | { 395 | "cell_type": "markdown", 396 | "metadata": {}, 397 | "source": [ 398 | "### Train the model" 399 | ] 400 | }, 401 | { 402 | "cell_type": "code", 403 | "execution_count": null, 404 | "metadata": { 405 | "collapsed": true 406 | }, 407 | "outputs": [], 408 | "source": [ 409 | "history = model.fit(x_train, y_train, validation_split=0.2, epochs=10, batch_size=100, verbose=1, callbacks=my_callbacks)" 410 | ] 411 | }, 412 | { 413 | "cell_type": "markdown", 414 | "metadata": {}, 415 | "source": [ 416 | "#### Fit function\n", 417 | "\n", 418 | "Arguments\n", 419 | "\n", 420 | "- x: Numpy array of training data. \n", 421 | "- y: Numpy array of target (label) data.\n", 422 | "- batch_size: Integer or None. Number of samples per gradient update. If unspecified, it will default to 32.\n", 423 | "- epochs: Integer. Number of epochs to train the model. An epoch is an iteration over the entire x and y data provided.\n", 424 | "- verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch.\n", 425 | "- callbacks: List of keras.callbacks.Callback instances. List of callbacks to apply during training.\n", 426 | "- validation_split: Float between 0 and 1. 
Fraction of the training data to be used as validation data.\n", 427 | "- validation_data: tuple (x_val, y_val) or tuple (x_val, y_val, val_sample_weights) on which to evaluate the loss and any model metrics at the end of each epoch.\n", 428 | "- shuffle: Boolean (whether to shuffle the training data before each epoch)\n", 429 | "- class_weight: Optional dictionary mapping class indices (integers) to a weight (float) value, used for weighting the loss function (during training only).\n", 430 | "- sample_weight: Optional Numpy array of weights for the training samples, used for weighting the loss function (during training only).\n", 431 | "- initial_epoch: Epoch at which to start training (useful for resuming a previous training run).\n", 432 | "- steps_per_epoch: Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch. When training with input tensors such as TensorFlow data tensors, the default None is equal to the number of samples in your dataset divided by the batch size, or 1 if that cannot be determined.\n", 433 | "- validation_steps: Only relevant if steps_per_epoch is specified. Total number of steps (batches of samples) to validate before stopping." 434 | ] 435 | }, 436 | { 437 | "cell_type": "markdown", 438 | "metadata": {}, 439 | "source": [ 440 | "### History object\n", 441 | "\n", 442 | "Fit function returns a history object. This object stores information about your model while it was training. For example if you would like to plot the training loss and validation loss across the number of epochs you could do this:" 443 | ] 444 | }, 445 | { 446 | "cell_type": "code", 447 | "execution_count": null, 448 | "metadata": { 449 | "collapsed": true 450 | }, 451 | "outputs": [], 452 | "source": [ 453 | "print(\"Keys for history object\", history.history.keys())\n", 454 | "\n", 455 | "train_loss = history.history['loss']\n", 456 | "valid_loss = history.history['val_loss']\n", 457 | "epochs = list(range(1, len(train_loss)+1))\n", 458 | "\n", 459 | "plt.plot(epochs, train_loss, label=\"train loss\")\n", 460 | "plt.plot(epochs, valid_loss, label=\"validation loss\")\n", 461 | "plt.xlabel('Epochs')\n", 462 | "plt.ylabel('Loss')\n", 463 | "plt.legend(loc='upper right')\n", 464 | "plt.show()" 465 | ] 466 | }, 467 | { 468 | "cell_type": "code", 469 | "execution_count": null, 470 | "metadata": { 471 | "collapsed": true 472 | }, 473 | "outputs": [], 474 | "source": [ 475 | "train_accuracy = history.history['acc']\n", 476 | "valid_accuracy = history.history['val_acc']\n", 477 | "\n", 478 | "epochs = list(range(1, len(train_accuracy)+1))\n", 479 | "\n", 480 | "plt.plot(epochs, train_accuracy, label=\"train accuracy\")\n", 481 | "plt.plot(epochs, valid_accuracy, label=\"validation accuracy\")\n", 482 | "\n", 483 | "plt.xlabel('Epochs')\n", 484 | "plt.ylabel('Accuracy')\n", 485 | "plt.legend(loc='lower right')\n", 486 | "plt.show()" 487 | ] 488 | }, 489 | { 490 | "cell_type": "markdown", 491 | "metadata": {}, 492 | "source": [ 493 | "### Making predictions" 494 | ] 495 | }, 496 | { 497 | "cell_type": "code", 498 | "execution_count": null, 499 | "metadata": { 500 | "collapsed": true 501 | }, 502 | "outputs": [], 503 | "source": [ 504 | "predictions = model.predict(x_test)\n", 505 | "\n", 506 | "print(predictions.shape)\n", 507 | "print(np.argmax(predictions, axis=1)[0:10])\n", 508 | "print(np.argmax(y_test, axis=1)[0:10])" 509 | ] 510 | }, 511 | { 512 | "cell_type": "markdown", 513 | "metadata": {}, 514 | "source": [ 515 | "Arguments\n", 516 | "\n", 
517 | "- x: the input data, as a Numpy array.\n", 518 | "- batch_size: Integer. If unspecified, it will default to 32.\n", 519 | "- verbose: verbosity mode, 0 or 1.\n", 520 | "- steps: Total number of steps (batches of samples) before declaring the prediction round finished. Ignored with the default value of None.\n", 521 | "\n", 522 | "Returns\n", 523 | "\n", 524 | "A Numpy array of predictions." 525 | ] 526 | }, 527 | { 528 | "cell_type": "markdown", 529 | "metadata": {}, 530 | "source": [ 531 | "### Loading and saving a model" 532 | ] 533 | }, 534 | { 535 | "cell_type": "code", 536 | "execution_count": null, 537 | "metadata": { 538 | "collapsed": true 539 | }, 540 | "outputs": [], 541 | "source": [ 542 | "from keras.models import load_model\n", 543 | "\n", 544 | "model_path = \"my_new_model.h5\"\n", 545 | "model.save(model_path)\n", 546 | "del model # deletes the existing model\n", 547 | "model = load_model(model_path)" 548 | ] 549 | }, 550 | { 551 | "cell_type": "markdown", 552 | "metadata": {}, 553 | "source": [ 554 | "# Mini-Project: Train you own fully connected neural network to classify handwritten digits\n", 555 | "\n", 556 | "- Train a logistic regression to create a benchmark\n", 557 | "- Train a neural network and compare. Start with a few layers, then experiment with more layers and different parameters.\n", 558 | "- Are you overfitting? Underfitting? How can you improve your model? Try other hyperparameters or adding regularization. Make some plots to understand the behaviour of your model.\n", 559 | "- Take a look at samples for which your model is predicting an incorrect label, what do you think is happening?" 560 | ] 561 | }, 562 | { 563 | "cell_type": "code", 564 | "execution_count": null, 565 | "metadata": { 566 | "collapsed": true 567 | }, 568 | "outputs": [], 569 | "source": [ 570 | "from keras.models import Sequential\n", 571 | "from keras.layers import Dense, Flatten, Dropout" 572 | ] 573 | }, 574 | { 575 | "cell_type": "markdown", 576 | "metadata": {}, 577 | "source": [ 578 | "## Logistic regression (Benchmark)" 579 | ] 580 | }, 581 | { 582 | "cell_type": "code", 583 | "execution_count": null, 584 | "metadata": { 585 | "collapsed": true 586 | }, 587 | "outputs": [], 588 | "source": [ 589 | "# Define model architecture\n", 590 | "model = Sequential()\n", 591 | "model.add(Flatten(input_shape=(28, 28, 1))) # Images are a 3D matrix, we have to flatten them to be 1D\n", 592 | "model.add(Dense(10, kernel_initializer='normal', activation='softmax'))\n", 593 | "\n", 594 | "# Compile model\n", 595 | "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n", 596 | "\n", 597 | "# Train the model\n", 598 | "log_history = model.fit(x_train, y_train, validation_split=0.2, epochs=10, batch_size=100, verbose=2)\n" 599 | ] 600 | }, 601 | { 602 | "cell_type": "markdown", 603 | "metadata": {}, 604 | "source": [ 605 | "## Shallow Neural Network" 606 | ] 607 | }, 608 | { 609 | "cell_type": "code", 610 | "execution_count": null, 611 | "metadata": { 612 | "collapsed": true 613 | }, 614 | "outputs": [], 615 | "source": [ 616 | "# Define model architecture\n", 617 | "model = Sequential()\n", 618 | "model.add(Flatten(input_shape=(28, 28, 1))) # Images are a 3D matrix, we have to flatten them to be 1D\n", 619 | "model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n", 620 | "model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n", 621 | "model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n", 622 | 
"model.add(Dense(10, kernel_initializer='normal', activation='softmax'))\n", 623 | "\n", 624 | "# Compile model\n", 625 | "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n", 626 | "\n", 627 | "# Train the model\n", 628 | "nn_history = model.fit(x_train, y_train, validation_split=0.2, epochs=10, batch_size=100, verbose=2)" 629 | ] 630 | }, 631 | { 632 | "cell_type": "markdown", 633 | "metadata": {}, 634 | "source": [ 635 | "### Visualize training" 636 | ] 637 | }, 638 | { 639 | "cell_type": "code", 640 | "execution_count": null, 641 | "metadata": { 642 | "collapsed": true 643 | }, 644 | "outputs": [], 645 | "source": [ 646 | "log_train_loss = log_history.history['loss']\n", 647 | "log_valid_loss = log_history.history['val_loss']\n", 648 | "nn_train_loss = nn_history.history['loss']\n", 649 | "nn_valid_loss = nn_history.history['val_loss']\n", 650 | "epochs = list(range(1, len(nn_train_loss)+1))\n", 651 | "\n", 652 | "plt.plot(epochs, log_train_loss, label=\"log reg train loss\")\n", 653 | "plt.plot(epochs, log_valid_loss, label=\"log reg validation loss\")\n", 654 | "plt.plot(epochs, nn_train_loss, label=\"NN train loss\")\n", 655 | "plt.plot(epochs, nn_valid_loss, label=\"NN validation loss\")\n", 656 | "plt.xlabel('Epochs')\n", 657 | "plt.ylabel('Loss')\n", 658 | "plt.legend(loc='upper right')\n", 659 | "plt.show()\n", 660 | "\n", 661 | "log_train_accuracy = log_history.history['acc']\n", 662 | "log_valid_accuracy = log_history.history['val_acc']\n", 663 | "nn_train_accuracy = nn_history.history['acc']\n", 664 | "nn_valid_accuracy = nn_history.history['val_acc']\n", 665 | "epochs = list(range(1, len(log_train_accuracy)+1))\n", 666 | "\n", 667 | "plt.plot(epochs, log_train_accuracy, label=\"log reg train accuracy\")\n", 668 | "plt.plot(epochs, log_valid_accuracy, label=\"log reg validation accuracy\")\n", 669 | "plt.plot(epochs, nn_train_accuracy, label=\"NN train accuracy\")\n", 670 | "plt.plot(epochs, nn_valid_accuracy, label=\"NN validation accuracy\")\n", 671 | "plt.xlabel('Epochs')\n", 672 | "plt.ylabel('Accuracy')\n", 673 | "plt.legend(loc='lower right')\n", 674 | "plt.show()" 675 | ] 676 | }, 677 | { 678 | "cell_type": "markdown", 679 | "metadata": {}, 680 | "source": [ 681 | "### Visualize errors" 682 | ] 683 | }, 684 | { 685 | "cell_type": "code", 686 | "execution_count": null, 687 | "metadata": { 688 | "collapsed": true 689 | }, 690 | "outputs": [], 691 | "source": [ 692 | "predictions = model.predict(x_test)\n", 693 | "\n", 694 | "predicted_labels = np.argmax(predictions, axis=1) # transform back from one_hot encoding\n", 695 | "true_labels = np.argmax(y_test, axis=1)\n", 696 | "misclassified_samples_positions = np.where(predicted_labels != true_labels)[0]\n", 697 | "\n", 698 | "for image_id in misclassified_samples_positions[0:5]:\n", 699 | " print(image_id)\n", 700 | " print(x_test[image_id].shape)\n", 701 | " plt.imshow(x_test[image_id].reshape(28, 28), cmap=plt.get_cmap('gray'))\n", 702 | " plt.title(\"The true label is %s but was classified as %s\" % (str(true_labels[image_id]), \n", 703 | " str(predicted_labels[image_id])\n", 704 | " ))\n", 705 | " plt.show()" 706 | ] 707 | }, 708 | { 709 | "cell_type": "markdown", 710 | "metadata": {}, 711 | "source": [ 712 | "## Scaling matters" 713 | ] 714 | }, 715 | { 716 | "cell_type": "code", 717 | "execution_count": null, 718 | "metadata": { 719 | "collapsed": true 720 | }, 721 | "outputs": [], 722 | "source": [ 723 | "# Scale the inputs\n", 724 | "x_train /= 255.0 # The image is in grayscale 
and has values between 0 and 255\n", 725 | "x_test /= 255.0" 726 | ] 727 | }, 728 | { 729 | "cell_type": "markdown", 730 | "metadata": {}, 731 | "source": [ 732 | "# Mini-Project: Train your own fully connected neural network to classify handwritten digits (Now with scaled data)" 733 | ] 734 | }, 735 | { 736 | "cell_type": "markdown", 737 | "metadata": {}, 738 | "source": [ 739 | "- Train a logistic regression to create a benchmark\n", 740 | "- Train a neural network and compare. Start with a few layers, then experiment with more layers and different parameters.\n", 741 | "- Are you overfitting? Underfitting? How can you improve your model? Try other hyperparameters or adding regularization. Make some plots to understand the behaviour of your model." 742 | ] 743 | }, 744 | { 745 | "cell_type": "markdown", 746 | "metadata": {}, 747 | "source": [ 748 | "## Extra credit\n", 749 | "\n", 750 | "- Try shuffling the order of the features before training (move the 5th pixel to a new random position), but do the same shuffling for all samples, and retrain your model. How does it perform compared to the original model?" 751 | ] 752 | }, 753 | { 754 | "cell_type": "markdown", 755 | "metadata": {}, 756 | "source": [ 757 | "## Logistic regression" 758 | ] 759 | }, 760 | { 761 | "cell_type": "code", 762 | "execution_count": null, 763 | "metadata": { 764 | "collapsed": true 765 | }, 766 | "outputs": [], 767 | "source": [ 768 | "# Define model architecture\n", 769 | "model = Sequential()\n", 770 | "model.add(Flatten(input_shape=(28, 28, 1))) # Images are a 3D matrix, we have to flatten them to be 1D\n", 771 | "model.add(Dense(10, kernel_initializer='normal', activation='softmax'))\n", 772 | "\n", 773 | "# Compile model\n", 774 | "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])" 775 | ] 776 | }, 777 | { 778 | "cell_type": "code", 779 | "execution_count": null, 780 | "metadata": { 781 | "collapsed": true, 782 | "scrolled": true 783 | }, 784 | "outputs": [], 785 | "source": [ 786 | "# Train the model\n", 787 | "log_history = model.fit(x_train, y_train, validation_split=0.2, epochs=10, batch_size=100, verbose=2)" 788 | ] 789 | }, 790 | { 791 | "cell_type": "markdown", 792 | "metadata": {}, 793 | "source": [ 794 | "## Shallow neural network" 795 | ] 796 | }, 797 | { 798 | "cell_type": "code", 799 | "execution_count": null, 800 | "metadata": { 801 | "collapsed": true 802 | }, 803 | "outputs": [], 804 | "source": [ 805 | "# Define model architecture\n", 806 | "model = Sequential()\n", 807 | "model.add(Flatten(input_shape=(28, 28, 1))) # Images are a 3D matrix, we have to flatten them to be 1D\n", 808 | "model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n", 809 | "model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n", 810 | "model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n", 811 | "model.add(Dense(10, kernel_initializer='normal', activation='softmax'))\n", 812 | "\n", 813 | "# Compile model\n", 814 | "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])" 815 | ] 816 | }, 817 | { 818 | "cell_type": "code", 819 | "execution_count": null, 820 | "metadata": { 821 | "collapsed": true 822 | }, 823 | "outputs": [], 824 | "source": [ 825 | "# Train the model\n", 826 | "nn_history = model.fit(x_train, y_train, validation_split=0.2, epochs=10, batch_size=100, verbose=2)" 827 | ] 828 | }, 829 | { 830 | "cell_type": "markdown", 831 | "metadata": {}, 832 | "source": [ 833 | "### Visualize 
training" 834 | ] 835 | }, 836 | { 837 | "cell_type": "code", 838 | "execution_count": null, 839 | "metadata": { 840 | "collapsed": true 841 | }, 842 | "outputs": [], 843 | "source": [ 844 | "log_train_loss = log_history.history['loss']\n", 845 | "log_valid_loss = log_history.history['val_loss']\n", 846 | "nn_train_loss = nn_history.history['loss']\n", 847 | "nn_valid_loss = nn_history.history['val_loss']\n", 848 | "epochs = list(range(1, len(log_train_loss)+1))\n", 849 | "\n", 850 | "plt.plot(epochs, log_train_loss, label=\"log reg train loss\")\n", 851 | "plt.plot(epochs, log_valid_loss, label=\"log reg validation loss\")\n", 852 | "plt.plot(epochs, nn_train_loss, label=\"NN train loss\")\n", 853 | "plt.plot(epochs, nn_valid_loss, label=\"NN validation loss\")\n", 854 | "plt.xlabel('Epochs')\n", 855 | "plt.ylabel('Loss')\n", 856 | "plt.legend(loc='upper right')\n", 857 | "plt.show()\n", 858 | "\n", 859 | "log_train_accuracy = log_history.history['acc']\n", 860 | "log_valid_accuracy = log_history.history['val_acc']\n", 861 | "nn_train_accuracy = nn_history.history['acc']\n", 862 | "nn_valid_accuracy = nn_history.history['val_acc']\n", 863 | "epochs = list(range(1, len(log_train_accuracy)+1))\n", 864 | "\n", 865 | "plt.plot(epochs, log_train_accuracy, label=\"log reg train accuracy\")\n", 866 | "plt.plot(epochs, log_valid_accuracy, label=\"log reg validation accuracy\")\n", 867 | "plt.plot(epochs, nn_train_accuracy, label=\"NN train accuracy\")\n", 868 | "plt.plot(epochs, nn_valid_accuracy, label=\"NN validation accuracy\")\n", 869 | "plt.xlabel('Epochs')\n", 870 | "plt.ylabel('Accuracy')\n", 871 | "plt.legend(loc='lower right')\n", 872 | "plt.show()" 873 | ] 874 | }, 875 | { 876 | "cell_type": "markdown", 877 | "metadata": {}, 878 | "source": [ 879 | "### Visualize errors" 880 | ] 881 | }, 882 | { 883 | "cell_type": "code", 884 | "execution_count": null, 885 | "metadata": { 886 | "collapsed": true 887 | }, 888 | "outputs": [], 889 | "source": [ 890 | "predictions = model.predict(x_test)\n", 891 | "\n", 892 | "predicted_labels = np.argmax(predictions, axis=1) # transform back from one_hot encoding\n", 893 | "true_labels = np.argmax(y_test, axis=1)\n", 894 | "misclassified_samples_positions = np.where(predicted_labels != true_labels)[0]\n", 895 | "\n", 896 | "for image_id in misclassified_samples_positions[0:5]:\n", 897 | " print(image_id)\n", 898 | " print(x_test[image_id].shape)\n", 899 | " plt.imshow(x_test[image_id].reshape(28, 28), cmap=plt.get_cmap('gray'))\n", 900 | " plt.title(\"The true label is %s but was classified as %s\" % (str(true_labels[image_id]), \n", 901 | " str(predicted_labels[image_id])\n", 902 | " ))\n", 903 | " plt.show()" 904 | ] 905 | }, 906 | { 907 | "cell_type": "markdown", 908 | "metadata": {}, 909 | "source": [ 910 | "### Reload data before procedding" 911 | ] 912 | }, 913 | { 914 | "cell_type": "code", 915 | "execution_count": 6, 916 | "metadata": { 917 | "collapsed": true 918 | }, 919 | "outputs": [], 920 | "source": [ 921 | "# load dataset\n", 922 | "(x_train, y_train), (x_test, y_test) = mnist.load_data()" 923 | ] 924 | }, 925 | { 926 | "cell_type": "code", 927 | "execution_count": 7, 928 | "metadata": { 929 | "collapsed": true 930 | }, 931 | "outputs": [], 932 | "source": [ 933 | "# This makes sure the image has the correct order in the axis for Tensorflow, it would be different for Theano backend\n", 934 | "x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)\n", 935 | "x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)\n", 936 | "\n", 937 | "# Convert 
values to floats, originally they are integers\n", 938 | "x_train = x_train.astype('float32')\n", 939 | "x_test = x_test.astype('float32')\n", 940 | "\n", 941 | "# Convert values of labels from 0 to 9 to categorical (one_hot encoding)\n", 942 | "y_train = keras.utils.to_categorical(y_train, 10)\n", 943 | "y_test = keras.utils.to_categorical(y_test, 10)" 944 | ] 945 | }, 946 | { 947 | "cell_type": "markdown", 948 | "metadata": {}, 949 | "source": [ 950 | "## Data augmentation and generators\n", 951 | "\n", 952 | "Augmenting the data allows us to use more data for training by taking artificial modifications from the samples we already have. One example of this is rotating the image. This will allow our model to learn some rotational invariance to the data and prevent overfitting.\n", 953 | "\n", 954 | "To train a model when a dataset is too large to load in RAM memory we can use generators. Generators allow us to only pass a small portion of the data at a time so it doesn't use all the memory from the computer. In addition once it loops through all the data we can set the generator so it starts again from the beginning. This allows us to loop infinitely through the data.\n", 955 | "\n", 956 | "Keras gives us a nice tool for augmenting data which uses a generator. It uses a generator because the modifications are done on the fly and it would take too much memory to save the augmented dataset. Keras also provides us with a method to fit the model when we use a generator, fit_generator(). This method requires us to specify how many batches are equivalent to an epoch so the model knows when to do updates, otherwise since the generator loops through the data, it would go on training infinitely." 957 | ] 958 | }, 959 | { 960 | "cell_type": "code", 961 | "execution_count": 8, 962 | "metadata": {}, 963 | "outputs": [], 964 | "source": [ 965 | "from keras.preprocessing.image import ImageDataGenerator\n", 966 | "\n", 967 | "data_generator = ImageDataGenerator(rescale=1./255,\n", 968 | " #featurewise_center=True,\n", 969 | " #featurewise_std_normalization=True,\n", 970 | " rotation_range=10,\n", 971 | " width_shift_range=0.1,\n", 972 | " height_shift_range=0.1,\n", 973 | " #horizontal_flip=True,\n", 974 | " zoom_range=0.1\n", 975 | " )\n", 976 | "\n", 977 | "# we git the model to the data. This needed for calculating mean and std.\n", 978 | "data_generator.fit(x_train)" 979 | ] 980 | }, 981 | { 982 | "cell_type": "markdown", 983 | "metadata": {}, 984 | "source": [ 985 | "### What does this look like?" 
986 | ] 987 | }, 988 | { 989 | "cell_type": "code", 990 | "execution_count": null, 991 | "metadata": { 992 | "collapsed": true 993 | }, 994 | "outputs": [], 995 | "source": [ 996 | "augmented_data_generator = data_generator.flow(x_train[0:40000], y_train[0:40000], batch_size=100)\n", 997 | "augmented_batch, labels = augmented_data_generator.next()\n", 998 | "\n", 999 | "print(augmented_batch.shape)\n", 1000 | "print(augmented_batch[0].shape)\n", 1001 | "print(labels[0])\n", 1002 | "\n", 1003 | "# plot images as gray scale\n", 1004 | "for image_id in range(0, 5):\n", 1005 | " plt.imshow(augmented_batch[image_id].reshape(28, 28), cmap=plt.get_cmap('gray'))\n", 1006 | " plt.title(\"The true label is %s\" % str(np.argmax(labels[image_id])))\n", 1007 | " plt.show()" 1008 | ] 1009 | }, 1010 | { 1011 | "cell_type": "markdown", 1012 | "metadata": {}, 1013 | "source": [ 1014 | "### Training a model using a generator" 1015 | ] 1016 | }, 1017 | { 1018 | "cell_type": "code", 1019 | "execution_count": null, 1020 | "metadata": { 1021 | "collapsed": true 1022 | }, 1023 | "outputs": [], 1024 | "source": [ 1025 | "history = model.fit_generator(data_generator.flow(x_train[0:40000], y_train[0:40000], batch_size=100), \n", 1026 | " steps_per_epoch=40000/100,\n", 1027 | " validation_data=(x_train[40000:], y_train[40000:]), \n", 1028 | " epochs=10, verbose=1)" 1029 | ] 1030 | }, 1031 | { 1032 | "cell_type": "markdown", 1033 | "metadata": {}, 1034 | "source": [ 1035 | "Arguments\n", 1036 | "\n", 1037 | "- generator: A generator. The output of the generator must be either\n", 1038 | "a tuple (inputs, targets)\n", 1039 | "a tuple (inputs, targets, sample_weights). All arrays should contain the same number of samples. The generator is expected to loop over its data indefinitely. An epoch finishes when steps_per_epoch batches have been seen by the model.\n", 1040 | "- steps_per_epoch: Total number of steps (batches of samples) to yield from generator before declaring one epoch finished and starting the next epoch. It should typically be equal to the number of samples of your dataset divided by the batch size.\n", 1041 | "- epochs: Integer, total number of iterations on the data. \n", 1042 | "- verbose: Verbosity mode, 0, 1, or 2.\n", 1043 | "- callbacks: List of callbacks to be called during training.\n", 1044 | "- validation_data: This can be either\n", 1045 | "A generator for the validation data\n", 1046 | "A tuple (inputs, targets)\n", 1047 | "A tuple (inputs, targets, sample_weights).\n", 1048 | "- validation_steps: Only relevant if validation_data is a generator. Number of steps to yield from validation generator at the end of every epoch. It should typically be equal to the number of samples of your validation dataset divided by the batch size. Optional for Sequence: if unspecified, will use the len(validation_data) as a number of steps.\n", 1049 | "- class_weight: Dictionary mapping class indices to a weight for the class.\n", 1050 | "- max_queue_size: Maximum size for the generator queue\n", 1051 | "- workers: Maximum number of processes to spin up\n", 1052 | "- use_multiprocessing: if True, use process based threading.\n", 1053 | "- initial_epoch: Epoch at which to start training (useful for resuming a previous training run).\n", 1054 | "\n", 1055 | "Returns\n", 1056 | "\n", 1057 | "A History object." 
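The interaction between the generator and `steps_per_epoch` is easier to see with a hand-rolled generator. The sketch below is an illustration only (the notebook itself uses `ImageDataGenerator`); it assumes `x_train` and `y_train` are already loaded and preprocessed as in the cells above. Because the generator loops over the data forever, Keras only knows that an epoch has finished after it has drawn `steps_per_epoch` batches.

```python
import numpy as np

def batch_generator(x, y, batch_size=100):
    """Yield (inputs, targets) batches and loop over the data indefinitely."""
    n_samples = x.shape[0]
    while True:  # loop forever; fit_generator ends an epoch after steps_per_epoch batches
        indices = np.random.permutation(n_samples)
        for start in range(0, n_samples, batch_size):
            batch_idx = indices[start:start + batch_size]
            yield x[batch_idx], y[batch_idx]

# Hypothetical usage with a model compiled as earlier in the notebook:
# model.fit_generator(batch_generator(x_train[:40000], y_train[:40000], batch_size=100),
#                     steps_per_epoch=40000 // 100,  # one epoch = 400 batches
#                     validation_data=(x_train[40000:], y_train[40000:]),
#                     epochs=10, verbose=2)
```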
1058 | ] 1059 | }, 1060 | { 1061 | "cell_type": "markdown", 1062 | "metadata": {}, 1063 | "source": [ 1064 | "# Mini-Project: Train you own fully connected neural network to classify handwritten digits (Now with augmented data)\n", 1065 | "\n", 1066 | "- Train a logistic regression to create a benchmark\n", 1067 | "- Train a neural network and compare. Start with a few layers, then experiment with more layers and different parameters.\n", 1068 | "- Are you overfitting? Underfitting? How can you improve your model? Try other hyperparameters or adding regularization. Make some plots to understand the behaviour of your model." 1069 | ] 1070 | }, 1071 | { 1072 | "cell_type": "markdown", 1073 | "metadata": {}, 1074 | "source": [ 1075 | "## Logistic regression" 1076 | ] 1077 | }, 1078 | { 1079 | "cell_type": "code", 1080 | "execution_count": null, 1081 | "metadata": { 1082 | "collapsed": true 1083 | }, 1084 | "outputs": [], 1085 | "source": [ 1086 | "# Define model architecture\n", 1087 | "model = Sequential()\n", 1088 | "model.add(Flatten(input_shape=(28, 28, 1))) # Images are a 3D matrix, we have to flatten them to be 1D\n", 1089 | "model.add(Dense(10, kernel_initializer='normal', activation='softmax'))\n", 1090 | "\n", 1091 | "# Compile model\n", 1092 | "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])" 1093 | ] 1094 | }, 1095 | { 1096 | "cell_type": "code", 1097 | "execution_count": null, 1098 | "metadata": { 1099 | "collapsed": true, 1100 | "scrolled": true 1101 | }, 1102 | "outputs": [], 1103 | "source": [ 1104 | "# Train the model\n", 1105 | "log_history = model.fit_generator(data_generator.flow(x_train[0:40000], y_train[0:40000], batch_size=100), \n", 1106 | " steps_per_epoch=40000/100,\n", 1107 | " validation_data=(x_train[40000:], y_train[40000:]), \n", 1108 | " epochs=10, verbose=2)" 1109 | ] 1110 | }, 1111 | { 1112 | "cell_type": "markdown", 1113 | "metadata": {}, 1114 | "source": [ 1115 | "## Shallow neural network" 1116 | ] 1117 | }, 1118 | { 1119 | "cell_type": "code", 1120 | "execution_count": null, 1121 | "metadata": { 1122 | "collapsed": true 1123 | }, 1124 | "outputs": [], 1125 | "source": [ 1126 | "# Define model architecture\n", 1127 | "model = Sequential()\n", 1128 | "model.add(Flatten(input_shape=(28, 28, 1))) # Images are a 3D matrix, we have to flatten them to be 1D\n", 1129 | "model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n", 1130 | "model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n", 1131 | "model.add(Dense(200, kernel_initializer='normal', activation='relu'))\n", 1132 | "model.add(Dense(10, kernel_initializer='normal', activation='softmax'))\n", 1133 | "\n", 1134 | "# Compile model\n", 1135 | "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])" 1136 | ] 1137 | }, 1138 | { 1139 | "cell_type": "code", 1140 | "execution_count": null, 1141 | "metadata": { 1142 | "collapsed": true 1143 | }, 1144 | "outputs": [], 1145 | "source": [ 1146 | "# Train the model\n", 1147 | "nn_history = model.fit_generator(data_generator.flow(x_train[0:40000], y_train[0:40000], batch_size=100), \n", 1148 | " steps_per_epoch=40000/100,\n", 1149 | " validation_data=(x_train[40000:], y_train[40000:]), \n", 1150 | " epochs=10, verbose=2)" 1151 | ] 1152 | }, 1153 | { 1154 | "cell_type": "markdown", 1155 | "metadata": {}, 1156 | "source": [ 1157 | "### Visualize training" 1158 | ] 1159 | }, 1160 | { 1161 | "cell_type": "code", 1162 | "execution_count": null, 1163 | "metadata": { 1164 
| "collapsed": true 1165 | }, 1166 | "outputs": [], 1167 | "source": [ 1168 | "log_train_loss = log_history.history['loss']\n", 1169 | "log_valid_loss = log_history.history['val_loss']\n", 1170 | "nn_train_loss = nn_history.history['loss']\n", 1171 | "nn_valid_loss = nn_history.history['val_loss']\n", 1172 | "epochs = list(range(1, len(log_train_loss)+1))\n", 1173 | "\n", 1174 | "plt.plot(epochs, log_train_loss, label=\"log reg train loss\")\n", 1175 | "plt.plot(epochs, log_valid_loss, label=\"log reg validation loss\")\n", 1176 | "plt.plot(epochs, nn_train_loss, label=\"NN train loss\")\n", 1177 | "plt.plot(epochs, nn_valid_loss, label=\"NN validation loss\")\n", 1178 | "plt.xlabel('Epochs')\n", 1179 | "plt.ylabel('Loss')\n", 1180 | "plt.legend(loc='upper right')\n", 1181 | "plt.show()\n", 1182 | "\n", 1183 | "log_train_accuracy = log_history.history['acc']\n", 1184 | "log_valid_accuracy = log_history.history['val_acc']\n", 1185 | "nn_train_accuracy = nn_history.history['acc']\n", 1186 | "nn_valid_accuracy = nn_history.history['val_acc']\n", 1187 | "epochs = list(range(1, len(log_train_accuracy)+1))\n", 1188 | "\n", 1189 | "plt.plot(epochs, log_train_accuracy, label=\"log reg train accuracy\")\n", 1190 | "plt.plot(epochs, log_valid_accuracy, label=\"log reg validation accuracy\")\n", 1191 | "plt.plot(epochs, nn_train_accuracy, label=\"NN train accuracy\")\n", 1192 | "plt.plot(epochs, nn_valid_accuracy, label=\"NN validation accuracy\")\n", 1193 | "plt.xlabel('Epochs')\n", 1194 | "plt.ylabel('Accuracy')\n", 1195 | "plt.legend(loc='lower right')\n", 1196 | "plt.show()" 1197 | ] 1198 | }, 1199 | { 1200 | "cell_type": "markdown", 1201 | "metadata": {}, 1202 | "source": [ 1203 | "# Convolutional Neural Networks" 1204 | ] 1205 | }, 1206 | { 1207 | "cell_type": "markdown", 1208 | "metadata": { 1209 | "collapsed": true 1210 | }, 1211 | "source": [ 1212 | "Convolutional neural networks are similar to the previous networks we saw with a main difference, they start with the assumption that the input will be an image and optimize the architecture for that assumption. Now they are used in many other contexts besides images, but this is what they were created for.\n", 1213 | "\n", 1214 | "Convolutional layer define a kernel (weight matrix) which is then multiplied element by element with a section of the input of the same size. Sum all the resulting values. Move the kernel a number of pixels equal to a defined stride size and repeat until you go throught the whole image. Afterwards apply an activation function to each one of the values. Now repeat all of this for each filter in your layer.\n", 1215 | "\n", 1216 | "\"convolutional\n", 1217 | "Animation from Karpathy (http://cs231n.github.io/convolutional-networks/)\n", 1218 | "\n", 1219 | "A key aspect is the fact that the weights from the kernel are the same when applied in all sections of the input for the same filter. This means the convolutional layer has way less free parameters than a fully connected layer. This also gives the model location invariance since the filters will activate in the same way if the object is in the top left of the image or at the bottom right.\n", 1220 | "\n", 1221 | "Images are 3D objects which have height, length and depth. In the case of grayscale images the depth is 1, however in color images you have 3 channels, red, green and blue. 
So convolutional layers take as input a 3D tensor and outputs a 3D tensor.\n", 1222 | "\n", 1223 | "\"convolutional\n", 1224 | "Image from Karpathy (http://cs231n.github.io/convolutional-networks/)\n", 1225 | "\n", 1226 | "Notice the convolutional operation will reduce the size of the input. Depending on your architecture you might want to keep the size constant, you can do this by includding padding in the borders of the input. " 1227 | ] 1228 | }, 1229 | { 1230 | "cell_type": "markdown", 1231 | "metadata": {}, 1232 | "source": [ 1233 | "## Pooling layers\n", 1234 | "\n", 1235 | "The idea of the pooling layer is to reduce the size of the input and also help regularize the model. The most famous type of pooling is max pooling. In this case you select an area which is the size of the filter and from it you will only pass through the maximum values among the inputs. Then move the filter a specific stride size and repeat.\n", 1236 | "\n", 1237 | "\"convolutional\n", 1238 | "Image from Karpathy (http://cs231n.github.io/convolutional-networks/)" 1239 | ] 1240 | }, 1241 | { 1242 | "cell_type": "markdown", 1243 | "metadata": {}, 1244 | "source": [ 1245 | "## Best Practices" 1246 | ] 1247 | }, 1248 | { 1249 | "cell_type": "markdown", 1250 | "metadata": {}, 1251 | "source": [ 1252 | "In practice it is best to use blocks of stacked convolutional layers with rectified linear units followed by a max pooling operation. Then at the end have a small number of fully connected layers and the output layer.\n", 1253 | "\n", 1254 | "Each convolutional layer usually has a small filter size (2, 2), (3, 3) and use max pooling with stride (2, 2)" 1255 | ] 1256 | }, 1257 | { 1258 | "cell_type": "markdown", 1259 | "metadata": {}, 1260 | "source": [ 1261 | "# Convolutional Neural Networks in Keras" 1262 | ] 1263 | }, 1264 | { 1265 | "cell_type": "code", 1266 | "execution_count": 9, 1267 | "metadata": { 1268 | "collapsed": true 1269 | }, 1270 | "outputs": [], 1271 | "source": [ 1272 | "from keras.layers import Conv2D, MaxPooling2D" 1273 | ] 1274 | }, 1275 | { 1276 | "cell_type": "markdown", 1277 | "metadata": {}, 1278 | "source": [ 1279 | "### Convolutional layer" 1280 | ] 1281 | }, 1282 | { 1283 | "cell_type": "code", 1284 | "execution_count": null, 1285 | "metadata": { 1286 | "collapsed": true 1287 | }, 1288 | "outputs": [], 1289 | "source": [ 1290 | "Conv2D(filters, kernel_size, strides=(1, 1), padding='valid', \n", 1291 | " activation=None, use_bias=True, \n", 1292 | " kernel_initializer='glorot_uniform', bias_initializer='zeros', \n", 1293 | " kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, \n", 1294 | " kernel_constraint=None, bias_constraint=None)" 1295 | ] 1296 | }, 1297 | { 1298 | "cell_type": "code", 1299 | "execution_count": null, 1300 | "metadata": { 1301 | "collapsed": true 1302 | }, 1303 | "outputs": [], 1304 | "source": [ 1305 | "Conv2D(32, (3, 3), padding='same') # Keeps output of the same size as input\n", 1306 | "Conv2D(32, (3, 3)) # Reduces the size of the output" 1307 | ] 1308 | }, 1309 | { 1310 | "cell_type": "markdown", 1311 | "metadata": {}, 1312 | "source": [ 1313 | "Arguments\n", 1314 | "\n", 1315 | "- filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution).\n", 1316 | "- kernel_size: An integer or tuple/list of 2 integers, specifying the width and height of the 2D convolution window. 
Can be a single integer to specify the same value for all spatial dimensions.\n", 1317 | "- strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the width and height. Can be a single integer to specify the same value for all spatial dimensions. Specifying any stride value != 1 is incompatible with specifying any dilation_rate value != 1.\n", 1318 | "- padding: one of \"valid\" or \"same\" (case-insensitive).\n", 1319 | "- activation: Activation function to use (see activations). If you don't specify anything, no activation is applied (ie. \"linear\" activation: a(x) = x).\n", 1320 | "- use_bias: Boolean, whether the layer uses a bias vector.\n", 1321 | "- kernel_initializer: Initializer for the kernel weights matrix (see initializers).\n", 1322 | "- bias_initializer: Initializer for the bias vector (see initializers).\n", 1323 | "- kernel_regularizer: Regularizer function applied to the kernel weights matrix (see regularizer).\n", 1324 | "- bias_regularizer: Regularizer function applied to the bias vector (see regularizer).\n", 1325 | "- activity_regularizer: Regularizer function applied to the output of the layer (its \"activation\"). (see regularizer).\n", 1326 | "- kernel_constraint: Constraint function applied to the kernel matrix (see constraints).\n", 1327 | "- bias_constraint: Constraint function applied to the bias vector (see constraints).\n", 1328 | "\n", 1329 | "Input shape\n", 1330 | "\n", 1331 | "4D tensor with shape: (samples, channels, rows, cols) if data_format='channels_first' or 4D tensor with shape: (samples, rows, cols, channels) if data_format='channels_last'.\n", 1332 | "\n", 1333 | "Output shape\n", 1334 | "\n", 1335 | "4D tensor with shape: (samples, filters, new_rows, new_cols) if data_format='channels_first' or 4D tensor with shape: (samples, new_rows, new_cols, filters) if data_format='channels_last'. rows and cols values might have changed due to padding." 1336 | ] 1337 | }, 1338 | { 1339 | "cell_type": "markdown", 1340 | "metadata": { 1341 | "collapsed": true 1342 | }, 1343 | "source": [ 1344 | "### Pooling layer" 1345 | ] 1346 | }, 1347 | { 1348 | "cell_type": "code", 1349 | "execution_count": null, 1350 | "metadata": { 1351 | "collapsed": true 1352 | }, 1353 | "outputs": [], 1354 | "source": [ 1355 | "MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid') # This will reduce the size of the output" 1356 | ] 1357 | }, 1358 | { 1359 | "cell_type": "markdown", 1360 | "metadata": {}, 1361 | "source": [ 1362 | "Arguments\n", 1363 | "\n", 1364 | "- pool_size: integer or tuple of 2 integers, factors by which to downscale (vertical, horizontal). (2, 2) will halve the input in both spatial dimension. If only one integer is specified, the same window length will be used for both dimensions.\n", 1365 | "- strides: Integer, tuple of 2 integers, or None. Strides values. 
If None, it will default to pool_size.\n", 1366 | "- padding: One of \"valid\" or \"same\" (case-insensitive).\n", 1367 | "\n", 1368 | "Input shape\n", 1369 | "\n", 1370 | "4D tensor with shape: (batch_size, rows, cols, channels)\n", 1371 | "\n", 1372 | "Output shape\n", 1373 | "\n", 1374 | "4D tensor with shape: (batch_size, pooled_rows, pooled_cols, channels)" 1375 | ] 1376 | }, 1377 | { 1378 | "cell_type": "markdown", 1379 | "metadata": {}, 1380 | "source": [ 1381 | "# Mini-Project: Train you own convolutional neural network to classify handwritten digits (With or without augmented data)\n", 1382 | "\n", 1383 | "- Train a logistic regression to create a benchmark\n", 1384 | "- Train a convolutional neural network and compare it with the logistic regression and fully connected neural network. Start with a few layers, then experiment with more layers and different parameters.\n", 1385 | "- Are you overfitting? Underfitting? How can you improve your model? Try other hyperparameters or adding regularization. Make some plots to understand the behaviour of your model." 1386 | ] 1387 | }, 1388 | { 1389 | "cell_type": "markdown", 1390 | "metadata": {}, 1391 | "source": [ 1392 | "## Convolutional neural network" 1393 | ] 1394 | }, 1395 | { 1396 | "cell_type": "code", 1397 | "execution_count": 10, 1398 | "metadata": {}, 1399 | "outputs": [], 1400 | "source": [ 1401 | "from keras.layers import Conv2D, MaxPooling2D, Activation\n", 1402 | "# Define model architecture\n", 1403 | "model = Sequential()\n", 1404 | "model.add(Conv2D(32, (3, 3), padding='same', input_shape=(28, 28, 1), activation='relu'))\n", 1405 | "model.add(Conv2D(32, (3, 3), activation='relu'))\n", 1406 | "model.add(MaxPooling2D(pool_size=(2, 2)))\n", 1407 | "model.add(Dropout(0.25))\n", 1408 | "\n", 1409 | "model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))\n", 1410 | "model.add(Conv2D(64, (3, 3), activation='relu'))\n", 1411 | "model.add(MaxPooling2D(pool_size=(2, 2)))\n", 1412 | "model.add(Dropout(0.25))\n", 1413 | "\n", 1414 | "model.add(Flatten())\n", 1415 | "model.add(Dense(512, activation='relu'))\n", 1416 | "model.add(Dropout(0.5))\n", 1417 | "model.add(Dense(10, activation='softmax'))\n", 1418 | "\n", 1419 | "# Compile model\n", 1420 | "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])" 1421 | ] 1422 | }, 1423 | { 1424 | "cell_type": "code", 1425 | "execution_count": 11, 1426 | "metadata": {}, 1427 | "outputs": [ 1428 | { 1429 | "name": "stdout", 1430 | "output_type": "stream", 1431 | "text": [ 1432 | "Epoch 1/10\n", 1433 | " - 158s - loss: 0.4550 - acc: 0.8518 - val_loss: 0.3061 - val_acc: 0.9806\n", 1434 | "Epoch 2/10\n", 1435 | " - 156s - loss: 0.1381 - acc: 0.9584 - val_loss: 0.1904 - val_acc: 0.9881\n", 1436 | "Epoch 3/10\n", 1437 | " - 156s - loss: 0.0993 - acc: 0.9694 - val_loss: 0.1709 - val_acc: 0.9892\n", 1438 | "Epoch 4/10\n", 1439 | " - 156s - loss: 0.0813 - acc: 0.9752 - val_loss: 0.1744 - val_acc: 0.9891\n", 1440 | "Epoch 5/10\n", 1441 | " - 11744s - loss: 0.0710 - acc: 0.9783 - val_loss: 0.1543 - val_acc: 0.9903\n", 1442 | "Epoch 6/10\n", 1443 | " - 222s - loss: 0.0662 - acc: 0.9795 - val_loss: 0.1180 - val_acc: 0.9925\n", 1444 | "Epoch 7/10\n", 1445 | " - 173s - loss: 0.0574 - acc: 0.9821 - val_loss: 0.1387 - val_acc: 0.9914\n", 1446 | "Epoch 8/10\n", 1447 | " - 173s - loss: 0.0535 - acc: 0.9840 - val_loss: 0.1165 - val_acc: 0.9927\n", 1448 | "Epoch 9/10\n", 1449 | " - 170s - loss: 0.0525 - acc: 0.9837 - val_loss: 0.1327 - val_acc: 0.9917\n", 1450 | "Epoch 
10/10\n", 1451 | " - 170s - loss: 0.0478 - acc: 0.9856 - val_loss: 0.1061 - val_acc: 0.9934\n" 1452 | ] 1453 | } 1454 | ], 1455 | "source": [ 1456 | "# Train the model\n", 1457 | "cnn_history = model.fit_generator(data_generator.flow(x_train[0:40000], y_train[0:40000], batch_size=100), \n", 1458 | " steps_per_epoch=40000/100,\n", 1459 | " validation_data=(x_train[40000:], y_train[40000:]), \n", 1460 | " epochs=10, verbose=2)" 1461 | ] 1462 | }, 1463 | { 1464 | "cell_type": "markdown", 1465 | "metadata": {}, 1466 | "source": [ 1467 | "### Visualize training" 1468 | ] 1469 | }, 1470 | { 1471 | "cell_type": "code", 1472 | "execution_count": 12, 1473 | "metadata": {}, 1474 | "outputs": [ 1475 | { 1476 | "ename": "NameError", 1477 | "evalue": "name 'log_history' is not defined", 1478 | "output_type": "error", 1479 | "traceback": [ 1480 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 1481 | "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", 1482 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mlog_train_loss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlog_history\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhistory\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'loss'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0mlog_valid_loss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlog_history\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhistory\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'val_loss'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mnn_train_loss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn_history\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhistory\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'loss'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mnn_valid_loss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnn_history\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhistory\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'val_loss'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mcnn_train_loss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcnn_history\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhistory\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'loss'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 1483 | "\u001b[0;31mNameError\u001b[0m: name 'log_history' is not defined" 1484 | ] 1485 | } 1486 | ], 1487 | "source": [ 1488 | "log_train_loss = log_history.history['loss']\n", 1489 | "log_valid_loss = log_history.history['val_loss']\n", 1490 | "nn_train_loss = nn_history.history['loss']\n", 1491 | "nn_valid_loss = nn_history.history['val_loss']\n", 1492 | "cnn_train_loss = cnn_history.history['loss']\n", 1493 | "cnn_valid_loss = cnn_history.history['val_loss']\n", 1494 | "epochs = list(range(1, len(log_train_loss)+1))\n", 1495 | "\n", 1496 | "plt.plot(epochs, log_train_loss, label=\"log reg train loss\")\n", 1497 | "plt.plot(epochs, log_valid_loss, label=\"log reg validation loss\")\n", 1498 | "plt.plot(epochs, nn_train_loss, label=\"NN train loss\")\n", 1499 | "plt.plot(epochs, nn_valid_loss, label=\"NN validation loss\")\n", 1500 | "plt.plot(epochs, cnn_train_loss, label=\"CNN train loss\")\n", 1501 | "plt.plot(epochs, cnn_valid_loss, label=\"CNN validation loss\")\n", 1502 | "plt.xlabel('Epochs')\n", 1503 | "plt.ylabel('Loss')\n", 1504 | "plt.legend(loc='upper right')\n", 1505 | "plt.show()\n", 1506 | "\n", 1507 | 
"log_train_accuracy = log_history.history['acc']\n", 1508 | "log_valid_accuracy = log_history.history['val_acc']\n", 1509 | "nn_train_accuracy = nn_history.history['acc']\n", 1510 | "nn_valid_accuracy = nn_history.history['val_acc']\n", 1511 | "cnn_train_accuracy = cnn_history.history['acc']\n", 1512 | "cnn_valid_accuracy = cnn_history.history['val_acc']\n", 1513 | "epochs = list(range(1, len(log_train_accuracy)+1))\n", 1514 | "\n", 1515 | "plt.plot(epochs, log_train_accuracy, label=\"log reg train accuracy\")\n", 1516 | "plt.plot(epochs, log_valid_accuracy, label=\"log reg validation accuracy\")\n", 1517 | "plt.plot(epochs, nn_train_accuracy, label=\"NN train accuracy\")\n", 1518 | "plt.plot(epochs, nn_valid_accuracy, label=\"NN validation accuracy\")\n", 1519 | "plt.plot(epochs, cnn_train_accuracy, label=\"CNN train accuracy\")\n", 1520 | "plt.plot(epochs, cnn_valid_accuracy, label=\"CNN validation accuracy\")\n", 1521 | "plt.xlabel('Epochs')\n", 1522 | "plt.ylabel('Accuracy')\n", 1523 | "plt.legend(loc='lower right')\n", 1524 | "plt.show()" 1525 | ] 1526 | }, 1527 | { 1528 | "cell_type": "code", 1529 | "execution_count": null, 1530 | "metadata": { 1531 | "collapsed": true 1532 | }, 1533 | "outputs": [], 1534 | "source": [] 1535 | } 1536 | ], 1537 | "metadata": { 1538 | "anaconda-cloud": {}, 1539 | "kernelspec": { 1540 | "display_name": "Python [py3Keras2_env]", 1541 | "language": "python", 1542 | "name": "Python [py3Keras2_env]" 1543 | }, 1544 | "language_info": { 1545 | "codemirror_mode": { 1546 | "name": "ipython", 1547 | "version": 3 1548 | }, 1549 | "file_extension": ".py", 1550 | "mimetype": "text/x-python", 1551 | "name": "python", 1552 | "nbconvert_exporter": "python", 1553 | "pygments_lexer": "ipython3", 1554 | "version": "3.6.0" 1555 | } 1556 | }, 1557 | "nbformat": 4, 1558 | "nbformat_minor": 1 1559 | } 1560 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # (Deep) Learning With Python 2 | 3 | ## Morning 4 | 5 | **Neural network basics** 6 | 7 | *Mini-Project: Implement a feedforward neural network* 8 | 9 | *Mini-Project: Implement gradient descent in Tensorflow to train a logistic regression model* 10 | 11 | **Break** 12 | 13 | **Theory and practice** 14 | 15 | *Mini-Project: Build and train a neural network in Tensorflow to predict Higgs boson* 16 | 17 | ## Afternoon 18 | 19 | **Image processing** 20 | 21 | What is MNIST? What is CIFAR? 22 | 23 | Loading images and converting them to numpy 24 | 25 | What is Keras? 
Creating a basic model, Callbacks, saving/loading a model, changing optimizer, loss (30 minutes) 26 | 27 | *Mini-Project: Train a feed forward NN on MNIST/CIFAR10 with Keras* 28 | 29 | Preprocessing images: Scaling 30 | 31 | *Mini-Project: Train a feed forward NN on MNIST/CIFAR10 with Keras with scaling* 32 | 33 | Preprocessing images: Data augmentation and generators with Keras 34 | 35 | *Mini-Project: Train a feed forward NN on MNIST/CIFAR10 using data augmentation with Keras* 36 | 37 | **Break** 38 | 39 | **Convolutional neural networks** 40 | 41 | Convolutional NN, Pooling 42 | 43 | How to build a CNN in Keras 44 | 45 | *Mini-Project: Train CNN on MNIST/CIFAR10* 46 | -------------------------------------------------------------------------------- /convolution.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "import numpy as np" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 2, 17 | "metadata": { 18 | "collapsed": true 19 | }, 20 | "outputs": [], 21 | "source": [ 22 | "import matplotlib\n", 23 | "matplotlib.use('Agg')\n", 24 | "import matplotlib.pyplot as plt\n", 25 | "%matplotlib inline" 26 | ] 27 | }, 28 | { 29 | "cell_type": "markdown", 30 | "metadata": {}, 31 | "source": [ 32 | "## Convolution\n", 33 | "\n", 34 | "Convolutional layer define a kernel (weight matrix) which is then multiplied element by element with a section of the input of the same size. Sum all the resulting values. Move the kernel a number of pixels equal to a defined stride size and repeat until you go throught the whole image. Afterwards apply an activation function to each one of the values. Now repeat all of this for each filter in your layer." 35 | ] 36 | }, 37 | { 38 | "cell_type": "markdown", 39 | "metadata": {}, 40 | "source": [ 41 | "![Convolution](https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/master/images/conv_layer.gif)" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": 3, 47 | "metadata": { 48 | "collapsed": true 49 | }, 50 | "outputs": [], 51 | "source": [ 52 | "def convolution(input_image, conv_filter, stride):\n", 53 | " \"\"\"\n", 54 | " Perform convolution on input_image with conv_filter and return feature_map.\n", 55 | " \n", 56 | " Args\n", 57 | " input_image: (d x d) numpy array\n", 58 | " conv_filter: (s x s) square numpy array\n", 59 | " stride: sliding width / height\n", 60 | " \"\"\"\n", 61 | " d = input_image.shape[0]\n", 62 | " s = conv_filter.shape[0]\n", 63 | " n = (d - s) / stride\n", 64 | " feature_map = np.zeros((n, n))\n", 65 | " for i in range(n):\n", 66 | " for j in range(n):\n", 67 | " feature_map[i, j] = 0\n", 68 | " return feature_map" 69 | ] 70 | }, 71 | { 72 | "cell_type": "markdown", 73 | "metadata": {}, 74 | "source": [ 75 | "Convolution detects features in the image that match the filter, e.g. horizontal edges." 
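The `convolution` function defined above is left as an exercise (`feature_map[i, j] = 0` is a placeholder). A minimal completed sketch is shown below; it keeps the notebook's sizing convention `n = (d - s) / stride` (the more common formula adds 1, `(d - s) // stride + 1`), uses integer division so the result can size the output array, and assumes a square, single-channel input.

```python
import numpy as np

def convolution(input_image, conv_filter, stride):
    """Slide conv_filter over input_image and return the resulting feature map."""
    d = input_image.shape[0]
    s = conv_filter.shape[0]
    n = (d - s) // stride  # integer division so n can be used as an array size
    feature_map = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            # element-wise product of the filter with the current window, then sum
            window = input_image[i * stride:i * stride + s,
                                 j * stride:j * stride + s]
            feature_map[i, j] = np.sum(window * conv_filter)
    return feature_map
```

In a convolutional layer an activation function (for example ReLU) would then be applied to each value of the feature map, and the whole procedure repeated for every filter in the layer.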
76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": 4, 81 | "metadata": { 82 | "collapsed": false 83 | }, 84 | "outputs": [], 85 | "source": [ 86 | "input_image = np.zeros((20, 20))\n", 87 | "input_image[9:11, :] = 1" 88 | ] 89 | }, 90 | { 91 | "cell_type": "code", 92 | "execution_count": 5, 93 | "metadata": { 94 | "collapsed": false 95 | }, 96 | "outputs": [ 97 | { 98 | "data": { 99 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQgAAAD8CAYAAACLgjpEAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAADcRJREFUeJzt3X+oX/V9x/Hna/EXOp2m1tRfa6XL\nBC1dVkJcmRs6WxtFlnaULjI2tzniSoUVBsNtUEv3T8dwsqEotgva0aplW9ZA44+QDazQpkaJv1p/\nZGIxd9a0pjN1dtroe3/cE7m7+X68d9/zvfd779fnAy7fcz7n8z3nfbjwyjnne/N9p6qQpEF+ZtwF\nSFq6DAhJTQaEpCYDQlKTASGpyYCQ1GRASGoyICQ1GRCSmo4YdwGDHJWj6xiOG3cZ0sT6H/6b1+rV\nzDVvSQbEMRzHeblo3GVIE2tn7ZjXvF63GEnWJ3kyyZ4k1wzYfnSSO7vtO5O8p8/xJC2uoQMiyQrg\nRuAS4Bzg8iTnzJp2JfCjqvoF4Hrgr4c9nqTF1+cKYh2wp6qeqarXgDuADbPmbABu65b/CbgoyZz3\nPZKWhj4BcTrw3Iz1vd3YwDlVdRB4CXhHj2NKWkRL5iFlkk3AJoBjOHbM1UiCflcQU8CZM9bP6MYG\nzklyBPBzwIuDdlZVt1TV2qpaeyRH9yhL0qj0CYgHgNVJzkpyFLAR2Dprzlbgim7548C/lV9hJS0b\nQ99iVNXBJFcD9wArgM1V9XiSzwG7qmor8A/APybZA+xnOkQkLRNZiv+gn5CV5R9KSQtnZ+3gQO2f\n8xNF/y+GpCYDQlKTASGpyYCQ1GRASGoyICQ1GRCSmgwISU0GhKQmA0JSkwEhqcmAkNRkQEhqMiAk\nNRkQkpoMCElNBoSkJgNCUlOfzlpnJvn3JN9J8niSPxkw54IkLyXZ3f18pl+5khZTn74YB4E/raqH\nkhwPPJhke1V9Z9a8b1TVZT2OI2lMhr6CqKrnq+qhbvnHwHc5vLOWpGVsJM8guq7dvwzsHLD5g0ke\nTnJXknNHcTxJi6N3670kPwv8M/Dpqjowa/NDwLur6uUklwL/Cqxu7MfWe9IS0+sKIsmRTIfDl6vq\nX2Zvr6oDVfVyt7wNODLJyYP2Zes9aenp8ylGmO6c9d2q+tvGnHd180iyrjvewN6ckpaePrcYvwr8\nLvBokt3d2F8APw9QVTcz3Y/zk0kOAj8BNtqbU1o++vTmvB94y9ZdVXUDcMOwx5A0Xv4lpaQmA0JS\nkwEhqcmAkNRkQEhqMiAkNRkQkpoMCElNBoSkJgNCUpMBIanJgJDUZEBIajIgJDUZEJKaDAhJTQaE\npCYDQlJT74BI8mySR7vWersGbE+Sv0+yJ8kjST7Q95iSFkfvvhidC6vqh41tlzDdC2M1cB5wU/cq\naYlbjFuMDcCXatq3gBOTnLoIx5XU0ygCooB7kzzYdcea7XTguRnre7GHp7QsjOIW4/yqmkpyCrA9\nyRNVdd//dye23pOWnt5XEFU11b3uA7YA62ZNmQLOnLF+Rjc2ez+23pOWmL69OY9LcvyhZeBi4LFZ\n07YCv9d9mvErwEtV9Xyf40paHH1vMVYBW7r2m0cAX6mqu5P8MbzZfm8bcCmwB3gF+IOex5S0SHoF\nRFU9A/zSgPGbZywX8Kk+x5E0Hv4lpaQmA0JSkwEhqcmAkNRkQEhqMiAkNRkQkpoMCElNBoSkJgNC\nUpMBIanJgJDUZEBIajIgJDUZEJKaDAhJTQaEpCYDQlKTASGpaeiASHJ214/z0M+BJJ+eNeeCJC/N\nmPOZ/iVLWixDf2ltVT0JrAFIsoLpXhdbBkz9RlVdNuxxJI3PqG4xLgL+o6q+N6L9SVoCRhUQG4Hb\nG9s+mOThJHclObe1gySbkuxKsuunvDqisiT1kem2FT12kBwF/CdwblW9MGvbCcAbVfVykkuBv6uq\n1XPt84SsrPNyUa+6JLXtrB0cqP2Za94oriAuAR6aHQ4AVXWgql7ulrcBRyY5eQTHlLQIRhEQl9O4\nvUjyrnR9+ZKs64734giOKWkR9Gq91zXs/TBw1YyxmX05Pw58MslB4CfAxup7TyNp0fR+BrEQfAYh\nLazFfAYhaUIZEJKaDAhJTQaEpCYDQlJTr485F8ovvv8V7rln97jLkCbWuo+8Mq95XkFIajIgJDUZ\nEJKaDAhJTQaEpCYDQlKTASGpyYCQ1GRASGoyICQ1GRCSmgwISU3zCogkm5PsS/LYjLGVSbYnebp7\nPanx3iu6OU8nuWJUhUtaePO9grgVWD9r7BpgR9fnYke3/n8kWQlcC5wHrAOubQWJpKVnXgFRVfcB\n+2cNbwBu65ZvAz464K0fAbZX1f6q+hGwncODRtIS1ecZxKqqer5b/j6wasCc04HnZqzv7cYkLQMj\neUjZ9bro9f35M3tz/uDF10dRlqSe+gTEC0lOBehe9w2YMwWcOWP9jG7sMFV1S1Wtraq173zHih5l\nSRqVPgGxFTj0qcQVwNcGzLkHuDjJSd3DyYu7MUnLwHw/5rwd+CZwdpK9Sa4EPg98OMnTwIe6dZKs\nTfJFgKraD/wV8ED387luTNIyMK8vra2qyxubDuuPV1W7gD+asb4Z2DxUdZLGakl+q/VTjxzLR05b\nM+4ypIn1VL04r3n+qbWkJgNCUpMBIanJgJDUZEBIajIgJDUZEJKaDAhJTQaEpCYDQlKTASGpyYCQ\n1GRASGoyICQ1GRCSmgwISU0GhKSmOQOi0Xbvb5I8keSRJFuSnNh477NJHk2yO8muURYuaeHN5wri\nVg7vhrUdeF9VvR94Cvjzt3j/hVW1pqrWDleipHGZMyAGtd2rqnur6mC3+i2m+11ImjCjeAbxh8Bd\njW0F3JvkwSSbRnAsSYuo17daJ/lL4CDw5caU86tqKskpwPYkT3RXJIP2tQnYBHAMx/YpS9KIDH0F\nkeT3gcuA3+l6cx6mqqa6133AFmBda38zW+8dydHDliVphIYKiCTrgT8DfrOqXmnMOS7J8YeWmW67\n99iguZKWpvl8
zDmo7d4NwPFM3zbsTnJzN/e0JNu6t64C7k/yMPBt4OtVdfeCnIWkBZHG3cFYnZCV\ndV4O6+onaUR21g4O1P7MNc+/pJTUZEBIajIgJDUZEJKaDAhJTQaEpCYDQlKTASGpyYCQ1GRASGoy\nICQ1GRCSmgwISU0GhKQmA0JSkwEhqcmAkNRkQEhqGrb13meTTHXfR7k7yaWN965P8mSSPUmuGWXh\nkhbesK33AK7vWuqtqaptszcmWQHcCFwCnANcnuScPsVKWlxDtd6bp3XAnqp6pqpeA+4ANgyxH0lj\n0ucZxNVdd+/NSU4asP104LkZ63u7MUnLxLABcRPwXmAN8DxwXd9CkmxKsivJrp/yat/dSRqBoQKi\nql6oqter6g3gCwxuqTcFnDlj/YxurLVPW+9JS8ywrfdOnbH6MQa31HsAWJ3krCRHARuBrcMcT9J4\nzNndu2u9dwFwcpK9wLXABUnWAAU8C1zVzT0N+GJVXVpVB5NcDdwDrAA2V9XjC3IWkhaErfektyFb\n70nqzYCQ1GRASGoyICQ1GRCSmgwISU0GhKQmA0JSkwEhqcmAkNRkQEhqMiAkNRkQkpoMCElNBoSk\nJgNCUpMBIanJgJDUNJ/vpNwMXAbsq6r3dWN3Amd3U04E/quq1gx477PAj4HXgYNVtXZEdUtaBHMG\nBNOt924AvnRooKp++9BykuuAl97i/RdW1Q+HLVDS+MwZEFV1X5L3DNqWJMAngN8YbVmSloK+zyB+\nDXihqp5ubC/g3iQPJtnU81iSFtl8bjHeyuXA7W+x/fyqmkpyCrA9yRNdM+DDdAGyCeAYju1ZlqRR\nGPoKIskRwG8Bd7bmVNVU97oP2MLgFn2H5tp6T1pi+txifAh4oqr2DtqY5Lgkxx9aBi5mcIs+SUvU\nnAHRtd77JnB2kr1Jruw2bWTW7UWS05Js61ZXAfcneRj4NvD1qrp7dKVLWmi23pPehmy9J6k3A0JS\nkwEhqcmAkNRkQEhqMiAkNRkQkpoMCElNBoSkJgNCUpMBIanJgJDUZEBIajIgJDUZEJKaDAhJTQaE\npKYl+Y1SSX4AfG/W8MnAJDbgmdTzgsk9t0k4r3dX1TvnmrQkA2KQJLsmsXXfpJ4XTO65Tep5DeIt\nhqQmA0JS03IKiFvGXcACmdTzgsk9t0k9r8Msm2cQkhbfcrqCkLTIlkVAJFmf5Mkke5JcM+56RiXJ\ns0keTbI7ya5x19NHks1J9iV5bMbYyiTbkzzdvZ40zhqH0TivzyaZ6n5vu5NcOs4aF9KSD4gkK4Ab\ngUuAc4DLk5wz3qpG6sKqWjMBH5vdCqyfNXYNsKOqVgM7uvXl5lYOPy+A67vf25qq2jZg+0RY8gHB\ndEfwPVX1TFW9BtwBbBhzTZqlqu4D9s8a3gDc1i3fBnx0UYsagcZ5vW0sh4A4HXhuxvrebmwSFHBv\nkgeTbBp3MQtgVVU93y1/n+mGzpPi6iSPdLcgy+7Wab6WQ0BMsvOr6gNM3z59Ksmvj7ughVLTH5dN\nykdmNwHvBdYAzwPXjbechbMcAmIKOHPG+hnd2LJXVVPd6z5gC9O3U5PkhSSnAnSv+8Zcz0hU1QtV\n9XpVvQF8gcn7vb1pOQTEA8DqJGclOQrYCGwdc029JTkuyfGHloGLgcfe+l3Lzlbgim75CuBrY6xl\nZA6FXudjTN7v7U1HjLuAuVTVwSRXA/cAK4DNVfX4mMsahVXAliQw/Xv4SlXdPd6ShpfkduAC4OQk\ne4Frgc8DX01yJdP/O/cT46twOI3zuiDJGqZvmZ4FrhpbgQvMv6SU1LQcbjEkjYkBIanJgJDUZEBI\najIgJDUZEJKaDAhJTQaEpKb/BTYL03J/XrABAAAAAElFTkSuQmCC\n", 100 | "text/plain": [ 101 | "" 102 | ] 103 | }, 104 | "metadata": {}, 105 | "output_type": "display_data" 106 | } 107 | ], 108 | "source": [ 109 | "plt.imshow(input_image)\n", 110 | "plt.show()" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": 6, 116 | "metadata": { 117 | "collapsed": true 118 | }, 119 | "outputs": [], 120 | "source": [ 121 | "conv_filter = np.zeros((4, 4))\n", 122 | "conv_filter[1:3, :] = 1" 123 | ] 124 | }, 125 | { 126 | "cell_type": "code", 127 | "execution_count": 7, 128 | "metadata": { 129 | "collapsed": false 130 | }, 131 | "outputs": [ 132 | { 133 | "name": "stdout", 134 | "output_type": "stream", 135 | "text": [ 136 | "[[ 0. 0. 0. 0.]\n", 137 | " [ 1. 1. 1. 1.]\n", 138 | " [ 1. 1. 1. 1.]\n", 139 | " [ 0. 0. 0. 
0.]]\n" 140 | ] 141 | } 142 | ], 143 | "source": [ 144 | "print(conv_filter)" 145 | ] 146 | }, 147 | { 148 | "cell_type": "code", 149 | "execution_count": 8, 150 | "metadata": { 151 | "collapsed": true 152 | }, 153 | "outputs": [], 154 | "source": [ 155 | "output = convolution(input_image, conv_filter, 1)" 156 | ] 157 | }, 158 | { 159 | "cell_type": "code", 160 | "execution_count": 9, 161 | "metadata": { 162 | "collapsed": false 163 | }, 164 | "outputs": [ 165 | { 166 | "data": { 167 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAP8AAAD8CAYAAAC4nHJkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAADPdJREFUeJzt3X2sZPVdx/H3x10eZIuwFKQUNgKV\nkGB9gGyQ1gYbV3FBwtakiRCrUJoQoig0GEIlsY1/tdbW+tCUIEVRCRApWNKAZaVtjIlsC+vyTMuC\nyEOXBymBSiOw7dc/5mxz93Lv7mXmnMNdf+9XMpkzc34z57u/uZ97zpw7O99UFZLa8yNvdgGS3hyG\nX2qU4ZcaZfilRhl+qVGGX2qU4ZcaZfilRhl+qVErx9zY3tmn9mXVmJuUmvK/vMyr9UqWMnbU8O/L\nKn4+68bcpNSUTXX7ksd62C81aqbwJ1mf5JtJtia5tK+iJA1v6vAnWQF8FjgVOA44K8lxfRUmaViz\n7PlPBLZW1aNV9SpwHbChn7IkDW2W8B8OPDHn9pPdfZL2AIOf7U9yHnAewL7sN/TmJC3RLHv+p4A1\nc24f0d23k6q6oqrWVtXavdhnhs1J6tMs4f8GcEySo5LsDZwJ3NxPWZKGNvVhf1VtT3IB8GVgBXBV\nVd3fW2WSBjXTe/6qugW4padaJI3IT/hJjTL8UqMMv9Qowy81yvBLjTL8UqMMv9Qowy81yvBLjTL8\nUqMMv9Qowy81yvBLjTL8UqMMv9Qowy81yvBLjTL8UqMMv9SoWdp1rUny1SQPJLk/yYV9FiZpWLN8\nged24OKq2pxkf+CuJBur6oGeapM0oKn3/FW1rao2d8vfBR7Edl3SHqOXdl1JjgSOBzYtsM52XdIy\nNPMJvyRvAb4AXFRVL81fb7suaXmaKfxJ9mIS/Guq6sZ+SpI0hlnO9gf4PPBgVX26v5IkjWGWPf8v\nAL8F/FKSLd3ltJ7qkjSwWRp1/huQHmuRNCI/4Sc1yvBLjTL8UqMMv9Qowy81yvBLjTL8UqMMv9Qo\nwy81yvBLjTL8UqMMv9Qowy81yvBLjTL8UqMMv9Qowy81yvBLjerjq7tXJPmPJF/qoyBJ4+hjz38h\nk249kvYgs35v/xHArwFX9lOOpLHMuuf/DHAJ8IMeapE0olmadpwOPFtVd+1m3HlJ7kxy52u8Mu3m\nJPVs1qYdZyR5DLiOSfOOf5g/yF590vI0S4vuj1TVEVV1JHAm8JWq+kBvlUkalH/nlxo1dbuuuarq\na8DX+nguSeNwzy81yvBLjTL8UqMMv9Qowy81yvBLjTL8UqMMv9Qowy81yvBLjTL8UqMMv9Qowy81\nyvBLjTL8UqMMv9Qowy81yvBLjZq1aceBSW5I8lCSB5O8q6/CJA1r1u/w+3Pgn6vq/Un2BvbroSZJ\nI5g6/EkOAE4GzgGoqleBV/spS9LQZjnsPwp4DvibrkvvlUlW9VSXpIHNEv6VwAnA56rqeOBl4NL5\ng2zXJS1Ps4T/SeDJqtrU3b6ByS+DndiuS1qeZmnX9TTwRJJju7vWAQ/0UpWkwc16tv/3gGu6M/2P\nAh+cvSRJY5gp/FW1BVjbUy2SRuQn/KRG9dKoc6leWbOKrRefNOYmpaa88qk7ljzWPb/UKMMvNcrw\nS40y/FKjDL/UKMMvNcrwS40y/FKjDL/UKMMvNcrwS40y/FKjDL/UqFH/V99Pr36Or//G5WNuUmrK\niVc9t+Sx7vmlRhl+qVGztuv6cJL7k9yX5Nok+/ZVmKRhTR3+JIcDvw+srap3AiuAM/sqTNKwZj3s\nXwn8aJKVTPr0fXv2kiSNYZbv7X8K+FPgcWAb8GJV3dZXYZKGNcth/2pgA5OefW8HViX5wALjftiu\n67nnvz99pZJ6Ncth/y8D/1lVz1XVa8CNwLvnD5rbruuQt66YYXOS+jRL+B8HTkqyX5Iwadf1YD9l\nSRraLO/5NzFpzrkZuLd7rit6qkvSwGZt1/VR4KM91SJpRH7CT2qU4ZcaNer/6rv3hUN4x/Xnj7lJ\nqSnffuEzSx7rnl9qlOGXGmX4pUYZfqlRhl9qlOGXGmX4pUYZfqlRhl9qlOGXGmX4pUYZfqlRo/7H\nnn2eeJmf/PAdY25Sasrz9fKSx7rnlxpl+KVG7Tb8Sa5K8myS++bcd1CSjUke7q5XD1umpL4tZc//\nt8D6efddCtxeVccAt3e3Je1Bdhv+qvpX4Dvz7t4AXN0tXw28r+e6JA1s2vf8h1bVtm75aeDQnuqR\nNJKZT/hVVQG12Pq57bpe45VZNyepJ9OG/5kkhwF0188uNnBuu6692GfKzUnq27Thvxk4u1s+G/hi\nP+VIGstS/tR3LfDvwLFJnkzyIeDjwK8keZhJw86PD1umpL7t9uO9VXXWIqvW9VyLpBH5CT+pUYZf\napThlxpl+KVGGX6pUYZfapThlxpl+KVGGX6pUYZfapThlxpl+KVGGX6pUYZfapThlxpl+KVGGX6p\nUYZfapThlxo1ba++TyZ5KMk9SW5KcuCwZUrq27S9+jYC76yqnwG+BXyk57okDWyqXn1VdVtVbe9u\n3gEcMUBtkgbUx3v+c4FbF1tpuy5peZop/EkuA7YD1yw2xnZd0vK026Ydi0lyDnA6sK5r1ilpDzJV\n+JOsBy4BfrGqvtdvSZLGMG2vvr8C9gc2JtmS5PKB65TUs2l79X1+gFokjchP+EmNMvxSowy/1CjD\nLzXK8EuNMvxSowy/1CjDLzXK8EuNMvxSowy/1CjDLzXK8EuNMvxSowy/1CjDLzXK8EuNMvxSo6Zq\n1zVn3cVJKsnBw5QnaSjTtusiyRrgFODxnmuSNIKp2nV1/ozJ13f7nf3SHmiq9/xJNgBPVdXdSxhr\nuy5pGXrDTTuS7Af8IZND/t2qqiuAKwB+LAd5lCAtE9Ps+d8BHAXcneQxJh16Nyd5W5+FSRrWG97z\nV9W9wI/v
uN39AlhbVf/dY12SBjZtuy5Je7hp23XNXX9kb9VIGo2f8JMaZfilRhl+qVGGX2qU4Zca\nZfilRhl+qVGGX2qU4ZcaZfilRhl+qVGGX2qU4ZcaZfilRhl+qVGpGu9r9ZI8B/zXIqsPBpbDtwFZ\nx86sY2fLvY6fqKpDlvIEo4Z/V5LcWVVrrcM6rGOcOjzslxpl+KVGLafwX/FmF9Cxjp1Zx87+39Sx\nbN7zSxrXctrzSxrRqOFPsj7JN5NsTXLpAuv3SXJ9t35TkiMHqGFNkq8meSDJ/UkuXGDMe5O8mGRL\nd/mjvuuYs63HktzbbefOBdYnyV90c3JPkhN63v6xc/6dW5K8lOSieWMGm4+FWsAnOSjJxiQPd9er\nF3ns2d2Yh5OcPUAdn0zyUDfvNyU5cJHH7vI17KGOjyV5as78n7bIY3eZr9epqlEuwArgEeBoYG/g\nbuC4eWN+B7i8Wz4TuH6AOg4DTuiW9we+tUAd7wW+NNK8PAYcvIv1pwG3AgFOAjYN/Bo9zeRvxaPM\nB3AycAJw35z7/gS4tFu+FPjEAo87CHi0u17dLa/uuY5TgJXd8icWqmMpr2EPdXwM+IMlvHa7zNf8\ny5h7/hOBrVX1aFW9ClwHbJg3ZgNwdbd8A7AuSfosoqq2VdXmbvm7wIPA4X1uo2cbgL+riTuAA5Mc\nNtC21gGPVNViH8TqXS3cAn7uz8HVwPsWeOivAhur6jtV9QKwEVjfZx1VdVtVbe9u3sGkL+WgFpmP\npVhKvnYyZvgPB56Yc/tJXh+6H47pJv1F4K1DFdS9rTge2LTA6ncluTvJrUl+aqgagAJuS3JXkvMW\nWL+UeevLmcC1i6wbaz4ADq2qbd3y08ChC4wZc14AzmVyBLaQ3b2Gfbige/tx1SJvg97wfDR7wi/J\nW4AvABdV1UvzVm9mcuj7s8BfAv80YCnvqaoTgFOB301y8oDbWlSSvYEzgH9cYPWY87GTmhzTvql/\nkkpyGbAduGaRIUO/hp9j0h3754BtwKf6eNIxw/8UsGbO7SO6+xYck2QlcADwfN+FJNmLSfCvqaob\n56+vqpeq6n+65VuAvZIc3Hcd3fM/1V0/C9zE5PBtrqXMWx9OBTZX1TML1DjafHSe2fHWprt+doEx\no8xLknOA04Hf7H4Rvc4SXsOZVNUzVfX9qvoB8NeLPP8bno8xw/8N4JgkR3V7mTOBm+eNuRnYcdb2\n/cBXFpvwaXXnED4PPFhVn15kzNt2nGtIciKTeRril9CqJPvvWGZygum+ecNuBn67O+t/EvDinEPi\nPp3FIof8Y83HHHN/Ds4GvrjAmC8DpyRZ3R0Gn9Ld15sk64FLgDOq6nuLjFnKazhrHXPP8fz6Is+/\nlHztrI8zlG/gTOZpTM6uPwJc1t33x0wmF2BfJoedW4GvA0cPUMN7mBxG3gNs6S6nAecD53djLgDu\nZ3LG9A7g3QPNx9HdNu7utrdjTubWEuCz3ZzdC6wdoI5VTMJ8wJz7RpkPJr9wtgGvMXmf+iEm53lu\nBx4G/gU4qBu7FrhyzmPP7X5WtgIfHKCOrUzeR+/4Odnxl6i3A7fs6jXsuY6/7177e5gE+rD5dSyW\nr11d/ISf1KhmT/hJrTP8UqMMv9Qowy81yvBLjTL8UqMMv9Qowy816v8AYXq4N93bXRMAAAAASUVO\nRK5CYII=\n", 168 | "text/plain": [ 169 | "" 170 | ] 171 | }, 172 | "metadata": {}, 173 | "output_type": "display_data" 174 | } 175 | ], 176 | "source": [ 177 | "plt.imshow(output)\n", 178 | "plt.show()" 179 | ] 180 | }, 181 | { 182 | "cell_type": "markdown", 183 | "metadata": {}, 184 | "source": [ 185 | "More complex features such as a cross can be detected with a combination of different filters." 
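In a CNN the next layer learns how to weight the individual feature maps; as a toy illustration of combining them by hand (not part of the notebook), an element-wise maximum of the two responses computed below lights up wherever either filter matches, recovering the full cross.

```python
import numpy as np

def combine_feature_maps(map_a, map_b):
    """Element-wise maximum of two feature maps of the same shape."""
    return np.maximum(map_a, map_b)

# Hypothetical usage once output1 and output2 have been computed below:
# combined = combine_feature_maps(output1, output2)
# plt.imshow(combined)
# plt.show()
```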
186 | ] 187 | }, 188 | { 189 | "cell_type": "code", 190 | "execution_count": 10, 191 | "metadata": { 192 | "collapsed": true 193 | }, 194 | "outputs": [], 195 | "source": [ 196 | "input_image = np.zeros((20, 20))\n", 197 | "input_image[9:11, :] = 1\n", 198 | "input_image[:, 9:11] = 1" 199 | ] 200 | }, 201 | { 202 | "cell_type": "code", 203 | "execution_count": 11, 204 | "metadata": { 205 | "collapsed": false 206 | }, 207 | "outputs": [ 208 | { 209 | "data": { 210 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQgAAAD8CAYAAACLgjpEAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAADfFJREFUeJzt3XuoZeV5x/Hvr+MNp9J4iRNvNZJO\nBRPiNAxjQ23RmnhDMklJ05HSTlrL2BChgUKxLWhI/0kpVmgVJZdBUxI1vUwykNFxsAUjJOoo4y3x\nMh0MMyfGSTTVGKNmzNM/zho5PbNf53Svfc7e5/j9wLDXete713oWB36stfae/aSqkKRBfmncBUia\nXAaEpCYDQlKTASGpyYCQ1GRASGoyICQ1GRCSmgwISU2HjLuAQQ7L4XUEy8ddxlver7/35XnZ75MP\nHzkv+9XcvcJPea1ezcHmTWRAHMFyzsp54y7jLW/r1h3zst8LTlw1L/vV3N1bd81pXq9bjCQXJnki\nyc4kVw7YfniS27rt9yZ5Z5/jSVpYQwdEkmXA9cBFwBnApUnOmDXtMuDHVfVrwLXA3w97PEkLr88V\nxBpgZ1XtqqrXgFuBtbPmrAVu7pb/DTgvyUHveyRNhj4BcRKwe8b6nm5s4Jyq2ge8ABzb45iSFtDE\nPKRMsgHYAHAEPuWWJkGfK4gp4JQZ6yd3YwPnJDkE+BXguUE7q6rPVdXqqlp9KIf3KEvSqPQJiPuB\nlUlOS3IYsA7YPGvOZmB9t/xR4D/Ln7CSFo2hbzGqal+SK4CtwDJgY1U9luQzwPaq2gx8EfiXJDuB\n55kOEUmLRK9nEFW1Bdgya+yqGcuvAL/f5xiSxsf/iyGpyYCQ1GRASGoyICQ1GRCSmgwISU0GhKQm\nA0JSkwEhqcmAkNRkQEhqMiAkNRkQkpoMCElNBoSkJgNCUpMBIanJgJDU1Kez1ilJ/ivJd5I8luQv\nBsw5J8kLSXZ0/64atC9Jk6nPb1LuA/6yqh5MchTwQJJtVfWdWfO+WVWX9DiOpDEZ+gqiqp6pqge7\n5Z8A3+XAzlqSFrGRPIPounb/BnDvgM3vT/JQktuTvHsUx5O0MHq33kvyy8C/A5+qqhdnbX4QOLWq\nXkpyMfA1YGVjP7bekyZMryuIJIcyHQ5frqr/mL29ql6sqpe65S3AoUmOG7QvW+9Jk6fPpxhhunPW\nd6vqHxtz3tHNI8ma7ngDe3NKmjx9bjF+C/gj4JEkO7qxvwF+FaCqbmS6H+cnkuwDfgasszentHj0\n6c15D5CDzLkOuG7YY0gaL79JKanJgJDUZEBIajIgJDUZEJKaDAhJTQaEpCYDQlKTASGpyYCQ1GRA\nSGoyICQ1GRCSmgwISU0GhKQmA0JSkwEhqcmAkNTUOyCSPJ3kka613vYB25Pkn5LsTPJwkvf1Paak\nhdG7L0bn3Kr6UWPbRUz3wlgJnAXc0L1KmnALcYuxFvhSTfs28LYkJyzAcSX1NIqAKODOJA903bFm\nOwnYPWN9D/bwlBaFUdxinF1VU0mOB7Ylebyq7v7/7sTWe9Lk6X0FUVVT3eteYBOwZtaUKeCUGesn\nd2Oz92PrPWnC9O3NuTzJUfuXgfOBR2dN2wz8cfdpxm8CL1TVM32OK2lh9L3FWAFs6tpvHgJ8paru\nSPLn8Eb7vS3AxcBO4GXgT3oeU9IC6RUQVbULOHPA+I0zlgv4ZJ/jSBoPv0kpqcmAkNRkQEhqMiAk\nNRkQkpoMCElNBoSkJgNCUpMBIanJgJDUZEBIajIgJDUZEJKaDAhJTQaEpCYDQlKTASGpyYCQ1GRA\nSGoaOiCSnN7149z/78Ukn5o155wkL8yYc1X/kiUtlKF/tLaqngBWASRZxnSvi00Dpn6zqi4Z9jiS\nxmdUtxjnAf9dVd8b0f4kTYBRBcQ64JbGtvcneSjJ7Une3dpBkg1JtifZ/nNeHVFZkvroHRBJDgM+\nBPzrgM0PAqdW1ZnAPwNfa+3H1nvS5BnFFcRFwINV9ezsDVX1YlW91C1vAQ5NctwIjilpAYwiIC6l\ncXuR5B3p+vIlWdMd77kRHFPSAujVeq9r2PtB4PIZYzP7cn4U+ESSfcDPgHVdKz5Ji0Df3pw/BY6d\nNTazL+d1wHV9jiFpfPwmpaQmA0JSkwEhqcmAkNRkQEhqyiR+6rj6zCPqvq2njLsMaclac8Futj/0\nSg42zysISU0GhKQmA0JSkwEhqcmAkNRkQEhqMiAkNRkQkpoMCElNBoSkJgNCUpMBIalpTgGRZGOS\nvUkenTF2TJJtSZ7qXo9uvHd9N+epJOtHVbik+TfXK4ibgAtnjV0J3FVVK4G7uvX/I8kxwNXAWcAa\n4OpWkEiaPHMKiKq6G3h+1vBa4OZu+WbgwwPeegGwraqer6ofA9s4MGgkTag+zyBWVNUz3fIPgBUD\n5pwE7J6xvqcbk7QIjOQhZdfrotcvz8zszfnD514fRVmSeuoTEM8mOQGge907YM4UMPOnoU7uxg4w\nszfn249d1qMsSaPSJyA2A/s/lVgPfH3AnK3A+UmO7h5Ont+NSVoE5vox5y3At4DTk+xJchnwWeCD\nSZ4CPtCtk2R1ki8AVNXzwN8B93f/PtONSVoE5tR6r6oubWw6b8Dc7cCfzVjfCGwcqjpJY9WrN+d8\nefLhI7ngxFXjLuMtb+v3d8zLfv3bjt+T9dyc5vlVa0lNBoSkJgNCUpMBIanJgJDUZEBIajIgJDUZ\nEJKaDAhJTQaEpCYDQlKTASGpyYCQ1GRASGoyICQ1GRCSmgwISU0HDYhG271/SPJ4koeTbErytsZ7\nn07ySJIdSbaPsnBJ828uVxA3cWA3rG3Ae6rqvcCTwF+/yfvPrapVVbV6uBIljctBA2JQ272qurOq\n9nWr32a634WkJWYUzyD+FLi9sa2AO5M8kGTDCI4laQH1+lXrJH8L7AO+3JhydlVNJTke2Jbk8e6K\nZNC+NgAbAI7gyD5lSRqRoa8gknwcu
AT4w6435wGqaqp73QtsAta09jez9d6hHD5sWZJGaKiASHIh\n8FfAh6rq5cac5UmO2r/MdNu9RwfNlTSZ5vIx56C2e9cBRzF927AjyY3d3BOTbOneugK4J8lDwH3A\nN6rqjnk5C0nz4qDPIBpt977YmPt94OJueRdwZq/qJI2V36SU1GRASGoyICQ1GRCSmgwISU0GhKQm\nA0JSkwEhqcmAkNRkQEhqMiAkNRkQkpoMCElNBoSkJgNCUpMBIanJgJDUZEBIahq29d6nk0x1v0e5\nI8nFjfdemOSJJDuTXDnKwiXNv2Fb7wFc27XUW1VVW2ZvTLIMuB64CDgDuDTJGX2KlbSwhmq9N0dr\ngJ1VtauqXgNuBdYOsR9JY9LnGcQVXXfvjUmOHrD9JGD3jPU93ZikRWLYgLgBeBewCngGuKZvIUk2\nJNmeZPvPebXv7iSNwFABUVXPVtXrVfUL4PMMbqk3BZwyY/3kbqy1T1vvSRNm2NZ7J8xY/QiDW+rd\nD6xMclqSw4B1wOZhjidpPA7aWatrvXcOcFySPcDVwDlJVgEFPA1c3s09EfhCVV1cVfuSXAFsBZYB\nG6vqsXk5C0nzYt5a73XrW4ADPgKVtDj4TUpJTQaEpCYDQlKTASGpyYCQ1GRASGoyICQ1GRCSmgwI\nSU0GhKQmA0JSkwEhqcmAkNRkQEhqMiAkNRkQkpoMCElNBoSkprn8JuVG4BJgb1W9pxu7DTi9m/I2\n4H+qatWA9z4N/AR4HdhXVatHVLekBXDQgGC69d51wJf2D1TVH+xfTnIN8MKbvP/cqvrRsAVKGp+5\n/Gjt3UneOWhbkgAfA353tGVJmgR9n0H8NvBsVT3V2F7AnUkeSLKh57EkLbC53GK8mUuBW95k+9lV\nNZXkeGBbkse7ZsAH6AJkA8ARHNmzLEmjMPQVRJJDgN8DbmvNqaqp7nUvsInBLfr2z7X1njRh+txi\nfAB4vKr2DNqYZHmSo/YvA+czuEWfpAl10IDoWu99Czg9yZ4kl3Wb1jHr9iLJiUn2d9JaAdyT5CHg\nPuAbVXXH6EqXNN+Gbb1HVX18wNgbrfeqahdwZs/6JI2R36SU1GRASGoyICQ1GRCSmgwISU0GhKQm\nA0JSkwEhqcmAkNRkQEhqMiAkNRkQkpoMCElNBoSkJgNCUpMBIanJgJDUlKoadw0HSPJD4Huzho8D\nlmIDnqV6XrB0z20pnNepVfX2g02ayIAYJMn2pdi6b6meFyzdc1uq5zWItxiSmgwISU2LKSA+N+4C\n5slSPS9Yuue2VM/rAIvmGYSkhbeYriAkLbBFERBJLkzyRJKdSa4cdz2jkuTpJI8k2ZFk+7jr6SPJ\nxiR7kzw6Y+yYJNuSPNW9Hj3OGofROK9PJ5nq/m47klw8zhrn08QHRJJlwPXARcAZwKVJzhhvVSN1\nblWtWgIfm90EXDhr7ErgrqpaCdzVrS82N3HgeQFc2/3dVlXVlgHbl4SJDwimO4LvrKpdVfUacCuw\ndsw1aZaquht4ftbwWuDmbvlm4MMLWtQINM7rLWMxBMRJwO4Z63u6saWggDuTPJBkw7iLmQcrquqZ\nbvkHTDd0XiquSPJwdwuy6G6d5moxBMRSdnZVvY/p26dPJvmdcRc0X2r647Kl8pHZDcC7gFXAM8A1\n4y1n/iyGgJgCTpmxfnI3tuhV1VT3uhfYxPTt1FLybJITALrXvWOuZySq6tmqer2qfgF8nqX3d3vD\nYgiI+4GVSU5LchiwDtg85pp6S7I8yVH7l4HzgUff/F2LzmZgfbe8Hvj6GGsZmf2h1/kIS+/v9oZD\nxl3AwVTVviRXAFuBZcDGqnpszGWNwgpgUxKY/jt8paruGG9Jw0tyC3AOcFySPcDVwGeBrya5jOn/\nnfux8VU4nMZ5nZNkFdO3TE8Dl4+twHnmNyklNS2GWwxJY2JASGoyICQ1GRCSmgwISU0GhKQmA0JS\nkwEhqel/ARiL50/jSCiIAAAAAElFTkSuQmCC\n", 211 | "text/plain": [ 212 | "" 213 | ] 214 | }, 215 | "metadata": {}, 216 | "output_type": "display_data" 217 | } 218 | ], 219 | "source": [ 220 | "plt.imshow(input_image)\n", 221 | "plt.show()" 222 | ] 223 | }, 224 | { 225 | "cell_type": "code", 226 | "execution_count": 12, 227 | "metadata": { 228 | "collapsed": true 229 | }, 230 | "outputs": [], 231 | "source": [ 232 | "conv_filter1 = np.zeros((4, 4))\n", 233 | "conv_filter1[1:3, :] = 1\n", 234 | "conv_filter2 = np.zeros((4, 4))\n", 235 | "conv_filter2[:, 1:3] = 1" 236 | ] 237 | }, 238 | { 239 | "cell_type": "code", 240 | "execution_count": 13, 241 | "metadata": { 242 | "collapsed": false 243 | }, 244 | "outputs": [ 245 | { 246 | "name": "stdout", 247 | "output_type": "stream", 248 | "text": [ 249 | "[[ 0. 0. 0. 0.]\n", 250 | " [ 1. 1. 1. 1.]\n", 251 | " [ 1. 1. 1. 1.]\n", 252 | " [ 0. 0. 0. 0.]]\n" 253 | ] 254 | } 255 | ], 256 | "source": [ 257 | "print(conv_filter1)" 258 | ] 259 | }, 260 | { 261 | "cell_type": "code", 262 | "execution_count": 14, 263 | "metadata": { 264 | "collapsed": false 265 | }, 266 | "outputs": [ 267 | { 268 | "name": "stdout", 269 | "output_type": "stream", 270 | "text": [ 271 | "[[ 0. 1. 1. 0.]\n", 272 | " [ 0. 1. 1. 0.]\n", 273 | " [ 0. 1. 1. 0.]\n", 274 | " [ 0. 1. 1. 
0.]]\n" 275 | ] 276 | } 277 | ], 278 | "source": [ 279 | "print(conv_filter2)" 280 | ] 281 | }, 282 | { 283 | "cell_type": "code", 284 | "execution_count": 15, 285 | "metadata": { 286 | "collapsed": false 287 | }, 288 | "outputs": [ 289 | { 290 | "data": { 291 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAP8AAAD8CAYAAAC4nHJkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAADYJJREFUeJzt3W2MXOV5h/Hrjm1w7TjYDpSAbQpE\nCIlGTbEsQ9LIjeKWGBfhRMoHo6aFEAmhlhYqKuQUqYn6KWmaNH2JErlAQ1sLUAkkVgQB10mEKtVL\nwLUBYxIMpQZjMA0UKAi/hLsf5jgaLzv2eM6Ld/1cP2m1Z+Y8M+feZ+a/58yZ0dyRmUgqz7uOdQGS\njg3DLxXK8EuFMvxSoQy/VCjDLxXK8EuFMvxSoQy/VKjpXW7shDgxZzK7y00el96ee/RzuH9OC4U0\nbMbro93uXf/7RrOFTGFv8Qb7cm8MM7bT8M9kNhfE8i43eVx682MXHPVtnl821PPhmDr9gdE+aj7r\n7rGGK5m6xnLj0GM97JcKVSv8EbEiIn4SETsiYk1TRUlq38jhj4hpwNeBi4HzgMsi4rymCpPUrjp7\n/qXAjsx8OjP3AbcDq5opS1Lb6oR/AfBs3+XnquskTQGtn+2PiKuAqwBmMqvtzUkaUp09/y5gUd/l\nhdV1h8jMtZm5JDOXzODEGpuT1KQ64f8xcE5EnBURJwCrgfXNlCWpbSMf9mfmgYi4BrgPmAbckpnb\nGqtMUqtqvebPzHuAexqqRVKH/ISfVCjDLxXK8EuFMvxSoQy/VCjDLxXK8EuFMvxSoQy/VCjDLxXK\n8EuFMvxSoQy/VCjDLxXK8EuFMvxSoQy/VCjDLxXK8EuFqtOua1FE/DAiHo+IbRFxbZOFSWpXnS/w\nPABcn5mbI2IO8HBEbMjMxxuqTVKLRt7zZ+buzNxcLb8ObMd2XdKU0Ui7rog4EzgfGJtgne26pEmo\n9gm/iHg38G3gusx8bfx623VJk1Ot8EfEDHrBX5eZdzVTkqQu1DnbH8DNwPbM/GpzJUnqQp09/28A\nvwd8LCK2VD8rG6pLUsvqNOr8dyAarEVSh/yEn1Qowy8VyvBLhTL8UqEMv1Qowy8VyvBLhTL8UqEM\nv1Qowy8VyvBLhTL8UqEMv1Qowy8VyvBLhTL8UqEMv1Qowy8Vqomv7p4WEf8ZEd9roiBJ3Whiz38t\nvW49kqaQut/bvxD4HeCmZsqR1JW6e/6vATcAbzdQi6QO1WnacQmwJzMfPsK4qyLioYh4aD97R92c\npIbVbdpxaUQ8A9xOr3nHv4wfZK8+aXKq06L7c5m5MDPPBFYDP8jMTzdWmaRW+T6/VKiR23X1y8wf\nAT9q4r4kdcM9v1Qowy8VyvBLhTL8UqEMv1Qowy8VyvBLhTL8UqEMv1Qowy8VyvBLhTL8UqEMv1Qo\nwy8VyvBLhTL8UqEMv1Qowy8Vqm7TjrkRcWdEPBER2yPiQ00VJqlddb/D72+A72fmpyLiBGBWAzVJ\n6sDI4Y+Ik4BlwBUAmbkP2NdMWZLaVuew/yzgJeAfqy69N0XE7IbqktSyOuGfDiwGvpGZ5wNvAGvG\nD7JdlzQ51Qn/c8BzmTlWXb6T3j+DQ9iuS5qc6rTregF4NiLOra5aDjzeSFWSWlf3bP8fAeuqM/1P\nA5+pX5KkLtQKf2ZuAZY0VIukDvkJP6lQjTTqHNbeRbPZcf2FXW7yuDTzjNeP+jbLFuxsoZJmPXjG\nGSPd7vllPqcO2vuVTUOPdc8vFcrwS4Uy/FKhDL9UKMMvFcrwS4Uy/FKhDL9UKMMvFcrwS4Uy/FKh\nDL9UKMMvFSoys7ONLfngzHzwvkWdbU8qzdKPP8tDW9+KYca655cKZfilQtVt1/UnEbEtIh6LiNsi\nYmZThUlq18jhj4gFwB8DSzLzA8A0YHVThUlqV93D/unAL0XEdHp9+p6vX5KkLtT53v5dwF8BO4Hd\nwKuZeX9ThUlqV53D/nnAKno9+04HZkfEpycY94t2XS/97OejVyqpUXUO+38L+K/MfCkz9wN3AR8e\nP6i/Xdcp751WY3OSmlQn/DuBCyNiVkQEvXZd25spS1Lb6rzmH6PXnHMz8Gh1X2sbqktSy+q26/o8\n8PmGapHUIT/hJxXK8EuF6rRX36OvnML777i6y00el0bp1bd0KvTq2zVar763ds5puJKp6/lXvjb0\nWPf8UqEMv1Qowy8VyvBLhTL8UqEMv1Qowy8VyvBLhTL8UqEMv1Qowy8VyvBLheq0Xdd7Yn5eEMs7\n297x6s1PXnDUt3l+2VAdnI6p0x8Y7bk46+6xhiuZusZyI6/ly7brkjSY4ZcKdcTwR8QtEbEnIh7r\nu25+RGyIiCer3/PaLVNS04bZ838LWDHuujXAxsw8B9hYXZY0hRwx/Jn5APDyuKtXAbdWy7cCn2i4\nLkktG/U1/6mZubtafgE4taF6JHWk9gm/7L1XOPA9mv52XfvZW3dzkhoyavhfjIjTAKrfewYN7G/X\nNYMTR9ycpKaNGv71wOXV8uXAd5spR1JXhnmr7zbgP4BzI+K5iPgs8EXgtyPiSXoNO7/YbpmSmnbE\n7+3PzMsGrPJzutIU5if8pEIZfqlQhl8qlOGXCmX4pUIZfqlQhl8qlOGXCmX4pUIZfqlQhl8qlOGX\nCmX4pUIZfqlQhl8qlOGXCmX4pUIZfqlQhl8q1Ki9+r4cEU9ExCMRcXdEzG23TElNG7VX3wbgA5n5\na8BPgc81XJeklo3Uqy8z78/MA9XFTcDCFmqT1KImXvNfCdw7aKXtuqTJqVb4I+JG4ACwbtAY23VJ\nk9MRm3YMEhFXAJcAy6tmnZKmkJHCHxErgBuA38zMN5stSVIXRu3V9/fAHGBDRGyJiG+2XKekho3a\nq+/mFmqR1CE/4ScVyvBLhTL8UqEMv1Qowy8VyvBLhTL8UqEMv1Qowy8VyvBLhTL8UqEMv1Qowy8V\nyvBLhTL8UqEMv1Qowy8VyvBLhRqpXVffuusjIiPi5HbKk9SWUdt1ERGLgIuAnQ3XJKkDI7Xrqvw1\nva/v9jv7pSlopNf8EbEK2JWZW4cYa7suaRI66qYdETEL+DN6h/xHlJlrgbUA74n5HiVIk8Qoe/73\nA2cBWyPiGXodejdHxPuaLExSu456z5+ZjwK/fPBy9Q9gSWb+T4N1SWrZqO26JE1xo7br6l9/ZmPV\nSO
qMn/CTCmX4pUIZfqlQhl8qlOGXCmX4pUIZfqlQhl8qlOGXCmX4pUIZfqlQhl8qlOGXCmX4pUIZ\nfqlQkdnd1+pFxEvAfw9YfTIwGb4NyDoOZR2Hmux1/EpmnjLMHXQa/sOJiIcyc4l1WId1dFOHh/1S\noQy/VKjJFP61x7qAinUcyjoOddzUMWle80vq1mTa80vqUKfhj4gVEfGTiNgREWsmWH9iRNxRrR+L\niDNbqGFRRPwwIh6PiG0Rce0EYz4aEa9GxJbq58+brqNvW89ExKPVdh6aYH1ExN9Wc/JIRCxuePvn\n9v2dWyLitYi4btyY1uZjohbwETE/IjZExJPV73kDbnt5NebJiLi8hTq+HBFPVPN+d0TMHXDbwz6G\nDdTxhYjY1Tf/Kwfc9rD5eofM7OQHmAY8BZwNnABsBc4bN+YPgG9Wy6uBO1qo4zRgcbU8B/jpBHV8\nFPheR/PyDHDyYdavBO4FArgQGGv5MXqB3nvFncwHsAxYDDzWd91fAmuq5TXAlya43Xzg6er3vGp5\nXsN1XARMr5a/NFEdwzyGDdTxBeBPh3jsDpuv8T9d7vmXAjsy8+nM3AfcDqwaN2YVcGu1fCewPCKi\nySIyc3dmbq6WXwe2Awua3EbDVgH/lD2bgLkRcVpL21oOPJWZgz6I1bicuAV8//PgVuATE9z048CG\nzHw5M18BNgArmqwjM+/PzAPVxU30+lK2asB8DGOYfB2iy/AvAJ7tu/wc7wzdL8ZUk/4q8N62Cqpe\nVpwPjE2w+kMRsTUi7o2IX22rBiCB+yPi4Yi4aoL1w8xbU1YDtw1Y19V8AJyambur5ReAUycY0+W8\nAFxJ7whsIkd6DJtwTfXy45YBL4OOej6KPeEXEe8Gvg1cl5mvjVu9md6h7weBvwO+02IpH8nMxcDF\nwB9GxLIWtzVQRJwAXAr86wSru5yPQ2TvmPaYviUVETcCB4B1A4a0/Rh+g1537F8HdgNfaeJOuwz/\nLmBR3+WF1XUTjomI6cBJwM+aLiQiZtAL/rrMvGv8+sx8LTP/r1q+B5gRESc3XUd1/7uq33uAu+kd\nvvUbZt6acDGwOTNfnKDGzuaj8uLBlzbV7z0TjOlkXiLiCuAS4Herf0TvMMRjWEtmvpiZP8/Mt4F/\nGHD/Rz0fXYb/x8A5EXFWtZdZDawfN2Y9cPCs7aeAHwya8FFV5xBuBrZn5lcHjHnfwXMNEbGU3jy1\n8U9odkTMObhM7wTTY+OGrQd+vzrrfyHwat8hcZMuY8Ahf1fz0af/eXA58N0JxtwHXBQR86rD4Iuq\n6xoTESuAG4BLM/PNAWOGeQzr1tF/jueTA+5/mHwdqokzlEdxJnMlvbPrTwE3Vtf9Bb3JBZhJ77Bz\nB/AgcHYLNXyE3mHkI8CW6mclcDVwdTXmGmAbvTOmm4APtzQfZ1fb2Fpt7+Cc9NcSwNerOXsUWNJC\nHbPphfmkvus6mQ96/3B2A/vpvU79LL3zPBuBJ4F/A+ZXY5cAN/Xd9srqubID+EwLdeyg9zr64PPk\n4DtRpwP3HO4xbLiOf64e+0foBfq08XUMytfhfvyEn1SoYk/4SaUz/FKhDL9UKMMvFcrwS4Uy/FKh\nDL9UKMMvFer/ARsr3RRcboFTAAAAAElFTkSuQmCC\n", 292 | "text/plain": [ 293 | "" 294 | ] 295 | }, 296 | "metadata": {}, 297 | "output_type": "display_data" 298 | } 299 | ], 300 | "source": [ 301 | "output1 = convolution(input_image, conv_filter1, 1)\n", 302 | "plt.imshow(output1)\n", 303 | "plt.show()" 304 | ] 305 | }, 306 | { 307 | "cell_type": "code", 308 | "execution_count": 16, 309 | "metadata": { 310 | "collapsed": false 311 | }, 312 | "outputs": [ 313 | { 314 | "data": { 315 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAP8AAAD8CAYAAAC4nHJkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAADYZJREFUeJzt3WusXNV5h/Hnxdf6UjAlJYBNgRQh\nkfQCsohJIzeKW2pchBMpUo2a1oRICLW0EFEhp0hN1E9J06bXKMgFUtpagEqgsSJocCERqlSbi2tj\njElsKDUYc2mTgjGKL+Hth9kmcw5n7DmzLxx3PT/paPbMXnvW6zXnf/ZlxrMiM5FUnhPe7QIkvTsM\nv1Qowy8VyvBLhTL8UqEMv1Qowy8VyvBLhTL8UqGmd9nZzJiVs5nbZZeqHFg02rj/3IJXR9pu2w/e\nM+ltZj2/f6S+9GM/ZD8H80AM07bT8M9mLh+MZV12qcquG5aMtN0jv3HzSNu9765rJr3Nz35m40h9\n6cc25YNDt/WwXypUrfBHxPKI+G5E7IqINU0VJal9I4c/IqYBXwEuBc4HroiI85sqTFK76uz5LwJ2\nZeazmXkQuBNY2UxZktpWJ/xnAM/33X+hekzScaD1q/0RcTVwNcBs5rTdnaQh1dnz7wEW9d1fWD02\nRmauzczFmbl4BrNqdCepSXXC/yhwbkScHREzgVXA+mbKktS2kQ/7M/NwRFwLfAuYBtyWmdsbq0xS\nq2qd82fmfcB9DdUiqUN+wk8qlOGXCmX4pUIZfqlQhl8qlOGXCmX4pUIZfqlQhl8qlOGXCmX4pUIZ\nfqlQhl8qlOGXCmX4pUIZfqlQhl8qlOGXCmX4pULVma5rUUR8OyKeiojtEXFdk4VJaledL/A8DNyQ\nmZsjYj7weERsyMynGqpNUotG3vNn5t7M3Fwt7wN24HRd0nGjkem6IuIs4AJg0wTrnK5LmoJqX/CL\niHnA14HrM/P18eudrkuammqFPyJm0Av+usy8p5mSJHWhztX+AG4FdmTml5srSVIX6uz5fwn4LeCj\nEbGl+lnRUF2SWlZnos5/A6LBWiR1yE/4SYUy/FKhDL9UKMMvFcrwS4Uy/FKhDL9UKMMvFcrwS4Uy\n/FKhDL9UKMMvFcrwS4Uy/FKhDL9UKMMvFcrwS4Uy/FKhmvjq7mkR8R8R8c0mCpLUjSb2/NfRm61H\n0nGk7vf2LwR+HbilmXIkdaXunv8vgBuBtxqoRVKH6kzacRnwSmY+fox2V0fEYxHx2CEOjNqdpIbV\nnbTj8oh4DriT3uQd/zi+kXP1SVNTnSm6P5uZCzPzLGAV8FBmfrKxyiS1yvf5pUKNPF1Xv8z8DvCd\nJp5LUjfc80uFMvxSoQy/VCjDLxXK8EuFMvxSoQy/VCjDLxXK8EuFMvxSoQy/VCjDLxXK8EuFMvxS\noQy/VKhG/j//sN46aS5vfvSDXXapyuwz9035/t78uL8bdb310Mah27rnlwpl+KVC1Z2046SIuDsi\nno6IHRFxcVOFSWpX3XP+vwT+JTM/EREzgTkN1CSpAyOHPyJOBJYCVwJk5kHgYDNlSWpbncP+s4FX\nga9Vs/TeEhFzG6pLUsvqhH86cCHw1cy8ANgPrBnfaMx0XQfeqNGdpCbVCf8LwAuZuam6fze9PwZj\njJmua9a8Gt1JalKd6bpeAp6PiPOqh5YBTzVSlaTW1b3a/3vAuupK/7PAp+qXJKkLtcKfmVuAxQ3V\nIqlDfsJPKlSn/7Hn0Hx4cWl02aUqS8/Y3Wl/F43Q38NL399CJWU59Ojwbd3zS4Uy/FKhDL9UKMMv\nFcrwS4Uy/FKhDL9UKMMvFcrwS4Uy/FKhDL9UKMMvFcrwS4Uy/FKhDL9UKMMvFarudF2fiYjtEfFk\nRNwREbObKkxSu0YOf0ScAfw+sDgzPwBMA1Y1VZikdtU97J8O/ERETKc3T9+L9UuS1IU639u/B/hT\nYDewF3gtMx9oqjBJ7apz2L8AWElvzr7TgbkR8ckJ2r09XdeP3tg/eqWSGlXnsP9XgP/MzFcz8xBw\nD/Ch8Y36p+uaNs95PKWpok74dwNLImJORAS96bp2NFOWpLbVOeffRG9yzs3Atuq51jZUl6SW1Z2u\n63PA5xqqRVKH/ISfVCjDLxXK8EuFMvxSoQy/VCjDLxXK8EuFMvxSoQy/VCjDLxXK8EuFMvxSoQy/\nVCjDLxXK8EuFqvX/+Sdrxj44/eHssktVHjnzzNE2HHGzR/ZMfkN/N+p7dd/wbd3zS4Uy/FKhjhn+\niLgtIl6JiCf7Hjs5IjZExM7qdkG7ZUpq2jB7/r8Dlo97bA3wYGaeCzxY3Zd0HDlm+DPzYeD74x5e\nCdxeLd8OfKzhuiS1bNRz/lMzc2+1/BJwakP1SOpI7Qt+mZnAwPdo+qfrOnTgjbrdSWrIqOF/OSJO\nA6huXxnUsH+6rhmz5o3YnaSmjRr+9cDqank18I1mypHUlWHe6rsD+HfgvIh4ISI+DXwB+NWI2Elv\nws4vtFumpKYd8+O9mXnFgFXLGq5FUof8hJ9UKMMvFarT/9V3wv/uZ869m7rsUpUXly4ZbcOLR9vs\nh7vnT3qbOfduHK0zve2E3D982xbrkDSFGX6pUIZfKpThlwpl+KVCGX6pUIZfKpThlwpl+KVCGX6p\nUIZfKpThlwpl+KVCGX6pUIZfKpThlwo16lx9X4qIpyPiiYi4NyJOardMSU0bda6+DcAHMvPnge8B\nn224LkktG2muvsx8IDMPV3c3AgtbqE1Si5o4578KuH/QyjHTdXGgge4kNaFW+CPiJuAwsG5QmzHT\ndTGrTneSGjTyt/dGxJXAZcCyarJOSceRkcIfEcuBG4Ffzsw3my1JUhdGnavvb4D5wIaI2BIRN7dc\np6SGjTpX360t1CKpQ37CTyqU4ZcKZfilQhl+qVCGXyqU4ZcKZfilQhl+qVCGXyqU4ZcKZfilQhl+\nqVCGXyqU4ZcKZfilQhl+qVCGXyqU4ZcKNdJ0XX3rboiIjIhT2ilPUltGna6LiFgEXALsbrgmSR0Y\nabquyp/T+/puv7NfOg6NdM4fESuBPZm5dYi2TtclTUGTnrQjIuYAf0jvkP+YMnMtsBbgJ+NkjxKk\nKWKUPf/7gLOBrRHxHL0ZejdHxHubLExSuya958/MbcBPH7lf/QFYnJn/3WBdklo26nRdko5zo07X\n1b/+rMaqkdQZP+EnFcrwS4Uy/FKhDL9UKMMvFcrwS4Uy/FKhDL9UKMMvFcrwS4Uy/FKhDL9UKMMv\nFcrwS4Uy/FKhIrO7r9WLiFeB/xqw+hRgKnwbkHWMZR1jTfU6fiYz3zPME3Qa/qOJiMcyc7F1WId1\ndFOHh/1SoQy/VKipFP6173YBFesYyzrG+n9Tx5Q555fUram055fUoU7DHxHLI+K7EbErItZMsH5W\nRNxVrd8UEWe1UMOiiPh2RDwVEdsj4roJ2nwkIl6LiC3V
zx81XUdfX89FxLaqn8cmWB8R8VfVmDwR\nERc23P95ff/OLRHxekRcP65Na+Mx0RTwEXFyRGyIiJ3V7YIB266u2uyMiNUt1PGliHi6Gvd7I+Kk\nAdse9TVsoI7PR8SevvFfMWDbo+brHTKzkx9gGvAMcA4wE9gKnD+uze8AN1fLq4C7WqjjNODCank+\n8L0J6vgI8M2OxuU54JSjrF8B3A8EsATY1PJr9BK994o7GQ9gKXAh8GTfY38CrKmW1wBfnGC7k4Fn\nq9sF1fKChuu4BJheLX9xojqGeQ0bqOPzwB8M8dodNV/jf7rc818E7MrMZzPzIHAnsHJcm5XA7dXy\n3cCyiIgmi8jMvZm5uVreB+wAzmiyj4atBP4+ezYCJ0XEaS31tQx4JjMHfRCrcTnxFPD9vwe3Ax+b\nYNNfAzZk5vcz8wfABmB5k3Vk5gOZebi6u5HevJStGjAewxgmX2N0Gf4zgOf77r/AO0P3dptq0F8D\nfqqtgqrTiguATROsvjgitkbE/RHx/rZqABJ4ICIej4irJ1g/zLg1ZRVwx4B1XY0HwKmZubdafgk4\ndYI2XY4LwFX0jsAmcqzXsAnXVqcftw04DZr0eBR7wS8i5gFfB67PzNfHrd5M79D3F4C/Bv65xVI+\nnJkXApcCvxsRS1vsa6CImAlcDvzTBKu7HI8xsndM+66+JRURNwGHgXUDmrT9Gn6V3uzYvwjsBf6s\niSftMvx7gEV99xdWj03YJiKmAycC/9N0IRExg17w12XmPePXZ+brmflGtXwfMCMiTmm6jur591S3\nrwD30jt86zfMuDXhUmBzZr48QY2djUfl5SOnNtXtKxO06WRcIuJK4DLgN6s/RO8wxGtYS2a+nJk/\nysy3gL8d8PyTHo8uw/8ocG5EnF3tZVYB68e1WQ8cuWr7CeChQQM+quoawq3Ajsz88oA27z1yrSEi\nLqI3Tm38EZobEfOPLNO7wPTkuGbrgd+urvovAV7rOyRu0hUMOOTvajz69P8erAa+MUGbbwGXRMSC\n6jD4kuqxxkTEcuBG4PLMfHNAm2Few7p19F/j+fiA5x8mX2M1cYVyElcyV9C7uv4McFP12B/TG1yA\n2fQOO3cBjwDntFDDh+kdRj4BbKl+VgDXANdUba4FttO7YroR+FBL43FO1cfWqr8jY9JfSwBfqcZs\nG7C4hTrm0gvziX2PdTIe9P7g7AUO0TtP/TS96zwPAjuBfwVOrtouBm7p2/aq6ndlF/CpFurYRe88\n+sjvyZF3ok4H7jvaa9hwHf9QvfZP0Av0aePrGJSvo/34CT+pUMVe8JNKZ/ilQhl+qVCGXyqU4ZcK\nZfilQhl+qVCGXyrU/wEw19UFg9FdtwAAAABJRU5ErkJggg==\n", 316 | "text/plain": [ 317 | "" 318 | ] 319 | }, 320 | "metadata": {}, 321 | "output_type": "display_data" 322 | } 323 | ], 324 | "source": [ 325 | "output2 = convolution(input_image, conv_filter2, 1)\n", 326 | "plt.imshow(output2)\n", 327 | "plt.show()" 328 | ] 329 | }, 330 | { 331 | "cell_type": "markdown", 332 | "metadata": {}, 333 | "source": [ 334 | "As images have multiple channels, convolution is actually three dimensional." 335 | ] 336 | }, 337 | { 338 | "cell_type": "markdown", 339 | "metadata": {}, 340 | "source": [ 341 | "![Filter](https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/master/images/cnn.jpeg)" 342 | ] 343 | }, 344 | { 345 | "cell_type": "markdown", 346 | "metadata": {}, 347 | "source": [ 348 | "Convolution is translation invariant, which means the same features can de detected regardless of their locations." 
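Returning to the multi-channel point above, here is a minimal NumPy sketch of that three-dimensional view (the `convolution3d` helper below is illustrative only and is not the notebook's earlier 2-D `convolution` helper): a single filter spans every input channel, and each output value sums over the window and over the channels, so one filter still yields one 2-D feature map.

```python
import numpy as np

def convolution3d(image, conv_filter, stride=1):
    """Convolve an (H, W, C) image with one (k, k, C) filter into a 2-D feature map."""
    H, W, C = image.shape
    k = conv_filter.shape[0]
    out_h = (H - k) // stride + 1
    out_w = (W - k) // stride + 1
    feature_map = np.zeros((out_h, out_w))
    for i in range(out_h):
        for j in range(out_w):
            # the window covers k x k pixels across all C channels
            window = image[i * stride:i * stride + k,
                           j * stride:j * stride + k, :]
            feature_map[i, j] = np.sum(window * conv_filter)
    return feature_map

rgb_image = np.random.rand(32, 32, 3)   # one CIFAR10-sized image, 3 channels
filter_3d = np.random.rand(3, 3, 3)     # one 3x3 filter spanning all 3 channels
print(convolution3d(rgb_image, filter_3d).shape)  # (30, 30)
```

A convolutional layer with, say, 32 such filters stacks 32 of these feature maps along a new channel axis.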
349 | ] 350 | }, 351 | { 352 | "cell_type": "code", 353 | "execution_count": 33, 354 | "metadata": { 355 | "collapsed": false 356 | }, 357 | "outputs": [], 358 | "source": [ 359 | "input_image = np.zeros((50, 50))\n", 360 | "for i in range(5):\n", 361 | " x = int(np.random.uniform(5, 45, 1))\n", 362 | " y = int(np.random.uniform(5, 45, 1))\n", 363 | " input_image[x, y] = 1\n", 364 | " input_image[(x - 2):(x + 3), y] = 1\n", 365 | " input_image[x, (y - 2):(y + 3)] = 1" 366 | ] 367 | }, 368 | { 369 | "cell_type": "code", 370 | "execution_count": 34, 371 | "metadata": { 372 | "collapsed": false 373 | }, 374 | "outputs": [ 375 | { 376 | "data": { 377 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAP4AAAD8CAYAAABXXhlaAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAACzVJREFUeJzt3V/I3Qd9x/H3Z/nrFGnrSsiSsna0\nTHqhKTx0le5C2pVlVWwvyrDIyEUgNw4qOly6wUDYhb2xerGbYIu5EFutQkMRJMsiMhhpo63aNmhj\nQUyXNhs2qIPFRL+7eH6Vx5D0OXnOn+ecfN8veHjO73fO6e9LOW9+5/c7v/MkVYWkXv5gvQeQNHuG\nLzVk+FJDhi81ZPhSQ4YvNWT4UkOGLzU0VvhJdif5UZKTSfZPaihJ05W1XrmXZAPwY+Ae4BTwHPBg\nVb18uedszpbayjvXtD1Jq/s//pdf17ms9riNY2zjduBkVb0KkOQJ4D7gsuFv5Z38ee4eY5OS3s6x\nOjLS48Z5q78D+NmK5VPDOklzbpw9/kiS7AP2AWzlD6e9OUkjGGeP/xpww4rlncO631NVB6pqqaqW\nNrFljM1JmpRxwn8OuCXJTUk2Ax8FDk1mLEnTtOa3+lV1IcnfAd8CNgCPV9VLE5tM0tSMdYxfVd8E\nvjmhWSTNiFfuSQ0ZvtSQ4UsNGb7UkOFLDRm+1JDhSw0ZvtSQ4UsNGb7UkOFLDRm+1JDhSw0ZvtSQ\n4UsNGb7UkOFLDRm+1JDhSw0ZvtSQ4UsNGb7UkOFLDRm+1JDhSw0ZvtSQ4UsNGb7UkOFLDRm+1JDh\nSw0ZvtSQ4UsNGb7U0KrhJ3k8yZkkL65Yd12Sw0leGX5fO90xJU3SKHv8LwG7L1q3HzhSVbcAR4Zl\nSQti1fCr6jvAzy9afR9wcLh9ELh/wnNJmqK1HuNvq6rTw+3XgW0TmkfSDIx9cq+qCqjL3Z9kX5Lj\nSY6f59y4m5M0AWsN/40k2wGG32cu98CqOlBVS1W1tIkta9ycpElaa/iHgD3D7T3A05MZR9IsjPJx\n3leA/wT+LMmpJHuBzwL3JHkF+MthWdKC2LjaA6rqwcvcdfeEZ5E0I165JzVk+FJDhi81ZPhSQ4Yv\nNWT4UkOGLzVk+FJDhi81ZPhSQ4YvNWT4UkOGLzVk+FJDhi81ZPhSQ4YvNWT4UkOGLzVk+FJDhi81\nZPhSQ4YvNWT4UkOGLzVk+FJDhi81ZPhSQ4YvNWT4UkOGLzVk+FJDhi81ZPhSQ4YvNbRq+EluSHI0\nyctJXkry0LD+uiSHk7wy/L52+uNKmoRR9vgXgE9V1a3AHcDHk9wK7AeOVNUtwJFhWdICWDX8qjpd\nVd8bbv8SOAHsAO4DDg4POwjcP60hJU3WFR3jJ7kRuA04BmyrqtPDXa8D2yY6maSpGTn8JO8Cvg58\noqp+sfK+qiqgLvO8fUmOJzl+nnNjDStpMkYKP8kmlqP/clV9Y1j9RpLtw/3bgTOXem5VHaiqpapa\n2sSWScwsaUwbV3tAkgCPASeq6nMr7joE7AE+O/x+eioTal19679e+L3lv/rjXes0iSZp1fCBO4G/\nBX6Y5K1XwT+yHPxXk+wFfgr8zXRGlDRpq4ZfVf8B5DJ33z3ZcSTNglfuSQ2N8la/LY9vdbVyjy81\nZPhSQ4YvNeQxfmMXn8OY1HM8FzL/3ONLDRm+1JDhSw0ZvtRQ25N7ntgabVYvYro6uceXGjJ8qSHD\nlxpqe4zv8a06c48vNWT4UkOGLzXU9hhfo/G8xtXJPb7UkOFLDRm+1JDhSw15cu9teGJLVyv3+FJD\nhi81ZPhSQ4YvNWT4UkOGLzVk+FJDhi81ZPhSQ4YvNbRq+Em2Jnk2yfeTvJTkM8P6m5IcS3IyyZNJ\nNk9/XEmTMMoe/xxwV1W9H9gF7E5yB/AI8GhV3Qy8Ceyd3piSJmnV8GvZr4bFTcNPAXcBTw3rDwL3\nT2VCSRM30jF+kg1JXgDOAIeBnwBnq+rC8JBTwI7pjChp0kYKv6p+U1W7gJ3A7cB7R91Akn1Jjic5\nfp5zaxxT0iRd0Vn9qjoLHAU+AFyT5K3v8+8EXrvMcw5U1VJVLW1iy1jDSpqMVf8QR5LrgfNVdTbJ\nO4B7WD6xdxR4AHgC2AM8Pc1BF5X/Go/m0Sh/gWc7cDDJBpbfIXy1qp5J8jLwRJJ/AZ4HHpvinJIm\naNXwq+oHwG2XWP8qy8f7khaMV+5JDRm+1JDhSw0ZvtSQ4UsNGb7UkP+SzhguvjhnUs/xIh9Nm3t8\nqSHDlxoyfKkhj/HHMMqxuF/S0Txyjy81ZPhSQ4YvNWT4UkOGLzVk+FJDhi81ZPhSQ17AMwa/pKNF\n5R5fasjwpYYMX2rIY/wx+CUdLSr3+FJDhi81ZPhSQ4YvNWT4UkOGLzVk+FJDhi815AU8UzbPF+xc\n6gtD8zyvJsc9vtSQ4UsNjRx+kg1Jnk/yzLB8U5JjSU4meTLJ5umNKWmSrmSP/xBwYsXyI8CjVXUz\n8Cawd5KDSZqekcJPshP4EPDFYTnAXcBTw0MOAvdPY0BJkzfqHv/zwKeB3w7L7wHOVtWFYfkUsONS\nT0yyL8nxJMfPc26sYSVNxqrhJ/kwcKaqvruWDVTVgapaqqqlTWxZy39C0oSN8jn+ncBHktwLbAXe\nDXwBuCbJxmGvvxN4bXpjSpqkVcOvqoeBhwGSfBD4+6r6WJKvAQ8ATwB7gKenOKfWYBp/BdgLfK4O\n43yO/w/AJ5OcZPmY/7HJjCRp2q7okt2q+jbw7eH2q8Dtkx9J0rR55Z7UkF/SuYqtdjzul3T6co8v
\nNWT4UkOGLzVk+FJDhi81ZPhSQ4YvNWT4UkNewNOYF+v05R5fasjwpYYMX2rI8KWGDF9qyPClhgxf\nasjwpYYMX2rI8KWGDF9qyPClhgxfasjwpYYMX2rI8KWGDF9qyPClhgxfasjwpYYMX2rI8KWGDF9q\nyPClhgxfaihVNbuNJf8N/BT4I+B/Zrbh8SzSrLBY8y7SrLAY8/5JVV2/2oNmGv7vNpocr6qlmW94\nDRZpVliseRdpVli8ed+Ob/Wlhgxfami9wj+wTttdi0WaFRZr3kWaFRZv3stal2N8SevLt/pSQzMN\nP8nuJD9KcjLJ/lluexRJHk9yJsmLK9Zdl+RwkleG39eu54xvSXJDkqNJXk7yUpKHhvXzOu/WJM8m\n+f4w72eG9TclOTa8Jp5Msnm9Z31Lkg1Jnk/yzLA8t7NeqZmFn2QD8K/AXwO3Ag8muXVW2x/Rl4Dd\nF63bDxypqluAI8PyPLgAfKqqbgXuAD4+/P+c13nPAXdV1fuBXcDuJHcAjwCPVtXNwJvA3nWc8WIP\nASdWLM/zrFdklnv824GTVfVqVf0aeAK4b4bbX1VVfQf4+UWr7wMODrcPAvfPdKjLqKrTVfW94fYv\nWX6B7mB+562q+tWwuGn4KeAu4Klh/dzMm2Qn8CHgi8NymNNZ12KW4e8AfrZi+dSwbt5tq6rTw+3X\ngW3rOcylJLkRuA04xhzPO7x1fgE4AxwGfgKcraoLw0Pm6TXxeeDTwG+H5fcwv7NeMU/uXYFa/ghk\nrj4GSfIu4OvAJ6rqFyvvm7d5q+o3VbUL2MnyO8D3rvNIl5Tkw8CZqvrues8yLRtnuK3XgBtWLO8c\n1s27N5Jsr6rTSbazvLeaC0k2sRz9l6vqG8PquZ33LVV1NslR4APANUk2DnvSeXlN3Al8JMm9wFbg\n3cAXmM9Z12SWe/zngFuGM6ObgY8Ch2a4/bU6BOwZbu8Bnl7HWX5nOOZ8DDhRVZ9bcde8znt9kmuG\n2+8A7mH5vMRR4IHhYXMxb1U9XFU7q+pGll+n/15VH2MOZ12zqprZD3Av8GOWj+3+aZbbHnG+rwCn\ngfMsH8PtZfnY7gjwCvBvwHXrPecw61+w/Db+B8ALw8+9czzv+4Dnh3lfBP55WP+nwLPASeBrwJb1\nnvWiuT8IPLMIs17Jj1fuSQ15ck9qyPClhgxfasjwpYYMX2rI8KWGDF9qyPClhv4f9mpJmTC8NgEA\nAAAASUVORK5CYII=\n", 378 | "text/plain": [ 379 | "" 380 | ] 381 | }, 382 | "metadata": {}, 383 | "output_type": "display_data" 384 | } 385 | ], 386 | "source": [ 387 | "plt.imshow(input_image)\n", 388 | "plt.show()" 389 | ] 390 | }, 391 | { 392 | "cell_type": "code", 393 | "execution_count": 35, 394 | "metadata": { 395 | "collapsed": true 396 | }, 397 | "outputs": [], 398 | "source": [ 399 | "conv_filter = np.zeros((5, 5))\n", 400 | "conv_filter[2, :] = 1\n", 401 | "conv_filter[:, 2] = 1" 402 | ] 403 | }, 404 | { 405 | "cell_type": "code", 406 | "execution_count": 36, 407 | "metadata": { 408 | "collapsed": false 409 | }, 410 | "outputs": [ 411 | { 412 | "name": "stdout", 413 | "output_type": "stream", 414 | "text": [ 415 | "[[ 0. 0. 1. 0. 0.]\n", 416 | " [ 0. 0. 1. 0. 0.]\n", 417 | " [ 1. 1. 1. 1. 1.]\n", 418 | " [ 0. 0. 1. 0. 0.]\n", 419 | " [ 0. 0. 1. 0. 
0.]]\n" 420 | ] 421 | } 422 | ], 423 | "source": [ 424 | "print(conv_filter)" 425 | ] 426 | }, 427 | { 428 | "cell_type": "code", 429 | "execution_count": 37, 430 | "metadata": { 431 | "collapsed": true 432 | }, 433 | "outputs": [], 434 | "source": [ 435 | "output = convolution(input_image, conv_filter, 1)" 436 | ] 437 | }, 438 | { 439 | "cell_type": "code", 440 | "execution_count": 38, 441 | "metadata": { 442 | "collapsed": false 443 | }, 444 | "outputs": [ 445 | { 446 | "data": { 447 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAP8AAAD8CAYAAAC4nHJkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAADslJREFUeJzt3W2MHeV5xvH/VWNj4sQiTqhr2aQ4\nQItQVRyJECzygZrSum4aiIQiUFS5kotTpUhEjRpMK7VESiSQmjj5UCKRQPGHNCYliYyQm9YxjqJI\nyLw61LwEG4ckXi04UbAMqDW2ufvhPK62e2Z3Z8+ZOS++r590tGdm5+y5x9rLc+bZmftRRGBm+fzG\nsAsws+Fw+M2ScvjNknL4zZJy+M2ScvjNknL4zZJy+M2S6iv8ktZL+omkg5K2NFWUmbVPvV7hJ2kB\n8CJwLXAYeBy4KSKem+k1i3R2LGZJT+9nZnP7H97krTiuOtue1cf7XAEcjIhDAJK2A9cBM4Z/MUv4\nkK7p4y3NbDZ7Y3ftbfv52L8S+MWU5cNlnZmNgX6O/LVI2gxsBljMO9p+OzOrqZ8j/wRw/pTlVWXd\n/xMR90TE5RFx+ULO7uPtzKxJ/YT/ceBiSaslLQJuBB5qpiwza1vPH/sj4qSkW4D/ABYA90XEs41V\nZmat6uucPyJ2AjsbqsXMBshX+Jkl5fCbJeXwmyXl8Jsl5fCbJeXwmyXl8Jsl5fCbJeXwmyXl8Jsl\n5fCbJeXwmyXl8Jsl5fCbJeXwmyXV1/38kl4GXgdOAScj4vImijKz9jXRwPMPIuJXDfwcMxsgf+w3\nS6rf8Afwn5KeLC26zWxM9Pux/8MRMSHpN4Fdkl6IiB9O3cB9+81GU19H/oiYKF+PAN+lM4XX9G3c\nt99sBPUcfklLJL3r9HPgj4D9TRVmZu3q52P/cuC7kk7/nH+NiO81UpWZta6fSTsOAZc1WIuZDZD/\n1GeWlMNvlpTDb5aUw2+WlMNvlpTDb5aUw2+WlMNvlpTDb5aUw2+WlMNvlpTDb5aUw2+WlMNvlpTD\nb5bUnOGXdJ+kI5L2T1m3TNIuSQfK13e3W6aZNa3Okf9+YP20dVuA3RFxMbC7LJvZGJkz/KUb76+n\nrb4O2FaebwOub7guM2tZr+f8yyNisjx/hU4/PzMbI30P+EVE0Jm8o5KkzZKekPTECY73+3Zm1pBe\nw/+qpBUA5euRmTZ0336z0dRr+B8CNpbnG4EdzZRjZoNS50993wQeBX5X0mFJm4A7gWslHQD+sCyb\n2RiZs29/RNw0w7euabgWMxsgX+FnlpTDb5aUw2+WlMNvlpTDb5aUw2+WlMNvlpTDb5aUw2+WlMNv\nlpTDb5aUw2+WlMNvlpTDb5aUw2+WlMNvllSvk3bcIWlC0r7y2NBumWbWtF4n7QDYGhFrymNns2WZ\nWdt6nbTDzMZcP+f8t0h6ppwWzDhXn/v2m42mXsP/VeBCYA0wCXxxpg3dt99sNPUU/oh4NSJORcTb\nwNeAK5oty8za1lP4T8/WU3wM2D/TtmY2mubs218m7bgaeK+kw8A/AldLWkNnjr6XgU+2WKOZtaDX\nSTvubaEWMxsgX+FnltScR34zgAW/c2HXulMvvjSESqwpPvKbJeXwmyXl8Jsl5fCbJeUBv8IDWrP7\n7/cv61q3yP8+Y81HfrOkHH6zpBx+s6QcfrOkPOBXZB7Qemv9B+fc5tj7un9VltZ4HcCi7z0+75qs\nfT7ymyXl8Jsl5fCbJVWnb//5kvZIek7Ss5JuLeuXSdol6UD5OmMTTzMbPXUG/E4Cn4mIpyS9C3hS\n0i7gL4DdEXGnpC3AFuC29krtTZ3BLOh9QCvLYNabK9W1bunPh1CINaZO3/7JiHiqPH8deB5YCVwH\nbCubbQOub6tIM2vevP7UJ+kC4APAXmB5REyWb70CLJ/hNZuBzQCLeUevdZpZw2oP+El6J/Bt4NMR\ncWzq9yIi6DTz7OK+/WajqVb4JS2kE/xvRMR3yupXT7fwLl+PtFOimbWhTutu0enW+3xEfGnKtx4C\nNgJ3lq87WqlwQM7EAa2q25SrrmSsGuyc/u/xws13d21zCZ/qWrdkovsDYNXA6TmHuqd/9C3Ug1Xn\nnP8q4M+B/5K0r6z7Ozqh/5akTcDPgI+3U6KZtaFO3/4fAd2HxY5rmi3HzAbFV/iZJeXwmyU1trf0\nNjmYBfUGtMZtMKvqPatuU67ah+mDnVWDeysePVGrjqqrIE/VeqW1yUd+s6QcfrOkHH6zpMb2nL/J\n81no/Zw2y/ls1XiHjTcf+c2ScvjNknL4zZJy+M2SGtsBv6ZlHtCq04qsauA0SwuzM5WP/GZJOfxm\nSTn8Zkn107f/DkkTkvaVx4b2yzWzpvTTtx9ga0T8U3vl9a/uoJQHtGZXeafiEOqw5tTp5DMJTJbn\nr0s63bffzMbYvM75p/XtB7hF0jOS7vN0XWbjpZ++/V8FLgTW0Plk8MUZXrdZ0hOSnjjB8QZKNrMm\n9Ny3PyJejYhTEfE28DXgiqrXetIOs9HUc99+SSumTNf1MWB/OyUOhge0Zuee+meefvr23yRpDZ1p\nul4GPtlKhWbWin769u9svhwzGxRf4WeWlMNvlpRv6S2aHNCqmlPAA2Y2anzkN0vK4TdLyuE3S8rh\nN0vKA34tqJowtGpCEbNh8pHfLCmH3ywph98sKZ/zz8NbFa2+qhx7X/c/a1WbsCpuHWaD4iO/WVIO\nv1lSDr9ZUnX69i+W9JikH5e+/Z8r61dL2ivpoKQHJC1qv1wza0qdI/9xYF1EXEanWed6SVcCd9Hp\n238R8Bqwqb0yx8ubK9X1MBs1c4Y/Ot4oiwvLI4B1wINl/Tbg+lYqNLNW1O3eu6D07zsC7AJeAo5G\nxMmyyWE8kYfZWKkV/tKiew2wik6L7kvqvoH79puNpnmN9kfEUWAPsBY4V9Lpq1lWARMzvMZ9+81G\nUJ2+/e
cBJyLiqKRzgGvpDPbtAW4AtgMbgR1tFtq2qtZb0+/Oq7pyr2ow74Wb7+5adwmf6lq3ZCK6\n1lVdCTh9TgG3BLMm1Lm8dwWwTdICOp8UvhURD0t6Dtgu6fPA03Qm9jCzMVGnb/8zdCbnnL7+EDNM\n0WVmo89X+Jkl5fCbJeVbeouqQbSu1lub13ZtUzVot3rnX3atW1axXZWqW3o9Yai1wUd+s6QcfrOk\nHH6zpBx+s6QcfrOkHH6zpBx+s6QcfrOkHH6zpHyFX1Hnlt4qVbf0/nTD17vWXTJR75beqolBfEuv\ntcFHfrOkHH6zpPrp23+/pJ9K2lcea9ov18yaUuec/3Tf/jckLQR+JOnfy/f+NiIenOW1Zjai6nTy\nCaCqb/8Zpc4tvVX99Zb+vPtnVfXrW/HoiVp1DOOW3qrBTg8qnvl66tsfEXvLt74g6RlJWyW5Na/Z\nGOmpb7+k3wNup9O//4PAMuC2qte6b7/ZaOq1b//6iJgsU3kdB/6FGZp5um+/2WjquW+/pBURMSlJ\ndObp299yrWOj6uKdUVZ1MVNXCzM74/TTt/+R8h+DgH3AX7VYp5k1rJ++/etaqcjMBsJX+Jkl5fCb\nJeW7+uah6gKcKlUXA9V9bZOq7hCsUjUBadU+TDeMfbLm+MhvlpTDb5aUw2+WlMNvlpQH/Fowve0W\njPZkm1WtyKruVrQzi4/8Zkk5/GZJOfxmSTn8Zkl5wK8FbbfAqjvHQNWVe1WDey/cfHfXuumtyKpu\nU666CrBysNO3B48kH/nNknL4zZKqHf7SxPNpSQ+X5dWS9ko6KOkBSYvaK9PMmjafI/+twPNTlu8C\ntkbERcBrwKYmCzOzdqnTln+OjaRVwDbgC8DfAH8G/BL4rYg4KWktcEdE/PFsP2eplsWHdE3/VVst\ndW/pnVy7sGtdnXkGfEvv6NkbuzkWv+4e1a1Q98j/ZeCzwNtl+T3A0Yg4WZYPAyvnVaWZDVWdufo+\nAhyJiCd7eQP37TcbTXX+zn8V8FFJG4DFwFLgK8C5ks4qR/9VwETViyPiHuAe6Hzsb6RqM+vbnEf+\niLg9IlZFxAXAjcAjEfEJOpN33FA22wjsaK1KM2tcP1f43QZsl/R54Gng3mZKskEbt0lGrBnzCn9E\n/AD4QXl+iBmm6DKz0ecr/MyScvjNkvJdfWewcZtnwAbLR36zpBx+s6QcfrOkHH6zpDzgZ2M3z4A1\nw0d+s6QcfrOkHH6zpBx+s6Q84Gfuq5+Uj/xmSTn8Zkk5/GZJOfxmSdXq29/Ym0m/BH4GvBf41cDe\nuB3jvg+uf/ja2Iffjojz6mw40PD/35tKT0TE5QN/4waN+z64/uEb9j74Y79ZUg6/WVLDCv89Q3rf\nJo37Prj+4RvqPgzlnN/Mhs8f+82SGnj4Ja2X9BNJByVtGfT7z5ek+yQdkbR/yrplknZJOlC+vnuY\nNc5G0vmS9kh6TtKzkm4t68dpHxZLekzSj8s+fK6sXy1pb/ldekDSomHXOhtJCyQ9LenhsjzU+gca\nfkkLgH8G/gS4FLhJ0qWDrKEH9wPrp63bAuyOiIuB3WV5VJ0EPhMRlwJXAn9d/s3HaR+OA+si4jJg\nDbBe0pXAXcDWiLgIeA3YNMQa67gVeH7K8lDrH/SR/wrgYEQcioi3gO3AdQOuYV4i4ofA9D5X1wHb\nyvNtwPUDLWoeImIyIp4qz1+n88u3kvHah4iIN8riwvIIYB3wYFk/0vsgaRXwp8DXy7IYcv2DDv9K\n4BdTlg+XdeNmeURMluevAMuHWUxdki4APgDsZcz2oXxk3gccAXYBLwFHyxTxMPq/S18GPgu8XZbf\nw5Dr94Bfn6Lz55KR/5OJpHcC3wY+HRHHpn5vHPYhIk5FxBpgFZ1PkJcMuaTaJH0EOBIRTw67lqkG\n3cxjAjh/yvKqsm7cvCppRURMSlpB52g0siQtpBP8b0TEd8rqsdqH0yLiqKQ9wFrgXElnlaPnKP8u\nXQV8VNIGYDGwFPgKQ65/0Ef+x4GLyyjnIuBG4KEB19CEh4CN5flGYMcQa5lVObe8F3g+Ir405Vvj\ntA/nSTq3PD8HuJbO2MUe4Iay2cjuQ0TcHhGrIuICOr/zj0TEJxh2/REx0AewAXiRzjnb3w/6/Xuo\n95vAJHCCznnZJjrna7uBA8D3gWXDrnOW+j9M5yP9M8C+8tgwZvvw+8DTZR/2A/9Q1r8feAw4CPwb\ncPawa62xL1cDD49C/b7CzywpD/iZJeXwmyXl8Jsl5fCbJeXwmyXl8Jsl5fCbJeXwmyX1v2SwKqJh\niSycAAAAAElFTkSuQmCC\n", 448 | "text/plain": [ 449 | "" 450 | ] 451 | }, 452 | "metadata": {}, 453 | "output_type": "display_data" 454 | } 455 | ], 456 | "source": [ 457 | "plt.imshow(output)\n", 458 | "plt.show()" 459 | ] 460 | }, 461 | { 462 | "cell_type": "markdown", 463 | "metadata": {}, 464 | "source": [ 465 | "Large convolution filters have substantially more parameters and are more difficult to learn. Therefore, to learn features at different scales, we would downsample the images instead." 
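Max pooling is the usual downsampling step: slide a window over the feature map and keep only the largest activation at each position. The `pooling` helper defined in the next cell leaves that step as a stub (its inner loop writes zeros into `reduced_map`); a completed sketch with the same signature, assuming the usual `(d - size) // stride + 1` output-size convention and named `max_pooling` here to avoid clashing with the notebook's own function:

```python
import numpy as np

def max_pooling(feature_map, size, stride):
    """Max-pool a (d x d) feature map with a (size x size) window."""
    d = feature_map.shape[0]
    n = (d - size) // stride + 1              # window positions per axis
    reduced_map = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            window = feature_map[i * stride:i * stride + size,
                                 j * stride:j * stride + size]
            reduced_map[i, j] = np.max(window)   # keep the strongest response
    return reduced_map

fmap = np.arange(16, dtype=float).reshape(4, 4)
print(max_pooling(fmap, 2, 2))
# [[ 5.  7.]
#  [13. 15.]]
```

With a 2x2 window and stride 2, every block of four activations collapses to its maximum, halving each spatial dimension while keeping the strongest responses.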
466 | ] 467 | }, 468 | { 469 | "cell_type": "markdown", 470 | "metadata": {}, 471 | "source": [ 472 | "![Maxpool](https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/master/images/maxpool.jpeg)" 473 | ] 474 | }, 475 | { 476 | "cell_type": "code", 477 | "execution_count": 39, 478 | "metadata": { 479 | "collapsed": true 480 | }, 481 | "outputs": [], 482 | "source": [ 483 | "def pooling(feature_map, size, stride):\n", 484 | " \"\"\"\n", 485 | " Perform max pooling on feature_map.\n", 486 | " \n", 487 | " Args\n", 488 | " feature_map: (d x d) numpy array\n", 489 | " size: pooling window size\n", 490 | " stride: sliding width / height\n", 491 | " \"\"\"\n", 492 | " d = feature_map.shape[0]\n", 493 | " n = (d - size) / stride\n", 494 | " reduced_map = np.zeros((n, n))\n", 495 | " for i in range(n):\n", 496 | " for j in range(n):\n", 497 | " reduced_map[i, j] = 0\n", 498 | " return reduced_map" 499 | ] 500 | }, 501 | { 502 | "cell_type": "code", 503 | "execution_count": 40, 504 | "metadata": { 505 | "collapsed": false 506 | }, 507 | "outputs": [ 508 | { 509 | "data": { 510 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAPgAAAD8CAYAAABaQGkdAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAACjFJREFUeJzt3V+InXedx/H3Z/PXxF0Vdlk0KW1Y\nXCUIS3XW7R+Q2nhRq9gL96JChVVowF21iiB1b9R7Eb1wC9mqNxZ7kfZCpESX1V4sKyGTtKBJqtSq\naWKl8WJVCps/+t2LmYVYzJwnmefpM/Pl/YJC5vT01w/DvPucc+bMNFWFpJ7+bO4BkqZj4FJjBi41\nZuBSYwYuNWbgUmMGLjVm4FJjBi41tnWKQ7dnR+1k9xRHSwL+l5e4WBey6H6TBL6T3fxDDkxxtCTg\naP3noPv5EF1qzMClxgxcaszApcYMXGrMwKXGBgWe5K4kP07ybJIHpx4laRwLA0+yBfgK8G5gP/CB\nJPunHiZp/YZcwd8OPFtVz1XVReBR4J5pZ0kaw5DA9wDPX/Hx2dXb/kiSg0mWkyxf4sJY+yStw2gv\nslXVoapaqqqlbewY61hJ6zAk8HPADVd8vHf1Nkkb3JDAjwFvTLIvyXbgXuBb086SNIaFP01WVZeT\nfBT4DrAF+FpVnZx8maR1G/TjolX1BPDExFskjcx3skmNGbjUmIFLjRm41JiBS40ZuNSYgUuNGbjU\nmIFLjRm41JiBS40ZuNSYgUuNGbjUmIFLjRm41JiBS40ZuNSYgUuNGbjUmIFLjRm41JiBS40ZuNSY\ngUuNGbjUmIFLjRm41JiBS40ZuNSYgUuNGbjUmIFLjRm41NjCwJPckOT7SU4lOZnkgVdimKT12zrg\nPpeBT1XViSR/DhxP8h9VdWribZLWaeEVvKpeqKoTq3/+HXAa2DP1MEnrd03PwZPcBNwMHJ1ijKRx\nDXmIDkCSVwOPAZ+oqt/+ib9/EDgIsJNdow2UdP0GXcGTbGMl7keq6vE/dZ+qOlRVS1W1tI0dY26U\ndJ2GvIoe4KvA6ar64vSTJI1lyBX8duCDwJ1Jnl796+6Jd0kawcLn4FX1X0BegS2SRuY72aTGDFxq\nzMClxgxcaszApcYMXGrMwKXGDFxqzMClxgxcaszApcYMXGrMwKXGDFxqzMClxgxcaszApcYMXGrM\nwKXGDFxqzMClxgxcaszApcYMXGrMwKXGDFxqzMClxgb//8E7u3jX349+5vYjx0Y/c0q/PnjrJOf+\nxZnLk5y72T6/c/EKLjVm4FJjBi41ZuBSYwYuNWbgUmMGLjU2OPAkW5I8leTbUw6SNJ5ruYI/AJye\naoik8Q0KPMle4D3Aw9POkTSmoVfwLwGfBv5wtTskOZhkOcnyJS6MMk7S+iwMPMl7gRer6vha96uq\nQ1W1VFVL29gx2kBJ12/IFfx24H1Jfg48CtyZ5BuTrpI0ioWBV9VnqmpvVd0E3At8r6rum3yZpHXz\n++BSY9f08+BV9STw5CRLJI3OK7jUmIFLjRm41JiBS40ZuNTYpvqtqr/4/G2TnPvM/f82+pl33/H+\n0c+c0vHPPTTJuW/+93+e5Nwbj0xybDtewaXGDFxqzMClxgxcaszApcYMXGrMwKXGDFxqzMClxgxc\naszApcYMXGrMwKXGDFxqzMClxgxcaszApcYMXGrMwKXGDFxqzMClxjbVb1V9/Q8uTXLu2859ZPQz\n/5oXRz8T4Pc/+ekk577zw/dPcu6NR/57knM1jFdwqTEDlxozcKkxA5caM3CpMQOXGhsUeJLXJjmc\n5Jkkp5PcOvUwSes39PvgXwaOVNU/JtkO7Jpwk6SRLAw8yWuAdwD/BFBVF4GL086SNIYhD9H3AeeB\nryd5KsnDSXZPvEvSCIYEvhV4K/BQVd0MvAQ8+PI7JTmYZDnJ8iUujDxT0vUYEvhZ4GxVHV39+DAr\nwf+RqjpUVUtVtbSNHWNulHSdFgZeVb8Cnk/yptWbDgCnJl0laRRDX0X/GPDI6ivozwEfmm6SpLEM\nCryqngaWJt4iaWS+k01qzMClxgxcaszApcYMXGrMwKXGNtVvVd1+5Ngk5770+dtGP/OJJx8b/UyA\nu+94/yTnTvW51by8gkuNGbjUmIFLjRm41JiBS40ZuNSYgUuNGbjUmIFLjRm41JiBS40ZuNSYgUuN\nGbjUmIFLjRm41JiBS40ZuNSYgUuNGbjU2Kb6pYuazpa//ZtJzv39T346ybkaxiu41JiBS40ZuNSY\ngUuNGbjUmIFLjRm41NigwJN8MsnJJD9K8s0kO6ceJmn9FgaeZA/wcWCpqt4CbAHunXqYpPUb+hB9\nK/CqJFuBXcAv
p5skaSwLA6+qc8AXgDPAC8Bvquq7L79fkoNJlpMsX+LC+EslXbMhD9FfB9wD7APe\nAOxOct/L71dVh6pqqaqWtrFj/KWSrtmQh+jvAn5WVeer6hLwOHDbtLMkjWFI4GeAW5LsShLgAHB6\n2lmSxjDkOfhR4DBwAvjh6j9zaOJdkkYw6OfBq+qzwGcn3iJpZL6TTWrMwKXGDFxqzMClxgxcaszf\nqgq8/geXRj/zbec+MvqZANwxzbHHP/fQJOe+88P3T3Lu9iPHJjm3G6/gUmMGLjVm4FJjBi41ZuBS\nYwYuNWbgUmMGLjVm4FJjBi41ZuBSYwYuNWbgUmMGLjVm4FJjBi41ZuBSYwYuNWbgUmMGLjVm4FJj\nqarxD03OA78YcNe/BH49+oDpbKa9m2krbK69G2HrjVX1V4vuNEngQyVZrqql2QZco820dzNthc21\ndzNt9SG61JiBS43NHfihmf/912oz7d1MW2Fz7d00W2d9Di5pWnNfwSVNaLbAk9yV5MdJnk3y4Fw7\nFklyQ5LvJzmV5GSSB+beNESSLUmeSvLtubesJclrkxxO8kyS00lunXvTWpJ8cvXr4EdJvplk59yb\n1jJL4Em2AF8B3g3sBz6QZP8cWwa4DHyqqvYDtwD/soG3XukB4PTcIwb4MnCkqt4M/B0beHOSPcDH\ngaWqeguwBbh33lVrm+sK/nbg2ap6rqouAo8C98y0ZU1V9UJVnVj98+9Y+QLcM++qtSXZC7wHeHju\nLWtJ8hrgHcBXAarqYlX9z7yrFtoKvCrJVmAX8MuZ96xprsD3AM9f8fFZNng0AEluAm4Gjs67ZKEv\nAZ8G/jD3kAX2AeeBr68+nXg4ye65R11NVZ0DvgCcAV4AflNV35131dp8kW2gJK8GHgM+UVW/nXvP\n1SR5L/BiVR2fe8sAW4G3Ag9V1c3AS8BGfj3mdaw80twHvAHYneS+eVetba7AzwE3XPHx3tXbNqQk\n21iJ+5GqenzuPQvcDrwvyc9ZeepzZ5JvzDvpqs4CZ6vq/x8RHWYl+I3qXcDPqup8VV0CHgdum3nT\nmuYK/BjwxiT7kmxn5YWKb820ZU1JwspzxNNV9cW59yxSVZ+pqr1VdRMrn9fvVdWGvMpU1a+A55O8\nafWmA8CpGSctcga4Jcmu1a+LA2zgFwVh5SHSK66qLif5KPAdVl6J/FpVnZxjywC3Ax8Efpjk6dXb\n/rWqnphxUycfAx5Z/Q/9c8CHZt5zVVV1NMlh4AQr3115ig3+rjbfySY15otsUmMGLjVm4FJjBi41\nZuBSYwYuNWbgUmMGLjX2f3zmJ0fhTw6SAAAAAElFTkSuQmCC\n", 511 | "text/plain": [ 512 | "" 513 | ] 514 | }, 515 | "metadata": {}, 516 | "output_type": "display_data" 517 | } 518 | ], 519 | "source": [ 520 | "plt.imshow(pooling(output, 4, 4))\n", 521 | "plt.show()" 522 | ] 523 | }, 524 | { 525 | "cell_type": "markdown", 526 | "metadata": {}, 527 | "source": [ 528 | "## Hierarchical representation\n", 529 | "\n", 530 | "https://distill.pub/2017/feature-visualization/" 531 | ] 532 | }, 533 | { 534 | "cell_type": "markdown", 535 | "metadata": {}, 536 | "source": [ 537 | "![Filter](https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/master/figures/filter.png)" 538 | ] 539 | }, 540 | { 541 | "cell_type": "code", 542 | "execution_count": 340, 543 | "metadata": { 544 | "collapsed": true 545 | }, 546 | "outputs": [], 547 | "source": [ 548 | "import keras.datasets.mnist as mnist" 549 | ] 550 | }, 551 | { 552 | "cell_type": "code", 553 | "execution_count": 341, 554 | "metadata": { 555 | "collapsed": false 556 | }, 557 | "outputs": [ 558 | { 559 | "name": "stdout", 560 | "output_type": "stream", 561 | "text": [ 562 | "Downloading data from https://s3.amazonaws.com/img-datasets/mnist.npz\n", 563 | "11493376/11490434 [==============================] - 7s 1us/step\n", 564 | "11501568/11490434 [==============================] - 7s 1us/step\n" 565 | ] 566 | } 567 | ], 568 | "source": [ 569 | "(x_train, y_train), (x_test, y_test) = mnist.load_data()" 570 | ] 571 | }, 572 | { 573 | "cell_type": "code", 574 | "execution_count": 354, 575 | "metadata": { 576 | "collapsed": true 577 | }, 578 | "outputs": [], 579 | "source": [ 580 | "# This makes sure the image has the correct order in the axis for Tensorflow, it would be different for Theano backend\n", 581 | "x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)\n", 582 | "x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)\n", 583 | "\n", 584 | "# Convert values to floats, originally they are integers\n", 585 | "x_train = x_train.astype('float32')\n", 586 | "x_test = x_test.astype('float32')\n", 587 | "\n", 588 | "# Convert values of labels from 0 to 9 to categorical (one_hot encoding)\n", 589 | "y_train = keras.utils.to_categorical(y_train, 10)\n", 
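The one-hot encoding used in this preprocessing turns each integer label into a length-10 indicator row; a tiny standalone check of what `keras.utils.to_categorical` produces:

```python
import numpy as np
import keras

# Label 3 becomes a row with a single 1 in position 3, label 7 in position 7.
print(keras.utils.to_categorical(np.array([3, 7]), 10))
# [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
#  [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]]
```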
590 | "y_test = keras.utils.to_categorical(y_test, 10)" 591 | ] 592 | }, 593 | { 594 | "cell_type": "code", 595 | "execution_count": 344, 596 | "metadata": { 597 | "collapsed": true 598 | }, 599 | "outputs": [], 600 | "source": [ 601 | "from keras.models import Sequential\n", 602 | "from keras.layers import Dense, Flatten, Dropout\n", 603 | "from keras.layers import Conv2D, MaxPooling2D" 604 | ] 605 | }, 606 | { 607 | "cell_type": "markdown", 608 | "metadata": {}, 609 | "source": [ 610 | "Keras documentation: https://keras.io/layers/convolutional/" 611 | ] 612 | }, 613 | { 614 | "cell_type": "code", 615 | "execution_count": 357, 616 | "metadata": { 617 | "collapsed": true 618 | }, 619 | "outputs": [], 620 | "source": [ 621 | "model = Sequential()\n", 622 | "model.add(Conv2D(32, (3, 3), padding='same', input_shape=(28, 28, 1), activation='relu'))\n", 623 | "model.add(MaxPooling2D(pool_size=(2, 2)))\n", 624 | "model.add(Flatten())\n", 625 | "model.add(Dropout(0.1))\n", 626 | "model.add(Dense(10, activation='softmax'))" 627 | ] 628 | }, 629 | { 630 | "cell_type": "code", 631 | "execution_count": 358, 632 | "metadata": { 633 | "collapsed": true 634 | }, 635 | "outputs": [], 636 | "source": [ 637 | "model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])" 638 | ] 639 | }, 640 | { 641 | "cell_type": "code", 642 | "execution_count": 359, 643 | "metadata": { 644 | "collapsed": false 645 | }, 646 | "outputs": [ 647 | { 648 | "name": "stdout", 649 | "output_type": "stream", 650 | "text": [ 651 | "_________________________________________________________________\n", 652 | "Layer (type) Output Shape Param # \n", 653 | "=================================================================\n", 654 | "conv2d_3 (Conv2D) (None, 28, 28, 32) 320 \n", 655 | "_________________________________________________________________\n", 656 | "max_pooling2d_3 (MaxPooling2 (None, 14, 14, 32) 0 \n", 657 | "_________________________________________________________________\n", 658 | "flatten_3 (Flatten) (None, 6272) 0 \n", 659 | "_________________________________________________________________\n", 660 | "dropout_3 (Dropout) (None, 6272) 0 \n", 661 | "_________________________________________________________________\n", 662 | "dense_3 (Dense) (None, 10) 62730 \n", 663 | "=================================================================\n", 664 | "Total params: 63,050\n", 665 | "Trainable params: 63,050\n", 666 | "Non-trainable params: 0\n", 667 | "_________________________________________________________________\n" 668 | ] 669 | } 670 | ], 671 | "source": [ 672 | "model.summary()" 673 | ] 674 | }, 675 | { 676 | "cell_type": "markdown", 677 | "metadata": {}, 678 | "source": [ 679 | "Common pre-processing steps include rotation and zoom, which will make the neural network learn features that are angle and scale invariant. These steps can be performed within a data generator on-the-fly." 
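Before moving on to the generator, the parameter counts reported by `model.summary()` above can be reproduced by hand, which is a quick sanity check when sizing a network:

```python
# Conv2D: (kernel_h * kernel_w * input_channels + 1 bias) weights per filter, 32 filters
conv_params = (3 * 3 * 1 + 1) * 32             # 320
# After 2x2 max pooling the 28x28x32 volume flattens to 14*14*32 = 6272 values,
# and the Dense layer needs one weight per input per class plus a bias per class.
dense_params = (14 * 14 * 32 + 1) * 10         # 62730
print(conv_params, dense_params, conv_params + dense_params)   # 320 62730 63050
```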
680 | ] 681 | }, 682 | { 683 | "cell_type": "code", 684 | "execution_count": 360, 685 | "metadata": { 686 | "collapsed": true 687 | }, 688 | "outputs": [], 689 | "source": [ 690 | "from keras.preprocessing.image import ImageDataGenerator\n", 691 | "\n", 692 | "data_generator = ImageDataGenerator(rescale=1./255,\n", 693 | " #featurewise_center=True,\n", 694 | " #featurewise_std_normalization=True,\n", 695 | " rotation_range=10,\n", 696 | " width_shift_range=0.1,\n", 697 | " height_shift_range=0.1,\n", 698 | " #horizontal_flip=True,\n", 699 | " zoom_range=0.1\n", 700 | " )" 701 | ] 702 | }, 703 | { 704 | "cell_type": "code", 705 | "execution_count": 361, 706 | "metadata": { 707 | "collapsed": false 708 | }, 709 | "outputs": [ 710 | { 711 | "name": "stdout", 712 | "output_type": "stream", 713 | "text": [ 714 | "Epoch 1/10\n", 715 | " - 26s - loss: 0.8140 - acc: 0.7562 - val_loss: 5.2912 - val_acc: 0.6683\n", 716 | "Epoch 2/10\n", 717 | " - 25s - loss: 0.3456 - acc: 0.9010 - val_loss: 6.3462 - val_acc: 0.6038\n", 718 | "Epoch 3/10\n", 719 | " - 25s - loss: 0.2771 - acc: 0.9173 - val_loss: 5.8151 - val_acc: 0.6369\n", 720 | "Epoch 4/10\n", 721 | " - 29s - loss: 0.2475 - acc: 0.9269 - val_loss: 6.2446 - val_acc: 0.6103\n", 722 | "Epoch 5/10\n", 723 | " - 23s - loss: 0.2387 - acc: 0.9291 - val_loss: 6.2826 - val_acc: 0.6079\n", 724 | "Epoch 6/10\n", 725 | " - 24s - loss: 0.2260 - acc: 0.9309 - val_loss: 5.8294 - val_acc: 0.6362\n", 726 | "Epoch 7/10\n", 727 | " - 24s - loss: 0.2183 - acc: 0.9341 - val_loss: 6.2320 - val_acc: 0.6112\n", 728 | "Epoch 8/10\n", 729 | " - 24s - loss: 0.2077 - acc: 0.9376 - val_loss: 6.1975 - val_acc: 0.6131\n", 730 | "Epoch 9/10\n", 731 | " - 25s - loss: 0.2009 - acc: 0.9392 - val_loss: 6.2829 - val_acc: 0.6071\n", 732 | "Epoch 10/10\n", 733 | " - 28s - loss: 0.1958 - acc: 0.9424 - val_loss: 7.7532 - val_acc: 0.5164\n" 734 | ] 735 | }, 736 | { 737 | "data": { 738 | "text/plain": [ 739 | "" 740 | ] 741 | }, 742 | "execution_count": 361, 743 | "metadata": {}, 744 | "output_type": "execute_result" 745 | } 746 | ], 747 | "source": [ 748 | "model.fit_generator(data_generator.flow(x_train[0:40000], y_train[0:40000], batch_size=100), steps_per_epoch=40000/100,\n", 749 | " validation_data=(x_train[40000:], y_train[40000:]), \n", 750 | " epochs=10, verbose=2)" 751 | ] 752 | }, 753 | { 754 | "cell_type": "markdown", 755 | "metadata": {}, 756 | "source": [ 757 | "## Further readings\n", 758 | "\n", 759 | "https://qz.com/1034972/the-data-that-changed-the-direction-of-ai-research-and-possibly-the-world/\n", 760 | "\n", 761 | "https://www.youtube.com/watch?v=KfV8CJh7hE0&list=PLkDaE6sCZn6Gl29AoE31iwdVwSG-KnDzF&index=18\n", 762 | "\n", 763 | "https://blog.openai.com/adversarial-example-research/" 764 | ] 765 | }, 766 | { 767 | "cell_type": "markdown", 768 | "metadata": { 769 | "collapsed": true 770 | }, 771 | "source": [ 772 | "![Adverserial](https://blog.openai.com/content/images/2017/02/adversarial_img_1.png)" 773 | ] 774 | }, 775 | { 776 | "cell_type": "code", 777 | "execution_count": null, 778 | "metadata": { 779 | "collapsed": true 780 | }, 781 | "outputs": [], 782 | "source": [] 783 | } 784 | ], 785 | "metadata": { 786 | "anaconda-cloud": {}, 787 | "kernelspec": { 788 | "display_name": "Python [conda env:tensorflow]", 789 | "language": "python", 790 | "name": "conda-env-tensorflow-py" 791 | }, 792 | "language_info": { 793 | "codemirror_mode": { 794 | "name": "ipython", 795 | "version": 2 796 | }, 797 | "file_extension": ".py", 798 | "mimetype": "text/x-python", 799 | "name": 
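One caveat about the training run above, offered as a likely explanation rather than a certainty: the generator rescales the training batches by 1/255, but the held-out images are passed to `validation_data` with their raw 0-255 values, which would account for the large gap between training and validation metrics. A sketch of the adjustment, keeping everything else unchanged:

```python
# Scale the held-out images the same way the generator scales the training batches.
x_val = x_train[40000:] / 255.0

model.fit_generator(
    data_generator.flow(x_train[0:40000], y_train[0:40000], batch_size=100),
    steps_per_epoch=40000 // 100,
    validation_data=(x_val, y_train[40000:]),
    epochs=10, verbose=2)
```

Dividing `x_train` and `x_test` by 255 once during preprocessing and dropping `rescale` from the generator achieves the same effect.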
"python", 800 | "nbconvert_exporter": "python", 801 | "pygments_lexer": "ipython2", 802 | "version": "2.7.13" 803 | } 804 | }, 805 | "nbformat": 4, 806 | "nbformat_minor": 1 807 | } 808 | -------------------------------------------------------------------------------- /figures/activation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/figures/activation.png -------------------------------------------------------------------------------- /figures/chainrule.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/figures/chainrule.png -------------------------------------------------------------------------------- /figures/deeper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/figures/deeper.png -------------------------------------------------------------------------------- /figures/descent.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/figures/descent.png -------------------------------------------------------------------------------- /figures/dropout.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/figures/dropout.png -------------------------------------------------------------------------------- /figures/filter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/figures/filter.png -------------------------------------------------------------------------------- /figures/graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/figures/graph.png -------------------------------------------------------------------------------- /figures/landscape.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/figures/landscape.png -------------------------------------------------------------------------------- /figures/lasso.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/figures/lasso.png -------------------------------------------------------------------------------- /figures/local.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/figures/local.png -------------------------------------------------------------------------------- /figures/logistic.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/figures/logistic.png -------------------------------------------------------------------------------- /figures/neuralnet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/figures/neuralnet.png -------------------------------------------------------------------------------- /figures/overfitting.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/figures/overfitting.png -------------------------------------------------------------------------------- /figures/perceptron.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/figures/perceptron.png -------------------------------------------------------------------------------- /figures/power.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/figures/power.png -------------------------------------------------------------------------------- /figures/universal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/figures/universal.png -------------------------------------------------------------------------------- /images/cnn.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/images/cnn.jpeg -------------------------------------------------------------------------------- /images/conv_layer.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/images/conv_layer.gif -------------------------------------------------------------------------------- /images/maxpool.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/images/maxpool.jpeg -------------------------------------------------------------------------------- /images/neural_net2.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/images/neural_net2.jpeg -------------------------------------------------------------------------------- /images/pool.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/UCIDataScienceInitiative/LearningWithPython/a9e34f1e8e306166c025b9385365bbda31d5e13c/images/pool.jpeg -------------------------------------------------------------------------------- 
/recurrent.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 66, 6 | "metadata": { 7 | "collapsed": false 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "from keras.datasets import reuters\n", 12 | "\n", 13 | "(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=None, test_split=0.2)" 14 | ] 15 | }, 16 | { 17 | "cell_type": "markdown", 18 | "metadata": {}, 19 | "source": [ 20 | "Words are typically represented by tokens in a numeric dictionary where each word corresponds to an integer." 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 69, 26 | "metadata": { 27 | "collapsed": false 28 | }, 29 | "outputs": [ 30 | { 31 | "name": "stdout", 32 | "output_type": "stream", 33 | "text": [ 34 | "[1, 27595, 28842, 8, 43, 10, 447, 5, 25, 207, 270, 5, 3095, 111, 16, 369, 186, 90, 67, 7, 89, 5, 19, 102, 6, 19, 124, 15, 90, 67, 84, 22, 482, 26, 7, 48, 4, 49, 8, 864, 39, 209, 154, 6, 151, 6, 83, 11, 15, 22, 155, 11, 15, 7, 48, 9, 4579, 1005, 504, 6, 258, 6, 272, 11, 15, 22, 134, 44, 11, 15, 16, 8, 197, 1245, 90, 67, 52, 29, 209, 30, 32, 132, 6, 109, 15, 17, 12]\n", 35 | "87\n" 36 | ] 37 | } 38 | ], 39 | "source": [ 40 | "print(x_train[0])" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 98, 46 | "metadata": { 47 | "collapsed": true 48 | }, 49 | "outputs": [], 50 | "source": [ 51 | "word_index = reuters.get_word_index(path=\"reuters_word_index.json\")" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": 102, 57 | "metadata": { 58 | "collapsed": false 59 | }, 60 | "outputs": [ 61 | { 62 | "name": "stdout", 63 | "output_type": "stream", 64 | "text": [ 65 | "803\n" 66 | ] 67 | } 68 | ], 69 | "source": [ 70 | "print(word_index['computer'])" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": 99, 76 | "metadata": { 77 | "collapsed": true 78 | }, 79 | "outputs": [], 80 | "source": [ 81 | "reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])\n", 82 | "decoded_news = ' '.join([reverse_word_index.get(i - 3, '') for i in x_train[0]])" 83 | ] 84 | }, 85 | { 86 | "cell_type": "code", 87 | "execution_count": 100, 88 | "metadata": { 89 | "collapsed": false 90 | }, 91 | "outputs": [ 92 | { 93 | "name": "stdout", 94 | "output_type": "stream", 95 | "text": [ 96 | " mcgrath rentcorp said as a result of its december acquisition of space co it expects earnings per share in 1987 of 1 15 to 1 30 dlrs per share up from 70 cts in 1986 the company said pretax net should rise to nine to 10 mln dlrs from six mln dlrs in 1986 and rental operation revenues to 19 to 22 mln dlrs from 12 5 mln dlrs it said cash flow per share this year should be 2 50 to three dlrs reuter 3\n" 97 | ] 98 | } 99 | ], 100 | "source": [ 101 | "print(decoded_news)" 102 | ] 103 | }, 104 | { 105 | "cell_type": "markdown", 106 | "metadata": {}, 107 | "source": [ 108 | "![Embedding](https://www.tensorflow.org/images/audio-image-text.png)" 109 | ] 110 | }, 111 | { 112 | "cell_type": "markdown", 113 | "metadata": {}, 114 | "source": [ 115 | "## Word embedding\n", 116 | "\n", 117 | "Words live in a discrete space that is sparse and orthogonal, which severely suffers from the curse of dimensionality. Word embedding is basically a mapping from this challenging space to a lower dimensional vector space that is more dense and correlated. 
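A small sketch of that mapping in Keras, for illustration (the layer sizes mirror the model built further down, and the toy token ids are arbitrary): an `Embedding` layer is simply a trainable lookup table from integer tokens to dense vectors.

```python
import numpy as np
from keras.models import Sequential
from keras.layers import Embedding

lookup = Sequential()
lookup.add(Embedding(input_dim=30980, output_dim=20))   # ~31k tokens -> 20-d vectors

tokens = np.array([[1, 27595, 803]])    # any token ids below the vocabulary size
print(lookup.predict(tokens).shape)     # (1, 3, 20): one 20-d vector per token
print(lookup.layers[0].get_weights()[0].shape)   # (30980, 20) lookup table
```

During training the rows of this table are adjusted by backpropagation, so tokens that behave similarly end up with nearby vectors.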
\n", 118 | "\n", 119 | "https://blog.acolyer.org/2016/04/21/the-amazing-power-of-word-vectors/" 120 | ] 121 | }, 122 | { 123 | "cell_type": "markdown", 124 | "metadata": {}, 125 | "source": [ 126 | "![Recurrent](http://colah.github.io/posts/2015-08-Understanding-LSTMs/img/RNN-unrolled.png)" 127 | ] 128 | }, 129 | { 130 | "cell_type": "markdown", 131 | "metadata": {}, 132 | "source": [ 133 | "## Recurrent neural networks\n", 134 | "\n", 135 | "Recurrent neural networks allow for language patterns beyond keywords as entire sentences can be entered as input sequences. Recurrent neural networks can handle input sequences of varying lengths and share parameters in time.\n", 136 | "\n", 137 | "http://colah.github.io/posts/2015-08-Understanding-LSTMs/\n", 138 | "\n", 139 | "\n", 140 | "https://www.coursera.org/lecture/nlp-sequence-models/backpropagation-through-time-bc7ED" 141 | ] 142 | }, 143 | { 144 | "cell_type": "code", 145 | "execution_count": 106, 146 | "metadata": { 147 | "collapsed": true 148 | }, 149 | "outputs": [], 150 | "source": [ 151 | "from keras.preprocessing import sequence\n", 152 | "from keras.models import Sequential\n", 153 | "from keras.layers import Dense, Embedding\n", 154 | "from keras.layers import LSTM" 155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "execution_count": 70, 160 | "metadata": { 161 | "collapsed": false 162 | }, 163 | "outputs": [ 164 | { 165 | "name": "stdout", 166 | "output_type": "stream", 167 | "text": [ 168 | "Pad sequences (samples x time)\n", 169 | "('x_train shape:', (8982, 100))\n", 170 | "('x_test shape:', (2246, 100))\n" 171 | ] 172 | } 173 | ], 174 | "source": [ 175 | "print('Pad sequences (samples x time)')\n", 176 | "x_train = sequence.pad_sequences(x_train, maxlen=100)\n", 177 | "x_test = sequence.pad_sequences(x_test, maxlen=100)\n", 178 | "print('x_train shape:', x_train.shape)\n", 179 | "print('x_test shape:', x_test.shape)" 180 | ] 181 | }, 182 | { 183 | "cell_type": "code", 184 | "execution_count": 81, 185 | "metadata": { 186 | "collapsed": false 187 | }, 188 | "outputs": [ 189 | { 190 | "name": "stdout", 191 | "output_type": "stream", 192 | "text": [ 193 | "Convert class vector to binary class matrix (for use with categorical_crossentropy)\n", 194 | "('y_train shape:', (8982, 46))\n", 195 | "('y_test shape:', (2246, 46))\n" 196 | ] 197 | } 198 | ], 199 | "source": [ 200 | "print('Convert class vector to binary class matrix '\n", 201 | " '(for use with categorical_crossentropy)')\n", 202 | "y_train = keras.utils.to_categorical(y_train, num_classes)\n", 203 | "y_test = keras.utils.to_categorical(y_test, num_classes)\n", 204 | "print('y_train shape:', y_train.shape)\n", 205 | "print('y_test shape:', y_test.shape)" 206 | ] 207 | }, 208 | { 209 | "cell_type": "code", 210 | "execution_count": 94, 211 | "metadata": { 212 | "collapsed": true 213 | }, 214 | "outputs": [], 215 | "source": [ 216 | "model = Sequential()\n", 217 | "model.add(Embedding(30980, 20))\n", 218 | "model.add(LSTM(20, dropout=0.2, recurrent_dropout=0.2))\n", 219 | "model.add(Dense(46, activation='softmax'))" 220 | ] 221 | }, 222 | { 223 | "cell_type": "code", 224 | "execution_count": 95, 225 | "metadata": { 226 | "collapsed": false 227 | }, 228 | "outputs": [ 229 | { 230 | "name": "stdout", 231 | "output_type": "stream", 232 | "text": [ 233 | "_________________________________________________________________\n", 234 | "Layer (type) Output Shape Param # \n", 235 | "=================================================================\n", 236 | "embedding_4 (Embedding) 
(None, None, 20) 619600 \n", 237 | "_________________________________________________________________\n", 238 | "lstm_4 (LSTM) (None, 20) 3280 \n", 239 | "_________________________________________________________________\n", 240 | "dense_4 (Dense) (None, 46) 966 \n", 241 | "=================================================================\n", 242 | "Total params: 623,846\n", 243 | "Trainable params: 623,846\n", 244 | "Non-trainable params: 0\n", 245 | "_________________________________________________________________\n" 246 | ] 247 | } 248 | ], 249 | "source": [ 250 | "model.summary()" 251 | ] 252 | }, 253 | { 254 | "cell_type": "code", 255 | "execution_count": 96, 256 | "metadata": { 257 | "collapsed": false 258 | }, 259 | "outputs": [], 260 | "source": [ 261 | "model.compile(loss='categorical_crossentropy',\n", 262 | " optimizer='adam', metrics=['accuracy'])" 263 | ] 264 | }, 265 | { 266 | "cell_type": "code", 267 | "execution_count": 97, 268 | "metadata": { 269 | "collapsed": false 270 | }, 271 | "outputs": [ 272 | { 273 | "name": "stdout", 274 | "output_type": "stream", 275 | "text": [ 276 | "Train on 8982 samples, validate on 2246 samples\n", 277 | "Epoch 1/15\n", 278 | "8982/8982 [==============================] - 37s 4ms/step - loss: 2.6426 - acc: 0.3244 - val_loss: 2.4174 - val_acc: 0.3620\n", 279 | "Epoch 2/15\n", 280 | "8982/8982 [==============================] - 35s 4ms/step - loss: 2.3603 - acc: 0.3712 - val_loss: 2.1401 - val_acc: 0.4724\n", 281 | "Epoch 3/15\n", 282 | "8982/8982 [==============================] - 34s 4ms/step - loss: 2.0013 - acc: 0.4841 - val_loss: 1.8407 - val_acc: 0.5071\n", 283 | "Epoch 4/15\n", 284 | "8982/8982 [==============================] - 35s 4ms/step - loss: 1.7718 - acc: 0.5295 - val_loss: 1.7512 - val_acc: 0.5485\n", 285 | "Epoch 5/15\n", 286 | "8982/8982 [==============================] - 36s 4ms/step - loss: 1.6472 - acc: 0.5635 - val_loss: 1.7033 - val_acc: 0.5663\n", 287 | "Epoch 6/15\n", 288 | "8982/8982 [==============================] - 35s 4ms/step - loss: 1.5508 - acc: 0.5915 - val_loss: 1.6725 - val_acc: 0.5739\n", 289 | "Epoch 7/15\n", 290 | "8982/8982 [==============================] - 34s 4ms/step - loss: 1.4480 - acc: 0.6145 - val_loss: 1.6524 - val_acc: 0.5877\n", 291 | "Epoch 8/15\n", 292 | "8982/8982 [==============================] - 36s 4ms/step - loss: 1.3792 - acc: 0.6339 - val_loss: 1.6433 - val_acc: 0.5944\n", 293 | "Epoch 9/15\n", 294 | "8982/8982 [==============================] - 36s 4ms/step - loss: 1.3045 - acc: 0.6549 - val_loss: 1.6436 - val_acc: 0.5935\n", 295 | "Epoch 10/15\n", 296 | "8982/8982 [==============================] - 37s 4ms/step - loss: 1.2387 - acc: 0.6713 - val_loss: 1.6620 - val_acc: 0.5957\n", 297 | "Epoch 11/15\n", 298 | "8982/8982 [==============================] - 37s 4ms/step - loss: 1.1822 - acc: 0.6853 - val_loss: 1.6673 - val_acc: 0.6095\n", 299 | "Epoch 12/15\n", 300 | "8982/8982 [==============================] - 40s 4ms/step - loss: 1.1227 - acc: 0.7060 - val_loss: 1.6587 - val_acc: 0.6215\n", 301 | "Epoch 13/15\n", 302 | "8982/8982 [==============================] - 38s 4ms/step - loss: 1.0699 - acc: 0.7247 - val_loss: 1.6759 - val_acc: 0.6198\n", 303 | "Epoch 14/15\n", 304 | "8982/8982 [==============================] - 38s 4ms/step - loss: 1.0143 - acc: 0.7408 - val_loss: 1.6904 - val_acc: 0.6282\n", 305 | "Epoch 15/15\n", 306 | "8982/8982 [==============================] - 37s 4ms/step - loss: 0.9601 - acc: 0.7568 - val_loss: 1.6906 - val_acc: 0.6322\n", 307 | "2246/2246 
[==============================] - 1s 549us/step\n" 308 | ] 309 | } 310 | ], 311 | "source": [ 312 | "model.fit(x_train, y_train,\n", 313 | " batch_size=batch_size,\n", 314 | " epochs=15,\n", 315 | " validation_data=(x_test, y_test))" 316 | ] 317 | }, 318 | { 319 | "cell_type": "markdown", 320 | "metadata": {}, 321 | "source": [ 322 | "## Further readings\n", 323 | "\n", 324 | "https://stanford.edu/~shervine/teaching/cs-230/cheatsheet-recurrent-neural-networks\n", 325 | "\n", 326 | "https://bair.berkeley.edu/blog/2018/08/06/recurrent/" 327 | ] 328 | }, 329 | { 330 | "cell_type": "code", 331 | "execution_count": null, 332 | "metadata": { 333 | "collapsed": true 334 | }, 335 | "outputs": [], 336 | "source": [] 337 | } 338 | ], 339 | "metadata": { 340 | "kernelspec": { 341 | "display_name": "Python [conda env:tensorflow]", 342 | "language": "python", 343 | "name": "conda-env-tensorflow-py" 344 | }, 345 | "language_info": { 346 | "codemirror_mode": { 347 | "name": "ipython", 348 | "version": 2 349 | }, 350 | "file_extension": ".py", 351 | "mimetype": "text/x-python", 352 | "name": "python", 353 | "nbconvert_exporter": "python", 354 | "pygments_lexer": "ipython2", 355 | "version": "2.7.13" 356 | } 357 | }, 358 | "nbformat": 4, 359 | "nbformat_minor": 1 360 | } 361 | --------------------------------------------------------------------------------
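As a closing sanity check on the recurrent model above, its `model.summary()` parameter counts can also be reproduced by hand (an LSTM layer has four gates, each with input weights, recurrent weights and a bias):

```python
embedding_params = 30980 * 20                      # 619600: one 20-d vector per token
lstm_params = 4 * (20 * 20 + 20 * 20 + 20)         # 3280: 4 gates x (input + recurrent + bias)
dense_params = 20 * 46 + 46                        # 966: 46 Reuters topic classes
print(embedding_params + lstm_params + dense_params)  # 623846, matching the summary
```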