├── .gitignore
├── Cnn-mxnet-Copy1.ipynb
├── Cnn-mxnet.ipynb
├── Handwritten digit.ipynb
├── README.md
├── cnn.ipynb
├── images
│   ├── Stop.jpg
│   ├── turn-left-ahead2.jpg
│   └── vis.png
├── signnames.csv
└── traffic-data
    ├── training.p
    └── valid.p

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
traffic-signs-data
.ipynb_checkpoints
handwritten-digits
.idea
test.py
models/*
traffic-data
!models/.gitkeep

--------------------------------------------------------------------------------
/Handwritten digit.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": [
11 | "import numpy as np\n",
12 | "import os\n",
13 | "import urllib.request\n",
14 | "import gzip\n",
15 | "import struct\n",
16 | "import logging\n",
17 | "logger = logging.getLogger()\n",
18 | "logger.setLevel(logging.DEBUG)\n",
19 | "def download_data(url, force_download=True): \n",
20 | "    fname = url.split(\"/\")[-1]\n",
21 | "    if force_download or not os.path.exists(fname):\n",
22 | "        urllib.request.urlretrieve(url, fname)\n",
23 | "    return fname\n",
24 | "\n",
25 | "def read_data(label_url, image_url):\n",
26 | "    with gzip.open(label_url) as flbl:\n",
27 | "        magic, num = struct.unpack(\">II\", flbl.read(8))\n",
28 | "        label = np.frombuffer(flbl.read(), dtype=np.int8)\n",
29 | "    with gzip.open(image_url, 'rb') as fimg:\n",
30 | "        magic, num, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n",
31 | "        image = np.frombuffer(fimg.read(), dtype=np.uint8).reshape(len(label), rows, cols)\n",
32 | "    return (label, image)\n",
33 | "\n",
34 | "path='handwritten-digits/'\n",
35 | "(train_lbl, train_img) = read_data(\n",
36 | "    path+'train-labels-idx1-ubyte.gz', path+'train-images-idx3-ubyte.gz')\n",
37 | "(val_lbl, val_img) = read_data(\n",
38 | "    path+'t10k-labels-idx1-ubyte.gz', path+'t10k-images-idx3-ubyte.gz')"
39 | ]
40 | },
41 | {
42 | "cell_type": "code",
43 | "execution_count": 2,
44 | "metadata": {},
45 | "outputs": [
46 | {
47 | "data": {
48 | "image/png":
"iVBORw0KGgoAAAANSUhEUgAAAXQAAAA/CAYAAADwizNIAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAEZxJREFUeJztnX9MU9f7xw8gBh1EQOIQggINW3QaTTBiaDDOiLKMDCdq\ndM6J6BygQw0xJjo33RaNC0ImVbNhAi5mIciCP4JbJmYoyvAHgs6lE0WQKUQQKVBoob3n/f3D3fuh\nUAstvS30+7ySJ9EWuO+e85z3Pff8qhsARhAEQYx93J0tgCAIgrAPZOgEQRAuAhk6QRCEi0CGThAE\n4SKQoRMEQbgIZOgEQRAuAhk6QRCEi0CGThAE4SKQoRMEQbgI4xx5MTc3N4dvSwXgRjpIB+kgHa6m\nwxzUQycIgnARyNAJgiBcBDJ0giAIF4EM3QYWLVrESktLGeeclZaWsoULFzpbEkGwM2fOMACsubmZ\nNTc3M4VC4WxJhJWo1WqmVqtt/wMAHBaMMVgTHh4eCAgIMAmVSoX8/HzcvHkT06ZNw9WrVwEABoMB\nx44dG/Q37KFDDKVSCaVSCZ1OB0EQpOjp6Rnyd+2pw1KsWrUKWq0Wc+bMcYqOo0ePgnMOAFi+fLnT\ny8MZ9TJp0iSEhIRg9+7dyM7OhpeXl+w6IiIi0NPTA865lJdr1651eHnMnj0b8+bNw759+yDCOR8U\nN27cwPjx42WtF09PTyxfvhx1dXWoq6sbNfnxuigoKIDBYEBJScmwdJjVNpoMPTw8HDNnzsTOnTtx\n8eJFVFRUmE0Gzjk0Gg0qKyvBOYder8fDhw+xYsUK2SpkyZIl6OjoQEdHh9RodDodtFotBEFAXFzc\niBM0ISEBmzZtGlFSHDlyBNXV1U4x9IyMDPT29kIQBHDOkZCQMCoaiqWwl4633noL58+fx/nz5/Hs\n2TOTG35RUZHsOry9vXHz5k2nGfq8efNw+vRpaDQadHR0SDnQX8/AuHTpEnx9fWWrlzfffBOcc3R1\ndaGrqwvBwcFOy4+hIj8/HwaDAXq9HqmpqcPSMaoNPSYmBjqd7rUG3j8EQcCOHTuwefNmbN68GcuW\nLZPNwN544w3Ex8dDo9FIiShqaGhowJYtW6TXsrOzR5QYKpUKZWVlNieFu7s7SkpK0NTUhLlz5zo8\nQY8dO2ZSRnIbemxsLIqLi9HU1ISmpiYpPw4ePIi0tDQUFxcjNjZW1gY7d+5cXLhwQbqRiZ9do9Gg\nubkZgiBAq9W+tj7sWR6FhYVOM/SqqqpBhj2UoQuCgPfff1+28hANXQylUumw8rA2Hjx4AEEQ8Pff\nfw9bx6g29ICAALS1tZk1cPGRqaamRrqLOapCysrKzCaq+O+MjAyo1WpwzlFeXj4iHW1tbSMy9OnT\np4Nzjj/++MPhCbp69Wr09PRAEAQ0NzdDoVBgwoQJsulIS0uTno7EPFGr1WhqajKpp2vXrsmSH35+\nfrhw4cKg4TdBENDS0gKFQoFZs2ZJr8XHx8uap/7+/vjnn3+cZugHDhyQrtvV1YXc3FycPHkSJ0+e\nRG5uLnJzc6FWq51i6CLOMvSEhAT89ddf0rDxwPe3bduGnp4etLa2Ijo6etg6RrWhM8aQnJyM3377\nDV999ZXUSBsbG+Ht7Q1vb28w9urR7uLFiw6pkEWLFpk8NajVaqjVahw6dAicc3R2diI6OhobN24E\nAIvmMRwd7e3tIzL0u3fvgnOOnJwcWRN0YMTHx6Ozs1NqpDt37pStXsaNG4e4uDjo9XoIggC1Wo3E\nxEQkJibC09MTPj4+qKmpkQz98OHDsujIyMgYZE6tra1obW1FWFgYGGMONfRp06bh5cuXJoZ+9OhR\nKBQKq+pyJPUSFhaGsLCw1w5t+Pr6SsMxgiCgsrLytfML9jR0MeLi4qzObXvoaGlpAecc8fHxZvOg\nubkZACwOt45JQxcr3c3NDRcvXgTnHJ9//rnVlWCPChk4+Xnnzh34+PjAx8cH69evR1ZWFgIDA6Wf\n55yjt7cXCxcutEnHggUL0NfXNyJDr6urA+ccy5YtkzVBB8avv/5qctOTs176G+ndu3cHjcFu375d\nel+j0ZjUkT11VFdXS9dpa2tDeXk5wsPDER4eLv3Mhg0bHGbojDHk5OQMGuLYv3+/Q9rLcCIlJcVk\naKqwsFBWHQMN3dqysJeOf//9F4IgYPXq1Vi9erXJe0qlEnq9HpxzbNu2zSodY8LQxTh9+rRkEO7u\n7nB3d7cpiWzRMXv2bFy5cgWcc2i1Wjx79gxbtmyx+DtiQ7py5YpNOo4cOQLOuc2GHhQUhK6uLnDO\npR6iXAnav8GIjUYQBHR3d2PlypWy1Utubq50raKiIrMTai0tLZJhbNy4URYdjL3qER8/fhzvvfce\npk6davZndu/e7VBD75+Ho83Qt27dOmjI5XUTovbS4e/vb/KEXVBQYLXuker48ccfpWHIwMBAkw6G\nt7c3ysvLIQgC6urq4OnpaZWOMWXo3t7e0tj0mjVrsGbNGpsSyVodXl5e0gSPTqfD2rVrMWXKFISG\nhg6rIT18+NAmHaWlpeCc48iRIzZ9zrKyMnDO0dLSAn9/f1kbCmOvlsk1NjaisbFR+uwqlUq2elGp\nVOCcw2Aw4NatW5g4caL03oQJEzBhwgQkJSWhr68PnHOcOHHCIQ3WUvz+++8ON3QAI+qV2rM80tPT\nkZ6ejubmZhgMBhMzb2xsNKlDuXRUVVU5zdDDwsKg1WphMBjMLuEtKSmBIAjo6OiwSceYMnTGGGbM\nmAG9Xg+NRgONRoPLly9j//79+O9wHFkqJC4uTko6S+uoB4a9DH3VqlXDvqavry9SU1NRVVUFg8EA\nzjnS09NlS9D+8cUXX5hMPt67dw9+fn6y1Iu/v780AXrr1i2T92bOnImGhgY0NDRIeq5fvy7Nudg7\nP14XX3/9NbKyspCVlYXs7GxkZWVJmmpra+Hh4eEQHc7qoUdERCAnJwe1tbVSmFvlotPpsGfPniE7\nSGPd0KOiotDW1gZBEMxe9/DhwzAajRAEAXv37rVJx5gzdMYYNm3aBJ1OZ/LolJmZiZCQEFkqRByH\nHu5YcL/rgHOOR48e2aRDNPSUlBST34uOjkZMTAwOHz6MwsJCnDt3Dnq9Hnq9Hr29veju7sadO3ek\nSUJHLI9LTk42mV+ora1FUFCQbA1l6tSp0rXCw8MxdepUZGZmoq6uThp/7G8cGzZskL3BMvZqSevi\nxYtx584dk16xmAvixPmMGTNk1dE/nGHoUVFRaG9vH9ayxdu3b8teL/2jv6FbWv1lLx3jxo1DRkaG\nSR7U19dLG8y8vLwQEhKChoYGGI1GXLp0yWYd5oK2/hMEQbgKo72Hzv7rAURFReH+/fvS3fbcuXOY\nPn26Xe+wn3zyCfr6+iAIAr799lubekbFxcU26Th37hw45+jp6cHTp0+lED+v0WiETqdDXV0dCgoK\nUFBQgPT0dISGhsLT0xNarRZGo1H2nk9ERMSgfQKlpaWy
9nz8/f3R1dU1aA+AOP4ohrgGWi4dYnh6\neko7hwVBQG9vLzo7O1FRUYGKigrpaUnU891331ncRWyPehmYh87ooQ/MC5GBr3/88cey56kY/Xvo\nOp1O1jxlzHSVFeccra2t0v/r6+tRX19vU64O22PHgqGL4e/vjx07dkiF9bpdVbZWSEpKilTQwx3S\n8fLyQl5eHjjnuH//Pnx8fGzWkZmZidu3bw+KXbt2WVyKuGfPHnDO0dbWJntDOX/+/KBH69ft0rWn\njtjYWHR3d0uN5KeffkJkZCSCg4Px4MEDaaedteOk1uoYP348kpKSpM+ek5MjbY4RN44M3PovCALS\n0tIsnulir/Yy0EArKipkLQ8xFAoFvv/+e7z77ruYP3/+oCgoKJDKwpGGfvDgQYcZelpaGgRBgMFg\nQFdXFxITE6FUKget7ul/0+3o6LB5SG7MG7oYRqNR6rWaO7/F1goRDb29vX1YOry8vKRlSRqNxuLO\nPDnL4/r16+Cc49SpU7I2FKVSiZcvX5psDKmsrLRJs73KIyEhASKccxw4cEA2HZ6ensjLy5M+f3V1\ntbSiKDAwEE+ePMGTJ0+k1TgnTpxAZWWl9PP37t3DypUrERMTg5iYGFnKw9xW+8jISIfXy8Dw8/Nz\niqF/+umnkqH39fXJutFKrVbj5cuX2LVrl8nrkZGRePTokVlDH+64vksZ+oIFC7BgwQL88MMP0m5I\nzjmePn065Pp0Wwz9zJkzQ2pSKpW4cuWKZGzOaiiM/c/Qh7MTbiQ6uru7pUR89OiRtNHKFs32Ko+1\na9eaTL4NtZHIVh0eHh7Iz8+HIAjQ6/X48ssvMXnyZDDGsHjxYtTX10tl09LSgsTERDD2aiXSRx99\nhLKyMpOhmIGdBnuVx9mzZwcZuqUNPI7K09TUVKcY+saNG00Mffbs2bLl6TfffGN2D0hCQoLJIoLU\n1FRERkYiMjLS4lr8oXSMOUOfM2cOiouL0dnZic7OTpNHSaPRiJqaGrtWSGpqKjh/dbCSpb956NAh\nadXNSO6w9mgojDnO0Pv3LCztanN0eTjC0Pft2yeZ+datWxEQEIB169ahoqJC2v2oUqmgUqleu7Fr\n27ZtqK6uRnV1NWbNmiVLefQ/U0VuQ/f09MT69estridnjGHXrl0mNzNHGjpjDM+fP8fz58+luTdH\n5qmfnx+Ki4vBOceLFy/w4sULmz7DmDb04OBgHDp0SDqbYmA0NDQgKSnJ7hUi9tCNRiOKioqgVCoR\nFhaGlJQUVFVVSevhOedob29HeXm5xbFtuRJ0YFy/fh0AZD1DpbS0FMD/xmetfXSVqzwc1UMXJ2X7\n+vrQ2NhosiNVEF6dm+Lh4THkenNH5IdoHP0nJ4czTmuNjg8++AB3796FIAhmb2ABAQHYvn07tm/f\nbtI77e3tlZ5eHFUehYWFKCwshF6vt3hgnBw6srKypHm50NDQIdffW6PDXIxjo4igoCAWHR3NVCoV\nmzJlyqD36+vr2cGDB1leXh7jnMumw83NjX344Yds6dKlTK/Xs8mTJ5u8//jxY3b58mX22WefyabB\nWgAwd3d5VqEqlUo2f/58BoAJgsAKCwtZU1OTLNeyloiICIdcp6Ojg02cOJF5eHiw4OBgxhhjNTU1\nrLS0lJ06dYrV1tYyQRAcomUoHj9+zBhjzM/PT7ZrnDhxggUGBjLGGMvMzGQajcbk/SVLlrCQkBDG\nGBNNkD148IBlZ2ezX375RTZdlgDAent7HXY9hULBkpKSGGOMnT59mjU0NMh/0dHQQw8ICMCff/5p\n9vjchw8fIjk5GcnJyUM+2pkLa3SEhobi8ePHZicvtFotzpw5M6zx9ZHqsDbEIRdz33RiDx0rVqyQ\nymM4K2kcWR5RUVEQ4ZzL1kOfNGkSduzYgYKCAmRlZSEoKGjIpYjOyo9169Zh3bp1Jnls7x66uZU8\nr9tY1NXVhZKSEqt6x3L00Dnn2Lx5s8Pq5cWLFxAEAZcvX5YlP8xqc6ahx8bGorKyUvoWoP7R29uL\nkydPDnsLt70qJCQkBMePHzcx9J9//nnQmKfcOqwJccjl/6Ohiw1HbDzDHQJzRL04S4dCoYBCoZC+\nYEMOQ4+JiTE5q6Z/tLa24unTpygqKkJRURGioqKcWh7iNxYZDIYhd1LbU0d2djYEQbD6JjJcHaPO\n0PPz801MvLm5GXl5ecjNzbV4wJSzEmO06hC3Gstl6MHBwdLZHKPR0DMyMqRjddVq9ahYpjea8kMu\nHV5eXti7d6+0+qmyshJ79+616ave5CyPa9eu4dq1a3j27JnDzod3RL2YC7f/BDqE/w7VcigA3EiH\na+vw9fVljDF29epV9s4777AbN26wpUuXMq1W61AdI4F0kA5rdZiDznIhxjwajYZpNBq2cOFCdvbs\nWRYVFcXefvttZ8siCIdDPXTSQTpIB+kYgzrM4VBDJwiCIOSDhlwIgiBcBDJ0giAIF4EMnSAIwkUg\nQycIgnARyNAJgiBcBDJ0giAIF4EMnSAIwkUgQycIgnARyNAJgiBcBDJ0giAIF4EMnSAIwkUgQycI\ngnARyNAJgiBcBDJ0giAIF4EMnSAIwkUgQycIgnARyNAJgiBcBDJ0giAIF4EMnSAIwkUgQycIgnAR\nyNAJgiBcBDJ0giAIF4EMnSAIwkX4P9JCW/Bhbv/nAAAAAElFTkSuQmCC\n", 49 | "text/plain": [ 50 | "" 51 | ] 52 | }, 53 | "metadata": {}, 54 | "output_type": "display_data" 55 | }, 56 | { 57 | "name": "stdout", 58 | "output_type": "stream", 59 | "text": [ 60 | "label: [5 0 4 1 9 2 1 3 1 4]\n" 61 | ] 62 | } 63 | ], 64 | "source": [ 65 | "%matplotlib inline\n", 66 | "import matplotlib.pyplot as plt\n", 67 | "for i in range(10):\n", 68 | " plt.subplot(1,10,i+1)\n", 69 | " plt.imshow(train_img[i], cmap='Greys_r')\n", 70 | " plt.axis('off')\n", 71 | "plt.show()\n", 72 | "print('label: %s' % (train_lbl[0:10],))" 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": 3, 78 | "metadata": {}, 79 | "outputs": [ 80 | { 81 | "data": { 82 | "text/plain": [ 83 | "array([5, 0, 4, 1, 9, 2, 1, 3, 1, 4], dtype=int8)" 84 | ] 85 | }, 86 | "execution_count": 3, 87 | 
"metadata": {}, 88 | "output_type": "execute_result" 89 | } 90 | ], 91 | "source": [ 92 | "train_lbl[0:10]" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": 4, 98 | "metadata": { 99 | "collapsed": true 100 | }, 101 | "outputs": [], 102 | "source": [ 103 | "import mxnet as mx\n", 104 | "\n", 105 | "def to4d(img):\n", 106 | " return img.reshape(img.shape[0], 1, 28, 28).astype(np.float32)/255\n", 107 | "\n", 108 | "batch_size = 100\n", 109 | "train_iter = mx.io.NDArrayIter(to4d(train_img), train_lbl, batch_size, shuffle=True)\n", 110 | "val_iter = mx.io.NDArrayIter(to4d(val_img), val_lbl, batch_size)" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": null, 116 | "metadata": { 117 | "collapsed": true 118 | }, 119 | "outputs": [], 120 | "source": [ 121 | "data = mx.symbol.Variable('data')\n", 122 | "# first conv layer\n", 123 | "conv1 = mx.sym.Convolution(data=data, kernel=(5,5), num_filter=20)\n", 124 | "tanh1 = mx.sym.Activation(data=conv1, act_type=\"tanh\")\n", 125 | "pool1 = mx.sym.Pooling(data=tanh1, pool_type=\"max\", kernel=(2,2), stride=(2,2))\n", 126 | "# second conv layer\n", 127 | "conv2 = mx.sym.Convolution(data=pool1, kernel=(5,5), num_filter=50)\n", 128 | "tanh2 = mx.sym.Activation(data=conv2, act_type=\"tanh\")\n", 129 | "pool2 = mx.sym.Pooling(data=tanh2, pool_type=\"max\", kernel=(2,2), stride=(2,2))\n", 130 | "# first fullc layer\n", 131 | "flatten = mx.sym.Flatten(data=pool2)\n", 132 | "fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500)\n", 133 | "tanh3 = mx.sym.Activation(data=fc1, act_type=\"tanh\")\n", 134 | "# second fullc\n", 135 | "fc2 = mx.sym.FullyConnected(data=tanh3, num_hidden=10)\n", 136 | "# softmax loss\n", 137 | "lenet = mx.sym.SoftmaxOutput(data=fc2, name='softmax')" 138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": null, 143 | "metadata": {}, 144 | "outputs": [ 145 | { 146 | "name": "stderr", 147 | "output_type": "stream", 148 | "text": [ 149 | "INFO:root:Start training with [gpu(0)]\n", 150 | "INFO:root:Epoch[0] Batch [200]\tSpeed: 11436.71 samples/sec\tTrain-accuracy=0.098850\n", 151 | "INFO:root:Epoch[0] Batch [400]\tSpeed: 11509.65 samples/sec\tTrain-accuracy=0.100000\n", 152 | "INFO:root:Epoch[0] Batch [600]\tSpeed: 11435.95 samples/sec\tTrain-accuracy=0.097300\n", 153 | "INFO:root:Epoch[0] Resetting Data Iterator\n", 154 | "INFO:root:Epoch[0] Time cost=5.520\n", 155 | "INFO:root:Epoch[0] Validation-accuracy=0.098000\n", 156 | "INFO:root:Epoch[1] Batch [200]\tSpeed: 11618.76 samples/sec\tTrain-accuracy=0.098850\n", 157 | "INFO:root:Epoch[1] Batch [400]\tSpeed: 11105.13 samples/sec\tTrain-accuracy=0.100000\n", 158 | "INFO:root:Epoch[1] Batch [600]\tSpeed: 11445.94 samples/sec\tTrain-accuracy=0.097300\n", 159 | "INFO:root:Epoch[1] Resetting Data Iterator\n", 160 | "INFO:root:Epoch[1] Time cost=5.288\n", 161 | "INFO:root:Epoch[1] Validation-accuracy=0.098000\n", 162 | "INFO:root:Epoch[2] Batch [200]\tSpeed: 11546.46 samples/sec\tTrain-accuracy=0.098850\n", 163 | "INFO:root:Epoch[2] Batch [400]\tSpeed: 11541.21 samples/sec\tTrain-accuracy=0.100000\n", 164 | "INFO:root:Epoch[2] Batch [600]\tSpeed: 11510.45 samples/sec\tTrain-accuracy=0.097300\n", 165 | "INFO:root:Epoch[2] Resetting Data Iterator\n", 166 | "INFO:root:Epoch[2] Time cost=5.218\n", 167 | "INFO:root:Epoch[2] Validation-accuracy=0.098000\n", 168 | "INFO:root:Epoch[3] Batch [200]\tSpeed: 11551.18 samples/sec\tTrain-accuracy=0.098850\n", 169 | "INFO:root:Epoch[3] Batch [400]\tSpeed: 11434.52 
samples/sec\tTrain-accuracy=0.100000\n",
170 | "INFO:root:Epoch[3] Batch [600]\tSpeed: 11420.25 samples/sec\tTrain-accuracy=0.097300\n",
171 | "INFO:root:Epoch[3] Resetting Data Iterator\n",
172 | "INFO:root:Epoch[3] Time cost=5.240\n",
173 | "INFO:root:Epoch[3] Validation-accuracy=0.098000\n",
174 | "INFO:root:Epoch[4] Batch [200]\tSpeed: 11499.03 samples/sec\tTrain-accuracy=0.098850\n",
175 | "INFO:root:Epoch[4] Batch [400]\tSpeed: 11302.64 samples/sec\tTrain-accuracy=0.100000\n",
176 | "INFO:root:Epoch[4] Batch [600]\tSpeed: 11406.15 samples/sec\tTrain-accuracy=0.097300\n",
177 | "INFO:root:Epoch[4] Resetting Data Iterator\n",
178 | "INFO:root:Epoch[4] Time cost=5.268\n",
179 | "INFO:root:Epoch[4] Validation-accuracy=0.098000\n",
180 | "INFO:root:Epoch[5] Batch [200]\tSpeed: 11401.83 samples/sec\tTrain-accuracy=0.098850\n",
181 | "INFO:root:Epoch[5] Batch [400]\tSpeed: 11526.17 samples/sec\tTrain-accuracy=0.100000\n"
182 | ]
183 | }
184 | ],
185 | "source": [
186 | "# @@@ AUTOTEST_OUTPUT_IGNORED_CELL\n",
187 | "model = mx.model.FeedForward(\n",
188 | "    ctx = mx.gpu(0), # use GPU 0 for training, others are same as before\n",
189 | "    symbol = lenet, \n",
190 | "    num_epoch = 10, \n",
191 | "    learning_rate = 0.1)\n",
192 | "model.fit(\n",
193 | "    X=train_iter, \n",
194 | "    eval_data=val_iter, \n",
195 | "    batch_end_callback = mx.callback.Speedometer(batch_size, 200)\n",
196 | ")"
197 | ]
198 | },
199 | {
200 | "cell_type": "code",
201 | "execution_count": null,
202 | "metadata": {
203 | "collapsed": true
204 | },
205 | "outputs": [],
206 | "source": [
207 | ""
208 | ]
209 | }
210 | ],
211 | "metadata": {
212 | "kernelspec": {
213 | "display_name": "Python 3",
214 | "language": "python",
215 | "name": "python3"
216 | },
217 | "language_info": {
218 | "codemirror_mode": {
219 | "name": "ipython",
220 | "version": 3.0
221 | },
222 | "file_extension": ".py",
223 | "mimetype": "text/x-python",
224 | "name": "python",
225 | "nbconvert_exporter": "python",
226 | "pygments_lexer": "ipython3",
227 | "version": "3.5.3"
228 | }
229 | },
230 | "nbformat": 4,
231 | "nbformat_minor": 0
232 | }
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Classifying traffic signs with MXNet: An introduction to customizing a neural network

Although there are many deep learning frameworks, including TensorFlow, Keras, Torch, and Caffe, MXNet in particular is gaining popularity due to its scalability across multiple GPUs. In this blog post, we'll tackle a computer vision problem: classifying German traffic signs with a convolutional neural network. The network takes a color photo containing a traffic sign as input and tries to identify the type of sign.

The full notebook can be found [here](https://github.com/manujeevanprakash/mxnet-ccn-samples/blob/master/Cnn-mxnet.ipynb).

In order to work through this notebook, we expect you'll have a very basic understanding of neural networks, convolutions, activation units, gradient descent, NumPy, and OpenCV. These prerequisites are not mandatory, but having a basic familiarity will help.

By the end of the notebook, you will be able to:

1. Prepare a data set for training a neural network;
2. Generate and augment data to balance the data set; and
3. Implement a custom neural network architecture for a multiclass classification problem.

## Preparing your environment
If you're working in the AWS Cloud, you can save yourself the installation management by using an [Amazon Machine Image (AMI)](https://aws.amazon.com/marketplace/pp/B01M0AXXQB#support) preconfigured for deep learning. This will enable you to skip steps 1-5 below.

Note that if you are using a conda environment, remember to install pip inside conda by typing 'conda install pip' after you activate the environment. This step will save you a lot of problems down the road.

Here's how to get set up:

1. First, get [Anaconda](https://www.continuum.io/downloads), a package manager. It will help you to install dependent Python libraries with ease.
2. Install the OpenCV-python library, a powerful computer vision library. We will use this to process our images. To install OpenCV inside the Anaconda environment, use 'pip install opencv-python'. You can also build from source. (Note: conda install opencv3.0 does not work.)
3. Next, install [scikit-learn](http://scikit-learn.org/stable/install.html), a general-purpose scientific computing library. We'll use this to preprocess our data. You can install it with 'conda install scikit-learn'.
4. Then grab the Jupyter Notebook, with 'conda install jupyter notebook'.
5. And finally, get [MXNet](http://mxnet.io/get_started/install.html), an open source deep learning library.

Here are the commands you need to type inside the anaconda environment (after activating the environment):
1. conda install pip
2. pip install opencv-python
3. conda install scikit-learn
4. conda install jupyter notebook
5. pip install mxnet

## The data set
In order to train any deep neural network, we need data. For this notebook, we use a data set already stored as a NumPy array. You can also load data from any image file; we'll show that process later in the notebook.

The data set we'll use is the [German Traffic Sign Recognition Benchmark](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset) (J. Stallkamp, M. Schlipsing, J. Salmen, and C. Igel. "The German Traffic Sign Recognition Benchmark: A multi-class classification competition." In _Proceedings of the IEEE International Joint Conference on Neural Networks_, pages 1453–1460. 2011.).

This data set consists of 39,209 training samples and 12,630 testing samples, representing 43 different traffic signs: stop signs, speed limits, various warning signs, and so on.

We'll use a [pickled](https://en.wikipedia.org/wiki/Serialization#Pickle) version of the data, [training.p](https://www.dropbox.com/s/k2o4n09fhja8x85/train.p?dl=0) and [valid.p](https://www.dropbox.com/s/wsofgk6otd8qaew/valid.p?dl=0).

Each image in the dataset is 32x32 pixels with three color channels (RGB), and it belongs to a particular image class. The image class is an integer label between 0 and 42. The 'signnames.csv' file contains the mapping between each class label and the corresponding sign name.
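
For reference, the first few rows of signnames.csv look like this (the exact header text is an assumption; the class-to-name mapping matches the dictionary we print below):

```
ClassId,SignName
0,Speed limit (20km/h)
1,Speed limit (30km/h)
2,Speed limit (50km/h)
...
```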

Here's the code for loading the data:

```python
import pickle

# TODO: Fill this in based on where you saved the training and testing data
training_file = "traffic-data/training.p"
validation_file = "traffic-data/valid.p"

with open(training_file, mode='rb') as f:
    train = pickle.load(f)

with open(validation_file, mode='rb') as f:
    valid = pickle.load(f)

X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
```
We are loading the data from a stored NumPy array. In this array, the data is split between training, validation, and test sets. The training set contains the features of 39,209 images of size 32 x 32 with 3 (R,G,B) channels. As a result, the NumPy array dimension is 39,209 x 32 x 32 x 3. We will only be using the training set and validation set in this notebook; we will use real images from the internet to test our model.

So X_train is of dimension 39,209 x 32 x 32 x 3. y_train is of dimension 39,209 and contains a label between 0 and 42 for each image.

Next, we load the file that maps each image class ID to a natural-language name:
```python
# The actual names of the classes are given in a separate file. Here we load the CSV file,
# which allows mapping from classes/labels to sign names.
import csv
def read_csv_and_parse():
    traffic_labels_dict = {}
    with open('signnames.csv') as f:
        reader = csv.reader(f)
        count = -1
        for row in reader:
            count = count + 1
            if count == 0:
                continue  # skip the header row
            label_index = int(row[0])
            traffic_labels_dict[label_index] = row[1]
    return traffic_labels_dict
traffic_labels_dict = read_csv_and_parse()
print(traffic_labels_dict)
```
We can see there are 43 names for the 43 image classes. For example, image class 0 represents a 20 km/h speed limit:

```python
{0: 'Speed limit (20km/h)', 1: 'Speed limit (30km/h)', 2: 'Speed limit (50km/h)', 3: 'Speed limit (60km/h)', 4: 'Speed limit (70km/h)', 5: 'Speed limit (80km/h)', 6: 'End of speed limit (80km/h)', 7: 'Speed limit (100km/h)', 8: 'Speed limit (120km/h)', 9: 'No passing', 10: 'No passing for vehicles over 3.5 metric tons', 11: 'Right-of-way at the next intersection', 12: 'Priority road', 13: 'Yield', 14: 'Stop', 15: 'No vehicles', 16: 'Vehicles over 3.5 metric tons prohibited', 17: 'No entry', 18: 'General caution', 19: 'Dangerous curve to the left', 20: 'Dangerous curve to the right', 21: 'Double curve', 22: 'Bumpy road', 23: 'Slippery road', 24: 'Road narrows on the right', 25: 'Road work', 26: 'Traffic signals', 27: 'Pedestrians', 28: 'Children crossing', 29: 'Bicycles crossing', 30: 'Beware of ice/snow', 31: 'Wild animals crossing', 32: 'End of all speed and passing limits', 33: 'Turn right ahead', 34: 'Turn left ahead', 35: 'Ahead only', 36: 'Go straight or right', 37: 'Go straight or left', 38: 'Keep right', 39: 'Keep left', 40: 'Roundabout mandatory', 41: 'End of no passing', 42: 'End of no passing by vehicles over 3.5 metric tons'}
```
## Visualization
The following code will help us to visualize the images along with their labels (image classes):

```python
# Exploratory data visualization
# This gives a better, intuitive understanding of the data

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
# Visualizations will be shown in the notebook.
%matplotlib inline

n_classes = len(np.unique(y_train))  # number of distinct sign classes (43)

# This function selects one image per class to plot
def get_images_to_plot(images, labels):
    selected_image = []
    idx = []
    for i in range(n_classes):
        selected = np.where(labels == i)[0][0]
        selected_image.append(images[selected])
        idx.append(selected)
    return selected_image, idx

# function to plot the images in a grid
def plot_images(selected_image, y_val, row=5, col=10, idx=None):
    count = 0
    f, axarr = plt.subplots(row, col, figsize=(50, 50))

    for i in range(row):
        for j in range(col):
            if count < len(selected_image):
                axarr[i, j].imshow(selected_image[count])
                if idx is not None:
                    axarr[i, j].set_title(traffic_labels_dict[y_val[idx[count]]], fontsize=20)
                axarr[i, j].axis('off')
                count = count + 1

selected_image, idx = get_images_to_plot(X_train, y_train)
plot_images(selected_image, row=10, col=4, idx=idx, y_val=y_train)
```
Here are the visualized traffic signs, with their labels:
![Alt text](images/vis.png?raw=true "traffic sign visualization")


## Preparing the data set

X_train and y_train make up the training data set. We'll use real images from the internet for testing.

You could also generate a validation set by splitting the training data into train and validation sets using scikit-learn (this is how you avoid testing your model on images that it's already seen). Here's the Python code for that:

```python
# split the training set into a smaller train set and a validation set
from sklearn.model_selection import train_test_split
X_train_set, X_validation_set, Y_train_set, Y_validation_set = train_test_split(X_train, y_train, test_size=0.02, random_state=42)
```
MXNet's image dimension ordering is similar to Theano's and uses the format 3x32x32: the number of channels comes first, followed by the height and width of the image. TensorFlow uses the image dimension ordering 32x32x3, i.e., the color channels come last. If you're switching from TensorFlow to MXNet, [this discussion of dimension ordering](https://datascience.stackexchange.com/questions/14467/what-does-theano-dimension-ordering-mean) may be helpful. Below is the code to convert the image ordering from 32x32x3 to MXNet's 3x32x32 format:

```python
# change the image dimensioning from 32 x 32 x 3 to 3 x 32 x 32 for the training set
X_train_reshape = np.transpose(X_train, (0, 3, 1, 2))
plt.imshow(X_train_reshape[0].transpose((1, 2, 0)))
print(X_train_reshape.shape)


# change the image dimensioning from 32 x 32 x 3 to 3 x 32 x 32 for the validation set
X_valid_reshape = np.transpose(X_valid, (0, 3, 1, 2))
plt.imshow(X_valid_reshape[1].transpose((1, 2, 0)))
print(X_valid_reshape.shape)
```

## Building the deepnet

Now, enough of preparing our data set; let's actually code up the neural network. You'll note that there are some commented-out lines; I've left these in as artifacts from the development process, because building a successful deep learning model is all about iteration and experimentation to find what works best. Building neural networks is something of a black art at this point in history; while you might experiment to solve your particular problem, for a well-explored issue like image recognition, you'll do best to implement a published architecture with proven performance.
Here, we'll build up a simplified version of the [AlexNet](https://en.wikipedia.org/wiki/AlexNet) architecture, which is based on convolutional neural networks.

The network code is concise and simple, thanks to MXNet's symbolic API:

```python
data = mx.symbol.Variable('data')
# first conv layer
conv1 = mx.sym.Convolution(data=data, pad=(1,1), kernel=(3,3), num_filter=24, name="conv1")
relu1 = mx.sym.Activation(data=conv1, act_type="relu", name="relu1")
pool1 = mx.sym.Pooling(data=relu1, pool_type="max", kernel=(2,2), stride=(2,2), name="max_pool1")
# second conv layer
conv2 = mx.sym.Convolution(data=pool1, kernel=(3,3), num_filter=48, name="conv2", pad=(1,1))
relu2 = mx.sym.Activation(data=conv2, act_type="relu", name="relu2")
pool2 = mx.sym.Pooling(data=relu2, pool_type="max", kernel=(2,2), stride=(2,2), name="max_pool2")
# third conv layer
conv3 = mx.sym.Convolution(data=pool2, kernel=(5,5), num_filter=64, name="conv3")
relu3 = mx.sym.Activation(data=conv3, act_type="relu", name="relu3")
pool3 = mx.sym.Pooling(data=relu3, pool_type="max", kernel=(2,2), stride=(2,2), name="max_pool3")

#conv4 = mx.sym.Convolution(data=conv3, kernel=(5,5), num_filter=64, name="conv4")
#relu4 = mx.sym.Activation(data=conv4, act_type="relu", name="relu4")
#pool4 = mx.sym.Pooling(data=relu4, pool_type="max", kernel=(2,2), stride=(2,2), name="max_pool4")

# first fully connected layer
flatten = mx.sym.Flatten(data=pool3)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500, name="fc1")
relu4 = mx.sym.Activation(data=fc1, act_type="relu", name="relu4")
# second fully connected layer
fc2 = mx.sym.FullyConnected(data=relu4, num_hidden=43, name="final_fc")
# softmax loss
mynet = mx.sym.SoftmaxOutput(data=fc2, name='softmax')
```


Let's break down the code a bit. First, it creates a data layer (the input layer) that holds the input images while training:

```python
data = mx.symbol.Variable('data')
```

The conv1 layer applies a convolution operation to the image, and is connected to the data layer:

```python
conv1 = mx.sym.Convolution(data=data, pad=(1,1), kernel=(3,3), num_filter=24, name="conv1")
```
The relu2 layer applies a non-linear activation to its input, and is connected to the conv2 layer:

```python
relu2 = mx.sym.Activation(data=conv2, act_type="relu", name="relu2")
```

The max pool layer performs a pooling operation on the previous layer's output (relu2), taking the maximum over small windows and thereby reducing the spatial size of the image:

```python
pool2 = mx.sym.Pooling(data=relu2, pool_type="max", kernel=(2,2), stride=(2,2), name="max_pool2")
```

Neural network layers stack together like Lego blocks: we can easily repeat some of the layers (to increase the learning capacity of the model) and then follow them with a dense layer. A dense layer is a fully connected layer, in which every neuron from the previous layer is connected to every neuron in the dense layer.

```python
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500, name="fc1")
```

This layer is followed by another fully connected layer with 43 neurons, one for each image class. Since the outputs of these neurons are real-valued, but our classification requires a single label, we apply a softmax function, which converts the 43 raw scores into a probability distribution over the classes; the predicted label is the class with the highest probability.
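
To make this concrete, here is a minimal, standalone sketch of the softmax computation in plain NumPy. It is illustrative only: the scores below are hypothetical, and in our network MXNet's SoftmaxOutput performs this step internally.

```python
import numpy as np

def softmax(scores):
    # subtract the max for numerical stability, then exponentiate and normalize
    exps = np.exp(scores - np.max(scores))
    return exps / exps.sum()

scores = np.array([2.0, 1.0, 0.1])  # hypothetical raw scores for 3 classes
probs = softmax(scores)
print(probs)           # approx. [0.659 0.242 0.099]; sums to 1
print(probs.argmax())  # 0 -- the index of the most probable class
```

For our 43-class network, the predicted sign is the argmax over the 43 softmax probabilities.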

Here are the corresponding final layers:

```python
fc2 = mx.sym.FullyConnected(data=relu4, num_hidden=43, name="final_fc")
# softmax loss
mynet = mx.sym.SoftmaxOutput(data=fc2, name='softmax')
```

## Tweaking training data
A neural network takes a lot of time and memory to train. We're going to split our data into minibatches of 64 images, not just so that the data fits into memory, but also because it lets MXNet make the most of the GPU's computational efficiency (among other reasons).

We'll also normalize the image color values from the range 0-255 to the range 0 to 1. This helps the learning algorithm converge faster. You can read about the [reasons to normalize the input](http://deeplearning.stanford.edu/wiki/index.php/Data_Preprocessing#Data_Normalization).

Here's the code to normalize the image color values and build the data iterators:
```python

batch_size = 64
X_train_set_as_float = X_train_reshape.astype('float32')
X_train_set_norm = X_train_set_as_float[:] / 255.0

X_validation_set_as_float = X_valid_reshape.astype('float32')
X_validation_set_norm = X_validation_set_as_float[:] / 255.0


# feed the normalized arrays to the iterators
train_iter = mx.io.NDArrayIter(X_train_set_norm, y_train, batch_size, shuffle=True)
val_iter = mx.io.NDArrayIter(X_validation_set_norm, y_valid, batch_size, shuffle=True)


print("train set : ", X_train_set_norm.shape)
print("validation set : ", X_validation_set_norm.shape)


print("y train set : ", y_train.shape)
print("y validation set :", y_valid.shape)
```

## Training the network
We train the network on a GPU, since it's much faster. A single pass through the training set is referred to as one "epoch," and we train the network for 10 epochs (num_epoch = 10). We also periodically checkpoint the trained model (the network as a JSON file plus the weights as params files), and measure the train and validation accuracy to see our neural network 'learn.'

Here is the code:
```python
# create the Adam optimizer
adam = mx.optimizer.create('adam')

# checkpointing (saving the model). Make sure a folder named 'models' exists
model_prefix = 'models/chkpt'
checkpoint = mx.callback.do_checkpoint(model_prefix)

# load the Module API. (MXNet previously used FeedForward, which is deprecated.)
model = mx.mod.Module(
    context = mx.gpu(0), # use GPU 0 for training; if you don't have a GPU, use mx.cpu()
    symbol = mynet,
    data_names=['data']
)

# actually fit the model for 10 epochs. This can take about 5 minutes
model.fit(
    train_iter,
    eval_data=val_iter,
    batch_end_callback = mx.callback.Speedometer(batch_size, 64),
    num_epoch = 10,
    eval_metric='acc', # the evaluation metric is accuracy
    optimizer = adam,
    epoch_end_callback=checkpoint
)
```

## Loading the trained model from the filesystem
Since we checkpointed the model during training, we can load any epoch and check its classification power. In the following example, we load the 10th epoch. We bind the loaded module with for_training=False, since we are using this network for testing, not training. Furthermore, we reduce the input batch size from 64 to 1 (data_shapes=[('data', (1,3,32,32))]), since we are going to test it on a single image.

You can use the same technique to load any other pre-trained machine learning model:

```python
# load the model from the checkpoint; here we load the 10th epoch
sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, 10)

# assign the loaded parameters to the module
mod = mx.mod.Module(symbol=sym, context=mx.cpu())
mod.bind(for_training=False, data_shapes=[('data', (1,3,32,32))])
mod.set_params(arg_params, aux_params)
```

## Prediction
To use the loaded model for prediction, we resize a traffic sign image (Stop.jpg) to 32x32 pixels with 3 channels, reorder it to MXNet's 1x3x32x32 layout, and try to predict its label. Here's the image I downloaded:

![Alt text](images/Stop.jpg?raw=true "test image")


```python
# Prediction for a random traffic sign from the internet
import cv2
from collections import namedtuple
Batch = namedtuple('Batch', ['data'])

# load the image, resize it to 32x32, and convert it to 1x3x32x32
def get_image(url, show=False):
    # read and show the image
    img = cv2.imread(url)
    if img is None:
        return None
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if show:
        plt.imshow(img)
        plt.axis('off')
    # convert into format (batch, RGB, width, height)
    img = cv2.resize(img, (32, 32))
    img = np.swapaxes(img, 0, 2)
    img = np.swapaxes(img, 1, 2)  # swap axes to make it 3x32x32
    #plt.imshow(img.transpose(1,2,0))
    #plt.axis('off')
    img = img[np.newaxis, :]  # add an extra axis so the image becomes 1x3x32x32
    return img

def predict(url):
    img = get_image(url, show=True)
    # compute the prediction probabilities
    mod.forward(Batch([mx.nd.array(img)]))
    prob = mod.get_outputs()[0].asnumpy()
    # print the top-5 classes
    prob = np.squeeze(prob)
    prob = np.argsort(prob)[::-1]
    for i in prob[0:5]:
        print('class=%s' % (traffic_labels_dict[i]))

predict('images/Stop.jpg')
```
We then get the model's top five predictions for what this image is, and find that our model got it right! The predictions:

class=Stop

class=Speed limit (30km/h)

class=Speed limit (20km/h)

class=Speed limit (70km/h)

class=Bicycles crossing

## Conclusion

In this [notebook](https://github.com/manujeevanprakash/mxnet-ccn-samples/blob/master/Cnn-mxnet.ipynb), we explored how to use MXNet to perform multi-class image classification. While the network we built is simpler than the most sophisticated image-recognition architectures available, even this simpler version was surprisingly performant! We also learned techniques to preprocess image data, trained the network, and stored the trained model on disk. Later, we loaded the pre-trained model to classify images from the web. This model could be deployed as a web service or app (you could build your own [what-dog](https://what-dog.net)!). You could also apply these techniques to other classification problems, whether that's analyzing sentiment and intent in chats with your help desk, or discovering illegal intent in financial behaviors.

In our next notebook, we'll develop a state-of-the-art sentiment classifier using MXNet.

--------------------------------------------------------------------------------
/cnn.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Tutorial on Handwritten Digit Recognition\n",
8 | "\n",
9 | "In this tutorial we will go through the basic use case of MXNet and also touch on some advanced usages. This example is based on the MNIST dataset, which contains 70,000 images of handwritten digits, each 28-by-28 pixels in size.\n",
10 | "\n",
11 | "This tutorial covers the following topics:\n",
12 | "- Network definition.\n",
13 | "- Variable naming.\n",
14 | "- Basic data loading and training with feed-forward deep neural networks.\n",
15 | "- Monitoring intermediate outputs for debugging.\n",
16 | "- Custom training loops for advanced models.\n",
17 | "\n",
18 | "First let's import the modules and set up logging:"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 1,
24 | "metadata": {},
25 | "outputs": [
26 | {
27 | "ename": "ModuleNotFoundError",
28 | "evalue": "No module named 'cv2'",
29 | "output_type": "error",
30 | "traceback": [
31 | "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
32 | "\u001b[1;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
33 | "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mmxnet\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mmx\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mnumpy\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 4\u001b[1;33m \u001b[1;32mimport\u001b[0m \u001b[0mcv2\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 5\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mmatplotlib\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpyplot\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mplt\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mlogging\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
34 | "\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'cv2'"
35 | ]
36 | }
37 | ],
38 | "source": [
39 | "%matplotlib inline\n",
40 | "import mxnet as mx\n",
41 | "import numpy as np\n",
42 | "import cv2\n",
43 | "import matplotlib.pyplot as plt\n",
44 | "import logging\n",
45 | "\n",
46 | "logger = logging.getLogger()\n",
47 | "logger.setLevel(logging.DEBUG)"
48 | ]
49 | },
50 | {
51 | "cell_type": "markdown",
52 | "metadata": {
53 | "collapsed": true
54 | },
55 | "source": [
56 | "## Network Definition\n",
57 | "Now we can start constructing our network:"
58 | ]
59 | },
60 | {
61 | "cell_type": "code",
62 | "execution_count": 3,
63 | "metadata": {
64 | "collapsed": true
65 | },
66 | "outputs": [],
67 | "source": [
68 | "# Variables are placeholders for input arrays. We give each variable a unique name.\n",
69 | "data = mx.symbol.Variable('data')\n",
70 | "\n",
71 | "# The input is fed to a fully connected layer that computes Y=WX+b.\n",
72 | "# This is the main computation module in the network.\n",
73 | "# Each layer also needs a unique name. We'll talk more about naming in the next section.\n",
74 | "fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128)\n",
75 | "# Activation layers apply a non-linear function on the previous layer's output.\n",
76 | "# Here we use Rectified Linear Unit (ReLU) that computes Y = max(X, 0).\n",
77 | "act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type=\"relu\")\n",
78 | "\n",
79 | "fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)\n",
80 | "act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type=\"relu\")\n",
81 | "\n",
82 | "fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)\n",
83 | "# Finally we have a loss layer that compares the network's output with the label and generates gradient signals.\n",
84 | "mlp = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')"
85 | ]
86 | },
87 | {
88 | "cell_type": "markdown",
89 | "metadata": {},
90 | "source": [
91 | "We can visualize the network we just defined with MXNet's visualization module:"
92 | ]
93 | },
94 | {
95 | "cell_type": "code",
96 | "execution_count": 4,
97 | "metadata": {},
98 | "outputs": [
99 | {
100 | "data": {
101 | "image/svg+xml": [ "<SVG rendering of the network graph elided: data -> fc1 (FullyConnected, 128) -> relu1 (Activation, relu) -> fc2 (FullyConnected, 64) -> relu2 (Activation, relu) -> fc3 (FullyConnected, 10) -> softmax (SoftmaxOutput)>" ],
176 | "text/plain": [
177 | ""
178 | ]
179 | },
180 | "execution_count": 4,
181 | "metadata": {},
182 | "output_type": "execute_result"
183 | }
184 | ],
185 | "source": [
186 | "mx.viz.plot_network(mlp)"
187 | ]
188 | },
189 | {
190 | "cell_type": "markdown",
191 | "metadata": {},
192 | "source": [
193 | "## Variable Naming\n",
194 | "\n",
195 | "MXNet requires variable names to follow certain conventions:\n",
196 | "- All input arrays have a name. This includes inputs (data & label) and model parameters (weight, bias, etc).\n",
197 | "- Arrays can be renamed by creating a named variable. Otherwise, a default name is given as 'SymbolName_ArrayName'. For example, the FullyConnected symbol fc1's weight array is named 'fc1_weight'.\n",
198 | "- Although you can also rename weight arrays with variables, a weight array's name should always end with '_weight' and a bias array's with '_bias'. 
MXNet relies on the suffixes of array names to correctly initialize & update them.\n", 199 | "\n", 200 | "Call list_arguments method on a symbol to get the names of all its inputs:" 201 | ] 202 | }, 203 | { 204 | "cell_type": "code", 205 | "execution_count": 7, 206 | "metadata": {}, 207 | "outputs": [ 208 | { 209 | "data": { 210 | "text/plain": [ 211 | "['data',\n", 212 | " 'fc1_weight',\n", 213 | " 'fc1_bias',\n", 214 | " 'fc2_weight',\n", 215 | " 'fc2_bias',\n", 216 | " 'fc3_weight',\n", 217 | " 'fc3_bias',\n", 218 | " 'softmax_label']" 219 | ] 220 | }, 221 | "execution_count": 7, 222 | "metadata": {}, 223 | "output_type": "execute_result" 224 | } 225 | ], 226 | "source": [ 227 | "mlp.list_arguments()" 228 | ] 229 | }, 230 | { 231 | "cell_type": "markdown", 232 | "metadata": {}, 233 | "source": [ 234 | "## Data Loading\n", 235 | "\n", 236 | "We fetch and load the MNIST dataset and partition it into two sets: 60000 examples for training and 10000 examples for testing. We also visualize a few examples to get an idea of what the dataset looks like." 237 | ] 238 | }, 239 | { 240 | "cell_type": "code", 241 | "execution_count": 6, 242 | "metadata": {}, 243 | "outputs": [ 244 | { 245 | "data": { 246 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXEAAAA9CAYAAABbalkHAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJztnXlUVGea/z+1sRdLFRSLiMi+KpuIKy4xKqg4th5jd+KS\npO10eqbT6T7dPemZTPc5fTpzksniTB97skybnIwdDe5OxLhGBRUVISqCgOzIUuxLUQW13N8fDrdj\ng0mQQsL87ucc/qm61Pvl1r3f+77P87wPMkEQkJCQkJCYnMgnWoCEhISExKMjmbiEhITEJEYycQkJ\nCYlJjGTiEhISEpMYycQlJCQkJjGSiUtISEhMYiQTl5CQkJjESCYuISEhMYmRTFxCQkJiEiOZuISE\nhMQkRjneA8hksse+r18QBJmkQ9IxGi2SDknHZNAxEtJMfBKhVCpJS0vjT3/6E6+++upEy5GQkPgO\nMO4zcQn7oFQqWbVqFf/wD/+A0WjkyJEjEy1pQpHL5SxevJi///u/R6PRcOHCBbKzsykrK2NwcHCi\n5UlIPDYmhYkrlUqWLFnC9u3bKSkp4f3336ehoWGiZT1W0tLS2Lp1KykpKezdu5fLly9PiA6VSkVk\nZCSLFy8mISEBb29v2tvbOXnyJIcPH8ZkMo27Bnd3d37wgx+wdetWZsyYgYODA2FhYYSEhPD2229z\n/fr1cdcgIfFdYVKYeGBgICtXriQrK4vk5GSMRiNvvvkmZrN5oqU9FgICAli1ahVz5swhPz+fv/zl\nL/T09Dx2HXK5nCVLlrBt2zaSkpLw9/fH1dWV/v5+oqOj8fDw4P3332e82hsrFApCQ0P5xS9+wcKF\nCwkPD6erq4vjx48THx9PQkICvr6+4zL2dw0HBwemTJnCjBkzSEpKIigoSHyvvb2dW7duceLECZqb\nmydQ5XcDuVyORqNBpVJhNBrp6+vDYrGM65i+vr787Gc/w2q1cvToUYqKioiIiCA6OhoXFxcCAgKY\nOnUqrq6uGAwGzp07x759+x5prO+EiU+bNo3BwUHa2tpGNGYPDw/8/f1RKBR4eXkxffp0bDbbBCh9\n/MjlcubNm8cTTzxBd3c3R44cIT8/f0K0TJ06lVWrVpGZmYmrqysmk4nW1lYUCgWxsbGsWLGCXbt2\njcvDValUEhUVxc9+9jOeeuopnJycqKio4P333+fy5ctER0eTlZWFSqVCoVBgtVrtrmE0yGQyHB0d\nUSqV+Pv7ExMTQ0BAAGazmUOHDtHe3v7Inx0UFMTKlSuZO3cuYWFhhIaG4uPjI77f29tLTU0NM2fO\nZPfu3dy6dWvcJjwKhYJp06axYsUKHBwcAOjv7+fMmTPU1taOu1n+LUqlkqCgINauXSu+5ujoSFBQ\nEM7OzvT09KDX69Hr9dTV1dHU1IRKpcJisfDll1/aRUNgYCAvv/wyW7duxWazkZiYSGVlJVOmTGHq\n1Kk4Ojqi0Wjw9vbGyckJk8nEtGnTuHfvHpcuXRr932wX1Y+In58fK1asIC0tDb1ez6effsrt27e/\n9nfMZjN9fX3jfpMqFApSU1NJS0vDxcWFkpISLly4MKabb7TIZDLmzp3Lpk2bCAgI4MiRI5w6dWrC\nYr5ms5n6+nry8vLo7OykuroavV5PYGAgWVlZBAYGEhsba7ebYQilUklsbCw//vGP2bBhAwBnzpxh\nz5497N+/n76+Pm7cuEF3dzd1dXXI5fIJMXF/f390Oh0RERFotVq8vb1Rq9Xia76+vgwODuLh4cFb\nb7016s93cHAgMjKSdevW8b3vfY+oqCgUCsWw49RqNfHx8UydOhWdTsd//Md/cPXqVXv8iQ+MERUV\nRVJSEvHx8axcuRKVSgWA0WgkJSWFs2fPcuLECTo7O+069lcZmtVqtVra29tpbm5m1qxZ/PznPxeP\nUalUeHp6olKpMJlM9PX10dXVRUNDA3q9HqVSSXt7O7t27eLKlStj0qPT6Vi/fj3btm3Dy8sLgIyM\njK/9HScnJ8LCwpgxY8bkM/GYmBh+8pOfkJCQQHt7O/X19VRXV9Pf3z+RstBqtcyaNYutW7eydOlS\nnJycuH37Nk5OTmRnZz82gwgLC2Pz5s0sWLCAq1evsnfvXioqKh7L2CPR2NjIoUOHuHnzJv39/dTV\n1WEymdiwYQM2mw25XC5euPYkJCSErVu3smnTJmw2G8ePH+eDDz7g3Llz4ndhNBr5/PPPEQTB7t9P\ndHQ0M2fOxGaz0dHRAdx/wAqCIM64AwICCAkJISAggPj4eHx8fPDw8MDV1VX8HKv
VisFg4Mknnxy1\nibu5uZGUlMRTTz1FRkaGuDIdQhAE6urqEAQBrVaLq6srnp6e/N3f/R1FRUUUFxfb5b6SyWQEBgaS\nnp7O6tWrmT17Nh4eHhiNRmw2GwqFgilTpjB9+nQSExPp7e3l3LlzGAyGMY/9t7i6ujJv3jzWrFmD\nWq3m7NmzZGdnYzAYcHZ2xtXVFYVCgc1mo62tDScnJ1xcXPD09ESj0RAWFgaAzWbj3r171NTUjNnE\nfX19SU9Pf+h9UFlZiY+PD+7u7uJrJpOJyspKbt269UhjTqiJ63Q68UTrdDpSU1O5cOECZWVlDxw3\nODiIwWAYt1jrV/Hw8CAjI4PnnnuOmTNnUlZWRm1tLbGxsSxatIgDBw48FhPXarWsX7+eJUuW0NDQ\nwMcff0xeXt64j/u3yGQypkyZgq+vL1qtFp1Oh7OzM+7u7qSmpuLo6Mjq1asZHBzk8OHD37iSGi1+\nfn5kZWWxceNGLBYLp06d4o033qCwsHDYseP18F+4cCG/+MUvkMvlNDY2Dnvf1dWVyMhIHBwcUCgU\nyGR/Le0dGBigra2NxsZGmpqaqK2tpby8fFTjazQa5s+fz9NPP83SpUvx9PQcdozNZqOkpISamho0\nGg0zZ84UQwjJyclERUWNeM5Gi1arZdWqVfzwhz8kNjaW3t5eCgsLyc3NxWw24+joSEpKCvPnz2fG\njBls27aNpqYmbty4Ydf7RqVSMXv2bF588UVWrlxJfX09BQUFGI1GLl68yAcffIBGo0Gr1dLd3U1V\nVRVqtRp3d3ecnZ3R6XSEhobi6+uLzWajpaWF6urqMesaGBigpaWFvr4+XFxcxPBNYWEhZrOZgoIC\ntm3bRkJCgvg7tbW1fPbZZ1y8ePGRxvxOxMSHCA8PJyAgYJiJDwwMiDeo2Wymt7d3XMZ3d3dn2bJl\nvPDCCyQkJFBSUsIf//hHLl26xMqVK8XZ13jj5OTEE088wfr163F2duaDDz7gzJkzExIimD59Ohs3\nbmTWrFmEh4czbdo0VCoVcvn9LQY2m4329nY+/PBD3nzzTYxGo93GdnBwYMGCBWzatAm1Wk1eXh5v\nvfWWXcxoNFgsFpRKJcHBwYSEhDz0OJvNhtFopKWlBYPBQHt7O42NjZSVlVFYWMidO3eoqqoadT4n\nISGBH/3oRyxevBhHR8cRj5HJZOh0OioqKrh9+zbu7u54eXnh6OjIwoULyc/Pp6SkZMzVQ8nJyWzY\nsIGwsDBKSkrIzc3l6NGjnD17VpyJR0REsHfvXqKiosjKyuLQoUOUlJTY9fr18fFh7dq1rFixgv7+\nfm7cuMGNGzcA6Ozs5B//8R+B+3mc5ubmB3ICzs7OhIWFkZ6eTlxcHFarleLiYg4dOjRmXa2trXz+\n+ecolUq8vb1pbGykuLiYjz76CBcXF0JCQnjqqafE4xsbG/mf//kfTp069chjTqiJ9/X1YTabxWVp\nV1cXfX19w45Tq9X4+PggCAItLS3jchMPJRBffvllUlJSKC0tZceOHRw4cACTycR7772Hg4MDGo0G\nHx8f+vr6qKurs3uCVSaTERcXx5YtWwgLC+PQoUMcP36ctrY2u47zbdm4cSM/+clPCAgIeOB1m82G\nzWZDJpOJccYhY7cXISEhPPHEE8TGxnL37l2ys7O5du2aXcf4NhQXF1NYWEhwcPADr5vNZjo6Oujr\n66Ovrw+TyURDQwN5eXnU19dTVFQ05tmdTCYjKSmJmJgYHB0dsVqtoll+9XzL5XKSk5NJTk4WtQ1d\nm56enkyZMgV3d/cxm/icOXMICQkhNzeXXbt2ceTIkQeSl1arldLSUq5du8a0adNwd3dHLpc/sDqx\nB/Hx8cTFxSEIAteuXeODDz4Y0Rfq6+uHvWY0Grl169Yjhy++js7OTg4fPszhw4fF1zQaDXFxccyZ\nM4fvf//7xMfHA/cjDJ999hnvvvsulZWVjzzmhJp4aWmpmPQwmUxcu3btay96i8VCS0sLNTU1YiLD\nXgQEBIjLnI6ODt577z0OHTqEyWRCLpfj4uLC9OnTWbNmDWvWrOHKlSu88sorIz50xoJWqxXrwYuL\ni/nkk0+4efPmiMfKZDKcnZ1xdHSkt7d3XCoB/P39cXd3RxAELBYLAwMDGAwGuru76erqYmBgALVa\nzZNPPkl+fj7nz5+3y7guLi5kZmayatUq+vv7KSgo4PTp0w89XqlUotFoxJnq4OAgvb29Yw6xODk5\n4erqilL511vFbDbT1tZGdXU1X3zxBTdv3qSkpITe3l6am5vtmnhWq9VER0eLpZNNTU3U1NTg6emJ\nWq0G/loJMxR/d3R0FJOMAF1dXfT29trFSGUyGefPn2fnzp1fmyzdt28fy5YteyD2a0+SkpKIjY2l\nu7ubsrIy7t69i9FoFCtNhh54E1mGLJfL8ff3Z+3atfzhD3/Aw8NDfM9gMFBWVsa5c+fGZOAwwSbu\n7OwsJmcqKytpa2sb8UIbem2oYuTdd9/l8OHDvP7663bRIZPJ2Lp1K3PmzEEmk3H06FHOnTtHf38/\nSqUSnU7H8uXLefbZZ0lOTkalUolZbXuTlZXFsmXLGBwc5D//8z85d+7csGOUSiUuLi5otVqSkpKI\njIzk0KFDlJeX2z3kcuLECUJCQoiPj6etrY2SkhLy8/PJy8ujsrKSvr4+FixYwKuvvsqaNWvsZuLh\n4eGkpqbi5+fHhQsX+K//+q8RZ1Vw/zoKCQlh+/btREZGAvfjjEPL/LGEeJKSkvjpT39KRkYGgiBg\nNpupqKhg586dfPzxx+OehA8NDRXL0mw2G/v27eOf/umf8PPzw8/PD7gfH46OjmbOnDnI5XLi4+MJ\nDg4WZ8H+/v78+Mc/xmq18s4774xpNp6Tk4PJZBJDF0PIZDKxxFAul4thN0AstbQnzs7OODk5oVar\n2bx5MxqNhn/913/F1dWV5uZmYmJi6Ozs5M6dO/T392O1Wh9LOFIul+Po6IijoyO+vr788z//M5s2\nbRJ9bii5ferUKd5+++1HqkYZhiAI4/oDCA/7Wbt2rXDz5k3BarUK/f39wuHDh4W0tLRhxyUlJQn7\n9u0TLBaLYDQaheLiYuHXv/71Qz93tDq8vLyEzz//XLBYLMKFCxeE9PR0QSaTCQqFQkhOThb+/Oc/\nC3q9XjCZTILFYhH0er3wxhtvCP/bFMduOlQqlfDRRx8JPT09wrvvviuEh4cPO0YulwszZswQ/vSn\nPwlNTU2C2WwWBgYGhIsXLwphYWF20THSmAqFQpDL5SO+n5KSIuzZs0d455137HY+NBqNsGPHDsFq\ntQoFBQXChg0bhh2jVCoFZ2dn4dlnnxVu374tDA4OClarVbBYLILZbBZu3LghPPvss99Ky8N0/P73\nvxe6u7sFm80mDAwMCJcuXRIyMzMFhULxwM83XQvf9pz87ftbtmwR7xG9Xi+88sor3/iZkZGRwuuv\nvy5UV1cLZrNZsFqtgtVqFYqKioRnnn
lmRK2Pen3IZDJBqVQKWq1WWLNmjbBu3Tph+/btQmNjo2Cx\nWASbzSbs2bNHSEpKeuj18yg6XnrpJaG2tlaw2WyCzWYTjEaj0NraKvT09Ag1NTWC1WoV+vr6hLfe\nektYsWKFEBUVJbi6utrte3nYj7+/v/D8888L2dnZwt8yODgolJeXC7/85S+FwMDAR9Ix0s+Ez8RV\nKpW4HMzLy6O4uHjYcV+NvxoMBs6cOfNIdbYPQy6Xi0/KoqIimpqamD59OpmZmWzevJmgoCCOHTtG\nf38/zz33HLW1teTm5to9ybl06VISEhJobGzkyJEj3L17d9gxQxtenn76aRobGzl48CC+vr5oNBrc\n3NzsqmeIbxP39/DwwMPDQ9y8MFYMBoM4y+3q6qKuru6B95VKJf/yL/9CZmYm4eHhuLi4YLVaaW5u\nFmdeZrMZnU43Jh1qtfqB86pSqYiKiiIiIuKB7//IkSPU19fbPaQ1depUcRne0dFBa2vrN/5OeXk5\nOTk53Lx5k+eee4558+ahVCqJiYlhzZo15ObmUlNTM2ZtTk5OJCYmsnXrVhYtWsTUqVPFVfPQrBwg\nMzMTtVrNv/3bv9ltpXbs2DESExPZvHmzOJ5GowHuh+KGQo3bt2/n2WefxWKx8Morr7Bv3z66u7vt\nomEktFotK1asICsra9h7v/nNb/jkk0/Q6/V2XRVMqIm3t7djMpnExGZiYiIhISEPxIAdHR2Ji4tj\n1qxZYnWI1Wodl/ivTCZj1apVpKWl4eXlhZubGyUlJbzyyitUVlby9NNP09nZyZkzZzh58qTdx58x\nYwZarZb6+nr6+vqGPSS0Wi3Lli1j3rx57N27l1/+8pf4+Piwfft2Fi9ebPfk0WhQKBQEBASwePFi\njh8/PubPGxwc5N69e9y7d4/a2toHklbe3t7s2rWLBQsW4OLiglKpxGw2s3PnTnbv3k1jYyPLly/n\nhRdeIDk5mbCwsBEfiN+Gjo4Ouru78fT0RKlUEh8fT1hY2LBKpV/96lfU1dXx5ptvcurUKbsZhUwm\nE7/XbztpEASBy5cvo1AoKC4u5re//S1ZWVkolUoSExNZt24db7/99ph0LVmyhJ/+9Kdi/xxHR8eH\nJjBdXV1ZsmQJERERnD59mkOHDlFQUDCmTUDV1dX89re/5YsvvhDL9UwmE7W1tfT29hISEoLJZMLX\n15f169ej0+mYPn06Xl5e42riQUFBZGRkiOEjm81GTU0Na9eupaqqiv7+frtP/ibUxC9evEhpaSkR\nERE4OzuzbNkyvL29xS2xvb296HQ6oqOjmTJlCnC/oqW2tnbcNAUGBqJSqSgsLOTYsWPk5ubS09PD\nunXrWL58Offu3ePKlSsMDAzYfeze3l7MZjNBQUHExsZy69Yturq6gPux0RdeeIENGzZw+fJl3njj\nDRwcHNiwYQNZWVmUlZWNWMM8GpRKJVardVQXmUwmw9PTEy8vL4qKiuwT4+O+ERmNRoxG4wMJKp1O\nx29+8xuWLFmCs7MzRqORnJwcPvroI65evUpraysWiwW1Wk1wcDDV1dVj6h9y9OhR/Pz8WLNmDV5e\nXuIMz2azPXCenJ2d0Wg0/P73v6enp4ezZ8/aZaIxtPtztAwlV0tKSh64Lry8vIZV2YwGpVLJxo0b\neemll4iKisLFxYXe3l4KCgoYHBwkPj4enU6HTCajr6+PL7/8UqzHDg4OZtOmTSxatIjz58/z4Ycf\nPvJOUqvVSn19PQcPHhQnDTabjcHBQWw2m5hDUKlU/OUvf+Gdd95hy5YtmM1mPvzww3HzEIVCgbOz\nM3B/38LBgwd57bXXxiVfNcSEmrjBYGDHjh0EBQUxd+5cNBoN8+bNw2KxiDevg4ODmG0XBAG9Xm/3\nLnXd3d28/PLLPPnkk7i4uFBUVMTdu3dpaWmhp6eH4OBgoqOj8fb2prCwkNLSUruOP8SZM2fIyspi\n/vz5/PznPyc8PJzTp09jMBhYvXo13//+9xkYGKCqqork5GSysrJITU1Fr9fz+uuvj6laZ+3ataSn\np/Pee+9RXl7+rUsnZ86cyfPPP09AQAAXL1606yynpaWFlpYWYmNjycjIICcnBw8PD9asWYOTkxNV\nVVW8+eab5OXlUV1djdFoRBAEFixYwJNPPolcLqe+vn5MycfS0lJef/11PvzwQ1QqFU5OTri5uQ0r\n+fTz8+O1115j+vTpvPrqq5jNZs6dOzfmWZdarX5obfi3wWw2P/AwUavVYkL0URgKJ8XHx+Pg4EBP\nTw/Hjx9nx44dLFiwgIiICOD+Q/j06dPs3LkTg8FAYmIizzzzDKmpqbi5uaHT6YiMjOTTTz/lv//7\nvx/pO7LZbPT29o64b+SrVWPt7e3U1dWRkJDAnDlzOHv27LhOBL+qr729nYqKinFNqk74Zp/y8nJa\nW1uxWq0oFApcXFwARixN6uvro7i4eFhmfKxYLBaKi4tpaWlBqVTS2dn5QFx3yMSrqqr47LPPxu0C\nqK6u5q233sJgMJCens6mTZtYvHgxFouFKVOm4OfnR39/P+vWrcNms6HT6bh9+zY7duzg0qVLY5r5\nbdy4kfT0dPr7+9m5c+e3mtVHRkaybds2Fi1aRFlZGXv27Hnk8UeisrKSO3fusG7dOjZv3kxLSwsV\nFRX87ne/44UXXuDkyZMcOXIEvV4vPnQWLlzID3/4Q9LS0ujq6qK+vn5MtfwDAwPU1taK37lCoUCp\nVA5biQ215A0ODiYlJYWQkBAuXbo05hVbc3Mzvb29uLu74+7u/kCZ2qNgMBjGtOdALpeLE6vBwUGK\nior485//jMViEVfSAGfPnuXf//3fuXr1KiaTifLycm7dukVGRgaZmZlEREQwe/ZsfHx88Pf353e/\n+92Y/i6lUomDg4O4gvsqVquVCxcuMHv2bAICAsTY+Xig1+vJz88nLS0NJycnli9fzq9+9atxGw++\nAyZuMBj4+OOPGRwcRKPRMHXqVKZPny62jTSbzWICtKmpiUuXLo3bjs2RkkYqlYrQ0FBCQ0O5cuUK\nJ0+etOuuxK9iNpvJzc2ls7OTEydOEBkZyfz584mPj8fJyQm9Xk9DQ4OY6CspKeHq1atcv359zJqm\nTJmCVqslMzOT/fv3f62Jy2QyQkNDeeaZZ1i7di1yuZzr16/bvfFVXV0dRUVFZGRksHjxYtzd3fny\nyy+pr6/n008/5fr16/T09ODm5oazszPLly9nw4YNpKamYrFYOH78+Jh2wo3EV0vV5HI5vr6+fO97\n3yMmJobg4GAUCoXYP8TDwwO9Xj+m8Wpqaujq6sLd3R2NRkNAQIAYRvo2REVFiaFIuH+/fZvk6MMY\nykkNnQOdTsemTZsIDAxk7ty5ODo60tTUxK5dux64Ljs6Orh8+TJ1dXWcO3eO6OhoFi5cyLRp08Yc\nBhwqQoiPj0ev1/Paa689cH6USiVubm5inHo8SoOHaGlp4fLly6SlpaFUKgkJCeGZZ57hk08+GTf
f\nmHATN5vNnDp1ioaGBtzc3PD392fq1Kk4ODjQ399PZGQkWVlZ+Pj4iHHHx0l8fDyLFi3C3d2dxsbG\nMV9w34TJZKKgoIDS0lJ0Oh3l5eX86Ec/IiYmhrNnz7Jnzx76+vpobW2lsbFRbMg0VgoLC4mLiyM0\nNJSsrCyMRiNVVVUPbFxRqVQEBwcza9Ys0tPTWbhwIQaDgf3797N//367d1fs7+8nLy+P3bt3k5GR\nwRNPPMGsWbO4d+8e1dXVJCUlideNm5sbycnJhIeHY7FYOHjwIB9//PG4NAxTq9WkpKSQkJDAzJkz\nmTt3Lv7+/mJVREdHB21tbXbJm5w+fZr58+fj7+8vtiFYt26duJP469BqtWzcuJGUlBTxtaENSY/K\n4OAgV65c4dKlS8yePZuQkBCmTZuGo6MjjY2NnDx5kosXL3L69OlhIRKr1UpdXR11dXXk5+eTn5+P\nn5/fmJtOhYaGsmXLFmJiYujo6BC/g4KCAjE+vmjRIrRaLS0tLWMa66usXr2auLg42tvbuXLlCjdu\n3ECv13P69GkWLVpEYmIiSqWSl156iSNHjvzfNXG4HyYZ2k49tCySy+XI5XKefvppsTewRqMhKioK\npVL52PoUT58+naioKOrq6igsLHxs/UsMBgPV1dXk5uaydOlSIiIiqKio4PPPPx+XXWiffPLJA1uD\ng4ODuXbtGnfu3KG3txetVktISAgJCQkkJyfj4+PDrVu3yM7O5uLFi1RVVdldE9wPt+3atYvKykrW\nrVtHWloaMTExzJgxA4Cenh4cHBxwcHDAarVSUVHB2bNn2bt3Lzdv3rRbJYBMJkOj0RAfH8/8+fOZ\nP38+MTExBAYGiscMbQTKycnh/PnzdvnHHUVFRRw/fpywsDDCw8OJj4/nxRdfRK1Wc+nSJWpqaoaN\no1QqxcnPxo0bxX8YMTg4yN27dykoKHhkPRaLhby8PHFlHBUVRVdXF6WlpRQVFXH+/HnKy8u/8bz3\n9PSM2byH6O/vp729HZlMhp+fHy+++KLYT0UQBBQKhVjBcuHChVE3IBuJlJQUsXyzs7OToqIiCgsL\n0ev1KBQKsWvj0Oar8Zz9fydM/KtYLBbRoF1dXcVZFvy1L7C7u7vdZqDfhJeXFy4uLly4cMEuiarR\n0tzczNGjR6mrqxvXviHXrl3j3XffRaFQkJyczA9+8AOWLFnCnTt36O7uRqfTERISgkajQRAE8vLy\nePvtt/niiy/G9cE2MDBAeXk5zc3NVFZWsnLlSnx9fUlKSsLFxQUPDw9kMhktLS18+eWX5OTkkJOT\nY5eOdEPIZDIiIyPJzMxk6dKlzJ49W2w1KgiC2KL22rVrHDt2jGPHjtHQ0GCXa8VqtXLixAnCw8Px\n9PTEx8eHlJQUfH19uXjxIiUlJbS0tCAIAjU1NQQFBeHq6kpKSgorV67Ez88PmUyG2WymsLCQvXv3\njrgXYzS0t7eTk5NDVVWVOPu9ffu22Ar3cXP37l0OHDiAv78/sbGxeHp64unpOazfT35+PtnZ2ZSU\nlIx5zFmzZhEVFYW3tzfe3t6Eh4ezevVqmpqa6OzsHFMF0Gj5zpn4V7FarWKNcHh4OHK5nObm5hEb\n4Y8Hvr6+zJgxA6vVypUrV4ZtOHkcdHR0sHv3bnbv3j2u4wiCwIEDBxAEgfXr15OYmIirqyvx8fG4\nu7vj6OhIS0sLeXl5VFVVceLECc6cOfPYbtqenh7Onz/P9evX8fT0ZOnSpXh5eaHT6fDw8KChoYHc\n3FyKiors/q/rFAoFM2fOZPPmzYSHhwP3Z389PT1iK4KKigpOnjxJYWGh3Xtn3717lz179tDd3c3C\nhQuZOXMj/8P+AAAC2ElEQVQmwcHBBAcHix0+bTYbN27cIC4u7oFe2gANDQ3cvHmTTz/9lIMHD9pl\nWd/f309hYeFj7yg5Em1tbZw6dQo3NzcyMjIIDAwUE6xw/yE89M/FS0pK7LKK7+zsHHYeh9o/jMTy\n5cs5cOCA3XstAcjG+yb83y2+j4xWq2X27NliG87s7Oxv7KstCMKwHQePomPp0qX8+te/pquriz/8\n4Q+jroqxl46xMlodcXFxpKeni5s4AgMDcXNzo7i4mM8++4ybN28+0q7M78r5GEnL1+lQKBTMmjWL\ntWvXiklCi8UitpnNycmhvb39kR5oo9EBMG/ePLZu3SoauYuLi1iXPDg4iNFoRKlUYjKZ0Ov1mEwm\nTp8+TXZ2NqWlpQ818NHqGC/GosPFxYW4uDgSExPFHjr/+xl0dnayf/9+ysrKvtXK8Zt0REVFsWXL\nFpYtWybW8bu5uT3w8Pgq165d48UXX+TGjRujCoeOdM/8Ld95E38U7GUWmZmZPP/88xQUFPDHP/5x\n1DO874ppSTq+Wctk0qFWq0lNTWX16tVi6akgCHR3d9Pc3IyzszOtra1cuXKF1tZWysvLH9o8bCw6\nxoPJpmPx4sUkJSUB93drDrUC/luOHz9OQUEBly9fHpWPSCY+Rh2bN28mNTWVAwcO8MUXX0yYjrEi\n6fhmLZIOScdk0DES9u3i/38MhUJBS0sL9+7dm2gpEhISEiMizcQlHf/f6RhJi6RD0jEZdIzEuJu4\nhISEhMT4IYVTJCQkJCYxkolLSEhITGIkE5eQkJCYxEgmLiEhITGJkUxcQkJCYhIjmbiEhITEJEYy\ncQkJCYlJjGTiEhISEpMYycQlJCQkJjGSiUtISEhMYiQTl5CQkJjESCYuISEhMYmRTFxCQkJiEiOZ\nuISEhMQkRjJxCQkJiUmMZOISEhISkxjJxCUkJCQmMZKJS0hISExiJBOXkJCQmMRIJi4hISExifl/\n58aS+q+DnZwAAAAASUVORK5CYII=\n", 247 | "text/plain": [ 248 | "" 249 | ] 250 | }, 251 | "metadata": {}, 252 | "output_type": "display_data" 253 | } 254 | ], 255 | "source": [ 256 | "from sklearn.datasets import fetch_mldata\n", 257 | "mnist = fetch_mldata('MNIST original')\n", 258 | "np.random.seed(1234) # set seed for deterministic ordering\n", 259 | "p = np.random.permutation(mnist.data.shape[0])\n", 260 | "X = mnist.data[p]\n", 261 | "Y = mnist.target[p]\n", 262 | "\n", 263 | "for i in range(10):\n", 264 | " plt.subplot(1,10,i+1)\n", 265 | " plt.imshow(X[i].reshape((28,28)), cmap='Greys_r')\n", 266 | " plt.axis('off')\n", 267 | "plt.show()\n", 268 | "\n", 
269 | "X = X.astype(np.float32)/255\n", 270 | "X_train = X[:60000]\n", 271 | "X_test = X[60000:]\n", 272 | "Y_train = Y[:60000]\n", 273 | "Y_test = Y[60000:]" 274 | ] 275 | }, 276 | { 277 | "cell_type": "markdown", 278 | "metadata": {}, 279 | "source": [ 280 | "Now we can create data iterators from our MNIST data. A data iterator returns a batch of data examples each time for the network to process. MXNet provide a suite of basic DataIters for parsing different data format. Here we use NDArrayIter, which wraps around a numpy array and each time slice a chunk from it along the first dimension." 281 | ] 282 | }, 283 | { 284 | "cell_type": "code", 285 | "execution_count": 7, 286 | "metadata": { 287 | "collapsed": true 288 | }, 289 | "outputs": [], 290 | "source": [ 291 | "batch_size = 100\n", 292 | "train_iter = mx.io.NDArrayIter(X_train, Y_train, batch_size=batch_size)\n", 293 | "test_iter = mx.io.NDArrayIter(X_test, Y_test, batch_size=batch_size)" 294 | ] 295 | }, 296 | { 297 | "cell_type": "markdown", 298 | "metadata": {}, 299 | "source": [ 300 | "## Training\n", 301 | "\n", 302 | "With the network and data source defined, we can finally start to train our model. We do this with MXNet's convenience wrapper for feed forward neural networks (it can also be made to handle RNNs with explicit unrolling). " 303 | ] 304 | }, 305 | { 306 | "cell_type": "code", 307 | "execution_count": 8, 308 | "metadata": {}, 309 | "outputs": [ 310 | { 311 | "name": "stderr", 312 | "output_type": "stream", 313 | "text": [ 314 | "INFO:root:Start training with [gpu(0)]\n", 315 | "INFO:root:Epoch[0] Batch [200]\tSpeed: 69175.44 samples/sec\tTrain-accuracy=0.389050\n", 316 | "INFO:root:Epoch[0] Batch [400]\tSpeed: 70879.78 samples/sec\tTrain-accuracy=0.903850\n", 317 | "INFO:root:Epoch[0] Batch [600]\tSpeed: 70539.99 samples/sec\tTrain-accuracy=0.937100\n", 318 | "INFO:root:Epoch[0] Resetting Data Iterator\n", 319 | "INFO:root:Epoch[0] Time cost=1.119\n", 320 | "INFO:root:Epoch[0] Validation-accuracy=0.950800\n", 321 | "INFO:root:Epoch[1] Batch [200]\tSpeed: 70849.61 samples/sec\tTrain-accuracy=0.947300\n", 322 | "INFO:root:Epoch[1] Batch [400]\tSpeed: 70369.21 samples/sec\tTrain-accuracy=0.961350\n", 323 | "INFO:root:Epoch[1] Batch [600]\tSpeed: 70205.72 samples/sec\tTrain-accuracy=0.963700\n", 324 | "INFO:root:Epoch[1] Resetting Data Iterator\n", 325 | "INFO:root:Epoch[1] Time cost=0.857\n", 326 | "INFO:root:Epoch[1] Validation-accuracy=0.960600\n", 327 | "INFO:root:Epoch[2] Batch [200]\tSpeed: 70656.44 samples/sec\tTrain-accuracy=0.965600\n", 328 | "INFO:root:Epoch[2] Batch [400]\tSpeed: 70186.51 samples/sec\tTrain-accuracy=0.971100\n", 329 | "INFO:root:Epoch[2] Batch [600]\tSpeed: 70212.13 samples/sec\tTrain-accuracy=0.972400\n", 330 | "INFO:root:Epoch[2] Resetting Data Iterator\n", 331 | "INFO:root:Epoch[2] Time cost=0.858\n", 332 | "INFO:root:Epoch[2] Validation-accuracy=0.965300\n", 333 | "INFO:root:Epoch[3] Batch [200]\tSpeed: 70510.46 samples/sec\tTrain-accuracy=0.973100\n", 334 | "INFO:root:Epoch[3] Batch [400]\tSpeed: 70383.14 samples/sec\tTrain-accuracy=0.977300\n", 335 | "INFO:root:Epoch[3] Batch [600]\tSpeed: 71392.10 samples/sec\tTrain-accuracy=0.979250\n", 336 | "INFO:root:Epoch[3] Resetting Data Iterator\n", 337 | "INFO:root:Epoch[3] Time cost=0.854\n", 338 | "INFO:root:Epoch[3] Validation-accuracy=0.969300\n", 339 | "INFO:root:Epoch[4] Batch [200]\tSpeed: 70488.59 samples/sec\tTrain-accuracy=0.980000\n", 340 | "INFO:root:Epoch[4] Batch [400]\tSpeed: 69089.86 samples/sec\tTrain-accuracy=0.981450\n", 
341 | "INFO:root:Epoch[4] Batch [600]\tSpeed: 69732.54 samples/sec\tTrain-accuracy=0.982200\n", 342 | "INFO:root:Epoch[4] Resetting Data Iterator\n", 343 | "INFO:root:Epoch[4] Time cost=0.865\n", 344 | "INFO:root:Epoch[4] Validation-accuracy=0.971000\n", 345 | "INFO:root:Epoch[5] Batch [200]\tSpeed: 70675.91 samples/sec\tTrain-accuracy=0.982950\n", 346 | "INFO:root:Epoch[5] Batch [400]\tSpeed: 70601.79 samples/sec\tTrain-accuracy=0.984400\n", 347 | "INFO:root:Epoch[5] Batch [600]\tSpeed: 69826.80 samples/sec\tTrain-accuracy=0.984700\n", 348 | "INFO:root:Epoch[5] Resetting Data Iterator\n", 349 | "INFO:root:Epoch[5] Time cost=0.857\n", 350 | "INFO:root:Epoch[5] Validation-accuracy=0.969800\n", 351 | "INFO:root:Epoch[6] Batch [200]\tSpeed: 69092.77 samples/sec\tTrain-accuracy=0.985200\n", 352 | "INFO:root:Epoch[6] Batch [400]\tSpeed: 66663.98 samples/sec\tTrain-accuracy=0.986800\n", 353 | "INFO:root:Epoch[6] Batch [600]\tSpeed: 68948.41 samples/sec\tTrain-accuracy=0.987400\n", 354 | "INFO:root:Epoch[6] Resetting Data Iterator\n", 355 | "INFO:root:Epoch[6] Time cost=0.884\n", 356 | "INFO:root:Epoch[6] Validation-accuracy=0.971500\n", 357 | "INFO:root:Epoch[7] Batch [200]\tSpeed: 69331.52 samples/sec\tTrain-accuracy=0.986400\n", 358 | "INFO:root:Epoch[7] Batch [400]\tSpeed: 70849.91 samples/sec\tTrain-accuracy=0.987500\n", 359 | "INFO:root:Epoch[7] Batch [600]\tSpeed: 70912.20 samples/sec\tTrain-accuracy=0.989650\n", 360 | "INFO:root:Epoch[7] Resetting Data Iterator\n", 361 | "INFO:root:Epoch[7] Time cost=0.858\n", 362 | "INFO:root:Epoch[7] Validation-accuracy=0.972400\n", 363 | "INFO:root:Epoch[8] Batch [200]\tSpeed: 69788.06 samples/sec\tTrain-accuracy=0.986250\n", 364 | "INFO:root:Epoch[8] Batch [400]\tSpeed: 69414.77 samples/sec\tTrain-accuracy=0.988250\n", 365 | "INFO:root:Epoch[8] Batch [600]\tSpeed: 69092.99 samples/sec\tTrain-accuracy=0.990100\n", 366 | "INFO:root:Epoch[8] Resetting Data Iterator\n", 367 | "INFO:root:Epoch[8] Time cost=0.869\n", 368 | "INFO:root:Epoch[8] Validation-accuracy=0.977000\n", 369 | "INFO:root:Epoch[9] Batch [200]\tSpeed: 69202.95 samples/sec\tTrain-accuracy=0.990950\n", 370 | "INFO:root:Epoch[9] Batch [400]\tSpeed: 69427.58 samples/sec\tTrain-accuracy=0.988450\n", 371 | "INFO:root:Epoch[9] Batch [600]\tSpeed: 70445.32 samples/sec\tTrain-accuracy=0.990050\n", 372 | "INFO:root:Epoch[9] Resetting Data Iterator\n", 373 | "INFO:root:Epoch[9] Time cost=0.866\n", 374 | "INFO:root:Epoch[9] Validation-accuracy=0.973300\n" 375 | ] 376 | } 377 | ], 378 | "source": [ 379 | "model = mx.model.FeedForward(\n", 380 | " ctx = mx.gpu(0), # Run on GPU 0\n", 381 | " symbol = mlp, # Use the network we just defined\n", 382 | " num_epoch = 10, # Train for 10 epochs\n", 383 | " learning_rate = 0.1, # Learning rate\n", 384 | " momentum = 0.9, # Momentum for SGD with momentum\n", 385 | " wd = 0.00001) # Weight decay for regularization\n", 386 | "model.fit(\n", 387 | " X=train_iter, # Training data set\n", 388 | " eval_data=test_iter, # Testing data set. 
MXNet computes scores on test set every epoch\n", 389 | " batch_end_callback = mx.callback.Speedometer(batch_size, 200)) # Logging module to print out progress" 390 | ] 391 | }, 392 | { 393 | "cell_type": "markdown", 394 | "metadata": {}, 395 | "source": [ 396 | "## Evaluation\n", 397 | "\n", 398 | "After the model is trained, we can evaluate it on a held out test set.\n", 399 | "First, lets classity a sample image:" 400 | ] 401 | }, 402 | { 403 | "cell_type": "code", 404 | "execution_count": 9, 405 | "metadata": {}, 406 | "outputs": [ 407 | { 408 | "data": { 409 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAP4AAAD8CAYAAABXXhlaAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJztnV1sbNlV5//LLttl15d9753ultJDMwhpkEZCLUYTadQj\nTRCIiUYjNeIhE4WHZJAQDwSQeEngpV8JD5GikXiBJmoQEQNIme55gQRFo1GQGFpDeuhAhyCNuiGQ\nvul7r12usl0fLu95sNe5q1btfapsV5VP1fn/pKM6Vb52nfL1/6y1114fEkIAIaRcbNz1BRBClg+F\nT0gJofAJKSEUPiElhMInpIRQ+ISUkFsJX0Q+KiLfEpFvi8hn5nVRhJDFIjfdxxeRDQDfBvBjAP4J\nwJsAPh5C+Jb7d0wUIOSOCCFI7PXbWPwPA/i7EMJ7IYQhgN8H8PItfh4hZEncRvgfAvAP5vl3rl4j\nhBQcBvcIKSG3Ef4/Avg+8/z5q9cIIQXnNsJ/E8APisgLIrIN4OMA3pjPZRFCFknlpt8YQhiJyKcB\nfAWXN5BXQwjvzO3KCCEL48bbeTO/AbfzCLkzFrGdRwhZUSh8QkoIhU9ICaHwCSkhFD4hJYTCJ6SE\nUPiElBAKn5ASQuETUkIofEJKCIVPSAmh8AkpIRQ+ISWEwiekhFD4hJQQCp+QEkLhE1JCKHxCSgiF\nT0gJofAJKSEUPiElhMInpIRQ+ISUEAqfkBJC4RNSQih8QkoIhU9ICaHwCSkhFD4hJYTCJ6SEUPiE\nlJDKbb5ZRN4F0AZwAWAYQvjwPC6KELJYbiV8XAr+IyGEw3lcDCFkOdzW1Zc5/AxCyJK5rWgDgK+K\nyJsi8rPzuCBCyOK5rav/UgjhuyLyz3B5A3gnhPD1eVwYIWRx3MrihxC+e/X4AYAvA2Bwj5AV4MbC\nF5E9EalfndcA/ASAb87rwgghi+M2rv6zAL4sIuHq5/xeCOEr87ksQsgikRDCYt/g8sZACLkDQggS\ne51bcYSUEAqfkBJC4RNSQm67j08KhohARLCxsRE918P+e/s4jRACLi4ucHFxkZ3b1+w12HP73MeV\n7HM9DyGMnft/R24Hhb/CeLGqyCuVCiqVCra2trJzPTY3N8duBv4GocTECACj0QjD4RCDwQDD4XDi\nCCFgY2MDm5ub0UcVvj30PWI3FX9Q/POBwl9RvKXWx83NTWxvb2NnZyd6bG1tjQlRDy9MIG51h8Mh\nzs7O0Ov1cHZ2NnauYrU3Gn/z0Z/vPQYr9PPzc4xGI4xGo+zcXw+5HRT+CuLdaHuuwt/d3cXe3t7Y\nsbu7i52dHWxubmJzczPzAOzzmEW2guv3++h2u2PH5uYmQgiZSLe2trC9vY3t7e2x8+3tbWxsbODi\n4gKj0Shq0dWjOD8/x3A4zD6X3hjIfKDwV5TYml1EUKlUMuHX63XU63U0Gg00Gg3U63Xs7u5OuP/W\nKucJP4SAs7MzHB0dod1uZ97DxcVF5gkAyMRerVaxs7Mz9rixsZGJXq26PT8/P8dgMMBgMMg+Vwgh\ns/qxGAG5PhT+iuNvANbi12o1tFottFot7O/vY39/H3t7e9ja2koeGxsbURdcz09OTrC7u5tZ7xAC\nhsMher0eKpXLPye9+VSrVezu7mbH3t5eJnzrxtvnw+EwW3YAT4OJo9EoN0BIrgeFv4LELH1M+PV6\nHc1mE/fu3cO9e/dw//591Gq1LAZgXXDviqei951OZ+wGoaLvdruoVCoIIWBrayuz8Ht7e6jVaqjV\natjb20OlUsH5+Xn0GI1G6Pf7E6I/Pz/P4g/2d0Dx3xwKf4Xx1l6DdGpt6/U6Wq0WDg4O8ODBAzzz\nzDNoNBrJwJ+u/2NrbxV+u90eE/3Z2Rm63S52dnZQqVRwcXEx5uqr56FLjUqlkq3h1cLbR911sKIf\nDocTwtfPT/HfDAp/xbAij0Xl1Zqra62iU5e/1WrlCj9m8e0hIjg9PcXp6SlOTk4mjsFgkMUUVOz2\nuRV+7LFarWYxBw062s85GAySHgmDf7ND4a8gKvRYdN6uqavVanbYIJtG2/X7ZnGjY0uJRqOB/f19\n9Pt9nJ+fZ15AvV5HrVaLPm5ubk649/Z5v99HrVbLbi56g9Hzfr8fvWHoOcU/GxT+imEF6KPyW1tb\nE6K353Zd74Ufc6Xt+4UQojEEK3oVtd9G1KNWq2X/JhXg6/f7mcjPzs4mzs/OztDv99Hr9bLHXq8H\nAFnkn0yHwl9BVKgaPdc19dbW1pjgvdVX8dvtO/UUUqJXwevXdTmhFn80GmWi397exmg0Gntv74Fs\nbm7mbucNBoMxkcfOdVlxenqaBQJt1J9Mh8JfMWwQT628zdTLE7wePnEnZe31/VT8wNPMwL29vUz0\n9mZwcXGRvY99Xz3XGELqUOGnjpOTExwfH2N7exubm5sAMLEbQKZD4a8Y3tXXrTMVV9763oovlkOf\nWufb162r70Vfr9cRQohuE6a2C/2jbg/6dGB97HQ6UdFvbW3R4l8DCn8FiVl871bnWf1YkU7MWnoh\nWaFb917d/n6/n+3j27iDPqo4fS2APVT4eqjo9djb28tShFX0Z2dnqFQqtPjXgMJfMfJcfR/M84E9\nfdSfE3uc9t4qdmvpbYAOQHLHQb2LvLJcFb4N3NmjWq1motdlQbfbpcW/JhT+CmD/oG1Qz+7Xa15+\ns9lEo9FArVbLhK/RfA3o5WFFGBOS9RBirrq9Mfkcg7xYgjIajaKegi4VLi4ucHJygk6ng2q1mn2u\nWX42eQqFX0Dyqu9sOqwm5zQaDTSbTTSbzSwvv9lsZkU5GtC7jjBSa3z7dWu9bQDQN/64yef3GYk2\nd8Em9NzmfcoMhV9Q7B+9/eNW4dtU2P39/Uzwmp3XbDYzq2+t4qzvbUVvRa3YYh491+/xorwpqexE\niv72U
PgFxFs6ewNQt9cKv9Vq4d69ezg4OECr1cpSZa3wU3v1065DRR8TP4Ax0Vvhx8qGAeR6Ef7z\ne2s/zeqT2aHwC0YqF98m7XiLbwtxms3mWLacXQffxNXPE32sdj8leGB8OZD6mf73kOfqx6oUyWxQ\n+AUlZfFirr4K//79+2g2mxPJMzdZ4+s1pMTqbwyp3H5/bsm7ofibX8zS092/ORR+AUlZus3NzZmE\n7/fR9fymgTYgX/Q2JuC/z+PF7p/7m0Xq90A3/3ZQ+AUk748+z9VX4ecFw25zTdNen7U2/iZufmpr\nkBb/ZlD4BUP/uG07LGu5NXCXOvb29qKC0PPrNq6IJdvktcfOE3XMmucFAe0Rawri35vMDoV/x3iR\nqOhTufb7+/t48OABDg4Osi07bV6RWvNe1xrmiVqz5mynXPuo75d3pJJ77Bah7birzTcHgwF6vV52\nrjX8/kZApkPh3xGpda1vj6296vRxf38f9+/fz5J0UsJPvcc0vJX1575xhj80n99vRcaWLNqqS7MJ\nNc/Av4+KX1N5tRnHcDjMUoUp+utB4d8hXqAqEtszz2bm+T17Tc31wo/97OvgXWv7XNtfW0tsz7V4\nx++763O7bNH+fHqd3q237bZV8P1+P2rx2XnnekwVvoi8CuA/AXgYQvjhq9cOAPw3AC8AeBfAx0II\n7QVe51rhLbG10urqa/Cu2WxmrbE1QUdTc3XPXoXvk3RuKv6YAPV8OByOidB2w+n3+2OTdGyRjj7q\nroQdh6WejmJFr5Zdxa+ufszi0+rPziwW/4sA/iuA3zGvfRbAn4YQfl1EPgPgV65eIzOSt/5Vi6/C\n14j9/fv30Wq1JnrZWYuvP9u+z3WIid4eVoCxRhkhhGhLMFtJaEWvXkEsmOctvnf1afFvzlThhxC+\nLiIvuJdfBvDvr85fA/A/QeHfiJTwrcW37bFbrdZYya0+plz9m5ASv1pgrYG3jTD1Ud132w7Mnmsc\nwH5eXevrzSDP4ntXnxb/Ztx0jf9MCOEhAIQQ3heRZ+Z4TaUgttWmQtCovpbZeuHHZtJpjfxt97Pz\nRG+tr7bB8nP0Li4uxq7JD+6wkX/9rLqU8O/vbzZak+9dfVr86zOv4B5vtTOiAbzUlpaN5tuyWw3s\nNZvNidl3tlvuLNhofWyrLjb+Wo+Y4O3zEAJ2dnbGIu0q8rycAH99VvRq4W1cwbv6tPjX46bCfygi\nz4YQHorIcwC+N8+LWmdEJHcNrIE7O+TSd8f1TTJnsfJ2T963tLaHF5rdQ7dr+1gX3NPT0+zGtrW1\nlW3t2TRj3xzEpxOnLP4srj6ZnVmFL1eH8gaATwH4HIBPAnh9vpe1vqiFt26wfdR6et2q06i9FX6q\nUCWGFYQVlHWX7aH75bb9VewxFdnXG5i63rZjkHozWjikVYO+0+8swo+5+hT/7MyynfclAB8BcF9E\n/h7AKwB+DcAfisjPAHgPwMcWeZHrhFp82zrLBuqs8DUFV2vqvfCntbSyVl6xe/EpAdsovX+uAzRS\nSwH9XLb/nq0v0I5ANvBnhT9N9Fb43tUnszNLVP8TiS/9+JyvZe2JBfD8NFnNyEtZ/FlLU73obcRc\nhR/bkrOjq/yhs/FS6boqQLXEMVffj+mOxSdS4levwgrfDuWg+GeHmXtLQkVpXV8Vvs3Qi1l8baZh\nLb6tXku5+jHxe+H74ZcarEudDwaD6HtYVPj281rh25Rd7+pb0cesvorfCp+ivz4U/hLwCTXe4uu2\nnZ1oq3n4sTX+tHJUv67XRyt8bWN9enqKbreLTqczcdjX9VxHVsc8D319OByOrfHV4uuyJbWjYa/Z\nXmcquGfFT+FfDwp/wXhhWouvYlDha2quuvp2je+Fn/r5ihe/F761+N1uF8fHx2i322i32zg+Po4e\nKny/G+F3JgaDwZir74N7/sZlvZZUrn4suGctPq3+9aDwF0QqZ967+r6hRqwvvo9+5xGrXbeH34fv\ndDqZ6I+OjtBut7PXvOiPj48xGo0mEoc0E0/Laq2gY/0FYr8je+2xPXw7VSclfjI7FP4CiGXk6XMV\ngZ1+owE+20zDpuKq6GN4K+fLWf356enpmMj1sKLvdrs4PT3NsuS8O22TkGKDO208Qq89tiyJJd34\nDEHNEvQjs1X8XOffDAp/QaRcWRWDn4Rjs/T8tldsUkxqHW8r6Lx73O/3s2mz6tJb977dbmeit8K3\nUXov+jzh6y5EbCJvrHuOzxy0OQV2d0Etv782Cn92KPwFEBO8nluLr66+t/i2S65182MW0z+qcKyl\njI2ZTh0nJydjs+p0314tqv089rNYD0af643BbzvarUWfMpxn8bUQKLbWp6t/PSj8BeEFb/vixwZd\n6pZevV4fWz/7LTwgHbgDMCF8vyVnI/W6drfPrRttxWWLa+za3Y/qnsXix0pw7frer+29xbfNP+jq\n3wwKf87Eaux92ykr/JjFt+tjazW9qxw7rKtvt+psgE5f84+dTge9Xi+zojZGECu6uc4a3wclfRWg\n7fCjrr7PJlTh2+ui8G8Ghb8gYqJXi++j+n6Nb/+9fYyRqq6zW3U2gKfCTx3aRccX78Rcfb/Gt0M8\nVPx5xUSxEmDr6tsSYHX1T05OktdHZofCXwDXsfixqH5sNyCWkusDZN7iq6t/fHyMw8NDPHnyBO12\nO5qZp4f2zUsdAMY+j220Mc3i288QE7119fOCe7ElAoN714PCXyB55bKxJcEsVXZq1VP58ipo795b\ni2875thI+dnZGc7Pz3Ov1wf17HJFD5t/YLcjU6K3FtwuM+wNwD7GbnrkelD4CyC19haRaB85FWK3\n2022pdZDLXqqpPbk5CSz7oeHh2P78yp2G8BLRcW9t6Hnsa1IDUzqmG6bgKQZhzYXP+XeW8H79bvf\nAdDfs/2dk9mh8BeEFz2AqPDt+rXb7Wbism2pbZdaraVP1cx3u10cHR1lh+7R+8ScvASYPE/ERvG1\nIaivNYgJ31v8mPBvInoK/mZQ+AvCCt5i17De4nc6HYhItDOP/oGfn5+PNbu0JbM+71735r3F94Uu\nsah4XgJSyuKr8LX7b57wdcliXXwvehu4s9dH0d8eCn/O6B+1dWvtTWAWV1/38M/Pz7NceOAyqKYW\nX72EVDVdqsJO1/F2ieBd/Vhg0kbytbLQCt9afLvW98LPc/X9NaWsvv6e7e+cXA8Kf0H4NFq1dtNc\n/Y2NDezs7OD8/Bw7OzsTe+f6vTZi73PtU7X0ul2n12Af/XZdKsbgLb519bWy0M77y7P4s7j69tpi\ngTyK/mZQ+AvA/jGq6DUwZzPTUhbfJ8xoJF0FYdtb2626w8PDLGrvI/Y26y1VuafCB/JHdds1vnf1\nm83mRPtvG9X3oveuvhV/rLuP//2Sm0HhLwj/x+ldfbvGV2HqGt8WxKilr1QqY96CtfhHR0d4/Pgx\nHj16hOPj47ESVn3Ucx1okbcdlif61BrfjvWyY7NsoFJExvbe52
Hxyc2g8BeAtZxWYD6qr9beNtkI\nIUyMhvbPbTmtdfOPjo7Q6XSSc+20QcYseFffT7n1STtq/W/SYcd6P7a1FttrLQ4Kf4HYIJ/tLmMz\n62zZLQD0+/2J8Vj2cTAYZK69de+73e5EkwofsZ9VOHnWPjXMI9WKK5ZxGGuiaVNybUff1K4DuR0U\n/oKJid9afBW9CqLX641Vutkg2c7ODobD4Zi1t+W0Vix+j37WXHa/dx/LJ/B5Bv55rPuvLcm11t7H\nOXTL0dbb6+cg84PCXxDe3fd/9Dp8wkb7h8MhTk9PJ1pb2WDZaDTKtuZsma1u1fnxUjdpRjnN4qdu\nAF70KYs/q/DtZ6HFny8U/gKJid82u7SWXj2AnZ2d5HgtHTBpI/W+JZUfLXWTZpSxrbw8F9+K3rbZ\nStUg2N0Nm4yk2462tx5d/cVA4S8YL34rfBW9FYC1qDF3+uLiYmKqjH0e2wa7jsW323l5a3wv+tg8\nv1Qpru+ea9f4vt+f3shYdjtfKPwl4Pf1dUvNil6FFQuQ2UNjBL6wxbv1NqB3m+Beao2fdxPQnxF7\nnMXVp8VfPBT+ktE/ehW9XwvH6vDteUzMvi4dSI/QmgV7Tak1fsra6+5EXnkx1/h3D4V/BxTZbfXW\n3ncF9r0AbaMN3x7MoqJV0ft6BdtM03fRZVR//lD4ZALfXcc2BfXz7WNjroH89tmp1lrsm788KHwy\nhrf2vhtwnvAVm17rU219M032zb8bKHwyQZ7wY0M8Y5NyfEmyrcHPa6bJvvnLIX8QGwAReVVEHorI\nX5nXXhGR74jIX14dH13sZZJl4fvq+Uo820/Pd9JVvOh9Qc609tl09RfPVOED+CKA/xB5/fMhhB+5\nOv54ztdF7ojruPrW4vueejHR25n3eQMzYq4+hT9fprr6IYSvi8gLkS+lW8iSlcaWAvtxX9cJ7qW6\n7XiLz775y2cWi5/i0yLyloj8loi05nZF5E7xLbZ8/3+/xp/V1bfWPlaOS1d/udxU+L8B4AdCCC8C\neB/A5+d3SeQusPv3tubei9720vODMX12XqzJhg3q2Ufbe8CWFFP0i+FGUf0Qwgfm6W8C+B/zuRyy\nLFLZgX7EV2zSjxe/X+PH3Ho91zW9zcNXgfutPyt2Cn++zCp8gVnTi8hzIYT3r57+FIBvzvvCyOLI\nq56bNtSzVqtFt/TU1Y8J31p0n4fvU3Jvk2pMZmeq8EXkSwA+AuC+iPw9gFcA/KiIvAjgAsC7AH5u\ngddIFoDvq6fnqTHe2lsvZvFjI7J8U1EbyIsN9LABPIp+8cwS1f9E5OUvLuBayJKINdNMBfViY7x1\nCVCtVscsfsrVt9t2NnDnE3To5i8PZu6VjJjofVNNu4Vn1/iNRgP1ej1L3rEWX4Xvt+585N67+j5q\n70VO0S8GCr+kpFpoxyy+HZxRr9fHOgLZCj3FW3y/X++bglrxAxT7MqDwS8i0vvmx/Xsb3Eu13Zrm\n6t/E4pPFQOGXkFSHHd8z37r0tuNvqo02MFl6a3vqxRpt2AAfRb88KPySERO9baOlhTfejY/11FPs\n/ntsYIgO99R24NpXT5uDDodDJuksGQq/hMSEr2K33XVsEU6qX75i03Pt2t520NUZANpNV60+Lf7y\nofBLiF3bx9x7b/G9+PVnxDru2C7CtqVWp9MZE763+BT+cqHwS0bM1Vehx6x+zOJ77MCQ2Ppe22bb\nqT++oSZ76i0XCr+EqPhTAT17E4iJP5ZcY6vxYsK3Ft9G923ffFr85UHhlwyfuOO38PKCe7a3vx6+\n+YbdxvODMjS456f40tVfPhR+CZkW3JsmftsUwzfUnMXi+/JbCn/5UPglQQNxsbRczcfXQpy8Ihw/\n2MM31PSJO7G+en4SEGvulw+Fv6akRljpXr1Nw9Uc/Hq9jv39fRwcHKDZbKJer2c3ADvOO9Vhxw7L\niB02U4/ZencLhb+G5I3jqlQqWQaeFt60Wi00m020Wi20Wq1M+LVaDbu7u1mzDZudF2ur5bvupOb7\n2SacFP/dQOGvKV78mnhjLf7e3l4m/IODA+zv72c3AWvx1d2PZevFeuqlxO+Ff5MR3mQ+UPhrSEzw\nNmFH1/bW4h8cHOD+/ftotVqo1WrZYV39aRbfCj4lfit2b/Ep/uVB4a8ZMffebt+pq69r/JjwtYW2\nHZ6hwp/m5k+z9r7pBl39u4HCX1Ni+/UbGxtj0Xwr/Hv37uHBgwdoNptjlXlanWf76gFxqz8tqKeP\nsZl6ZLlQ+GtIqsuOnY4TW+M/ePAAjUYj277zj6lJOd7Vn7bWZ1utu4fCX1PyxB+biWebbfi4gM3R\nt0k6dq9ee+hNm3HPiTjFgMJfQ/I67KjL79Nwfa19qvz24uJibMS15t3rcXR0hMPDw6wYRyvwvKUn\ndwuFv6bEIvox0Vvx+446qZr78/Nz9Pv9rMmG1tefnJzg6OgIR0dHWfkthV9MKPw1I7Z/7119L3ov\n/pjoFWvxz87Oxsptj4+P0W63Jyy+Ntug8IsDhb+m5Ln7KUvvp97OYvFV+IeHh5m1ty221OKzCKdY\nUPhrSEr0eWt7H8SLWXvgqcXXcVhabnt4eIgnT57g6OgI3W4XnU4nE75afAb2igOFv8akLPq0G0Ae\neRb/0aNHaLfb2bhr216Lrn6xoPDXkLx1fkzw3uoDk/PrbE89v8ZXi//48WO02+2syYZvoU3hFwcK\nf82wTTY0Wcf2xtc0XD/f3g/FACbbasXaZ9ue+Z1OB51OZ2wuHttnFxMKf83QXnq2yYYe1WoVBwcH\naLVaWcMNHXyZKrv153bmvR52DLZN2LFtsyn6YkHhrxm29NaOv9Jjf38fzWYTjUYjE76tt0812tDD\niz4m/lglHsVfLCj8NcOm5arw6/V6JnYVfp7FBzBRM+877HjBW7fedtmx4ifFIT+EC0BEnheRr4nI\nX4vI2yLyi1evH4jIV0Tkb0XkT0SktfjLJdOwrv7Ozs6Y8G2zDRX+zs5O1lQzr94+5uJPs/i26Qat\nfbGYKnwA5wB+OYTwrwD8WwA/LyI/BOCzAP40hPAvAXwNwK8s7jLJrKirby1+o9FAs9mc6KcXc/UB\nTIje19nniV5fY2+9YjNV+CGE90MIb12ddwG8A+B5AC8DeO3qn70G4CcXdZFkdnxwTy2+lt7GhG9d\n/dj6PlVbb9177/bHhE+Kw7XW+CLy/QBeBPDnAJ4NITwELm8OIvLM3K+OXJuU8NXVb7VaWftsdfXz\nmmle1+IPBoPsWvy2ICkOMwtfROoA/gjAL4UQuiLi/zf5v3sH+NTaVL29ts9uNBrZ67G++Xa/3q/t\nde/e1t/7rTsG8VaDmYQvIhVciv53QwivX738UESeDSE8FJHnAHxvURdJxon11dPDT8TRxB0rdhW8\nHYPtI/qj0SjL0LNHqskG3fnVYpbgHgD8NoC/CSF8wbz2BoBPXZ1/EsDr/pvIYohNvPUDML34q9Vq\ndthIvp97n+qwYzP0t
PDGjrim8FeLqRZfRF4C8NMA3haRb+DSpf9VAJ8D8Aci8jMA3gPwsUVeKLnE\nWvpYA41ZLL6fhuvX934Mlk3NtRZfB16y8m71mCr8EMKfAdhMfPnH53s5ZBZi5ba+n57vkmutfmoK\nrorfu/o+J9+7+rT4qwcz91aQvH563tX3ot/Z2Rn793r4PfyYxbeNNG3VnQYCKfzVgcJfQWJrfLXg\ns1h8a+F9yy27jWfX+GrtrcWnq7+6UPgrRl6NfSy450VfrVbHfk5ee62Yqx+L6tPVXz0o/BVlWgtt\neyPwh8Wm0vrAnore1ttrR13v7lP4qwWFv+LYKL8+pjrkKl7s9jxm5bWHnnbS7Xa7Y2211OpT+KsD\nhb/CxEQ/DV9v7xtuxAJ6au1V+Brks+v80Wi00M9K5guFv4JYgedZ/Nih+CYb+ty7+Nbia9tsnZqj\n63y6+qsHhb8GXMfNB+L19rbDjrf43tW3jTStxafwVwcKf0Xx4vaCT1l6IF1vPxqNsu07m6ZrLb42\n0/QVehT+akHhrzAxUacsfp6r7yvx8tb4nU5nLGmHCTyrCYW/BsQse57L79tl+1r7vKh+p9MZiwn4\n4CBZDSj8FcNn7WnCjs3SsyW3sbJbu1/vG2v0er2ph/6M2CNZDSj8FUM77NhGGzYzL9ZPzzfaUCtv\nU3I1GSeVkmubbFDsqw+Fv2JY4dv2WtpKywtfy3Ct8GMpuXroFp5u1fntupjoeQNYPSj8FUOFX6lU\nxlpr1Wq1rLeeCn93dzdq8VPVd77s1lt8in59mLUDDykI3uKrpVfRq/BrtdqYxZ9Wbz9L9Z0P4FH0\nqwuFv2LE1vi2d35sjT+Lq2+37mKuvt+uo+hXG7r6K4YX/u7u7pibP4ur74N7dm2faq3F4ZfrBS3+\niqFbeX4wpp2PlwruAfn19t7ie1efzTbWB1r8guOTcLShpu2ea4N7jUYDtVoNu7u7ExF94Okefkz0\nOt8+1V6L1n59oPALSKroxvfNr1ar2N3dHQvwWdGrm2/basWi+b4Ip9PpjDXbYNnt+kHhFxQVq83U\nswMxbUsttfh66Ouavbexcbmiy4vm27JbH+Bj2e36QeEXEC923zffpudai1+r1bC3tzfWaNMK37r5\nNqhnhd9ut7N6e2/xKfz1gcIvGL6c1va916Ce3cpTd9+6+ra/nn4/MJ6jb9f3sXp7e1D46weFX0Bi\nTTTtwAwdxzPpAAAGyUlEQVQ/Gsu7+rFBGyIy0TY75eqz3n79ofALih+NpdbbW3zv6tdqNQCTAUJg\nuqt/fHyMdrsdrbWn8NcLCr+ApNpmx0pwY66+Ta+NtdnyW3m+p16q1p7CXx8o/IIRq7W3h1r43d3d\niQm4mp6rJbS2l55traWH7Z2Xqrf352Q9oPALhk/J9Uer1Zo5SUcbbdgjr9aedfblgcIvGLFcfHtM\nE76dfWeTdTRIp1t0NisvVX0H8CawrkzN1ReR50XkayLy1yLytoj8wtXrr4jId0TkL6+Ojy7+ctef\nVBFOo9FAq9WayeJb4atbr+t52w9/2uw7in59mcXinwP45RDCWyJSB/B/ROSrV1/7fAjh84u7vPKR\nJ3wVf6PRyKrvVPiVSiU67VYDeTaYN83VByj6dWeq8EMI7wN4/+q8KyLvAPjQ1Zenz2wi12IeFt+u\n8VMW3xfgsFNuubhWWa6IfD+AFwH876uXPi0ib4nIb4lIa87XVkpsos5NhB9b41tr7y0+x1yXk5mF\nf+Xm/xGAXwohdAH8BoAfCCG8iEuPgC7/HPCFOIsSvu2uwyKc8jGT8EWkgkvR/24I4XUACCF8EJ7+\npfwmgH+zmEssH+ru+6Qdu1ev63qbiw/Es/N8vX1svj0z88rFrBb/twH8TQjhC/qCiDxnvv5TAL45\nzwsrM7GsOz/jzk66tetz7a4zGAzGhl4eHx/j6OgIR0dHYzPue70ec/FLyNTgnoi8BOCnAbwtIt8A\nEAD8KoBPiMiLAC4AvAvg5xZ4naXDp9pa8ftzK34b1NNcfE3HjQlfrT6FXy5mier/GYDNyJf+eP6X\nQ2L59TGLHxO9HXVtG22oxT88PMwm3na73Wy9T4tfPpi5V0Bibr4Xv3f1rfi9q6+Vd2rxbUddu59P\n4ZcHCr+gxKrqUodd48dc/W63i3a7nVl8PwSTrn75oPALiBV9TPwpS+9dfTvmWi1+u90eS9e1absU\nfnmg8AvKLBZ/mqvf6/XGuuscHh7i+Pg427v3zTYo/PJA4RcMn4Bj6+bPzs6yZB1t0qFNO/RG0ev1\nxtpkd7tddLvdrHPu6elptFafwzLKBYVfMHSdrhZbE3Y0Scd+zbrx2nZrMBjg4cOH+OCDD/DkyRO0\n222cnJyg1+uNTcTxXXWYp18uKPyCYYde9Pt9nJ6ejvXF15p6Fb1W6Wn7rcFggMePH+PRo0djwvfd\ncv0SgZQLCr9g2JTbXq83Ifp+v49qtYputzvReqtarWI4HGZBPE3W8RafvfQIhV8wdM09GAzGRG/3\n5u3ADH+MRqOxtb3m5nvhU/zlhsIvGNbiA09FrwE+Lc7Rwz7XRpt2Eo4GBfv9fhbB1/eh6MsLhV8w\ndI0PPBW9VulpxZ5tue0HbuiSwPfaszPw9H1ij6QcyKL/w0WEf1HXJDYsM++w3wMgN52XQi8XIYRo\nlywKn5A1JiX8a7XeIoSsBxQ+ISWEwiekhFD4hJSQhQf3CCHFgxafkBJC4RNSQpYmfBH5qIh8S0S+\nLSKfWdb7zoqIvCsi/1dEviEif1GA63lVRB6KyF+Z1w5E5Csi8rci8id3Ob0ocX2FGaQaGfb6i1ev\nF+J3eNfDaJeyxheRDQDfBvBjAP4JwJsAPh5C+NbC33xGROT/AfjXIYTDu74WABCRfwegC+B3Qgg/\nfPXa5wA8DiH8+tXN8yCE8NkCXd8rADpFGKR6NffhOTvsFcDLAP4LCvA7zLm+/4wl/A6XZfE/DODv\nQgjvhRCGAH4flx+ySAgKtPQJIXwdgL8JvQzgtavz1wD85FIvypC4PqAgg1RDCO+HEN66Ou8CeAfA\n8yjI7zBxfUsbRrusP/QPAfgH8/w7ePohi0IA8FUReVNEfvauLybBMyGEh0A2xfiZO76eGIUbpGqG\nvf45gGeL9ju8i2G0hbFwBeClEMKPAPiPAH7+ypUtOkXbiy3cINXIsFf/O7vT3+FdDaNdlvD/EcD3\nmefPX71WGEII3716/ADAl3G5PCkaD0XkWSBbI37vjq9njKINUo0Ne0WBfod3OYx2WcJ/E8APisgL\nIrIN4OMA3ljSe09FRPau7rwQkRqAn0AxhoAKxtd7bwD41NX5JwG87r9hyYxdXwEHqU4Me0Wxfod3\nNox2aZl7V9sSX8DlzebVEMKvLeWNZ0BE/gUurXzAZXOS37vr6xORLwH4CID7AB4CeAXAfwfwhwD+\nOYD3AHwshHBUoOv7UVyuVbNBqrqevoPrewnA/wLwNi7/X3XY618A+APc8e8w5/
o+gSX8DpmyS0gJ\nYXCPkBJC4RNSQih8QkoIhU9ICaHwCSkhFD4hJYTCJ6SEUPiElJD/D8zm2kPGP0uxAAAAAElFTkSu\nQmCC\n", 410 | "text/plain": [ 411 | "" 412 | ] 413 | }, 414 | "metadata": {}, 415 | "output_type": "display_data" 416 | }, 417 | { 418 | "name": "stdout", 419 | "output_type": "stream", 420 | "text": [ 421 | "Result: 7\n" 422 | ] 423 | } 424 | ], 425 | "source": [ 426 | "plt.imshow((X_test[0].reshape((28,28))*255).astype(np.uint8), cmap='Greys_r')\n", 427 | "plt.show()\n", 428 | "print 'Result:', model.predict(X_test[0:1])[0].argmax()" 429 | ] 430 | }, 431 | { 432 | "cell_type": "markdown", 433 | "metadata": {}, 434 | "source": [ 435 | "We can also evaluate the model's accuracy on the entire test set:" 436 | ] 437 | }, 438 | { 439 | "cell_type": "code", 440 | "execution_count": 10, 441 | "metadata": { 442 | "scrolled": true 443 | }, 444 | "outputs": [ 445 | { 446 | "name": "stdout", 447 | "output_type": "stream", 448 | "text": [ 449 | "Accuracy: 97.33 %\n" 450 | ] 451 | } 452 | ], 453 | "source": [ 454 | "print 'Accuracy:', model.score(test_iter)*100, '%'" 455 | ] 456 | }, 457 | { 458 | "cell_type": "markdown", 459 | "metadata": { 460 | "collapsed": true 461 | }, 462 | "source": [ 463 | "Now, try if your model recognizes your own hand writing.\n", 464 | "\n", 465 | "Write a digit from 0 to 9 in the box below. Try to put your digit in the middle of the box." 466 | ] 467 | }, 468 | { 469 | "cell_type": "code", 470 | "execution_count": 11, 471 | "metadata": { 472 | "scrolled": false 473 | }, 474 | "outputs": [ 475 | { 476 | "data": { 477 | "text/html": [ 478 | "
Sorry, your browser doesn't support canvas technology.

Result:

" 479 | ], 480 | "text/plain": [ 481 | "" 482 | ] 483 | }, 484 | "execution_count": 11, 485 | "metadata": {}, 486 | "output_type": "execute_result" 487 | } 488 | ], 489 | "source": [ 490 | "# run hand drawing test\n", 491 | "from IPython.display import HTML\n", 492 | "\n", 493 | "def classify(img):\n", 494 | " img = img[len('data:image/png;base64,'):].decode('base64')\n", 495 | " img = cv2.imdecode(np.fromstring(img, np.uint8), -1)\n", 496 | " img = cv2.resize(img[:,:,3], (28,28))\n", 497 | " img = img.astype(np.float32).reshape((1, 784))/255.0\n", 498 | " return model.predict(img)[0].argmax()\n", 499 | "\n", 500 | "html = \"\"\"
Sorry, your browser doesn't support canvas technology.

Result:

\"\"\"\n", 501 | "script = \"\"\"\"\"\"\n", 502 | "HTML(html+script)" 503 | ] 504 | }, 505 | { 506 | "cell_type": "markdown", 507 | "metadata": {}, 508 | "source": [ 509 | "## Debugging\n", 510 | "\n", 511 | "DNNs can perform poorly for a lot of reasons, like learning rate too big/small, initialization too big/small, network structure not reasonable, etc. When this happens it's often helpful to print out the weights and intermediate outputs to understand what's going on. MXNet provides a monitor utility that does this:" 512 | ] 513 | }, 514 | { 515 | "cell_type": "code", 516 | "execution_count": 11, 517 | "metadata": {}, 518 | "outputs": [ 519 | { 520 | "name": "stderr", 521 | "output_type": "stream", 522 | "text": [ 523 | "INFO:root:Start training with [gpu(0)]\n", 524 | "INFO:root:Batch: 1 fc1_backward_weight 0.000519617\t\n", 525 | "INFO:root:Batch: 1 fc1_weight 0.00577777\t\n", 526 | "INFO:root:Batch: 1 fc2_backward_weight 0.00164324\t\n", 527 | "INFO:root:Batch: 1 fc2_weight 0.00577121\t\n", 528 | "INFO:root:Batch: 1 fc3_backward_weight 0.00490826\t\n", 529 | "INFO:root:Batch: 1 fc3_weight 0.00581168\t\n", 530 | "INFO:root:Epoch[0] Batch [100]\tSpeed: 68004.57 samples/sec\tTrain-accuracy=0.141400\n", 531 | "INFO:root:Batch: 101 fc1_backward_weight 0.170696\t\n", 532 | "INFO:root:Batch: 101 fc1_weight 0.0077417\t\n", 533 | "INFO:root:Batch: 101 fc2_backward_weight 0.300237\t\n", 534 | "INFO:root:Batch: 101 fc2_weight 0.0188219\t\n", 535 | "INFO:root:Batch: 101 fc3_backward_weight 1.26234\t\n", 536 | "INFO:root:Batch: 101 fc3_weight 0.0678799\t\n", 537 | "INFO:root:Epoch[0] Batch [200]\tSpeed: 63867.17 samples/sec\tTrain-accuracy=0.696600\n", 538 | "INFO:root:Batch: 201 fc1_backward_weight 0.224992\t\n", 539 | "INFO:root:Batch: 201 fc1_weight 0.0224456\t\n", 540 | "INFO:root:Batch: 201 fc2_backward_weight 0.574649\t\n", 541 | "INFO:root:Batch: 201 fc2_weight 0.0481841\t\n", 542 | "INFO:root:Batch: 201 fc3_backward_weight 1.50355\t\n", 543 | "INFO:root:Batch: 201 fc3_weight 0.223626\t\n", 544 | "INFO:root:Epoch[0] Batch [300]\tSpeed: 63575.68 samples/sec\tTrain-accuracy=0.886700\n", 545 | "INFO:root:Batch: 301 fc1_backward_weight 0.128922\t\n", 546 | "INFO:root:Batch: 301 fc1_weight 0.0297723\t\n", 547 | "INFO:root:Batch: 301 fc2_backward_weight 0.25938\t\n", 548 | "INFO:root:Batch: 301 fc2_weight 0.0623646\t\n", 549 | "INFO:root:Batch: 301 fc3_backward_weight 0.623773\t\n", 550 | "INFO:root:Batch: 301 fc3_weight 0.243092\t\n", 551 | "INFO:root:Epoch[0] Batch [400]\tSpeed: 63417.56 samples/sec\tTrain-accuracy=0.924800\n", 552 | "INFO:root:Batch: 401 fc1_backward_weight 0.23871\t\n", 553 | "INFO:root:Batch: 401 fc1_weight 0.0343854\t\n", 554 | "INFO:root:Batch: 401 fc2_backward_weight 0.42189\t\n", 555 | "INFO:root:Batch: 401 fc2_weight 0.0708286\t\n", 556 | "INFO:root:Batch: 401 fc3_backward_weight 0.808316\t\n", 557 | "INFO:root:Batch: 401 fc3_weight 0.252618\t\n", 558 | "INFO:root:Epoch[0] Batch [500]\tSpeed: 63915.25 samples/sec\tTrain-accuracy=0.933600\n", 559 | "INFO:root:Batch: 501 fc1_backward_weight 0.189612\t\n", 560 | "INFO:root:Batch: 501 fc1_weight 0.0384733\t\n", 561 | "INFO:root:Batch: 501 fc2_backward_weight 0.46391\t\n", 562 | "INFO:root:Batch: 501 fc2_weight 0.0783045\t\n", 563 | "INFO:root:Batch: 501 fc3_backward_weight 1.06239\t\n", 564 | "INFO:root:Batch: 501 fc3_weight 0.259909\t\n", 565 | "INFO:root:Epoch[0] Batch [600]\tSpeed: 63705.95 samples/sec\tTrain-accuracy=0.948200\n", 566 | "INFO:root:Epoch[0] Resetting Data Iterator\n", 567 | "INFO:root:Epoch[0] Time 
cost=0.945\n", 568 | "INFO:root:Epoch[0] Validation-accuracy=0.953100\n" 569 | ] 570 | } 571 | ], 572 | "source": [ 573 | "def norm_stat(d):\n", 574 | " \"\"\"The statistics you want to see.\n", 575 | " We compute the L2 norm here but you can change it to anything you like.\"\"\"\n", 576 | " return mx.nd.norm(d)/np.sqrt(d.size)\n", 577 | "mon = mx.mon.Monitor(\n", 578 | " 100, # Print every 100 batches\n", 579 | " norm_stat, # The statistics function defined above\n", 580 | " pattern='.*weight', # A regular expression. Only arrays with name matching this pattern will be included.\n", 581 | " sort=True) # Sort output by name\n", 582 | "model = mx.model.FeedForward(ctx = mx.gpu(0), symbol = mlp, num_epoch = 1,\n", 583 | " learning_rate = 0.1, momentum = 0.9, wd = 0.00001)\n", 584 | "model.fit(X=train_iter, eval_data=test_iter, monitor=mon, # Set the monitor here\n", 585 | " batch_end_callback = mx.callback.Speedometer(100, 100))" 586 | ] 587 | }, 588 | { 589 | "cell_type": "markdown", 590 | "metadata": {}, 591 | "source": [ 592 | "## Under the hood: Custom Training Loop\n", 593 | "\n", 594 | "`mx.model.FeedForward` is a convenience wrapper for training standard feed forward networks. What if the model you are working with is more complicated? With MXNet, you can easily control every aspect of training by writing your own training loop.\n", 595 | "\n", 596 | "Neural network training typically has 3 steps: forward, backward (gradient), and update. With custom training loop, you can control the details in each step as while as insert complicated computations in between. You can also connect multiple networks together." 597 | ] 598 | }, 599 | { 600 | "cell_type": "code", 601 | "execution_count": 12, 602 | "metadata": {}, 603 | "outputs": [ 604 | { 605 | "name": "stdout", 606 | "output_type": "stream", 607 | "text": [ 608 | "input_shapes {'softmax_label': (100,), 'data': (100, 784L)}\n", 609 | "epoch: 0 iter: 100 metric: ('accuracy', 0.1427)\n", 610 | "epoch: 0 iter: 200 metric: ('accuracy', 0.42695)\n", 611 | "epoch: 0 iter: 300 metric: ('accuracy', 0.5826333333333333)\n", 612 | "epoch: 0 iter: 400 metric: ('accuracy', 0.66875)\n", 613 | "epoch: 0 iter: 500 metric: ('accuracy', 0.72238)\n", 614 | "epoch: 0 iter: 600 metric: ('accuracy', 0.7602166666666667)\n", 615 | "epoch: 1 iter: 100 metric: ('accuracy', 0.9504)\n", 616 | "epoch: 1 iter: 200 metric: ('accuracy', 0.9515)\n", 617 | "epoch: 1 iter: 300 metric: ('accuracy', 0.9547666666666667)\n", 618 | "epoch: 1 iter: 400 metric: ('accuracy', 0.956475)\n", 619 | "epoch: 1 iter: 500 metric: ('accuracy', 0.95762)\n", 620 | "epoch: 1 iter: 600 metric: ('accuracy', 0.9591666666666666)\n", 621 | "epoch: 2 iter: 100 metric: ('accuracy', 0.9681)\n", 622 | "epoch: 2 iter: 200 metric: ('accuracy', 0.9699)\n", 623 | "epoch: 2 iter: 300 metric: ('accuracy', 0.9718666666666667)\n", 624 | "epoch: 2 iter: 400 metric: ('accuracy', 0.97205)\n", 625 | "epoch: 2 iter: 500 metric: ('accuracy', 0.97282)\n", 626 | "epoch: 2 iter: 600 metric: ('accuracy', 0.9728833333333333)\n", 627 | "epoch: 3 iter: 100 metric: ('accuracy', 0.9776)\n", 628 | "epoch: 3 iter: 200 metric: ('accuracy', 0.97755)\n", 629 | "epoch: 3 iter: 300 metric: ('accuracy', 0.9782666666666666)\n", 630 | "epoch: 3 iter: 400 metric: ('accuracy', 0.977775)\n", 631 | "epoch: 3 iter: 500 metric: ('accuracy', 0.97842)\n", 632 | "epoch: 3 iter: 600 metric: ('accuracy', 0.9788666666666667)\n", 633 | "epoch: 4 iter: 100 metric: ('accuracy', 0.9804)\n", 634 | "epoch: 4 iter: 200 metric: ('accuracy', 
0.98145)\n", 635 | "epoch: 4 iter: 300 metric: ('accuracy', 0.9818333333333333)\n", 636 | "epoch: 4 iter: 400 metric: ('accuracy', 0.981725)\n", 637 | "epoch: 4 iter: 500 metric: ('accuracy', 0.98214)\n", 638 | "epoch: 4 iter: 600 metric: ('accuracy', 0.9825333333333334)\n", 639 | "epoch: 5 iter: 100 metric: ('accuracy', 0.985)\n", 640 | "epoch: 5 iter: 200 metric: ('accuracy', 0.98415)\n", 641 | "epoch: 5 iter: 300 metric: ('accuracy', 0.9843333333333333)\n", 642 | "epoch: 5 iter: 400 metric: ('accuracy', 0.98425)\n", 643 | "epoch: 5 iter: 500 metric: ('accuracy', 0.9846)\n", 644 | "epoch: 5 iter: 600 metric: ('accuracy', 0.9846833333333334)\n", 645 | "epoch: 6 iter: 100 metric: ('accuracy', 0.9848)\n", 646 | "epoch: 6 iter: 200 metric: ('accuracy', 0.98445)\n", 647 | "epoch: 6 iter: 300 metric: ('accuracy', 0.9853333333333333)\n", 648 | "epoch: 6 iter: 400 metric: ('accuracy', 0.98555)\n", 649 | "epoch: 6 iter: 500 metric: ('accuracy', 0.9857)\n", 650 | "epoch: 6 iter: 600 metric: ('accuracy', 0.9862166666666666)\n", 651 | "epoch: 7 iter: 100 metric: ('accuracy', 0.9868)\n", 652 | "epoch: 7 iter: 200 metric: ('accuracy', 0.9858)\n", 653 | "epoch: 7 iter: 300 metric: ('accuracy', 0.9867333333333334)\n", 654 | "epoch: 7 iter: 400 metric: ('accuracy', 0.986825)\n", 655 | "epoch: 7 iter: 500 metric: ('accuracy', 0.98674)\n", 656 | "epoch: 7 iter: 600 metric: ('accuracy', 0.9869166666666667)\n", 657 | "epoch: 8 iter: 100 metric: ('accuracy', 0.9883)\n", 658 | "epoch: 8 iter: 200 metric: ('accuracy', 0.98825)\n", 659 | "epoch: 8 iter: 300 metric: ('accuracy', 0.9886666666666667)\n", 660 | "epoch: 8 iter: 400 metric: ('accuracy', 0.989075)\n", 661 | "epoch: 8 iter: 500 metric: ('accuracy', 0.98954)\n", 662 | "epoch: 8 iter: 600 metric: ('accuracy', 0.9896)\n", 663 | "epoch: 9 iter: 100 metric: ('accuracy', 0.9894)\n", 664 | "epoch: 9 iter: 200 metric: ('accuracy', 0.98945)\n", 665 | "epoch: 9 iter: 300 metric: ('accuracy', 0.9893333333333333)\n", 666 | "epoch: 9 iter: 400 metric: ('accuracy', 0.98935)\n", 667 | "epoch: 9 iter: 500 metric: ('accuracy', 0.98972)\n", 668 | "epoch: 9 iter: 600 metric: ('accuracy', 0.9903166666666666)\n" 669 | ] 670 | } 671 | ], 672 | "source": [ 673 | "# ==================Binding=====================\n", 674 | "# The symbol we created is only a graph description.\n", 675 | "# To run it, we first need to allocate memory and create an executor by 'binding' it.\n", 676 | "# In order to bind a symbol, we need at least two pieces of information: context and input shapes.\n", 677 | "# Context specifies which device the executor runs on, e.g. 
cpu, GPU0, GPU1, etc.\n", 678 | "# Input shapes define the executor's input array dimensions.\n", 679 | "# MXNet then run automatic shape inference to determine the dimensions of intermediate and output arrays.\n", 680 | "\n", 681 | "# data iterators defines shapes of its output with provide_data and provide_label property.\n", 682 | "input_shapes = dict(train_iter.provide_data+train_iter.provide_label)\n", 683 | "print 'input_shapes', input_shapes\n", 684 | "# We use simple_bind to let MXNet allocate memory for us.\n", 685 | "# You can also allocate memory youself and use bind to pass it to MXNet.\n", 686 | "exe = mlp.simple_bind(ctx=mx.gpu(0), **input_shapes)\n", 687 | "\n", 688 | "# ===============Initialization=================\n", 689 | "# First we get handle to input arrays\n", 690 | "arg_arrays = dict(zip(mlp.list_arguments(), exe.arg_arrays))\n", 691 | "data = arg_arrays[train_iter.provide_data[0][0]]\n", 692 | "label = arg_arrays[train_iter.provide_label[0][0]]\n", 693 | "\n", 694 | "# We initialize the weights with uniform distribution on (-0.01, 0.01).\n", 695 | "init = mx.init.Uniform(scale=0.01)\n", 696 | "for name, arr in arg_arrays.items():\n", 697 | " if name not in input_shapes:\n", 698 | " init(name, arr)\n", 699 | " \n", 700 | "# We also need to create an optimizer for updating weights\n", 701 | "opt = mx.optimizer.SGD(\n", 702 | " learning_rate=0.1,\n", 703 | " momentum=0.9,\n", 704 | " wd=0.00001,\n", 705 | " rescale_grad=1.0/train_iter.batch_size)\n", 706 | "updater = mx.optimizer.get_updater(opt)\n", 707 | "\n", 708 | "# Finally we need a metric to print out training progress\n", 709 | "metric = mx.metric.Accuracy()\n", 710 | "\n", 711 | "# Training loop begines\n", 712 | "for epoch in range(10):\n", 713 | " train_iter.reset()\n", 714 | " metric.reset()\n", 715 | " t = 0\n", 716 | " for batch in train_iter:\n", 717 | " # Copy data to executor input. Note the [:].\n", 718 | " data[:] = batch.data[0]\n", 719 | " label[:] = batch.label[0]\n", 720 | " \n", 721 | " # Forward\n", 722 | " exe.forward(is_train=True)\n", 723 | " \n", 724 | " # You perform operations on exe.outputs here if you need to.\n", 725 | " # For example, you can stack a CRF on top of a neural network.\n", 726 | " \n", 727 | " # Backward\n", 728 | " exe.backward()\n", 729 | " \n", 730 | " # Update\n", 731 | " for i, pair in enumerate(zip(exe.arg_arrays, exe.grad_arrays)):\n", 732 | " weight, grad = pair\n", 733 | " updater(i, grad, weight)\n", 734 | " metric.update(batch.label, exe.outputs)\n", 735 | " t += 1\n", 736 | " if t % 100 == 0:\n", 737 | " print 'epoch:', epoch, 'iter:', t, 'metric:', metric.get()\n" 738 | ] 739 | }, 740 | { 741 | "cell_type": "markdown", 742 | "metadata": {}, 743 | "source": [ 744 | "## New Operators\n", 745 | "\n", 746 | "MXNet provides a repository of common operators (or layers). However, new models often require new layers. There are several ways to [create new operators](https://mxnet.readthedocs.org/en/latest/tutorial/new_op_howto.html) with MXNet. Here we talk about the easiest way: pure python. 
" 747 | ] 748 | }, 749 | { 750 | "cell_type": "code", 751 | "execution_count": 13, 752 | "metadata": {}, 753 | "outputs": [ 754 | { 755 | "name": "stderr", 756 | "output_type": "stream", 757 | "text": [ 758 | "INFO:root:Start training with [gpu(0)]\n", 759 | "INFO:root:Epoch[0] Batch [100]\tSpeed: 37586.20 samples/sec\tTrain-accuracy=0.167800\n", 760 | "INFO:root:Epoch[0] Batch [200]\tSpeed: 36706.65 samples/sec\tTrain-accuracy=0.743800\n", 761 | "INFO:root:Epoch[0] Batch [300]\tSpeed: 36625.21 samples/sec\tTrain-accuracy=0.896900\n", 762 | "INFO:root:Epoch[0] Batch [400]\tSpeed: 37129.04 samples/sec\tTrain-accuracy=0.929000\n", 763 | "INFO:root:Epoch[0] Batch [500]\tSpeed: 38473.68 samples/sec\tTrain-accuracy=0.938100\n", 764 | "INFO:root:Epoch[0] Batch [600]\tSpeed: 39935.29 samples/sec\tTrain-accuracy=0.946400\n", 765 | "INFO:root:Epoch[0] Resetting Data Iterator\n", 766 | "INFO:root:Epoch[0] Time cost=1.600\n", 767 | "INFO:root:Epoch[0] Validation-accuracy=0.950400\n", 768 | "INFO:root:Epoch[1] Batch [100]\tSpeed: 39941.34 samples/sec\tTrain-accuracy=0.946700\n", 769 | "INFO:root:Epoch[1] Batch [200]\tSpeed: 39685.38 samples/sec\tTrain-accuracy=0.952700\n", 770 | "INFO:root:Epoch[1] Batch [300]\tSpeed: 40096.55 samples/sec\tTrain-accuracy=0.960800\n", 771 | "INFO:root:Epoch[1] Batch [400]\tSpeed: 39397.53 samples/sec\tTrain-accuracy=0.960200\n", 772 | "INFO:root:Epoch[1] Batch [500]\tSpeed: 38451.49 samples/sec\tTrain-accuracy=0.965000\n", 773 | "INFO:root:Epoch[1] Batch [600]\tSpeed: 39442.28 samples/sec\tTrain-accuracy=0.968600\n", 774 | "INFO:root:Epoch[1] Resetting Data Iterator\n", 775 | "INFO:root:Epoch[1] Time cost=1.527\n", 776 | "INFO:root:Epoch[1] Validation-accuracy=0.953400\n" 777 | ] 778 | } 779 | ], 780 | "source": [ 781 | "# Define custom softmax operator\n", 782 | "class NumpySoftmax(mx.operator.NumpyOp):\n", 783 | " def __init__(self):\n", 784 | " # Call the parent class constructor. 
\n", 785 | " # Because NumpySoftmax is a loss layer, it doesn't need gradient input from layers above.\n", 786 | " super(NumpySoftmax, self).__init__(need_top_grad=False)\n", 787 | " \n", 788 | " def list_arguments(self):\n", 789 | " # Define the input to NumpySoftmax.\n", 790 | " return ['data', 'label']\n", 791 | "\n", 792 | " def list_outputs(self):\n", 793 | " # Define the output.\n", 794 | " return ['output']\n", 795 | "\n", 796 | " def infer_shape(self, in_shape):\n", 797 | " # Calculate the dimensions of the output (and missing inputs) from (some) input shapes.\n", 798 | " data_shape = in_shape[0] # shape of first argument 'data'\n", 799 | " label_shape = (in_shape[0][0],) # 'label' should be one dimensional and has batch_size instances.\n", 800 | " output_shape = in_shape[0] # 'output' dimension is the same as the input.\n", 801 | " return [data_shape, label_shape], [output_shape]\n", 802 | "\n", 803 | " def forward(self, in_data, out_data):\n", 804 | " x = in_data[0] # 'data'\n", 805 | " y = out_data[0] # 'output'\n", 806 | " \n", 807 | " # Compute softmax\n", 808 | " y[:] = np.exp(x - x.max(axis=1).reshape((x.shape[0], 1)))\n", 809 | " y /= y.sum(axis=1).reshape((x.shape[0], 1))\n", 810 | "\n", 811 | " def backward(self, out_grad, in_data, out_data, in_grad):\n", 812 | " l = in_data[1] # 'label'\n", 813 | " l = l.reshape((l.size,)).astype(np.int) # cast to int\n", 814 | " y = out_data[0] # 'output'\n", 815 | " dx = in_grad[0] # gradient for 'data'\n", 816 | " \n", 817 | " # Compute gradient\n", 818 | " dx[:] = y\n", 819 | " dx[np.arange(l.shape[0]), l] -= 1.0\n", 820 | "\n", 821 | "numpy_softmax = NumpySoftmax()\n", 822 | "\n", 823 | "data = mx.symbol.Variable('data')\n", 824 | "fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128)\n", 825 | "act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type=\"relu\")\n", 826 | "fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)\n", 827 | "act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type=\"relu\")\n", 828 | "fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)\n", 829 | "# Use the new operator we just defined instead of the standard softmax operator.\n", 830 | "mlp = numpy_softmax(data=fc3, name = 'softmax')\n", 831 | "\n", 832 | "model = mx.model.FeedForward(ctx = mx.gpu(0), symbol = mlp, num_epoch = 2,\n", 833 | " learning_rate = 0.1, momentum = 0.9, wd = 0.00001)\n", 834 | "model.fit(X=train_iter, eval_data=test_iter,\n", 835 | " batch_end_callback = mx.callback.Speedometer(100, 100))" 836 | ] 837 | }, 838 | { 839 | "cell_type": "markdown", 840 | "metadata": { 841 | "collapsed": true 842 | }, 843 | "source": [ 844 | "## Further Readings\n", 845 | "\n", 846 | "- More state-of-the-art cnn models are available at [mxnet/example/image-classification](https://github.com/dmlc/mxnet/tree/master/example/image-classification)" 847 | ] 848 | } 849 | ], 850 | "metadata": { 851 | "anaconda-cloud": {}, 852 | "kernelspec": { 853 | "display_name": "mxnet", 854 | "language": "python", 855 | "name": "mxnet" 856 | }, 857 | "language_info": { 858 | "codemirror_mode": { 859 | "name": "ipython", 860 | "version": 3 861 | }, 862 | "file_extension": ".py", 863 | "mimetype": "text/x-python", 864 | "name": "python", 865 | "nbconvert_exporter": "python", 866 | "pygments_lexer": "ipython3", 867 | "version": "3.6.1" 868 | } 869 | }, 870 | "nbformat": 4, 871 | "nbformat_minor": 1 872 | } 873 | -------------------------------------------------------------------------------- 
/images/Stop.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/manujeevanprakash/mxnet-ccn-samples/68468925919ea974e278a358237c288da80b5f19/images/Stop.jpg -------------------------------------------------------------------------------- /images/turn-left-ahead2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/manujeevanprakash/mxnet-ccn-samples/68468925919ea974e278a358237c288da80b5f19/images/turn-left-ahead2.jpg -------------------------------------------------------------------------------- /images/vis.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/manujeevanprakash/mxnet-ccn-samples/68468925919ea974e278a358237c288da80b5f19/images/vis.png -------------------------------------------------------------------------------- /signnames.csv: -------------------------------------------------------------------------------- 1 | a,b 2 | 0,Speed limit (20km/h) 3 | 1,Speed limit (30km/h) 4 | 2,Speed limit (50km/h) 5 | 3,Speed limit (60km/h) 6 | 4,Speed limit (70km/h) 7 | 5,Speed limit (80km/h) 8 | 6,End of speed limit (80km/h) 9 | 7,Speed limit (100km/h) 10 | 8,Speed limit (120km/h) 11 | 9,No passing 12 | 10,No passing for vehicles over 3.5 metric tons 13 | 11,Right-of-way at the next intersection 14 | 12,Priority road 15 | 13,Yield 16 | 14,Stop 17 | 15,No vehicles 18 | 16,Vehicles over 3.5 metric tons prohibited 19 | 17,No entry 20 | 18,General caution 21 | 19,Dangerous curve to the left 22 | 20,Dangerous curve to the right 23 | 21,Double curve 24 | 22,Bumpy road 25 | 23,Slippery road 26 | 24,Road narrows on the right 27 | 25,Road work 28 | 26,Traffic signals 29 | 27,Pedestrians 30 | 28,Children crossing 31 | 29,Bicycles crossing 32 | 30,Beware of ice/snow 33 | 31,Wild animals crossing 34 | 32,End of all speed and passing limits 35 | 33,Turn right ahead 36 | 34,Turn left ahead 37 | 35,Ahead only 38 | 36,Go straight or right 39 | 37,Go straight or left 40 | 38,Keep right 41 | 39,Keep left 42 | 40,Roundabout mandatory 43 | 41,End of no passing 44 | 42,End of no passing by vehicles over 3.5 metric tons 45 | -------------------------------------------------------------------------------- /traffic-data/training.p: -------------------------------------------------------------------------------- 1 | Training data can be found at - https://www.dropbox.com/s/k2o4n09fhja8x85/train.p?dl=0 2 | -------------------------------------------------------------------------------- /traffic-data/valid.p: -------------------------------------------------------------------------------- 1 | validation data can be found at - https://www.dropbox.com/s/wsofgk6otd8qaew/valid.p?dl=0 2 | --------------------------------------------------------------------------------