├── .gitattributes ├── .gitignore ├── Danbury AI - Workshop_ Scientific Computing in Python - December 2017.pdf ├── Data ├── faces │ ├── Aaron_Eckhart_0001.jpg │ ├── Aaron_Guiel_0001.jpg │ ├── Aaron_Patterson_0001.jpg │ ├── Aaron_Peirsol_0003.jpg │ ├── Aaron_Pena_0001.jpg │ ├── Aaron_Sorkin_0001.jpg │ ├── Aaron_Tippin_0001.jpg │ ├── Abba_Eban_0001.jpg │ ├── Abbas_Kiarostami_0001.jpg │ ├── Abdel_Aziz_Al-Hakim_0001.jpg │ ├── Abdel_Madi_Shabneh_0001.jpg │ ├── Abdel_Nasser_Assidi_0001.jpg │ ├── Abdoulaye_Wade_0002.jpg │ ├── Abdul_Majeed_Shobokshi_0001.jpg │ ├── Abdul_Rahman_0001.jpg │ ├── Abdulaziz_Kamilov_0001.jpg │ ├── Abdullah_Gul_0008.jpg │ ├── Amy_Gale_0001.jpg │ ├── Amy_Redford_0001.jpg │ ├── Anatoliy_Kinakh_0001.jpg │ ├── Anna_Nicole_Smith_0002.jpg │ ├── Arthur_Martinez_0001.jpg │ └── Aung_San_Suu_Kyi_0001.jpg ├── pokemon.csv └── readme.md ├── LICENSE ├── Notebooks ├── Beginner MNIST.ipynb ├── Exercise - Deriving the Quadratic Formula with SymPy.ipynb ├── Exploring MNIST Manifolds.ipynb ├── Latex Essentials .ipynb ├── Linear Regression - Gradient Descent.ipynb ├── Linear Regression - The Solution Space.ipynb ├── Linear Vs. Non-Linear Functions.ipynb ├── MNIST Probability Experiments 1.ipynb ├── Matrix as a Function & Plotting Vectors.ipynb ├── Neural Boolean Connectives 1.ipynb ├── Poke Pandas.ipynb ├── SymPy Basics.ipynb └── The Taylor Series.ipynb ├── README.md ├── Resources └── sympy_tutorial.pdf ├── TensorFlow-Workshop-March-2017 ├── 1_Introduction │ └── Beginner MNIST.ipynb ├── 2_going depp │ └── Deep MNIST for Experts.ipynb ├── Danbury AI_ TensorFlow Workshop 3%2F2017.pptx ├── MachineLearningWithTensorFlow.pptx ├── README.md └── old_stuff │ ├── ASingleNeuron.ipynb │ ├── Beginner MNIST.ipynb │ ├── Deep MNIST for Experts.ipynb │ ├── MachineLearningWithTensorFlow.pptx │ ├── Misc.ipynb │ ├── aws-gpu-setup.md │ ├── basics.py │ ├── characters │ ├── 3.jpg │ ├── 3.png │ └── 34.jpg │ ├── finalBasics.py │ ├── input_data.py │ ├── mnist_with_summaries.py │ ├── readme.md │ └── requirements.txt └── setup.ipynb /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | 4 | # Custom for Visual Studio 5 | *.cs diff=csharp 6 | 7 | # Standard to msysgit 8 | *.doc diff=astextplain 9 | *.DOC diff=astextplain 10 | *.docx diff=astextplain 11 | *.DOCX diff=astextplain 12 | *.dot diff=astextplain 13 | *.DOT diff=astextplain 14 | *.pdf diff=astextplain 15 | *.PDF diff=astextplain 16 | *.rtf diff=astextplain 17 | *.RTF diff=astextplain 18 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .ipynb_checkpoints 2 | .idea -------------------------------------------------------------------------------- /Danbury AI - Workshop_ Scientific Computing in Python - December 2017.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Danbury AI - Workshop_ Scientific Computing in Python - December 2017.pdf -------------------------------------------------------------------------------- /Data/faces/Aaron_Eckhart_0001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Aaron_Eckhart_0001.jpg 
-------------------------------------------------------------------------------- /Data/faces/Aaron_Guiel_0001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Aaron_Guiel_0001.jpg -------------------------------------------------------------------------------- /Data/faces/Aaron_Patterson_0001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Aaron_Patterson_0001.jpg -------------------------------------------------------------------------------- /Data/faces/Aaron_Peirsol_0003.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Aaron_Peirsol_0003.jpg -------------------------------------------------------------------------------- /Data/faces/Aaron_Pena_0001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Aaron_Pena_0001.jpg -------------------------------------------------------------------------------- /Data/faces/Aaron_Sorkin_0001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Aaron_Sorkin_0001.jpg -------------------------------------------------------------------------------- /Data/faces/Aaron_Tippin_0001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Aaron_Tippin_0001.jpg -------------------------------------------------------------------------------- /Data/faces/Abba_Eban_0001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Abba_Eban_0001.jpg -------------------------------------------------------------------------------- /Data/faces/Abbas_Kiarostami_0001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Abbas_Kiarostami_0001.jpg -------------------------------------------------------------------------------- /Data/faces/Abdel_Aziz_Al-Hakim_0001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Abdel_Aziz_Al-Hakim_0001.jpg -------------------------------------------------------------------------------- /Data/faces/Abdel_Madi_Shabneh_0001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Abdel_Madi_Shabneh_0001.jpg -------------------------------------------------------------------------------- /Data/faces/Abdel_Nasser_Assidi_0001.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Abdel_Nasser_Assidi_0001.jpg -------------------------------------------------------------------------------- /Data/faces/Abdoulaye_Wade_0002.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Abdoulaye_Wade_0002.jpg -------------------------------------------------------------------------------- /Data/faces/Abdul_Majeed_Shobokshi_0001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Abdul_Majeed_Shobokshi_0001.jpg -------------------------------------------------------------------------------- /Data/faces/Abdul_Rahman_0001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Abdul_Rahman_0001.jpg -------------------------------------------------------------------------------- /Data/faces/Abdulaziz_Kamilov_0001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Abdulaziz_Kamilov_0001.jpg -------------------------------------------------------------------------------- /Data/faces/Abdullah_Gul_0008.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Abdullah_Gul_0008.jpg -------------------------------------------------------------------------------- /Data/faces/Amy_Gale_0001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Amy_Gale_0001.jpg -------------------------------------------------------------------------------- /Data/faces/Amy_Redford_0001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Amy_Redford_0001.jpg -------------------------------------------------------------------------------- /Data/faces/Anatoliy_Kinakh_0001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Anatoliy_Kinakh_0001.jpg -------------------------------------------------------------------------------- /Data/faces/Anna_Nicole_Smith_0002.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Anna_Nicole_Smith_0002.jpg -------------------------------------------------------------------------------- /Data/faces/Arthur_Martinez_0001.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Arthur_Martinez_0001.jpg -------------------------------------------------------------------------------- /Data/faces/Aung_San_Suu_Kyi_0001.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Data/faces/Aung_San_Suu_Kyi_0001.jpg -------------------------------------------------------------------------------- /Data/readme.md: -------------------------------------------------------------------------------- 1 | https://www.kaggle.com/zalando-research/fashionmnist/data 2 | 3 | https://www.kaggle.com/rounakbanik/pokemon/data 4 | 5 | http://vis-www.cs.umass.edu/lfw/#download 6 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Andrew Ribeiro 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Notebooks/Beginner MNIST.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "
\n", 8 | "

MNIST For ML Beginners

\n", 9 | "

Interactive IPython Notebook

\n", 10 | "
\n", 11 | " Source: https://www.tensorflow.org/versions/r0.8/tutorials/mnist/beginners/index.html\n", 12 | "
\n" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 1, 18 | "metadata": { 19 | "collapsed": false 20 | }, 21 | "outputs": [], 22 | "source": [ 23 | "import matplotlib.pyplot as plt\n", 24 | "import matplotlib.image as mpimg\n", 25 | "from pylab import *\n", 26 | "from numpy import *\n", 27 | "%matplotlib inline" 28 | ] 29 | }, 30 | { 31 | "cell_type": "markdown", 32 | "metadata": {}, 33 | "source": [ 34 | "Download MNIST dataset. " 35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": 2, 40 | "metadata": { 41 | "collapsed": false, 42 | "scrolled": true 43 | }, 44 | "outputs": [ 45 | { 46 | "name": "stdout", 47 | "output_type": "stream", 48 | "text": [ 49 | "Extracting MNIST_data/train-images-idx3-ubyte.gz\n", 50 | "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n", 51 | "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n", 52 | "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n" 53 | ] 54 | } 55 | ], 56 | "source": [ 57 | "from tensorflow.examples.tutorials.mnist import input_data\n", 58 | "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)" 59 | ] 60 | }, 61 | { 62 | "cell_type": "markdown", 63 | "metadata": {}, 64 | "source": [ 65 | "
" 66 | ] 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "metadata": {}, 71 | "source": [ 72 | "Get a batch of 50 training examples from the MNIST training set. " 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": 3, 78 | "metadata": { 79 | "collapsed": false 80 | }, 81 | "outputs": [], 82 | "source": [ 83 | "batch = mnist.train.next_batch(50)" 84 | ] 85 | }, 86 | { 87 | "cell_type": "markdown", 88 | "metadata": {}, 89 | "source": [ 90 | "The MNIST dataset has thousands of 28 X 28 greyscale pixel images of alpabetical characters. They are unrolled into 784 length rows. " 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": 4, 96 | "metadata": { 97 | "collapsed": false 98 | }, 99 | "outputs": [ 100 | { 101 | "name": "stdout", 102 | "output_type": "stream", 103 | "text": [ 104 | "(784,) 784\n" 105 | ] 106 | } 107 | ], 108 | "source": [ 109 | "print( batch[0][0].shape, 28 * 28 )\n" 110 | ] 111 | }, 112 | { 113 | "cell_type": "markdown", 114 | "metadata": {}, 115 | "source": [ 116 | "First image in our batch of training examples alongside the one-hot vector classification/label. A one-hot vector is a vector which is 0 in most dimensions, and 1 in a single dimension. In this case, the nth digit will be represented as a vector which is 1 in the nth dimensions." 117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "execution_count": 5, 122 | "metadata": { 123 | "collapsed": false 124 | }, 125 | "outputs": [ 126 | { 127 | "data": { 128 | "text/plain": [ 129 | "array([ 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.])" 130 | ] 131 | }, 132 | "execution_count": 5, 133 | "metadata": {}, 134 | "output_type": "execute_result" 135 | }, 136 | { 137 | "data": { 138 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAP8AAAD8CAYAAAC4nHJkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAABt1JREFUeJzt3a1vVHkbx+GZDQ7ZNkFRXElwBBw0wUDqWsuLgyY4QEEg\nmEIdCdTw8gegCnWgC4oQqKPFgqPFIjtrEI94zv1jmbbQ+V6Xvfe3M7D97BF3zzn9wWDQA/L886e/\nAPBniB9CiR9CiR9CiR9CiR9CiR9CiR9CiR9CHdjLD+v3+36dEHbZYDDo/8o/58oPocQPocQPocQP\nocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQP\nocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPoQ786S/AnzU5OVnO\nL1++XM5v375dzgeDQees3++XZ9fX18v5nTt3yvnKyko5T+fKD6HED6HED6HED6HED6HED6HED6H6\n1R52xz+s39+7DwsyMTHRObt161Z59sKFC+V8bGysnLd29cPs+Vs/m1+/fi3nJ0+e7JxtbW2VZ/ez\nwWBQ/8X+5MoPocQPocQPocQPocQPocQPoaz69oHWbbMLCwuds9Z/391et21ubpbzyvj4eDk/cuRI\nOf/06VPn7NixY7/zlfYFqz6gJH4IJX4IJX4IJX4IJX4IJX4IZc+/D7x//76cHz9+vHM27J6/2pX3\ner3emTNnyvkwt86eOnWqnK+urpbz6s9+4MDoPrXenh8oiR9CiR9CiR9CiR9CiR9CiR9C2fP/BY4e\nPVrOW3v+79+/d85a99O39vDXr18v59euXSvni4uLnbMvX76UZ1taP7vb29uds6tXr5Znnz179lvf\n6W9gzw+UxA+hxA+hxA+hxA+hxA+hxA+h7Pn3gdbvAVS7+mFfRT0/P1/OHz9+XM6r12R//PixPDs3\nN1fOl5eXy3n1s33o0KHy7H5+hbc9P1ASP4QSP4QSP4QSP4QSP4QSP4Qa3YeXj5CNjY0/9tmt5wF8\n/vy5nFfPGmg9K+DmzZvlvPXOgd38/YdR4MoPocQPocQPocQPocQPocQPoaz6RsD09HTnrHU7cGuV\nt76+Xs6npqbK+bt37zpnExMT5dnW7eat7z4zM1PO07nyQyjxQyjxQyjxQyjxQyjxQyjxQyh7/hFw\n/vz5ztmVK1fKs63bYlu79tb5apc/zC25vV6vt7S0VM5bjwZP58oPocQPocQPocQPocQPocQPocQP\noez5R9ywr2DfzfNv374tz964caOc2+MPx5UfQokfQokfQokfQokfQokfQokfQtnzj4Dnz593ziYn\nJ8uz4+Pj5bz13P+DBw+W88rdu3fLuT3+7nLlh1Dih1Dih1Dih1Dih1Dih1Dih1D9Ye/X/k8f1u/v\n3YexI1p7/nv37pXz2dnZztna2lp5dmZmppy3nuufajAY1C9E+MmVH0KJH0KJH0KJH0KJH0KJH0JZ\n9f2i6lXTm5ube/hN9pfXr193zs6dO1eebT26++HDh7/1nUadVR9QEj+EEj+EEj+EEj+EEj+EEj+E\n8ujun6anp8v5gwcPOmcbGxvl2UuXLv3WdxoF9+/f75ydPXu2PDs1NbXTX4f/4coPocQPocQPocQP\nocQPocQPocQPoWL2/NX9+L1er/fkyZNy/u3bt85Z8h6/9Yrup0+fds76/V+67Zxd4soPocQPocQP\nocQPocQPocQPocQPoWL2/HNzc+W8d
e/46urqTn6dfaP1iu4XL16U8+rvtfXOiNZzEhiOKz+EEj+E\nEj+EEj+EEj+EEj+Eiln1vXnzppz/80/9/8Hq0d4XL14sz66vr5fzDx8+lPOWycnJztnp06fLs60V\n6OzsbDlv3ZZbrfMePXpUnm3NGY4rP4QSP4QSP4QSP4QSP4QSP4QSP4Tqt26r3NEP6/f37sP+o+Xl\n5XJe7buH2XX3er3e2tpaOW85fPhw52xsbKw8O+x3b52vXtG9tLRUnt3a2irn/H+DweCXnonuyg+h\nxA+hxA+hxA+hxA+hxA+hxA+h7Pl/ar3C+9WrV52zEydOlGe3t7fL+W7u2ltnf/z4Uc5bj89eXFws\n5ysrK+WcnWfPD5TED6HED6HED6HED6HED6HED6Hs+X/R+Ph452xhYWGof/f8/Hw5f/nyZTkf5r73\n1rPxvSZ7/7HnB0rih1Dih1Dih1Dih1Dih1Dih1D2/DBi7PmBkvghlPghlPghlPghlPghlPghlPgh\nlPghlPghlPghlPghlPghlPghlPghlPghlPghlPghlPghlPghlPghlPgh1J4+uhv4e7jyQyjxQyjx\nQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjx\nQyjxQ6h/AdzwgYFBiQXqAAAAAElFTkSuQmCC\n", 139 | "text/plain": [ 140 | "" 141 | ] 142 | }, 143 | "metadata": {}, 144 | "output_type": "display_data" 145 | } 146 | ], 147 | "source": [ 148 | "firstIMG = batch[0][1].reshape((28, 28))\n", 149 | "secondIMG = batch[0][2].reshape((28, 28))\n", 150 | "\n", 151 | "plt.axis(\"off\")\n", 152 | "\n", 153 | "imgplot = plt.imshow(firstIMG, cmap=cm.gray)\n", 154 | "\n", 155 | "batch[1][1]" 156 | ] 157 | }, 158 | { 159 | "cell_type": "markdown", 160 | "metadata": {}, 161 | "source": [ 162 | "Second image in our batch of training examples alongside the one-hot classification vector." 163 | ] 164 | }, 165 | { 166 | "cell_type": "code", 167 | "execution_count": 6, 168 | "metadata": { 169 | "collapsed": false 170 | }, 171 | "outputs": [ 172 | { 173 | "data": { 174 | "text/plain": [ 175 | "array([ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.])" 176 | ] 177 | }, 178 | "execution_count": 6, 179 | "metadata": {}, 180 | "output_type": "execute_result" 181 | }, 182 | { 183 | "data": { 184 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAP8AAAD8CAYAAAC4nHJkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAABhVJREFUeJzt3T1rVFsYhuEzKgk2fiFYCGkEG8FGSSNEBWv/gI1WVhYi\ngoJaqEgQixCwEq0tBK1EEMXGQrAQQRBshIiQQrFQEPOxT3uaeSfuyewk57mu9p2194Jwu4qVib2m\naf4B8mxa6w0Aa0P8EEr8EEr8EEr8EEr8EEr8EEr8EEr8EGpLly/r9Xp+nRBGrGma3ko+5+SHUOKH\nUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKH\nUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKH\nUOKHUOKHUOKHUOKHUOKHUOKHUFvWegMMNjY2Vs5fvHjRd3bkyJFyba/XK+c/fvwo5wcPHiznc3Nz\n5Zy14+SHUOKHUOKHUOKHUOKHUOKHUK761oFBV3n3798v54Ou8ypPnjwp59PT0+X869evrd89anv2\n7Ok7m5+f73An65OTH0KJH0KJH0KJH0KJH0KJH0KJH0K5518HLly4UM5PnTrV+tl3794t5xcvXizn\nv3//bv3uUbtz5045P3PmTN/ZjRs3yrUzMzOt9rSROPkhlPghlPghlPghlPghlPghlPghlHv+Dhw4\ncKCcX7lyZajn//z5s+/s/Pnz5drFxcWh3j1Khw8fLuenT58u5zt37lzF3fz/OPkhlPghlPghlPgh\nlPghlPghlPghlHv+Dly6dKmcb926tZwPuos/efJk67Xr2aC/NbBr165yvrCw0Hc26P8rSODkh1Di\nh1Dih1Dih1Dih1Dih1Dih1Du+Ttw6NChodY/e/asnL969ar1szdv3lzOx8bGWj97kH379pXzo0eP\nDvX8R48e9Z19/vx5qGf/Hzj5IZT4IZT4IZT4IZT4IZT4IZSrvg1gfHy89drJyclyfvPmzXJ+4sSJ\n1u8etfn5+XJ+69atjnayMTn5IZT4IZT4IZT4IZT4IZT4IZT4IZR7/g7cvn27nD948KCcHz9+vJy/\nfPmy72xqaqpcu2nTxv33/969e+X8w4cPHe1kY9q4P3lgKOKHUOKHUOKHUOKHUOKHUOKHUO75OzAx\nMTHU+i1b6h/TsWPHWj/7zZs35fzx48flfO/eveX83Llzf72nlXr79u3Inp3AyQ+hxA+hxA+hxA+h\nxA+hxA+hxA+h3PN3YND39f/8+TOydz98+LCcz83NlfOlpaVyfvny5b/e00q9fv26nD99+nRk707g\n5IdQ4odQ4odQ4odQ4odQ4odQ4odQ7vk78OXLl3I+PT3d0U5W369fv0b27NnZ2XK+uLg4sncncPJD\nKPFDKPFDKPFDKPFDKPFDKFd9DGXQV34ry8vL5fzTp0+tn81gTn4IJX4IJX4IJX4IJX4IJX4IJX4I\n5Z6foZw9e7b12ufPn5fzd+/etX42gzn5IZT4IZT4IZT4IZT4IZT4IZT4IZR7fkrbt28v59u2bWv9\n7JmZmdZrGZ6TH0KJH0KJH0KJH0KJH0KJH0KJH0K556c0OTlZzicmJsr5wsJC39m3b99a7YnV4eSH\nUOKHUOKHUOKHUOKHUOKHUL2mabp7Wa/X3ctYFR8/fizn+/fvL+ffv3/vO9u9e3erPVFrmqa3ks85\n+SGU+CGU+CGU+CGU+CGU+CGU+CGUr/RSGh8fH2r9+/fvV2knrD
YnP4QSP4QSP4QSP4QSP4QSP4QS\nP4Ryz89ILS0trfUW6MPJD6HED6HED6HED6HED6HED6HED6Hc8zNSU1NTfWfXrl0r116/fn21t8N/\nOPkhlPghlPghlPghlPghlPghlPghlHt+SrOzs+X86tWr5XzHjh19Z8vLy632xOpw8kMo8UMo8UMo\n8UMo8UMo8UOoXtM03b2s1+vuZRCqaZreSj7n5IdQ4odQ4odQ4odQ4odQ4odQ4odQ4odQ4odQ4odQ\n4odQ4odQ4odQ4odQ4odQnX6fH1g/nPwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQ\nSvwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQ6l8EhMln4ZEXXAAAAABJRU5ErkJggg==\n", 185 | "text/plain": [ 186 | "" 187 | ] 188 | }, 189 | "metadata": {}, 190 | "output_type": "display_data" 191 | } 192 | ], 193 | "source": [ 194 | "imgplot = plt.imshow(secondIMG , cmap=cm.gray)\n", 195 | "plt.axis(\"off\")\n", 196 | "batch[1][2]" 197 | ] 198 | }, 199 | { 200 | "cell_type": "markdown", 201 | "metadata": {}, 202 | "source": [ 203 | "
## Softmax Regression
" 204 | ] 205 | }, 206 | { 207 | "cell_type": "markdown", 208 | "metadata": {}, 209 | "source": [ 210 | "$$y = softmax(Wx+b)$$" 211 | ] 212 | }, 213 | { 214 | "cell_type": "code", 215 | "execution_count": 7, 216 | "metadata": { 217 | "collapsed": true 218 | }, 219 | "outputs": [], 220 | "source": [ 221 | "import tensorflow as tf\n", 222 | "\n", 223 | "# Clear the graph from prior sessions. A ipython quirk. \n", 224 | "tf.reset_default_graph()" 225 | ] 226 | }, 227 | { 228 | "cell_type": "code", 229 | "execution_count": 8, 230 | "metadata": { 231 | "collapsed": false 232 | }, 233 | "outputs": [], 234 | "source": [ 235 | "with tf.name_scope('Hidden') as scope:\n", 236 | " x = tf.placeholder(tf.float32, [None, 784],name='input')\n", 237 | " W = tf.Variable(tf.zeros([784, 10]),name='weights')\n", 238 | " b = tf.Variable(tf.zeros([10]),name='biases')\n", 239 | " y = tf.nn.softmax(tf.matmul(x, W) + b, name='softmax')" 240 | ] 241 | }, 242 | { 243 | "cell_type": "markdown", 244 | "metadata": {}, 245 | "source": [ 246 | "
## Cross-Entropy as a Cost Function
" 247 | ] 248 | }, 249 | { 250 | "cell_type": "markdown", 251 | "metadata": {}, 252 | "source": [ 253 | "
$$H_{y'}(y) = - \sum_{i} y'_i \log(y_i)$$
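\n", "For a one-hot label $y'$ with the 1 at index $j$, the sum collapses to $-\log(y_j)$: predicting $y_j = 0.7$ for the true class costs $-\log(0.7) \approx 0.36$ nats, while $y_j = 0.99$ costs roughly $0.01$.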
" 254 | ] 255 | }, 256 | { 257 | "cell_type": "markdown", 258 | "metadata": {}, 259 | "source": [ 260 | "
$$H(X) = \sum_{i=1}^m p(x_i)\log\left(\frac{1}{p(x_i)}\right) = - \sum_{i=1}^m p(x_i)\log(p(x_i))$$
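\n", "Cross-entropy generalizes this idea: $H_{y'}(y)$ is the expected coding cost when symbols are drawn from the true distribution $y'$ but encoded with a code built for $y$, so it is minimized exactly when $y = y'$.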
" 261 | ] 262 | }, 263 | { 264 | "cell_type": "markdown", 265 | "metadata": {}, 266 | "source": [ 267 | "Where y is our predicted probability distribution, and y′ is the true distribution (the one-hot vector we'll input). In some rough sense, the cross-entropy is measuring how inefficient our predictions are for describing the truth." 268 | ] 269 | }, 270 | { 271 | "cell_type": "markdown", 272 | "metadata": {}, 273 | "source": [ 274 | "To implement cross-entropy we need to first add a new placeholder to input the correct answers:\n" 275 | ] 276 | }, 277 | { 278 | "cell_type": "code", 279 | "execution_count": 9, 280 | "metadata": { 281 | "collapsed": true 282 | }, 283 | "outputs": [], 284 | "source": [ 285 | "y_ = tf.placeholder(tf.float32, [None, 10])" 286 | ] 287 | }, 288 | { 289 | "cell_type": "markdown", 290 | "metadata": {}, 291 | "source": [ 292 | "First, tf.log computes the logarithm of each element of y. Next, we multiply each element of y_ with the corresponding element of tf.log(y). Then tf.reduce_sum adds the elements in the second dimension of y, due to the reduction_indices=[1] parameter. Finally, tf.reduce_mean computes the mean over all the examples in the batch." 293 | ] 294 | }, 295 | { 296 | "cell_type": "code", 297 | "execution_count": 10, 298 | "metadata": { 299 | "collapsed": true 300 | }, 301 | "outputs": [], 302 | "source": [ 303 | "cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))" 304 | ] 305 | }, 306 | { 307 | "cell_type": "markdown", 308 | "metadata": {}, 309 | "source": [ 310 | "In this case, we ask TensorFlow to minimize cross_entropy using the gradient descent algorithm with a learning rate of 0.5. Gradient descent is a simple procedure, where TensorFlow simply shifts each variable a little bit in the direction that reduces the cost. " 311 | ] 312 | }, 313 | { 314 | "cell_type": "code", 315 | "execution_count": 11, 316 | "metadata": { 317 | "collapsed": true 318 | }, 319 | "outputs": [], 320 | "source": [ 321 | "train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n" 322 | ] 323 | }, 324 | { 325 | "cell_type": "markdown", 326 | "metadata": {}, 327 | "source": [ 328 | "Now we have our model set up to train. One last thing before we launch it, we have to add an operation to initialize the variables we created:" 329 | ] 330 | }, 331 | { 332 | "cell_type": "code", 333 | "execution_count": 20, 334 | "metadata": { 335 | "collapsed": false 336 | }, 337 | "outputs": [], 338 | "source": [ 339 | "init = tf.global_variables_initializer()" 340 | ] 341 | }, 342 | { 343 | "cell_type": "markdown", 344 | "metadata": {}, 345 | "source": [ 346 | "We can now launch the model in a Session, and run the operation that initializes the variables:" 347 | ] 348 | }, 349 | { 350 | "cell_type": "code", 351 | "execution_count": 21, 352 | "metadata": { 353 | "collapsed": false 354 | }, 355 | "outputs": [], 356 | "source": [ 357 | "sess = tf.Session()\n", 358 | "sess.run(init)" 359 | ] 360 | }, 361 | { 362 | "cell_type": "markdown", 363 | "metadata": {}, 364 | "source": [ 365 | "Before we train our model, let's get TensorBoard up and running. Go to your commandline and CD into the directory that contains this notebook. Activate your tensorflow virtualenv. 
Then issue the following console command to launch TensorBoard: \n", 366 | "\n", 367 | "```\n", 368 | "tensorboard --logdir=log_beginner_minst\n", 369 | "\n", 370 | "```\n", 371 | "\n", 372 | "You should get something like this: \n", 373 | "\n", 374 | "```\n", 375 | "Starting TensorBoard 16 on port 6006\n", 376 | "(You can navigate to http://0.0.0.0:6006)\n", 377 | "```\n", 378 | "\n", 379 | "Open up a web browser and navigate to the url specified. \n", 380 | "\n", 381 | "Let's now visualize our graph" 382 | ] 383 | }, 384 | { 385 | "cell_type": "code", 386 | "execution_count": 22, 387 | "metadata": { 388 | "collapsed": false 389 | }, 390 | "outputs": [ 391 | { 392 | "ename": "FailedPreconditionError", 393 | "evalue": "Failed to remove a directory: log_beginner_minst/run1/", 394 | "output_type": "error", 395 | "traceback": [ 396 | "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", 397 | "\u001b[1;31mFailedPreconditionError\u001b[0m Traceback (most recent call last)", 398 | "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgfile\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mExists\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msummaries_dir\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 4\u001b[1;33m \u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgfile\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mDeleteRecursively\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msummaries_dir\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 5\u001b[0m \u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgfile\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mMakeDirs\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msummaries_dir\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", 399 | "\u001b[1;32mC:\\Users\\Andre\\Anaconda3\\envs\\TF_GPU\\lib\\site-packages\\tensorflow\\python\\lib\\io\\file_io.py\u001b[0m in \u001b[0;36mdelete_recursively\u001b[1;34m(dirname)\u001b[0m\n\u001b[0;32m 364\u001b[0m \"\"\"\n\u001b[0;32m 365\u001b[0m \u001b[1;32mwith\u001b[0m \u001b[0merrors\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mraise_exception_on_not_ok_status\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mstatus\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 366\u001b[1;33m \u001b[0mpywrap_tensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mDeleteRecursively\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcompat\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mas_bytes\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdirname\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mstatus\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 367\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 368\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", 400 | "\u001b[1;32mC:\\Users\\Andre\\Anaconda3\\envs\\TF_GPU\\lib\\contextlib.py\u001b[0m in \u001b[0;36m__exit__\u001b[1;34m(self, type, value, traceback)\u001b[0m\n\u001b[0;32m 64\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mtype\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 65\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 66\u001b[1;33m 
\u001b[0mnext\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgen\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 67\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 68\u001b[0m \u001b[1;32mreturn\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 401 | "\u001b[1;32mC:\\Users\\Andre\\Anaconda3\\envs\\TF_GPU\\lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\u001b[0m in \u001b[0;36mraise_exception_on_not_ok_status\u001b[1;34m()\u001b[0m\n\u001b[0;32m 467\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 468\u001b[0m \u001b[0mcompat\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpywrap_tensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTF_Message\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mstatus\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 469\u001b[1;33m pywrap_tensorflow.TF_GetCode(status))\n\u001b[0m\u001b[0;32m 470\u001b[0m \u001b[1;32mfinally\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 471\u001b[0m \u001b[0mpywrap_tensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTF_DeleteStatus\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mstatus\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 402 | "\u001b[1;31mFailedPreconditionError\u001b[0m: Failed to remove a directory: log_beginner_minst/run1/" 403 | ] 404 | } 405 | ], 406 | "source": [ 407 | "summaries_dir = \"log_beginner_minst/run1/\"\n", 408 | "\n", 409 | "if tf.gfile.Exists(summaries_dir):\n", 410 | " tf.gfile.DeleteRecursively(summaries_dir)\n", 411 | " tf.gfile.MakeDirs(summaries_dir)\n", 412 | "\n", 413 | "summary_writer = tf.summary.FileWriter(summaries_dir, sess.graph)" 414 | ] 415 | }, 416 | { 417 | "cell_type": "markdown", 418 | "metadata": {}, 419 | "source": [ 420 | "Let's train -- we'll run the training step 1000 times!" 421 | ] 422 | }, 423 | { 424 | "cell_type": "code", 425 | "execution_count": 23, 426 | "metadata": { 427 | "collapsed": false 428 | }, 429 | "outputs": [ 430 | { 431 | "name": "stdout", 432 | "output_type": "stream", 433 | "text": [ 434 | "Batch 100000\n", 435 | "Batch 200000\n", 436 | "Batch 300000\n", 437 | "Batch 400000\n", 438 | "Batch 500000\n", 439 | "Batch 600000\n", 440 | "Batch 700000\n", 441 | "Batch 800000\n", 442 | "Batch 900000\n", 443 | "Batch 1000000\n" 444 | ] 445 | } 446 | ], 447 | "source": [ 448 | "for i in range(10000):\n", 449 | "\n", 450 | " batch_xs, batch_ys = mnist.train.next_batch(100)\n", 451 | " \n", 452 | " sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys}) \n", 453 | " \n", 454 | " if( (i+1)*100 % 100000 == 0):\n", 455 | " print(\"Batch %i\"% ( (i+1)*100) )\n", 456 | " #print(\"One example: \",batch_xs[0],batch_ys[0])\n" 457 | ] 458 | }, 459 | { 460 | "cell_type": "markdown", 461 | "metadata": { 462 | "collapsed": true 463 | }, 464 | "source": [ 465 | "tf.argmax is an extremely useful function which gives you the index of the highest entry in a tensor along some axis. For example, tf.argmax(y,1) is the label our model thinks is most likely for each input, while tf.argmax(y_,1) is the true label. 
We can use tf.equal to check if our prediction matches the truth.\n" 466 | ] 467 | }, 468 | { 469 | "cell_type": "code", 470 | "execution_count": 24, 471 | "metadata": { 472 | "collapsed": false 473 | }, 474 | "outputs": [ 475 | { 476 | "name": "stdout", 477 | "output_type": "stream", 478 | "text": [ 479 | "[ True True True ..., True True True]\n", 480 | "0.9196\n" 481 | ] 482 | } 483 | ], 484 | "source": [ 485 | "correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\n", 486 | "\n", 487 | "correctPredictionBools = sess.run(correct_prediction, feed_dict={x: mnist.test.images, y_: mnist.test.labels})\n", 488 | "\n", 489 | "print(correctPredictionBools)\n", 490 | "print(sum(correctPredictionBools)/float(len(correctPredictionBools)) )" 491 | ] 492 | }, 493 | { 494 | "cell_type": "code", 495 | "execution_count": 17, 496 | "metadata": { 497 | "collapsed": false 498 | }, 499 | "outputs": [], 500 | "source": [ 501 | "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))" 502 | ] 503 | }, 504 | { 505 | "cell_type": "code", 506 | "execution_count": 18, 507 | "metadata": { 508 | "collapsed": false 509 | }, 510 | "outputs": [ 511 | { 512 | "name": "stdout", 513 | "output_type": "stream", 514 | "text": [ 515 | "0.9249\n" 516 | ] 517 | } 518 | ], 519 | "source": [ 520 | "print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))" 521 | ] 522 | }, 523 | { 524 | "cell_type": "code", 525 | "execution_count": null, 526 | "metadata": { 527 | "collapsed": true 528 | }, 529 | "outputs": [], 530 | "source": [] 531 | } 532 | ], 533 | "metadata": { 534 | "kernelspec": { 535 | "display_name": "Python 3", 536 | "language": "python", 537 | "name": "python3" 538 | }, 539 | "language_info": { 540 | "codemirror_mode": { 541 | "name": "ipython", 542 | "version": 3 543 | }, 544 | "file_extension": ".py", 545 | "mimetype": "text/x-python", 546 | "name": "python", 547 | "nbconvert_exporter": "python", 548 | "pygments_lexer": "ipython3", 549 | "version": "3.5.3" 550 | } 551 | }, 552 | "nbformat": 4, 553 | "nbformat_minor": 0 554 | } 555 | -------------------------------------------------------------------------------- /Notebooks/Exercise - Deriving the Quadratic Formula with SymPy.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "$$\n", 8 | "\\huge \\text{Deriving the Quadratic Formula with SymPy}\\\\\n", 9 | "\\large \\text{Andrew Ribeiro}\\\\\n", 10 | "\\text{December 2017}\n", 11 | "$$" 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": 1, 17 | "metadata": { 18 | "collapsed": true 19 | }, 20 | "outputs": [], 21 | "source": [ 22 | "import sympy as sp\n", 23 | "from IPython.display import display\n", 24 | "sp.init_printing(order=\"lex\",use_latex='mathjax')" 25 | ] 26 | }, 27 | { 28 | "cell_type": "markdown", 29 | "metadata": { 30 | "collapsed": true 31 | }, 32 | "source": [ 33 | "Quadratic equations are of the form: \n", 34 | "\n", 35 | "$$ ax^2 + bx^1 + cx^0 = ax^2 + bx + c = 0$$\n", 36 | "\n", 37 | "Where $a,b,c$ are coeficients. The coeficients can be integers, real numbers, or imaginary numbers. We know from the quadratic formula that such equations can be solved for x by completing the square. In this exercise we will use SymPy to help us derive the quadratic formula. Let's first define some symbols we will use in our symbolic calculations. 
" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": 2, 43 | "metadata": {}, 44 | "outputs": [], 45 | "source": [ 46 | "a,b,c = sp.symbols(\"a b c\")\n", 47 | "z,k = sp.symbols(\"z k\")\n", 48 | "x = sp.symbols(\"x\")" 49 | ] 50 | }, 51 | { 52 | "cell_type": "markdown", 53 | "metadata": {}, 54 | "source": [ 55 | "Now we can easily represent the quadratic equation." 56 | ] 57 | }, 58 | { 59 | "cell_type": "code", 60 | "execution_count": 3, 61 | "metadata": {}, 62 | "outputs": [ 63 | { 64 | "data": { 65 | "text/latex": [ 66 | "$$a x^{2} + b x + c = 0$$" 67 | ], 68 | "text/plain": [ 69 | " 2 \n", 70 | "a⋅x + b⋅x + c = 0" 71 | ] 72 | }, 73 | "execution_count": 3, 74 | "metadata": {}, 75 | "output_type": "execute_result" 76 | } 77 | ], 78 | "source": [ 79 | "lhs = a*x**2 + b*x + c\n", 80 | "rhs = 0 \n", 81 | "quadraticEqn = sp.Eq(lhs,rhs)\n", 82 | "quadraticEqn" 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "metadata": {}, 88 | "source": [ 89 | "## Completing the square \n", 90 | "\n", 91 | "Before we start our derivation, let's talk about completing the square. Say we have binomial rased to a power. " 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": 4, 97 | "metadata": {}, 98 | "outputs": [ 99 | { 100 | "data": { 101 | "text/latex": [ 102 | "$$\\left(a + b\\right)^{2}$$" 103 | ], 104 | "text/plain": [ 105 | " 2\n", 106 | "(a + b) " 107 | ] 108 | }, 109 | "execution_count": 4, 110 | "metadata": {}, 111 | "output_type": "execute_result" 112 | } 113 | ], 114 | "source": [ 115 | "f =(a+b)**2\n", 116 | "f" 117 | ] 118 | }, 119 | { 120 | "cell_type": "markdown", 121 | "metadata": {}, 122 | "source": [ 123 | "If we expand this we get the form: " 124 | ] 125 | }, 126 | { 127 | "cell_type": "code", 128 | "execution_count": 5, 129 | "metadata": {}, 130 | "outputs": [ 131 | { 132 | "data": { 133 | "text/latex": [ 134 | "$$a^{2} + 2 a b + b^{2}$$" 135 | ], 136 | "text/plain": [ 137 | " 2 2\n", 138 | "a + 2⋅a⋅b + b " 139 | ] 140 | }, 141 | "execution_count": 5, 142 | "metadata": {}, 143 | "output_type": "execute_result" 144 | } 145 | ], 146 | "source": [ 147 | "fExp = f.expand()\n", 148 | "fExp" 149 | ] 150 | }, 151 | { 152 | "cell_type": "markdown", 153 | "metadata": {}, 154 | "source": [ 155 | "Going from this expanded form back to the binomial is called *factoring.*" 156 | ] 157 | }, 158 | { 159 | "cell_type": "code", 160 | "execution_count": 6, 161 | "metadata": {}, 162 | "outputs": [ 163 | { 164 | "data": { 165 | "text/latex": [ 166 | "$$\\left(a + b\\right)^{2}$$" 167 | ], 168 | "text/plain": [ 169 | " 2\n", 170 | "(a + b) " 171 | ] 172 | }, 173 | "execution_count": 6, 174 | "metadata": {}, 175 | "output_type": "execute_result" 176 | } 177 | ], 178 | "source": [ 179 | "fExp.factor()" 180 | ] 181 | }, 182 | { 183 | "cell_type": "markdown", 184 | "metadata": {}, 185 | "source": [ 186 | "We use SymPy to define a function for completing the square of a symbolic polynomial. 
This will work for different types of polynomials, but in the case of a quadratic, this works by solving for $z$ and $k$: \n", 187 | "\n", 188 | "$$\n", 189 | "ax^2 + bx + c = (x+z)^2 + k \\\\\n", 190 | "ax^2 + bx + c - (x+z)^2 + k = 0\n", 191 | "$$" 192 | ] 193 | }, 194 | { 195 | "cell_type": "code", 196 | "execution_count": 7, 197 | "metadata": {}, 198 | "outputs": [], 199 | "source": [ 200 | "def completeSquare(poly):\n", 201 | " z,k = sp.symbols(\"z k\")\n", 202 | " completedSquareForm = (x+z)**2+k\n", 203 | " sol = sp.solve(poly-completedSquareForm,[z,k])\n", 204 | " squareRes = sp.Pow(x+sol[0][0],2,evaluate=False)\n", 205 | " constantRes = sol[0][1]\n", 206 | " return squareRes + constantRes\n" 207 | ] 208 | }, 209 | { 210 | "cell_type": "markdown", 211 | "metadata": {}, 212 | "source": [ 213 | "Consider the following polynomial, which is not a perfect square. " 214 | ] 215 | }, 216 | { 217 | "cell_type": "code", 218 | "execution_count": 8, 219 | "metadata": {}, 220 | "outputs": [ 221 | { 222 | "data": { 223 | "text/latex": [ 224 | "$$x^{2} + 10 x + 28$$" 225 | ], 226 | "text/plain": [ 227 | " 2 \n", 228 | "x + 10⋅x + 28" 229 | ] 230 | }, 231 | "execution_count": 8, 232 | "metadata": {}, 233 | "output_type": "execute_result" 234 | } 235 | ], 236 | "source": [ 237 | "poly1 = x**2 + 10*x + 28\n", 238 | "poly1" 239 | ] 240 | }, 241 | { 242 | "cell_type": "markdown", 243 | "metadata": {}, 244 | "source": [ 245 | "If we try to factor this with SymPy it will throw up its hands and do nothing because it cannot be factored. " 246 | ] 247 | }, 248 | { 249 | "cell_type": "code", 250 | "execution_count": 9, 251 | "metadata": {}, 252 | "outputs": [ 253 | { 254 | "data": { 255 | "text/latex": [ 256 | "$$x^{2} + 10 x + 28$$" 257 | ], 258 | "text/plain": [ 259 | " 2 \n", 260 | "x + 10⋅x + 28" 261 | ] 262 | }, 263 | "execution_count": 9, 264 | "metadata": {}, 265 | "output_type": "execute_result" 266 | } 267 | ], 268 | "source": [ 269 | "poly1.factor()" 270 | ] 271 | }, 272 | { 273 | "cell_type": "markdown", 274 | "metadata": {}, 275 | "source": [ 276 | "We still can, however, complete the square. " 277 | ] 278 | }, 279 | { 280 | "cell_type": "code", 281 | "execution_count": 10, 282 | "metadata": {}, 283 | "outputs": [ 284 | { 285 | "data": { 286 | "text/latex": [ 287 | "$$x^{2} + 10 x + 28 = c + \\left(b + x\\right)^{2}$$" 288 | ], 289 | "text/plain": [ 290 | " 2 2\n", 291 | "x + 10⋅x + 28 = c + (b + x) " 292 | ] 293 | }, 294 | "execution_count": 10, 295 | "metadata": {}, 296 | "output_type": "execute_result" 297 | } 298 | ], 299 | "source": [ 300 | "completedSquareForm = (x+b)**2+c\n", 301 | "poly1Eqn = sp.Eq(poly1,completedSquareForm)\n", 302 | "poly1Eqn" 303 | ] 304 | }, 305 | { 306 | "cell_type": "code", 307 | "execution_count": 11, 308 | "metadata": {}, 309 | "outputs": [ 310 | { 311 | "data": { 312 | "text/latex": [ 313 | "$$x^{2} + 10 x + 28 = c + \\left(b + x\\right)^{2} = \\left(x + 5\\right)^{2} + 3$$" 314 | ], 315 | "text/plain": [ 316 | " 2 2 2 \n", 317 | "x + 10⋅x + 28 = c + (b + x) = (x + 5) + 3" 318 | ] 319 | }, 320 | "execution_count": 11, 321 | "metadata": {}, 322 | "output_type": "execute_result" 323 | } 324 | ], 325 | "source": [ 326 | "sol = sp.solve(poly1 - completedSquareForm,[b,c])\n", 327 | "sp.Eq(poly1Eqn,completeSquare(poly1))" 328 | ] 329 | }, 330 | { 331 | "cell_type": "markdown", 332 | "metadata": {}, 333 | "source": [ 334 | "Now consider a polynomial which is a perfect square. 
" 335 | ] 336 | }, 337 | { 338 | "cell_type": "code", 339 | "execution_count": 12, 340 | "metadata": {}, 341 | "outputs": [ 342 | { 343 | "data": { 344 | "text/latex": [ 345 | "$$x^{2} + 10 x + 25$$" 346 | ], 347 | "text/plain": [ 348 | " 2 \n", 349 | "x + 10⋅x + 25" 350 | ] 351 | }, 352 | "execution_count": 12, 353 | "metadata": {}, 354 | "output_type": "execute_result" 355 | } 356 | ], 357 | "source": [ 358 | "poly2 = ((x+5)**2).expand()\n", 359 | "poly2" 360 | ] 361 | }, 362 | { 363 | "cell_type": "markdown", 364 | "metadata": {}, 365 | "source": [ 366 | "Factoring now works, but we can also complete the square with $c=0$" 367 | ] 368 | }, 369 | { 370 | "cell_type": "code", 371 | "execution_count": 13, 372 | "metadata": {}, 373 | "outputs": [ 374 | { 375 | "data": { 376 | "text/latex": [ 377 | "$$\\left(x + 5\\right)^{2}$$" 378 | ], 379 | "text/plain": [ 380 | " 2\n", 381 | "(x + 5) " 382 | ] 383 | }, 384 | "execution_count": 13, 385 | "metadata": {}, 386 | "output_type": "execute_result" 387 | } 388 | ], 389 | "source": [ 390 | "poly2.factor()" 391 | ] 392 | }, 393 | { 394 | "cell_type": "code", 395 | "execution_count": 14, 396 | "metadata": {}, 397 | "outputs": [ 398 | { 399 | "data": { 400 | "text/latex": [ 401 | "$$\\left(x + 5\\right)^{2}$$" 402 | ], 403 | "text/plain": [ 404 | " 2\n", 405 | "(x + 5) " 406 | ] 407 | }, 408 | "execution_count": 14, 409 | "metadata": {}, 410 | "output_type": "execute_result" 411 | } 412 | ], 413 | "source": [ 414 | "completeSquare(poly2)" 415 | ] 416 | }, 417 | { 418 | "cell_type": "markdown", 419 | "metadata": {}, 420 | "source": [ 421 | "We will use this function below to help us derive the quadratic formula. " 422 | ] 423 | }, 424 | { 425 | "cell_type": "markdown", 426 | "metadata": {}, 427 | "source": [ 428 | "## Using the completion of the square to derive the quadratic formula" 429 | ] 430 | }, 431 | { 432 | "cell_type": "code", 433 | "execution_count": 15, 434 | "metadata": {}, 435 | "outputs": [ 436 | { 437 | "data": { 438 | "text/latex": [ 439 | "$$x^{2} + \\frac{1}{a} \\left(b x + c\\right)$$" 440 | ], 441 | "text/plain": [ 442 | " 2 b⋅x + c\n", 443 | "x + ───────\n", 444 | " a " 445 | ] 446 | }, 447 | "execution_count": 15, 448 | "metadata": {}, 449 | "output_type": "execute_result" 450 | } 451 | ], 452 | "source": [ 453 | "quadApart = (quadraticEqn/a).apart(a)\n", 454 | "quadApart" 455 | ] 456 | }, 457 | { 458 | "cell_type": "code", 459 | "execution_count": 16, 460 | "metadata": {}, 461 | "outputs": [ 462 | { 463 | "data": { 464 | "text/latex": [ 465 | "$$x^{2} + \\frac{b x}{a} + \\frac{c}{a}$$" 466 | ], 467 | "text/plain": [ 468 | " 2 b⋅x c\n", 469 | "x + ─── + ─\n", 470 | " a a" 471 | ] 472 | }, 473 | "execution_count": 16, 474 | "metadata": {}, 475 | "output_type": "execute_result" 476 | } 477 | ], 478 | "source": [ 479 | "expanded = quadApart.expand()\n", 480 | "lhs = expanded\n", 481 | "lhs" 482 | ] 483 | }, 484 | { 485 | "cell_type": "markdown", 486 | "metadata": {}, 487 | "source": [ 488 | "Subtract both sides by $\\large \\frac{c}{a}$" 489 | ] 490 | }, 491 | { 492 | "cell_type": "code", 493 | "execution_count": 17, 494 | "metadata": {}, 495 | "outputs": [ 496 | { 497 | "data": { 498 | "text/latex": [ 499 | "$$x^{2} + \\frac{b x}{a} = - \\frac{c}{a}$$" 500 | ], 501 | "text/plain": [ 502 | " 2 b⋅x -c \n", 503 | "x + ─── = ───\n", 504 | " a a " 505 | ] 506 | }, 507 | "execution_count": 17, 508 | "metadata": {}, 509 | "output_type": "execute_result" 510 | } 511 | ], 512 | "source": [ 513 | "lhs = lhs - c/a\n", 514 | "rhs = rhs - 
c/a\n", 515 | "sp.Eq(lhs,rhs)" 516 | ] 517 | }, 518 | { 519 | "cell_type": "markdown", 520 | "metadata": {}, 521 | "source": [ 522 | "We would now like to know what term we must add to both sides of the equation such that we can complete the square of the left hand side so we can isolate x. We know from previous results that it is:\n", 523 | "\n", 524 | "$$ \n", 525 | "\\begin{align}\n", 526 | "\\large \\left( \\frac{b}{2a} \\right)^2 = \\frac{b^2}{4a^2}\n", 527 | "\\end{align}\n", 528 | "$$\n", 529 | "\n", 530 | "But let's derive this using sympy. To do this we will need to solve the following equation for $z$. We can also solve for $k$ to get the completed square, but we will derive this later. \n", 531 | "\n", 532 | "$$\n", 533 | "\\large x^2+\\frac{b}{a}x+z = \\left( x+k \\right)^2\n", 534 | "$$\n", 535 | "\n", 536 | "Subtracting the right hand side from both sides will put this in a form SymPy favors for solving:\n", 537 | "\n", 538 | "$$\n", 539 | "\\large x^2+\\frac{b}{a}x+z - \\left( x+k \\right)^2 = 0\n", 540 | "$$" 541 | ] 542 | }, 543 | { 544 | "cell_type": "code", 545 | "execution_count": 18, 546 | "metadata": {}, 547 | "outputs": [ 548 | { 549 | "name": "stdout", 550 | "output_type": "stream", 551 | "text": [ 552 | "Z:\n" 553 | ] 554 | }, 555 | { 556 | "data": { 557 | "text/latex": [ 558 | "$$\\frac{b^{2}}{4 a^{2}}$$" 559 | ], 560 | "text/plain": [ 561 | " 2 \n", 562 | " b \n", 563 | "────\n", 564 | " 2\n", 565 | "4⋅a " 566 | ] 567 | }, 568 | "metadata": {}, 569 | "output_type": "display_data" 570 | }, 571 | { 572 | "name": "stdout", 573 | "output_type": "stream", 574 | "text": [ 575 | "K:\n" 576 | ] 577 | }, 578 | { 579 | "data": { 580 | "text/latex": [ 581 | "$$\\frac{b}{2 a}$$" 582 | ], 583 | "text/plain": [ 584 | " b \n", 585 | "───\n", 586 | "2⋅a" 587 | ] 588 | }, 589 | "metadata": {}, 590 | "output_type": "display_data" 591 | } 592 | ], 593 | "source": [ 594 | "solvingForZK = sp.solve(lhs+z -(x+k)**2,z,k)\n", 595 | "print(\"Z:\")\n", 596 | "# Sympy automatically applies the square. \n", 597 | "display(solvingForZK[0][0])\n", 598 | "print(\"K:\")\n", 599 | "display(solvingForZK[0][1])" 600 | ] 601 | }, 602 | { 603 | "cell_type": "markdown", 604 | "metadata": {}, 605 | "source": [ 606 | "Thus we see if we'd like to write the left hand side of our equation as a square, we need to add $ \\left( \\frac{b}{2a} \\right)^2$ to both sides of our equation." 607 | ] 608 | }, 609 | { 610 | "cell_type": "code", 611 | "execution_count": 19, 612 | "metadata": {}, 613 | "outputs": [ 614 | { 615 | "data": { 616 | "text/latex": [ 617 | "$$x^{2} + \\left(\\frac{b}{2 a}\\right)^{2} + \\frac{b x}{a} = \\left(\\frac{b}{2 a}\\right)^{2} - \\frac{c}{a}$$" 618 | ], 619 | "text/plain": [ 620 | " 2 2 \n", 621 | " 2 ⎛ b ⎞ b⋅x ⎛ b ⎞ c\n", 622 | "x + ⎜───⎟ + ─── = ⎜───⎟ - ─\n", 623 | " ⎝2⋅a⎠ a ⎝2⋅a⎠ a" 624 | ] 625 | }, 626 | "execution_count": 19, 627 | "metadata": {}, 628 | "output_type": "execute_result" 629 | } 630 | ], 631 | "source": [ 632 | "completingSquareTerm = sp.Pow((b/(2*a)),2,evaluate=False)\n", 633 | "nLhs = lhs + completingSquareTerm\n", 634 | "nRhs = rhs + completingSquareTerm\n", 635 | "sp.Eq(nLhs,nRhs)" 636 | ] 637 | }, 638 | { 639 | "cell_type": "markdown", 640 | "metadata": {}, 641 | "source": [ 642 | "We can use the function we defined above to complete the square of the left hand side." 
643 | ] 644 | }, 645 | { 646 | "cell_type": "code", 647 | "execution_count": 20, 648 | "metadata": {}, 649 | "outputs": [ 650 | { 651 | "data": { 652 | "text/latex": [ 653 | "$$\\left(x + \\frac{b}{2 a}\\right)^{2}$$" 654 | ], 655 | "text/plain": [ 656 | " 2\n", 657 | "⎛ b ⎞ \n", 658 | "⎜x + ───⎟ \n", 659 | "⎝ 2⋅a⎠ " 660 | ] 661 | }, 662 | "execution_count": 20, 663 | "metadata": {}, 664 | "output_type": "execute_result" 665 | } 666 | ], 667 | "source": [ 668 | "nLhs = completeSquare(nLhs)\n", 669 | "nLhs" 670 | ] 671 | }, 672 | { 673 | "cell_type": "markdown", 674 | "metadata": {}, 675 | "source": [ 676 | "As we see, $\\frac{b}{2a}$ is the $k$ we computed before. We now have: " 677 | ] 678 | }, 679 | { 680 | "cell_type": "code", 681 | "execution_count": 21, 682 | "metadata": {}, 683 | "outputs": [ 684 | { 685 | "data": { 686 | "text/latex": [ 687 | "$$\\left(x + \\frac{b}{2 a}\\right)^{2} = \\left(\\frac{b}{2 a}\\right)^{2} - \\frac{c}{a}$$" 688 | ], 689 | "text/plain": [ 690 | " 2 2 \n", 691 | "⎛ b ⎞ ⎛ b ⎞ c\n", 692 | "⎜x + ───⎟ = ⎜───⎟ - ─\n", 693 | "⎝ 2⋅a⎠ ⎝2⋅a⎠ a" 694 | ] 695 | }, 696 | "execution_count": 21, 697 | "metadata": {}, 698 | "output_type": "execute_result" 699 | } 700 | ], 701 | "source": [ 702 | "sp.Eq(nLhs,nRhs)" 703 | ] 704 | }, 705 | { 706 | "cell_type": "markdown", 707 | "metadata": {}, 708 | "source": [ 709 | "We have finally found a form where we can isolate $x$! The remainder of the derivation is just a simple matter of rearanging terms. \n", 710 | "\n", 711 | "We need to get the right hand side into a form easier for sympy to work with later. This requires a little wizardry with polynomial manipulation module. We could have done this operation on one line, but I will show the steps here. \n", 712 | "\n", 713 | "We first factor. " 714 | ] 715 | }, 716 | { 717 | "cell_type": "code", 718 | "execution_count": 22, 719 | "metadata": {}, 720 | "outputs": [ 721 | { 722 | "data": { 723 | "text/latex": [ 724 | "$$- \\frac{1}{4 a^{2}} \\left(4 a c - b^{2}\\right)$$" 725 | ], 726 | "text/plain": [ 727 | " ⎛ 2⎞ \n", 728 | "-⎝4⋅a⋅c - b ⎠ \n", 729 | "──────────────\n", 730 | " 2 \n", 731 | " 4⋅a " 732 | ] 733 | }, 734 | "execution_count": 22, 735 | "metadata": {}, 736 | "output_type": "execute_result" 737 | } 738 | ], 739 | "source": [ 740 | "nRhs = nRhs.factor()\n", 741 | "nRhs" 742 | ] 743 | }, 744 | { 745 | "cell_type": "markdown", 746 | "metadata": {}, 747 | "source": [ 748 | "As you see this gives us a strange form. We can resolve this by expanding, then bringing the terms together again. 
" 749 | ] 750 | }, 751 | { 752 | "cell_type": "code", 753 | "execution_count": 23, 754 | "metadata": {}, 755 | "outputs": [ 756 | { 757 | "data": { 758 | "text/latex": [ 759 | "$$- \\frac{c}{a} + \\frac{b^{2}}{4 a^{2}}$$" 760 | ], 761 | "text/plain": [ 762 | " 2 \n", 763 | " c b \n", 764 | "- ─ + ────\n", 765 | " a 2\n", 766 | " 4⋅a " 767 | ] 768 | }, 769 | "execution_count": 23, 770 | "metadata": {}, 771 | "output_type": "execute_result" 772 | } 773 | ], 774 | "source": [ 775 | "nRhs = nRhs.expand()\n", 776 | "nRhs" 777 | ] 778 | }, 779 | { 780 | "cell_type": "code", 781 | "execution_count": 24, 782 | "metadata": {}, 783 | "outputs": [ 784 | { 785 | "data": { 786 | "text/latex": [ 787 | "$$\\frac{1}{4 a^{2}} \\left(- 4 a c + b^{2}\\right)$$" 788 | ], 789 | "text/plain": [ 790 | " 2\n", 791 | "-4⋅a⋅c + b \n", 792 | "───────────\n", 793 | " 2 \n", 794 | " 4⋅a " 795 | ] 796 | }, 797 | "execution_count": 24, 798 | "metadata": {}, 799 | "output_type": "execute_result" 800 | } 801 | ], 802 | "source": [ 803 | "nRhs = nRhs.together()\n", 804 | "nRhs" 805 | ] 806 | }, 807 | { 808 | "cell_type": "markdown", 809 | "metadata": {}, 810 | "source": [ 811 | "We now square both sides. \n", 812 | "\n", 813 | "Since we did not define our symbol type, sympy will not apply the square root because it does not always hold for all types of numbers that $\\sqrt{x^2} = x^2$; however we can force it to make this assumption with a utility function called powdnest." 814 | ] 815 | }, 816 | { 817 | "cell_type": "code", 818 | "execution_count": 25, 819 | "metadata": {}, 820 | "outputs": [ 821 | { 822 | "data": { 823 | "text/latex": [ 824 | "$$x + \\frac{b}{2 a} = \\frac{1}{2 a} \\sqrt{- 4 a c + b^{2}}$$" 825 | ], 826 | "text/plain": [ 827 | " _____________\n", 828 | " ╱ 2 \n", 829 | " b ╲╱ -4⋅a⋅c + b \n", 830 | "x + ─── = ────────────────\n", 831 | " 2⋅a 2⋅a " 832 | ] 833 | }, 834 | "execution_count": 25, 835 | "metadata": {}, 836 | "output_type": "execute_result" 837 | } 838 | ], 839 | "source": [ 840 | "nLhs = sp.powdenest(sp.sqrt(nLhs),force=True)\n", 841 | "nRhs = sp.powdenest(sp.sqrt(nRhs),force=True)\n", 842 | "sp.Eq(nLhs,nRhs)" 843 | ] 844 | }, 845 | { 846 | "cell_type": "markdown", 847 | "metadata": {}, 848 | "source": [ 849 | "Now we subtract $\\frac{b}{2a}$ from both sides. " 850 | ] 851 | }, 852 | { 853 | "cell_type": "code", 854 | "execution_count": 26, 855 | "metadata": {}, 856 | "outputs": [ 857 | { 858 | "data": { 859 | "text/latex": [ 860 | "$$x = - \\frac{b}{2 a} + \\frac{1}{2 a} \\sqrt{- 4 a c + b^{2}}$$" 861 | ], 862 | "text/plain": [ 863 | " _____________\n", 864 | " ╱ 2 \n", 865 | " b ╲╱ -4⋅a⋅c + b \n", 866 | "x = - ─── + ────────────────\n", 867 | " 2⋅a 2⋅a " 868 | ] 869 | }, 870 | "execution_count": 26, 871 | "metadata": {}, 872 | "output_type": "execute_result" 873 | } 874 | ], 875 | "source": [ 876 | "nLhs = nLhs - b/(2*a)\n", 877 | "nRhs = nRhs - b/(2*a)\n", 878 | "sp.Eq(nLhs,nRhs)" 879 | ] 880 | }, 881 | { 882 | "cell_type": "markdown", 883 | "metadata": {}, 884 | "source": [ 885 | "We simplify the right hand side and get our familiar quadratic equation. 
" 886 | ] 887 | }, 888 | { 889 | "cell_type": "code", 890 | "execution_count": 27, 891 | "metadata": {}, 892 | "outputs": [ 893 | { 894 | "data": { 895 | "text/latex": [ 896 | "$$x = \\frac{1}{2 a} \\left(- b + \\sqrt{- 4 a c + b^{2}}\\right)$$" 897 | ], 898 | "text/plain": [ 899 | " _____________\n", 900 | " ╱ 2 \n", 901 | " -b + ╲╱ -4⋅a⋅c + b \n", 902 | "x = ─────────────────────\n", 903 | " 2⋅a " 904 | ] 905 | }, 906 | "execution_count": 27, 907 | "metadata": {}, 908 | "output_type": "execute_result" 909 | } 910 | ], 911 | "source": [ 912 | "sp.Eq(x,nRhs.simplify())" 913 | ] 914 | }, 915 | { 916 | "cell_type": "markdown", 917 | "metadata": {}, 918 | "source": [ 919 | "As you can see, there is no $\\pm$ we are accustom to seeing. There is obviously no plus or minus operator inherent in mathematics. We would have introduced this when we took the square root of both sides because the square of $x$ could either be positive or negative. " 920 | ] 921 | } 922 | ], 923 | "metadata": { 924 | "kernelspec": { 925 | "display_name": "Python 3", 926 | "language": "python", 927 | "name": "python3" 928 | }, 929 | "language_info": { 930 | "codemirror_mode": { 931 | "name": "ipython", 932 | "version": 3 933 | }, 934 | "file_extension": ".py", 935 | "mimetype": "text/x-python", 936 | "name": "python", 937 | "nbconvert_exporter": "python", 938 | "pygments_lexer": "ipython3", 939 | "version": "3.5.3" 940 | } 941 | }, 942 | "nbformat": 4, 943 | "nbformat_minor": 2 944 | } 945 | -------------------------------------------------------------------------------- /Notebooks/Latex Essentials .ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "$$\n", 8 | "\\huge\\text{Latex Essentials}\\\\\n", 9 | "\\text{Andrew Ribeiro}\\\\\n", 10 | "\\text{2017}\n", 11 | "$$\n", 12 | "\n", 13 | "LaTex is a robust typesetting system which is used ubiquitously in scientific publications and mathematical documents. With a working knowledge of LaTex, you can make beautiful scientific documents and express your mathematical constructions easily. Jupyter notebooks allow us embed LaTex in markdown cells using the double dollar sign block ```$$ Your latex here $$```, so we can use LaTex to document our mathematical notebooks easily! This guide will show you the basics of using LaTex in Jupyter notebooks. \n", 14 | "\n", 15 | "** Double click any cell to reveal the LaTex code. **\n", 16 | "\n", 17 | "Resources and Tools\n", 18 | "* https://www.sharelatex.com/learn/\n", 19 | "* https://www.latex4technics.com/\n", 20 | "* https://math.meta.stackexchange.com/questions/5020/mathjax-basic-tutorial-and-quick-reference" 21 | ] 22 | }, 23 | { 24 | "cell_type": "markdown", 25 | "metadata": {}, 26 | "source": [ 27 | "## Basic Formatting and Embedding Latex" 28 | ] 29 | }, 30 | { 31 | "cell_type": "markdown", 32 | "metadata": {}, 33 | "source": [ 34 | "We can use single dollar signs to embedd latex in a markdown sentence. Let's say we want to talk about a function $f(x)$ in some text and refer to $x$ as well. This is how you'd go about it. " 35 | ] 36 | }, 37 | { 38 | "cell_type": "markdown", 39 | "metadata": {}, 40 | "source": [ 41 | "If you want to put a snippet of LaTex on its own dedicated line, use double dollar signs. 
\n", 42 | "\n", 43 | "$$ \\text{This is a dedicated LaTex line}$$\n", 44 | "\n", 45 | "Within some latex block, you can write multiple lines as such:\n", 46 | "\n", 47 | "$$ \\text{Line 1} \\\\ \\text{Line 2} $$\n", 48 | "\n", 49 | "You can adjust the size of each line as such: \n", 50 | "\n", 51 | "$$ \\huge \\text{This is a big line} \\\\ \n", 52 | "\\large \\text{This is a smaller, but still big } \\\\\n", 53 | "\\text{This is a normal size} \\\\\n", 54 | "\\small \\text{This is a smaller size} \n", 55 | "$$\n", 56 | "\n", 57 | "For more complex statements you must use the begin and end commands:\n", 58 | "\n", 59 | "```$$\\begin{align} your latex \\end{align}$$``` \n", 60 | "\n", 61 | "The following snippets use this. \n", 62 | "\n", 63 | "You can also give each line a tag if you'd like to refer to them later. \n", 64 | "\n", 65 | "$$ \n", 66 | "\\begin{align}\n", 67 | "\\text{Line 1} \\tag 1 \\\\\n", 68 | "\\text{Line 2} \\tag 2\n", 69 | "\\end{align}\n", 70 | "$$\n", 71 | "\n", 72 | "You can arrange things in columns and rows using the double ampersand. \n", 73 | "\n", 74 | "$$ \n", 75 | "\\begin{align}\n", 76 | "\\text{This} && \\text{Is} && \\text{sparta} \\\\\n", 77 | "\\text{no} && \\text{this} && \\text{is} && \\text{columns} \\\\\n", 78 | "&& \\text{and} && \\text{rows}\n", 79 | "\\end{align}\n", 80 | "$$\n", 81 | "\n", 82 | "You can keep equations aligned by using the single ampersand.: \n", 83 | "\n", 84 | "$$\n", 85 | "\\begin{align}\n", 86 | "f(x) &= \\frac{x(x+1)}{2} \\\\\n", 87 | "& = equiv \\\\\n", 88 | "& = equiv\n", 89 | "\\end{align}\n", 90 | "$$\n" 91 | ] 92 | }, 93 | { 94 | "cell_type": "markdown", 95 | "metadata": {}, 96 | "source": [ 97 | "## Essentials " 98 | ] 99 | }, 100 | { 101 | "cell_type": "markdown", 102 | "metadata": {}, 103 | "source": [ 104 | "#### Fractions\n", 105 | "$$\\frac{2}{\\frac{2}{3}+5}+\\frac{1}{2}$$\n", 106 | "\n", 107 | "#### Matrices\n", 108 | "$$\n", 109 | "\\large\\begin{bmatrix}\n", 110 | "n && n-1 && n-2 && \\dots && 0 \\\\\n", 111 | "0 && 1 && 2 && \\dots&& n\n", 112 | "\\end{bmatrix}^\\intercal\n", 113 | "$$\n", 114 | "\n", 115 | "#### Sums\n", 116 | "$$ \\large\\sum_{i=1}^{n}{i} = \\frac{n(n+1)}{2} $$\n", 117 | "\n", 118 | "#### Products\n", 119 | "$$\n", 120 | "\\large\\prod_{i=1}^{n}{i}\n", 121 | "$$\n", 122 | "\n", 123 | "#### Calculus\n", 124 | "$$\\int_{a}^{b} x^2 dx$$\n", 125 | "\n", 126 | "$$\\lim_{x\\to\\infty} f(x)$$\n" 127 | ] 128 | }, 129 | { 130 | "cell_type": "markdown", 131 | "metadata": {}, 132 | "source": [ 133 | "## Examples" 134 | ] 135 | }, 136 | { 137 | "cell_type": "markdown", 138 | "metadata": {}, 139 | "source": [ 140 | "$$\n", 141 | "\\large\\sum_{i=1}^{n}{i} = \\frac{n(n+1)}{2} = \n", 142 | "\\frac{\n", 143 | "\\begin{bmatrix}\n", 144 | "1\\\\\n", 145 | "1\n", 146 | "\\end{bmatrix}^\\intercal\n", 147 | "\\begin{bmatrix}\n", 148 | "n && n-1 && n-2 && \\dots && 0 \\\\\n", 149 | "0 && 1 && 2 && \\dots&& n\n", 150 | "\\end{bmatrix}\n", 151 | "\\begin{bmatrix}\n", 152 | "1&&1&&\\dots&& n\n", 153 | "\\end{bmatrix}^\\intercal}{2}\n", 154 | "$$\n" 155 | ] 156 | }, 157 | { 158 | "cell_type": "markdown", 159 | "metadata": {}, 160 | "source": [ 161 | "From Mike\n", 162 | "\n", 163 | "$$\n", 164 | "\\begin{align}\n", 165 | "\\frac{\\partial{f}}{\\partial{v_{0}}} & = \\frac{1}{2} \\frac{\\partial}{\\partial{v_{0}}} (( A_{00}v_{0} + A_{01}v_{1} - b_{0} )^{2}) + \\frac{1}{2} \\frac{\\partial}{\\partial{v_{0}}} (( A_{10}v_{0} + A_{11}v_{1} - b_{1} )^{2}) && \\tag 6\\\\\n", 166 | "& = \\frac{1}{2} 2 ( A_{00}v_{0} + 
A_{01}v_{1} - b_{0} ) \\frac{\\partial}{\\partial{v_{0}}} ( A_{00}v_{0} + A_{01}v_{1} - b_{0} ) + \\frac{1}{2} 2 ( A_{10}v_{0} + A_{11}v_{1} - b_{1} ) \\frac{\\partial}{\\partial{v_{0}}} ( A_{10}v_{0} + A_{11}v_{1} - b_{1} ) && \\tag 7\\\\ \n", 167 | "& = A_{00}( A_{00}v_{0} + A_{01}v_{1} - b_{0} ) + A_{10} (A_{10}v_{0} + A_{11}v_{1} - b_{1}) && \\tag 8\\\\\n", 168 | "\\end{align}\n", 169 | "$$" 170 | ] 171 | }, 172 | { 173 | "cell_type": "markdown", 174 | "metadata": {}, 175 | "source": [ 176 | "$$\n", 177 | "f(\\mathbf{x}) = \\frac{1}{2} \\lVert \\mathbf{A}\\mathbf{x} - \\mathbf{b} \\rVert^{2}_{2}\n", 178 | "$$" 179 | ] 180 | }, 181 | { 182 | "cell_type": "markdown", 183 | "metadata": {}, 184 | "source": [ 185 | "$$\n", 186 | "\\begin{align}\n", 187 | "\\nabla_{\\mathbf{v}}f(\\mathbf{v}) & = \\begin{bmatrix}A_{00}&A_{10}\\\\A_{01}&A_{11}\\\\ \\end{bmatrix} \\begin{bmatrix}A_{00}v_{0} + A_{01}v_{1} - b_{0}\\\\A_{10}v_{0} + A_{11}v_{1} - b_{1}\\\\ \\end{bmatrix} && \\tag{11}\\\\\n", 188 | "& = \\mathbf{A}^T(\\mathbf{A}\\mathbf{x} - \\mathbf{b}) && \\tag{qed}\\\\\n", 189 | "\\end{align}\n", 190 | "$$" 191 | ] 192 | }, 193 | { 194 | "cell_type": "markdown", 195 | "metadata": {}, 196 | "source": [ 197 | "$$\n", 198 | "\\begin{align}\n", 199 | "Pr(x,y)Pr(z|w,x,y)Pr(w|x,y)\n", 200 | "& = Pr(z|w,x,y)Pr(w|x,y)Pr(x,y) &&\\text{rearrange} \\tag 1\\\\\n", 201 | "& = Pr(z|w,x,y)Pr(w,x,y) &&\\text{prince 2.5} \\tag 2\\\\\n", 202 | "& = \\frac{Pr(w,x,y,z)}{Pr(w,x,y)}Pr(w,x,y) &&\\text{bayes rule on left factor of (2)} \\tag 3\\\\\n", 203 | "& = Pr(w,x,y,z) &&\\text{terms cancel} \\tag 4\\\\\n", 204 | "\\end{align}\n", 205 | "$$" 206 | ] 207 | } 208 | ], 209 | "metadata": { 210 | "kernelspec": { 211 | "display_name": "Python 3", 212 | "language": "python", 213 | "name": "python3" 214 | }, 215 | "language_info": { 216 | "codemirror_mode": { 217 | "name": "ipython", 218 | "version": 3 219 | }, 220 | "file_extension": ".py", 221 | "mimetype": "text/x-python", 222 | "name": "python", 223 | "nbconvert_exporter": "python", 224 | "pygments_lexer": "ipython3", 225 | "version": "3.5.3" 226 | } 227 | }, 228 | "nbformat": 4, 229 | "nbformat_minor": 2 230 | } 231 | -------------------------------------------------------------------------------- /Notebooks/Neural Boolean Connectives 1.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Neural Boolean Connectives 1\n", 8 | "** November 2017 **\n", 9 | "\n", 10 | "** Andrew Riberio @ [AndrewRib.com](http://www.andrewrib.com) **\n", 11 | "\n", 12 | "A notebook on understanding how a single layer neural network can represent the operation of common logical operators. \n", 13 | "\n", 14 | "** Note: ** This notebook contains interactive elements and certain latex snippets that will not render in github markdown. 
\n", 15 | "You must run this notebook on your local Jupyter notebook environment for interactive elements or render or if you wish to render just the latex by using the url of this repo with the [online NBViewer](https://nbviewer.jupyter.org/).\n", 16 | "\n", 17 | "## Libraries" 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": 221, 23 | "metadata": { 24 | "collapsed": true 25 | }, 26 | "outputs": [], 27 | "source": [ 28 | "import numpy as np\n", 29 | "\n", 30 | "#Interactive Components\n", 31 | "from ipywidgets import interact" 32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": {}, 37 | "source": [ 38 | "## Boolean Connectives\n", 39 | "\n", 40 | "Boolean connectives are logical operators that connect two boolean statements in a particular way. Amoung the fundamental few are and, or, implication, and xor. We can define the operations by truth tables as seen here:\n", 41 | "\n", 42 | "\n", 43 | "A | B | A ∧ B\n", 44 | "--- | --- | ---\n", 45 | "0 | 0 | 0\n", 46 | "0 | 1 | 0\n", 47 | "1 | 0 | 0 \n", 48 | "1 | 1 | 1\n", 49 | "\n", 50 | "A | B | A ∨ B\n", 51 | "--- | --- | ---\n", 52 | "0 | 0 | 0\n", 53 | "0 | 1 | 1\n", 54 | "1 | 0 | 1\n", 55 | "1 | 1 | 1\n", 56 | "\n", 57 | "A | B | A → B\n", 58 | "--- | --- | ---\n", 59 | "0 | 0 | 1\n", 60 | "0 | 1 | 1\n", 61 | "1 | 0 | 0\n", 62 | "1 | 1 | 1\n", 63 | "\n", 64 | "A | B | A ⊻ B\n", 65 | "--- | --- | ---\n", 66 | "0 | 0 | 0\n", 67 | "0 | 1 | 1\n", 68 | "1 | 0 | 1\n", 69 | "1 | 1 | 0" 70 | ] 71 | }, 72 | { 73 | "cell_type": "markdown", 74 | "metadata": {}, 75 | "source": [ 76 | "## Learning XOR\n", 77 | "Here we simply implement the results in section 6.1 of the book. We are not really learning XOR here. We are using the results of some learning process to show that given some matrix X, we can transform it into a vector which represents the XOR operation on each row of X. 
" 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": 222, 83 | "metadata": { 84 | "collapsed": true 85 | }, 86 | "outputs": [], 87 | "source": [ 88 | "def relu(z):\n", 89 | " return np.maximum(0,z)\n", 90 | "\n", 91 | "def linLayer(x,w,b):\n", 92 | " return x.T*w + b\n", 93 | "\n", 94 | "# x: 2 by 4\n", 95 | "# W: 2 by 2\n", 96 | "# c: 2 by 1\n", 97 | "# w: 2 by 1\n", 98 | "# b: scalar\n", 99 | "\n", 100 | "def fullNetwork(x,W,c,w,b):\n", 101 | " h = relu( linLayer(W,x,c) )\n", 102 | " return linLayer(w, h, b)" 103 | ] 104 | }, 105 | { 106 | "cell_type": "code", 107 | "execution_count": 223, 108 | "metadata": {}, 109 | "outputs": [ 110 | { 111 | "name": "stdout", 112 | "output_type": "stream", 113 | "text": [ 114 | "x.T\n", 115 | "\n", 116 | "[[0 0]\n", 117 | " [0 1]\n", 118 | " [1 0]\n", 119 | " [1 1]]\n", 120 | "\n", 121 | "x.T*W\n", 122 | "\n", 123 | "[[0 0]\n", 124 | " [1 1]\n", 125 | " [1 1]\n", 126 | " [2 2]]\n" 127 | ] 128 | } 129 | ], 130 | "source": [ 131 | "W = np.matrix([[1,1],[1,1]])\n", 132 | "x = np.matrix([[0,0,1,1],[0,1,0,1]])\n", 133 | "c = np.matrix([0,-1]).T\n", 134 | "\n", 135 | "print(\"x.T\\n\")\n", 136 | "print(x.T)\n", 137 | "print(\"\\nx.T*W\\n\")\n", 138 | "print(x.T*W)" 139 | ] 140 | }, 141 | { 142 | "cell_type": "code", 143 | "execution_count": 224, 144 | "metadata": {}, 145 | "outputs": [ 146 | { 147 | "name": "stdout", 148 | "output_type": "stream", 149 | "text": [ 150 | "x.T*W + c\n", 151 | "\n", 152 | "[[ 0 -1]\n", 153 | " [ 1 0]\n", 154 | " [ 1 0]\n", 155 | " [ 2 1]]\n" 156 | ] 157 | } 158 | ], 159 | "source": [ 160 | "linLayH = linLayer(W,x,c)\n", 161 | "print(\"x.T*W + c\\n\")\n", 162 | "print(linLayH.T)" 163 | ] 164 | }, 165 | { 166 | "cell_type": "code", 167 | "execution_count": 225, 168 | "metadata": {}, 169 | "outputs": [ 170 | { 171 | "name": "stdout", 172 | "output_type": "stream", 173 | "text": [ 174 | "relu( x.T*W + c )\n", 175 | "\n", 176 | "[[0 0]\n", 177 | " [1 0]\n", 178 | " [1 0]\n", 179 | " [2 1]]\n" 180 | ] 181 | } 182 | ], 183 | "source": [ 184 | "h = relu( linLayH )\n", 185 | "print(\"relu( x.T*W + c )\\n\")\n", 186 | "print(h.T)" 187 | ] 188 | }, 189 | { 190 | "cell_type": "code", 191 | "execution_count": 226, 192 | "metadata": {}, 193 | "outputs": [ 194 | { 195 | "name": "stdout", 196 | "output_type": "stream", 197 | "text": [ 198 | "w.T*relu( x.T*W + c )+b\n", 199 | "\n", 200 | "[[0]\n", 201 | " [1]\n", 202 | " [1]\n", 203 | " [0]]\n" 204 | ] 205 | } 206 | ], 207 | "source": [ 208 | "w = np.matrix([1,-2]).T\n", 209 | "b = 0\n", 210 | "\n", 211 | "f = linLayer(w, h, b)\n", 212 | "\n", 213 | "print(\"w.T*relu( x.T*W + c )+b\\n\")\n", 214 | "print(f.T)" 215 | ] 216 | }, 217 | { 218 | "cell_type": "markdown", 219 | "metadata": {}, 220 | "source": [ 221 | "If learning XOR was a linear transformation there would be an M such that x.T * M = [ 0,1,1,0]. There is no such M, however. Below you will find an interactive section for exploring M space ( 1 by 2 matrix ). 
" 222 | ] 223 | }, 224 | { 225 | "cell_type": "code", 226 | "execution_count": 227, 227 | "metadata": {}, 228 | "outputs": [ 229 | { 230 | "name": "stdout", 231 | "output_type": "stream", 232 | "text": [ 233 | "M:\n", 234 | "[[ 1]\n", 235 | " [-2]]\n", 236 | "\n", 237 | "x:\n", 238 | "[[0 0]\n", 239 | " [0 1]\n", 240 | " [1 0]\n", 241 | " [1 1]]\n", 242 | "\n", 243 | "Desired x.T * M:\n", 244 | "[[0]\n", 245 | " [1]\n", 246 | " [1]\n", 247 | " [0]]\n", 248 | "\n", 249 | "x.T * M:\n", 250 | "[[ 0]\n", 251 | " [-2]\n", 252 | " [ 1]\n", 253 | " [-1]]\n" 254 | ] 255 | }, 256 | { 257 | "data": { 258 | "text/plain": [ 259 | "" 260 | ] 261 | }, 262 | "execution_count": 227, 263 | "metadata": {}, 264 | "output_type": "execute_result" 265 | } 266 | ], 267 | "source": [ 268 | "def exploreM(m1=1,m2=-2):\n", 269 | " M = np.matrix([m1,m2]).T\n", 270 | " print(\"M:\")\n", 271 | " print(M)\n", 272 | " print(\"\\nx:\")\n", 273 | " print(x.T)\n", 274 | " print(\"\\nDesired x.T * M:\")\n", 275 | " print(np.matrix([0,1,1,0]).T)\n", 276 | " print(\"\\nx.T * M:\")\n", 277 | " print(x.T*M)\n", 278 | "\n", 279 | "interact(exploreM,m1=[-10,10],m2=[-10,10])" 280 | ] 281 | }, 282 | { 283 | "cell_type": "markdown", 284 | "metadata": {}, 285 | "source": [ 286 | "## Representing AND\n", 287 | "We'd like to transform matrix X representing all possible values for the opperands of AND( ∧ ) into a vector representing the value of applying AND to those opperands:\n", 288 | "\n", 289 | "$$\n", 290 | "\\begin{bmatrix} \n", 291 | "0 & 0 \\\\\n", 292 | "0 & 1 \\\\\n", 293 | "1 & 0 \\\\\n", 294 | "1 & 1 \\\\\n", 295 | "\\end{bmatrix}\n", 296 | "\\overset{∧}{\\rightarrow}\n", 297 | "\\begin{bmatrix} \n", 298 | "0 \\\\\n", 299 | "0 \\\\\n", 300 | "0 \\\\\n", 301 | "1 \\\\\n", 302 | "\\end{bmatrix}\n", 303 | "$$\n", 304 | "\n", 305 | "$$\n", 306 | "w^\\intercal \n", 307 | "relu(\\begin{bmatrix} \n", 308 | "0 & 0 \\\\\n", 309 | "0 & 1 \\\\\n", 310 | "1 & 0 \\\\\n", 311 | "1 & 1 \\\\\n", 312 | "\\end{bmatrix}^\\intercal \n", 313 | "W + c)+b = \\begin{bmatrix} 0 & 0 & 0 & 1 \\end{bmatrix}^\\intercal\n", 314 | "$$\n", 315 | "\n", 316 | "As you can see, the task is to find matricies W,w,c, and scalar b which transforms our x matrix into the solution desired. Let's try to do this by hand to explore how the various variables behave in respect to transforming X.\n", 317 | "\n", 318 | "We first look at how a matrix of ones is transformed by different values of W. 
" 319 | ] 320 | }, 321 | { 322 | "cell_type": "code", 323 | "execution_count": 228, 324 | "metadata": { 325 | "collapsed": true 326 | }, 327 | "outputs": [], 328 | "source": [ 329 | "x = np.matrix([[1,1,1,1],[1,1,1,1]])" 330 | ] 331 | }, 332 | { 333 | "cell_type": "markdown", 334 | "metadata": {}, 335 | "source": [ 336 | "### W" 337 | ] 338 | }, 339 | { 340 | "cell_type": "code", 341 | "execution_count": 237, 342 | "metadata": {}, 343 | "outputs": [ 344 | { 345 | "name": "stdout", 346 | "output_type": "stream", 347 | "text": [ 348 | "[[0 0]\n", 349 | " [0 0]\n", 350 | " [0 0]\n", 351 | " [0 0]]\n" 352 | ] 353 | }, 354 | { 355 | "data": { 356 | "text/plain": [ 357 | "" 358 | ] 359 | }, 360 | "execution_count": 237, 361 | "metadata": {}, 362 | "output_type": "execute_result" 363 | } 364 | ], 365 | "source": [ 366 | "def wInt(w1,w2,w3,w4):\n", 367 | " W = np.matrix([[w1,w2],\n", 368 | " [w3, w4]])\n", 369 | " print(x.T*W)\n", 370 | " \n", 371 | "interact(wInt,w1=[-5,5],w2=[-5,5],w3=[-5,5],w4=[-5,5])" 372 | ] 373 | }, 374 | { 375 | "cell_type": "markdown", 376 | "metadata": {}, 377 | "source": [ 378 | "From experimenting above we can see that: \n", 379 | "\n", 380 | "* w1: Modifies the first column values. \n", 381 | "* w3: Modifies the first column values. \n", 382 | "\n", 383 | "\n", 384 | "* w2: Modifies the second column values\n", 385 | "* w4: Modifies the second column values" 386 | ] 387 | }, 388 | { 389 | "cell_type": "markdown", 390 | "metadata": {}, 391 | "source": [ 392 | "Now let's do the same experiment with the x matrix set to the desired x. " 393 | ] 394 | }, 395 | { 396 | "cell_type": "code", 397 | "execution_count": 246, 398 | "metadata": {}, 399 | "outputs": [ 400 | { 401 | "name": "stdout", 402 | "output_type": "stream", 403 | "text": [ 404 | "[[0 0]\n", 405 | " [0 0]\n", 406 | " [0 0]\n", 407 | " [0 0]]\n" 408 | ] 409 | }, 410 | { 411 | "data": { 412 | "text/plain": [ 413 | "" 414 | ] 415 | }, 416 | "execution_count": 246, 417 | "metadata": {}, 418 | "output_type": "execute_result" 419 | } 420 | ], 421 | "source": [ 422 | "x = np.matrix([[0,0,1,1],[0,1,0,1]])\n", 423 | "interact(wInt,w1=[-5,5],w2=[-5,5],w3=[-5,5],w4=[-5,5])" 424 | ] 425 | }, 426 | { 427 | "cell_type": "markdown", 428 | "metadata": {}, 429 | "source": [ 430 | "From experimenting above we can see that: \n", 431 | "\n", 432 | "* w1: Modifies the first column values in **the last two rows**. \n", 433 | "* w2: Modifies the second column values in **the last two rows**.\n", 434 | "\n", 435 | "\n", 436 | "* w3: Modifies the first column values in **the second and forth rows**.\n", 437 | "* w4: Modifies the second column values in **the second and forth rows**.\n", 438 | "\n", 439 | "By experimenting with the applet above, we can find a matrix that makes the last column distinctly larger than all others. Since we'd like to transform this matrix to [0,0,0,1], this is a property we need. I found a W matrix of the form [0,1],[0,1] that seems to do the job. " 440 | ] 441 | }, 442 | { 443 | "cell_type": "code", 444 | "execution_count": 241, 445 | "metadata": {}, 446 | "outputs": [ 447 | { 448 | "name": "stdout", 449 | "output_type": "stream", 450 | "text": [ 451 | "[[0 0]\n", 452 | " [0 1]\n", 453 | " [0 1]\n", 454 | " [0 2]]\n" 455 | ] 456 | } 457 | ], 458 | "source": [ 459 | "W = np.matrix([[0,1],[0,1]])\n", 460 | "print(x.T*W)" 461 | ] 462 | }, 463 | { 464 | "cell_type": "markdown", 465 | "metadata": {}, 466 | "source": [ 467 | "### c\n", 468 | "C acts as a sort of filter on W. 
We can use this to get rid of the 1's above and keep the last row a 1. " 469 | ] 470 | }, 471 | { 472 | "cell_type": "code", 473 | "execution_count": 242, 474 | "metadata": {}, 475 | "outputs": [ 476 | { 477 | "name": "stdout", 478 | "output_type": "stream", 479 | "text": [ 480 | "x.T*W + c\n", 481 | "\n", 482 | "[[ 0 -1]\n", 483 | " [ 0 0]\n", 484 | " [ 0 0]\n", 485 | " [ 0 1]]\n" 486 | ] 487 | } 488 | ], 489 | "source": [ 490 | "c = np.matrix([0,-1]).T\n", 491 | "linLayH = linLayer(W,x,c)\n", 492 | "print(\"x.T*W + c\\n\")\n", 493 | "print(linLayH.T)" 494 | ] 495 | }, 496 | { 497 | "cell_type": "markdown", 498 | "metadata": {}, 499 | "source": [ 500 | "Nice! We can see that when applying an element wise relu will get us what we'd like. " 501 | ] 502 | }, 503 | { 504 | "cell_type": "code", 505 | "execution_count": 247, 506 | "metadata": {}, 507 | "outputs": [ 508 | { 509 | "name": "stdout", 510 | "output_type": "stream", 511 | "text": [ 512 | "[[0 0]\n", 513 | " [0 0]\n", 514 | " [0 0]\n", 515 | " [0 1]]\n" 516 | ] 517 | } 518 | ], 519 | "source": [ 520 | "h = relu(linLayH)\n", 521 | "print(h.T)" 522 | ] 523 | }, 524 | { 525 | "cell_type": "markdown", 526 | "metadata": {}, 527 | "source": [ 528 | "Now we must figure out the other variable values to flatten this matrix to a vector. \n", 529 | "\n", 530 | "### w\n", 531 | "Given the results from above, we know we can ignore b and just find a w that turns the matrix above to a vector. We do that with a column vector [0,1].T" 532 | ] 533 | }, 534 | { 535 | "cell_type": "code", 536 | "execution_count": 256, 537 | "metadata": {}, 538 | "outputs": [ 539 | { 540 | "name": "stdout", 541 | "output_type": "stream", 542 | "text": [ 543 | "[[0]\n", 544 | " [0]\n", 545 | " [0]\n", 546 | " [1]]\n" 547 | ] 548 | } 549 | ], 550 | "source": [ 551 | "w = np.matrix([0,1]).T\n", 552 | "fn = linLayer(w, h, 0)\n", 553 | "print(fn.T)" 554 | ] 555 | }, 556 | { 557 | "cell_type": "markdown", 558 | "metadata": {}, 559 | "source": [ 560 | "The final result we've achieved by hand is: \n", 561 | "\n", 562 | "$$\n", 563 | "\\begin{bmatrix} \n", 564 | "0 & 0 \\\\\n", 565 | "0 & 1 \\\\\n", 566 | "1 & 0 \\\\\n", 567 | "1 & 1 \\\\\n", 568 | "\\end{bmatrix}\n", 569 | "\\overset{∧}{\\rightarrow}\n", 570 | "\\begin{bmatrix} \n", 571 | "0 \\\\\n", 572 | "0 \\\\\n", 573 | "0 \\\\\n", 574 | "1 \\\\\n", 575 | "\\end{bmatrix}\n", 576 | "$$\n", 577 | "\n", 578 | "$$\n", 579 | "\\begin{bmatrix} \n", 580 | "0 \\\\\n", 581 | "1 \\\\\n", 582 | "\\end{bmatrix}^\\intercal \n", 583 | "relu(\\begin{bmatrix} \n", 584 | "0 & 0 \\\\\n", 585 | "0 & 1 \\\\\n", 586 | "1 & 0 \\\\\n", 587 | "1 & 1 \\\\\n", 588 | "\\end{bmatrix}\n", 589 | "\\begin{bmatrix} \n", 590 | "0 & 1 \\\\\n", 591 | "0 & 1 \\\\\n", 592 | "\\end{bmatrix}\n", 593 | "+ \n", 594 | "\\begin{bmatrix} \n", 595 | "0 \\\\\n", 596 | "-1 \\\\\n", 597 | "\\end{bmatrix}\n", 598 | ")^\\intercal +0 = \\begin{bmatrix} 0 & 0 & 0 & 1 \\end{bmatrix}\n", 599 | "$$\n", 600 | "\n" 601 | ] 602 | }, 603 | { 604 | "cell_type": "markdown", 605 | "metadata": {}, 606 | "source": [ 607 | "In our next notebook **Neural Boolean Connectives 2** we will explore the case when we are given an additional target vector which contains a label for each row of X representing the result of the boolean operation to be learned. 
In the case of AND, we'd have: " 608 | ] 609 | }, 610 | { 611 | "cell_type": "markdown", 612 | "metadata": {}, 613 | "source": [ 614 | "$$\n", 615 | "x = \\begin{bmatrix} \n", 616 | "0 & 0 \\\\\n", 617 | "0 & 1 \\\\\n", 618 | "1 & 0 \\\\\n", 619 | "1 & 1 \\\\\n", 620 | "\\end{bmatrix}\n", 621 | ",y = \n", 622 | "\\begin{bmatrix} \n", 623 | "0 \\\\\n", 624 | "0 \\\\\n", 625 | "0 \\\\\n", 626 | "1 \\\\\n", 627 | "\\end{bmatrix}\n", 628 | "$$" 629 | ] 630 | } 631 | ], 632 | "metadata": { 633 | "kernelspec": { 634 | "display_name": "Python 3", 635 | "language": "python", 636 | "name": "python3" 637 | }, 638 | "language_info": { 639 | "codemirror_mode": { 640 | "name": "ipython", 641 | "version": 3 642 | }, 643 | "file_extension": ".py", 644 | "mimetype": "text/x-python", 645 | "name": "python", 646 | "nbconvert_exporter": "python", 647 | "pygments_lexer": "ipython3", 648 | "version": "3.5.3" 649 | }, 650 | "widgets": { 651 | "state": { 652 | "0ef801e8ffcf439bba523cdd1b606410": { 653 | "views": [ 654 | { 655 | "cell_index": 17 656 | } 657 | ] 658 | }, 659 | "bc1d071cb0be4492a28a2ac0b16c8662": { 660 | "views": [ 661 | { 662 | "cell_index": 10 663 | } 664 | ] 665 | }, 666 | "f52c353fa4df40ea80db6b3b3f641c38": { 667 | "views": [ 668 | { 669 | "cell_index": 14 670 | } 671 | ] 672 | } 673 | }, 674 | "version": "1.2.0" 675 | } 676 | }, 677 | "nbformat": 4, 678 | "nbformat_minor": 2 679 | } 680 | -------------------------------------------------------------------------------- /Notebooks/SymPy Basics.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "collapsed": true 7 | }, 8 | "source": [ 9 | "# SymPy Basics\n", 10 | "Adapted from: https://github.com/sympy/sympy/wiki/Quick-examples" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 1, 16 | "metadata": {}, 17 | "outputs": [], 18 | "source": [ 19 | "from sympy import *\n", 20 | "from IPython.display import display\n", 21 | "init_printing(order=\"lex\",use_latex='mathjax')" 22 | ] 23 | }, 24 | { 25 | "cell_type": "markdown", 26 | "metadata": {}, 27 | "source": [ 28 | "# Symbolic Expressions and Calculations" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 2, 34 | "metadata": { 35 | "collapsed": true 36 | }, 37 | "outputs": [], 38 | "source": [ 39 | "x, y, z, t = symbols('x y z t')\n", 40 | "k, m, n = symbols('k m n', integer=True)\n", 41 | "#f, g, h = map(Function, 'fgh')" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": 3, 47 | "metadata": {}, 48 | "outputs": [ 49 | { 50 | "data": { 51 | "text/latex": [ 52 | "$$\\frac{3 \\pi}{2} + \\frac{e^{i x}}{x^{2} + y}$$" 53 | ], 54 | "text/plain": [ 55 | " ⅈ⋅x \n", 56 | "3⋅π ℯ \n", 57 | "─── + ──────\n", 58 | " 2 2 \n", 59 | " x + y" 60 | ] 61 | }, 62 | "execution_count": 3, 63 | "metadata": {}, 64 | "output_type": "execute_result" 65 | } 66 | ], 67 | "source": [ 68 | "eqn = Rational(3,2)*pi + exp(I*x) / (x**2 + y)\n", 69 | "eqn" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": 4, 75 | "metadata": {}, 76 | "outputs": [ 77 | { 78 | "data": { 79 | "text/latex": [ 80 | "$$\\frac{3 \\pi}{2} + \\frac{e^{3 i}}{y + 9}$$" 81 | ], 82 | "text/plain": [ 83 | " 3⋅ⅈ\n", 84 | "3⋅π ℯ \n", 85 | "─── + ─────\n", 86 | " 2 y + 9" 87 | ] 88 | }, 89 | "execution_count": 4, 90 | "metadata": {}, 91 | "output_type": "execute_result" 92 | } 93 | ], 94 | "source": [ 95 | "eqn.subs(x,3)" 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 
100 | "execution_count": 5, 101 | "metadata": {}, 102 | "outputs": [ 103 | { 104 | "data": { 105 | "text/latex": [ 106 | "$$-1.0$$" 107 | ], 108 | "text/plain": [ 109 | "-1.00000000000000" 110 | ] 111 | }, 112 | "execution_count": 5, 113 | "metadata": {}, 114 | "output_type": "execute_result" 115 | } 116 | ], 117 | "source": [ 118 | "exp(I*x).subs(x,pi).evalf()" 119 | ] 120 | }, 121 | { 122 | "cell_type": "code", 123 | "execution_count": 6, 124 | "metadata": {}, 125 | "outputs": [ 126 | { 127 | "data": { 128 | "text/latex": [ 129 | "$$\\left ( x, \\quad 2 y\\right )$$" 130 | ], 131 | "text/plain": [ 132 | "(x, 2⋅y)" 133 | ] 134 | }, 135 | "execution_count": 6, 136 | "metadata": {}, 137 | "output_type": "execute_result" 138 | } 139 | ], 140 | "source": [ 141 | "expr = x + 2*y\n", 142 | "expr.args" 143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 147 | "execution_count": 7, 148 | "metadata": {}, 149 | "outputs": [ 150 | { 151 | "data": { 152 | "text/latex": [ 153 | "$$262537412640768743.99999999999925007259719818568888$$" 154 | ], 155 | "text/plain": [ 156 | "262537412640768743.99999999999925007259719818568888" 157 | ] 158 | }, 159 | "execution_count": 7, 160 | "metadata": {}, 161 | "output_type": "execute_result" 162 | } 163 | ], 164 | "source": [ 165 | "exp(pi * sqrt(163)).evalf(50)" 166 | ] 167 | }, 168 | { 169 | "cell_type": "code", 170 | "execution_count": 8, 171 | "metadata": {}, 172 | "outputs": [ 173 | { 174 | "data": { 175 | "text/latex": [ 176 | "$$3.141592653589793238462643383279502884197169399375105820974944592307816406286208998628034825342117068$$" 177 | ], 178 | "text/plain": [ 179 | "3.1415926535897932384626433832795028841971693993751058209749445923078164062862\n", 180 | "08998628034825342117068" 181 | ] 182 | }, 183 | "execution_count": 8, 184 | "metadata": {}, 185 | "output_type": "execute_result" 186 | } 187 | ], 188 | "source": [ 189 | "N(pi,100)" 190 | ] 191 | }, 192 | { 193 | "cell_type": "code", 194 | "execution_count": 9, 195 | "metadata": {}, 196 | "outputs": [ 197 | { 198 | "data": { 199 | "text/plain": [ 200 | "'\\\\frac{3 \\\\pi}{2} + \\\\frac{e^{i x}}{x^{2} + y}'" 201 | ] 202 | }, 203 | "execution_count": 9, 204 | "metadata": {}, 205 | "output_type": "execute_result" 206 | } 207 | ], 208 | "source": [ 209 | "latex(S(eqn,evaluate=False))" 210 | ] 211 | }, 212 | { 213 | "cell_type": "markdown", 214 | "metadata": {}, 215 | "source": [ 216 | "$$ \\frac{3 \\pi}{2} + \\frac{e^{i x}}{x^{2} + y}$$" 217 | ] 218 | }, 219 | { 220 | "cell_type": "markdown", 221 | "metadata": {}, 222 | "source": [ 223 | "## Algebra" 224 | ] 225 | }, 226 | { 227 | "cell_type": "code", 228 | "execution_count": 10, 229 | "metadata": {}, 230 | "outputs": [ 231 | { 232 | "data": { 233 | "text/latex": [ 234 | "$$x^{3} + 2 x^{2} y + x^{2} + x y^{2} + 2 x y + y^{2}$$" 235 | ], 236 | "text/plain": [ 237 | " 3 2 2 2 2\n", 238 | "x + 2⋅x ⋅y + x + x⋅y + 2⋅x⋅y + y " 239 | ] 240 | }, 241 | "execution_count": 10, 242 | "metadata": {}, 243 | "output_type": "execute_result" 244 | } 245 | ], 246 | "source": [ 247 | "((x+y)**2 * (x+1)).expand()" 248 | ] 249 | }, 250 | { 251 | "cell_type": "code", 252 | "execution_count": 11, 253 | "metadata": {}, 254 | "outputs": [ 255 | { 256 | "data": { 257 | "text/latex": [ 258 | "$$\\frac{1}{x} \\left(x \\sin{\\left (x \\right )} - 1\\right) + \\frac{1}{x}$$" 259 | ], 260 | "text/plain": [ 261 | "x⋅sin(x) - 1 1\n", 262 | "──────────── + ─\n", 263 | " x x" 264 | ] 265 | }, 266 | "execution_count": 11, 267 | "metadata": {}, 268 | "output_type": "execute_result" 269 | } 270 
| ], 271 | "source": [ 272 | "a = 1/x + (x*sin(x) - 1)/x\n", 273 | "a" 274 | ] 275 | }, 276 | { 277 | "cell_type": "code", 278 | "execution_count": 12, 279 | "metadata": {}, 280 | "outputs": [ 281 | { 282 | "data": { 283 | "text/latex": [ 284 | "$$\\sin{\\left (x \\right )}$$" 285 | ], 286 | "text/plain": [ 287 | "sin(x)" 288 | ] 289 | }, 290 | "execution_count": 12, 291 | "metadata": {}, 292 | "output_type": "execute_result" 293 | } 294 | ], 295 | "source": [ 296 | "a.simplify()" 297 | ] 298 | }, 299 | { 300 | "cell_type": "code", 301 | "execution_count": 13, 302 | "metadata": {}, 303 | "outputs": [ 304 | { 305 | "data": { 306 | "text/latex": [ 307 | "$$x^{3} + 2 x^{2} + 4 x + 8 = 0$$" 308 | ], 309 | "text/plain": [ 310 | " 3 2 \n", 311 | "x + 2⋅x + 4⋅x + 8 = 0" 312 | ] 313 | }, 314 | "execution_count": 13, 315 | "metadata": {}, 316 | "output_type": "execute_result" 317 | } 318 | ], 319 | "source": [ 320 | "eqn = Eq(x**3 + 2*x**2 + 4*x + 8, 0)\n", 321 | "eqn" 322 | ] 323 | }, 324 | { 325 | "cell_type": "code", 326 | "execution_count": 14, 327 | "metadata": {}, 328 | "outputs": [ 329 | { 330 | "data": { 331 | "text/latex": [ 332 | "$$\\left [ -2, \\quad - 2 i, \\quad 2 i\\right ]$$" 333 | ], 334 | "text/plain": [ 335 | "[-2, -2⋅ⅈ, 2⋅ⅈ]" 336 | ] 337 | }, 338 | "execution_count": 14, 339 | "metadata": {}, 340 | "output_type": "execute_result" 341 | } 342 | ], 343 | "source": [ 344 | "solve(eqn,x)" 345 | ] 346 | }, 347 | { 348 | "cell_type": "code", 349 | "execution_count": 15, 350 | "metadata": {}, 351 | "outputs": [ 352 | { 353 | "data": { 354 | "text/latex": [ 355 | "$$x + 5 y = 2$$" 356 | ], 357 | "text/plain": [ 358 | "x + 5⋅y = 2" 359 | ] 360 | }, 361 | "metadata": {}, 362 | "output_type": "display_data" 363 | }, 364 | { 365 | "data": { 366 | "text/latex": [ 367 | "$$- 3 x + 6 y = 15$$" 368 | ], 369 | "text/plain": [ 370 | "-3⋅x + 6⋅y = 15" 371 | ] 372 | }, 373 | "metadata": {}, 374 | "output_type": "display_data" 375 | }, 376 | { 377 | "data": { 378 | "text/latex": [ 379 | "$$\\left \\{ x : -3, \\quad y : 1\\right \\}$$" 380 | ], 381 | "text/plain": [ 382 | "{x: -3, y: 1}" 383 | ] 384 | }, 385 | "execution_count": 15, 386 | "metadata": {}, 387 | "output_type": "execute_result" 388 | } 389 | ], 390 | "source": [ 391 | "eq1 = Eq(x + 5*y, 2)\n", 392 | "eq2 = Eq(-3*x + 6*y, 15)\n", 393 | "display(eq1)\n", 394 | "display(eq2)\n", 395 | "sln = solve([eq1, eq2], [x, y])\n", 396 | "sln" 397 | ] 398 | }, 399 | { 400 | "cell_type": "code", 401 | "execution_count": 16, 402 | "metadata": {}, 403 | "outputs": [ 404 | { 405 | "data": { 406 | "text/latex": [ 407 | "$$\\mathrm{True}$$" 408 | ], 409 | "text/plain": [ 410 | "True" 411 | ] 412 | }, 413 | "metadata": {}, 414 | "output_type": "display_data" 415 | }, 416 | { 417 | "data": { 418 | "text/latex": [ 419 | "$$\\mathrm{True}$$" 420 | ], 421 | "text/plain": [ 422 | "True" 423 | ] 424 | }, 425 | "metadata": {}, 426 | "output_type": "display_data" 427 | } 428 | ], 429 | "source": [ 430 | "display(eq1.subs(sln))\n", 431 | "display(eq2.subs(sln))" 432 | ] 433 | }, 434 | { 435 | "cell_type": "markdown", 436 | "metadata": {}, 437 | "source": [ 438 | "## Recurrence Relations" 439 | ] 440 | }, 441 | { 442 | "cell_type": "markdown", 443 | "metadata": {}, 444 | "source": [ 445 | "$$\n", 446 | "\\large\\begin{align}\n", 447 | "y_0 & =1 \\\\\n", 448 | "y_1 & =4 \\\\\n", 449 | "y_n & =y_n-2y_{n-1}+5y_{n-2} \n", 450 | "\\end{align}\n", 451 | "$$" 452 | ] 453 | }, 454 | { 455 | "cell_type": "code", 456 | "execution_count": 17, 457 | "metadata": {}, 458 | 
"outputs": [ 459 | { 460 | "data": { 461 | "text/latex": [ 462 | "$$y{\\left (n \\right )} - 5 y{\\left (n - 2 \\right )} - 2 y{\\left (n - 1 \\right )}$$" 463 | ], 464 | "text/plain": [ 465 | "y(n) - 5⋅y(n - 2) - 2⋅y(n - 1)" 466 | ] 467 | }, 468 | "execution_count": 17, 469 | "metadata": {}, 470 | "output_type": "execute_result" 471 | } 472 | ], 473 | "source": [ 474 | "f=y(n)-2*y(n-1)-5*y(n-2)\n", 475 | "f" 476 | ] 477 | }, 478 | { 479 | "cell_type": "code", 480 | "execution_count": 18, 481 | "metadata": {}, 482 | "outputs": [ 483 | { 484 | "data": { 485 | "text/latex": [ 486 | "$$\\left(\\frac{1}{2} + \\frac{\\sqrt{6}}{4}\\right) \\left(1 + \\sqrt{6}\\right)^{n} + \\left(- \\sqrt{6} + 1\\right)^{n} \\left(- \\frac{\\sqrt{6}}{4} + \\frac{1}{2}\\right)$$" 487 | ], 488 | "text/plain": [ 489 | "⎛1 √6⎞ n n ⎛ √6 1⎞\n", 490 | "⎜─ + ──⎟⋅(1 + √6) + (-√6 + 1) ⋅⎜- ── + ─⎟\n", 491 | "⎝2 4 ⎠ ⎝ 4 2⎠" 492 | ] 493 | }, 494 | "execution_count": 18, 495 | "metadata": {}, 496 | "output_type": "execute_result" 497 | } 498 | ], 499 | "source": [ 500 | "sln = rsolve(f,y(n),[1,4])\n", 501 | "sln" 502 | ] 503 | }, 504 | { 505 | "cell_type": "code", 506 | "execution_count": 19, 507 | "metadata": {}, 508 | "outputs": [ 509 | { 510 | "name": "stdout", 511 | "output_type": "stream", 512 | "text": [ 513 | "1\n", 514 | "4\n", 515 | "13\n", 516 | "46\n", 517 | "157\n", 518 | "544\n", 519 | "1873\n", 520 | "6466\n", 521 | "22297\n", 522 | "76924\n" 523 | ] 524 | } 525 | ], 526 | "source": [ 527 | "for i in range(0,10):\n", 528 | " print(sln.subs(n,i).simplify())" 529 | ] 530 | }, 531 | { 532 | "cell_type": "markdown", 533 | "metadata": {}, 534 | "source": [ 535 | "## Sums and Products" 536 | ] 537 | }, 538 | { 539 | "cell_type": "code", 540 | "execution_count": 87, 541 | "metadata": {}, 542 | "outputs": [ 543 | { 544 | "data": { 545 | "text/latex": [ 546 | "$$\\sum_{n=a}^{b} \\left(2^{n} + 6 n^{2}\\right)$$" 547 | ], 548 | "text/plain": [ 549 | " b \n", 550 | " ___ \n", 551 | " ╲ \n", 552 | " ╲ ⎛ n 2⎞\n", 553 | " ╱ ⎝2 + 6⋅n ⎠\n", 554 | " ╱ \n", 555 | " ‾‾‾ \n", 556 | "n = a " 557 | ] 558 | }, 559 | "execution_count": 87, 560 | "metadata": {}, 561 | "output_type": "execute_result" 562 | } 563 | ], 564 | "source": [ 565 | "a, b = symbols('a b')\n", 566 | "s = Sum(6*n**2 + 2**n, (n, a, b))\n", 567 | "s" 568 | ] 569 | }, 570 | { 571 | "cell_type": "code", 572 | "execution_count": 90, 573 | "metadata": {}, 574 | "outputs": [ 575 | { 576 | "data": { 577 | "text/latex": [ 578 | "$$- 2^{a} + 2^{b + 1} - 2 a^{3} + 3 a^{2} - a + 2 b^{3} + 3 b^{2} + b$$" 579 | ], 580 | "text/plain": [ 581 | " a b + 1 3 2 3 2 \n", 582 | "- 2 + 2 - 2⋅a + 3⋅a - a + 2⋅b + 3⋅b + b" 583 | ] 584 | }, 585 | "execution_count": 90, 586 | "metadata": {}, 587 | "output_type": "execute_result" 588 | } 589 | ], 590 | "source": [ 591 | "s.doit()" 592 | ] 593 | }, 594 | { 595 | "cell_type": "code", 596 | "execution_count": 92, 597 | "metadata": {}, 598 | "outputs": [ 599 | { 600 | "data": { 601 | "text/latex": [ 602 | "$$98$$" 603 | ], 604 | "text/plain": [ 605 | "98" 606 | ] 607 | }, 608 | "execution_count": 92, 609 | "metadata": {}, 610 | "output_type": "execute_result" 611 | } 612 | ], 613 | "source": [ 614 | "s.subs({b:3,a:1}).doit()" 615 | ] 616 | }, 617 | { 618 | "cell_type": "code", 619 | "execution_count": 183, 620 | "metadata": {}, 621 | "outputs": [ 622 | { 623 | "data": { 624 | "text/latex": [ 625 | "$$\\frac{n}{2} \\left(n + 1\\right)$$" 626 | ], 627 | "text/plain": [ 628 | "n⋅(n + 1)\n", 629 | "─────────\n", 630 | " 2 " 631 | ] 632 | }, 633 | 
"execution_count": 183, 634 | "metadata": {}, 635 | "output_type": "execute_result" 636 | } 637 | ], 638 | "source": [ 639 | "Sum(b, (b, 1, n)).doit().factor()" 640 | ] 641 | }, 642 | { 643 | "cell_type": "code", 644 | "execution_count": 173, 645 | "metadata": {}, 646 | "outputs": [ 647 | { 648 | "data": { 649 | "text/latex": [ 650 | "$$\\frac{b^{3}}{6} + \\frac{b^{2}}{2} + \\frac{b}{3}$$" 651 | ], 652 | "text/plain": [ 653 | " 3 2 \n", 654 | "b b b\n", 655 | "── + ── + ─\n", 656 | "6 2 3" 657 | ] 658 | }, 659 | "execution_count": 173, 660 | "metadata": {}, 661 | "output_type": "execute_result" 662 | } 663 | ], 664 | "source": [ 665 | "Sum(n*(n+1)/2,(n, 1, b)).doit()" 666 | ] 667 | }, 668 | { 669 | "cell_type": "code", 670 | "execution_count": 172, 671 | "metadata": {}, 672 | "outputs": [ 673 | { 674 | "name": "stdout", 675 | "output_type": "stream", 676 | "text": [ 677 | "1\n", 678 | "4\n", 679 | "10\n", 680 | "20\n", 681 | "35\n", 682 | "56\n", 683 | "84\n", 684 | "120\n", 685 | "165\n" 686 | ] 687 | } 688 | ], 689 | "source": [ 690 | "for i in range(1,10):\n", 691 | " print(Sum(n*(n+1)/2, (n, 1, b)).doit().subs(b,i))" 692 | ] 693 | }, 694 | { 695 | "cell_type": "code", 696 | "execution_count": 95, 697 | "metadata": {}, 698 | "outputs": [ 699 | { 700 | "data": { 701 | "text/latex": [ 702 | "$$\\frac{b^{2}}{2} + \\frac{b}{2}$$" 703 | ], 704 | "text/plain": [ 705 | " 2 \n", 706 | "b b\n", 707 | "── + ─\n", 708 | "2 2" 709 | ] 710 | }, 711 | "execution_count": 95, 712 | "metadata": {}, 713 | "output_type": "execute_result" 714 | } 715 | ], 716 | "source": [ 717 | "Sum(n, (n, a, b)).subs(a,1).doit()" 718 | ] 719 | }, 720 | { 721 | "cell_type": "code", 722 | "execution_count": 176, 723 | "metadata": {}, 724 | "outputs": [ 725 | { 726 | "data": { 727 | "text/latex": [ 728 | "$$\\frac{x}{6} \\left(x + 1\\right) \\left(x + 2\\right)$$" 729 | ], 730 | "text/plain": [ 731 | "x⋅(x + 1)⋅(x + 2)\n", 732 | "─────────────────\n", 733 | " 6 " 734 | ] 735 | }, 736 | "execution_count": 176, 737 | "metadata": {}, 738 | "output_type": "execute_result" 739 | } 740 | ], 741 | "source": [ 742 | "(x**3/6 + x**2/2 +x/3).factor()" 743 | ] 744 | }, 745 | { 746 | "cell_type": "code", 747 | "execution_count": 98, 748 | "metadata": {}, 749 | "outputs": [ 750 | { 751 | "data": { 752 | "text/latex": [ 753 | "$${2}^{\\left(b\\right)} b!$$" 754 | ], 755 | "text/plain": [ 756 | "RisingFactorial(2, b)⋅b!" 
757 | ] 758 | }, 759 | "execution_count": 98, 760 | "metadata": {}, 761 | "output_type": "execute_result" 762 | } 763 | ], 764 | "source": [ 765 | "product(n*(n+1), (n, 1, b))" 766 | ] 767 | }, 768 | { 769 | "cell_type": "code", 770 | "execution_count": 103, 771 | "metadata": {}, 772 | "outputs": [], 773 | "source": [ 774 | "f=Function('f')\n", 775 | "ex=Eq(f(1/x)-3*f(x),x)" 776 | ] 777 | }, 778 | { 779 | "cell_type": "markdown", 780 | "metadata": {}, 781 | "source": [ 782 | "## Calculus" 783 | ] 784 | }, 785 | { 786 | "cell_type": "markdown", 787 | "metadata": {}, 788 | "source": [ 789 | "$$\\lim_{x\\to 0} \\frac{\\sin x - x}{x^3} = -\\frac{1}{6}$$" 790 | ] 791 | }, 792 | { 793 | "cell_type": "code", 794 | "execution_count": 111, 795 | "metadata": {}, 796 | "outputs": [ 797 | { 798 | "data": { 799 | "text/latex": [ 800 | "$$- \\frac{1}{6}$$" 801 | ], 802 | "text/plain": [ 803 | "-1/6" 804 | ] 805 | }, 806 | "execution_count": 111, 807 | "metadata": {}, 808 | "output_type": "execute_result" 809 | } 810 | ], 811 | "source": [ 812 | "((sin(x)-x)/x**3).limit(x,0)" 813 | ] 814 | }, 815 | { 816 | "cell_type": "code", 817 | "execution_count": 109, 818 | "metadata": {}, 819 | "outputs": [ 820 | { 821 | "data": { 822 | "text/latex": [ 823 | "$$15 x^{2} + 2 x$$" 824 | ], 825 | "text/plain": [ 826 | " 2 \n", 827 | "15⋅x + 2⋅x" 828 | ] 829 | }, 830 | "execution_count": 109, 831 | "metadata": {}, 832 | "output_type": "execute_result" 833 | } 834 | ], 835 | "source": [ 836 | "(x**2+5*x**3).diff(x)" 837 | ] 838 | }, 839 | { 840 | "cell_type": "code", 841 | "execution_count": 120, 842 | "metadata": {}, 843 | "outputs": [ 844 | { 845 | "data": { 846 | "text/latex": [ 847 | "$$-\\infty$$" 848 | ], 849 | "text/plain": [ 850 | "-∞" 851 | ] 852 | }, 853 | "execution_count": 120, 854 | "metadata": {}, 855 | "output_type": "execute_result" 856 | } 857 | ], 858 | "source": [ 859 | "(-x).limit(x,oo)" 860 | ] 861 | }, 862 | { 863 | "cell_type": "markdown", 864 | "metadata": {}, 865 | "source": [ 866 | "$$\\int x^2 \\cos x \\ dx$$" 867 | ] 868 | }, 869 | { 870 | "cell_type": "code", 871 | "execution_count": 121, 872 | "metadata": {}, 873 | "outputs": [ 874 | { 875 | "data": { 876 | "text/latex": [ 877 | "$$x^{2} \\sin{\\left (x \\right )} + 2 x \\cos{\\left (x \\right )} - 2 \\sin{\\left (x \\right )}$$" 878 | ], 879 | "text/plain": [ 880 | " 2 \n", 881 | "x ⋅sin(x) + 2⋅x⋅cos(x) - 2⋅sin(x)" 882 | ] 883 | }, 884 | "execution_count": 121, 885 | "metadata": {}, 886 | "output_type": "execute_result" 887 | } 888 | ], 889 | "source": [ 890 | "(x**2 * cos(x)).integrate(x)" 891 | ] 892 | }, 893 | { 894 | "cell_type": "markdown", 895 | "metadata": {}, 896 | "source": [ 897 | "$$\\int_0^{\\pi/2} x^2 \\cos x \\ dx$$" 898 | ] 899 | }, 900 | { 901 | "cell_type": "code", 902 | "execution_count": 136, 903 | "metadata": {}, 904 | "outputs": [ 905 | { 906 | "data": { 907 | "text/latex": [ 908 | "$$-2 + \\frac{\\pi^{2}}{4}$$" 909 | ], 910 | "text/plain": [ 911 | " 2\n", 912 | " π \n", 913 | "-2 + ──\n", 914 | " 4 " 915 | ] 916 | }, 917 | "execution_count": 136, 918 | "metadata": {}, 919 | "output_type": "execute_result" 920 | } 921 | ], 922 | "source": [ 923 | "integrate(x**2 * cos(x), (x, 0, pi/2))\n", 924 | "##(x**2 * cos(x)).integrate(x, 0, pi/2) does not work. 
" 925 | ] 926 | }, 927 | { 928 | "cell_type": "markdown", 929 | "metadata": {}, 930 | "source": [ 931 | "$$ \\large f''(x) + 9 f(x) = 1 $$" 932 | ] 933 | }, 934 | { 935 | "cell_type": "code", 936 | "execution_count": 139, 937 | "metadata": {}, 938 | "outputs": [ 939 | { 940 | "data": { 941 | "text/latex": [ 942 | "$$f{\\left (x \\right )} = C_{1} \\sin{\\left (3 x \\right )} + C_{2} \\cos{\\left (3 x \\right )} + \\frac{1}{9}$$" 943 | ], 944 | "text/plain": [ 945 | "f(x) = C₁⋅sin(3⋅x) + C₂⋅cos(3⋅x) + 1/9" 946 | ] 947 | }, 948 | "execution_count": 139, 949 | "metadata": {}, 950 | "output_type": "execute_result" 951 | } 952 | ], 953 | "source": [ 954 | "fn = dsolve(Eq(Derivative(f(x),x,x) + 9*f(x), 1), f(x))\n", 955 | "fn" 956 | ] 957 | }, 958 | { 959 | "cell_type": "code", 960 | "execution_count": 153, 961 | "metadata": {}, 962 | "outputs": [ 963 | { 964 | "data": { 965 | "text/latex": [ 966 | "$$1.0$$" 967 | ], 968 | "text/plain": [ 969 | "1.00000000000000" 970 | ] 971 | }, 972 | "execution_count": 153, 973 | "metadata": {}, 974 | "output_type": "execute_result" 975 | } 976 | ], 977 | "source": [ 978 | "fla = 3*sin(3*x)+3*cos(3*x)+1/9\n", 979 | "fla.diff(x).diff(x).subs(x,3)+9*fla.subs(x,3)" 980 | ] 981 | }, 982 | { 983 | "cell_type": "markdown", 984 | "metadata": {}, 985 | "source": [ 986 | "## Linear Algebra " 987 | ] 988 | } 989 | ], 990 | "metadata": { 991 | "kernelspec": { 992 | "display_name": "Python 3", 993 | "language": "python", 994 | "name": "python3" 995 | }, 996 | "language_info": { 997 | "codemirror_mode": { 998 | "name": "ipython", 999 | "version": 3 1000 | }, 1001 | "file_extension": ".py", 1002 | "mimetype": "text/x-python", 1003 | "name": "python", 1004 | "nbconvert_exporter": "python", 1005 | "pygments_lexer": "ipython3", 1006 | "version": "3.5.3" 1007 | } 1008 | }, 1009 | "nbformat": 4, 1010 | "nbformat_minor": 2 1011 | } 1012 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Workshop: Scientific Computing in Python 2 | Python is one of the most popular open source languages in history. There are more than 100,000 open source packages published on the official package index PiPy alone and many more projects in general. Under the banner of SciPy, there is a mature ecosystem of python packages for doing far reaching scientific analysis in python. In this workshop we cover a good number of the core packages and show you the door for further study. This workshop is accompanied by several interactive Jupyter Notebooks which illustrate different aspects of the SciPy ecosystem. 3 | 4 | ## Workshop Notebooks 5 | * **Beginner MNIST** - A TensorFlow tutorial on how to make a simple neural network for classifying MNIST digits. 6 | * **Exercise - Deriving the Quadratic Formula with SymPy** - A tutorial on using SymPy to derive the quadratic formula. 7 | * **Exploring MNIST Manifolds** - Exploring MNIST with Scikit-Learn by applying PCA and K-Means. Also has interactive components. 8 | * **Latex Essentials**- Shows you the basics of using LaTex for typesetting and mathematical notes. 9 | * **Linear Regression** - The Solution Space - Interactive components allow you to explore linear regression. Also shows how to do 3D plotting in matplotlib. 10 | * **Linear Regression** - Gradient Descent - A tutorial on how gradient descent is used to find an optimal linear regression. 11 | * **Linear Vs. Non-Linear Functions** - Shows how to plot in 2D and basic 3D. 
Also gives you an intuition of the difference between linear and non-linear functions. 12 | * **Matrix as a Function & Plotting Vectors** - Shows how to plot vectors with Matplotlib and shows how a matrix can be thought of as a linear transformation. Uses a lot of Matplotlib. 13 | * **MNIST Probability Experiments 1** - Shows different experiments of computing various statistics on MNIST. 14 | * **Neural Boolean Connectives 1** - Shows a very simple single hidden layer neural network and how it can represent the XOR function. Also shows how it can represent AND. 15 | * **SymPy Basics** - Shows you some fundamental features of SymPy. 16 | * **The Taylor Series** - Uses SymPy to explore the Taylor Series. Also makes use of Matplotlib. 17 | * **Poke Pandas** - A notebook using Pandas to analyze data about pokemon from the pokemon games. 18 | 19 | ## Workshop Setup 20 | 1. Download this Workshop’s Repo as a .zip file: http://bit.ly/2A6dTYp 21 | 2. Unzip the workshop .zip in a place you can remember. ( Try Documents ) 22 | 3. Download Anaconda Navigator (AN) 23 | PYTHON 3.6 Version https://www.continuum.io/downloads 24 | 4. Launch the root environment Jupyter Notebook server from the home tab. 25 | 5. A browser should have opened up upon launching the Jupyter Notebook server. In that browser, navigate to the workshop folder you unziped. 26 | 6. Click on *setup.ipynb* and follow the instructions. 27 | -------------------------------------------------------------------------------- /Resources/sympy_tutorial.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/Resources/sympy_tutorial.pdf -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/1_Introduction/Beginner MNIST.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "
\n", 8 | "

MNIST For ML Beginners

\n", 9 | "

Interactive IPython Notebook

\n", 10 | "
\n", 11 | " Source: https://www.tensorflow.org/versions/r0.8/tutorials/mnist/beginners/index.html\n", 12 | "
\n" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 1, 18 | "metadata": { 19 | "collapsed": false 20 | }, 21 | "outputs": [], 22 | "source": [ 23 | "import matplotlib.pyplot as plt\n", 24 | "import matplotlib.image as mpimg\n", 25 | "from pylab import *\n", 26 | "from numpy import *\n", 27 | "%matplotlib inline" 28 | ] 29 | }, 30 | { 31 | "cell_type": "markdown", 32 | "metadata": {}, 33 | "source": [ 34 | "Download MNIST dataset. " 35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": 2, 40 | "metadata": { 41 | "collapsed": false, 42 | "scrolled": true 43 | }, 44 | "outputs": [ 45 | { 46 | "name": "stdout", 47 | "output_type": "stream", 48 | "text": [ 49 | "Extracting MNIST_data/train-images-idx3-ubyte.gz\n", 50 | "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n", 51 | "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n", 52 | "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n" 53 | ] 54 | } 55 | ], 56 | "source": [ 57 | "from tensorflow.examples.tutorials.mnist import input_data\n", 58 | "mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)" 59 | ] 60 | }, 61 | { 62 | "cell_type": "markdown", 63 | "metadata": {}, 64 | "source": [ 65 | "
" 66 | ] 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "metadata": {}, 71 | "source": [ 72 | "Get a batch of 50 training examples from the MNIST training set. " 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": 3, 78 | "metadata": { 79 | "collapsed": false 80 | }, 81 | "outputs": [], 82 | "source": [ 83 | "batch = mnist.train.next_batch(50)" 84 | ] 85 | }, 86 | { 87 | "cell_type": "markdown", 88 | "metadata": {}, 89 | "source": [ 90 | "The MNIST dataset has thousands of 28 X 28 greyscale pixel images of alpabetical characters. They are unrolled into 784 length rows. " 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": 4, 96 | "metadata": { 97 | "collapsed": false 98 | }, 99 | "outputs": [ 100 | { 101 | "name": "stdout", 102 | "output_type": "stream", 103 | "text": [ 104 | "(784,) 784\n" 105 | ] 106 | } 107 | ], 108 | "source": [ 109 | "print( batch[0][0].shape, 28 * 28 )\n" 110 | ] 111 | }, 112 | { 113 | "cell_type": "markdown", 114 | "metadata": {}, 115 | "source": [ 116 | "First image in our batch of training examples alongside the one-hot vector classification/label. A one-hot vector is a vector which is 0 in most dimensions, and 1 in a single dimension. In this case, the nth digit will be represented as a vector which is 1 in the nth dimensions." 117 | ] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "execution_count": 5, 122 | "metadata": { 123 | "collapsed": false 124 | }, 125 | "outputs": [ 126 | { 127 | "data": { 128 | "text/plain": [ 129 | "array([ 0., 0., 0., 1., 0., 0., 0., 0., 0., 0.])" 130 | ] 131 | }, 132 | "execution_count": 5, 133 | "metadata": {}, 134 | "output_type": "execute_result" 135 | }, 136 | { 137 | "data": { 138 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAP8AAAD8CAYAAAC4nHJkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAABt1JREFUeJzt3a1vVHkbx+GZDQ7ZNkFRXElwBBw0wUDqWsuLgyY4QEEg\nmEIdCdTw8gegCnWgC4oQqKPFgqPFIjtrEI94zv1jmbbQ+V6Xvfe3M7D97BF3zzn9wWDQA/L886e/\nAPBniB9CiR9CiR9CiR9CiR9CiR9CiR9CiR9CHdjLD+v3+36dEHbZYDDo/8o/58oPocQPocQPocQP\nocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQP\nocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPocQPoQ786S/AnzU5OVnO\nL1++XM5v375dzgeDQees3++XZ9fX18v5nTt3yvnKyko5T+fKD6HED6HED6HED6HED6HED6HED6H6\n1R52xz+s39+7DwsyMTHRObt161Z59sKFC+V8bGysnLd29cPs+Vs/m1+/fi3nJ0+e7JxtbW2VZ/ez\nwWBQ/8X+5MoPocQPocQPocQPocQPocQPoaz69oHWbbMLCwuds9Z/391et21ubpbzyvj4eDk/cuRI\nOf/06VPn7NixY7/zlfYFqz6gJH4IJX4IJX4IJX4IJX4IJX4IZc+/D7x//76cHz9+vHM27J6/2pX3\ner3emTNnyvkwt86eOnWqnK+urpbz6s9+4MDoPrXenh8oiR9CiR9CiR9CiR9CiR9CiR9C2fP/BY4e\nPVrOW3v+79+/d85a99O39vDXr18v59euXSvni4uLnbMvX76UZ1taP7vb29uds6tXr5Znnz179lvf\n6W9gzw+UxA+hxA+hxA+hxA+hxA+hxA+h7Pn3gdbvAVS7+mFfRT0/P1/OHz9+XM6r12R//PixPDs3\nN1fOl5eXy3n1s33o0KHy7H5+hbc9P1ASP4QSP4QSP4QSP4QSP4QSP4Qa3YeXj5CNjY0/9tmt5wF8\n/vy5nFfPGmg9K+DmzZvlvPXOgd38/YdR4MoPocQPocQPocQPocQPocQPoaz6RsD09HTnrHU7cGuV\nt76+Xs6npqbK+bt37zpnExMT5dnW7eat7z4zM1PO07nyQyjxQyjxQyjxQyjxQyjxQyjxQyh7/hFw\n/vz5ztmVK1fKs63bYlu79tb5apc/zC25vV6vt7S0VM5bjwZP58oPocQPocQPocQPocQPocQPocQP\noez5R9ywr2DfzfNv374tz964caOc2+MPx5UfQokfQokfQokfQokfQokfQokfQtnzj4Dnz593ziYn\nJ8uz4+Pj5bz13P+DBw+W88rdu3fLuT3+7nLlh1Dih1Dih1Dih1Dih1Dih1Dih1D9Ye/X/k8f1u/v\n3YexI1p7/nv37pXz2dnZztna2lp5dmZmppy3nuufajAY1C9E+MmVH0KJH0KJH0KJH0KJH0KJH0JZ\n9f2i6lXTm5ube/hN9pfXr193zs6dO1eebT26++HDh7/1nUadVR9QEj+EEj+EEj+EEj+EEj+EEj+E\n8ujun6anp8v5gwcPOmcbGxvl2UuXLv3WdxoF9+/f75ydPXu2PDs1NbXTX4f/4coPocQPocQPocQP\nocQPocQPocQPoWL2/NX9+L1er/fkyZNy/u3bt85Z8h6/9Yrup0+fds76/V+67Zxd4soPocQPocQP\nocQPocQPocQPocQPoWL2/HNzc+W8d
e/46urqTn6dfaP1iu4XL16U8+rvtfXOiNZzEhiOKz+EEj+E\nEj+EEj+EEj+EEj+Eiln1vXnzppz/80/9/8Hq0d4XL14sz66vr5fzDx8+lPOWycnJztnp06fLs60V\n6OzsbDlv3ZZbrfMePXpUnm3NGY4rP4QSP4QSP4QSP4QSP4QSP4QSP4Tqt26r3NEP6/f37sP+o+Xl\n5XJe7buH2XX3er3e2tpaOW85fPhw52xsbKw8O+x3b52vXtG9tLRUnt3a2irn/H+DweCXnonuyg+h\nxA+hxA+hxA+hxA+hxA+hxA+h7Pl/ar3C+9WrV52zEydOlGe3t7fL+W7u2ltnf/z4Uc5bj89eXFws\n5ysrK+WcnWfPD5TED6HED6HED6HED6HED6HED6Hs+X/R+Ph452xhYWGof/f8/Hw5f/nyZTkf5r73\n1rPxvSZ7/7HnB0rih1Dih1Dih1Dih1Dih1Dih1D2/DBi7PmBkvghlPghlPghlPghlPghlPghlPgh\nlPghlPghlPghlPghlPghlPghlPghlPghlPghlPghlPghlPghlPghlPgh1J4+uhv4e7jyQyjxQyjx\nQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjxQyjx\nQyjxQ6h/AdzwgYFBiQXqAAAAAElFTkSuQmCC\n", 139 | "text/plain": [ 140 | "" 141 | ] 142 | }, 143 | "metadata": {}, 144 | "output_type": "display_data" 145 | } 146 | ], 147 | "source": [ 148 | "firstIMG = batch[0][1].reshape((28, 28))\n", 149 | "secondIMG = batch[0][2].reshape((28, 28))\n", 150 | "\n", 151 | "plt.axis(\"off\")\n", 152 | "\n", 153 | "imgplot = plt.imshow(firstIMG, cmap=cm.gray)\n", 154 | "\n", 155 | "batch[1][1]" 156 | ] 157 | }, 158 | { 159 | "cell_type": "markdown", 160 | "metadata": {}, 161 | "source": [ 162 | "Second image in our batch of training examples alongside the one-hot classification vector." 163 | ] 164 | }, 165 | { 166 | "cell_type": "code", 167 | "execution_count": 6, 168 | "metadata": { 169 | "collapsed": false 170 | }, 171 | "outputs": [ 172 | { 173 | "data": { 174 | "text/plain": [ 175 | "array([ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.])" 176 | ] 177 | }, 178 | "execution_count": 6, 179 | "metadata": {}, 180 | "output_type": "execute_result" 181 | }, 182 | { 183 | "data": { 184 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAP8AAAD8CAYAAAC4nHJkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAABhVJREFUeJzt3T1rVFsYhuEzKgk2fiFYCGkEG8FGSSNEBWv/gI1WVhYi\ngoJaqEgQixCwEq0tBK1EEMXGQrAQQRBshIiQQrFQEPOxT3uaeSfuyewk57mu9p2194Jwu4qVib2m\naf4B8mxa6w0Aa0P8EEr8EEr8EEr8EEr8EEr8EEr8EEr8EGpLly/r9Xp+nRBGrGma3ko+5+SHUOKH\nUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKH\nUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKHUOKH\nUOKHUOKHUOKHUOKHUOKHUOKHUFvWegMMNjY2Vs5fvHjRd3bkyJFyba/XK+c/fvwo5wcPHiznc3Nz\n5Zy14+SHUOKHUOKHUOKHUOKHUOKHUK761oFBV3n3798v54Ou8ypPnjwp59PT0+X869evrd89anv2\n7Ok7m5+f73An65OTH0KJH0KJH0KJH0KJH0KJH0KJH0K5518HLly4UM5PnTrV+tl3794t5xcvXizn\nv3//bv3uUbtz5045P3PmTN/ZjRs3yrUzMzOt9rSROPkhlPghlPghlPghlPghlPghlPghlHv+Dhw4\ncKCcX7lyZajn//z5s+/s/Pnz5drFxcWh3j1Khw8fLuenT58u5zt37lzF3fz/OPkhlPghlPghlPgh\nlPghlPghlPghlHv+Dly6dKmcb926tZwPuos/efJk67Xr2aC/NbBr165yvrCw0Hc26P8rSODkh1Di\nh1Dih1Dih1Dih1Dih1Dih1Du+Ttw6NChodY/e/asnL969ar1szdv3lzOx8bGWj97kH379pXzo0eP\nDvX8R48e9Z19/vx5qGf/Hzj5IZT4IZT4IZT4IZT4IZT4IZSrvg1gfHy89drJyclyfvPmzXJ+4sSJ\n1u8etfn5+XJ+69atjnayMTn5IZT4IZT4IZT4IZT4IZT4IZT4IZR7/g7cvn27nD948KCcHz9+vJy/\nfPmy72xqaqpcu2nTxv33/969e+X8w4cPHe1kY9q4P3lgKOKHUOKHUOKHUOKHUOKHUOKHUO75OzAx\nMTHU+i1b6h/TsWPHWj/7zZs35fzx48flfO/eveX83Llzf72nlXr79u3Inp3AyQ+hxA+hxA+hxA+h\nxA+hxA+hxA+h3PN3YND39f/8+TOydz98+LCcz83NlfOlpaVyfvny5b/e00q9fv26nD99+nRk707g\n5IdQ4odQ4odQ4odQ4odQ4odQ4odQ7vk78OXLl3I+PT3d0U5W369fv0b27NnZ2XK+uLg4sncncPJD\nKPFDKPFDKPFDKPFDKPFDKFd9DGXQV34ry8vL5fzTp0+tn81gTn4IJX4IJX4IJX4IJX4IJX4IJX4I\n5Z6foZw9e7b12ufPn5fzd+/etX42gzn5IZT4IZT4IZT4IZT4IZT4IZT4IZR7fkrbt28v59u2bWv9\n7JmZmdZrGZ6TH0KJH0KJH0KJH0KJH0KJH0KJH0K556c0OTlZzicmJsr5wsJC39m3b99a7YnV4eSH\nUOKHUOKHUOKHUOKHUOKHUL2mabp7Wa/X3ctYFR8/fizn+/fvL+ffv3/vO9u9e3erPVFrmqa3ks85\n+SGU+CGU+CGU+CGU+CGU+CGU+CGUr/RSGh8fH2r9+/fvV2knrD
YnP4QSP4QSP4QSP4QSP4QSP4QS\nP4Ryz89ILS0trfUW6MPJD6HED6HED6HED6HED6HED6HED6Hc8zNSU1NTfWfXrl0r116/fn21t8N/\nOPkhlPghlPghlPghlPghlPghlPghlHt+SrOzs+X86tWr5XzHjh19Z8vLy632xOpw8kMo8UMo8UMo\n8UMo8UMo8UOoXtM03b2s1+vuZRCqaZreSj7n5IdQ4odQ4odQ4odQ4odQ4odQ4odQ4odQ4odQ4odQ\n4odQ4odQ4odQ4odQ4odQnX6fH1g/nPwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQ\nSvwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQSvwQ6l8EhMln4ZEXXAAAAABJRU5ErkJggg==\n", 185 | "text/plain": [ 186 | "" 187 | ] 188 | }, 189 | "metadata": {}, 190 | "output_type": "display_data" 191 | } 192 | ], 193 | "source": [ 194 | "imgplot = plt.imshow(secondIMG , cmap=cm.gray)\n", 195 | "plt.axis(\"off\")\n", 196 | "batch[1][2]" 197 | ] 198 | }, 199 | { 200 | "cell_type": "markdown", 201 | "metadata": {}, 202 | "source": [ 203 | "
## Softmax Regression
" 204 | ] 205 | }, 206 | { 207 | "cell_type": "markdown", 208 | "metadata": {}, 209 | "source": [ 210 | "$$y = softmax(Wx+b)$$" 211 | ] 212 | }, 213 | { 214 | "cell_type": "code", 215 | "execution_count": 7, 216 | "metadata": { 217 | "collapsed": true 218 | }, 219 | "outputs": [], 220 | "source": [ 221 | "import tensorflow as tf\n", 222 | "\n", 223 | "# Clear the graph from prior sessions. A ipython quirk. \n", 224 | "tf.reset_default_graph()" 225 | ] 226 | }, 227 | { 228 | "cell_type": "code", 229 | "execution_count": 8, 230 | "metadata": { 231 | "collapsed": false 232 | }, 233 | "outputs": [], 234 | "source": [ 235 | "with tf.name_scope('Hidden') as scope:\n", 236 | " x = tf.placeholder(tf.float32, [None, 784],name='input')\n", 237 | " W = tf.Variable(tf.zeros([784, 10]),name='weights')\n", 238 | " b = tf.Variable(tf.zeros([10]),name='biases')\n", 239 | " y = tf.nn.softmax(tf.matmul(x, W) + b, name='softmax')" 240 | ] 241 | }, 242 | { 243 | "cell_type": "markdown", 244 | "metadata": {}, 245 | "source": [ 246 | "
## Cross-Entropy as a Cost Function
" 247 | ] 248 | }, 249 | { 250 | "cell_type": "markdown", 251 | "metadata": {}, 252 | "source": [ 253 | "
$$H_{y'}(y) = - \sum_{i}y'_i \log(y_i)$$
" 254 | ] 255 | }, 256 | { 257 | "cell_type": "markdown", 258 | "metadata": {}, 259 | "source": [ 260 | "
$$H(X) = \sum_{i=1}^m p(x_i)\log\left( \frac{1}{p(x_i)} \right) = - \sum_{i=1}^m p(x_i) \log(p(x_i))$$
" 261 | ] 262 | }, 263 | { 264 | "cell_type": "markdown", 265 | "metadata": {}, 266 | "source": [ 267 | "Where y is our predicted probability distribution, and y′ is the true distribution (the one-hot vector we'll input). In some rough sense, the cross-entropy is measuring how inefficient our predictions are for describing the truth." 268 | ] 269 | }, 270 | { 271 | "cell_type": "markdown", 272 | "metadata": {}, 273 | "source": [ 274 | "To implement cross-entropy we need to first add a new placeholder to input the correct answers:\n" 275 | ] 276 | }, 277 | { 278 | "cell_type": "code", 279 | "execution_count": 9, 280 | "metadata": { 281 | "collapsed": true 282 | }, 283 | "outputs": [], 284 | "source": [ 285 | "y_ = tf.placeholder(tf.float32, [None, 10])" 286 | ] 287 | }, 288 | { 289 | "cell_type": "markdown", 290 | "metadata": {}, 291 | "source": [ 292 | "First, tf.log computes the logarithm of each element of y. Next, we multiply each element of y_ with the corresponding element of tf.log(y). Then tf.reduce_sum adds the elements in the second dimension of y, due to the reduction_indices=[1] parameter. Finally, tf.reduce_mean computes the mean over all the examples in the batch." 293 | ] 294 | }, 295 | { 296 | "cell_type": "code", 297 | "execution_count": 10, 298 | "metadata": { 299 | "collapsed": true 300 | }, 301 | "outputs": [], 302 | "source": [ 303 | "cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))" 304 | ] 305 | }, 306 | { 307 | "cell_type": "markdown", 308 | "metadata": {}, 309 | "source": [ 310 | "In this case, we ask TensorFlow to minimize cross_entropy using the gradient descent algorithm with a learning rate of 0.5. Gradient descent is a simple procedure, where TensorFlow simply shifts each variable a little bit in the direction that reduces the cost. " 311 | ] 312 | }, 313 | { 314 | "cell_type": "code", 315 | "execution_count": 11, 316 | "metadata": { 317 | "collapsed": true 318 | }, 319 | "outputs": [], 320 | "source": [ 321 | "train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n" 322 | ] 323 | }, 324 | { 325 | "cell_type": "markdown", 326 | "metadata": {}, 327 | "source": [ 328 | "Now we have our model set up to train. One last thing before we launch it, we have to add an operation to initialize the variables we created:" 329 | ] 330 | }, 331 | { 332 | "cell_type": "code", 333 | "execution_count": 20, 334 | "metadata": { 335 | "collapsed": false 336 | }, 337 | "outputs": [], 338 | "source": [ 339 | "init = tf.global_variables_initializer()" 340 | ] 341 | }, 342 | { 343 | "cell_type": "markdown", 344 | "metadata": {}, 345 | "source": [ 346 | "We can now launch the model in a Session, and run the operation that initializes the variables:" 347 | ] 348 | }, 349 | { 350 | "cell_type": "code", 351 | "execution_count": 21, 352 | "metadata": { 353 | "collapsed": false 354 | }, 355 | "outputs": [], 356 | "source": [ 357 | "sess = tf.Session()\n", 358 | "sess.run(init)" 359 | ] 360 | }, 361 | { 362 | "cell_type": "markdown", 363 | "metadata": {}, 364 | "source": [ 365 | "Before we train our model, let's get TensorBoard up and running. Go to your commandline and CD into the directory that contains this notebook. Activate your tensorflow virtualenv. 
Then issue the following console command to launch TensorBoard: \n", 366 | "\n", 367 | "```\n", 368 | "tensorboard --logdir=log_beginner_minst\n", 369 | "\n", 370 | "```\n", 371 | "\n", 372 | "You should get something like this: \n", 373 | "\n", 374 | "```\n", 375 | "Starting TensorBoard 16 on port 6006\n", 376 | "(You can navigate to http://0.0.0.0:6006)\n", 377 | "```\n", 378 | "\n", 379 | "Open up a web browser and navigate to the url specified. \n", 380 | "\n", 381 | "Let's now visualize our graph" 382 | ] 383 | }, 384 | { 385 | "cell_type": "code", 386 | "execution_count": 22, 387 | "metadata": { 388 | "collapsed": false 389 | }, 390 | "outputs": [ 391 | { 392 | "ename": "FailedPreconditionError", 393 | "evalue": "Failed to remove a directory: log_beginner_minst/run1/", 394 | "output_type": "error", 395 | "traceback": [ 396 | "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", 397 | "\u001b[1;31mFailedPreconditionError\u001b[0m Traceback (most recent call last)", 398 | "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m()\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgfile\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mExists\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msummaries_dir\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 4\u001b[1;33m \u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgfile\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mDeleteRecursively\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msummaries_dir\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 5\u001b[0m \u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgfile\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mMakeDirs\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msummaries_dir\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 6\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", 399 | "\u001b[1;32mC:\\Users\\Andre\\Anaconda3\\envs\\TF_GPU\\lib\\site-packages\\tensorflow\\python\\lib\\io\\file_io.py\u001b[0m in \u001b[0;36mdelete_recursively\u001b[1;34m(dirname)\u001b[0m\n\u001b[0;32m 364\u001b[0m \"\"\"\n\u001b[0;32m 365\u001b[0m \u001b[1;32mwith\u001b[0m \u001b[0merrors\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mraise_exception_on_not_ok_status\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mstatus\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 366\u001b[1;33m \u001b[0mpywrap_tensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mDeleteRecursively\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcompat\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mas_bytes\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdirname\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mstatus\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 367\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 368\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", 400 | "\u001b[1;32mC:\\Users\\Andre\\Anaconda3\\envs\\TF_GPU\\lib\\contextlib.py\u001b[0m in \u001b[0;36m__exit__\u001b[1;34m(self, type, value, traceback)\u001b[0m\n\u001b[0;32m 64\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mtype\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 65\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 66\u001b[1;33m 
\u001b[0mnext\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgen\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 67\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 68\u001b[0m \u001b[1;32mreturn\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 401 | "\u001b[1;32mC:\\Users\\Andre\\Anaconda3\\envs\\TF_GPU\\lib\\site-packages\\tensorflow\\python\\framework\\errors_impl.py\u001b[0m in \u001b[0;36mraise_exception_on_not_ok_status\u001b[1;34m()\u001b[0m\n\u001b[0;32m 467\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 468\u001b[0m \u001b[0mcompat\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpywrap_tensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTF_Message\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mstatus\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 469\u001b[1;33m pywrap_tensorflow.TF_GetCode(status))\n\u001b[0m\u001b[0;32m 470\u001b[0m \u001b[1;32mfinally\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 471\u001b[0m \u001b[0mpywrap_tensorflow\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTF_DeleteStatus\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mstatus\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 402 | "\u001b[1;31mFailedPreconditionError\u001b[0m: Failed to remove a directory: log_beginner_minst/run1/" 403 | ] 404 | } 405 | ], 406 | "source": [ 407 | "summaries_dir = \"log_beginner_minst/run1/\"\n", 408 | "\n", 409 | "if tf.gfile.Exists(summaries_dir):\n", 410 | " tf.gfile.DeleteRecursively(summaries_dir)\n", 411 | " tf.gfile.MakeDirs(summaries_dir)\n", 412 | "\n", 413 | "summary_writer = tf.summary.FileWriter(summaries_dir, sess.graph)" 414 | ] 415 | }, 416 | { 417 | "cell_type": "markdown", 418 | "metadata": {}, 419 | "source": [ 420 | "Let's train -- we'll run the training step 1000 times!" 421 | ] 422 | }, 423 | { 424 | "cell_type": "code", 425 | "execution_count": 23, 426 | "metadata": { 427 | "collapsed": false 428 | }, 429 | "outputs": [ 430 | { 431 | "name": "stdout", 432 | "output_type": "stream", 433 | "text": [ 434 | "Batch 100000\n", 435 | "Batch 200000\n", 436 | "Batch 300000\n", 437 | "Batch 400000\n", 438 | "Batch 500000\n", 439 | "Batch 600000\n", 440 | "Batch 700000\n", 441 | "Batch 800000\n", 442 | "Batch 900000\n", 443 | "Batch 1000000\n" 444 | ] 445 | } 446 | ], 447 | "source": [ 448 | "for i in range(10000):\n", 449 | "\n", 450 | " batch_xs, batch_ys = mnist.train.next_batch(100)\n", 451 | " \n", 452 | " sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys}) \n", 453 | " \n", 454 | " if( (i+1)*100 % 100000 == 0):\n", 455 | " print(\"Batch %i\"% ( (i+1)*100) )\n", 456 | " #print(\"One example: \",batch_xs[0],batch_ys[0])\n" 457 | ] 458 | }, 459 | { 460 | "cell_type": "markdown", 461 | "metadata": { 462 | "collapsed": true 463 | }, 464 | "source": [ 465 | "tf.argmax is an extremely useful function which gives you the index of the highest entry in a tensor along some axis. For example, tf.argmax(y,1) is the label our model thinks is most likely for each input, while tf.argmax(y_,1) is the true label. 
We can use tf.equal to check if our prediction matches the truth.\n" 466 | ] 467 | }, 468 | { 469 | "cell_type": "code", 470 | "execution_count": 24, 471 | "metadata": { 472 | "collapsed": false 473 | }, 474 | "outputs": [ 475 | { 476 | "name": "stdout", 477 | "output_type": "stream", 478 | "text": [ 479 | "[ True True True ..., True True True]\n", 480 | "0.9196\n" 481 | ] 482 | } 483 | ], 484 | "source": [ 485 | "correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\n", 486 | "\n", 487 | "correctPredictionBools = sess.run(correct_prediction, feed_dict={x: mnist.test.images, y_: mnist.test.labels})\n", 488 | "\n", 489 | "print(correctPredictionBools)\n", 490 | "print(sum(correctPredictionBools)/float(len(correctPredictionBools)) )" 491 | ] 492 | }, 493 | { 494 | "cell_type": "code", 495 | "execution_count": 17, 496 | "metadata": { 497 | "collapsed": false 498 | }, 499 | "outputs": [], 500 | "source": [ 501 | "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))" 502 | ] 503 | }, 504 | { 505 | "cell_type": "code", 506 | "execution_count": 18, 507 | "metadata": { 508 | "collapsed": false 509 | }, 510 | "outputs": [ 511 | { 512 | "name": "stdout", 513 | "output_type": "stream", 514 | "text": [ 515 | "0.9249\n" 516 | ] 517 | } 518 | ], 519 | "source": [ 520 | "print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))" 521 | ] 522 | }, 523 | { 524 | "cell_type": "code", 525 | "execution_count": null, 526 | "metadata": { 527 | "collapsed": true 528 | }, 529 | "outputs": [], 530 | "source": [] 531 | } 532 | ], 533 | "metadata": { 534 | "kernelspec": { 535 | "display_name": "Python 3", 536 | "language": "python", 537 | "name": "python3" 538 | }, 539 | "language_info": { 540 | "codemirror_mode": { 541 | "name": "ipython", 542 | "version": 3 543 | }, 544 | "file_extension": ".py", 545 | "mimetype": "text/x-python", 546 | "name": "python", 547 | "nbconvert_exporter": "python", 548 | "pygments_lexer": "ipython3", 549 | "version": "3.5.3" 550 | } 551 | }, 552 | "nbformat": 4, 553 | "nbformat_minor": 0 554 | } 555 | -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/2_going depp/Deep MNIST for Experts.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "
\n", 8 | "

Deep MNIST for Experts

\n", 9 | "

Interactive IPython Notebook

\n", 10 | "
" 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": {}, 16 | "source": [ 17 | "
Source: https://www.tensorflow.org/versions/r0.8/tutorials/mnist/pros/index.html#deep-mnist-for-experts
" 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": {}, 23 | "source": [ 24 | "

Download MNIST Data

\n" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": 2, 30 | "metadata": { 31 | "collapsed": false 32 | }, 33 | "outputs": [ 34 | { 35 | "name": "stdout", 36 | "output_type": "stream", 37 | "text": [ 38 | "Extracting MNIST_data/train-images-idx3-ubyte.gz\n", 39 | "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n", 40 | "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n", 41 | "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n" 42 | ] 43 | } 44 | ], 45 | "source": [ 46 | "from tensorflow.examples.tutorials.mnist import input_data\n", 47 | "mnist = input_data.read_data_sets('MNIST_data', one_hot=True)" 48 | ] 49 | }, 50 | { 51 | "cell_type": "markdown", 52 | "metadata": {}, 53 | "source": [ 54 | "

Start Session and Import TF

" 55 | ] 56 | }, 57 | { 58 | "cell_type": "code", 59 | "execution_count": 3, 60 | "metadata": { 61 | "collapsed": false 62 | }, 63 | "outputs": [], 64 | "source": [ 65 | "import tensorflow as tf\n", 66 | "sess = tf.InteractiveSession()" 67 | ] 68 | }, 69 | { 70 | "cell_type": "markdown", 71 | "metadata": {}, 72 | "source": [ 73 | "

Placeholders

" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": 4, 79 | "metadata": { 80 | "collapsed": true 81 | }, 82 | "outputs": [], 83 | "source": [ 84 | "x = tf.placeholder(tf.float32, shape=[None, 784])\n", 85 | "y_ = tf.placeholder(tf.float32, shape=[None, 10])" 86 | ] 87 | }, 88 | { 89 | "cell_type": "markdown", 90 | "metadata": {}, 91 | "source": [ 92 | "

Variables

" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": 5, 98 | "metadata": { 99 | "collapsed": true 100 | }, 101 | "outputs": [], 102 | "source": [ 103 | "W = tf.Variable(tf.zeros([784,10]))\n", 104 | "b = tf.Variable(tf.zeros([10]))" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": 6, 110 | "metadata": { 111 | "collapsed": true 112 | }, 113 | "outputs": [], 114 | "source": [ 115 | "sess.run(tf.initialize_all_variables())" 116 | ] 117 | }, 118 | { 119 | "cell_type": "markdown", 120 | "metadata": {}, 121 | "source": [ 122 | "

Predicted Class and Cost Function

" 123 | ] 124 | }, 125 | { 126 | "cell_type": "code", 127 | "execution_count": 7, 128 | "metadata": { 129 | "collapsed": true 130 | }, 131 | "outputs": [], 132 | "source": [ 133 | "y = tf.nn.softmax(tf.matmul(x,W) + b)" 134 | ] 135 | }, 136 | { 137 | "cell_type": "code", 138 | "execution_count": 8, 139 | "metadata": { 140 | "collapsed": true 141 | }, 142 | "outputs": [], 143 | "source": [ 144 | "cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))" 145 | ] 146 | }, 147 | { 148 | "cell_type": "markdown", 149 | "metadata": {}, 150 | "source": [ 151 | "

Train the Model

" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": 9, 157 | "metadata": { 158 | "collapsed": true 159 | }, 160 | "outputs": [], 161 | "source": [ 162 | "train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)" 163 | ] 164 | }, 165 | { 166 | "cell_type": "code", 167 | "execution_count": 10, 168 | "metadata": { 169 | "collapsed": false 170 | }, 171 | "outputs": [], 172 | "source": [ 173 | "for i in range(1000):\n", 174 | " batch = mnist.train.next_batch(50)\n", 175 | " train_step.run(feed_dict={x: batch[0], y_: batch[1]})" 176 | ] 177 | }, 178 | { 179 | "cell_type": "markdown", 180 | "metadata": {}, 181 | "source": [ 182 | "

Evaluating the Model

" 183 | ] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "execution_count": 11, 188 | "metadata": { 189 | "collapsed": true 190 | }, 191 | "outputs": [], 192 | "source": [ 193 | "correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))" 194 | ] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "execution_count": 12, 199 | "metadata": { 200 | "collapsed": true 201 | }, 202 | "outputs": [], 203 | "source": [ 204 | "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": 13, 210 | "metadata": { 211 | "collapsed": false 212 | }, 213 | "outputs": [ 214 | { 215 | "name": "stdout", 216 | "output_type": "stream", 217 | "text": [ 218 | "0.9092\n" 219 | ] 220 | } 221 | ], 222 | "source": [ 223 | "print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))" 224 | ] 225 | }, 226 | { 227 | "cell_type": "markdown", 228 | "metadata": {}, 229 | "source": [ 230 | "

Multilayer Convolutional Network

" 231 | ] 232 | }, 233 | { 234 | "cell_type": "markdown", 235 | "metadata": {}, 236 | "source": [ 237 | "In order to inprove our prediction accuracy, we will create a small convolutional neural network. " 238 | ] 239 | }, 240 | { 241 | "cell_type": "code", 242 | "execution_count": 14, 243 | "metadata": { 244 | "collapsed": true 245 | }, 246 | "outputs": [], 247 | "source": [ 248 | "def weight_variable(shape):\n", 249 | " initial = tf.truncated_normal(shape, stddev=0.1)\n", 250 | " return tf.Variable(initial)\n", 251 | "\n", 252 | "def bias_variable(shape):\n", 253 | " initial = tf.constant(0.1, shape=shape)\n", 254 | " return tf.Variable(initial)\n", 255 | "\n", 256 | "def conv2d(x, W):\n", 257 | " return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n", 258 | "\n", 259 | "def max_pool_2x2(x):\n", 260 | " return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n", 261 | " strides=[1, 2, 2, 1], padding='SAME')" 262 | ] 263 | }, 264 | { 265 | "cell_type": "markdown", 266 | "metadata": {}, 267 | "source": [ 268 | "

First Convolutional Layer

" 269 | ] 270 | }, 271 | { 272 | "cell_type": "markdown", 273 | "metadata": {}, 274 | "source": [ 275 | "[5, 5, 1, 32] - The first two dimensions are the patch size, the next is the number of input channels, and the last is the number of output channels. We will also have a bias vector with a component for each output channel." 276 | ] 277 | }, 278 | { 279 | "cell_type": "code", 280 | "execution_count": 15, 281 | "metadata": { 282 | "collapsed": true 283 | }, 284 | "outputs": [], 285 | "source": [ 286 | "W_conv1 = weight_variable([5, 5, 1, 32])\n", 287 | "b_conv1 = bias_variable([32])" 288 | ] 289 | }, 290 | { 291 | "cell_type": "markdown", 292 | "metadata": {}, 293 | "source": [ 294 | "To apply the layer, we first reshape x to a 4d tensor, with the second and third dimensions corresponding to image width and height, and the final dimension corresponding to the number of color channels." 295 | ] 296 | }, 297 | { 298 | "cell_type": "code", 299 | "execution_count": 16, 300 | "metadata": { 301 | "collapsed": true 302 | }, 303 | "outputs": [], 304 | "source": [ 305 | "x_image = tf.reshape(x, [-1,28,28,1])" 306 | ] 307 | }, 308 | { 309 | "cell_type": "markdown", 310 | "metadata": {}, 311 | "source": [ 312 | "Convolve x_image with the weight tensor, add the bias, apply the ReLU function, and finally max pool.\n", 313 | "\n" 314 | ] 315 | }, 316 | { 317 | "cell_type": "code", 318 | "execution_count": 17, 319 | "metadata": { 320 | "collapsed": true 321 | }, 322 | "outputs": [], 323 | "source": [ 324 | "h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n", 325 | "h_pool1 = max_pool_2x2(h_conv1)" 326 | ] 327 | }, 328 | { 329 | "cell_type": "markdown", 330 | "metadata": {}, 331 | "source": [ 332 | "

Second Convolutional Layer

" 333 | ] 334 | }, 335 | { 336 | "cell_type": "markdown", 337 | "metadata": {}, 338 | "source": [ 339 | "In order to build a deep network, we stack several layers of this type. The second layer will have 64 features for each 5x5 patch." 340 | ] 341 | }, 342 | { 343 | "cell_type": "code", 344 | "execution_count": 18, 345 | "metadata": { 346 | "collapsed": true 347 | }, 348 | "outputs": [], 349 | "source": [ 350 | "W_conv2 = weight_variable([5, 5, 32, 64])\n", 351 | "b_conv2 = bias_variable([64])\n", 352 | "\n", 353 | "h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n", 354 | "h_pool2 = max_pool_2x2(h_conv2)" 355 | ] 356 | }, 357 | { 358 | "cell_type": "markdown", 359 | "metadata": {}, 360 | "source": [ 361 | "

Densely Connected Layer

" 362 | ] 363 | }, 364 | { 365 | "cell_type": "markdown", 366 | "metadata": {}, 367 | "source": [ 368 | "Now that the image size has been reduced to 7x7, we add a fully-connected layer with 1024 neurons to allow processing on the entire image. We reshape the tensor from the pooling layer into a batch of vectors, multiply by a weight matrix, add a bias, and apply a ReLU." 369 | ] 370 | }, 371 | { 372 | "cell_type": "code", 373 | "execution_count": 19, 374 | "metadata": { 375 | "collapsed": true 376 | }, 377 | "outputs": [], 378 | "source": [ 379 | "W_fc1 = weight_variable([7 * 7 * 64, 1024])\n", 380 | "b_fc1 = bias_variable([1024])\n", 381 | "\n", 382 | "h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n", 383 | "h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)" 384 | ] 385 | }, 386 | { 387 | "cell_type": "markdown", 388 | "metadata": {}, 389 | "source": [ 390 | "To reduce overfitting, we will apply dropout before the readout layer. We create a placeholder for the probability that a neuron's output is kept during dropout. This allows us to turn dropout on during training, and turn it off during testing. TensorFlow's tf.nn.dropout op automatically handles scaling neuron outputs in addition to masking them, so dropout just works without any additional scaling" 391 | ] 392 | }, 393 | { 394 | "cell_type": "code", 395 | "execution_count": 20, 396 | "metadata": { 397 | "collapsed": true 398 | }, 399 | "outputs": [], 400 | "source": [ 401 | "keep_prob = tf.placeholder(tf.float32)\n", 402 | "h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)" 403 | ] 404 | }, 405 | { 406 | "cell_type": "markdown", 407 | "metadata": {}, 408 | "source": [ 409 | "Add a softmax layer, just like for the one layer softmax regression above." 410 | ] 411 | }, 412 | { 413 | "cell_type": "code", 414 | "execution_count": null, 415 | "metadata": { 416 | "collapsed": true 417 | }, 418 | "outputs": [], 419 | "source": [ 420 | "W_fc2 = weight_variable([1024, 10])\n", 421 | "b_fc2 = bias_variable([10])\n", 422 | "\n", 423 | "y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)" 424 | ] 425 | }, 426 | { 427 | "cell_type": "markdown", 428 | "metadata": {}, 429 | "source": [ 430 | "

Train and Evaluate the Model

" 431 | ] 432 | }, 433 | { 434 | "cell_type": "code", 435 | "execution_count": null, 436 | "metadata": { 437 | "collapsed": false 438 | }, 439 | "outputs": [ 440 | { 441 | "name": "stdout", 442 | "output_type": "stream", 443 | "text": [ 444 | "step 0, training accuracy 0.08\n", 445 | "step 100, training accuracy 0.8\n" 446 | ] 447 | } 448 | ], 449 | "source": [ 450 | "cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))\n", 451 | "train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n", 452 | "correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))\n", 453 | "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n", 454 | "sess.run(tf.initialize_all_variables())\n", 455 | "for i in range(700):\n", 456 | " batch = mnist.train.next_batch(50)\n", 457 | " if i%100 == 0:\n", 458 | " train_accuracy = accuracy.eval(feed_dict={\n", 459 | " x:batch[0], y_: batch[1], keep_prob: 1.0})\n", 460 | " print(\"step %d, training accuracy %g\"%(i, train_accuracy))\n", 461 | " train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n", 462 | "\n", 463 | "print(\"test accuracy %g\"%accuracy.eval(feed_dict={\n", 464 | " x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))" 465 | ] 466 | }, 467 | { 468 | "cell_type": "code", 469 | "execution_count": null, 470 | "metadata": { 471 | "collapsed": true 472 | }, 473 | "outputs": [], 474 | "source": [] 475 | } 476 | ], 477 | "metadata": { 478 | "kernelspec": { 479 | "display_name": "Python 2", 480 | "language": "python", 481 | "name": "python2" 482 | }, 483 | "language_info": { 484 | "codemirror_mode": { 485 | "name": "ipython", 486 | "version": 2 487 | }, 488 | "file_extension": ".py", 489 | "mimetype": "text/x-python", 490 | "name": "python", 491 | "nbconvert_exporter": "python", 492 | "pygments_lexer": "ipython2", 493 | "version": "2.7.10" 494 | } 495 | }, 496 | "nbformat": 4, 497 | "nbformat_minor": 0 498 | } 499 | -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/Danbury AI_ TensorFlow Workshop 3%2F2017.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/TensorFlow-Workshop-March-2017/Danbury AI_ TensorFlow Workshop 3%2F2017.pptx -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/MachineLearningWithTensorFlow.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/TensorFlow-Workshop-March-2017/MachineLearningWithTensorFlow.pptx -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/README.md: -------------------------------------------------------------------------------- 1 | # TensorFlow-Workshop-March-2017 2 | TensorFlow Workshop given at the March 2017 Meeting of Danbury AI 3 | 4 | # Key Resources 5 | + 6 | -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/old_stuff/ASingleNeuron.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "# Source: 
https://www.oreilly.com/learning/hello-tensorflow\n", 12 | "# Annotated by: Andrew Ribeiro \n", 13 | "\n", 14 | "import tensorflow as tf\n", 15 | "\n", 16 | "x = tf.constant(1.0, name='input')\n", 17 | "w = tf.Variable(0.8, name='weight')\n", 18 | "y = tf.mul(w, x, name='output')\n", 19 | "y_ = tf.constant(0.0, name='correct_value')\n", 20 | "\n", 21 | "loss = tf.pow(y - y_, 2, name='loss')\n", 22 | "train_step = tf.train.GradientDescentOptimizer(0.025).minimize(loss)\n", 23 | "\n", 24 | "for value in [x, w, y, y_, loss]:\n", 25 | " tf.scalar_summary(value.op.name, value)\n", 26 | "\n", 27 | "summaries = tf.merge_all_summaries()\n", 28 | "\n", 29 | "sess = tf.Session()\n", 30 | "summary_writer = tf.train.SummaryWriter('log_single_neuron', sess.graph)\n", 31 | "\n", 32 | "sess.run(tf.initialize_all_variables())\n", 33 | "\n", 34 | "for i in range(100):\n", 35 | " summary_writer.add_summary(sess.run(summaries), i)\n", 36 | " sess.run(train_step)\n", 37 | "\n", 38 | " " 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": null, 44 | "metadata": { 45 | "collapsed": true 46 | }, 47 | "outputs": [], 48 | "source": [] 49 | } 50 | ], 51 | "metadata": { 52 | "kernelspec": { 53 | "display_name": "Python 2", 54 | "language": "python", 55 | "name": "python2" 56 | }, 57 | "language_info": { 58 | "codemirror_mode": { 59 | "name": "ipython", 60 | "version": 2 61 | }, 62 | "file_extension": ".py", 63 | "mimetype": "text/x-python", 64 | "name": "python", 65 | "nbconvert_exporter": "python", 66 | "pygments_lexer": "ipython2", 67 | "version": "2.7.10" 68 | } 69 | }, 70 | "nbformat": 4, 71 | "nbformat_minor": 0 72 | } 73 | -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/old_stuff/Deep MNIST for Experts.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "
\n", 8 | "

Deep MNIST for Experts

\n", 9 | "

Interactive IPython Notebook

\n", 10 | "
" 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": {}, 16 | "source": [ 17 | "
Source: https://www.tensorflow.org/versions/r0.8/tutorials/mnist/pros/index.html#deep-mnist-for-experts
" 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": {}, 23 | "source": [ 24 | "

Download MNIST Data

\n" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": 2, 30 | "metadata": { 31 | "collapsed": false 32 | }, 33 | "outputs": [ 34 | { 35 | "name": "stdout", 36 | "output_type": "stream", 37 | "text": [ 38 | "Extracting MNIST_data/train-images-idx3-ubyte.gz\n", 39 | "Extracting MNIST_data/train-labels-idx1-ubyte.gz\n", 40 | "Extracting MNIST_data/t10k-images-idx3-ubyte.gz\n", 41 | "Extracting MNIST_data/t10k-labels-idx1-ubyte.gz\n" 42 | ] 43 | } 44 | ], 45 | "source": [ 46 | "from tensorflow.examples.tutorials.mnist import input_data\n", 47 | "mnist = input_data.read_data_sets('MNIST_data', one_hot=True)" 48 | ] 49 | }, 50 | { 51 | "cell_type": "markdown", 52 | "metadata": {}, 53 | "source": [ 54 | "

Start Session and Import TF

" 55 | ] 56 | }, 57 | { 58 | "cell_type": "code", 59 | "execution_count": 3, 60 | "metadata": { 61 | "collapsed": false 62 | }, 63 | "outputs": [], 64 | "source": [ 65 | "import tensorflow as tf\n", 66 | "sess = tf.InteractiveSession()" 67 | ] 68 | }, 69 | { 70 | "cell_type": "markdown", 71 | "metadata": {}, 72 | "source": [ 73 | "

Placeholders

" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": 4, 79 | "metadata": { 80 | "collapsed": true 81 | }, 82 | "outputs": [], 83 | "source": [ 84 | "x = tf.placeholder(tf.float32, shape=[None, 784])\n", 85 | "y_ = tf.placeholder(tf.float32, shape=[None, 10])" 86 | ] 87 | }, 88 | { 89 | "cell_type": "markdown", 90 | "metadata": {}, 91 | "source": [ 92 | "

Variables

" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": 5, 98 | "metadata": { 99 | "collapsed": true 100 | }, 101 | "outputs": [], 102 | "source": [ 103 | "W = tf.Variable(tf.zeros([784,10]))\n", 104 | "b = tf.Variable(tf.zeros([10]))" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": 6, 110 | "metadata": { 111 | "collapsed": true 112 | }, 113 | "outputs": [], 114 | "source": [ 115 | "sess.run(tf.initialize_all_variables())" 116 | ] 117 | }, 118 | { 119 | "cell_type": "markdown", 120 | "metadata": {}, 121 | "source": [ 122 | "

Predicted Class and Cost Function

" 123 | ] 124 | }, 125 | { 126 | "cell_type": "code", 127 | "execution_count": 7, 128 | "metadata": { 129 | "collapsed": true 130 | }, 131 | "outputs": [], 132 | "source": [ 133 | "y = tf.nn.softmax(tf.matmul(x,W) + b)" 134 | ] 135 | }, 136 | { 137 | "cell_type": "code", 138 | "execution_count": 8, 139 | "metadata": { 140 | "collapsed": true 141 | }, 142 | "outputs": [], 143 | "source": [ 144 | "cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))" 145 | ] 146 | }, 147 | { 148 | "cell_type": "markdown", 149 | "metadata": {}, 150 | "source": [ 151 | "

Train the Model

" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": 9, 157 | "metadata": { 158 | "collapsed": true 159 | }, 160 | "outputs": [], 161 | "source": [ 162 | "train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)" 163 | ] 164 | }, 165 | { 166 | "cell_type": "code", 167 | "execution_count": 10, 168 | "metadata": { 169 | "collapsed": false 170 | }, 171 | "outputs": [], 172 | "source": [ 173 | "for i in range(1000):\n", 174 | " batch = mnist.train.next_batch(50)\n", 175 | " train_step.run(feed_dict={x: batch[0], y_: batch[1]})" 176 | ] 177 | }, 178 | { 179 | "cell_type": "markdown", 180 | "metadata": {}, 181 | "source": [ 182 | "

Evaluating the Model

" 183 | ] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "execution_count": 11, 188 | "metadata": { 189 | "collapsed": true 190 | }, 191 | "outputs": [], 192 | "source": [ 193 | "correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))" 194 | ] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "execution_count": 12, 199 | "metadata": { 200 | "collapsed": true 201 | }, 202 | "outputs": [], 203 | "source": [ 204 | "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": 13, 210 | "metadata": { 211 | "collapsed": false 212 | }, 213 | "outputs": [ 214 | { 215 | "name": "stdout", 216 | "output_type": "stream", 217 | "text": [ 218 | "0.9092\n" 219 | ] 220 | } 221 | ], 222 | "source": [ 223 | "print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))" 224 | ] 225 | }, 226 | { 227 | "cell_type": "markdown", 228 | "metadata": {}, 229 | "source": [ 230 | "

Multilayer Convolutional Network

" 231 | ] 232 | }, 233 | { 234 | "cell_type": "markdown", 235 | "metadata": {}, 236 | "source": [ 237 | "In order to inprove our prediction accuracy, we will create a small convolutional neural network. " 238 | ] 239 | }, 240 | { 241 | "cell_type": "code", 242 | "execution_count": 14, 243 | "metadata": { 244 | "collapsed": true 245 | }, 246 | "outputs": [], 247 | "source": [ 248 | "def weight_variable(shape):\n", 249 | " initial = tf.truncated_normal(shape, stddev=0.1)\n", 250 | " return tf.Variable(initial)\n", 251 | "\n", 252 | "def bias_variable(shape):\n", 253 | " initial = tf.constant(0.1, shape=shape)\n", 254 | " return tf.Variable(initial)\n", 255 | "\n", 256 | "def conv2d(x, W):\n", 257 | " return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n", 258 | "\n", 259 | "def max_pool_2x2(x):\n", 260 | " return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n", 261 | " strides=[1, 2, 2, 1], padding='SAME')" 262 | ] 263 | }, 264 | { 265 | "cell_type": "markdown", 266 | "metadata": {}, 267 | "source": [ 268 | "

First Convolutional Layer

" 269 | ] 270 | }, 271 | { 272 | "cell_type": "markdown", 273 | "metadata": {}, 274 | "source": [ 275 | "[5, 5, 1, 32] - The first two dimensions are the patch size, the next is the number of input channels, and the last is the number of output channels. We will also have a bias vector with a component for each output channel." 276 | ] 277 | }, 278 | { 279 | "cell_type": "code", 280 | "execution_count": 15, 281 | "metadata": { 282 | "collapsed": true 283 | }, 284 | "outputs": [], 285 | "source": [ 286 | "W_conv1 = weight_variable([5, 5, 1, 32])\n", 287 | "b_conv1 = bias_variable([32])" 288 | ] 289 | }, 290 | { 291 | "cell_type": "markdown", 292 | "metadata": {}, 293 | "source": [ 294 | "To apply the layer, we first reshape x to a 4d tensor, with the second and third dimensions corresponding to image width and height, and the final dimension corresponding to the number of color channels." 295 | ] 296 | }, 297 | { 298 | "cell_type": "code", 299 | "execution_count": 16, 300 | "metadata": { 301 | "collapsed": true 302 | }, 303 | "outputs": [], 304 | "source": [ 305 | "x_image = tf.reshape(x, [-1,28,28,1])" 306 | ] 307 | }, 308 | { 309 | "cell_type": "markdown", 310 | "metadata": {}, 311 | "source": [ 312 | "Convolve x_image with the weight tensor, add the bias, apply the ReLU function, and finally max pool.\n", 313 | "\n" 314 | ] 315 | }, 316 | { 317 | "cell_type": "code", 318 | "execution_count": 17, 319 | "metadata": { 320 | "collapsed": true 321 | }, 322 | "outputs": [], 323 | "source": [ 324 | "h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n", 325 | "h_pool1 = max_pool_2x2(h_conv1)" 326 | ] 327 | }, 328 | { 329 | "cell_type": "markdown", 330 | "metadata": {}, 331 | "source": [ 332 | "

Second Convolutional Layer

" 333 | ] 334 | }, 335 | { 336 | "cell_type": "markdown", 337 | "metadata": {}, 338 | "source": [ 339 | "In order to build a deep network, we stack several layers of this type. The second layer will have 64 features for each 5x5 patch." 340 | ] 341 | }, 342 | { 343 | "cell_type": "code", 344 | "execution_count": 18, 345 | "metadata": { 346 | "collapsed": true 347 | }, 348 | "outputs": [], 349 | "source": [ 350 | "W_conv2 = weight_variable([5, 5, 32, 64])\n", 351 | "b_conv2 = bias_variable([64])\n", 352 | "\n", 353 | "h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n", 354 | "h_pool2 = max_pool_2x2(h_conv2)" 355 | ] 356 | }, 357 | { 358 | "cell_type": "markdown", 359 | "metadata": {}, 360 | "source": [ 361 | "

Densely Connected Layer

" 362 | ] 363 | }, 364 | { 365 | "cell_type": "markdown", 366 | "metadata": {}, 367 | "source": [ 368 | "Now that the image size has been reduced to 7x7, we add a fully-connected layer with 1024 neurons to allow processing on the entire image. We reshape the tensor from the pooling layer into a batch of vectors, multiply by a weight matrix, add a bias, and apply a ReLU." 369 | ] 370 | }, 371 | { 372 | "cell_type": "code", 373 | "execution_count": 19, 374 | "metadata": { 375 | "collapsed": true 376 | }, 377 | "outputs": [], 378 | "source": [ 379 | "W_fc1 = weight_variable([7 * 7 * 64, 1024])\n", 380 | "b_fc1 = bias_variable([1024])\n", 381 | "\n", 382 | "h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n", 383 | "h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)" 384 | ] 385 | }, 386 | { 387 | "cell_type": "markdown", 388 | "metadata": {}, 389 | "source": [ 390 | "To reduce overfitting, we will apply dropout before the readout layer. We create a placeholder for the probability that a neuron's output is kept during dropout. This allows us to turn dropout on during training, and turn it off during testing. TensorFlow's tf.nn.dropout op automatically handles scaling neuron outputs in addition to masking them, so dropout just works without any additional scaling" 391 | ] 392 | }, 393 | { 394 | "cell_type": "code", 395 | "execution_count": 20, 396 | "metadata": { 397 | "collapsed": true 398 | }, 399 | "outputs": [], 400 | "source": [ 401 | "keep_prob = tf.placeholder(tf.float32)\n", 402 | "h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)" 403 | ] 404 | }, 405 | { 406 | "cell_type": "markdown", 407 | "metadata": {}, 408 | "source": [ 409 | "Add a softmax layer, just like for the one layer softmax regression above." 410 | ] 411 | }, 412 | { 413 | "cell_type": "code", 414 | "execution_count": null, 415 | "metadata": { 416 | "collapsed": true 417 | }, 418 | "outputs": [], 419 | "source": [ 420 | "W_fc2 = weight_variable([1024, 10])\n", 421 | "b_fc2 = bias_variable([10])\n", 422 | "\n", 423 | "y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)" 424 | ] 425 | }, 426 | { 427 | "cell_type": "markdown", 428 | "metadata": {}, 429 | "source": [ 430 | "

Train and Evaluate the Model

" 431 | ] 432 | }, 433 | { 434 | "cell_type": "code", 435 | "execution_count": null, 436 | "metadata": { 437 | "collapsed": false 438 | }, 439 | "outputs": [ 440 | { 441 | "name": "stdout", 442 | "output_type": "stream", 443 | "text": [ 444 | "step 0, training accuracy 0.08\n", 445 | "step 100, training accuracy 0.8\n" 446 | ] 447 | } 448 | ], 449 | "source": [ 450 | "cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))\n", 451 | "train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\n", 452 | "correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))\n", 453 | "accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n", 454 | "sess.run(tf.initialize_all_variables())\n", 455 | "for i in range(700):\n", 456 | " batch = mnist.train.next_batch(50)\n", 457 | " if i%100 == 0:\n", 458 | " train_accuracy = accuracy.eval(feed_dict={\n", 459 | " x:batch[0], y_: batch[1], keep_prob: 1.0})\n", 460 | " print(\"step %d, training accuracy %g\"%(i, train_accuracy))\n", 461 | " train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})\n", 462 | "\n", 463 | "print(\"test accuracy %g\"%accuracy.eval(feed_dict={\n", 464 | " x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))" 465 | ] 466 | }, 467 | { 468 | "cell_type": "code", 469 | "execution_count": null, 470 | "metadata": { 471 | "collapsed": true 472 | }, 473 | "outputs": [], 474 | "source": [] 475 | } 476 | ], 477 | "metadata": { 478 | "kernelspec": { 479 | "display_name": "Python 2", 480 | "language": "python", 481 | "name": "python2" 482 | }, 483 | "language_info": { 484 | "codemirror_mode": { 485 | "name": "ipython", 486 | "version": 2 487 | }, 488 | "file_extension": ".py", 489 | "mimetype": "text/x-python", 490 | "name": "python", 491 | "nbconvert_exporter": "python", 492 | "pygments_lexer": "ipython2", 493 | "version": "2.7.10" 494 | } 495 | }, 496 | "nbformat": 4, 497 | "nbformat_minor": 0 498 | } 499 | -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/old_stuff/MachineLearningWithTensorFlow.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/TensorFlow-Workshop-March-2017/old_stuff/MachineLearningWithTensorFlow.pptx -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/old_stuff/Misc.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 5, 6 | "metadata": { 7 | "collapsed": false 8 | }, 9 | "outputs": [ 10 | { 11 | "data": { 12 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAMsAAAEACAYAAAAdo4LwAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJztvWusZNd1HvhV3ap7+3Y3W22yyebTfNikKImMpJG6MVBs\nS7FojhxGDxugbQEzlmeM/HEeRgaTsZQf8/jj2AbGmV+UDcSJaQ/MmEhsgoEYSZRlSIqMiC1GlChS\n3c2HCJsK2U2im212X957q+rW/Khadb/6aq2996k6t6sE1AcUquqcfdZe+7GeZ599gCWWWGKJJZZY\nYoklllhiiSWWWGKJJZZYYokllrgs+AiAkwCeA/Abc+ZliSUWFisAngdwC4A2gKcAvGOeDC2xRB1o\n7gHN4xgIy0sAOgD+HYCP70E9SyxxWbEXwnIDgL+h/y8Pjy2xxA819kJY+ntAc4kl5o7WHtD8AYCb\n6P9NGFiXEY4cOdJ//fXX96DqJZaYHbfddhtefPHFhh6fOFADWgBOAfgwgP8G4AkAnwTwPSrT/73f\n+z3s7OxgZ2cH3W4XFy5cQKvVwuHDh9Hr9dBsNtHv90dlAKDRGLBr/+2bz33+85/Hz/zMz4yO93o9\n9Pv90e/z58/j8OHDWFtbQ6PRQL/fH32PmBv+tu+dnR00Gg186Utfwj333DN2DgCazeboWL/fR6fT\nwZtvvokjR46g2Wyi0Wig0WiM6Cgef/xx3HvvvWNt0XL9fh/NZnPUH71eD2+++SYOHTqE9fX1sbJ8\n/WOPPYb77rtv9N/42dnZQb/fR6vVGvW1tcP6dXt7GxcuXMA111yDVqs10dY///M/x8/93M+Nrrd6\n7bfCjlldW1tbeOutt3DkyBE0Gg2srKwAAB5++GF88pOfHGuHR4950b7a3t5Gp9PB4cOHx8bgj/7o\nj/CpT31qojzX8eEPfxhwZGMvLEsXwD8G8AUMMmN/gHFBGTGmHdBoNNDr9bCysjISGC3Hk9vOG2wy\n8qDw4HkfD5EQMV37eBOlFN4ga13ef55AXGeqTdoum/zRdTzhtT9SdZTCG7+IZ+XDa4+OBytaE6pc\nHTle9kJYAOA/DT8hrIE2wU3LeRrYNKr9Vitg53jSqIAYfYZ1asmAMX07xsfZyhldvpbPc7s84eJ+\n4bKquc1q7uzsYGVlBc1mE81mE71eb4JnnpgqYDzheDys37mPVDCjiRjRt2uMRy7nTWz9r4qTj7P1\nXFlZmbBKLERslVjYUnNhr4QlC+u0lZUVdLvd0f9oAmqD7ZhqwNtvv32iLh70nItgdXjW5bbbbnNp\nq9tWCpuYt912W2hlIgtsZVioVEABTPRHKY+eW6qT78477xzjKcWr981urgk7ANx1111FlrqqVQKA\n97znPROC5fHnYS+yYUVgjcVaINIknhvi0fjxH//xibKsAT1h8f57A+8Ji5U3wa8Co3/rrbeGkyKa\ngPbhvgMmJ7mnPGYB1/WOd7zDPVfyrR/m/e677y5yizzePLo2Ps1mE+9973vHBISVdK7OuVkWDuA5\nwFT/P/LjPdOq7gIHsxE91Sx2rQctw5qRNbzypu0xXtW1sr4wSxgNnJWztjE0KaFxgbZXY0CLF9nV\nW1lZwc7OzsiN9ZRR5AloGf6vgs5KkK+JlAWXUx7M1dPxMlqtVmusr7hvIszNsnCQDEyaTK8TS+my\nEMwDVd0xD1Ff2LHIpYy0a8RXZLnVXSoZg6qWIMeT8lOi/b1zpbwvrGXxBkm1YK6R2tFKywuqp4Fn\nMXI8VRUYtUZ8zCsbfVI8RxPJK8PWrTQJMm2mzIuJIiWR40F/l8yB0jk3N/VrJl/9R++3oUS7sNsz\nD6irNy2NVFtT1tPrx5yQR31sSLmEEZ1p4I278lOSbrbrqoxDyZyZm7D0er2R7w6Mpzj5vx1j18Nz\n4bx7MjpJLB7wzqWQmpQe7RRs4mkywI7ZRzODyo9aUSvHfrod81Ks2lfcf0zP0vlKU2kpnymlxhPf\n+qHR2I2/VlZW3Gyc/vbGnOsGgHa77Sof7pdSizhXN4wDXiAfYJXQZNqedYquifhTDcWdXUWTVrU2\nUWIDmFy54JUt8cFziFy0VPkoBinhQ+8F1cG759amhCuFuQmLZV9s2QNQ3YyntJZpKhUabxJFdFKD\nb21I+cazuGR6L8mrX8vyZPOyfFVRxfqmFEjJZNTx8BRVdE3Ep/V/znVLjTVjbm6YMe/dbfbKVaGZ\n0k5quqOBjIJOr55UUK/nckKaO6fXa/o0EqQq9aSsWlVaTFP5S2n8ksnr1emNrcZcWl+pYpn7HXy+\n7+AFkrl7DgydmJ52brVa7lIIpq93xb1B9gSF6+Hf0eTzJrZdZ/c2PI2tloQHm2kwD+ziptquvHiT\nUa/x2pezCExH+1OFKMW70mBLHy2fUiXjtc/D/G5GDJHSoHXEMDnL4aE041I3vAnENyovN0+z1Jnq\ney2TS5Vz2Sr1mKLNzaNSd3muwsI+pX00c1EHlF5pwFfqCtTFo/KiGpM1Lbuxe8nTLMLC34yc1fJ4\nUNcqul4/JcKS48EwV2FpNAbLKbrd7oAZeS5EU4bs1ngBtTfRbGJpOtLrcBWi0ns2kYa0+r22MbSd\n7ErwIlAuw3Q918XrP6/dkWa39LX1tZdaziHlNXBbzSXi+FUXqHoBP4+Pl2pPWWRepR25lIq5ZsOA\n8Y6zZ1gUKRPs0U1NCL02ZT084auKlIk3Xvk+UlS3d23EK1vrnIsT1ckxZK7/2QJ6dZW6V7k69Ltk\nfK0tqiwiPlO8LoSwaH7dg1kGz+owTc9kA35qN+psTwvzMzOlmijHr/IR8abnPY2r5UrdHDuuSiqy\nsh793NiVuHLRuFWZ2NF/7qcczRTmupASGM94WJbKswzeMUXuvFcuhZwWmhWlfJTQ4W9dBZGzDpG7\nYtd4bm+Jxq/Kf9X/Eb+RcvHKlAo8sAB38O2bn5UHJtO+XmpZG6gpSPbnbVKwv8/gmEbjo8hCeH60\nLQ1nl5LLRIOh53WZutHWGMWWDbGAcLq9RDNz/0a86XIe5ol55L7WerhubR/HRd44cBvs22IcHlvm\nr9FooNvtYnV11X3WyOoqvc8y99QxxypVU8VqcTTI8wTLzpXQtu+UOxWt4arLanjICZ5u8FEVe8k7\n12GI+i3Hg2dJIq/Eg7pmuXkxV8vCD0Hl/PoUHf4GJjtBrViuHu1cu0EYaV4eoNxE9urx4qDU9Ryn\neG3hdlaZ9JFyqRueR2D/SwXFo1PFVfZc1lydcxUWNoN2vyDyq+2bG6VuQXRtahLouWjyRZNT3TxL\ng7Mr4fHpaUMuo9AJ5cUYUWYrB1UmqTIev6k6vWOa3NG6U5aCj3PSpd/vj7nS1ufeKgh18VP1MBbC\nDctpbfuOXC6mxWU9WtO4GDm6Xj25CZ+7PsdHdI5pVG1ntJrZUxB1QuupQl8FgY8zrcj6RNlED3Oz\nLIxGozHa7C1XLpeqtXL8f1aoxq57siwKIoHj5/n3ou1V
XFddJxYpDs+l8xSvx0eEhRAWwzR+qncu\nldmZBnwXvU66iwZdZaCuZ9UYqCpKLC6XSSlXz9Lw3NFMZZQlZcx1KyTWXOxjeqledcOASY2fW5Jh\n+5N5GiaXPtRUKbfDy+awYPFgaLwV+eP8Hbl03oDnYiv+7T2p6tXRaOxu6OcJjtKw/ozaaOe9bXi1\njMcTQ59Z4n6zVcfekiDNbqaUsGEh7rNwZ1o+vESLpxrGk8arizvHcz80e8Z1ReWZ55yZ16ycCuMs\n7s60loC3zOXJ7EFvZFa1uKykStsauVaRwvGUiJ3TjGkJ5r6Q0vu233Uu0feEJBXYKU/RJE4tkPSg\n2j1lAabFLDSqxA/chlRfRmCBy2Uzo3GLxhYYXyKVK1vC/0JYFvsPlK/05WsMVTRU6fkSX1bLpgTB\nvlOWqvRhtxJ+qtDxXLhI+6aWvlcZuyoC5k3wlJUH4lUAHr0c5hrgW0d5r5WI0rXaGSXgmEF9aTvP\nlszr4JK74nzOi2XUz/d+K89eW/ShJo0Z+N4FX+fx7i3JYdgOlSkNHP1OHeP22T02L8ZhVy010b32\n6bipa63tzrm/c7/PAmBsCyBF5IvmXCfvOsOsbk4KkZBHZbV8SaymyLmUVekob/b/hxEqYFFflViZ\nhRAWW3gYPfWX8jWjcorIFNeNKpNKX1PhZXaqYNZ2eQolcikXHbnYJhUDRZj78yzsHllApr5zlObz\naHrrzMwcR+5UJHic2vTSjgpzj2yzBHuVAt/UY151cnLbdRWx5z5oezyXRCeFBruaLfT6w/jxzmvc\nknJz+H9Ut/7mYzr+Orm9cdcgP4obS6zyQliWOrVW5Ocbcn5prlyuQ6N7BqnBmLb9Eb29spyqxIDZ\nlqoYzdKNMVK0SyzErP21EMIC1CMwnstg7g1bl710KUxY+FkLIB/41snTtLFL5K5wW7wsE/d71Xaw\nNZ6G57r6rcQNWwhhmVYrpejob90AYS9hEyznNqq7URdm3cLIvj1LAkz2Lx/XYzn0+/3RA2xVEVn+\nqsoicusUc00dp9KjBs/njWCTRJ+Xt2s4ftB6PB5yiQKOJwxWR7vdHivjxVzey3i8uEStkyVDVlZW\nsL29PRHj5ISU4cU0ao2jTUS4H6KYJ8WLxoK5OlJxpp03Xnk1gvLm9WlJX5VYln8D4AyAp+nYlQAe\nB3AawBcBHKZznwHwHICTAO7FjJhF63IH8MDwwshpXRbmL9KuyntOSFMBqxe8euXqile8ZABD+3Na\n8H2v3FhHbmsqMVFn/FbSyn8L4CNy7NMYCMsdAP5i+B8A3gngF4ffHwHwQGEdIVTLVkEU8NUhJMqf\nZxE8fvQ6/c8CFcVgXEa1cx1tMg0dPTKda8s09ZXy7QmEwsua1YGSifw1AOfl2McAPDj8/SCATwx/\nfxzAQwA6AF4C8DyA4yWMpALvaQJHpsurkdVlSNEu7WTVchoI567h/1GdKctSJ1i52G47uhqByzHv\n09SV+p86l7PaKih1jPO0Wv8oBq4Zht9Hh7+vB/AylXsZwA0REZvINiiqQSPfnf1q/tgxD56rpH5+\nFXfGC34tPgHG3RQTTk+o+D+X1T5Q6LIdLutNbq8ftD3629txh+OClIJT2toX1gaD7UyaEwo9Zm31\n5ovVpTd/rYz36EfKpawjwO8PP6nzLkoZ1cmW00BangNWnVi5ScUCyIMSuT+cQNCYKeW6eJpav3Vy\n6q6T02h4j4/IinmBvOdCaTCeakdkYZWG8qgxFbD7eId3E9lTuMpLDtMKyxkA1wJ4FcB1AM4Oj/8A\nwE1U7sbhsQk8+uijo9+33norrrnmmjHNzFqSwYMZaR1vAtimGCxIuU7iyZEz+/qbJ1REI5qQfJ1X\nLrKiqQmWaqNn9VQwvHYxjdwk9+hU4dfrF09YPQHi//obAJ588kk8+eSTWV6mFZZHAXwKwG8Pvx+h\n438C4HcxcL9uB/CER+CjH/3oaLA3Nzdx8eLFsa2RbFJo2jV1Y5HP9fv90c00ExLeAM/e6W7Xqftj\nx+2/Tn77rS9jYk2v693YffEeruLB535gHnTZjq6q9fqFrZ7WZdfohoSGVqs1Sk8rH0zL24DPzllZ\ns4S6k48qHK+/uawJgG4o793YzHkkjUYD73//+3Hs2LFRn/z+7//+RDmgTFgeAvBBAEcA/A2A/wPA\nbwF4GMCvYhDI/8Kw7LPD488C6AL4NQRuWGQ5+v0+Op3OxKRhV4p/67XsIqnb1O12R/dBrKP5Oo8W\nsCsUzCtPVh6QTqcz9rQn++E66J5fre3mF5Rq3fzfrrE26jlWQJHFVLfYlMzOzs6EgmGerCyD+4AF\nMTVuzJcpSm8DRt69UvtGlYr2syqHKpa4RFg+GRy/Jzj+m8NPEjxoBjblno/PlibqdPvWCaHBqj5D\nwdfpU5H87L7VzcKi92ysPhtoz39moWaoX63+NltMYCCcW1tbaLfbEwLMk0lv0HnPD3n92e120el0\n0O12J1wcb7Jy/xr/rVZroi1ajwmkCqu6hGpBjUftO22bN9ciDyXC3O7g86TXh7+sc/UVFLohhWeV\njIb952MrKytjmtfoMC9m5ln76NOPPCjtdhudTmdsYK0utgzMl05+HWCe3HzO2sxuT6PRwOrq6qg8\nt00/1idKk8fDszacsfRoMB0+H7lsOmYsLJyuVtrGCytLVRD6XJQpWFWurFC47SlLMzdhOXv27Oh3\np9PBxsbGhM8P7Fog1d7NZhOdTgfAuEun1sg0vA3ehQsXsL29PSYkOviRb29ugZX13DI7t7m5iWaz\nic3NzdExFnCepN4yfhY4bzDN3ev1etje3h5zezwNq1ZGJ5FaOY73ut3uyBIzfW6LWmUeF26PxqXs\nIq+uro4pN2/5f6REdGN3HhNVfJ7F0XnnYW7Csr29PRowNvE86ACwvr4+ofXtWRFd/8OdpGa72+2i\n2+1iZ2cHBw8eHL2INbonYZOBP51OB6urqyP+OHHA5Xq93kj42+128rFYYHdjBZ583W537DkfTSR0\nu120Wi1cunRpzN2za1mg+W68TcrUuildCfzWW2+NhNPGit0+b9tUpqtr4FR4NzY28MYbb2B9fX0s\ne6UWzcpbe/TTbrfHLCP3oylWo20WyK6xuvb6PstUuOGGwb1K66y33noLV155JYDdxYhbW1tYW1sD\nMK6lrPGtVmss9mDtp/dVdnZ2sL29jU6ngwMHDoxNYu9GJguZt3aJr+OJZf9brRYOHDiAtbW10XG+\n+aoTSGMTa5M3wYDdOOqNN97ANddcM+EyquZlmmzJvBvC3D6z+keOHJm4nq0hWyntd8/ScR9fvHgR\n6+vr2L9//+h49EbpyIW08zZepuy2t7cBYLSwlcvu7OyMLL/Goh7mJiyaReLOtknfbrcngmO2GjyR\n+R6K0eMJ7W0Sl0oy6EBxnfbfyvI7Uthf57qtXSx4Wl7dMM368CRgWBym/aaTi62h0fM2o9C0NNfB\nSkHdQhUUK+O
5OFzOPuwp7OzsjL3XUl1wVg4s8Gb57b9ZwwMHDoz1of1Wq8j9pZjrVkiGdruNjY2N\npK9tDbIBA8a1hLlmLAw6aKaxPC2u/rVdy0LMwT+7fDqonn+v/rYKpbmfGp9wG7UN1g+286Lxx8G+\nxkJepsy+mXe7vtPpTExKo6PXW5tZaanl1utZWDxryMpJBZ9ddh53nvSmRGycWDCsvWrdI8xNWF57\n7bWxexcbGxvY3Nwcm/TepNZ7KDwRbGAN5qqwS7G1tYX9+/e7KV3T5MCuILJFUm1tg6kCvrW1ha2t\nLbz11ltjsZnGUd5uNlbOBIA1s6elNzY2cOnSJQAYu84TfmuP+uasiNgdMb43NjbGskxqCSM3zj6q\noIy2xZ+XLl2acOvUPWw0GiPX2XNP2XXjjyUPOLFg/EXZyghzE5YjR46MGrC5uYmVlRVceeWV4XZI\n3EnqOjH43Pb29sg09/uDm53nzp3D+vo61tfXR9dokK+/OeD16rBBsYm2ubmJra0ttFotHDp0aDTp\n9GYrQwfTbgCyy2B1s1Kw+yDWbzYxmJ7n+hk4YWETmy2S3Yxst9sjQWS+9JvHzGC3Ajzht8m8traG\n1dXVMcFm3g3sbqvFZitk13W73VGsqm6iXWdvcFDPRjE3YVldXR0bVMs0qbBodox/a8M4DgGAtbW1\n0STgO/bqt+oE0phArZvV1e/3J7J1nU4HBw8eHE281dXVkam3yeCZex0s44ndM3Z9jN/V1VVcddVV\nI+3NbqqneFT7cn/yxDR3zAT9bW9725hLaN+qwDwXj/vR6jNBtKc99+3bhwMHDowJgFoQPaY8aLtM\nsN56661RBlS9AM/NjTDXrZA8U5ozhUB+paiab/7mMqnrvWM6uSL6fIwH2QtyS2ATkS0EB+Epul4d\nHM9p36trWpXXCFH/eOVUGKrMCyC/YrmEDw8L8X4W7YzcYOfoRJpb7wrrpImgQaHWqVaPB40nc0rr\ncj259kWbEXplqwo+88dCyVandPJyef7mPioRGB6nSIC9vosETo97ZTwsxAtY+ViJBeDOi2hrGa8z\not+pjtfz6nroNZwwSOXwc/Da4rUhJ/ieC6PWJGpL1C85S898cv3eDcCo7ugcMGkpPb5KhXNhhYW1\nhS5N8AYgGlQ7rp3Gx/v98ZSu0eFv5k3petcxbfttMQPfdefkQG5yMW3vBiP3l91F58kXaVCuW+vx\nrAzTAyZX+vJ5dhE9Wtx2pmHfnC1LwesLa5PyxvfjNNZTDyFqv2LubljKHdGJOAu4Y6vSSmnclLad\npd6UxQL8hY8ejzmtXMJDlbJeGy0TqMKsNzirggXEs7rT0o2wEK+cYETWJMqAlSIKXEv5jOiZe6UW\nUeuZpt6UwPDKaOUpFZhHVkev4/KeOxONU1SnCXe32x3bUy2ydh5SbYksZu6ufBXM3bJoGlfhDaAG\neinNamg0JneJZBMdXePx4NXtXcf12O9cjKPXcz08cVkr57Jsnk/uTW7lm12+lMb2FJ4KIa988FZY\n21h46V395n5QQY4sXO52QInAzl1YDN6EjBiv6prZIEVxTh3wtC+fixZrMo9V6mJh8egBcOMDRSqg\n34v/bGV0yUwdY1HqOnoKJHfd3F+Tp/9LG+oh1dmqmVlYvJRwKYymPok5KzytmeNBf9cJVjZV6UdW\naRrXNFcPf1dVQPztYSEWUnrLGnLX2gQthVqVKsFrCrMIWgpVePSEpa72KU9WR4nr5/GXuqYOzEIz\nd+3cdtHntVQGzzQCmFiIx2lgD97g8EpcrkMzNFwP12dgq2QCq6lR873VR1dYOyzVzO4au45aJ/eZ\n0eA0tWYTI3ePz/G17Cppv0ZZJ4/f6KPX9/v90fNJXh+VCoAXy3r7IADjaW/lLcLchEUnY/TgTV1a\nskp8oB2o931KzLyW8SynWVRexuK1V93GvQa321tgWLfrVGfGqhQqtJ6SVsz1/SylHWUDN+sdcI1X\nUvV5H+XTo1NlIvFSeLUWUXy1l2ABTyUk6ubjcguKQWOwhXXDgN0JzBrMg5fFupzgOtkKpMqqIogm\nn6fNIsEsdRfqALuQdcZ4itKJWjeimGshA3zWljqRNEtmGjayLtpwbzI1m7uP0HrnvZglVZf6u3zO\nYhSzGHrvQOlEbeDYIUqCMI9ROlljE+4TLee5nHaMH47TtG+0ziuahOrmajaR78GoBeD+ScHmC8dj\nyqeOQ5JetsRlRqn/WBW6ZWgd0ImQ41snYAk0WE+5fpGV8r5TLqXW6ZWvC3s13kzfW+UwTb0Lc1PS\nsFeDwkvNq6ScFVFGhxFpU7YiOc3oWZpIwNQSp+gw32xBIrcE2I2tqqT3S5GLC2cFW+dZXdiFeKck\nf3R1rrdMga/nY94Es0FmF45Ns2aieAKxyxHFDDrh+v3dHUWUP+bdcwd5wqTcGy7vrdhlGl7dmurm\n9qqFUdc3Z2lSLq4e477kXWaisnxNZC0U/Gy/N3e0noXNhl0OpBofde4sSGmxqpmt1OTxhKAq/Zym\nzU2uuqAWdy9QSjdVbuHcMMOsHaeBeIlFqmOwzGJFiQStj69TOpr88LRjHe4F18doNptjm26nkIuJ\ncmCruhdCU+o6/1AJi7pes/qwPHkNasq9JfbTgl0/75zxVDoh1C3iwY6sV5U+S5UvmUCzQl1vFro6\n4xfPxTX80AT43DmajvTg+d4GnaDsqkTCwBM4tSxFafIxTlHyZt4mNFFKl2MkXrbe749v/sZ8s9Xy\ndoTR/tE+0Sca9WlMFmTeM00f1IriNu0z45PrUCvEcQXzxm1gD0Hp8H+Na5rNyXfc2PKiqC0RFuLh\nLy9Irhspje+VnaWeOumV1KexSi7LpgEz08nVxSjVyDnrVcWClU5sRS4WLal7rmvD7Hta14eFLddx\nrElnDeJz8HzvvXApPEEpqdMTkOi31hnxUIXnVJm9gsZ/09S7EMtddKAY3sSYxvqwcFZd2l9SJirn\npS31Os8tLOWtqv+t/Rx9p65PrbgoqbOE58hCl46/0sxZpBK6C3GfBfDvfnOwP42ptusjN8XzkQH/\nfoPWHVlDThpwbGE8cJ26p5jngxstbxmKulTaFvbdtX+i6zzFpX3I7eQyHi0uqzx42+FG7bPf3ipo\nr21M165junacXfOch7IQwjKLK1YKvnPvbWs6KzxhzmnCaWILL5Om9VTVqh4dPubdwa+q2RmcOFDh\n9ngvcbNzyPVRCea+fauh6tOSVcHZqjqhg13ntZHA5Fy7XF0RIuvrjY3Wo5aqBN5eY9MKegmmcd8Z\nJbPzJgB/CeAZAN8F8E+Hx68E8DiA0wC+COAwXfMZAM8BOAngXo8oDzYvP2F42lOD+tSHJ5Vdzxvt\n6QuQIhgNfRjKc3lYIPnNZHqN7s6ibU1ZKqXFLk20XaxZB8/aRVrXe+qy1A3Wj9bNvOuqB/6oQEXj\n4/Go4+7xH7niHkqEpQPgnwF4F4D/HsA/AvAOAJ/GQFjuAPAXw/8A8E4A
vzj8/giAB7x66swOMdT3\nNdg7JPnFQ3XchFToKxFS8GKBEvDkqyOzl7JGezVOdWIv3XdGibC8CuCp4e+LAL4H4AYAHwPw4PD4\ngwA+Mfz9cQAPYSBkLwF4HsBxJboXg5DSfux7s7av+5FWfbaboZbP+KoK5ntW17U0prpcE7IK2Grt\n9e0AoHrq+BYA7wXwDQBHAZwZHj8z/A8A1wN4ma55GQPhGsMskyWCugwM9bvZ7aszVmIXQKHCkntC\nNFUHT5Q6+E2NwyIKiuFy8lYlwD8I4D8A+HUAb8q5/vATYeLcn/3ZnwEYuC233347rrnmmsoZIy9F\nCEy+kVavUX89OsfHvHrZt9eyJgwpS8ffqjw8ofPiJKufeUopIo7jlF9dlsPL5jXFqvxxqjwK0nPJ\nBx0XjckipabjwfXoK9G9OOiJJ57AN7/5zbFjHkqFpY2BoPwxgEeGx84AuBYDN+06AGeHx3+AQVLA\ncOPw2Bg+8YlPoNFojF7rdv78+cqZKu4Ub+seXnOkga83wQ2pjEwOSq9KAJmDCodZJp1gKeumZZR3\nQ7TdKdOoyrfHE1vHnIWvUi+XzV1z7NgxHD9+fNTGBx54wC1X4n80APwBgGcB/L90/FEAnxr+/hR2\nhehRAL/8kfWHAAAgAElEQVQEYBXArQBuB/DEBNEZ/XZFlH3hc1w3n6vT341uQNaR9vS++bxq5sii\n5fqb+fesUCm/XkwRCTDHkqV0Pf7UItWJEsvydwH8jwC+A+Bbw2OfAfBbAB4G8KsYBPK/MDz37PD4\nswC6AH4NjhvG2p01Zsq6aEdzB6VcJa0HGH+XZE4jV7E06oaleI+eAk3R99LFnuuSQ+T+MR3+TlkH\nj78oZtPJ3Gg0xt4bym5SaZtSVkQTKrMorRJh+c+ILdA9wfHfHH6KkBpsPRZlaKJO0EmhtKJl+d61\nEQ+q2XjzPM8CeHQif1/54Hqil9Om3K/UOY8vO6/X6wTNuXVRm7hdHh1PuHJQYY949GinFNXct0JS\nzRVp8UgDGtS1Ulolv6vybr+NhsYPDC+WmdZN0L5jmqmJPk2soa5dKaq42dNq/jrjwRIsxKpj3RBB\n/VIr651Xekwnqg8YD2C1Du9bBzQ65qWCvThCfe4qMKHkDFRO2VQVFI/vqsLi8RPV4wllybWXU2Dm\nJiy8arXVarnLGjx3h6HnvXjBftv19uScPsHoxT8cS/FyEW/LVW+nSg5ac8JRGhN47pQdM/54k3B+\njn57e3uCP67PU1S2sbZXzurz+PXixKg9Wie3L7XcxVM+WtbzODwlW2LpF3p3F+7E0mxJRKcEpVqe\nB1RvLPLkstfBWeJiGnfGwHXwJNG4qNFojHbw39rawte//vXR/3a7HSoET6vnLF+pm6X0+L/+ztEo\nBVt5vd81rfs792fwFSrlul1oVVeAU8gpRHEA86BlAaDb7Y4C7a2tLfz1X/81Xn31VXS7XfzIj/wI\nrrrqKlx11VXY3NwcvVZiFpcox7vRbzabaLfbOH/+PD772c/iox/9KH70R3/UFQ5vEtkki9LqVTau\nS8Uk3N9VFVUVTONKKhbasgC7KdK9XP8zjT/On16vh69+9at46KGH8KUvfQlnzpzBK6+8gqeffhpf\n+cpXcOnSJWxvb8/EI78HJrfhOD+38/M///PYt28fPv/5z6PT6bht1YlUVePnkIsr2ErrNdNC3WbD\nLMKyMK/JS2m4Uk0c+Z1sim3SRYFv5O8y+JrV1VV89atfxcmTJ7G5uYl7770X119/PVqtFi5evDgq\n42l0bhtP1NSz4hzYK78rKysTO8q88cYbuPHGG/Hd734XGxsbOHjw4ARNbq+6eo3G7n2Q1E4xGmNo\nf3qxoZbv9XoT6XDuC9tg3RNiry+1fbyAtjRGZCyEG8b3JSJ4WZ6qPqx9OH7wguUSOjbg58+fx1NP\nPYW1tTX88i//8ujtVbaMhwNhOxcJNP+2IN2siGbvNCiOsG/fPhw6dAhra2uj2MWrMwcNwGeBWjAv\n4FbMWmcdq8sXwg1LZSlsgLwVuppJSYHdOHU5qrgdet2bb76JlZUVXHvttVhdXcX6+jp6vd4ow2dt\niHaW1Lq1bOR+lgbXrVZrlA1LKYkc1OrMAi9RoedKEgxVwA/tTcv/3IRFA8woEE91rFdWO5d/a4ep\nnxxNJM+q2fWvv/46er0errjiitHEbLfbY4JpwXbKxTN6zz33HC5evDixMYO2zayNt5CSfzebTZw8\neRJXX3019u3bl92LmdvrjUnpJI7KqXWK3Cflifs9+u+5plq/t2KjNFM295uS+tubFIySrFBkNfg7\nmjQlWocnpPnY6+vrY5PV86kjOjapz507h6997Wt45plnsspBXTAvOAeAS5cuYXNzE7fddtvERIno\ne8F+iauk50sEKkcvZdGqWh1P0Kokd+b+AtbomXYrUxUlGgLAxOK9adDv97G2tgZgEKPwsxNVB7HZ\nbOL5559Hu93GmTNnRjGP99x8iStlx8+dO4dOp4Obb7650qpe3SNgFpRYo+icZyWU1jRjOM01C7F9\nqx7j31UaZPRSiyMNGrBWSUtzJscsysbGhruwsYRfHjhL7+ozHpFFiCaY0X3ttddw4MCBEZ9V2ljX\nE6SpMU15EVom8hSmmfgpJR1eU6mGPUKjMblFUb8/vvuK98w5Z9G84N+0sp6zlw2pGxQJr+fbG3+W\nzrxw4cKYRrZ6mX8WMqYFDCzTpUuXRnfZORj37tLbNz/gxv4/MPDPX375ZVx11VU4dOjQaEUBt4Hb\nywkJSz/3+/2x1Lfy78UfGoNqfMiW0erS5UdKm7OlRj+1FEbpaCZQLTbzFWHuMQuv1eLj/O1dV9VF\n0ycjU767xk8pHDx4EI1GA3/7t3/rLslPQTWmWScTRJ70Hp/Mvw66uYVnzpzBu971LmxsbEw8Upxz\n4UrcHO2rqi7XNGNpYIHcyz3nDHMVFtUw7BblfFjvnJcdUeHwAtdUxieHK664AldccQUuXLiA119/\nvVI+n7Vmu91Gt9vF1tbWmCZXy5YTRhO0drs9yqwdOXIE6+vrE/VGE9xrfy4r5tFMtbuKcKUwraBN\ng7kLiy2QLPE7cxYnd63VwS5ficZUnhk7Ozu47bbbsG/fPpw6darYpCtf586dw2uvvYZ2u41Lly5N\nbHGamkxa59raGrrdLk6cOIH19XUcPXoUm5ubY68YjxSRd77quFyuiR/dx9orLETMAkwGlNNqHS0f\naWWONxTqbxtdry4AuOWWW9Dr9fDqq6+OLT5MXcvn+/0+nnnmGXS7XRw+fBidTgfnz58fZa94XZjS\nMlfWvpvNJra2tnDy5EmcP38e733vewEMBIizYZ4AeEvuvXs5zAv3NWfv+HzkYrMCs+ujPvKOpR4R\nUJoa7zA/pYphrqljHuTSnV1yE7CkPr1+FlPebDZx5MgR7Nu3DxcuXBjbRpVdqEjY+v0+zp49i1On\nTuHw4cP46Z/+aTQaDZw+fXq
0Spmf99FdLFkp8HMs3/jGN3DFFVfgzjvvxNbWluuG5trNAsjHUlap\nlG7KDfOOW/ssFtM0eK5ObXfKDY+wEAE+4N8tjq4B4vVV3oDxINQdCPZ6PayuruKqq67Czs4OvvWt\nwZ4evNwl4hcYPJD1xBNPoNfr4fjx4zh06BCOHj2KixcvTmheg7pnLCgbGxv4yle+gosXL+Kuu+5C\nu90eWTvvmZjUBNflQTlhKPUAckKiZfi6lZUVrK6uotVqTVi4FKJ0dRXMVVgsXuEn/LyUnkHL6E07\nfaKRr2M3QmmnJiTg3xvg3zs7O3j3u9+NZrOJJ598Eo3GYD80Tr/ywLIF+N73voezZ8/i6NGjuPHG\nG9HtdvG+970Pb7zxBl588UV0Op1Rm8wCNxqNUUKg2WyO9ckjjzyC06dP49prr8WxY8eS8ZnXxiid\nzO2NHp1OZeu073RM2GXj9z3yxh86Nzh1bP2iwsZusd3oBSYVgVpsD3OPWayhtnF3rqzeW/Hus3iu\nlqJKije61ga52+3illtuwcGDB9FsNvHlL38Z+/btG01iHlS7ZmVlBRcuXMDTTz8NAPjABz4weqLy\nyJEjuPnmm/FXf/VXeOONN9DtdkcDan3Aq5oB4JVXXsEf/uEf4uzZs7jhhhvwsY99DJubm1krkEpw\n8PkoNihByr1KLTblvo7ugxmmHc8qc2buy12A3aCr5D4AC4Zqkui76iCX+N82ON1uF/v27cPm5ibu\nu+8+9Pt9vPLKK/jBD3Y34dRXW9g6sMcffxxvvvkm7r77blx55ZVYXV0dpY+PHTuGgwcP4gtf+AK+\n853vYHNzc8Rbp9PBysoKer0evv/97+NrX/saHnvsMVy4cAHHjh3DfffdBwA4cOBAuDFHFZeUA/xp\nBCbXlyl4MZY3PjmrWcpnCnN/nsW0bOn9CXWf7JqSJS51gbWtTdp9+/ah3W7jrrvuwjPPPIMvf/nL\nuPfewatp7Ll8a+u5c+fwuc99Dpubm7jllltw1113odPpjPnka2truP/++3Hq1Cl897vfxQsvvIC1\ntTUcPnwYzWYTly5dwsWLF7G5uYn19XX82I/9GH7yJ38Sq6urIyHY2toaPUOjyFkWK2P9ypmnaaxM\npLk1Hor4VDr8OycUOV5LhXmuT0raxzSvp+m0k7ljVON59zii50HYiuk1Hg86IOr7Wnbmp37qp3D+\n/Hm8+uqreOyxx3DHHXfg3e9+N972treh0Wjg+eefx9e//nV0u11cffXV+OAHPzhKBqg/3mg08Pa3\nvx133HEHTp8+jW9/+9t46aWX0Gg0sH//fhw+fBg33XQTDh8+jFtuuWXk79vkY/cvsiLcLruZye1W\n94c35NAx4t+e9mcB5T5ttVqjFz8xLRZKFewSAeDyUZzFtHJKYOEWUpYiCr73Ajn63Nnb29u45557\n8OCDD2JlZQUvvPACTp06hWaziYMHD+LNN99Er9fDoUOH8MEPfhCrq6sAxq2PuhCNRgN33HEH3vWu\nd40sUKPRwOrqKi5dujRaymLrn3TQowmnvOf6wLMOJeOn2ylFk1SPK+1ZPAdVjNPQmnvMMq05zx2r\nE6zxcwFxr9fD2toafuVXfgXXXXcdNjY2RpP43Llz6PV6+MAHPoBf+qVfwv79+0fZLN75xdOgpuXt\n7WUmKOYiNZvNMZeLrQK3w+M7OueVU0tRci3XH8WlvOhU61E+ok8KdTxWPDfLYpNEY5ASVLEmqQ4q\n0aZVeOn3+9i/fz8uXryIfr+PD33oQ7j77rvR7/fx6quv4uDBg7j22mtx8ODB0QqCfr8/+lYLoNrW\nVkr3+/3RE5mWPu73++h0OmPP+UcWxmtnFcWV08x6zlttnKIduUx1WZbUsRTmHuB7boE3Ubxj0fII\ndWG4rMYzVs7jKRpcvjuvccb29vZolxXLWl199dW4/vrrk23PTeqcy8Lt1bZH2lcVlZe6t3ZZ7Gcx\nkd7VZ4Hz3B09HrXT4haOvVRQSl1GT/lwm5j/Etpz3xg8YnpaVNGO9q3WoQpSA6+DUJe7mHMJp4U3\n2b0yJS6Nuo/2W5MPBt7GqcrjGpcTc92wwtNGs6Bqx+qkm2byeUF5xFsVH7sEeyEwOXht8NwbLqvH\ndNlNrm8WQVCAOVuWRqMxZna9zeN0Mmg6UM+pr64riPmOugaVSp9pebGVlyHSAWf3xeuDKF5IrZJt\ntVrY2tqaWMqxtrbmxinqgng887f+bjab6HQ6bkbLaOpTi9ombqe2iVPGnlVh682pbBU0T3HwWNlK\nCG+OlSjahVjucjnr4Q7SdWSlWtp7xLnEqkTHvXMpq2FLXXhTC3tUei+eGKzTfTR6/NElS7l4bBbM\nQmOuNyX3GhwX5bSQZy1SYNcrEraSAY7OR1u0Wj38jL7u5Vvn5C51G1OaWSd/Lr7Ta3PrwkpQhwv8\nQ3efpRRRcMq/VZuVxgCRVYrqTFkWLpPj3+OD3T2zLnVDrWiqHH+XIup7zRbOO3aZa+rY25kEmJxE\n6lOm3CbPz9VYRie4ugcRcgLI6Wlb/qIPgXlt1NjFi5O0HSzkNpntLr7ev9I6cmD6AMYeNfDKlcRB\nJeleg7XB6tbx43HyPAU9z1ba23xc+ylCTl3sA/ANAE9h8Pbhfzk8fiWAxwGcBvBFAIfpms8AeA7A\nSQD3RoSjRucwbblZNJMFh3qvJqdt90oT6gTkyaQJA1YGVeKZSIlon+qxiN+UheUdfnjBpid4OeEr\n7XOlWdKWXO9tAvh7AN4D4O8Mf/8EgE9jICx3APiL4X8AeCeAXxx+fwTAA1Ed2oFVJvO0Zad1/RqN\nxmiZCV+rk1Qn2F5BLYXnwnhtLU1gKA09pvRLhMX7zXQ8V0wfxci1paob6CnUFEpUzcbwexXACoDz\nAD4G4MHh8QcBfGL4++MAHgLQAfASgOcBHM8xyn63IpUOjI6rBrL/3tNw7MrowDFdr1Mjt0aD/px5\nZz5yUDeMoRs4RFmlFG0ro/siaFtT8Z1XR87SNxqNifdvKq1UDJPyVDy+S60Jo0RYmhi4YWcA/CWA\nZwAcHf7H8Pvo8Pf1AF6ma18GcEOuAmPe24whFfTloJOGH/EtCeZLJniqrApcVEfOteBzkdvKfVUl\nUZE7H2n0VNujSZialPrOzVLlYXRTAhrxWnVulQT4Oxi4YW8D8AUMXLGxOoafCO65KAPCMA3H5avC\nc1mAyXTrtNDA2RuAaXn3ECkQC+5L+C2ZzF6co5o9x2dJOStT1XWKhKOEn2ld5CrZsAsAPgfgfRhY\nk2sBvArgOgBnh2V+AOAmuubG4bEJPPzww6NBvvPOO3HzzTcDmMwulWjdUnCHle4mk4Pn0vE51paz\nwGhHT4RyZirHr7qcXIeW4+f+WdAsGE9ZgNKJWeoSzRoHRvycOHECJ06cyNaRE5YjALoA3gCwDuBn\nAPzfAB4F8CkAvz38fmRY/lEAfwLgdzFwv24H8IRH+P7770e/v7sXlO1t5VkSNf3R4PC2
EkssscTe4v8Hnhr+kgtI/NkAAAAASUVORK5CYII=\n", 13 | "text/plain": [ 14 | "" 15 | ] 16 | }, 17 | "metadata": {}, 18 | "output_type": "display_data" 19 | } 20 | ], 21 | "source": [ 22 | "import matplotlib.pyplot as plt\n", 23 | "from PIL import Image\n", 24 | "\n", 25 | "%matplotlib inline\n", 26 | "\n", 27 | "imgPath = \"characters/\"\n", 28 | "fileName = \"3\"\n", 29 | "ext = \".jpg\"\n", 30 | "\n", 31 | "# Use PIL to convert the image to greyscale. 
\n", 32 | "im = Image.open(imgPath+fileName+ext).convert('LA')\n", 33 | "bg = Image.new(\"RGB\", im.size, (255,255,255))\n", 34 | "bg.paste(im)\n", 35 | "bg.save(imgPath+fileName+\".png\")\n", 36 | "\n", 37 | "img = plt.imread(imgPath+fileName+\".png\")\n", 38 | "imgplot = plt.imshow(img)" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": null, 44 | "metadata": { 45 | "collapsed": true 46 | }, 47 | "outputs": [], 48 | "source": [] 49 | } 50 | ], 51 | "metadata": { 52 | "kernelspec": { 53 | "display_name": "Python 2", 54 | "language": "python", 55 | "name": "python2" 56 | }, 57 | "language_info": { 58 | "codemirror_mode": { 59 | "name": "ipython", 60 | "version": 2 61 | }, 62 | "file_extension": ".py", 63 | "mimetype": "text/x-python", 64 | "name": "python", 65 | "nbconvert_exporter": "python", 66 | "pygments_lexer": "ipython2", 67 | "version": "2.7.10" 68 | } 69 | }, 70 | "nbformat": 4, 71 | "nbformat_minor": 0 72 | } 73 | -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/old_stuff/aws-gpu-setup.md: -------------------------------------------------------------------------------- 1 | Get set up and configured with an AWS GPU-backed instance. 2 | These instructions show how to train a 3 | [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) classifier and are 4 | based on other people's writeups - they can be found 5 | [here](http://markus.com/install-theano-on-aws/) and 6 | [here](http://ramhiser.com/2016/01/05/installing-tensorflow-on-an-aws-ec2-instance-with-gpu-support/). 7 | 8 | ### 1. AWS SETUP 9 | 10 | Set up an AWS Ubuntu instance (with a GPU). Log in. 11 | 12 | ```bash 13 | # username for ubuntu instance is 'ubuntu' 14 | ssh -i [path/to/key.pem] ubuntu@[DNS] 15 | ``` 16 | 17 | ### 2. THEANO 18 | 19 | Install the first batch of system deps. 20 | 21 | ```bash 22 | sudo apt-get install -y \ 23 | gcc \ 24 | g++ \ 25 | gfortran \ 26 | build-essential \ 27 | git \ 28 | wget \ 29 | linux-generic \ 30 | libopenblas-dev \ 31 | python-dev \ 32 | python-pip \ 33 | python-nose \ 34 | python-numpy \ 35 | python-scipy 36 | ``` 37 | 38 | install 'bleeding edge' theano. 39 | 40 | ```bash 41 | sudo pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git 42 | ``` 43 | 44 | ### 3. CUDA 45 | 46 | get the cuda debian package deb file. 47 | 48 | ```bash 49 | sudo wget http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1404/x86_64/cuda-repo-ubuntu1404_7.0-28_amd64.deb 50 | ``` 51 | 52 | register it via dpkg. 53 | 54 | ```bash 55 | sudo dpkg -i cuda-repo-ubuntu1404_7.0-28_amd64.deb 56 | ``` 57 | 58 | update the system so the cuda package is visible to apt-get. 59 | 60 | ```bash 61 | sudo apt-get update 62 | ``` 63 | 64 | install cuda. 65 | 66 | ```bash 67 | sudo apt-get install -y cuda 68 | ``` 69 | 70 | export cuda bin to path. export include path for header files. 71 | ```bash 72 | echo -e "\nexport PATH=/usr/local/cuda/bin:$PATH\n\nexport LD_LIBRARY_PATH=/usr/local/cuda/lib64" >> .bashrc 73 | ``` 74 | 75 | reboot. 76 | 77 | ```bash 78 | sudo reboot 79 | ``` 80 | 81 | the cuda bin directory that is now on the path contains this script. 82 | run it in home (~). 83 | 84 | ```bash 85 | cuda-install-samples-7.5.sh ~/ 86 | ``` 87 | 88 | it will create a 'Samples' directory. cd to the following. 89 | 90 | ```bash 91 | cd NVIDIA_CUDA-7.5_Samples/1_Utilities/deviceQuery 92 | ``` 93 | 94 | make and run. 95 | 96 | ```bash 97 | make 98 | ./deviceQuery 99 | ``` 100 | 101 | create a .theanorc with gpu flags set. 
102 | 103 | ```bash 104 | echo -e "\n[global]\nfloatX=float32\ndevice=gpu\nmode=FAST_RUN\n\n[nvcc]\nfastmath=True\n\n[cuda]\nroot=/usr/local/cuda" >> ~/.theanorc 105 | ``` 106 | 107 | ### 4. KERAS 108 | 109 | this setup uses keras. 110 | keras uses HDF5 to save files. 111 | download the system deps. 112 | 113 | ```bash 114 | # http://stackoverflow.com/questions/24744969/installing-h5py-on-an-ubuntu-server 115 | sudo apt-get install libhdf5-dev 116 | ``` 117 | 118 | install keras and h5py. h5py is a python client for HDF5. 119 | 120 | ```bash 121 | sudo pip install keras 122 | sudo pip install h5py 123 | ``` 124 | 125 | ### 5. CUDNN 126 | 127 | cudnn reduces the training time of conv nets. 128 | sign up for the [nvidia dev program](https://developer.nvidia.com/rdp/cudnn-download). 129 | unzip the cudnn download. 130 | 131 | ```bash 132 | tar -zxf cudnn-7.0.tgz && rm cudnn-7.0.tgz 133 | ``` 134 | 135 | the unzipped directory should have a lib and an include. 136 | copy those into the respective directories of the cuda installation. 137 | 138 | ```bash 139 | sudo cp -R cudnn-7.0/lib64/* /usr/local/cuda/lib64/ 140 | sudo cp cudnn-7.0/include/cudnn.h /usr/local/cuda/include/ 141 | ``` 142 | 143 | reboot. 144 | 145 | ```bash 146 | sudo reboot 147 | ``` 148 | 149 | ### 6. OTHER 150 | 151 | #### how to keep procs running after ending ssh session 152 | 153 | [screen - ask ubuntu](http://askubuntu.com/questions/8653/how-to-keep-processes-running-after-ending-ssh-session) 154 | 155 | [screen - digital ocean](https://www.digitalocean.com/community/tutorials/how-to-install-and-use-screen-on-an-ubuntu-cloud-server) 156 | 157 | ### 7. INTEGRATION 158 | 159 | run the py files in this repo to sanity-check various aspects of the previous 160 | six steps. 161 | 162 | 163 | ### 8. TRAIN
164 | 165 | ```python 166 | from __future__ import print_function 167 | from keras.datasets import cifar10 168 | from keras.preprocessing.image import ImageDataGenerator 169 | from keras.models import Sequential 170 | from keras.layers import Dense, Dropout, Activation, Flatten 171 | from keras.layers import Convolution2D, MaxPooling2D 172 | from keras.optimizers import SGD 173 | from keras.utils import np_utils 174 | 175 | batch_size = 32 176 | nb_classes = 10 177 | nb_epoch = 200 178 | 179 | # input image dimensions 180 | img_rows, img_cols = 32, 32 181 | # the CIFAR10 images are RGB 182 | img_channels = 3 183 | 184 | # the data, shuffled and split between train and test sets 185 | (X_train, y_train), (X_test, y_test) = cifar10.load_data() 186 | print('X_train shape:', X_train.shape) 187 | print(X_train.shape[0], 'train samples') 188 | print(X_test.shape[0], 'test samples') 189 | 190 | # convert class vectors to binary class matrices 191 | Y_train = np_utils.to_categorical(y_train, nb_classes) 192 | Y_test = np_utils.to_categorical(y_test, nb_classes) 193 | 194 | model = Sequential() 195 | 196 | model.add(Convolution2D(32, 3, 3, border_mode='same', 197 | input_shape=(img_channels, img_rows, img_cols))) 198 | model.add(Activation('relu')) 199 | model.add(Convolution2D(32, 3, 3)) 200 | model.add(Activation('relu')) 201 | model.add(MaxPooling2D(pool_size=(2, 2))) 202 | model.add(Dropout(0.25)) 203 | 204 | model.add(Convolution2D(64, 3, 3, border_mode='same')) 205 | model.add(Activation('relu')) 206 | model.add(Convolution2D(64, 3, 3)) 207 | model.add(Activation('relu')) 208 | model.add(MaxPooling2D(pool_size=(2, 2))) 209 | model.add(Dropout(0.25)) 210 | 211 | model.add(Flatten()) 212 | model.add(Dense(512)) 213 | model.add(Activation('relu')) 214 | model.add(Dropout(0.5)) 215 | model.add(Dense(nb_classes)) 216 | model.add(Activation('softmax')) 217 | 218 | # let's train the model using SGD + momentum (how original). 219 | sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) 220 | model.compile(loss='categorical_crossentropy', 221 | optimizer=sgd, 222 | metrics=['accuracy']) 223 | 224 | X_train = X_train.astype('float32') 225 | X_test = X_test.astype('float32') 226 | X_train /= 255 227 | X_test /= 255 228 | 229 | print('Start') 230 | model.fit(X_train, Y_train, 231 | batch_size=batch_size, 232 | nb_epoch=nb_epoch, 233 | validation_data=(X_test, Y_test), 234 | shuffle=True) 235 | 236 | print('Saving') 237 | json_string = model.to_json() 238 | open('model_arch.json', 'w').write(json_string) 239 | model.save_weights('model_weights.h5') 240 | 241 | ``` 
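### 9. VERIFY GPU

as an optional last sanity check that theano is actually running on the gpu and not silently falling back to the cpu, the snippet below is a minimal sketch adapted from the theano docs (the exact printout varies by theano version); if the .theanorc from step 3 took effect, it should report the gpu.

```python
from theano import function, config, shared
import theano.tensor as T
import numpy

# build and run a tiny graph, then inspect what it was compiled to.
x = shared(numpy.asarray(numpy.random.rand(1000), config.floatX))
f = function([], T.exp(x))
f()

# plain Elemwise ops in the compiled graph mean theano stayed on the cpu;
# on the gpu they are replaced by gpu-specific ops.
if numpy.any([isinstance(n.op, T.Elemwise) for n in f.maker.fgraph.toposort()]):
    print('used the cpu')
else:
    print('used the gpu')
```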
242 | -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/old_stuff/basics.py: -------------------------------------------------------------------------------- 1 | # To be interactively typed in order to explore TF basics. 2 | # Following O'Reilly's "Hello TensorFlow". 3 | 4 | ## Basic Exploration ## 5 | import tensorflow as tf 6 | 7 | graph = tf.get_default_graph() 8 | 9 | graph.get_operations() 10 | 11 | input_value = tf.constant(1.0) 12 | 13 | operations = graph.get_operations() 14 | 15 | operations 16 | 17 | operations[0].node_def 18 | 19 | sess = tf.Session() 20 | 21 | # TensorFlow manages its own state and maintains a method of evaluating and executing code. 22 | sess.run(input_value) 23 | 24 | ## The simplest TensorFlow neuron ## 25 | 26 | weight = tf.Variable(0.8) 27 | 28 | # Display the operations added to the graph as a result. 29 | for op in graph.get_operations(): print(op.name) 30 | 31 | output_value = weight * input_value 32 | 33 | op = graph.get_operations()[-1] 34 | op.name 35 | 36 | for op_input in op.inputs: print(op_input) 37 | 38 | # Generates an operation which initializes all our variables (in this case just weight). 39 | # If you add more variables you'll want to run tf.initialize_all_variables() again; a stale init wouldn't include the new variables. 40 | 41 | init = tf.initialize_all_variables() 42 | sess.run(init) 43 | 44 | sess.run(output_value) 45 | 46 | x = tf.constant(1.0, name='input') 47 | w = tf.Variable(0.8, name='weight') 48 | y = tf.mul(w, x, name='output') 49 | 50 | summary_writer = tf.train.SummaryWriter('log_simple_graph', sess.graph_def) 51 | 52 | # Command line: tensorboard --logdir=log_simple_graph 53 | # localhost:6006/#graphs 54 | 55 | ## Training a Single Neuron ## 56 | 57 | y_ = tf.constant(0.0) 58 | 59 | # Defining the loss function as the squared diff between current output and desired. 60 | 61 | loss = (y - y_)**2 62 | 63 | optim = tf.train.GradientDescentOptimizer(learning_rate=0.025) 64 | grads_and_vars = optim.compute_gradients(loss) 65 | sess.run(tf.initialize_all_variables()) 66 | sess.run(grads_and_vars[1][0]) 67 | 68 | sess.run(optim.apply_gradients(grads_and_vars)) 69 | 70 | sess.run(w) 71 | summary_y = tf.scalar_summary('output', y)  # Summary op for y; needed by the training loop below. 72 | train_step = tf.train.GradientDescentOptimizer(0.025).minimize(loss) 73 | for i in range(100): 74 | print('before step {}, y is {}'.format(i, sess.run(y))) 75 | summary_str = sess.run(summary_y) 76 | summary_writer.add_summary(summary_str, i) 77 | sess.run(train_step) 78 | 79 | 80 | 81 | sess.run(y) 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/old_stuff/characters/3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/TensorFlow-Workshop-March-2017/old_stuff/characters/3.jpg -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/old_stuff/characters/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/TensorFlow-Workshop-March-2017/old_stuff/characters/3.png -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/old_stuff/characters/34.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Andrewnetwork/WorkshopScipy/739d24b9078fffb84408e7877862618d88d947dc/TensorFlow-Workshop-March-2017/old_stuff/characters/34.jpg -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/old_stuff/finalBasics.py: -------------------------------------------------------------------------------- 1 | # Source: https://www.oreilly.com/learning/hello-tensorflow 2 | # Annotated by: Andrew Ribeiro 3 | 4 | import tensorflow as tf 5 | 6 | x = tf.constant(1.0, name='input') 7 | w = tf.Variable(0.8, name='weight') 8 | y = tf.mul(w, x, name='output') 9 | y_ = tf.constant(0.0, name='correct_value') 10 | 11 | loss = tf.pow(y - y_, 2, name='loss') 12 | train_step = tf.train.GradientDescentOptimizer(0.025).minimize(loss) 13 | 14 | for value in [x, w, y, y_, loss]: 15 | 
tf.scalar_summary(value.op.name, value) 16 | 17 | summaries = tf.merge_all_summaries() 18 | 19 | sess = tf.Session() 20 | summary_writer = tf.train.SummaryWriter('log_simple_stats', sess.graph) 21 | 22 | sess.run(tf.initialize_all_variables()) 23 | 24 | for i in range(100): 25 | summary_writer.add_summary(sess.run(summaries), i) 26 | sess.run(train_step) 27 | -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/old_stuff/input_data.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | 16 | """Functions for downloading and reading MNIST data.""" 17 | from __future__ import absolute_import 18 | from __future__ import division 19 | from __future__ import print_function 20 | 21 | import gzip 22 | import os 23 | import tempfile 24 | 25 | import numpy 26 | from six.moves import urllib 27 | from six.moves import xrange # pylint: disable=redefined-builtin 28 | import tensorflow as tf 29 | from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets 30 | -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/old_stuff/mnist_with_summaries.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the 'License'); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an 'AS IS' BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # ============================================================================== 15 | """A simple MNIST classifier which displays summaries in TensorBoard. 16 | 17 | This is an unimpressive MNIST model, but it is a good example of using 18 | tf.name_scope to make a graph legible in the TensorBoard graph explorer, and of 19 | naming summary tags so that they are grouped meaningfully in TensorBoard. 20 | 21 | It demonstrates the functionality of every TensorBoard dashboard. 
22 | """ 23 | from __future__ import absolute_import 24 | from __future__ import division 25 | from __future__ import print_function 26 | 27 | import tensorflow as tf 28 | 29 | from tensorflow.examples.tutorials.mnist import input_data 30 | 31 | flags = tf.app.flags 32 | FLAGS = flags.FLAGS 33 | flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data ' 34 | 'for unit testing.') 35 | flags.DEFINE_integer('max_steps', 1000, 'Number of steps to run trainer.') 36 | flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.') 37 | flags.DEFINE_float('dropout', 0.9, 'Keep probability for training dropout.') 38 | flags.DEFINE_string('data_dir', '/tmp/data', 'Directory for storing data') 39 | flags.DEFINE_string('summaries_dir', '/tmp/mnist_logs', 'Summaries directory') 40 | 41 | 42 | def train(): 43 | # Import data 44 | mnist = input_data.read_data_sets(FLAGS.data_dir, 45 | one_hot=True, 46 | fake_data=FLAGS.fake_data) 47 | 48 | sess = tf.InteractiveSession() 49 | 50 | # Create a multilayer model. 51 | 52 | # Input placeholders 53 | with tf.name_scope('input'): 54 | x = tf.placeholder(tf.float32, [None, 784], name='x-input') 55 | y_ = tf.placeholder(tf.float32, [None, 10], name='y-input') 56 | 57 | with tf.name_scope('input_reshape'): 58 | image_shaped_input = tf.reshape(x, [-1, 28, 28, 1]) 59 | tf.image_summary('input', image_shaped_input, 10) 60 | 61 | # We can't initialize these variables to 0 - the network will get stuck. 62 | def weight_variable(shape): 63 | """Create a weight variable with appropriate initialization.""" 64 | initial = tf.truncated_normal(shape, stddev=0.1) 65 | return tf.Variable(initial) 66 | 67 | def bias_variable(shape): 68 | """Create a bias variable with appropriate initialization.""" 69 | initial = tf.constant(0.1, shape=shape) 70 | return tf.Variable(initial) 71 | 72 | def variable_summaries(var, name): 73 | """Attach a lot of summaries to a Tensor.""" 74 | with tf.name_scope('summaries'): 75 | mean = tf.reduce_mean(var) 76 | tf.scalar_summary('mean/' + name, mean) 77 | with tf.name_scope('stddev'): 78 | stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))) 79 | tf.scalar_summary('stddev/' + name, stddev) 80 | tf.scalar_summary('max/' + name, tf.reduce_max(var)) 81 | tf.scalar_summary('min/' + name, tf.reduce_min(var)) 82 | tf.histogram_summary(name, var) 83 | 84 | def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu): 85 | """Reusable code for making a simple neural net layer. 86 | 87 | It does a matrix multiply, bias add, and then uses relu to nonlinearize. 88 | It also sets up name scoping so that the resultant graph is easy to read, 89 | and adds a number of summary ops. 90 | """ 91 | # Adding a name scope ensures logical grouping of the layers in the graph. 
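# ( usage example: the call hidden1 = nn_layer(x, 784, 500, 'layer1') further down builds a fully connected 784 -> 500 relu layer with weight, bias, and activation summaries attached. )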
92 | with tf.name_scope(layer_name): 93 | # This Variable will hold the state of the weights for the layer 94 | with tf.name_scope('weights'): 95 | weights = weight_variable([input_dim, output_dim]) 96 | variable_summaries(weights, layer_name + '/weights') 97 | with tf.name_scope('biases'): 98 | biases = bias_variable([output_dim]) 99 | variable_summaries(biases, layer_name + '/biases') 100 | with tf.name_scope('Wx_plus_b'): 101 | preactivate = tf.matmul(input_tensor, weights) + biases 102 | tf.histogram_summary(layer_name + '/pre_activations', preactivate) 103 | activations = act(preactivate, 'activation') 104 | tf.histogram_summary(layer_name + '/activations', activations) 105 | return activations 106 | 107 | hidden1 = nn_layer(x, 784, 500, 'layer1') 108 | 109 | with tf.name_scope('dropout'): 110 | keep_prob = tf.placeholder(tf.float32) 111 | tf.scalar_summary('dropout_keep_probability', keep_prob) 112 | dropped = tf.nn.dropout(hidden1, keep_prob) 113 | 114 | y = nn_layer(dropped, 500, 10, 'layer2', act=tf.nn.softmax) 115 | 116 | with tf.name_scope('cross_entropy'): 117 | diff = y_ * tf.log(y) 118 | with tf.name_scope('total'): 119 | cross_entropy = -tf.reduce_mean(diff) 120 | tf.scalar_summary('cross entropy', cross_entropy) 121 | 122 | with tf.name_scope('train'): 123 | train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize( 124 | cross_entropy) 125 | 126 | with tf.name_scope('accuracy'): 127 | with tf.name_scope('correct_prediction'): 128 | correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) 129 | with tf.name_scope('accuracy'): 130 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 131 | tf.scalar_summary('accuracy', accuracy) 132 | 133 | # Merge all the summaries and write them out to /tmp/mnist_logs (by default) 134 | merged = tf.merge_all_summaries() 135 | train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train', 136 | sess.graph) 137 | test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test') 138 | tf.initialize_all_variables().run() 139 | 140 | # Train the model, and also write summaries. 
141 | # Every 10th step, measure test-set accuracy, and write test summaries 142 | # All other steps, run train_step on training data, & add training summaries 143 | 144 | def feed_dict(train): 145 | """Make a TensorFlow feed_dict: maps data onto Tensor placeholders.""" 146 | if train or FLAGS.fake_data: 147 | xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data) 148 | k = FLAGS.dropout 149 | else: 150 | xs, ys = mnist.test.images, mnist.test.labels 151 | k = 1.0 152 | return {x: xs, y_: ys, keep_prob: k} 153 | 154 | for i in range(FLAGS.max_steps): 155 | if i % 10 == 0: # Record summaries and test-set accuracy 156 | summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False)) 157 | test_writer.add_summary(summary, i) 158 | print('Accuracy at step %s: %s' % (i, acc)) 159 | else: # Record train set summaries, and train 160 | if i % 100 == 99: # Record execution stats 161 | run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) 162 | run_metadata = tf.RunMetadata() 163 | summary, _ = sess.run([merged, train_step], 164 | feed_dict=feed_dict(True), 165 | options=run_options, 166 | run_metadata=run_metadata) 167 | train_writer.add_run_metadata(run_metadata, 'step%03d' % i) 168 | train_writer.add_summary(summary, i) 169 | print('Adding run metadata for', i) 170 | else: # Record a summary 171 | summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True)) 172 | train_writer.add_summary(summary, i) 173 | train_writer.close() 174 | test_writer.close() 175 | 176 | 177 | def main(_): 178 | if tf.gfile.Exists(FLAGS.summaries_dir): 179 | tf.gfile.DeleteRecursively(FLAGS.summaries_dir) 180 | tf.gfile.MakeDirs(FLAGS.summaries_dir) 181 | train() 182 | 183 | 184 | if __name__ == '__main__': 185 | tf.app.run() 186 | -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/old_stuff/readme.md: -------------------------------------------------------------------------------- 1 | readme.md 2 | Andrew Ribeiro 3 | July 2016 4 | 5 | Licensed under the Apache License, Version 2.0 (the "License"); 6 | you may not use this file except in compliance with the License. 7 | You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | ============================================================================== 17 | 18 | In order to install the virtualenv dependencies used in the creation of this project, please do the following: 19 | 20 | ##### Make a new virtual environment named whatever you'd like. Placeholder: <env_name> 21 | virtualenv <env_name> 22 | 23 | ##### Activate the newly created virtual environment. 24 | source <env_name>/bin/activate 25 | 26 | ##### Now that your new virtual environment is created, install the requirements: 27 | (<env_name>)$ pip install -r path/to/requirements.txt
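##### For example, with an environment named "venv" (the name here is only an illustration), the full sequence is:
virtualenv venv
source venv/bin/activate
(venv)$ pip install -r requirements.txt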
28 | -------------------------------------------------------------------------------- /TensorFlow-Workshop-March-2017/old_stuff/requirements.txt: -------------------------------------------------------------------------------- 1 | altgraph==0.10.2 2 | appnope==0.1.0 3 | backports-abc==0.4 4 | backports.shutil-get-terminal-size==1.0.0 5 | bdist-mpkg==0.5.0 6 | bonjour-py==0.3 7 | certifi==2016.2.28 8 | configparser==3.5.0 9 | decorator==4.0.10 10 | entrypoints==0.2.2 11 | functools32==3.2.3.post2 12 | ipykernel==4.3.1 13 | ipython==5.0.0 14 | ipython-genutils==0.1.0 15 | Jinja2==2.8 16 | jsonschema==2.5.1 17 | jupyter-client==4.3.0 18 | jupyter-core==4.1.0 19 | macholib==1.5.1 20 | MarkupSafe==0.23 21 | matplotlib==1.3.1 22 | mercurial==3.2.4.post20150107 23 | mistune==0.7.3 24 | modulegraph==0.10.4 25 | nbconvert==4.2.0 26 | nbformat==4.0.1 27 | notebook==4.2.1 28 | numpy==1.11.1 29 | pathlib2==2.1.0 30 | pexpect==4.2.0 31 | pickleshare==0.7.3 32 | prompt-toolkit==1.0.3 33 | protobuf==3.0.0b2 34 | ptyprocess==0.5.1 35 | py2app==0.7.3 36 | Pygments==2.1.3 37 | pyobjc-core==2.5.1 38 | pyobjc-framework-Accounts==2.5.1 39 | pyobjc-framework-AddressBook==2.5.1 40 | pyobjc-framework-AppleScriptKit==2.5.1 41 | pyobjc-framework-AppleScriptObjC==2.5.1 42 | pyobjc-framework-Automator==2.5.1 43 | pyobjc-framework-CFNetwork==2.5.1 44 | pyobjc-framework-Cocoa==2.5.1 45 | pyobjc-framework-Collaboration==2.5.1 46 | pyobjc-framework-CoreData==2.5.1 47 | pyobjc-framework-CoreLocation==2.5.1 48 | pyobjc-framework-CoreText==2.5.1 49 | pyobjc-framework-DictionaryServices==2.5.1 50 | pyobjc-framework-EventKit==2.5.1 51 | pyobjc-framework-ExceptionHandling==2.5.1 52 | pyobjc-framework-FSEvents==2.5.1 53 | pyobjc-framework-InputMethodKit==2.5.1 54 | pyobjc-framework-InstallerPlugins==2.5.1 55 | pyobjc-framework-InstantMessage==2.5.1 56 | pyobjc-framework-LatentSemanticMapping==2.5.1 57 | pyobjc-framework-LaunchServices==2.5.1 58 | pyobjc-framework-Message==2.5.1 59 | pyobjc-framework-OpenDirectory==2.5.1 60 | pyobjc-framework-PreferencePanes==2.5.1 61 | pyobjc-framework-PubSub==2.5.1 62 | pyobjc-framework-QTKit==2.5.1 63 | pyobjc-framework-Quartz==2.5.1 64 | pyobjc-framework-ScreenSaver==2.5.1 65 | pyobjc-framework-ScriptingBridge==2.5.1 66 | pyobjc-framework-SearchKit==2.5.1 67 | pyobjc-framework-ServiceManagement==2.5.1 68 | pyobjc-framework-Social==2.5.1 69 | pyobjc-framework-SyncServices==2.5.1 70 | pyobjc-framework-SystemConfiguration==2.5.1 71 | pyobjc-framework-WebKit==2.5.1 72 | pyOpenSSL==0.13.1 73 | pyparsing==2.0.1 74 | python-dateutil==1.5 75 | pytz==2013.7 76 | pyzmq==15.3.0 77 | scipy==0.13.0b1 78 | simplegeneric==0.8.1 79 | singledispatch==3.4.0.3 80 | six==1.10.0 81 | tensorflow==0.8.0 82 | terminado==0.6 83 | tornado==4.4.1 84 | traitlets==4.2.2 85 | virtualenv==15.0.2 86 | wcwidth==0.1.7 87 | xattr==0.6.4 88 | zope.interface==4.1.1 89 | -------------------------------------------------------------------------------- /setup.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Scientific Computing in Python Workshop - Setup" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "Welcome to the workshop! We will begin by installing the required packages. 
Jupyter allows us to access the console with the ! symbol at the beginning of a cell. Take some time to look around the interface and acquaint yourself with Jupyter. \n", 15 | "\n", 16 | "**Note:** This installs the requirements in the root conda environment. If you already had Python installed before installing Anaconda Navigator, there may be some issues." 17 | ] 18 | }, 19 | { 20 | "cell_type": "markdown", 21 | "metadata": {}, 22 | "source": [ 23 | "## Step 1: Install Requirements\n", 24 | "This step assumes you have launched the Jupyter Notebook server from your Anaconda Navigator dashboard and have installed the Python 3.6 version. Click on the cell below and press ```shift+enter```. It will take some time. " 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "metadata": { 31 | "collapsed": false 32 | }, 33 | "outputs": [], 34 | "source": [ 35 | "!python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose scikit-learn scikit-image tensorflow" 36 | ] 37 | }, 38 | { 39 | "cell_type": "markdown", 40 | "metadata": {}, 41 | "source": [ 42 | "## Step 2: Verify the requirements. \n", 43 | "Make sure everything is installed by pressing ```shift+enter``` on the cell below. It should quickly tell you that all requirements are satisfied. " 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": null, 49 | "metadata": { 50 | "collapsed": true 51 | }, 52 | "outputs": [], 53 | "source": [ 54 | "!python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose scikit-learn scikit-image tensorflow" 55 | ] 56 | }, 57 | { 58 | "cell_type": "markdown", 59 | "metadata": {}, 60 | "source": [ 61 | "## Step 3: Enjoy!\n", 62 | "Enjoy your journey through scientific computing in Python. Click on the Jupyter icon at the top of the page and navigate to the ```WorkshopScipy/Notebooks``` folder and explore the notebooks. " 63 | ] 64 | } 65 | ], 66 | "metadata": { 67 | "kernelspec": { 68 | "display_name": "Python 3", 69 | "language": "python", 70 | "name": "python3" 71 | }, 72 | "language_info": { 73 | "codemirror_mode": { 74 | "name": "ipython", 75 | "version": 3 76 | }, 77 | "file_extension": ".py", 78 | "mimetype": "text/x-python", 79 | "name": "python", 80 | "nbconvert_exporter": "python", 81 | "pygments_lexer": "ipython3", 82 | "version": "3.6.0" 83 | } 84 | }, 85 | "nbformat": 4, 86 | "nbformat_minor": 2 87 | } 88 | --------------------------------------------------------------------------------