├── 0. Deep Learning from Scratch .ipynb ├── 1. Basic Concepts.ipynb ├── 2_Backpropagation.ipynb ├── 3.0_Tensorflow_Basic_Concepts.ipynb ├── 3.1_Linear_Regression.ipynb ├── 3.2_Logistic_Regression.ipynb ├── 3.3_Perceptron.ipynb ├── 3.4_Multilayer_Perceptron.ipynb ├── 3.5_Linear_Regression_TensorBoard.ipynb ├── 3.6_Multilayer_Perceptron_with_keras.ipynb ├── 3.7_Some_exercises_with_keras.ipynb ├── 4.0_Recap_exercise.ipynb ├── 4.1_Convolutional_Network.ipynb ├── 4.2_Convolutional_Network_Smile_DataSet.ipynb ├── 5.1 Recurrent Neural Networks.ipynb ├── 5.2 Recurrent Neural Networks in tf and keras.ipynb ├── 6_Unsupervised_final.ipynb ├── 7.2 Word_Embeddings_remake_colab.ipynb ├── 7.3_Sentiment_Analysis.ipynb ├── Dockerfile ├── LICENSE.txt ├── README.md ├── Test if everything is up .ipynb ├── dataset ├── Advertising.csv ├── vocab.txt ├── wiki106.txt.zip └── wordVectors.txt.zip ├── files ├── household_power_consumption.zip ├── international-airline-passengers.csv ├── t10k-images-idx3-ubyte.gz ├── t10k-labels-idx1-ubyte.gz ├── toponims.txt ├── train-images-idx3-ubyte.gz ├── train-labels-idx1-ubyte.gz └── womennamesbarcelona.txt ├── images ├── README.md ├── TanhReal.gif ├── comp_graph1.jpg ├── exploding.png ├── g1.gif ├── g2.gif ├── gru.png ├── kar.png ├── loss_functions.png ├── lstm.png ├── minibatch.png ├── pipeline1.png ├── pipeline2.png ├── ridge2.png ├── seq2seq.png ├── split.png ├── steeper.png ├── t9.png ├── tf-gru.png ├── tf-lstm.png ├── unrolling.png └── vanilla.png ├── input_data.py ├── models ├── autoencoder_digits_part1_autoencoder.ckpt ├── autoencoder_digits_part1_autoencoder.ckpt.meta ├── autoencoder_digits_part2_sparse.ckpt ├── autoencoder_digits_part2_sparse.ckpt.meta ├── autoencoder_digits_part3_viz.ckpt ├── autoencoder_digits_part3_viz.ckpt.meta ├── digits_idx_part1_autoencoder.pkl ├── digits_idx_part2_sparse.pkl └── digits_idx_part3_viz.pkl ├── names.py ├── slides ├── DeepLearningCourse_Convolutionals.pdf ├── DeepLearningCourse_TensorFlow.pdf ├── DeepLearningIntro.pdf ├── README.md └── Unsupervised.pdf └── vocab.txt /0. Deep Learning from Scratch .ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Deep Learning from Scratch" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "> This directory contains Jupyter notebook-based documentation for the **Deep Learning from Scratch** course.\n", 15 | "\n", 16 | "\n", 17 | "Deep learning is one of the fastest-growing areas of machine learning and a hot topic in both academia and industry. This course will cover the basics of deep learning using a hands-on approach.\n", 18 | "\n", 19 | "### Approach\n", 20 | "We will illustrate all the content with Jupyter notebooks, a web application that allows you to create and share documents that contain live code, equations, visualizations and explanatory text.\n", 21 | "\n", 22 | "### Target Audience\n", 23 | "This course is aimed at developers, data scientists and researchers who have a basic knowledge of machine learning.\n", 24 | "\n", 25 | "### Prerequisites\n", 26 | "Minimal experience in Python programming, basic knowledge of calculus, linear algebra, and probability theory. 
Attendees are expected to bring their own laptops for the hands-on practical work.\n", 27 | "\n", 28 | "### Who\n", 29 | "This course is organized by the DataScience@UB Group (http://datascience.barcelona/).\n", 30 | "\n", 31 | "INSTRUCTORS: Oriol Pujol, Associate Professor at UB. Santi Seguí, Lecturer at UB. Jordi Vitrià, Full Professor at UB.\n", 32 | "\n", 33 | "### Why\n", 34 | "By the end of this course, you will be able to:\n", 35 | "+ Describe how a neural network works and combine different types of layers and activation functions.\n", 36 | "+ Describe how these models can be applied in computer vision, text analytics, etc.\n", 37 | "+ Develop your own models in TensorFlow.\n", 38 | "\n", 39 | "## Topics\n", 40 | "\n", 41 | "* Basic Concepts \n", 42 | "* TensorFlow & Keras\n", 43 | "* Convolutional Neural Networks\n", 44 | "* Recurrent Neural Networks\n", 45 | "* Unsupervised Learning\n", 46 | "* Advanced Applications" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": 3, 52 | "metadata": {}, 53 | "outputs": [ 54 | { 55 | "name": "stdout", 56 | "output_type": "stream", 57 | "text": [ 58 | "keras: 2.1.3\n", 59 | "tensorflow: 1.5.0\n" 60 | ] 61 | } 62 | ], 63 | "source": [ 64 | "import keras\n", 65 | "import tensorflow\n", 66 | "print('keras:', keras.__version__)\n", 67 | "print('tensorflow: ', tensorflow.__version__)" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": null, 73 | "metadata": {}, 74 | "outputs": [], 75 | "source": [] 76 | } 77 | ], 78 | "metadata": { 79 | "anaconda-cloud": {}, 80 | "kernelspec": { 81 | "display_name": "Python 3", 82 | "language": "python", 83 | "name": "python3" 84 | }, 85 | "language_info": { 86 | "codemirror_mode": { 87 | "name": "ipython", 88 | "version": 3 89 | }, 90 | "file_extension": ".py", 91 | "mimetype": "text/x-python", 92 | "name": "python", 93 | "nbconvert_exporter": "python", 94 | "pygments_lexer": "ipython3", 95 | "version": "3.5.2" 96 | } 97 | }, 98 | "nbformat": 4, 99 | "nbformat_minor": 1 100 | } 101 | -------------------------------------------------------------------------------- /3.0_Tensorflow_Basic_Concepts.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "3.0_Tensorflow_Basic_Concepts.ipynb", 7 | "version": "0.3.2", 8 | "views": {}, 9 | "default_view": {}, 10 | "provenance": [] 11 | }, 12 | "kernelspec": { 13 | "display_name": "Python 2", 14 | "language": "python", 15 | "name": "python2" 16 | } 17 | }, 18 | "cells": [ 19 | { 20 | "metadata": { 21 | "id": "1T3YlQlAKasq", 22 | "colab_type": "text" 23 | }, 24 | "cell_type": "markdown", 25 | "source": [ 26 | "# Start with TensorFlow" 27 | ] 28 | }, 29 | { 30 | "metadata": { 31 | "id": "UKS4TJuQKasr", 32 | "colab_type": "text" 33 | }, 34 | "cell_type": "markdown", 35 | "source": [ 36 | "**What is TensorFlow?** TensorFlow is a deep learning library recently open-sourced by Google. It is a programming system in which you represent computations as **graphs**. Nodes in the graph are called ops (short for operations). An op takes zero or more **Tensors**, performs some computation, and produces zero or more Tensors. TensorFlow provides primitives for defining functions on tensors and automatically computing their derivatives. \n", 37 | "\n", 38 | "\n", 39 | "Formally, tensors are multilinear maps from vector spaces to the real numbers. A Tensor can be represented as a multi-dimensional array of numbers. For example, you can represent a mini-batch of images as a 4-D array of floating point numbers with dimensions [batch, height, width, channels].
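 As a toy illustration (the sizes here are arbitrary, chosen only for this sketch), such a mini-batch could be created as:\n\n```python\nimport tensorflow as tf\n\n# A hypothetical mini-batch of 32 RGB images, each 28x28 pixels.\nimages = tf.zeros([32, 28, 28, 3])  # [batch, height, width, channels]\n```\n",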
40 | "\n", 41 | "Every operation of machine learning can be written as a function on tensors.\n", 42 | "\n", 43 | "**Why TensorFlow?**\n", 44 | "TensorFlow is quite similar to NumPy; the most important difference is that NumPy doesn't offer methods to define functions on tensors and automatically compute their derivatives (NumPy also does not provide GPU support).\n", 45 | "\n", 46 | "**TensorFlow Mechanics:**\n", 47 | "* Prepare the Data\n", 48 | " * Inputs and Placeholders\n", 49 | "* Build the Graph\n", 50 | " * Inference\n", 51 | " * Loss\n", 52 | " * Training\n", 53 | "* Train the model\n", 54 | " * The Session\n", 55 | " * The Graph\n", 56 | " * Train loop\n", 57 | "* Evaluate the model" 58 | ] 59 | }, 60 | { 61 | "metadata": { 62 | "id": "yJ1z5E9eKass", 63 | "colab_type": "text" 64 | }, 65 | "cell_type": "markdown", 66 | "source": [ 67 | "### Simple Numpy Recap" 68 | ] 69 | }, 70 | { 71 | "metadata": { 72 | "id": "Ui1C31FNKast", 73 | "colab_type": "code", 74 | "colab": { 75 | "autoexec": { 76 | "startup": false, 77 | "wait_interval": 0 78 | }, 79 | "base_uri": "https://localhost:8080/", 80 | "height": 34 81 | }, 82 | "outputId": "14a82060-2d82-4960-b2e1-5925386f5305", 83 | "executionInfo": { 84 | "status": "ok", 85 | "timestamp": 1528877235105, 86 | "user_tz": -120, 87 | "elapsed": 1003, 88 | "user": { 89 | "displayName": "", 90 | "photoUrl": "", 91 | "userId": "" 92 | } 93 | } 94 | }, 95 | "cell_type": "code", 96 | "source": [ 97 | "import numpy as np\n", 98 | "\n", 99 | "a = np.zeros((2,2)); b = np.ones((2,2))\n", 100 | "print np.sum(b,axis=1)" 101 | ], 102 | "execution_count": 1, 103 | "outputs": [ 104 | { 105 | "output_type": "stream", 106 | "text": [ 107 | "[2. 2.]\n" 108 | ], 109 | "name": "stdout" 110 | } 111 | ] 112 | }, 113 | { 114 | "metadata": { 115 | "id": "DU7hAMOWKasy", 116 | "colab_type": "code", 117 | "colab": { 118 | "autoexec": { 119 | "startup": false, 120 | "wait_interval": 0 121 | }, 122 | "base_uri": "https://localhost:8080/", 123 | "height": 34 124 | }, 125 | "outputId": "c3d88a88-f00d-4988-cf3a-4b53fda67b26", 126 | "executionInfo": { 127 | "status": "ok", 128 | "timestamp": 1528877235677, 129 | "user_tz": -120, 130 | "elapsed": 515, 131 | "user": { 132 | "displayName": "", 133 | "photoUrl": "", 134 | "userId": "" 135 | } 136 | } 137 | }, 138 | "cell_type": "code", 139 | "source": [ 140 | "print a.shape" 141 | ], 142 | "execution_count": 2, 143 | "outputs": [ 144 | { 145 | "output_type": "stream", 146 | "text": [ 147 | "(2, 2)\n" 148 | ], 149 | "name": "stdout" 150 | } 151 | ] 152 | }, 153 | { 154 | "metadata": { 155 | "id": "LKBX4lB3Kas0", 156 | "colab_type": "code", 157 | "colab": { 158 | "autoexec": { 159 | "startup": false, 160 | "wait_interval": 0 161 | }, 162 | "base_uri": "https://localhost:8080/", 163 | "height": 34 164 | }, 165 | "outputId": "7ab9ce74-9044-49f3-e356-f5ba46c88149", 166 | "executionInfo": { 167 | "status": "ok", 168 | "timestamp": 1528877236213, 169 | "user_tz": -120, 170 | "elapsed": 483, 171 | "user": { 172 | "displayName": "", 173 | "photoUrl": "", 174 | "userId": "" 175 | } 176 | } 177 | }, 178 | "cell_type": "code", 179 | "source": [ 180 | "print np.reshape(a,(1,4))" 181 | ], 182 | "execution_count": 3, 183 | "outputs": [ 184 | { 185 | "output_type": "stream", 186 | "text": [ 187 | "[[0. 0. 0. 
0.]]\n" 188 | ], 189 | "name": "stdout" 190 | } 191 | ] 192 | }, 193 | { 194 | "metadata": { 195 | "id": "Y1xzMctdKas3", 196 | "colab_type": "text" 197 | }, 198 | "cell_type": "markdown", 199 | "source": [ 200 | "### Let's do the same with TensorFlow" 201 | ] 202 | }, 203 | { 204 | "metadata": { 205 | "id": "CjQmqu0rKas3", 206 | "colab_type": "code", 207 | "colab": { 208 | "autoexec": { 209 | "startup": false, 210 | "wait_interval": 0 211 | }, 212 | "base_uri": "https://localhost:8080/", 213 | "height": 17 214 | }, 215 | "outputId": "317effb4-52a2-4d3e-a9bc-57f702eb9ded", 216 | "executionInfo": { 217 | "status": "ok", 218 | "timestamp": 1528877237278, 219 | "user_tz": -120, 220 | "elapsed": 1016, 221 | "user": { 222 | "displayName": "", 223 | "photoUrl": "", 224 | "userId": "" 225 | } 226 | } 227 | }, 228 | "cell_type": "code", 229 | "source": [ 230 | "import tensorflow as tf" 231 | ], 232 | "execution_count": 4, 233 | "outputs": [] 234 | }, 235 | { 236 | "metadata": { 237 | "id": "vvGYU2xHKas6", 238 | "colab_type": "code", 239 | "colab": { 240 | "autoexec": { 241 | "startup": false, 242 | "wait_interval": 0 243 | }, 244 | "base_uri": "https://localhost:8080/", 245 | "height": 17 246 | }, 247 | "outputId": "7eee7f8d-ec39-4b0d-b842-7077bc86686d", 248 | "executionInfo": { 249 | "status": "ok", 250 | "timestamp": 1528877237830, 251 | "user_tz": -120, 252 | "elapsed": 493, 253 | "user": { 254 | "displayName": "", 255 | "photoUrl": "", 256 | "userId": "" 257 | } 258 | } 259 | }, 260 | "cell_type": "code", 261 | "source": [ 262 | "sess = tf.InteractiveSession()" 263 | ], 264 | "execution_count": 5, 265 | "outputs": [] 266 | }, 267 | { 268 | "metadata": { 269 | "id": "IEUpYm9eKas8", 270 | "colab_type": "code", 271 | "colab": { 272 | "autoexec": { 273 | "startup": false, 274 | "wait_interval": 0 275 | }, 276 | "base_uri": "https://localhost:8080/", 277 | "height": 17 278 | }, 279 | "outputId": "791b1c02-dec7-4bc1-ecbd-339159061778", 280 | "executionInfo": { 281 | "status": "ok", 282 | "timestamp": 1528877238370, 283 | "user_tz": -120, 284 | "elapsed": 471, 285 | "user": { 286 | "displayName": "", 287 | "photoUrl": "", 288 | "userId": "" 289 | } 290 | } 291 | }, 292 | "cell_type": "code", 293 | "source": [ 294 | "a = tf.zeros((2,2)); b = tf.ones((2,2))" 295 | ], 296 | "execution_count": 6, 297 | "outputs": [] 298 | }, 299 | { 300 | "metadata": { 301 | "id": "lXOzvHKYKas-", 302 | "colab_type": "code", 303 | "colab": { 304 | "autoexec": { 305 | "startup": false, 306 | "wait_interval": 0 307 | }, 308 | "base_uri": "https://localhost:8080/", 309 | "height": 34 310 | }, 311 | "outputId": "c7ad517e-f500-45ea-936e-8a910cc45d7c", 312 | "executionInfo": { 313 | "status": "ok", 314 | "timestamp": 1528877238938, 315 | "user_tz": -120, 316 | "elapsed": 503, 317 | "user": { 318 | "displayName": "", 319 | "photoUrl": "", 320 | "userId": "" 321 | } 322 | } 323 | }, 324 | "cell_type": "code", 325 | "source": [ 326 | "tf.reduce_sum(b, reduction_indices=1).eval()" 327 | ], 328 | "execution_count": 7, 329 | "outputs": [ 330 | { 331 | "output_type": "execute_result", 332 | "data": { 333 | "text/plain": [ 334 | "array([2., 2.], dtype=float32)" 335 | ] 336 | }, 337 | "metadata": { 338 | "tags": [] 339 | }, 340 | "execution_count": 7 341 | } 342 | ] 343 | }, 344 | { 345 | "metadata": { 346 | "id": "FiVt3L6kKatB", 347 | "colab_type": "code", 348 | "colab": { 349 | "autoexec": { 350 | "startup": false, 351 | "wait_interval": 0 352 | }, 353 | "base_uri": "https://localhost:8080/", 354 | "height": 34 355 | }, 356 | 
"outputId": "613f3de3-1803-4098-f814-c8b099d7a0ee", 357 | "executionInfo": { 358 | "status": "ok", 359 | "timestamp": 1528877239863, 360 | "user_tz": -120, 361 | "elapsed": 870, 362 | "user": { 363 | "displayName": "", 364 | "photoUrl": "", 365 | "userId": "" 366 | } 367 | } 368 | }, 369 | "cell_type": "code", 370 | "source": [ 371 | "a.get_shape()" 372 | ], 373 | "execution_count": 8, 374 | "outputs": [ 375 | { 376 | "output_type": "execute_result", 377 | "data": { 378 | "text/plain": [ 379 | "TensorShape([Dimension(2), Dimension(2)])" 380 | ] 381 | }, 382 | "metadata": { 383 | "tags": [] 384 | }, 385 | "execution_count": 8 386 | } 387 | ] 388 | }, 389 | { 390 | "metadata": { 391 | "id": "s2nFBXH8KatD", 392 | "colab_type": "code", 393 | "colab": { 394 | "autoexec": { 395 | "startup": false, 396 | "wait_interval": 0 397 | }, 398 | "base_uri": "https://localhost:8080/", 399 | "height": 34 400 | }, 401 | "outputId": "3795c8d2-a271-4252-da50-5db58ca83c9a", 402 | "executionInfo": { 403 | "status": "ok", 404 | "timestamp": 1528877240529, 405 | "user_tz": -120, 406 | "elapsed": 609, 407 | "user": { 408 | "displayName": "", 409 | "photoUrl": "", 410 | "userId": "" 411 | } 412 | } 413 | }, 414 | "cell_type": "code", 415 | "source": [ 416 | "tf.reshape(a, (1, 4)).eval()" 417 | ], 418 | "execution_count": 9, 419 | "outputs": [ 420 | { 421 | "output_type": "execute_result", 422 | "data": { 423 | "text/plain": [ 424 | "array([[0., 0., 0., 0.]], dtype=float32)" 425 | ] 426 | }, 427 | "metadata": { 428 | "tags": [] 429 | }, 430 | "execution_count": 9 431 | } 432 | ] 433 | }, 434 | { 435 | "metadata": { 436 | "id": "dDJdOAReKatG", 437 | "colab_type": "code", 438 | "colab": { 439 | "autoexec": { 440 | "startup": false, 441 | "wait_interval": 0 442 | }, 443 | "base_uri": "https://localhost:8080/", 444 | "height": 68 445 | }, 446 | "outputId": "08b0c1b6-1860-4559-d4b8-9e4bb44a1d73", 447 | "executionInfo": { 448 | "status": "ok", 449 | "timestamp": 1528877241050, 450 | "user_tz": -120, 451 | "elapsed": 464, 452 | "user": { 453 | "displayName": "", 454 | "photoUrl": "", 455 | "userId": "" 456 | } 457 | } 458 | }, 459 | "cell_type": "code", 460 | "source": [ 461 | "a = np.zeros((2,2))\n", 462 | "ta = tf.zeros((2,2))\n", 463 | "print a\n", 464 | "print ta" 465 | ], 466 | "execution_count": 10, 467 | "outputs": [ 468 | { 469 | "output_type": "stream", 470 | "text": [ 471 | "[[0. 0.]\n", 472 | " [0. 0.]]\n", 473 | "Tensor(\"zeros_1:0\", shape=(2, 2), dtype=float32)\n" 474 | ], 475 | "name": "stdout" 476 | } 477 | ] 478 | }, 479 | { 480 | "metadata": { 481 | "id": "KVA0jfOiKatI", 482 | "colab_type": "text" 483 | }, 484 | "cell_type": "markdown", 485 | "source": [ 486 | "TensorFlow computations define a **computation graph** that has no numerical value until it is evaluated!" 487 | ] 488 | }, 489 | { 490 | "metadata": { 491 | "id": "SoZ-rKFlKatK", 492 | "colab_type": "code", 493 | "colab": { 494 | "autoexec": { 495 | "startup": false, 496 | "wait_interval": 0 497 | }, 498 | "base_uri": "https://localhost:8080/", 499 | "height": 51 500 | }, 501 | "outputId": "23794e31-0b75-4e4f-fec3-a597033410e0", 502 | "executionInfo": { 503 | "status": "ok", 504 | "timestamp": 1528877241594, 505 | "user_tz": -120, 506 | "elapsed": 474, 507 | "user": { 508 | "displayName": "", 509 | "photoUrl": "", 510 | "userId": "" 511 | } 512 | } 513 | }, 514 | "cell_type": "code", 515 | "source": [ 516 | "print ta.eval()" 517 | ], 518 | "execution_count": 11, 519 | "outputs": [ 520 | { 521 | "output_type": "stream", 522 | "text": [ 523 | "[[0. 
0.]\n", 524 | " [0. 0.]]\n" 525 | ], 526 | "name": "stdout" 527 | } 528 | ] 529 | }, 530 | { 531 | "metadata": { 532 | "id": "kkVpaRnmKatO", 533 | "colab_type": "text" 534 | }, 535 | "cell_type": "markdown", 536 | "source": [ 537 | "## Placeholders \n", 538 | "TensorFlow provides a placeholder operation that must be fed with data on execution. It will be used to load input data to the model.\n", 539 | "```python\n", 540 | "x = tf.placeholder(tf.float32, shape=(2, 2))\n", 541 | "y = tf.matmul(x, x)\n", 542 | "\n", 543 | "with tf.Session() as sess:\n", 544 | " print(sess.run(y)) # ERROR: will fail because x was not fed.\n", 545 | "\n", 546 | " rand_array = np.random.rand(2, 2) # we should get data from some training data\n", 547 | " print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.\n", 548 | "```" 549 | ] 550 | }, 551 | { 552 | "metadata": { 553 | "id": "2DlZ5CimKatO", 554 | "colab_type": "code", 555 | "colab": { 556 | "autoexec": { 557 | "startup": false, 558 | "wait_interval": 0 559 | }, 560 | "base_uri": "https://localhost:8080/", 561 | "height": 51 562 | }, 563 | "outputId": "6ae0731d-a1cb-485c-dbef-d1983822b1ee", 564 | "executionInfo": { 565 | "status": "ok", 566 | "timestamp": 1528877242131, 567 | "user_tz": -120, 568 | "elapsed": 486, 569 | "user": { 570 | "displayName": "", 571 | "photoUrl": "", 572 | "userId": "" 573 | } 574 | } 575 | }, 576 | "cell_type": "code", 577 | "source": [ 578 | "x = tf.placeholder(tf.float32, shape=(2, 2))\n", 579 | "y = tf.matmul(x, x)\n", 580 | "\n", 581 | "with tf.Session() as sess:\n", 582 | " rand_array = np.random.rand(2, 2) # we should get data from some training data\n", 583 | " print(sess.run(y, feed_dict={x: rand_array})) " 584 | ], 585 | "execution_count": 12, 586 | "outputs": [ 587 | { 588 | "output_type": "stream", 589 | "text": [ 590 | "[[0.33912265 0.84068996]\n", 591 | " [0.36637893 0.92535 ]]\n" 592 | ], 593 | "name": "stdout" 594 | } 595 | ] 596 | }, 597 | { 598 | "metadata": { 599 | "id": "uSGNoMZGKatQ", 600 | "colab_type": "text" 601 | }, 602 | "cell_type": "markdown", 603 | "source": [ 604 | "## ** Session **\n", 605 | "A Session object encapsulates the environment in which Operation objects are executed, and Tensor objects are evaluated. A session may own resources, such as variables, queues, and readers. It is important to release these resources when they are no longer required. \n", 606 | "\n", 607 | "Three basic ways to work: \n", 608 | "\n", 609 | "1) Using the Session object:\n", 610 | "```python\n", 611 | "a = tf.constant(5.0)\n", 612 | "b = tf.constant(6.0)\n", 613 | "c = a * b\n", 614 | "sess = tf.Session()\n", 615 | "print sess.run(c)\n", 616 | "sess.close()\n", 617 | "```\n", 618 | "\n", 619 | "2) Using the context manager:\n", 620 | "```python\n", 621 | "a = tf.constant(5.0)\n", 622 | "b = tf.constant(6.0)\n", 623 | "c = a * b\n", 624 | "with tf.Session() as sess:\n", 625 | " print(c.eval())\n", 626 | "```\n", 627 | "\n", 628 | "3) Using Interactive Session:\n", 629 | "```python\n", 630 | "sess = tf.InteractiveSession()\n", 631 | "a = tf.constant(5.0)\n", 632 | "b = tf.constant(6.0)\n", 633 | "c = a * b\n", 634 | "#We can just use 'c.eval()' without passing 'sess'\n", 635 | "print(c.eval())\n", 636 | "sess.close()\n", 637 | "```" 638 | ] 639 | }, 640 | { 641 | "metadata": { 642 | "id": "ldRLT6v-KatQ", 643 | "colab_type": "text" 644 | }, 645 | "cell_type": "markdown", 646 | "source": [ 647 | "## Variables \n", 648 | "When you train a model, you use variables to hold and update parameters. 
Variables are in-memory buffers containing tensors. A TensorFlow variable does not exist until it is explicitly initialized; once initialized, variables can be saved to disk during and after training, and you can later restore the saved values to exercise or analyse the model.\n" 649 | ] 650 | }, 651 | { 652 | "metadata": { 653 | "id": "IfoQbbMFKatR", 654 | "colab_type": "code", 655 | "colab": { 656 | "autoexec": { 657 | "startup": false, 658 | "wait_interval": 0 659 | }, 660 | "base_uri": "https://localhost:8080/", 661 | "height": 85 662 | }, 663 | "outputId": "8c4adc93-f2f6-4320-ff66-d6d91916f2a6", 664 | "executionInfo": { 665 | "status": "ok", 666 | "timestamp": 1528877242825, 667 | "user_tz": -120, 668 | "elapsed": 601, 669 | "user": { 670 | "displayName": "", 671 | "photoUrl": "", 672 | "userId": "" 673 | } 674 | } 675 | }, 676 | "cell_type": "code", 677 | "source": [ 678 | "W1 = tf.ones((2,2))\n", 679 | "W2 = tf.Variable(tf.zeros((2,2)),name = \"weights\")\n", 680 | "\n", 681 | "with tf.Session() as sess:\n", 682 | " print(sess.run(W1))\n", 683 | " sess.run(tf.global_variables_initializer())\n", 684 | " print(sess.run(W2))" 685 | ], 686 | "execution_count": 13, 687 | "outputs": [ 688 | { 689 | "output_type": "stream", 690 | "text": [ 691 | "[[1. 1.]\n", 692 | " [1. 1.]]\n", 693 | "[[0. 0.]\n", 694 | " [0. 0.]]\n" 695 | ], 696 | "name": "stdout" 697 | } 698 | ] 699 | }, 700 | { 701 | "metadata": { 702 | "id": "EocelWWzKatU", 703 | "colab_type": "code", 704 | "colab": { 705 | "autoexec": { 706 | "startup": false, 707 | "wait_interval": 0 708 | }, 709 | "base_uri": "https://localhost:8080/", 710 | "height": 85 711 | }, 712 | "outputId": "9fa431a9-0ea1-41db-d3e2-2f13490589b2", 713 | "executionInfo": { 714 | "status": "ok", 715 | "timestamp": 1528877243376, 716 | "user_tz": -120, 717 | "elapsed": 508, 718 | "user": { 719 | "displayName": "", 720 | "photoUrl": "", 721 | "userId": "" 722 | } 723 | } 724 | }, 725 | "cell_type": "code", 726 | "source": [ 727 | "W = tf.Variable(tf.zeros((2,2)),name = \"weights\")\n", 728 | "R = tf.Variable(tf.random_normal((2,2)),name = \"random_weights\")\n", 729 | "\n", 730 | "with tf.Session() as sess:\n", 731 | " sess.run(tf.global_variables_initializer())\n", 732 | " print(sess.run(W))\n", 733 | " print(sess.run(R))" 734 | ], 735 | "execution_count": 14, 736 | "outputs": [ 737 | { 738 | "output_type": "stream", 739 | "text": [ 740 | "[[0. 0.]\n", 741 | " [0. 
0.]]\n", 742 | "[[-0.17557839 0.9507274 ]\n", 743 | " [-0.4273468 -0.16205686]]\n" 744 | ], 745 | "name": "stdout" 746 | } 747 | ] 748 | }, 749 | { 750 | "metadata": { 751 | "id": "Y3Yxyc4eKatY", 752 | "colab_type": "text" 753 | }, 754 | "cell_type": "markdown", 755 | "source": [ 756 | "## Our first TensorFlow program" 757 | ] 758 | }, 759 | { 760 | "metadata": { 761 | "id": "ZKpjh4-QKatY", 762 | "colab_type": "code", 763 | "colab": { 764 | "autoexec": { 765 | "startup": false, 766 | "wait_interval": 0 767 | }, 768 | "base_uri": "https://localhost:8080/", 769 | "height": 34 770 | }, 771 | "outputId": "7a3c05b2-2531-4fef-8311-91a5c4ea1fac", 772 | "executionInfo": { 773 | "status": "ok", 774 | "timestamp": 1528877243905, 775 | "user_tz": -120, 776 | "elapsed": 477, 777 | "user": { 778 | "displayName": "", 779 | "photoUrl": "", 780 | "userId": "" 781 | } 782 | } 783 | }, 784 | "cell_type": "code", 785 | "source": [ 786 | "import tensorflow as tf\n", 787 | "import numpy as np\n", 788 | "\n", 789 | "sess = tf.Session()\n", 790 | "x = tf.placeholder(tf.float32, shape=(1,3))\n", 791 | "w = tf.Variable(tf.random_normal([3,3]))\n", 792 | "y = tf.matmul(x, w)\n", 793 | "\n", 794 | "out = tf.nn.relu(y)\n", 795 | "\n", 796 | "sess = tf.Session()\n", 797 | "sess.run(tf.global_variables_initializer())\n", 798 | "print sess.run(out,feed_dict={x:np.array([[1.0,2.0,3.0]])})\n", 799 | "\n", 800 | "sess.close()" 801 | ], 802 | "execution_count": 15, 803 | "outputs": [ 804 | { 805 | "output_type": "stream", 806 | "text": [ 807 | "[[0. 0. 0.]]\n" 808 | ], 809 | "name": "stdout" 810 | } 811 | ] 812 | }, 813 | { 814 | "metadata": { 815 | "id": "DgKKOIBCKatb", 816 | "colab_type": "text" 817 | }, 818 | "cell_type": "markdown", 819 | "source": [ 820 | "## Let's try to do a simple counter" 821 | ] 822 | }, 823 | { 824 | "metadata": { 825 | "id": "7f9-KHCRKatc", 826 | "colab_type": "code", 827 | "colab": { 828 | "autoexec": { 829 | "startup": false, 830 | "wait_interval": 0 831 | }, 832 | "base_uri": "https://localhost:8080/", 833 | "height": 68 834 | }, 835 | "outputId": "94bc72f7-31ed-4375-f1a1-afaa2114be9e", 836 | "executionInfo": { 837 | "status": "ok", 838 | "timestamp": 1528877244442, 839 | "user_tz": -120, 840 | "elapsed": 495, 841 | "user": { 842 | "displayName": "", 843 | "photoUrl": "", 844 | "userId": "" 845 | } 846 | } 847 | }, 848 | "cell_type": "code", 849 | "source": [ 850 | "# Python code\n", 851 | "state = 0\n", 852 | "for _ in range(3):\n", 853 | " state +=1\n", 854 | " print state" 855 | ], 856 | "execution_count": 16, 857 | "outputs": [ 858 | { 859 | "output_type": "stream", 860 | "text": [ 861 | "1\n", 862 | "2\n", 863 | "3\n" 864 | ], 865 | "name": "stdout" 866 | } 867 | ] 868 | }, 869 | { 870 | "metadata": { 871 | "id": "bxX8FfNlKatf", 872 | "colab_type": "code", 873 | "colab": { 874 | "autoexec": { 875 | "startup": false, 876 | "wait_interval": 0 877 | }, 878 | "base_uri": "https://localhost:8080/", 879 | "height": 17 880 | }, 881 | "outputId": "5ae9c28e-65a4-45fb-ac25-15bb8e8bbad3", 882 | "executionInfo": { 883 | "status": "ok", 884 | "timestamp": 1528877244968, 885 | "user_tz": -120, 886 | "elapsed": 483, 887 | "user": { 888 | "displayName": "", 889 | "photoUrl": "", 890 | "userId": "" 891 | } 892 | } 893 | }, 894 | "cell_type": "code", 895 | "source": [ 896 | "# Your tensorflow code" 897 | ], 898 | "execution_count": 17, 899 | "outputs": [] 900 | } 901 | ] 902 | } -------------------------------------------------------------------------------- /3.1_Linear_Regression.ipynb: 
-------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "3.1%20Linear%20Regression.ipynb", 7 | "version": "0.3.2", 8 | "views": {}, 9 | "default_view": {}, 10 | "provenance": [] 11 | }, 12 | "kernelspec": { 13 | "display_name": "Python 2", 14 | "language": "python", 15 | "name": "python2" 16 | } 17 | }, 18 | "cells": [ 19 | { 20 | "metadata": { 21 | "id": "L_CkNf-yMWIz", 22 | "colab_type": "text" 23 | }, 24 | "cell_type": "markdown", 25 | "source": [ 26 | "# A linear regression learning algorithm example using the TensorFlow library." 27 | ] 28 | }, 29 | { 30 | "metadata": { 31 | "id": "ssIuhJ9bMWI0", 32 | "colab_type": "text" 33 | }, 34 | "cell_type": "markdown", 35 | "source": [ 36 | "The linear regression model is one of the simplest regression models. It assumes a linear relationship between X and Y. The output equation is defined as follows:\n", 37 | "$$\hat{y} = WX + b$$\n" 38 | ] 39 | }, 40 | { 41 | "metadata": { 42 | "id": "7HlKSkOWMWI0", 43 | "colab_type": "code", 44 | "colab": { 45 | "autoexec": { 46 | "startup": false, 47 | "wait_interval": 0 48 | }, 49 | "base_uri": "https://localhost:8080/", 50 | "height": 17 51 | }, 52 | "outputId": "22f5eed6-7183-48b0-b373-a129ad8526d3", 53 | "executionInfo": { 54 | "status": "ok", 55 | "timestamp": 1528969107940, 56 | "user_tz": -120, 57 | "elapsed": 1031, 58 | "user": { 59 | "displayName": "", 60 | "photoUrl": "", 61 | "userId": "" 62 | } 63 | } 64 | }, 65 | "cell_type": "code", 66 | "source": [ 67 | "import numpy as np\n", 68 | "import matplotlib.pyplot as plt\n", 69 | "import pandas as pd\n", 70 | "%matplotlib inline" 71 | ], 72 | "execution_count": 11, 73 | "outputs": [] 74 | }, 75 | { 76 | "metadata": { 77 | "id": "YnZrCOZVMWI3", 78 | "colab_type": "code", 79 | "colab": { 80 | "autoexec": { 81 | "startup": false, 82 | "wait_interval": 0 83 | }, 84 | "base_uri": "https://localhost:8080/", 85 | "height": 17 86 | }, 87 | "outputId": "764763e6-f0a2-4b21-ad18-e422c91458e0", 88 | "executionInfo": { 89 | "status": "ok", 90 | "timestamp": 1528969108884, 91 | "user_tz": -120, 92 | "elapsed": 831, 93 | "user": { 94 | "displayName": "", 95 | "photoUrl": "", 96 | "userId": "" 97 | } 98 | } 99 | }, 100 | "cell_type": "code", 101 | "source": [ 102 | "# Training Parameters\n", 103 | "learning_rate = 1e-1\n", 104 | "training_epochs = 2000\n", 105 | "display_step = 200" 106 | ], 107 | "execution_count": 12, 108 | "outputs": [] 109 | }, 110 | { 111 | "metadata": { 112 | "id": "Hb9k9hjLMWI5", 113 | "colab_type": "code", 114 | "colab": { 115 | "autoexec": { 116 | "startup": false, 117 | "wait_interval": 0 118 | }, 119 | "base_uri": "https://localhost:8080/", 120 | "height": 299 121 | }, 122 | "outputId": "1dbf5fa5-78c5-45db-e6b3-05920efc6aef", 123 | "executionInfo": { 124 | "status": "ok", 125 | "timestamp": 1528969109995, 126 | "user_tz": -120, 127 | "elapsed": 1005, 128 | "user": { 129 | "displayName": "", 130 | "photoUrl": "", 131 | "userId": "" 132 | } 133 | } 134 | }, 135 | "cell_type": "code", 136 | "source": [ 137 | "# Load data. 
Advertising dataset from \"An Introduction to Statistical Learning\",\n", 138 | "# textbook by Gareth James, Robert Tibshirani, and Trevor Hastie \n", 139 | "import numpy as np\n", 140 | "data = pd.read_csv('https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2017/master/dataset/Advertising.csv',index_col=0, delimiter=',')\n", 141 | "train_X = data[['TV']].values \n", 142 | "\n", 143 | "train_Y = data.Sales.values \n", 144 | "train_Y = train_Y[:,np.newaxis]\n", 145 | "\n", 146 | "n_samples = train_X.shape[0]\n", 147 | "print n_samples\n", 148 | "print train_X.shape, train_Y.shape\n", 149 | "plt.plot(train_X, train_Y, 'ro', label='Original data')\n", 150 | "plt.show()" 151 | ], 152 | "execution_count": 13, 153 | "outputs": [ 154 | { 155 | "output_type": "stream", 156 | "text": [ 157 | "200\n", 158 | "(200, 1) (200, 1)\n" 159 | ], 160 | "name": "stdout" 161 | }, 162 | { 163 | "output_type": "display_data", 164 | "data": { 165 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAW8AAAD4CAYAAAAjKGdbAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJztnX+QXNV157893ajFTAYhVhNGzJBK\neZ2+MJamkmW3svxa5MUbe40Tlwp5g4qibAhB5SAHGULFyGWtEQlsoISFsU2JxYaE3Yxsj2vWuJxy\n7Zpd2TGqShHHpgQjXeLUrhNJyBJo0AiN0tL0vP2ju4fXPe++vvf9vPfN91M1NTOvX793zrvd5513\n7jnnljzPAyGEELfoy1sAQggh5tB4E0KIg9B4E0KIg9B4E0KIg9B4E0KIg1SyOMmJE6cjp7SsXt2P\nmZm5JMXJjSLpAhRLnyLpAhRLnyLpApjpMzQ0WFK9Zr3nXamU8xYhMYqkC1AsfYqkC1AsfYqkC5Cc\nPtYbb0IIIUuh8SaEEAeh8SaEEAeh8SaEEAeh8SaEEAeh8SaEEADVqUmsvuFqrFm7GqtvuBrVqcm8\nRQolkzxvQgixmerUJC7acsfi/5WDr+GiLXdgFkB946b8BAuBnjchZNnTv3tX8PYnHs9YEn1ovAkh\ny57y64eMttsAjTchZNnTqF1htN0GaLwJIcueuW33BW+/596MJdGHxpsQsuypb9yE2T1fw/zYOniV\nCubH1mF2z9eUk5U2ZKYw24QQQtA04DqZJbZkptDzJoQQA2zJTKHxJoQQA2zJTKHxJoQQA2zJTKHx\nJoQQA2zJTKHxJoSQHvizS/p378LcnVu0M1PSgsabEJIbWaTcxT1HO7ukcvA1lBoNVA6+hv5n9mDu\nnntx+stPA56HwT/4/cxTBpkqSAjJhSxS7pI4hyq7ZOChHSgfPhzr2HGg500IyYUsUu6SOIcqi6TP\nZ7ijHjsOWp63EOJRANe39n8EwO8AuArAW61dHpNSfjcVCQkhhSSLlLskztGoXYHKwddinzNpenre\nQoj3A1gnpbwawIcA7G699ICUckPrh4abEGJEFil3SZxDlV2ycNlo7GPHQSds8kMAH2v9/TaAAQDl\n1CQihFhFWpOKWaTcJXEOVd+TM/95Z+xjx6HkeZ72zkKIu9AMnzQADANYAeA4gK1SyjdV75ufb3iV\nCu09Ic6xdy+wefPS7RMTwC23JHP8Rx4BpqeBsTHggQeSOW5W50hf/pLyBV3jLYT4KIDtAH4LwL8G\n8JaU8qdCiM8AGJVSblW998SJ0/p3iC6GhgZx4sTpqG+3iiLpAhRLnyLpAiSnz+obrg6M986PrcPM\nvv2xj69DnmNTnZpE/+5dKL9+CI3aFZjbdl/sTBITfYaGBpXGW3fC8oMAPgvgQ1LKUwBe9L38AoCn\ntCQhhDiFLX088sCW7oEqdCYsVwF4DMBHpJQnW9u+JYR4T2uXDQBeTU1CQkhu2NLHQ4ekY/O2dA9U\noeN5/y6ANQC+IYRob3sWwNeFEHMA3gFwezriEULyZG7bfR3e5+J2y1aYScNLtv2po6fxllI+DeDp\ngJf+PHlxCCE2Ud+4CbNoepuLcd977rUibOAnzEuOKqsqv9uWpw5WWBKyTNENM9Q3bsLMvv148+hJ\nzOzbb53hBtLxkuOmGabdt4XGm5CC4DcWGB8PNRZBzZYu2nJHLmsxJkEasXnTdS39ZHF9abwJKQDd\nxgIHDoQaC9sn40xJq+An6lNHFteXxpuQAmBqLGyfjDMljpecBllcX7aEJaQAmBoL2yfjoqC7+nsW\nhF3fpIwuPW9CCoBpzDfPpbyyWIAhb7K4vjTehBQAU2ORV5ihaBOlKrK4vkaNqaLC3iZNiqQLUCx9\niqBLdWpyMR+7NDaG2bu3WRNGaBOlV0oRxsZPpr1NCCH244/5Dg0Nom6hwSvaRGmeMGxCSMExjTGn\nGZN2qVeK7dB4E1JgTGPMacek85woLRo03oQ4io6HbJr/nXZxiW352C7DmDchDqLbRc80xpxFTNqm\nfGyXoedNiIPoesimMeaFS9cqtg/3lCmNWHl1ahIYHy90TnhUaLwJcRBdD9k4xqxKTFMmrDVJI1a+\n+HRx4EChc8KjQuNNiIPoetSmMea+Y28oth8LlSeNWHnRmmclDY03IQ5i4lHrdMZrhzzQaAQet1cq\nX5RYea8wC3PCw6HxJsRBksza6Ah5KPbplcpnGlvXCbMwJzwcGm9CHCWpFW5U4QkP0LopVKcmUZo9\nFfiayujrhESYEx4OjTchyxxlGKJS6XlTaHvQ5SOHO7Y3RkZDjb5OSKT9dIHxcSdywrPulkjjTYgF\n5NkmNU54Qum1r7o41MiaTLjilVesXj8TyKdbIo03ITmTd5vUOOGJqJOKRQuJ5JEZQ+NNSM7knRIX\nZ/IzqtdetDL5PDJjWB5PSM7YkBIXtWR9btt9HWX6i9s1PGgbyuSrU5Po370L5dcPoVG7AnPb7osk\nUx7LytHz
JiRnlF/wctn6akITD9q25c+SDFepwkDnrrk2rphKaLwJyRnVF79UrztRDq5bBGTb8mdJ\nhqvqGzdh7s4tS4/1zJ7UdKTxJiRn2t6rV60Gvl6EcvC84/pBJB2uWvHSjwK3p6UjjTchFlDfuAmY\nnw98rQjl4DbE9btJuoIzax1pvAmxhCKXg0cpn2/HxzE+rh16MImrJ52umPX40XgTYgm25j4nMdFo\nolt3fBwHDmjFx03j6kmnK2Y9fiXP83ruJIR4FMD1aKYWPgLgZQDPAygDeAPAbVLKuur9J06c7n0S\nBUNDgzhh4SrYUSiSLkCx9LFFl+rUJPqfePzd1LV77o1kTJLSp3vFnjZRjJyubqtvuDow7W5+bB1m\n9u1XHj/q+5JER0eTsRkaGlR2Uu9pvIUQ7wdwv5Tyw0KIfwHgJwBeBPBXUspvCiEeBvBPUsqnVMeg\n8W5SJF2AYulTJF2A5PTJwyCuWbu66XF34VUqePPoSWVudq/32UJSxlsnbPJDAB9r/f02gAEAGwC8\n0Nr2HQAf0JKEEOIUupNwSeZwh8WOw0IjRZ4zCMTzPO2fWq12V61We75Wqx33bfuXtVptf9j7zp+f\n9wgpNBMTnrd+veeVy83fExN5S5QMl1/uecDSn/Hxd/eZmAjeJ+o1CDve+vVqeZKWww6UdlUr5g0A\nQoiPAtgO4LcA/L2U8pdb298L4C+klNeo3suwSZMi6QIUS584uiQZF06KuGNTnZrEwIM7UD56OPB1\nv25phFb8sePS2Bhm796mFRpJas4gTZIKm2j1NhFCfBDAZwF8SEp5SgjxjhDiQinlWQAjAI5qSUJI\nAQkrQLHNcOiguhm1aYyMduiVRn6zv+/J0NAg6i1j16uHiA39UrKiZ8xbCLEKwGMAPiKlbEf9vw/g\n5tbfNwP4XjriEWJGHv0zsi7OSFtH1c2oTd8vOhcjzjLWbGs6ZR7oeN6/C2ANgG8IIdrbPg7gGSHE\nFgA/B/Dn6YhHiD7dHmN7MmsWSNUby7KjXBY69rrpdOsVp7OgKfWNmzALWB8ayQLtmHccGPNuUiRd\nAPv0iRN7TSPm3bhsFH2/eCNWq9FudHVs6xOl5anqHG2CYvm9Ys2mcvj3L42NYXbrpwtjoDPL804C\nGu8mRdIFsE+fOHm+SUzwtY3XwvAwyoeXTvQlMYGpq+PQ0CBmn3420kSq8mY0Ooozn9sZqTjHRA4b\nJ4CTJMs8b0KcIM88X39bVG9wVeA+SXSXM9Exaic/Vdn4yb+bjmQ8TeWwsQOhjdB4k8IQazJr797E\nJgHTnMA00TGOHDo9unUxlcPGDoQ2QuNNCkPURkPVqUlg8+bEFgpI8wnARMdecmSVmWN6PZZdpWRE\naLxJoYjiMSb9mJ52OptKx+42queuvU4pR5Yr25heD6YD6kHjTZY9ST+m57EyelAb1f5n9mDuzi2B\ncmQZVza9Ht37Y3y8MJOVScJskwwpki5AfvokteJ3mygphknLEBdTHXSyVmzRcTl/b5htQgpDGo/7\npo/pNi6ma/r0oBMPt01H0gmNN3GKNB736xs3ARMT2o/1NqaymU7y9bph2agj6YTGmzhFamlkt9yi\nPdFpYyqb6dNDrzi0jTrm0bfGZrS6ChJiC1n2EbFZhm66e37426iGvUf1um065tW3xmboeROnsCGN\nzAYZgvCnEOKVVyKVsbc929KpU4H75KVjlDBO0T11Gm/iFEmn4bW/4KhUtL/geaQC+mVNwxh1T1C2\nF2FojI5mqqMK0zDOcphwZapghhRJF8AefaKmtOXRACkrWU3HRpVq2BgZxcmfTGsfJw2GhgYxP7bO\nKBXShpXkVTBVkBDE87CyzqiwWdbyoWADXT5y2Apv1TRUZeOEa9LQeBOniWPU4nzBo4Qw8pK1F9Wp\nSZRCnsBtSA80DVUth/4oNN4kNbKYMIpj1KJ+waN60HnIqkOvZc9s8VZN+tbYOqmcJDTeJBWymjCK\nY9SifsGjetBpyRp4kzRocWu67JkL5DWpnCU03iQVsoonx/Gw/F9wGHzBo3rQScnqN0YAAm+SJi1u\nexlnV73VJHuS2wizTTKkSLoA4frEWZLMlF7rJ+pgMjZxMhmSkFVHliBU8iW97FnSLKfvTcC+ymwT\nVliSVMiyQi+sUjAN4qyWnrSsJvFo1b5ckd1NGDYhqVDkCSOb4qkmN8OwfYseYoiC7RWa9LxJKrS9\nuYGHdqCvtZL6wmWj+QqVIFl7+ypUTwGB+xbgxpkVLvRSoedNUqV8+DBKAEoAykcPG2Wc2O752IBy\nIrNHi9s8r60L46qacB/YucMa2TlhmSFF0gXorU/cib0sS9eX09jk0RagzcD2+9H/zB6jc+cxNqoJ\n9yBMrxvL44n1xClK4WIA6XmoeV3b6tRkoOHO4tymmMwl5CU7jTdJjThFKUmUg7vweN7NosyXrkqt\nyCnpUnvd6xxWyWlLFWcb1YR7EHnJTuNNlMQ1fnEyTuKWg7vYErRDZkU4MwkvL8lSe5PrHGbkbKvi\nDJpLaIwET7jnJTuNNwkkCeMXNaWuOjWJ0my8xQBcDLv06jECNA1gnjfVbkyuc5iRszETpjt98syO\nnYH75SW7VqqgEGIdgG8D+IKU8ktCiOcAXAXgrdYuj0kpv5uOiCQPwr6UJpMzpil1ymq/kVGc2aFf\n7ediS1Ad2RaGh2OnsCVZlGNynZXFTXdusSb9Lgzbipl6et5CiAEATwJ4seulB6SUG1o/NNwFIy/j\np7ppeKsuNvqSKL28cjmX0ImOt6zz+F06Mxe43fSJIqmiHJMQjOpJ7MzDj0U6d9YsLqQhD6LxayL3\nKlSdsEkdwIcBHE1ZFmIRefVDTuqmoQoNlOr1zGPfuiEonUmyvpngvjB5PVGcu/a6wO1hq9a7UMnZ\nfbMd2H6/dXMoPY23lHJeSnk24KWtQoj/LYTYK4RYk4JsxJAksyvyKm9P6qbR9vK8ajXw9Sxj37px\n4W7P1KtcoH2OPCbNVKl/roRBVATdbK1McfQ8T+unVqt9vlarbW39fWOtVvv11t+fqdVqXwp77/nz\n8x5JmYkJzwOW/vT1ed769c3XoxxzfNzzKpXm7yjHiHLOID2inrtcDj5eudy8Lu3faeqmkqFSUb9H\ndR1UP1mMTTfr1wfLMj6evSxJotLLdAyTQWlXI/U2kVL6498vAHgqbP+ZmeA4nQ5FqnxLU5fVO/8k\neDAXFoADB4DNmzE7e9bMI7rxpuaPH5/8qehz402o7vna0kmhG2/qOLcuqxXdDdFoNK8LEPn66Cwm\nXJ2axGClElitN1+7AjMKnZTjidaK7hddHHp9svjerJmeRlD5nzc9jTcTPHfWNkClVxBhY6jCsMJS\n+VqkVEEhxLeEEO9p/bsBwKtRjkOSQ6tq0eI0OT9JxkVNii1Mro9OHLu9T6leD5YtJAQVNp5nPrfT\nirhxUdeJNJE/zxRHnWyTq4QQ+wB8AsA9rb//EsDXhRA/AHATgAdTl
JFoEKdq0QbSqoYMynDwSsF+\nlcn10YljKzNnqit75rsrDePIqDXx5KK2/VXqdecWK9oAt+kZNpFS/hhN77qbbyUuDYmMTmvQNDwi\nndCBzjHSbL/ZnWuuaphlcn10smKUN4P6Py8adpV+qvFUFYrkgW15z0kRpteZvIXzwX7eBaHjA3do\nGqWFhSX7JO4R7d2biNFNqiBIlzgr4bTRWSlItU8Jva+VK4bRlr7mSeOCXiyPLxCLseJjb2ez0svD\nDwduNo2tZ10QFBRKwcSE0fXRCRnoxNvDrpUrOdEkH+h5F5RMPIfp6cDNpkY3y/Uu23Rfn6GhQaNs\nFh3PuGOf6VcDMxhsnocgdkPP2zGsanM6Nha42dToujrxpeMZt/dpXPm+wGO4nplB8oPG2yGsa3O6\nfXvgZlOjm/SCvlbd4Fq4eoNSEXaNbbz+RYTLoGVIXF3iLCvWTRJZIkNDg5h9+lmrJtWiLvGVxees\nOjWZ2bVKU5+wawwg8SXWimQDgOSWQaPxzpC4uqjW1fMqFbx5NLhhURBJrWGY5NjEvZksvv/ga4Gx\n5V43uCJ9zoCUq3lDnAh4Xk8Hw3Ssl/PYhBlvTlg6RFITe1mn5vUibp636mbkhxODyRGaHaRwBtvv\nSTunfznBmLdDJBU3VX75pl/NJUYZZdUbf1x18A8/2fMctk4MZhkfTupcYWXxvUrmXVzhyFZovB0i\nqYk91RfMXzySpQE3zfPunrhV9Q7xY8vEoN+AXvIbY5lNQCc52R3mRPRyMFxc4agXeU3Q0ng7Rlh6\nmu6HKG7xSNKYNjjSWesRADwg9R4UJl/cbgNaPnI4cL80rn2SHm+YE9HLwciymVUWRjXPDDBOWGZI\nErqoJntMJyEXMx8UxSM6k6BJjY2p7KqJW933BxFFF1O5VRN93ZhOQAfRrU9Sk91RWfzcHppGKcDm\nhI1VFmMTlSgZYElNWNLzdoiwu7ypZ5VF8Yiu52MaDlLJ5lVXZtrxzfSa64YG0vBC82zf2vG59Rlu\nr6+c2lhlFVvPMwxE4+0QYR/IqB+itIpHTB8nTfp4qGQ+/cWv4M2jJzF3z73o370r9Rik6TXXNZRp\nxOejjHNSYQdlmGuhocxOiUtWRjXPmyKNt0OEfSCjfoiSrm5sk6bnEyZzljFI02uuMqCNkVFrekS3\nSfI6qj63aU6QZ2VU86ycpfF2iLAPpOpDdO6aa3seN43udWl7PiqZs0xFM/3iqm46J38ynXrnQNPr\nkuR11DGYSY5PdWoSpVOnAl9L2qim5fzoQOPtEGHGor5xE+bu3LLktf5n9uTSW0L5hZ2fX3wETyMb\nIMsYZJQvbl5tXk2vSxLXsT2+ZXkwsnymtJ8Yykc7M3kao6OpGdW8xpTG2wKSmthb8dKPAt838NCO\n1GRXobrR+B+V0whvZB2D9H9xF2Ptl67CmsuHsGb44kzyfnU+P6bXRXm9ymXztEjfwiCqCHdS46Nc\neu6ii3saVdcaatF4p4z/A4Hx8SUfiCQn9pTe0uHDmX8QO240Bu8b3HJHrC9OXjHI7oyKUr2O0sJC\n6nm/qs8P9u7t2M/0uihvvvV6x3ku+Y2xQN1URnRhdNRIDlOiPjFY17FTAxrvFOn+QODAgSUfiKxi\ni1kV3fhvVv27dzW/lOWy9vvjTmLlFYPsVTiU1vVXnveRRzr+Nb0u3ft71WrgfuUjhwPHSmUs+44d\nS3V8oj55uVi2zyKdFNFJ4I9SPKEq1BnYfj/6n9kT+J44xRiq83WPjaowonHZ6JIYpA5RWt1GJa2O\nj23SKoYJO+/8le+L1Oo38DyXrgosrlk8V9dYJdm+2GRsohbnZFnExCIdB9B5hDP1FFSPd2GGO+x4\nvTB5nFR6gRE/ZTqTWLbEKXtd37Ri7mHHTfTRf8WK0Je7xyqv8FXUJ68887WjQuOdIjofCNMPucpA\nXvj8c6GyRP3SmDxOmj4q94qJ9/ri2BSn7NUvJi2jlVmfmvPnQ1/uHqs8U+iiZH+4uNIRjXeK6Hwg\nTD/kSm9U0VnPQ7x+DiYTQGE3q6CsjME/+H3A83A2IMURWPrF6fayB3YGZ9HkEafsGMe+vmapforl\n34HnVeyTRBpeQ1wZ+nqQkcsrhS4Ked5sosKYtyHVqUkMPLgDfe0YbuUCYKGBhrgyML7oX/qqNDaG\n2bu3xfpAqGKJXqkUGJOMGzcOi11WXjugFfP2fwlU+8zduQUr9r+kXCJMZ8GFNlHilLZ9zqKQZJy5\nG+V8xsgozuzYmaqRK8LY+GHMOwf8BQAlNDMjSvPnQ1PC/N4HXnkl9odcmcKluAnHfexTnu/tt4FK\npSPOrOO9qMIwK/a/FOql6baBBeyOU6ZJmo/+YdWhNnunRYaetwE6LT2TagUZht+bR7kcuBiBV12J\n01/8ilZhQv/uXc0quAsuAM6dQ+OKsY6nCP/5FoaHUT68NHNE9xEz6qy+bhtYE1n82PQ5i8PA9vtx\n4fPPoVSvw6tWcfa2T+DMw4/lLVYsijI2beh554BO7LB86GDq2Q9tb/70l59WxrrRmNcy3P4quFK9\njlJrAVn/U4T/6cEbXBV4rIGdO7T0jjqrr3zfyGhqcUpbMll0qU5Nov+ZPYs381K9nlt7BJI+NN4G\n6DyOlxYamS5rpbotazUDilBYopzAPHJYS++oj/aq953ZsTMw3DKw/f5mifovX4Q1lw9hYPv9ocfv\nxqZMFl1cLDQh0aHxNkAnLSuILJe1aqMT5+z1JGGSURJEkN5RZ/VN3tfOeS/V6815iZYHamLAXTSE\nRVwfkqjRinkLIdYB+DaAL0gpvySEuBzA8wDKAN4AcJuUUrkKbFFi3kAr22TnDvS11x+84AKgsYCF\ny9ai7/Dh0CXFktRFGTsGcFozdNArhh8Uv0876yMJ1lw+pJwHePOfjndsa8f8K68fwryvejTvZcOi\nkGa2SZ7YZgPiklnMWwgxAOBJAC/6Nu8E8GUp5fUAfgZA79tcAOobNzX7Lx+fbf4ceQunn/qvKCsM\nNwAsXDqcuBwqD3hhZFR7FZkohSV+DxgtD7gxEtxsKLesD9U8QP2fO/71h0bQFRpxseLOxUITEh2d\nsEkdwIcBHPVt2wDghdbf3wHwgWTFcoueaWzKe2d0VF9U3dgz0F1YUm4VlvRpNS2a2bcfOH8eM/v2\n48yOncEy5mU0FE2UUF3Z8W9YaMRFQxh0Y7W90IREp9JrBynlPIB5IYR/84AvTHIcwNoUZHOGXjHF\nvmPHtI7jbwC1MLwW8IC+X7zRsVJOR4OorsKW0qm3UT6yNI2v/4nHQw2x6rWe8tx1++IxZvHuWppB\nRTZZcva2TwT2eTl728c7/g+LEdumky7t8RwaGsRMgUINJADP87R+arXa52u12tbW38d9299bq9X2\nh733/Pl5z1omJjxv/XrPK5ebvycmwrcHsX695zWXUg3+GR/XkyPsGKofv1zlcvA+lUq062Jybtv4\n1Kc8b+XKppwrVzb/70Y1
bjrjRUg2KO1qT89bwTtCiAullGcBjKAzpLKEmZm5iKdJd7JiyeTbgQPA\n5s2Ye3Ffp+fW2j47ezY4u+E3r0H/gQPK88zevQ31E6dDdVm9808iDcb8Q3+KmRtvah6jdkXghFVj\neBgnDa+hljx//Mc40Tq3dXzuT5s/frquQXXrp4PL+Vvj5TJFmuQrki6A8YSl8rWoqYLfB3Bz6++b\nAXwv4nFSJ6zQwrRDnypNTLX8mFddqR1zjJrOVZ5+dVEnZRw8wko6WvL84z8aHdM2GCMmLtMzVVAI\ncRWAXQB+FcB5AEcA3ArgOQArAfwcwO1SSmXPyLxSBcMaJQHNJbcCU/sQPMeoShPTTSsL9bw1Su/D\naBudS359LHDhA9N0MV15ThyfNZLTVpazd2c7RdIFSC5VUGfC8sdoZpd08x+0zp4jKs96YOeOwIm9\nRarVwHSzsPSxwHCFQVrZ3Lb7tPOng2hPSvb94o3A1009ey15fuVXjI5JCEmOQldYKhcHCDPcaGYr\nBGG6WKtJWll3BWFjdBSNkdElixf06tmcVH5yfeMmzCn6bC/yZ39mdExCSHIU2nibGiwPzb7YK176\nEebu3BJ5sdaosVN/A6iTfzfdLAby9eyob9yExpXvC9VVGfc+NG3cXOnMw4+F5oHjlluM9COEJEfU\nbBMnOHftdYHhjIWR0cCwSQkAWl31KgdfMzLAYfnSYagW91Xud2g68DhtL78jP/nQNEoLC03dfD3H\nZ1v76crCCTxC7KOwnne7PWY3c3duUVYEdpN2EyLdznUd+/kmmFXLbLU9eNXSVUF6udhFj5DlTGGN\nd9iKLe0wh6onR5ukurG10xW7V57R7Vyn2q9xxZWhawOadJlzsYseIcuZwhrvXoarvnETvIuCFxZo\nk0QTorDmR7rGNWqrT5PJS7YTJcQtCmu8dQxXL8OURBMilUc7+IefBBTLenXLHjWDxCQLxsUueoQs\nZwprvFWG69w11y7+rTJMJpWRvVDdINoLBQTRbVyjpiKaZMG42EWPkOVMYY23Kk/Zv6afymDpLNyr\ni67n6gFK4xonFdGffhgWH08q3ZEQkg2FXj1eZ2UR/8roabT91F15xuYVWlQUqWy5SLoAxdKnSLoA\nXD1eC+UknK+Zk65n6sdkVfHu5keeYqEAxpYJISYU2nirDGIJiJzDHCUf2r/yzOkvPhW4D2PLhBAT\nCm28e63RGCWHOW4+NGPLhJAkcLo8vmOZrkvXAiWg79gbHaXds1C3fi2/fqjnUl/dRjWJfGiWnBNC\n4uKs8e6eCPT3sO7u4dG/e1dwj5Ph4c5jHFEfo00S7V8JISQuzoZNeq7YjndDGeeuvS54hwWN83SF\nQ5gPTQixAWeNt06YYjEsomhQpVq4IOw8jFkTQmzA2bCJKnzRvU9YgyrdY3TDmDUhJG+c9byVoRAf\nc/fcq+x/XT74Ws9slPYxCCHENpw03qpQyMIllywNZaxYEXiMdl/ssKXFvL4+etiEECtxMmyiCoUs\nDF+GmUP/r3PjeeWi9uh/4nHM7NuvzEZpXDEWR0xCCEkNJz1vk1xr1Woy/v2ZQUIIcQ0njbdJ7+mw\nuHZ7f2aQEEJcw8mwydy2+wI79ak85YXVl6BvZmnHPv/+zCAhhLiEk563rqfcrsLsNtyNkVF61oQQ\np3HS8wbeLVnv370LZXlwcRIzuzj1AAAG90lEQVTTb5BVE5veqotpuAkhTuOs8e7ubdLuRTL38t9g\nxUs/ak5GKtaI5KK6hBDXcdJ4V6cmmwv4BhCU/90Nm0gRQlzHOeM9sP1+LQMdBlMACSGu49SEpaqy\nshcewBRAQkihiOR5CyE2APgmgHZZ4gEp5aeSEkqFThvYIBq+BYcJIaQIxAmb/EBKmakLGzbR6AGB\nq+UADJMQQoqHU2GT0InGygXB7xkdZZiEEFI4Sp6n6qmnphU2+QqAnwG4BMCDUsr/pdp/fr7hVSrl\nqDK+y969wObNZu+ZmABuuSX+uQkhJHtUAYXIxnsEwHUAvgHgPQD+D4D3SinPBe1/4sRp85O0GBoa\nxIkTpxf/r05Nov+Jx1GefjVQK6+6EmjMNxcQvudeq7zubl1cp0j6FEkXoFj6FEkXwEyfoaFBpfGO\nFPOWUh4B8PXWv/8ghDgGYATA/41yPBPaPUjWrF0dXITTmMebR5f2MSGEkCIRKeYthLhVCPFHrb+H\nAVwK4EiSgvXCpLMgIYQUjagTli8AuEEI8dcAvg3gk6qQSVqwBzchZDkTNWxyGsBvJyyLEfWNmzCL\n5mo45dcPWRnjJoSQtHCuPN4Pe3ATQpYrTuV5E0IIaULjTQghDkLjTQghDkLjTQghDuKM8a5OTWL1\nDVdjzdrVWH3D1ahOTeYtEiGE5IYT2SaqJc9mAWabEEKWJU543qo+3v1PPJ6xJIQQYgdOGG9VH28u\nJEwIWa44YbzZx4QQQjpxwnizjwkhhHTihPGub9yE2T1fw/zYOi4kTAghcCTbBGAfE0II8eOE500I\nIaQTGm9CCHEQGm9CCHEQGm9CCHEQGm9CCHEQq413dWoSGB9nMypCCOnC2lRBfzOqEtiMihBC/Fjr\nebMZFSGEqLHWeLMZFSGEqLHWeLMZFSGEqLHWeLMZFSGEqLHWeLebUWF8nM2oCCGkC2uzTYBWVsld\nt+PNE6fzFoUQQqzCWs+bEEKIGhpvQghxEBpvQghxEBpvQghxEBpvQghxkJLneXnLQAghxBB63oQQ\n4iA03oQQ4iA03oQQ4iA03oQQ4iA03oQQ4iA03oQQ4iA03oQQ4iBWdxUUQnwBwL8F4AG4R0r5cs4i\naSOE2ADgmwBea206AOBRAM8DKAN4A8BtUsp6LgJqIoRYB+DbAL4gpfySEOJyBOgghLgVwDYACwCe\nllJ+NTehQwjQ5zkAVwF4q7XLY1LK77qgjxDiUQDXo/k9fgTAy3B0bAJ0+R24Oy79AJ4DcCmAlQAe\nAvAKEh4baz1vIcQNAH5NSnk1gN8D8MWcRYrCD6SUG1o/nwKwE8CXpZTXA/gZgDvyFS8cIcQAgCcB\nvOjbvESH1n47AHwAwAYAnxZCXJKxuD1R6AMAD/jG6bsu6COEeD+Ada3vx4cA7IajY6PQBXBwXFr8\nNoC/lVLeAOA/AXgcKYyNtcYbwI0A/gcASCkPAlgthLgoX5FiswHAC62/v4PmoNlMHcCHARz1bduA\npTr8JoCXpZSnpJRnAbwE4NoM5dQlSJ8gXNDnhwA+1vr7bQADcHdsgnQpB+zngi6QUn5dSvlo69/L\nARxGCmNjc9hkGMCPff+faG2bzUecSIwJIV4AcAmABwEM+MIkxwGszU0yDaSU8wDmhRD+zUE6DKM5\nPujabhUKfQBgqxDiXjTl3goH9JFSNgCcaf37ewD+CsAHXRwbhS4NODgufoQQ+wGMAvgIgO8nPTY2\ne97dlPIWwJC/R9NgfxTAxwF8FZ03S9f0CUKlg0u6PQ/gM1LKfw/gpwA+H7CPtfoII
T6KpsHb2vWS\nc2PTpYvT4wIAUspr0Izd/zd0yprI2NhsvI+ieWdqcxmagX4nkFIeaT0+eVLKfwBwDM3Qz4WtXUbQ\n+/HdRt4J0KF7rJzRTUr5opTyp61/XwCwHo7oI4T4IIDPAviPUspTcHhsunVxfFyuak3so6VDBcDp\npMfGZuP9PwFsAgAhxL8CcFRK6cxilkKIW4UQf9T6exjNmednAdzc2uVmAN/LSbw4fB9LdfgbAP9G\nCHGxEOKX0Izb/XVO8hkhhPiWEOI9rX83AHgVDugjhFgF4DEAH5FSnmxtdnJsgnRxdVxa/DsA9wGA\nEOJSAL+EFMbG6pawQoj/guaFWABwt5TylZxF0kYIMQjgLwFcDGAFmiGUnwD4CzTTh34O4HYp5fnc\nhOyBEOIqALsA/CqA8wCOALgVzTSoDh2EEJsA3I9mWueTUsr/nofMYSj0eRLAZwDMAXgHTX2O266P\nEOIuNEMJr/s2fxzAM3BsbBS6PItm+MSpcQGAlof9VTQnKy9E87v/twj47sfRx2rjTQghJBibwyaE\nEEIU0HgTQoiD0HgTQoiD0HgTQoiD0HgTQoiD0HgTQoiD0HgTQoiD/H89X5cMXUfAuAAAAABJRU5E\nrkJggg==\n", 166 | "text/plain": [ 167 | "" 168 | ] 169 | }, 170 | "metadata": { 171 | "tags": [] 172 | } 173 | } 174 | ] 175 | }, 176 | { 177 | "metadata": { 178 | "id": "fXYH9klJMWI9", 179 | "colab_type": "code", 180 | "colab": { 181 | "autoexec": { 182 | "startup": false, 183 | "wait_interval": 0 184 | }, 185 | "base_uri": "https://localhost:8080/", 186 | "height": 17 187 | }, 188 | "outputId": "e4b2664d-be04-41ab-cb53-3060ebb9ee41", 189 | "executionInfo": { 190 | "status": "ok", 191 | "timestamp": 1528969111093, 192 | "user_tz": -120, 193 | "elapsed": 1012, 194 | "user": { 195 | "displayName": "", 196 | "photoUrl": "", 197 | "userId": "" 198 | } 199 | } 200 | }, 201 | "cell_type": "code", 202 | "source": [ 203 | "import tensorflow as tf\n", 204 | "# Define tf Graph Inputs\n", 205 | "X = tf.placeholder(\"float\",[None,1])\n", 206 | "y = tf.placeholder(\"float\",[None,1])\n", 207 | "\n", 208 | "# Create Model variables \n", 209 | "# Set model weights\n", 210 | "W = tf.Variable(np.random.randn(), name=\"weight\")\n", 211 | "b = tf.Variable(np.random.randn(), name=\"bias\")\n", 212 | "\n", 213 | "# Construct a linear model\n", 214 | "y_pred = tf.add(tf.multiply(X, W), b)\n", 215 | "\n", 216 | "# Minimize the squared errors\n", 217 | "cost = tf.reduce_sum(tf.pow(y_pred-y,2))/(n_samples) #L2 loss\n", 218 | "\n", 219 | "# Define the optimizer\n", 220 | "optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost) #Gradient descent" 221 | ], 222 | "execution_count": 14, 223 | "outputs": [] 224 | }, 225 | { 226 | "metadata": { 227 | "id": "q1ugxGZhMWI_", 228 | "colab_type": "code", 229 | "colab": { 230 | "autoexec": { 231 | "startup": false, 232 | "wait_interval": 0 233 | }, 234 | "base_uri": "https://localhost:8080/", 235 | "height": 469 236 | }, 237 | "outputId": "f9e499a0-bf05-4ef6-c35b-95043f4d98a6", 238 | "executionInfo": { 239 | "status": "ok", 240 | "timestamp": 1528969113930, 241 | "user_tz": -120, 242 | "elapsed": 2718, 243 | "user": { 244 | "displayName": "", 245 | "photoUrl": "", 246 | "userId": "" 247 | } 248 | } 249 | }, 250 | "cell_type": "code", 251 | "source": [ 252 | "# Initializing the variables\n", 253 | "init = tf.global_variables_initializer()\n", 254 | "# Launch the graph\n", 255 | "with tf.Session() as sess:\n", 256 | " sess.run(init)\n", 257 | "\n", 258 | " # Fit all training data\n", 259 | " for epoch in range(training_epochs):\n", 260 | " sess.run(optimizer, feed_dict={X: train_X, y: train_Y}) \n", 261 | "\n", 262 | " #Display logs per epoch step\n", 263 | " if epoch % display_step == 0:\n", 264 | " print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \\\n", 265 | " \"{:.9f}\".format(sess.run(cost, feed_dict={X: train_X, y:train_Y})), \\\n", 266 | " \"W=\", sess.run(W), \"b=\", sess.run(b)\n", 267 | "\n", 268 | " print \"Optimization Finished!\"\n", 269 | " print \"cost=\", sess.run(cost, feed_dict={X: train_X, y: train_Y}), \\\n", 270 | " 
\"W=\", sess.run(W), \"b=\", sess.run(b)\n", 271 | "\n", 272 | " #Graphic display\n", 273 | " plt.plot(train_X, train_Y, 'ro', label='Original data')\n", 274 | " plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')\n", 275 | " plt.legend()\n", 276 | " plt.show()" 277 | ], 278 | "execution_count": 15, 279 | "outputs": [ 280 | { 281 | "output_type": "stream", 282 | "text": [ 283 | "Epoch: 0001 cost= 9454.903320312 W= -0.49233034 b= 0.93602836\n", 284 | "Epoch: 0201 cost= 15.213056564 W= 0.06944617 b= 2.724759\n", 285 | "Epoch: 0401 cost= 12.311044693 W= 0.061090976 b= 4.367986\n", 286 | "Epoch: 0601 cost= 10.993334770 W= 0.054544155 b= 5.6550035\n", 287 | "Epoch: 0801 cost= 10.603803635 W= 0.05058813 b= 6.4327087\n", 288 | "Epoch: 1001 cost= 10.524839401 W= 0.048652414 b= 6.8132467\n", 289 | "Epoch: 1201 cost= 10.513781548 W= 0.04787619 b= 6.9658427\n", 290 | "Epoch: 1401 cost= 10.512723923 W= 0.047621492 b= 7.0159125\n", 291 | "Epoch: 1601 cost= 10.512657166 W= 0.04755375 b= 7.02923\n", 292 | "Epoch: 1801 cost= 10.512653351 W= 0.047539372 b= 7.0320573\n", 293 | "Optimization Finished!\n", 294 | "cost= 10.512653 W= 0.04753697 b= 7.0325284\n" 295 | ], 296 | "name": "stdout" 297 | }, 298 | { 299 | "output_type": "display_data", 300 | "data": { 301 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAW8AAAD4CAYAAAAjKGdbAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4yLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvNQv5yAAAIABJREFUeJztnXl4FGXSwH+TGQgEAQNEAgRUxDRG\niAfr4g3e94GwCrqsEhE8WERdvFDXWxRBUGTFA3RdBTUa0XXVXWXxgHXVT7lJsyj3IVdCgGDO+f6Y\nmTBH90z3TM9M91C/5+Eh02dV90x1db1V9bq8Xi+CIAiCs8hKtwCCIAiCecR4C4IgOBAx3oIgCA5E\njLcgCIIDEeMtCILgQDypOMm2bbvjTmnJzc2hoqLaSnHSRibpApmlTybpApmlTybpAub0yctr7dJb\nZ3vP2+Nxp1sEy8gkXSCz9MkkXSCz9MkkXcA6fWxvvAVBEIRIxHgLgiA4EDHegiAIDkSMtyAIggMR\n4y0IguBAxHgLgiAA2WWl5PY7iQ6dcsntdxLZZaXpFikqKcnzFgRBsDPZZaW0GVnS9NmzYhltRpZQ\nBdQMGJQ+waIgnrcgCAc8OZMnai+fMinFkhjngDbeGzdu4M47b2P48D9QUnINzzzzFDU1v0Zs9803\nCyiL8gr1+uuvsnTpYlPn/sc/PmTq1Mm666urqxk06JKox5g373NT5xQEQRv3ynJTy+2AY4y31fGo\nxsZGxo27kyuvHMLLL/+VGTPeID+/M0899VjEtieeeDIDorw6DR16Hb16FSckj1k2b97EZ599mtJz\nCkKm0lDY09RyO+CImHcy4lHffvsNXbt24ze/+W3TssGDr2HIkIFUVOxk2rRn8XiaUVVVySmnnM7P\nP//EqFFjmDx5AkuWLObww7uzbt1aHnrocWbMeJH+/c9i165KFi9eSGVlBevWreXqq4dy8cWX889/\nfkxp6VtkZzejoOAw7rprnKZMe/fuYdy4O6mtraW4+Nim5YH93e4sDjvsCO66axyTJj3JihXLmDnz\nJS666FIeeeQBAOrr67nvvofo0qUgrusiCAci1WPuCLExTctvvT0N0hjDEZ53MuJR69atobBQCVnm\ncrno3v0I1q9fB0CbNm147LEJTet/+mkVixcv5KWXXmPIkKGo6oqI4/700yoee2wCTzwxkdLStwHY\nt28fEyc+x+zZs1m3bg0//bRKU6ZPP/2Y7t2PYNq0lznyyMKm5YH9//KXGU37DxkylGOPPZ5hw25g\nx47tDBt2A889N52LLrqU9957J+7rIggHIjUDBlE1fQb1Rb3wejzUF/WiavoMXefQDpkpjvC8kxOP\nctHQ0BCx1Ov1kpXlaxxTVHR0yLo1a1ZTVNSbrKwsjjiiB/n5nSL279WrGLfbTV7eIezduwfwPQTu\nuecOmjVzs3btanbtqtSUaM2anzn22D4AHHdcn6blgf0Bzf3btWvP5MlP88or09m9uwpFOcroRRAE\nwU/NgEGG3uTtkpniCM87GfGoQw89LMJz9nq9rF79M926dQPA42kWtpeXrKz9HRpdrshujW73/o5h\nXq+Xuro6Jk16ioceepy//e1vFBX10pXJ66Xp+I2Nvi66wftPnfqi5v6vvDKdvn1P5PnnX2LYsBui\nKy4IQkLYJTPFEca7eswd2ssTiEedcEJfNm3axH/+83XTsrfeeoNjjjmWNm3aau7TpUsBqlqO1+tl\nzZrVbNmyOeZ5qqv34na7ad++A5s3b6a8fAX19fWa23brdijl5b4Hyg8/fB+x/y+/bGnaPysrq+nN\nobKyki5dCvB6vXz99RfU1dWZuhaCIBjHLpkpjjDeZuNRRsjKymLSpOf44IMyrr9+KCUl17B27VrG\njBmru0/PnkV07dqNESOu5e233+Sww7qTlRX9ErZtezAnnNCX4cP/wNSpU7n66qE8++wkTQN+/vkX\nsWzZEm699SbWr1+Ly+UK2X/mzJea9j/00MNR1XKefXYil112Bc88M4E77hjNWWedx8KFP/Dtt9/E\nfW0EQdDHLpkpLq837kluDJPITDp5ea3Ztm23leLETW1tLZ9//k8uuOBi9u3bxzXXDOLtt+fg8Rgb\nOrCTLlaQSfpkki6QWfrYTZfwmHcAow6lGX2izaTjiAFLu9C8eXPKy5dTWvoWWVkuhg+/
0bDhFgTB\nuWSXlZIzeSLuleU0FPakevhImi+Yv//zrbenvIxeLI9JbrvtznSLIAgZQ4RRHHOH5UYw0XNoZZd4\nViyjavoMwDeA2frmG8iZPDEp8ushxlsQhLSQipQ7K86hl13S6pEHcG/YkNCxE8ERA5aCIGQeqUi5\ns+IcelkkWUGGO95jJ4Ihz1tRlKeA0/zbPwFcCvQBdvg3maCq6kdJkVAQhIwkFSl3VpyjobAnnhXL\nEj6n1cT0vBVFOQPoparqScD5QKAV3j2qqvb3/xPDLQiCKVKRcmfFOfTqTBo7a/cPSlXKoJGwyZfA\n7/x/VwKtALf+5s5g8+ZNnHPO6YwaNaLp35QpE/nf/1ReeWU6QEjBy7///ZnhY8+f/xWPPfZgyLJ/\n/ONDnnzySXbs2K7ZuVAQ7Eqy+ngko/guGefQqzPZ++eHEz52Qni9XsP/CgsLRxQWFr5eWFj4amFh\n4SeFhYVzCwsLZxcWFnaItl9dXb3Xbqxfv947YMCAqNv8/ve/9+7Zs8fr9XpjbhvM3LlzvXfddVfI\nsnfffdc7fvx484IKQjqZNcvr9XVuCP03a5Z1xy8u9no9Ht//Vh03VedIvvy6dtVwtomiKJcB1wPn\nAr8BdqiqulBRlLuBB4FRevtWVFTH/XBJVoL+zp17qa9vjDj2Dz98z3vvvc2pp/Zj4cKFXHddCSef\nfCrl5eXccMONPP74BKZPf57FixfS2NjAFVdcyTnnnM9PP63i0UcfoE2btnTuXEB9fV3IsXfv9k3y\nsHixyn333cUrr7zOVVddzmWXXcH8+V9RW1vLlCnTyM5uwVNPPcamTRupr69n+PAb6dPnBMv1twK7\nFU8kQibpAtbpk/vwo5pGov6Rx6g466KEj89ZF/n+BRMmd8K6GDiHHjHTDOM4tskiHd11RgcszwPG\nAeerqroLCJ7C5QPgL4Yk0eHtuav4rnyr5jq320VDg/kCzRN6HsKVZ/aIW6bzz7+Il19+gaeffpac\nnBxmzXqdxx+fwKJFP/LLL1t4/vmXqK2tpaTk95x+en9effVlSkpGcNpp/Xn66SfQaV8SQkNDA926\nHcbVV/+BP//5Hr7//juqq/fSvn0H7rnnASorK7n11ht57bXZceshCIlglz4e6cAu3QP1iGm8FUVp\nC0wAzlZVdad/2bvAWFVVfwb6A0uTKWSyWLduLaNGjWj6fMIJfend+5io+yxZsohly5Y07ef1NrJ9\n+3bWrPmZXr18+x53XB+++WaBIRmOOeY4APLyOrJ37x6WLVvCokU/snjxQgBqamqoq6ujWbPwDoeC\nkHz0Mi3sOMOM1QU/0dIMHWG8gauADsDbitI0ecFM4C1FUaqBPcCwRIS48sweul5yMl9nu3U7lKlT\nXwxZFujmp0ezZs24+OLLGDo0VOXQdq6NhmUIbyHr8TTjD38o4Zxzzjd8DEFIFk6ZYSYZXrLd3zpi\nZpuoqvqiqqqdg9IC+6uq+pqqqieoqtpPVdWLVFXVjnk4HJdrf9vVQH/toqJezJ//FY2NjdTU1PDM\nM08B4e1c/y/ucxYV9eLrr78AoKJiJ9OnP5+ICoKQEMno6JkMklHwY5fugXpIhWUUjjvueG6++Xoq\nKyspLFS44YY/0Lv3MRx3XB9GjhzGqFE3NM1ac+211zNt2rP86U+jadYs/q4DZ555Ni1b5nDjjSXc\needtIXNZCoKVGE0BrBkwiIp5C9i+aScV8xbYznBDcrzkRNMMkz1VmrSETSGZpAtklj6ZoEtwzNdV\nVETVqNuizsGYSFvTVGLk3uT2O0kzNl9f1IuKecbGn7TILislZ8ok090Do13fNiOGWdISVjxvQcgA\nAsbCs2IZroYGWLKENiNLdL09u0zlZRXJKviJ960jFddXjLcgZABmjYXdB+PMYrfYfCqur7SEFYQM\nwKyxcFIKoFGMzv6eCqJdX6uMrnjegpABmM2MSEVfET2SPZBnB1JxfcV4C0IGYNZYpCvMEB6bD+Rj\nZ5oBT8X1lWyTFJJJukBm6ZMJugRnRriKiqi6ZYxtwggB4skKyYR7E4xMQCwIQgjBMd+8vNbU2NDg\nZdpAaTqRsIkgZDhmY8zJjEnbvWrRSYjxFoQMxmyMOdkx6XQOlGYaYrwFwaEY8ZDN5n8nu7jEbvnY\nTkZi3oLgQIx20TMbY05FTNpO+dhORjxvQXAgRj1kszHmxo6ddJbnx5QpGbHy7LJSKC7O6JzweBHj\nLQgOxKiHbDrGrJeYppuw5iMZsfKmt4slSzI6JzxexHgLggMx6lGbjTFnbdmss3xLVHmSESvPtOZZ\nViPGWxAciBmP2khnvEDIA//kI+HESuWLJ1YeK8wiOeHREeMtCA7EyqyNkJCHzjaxUvnMxtaNhFkk\nJzw6YrwFwaFYNcONXnjCC4YeCtllpbiqdmmu0zP6RkIikhMeHTHegnCAoxuG8HhiPhQCHrR744aQ\n5Q1dCqIafSMhkcDbBcXFjsgJT3W3RDHegmAD0tkmNZHwhK7X3vbgqEbWzIArixbZev5MSE+3RDHe\ngpBm0t0mNZHwRLyDipkWEklHZowYb0FIM+lOiUtk8DNerz3TyuTTkRkj5fGCkGbskBIXb8l69Zg7\nNGdJN+JB26FMPruslJzJE/fPDj/mjrhkCp/2bEvbjjx50VhW5ffgjE9Vhp6nWCk2IJ63IKQdXS/V\n7bZ9NaEZD9pu059ZGa6qHnMH1c1bMvGCMVxy+/vccP10VuX3AGDzjr1Wiw6I5y0IaUfPe3XV1Gg2\nm7IbRjxoo420Ukm0cJVRmRobvXz0nzWUqe1g1KyQda33VXH/+4/R5cG7qOH4RMWNQIy3IKSZmgGD\nqAJaj74JV01NxHozxsSuWGEorSaRcNX35VuZ9v5SzXW3fTyZM1bMayp4qk+SjmK8BcEG1AwYROub\nb9Bclwnl4HaI64cTHqcOXq7F2i27efbdxVTsjnzAXnTSoVx/dV+a1dVGrEuWjhLzFgSbkMnl4PGU\nzwfi4xQXG45Dm4mrG0lXrNxTw4RZP1Iyfi4PvfpdiOE+vjCPZ289jRl3n8nAfkfg6nGk5vGSdf/E\n8xYEm5BI5kYysSIjw4xu4fFxliwxFB83G1cPhKtypkzar9utt7P74gG8/U+VuT9sjNinU/scbhnQ\nm84dWiWkoxW4vF5vzI0URXkKOA2fsX8C+A54HXADm4GhqqpGvkv42bZtd+yT6JCX15ptNpwFOx4y\nSRfILH3sokt2WWmEMYknXmqVPhGG1E88OdlGdcvtd5JmOKO+qBcV8xboHj/e/QC8Xi9zf9jIG/9a\nGbHOBYy58hh6d28f9RhgTEcz9yYvr7VuJ/WYxltRlDOAsaqqXqgoSnvgR+Bz4B+qqr6jKMrjwHpV\nVf+idwwx3j4ySRfILH0ySRewTp9EDGK8dOiUi0ujNa3X42H7pp26bwKx9tNi2eqdPPP2Iho17OCQ\ns4/k7D4FuFwxZqIwiVXG20jY5EvgW//flUA
[inline base64-encoded PNG plot output omitted]
v/3s0fvuJ6GNr4y0I\ngiBoY+ewiSAIgqCDGG9BEAQHIsZbEATBgYjxFgRBcCBivAVBEByIGG9BEAQHIsZbEATBgfw/MqwZ\nMyKuHnEAAAAASUVORK5CYII=\n", 302 | "text/plain": [ 303 | "" 304 | ] 305 | }, 306 | "metadata": { 307 | "tags": [] 308 | } 309 | } 310 | ] 311 | }, 312 | { 313 | "metadata": { 314 | "id": "IWFE0DscMWJC", 315 | "colab_type": "text" 316 | }, 317 | "cell_type": "markdown", 318 | "source": [ 319 | "### Multiple Linear Regression\n", 320 | "Let's use three features as input vector : TV,Radio,Newspaper" 321 | ] 322 | }, 323 | { 324 | "metadata": { 325 | "id": "5hqjoRy0MWJC", 326 | "colab_type": "code", 327 | "colab": { 328 | "autoexec": { 329 | "startup": false, 330 | "wait_interval": 0 331 | }, 332 | "base_uri": "https://localhost:8080/", 333 | "height": 17 334 | }, 335 | "outputId": "682b3a93-b13e-4596-bf4d-c440ac2a9bc6", 336 | "executionInfo": { 337 | "status": "ok", 338 | "timestamp": 1528969115028, 339 | "user_tz": -120, 340 | "elapsed": 1040, 341 | "user": { 342 | "displayName": "", 343 | "photoUrl": "", 344 | "userId": "" 345 | } 346 | } 347 | }, 348 | "cell_type": "code", 349 | "source": [ 350 | "# Parameters\n", 351 | "learning_rate = 1e-2\n", 352 | "training_epochs = 2000\n", 353 | "display_step = 200" 354 | ], 355 | "execution_count": 16, 356 | "outputs": [] 357 | }, 358 | { 359 | "metadata": { 360 | "id": "nEOeXulQMWJE", 361 | "colab_type": "code", 362 | "colab": { 363 | "autoexec": { 364 | "startup": false, 365 | "wait_interval": 0 366 | }, 367 | "base_uri": "https://localhost:8080/", 368 | "height": 51 369 | }, 370 | "outputId": "520b9f95-257d-4ad3-9da4-063a8392e93d", 371 | "executionInfo": { 372 | "status": "ok", 373 | "timestamp": 1528969117015, 374 | "user_tz": -120, 375 | "elapsed": 1885, 376 | "user": { 377 | "displayName": "", 378 | "photoUrl": "", 379 | "userId": "" 380 | } 381 | } 382 | }, 383 | "cell_type": "code", 384 | "source": [ 385 | "import numpy as np\n", 386 | "data = pd.read_csv('https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2017/master/dataset/Advertising.csv',index_col=0, delimiter=',')\n", 387 | "train_X = data[['TV','Radio','Newspaper']].values\n", 388 | "train_Y = data.Sales.values \n", 389 | "train_Y = train_Y[:,np.newaxis]\n", 390 | "n_samples = train_X.shape[0]\n", 391 | "print n_samples\n", 392 | "print train_X.shape, train_Y.shape\n" 393 | ], 394 | "execution_count": 17, 395 | "outputs": [ 396 | { 397 | "output_type": "stream", 398 | "text": [ 399 | "200\n", 400 | "(200, 3) (200, 1)\n" 401 | ], 402 | "name": "stdout" 403 | } 404 | ] 405 | }, 406 | { 407 | "metadata": { 408 | "id": "Gp1MDEAJMWJH", 409 | "colab_type": "code", 410 | "colab": { 411 | "autoexec": { 412 | "startup": false, 413 | "wait_interval": 0 414 | }, 415 | "base_uri": "https://localhost:8080/", 416 | "height": 17 417 | }, 418 | "outputId": "c1990625-12d6-4fde-d5a9-4c2b9e3ccb42", 419 | "executionInfo": { 420 | "status": "ok", 421 | "timestamp": 1528969117702, 422 | "user_tz": -120, 423 | "elapsed": 588, 424 | "user": { 425 | "displayName": "", 426 | "photoUrl": "", 427 | "userId": "" 428 | } 429 | } 430 | }, 431 | "cell_type": "code", 432 | "source": [ 433 | "# Define tf Graph Inputs\n", 434 | "X = tf.placeholder(\"float\",[None,3])\n", 435 | "y = tf.placeholder(\"float\",[None,1])\n", 436 | "\n", 437 | "# Create Model variables \n", 438 | "# Set model weights\n", 439 | "W = tf.Variable(tf.zeros([3, 1]),name=\"bias\")\n", 440 | "b = tf.Variable(np.random.randn(), name=\"bias\")\n", 441 | "\n", 442 | "# Construct a linear model\n", 443 | "y_pred = tf.matmul(X, W) + b" 
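Before running the optimizer it helps to know where it should end up. The block below is a small NumPy sketch added for illustration (it is not a cell of the original notebook); it assumes `train_X` and `train_Y` exactly as built from `Advertising.csv` in the cell above and solves the same least-squares problem in closed form with the normal equations, so the `W` and `b` reported by the training loop below should approach these values.

```python
import numpy as np

# Closed-form least-squares fit of Sales on [TV, Radio, Newspaper].
# Assumes train_X with shape (n_samples, 3) and train_Y with shape (n_samples, 1)
# from the data-loading cell above.
X_design = np.hstack([train_X, np.ones((train_X.shape[0], 1))])  # append an intercept column
normal_lhs = X_design.T.dot(X_design)
normal_rhs = X_design.T.dot(train_Y)
theta = np.linalg.solve(normal_lhs, normal_rhs)  # shape (4, 1): [w_TV, w_Radio, w_News, b]

print("closed-form weights: %s" % theta[:3].ravel())
print("closed-form bias: %.4f" % theta[3, 0])
```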
444 | ], 445 | "execution_count": 18, 446 | "outputs": [] 447 | }, 448 | { 449 | "metadata": { 450 | "id": "RHY--sUfMWJJ", 451 | "colab_type": "code", 452 | "colab": { 453 | "autoexec": { 454 | "startup": false, 455 | "wait_interval": 0 456 | }, 457 | "base_uri": "https://localhost:8080/", 458 | "height": 17 459 | }, 460 | "outputId": "8111b393-2e22-42bc-892c-dc4061fd4f15", 461 | "executionInfo": { 462 | "status": "ok", 463 | "timestamp": 1528969118399, 464 | "user_tz": -120, 465 | "elapsed": 597, 466 | "user": { 467 | "displayName": "", 468 | "photoUrl": "", 469 | "userId": "" 470 | } 471 | } 472 | }, 473 | "cell_type": "code", 474 | "source": [ 475 | "# Minimize the squared errors\n", 476 | "cost = tf.reduce_sum(tf.pow(y_pred-y,2))/(2*n_samples) #L2 loss\n", 477 | "\n", 478 | "# Define the optimizer\n", 479 | "optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost) #Gradient descent" 480 | ], 481 | "execution_count": 19, 482 | "outputs": [] 483 | }, 484 | { 485 | "metadata": { 486 | "id": "zUCPPqt7MWJM", 487 | "colab_type": "code", 488 | "colab": { 489 | "autoexec": { 490 | "startup": false, 491 | "wait_interval": 0 492 | }, 493 | "base_uri": "https://localhost:8080/", 494 | "height": 595 495 | }, 496 | "outputId": "6b43f222-383f-406f-e206-4fc995f25b90", 497 | "executionInfo": { 498 | "status": "ok", 499 | "timestamp": 1528969120031, 500 | "user_tz": -120, 501 | "elapsed": 1531, 502 | "user": { 503 | "displayName": "", 504 | "photoUrl": "", 505 | "userId": "" 506 | } 507 | } 508 | }, 509 | "cell_type": "code", 510 | "source": [ 511 | "# Initializing the variables\n", 512 | "init = tf.global_variables_initializer()\n", 513 | "# Launch the graph\n", 514 | "with tf.Session() as sess:\n", 515 | " sess.run(init)\n", 516 | "\n", 517 | " # Fit all training data\n", 518 | " for epoch in range(training_epochs):\n", 519 | " sess.run(optimizer, feed_dict={X: train_X, y: train_Y})\n", 520 | " \n", 521 | " #Display logs per epoch step\n", 522 | " if epoch % display_step == 0:\n", 523 | " print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \\\n", 524 | " \"{:.9f}\".format(sess.run(cost, feed_dict={X: train_X, y:train_Y})), \\\n", 525 | " \"W=\", sess.run(W), \"b=\", sess.run(b)\n", 526 | "\n", 527 | " print \"Optimization Finished!\"\n", 528 | " print \"cost=\", sess.run(cost, feed_dict={X: train_X, y: train_Y}), \\\n", 529 | " \"W=\", sess.run(W), \"b=\", sess.run(b)\n", 530 | " " 531 | ], 532 | "execution_count": 20, 533 | "outputs": [ 534 | { 535 | "output_type": "stream", 536 | "text": [ 537 | "Epoch: 0001 cost= 86.326835632 W= [[0.01]\n", 538 | " [0.01]\n", 539 | " [0.01]] b= -0.35885254\n", 540 | "Epoch: 0201 cost= 2.007852077 W= [[0.05367266]\n", 541 | " [0.22218329]\n", 542 | " [0.01653813]] b= 0.03463026\n", 543 | "Epoch: 0401 cost= 1.816062450 W= [[0.05232672]\n", 544 | " [0.21644714]\n", 545 | " [0.01354946]] b= 0.52897114\n", 546 | "Epoch: 0601 cost= 1.649169564 W= [[0.05087486]\n", 547 | " [0.21026522]\n", 548 | " [0.01032231]] b= 1.0622686\n", 549 | "Epoch: 0801 cost= 1.530633807 W= [[0.0495164 ]\n", 550 | " [0.20448473]\n", 551 | " [0.00730258]] b= 1.5611866\n", 552 | "Epoch: 1001 cost= 1.458390474 W= [[0.04836035]\n", 553 | " [0.19956744]\n", 554 | " [0.00473274]] b= 1.9857281\n", 555 | "Epoch: 1201 cost= 1.420098186 W= [[0.04745222]\n", 556 | " [0.19570553]\n", 557 | " [0.002714 ]] b= 2.3192055\n", 558 | "Epoch: 1401 cost= 1.402437091 W= [[0.04679121]\n", 559 | " [0.19289485]\n", 560 | " [0.00124457]] b= 2.5619335\n", 561 | "Epoch: 1601 cost= 1.395387888 W= [[0.04634581]\n", 562 
| " [0.19100101]\n", 563 | " [0.00025444]] b= 2.7254863\n", 564 | "Epoch: 1801 cost= 1.392974496 W= [[ 0.04606894]\n", 565 | " [ 0.18982378]\n", 566 | " [-0.00036105]] b= 2.827155\n", 567 | "Optimization Finished!\n", 568 | "cost= 1.3922757 W= [[ 0.04591151]\n", 569 | " [ 0.18915443]\n", 570 | " [-0.00071103]] b= 2.8849626\n" 571 | ], 572 | "name": "stdout" 573 | } 574 | ] 575 | }, 576 | { 577 | "metadata": { 578 | "id": "umIU80meMWJO", 579 | "colab_type": "code", 580 | "colab": { 581 | "autoexec": { 582 | "startup": false, 583 | "wait_interval": 0 584 | }, 585 | "base_uri": "https://localhost:8080/", 586 | "height": 17 587 | }, 588 | "outputId": "d28e771b-15f4-45f0-d68c-2d02cae51c83", 589 | "executionInfo": { 590 | "status": "ok", 591 | "timestamp": 1528969121038, 592 | "user_tz": -120, 593 | "elapsed": 945, 594 | "user": { 595 | "displayName": "", 596 | "photoUrl": "", 597 | "userId": "" 598 | } 599 | } 600 | }, 601 | "cell_type": "code", 602 | "source": [ 603 | "" 604 | ], 605 | "execution_count": 20, 606 | "outputs": [] 607 | } 608 | ] 609 | } -------------------------------------------------------------------------------- /3.2_Logistic_Regression.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "3.2%20Logistic%20Regression.ipynb", 7 | "version": "0.3.2", 8 | "views": {}, 9 | "default_view": {}, 10 | "provenance": [] 11 | }, 12 | "kernelspec": { 13 | "display_name": "Python 2", 14 | "language": "python", 15 | "name": "python2" 16 | } 17 | }, 18 | "cells": [ 19 | { 20 | "metadata": { 21 | "id": "4SPV5rlnPqjb", 22 | "colab_type": "text" 23 | }, 24 | "cell_type": "markdown", 25 | "source": [ 26 | "## Logistic regression learning algorithm example using TensorFlow library." 27 | ] 28 | }, 29 | { 30 | "metadata": { 31 | "id": "bDTVXgf1Pqjc", 32 | "colab_type": "text" 33 | }, 34 | "cell_type": "markdown", 35 | "source": [ 36 | "Logistic regression model is one of the simplest classification models. The most basic form deals with classifing a given set of data points into two possible classes, usually labelled as **0** and **1**. The logistic regression model thus predicts an output y in {**0**,**1**}, given an input vector **x**. The probability is modeled using the logistic function $$ g(z)=1/(1+e^{-z})$$Namely, the probability of finding the output **y=1** is given by\n", 37 | "$$ q_{{y=1}}\\ =\\ {\\hat {y}}\\ \\equiv \\ g({\\mathbf {w}}\\cdot {\\mathbf {x}} + b)\\,,$$\n", 38 | "while the probability of finding **y=0** is given by\n", 39 | "$$ q_{{y=0}} = 1 - q_{{y=1}}$$\n", 40 | "\n", 41 | "Weights **w** are usually learned in the training step by using some optimization algorithem like gradient descent.\n", 42 | "\n", 43 | "The typical loss function that one uses in logistic regression is computed by taking the average of all cross-entropies in the sample. For example, suppose we have N samples the loss function is then given by:\n", 44 | "$$L(w)\\frac{1}{N}\\sum_{n=1}^{N}H(p_{n},q_{n})=-{\\frac 1N}\\sum_{{n=1}}^{N}\\ {\\bigg [}y_{n}\\log {\\hat y}_{n}+(1-y_{n})\\log(1-{\\hat y}_{n}){\\bigg ]}$$\n", 45 | "\n", 46 | "In this example we will use MNIST database of handwritten digits provided in the tensorflow package. The corresponding labels in MNIST are numbers between 0 and 9, describing which digit a given image is. In order to deal with this problem we are going to use label representation of \"one-hot vectors\". 
A one-hot vector representation is a vector which is 0 in most dimensions, and 1 in a single dimension. In this case, the nth digit will be represented as a vector which is 1 in the nth dimensions. For example, 3 would be [0,0,0,1,0,0,0,0,0,0]. \n", 47 | "\n", 48 | "In the case of multiclass the output is given by:\n", 49 | "$$ \\hat{y} = softmax(g(w⋅x + b))$$\n", 50 | "which can be simplified by:\n", 51 | "$$ \\hat{y} = softmax(w⋅x + b)$$ \n", 52 | "and the loss is defined as:\n", 53 | "$$ L(w) = \\frac{1}{N}\\sum_{n=1}^{N}H(p_{n},q_{n})=-\\frac{1}{N}\\sum_{n=1}^{N}y_{n}log(\\hat{y}_{n})$$" 54 | ] 55 | }, 56 | { 57 | "metadata": { 58 | "id": "T5lJYt2PRPXV", 59 | "colab_type": "text" 60 | }, 61 | "cell_type": "markdown", 62 | "source": [ 63 | "
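To make the softmax and the averaged cross-entropy above concrete, here is a short NumPy illustration. It is an added sketch rather than part of the original notebook, and the logits and one-hot targets in it are invented toy values.

```python
import numpy as np

def softmax(z):
    # Subtract the row-wise maximum for numerical stability before exponentiating.
    z = z - z.max(axis=1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

# Toy batch: 2 examples, 3 classes (values chosen only for illustration).
logits = np.array([[2.0, 0.5, -1.0],
                   [0.1, 0.2,  3.0]])
# One-hot targets: the first example is class 0, the second is class 2.
y_true = np.array([[1.0, 0.0, 0.0],
                   [0.0, 0.0, 1.0]])

y_hat = softmax(logits)
# Average cross-entropy over the batch: -(1/N) * sum_n y_n . log(y_hat_n)
loss = -np.mean(np.sum(y_true * np.log(y_hat), axis=1))
print("predicted probabilities:\n%s" % y_hat)
print("average cross-entropy: %.4f" % loss)
```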
Let's create a Logistic Regression classifier with the classical MNIST Dataset.\n", 64 | "\n", 65 | "MNIST is a simple computer vision dataset. It consists of images of handwritten digits like these:\n", 66 | "![alt text](https://www.tensorflow.org/versions/r1.0/images/MNIST.png)\n", 67 | "\n" 68 | ] 69 | }, 70 | { 71 | "metadata": { 72 | "id": "Vyq3jHU3Pqjc", 73 | "colab_type": "code", 74 | "colab": { 75 | "autoexec": { 76 | "startup": false, 77 | "wait_interval": 0 78 | }, 79 | "base_uri": "https://localhost:8080/", 80 | "height": 411 81 | }, 82 | "outputId": "9df27a5b-713a-4559-f673-3e6955a3e697", 83 | "executionInfo": { 84 | "status": "ok", 85 | "timestamp": 1528878763734, 86 | "user_tz": -120, 87 | "elapsed": 5051, 88 | "user": { 89 | "displayName": "", 90 | "photoUrl": "", 91 | "userId": "" 92 | } 93 | } 94 | }, 95 | "cell_type": "code", 96 | "source": [ 97 | "# Import MINST data\n", 98 | "from tensorflow.examples.tutorials.mnist import input_data\n", 99 | "mnist = input_data.read_data_sets(\"tmp/data/\", one_hot=True)" 100 | ], 101 | "execution_count": 1, 102 | "outputs": [ 103 | { 104 | "output_type": "stream", 105 | "text": [ 106 | "WARNING:tensorflow:From :2: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 107 | "Instructions for updating:\n", 108 | "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n", 109 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n", 110 | "Instructions for updating:\n", 111 | "Please write your own downloading logic.\n", 112 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 113 | "Instructions for updating:\n", 114 | "Please use tf.data to implement this functionality.\n", 115 | "Extracting tmp/data/train-images-idx3-ubyte.gz\n", 116 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 117 | "Instructions for updating:\n", 118 | "Please use tf.data to implement this functionality.\n", 119 | "Extracting tmp/data/train-labels-idx1-ubyte.gz\n", 120 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 121 | "Instructions for updating:\n", 122 | "Please use tf.one_hot on tensors.\n", 123 | "Extracting tmp/data/t10k-images-idx3-ubyte.gz\n", 124 | "Extracting tmp/data/t10k-labels-idx1-ubyte.gz\n", 125 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: __init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 126 | "Instructions for updating:\n", 127 | "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n" 128 | ], 129 | "name": "stdout" 130 | } 131 | 
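Once the download finishes it is worth confirming what `read_data_sets` returns. The brief check below is an added sketch; it assumes the `mnist` object created in the cell above, and the shapes in the comments correspond to the default split, which holds out 5,000 of the 60,000 training images for validation.

```python
# Quick sanity check on the loaded data (assumes `mnist` from the cell above).
print(mnist.train.images.shape)   # (55000, 784): flattened 28x28 grey-scale images
print(mnist.train.labels.shape)   # (55000, 10): one-hot encoded digit labels
print(mnist.test.images.shape)    # (10000, 784)
print(mnist.train.labels[0])      # a single one-hot vector with a 1 at the digit's index
```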
] 132 | }, 133 | { 134 | "metadata": { 135 | "id": "zN6hibGQPqjh", 136 | "colab_type": "code", 137 | "colab": { 138 | "autoexec": { 139 | "startup": false, 140 | "wait_interval": 0 141 | }, 142 | "base_uri": "https://localhost:8080/", 143 | "height": 17 144 | }, 145 | "outputId": "53e8ca5f-c2fd-4763-cbed-a84a0d904fb3", 146 | "executionInfo": { 147 | "status": "ok", 148 | "timestamp": 1528878764273, 149 | "user_tz": -120, 150 | "elapsed": 523, 151 | "user": { 152 | "displayName": "", 153 | "photoUrl": "", 154 | "userId": "" 155 | } 156 | } 157 | }, 158 | "cell_type": "code", 159 | "source": [ 160 | "#import tensorflow\n", 161 | "import tensorflow as tf\n", 162 | "import numpy as np\n", 163 | "\n", 164 | "# tf Graph Input\n", 165 | "X = tf.placeholder(\"float\", [None, 784]) # mnist data image of shape 28*28=784\n", 166 | "y = tf.placeholder(\"float\", [None, 10]) # 0-9 digits recognition => 10 classes\n", 167 | "\n", 168 | "# Create model\n", 169 | "# Set model weights\n", 170 | "W = tf.Variable(tf.zeros([784, 10]))\n", 171 | "b = tf.Variable(tf.zeros([10]))\n", 172 | "\n", 173 | "# Construct model\n", 174 | "y_pred = tf.nn.softmax(tf.add(tf.matmul(X, W),b)) # Softmax " 175 | ], 176 | "execution_count": 2, 177 | "outputs": [] 178 | }, 179 | { 180 | "metadata": { 181 | "id": "YcTPnA0ZPqjj", 182 | "colab_type": "code", 183 | "colab": { 184 | "autoexec": { 185 | "startup": false, 186 | "wait_interval": 0 187 | }, 188 | "base_uri": "https://localhost:8080/", 189 | "height": 17 190 | }, 191 | "outputId": "70efd742-18d4-406e-833b-058f0da944e7", 192 | "executionInfo": { 193 | "status": "ok", 194 | "timestamp": 1528878764814, 195 | "user_tz": -120, 196 | "elapsed": 486, 197 | "user": { 198 | "displayName": "", 199 | "photoUrl": "", 200 | "userId": "" 201 | } 202 | } 203 | }, 204 | "cell_type": "code", 205 | "source": [ 206 | "# Define Training Parameters\n", 207 | "learning_rate = 0.01\n", 208 | "training_epochs = 25\n", 209 | "batch_size = 100\n", 210 | "display_step = 1" 211 | ], 212 | "execution_count": 3, 213 | "outputs": [] 214 | }, 215 | { 216 | "metadata": { 217 | "id": "0L0nz5qCPqjl", 218 | "colab_type": "code", 219 | "colab": { 220 | "autoexec": { 221 | "startup": false, 222 | "wait_interval": 0 223 | }, 224 | "base_uri": "https://localhost:8080/", 225 | "height": 17 226 | }, 227 | "outputId": "faa239e0-7e26-4e6b-86ef-2b1cf62a906d", 228 | "executionInfo": { 229 | "status": "ok", 230 | "timestamp": 1528878765478, 231 | "user_tz": -120, 232 | "elapsed": 596, 233 | "user": { 234 | "displayName": "", 235 | "photoUrl": "", 236 | "userId": "" 237 | } 238 | } 239 | }, 240 | "cell_type": "code", 241 | "source": [ 242 | "# Minimize error using cross entropy\n", 243 | "# Cross entropy\n", 244 | "cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(y_pred), reduction_indices=1))\n", 245 | "\n", 246 | "# Gradient Descent\n", 247 | "optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) " 248 | ], 249 | "execution_count": 4, 250 | "outputs": [] 251 | }, 252 | { 253 | "metadata": { 254 | "id": "8_ZjaxyNPqjn", 255 | "colab_type": "code", 256 | "colab": { 257 | "autoexec": { 258 | "startup": false, 259 | "wait_interval": 0 260 | }, 261 | "base_uri": "https://localhost:8080/", 262 | "height": 476 263 | }, 264 | "outputId": "243e8165-ba60-4900-b182-07c5d77116e1", 265 | "executionInfo": { 266 | "status": "ok", 267 | "timestamp": 1528878795152, 268 | "user_tz": -120, 269 | "elapsed": 29625, 270 | "user": { 271 | "displayName": "", 272 | "photoUrl": "", 273 | "userId": "" 274 | } 275 | } 276 | }, 
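One caveat about the cost above: applying `tf.log` directly to the softmax output can produce `log(0)`, and therefore a `nan` loss, if any predicted probability underflows to zero. The sketch below is an optional alternative rather than a cell from the original notebook; it assumes the placeholders `X`, `y`, the variables `W`, `b` and `learning_rate` defined above, and feeds raw logits to the fused `tf.nn.softmax_cross_entropy_with_logits` op that the perceptron notebooks later in this series also use.

```python
import tensorflow as tf

# Numerically safer variant of the cost above, sketched for comparison.
# Feeding raw logits to the fused op avoids computing log(softmax) explicitly.
logits = tf.add(tf.matmul(X, W), b)  # note: no explicit softmax here
cost_stable = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer_stable = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_stable)
```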
277 | "cell_type": "code", 278 | "source": [ 279 | "# Initializing the variables\n", 280 | "init = tf.global_variables_initializer()\n", 281 | "\n", 282 | "# Launch the graph\n", 283 | "with tf.Session() as sess:\n", 284 | " sess.run(init)\n", 285 | "\n", 286 | " # Training cycle\n", 287 | " for epoch in range(training_epochs):\n", 288 | " avg_cost = 0.\n", 289 | " total_batch = int(mnist.train.num_examples/batch_size)\n", 290 | " # Loop over all batches\n", 291 | " for i in range(total_batch):\n", 292 | " batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n", 293 | " # Fit training using batch data\n", 294 | " sess.run([optimizer, cost ], feed_dict={X: batch_xs, y: batch_ys})\n", 295 | " # Compute average loss\n", 296 | " avg_cost += sess.run(cost, feed_dict={X: batch_xs, y: batch_ys})/total_batch\n", 297 | " # Display logs per epoch step\n", 298 | " if epoch % display_step == 0:\n", 299 | " print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \"{:.9f}\".format(avg_cost)\n", 300 | "\n", 301 | " print \"Optimization Finished!\"\n", 302 | "\n", 303 | " # Test model\n", 304 | " correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))\n", 305 | " # Calculate accuracy\n", 306 | " accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n", 307 | " print \"Accuracy:\", accuracy.eval({X: mnist.test.images, y: mnist.test.labels})" 308 | ], 309 | "execution_count": 5, 310 | "outputs": [ 311 | { 312 | "output_type": "stream", 313 | "text": [ 314 | "Epoch: 0001 cost= 1.176532306\n", 315 | "Epoch: 0002 cost= 0.662508476\n", 316 | "Epoch: 0003 cost= 0.550652506\n", 317 | "Epoch: 0004 cost= 0.496730035\n", 318 | "Epoch: 0005 cost= 0.463776750\n", 319 | "Epoch: 0006 cost= 0.440948209\n", 320 | "Epoch: 0007 cost= 0.423931220\n", 321 | "Epoch: 0008 cost= 0.410649940\n", 322 | "Epoch: 0009 cost= 0.399874245\n", 323 | "Epoch: 0010 cost= 0.390927679\n", 324 | "Epoch: 0011 cost= 0.383369292\n", 325 | "Epoch: 0012 cost= 0.376792346\n", 326 | "Epoch: 0013 cost= 0.371000649\n", 327 | "Epoch: 0014 cost= 0.365940430\n", 328 | "Epoch: 0015 cost= 0.361393990\n", 329 | "Epoch: 0016 cost= 0.357267638\n", 330 | "Epoch: 0017 cost= 0.353549642\n", 331 | "Epoch: 0018 cost= 0.350162132\n", 332 | "Epoch: 0019 cost= 0.347016938\n", 333 | "Epoch: 0020 cost= 0.344117598\n", 334 | "Epoch: 0021 cost= 0.341454458\n", 335 | "Epoch: 0022 cost= 0.339016740\n", 336 | "Epoch: 0023 cost= 0.336675162\n", 337 | "Epoch: 0024 cost= 0.334488640\n", 338 | "Epoch: 0025 cost= 0.332450373\n", 339 | "Optimization Finished!\n", 340 | "Accuracy: 0.9139\n" 341 | ], 342 | "name": "stdout" 343 | } 344 | ] 345 | }, 346 | { 347 | "metadata": { 348 | "id": "L4Syjh6oPqjr", 349 | "colab_type": "code", 350 | "colab": { 351 | "autoexec": { 352 | "startup": false, 353 | "wait_interval": 0 354 | }, 355 | "base_uri": "https://localhost:8080/", 356 | "height": 17 357 | }, 358 | "outputId": "7f33def0-acdd-461c-8f97-37811bec69ea", 359 | "executionInfo": { 360 | "status": "ok", 361 | "timestamp": 1528878795728, 362 | "user_tz": -120, 363 | "elapsed": 565, 364 | "user": { 365 | "displayName": "", 366 | "photoUrl": "", 367 | "userId": "" 368 | } 369 | } 370 | }, 371 | "cell_type": "code", 372 | "source": [ 373 | "" 374 | ], 375 | "execution_count": 5, 376 | "outputs": [] 377 | } 378 | ] 379 | } -------------------------------------------------------------------------------- /3.3_Perceptron.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 
| "colab": { 6 | "name": "3.3%20Perceptron.ipynb", 7 | "version": "0.3.2", 8 | "views": {}, 9 | "default_view": {}, 10 | "provenance": [] 11 | }, 12 | "kernelspec": { 13 | "display_name": "Python 2", 14 | "language": "python", 15 | "name": "python2" 16 | } 17 | }, 18 | "cells": [ 19 | { 20 | "metadata": { 21 | "id": "Y_RrYd1OSOhs", 22 | "colab_type": "text" 23 | }, 24 | "cell_type": "markdown", 25 | "source": [ 26 | "## Perceptron with TensorFlow" 27 | ] 28 | }, 29 | { 30 | "metadata": { 31 | "id": "OxFsVC2GSOhs", 32 | "colab_type": "code", 33 | "colab": { 34 | "autoexec": { 35 | "startup": false, 36 | "wait_interval": 0 37 | }, 38 | "base_uri": "https://localhost:8080/", 39 | "height": 411 40 | }, 41 | "outputId": "8c8794d9-89ab-430d-e79a-14b33da610e6", 42 | "executionInfo": { 43 | "status": "ok", 44 | "timestamp": 1528878933742, 45 | "user_tz": -120, 46 | "elapsed": 4573, 47 | "user": { 48 | "displayName": "", 49 | "photoUrl": "", 50 | "userId": "" 51 | } 52 | } 53 | }, 54 | "cell_type": "code", 55 | "source": [ 56 | "# Import MINST data\n", 57 | "from tensorflow.examples.tutorials.mnist import input_data\n", 58 | "mnist = input_data.read_data_sets(\"tmp/data/\", one_hot=True)\n", 59 | "\n", 60 | "import tensorflow as tf" 61 | ], 62 | "execution_count": 1, 63 | "outputs": [ 64 | { 65 | "output_type": "stream", 66 | "text": [ 67 | "WARNING:tensorflow:From :2: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 68 | "Instructions for updating:\n", 69 | "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n", 70 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n", 71 | "Instructions for updating:\n", 72 | "Please write your own downloading logic.\n", 73 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 74 | "Instructions for updating:\n", 75 | "Please use tf.data to implement this functionality.\n", 76 | "Extracting tmp/data/train-images-idx3-ubyte.gz\n", 77 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 78 | "Instructions for updating:\n", 79 | "Please use tf.data to implement this functionality.\n", 80 | "Extracting tmp/data/train-labels-idx1-ubyte.gz\n", 81 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 82 | "Instructions for updating:\n", 83 | "Please use tf.one_hot on tensors.\n", 84 | "Extracting tmp/data/t10k-images-idx3-ubyte.gz\n", 85 | "Extracting tmp/data/t10k-labels-idx1-ubyte.gz\n", 86 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: __init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 
87 | "Instructions for updating:\n", 88 | "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n" 89 | ], 90 | "name": "stdout" 91 | } 92 | ] 93 | }, 94 | { 95 | "metadata": { 96 | "id": "7osZkfg1SOhx", 97 | "colab_type": "code", 98 | "colab": { 99 | "autoexec": { 100 | "startup": false, 101 | "wait_interval": 0 102 | }, 103 | "base_uri": "https://localhost:8080/", 104 | "height": 17 105 | }, 106 | "outputId": "35306440-1b57-44dd-bd70-d8b244f07c88", 107 | "executionInfo": { 108 | "status": "ok", 109 | "timestamp": 1528878934278, 110 | "user_tz": -120, 111 | "elapsed": 525, 112 | "user": { 113 | "displayName": "", 114 | "photoUrl": "", 115 | "userId": "" 116 | } 117 | } 118 | }, 119 | "cell_type": "code", 120 | "source": [ 121 | "# Parameters\n", 122 | "learning_rate = 0.001\n", 123 | "training_epochs = 15\n", 124 | "batch_size = 100\n", 125 | "display_step = 1\n", 126 | "\n", 127 | "# Network Parameters\n", 128 | "n_hidden_1 = 256 # 1st layer number of features\n", 129 | "n_hidden_2 = 256 # 2nd layer number of features\n", 130 | "n_input = 784 # MNIST data input (img shape: 28*28)\n", 131 | "n_classes = 10 # MNIST total classes (0-9 digits)\n", 132 | "\n", 133 | "# tf Graph input\n", 134 | "x = tf.placeholder(\"float\", [None, n_input])\n", 135 | "y = tf.placeholder(\"float\", [None, n_classes])" 136 | ], 137 | "execution_count": 2, 138 | "outputs": [] 139 | }, 140 | { 141 | "metadata": { 142 | "id": "1TLxcD2wSOhz", 143 | "colab_type": "code", 144 | "colab": { 145 | "autoexec": { 146 | "startup": false, 147 | "wait_interval": 0 148 | }, 149 | "base_uri": "https://localhost:8080/", 150 | "height": 17 151 | }, 152 | "outputId": "31516afe-8694-4d84-c0c6-cf501cbc338e", 153 | "executionInfo": { 154 | "status": "ok", 155 | "timestamp": 1528878934811, 156 | "user_tz": -120, 157 | "elapsed": 484, 158 | "user": { 159 | "displayName": "", 160 | "photoUrl": "", 161 | "userId": "" 162 | } 163 | } 164 | }, 165 | "cell_type": "code", 166 | "source": [ 167 | "# Create model\n", 168 | "def multilayer_perceptron(x, weights, biases):\n", 169 | " # Hidden layer with RELU activation\n", 170 | " layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n", 171 | " layer_1 = tf.nn.relu(layer_1)\n", 172 | " # Output layer with linear activation\n", 173 | " out_layer = tf.matmul(layer_1, weights['out']) + biases['out']\n", 174 | " return out_layer" 175 | ], 176 | "execution_count": 3, 177 | "outputs": [] 178 | }, 179 | { 180 | "metadata": { 181 | "id": "RHppzI6oSOh2", 182 | "colab_type": "code", 183 | "colab": { 184 | "autoexec": { 185 | "startup": false, 186 | "wait_interval": 0 187 | }, 188 | "base_uri": "https://localhost:8080/", 189 | "height": 17 190 | }, 191 | "outputId": "0c6b07c8-f564-46e4-9aab-fb9789b43f1d", 192 | "executionInfo": { 193 | "status": "ok", 194 | "timestamp": 1528878970992, 195 | "user_tz": -120, 196 | "elapsed": 577, 197 | "user": { 198 | "displayName": "", 199 | "photoUrl": "", 200 | "userId": "" 201 | } 202 | } 203 | }, 204 | "cell_type": "code", 205 | "source": [ 206 | "# Store layers weight & bias\n", 207 | "weights = {\n", 208 | " 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n", 209 | " 'out': tf.Variable(tf.random_normal([n_hidden_1, n_classes]))\n", 210 | "}\n", 211 | "biases = {\n", 212 | " 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n", 213 | " 'out': tf.Variable(tf.random_normal([n_classes]))\n", 214 | "}\n", 215 | "\n", 216 | "# Construct model\n", 217 | "pred = multilayer_perceptron(x, weights, biases)\n", 218 | "\n", 219 | 
"# Define loss and optimizer\n", 220 | "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n", 221 | "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n", 222 | "\n", 223 | "# Initializing the variables\n", 224 | "init = tf.global_variables_initializer()" 225 | ], 226 | "execution_count": 6, 227 | "outputs": [] 228 | }, 229 | { 230 | "metadata": { 231 | "id": "wG-PkTZfSOh4", 232 | "colab_type": "code", 233 | "colab": { 234 | "autoexec": { 235 | "startup": false, 236 | "wait_interval": 0 237 | }, 238 | "base_uri": "https://localhost:8080/", 239 | "height": 306 240 | }, 241 | "outputId": "5e3a6f8b-79f6-457b-aeb7-b9279673dc55", 242 | "executionInfo": { 243 | "status": "ok", 244 | "timestamp": 1528879008940, 245 | "user_tz": -120, 246 | "elapsed": 34439, 247 | "user": { 248 | "displayName": "", 249 | "photoUrl": "", 250 | "userId": "" 251 | } 252 | } 253 | }, 254 | "cell_type": "code", 255 | "source": [ 256 | "# Launch the graph\n", 257 | "with tf.Session() as sess:\n", 258 | " sess.run(init)\n", 259 | "\n", 260 | " # Training cycle\n", 261 | " for epoch in range(training_epochs):\n", 262 | " avg_cost = 0.\n", 263 | " total_batch = int(mnist.train.num_examples/batch_size)\n", 264 | " # Loop over all batches\n", 265 | " for i in range(total_batch):\n", 266 | " batch_x, batch_y = mnist.train.next_batch(batch_size)\n", 267 | " # Run optimization op (backprop) and cost op (to get loss value)\n", 268 | " _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,\n", 269 | " y: batch_y})\n", 270 | " # Compute average loss\n", 271 | " avg_cost += c / total_batch\n", 272 | " # Display logs per epoch step\n", 273 | " if epoch % display_step == 0:\n", 274 | " print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \\\n", 275 | " \"{:.9f}\".format(avg_cost)\n", 276 | " print \"Optimization Finished!\"\n", 277 | "\n", 278 | " # Test model\n", 279 | " correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n", 280 | " # Calculate accuracy\n", 281 | " accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n", 282 | " print \"Accuracy:\", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})\n", 283 | " accuracy = sess.run([accuracy], feed_dict={x: mnist.test.images,\n", 284 | " y: mnist.test.labels})" 285 | ], 286 | "execution_count": 7, 287 | "outputs": [ 288 | { 289 | "output_type": "stream", 290 | "text": [ 291 | "Epoch: 0001 cost= 20.299232648\n", 292 | "Epoch: 0002 cost= 5.120573610\n", 293 | "Epoch: 0003 cost= 3.390567693\n", 294 | "Epoch: 0004 cost= 2.513128784\n", 295 | "Epoch: 0005 cost= 1.944997817\n", 296 | "Epoch: 0006 cost= 1.546649608\n", 297 | "Epoch: 0007 cost= 1.245732133\n", 298 | "Epoch: 0008 cost= 1.023161706\n", 299 | "Epoch: 0009 cost= 0.824373712\n", 300 | "Epoch: 0010 cost= 0.668858461\n", 301 | "Epoch: 0011 cost= 0.547593954\n", 302 | "Epoch: 0012 cost= 0.438884579\n", 303 | "Epoch: 0013 cost= 0.367057379\n", 304 | "Epoch: 0014 cost= 0.291111244\n", 305 | "Epoch: 0015 cost= 0.234335157\n", 306 | "Optimization Finished!\n", 307 | "Accuracy: 0.9411\n" 308 | ], 309 | "name": "stdout" 310 | } 311 | ] 312 | }, 313 | { 314 | "metadata": { 315 | "id": "EUlptaD0SOh7", 316 | "colab_type": "code", 317 | "colab": { 318 | "autoexec": { 319 | "startup": false, 320 | "wait_interval": 0 321 | } 322 | } 323 | }, 324 | "cell_type": "code", 325 | "source": [ 326 | "" 327 | ], 328 | "execution_count": 0, 329 | "outputs": [] 330 | } 331 | ] 332 | } 
-------------------------------------------------------------------------------- /3.4_Multilayer_Perceptron.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "3.4%20Multilayer%20Perceptron.ipynb", 7 | "version": "0.3.2", 8 | "views": {}, 9 | "default_view": {}, 10 | "provenance": [] 11 | }, 12 | "kernelspec": { 13 | "display_name": "Python 2", 14 | "language": "python", 15 | "name": "python2" 16 | } 17 | }, 18 | "cells": [ 19 | { 20 | "metadata": { 21 | "id": "GpKJxqCDSz1w", 22 | "colab_type": "text" 23 | }, 24 | "cell_type": "markdown", 25 | "source": [ 26 | "## Multilayer Perceptron" 27 | ] 28 | }, 29 | { 30 | "metadata": { 31 | "id": "uc39tiY0Sz1x", 32 | "colab_type": "code", 33 | "colab": { 34 | "autoexec": { 35 | "startup": false, 36 | "wait_interval": 0 37 | }, 38 | "base_uri": "https://localhost:8080/", 39 | "height": 411 40 | }, 41 | "outputId": "40a5c260-5ec5-45a2-e3c5-6812dfb9eb05", 42 | "executionInfo": { 43 | "status": "ok", 44 | "timestamp": 1528879090159, 45 | "user_tz": -120, 46 | "elapsed": 6016, 47 | "user": { 48 | "displayName": "", 49 | "photoUrl": "", 50 | "userId": "" 51 | } 52 | } 53 | }, 54 | "cell_type": "code", 55 | "source": [ 56 | "# Import MINST data\n", 57 | "from tensorflow.examples.tutorials.mnist import input_data\n", 58 | "mnist = input_data.read_data_sets(\"tmp/data/\", one_hot=True)\n", 59 | "\n", 60 | "import tensorflow as tf" 61 | ], 62 | "execution_count": 1, 63 | "outputs": [ 64 | { 65 | "output_type": "stream", 66 | "text": [ 67 | "WARNING:tensorflow:From :2: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 68 | "Instructions for updating:\n", 69 | "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n", 70 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n", 71 | "Instructions for updating:\n", 72 | "Please write your own downloading logic.\n", 73 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 74 | "Instructions for updating:\n", 75 | "Please use tf.data to implement this functionality.\n", 76 | "Extracting tmp/data/train-images-idx3-ubyte.gz\n", 77 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 78 | "Instructions for updating:\n", 79 | "Please use tf.data to implement this functionality.\n", 80 | "Extracting tmp/data/train-labels-idx1-ubyte.gz\n", 81 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 82 | "Instructions for updating:\n", 83 | "Please use tf.one_hot on tensors.\n", 84 | "Extracting tmp/data/t10k-images-idx3-ubyte.gz\n", 85 | "Extracting 
tmp/data/t10k-labels-idx1-ubyte.gz\n", 86 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: __init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 87 | "Instructions for updating:\n", 88 | "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n" 89 | ], 90 | "name": "stdout" 91 | } 92 | ] 93 | }, 94 | { 95 | "metadata": { 96 | "id": "Fum5Dx2XSz10", 97 | "colab_type": "code", 98 | "colab": { 99 | "autoexec": { 100 | "startup": false, 101 | "wait_interval": 0 102 | }, 103 | "base_uri": "https://localhost:8080/", 104 | "height": 17 105 | }, 106 | "outputId": "30895702-7d8b-46c2-f4ca-e81cc3f55b49", 107 | "executionInfo": { 108 | "status": "ok", 109 | "timestamp": 1528879091114, 110 | "user_tz": -120, 111 | "elapsed": 946, 112 | "user": { 113 | "displayName": "", 114 | "photoUrl": "", 115 | "userId": "" 116 | } 117 | } 118 | }, 119 | "cell_type": "code", 120 | "source": [ 121 | "# Parameters\n", 122 | "learning_rate = 0.01\n", 123 | "training_epochs = 15\n", 124 | "batch_size = 100\n", 125 | "display_step = 1\n", 126 | "\n", 127 | "# Network Parameters\n", 128 | "n_hidden_1 = 256 # 1st layer number of features\n", 129 | "n_hidden_2 = 256 # 2nd layer number of features\n", 130 | "n_input = 784 # MNIST data input (img shape: 28*28)\n", 131 | "n_classes = 10 # MNIST total classes (0-9 digits)\n", 132 | "\n", 133 | "# tf Graph input\n", 134 | "x = tf.placeholder(\"float\", [None, n_input])\n", 135 | "y = tf.placeholder(\"float\", [None, n_classes])" 136 | ], 137 | "execution_count": 2, 138 | "outputs": [] 139 | }, 140 | { 141 | "metadata": { 142 | "id": "RiDXw7obSz13", 143 | "colab_type": "code", 144 | "colab": { 145 | "autoexec": { 146 | "startup": false, 147 | "wait_interval": 0 148 | }, 149 | "base_uri": "https://localhost:8080/", 150 | "height": 17 151 | }, 152 | "outputId": "c905d5c9-19b2-4f2d-cdf6-25ec07222c26", 153 | "executionInfo": { 154 | "status": "ok", 155 | "timestamp": 1528879091668, 156 | "user_tz": -120, 157 | "elapsed": 500, 158 | "user": { 159 | "displayName": "", 160 | "photoUrl": "", 161 | "userId": "" 162 | } 163 | } 164 | }, 165 | "cell_type": "code", 166 | "source": [ 167 | "# Create model\n", 168 | "def multilayer_perceptron(x, weights, biases):\n", 169 | " # Hidden layer with RELU activation\n", 170 | " layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])\n", 171 | " layer_1 = tf.nn.relu(layer_1)\n", 172 | " # Hidden layer with RELU activation\n", 173 | " layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])\n", 174 | " layer_2 = tf.nn.relu(layer_2)\n", 175 | " # Output layer with linear activation\n", 176 | " out_layer = tf.matmul(layer_2, weights['out']) + biases['out']\n", 177 | " return out_layer" 178 | ], 179 | "execution_count": 3, 180 | "outputs": [] 181 | }, 182 | { 183 | "metadata": { 184 | "id": "dbKvil3ISz15", 185 | "colab_type": "code", 186 | "colab": { 187 | "autoexec": { 188 | "startup": false, 189 | "wait_interval": 0 190 | }, 191 | "base_uri": "https://localhost:8080/", 192 | "height": 17 193 | }, 194 | "outputId": "e29afa7b-1207-48a6-ae09-a5284791249c", 195 | "executionInfo": { 196 | "status": "ok", 197 | "timestamp": 1528879117597, 198 | "user_tz": -120, 199 | "elapsed": 562, 200 | "user": { 201 | "displayName": "", 202 | "photoUrl": "", 203 | "userId": "" 204 | } 205 | } 206 | }, 207 | "cell_type": "code", 208 | "source": [ 209 | "# Store layers weight & 
bias\n", 210 | "weights = {\n", 211 | " 'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),\n", 212 | " 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),\n", 213 | " 'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))\n", 214 | "}\n", 215 | "biases = {\n", 216 | " 'b1': tf.Variable(tf.random_normal([n_hidden_1])),\n", 217 | " 'b2': tf.Variable(tf.random_normal([n_hidden_2])),\n", 218 | " 'out': tf.Variable(tf.random_normal([n_classes]))\n", 219 | "}\n", 220 | "\n", 221 | "# Construct model\n", 222 | "pred = multilayer_perceptron(x, weights, biases)\n", 223 | "\n", 224 | "# Define loss and optimizer\n", 225 | "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n", 226 | "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n", 227 | "\n", 228 | "# Initializing the variables\n", 229 | "init = tf.global_variables_initializer()" 230 | ], 231 | "execution_count": 6, 232 | "outputs": [] 233 | }, 234 | { 235 | "metadata": { 236 | "id": "hS7hIZc2Sz17", 237 | "colab_type": "code", 238 | "colab": { 239 | "autoexec": { 240 | "startup": false, 241 | "wait_interval": 0 242 | }, 243 | "base_uri": "https://localhost:8080/", 244 | "height": 306 245 | }, 246 | "outputId": "5ec87f55-7d68-4fab-9736-84d326d3d156", 247 | "executionInfo": { 248 | "status": "ok", 249 | "timestamp": 1528879164713, 250 | "user_tz": -120, 251 | "elapsed": 45305, 252 | "user": { 253 | "displayName": "", 254 | "photoUrl": "", 255 | "userId": "" 256 | } 257 | } 258 | }, 259 | "cell_type": "code", 260 | "source": [ 261 | "# Launch the graph\n", 262 | "with tf.Session() as sess:\n", 263 | " sess.run(init)\n", 264 | "\n", 265 | " # Training cycle\n", 266 | " for epoch in range(training_epochs):\n", 267 | " avg_cost = 0.\n", 268 | " total_batch = int(mnist.train.num_examples/batch_size)\n", 269 | " # Loop over all batches\n", 270 | " for i in range(total_batch):\n", 271 | " batch_x, batch_y = mnist.train.next_batch(batch_size)\n", 272 | " # Run optimization op (backprop) and cost op (to get loss value)\n", 273 | " _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,\n", 274 | " y: batch_y})\n", 275 | " # Compute average loss\n", 276 | " avg_cost += c / total_batch\n", 277 | " # Display logs per epoch step\n", 278 | " if epoch % display_step == 0:\n", 279 | " print \"Epoch:\", '%04d' % (epoch+1), \"cost=\", \\\n", 280 | " \"{:.9f}\".format(avg_cost)\n", 281 | " print \"Optimization Finished!\"\n", 282 | "\n", 283 | " # Test model\n", 284 | " correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\n", 285 | " # Calculate accuracy\n", 286 | " accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n", 287 | " print \"Accuracy:\", accuracy.eval({x: mnist.test.images, y: mnist.test.labels})" 288 | ], 289 | "execution_count": 7, 290 | "outputs": [ 291 | { 292 | "output_type": "stream", 293 | "text": [ 294 | "Epoch: 0001 cost= 43.282013203\n", 295 | "Epoch: 0002 cost= 8.349755084\n", 296 | "Epoch: 0003 cost= 4.672669222\n", 297 | "Epoch: 0004 cost= 3.394152646\n", 298 | "Epoch: 0005 cost= 2.467624623\n", 299 | "Epoch: 0006 cost= 2.193587589\n", 300 | "Epoch: 0007 cost= 2.039622562\n", 301 | "Epoch: 0008 cost= 1.736807735\n", 302 | "Epoch: 0009 cost= 1.386611417\n", 303 | "Epoch: 0010 cost= 1.591948932\n", 304 | "Epoch: 0011 cost= 1.320480854\n", 305 | "Epoch: 0012 cost= 1.160013903\n", 306 | "Epoch: 0013 cost= 1.042314344\n", 307 | "Epoch: 0014 cost= 0.775135798\n", 308 | "Epoch: 0015 cost= 0.914223716\n", 309 | "Optimization 
Finished!\n", 310 | "Accuracy: 0.9594\n" 311 | ], 312 | "name": "stdout" 313 | } 314 | ] 315 | }, 316 | { 317 | "metadata": { 318 | "id": "KRD_TUeOSz1-", 319 | "colab_type": "text" 320 | }, 321 | "cell_type": "markdown", 322 | "source": [ 323 | "### Exercice 1\n", 324 | "Modify the architecture of the network. You can add extra hidden layers and/or the number of neurons per layer. How do you obtain the best results?" 325 | ] 326 | }, 327 | { 328 | "metadata": { 329 | "id": "D0MSBlseSz1-", 330 | "colab_type": "code", 331 | "colab": { 332 | "autoexec": { 333 | "startup": false, 334 | "wait_interval": 0 335 | } 336 | } 337 | }, 338 | "cell_type": "code", 339 | "source": [ 340 | "" 341 | ], 342 | "execution_count": 0, 343 | "outputs": [] 344 | }, 345 | { 346 | "metadata": { 347 | "id": "chpzoXWESz2C", 348 | "colab_type": "code", 349 | "colab": { 350 | "autoexec": { 351 | "startup": false, 352 | "wait_interval": 0 353 | } 354 | } 355 | }, 356 | "cell_type": "code", 357 | "source": [ 358 | "" 359 | ], 360 | "execution_count": 0, 361 | "outputs": [] 362 | }, 363 | { 364 | "metadata": { 365 | "id": "a4m25yjISz2D", 366 | "colab_type": "code", 367 | "colab": { 368 | "autoexec": { 369 | "startup": false, 370 | "wait_interval": 0 371 | } 372 | } 373 | }, 374 | "cell_type": "code", 375 | "source": [ 376 | "" 377 | ], 378 | "execution_count": 0, 379 | "outputs": [] 380 | } 381 | ] 382 | } -------------------------------------------------------------------------------- /3.6_Multilayer_Perceptron_with_keras.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Multilayer Perceptron with Keras" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 3, 13 | "metadata": { 14 | "collapsed": false 15 | }, 16 | "outputs": [], 17 | "source": [ 18 | "# Import MINST data\n", 19 | "from keras.datasets import mnist\n", 20 | "from keras import backend as K\n", 21 | "from keras.utils import np_utils\n", 22 | "from keras.models import Sequential\n", 23 | "from keras.layers import Dense, Activation \n", 24 | "from keras.optimizers import Adam" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": 4, 30 | "metadata": { 31 | "collapsed": false 32 | }, 33 | "outputs": [ 34 | { 35 | "name": "stdout", 36 | "output_type": "stream", 37 | "text": [ 38 | "(60000, 'train samples')\n", 39 | "(10000, 'test samples')\n", 40 | "Train on 60000 samples, validate on 10000 samples\n", 41 | "Epoch 1/15\n", 42 | "60000/60000 [==============================] - 10s - loss: 0.2516 - acc: 0.9261 - val_loss: 0.1262 - val_acc: 0.9608\n", 43 | "Epoch 2/15\n", 44 | "60000/60000 [==============================] - 10s - loss: 0.0997 - acc: 0.9691 - val_loss: 0.0929 - val_acc: 0.9700\n", 45 | "Epoch 3/15\n", 46 | "60000/60000 [==============================] - 12s - loss: 0.0653 - acc: 0.9793 - val_loss: 0.0761 - val_acc: 0.9771\n", 47 | "Epoch 4/15\n", 48 | "60000/60000 [==============================] - 11s - loss: 0.0453 - acc: 0.9853 - val_loss: 0.0766 - val_acc: 0.9768\n", 49 | "Epoch 5/15\n", 50 | "60000/60000 [==============================] - 11s - loss: 0.0378 - acc: 0.9874 - val_loss: 0.0776 - val_acc: 0.9759\n", 51 | "Epoch 6/15\n", 52 | "60000/60000 [==============================] - 11s - loss: 0.0285 - acc: 0.9906 - val_loss: 0.0713 - val_acc: 0.9797\n", 53 | "Epoch 7/15\n", 54 | "60000/60000 [==============================] - 13s - loss: 0.0252 - acc: 0.9920 - val_loss: 
0.0767 - val_acc: 0.9770\n", 55 | "Epoch 8/15\n", 56 | "60000/60000 [==============================] - 11s - loss: 0.0181 - acc: 0.9943 - val_loss: 0.0748 - val_acc: 0.9787\n", 57 | "Epoch 9/15\n", 58 | "60000/60000 [==============================] - 10s - loss: 0.0169 - acc: 0.9941 - val_loss: 0.0851 - val_acc: 0.9772\n", 59 | "Epoch 10/15\n", 60 | "60000/60000 [==============================] - 10s - loss: 0.0156 - acc: 0.9947 - val_loss: 0.0922 - val_acc: 0.9783\n", 61 | "Epoch 11/15\n", 62 | "60000/60000 [==============================] - 9s - loss: 0.0146 - acc: 0.9950 - val_loss: 0.1056 - val_acc: 0.9762\n", 63 | "Epoch 12/15\n", 64 | "60000/60000 [==============================] - 10s - loss: 0.0141 - acc: 0.9953 - val_loss: 0.0762 - val_acc: 0.9803\n", 65 | "Epoch 13/15\n", 66 | "60000/60000 [==============================] - 12s - loss: 0.0102 - acc: 0.9966 - val_loss: 0.0932 - val_acc: 0.9779\n", 67 | "Epoch 14/15\n", 68 | "60000/60000 [==============================] - 11s - loss: 0.0125 - acc: 0.9959 - val_loss: 0.1049 - val_acc: 0.9779\n", 69 | "Epoch 15/15\n", 70 | "60000/60000 [==============================] - 12s - loss: 0.0122 - acc: 0.9961 - val_loss: 0.0865 - val_acc: 0.9802\n", 71 | "('Test loss:', 0.086470205308183445)\n", 72 | "('Test accuracy:', 0.98019999999999996)\n" 73 | ] 74 | } 75 | ], 76 | "source": [ 77 | "# the data, shuffled and split between train and test sets\n", 78 | "(X_train, y_train), (X_test, y_test) = mnist.load_data()\n", 79 | "\n", 80 | "# Parameters\n", 81 | "learning_rate = 0.001\n", 82 | "training_epochs = 15\n", 83 | "batch_size = 100\n", 84 | "\n", 85 | "# Network Parameters\n", 86 | "n_hidden_1 = 256 # 1st layer number of features\n", 87 | "n_hidden_2 = 256 # 2nd layer number of features\n", 88 | "n_input = 784 # MNIST data input (img shape: 28*28)\n", 89 | "n_classes = 10 # MNIST total classes (0-9 digits)\n", 90 | "\n", 91 | "X_train = X_train.reshape(60000, 784)\n", 92 | "X_test = X_test.reshape(10000, 784)\n", 93 | "X_train = X_train.astype('float32')\n", 94 | "X_test = X_test.astype('float32')\n", 95 | "X_train /= 255\n", 96 | "X_test /= 255\n", 97 | "print(X_train.shape[0], 'train samples')\n", 98 | "print(X_test.shape[0], 'test samples')\n", 99 | "\n", 100 | "# convert class vectors to binary class matrices\n", 101 | "Y_train = np_utils.to_categorical(y_train, n_classes)\n", 102 | "Y_test = np_utils.to_categorical(y_test, n_classes)\n", 103 | "\n", 104 | "model = Sequential()\n", 105 | "model.add(Dense(n_hidden_1, input_shape=(784,)))\n", 106 | "model.add(Activation('relu'))\n", 107 | "model.add(Dense(n_hidden_2))\n", 108 | "model.add(Activation('relu')) \n", 109 | "model.add(Dense(n_classes))\n", 110 | "model.add(Activation('softmax'))\n", 111 | " \n", 112 | "adam = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n", 113 | "model.compile(loss='categorical_crossentropy',\n", 114 | " optimizer=adam,\n", 115 | " metrics=['accuracy'])\n", 116 | "\n", 117 | "model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=training_epochs,\n", 118 | " verbose=1, validation_data=(X_test, Y_test))\n", 119 | "score = model.evaluate(X_test, Y_test, verbose=0)\n", 120 | "print('Test loss:', score[0])\n", 121 | "print('Test accuracy:', score[1])" 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": null, 127 | "metadata": { 128 | "collapsed": true 129 | }, 130 | "outputs": [], 131 | "source": [] 132 | } 133 | ], 134 | "metadata": { 135 | "kernelspec": { 136 | "display_name": "Python 2", 137 | 
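Note for readers running this notebook against the Keras 2.x version printed at the start of the course: the cell above uses the older Keras 1.x spellings (`np_utils.to_categorical`, `nb_epoch`). A minimal sketch of the same calls with the renamed Keras 2 arguments, assuming the `model`, data arrays and parameter variables defined in the cell above are available:

```python
# Sketch only: Keras 2.x spellings of the calls used above.
# Assumes model, X_train, X_test, y_train, y_test, n_classes,
# batch_size and training_epochs are already defined as in the cell above.
from keras.utils import to_categorical   # replaces np_utils.to_categorical

Y_train = to_categorical(y_train, n_classes)
Y_test = to_categorical(y_test, n_classes)

# 'nb_epoch' was renamed to 'epochs' in Keras 2.
model.fit(X_train, Y_train,
          batch_size=batch_size,
          epochs=training_epochs,
          verbose=1,
          validation_data=(X_test, Y_test))
```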
"language": "python", 138 | "name": "python2" 139 | }, 140 | "language_info": { 141 | "codemirror_mode": { 142 | "name": "ipython", 143 | "version": 2 144 | }, 145 | "file_extension": ".py", 146 | "mimetype": "text/x-python", 147 | "name": "python", 148 | "nbconvert_exporter": "python", 149 | "pygments_lexer": "ipython2", 150 | "version": "2.7.6" 151 | } 152 | }, 153 | "nbformat": 4, 154 | "nbformat_minor": 1 155 | } 156 | -------------------------------------------------------------------------------- /3.7_Some_exercises_with_keras.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Some exercises with keras" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "### Exercice 1:\n", 15 | "CIFAR10 small image classification\n", 16 | "\n", 17 | "The dataset consist of 50,000 32x32 color training images, labeled over 10 categories, and 10,000 test images. " 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": 1, 23 | "metadata": { 24 | "collapsed": false 25 | }, 26 | "outputs": [ 27 | { 28 | "name": "stderr", 29 | "output_type": "stream", 30 | "text": [ 31 | "Using TensorFlow backend.\n" 32 | ] 33 | } 34 | ], 35 | "source": [ 36 | "# Import MINST data\n", 37 | "from keras.datasets import cifar10\n", 38 | "from keras.utils import np_utils\n", 39 | "from keras.models import Sequential\n", 40 | "from keras.layers import Dense, Activation \n", 41 | "from keras.optimizers import Adam" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": null, 47 | "metadata": { 48 | "collapsed": false 49 | }, 50 | "outputs": [], 51 | "source": [] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "metadata": {}, 56 | "source": [ 57 | "### Exercice 2\n", 58 | "The learning rate of the optimizer can be modified with the following function:" 59 | ] 60 | }, 61 | { 62 | "cell_type": "raw", 63 | "metadata": {}, 64 | "source": [ 65 | "adam.lr.assign(learning_rate)" 66 | ] 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "metadata": {}, 71 | "source": [ 72 | "Modify the code in order to decrease the learning_rate value every 10 Epochs. 
" 73 | ] 74 | }, 75 | { 76 | "cell_type": "markdown", 77 | "metadata": {}, 78 | "source": [ 79 | "### Exercice 3" 80 | ] 81 | }, 82 | { 83 | "cell_type": "markdown", 84 | "metadata": {}, 85 | "source": [ 86 | "First of all, let's play a little bit with http://playground.tensorflow.org/" 87 | ] 88 | }, 89 | { 90 | "cell_type": "raw", 91 | "metadata": { 92 | "collapsed": false 93 | }, 94 | "source": [ 95 | "Then create a mulitlayer perceptron with the following data and architecture you think is the best:" 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": 2, 101 | "metadata": { 102 | "collapsed": false 103 | }, 104 | "outputs": [ 105 | { 106 | "name": "stdout", 107 | "output_type": "stream", 108 | "text": [ 109 | "(600, 2)\n" 110 | ] 111 | } 112 | ], 113 | "source": [ 114 | "from sklearn.model_selection import train_test_split\n", 115 | "# the data, shuffled and split between train and test sets\n", 116 | "from sklearn.datasets import make_moons, make_circles, make_classification\n", 117 | "X, y = make_moons(n_samples = 1000,noise=0.3, random_state=0)\n", 118 | "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4, random_state=42)\n", 119 | "print X_train.shape" 120 | ] 121 | }, 122 | { 123 | "cell_type": "code", 124 | "execution_count": null, 125 | "metadata": { 126 | "collapsed": true 127 | }, 128 | "outputs": [], 129 | "source": [] 130 | } 131 | ], 132 | "metadata": { 133 | "kernelspec": { 134 | "display_name": "Python 2", 135 | "language": "python", 136 | "name": "python2" 137 | }, 138 | "language_info": { 139 | "codemirror_mode": { 140 | "name": "ipython", 141 | "version": 2 142 | }, 143 | "file_extension": ".py", 144 | "mimetype": "text/x-python", 145 | "name": "python", 146 | "nbconvert_exporter": "python", 147 | "pygments_lexer": "ipython2", 148 | "version": "2.7.6" 149 | } 150 | }, 151 | "nbformat": 4, 152 | "nbformat_minor": 1 153 | } 154 | -------------------------------------------------------------------------------- /4.0_Recap_exercise.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Wrap up" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 12, 13 | "metadata": { 14 | "collapsed": false 15 | }, 16 | "outputs": [], 17 | "source": [ 18 | "%reset -f\n", 19 | "%matplotlib inline\n", 20 | "import tensorflow as tf\n", 21 | "import numpy as np\n", 22 | "import matplotlib.pyplot as plt\n", 23 | "tf.reset_default_graph() \n" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": 13, 29 | "metadata": { 30 | "collapsed": true 31 | }, 32 | "outputs": [], 33 | "source": [ 34 | "# Data \n", 35 | "from sklearn.datasets import load_digits\n", 36 | "data = load_digits()\n", 37 | "\n", 38 | "idx = np.random.permutation(data.data.shape[0])\n", 39 | "idx_train = idx[:-100]\n", 40 | "idx_test = idx[-100:]\n", 41 | "\n", 42 | "x_train = data.data[idx_train,:]\n", 43 | "y_train = data.target[idx_train]\n", 44 | "x_test = data.data[idx_test,:]\n", 45 | "y_test = data.target[idx_test]" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": 14, 51 | "metadata": { 52 | "collapsed": false 53 | }, 54 | "outputs": [ 55 | { 56 | "ename": "SyntaxError", 57 | "evalue": "invalid syntax (, line 4)", 58 | "output_type": "error", 59 | "traceback": [ 60 | "\u001b[0;36m File \u001b[0;32m\"\"\u001b[0;36m, line \u001b[0;32m4\u001b[0m\n\u001b[0;31m ...\u001b[0m\n\u001b[0m 
^\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m invalid syntax\n" 61 | ] 62 | } 63 | ], 64 | "source": [ 65 | "# Network Parameters\n", 66 | "n_input = XX\n", 67 | "n_hidden_1 = XX\n", 68 | "...\n", 69 | "\n", 70 | "# Parameters\n", 71 | "learning_rate = XX" 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": null, 77 | "metadata": { 78 | "collapsed": true 79 | }, 80 | "outputs": [], 81 | "source": [] 82 | } 83 | ], 84 | "metadata": { 85 | "kernelspec": { 86 | "display_name": "Python 2", 87 | "language": "python", 88 | "name": "python2" 89 | }, 90 | "language_info": { 91 | "codemirror_mode": { 92 | "name": "ipython", 93 | "version": 2 94 | }, 95 | "file_extension": ".py", 96 | "mimetype": "text/x-python", 97 | "name": "python", 98 | "nbconvert_exporter": "python", 99 | "pygments_lexer": "ipython2", 100 | "version": "2.7.6" 101 | } 102 | }, 103 | "nbformat": 4, 104 | "nbformat_minor": 0 105 | } 106 | -------------------------------------------------------------------------------- /4.1_Convolutional_Network.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "4.1%20Convolutional%20Network.ipynb", 7 | "version": "0.3.2", 8 | "views": {}, 9 | "default_view": {}, 10 | "provenance": [] 11 | }, 12 | "kernelspec": { 13 | "display_name": "Python 2", 14 | "language": "python", 15 | "name": "python2" 16 | } 17 | }, 18 | "cells": [ 19 | { 20 | "metadata": { 21 | "id": "SNr1cq7SWEAT", 22 | "colab_type": "text" 23 | }, 24 | "cell_type": "markdown", 25 | "source": [ 26 | "# Convolution Network " 27 | ] 28 | }, 29 | { 30 | "metadata": { 31 | "id": "jCeblau1WEAT", 32 | "colab_type": "code", 33 | "colab": { 34 | "autoexec": { 35 | "startup": false, 36 | "wait_interval": 0 37 | }, 38 | "base_uri": "https://localhost:8080/", 39 | "height": 17 40 | }, 41 | "outputId": "f4856e68-bb2a-4255-eca0-e67f4a14610b", 42 | "executionInfo": { 43 | "status": "ok", 44 | "timestamp": 1528879937158, 45 | "user_tz": -120, 46 | "elapsed": 5030, 47 | "user": { 48 | "displayName": "", 49 | "photoUrl": "", 50 | "userId": "" 51 | } 52 | } 53 | }, 54 | "cell_type": "code", 55 | "source": [ 56 | "# Adapted notebook from Author: Aymeric Damien\n", 57 | "# Project: https://github.com/aymericdamien/TensorFlow-Examples/" 58 | ], 59 | "execution_count": 1, 60 | "outputs": [] 61 | }, 62 | { 63 | "metadata": { 64 | "id": "f3bVGAiEWEAW", 65 | "colab_type": "code", 66 | "colab": { 67 | "autoexec": { 68 | "startup": false, 69 | "wait_interval": 0 70 | }, 71 | "base_uri": "https://localhost:8080/", 72 | "height": 411 73 | }, 74 | "outputId": "3221a145-39b9-466d-a4a7-73717d1c3e02", 75 | "executionInfo": { 76 | "status": "ok", 77 | "timestamp": 1528879940657, 78 | "user_tz": -120, 79 | "elapsed": 3080, 80 | "user": { 81 | "displayName": "", 82 | "photoUrl": "", 83 | "userId": "" 84 | } 85 | } 86 | }, 87 | "cell_type": "code", 88 | "source": [ 89 | "# Import MINST data\n", 90 | "from tensorflow.examples.tutorials.mnist import input_data\n", 91 | "mnist = input_data.read_data_sets(\"tmp/data/\", one_hot=True)" 92 | ], 93 | "execution_count": 2, 94 | "outputs": [ 95 | { 96 | "output_type": "stream", 97 | "text": [ 98 | "WARNING:tensorflow:From :2: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 99 | "Instructions for updating:\n", 100 | "Please use alternatives such as official/mnist/dataset.py 
from tensorflow/models.\n", 101 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\n", 102 | "Instructions for updating:\n", 103 | "Please write your own downloading logic.\n", 104 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 105 | "Instructions for updating:\n", 106 | "Please use tf.data to implement this functionality.\n", 107 | "Extracting tmp/data/train-images-idx3-ubyte.gz\n", 108 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 109 | "Instructions for updating:\n", 110 | "Please use tf.data to implement this functionality.\n", 111 | "Extracting tmp/data/train-labels-idx1-ubyte.gz\n", 112 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 113 | "Instructions for updating:\n", 114 | "Please use tf.one_hot on tensors.\n", 115 | "Extracting tmp/data/t10k-images-idx3-ubyte.gz\n", 116 | "Extracting tmp/data/t10k-labels-idx1-ubyte.gz\n", 117 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py:290: __init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\n", 118 | "Instructions for updating:\n", 119 | "Please use alternatives such as official/mnist/dataset.py from tensorflow/models.\n" 120 | ], 121 | "name": "stdout" 122 | } 123 | ] 124 | }, 125 | { 126 | "metadata": { 127 | "id": "dgMkG9zlWEAZ", 128 | "colab_type": "code", 129 | "colab": { 130 | "autoexec": { 131 | "startup": false, 132 | "wait_interval": 0 133 | }, 134 | "base_uri": "https://localhost:8080/", 135 | "height": 17 136 | }, 137 | "outputId": "758b72fd-b766-4b1b-e42c-e4ac4d1df6ed", 138 | "executionInfo": { 139 | "status": "ok", 140 | "timestamp": 1528879941192, 141 | "user_tz": -120, 142 | "elapsed": 520, 143 | "user": { 144 | "displayName": "", 145 | "photoUrl": "", 146 | "userId": "" 147 | } 148 | } 149 | }, 150 | "cell_type": "code", 151 | "source": [ 152 | "import tensorflow as tf" 153 | ], 154 | "execution_count": 3, 155 | "outputs": [] 156 | }, 157 | { 158 | "metadata": { 159 | "id": "hbr5qPdnWEAb", 160 | "colab_type": "code", 161 | "colab": { 162 | "autoexec": { 163 | "startup": false, 164 | "wait_interval": 0 165 | }, 166 | "base_uri": "https://localhost:8080/", 167 | "height": 17 168 | }, 169 | "outputId": "035c0990-dbbb-48c3-e91a-2b17755ceabe", 170 | "executionInfo": { 171 | "status": "ok", 172 | "timestamp": 1528879941779, 173 | "user_tz": -120, 174 | "elapsed": 529, 175 | "user": { 176 | "displayName": "", 177 | "photoUrl": "", 178 | "userId": "" 179 | } 180 | } 181 | }, 182 | "cell_type": "code", 183 | "source": [ 184 | "# Parameters\n", 185 | "learning_rate = 0.001\n", 186 | "training_iters = 100000\n", 187 | "batch_size = 128\n", 188 | "display_step 
= 20" 189 | ], 190 | "execution_count": 4, 191 | "outputs": [] 192 | }, 193 | { 194 | "metadata": { 195 | "id": "mq53AdTHWEAd", 196 | "colab_type": "code", 197 | "colab": { 198 | "autoexec": { 199 | "startup": false, 200 | "wait_interval": 0 201 | }, 202 | "base_uri": "https://localhost:8080/", 203 | "height": 17 204 | }, 205 | "outputId": "21c12d5d-b589-4404-ad28-deb1ddd6efc4", 206 | "executionInfo": { 207 | "status": "ok", 208 | "timestamp": 1528879942399, 209 | "user_tz": -120, 210 | "elapsed": 574, 211 | "user": { 212 | "displayName": "", 213 | "photoUrl": "", 214 | "userId": "" 215 | } 216 | } 217 | }, 218 | "cell_type": "code", 219 | "source": [ 220 | "# Network Parameters\n", 221 | "n_input = 784 # MNIST data input (img shape: 28*28)\n", 222 | "n_classes = 10 # MNIST total classes (0-9 digits)\n", 223 | "dropout = 0.75 # Dropout, probability to keep units" 224 | ], 225 | "execution_count": 5, 226 | "outputs": [] 227 | }, 228 | { 229 | "metadata": { 230 | "id": "reNC0xmQWEAg", 231 | "colab_type": "code", 232 | "colab": { 233 | "autoexec": { 234 | "startup": false, 235 | "wait_interval": 0 236 | }, 237 | "base_uri": "https://localhost:8080/", 238 | "height": 17 239 | }, 240 | "outputId": "84f2b1aa-6860-41e5-d7f1-faf2f5b71561", 241 | "executionInfo": { 242 | "status": "ok", 243 | "timestamp": 1528879942969, 244 | "user_tz": -120, 245 | "elapsed": 524, 246 | "user": { 247 | "displayName": "", 248 | "photoUrl": "", 249 | "userId": "" 250 | } 251 | } 252 | }, 253 | "cell_type": "code", 254 | "source": [ 255 | "# tf Graph input\n", 256 | "x = tf.placeholder(tf.float32, [None, n_input])\n", 257 | "y = tf.placeholder(tf.float32, [None, n_classes])\n", 258 | "keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)" 259 | ], 260 | "execution_count": 6, 261 | "outputs": [] 262 | }, 263 | { 264 | "metadata": { 265 | "id": "Vq0d7ZQbWEAj", 266 | "colab_type": "code", 267 | "colab": { 268 | "autoexec": { 269 | "startup": false, 270 | "wait_interval": 0 271 | }, 272 | "base_uri": "https://localhost:8080/", 273 | "height": 17 274 | }, 275 | "outputId": "6d00c378-09a0-45d5-b05a-da68944f985d", 276 | "executionInfo": { 277 | "status": "ok", 278 | "timestamp": 1528879943540, 279 | "user_tz": -120, 280 | "elapsed": 527, 281 | "user": { 282 | "displayName": "", 283 | "photoUrl": "", 284 | "userId": "" 285 | } 286 | } 287 | }, 288 | "cell_type": "code", 289 | "source": [ 290 | "# Create model\n", 291 | "def conv2d(img, w, b):\n", 292 | " return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(img, w, strides=[1, 1, 1, 1], \n", 293 | " padding='SAME'),b))\n", 294 | "\n", 295 | "def max_pool(img, k):\n", 296 | " return tf.nn.max_pool(img, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')\n", 297 | "\n", 298 | "def conv_net(_X, _weights, _biases, _dropout):\n", 299 | " # Reshape input picture\n", 300 | " _X = tf.reshape(_X, shape=[-1, 28, 28, 1])\n", 301 | "\n", 302 | " # Convolution Layer\n", 303 | " conv1 = conv2d(_X, _weights['wc1'], _biases['bc1'])\n", 304 | " # Max Pooling (down-sampling)\n", 305 | " conv1 = max_pool(conv1, k=2)\n", 306 | " # Apply Dropout\n", 307 | " conv1 = tf.nn.dropout(conv1, _dropout)\n", 308 | "\n", 309 | " # Convolution Layer\n", 310 | " conv2 = conv2d(conv1, _weights['wc2'], _biases['bc2'])\n", 311 | " # Max Pooling (down-sampling)\n", 312 | " conv2 = max_pool(conv2, k=2)\n", 313 | " # Apply Dropout\n", 314 | " conv2 = tf.nn.dropout(conv2, _dropout)\n", 315 | "\n", 316 | " # Fully connected layer\n", 317 | " # Reshape conv2 output to fit dense layer input\n", 318 | " 
dense1 = tf.reshape(conv2, [-1, _weights['wd1'].get_shape().as_list()[0]]) \n", 319 | " # Relu activation\n", 320 | " dense1 = tf.nn.relu(tf.add(tf.matmul(dense1, _weights['wd1']), _biases['bd1']))\n", 321 | " # Apply Dropout\n", 322 | " dense1 = tf.nn.dropout(dense1, _dropout) # Apply Dropout\n", 323 | "\n", 324 | " # Output, class prediction\n", 325 | " out = tf.add(tf.matmul(dense1, _weights['out']), _biases['out'])\n", 326 | " return out" 327 | ], 328 | "execution_count": 7, 329 | "outputs": [] 330 | }, 331 | { 332 | "metadata": { 333 | "id": "w1kTc151WEAm", 334 | "colab_type": "code", 335 | "colab": { 336 | "autoexec": { 337 | "startup": false, 338 | "wait_interval": 0 339 | }, 340 | "base_uri": "https://localhost:8080/", 341 | "height": 17 342 | }, 343 | "outputId": "8cdc08e6-1e00-4f2e-a4da-78158d01eb2b", 344 | "executionInfo": { 345 | "status": "ok", 346 | "timestamp": 1528879944070, 347 | "user_tz": -120, 348 | "elapsed": 485, 349 | "user": { 350 | "displayName": "", 351 | "photoUrl": "", 352 | "userId": "" 353 | } 354 | } 355 | }, 356 | "cell_type": "code", 357 | "source": [ 358 | "# Store layers weight & bias\n", 359 | "weights = {\n", 360 | " # 5x5 conv, 1 input, 32 outputs\n", 361 | " 'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])), \n", 362 | " # 5x5 conv, 32 inputs, 64 outputs\n", 363 | " 'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])), \n", 364 | " # fully connected, 7*7*64 inputs, 1024 outputs\n", 365 | " 'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])), \n", 366 | " # 1024 inputs, 10 outputs (class prediction)\n", 367 | " 'out': tf.Variable(tf.random_normal([1024, n_classes])) \n", 368 | "}\n", 369 | "\n", 370 | "biases = {\n", 371 | " 'bc1': tf.Variable(tf.random_normal([32])),\n", 372 | " 'bc2': tf.Variable(tf.random_normal([64])),\n", 373 | " 'bd1': tf.Variable(tf.random_normal([1024])),\n", 374 | " 'out': tf.Variable(tf.random_normal([n_classes]))\n", 375 | "}" 376 | ], 377 | "execution_count": 8, 378 | "outputs": [] 379 | }, 380 | { 381 | "metadata": { 382 | "id": "ZkxYHRelWEAo", 383 | "colab_type": "code", 384 | "colab": { 385 | "autoexec": { 386 | "startup": false, 387 | "wait_interval": 0 388 | }, 389 | "base_uri": "https://localhost:8080/", 390 | "height": 17 391 | }, 392 | "outputId": "56b60b5a-f968-44ed-dced-ae438c56f9f0", 393 | "executionInfo": { 394 | "status": "ok", 395 | "timestamp": 1528879944882, 396 | "user_tz": -120, 397 | "elapsed": 768, 398 | "user": { 399 | "displayName": "", 400 | "photoUrl": "", 401 | "userId": "" 402 | } 403 | } 404 | }, 405 | "cell_type": "code", 406 | "source": [ 407 | "# Construct model\n", 408 | "pred = conv_net(x, weights, biases, keep_prob)" 409 | ], 410 | "execution_count": 9, 411 | "outputs": [] 412 | }, 413 | { 414 | "metadata": { 415 | "id": "VX2LTNpfWEAp", 416 | "colab_type": "code", 417 | "colab": { 418 | "autoexec": { 419 | "startup": false, 420 | "wait_interval": 0 421 | }, 422 | "base_uri": "https://localhost:8080/", 423 | "height": 17 424 | }, 425 | "outputId": "42a4475f-5b93-4df6-d577-500ef304ebfc", 426 | "executionInfo": { 427 | "status": "ok", 428 | "timestamp": 1528879959358, 429 | "user_tz": -120, 430 | "elapsed": 845, 431 | "user": { 432 | "displayName": "", 433 | "photoUrl": "", 434 | "userId": "" 435 | } 436 | } 437 | }, 438 | "cell_type": "code", 439 | "source": [ 440 | "# Define loss and optimizer\n", 441 | "cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\n", 442 | "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)" 443 | ], 
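A quick sanity check on the shapes used above, since the notebook leaves the arithmetic implicit: each 2x2 max-pool halves the spatial resolution, so the 28x28 input becomes 14x14 after the first pool and 7x7 after the second, and with 64 filters in `wc2` the flattened vector fed into the fully connected layer has 7*7*64 entries, matching the first dimension declared for `weights['wd1']`. A small sketch of that bookkeeping:

```python
# Shape bookkeeping for the two conv/pool stages defined above (sketch).
input_side = 28                  # MNIST images are 28x28
after_pool1 = input_side // 2    # 2x2 max-pool, stride 2 -> 14
after_pool2 = after_pool1 // 2   # second 2x2 max-pool -> 7
n_filters_conv2 = 64             # number of filters in weights['wc2']

flattened = after_pool2 * after_pool2 * n_filters_conv2
print(flattened)                 # 3136 == 7*7*64, first dimension of weights['wd1']
```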
444 | "execution_count": 11, 445 | "outputs": [] 446 | }, 447 | { 448 | "metadata": { 449 | "id": "ag0L9MAvWEAr", 450 | "colab_type": "code", 451 | "colab": { 452 | "autoexec": { 453 | "startup": false, 454 | "wait_interval": 0 455 | }, 456 | "base_uri": "https://localhost:8080/", 457 | "height": 17 458 | }, 459 | "outputId": "bdc0139f-344b-422f-abc9-990ec4497470", 460 | "executionInfo": { 461 | "status": "ok", 462 | "timestamp": 1528879963661, 463 | "user_tz": -120, 464 | "elapsed": 738, 465 | "user": { 466 | "displayName": "", 467 | "photoUrl": "", 468 | "userId": "" 469 | } 470 | } 471 | }, 472 | "cell_type": "code", 473 | "source": [ 474 | "# Evaluate model\n", 475 | "correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))\n", 476 | "accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))" 477 | ], 478 | "execution_count": 12, 479 | "outputs": [] 480 | }, 481 | { 482 | "metadata": { 483 | "id": "pFtMwyoOWEAt", 484 | "colab_type": "code", 485 | "colab": { 486 | "autoexec": { 487 | "startup": false, 488 | "wait_interval": 0 489 | }, 490 | "base_uri": "https://localhost:8080/", 491 | "height": 88 492 | }, 493 | "outputId": "cecd30a6-91ce-441c-c2e8-a640d6e644cd", 494 | "executionInfo": { 495 | "status": "ok", 496 | "timestamp": 1528879964283, 497 | "user_tz": -120, 498 | "elapsed": 569, 499 | "user": { 500 | "displayName": "", 501 | "photoUrl": "", 502 | "userId": "" 503 | } 504 | } 505 | }, 506 | "cell_type": "code", 507 | "source": [ 508 | "# Initializing the variables\n", 509 | "init = tf.initialize_all_variables()" 510 | ], 511 | "execution_count": 13, 512 | "outputs": [ 513 | { 514 | "output_type": "stream", 515 | "text": [ 516 | "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/python/util/tf_should_use.py:118: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.\n", 517 | "Instructions for updating:\n", 518 | "Use `tf.global_variables_initializer` instead.\n" 519 | ], 520 | "name": "stdout" 521 | } 522 | ] 523 | }, 524 | { 525 | "metadata": { 526 | "id": "5Y8zr2H0WEAu", 527 | "colab_type": "code", 528 | "colab": { 529 | "autoexec": { 530 | "startup": false, 531 | "wait_interval": 0 532 | }, 533 | "base_uri": "https://localhost:8080/", 534 | "height": 714 535 | }, 536 | "outputId": "f074d817-61c3-4a18-ba8f-5a6cad70d1ed", 537 | "executionInfo": { 538 | "status": "ok", 539 | "timestamp": 1528880284202, 540 | "user_tz": -120, 541 | "elapsed": 319860, 542 | "user": { 543 | "displayName": "", 544 | "photoUrl": "", 545 | "userId": "" 546 | } 547 | } 548 | }, 549 | "cell_type": "code", 550 | "source": [ 551 | "# Launch the graph\n", 552 | "with tf.Session() as sess:\n", 553 | " sess.run(init)\n", 554 | " step = 1\n", 555 | " # Keep training until reach max iterations\n", 556 | " while step * batch_size < training_iters:\n", 557 | " batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n", 558 | " # Fit training using batch data\n", 559 | " sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})\n", 560 | " if step % display_step == 0:\n", 561 | " # Calculate batch accuracy\n", 562 | " acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})\n", 563 | " # Calculate batch loss\n", 564 | " loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})\n", 565 | " print \"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \\\n", 566 | " \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \"{:.5f}\".format(acc)\n", 567 | " step += 1\n", 
568 | " print \"Optimization Finished!\"\n", 569 | " # Calculate accuracy for 256 mnist test images\n", 570 | " print \"Testing Accuracy:\", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], \n", 571 | " y: mnist.test.labels[:256], \n", 572 | " keep_prob: 1.})" 573 | ], 574 | "execution_count": 14, 575 | "outputs": [ 576 | { 577 | "output_type": "stream", 578 | "text": [ 579 | "Iter 2560, Minibatch Loss= 17036.421875, Training Accuracy= 0.23438\n", 580 | "Iter 5120, Minibatch Loss= 6130.323242, Training Accuracy= 0.57812\n", 581 | "Iter 7680, Minibatch Loss= 5917.083008, Training Accuracy= 0.63281\n", 582 | "Iter 10240, Minibatch Loss= 2962.674805, Training Accuracy= 0.80469\n", 583 | "Iter 12800, Minibatch Loss= 1724.221191, Training Accuracy= 0.85938\n", 584 | "Iter 15360, Minibatch Loss= 3856.745361, Training Accuracy= 0.78906\n", 585 | "Iter 17920, Minibatch Loss= 2472.348877, Training Accuracy= 0.78125\n", 586 | "Iter 20480, Minibatch Loss= 2858.114014, Training Accuracy= 0.82812\n", 587 | "Iter 23040, Minibatch Loss= 2357.305664, Training Accuracy= 0.85938\n", 588 | "Iter 25600, Minibatch Loss= 3318.317139, Training Accuracy= 0.78125\n", 589 | "Iter 28160, Minibatch Loss= 1114.010620, Training Accuracy= 0.89062\n", 590 | "Iter 30720, Minibatch Loss= 1597.208008, Training Accuracy= 0.85156\n", 591 | "Iter 33280, Minibatch Loss= 1210.070923, Training Accuracy= 0.89062\n", 592 | "Iter 35840, Minibatch Loss= 777.603394, Training Accuracy= 0.91406\n", 593 | "Iter 38400, Minibatch Loss= 1670.858276, Training Accuracy= 0.89062\n", 594 | "Iter 40960, Minibatch Loss= 1385.604248, Training Accuracy= 0.82031\n", 595 | "Iter 43520, Minibatch Loss= 1204.731445, Training Accuracy= 0.88281\n", 596 | "Iter 46080, Minibatch Loss= 919.055481, Training Accuracy= 0.86719\n", 597 | "Iter 48640, Minibatch Loss= 1194.326172, Training Accuracy= 0.85938\n", 598 | "Iter 51200, Minibatch Loss= 1063.298828, Training Accuracy= 0.85938\n", 599 | "Iter 53760, Minibatch Loss= 973.690186, Training Accuracy= 0.88281\n", 600 | "Iter 56320, Minibatch Loss= 1144.928711, Training Accuracy= 0.89062\n", 601 | "Iter 58880, Minibatch Loss= 1131.631836, Training Accuracy= 0.90625\n", 602 | "Iter 61440, Minibatch Loss= 480.738403, Training Accuracy= 0.91406\n", 603 | "Iter 64000, Minibatch Loss= 1000.880188, Training Accuracy= 0.89844\n", 604 | "Iter 66560, Minibatch Loss= 379.635132, Training Accuracy= 0.91406\n", 605 | "Iter 69120, Minibatch Loss= 1170.406250, Training Accuracy= 0.83594\n", 606 | "Iter 71680, Minibatch Loss= 771.277283, Training Accuracy= 0.87500\n", 607 | "Iter 74240, Minibatch Loss= 1076.690674, Training Accuracy= 0.85156\n", 608 | "Iter 76800, Minibatch Loss= 495.006317, Training Accuracy= 0.90625\n", 609 | "Iter 79360, Minibatch Loss= 503.262970, Training Accuracy= 0.91406\n", 610 | "Iter 81920, Minibatch Loss= 982.506836, Training Accuracy= 0.89844\n", 611 | "Iter 84480, Minibatch Loss= 520.058289, Training Accuracy= 0.92188\n", 612 | "Iter 87040, Minibatch Loss= 468.711914, Training Accuracy= 0.89844\n", 613 | "Iter 89600, Minibatch Loss= 432.374664, Training Accuracy= 0.89844\n", 614 | "Iter 92160, Minibatch Loss= 310.926025, Training Accuracy= 0.91406\n", 615 | "Iter 94720, Minibatch Loss= 699.337769, Training Accuracy= 0.92188\n", 616 | "Iter 97280, Minibatch Loss= 295.681152, Training Accuracy= 0.94531\n", 617 | "Iter 99840, Minibatch Loss= 501.457764, Training Accuracy= 0.91406\n", 618 | "Optimization Finished!\n", 619 | "Testing Accuracy: 0.9453125\n" 620 | ], 621 | "name": "stdout" 622 | 
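The test figure above is computed on only the first 256 test images, mainly to keep memory usage down. If you want accuracy over the full 10,000-image test set, one option (a sketch, meant to run inside the same `tf.Session()` block so that `sess`, `accuracy`, `x`, `y`, `keep_prob` and `mnist` are still in scope) is to evaluate in chunks and take a weighted average:

```python
# Sketch: accuracy over the whole MNIST test set, evaluated in batches.
# Intended to sit inside the same tf.Session() block as the training loop above.
test_batch = 256
n_test = mnist.test.num_examples
correct = 0.0
for start in range(0, n_test, test_batch):
    end = min(start + test_batch, n_test)
    batch_acc = sess.run(accuracy, feed_dict={x: mnist.test.images[start:end],
                                              y: mnist.test.labels[start:end],
                                              keep_prob: 1.})
    correct += batch_acc * (end - start)   # weight by the actual batch size
print("Full test accuracy: %.4f" % (correct / n_test))
```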
} 623 | ] 624 | }, 625 | { 626 | "metadata": { 627 | "id": "teBrJdKpWEAx", 628 | "colab_type": "code", 629 | "colab": { 630 | "autoexec": { 631 | "startup": false, 632 | "wait_interval": 0 633 | } 634 | } 635 | }, 636 | "cell_type": "code", 637 | "source": [ 638 | "" 639 | ], 640 | "execution_count": 0, 641 | "outputs": [] 642 | } 643 | ] 644 | } -------------------------------------------------------------------------------- /7.3_Sentiment_Analysis.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Predict Sentiment Analysis\n", 8 | "\n", 9 | "### IMDB Movie Review Sentiment Problem Description\n", 10 | "\n", 11 | "The Large Movie Review Dataset (often referred to as the IMDB dataset) contains 25,000 highly polar moving reviews (good or bad) for training and the same amount again for testing. The problem is to determine whether a given moving review has a positive or negative sentiment.\n", 12 | "\n", 13 | "The data was collected by Stanford researchers and was used in a 2011 paper [PDF] where a split of 50/50 of the data was used for training and test. An accuracy of 88.89% was achieved. This data set was also in a Kaggle compeition titled “Bag of Words Meets Bags of Popcorn” in late 2014 to early 2015. Accuracy was achieved above 97% with winners achieving 99%.\n", 14 | "\n" 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": 1, 20 | "metadata": { 21 | "collapsed": false 22 | }, 23 | "outputs": [ 24 | { 25 | "name": "stderr", 26 | "output_type": "stream", 27 | "text": [ 28 | "Using TensorFlow backend.\n" 29 | ] 30 | } 31 | ], 32 | "source": [ 33 | "import numpy\n", 34 | "from keras.datasets import imdb\n", 35 | "from matplotlib import pyplot\n", 36 | "# load the dataset\n", 37 | "(X_train, y_train), (X_test, y_test) = imdb.load_data()\n", 38 | "X = numpy.concatenate((X_train, X_test), axis=0)\n", 39 | "y = numpy.concatenate((y_train, y_test), axis=0)" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": 2, 45 | "metadata": { 46 | "collapsed": false 47 | }, 48 | "outputs": [ 49 | { 50 | "name": "stdout", 51 | "output_type": "stream", 52 | "text": [ 53 | "Training data: \n", 54 | "(50000,)\n", 55 | "(50000,)\n", 56 | "Classes: \n", 57 | "[0 1]\n", 58 | "Number of words: \n", 59 | "88585\n" 60 | ] 61 | } 62 | ], 63 | "source": [ 64 | "# summarize size\n", 65 | "print(\"Training data: \")\n", 66 | "print(X.shape)\n", 67 | "print(y.shape)\n", 68 | "\n", 69 | "# Summarize number of classes\n", 70 | "print(\"Classes: \")\n", 71 | "print(numpy.unique(y))\n", 72 | "\n", 73 | "# Summarize number of words\n", 74 | "print(\"Number of words: \")\n", 75 | "print(len(numpy.unique(numpy.hstack(X))))" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": 3, 81 | "metadata": { 82 | "collapsed": false 83 | }, 84 | "outputs": [ 85 | { 86 | "name": "stdout", 87 | "output_type": "stream", 88 | "text": [ 89 | "Review length: \n", 90 | "Mean 234.76 words (172.911495)\n" 91 | ] 92 | }, 93 | { 94 | "data": { 95 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAg0AAAFkCAYAAACjCwibAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAAPYQAAD2EBqD+naQAAGpJJREFUeJzt3W1wXdV97/Hv38GxMRfLTLi2XBdwU3BsU6BIxYBTE3qd\nYqBcNTNpkop4CiXtJG2qZpyh6SSTFia5c5OQB6c14Ta3QCCBqBPIpLUgtSncBEIhobWA4UGGTC42\nT5EChcoM2BjM6ouzpRyd2PKSjs7ZOvL3M3PmeO+19vZfL0A/r73W2pFSQpIk6WBmlV2AJElqDYYG\nSZKUxdAgSZKyGBokSVIWQ4MkScpiaJAkSVkMDZIkKYuhQZIkZTE0SJKkLIYGSZKUZUKhISI+HhH3\nRcSuiBiKiO9ExLKaPt+PiDeqPvsi4qqaPsdExK0R8XJEDEbEFRExq6bP2RGxLSL2RMTjEXHR5H9M\nSZJUr4mONKwBNgGnA+8EZgO3RcThVX0S8H+BRUA7sBj42EhjEQ6+CxwGnAFcBFwMfKqqz1LgFuAO\n4BTgb4CrI+K3J1ivJEmaIlHPC6si4mjgZ8BZKaW7i3PfA+5PKX30ANecB2wGFqeUni/OfRD4LPDf\nU0qvR8TngPNSSidXXdcLtKWUzp90wZIkadLqndOwgMrIwgs1598fEc9FxEMR8b9rRiLOAB4aCQyF\nrUAbcGJVn9tr7rkVOLPOeiVJ0iQdNtkLIyKALwN3p5QerWq6EdgJPAucDFwBLAN+r2hvB4ZqbjdU\n1fbgOH3mR8SclNKr+6nnLcA6YAewZ3I/lSRJh6S5wFJga0rpPw7UadKhAbgKWAm8vfpkSunqqsNH\nImIQuCMifiWl9MRB7jnes5I4SJ91VAKLJEmanPcD3zxQ46RCQ0RcCZwPrEkp/fQg3X9UfB8PPAEM\nAqfV9FlUfA9WfS+q6bMQ2JVS2nuAv2cHwA033MCKFSsOUpKkRtuyZQtbtmwZPf7BD37AmjVrRo/P\nPfdczj333DJKk1RjYGCA9evXQ/G79EAmHBqKwPC7wDtSSk9mXHIqldGBkXBxL/CJiDi6al7DOcAw\nMFDV57ya+5xTnD+QPQArVqygo6MjoyxJjdTR0cEnPvGJ0eP29nbuuuuuEiuSlGHcx/sT3afhKipD\nFxcCL0fEouIzt2h/a0R8MiI6IuK4iOgCrgfuTCk9XNzmNuBR4BsRcXJErAM+DVyZUnqt6PN3wK9G\nxOci4m0R8adU5kR8aSL1SpKkqTPR1RMfAuYD36cy0XHk896ifS+V/Ru2Uhk1+DxwE9A1coOU0hvA\nBcA+4B7g68B1wGVVfXYAv1Pc6wFgA/CBlFLtigpJktQkE3o8kVIaN2SklJ4Gzs64z1NUgsN4fe4E\nOidSn6Tpa8mSJWWXIKlOvntCUlNceumlZZcgqU6GBklN0d3dXXYJkupkaJAkSVkMDZIkKYuhQZIk\nZTE0SJKkLIYGSZKUxdAgSZKyGBokSVIWQ4MkScpiaJAkSVkMDZIkKYuhQZIkZTE0SGqK3t7eskuQ\nVCdDg6SmMDRIrc/QIEmSshgaJElSlsPKLkDSzNTb2zvmkURfXx9dXV2jx93d3XR3d5dRmqRJMjRI\naojaUNDV1cXmzZtLrEhSvXw8IUmSshgaJElSFkODpKZw/oLU+gwNkprC0CC1PkODJEnKYmiQJElZ\nDA2SJCmLoUGSJGUxNEiSpCyGBkmSlMXQIEmSshgaJElSFkODJEnKYmiQJElZDA2SJCmLoUGSJGUx\nNEiSpCyGBkmSlMXQIEmSshgaJDVFb29v2SVIqpOhQVJTfOELXyi7BEl1MjRIaopnnnmm7BIk1cnQ\nIEmSshxWdgGSZqbe3t4x8xiGhobo6uoaPe7u7qa7u7uM0iRNkiMNkiQpiyMNkhqidiShvb2dzZs3\nl1iRpHo50iBJkrIYGiQ1xZIlS8ouQVKdDA2SmuLSSy8tuwRJdTI0SGoKV0pIrc/QIEmSshgaJElS\nlgmFhoj4eETcFxG7ImIoIr4TEctq+syJiK9ExPMR8VJE3BwRC2v6HBMRt0bEyxExGBFXRMSsmj5n\nR8S2iNgTEY9HxEWT/zElSVK9JjrSsAbYBJwOvBOYDdwWEYdX9fky8DvAu4GzgF8Cvj3SWISD71LZ\nI+IM4CLgYuBTVX2WArcAdwCnAH8DXB0Rvz3BeiVJ0hSZ0OZOKaXzq48j4mLgZ0AncHdEzAcuAX4/\npXRn0ecPgYGIWJVSug9YBywHfiul9DzwUET8FfDZiLg8pfQ68CfA/08pfaz4qx6LiN8ENgD/Msmf\nVZIk1aHeOQ0LgAS8UBx3Ugkid4x0SCk9BjwJnFmcOgN4qAgMI7YCbcCJVX1ur/m7tlbdQ5IkNdmk\nQ0NEBJVHEXenlB4tTrcDe1NKu2q6DxVtI32G9tNORp/5ETFnsjVLkqTJq+fdE1cBK4HfzOgbVEYk\nDma8PpHRhw0bNtDW1jbmnG/TkySpovYNtADDw8NZ104qNETElcD5wJqU0rNVTYPAmyNifs1ow0J+\nPnIwCJxWc8tFVW0j34tq+iwEdqWU9o5X28aNG+no6Mj7QSRJOsTs7x/S/f39dHZ2HvTaCT+eKALD\n71KZyPhkTfM24HVgbVX/ZcCxwD3FqXuBkyLi6KrrzgGGgYGqPmsZ65zivCRJKsGERhoi4iqgG+gC\nXo6IkdGA4ZTSnpTSroi4BvhSRLwIvAT8LfCvKaV/K/reBjwKfCMi/hJYDHwauDKl9FrR5++AP4uI\nzwHXUgkQv0dldEOSJJVgoiMNHwLmA98Hnq36vLeqzwYqeyzcXNXv3SONKaU3gAuAfVRGH74OXAdc\nVtVnB5W9Ht4JPFDc8wMppdoVFZIkqUkmuk/DQUNGSulVoKf4HKjPU1SCw3j3uZPKEk5JkjQN+O4J\nSZKUxdAgSZKyGBokSVIWQ4MkScpiaJAkSVkMDZIkKYuhQZIkZTE0SJKkLIYGSZKUxdAgqSl6eg64\nSaykFmFokNQUN910U9klSKqToUGSJGUxNEiSpCyGBkkN0dPTQ3t7++hnaGhozLFzHKTWM6FXY0tS\nrk2bNrFp06bR4/b2dgYHB0usSFK9HGmQJElZDA2SJCmLoUFSU7znPe8puwRJdTI0SGqK6vkNklqT\noUGSJGUxNEiSpCyGBkmSlMXQIEmSshgaJElSFkODpKbo7e0tuwRJdTI0SGoKQ4PU+gwNkiQpi6FB\nkiRl8S2Xkhqit7d3zCOJvr4+urq6Ro+7u7vp7u4uozRJk2RokNQQtaGgvb2dzZs3l1iRpHr5eEKS\nJGUxNEiSpCw+npDUELVzGoaGhpzTILU4Q4OkhqgNBV1dXc5pkFqcjyckSVIWQ4MkScpiaJDUFMcd\nd1zZJUiqk6FBUlPs3Lmz7BIk1cnQIEmSshgaJElSFpdcSmoI3z0hzTyGBkkN4T4N0szj4wlJkpTF\n0CBJkrIYGiQ1hfs0SK3P0CCpKdynQWp9hgZJkpTF0CCp
KZ555pmyS5BUJ5dcSmqI2n0a+vv73adB\nanGGBkkNURsKFixY4D4NUovz8YSkpti9e3fZJUiq04RHGiJiDfAXQCewGHhXSmlzVfvXgItqLtuS\nUjq/qs9RwJXABcAbwLeBj6SUXq7qc3LR5zTgZ8CVKaXPT7ReSeWofTyxd+9eH09ILW4yjyeOAB4A\nrqXyy35//hm4GIji+NWa9m8Ci4C1wJuB64CvAusBIuJIYCtwG/BB4CTgaxHxYkrp6knULKnJakNB\nW1ubjyekFjfh0JBS2gJsAYiIOEC3V1NKz+2vISKWA+uAzpTS/cW5HuDWiLg0pTRIJTzMBj6QUnod\nGIiIU4GPAoYGqQXUjjTs2rXLkQapxTVqIuTZETEEvAj8P+CTKaUXirYzgRdHAkPhdiABpwP/BJwB\n3FUEhhFbgY9FRFtKabhBdUuaIrWhoL293ZEGqcU1YiLkPwN/APwP4GPAO4DvVo1KtFOZozAqpbQP\neKFoG+kzVHPfoao2SS1myZIlZZcgqU5TPtKQUvpW1eEjEfEQ8BPgbOB741waVEYbxmvnIH3YsGED\nbW1tY845DCqVz9AgTQ+1jw4BhofzBvAbvk9DSumJiHgeOJ5KaBgEFlb3iYg3AUcVbRTfi2puNXJN\n7QjEGBs3bqSjo6PesiVNMYO7ND3s7x/S/f39dHZ2HvTahu/TEBG/DLwF+Glx6l5gQTGxccRaKiMJ\n91X1OasIEyPOAR5zPoPUmgwNUuubcGiIiCMi4pSI+PXi1FuL42OKtisi4vSIOC4i1gL/CDxOZSIj\nKaXtxZ//PiJOi4i3A5uA3mLlBFSWZO4Fro2IlRHxPuDPgS/W9dNKkqRJm8zjid+g8pghFZ+RX+TX\nA38KnExlIuQC4FkqAeGvU0qvVd3jQiobN91OZXOnm4GPjDSmlHZFxLqiz78DzwOXp5SumUS9kiRp\nCkxmn4Y7GX+E4tyMe/wnxUZO4/R5iMrKC0mSNA347glJkpTF0CBJkrIYGiQ1RU9PT9klSKqToUFS\nU9x0001llyCpToYGSU2xZ8+eskuQVCdDg6Sm2L17d9klSKqToUFSQ/T09NDe3j762bt375hj5zhI\nrafh756QdGjatGkTmzZtGj2eNWsWg4OD41whabozNEhqiNo36aWU6OrqGj327bNS6zE0SGqI2lAw\na9YsNm/eXGJFkuplaJDUEI40SDOPoUFSQ9SGggULFjjSILU4V09IkqQshgZJTTF37tyyS5BUJ0OD\npKZYsmRJ2SVIqpNzGiQ1RO1EyP7+fidCSi3O0CCpIWpDwezZs50IKbU4H09Iaop9+/aVXYKkOhka\nJElSFkODpIaofWFVSskXVkktzjkNkhpi9erV7Ny5c/S4r6+PVatWjWmX1FoMDZIaonYi5Jw5c5wI\nKbU4H09IkqQshgZJTTF//vyyS5BUJ0ODpKY49thjyy5BUp0MDZIkKYuhQVJT/OQnPym7BEl1cvWE\npIaofffE8PCw756QWpyhQVJDuORSmnkMDZIaonakYe/evY40SC3OOQ2SJCmLIw2SGqJ2JKGtrc3H\nE1KLc6RBUlPs3r277BIk1cnQIKkpXnvttbJLkFQnQ4OkpoiIskuQVCdDgyRJymJokNQQ69atY86c\nOaOflNKY43Xr1pVdoqQJcvWEpIa4+OKLmTNnzuhxX1/fmKDgHg1S6zE0SGqI2iWXs2bNcsml1OJ8\nPCGpKVJKZZcgqU6GBkmSlMXQIEmSshgaJElSFkODpIbo6emhvb199AOMOe7p6Sm5QkkT5eoJSQ2x\nevVqdu7cOXrc19fHqlWrxrRLai2GBkkN8ZnPfIaHH354zLlbbrll9M87duxwrwapxfh4QlJDLF68\nmNmzZ49+gDHHixcvLrlCSRPlSIOkhnBHSGnmMTRIaojaHSEjwh0hpRbn4wlJDVH7wirAF1ZJLc6R\nBkkNsWzZMh588MHR46GhIY466qgx7ZJay4RHGiJiTURsjohnIuKNiOjaT59PRcSzEfFKRPxLRBxf\n035URNwYEcMR8WJEXB0RR9T0OTki7oqI3RGxMyL+YuI/nqSyrF69mlWrVo1+gDHHLrmUWs9kRhqO\nAB4ArgW+XdsYEX8J/BlwEfAE8L+ArRGxIqW0t+j2TWARsBZ4M3Ad8FVgfXGPI4GtwG3AB4GTgK9F\nxIsppasnUbOkJrvwwgt/4VxfX9+YPzsZUmotEw4NKaUtwBaAiIj9dPkI8OmUUl/R5w+AIeBdwLci\nYgWwDuhMKd1f9OkBbo2IS1NKg1TCw2zgAyml14GBiDgV+ChgaJBaQESM+2bL/f/vQ9J0NqUTISPi\nV4B24I6RcymlXcCPgDOLU2cAL44EhsLtQAJOr+pzVxEYRmwF3hYRbVNZs6TGONirsH1VttR6pnr1\nRDuVX/5DNeeHiraRPj+rbkwp7QNeqOmzv3tQ1UeSJDVRs1ZPBJUwUU+fkbHMce+zYcMG2trGDkbU\nrheXJOlQ1dvbS29v75hzw8PDWddOdWgYpPLLfRFjRwoWAvdX9VlYfVFEvAk4qmgb6bOo5t4j19SO\nQIyxceNGOjo6Jly4JEmHgv39Q7q/v5/Ozs6DXjuljydSSk9Q+YW/duRcRMynMlfhnuLUvcCCYmLj\niLVUwsZ9VX3OKsLEiHOAx1JKeXFIkiRNqcns03BERJwSEb9enHprcXxMcfxl4JMR8T8j4iTg68DT\nwD8BpJS2U5nU+PcRcVpEvB3YBPQWKyegsiRzL3BtRKyMiPcBfw58cZI/pyRJqtNkHk/8BvA9KnML\nEj//RX49cElK6YqImEdl34UFwA+A86r2aAC4ELiSyqqJN4CbqSzVBCorLiJiXdHn34HngctTStdM\nol5JkjQFJrNPw50cZIQipXQ5cPk47f9JsZHTOH0eAt4x0fokSVJj+MIqSZKUxdAgSZKyGBokSVIW\nQ4MkScpiaJAkSVkMDZIkKYuhQZIkZTE0SJKkLIYGSZKUxdAgSZKyGBokSVIWQ4MkScpiaJAkSVkM\nDZIkKYuhQZIkZTE0SJKkLIYGSZKUxdAgSZKyGBokSVIWQ4MkScpiaJAkSVkMDZIkKYuhQZIkZTE0\nSJKkLIYGSZKUxdAgSZKyGBokSVIWQ4MkScpiaJAkSVkMDZIkKYuhQZIkZTE0SJKkLIYGSZKUxdAg\nSZKyGBokSVIWQ4MkScpiaJAkSVkMDZIkKYuhQZIkZTE0SJKkLIYGSZKUxdAgSZKyGBokSVIWQ4Mk\nScpiaJAkSVkMDZIkKYuhQZIkZTE0SJKkLIYGSZKUxdAgSZKyGBokSVKWKQ8NEXFZRLxR83m0qn1O\nRHwlIp6PiJci4uaIWFhzj2Mi4taIeDkiBiPiiogw4EiSVKLDGnTfh4G1QBTHr1e1fRk4D3g3sAv4\nCvBtYA1AEQ6+CzwLnAH8EvANYC/wyQbVK0mSDqJRoeH1lNJztScjYj5wCfD7KaU7i3N/CAxExKqU\n0n3AOmA58Fs
ppeeBhyLir4DPRsTlKaXXa+8rqXFeeeUVtm/f3pB79/f3T+q65cuXM2/evCmuRtLB\nNCo0nBARzwB7gHuBj6eUngI6i7/zjpGOKaXHIuJJ4EzgPiqjCw8VgWHEVuD/ACcCDzaoZkn7sX37\ndjo7Oxty78ned9u2bXR0dExxNZIOphGh4YfAxcBjwGLgcuCuiPg1oB3Ym1LaVXPNUNFG8T20n/aR\nNkOD1ETLly9n27Ztdd+ns7NzSu4DlZokNd+Uh4aU0taqw4cj4j5gJ/BeKiMP+xNAyrn9wTps2LCB\ntra2Mee6u7vp7u7OuL2kWvPmzZuyf9U7OiCVr7e3l97e3jHnhoeHs65t1OOJUSml4Yh4HDgeuB14\nc0TMrxltWMjPRxMGgdNqbrOo+K4dgfgFGzdu9H9MkiQdwP7+Id3f35/1uLDhyxgj4r8Bv0plNcQ2\nKisp1la1LwOOBe4pTt0LnBQRR1fd5hxgGHgUSS3npz8d+y2pNTVin4bPR8RZEXFcRKwGvkMlKPxD\nMbpwDfCliDg7IjqBrwH/mlL6t+IWt1EJB9+IiJMjYh3waeDKlNJrU12vpMarhIVkaJBaXCMeT/wy\n8E3gLcBzwN3AGSml/yjaNwD7gJuBOcAW4MMjF6eU3oiIC6islrgHeBm4DrisAbVKkqRMjZgIOe6M\nw5TSq0BP8TlQn6eAC6a4NEmSVAe3ZpYkSVkMDZIkKYuhQZIkZTE0SJKkLIYGSQ03dy6sXFn5ltS6\nGr4jpCStXAmPPFJ2FZLq5UiDJEnKYmiQJElZDA2SJCmLoUGSJGUxNEiSpCyGBkmSlMXQIEmSshga\nJDXco4/CiSdWviW1LkODpIbbs6cSGPbsKbsSSfUwNEiSpCyGBkmSlMXQIEmSshgaJElSFkODJEnK\n4quxpRnsxz+Gl14quwoYGBj7XbYjj4QTTii7Cqn1GBqkGerHP4Zly8quYqz168uu4Ocef9zgIE2U\noUGaoUZGGG64AVasKLeW6WRgoBJepsMIjNRqDA3SDLdiBXR0lF2FpJnAiZCSJCmLoUGSJGUxNEiS\npCyGBkmSlMXQIEmSshgaJElSFkODJEnKYmiQJElZDA2SJCmLoUGSJGVxG2lphordr3Aq2zl8mrxZ\ncro4fABOBWL3cmBe2eVILcXQIM1Qc3dsp59OmEZvlpwOVgD9wMCObfB2X8ohTYShQZqh9ixdTgfb\nuNG3XI4xMADvXw/XLF1edilSyzE0SDNUOnwe99PB7hWA/6AetRu4H0iHl12J1HqcCClJkrI40iDN\nUK+8Uvnu7y+3julmwImh0qQZGqQZavv2yvcf/3G5dUxXRx5ZdgVS6zE0SDPUu95V+V6+HOaVvLJw\nYADWr4cbpsmkzCOPhBNOKLsKqfUYGqQZ6uij4Y/+qOwqxlqxAjqclCm1LCdCSpKkLIYGSZKUxdAg\nSZKyGBokSVIWQ4MkScpiaJDUcHPnwsqVlW9Jrcsll5IabuVKeOSRsquQVC9HGiQ1RW9vb9klSKrT\ntA4NEfHhiHgiInZHxA8j4rSya5I0OYYGqfVN29AQEe8DvghcBpwKPAhsjYijSy1MkqRD1LQNDcAG\n4Ksppa+nlLYDHwJeAS4ptyxJkg5N0zI0RMRsoBO4Y+RcSikBtwNnllWXJEmHsum6euJo4E3AUM35\nIeBtB7hmLsDAwEADy5IOPbt372bHjh113+fpp5/mxhtvrL8gYOnSpRx++OFTci9JY353jrswerqG\nhgMJIB2gbSnA+vXrm1aMpInxv09p2lsK3HOgxukaGp4H9gGLas4v5BdHH0ZsBd4P7AD2NKwySZJm\nnrlUAsPW8TpFZarA9BMRPwR+lFL6SHEcwJPA36aUPl9qcZIkHYKm60gDwJeA6yNiG3AfldUU84Dr\nyixKkqRD1bQNDSmlbxV7MnyKymOKB4B1KaXnyq1MkqRD07R9PCFJkqaXablPgyRJmn4MDZIkKYuh\nQVLDRMSaiNgcEc9ExBsR0VV2TZImz9AgqZGOoDKJ+cMceGM2SS1i2q6ekNT6UkpbgC0wuteKpBbm\nSIMkScpiaJAkSVkMDZIkKYuhQZIkZTE0SJKkLK6ekNQwEXEEcDwwsnLirRFxCvBCSump8iqTNBm+\ne0JSw0TEO4Dv8Yt7NFyfUrqkhJIk1cHQIEmSsjinQZIkZTE0SJKkLIYGSZKUxdAgSZKyGBokSVIW\nQ4MkScpiaJAkSVkMDZIkKYuhQZIkZTE0SJKkLIYGSZKU5b8AiWdKOuYlSUQAAAAASUVORK5CYII=\n", 96 | "text/plain": [ 97 | "" 98 | ] 99 | }, 100 | "metadata": {}, 101 | "output_type": "display_data" 102 | } 103 | ], 104 | "source": [ 105 | "# Summarize review length\n", 106 | "print(\"Review length: \")\n", 107 | "result = map(len, X)\n", 108 | "print(\"Mean %.2f words (%f)\" % (numpy.mean(result), numpy.std(result)))\n", 109 | "# plot review length\n", 110 | "pyplot.boxplot(result)\n", 111 | "pyplot.show()" 112 | ] 113 | }, 114 | { 115 | "cell_type": "markdown", 116 | "metadata": {}, 117 | "source": [ 118 | "## Word Embeddings\n", 119 | "\n", 120 | "A recent breakthrough in the field of natural language processing is called word embedding.\n", 121 | "\n", 122 | "This is a technique where words are encoded as real-valued vectors in a high dimensional space, where the similarity between words in terms of meaning translates to closeness in the vector space.\n", 123 | "\n", 124 | "Discrete words are mapped to vectors of continuous numbers. 
This is useful when working with natural language problems with neural networks and deep learning models are we require numbers as input.\n", 125 | "\n", 126 | "Keras provides a convenient way to convert positive integer representations of words into a word embedding by an Embedding layer.\n", 127 | "\n", 128 | "The layer takes arguments that define the mapping including the maximum number of expected words also called the vocabulary size (e.g. the largest integer value that will be seen as an integer). The layer also allows you to specify the dimensionality for each word vector, called the output dimension.\n", 129 | "\n", 130 | "We would like to use a word embedding representation for the IMDB dataset.\n", 131 | "\n", 132 | "Let’s say that we are only interested in the first 5,000 most used words in the dataset. Therefore our vocabulary size will be 5,000. We can choose to use a 32-dimension vector to represent each word. Finally, we may choose to cap the maximum review length at 500 words, truncating reviews longer than that and padding reviews shorter than that with 0 values.\n", 133 | "\n", 134 | "We would load the IMDB dataset as follows:" 135 | ] 136 | }, 137 | { 138 | "cell_type": "raw", 139 | "metadata": { 140 | "collapsed": false 141 | }, 142 | "source": [ 143 | "imdb.load_data(nb_words=500)" 144 | ] 145 | }, 146 | { 147 | "cell_type": "markdown", 148 | "metadata": {}, 149 | "source": [ 150 | "### Simple Multi-Layer Perceptron Model for the IMDB Dataset" 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "execution_count": 4, 156 | "metadata": { 157 | "collapsed": true 158 | }, 159 | "outputs": [], 160 | "source": [ 161 | "# MLP for the IMDB problem\n", 162 | "import numpy\n", 163 | "from keras.datasets import imdb\n", 164 | "from keras.models import Sequential\n", 165 | "from keras.layers import Dense\n", 166 | "from keras.layers import Flatten\n", 167 | "from keras.layers.embeddings import Embedding\n", 168 | "from keras.preprocessing import sequence" 169 | ] 170 | }, 171 | { 172 | "cell_type": "code", 173 | "execution_count": 5, 174 | "metadata": { 175 | "collapsed": true 176 | }, 177 | "outputs": [], 178 | "source": [ 179 | "# load the dataset but only keep the top n words\n", 180 | "top_words = 5000\n", 181 | "test_split = 0.33\n", 182 | "(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=top_words)" 183 | ] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "execution_count": 6, 188 | "metadata": { 189 | "collapsed": false 190 | }, 191 | "outputs": [], 192 | "source": [ 193 | "max_words = 500\n", 194 | "X_train = sequence.pad_sequences(X_train, maxlen=max_words)\n", 195 | "X_test = sequence.pad_sequences(X_test, maxlen=max_words)" 196 | ] 197 | }, 198 | { 199 | "cell_type": "code", 200 | "execution_count": 7, 201 | "metadata": { 202 | "collapsed": false 203 | }, 204 | "outputs": [ 205 | { 206 | "data": { 207 | "text/plain": [ 208 | "array([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 209 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 210 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 211 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 212 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 213 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 214 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 215 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 216 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 217 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 218 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 219 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 220 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 221 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 222 | 
" 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 223 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 224 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 225 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 226 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 227 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 228 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 229 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 230 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 231 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 232 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 233 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 234 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 235 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 236 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 237 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 238 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 239 | " 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n", 240 | " 0, 1, 249, 1323, 7, 61, 113, 10, 10, 13, 1637,\n", 241 | " 14, 20, 56, 33, 2401, 18, 457, 88, 13, 2626, 1400,\n", 242 | " 45, 3171, 13, 70, 79, 49, 706, 919, 13, 16, 355,\n", 243 | " 340, 355, 1696, 96, 143, 4, 22, 32, 289, 7, 61,\n", 244 | " 369, 71, 2359, 5, 13, 16, 131, 2073, 249, 114, 249,\n", 245 | " 229, 249, 20, 13, 28, 126, 110, 13, 473, 8, 569,\n", 246 | " 61, 419, 56, 429, 6, 1513, 18, 35, 534, 95, 474,\n", 247 | " 570, 5, 25, 124, 138, 88, 12, 421, 1543, 52, 725,\n", 248 | " 2, 61, 419, 11, 13, 1571, 15, 1543, 20, 11, 4,\n", 249 | " 2, 5, 296, 12, 3524, 5, 15, 421, 128, 74, 233,\n", 250 | " 334, 207, 126, 224, 12, 562, 298, 2167, 1272, 7, 2601,\n", 251 | " 5, 516, 988, 43, 8, 79, 120, 15, 595, 13, 784,\n", 252 | " 25, 3171, 18, 165, 170, 143, 19, 14, 5, 2, 6,\n", 253 | " 226, 251, 7, 61, 113], dtype=int32)" 254 | ] 255 | }, 256 | "execution_count": 7, 257 | "metadata": {}, 258 | "output_type": "execute_result" 259 | } 260 | ], 261 | "source": [ 262 | "X_train[4,:]" 263 | ] 264 | }, 265 | { 266 | "cell_type": "code", 267 | "execution_count": 8, 268 | "metadata": { 269 | "collapsed": false 270 | }, 271 | "outputs": [ 272 | { 273 | "name": "stdout", 274 | "output_type": "stream", 275 | "text": [ 276 | "____________________________________________________________________________________________________\n", 277 | "Layer (type) Output Shape Param # Connected to \n", 278 | "====================================================================================================\n", 279 | "embedding_1 (Embedding) (None, 500, 32) 160000 embedding_input_1[0][0] \n", 280 | "____________________________________________________________________________________________________\n", 281 | "flatten_1 (Flatten) (None, 16000) 0 embedding_1[0][0] \n", 282 | "____________________________________________________________________________________________________\n", 283 | "dense_1 (Dense) (None, 30) 480030 flatten_1[0][0] \n", 284 | "____________________________________________________________________________________________________\n", 285 | "dense_2 (Dense) (None, 30) 930 dense_1[0][0] \n", 286 | "____________________________________________________________________________________________________\n", 287 | "dense_3 (Dense) (None, 30) 930 dense_2[0][0] \n", 288 | "____________________________________________________________________________________________________\n", 289 | "dense_4 (Dense) (None, 1) 31 dense_3[0][0] \n", 290 | "====================================================================================================\n", 291 | "Total params: 641921\n", 292 | "____________________________________________________________________________________________________\n", 293 | "None\n" 294 | ] 295 | } 296 | ], 
297 | "source": [ 298 | "# create the model\n", 299 | "model = Sequential()\n", 300 | "model.add(Embedding(top_words, 32, input_length=max_words))\n", 301 | "#model.add(Dense(256, input_dim=max_words))\n", 302 | "model.add(Flatten())\n", 303 | "model.add(Dense(30, activation='relu'))\n", 304 | "model.add(Dense(30, activation='relu'))\n", 305 | "model.add(Dense(30, activation='relu'))\n", 306 | "model.add(Dense(1, activation='sigmoid'))\n", 307 | "model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n", 308 | "print(model.summary())" 309 | ] 310 | }, 311 | { 312 | "cell_type": "code", 313 | "execution_count": 9, 314 | "metadata": { 315 | "collapsed": false 316 | }, 317 | "outputs": [ 318 | { 319 | "name": "stdout", 320 | "output_type": "stream", 321 | "text": [ 322 | "Train on 25000 samples, validate on 25000 samples\n", 323 | "Epoch 1/4\n", 324 | "25000/25000 [==============================] - 17s - loss: 0.4989 - acc: 0.7193 - val_loss: 0.3300 - val_acc: 0.8574\n", 325 | "Epoch 2/4\n", 326 | "25000/25000 [==============================] - 17s - loss: 0.1990 - acc: 0.9240 - val_loss: 0.3204 - val_acc: 0.8706\n", 327 | "Epoch 3/4\n", 328 | "25000/25000 [==============================] - 13s - loss: 0.0796 - acc: 0.9752 - val_loss: 0.3967 - val_acc: 0.8659\n", 329 | "Epoch 4/4\n", 330 | "25000/25000 [==============================] - 12s - loss: 0.0208 - acc: 0.9954 - val_loss: 0.5222 - val_acc: 0.8627\n", 331 | "Accuracy: 86.27%\n" 332 | ] 333 | } 334 | ], 335 | "source": [ 336 | "# Fit the model\n", 337 | "model.fit(X_train, y_train, validation_data=(X_test, y_test), nb_epoch=4, batch_size=128, verbose=1)\n", 338 | "# Final evaluation of the model\n", 339 | "scores = model.evaluate(X_test, y_test, verbose=0)\n", 340 | "print(\"Accuracy: %.2f%%\" % (scores[1]*100))" 341 | ] 342 | }, 343 | { 344 | "cell_type": "markdown", 345 | "metadata": {}, 346 | "source": [ 347 | "### One-Dimensional Convolutional Neural Network Model for the IMDB Dataset" 348 | ] 349 | }, 350 | { 351 | "cell_type": "code", 352 | "execution_count": 10, 353 | "metadata": { 354 | "collapsed": true 355 | }, 356 | "outputs": [], 357 | "source": [ 358 | "# CNN for the IMDB problem\n", 359 | "import numpy\n", 360 | "from keras.datasets import imdb\n", 361 | "from keras.models import Sequential\n", 362 | "from keras.layers import Dense\n", 363 | "from keras.layers import Flatten\n", 364 | "from keras.layers.convolutional import Convolution1D\n", 365 | "from keras.layers.convolutional import MaxPooling1D\n", 366 | "from keras.layers.embeddings import Embedding\n", 367 | "from keras.preprocessing import sequence\n", 368 | "# fix random seed for reproducibility\n", 369 | "seed = 7\n", 370 | "numpy.random.seed(seed)" 371 | ] 372 | }, 373 | { 374 | "cell_type": "code", 375 | "execution_count": 11, 376 | "metadata": { 377 | "collapsed": true 378 | }, 379 | "outputs": [], 380 | "source": [ 381 | "# load the dataset but only keep the top n words, zero the rest\n", 382 | "top_words = 5000\n", 383 | "test_split = 0.33\n", 384 | "(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=top_words)\n", 385 | "# pad dataset to a maximum review length in words\n", 386 | "max_words = 500\n", 387 | "X_train = sequence.pad_sequences(X_train, maxlen=max_words)\n", 388 | "X_test = sequence.pad_sequences(X_test, maxlen=max_words)" 389 | ] 390 | }, 391 | { 392 | "cell_type": "code", 393 | "execution_count": 12, 394 | "metadata": { 395 | "collapsed": false 396 | }, 397 | "outputs": [ 398 | { 399 | "name": "stdout", 
400 | "output_type": "stream", 401 | "text": [ 402 | "____________________________________________________________________________________________________\n", 403 | "Layer (type) Output Shape Param # Connected to \n", 404 | "====================================================================================================\n", 405 | "embedding_2 (Embedding) (None, 500, 32) 160000 embedding_input_2[0][0] \n", 406 | "____________________________________________________________________________________________________\n", 407 | "convolution1d_1 (Convolution1D) (None, 500, 32) 3104 embedding_2[0][0] \n", 408 | "____________________________________________________________________________________________________\n", 409 | "maxpooling1d_1 (MaxPooling1D) (None, 250, 32) 0 convolution1d_1[0][0] \n", 410 | "____________________________________________________________________________________________________\n", 411 | "flatten_2 (Flatten) (None, 8000) 0 maxpooling1d_1[0][0] \n", 412 | "____________________________________________________________________________________________________\n", 413 | "dense_5 (Dense) (None, 250) 2000250 flatten_2[0][0] \n", 414 | "____________________________________________________________________________________________________\n", 415 | "dense_6 (Dense) (None, 1) 251 dense_5[0][0] \n", 416 | "====================================================================================================\n", 417 | "Total params: 2163605\n", 418 | "____________________________________________________________________________________________________\n", 419 | "None\n" 420 | ] 421 | } 422 | ], 423 | "source": [ 424 | "# create the model\n", 425 | "model = Sequential()\n", 426 | "model.add(Embedding(top_words, 32, input_length=max_words))\n", 427 | "model.add(Convolution1D(nb_filter=32, filter_length=3, border_mode='same', activation='relu'))\n", 428 | "model.add(MaxPooling1D(pool_length=2))\n", 429 | "model.add(Flatten())\n", 430 | "model.add(Dense(250, activation='relu'))\n", 431 | "model.add(Dense(1, activation='sigmoid'))\n", 432 | "model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n", 433 | "print(model.summary())" 434 | ] 435 | }, 436 | { 437 | "cell_type": "code", 438 | "execution_count": 13, 439 | "metadata": { 440 | "collapsed": false 441 | }, 442 | "outputs": [ 443 | { 444 | "name": "stdout", 445 | "output_type": "stream", 446 | "text": [ 447 | "Train on 25000 samples, validate on 25000 samples\n", 448 | "Epoch 1/2\n", 449 | "25000/25000 [==============================] - 226s - loss: 0.4457 - acc: 0.7574 - val_loss: 0.3042 - val_acc: 0.8727\n", 450 | "Epoch 2/2\n", 451 | "25000/25000 [==============================] - 251s - loss: 0.2287 - acc: 0.9108 - val_loss: 0.2828 - val_acc: 0.8828\n", 452 | "Accuracy: 88.28%\n" 453 | ] 454 | } 455 | ], 456 | "source": [ 457 | "# Fit the model\n", 458 | "model.fit(X_train, y_train, validation_data=(X_test, y_test), nb_epoch=2, batch_size=128, verbose=1)\n", 459 | "# Final evaluation of the model\n", 460 | "scores = model.evaluate(X_test, y_test, verbose=0)\n", 461 | "print(\"Accuracy: %.2f%%\" % (scores[1]*100))" 462 | ] 463 | }, 464 | { 465 | "cell_type": "code", 466 | "execution_count": null, 467 | "metadata": { 468 | "collapsed": false 469 | }, 470 | "outputs": [], 471 | "source": [] 472 | }, 473 | { 474 | "cell_type": "code", 475 | "execution_count": null, 476 | "metadata": { 477 | "collapsed": true 478 | }, 479 | "outputs": [], 480 | "source": [] 481 | } 482 | ], 483 | "metadata": { 484 | "kernelspec": 
{ 485 | "display_name": "Python 2", 486 | "language": "python", 487 | "name": "python2" 488 | }, 489 | "language_info": { 490 | "codemirror_mode": { 491 | "name": "ipython", 492 | "version": 2 493 | }, 494 | "file_extension": ".py", 495 | "mimetype": "text/x-python", 496 | "name": "python", 497 | "nbconvert_exporter": "python", 498 | "pygments_lexer": "ipython2", 499 | "version": "2.7.6" 500 | } 501 | }, 502 | "nbformat": 4, 503 | "nbformat_minor": 1 504 | } 505 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM gcr.io/tensorflow/tensorflow 2 | #Install packages 3 | RUN DEBIAN_FRONTEND=noninteractive apt-get update 4 | RUN DEBIAN_FRONTEND=noninteractive apt-get -qqy install wget python-pip git timidity unzip 5 | RUN DEBIAN_FRONTEND=noninteractive pip install --upgrade pip 6 | RUN DEBIAN_FRONTEND=noninteractive pip install tqdm pandas seaborn bokeh sklearn keras h5py scikit-image 7 | RUN DEBIAN_FRONTEND=noninteractive pip install git+https://github.com/tflearn/tflearn.git 8 | 9 | RUN pip install --upgrade numpy 10 | RUN pip install --upgrade scikit-image 11 | 12 | #Remove examples 13 | RUN rm -Rf * -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License 2 | 3 | Copyright (c) 2016 Jordi Vitria, Santi Segui, Oriol Pujol. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Deep Learning from Scratch (v3.0) 2 | 3 | This course is organized by the DataScienceGroup@UB (http://datascience.barcelona/) 4 | 5 | Deep learning is one of the fastest growing areas of machine learning and a hot topic in both academia and industry. 6 | This course will cover the basics of deep learning by using a hands-on approach. 7 | 8 | ## Course Agenda 9 | ### Day 1 10 |
  • Introduction to Deep Learning and its applications. Using the Jupyter notebook & Docker. 11 |
  • Basic Concepts: Score & Loss functions, Optimization (SGD), Linear Regression. 12 |
  • Automatic differentiation, Backpropagation, Training a Neural Network from Scratch. 13 |
  • TensorFlow programming model. Keras. 14 | 15 | ### Day 2 16 |
  • Recap Exercise. 17 |
  • Convolutions & CNN models. 18 |
  • Recurrent Neural Networks. 19 |
  • Unsupervised Learning. 20 |
  • Advanced Applications. 21 | -------------------------------------------------------------------------------- /Test if everything is up .ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "collapsed": true 7 | }, 8 | "source": [ 9 | "# Test if everything is up\n", 10 | "\n" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 4, 16 | "metadata": { 17 | "collapsed": false 18 | }, 19 | "outputs": [ 20 | { 21 | "name": "stdout", 22 | "output_type": "stream", 23 | "text": [ 24 | "Python 2.7.12 :: Anaconda custom (x86_64)\r\n" 25 | ] 26 | } 27 | ], 28 | "source": [ 29 | "!python --version" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": 5, 35 | "metadata": { 36 | "collapsed": false 37 | }, 38 | "outputs": [ 39 | { 40 | "name": "stdout", 41 | "output_type": "stream", 42 | "text": [ 43 | "('numpy:', '1.11.2')\n", 44 | "('scipy:', '0.18.1')\n", 45 | "('matplotlib:', '1.5.3')\n", 46 | "('iPython:', '5.1.0')\n", 47 | "('pandas:', u'0.18.1')\n", 48 | "('scikit-learn:', '0.17.1')\n", 49 | "('skimage:', '0.12.3')\n", 50 | "('tensorflow:', '0.11.0rc1')\n", 51 | "('keras:', '1.0.4')\n" 52 | ] 53 | } 54 | ], 55 | "source": [ 56 | "import numpy\n", 57 | "print('numpy:', numpy.__version__)\n", 58 | "\n", 59 | "import scipy\n", 60 | "print('scipy:', scipy.__version__)\n", 61 | "\n", 62 | "import matplotlib\n", 63 | "print('matplotlib:', matplotlib.__version__)\n", 64 | "\n", 65 | "import IPython\n", 66 | "print('iPython:', IPython.__version__)\n", 67 | "\n", 68 | "import pandas\n", 69 | "print('pandas:', pandas.__version__)\n", 70 | "\n", 71 | "import sklearn\n", 72 | "print('scikit-learn:', sklearn.__version__)\n", 73 | "\n", 74 | "import skimage\n", 75 | "print('skimage:', skimage.__version__)\n", 76 | "\n", 77 | "import tensorflow\n", 78 | "print('tensorflow:', tensorflow.__version__)\n", 79 | "\n", 80 | "import keras\n", 81 | "print('keras:', keras.__version__)" 82 | ] 83 | } 84 | ], 85 | "metadata": { 86 | "anaconda-cloud": {}, 87 | "kernelspec": { 88 | "display_name": "Python [default]", 89 | "language": "python", 90 | "name": "python2" 91 | }, 92 | "language_info": { 93 | "codemirror_mode": { 94 | "name": "ipython", 95 | "version": 2 96 | }, 97 | "file_extension": ".py", 98 | "mimetype": "text/x-python", 99 | "name": "python", 100 | "nbconvert_exporter": "python", 101 | "pygments_lexer": "ipython2", 102 | "version": "2.7.12" 103 | } 104 | }, 105 | "nbformat": 4, 106 | "nbformat_minor": 0 107 | } 108 | -------------------------------------------------------------------------------- /dataset/Advertising.csv: -------------------------------------------------------------------------------- 1 | "","TV","Radio","Newspaper","Sales" 2 | "1",230.1,37.8,69.2,22.1 3 | "2",44.5,39.3,45.1,10.4 4 | "3",17.2,45.9,69.3,9.3 5 | "4",151.5,41.3,58.5,18.5 6 | "5",180.8,10.8,58.4,12.9 7 | "6",8.7,48.9,75,7.2 8 | "7",57.5,32.8,23.5,11.8 9 | "8",120.2,19.6,11.6,13.2 10 | "9",8.6,2.1,1,4.8 11 | "10",199.8,2.6,21.2,10.6 12 | "11",66.1,5.8,24.2,8.6 13 | "12",214.7,24,4,17.4 14 | "13",23.8,35.1,65.9,9.2 15 | "14",97.5,7.6,7.2,9.7 16 | "15",204.1,32.9,46,19 17 | "16",195.4,47.7,52.9,22.4 18 | "17",67.8,36.6,114,12.5 19 | "18",281.4,39.6,55.8,24.4 20 | "19",69.2,20.5,18.3,11.3 21 | "20",147.3,23.9,19.1,14.6 22 | "21",218.4,27.7,53.4,18 23 | "22",237.4,5.1,23.5,12.5 24 | "23",13.2,15.9,49.6,5.6 25 | "24",228.3,16.9,26.2,15.5 26 | "25",62.3,12.6,18.3,9.7 27 | 
"26",262.9,3.5,19.5,12 28 | "27",142.9,29.3,12.6,15 29 | "28",240.1,16.7,22.9,15.9 30 | "29",248.8,27.1,22.9,18.9 31 | "30",70.6,16,40.8,10.5 32 | "31",292.9,28.3,43.2,21.4 33 | "32",112.9,17.4,38.6,11.9 34 | "33",97.2,1.5,30,9.6 35 | "34",265.6,20,0.3,17.4 36 | "35",95.7,1.4,7.4,9.5 37 | "36",290.7,4.1,8.5,12.8 38 | "37",266.9,43.8,5,25.4 39 | "38",74.7,49.4,45.7,14.7 40 | "39",43.1,26.7,35.1,10.1 41 | "40",228,37.7,32,21.5 42 | "41",202.5,22.3,31.6,16.6 43 | "42",177,33.4,38.7,17.1 44 | "43",293.6,27.7,1.8,20.7 45 | "44",206.9,8.4,26.4,12.9 46 | "45",25.1,25.7,43.3,8.5 47 | "46",175.1,22.5,31.5,14.9 48 | "47",89.7,9.9,35.7,10.6 49 | "48",239.9,41.5,18.5,23.2 50 | "49",227.2,15.8,49.9,14.8 51 | "50",66.9,11.7,36.8,9.7 52 | "51",199.8,3.1,34.6,11.4 53 | "52",100.4,9.6,3.6,10.7 54 | "53",216.4,41.7,39.6,22.6 55 | "54",182.6,46.2,58.7,21.2 56 | "55",262.7,28.8,15.9,20.2 57 | "56",198.9,49.4,60,23.7 58 | "57",7.3,28.1,41.4,5.5 59 | "58",136.2,19.2,16.6,13.2 60 | "59",210.8,49.6,37.7,23.8 61 | "60",210.7,29.5,9.3,18.4 62 | "61",53.5,2,21.4,8.1 63 | "62",261.3,42.7,54.7,24.2 64 | "63",239.3,15.5,27.3,15.7 65 | "64",102.7,29.6,8.4,14 66 | "65",131.1,42.8,28.9,18 67 | "66",69,9.3,0.9,9.3 68 | "67",31.5,24.6,2.2,9.5 69 | "68",139.3,14.5,10.2,13.4 70 | "69",237.4,27.5,11,18.9 71 | "70",216.8,43.9,27.2,22.3 72 | "71",199.1,30.6,38.7,18.3 73 | "72",109.8,14.3,31.7,12.4 74 | "73",26.8,33,19.3,8.8 75 | "74",129.4,5.7,31.3,11 76 | "75",213.4,24.6,13.1,17 77 | "76",16.9,43.7,89.4,8.7 78 | "77",27.5,1.6,20.7,6.9 79 | "78",120.5,28.5,14.2,14.2 80 | "79",5.4,29.9,9.4,5.3 81 | "80",116,7.7,23.1,11 82 | "81",76.4,26.7,22.3,11.8 83 | "82",239.8,4.1,36.9,12.3 84 | "83",75.3,20.3,32.5,11.3 85 | "84",68.4,44.5,35.6,13.6 86 | "85",213.5,43,33.8,21.7 87 | "86",193.2,18.4,65.7,15.2 88 | "87",76.3,27.5,16,12 89 | "88",110.7,40.6,63.2,16 90 | "89",88.3,25.5,73.4,12.9 91 | "90",109.8,47.8,51.4,16.7 92 | "91",134.3,4.9,9.3,11.2 93 | "92",28.6,1.5,33,7.3 94 | "93",217.7,33.5,59,19.4 95 | "94",250.9,36.5,72.3,22.2 96 | "95",107.4,14,10.9,11.5 97 | "96",163.3,31.6,52.9,16.9 98 | "97",197.6,3.5,5.9,11.7 99 | "98",184.9,21,22,15.5 100 | "99",289.7,42.3,51.2,25.4 101 | "100",135.2,41.7,45.9,17.2 102 | "101",222.4,4.3,49.8,11.7 103 | "102",296.4,36.3,100.9,23.8 104 | "103",280.2,10.1,21.4,14.8 105 | "104",187.9,17.2,17.9,14.7 106 | "105",238.2,34.3,5.3,20.7 107 | "106",137.9,46.4,59,19.2 108 | "107",25,11,29.7,7.2 109 | "108",90.4,0.3,23.2,8.7 110 | "109",13.1,0.4,25.6,5.3 111 | "110",255.4,26.9,5.5,19.8 112 | "111",225.8,8.2,56.5,13.4 113 | "112",241.7,38,23.2,21.8 114 | "113",175.7,15.4,2.4,14.1 115 | "114",209.6,20.6,10.7,15.9 116 | "115",78.2,46.8,34.5,14.6 117 | "116",75.1,35,52.7,12.6 118 | "117",139.2,14.3,25.6,12.2 119 | "118",76.4,0.8,14.8,9.4 120 | "119",125.7,36.9,79.2,15.9 121 | "120",19.4,16,22.3,6.6 122 | "121",141.3,26.8,46.2,15.5 123 | "122",18.8,21.7,50.4,7 124 | "123",224,2.4,15.6,11.6 125 | "124",123.1,34.6,12.4,15.2 126 | "125",229.5,32.3,74.2,19.7 127 | "126",87.2,11.8,25.9,10.6 128 | "127",7.8,38.9,50.6,6.6 129 | "128",80.2,0,9.2,8.8 130 | "129",220.3,49,3.2,24.7 131 | "130",59.6,12,43.1,9.7 132 | "131",0.7,39.6,8.7,1.6 133 | "132",265.2,2.9,43,12.7 134 | "133",8.4,27.2,2.1,5.7 135 | "134",219.8,33.5,45.1,19.6 136 | "135",36.9,38.6,65.6,10.8 137 | "136",48.3,47,8.5,11.6 138 | "137",25.6,39,9.3,9.5 139 | "138",273.7,28.9,59.7,20.8 140 | "139",43,25.9,20.5,9.6 141 | "140",184.9,43.9,1.7,20.7 142 | "141",73.4,17,12.9,10.9 143 | "142",193.7,35.4,75.6,19.2 144 | "143",220.5,33.2,37.9,20.1 145 | 
"144",104.6,5.7,34.4,10.4 146 | "145",96.2,14.8,38.9,11.4 147 | "146",140.3,1.9,9,10.3 148 | "147",240.1,7.3,8.7,13.2 149 | "148",243.2,49,44.3,25.4 150 | "149",38,40.3,11.9,10.9 151 | "150",44.7,25.8,20.6,10.1 152 | "151",280.7,13.9,37,16.1 153 | "152",121,8.4,48.7,11.6 154 | "153",197.6,23.3,14.2,16.6 155 | "154",171.3,39.7,37.7,19 156 | "155",187.8,21.1,9.5,15.6 157 | "156",4.1,11.6,5.7,3.2 158 | "157",93.9,43.5,50.5,15.3 159 | "158",149.8,1.3,24.3,10.1 160 | "159",11.7,36.9,45.2,7.3 161 | "160",131.7,18.4,34.6,12.9 162 | "161",172.5,18.1,30.7,14.4 163 | "162",85.7,35.8,49.3,13.3 164 | "163",188.4,18.1,25.6,14.9 165 | "164",163.5,36.8,7.4,18 166 | "165",117.2,14.7,5.4,11.9 167 | "166",234.5,3.4,84.8,11.9 168 | "167",17.9,37.6,21.6,8 169 | "168",206.8,5.2,19.4,12.2 170 | "169",215.4,23.6,57.6,17.1 171 | "170",284.3,10.6,6.4,15 172 | "171",50,11.6,18.4,8.4 173 | "172",164.5,20.9,47.4,14.5 174 | "173",19.6,20.1,17,7.6 175 | "174",168.4,7.1,12.8,11.7 176 | "175",222.4,3.4,13.1,11.5 177 | "176",276.9,48.9,41.8,27 178 | "177",248.4,30.2,20.3,20.2 179 | "178",170.2,7.8,35.2,11.7 180 | "179",276.7,2.3,23.7,11.8 181 | "180",165.6,10,17.6,12.6 182 | "181",156.6,2.6,8.3,10.5 183 | "182",218.5,5.4,27.4,12.2 184 | "183",56.2,5.7,29.7,8.7 185 | "184",287.6,43,71.8,26.2 186 | "185",253.8,21.3,30,17.6 187 | "186",205,45.1,19.6,22.6 188 | "187",139.5,2.1,26.6,10.3 189 | "188",191.1,28.7,18.2,17.3 190 | "189",286,13.9,3.7,15.9 191 | "190",18.7,12.1,23.4,6.7 192 | "191",39.5,41.1,5.8,10.8 193 | "192",75.5,10.8,6,9.9 194 | "193",17.2,4.1,31.6,5.9 195 | "194",166.8,42,3.6,19.6 196 | "195",149.7,35.6,6,17.3 197 | "196",38.2,3.7,13.8,7.6 198 | "197",94.2,4.9,8.1,9.7 199 | "198",177,9.3,6.4,12.8 200 | "199",283.6,42,66.2,25.5 201 | "200",232.1,8.6,8.7,13.4 202 | -------------------------------------------------------------------------------- /dataset/wiki106.txt.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/dataset/wiki106.txt.zip -------------------------------------------------------------------------------- /dataset/wordVectors.txt.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/dataset/wordVectors.txt.zip -------------------------------------------------------------------------------- /files/household_power_consumption.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/files/household_power_consumption.zip -------------------------------------------------------------------------------- /files/international-airline-passengers.csv: -------------------------------------------------------------------------------- 1 | "Month","International airline passengers: monthly totals in thousands. Jan 49 ? 
Dec 60" 2 | "1949-01",112 3 | "1949-02",118 4 | "1949-03",132 5 | "1949-04",129 6 | "1949-05",121 7 | "1949-06",135 8 | "1949-07",148 9 | "1949-08",148 10 | "1949-09",136 11 | "1949-10",119 12 | "1949-11",104 13 | "1949-12",118 14 | "1950-01",115 15 | "1950-02",126 16 | "1950-03",141 17 | "1950-04",135 18 | "1950-05",125 19 | "1950-06",149 20 | "1950-07",170 21 | "1950-08",170 22 | "1950-09",158 23 | "1950-10",133 24 | "1950-11",114 25 | "1950-12",140 26 | "1951-01",145 27 | "1951-02",150 28 | "1951-03",178 29 | "1951-04",163 30 | "1951-05",172 31 | "1951-06",178 32 | "1951-07",199 33 | "1951-08",199 34 | "1951-09",184 35 | "1951-10",162 36 | "1951-11",146 37 | "1951-12",166 38 | "1952-01",171 39 | "1952-02",180 40 | "1952-03",193 41 | "1952-04",181 42 | "1952-05",183 43 | "1952-06",218 44 | "1952-07",230 45 | "1952-08",242 46 | "1952-09",209 47 | "1952-10",191 48 | "1952-11",172 49 | "1952-12",194 50 | "1953-01",196 51 | "1953-02",196 52 | "1953-03",236 53 | "1953-04",235 54 | "1953-05",229 55 | "1953-06",243 56 | "1953-07",264 57 | "1953-08",272 58 | "1953-09",237 59 | "1953-10",211 60 | "1953-11",180 61 | "1953-12",201 62 | "1954-01",204 63 | "1954-02",188 64 | "1954-03",235 65 | "1954-04",227 66 | "1954-05",234 67 | "1954-06",264 68 | "1954-07",302 69 | "1954-08",293 70 | "1954-09",259 71 | "1954-10",229 72 | "1954-11",203 73 | "1954-12",229 74 | "1955-01",242 75 | "1955-02",233 76 | "1955-03",267 77 | "1955-04",269 78 | "1955-05",270 79 | "1955-06",315 80 | "1955-07",364 81 | "1955-08",347 82 | "1955-09",312 83 | "1955-10",274 84 | "1955-11",237 85 | "1955-12",278 86 | "1956-01",284 87 | "1956-02",277 88 | "1956-03",317 89 | "1956-04",313 90 | "1956-05",318 91 | "1956-06",374 92 | "1956-07",413 93 | "1956-08",405 94 | "1956-09",355 95 | "1956-10",306 96 | "1956-11",271 97 | "1956-12",306 98 | "1957-01",315 99 | "1957-02",301 100 | "1957-03",356 101 | "1957-04",348 102 | "1957-05",355 103 | "1957-06",422 104 | "1957-07",465 105 | "1957-08",467 106 | "1957-09",404 107 | "1957-10",347 108 | "1957-11",305 109 | "1957-12",336 110 | "1958-01",340 111 | "1958-02",318 112 | "1958-03",362 113 | "1958-04",348 114 | "1958-05",363 115 | "1958-06",435 116 | "1958-07",491 117 | "1958-08",505 118 | "1958-09",404 119 | "1958-10",359 120 | "1958-11",310 121 | "1958-12",337 122 | "1959-01",360 123 | "1959-02",342 124 | "1959-03",406 125 | "1959-04",396 126 | "1959-05",420 127 | "1959-06",472 128 | "1959-07",548 129 | "1959-08",559 130 | "1959-09",463 131 | "1959-10",407 132 | "1959-11",362 133 | "1959-12",405 134 | "1960-01",417 135 | "1960-02",391 136 | "1960-03",419 137 | "1960-04",461 138 | "1960-05",472 139 | "1960-06",535 140 | "1960-07",622 141 | "1960-08",606 142 | "1960-09",508 143 | "1960-10",461 144 | "1960-11",390 145 | "1960-12",432 146 | 147 | International airline passengers: monthly totals in thousands. Jan 49 ? 
Dec 60 148 | 149 | -------------------------------------------------------------------------------- /files/t10k-images-idx3-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/files/t10k-images-idx3-ubyte.gz -------------------------------------------------------------------------------- /files/t10k-labels-idx1-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/files/t10k-labels-idx1-ubyte.gz -------------------------------------------------------------------------------- /files/train-images-idx3-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/files/train-images-idx3-ubyte.gz -------------------------------------------------------------------------------- /files/train-labels-idx1-ubyte.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/files/train-labels-idx1-ubyte.gz -------------------------------------------------------------------------------- /images/README.md: -------------------------------------------------------------------------------- 1 | # Deep Learning from Scratch 2 | This course is organized by the Data Science Group @ UB 3 | -------------------------------------------------------------------------------- /images/TanhReal.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/TanhReal.gif -------------------------------------------------------------------------------- /images/comp_graph1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/comp_graph1.jpg -------------------------------------------------------------------------------- /images/exploding.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/exploding.png -------------------------------------------------------------------------------- /images/g1.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/g1.gif -------------------------------------------------------------------------------- /images/g2.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/g2.gif -------------------------------------------------------------------------------- /images/gru.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/gru.png -------------------------------------------------------------------------------- /images/kar.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/kar.png -------------------------------------------------------------------------------- /images/loss_functions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/loss_functions.png -------------------------------------------------------------------------------- /images/lstm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/lstm.png -------------------------------------------------------------------------------- /images/minibatch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/minibatch.png -------------------------------------------------------------------------------- /images/pipeline1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/pipeline1.png -------------------------------------------------------------------------------- /images/pipeline2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/pipeline2.png -------------------------------------------------------------------------------- /images/ridge2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/ridge2.png -------------------------------------------------------------------------------- /images/seq2seq.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/seq2seq.png -------------------------------------------------------------------------------- /images/split.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/split.png -------------------------------------------------------------------------------- /images/steeper.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/steeper.png -------------------------------------------------------------------------------- /images/t9.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/t9.png -------------------------------------------------------------------------------- /images/tf-gru.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/tf-gru.png -------------------------------------------------------------------------------- /images/tf-lstm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/tf-lstm.png -------------------------------------------------------------------------------- /images/unrolling.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/unrolling.png -------------------------------------------------------------------------------- /images/vanilla.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/images/vanilla.png -------------------------------------------------------------------------------- /input_data.py: -------------------------------------------------------------------------------- 1 | """Functions for downloading and reading MNIST data.""" 2 | from __future__ import print_function 3 | import gzip 4 | import os 5 | import urllib 6 | import numpy 7 | SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/' 8 | def maybe_download(filename, work_directory): 9 | """Download the data from Yann's website, unless it's already here.""" 10 | if not os.path.exists(work_directory): 11 | os.mkdir(work_directory) 12 | filepath = os.path.join(work_directory, filename) 13 | if not os.path.exists(filepath): 14 | filepath, _ = urllib.urlretrieve(SOURCE_URL + filename, filepath) 15 | statinfo = os.stat(filepath) 16 | print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.') 17 | return filepath 18 | def _read32(bytestream): 19 | dt = numpy.dtype(numpy.uint32).newbyteorder('>') 20 | return numpy.frombuffer(bytestream.read(4), dtype=dt) 21 | def extract_images(filename): 22 | """Extract the images into a 4D uint8 numpy array [index, y, x, depth].""" 23 | print('Extracting', filename) 24 | with gzip.open(filename) as bytestream: 25 | magic = _read32(bytestream) 26 | if magic != 2051: 27 | raise ValueError( 28 | 'Invalid magic number %d in MNIST image file: %s' % 29 | (magic, filename)) 30 | num_images = _read32(bytestream) 31 | rows = _read32(bytestream) 32 | cols = _read32(bytestream) 33 | buf = bytestream.read(rows * cols * num_images) 34 | data = numpy.frombuffer(buf, dtype=numpy.uint8) 35 | data = data.reshape(num_images, rows, cols, 1) 36 | return data 37 | def dense_to_one_hot(labels_dense, num_classes=10): 38 | """Convert class labels from scalars to one-hot vectors.""" 39 | num_labels = labels_dense.shape[0] 40 | index_offset = numpy.arange(num_labels) * num_classes 41 | labels_one_hot = numpy.zeros((num_labels, num_classes)) 42 | labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1 43 | return labels_one_hot 44 | def extract_labels(filename, one_hot=False): 45 | """Extract the labels into a 1D uint8 numpy array 
[index].""" 46 | print('Extracting', filename) 47 | with gzip.open(filename) as bytestream: 48 | magic = _read32(bytestream) 49 | if magic != 2049: 50 | raise ValueError( 51 | 'Invalid magic number %d in MNIST label file: %s' % 52 | (magic, filename)) 53 | num_items = _read32(bytestream) 54 | buf = bytestream.read(num_items) 55 | labels = numpy.frombuffer(buf, dtype=numpy.uint8) 56 | if one_hot: 57 | return dense_to_one_hot(labels) 58 | return labels 59 | class DataSet(object): 60 | def __init__(self, images, labels, fake_data=False): 61 | if fake_data: 62 | self._num_examples = 10000 63 | else: 64 | assert images.shape[0] == labels.shape[0], ( 65 | "images.shape: %s labels.shape: %s" % (images.shape, 66 | labels.shape)) 67 | self._num_examples = images.shape[0] 68 | # Convert shape from [num examples, rows, columns, depth] 69 | # to [num examples, rows*columns] (assuming depth == 1) 70 | assert images.shape[3] == 1 71 | images = images.reshape(images.shape[0], 72 | images.shape[1] * images.shape[2]) 73 | # Convert from [0, 255] -> [0.0, 1.0]. 74 | images = images.astype(numpy.float32) 75 | images = numpy.multiply(images, 1.0 / 255.0) 76 | self._images = images 77 | self._labels = labels 78 | self._epochs_completed = 0 79 | self._index_in_epoch = 0 80 | @property 81 | def images(self): 82 | return self._images 83 | @property 84 | def labels(self): 85 | return self._labels 86 | @property 87 | def num_examples(self): 88 | return self._num_examples 89 | @property 90 | def epochs_completed(self): 91 | return self._epochs_completed 92 | def next_batch(self, batch_size, fake_data=False): 93 | """Return the next `batch_size` examples from this data set.""" 94 | if fake_data: 95 | fake_image = [1.0 for _ in xrange(784)] 96 | fake_label = 0 97 | return [fake_image for _ in xrange(batch_size)], [ 98 | fake_label for _ in xrange(batch_size)] 99 | start = self._index_in_epoch 100 | self._index_in_epoch += batch_size 101 | if self._index_in_epoch > self._num_examples: 102 | # Finished epoch 103 | self._epochs_completed += 1 104 | # Shuffle the data 105 | perm = numpy.arange(self._num_examples) 106 | numpy.random.shuffle(perm) 107 | self._images = self._images[perm] 108 | self._labels = self._labels[perm] 109 | # Start next epoch 110 | start = 0 111 | self._index_in_epoch = batch_size 112 | assert batch_size <= self._num_examples 113 | end = self._index_in_epoch 114 | return self._images[start:end], self._labels[start:end] 115 | def read_data_sets(train_dir, fake_data=False, one_hot=False): 116 | class DataSets(object): 117 | pass 118 | data_sets = DataSets() 119 | if fake_data: 120 | data_sets.train = DataSet([], [], fake_data=True) 121 | data_sets.validation = DataSet([], [], fake_data=True) 122 | data_sets.test = DataSet([], [], fake_data=True) 123 | return data_sets 124 | TRAIN_IMAGES = 'train-images-idx3-ubyte.gz' 125 | TRAIN_LABELS = 'train-labels-idx1-ubyte.gz' 126 | TEST_IMAGES = 't10k-images-idx3-ubyte.gz' 127 | TEST_LABELS = 't10k-labels-idx1-ubyte.gz' 128 | VALIDATION_SIZE = 5000 129 | local_file = maybe_download(TRAIN_IMAGES, train_dir) 130 | train_images = extract_images(local_file) 131 | local_file = maybe_download(TRAIN_LABELS, train_dir) 132 | train_labels = extract_labels(local_file, one_hot=one_hot) 133 | local_file = maybe_download(TEST_IMAGES, train_dir) 134 | test_images = extract_images(local_file) 135 | local_file = maybe_download(TEST_LABELS, train_dir) 136 | test_labels = extract_labels(local_file, one_hot=one_hot) 137 | validation_images = train_images[:VALIDATION_SIZE] 138 | 
validation_labels = train_labels[:VALIDATION_SIZE] 139 | train_images = train_images[VALIDATION_SIZE:] 140 | train_labels = train_labels[VALIDATION_SIZE:] 141 | data_sets.train = DataSet(train_images, train_labels) 142 | data_sets.validation = DataSet(validation_images, validation_labels) 143 | data_sets.test = DataSet(test_images, test_labels) 144 | return data_sets -------------------------------------------------------------------------------- /models/autoencoder_digits_part1_autoencoder.ckpt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/models/autoencoder_digits_part1_autoencoder.ckpt -------------------------------------------------------------------------------- /models/autoencoder_digits_part1_autoencoder.ckpt.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/models/autoencoder_digits_part1_autoencoder.ckpt.meta -------------------------------------------------------------------------------- /models/autoencoder_digits_part2_sparse.ckpt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/models/autoencoder_digits_part2_sparse.ckpt -------------------------------------------------------------------------------- /models/autoencoder_digits_part2_sparse.ckpt.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/models/autoencoder_digits_part2_sparse.ckpt.meta -------------------------------------------------------------------------------- /models/autoencoder_digits_part3_viz.ckpt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/models/autoencoder_digits_part3_viz.ckpt -------------------------------------------------------------------------------- /models/autoencoder_digits_part3_viz.ckpt.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/models/autoencoder_digits_part3_viz.ckpt.meta -------------------------------------------------------------------------------- /names.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import tflearn 4 | 5 | def textfile_to_seq(file, seq_maxlen=25, redun_step=3): 6 | """ string_to_semi_redundant_sequences. 7 | Vectorize a string and returns parsed sequences and targets, along with 8 | the associated dictionary. 9 | Arguments: 10 | string: `str`. Lower-case text from input text file. 11 | seq_maxlen: `int`. Maximum length of a sequence. Default: 25. 12 | redun_step: `int`. Redundancy step. Default: 3. 
13 | Returns: 14 | `tuple`: (inputs, targets, dictionary) 15 | """ 16 | import numpy as np 17 | import re 18 | print("Vectorizing text...") 19 | 20 | import codecs 21 | f = codecs.open('toponims.txt', "r", "utf-8") 22 | string = f.read() 23 | string.encode('utf-8') 24 | string = re.sub( '([A-Z])', '^\\1', string ).lower() 25 | chars = set() 26 | chars.update(string) 27 | char_idx = {c: i for i, c in enumerate(chars)} 28 | 29 | sequences = [] 30 | next_chars = [] 31 | for i in range(0, len(string) - seq_maxlen, redun_step): 32 | sequences.append(string[i: i + seq_maxlen]) 33 | next_chars.append(string[i + seq_maxlen]) 34 | 35 | X = np.zeros((len(sequences), seq_maxlen, len(chars)), dtype=np.bool) 36 | Y = np.zeros((len(sequences), len(chars)), dtype=np.bool) 37 | for i, seq in enumerate(sequences): 38 | for t, char in enumerate(seq): 39 | X[i, t, char_idx[char]] = 1 40 | Y[i, char_idx[next_chars[i]]] = 1 41 | 42 | print("Text total length: " + str(len(string))) 43 | print("Distinct chars: " + str(len(chars))) 44 | print("Total sequences: " + str(len(sequences))) 45 | return X, Y, char_idx 46 | 47 | def random_sequence_from_string(string, seq_maxlen): 48 | import random 49 | rand_index = random.randint(0, len(string) - seq_maxlen - 1) 50 | return string[rand_index: rand_index + seq_maxlen] 51 | 52 | def random_sequence_from_textfile(path, seq_maxlen): 53 | import codecs 54 | import re 55 | f = codecs.open(path, "r", "utf-8") 56 | text = f.read() 57 | text.encode('utf-8') 58 | text = re.sub( '([A-Z])', '^\\1', text ).lower() 59 | return random_sequence_from_string(text, seq_maxlen) 60 | 61 | path = 'toponims.txt' 62 | maxlen = 20 63 | 64 | X, Y, char_idx = \ 65 | textfile_to_seq(path, seq_maxlen=maxlen, redun_step=3) 66 | 67 | g = tflearn.input_data(shape=[None, maxlen, len(char_idx)]) 68 | g = tflearn.lstm(g, 64, return_seq=True) 69 | g = tflearn.dropout(g, 0.5) 70 | g = tflearn.lstm(g, 64) 71 | g = tflearn.dropout(g, 0.5) 72 | g = tflearn.fully_connected(g, len(char_idx), activation='softmax') 73 | g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy', 74 | learning_rate=0.01) 75 | 76 | m = tflearn.SequenceGenerator(g, dictionary=char_idx, 77 | seq_maxlen=maxlen, 78 | clip_gradients=5.0) 79 | 80 | for i in range(100): 81 | seed = random_sequence_from_textfile(path, maxlen) 82 | m.fit(X, Y, validation_set=0.1, batch_size=128, 83 | n_epoch=1, run_id='toponims') 84 | print("-- TESTING...") 85 | print("-- EPOCH = ", i) 86 | print("-- Test with temperature of 1.2 --") 87 | print(m.generate(30, temperature=1.2, seq_seed=seed)) 88 | print("-- Test with temperature of 1.0 --") 89 | print(m.generate(30, temperature=1.0, seq_seed=seed)) 90 | print("-- Test with temperature of 0.5 --") 91 | print(m.generate(30, temperature=0.5, seq_seed=seed)) -------------------------------------------------------------------------------- /slides/DeepLearningCourse_Convolutionals.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/slides/DeepLearningCourse_Convolutionals.pdf -------------------------------------------------------------------------------- /slides/DeepLearningCourse_TensorFlow.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/slides/DeepLearningCourse_TensorFlow.pdf 
-------------------------------------------------------------------------------- /slides/DeepLearningIntro.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/slides/DeepLearningIntro.pdf -------------------------------------------------------------------------------- /slides/README.md: -------------------------------------------------------------------------------- 1 | # Deep Learning from Scratch 2 | This course is organized by the Data Science Group @ UB 3 | 4 | -------------------------------------------------------------------------------- /slides/Unsupervised.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/DataScienceUB/DeepLearningfromScratch2018/16eaf73e22d12b8b3e436d49cde6237026357106/slides/Unsupervised.pdf --------------------------------------------------------------------------------