├── .gitignore ├── Data └── .gitkeep ├── Google_DL ├── README.md └── Replicate_1_notmnist.ipynb ├── Keras ├── 01 CNN_MNIST.ipynb ├── 03 - Transfer Learning - Keras.ipynb ├── Attention │ ├── Data Generation.ipynb │ └── Reader.ipynb ├── Attention2 │ └── basic_attention.ipynb └── Keras_from_scratch │ ├── 1_MNIST_Hello_World_of_DL.ipynb │ ├── 2_Sequential()_vs_Functional().ipynb │ ├── 3_IMDB_sentiment.ipynb │ ├── 4_multiclass_classification.ipynb │ ├── 5_Intro to CNN.ipynb │ ├── Advanced │ ├── Advanced_01.ipynb │ ├── Advanced_02.ipynb │ ├── Advanced_03.ipynb │ └── Advanced_04.ipynb │ ├── Generative │ ├── Generative_01.ipynb │ ├── Generative_02.ipynb │ ├── Generative_03.ipynb │ ├── Generative_04.ipynb │ └── Generative_05.ipynb │ ├── Text │ ├── Pretrained_Embeddings.ipynb │ ├── Text_01.ipynb │ ├── Text_02.ipynb │ ├── Text_03.ipynb │ ├── Text_04.ipynb │ ├── Text_05.ipynb │ ├── Text_06.ipynb │ └── basic_attention3.ipynb │ └── Vision │ ├── 01_CNN.ipynb │ ├── 02_CNN.ipynb │ ├── 03_CNN.ipynb │ ├── 04_CNN.ipynb │ ├── 05_CNN.ipynb │ ├── 06_CNN.ipynb │ ├── 07_CNN.ipynb │ ├── CNN_1.ipynb │ ├── CNN_2.ipynb │ ├── CNN_3.ipynb │ ├── CNN_4.ipynb │ ├── CNN_5.ipynb │ ├── CNN_6.ipynb │ ├── CNN_7.ipynb │ ├── CNN_8.ipynb │ ├── CNN_9.ipynb │ └── README.md ├── LSTM ├── IMDB_Embedding_w2v_LSTM_1.ipynb ├── IMDB_Embedding_w2v_LSTM_2.ipynb └── IMDB_Embedding_w2v_LSTM_3.ipynb ├── Online_Learning ├── Incorporating_feedback_in_DeepNets.ipynb └── Online_Learning_DeepNets.ipynb ├── README.md ├── Tensorflow ├── 02 Transfer Learning - Car classification.ipynb ├── 04 - style transfer.ipynb └── Learn_TF │ ├── BR_RBZ+Ch_2.ipynb │ ├── BR_RBZ+Ch_3.ipynb │ ├── BR_RBZ+Ch_4.ipynb │ ├── BR_RBZ+Ch_5.ipynb │ ├── Ch-1.ipynb │ ├── Ch__1.ipynb │ ├── notebook-ch1.ipynb │ └── notebook-ch2.ipynb ├── requirements.txt └── requirements1.txt /.gitignore: -------------------------------------------------------------------------------- 1 | /Data/*.txt 2 | .DS_Store 3 | Google_DL/udacity/* 4 | Google_DL/.ipynb_checkpoints/* 5 
| 6 | 7 | Keras/.ipynb_checkpoints/ 8 | Data/*.json 9 | Data/*.csv 10 | 11 | LSTM/.ipynb_checkpoints/ 12 | 13 | 14 | 15 | Keras/Keras_from_scratch/Vision/*.h5 16 | Keras/Keras_from_scratch/Vision/.ipynb_checkpoints/* 17 | -------------------------------------------------------------------------------- /Data/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anujgupta82/DeepNets/f7d54970e4d2808a5a457fabe2a3c58005cef43e/Data/.gitkeep -------------------------------------------------------------------------------- /Google_DL/README.md: -------------------------------------------------------------------------------- 1 | Google's udacity course on DeepLearning by Vincent 2 | https://www.udacity.com/course/deep-learning--ud730 3 | 4 | 5 | -------------------------------------------------------------------------------- /Google_DL/Replicate_1_notmnist.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 2, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "# Config the matlotlib backend as plotting inline in IPython\n", 12 | "%matplotlib inline\n", 13 | "%load_ext autoreload\n", 14 | "%autoreload 2" 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": 1, 20 | "metadata": { 21 | "collapsed": false 22 | }, 23 | "outputs": [], 24 | "source": [ 25 | "import matplotlib.pyplot as plt\n", 26 | "import numpy as np\n", 27 | "import os\n", 28 | "import sys\n", 29 | "import tarfile\n", 30 | "from IPython.display import display, Image\n", 31 | "from scipy import ndimage\n", 32 | "from sklearn.linear_model import LogisticRegression\n", 33 | "from six.moves.urllib.request import urlretrieve\n", 34 | "from six.moves import cPickle as pickle" 35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": null, 40 | "metadata": { 41 | 
"collapsed": true 42 | }, 43 | "outputs": [], 44 | "source": [] 45 | } 46 | ], 47 | "metadata": { 48 | "kernelspec": { 49 | "display_name": "Python 2", 50 | "language": "python", 51 | "name": "python2" 52 | }, 53 | "language_info": { 54 | "codemirror_mode": { 55 | "name": "ipython", 56 | "version": 2 57 | }, 58 | "file_extension": ".py", 59 | "mimetype": "text/x-python", 60 | "name": "python", 61 | "nbconvert_exporter": "python", 62 | "pygments_lexer": "ipython2", 63 | "version": "2.7.11" 64 | } 65 | }, 66 | "nbformat": 4, 67 | "nbformat_minor": 0 68 | } 69 | -------------------------------------------------------------------------------- /Keras/Attention/Data Generation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "BITS n PIECES\n", 15 | "\n", 16 | " * below we understand key pieces in data gerations" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": null, 22 | "metadata": {}, 23 | "outputs": [], 24 | "source": [] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": 40, 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "# We humans write dates in various formats \n", 33 | "\n", 34 | "DATE_FORMATS = ['short',\n", 35 | " 'medium',\n", 36 | " 'long',\n", 37 | " 'full',\n", 38 | " 'd MMM YYY',\n", 39 | " 'd MMMM YYY',\n", 40 | " 'dd MMM YYY',\n", 41 | " 'd MMM, YYY',\n", 42 | " 'd MMMM, YYY',\n", 43 | " 'dd, MMM YYY',\n", 44 | " 'd MM YY',\n", 45 | " 'd MMMM YYY',\n", 46 | " 'MMMM d YYY',\n", 47 | " 'MMMM d, YYY',\n", 48 | " 'dd.MM.YY',\n", 49 | " ]\n" 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": 41, 55 | "metadata": {}, 56 | "outputs": [ 57 | { 58 | "name": "stdout", 59 | "output_type": "stream", 60 | "text": [ 61 | "['en_US']\n" 62 
| ] 63 | } 64 | ], 65 | "source": [ 66 | "# change this if you want it to work with only a single language\n", 67 | "LOCALES = ['en_US']\n", 68 | "#LOCALES = babel.localedata.locale_identifiers()\n", 69 | "\n", 70 | "print LOCALES" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": 53, 76 | "metadata": {}, 77 | "outputs": [ 78 | { 79 | "name": "stdout", 80 | "output_type": "stream", 81 | "text": [ 82 | "1970-12-24\n", 83 | "24 Dec 1970\n" 84 | ] 85 | } 86 | ], 87 | "source": [ 88 | "dt = fake.date_object()\n", 89 | "print dt\n", 90 | "human = format_date(dt,format=random.choice(DATE_FORMATS), locale=random.choice(LOCALES))\n", 91 | "print human\n" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": null, 97 | "metadata": {}, 98 | "outputs": [], 99 | "source": [ 100 | "# key function to generate dates\n", 101 | "\n", 102 | "def create_date():\n", 103 | " \"\"\"\n", 104 | " Creates some fake dates \n", 105 | " :returns: tuple containing \n", 106 | " 1. human formatted string\n", 107 | " 2. machine formatted string\n", 108 | " 3. 
date object.\n", 109 | " \"\"\"\n", 110 | " # create a fake date - this is a date object\n", 111 | " dt = fake.date_object()\n", 112 | "\n", 113 | " # wrapping this in a try catch because\n", 114 | " # the locale 'vo' and format 'full' will fail\n", 115 | " try:\n", 116 | " # chose a DATE_FORMATS and chose a LOCALE\n", 117 | " # converts date object to human readable date using the DATE_FORMATS and LOCALE chosen\n", 118 | " human = format_date(dt,format=random.choice(DATE_FORMATS), locale=random.choice(LOCALES))\n", 119 | " \n", 120 | " # we add more flare by changing the casing - to either lower or upper\n", 121 | " case_change = random.randint(0,3) # 1/2 chance of case change\n", 122 | " \n", 123 | " if case_change == 1:\n", 124 | " human = human.upper()\n", 125 | " elif case_change == 2:\n", 126 | " human = human.lower()\n", 127 | "\n", 128 | " # convert date object into ISO machine format\n", 129 | " machine = dt.isoformat()\n", 130 | " \n", 131 | " except AttributeError as e:\n", 132 | " # print(e)\n", 133 | " return None, None, None\n", 134 | "\n", 135 | " return human, machine, dt" 136 | ] 137 | }, 138 | { 139 | "cell_type": "code", 140 | "execution_count": 34, 141 | "metadata": {}, 142 | "outputs": [ 143 | { 144 | "name": "stdout", 145 | "output_type": "stream", 146 | "text": [ 147 | "0 02, oct 2013 2013-10-02 2013-10-02\n", 148 | "1 11 JANUARY, 2003 2003-01-11 2003-01-11\n", 149 | "2 03.05.92 1992-05-03 1992-05-03\n", 150 | "3 04, nov 1979 1979-11-04 1979-11-04\n", 151 | "4 9/24/06 2006-09-24 2006-09-24\n", 152 | "5 December 21 1984 1984-12-21 1984-12-21\n", 153 | "6 AUG 5, 1988 1988-08-05 1988-08-05\n", 154 | "7 22, Jul 2005 2005-07-22 2005-07-22\n", 155 | "8 26, NOV 2002 2002-11-26 2002-11-26\n", 156 | "9 30 Apr 1986 1986-04-30 1986-04-30\n" 157 | ] 158 | } 159 | ], 160 | "source": [ 161 | "human_vocab = set()\n", 162 | "machine_vocab = set()\n", 163 | "\n", 164 | "for i in range(10):\n", 165 | " human, machine, _ = create_date()\n", 166 | " print i, 
human, machine, _\n", 167 | " \n", 168 | " if h is not None:\n", 169 | " #f.write('\"'+h + '\",\"' + m + '\"\\n')\n", 170 | " human_vocab.update(tuple(h))\n", 171 | " machine_vocab.update(tuple(m))" 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "execution_count": 35, 177 | "metadata": { 178 | "collapsed": true 179 | }, 180 | "outputs": [ 181 | { 182 | "data": { 183 | "text/plain": [ 184 | "{u' ',\n", 185 | " u',',\n", 186 | " u'.',\n", 187 | " u'/',\n", 188 | " u'0',\n", 189 | " u'1',\n", 190 | " u'2',\n", 191 | " u'3',\n", 192 | " u'4',\n", 193 | " u'5',\n", 194 | " u'6',\n", 195 | " u'7',\n", 196 | " u'8',\n", 197 | " u'9',\n", 198 | " u'A',\n", 199 | " u'D',\n", 200 | " u'G',\n", 201 | " u'J',\n", 202 | " u'N',\n", 203 | " u'O',\n", 204 | " u'R',\n", 205 | " u'U',\n", 206 | " u'V',\n", 207 | " u'Y',\n", 208 | " u'b',\n", 209 | " u'c',\n", 210 | " u'e',\n", 211 | " u'l',\n", 212 | " u'm',\n", 213 | " u'n',\n", 214 | " u'o',\n", 215 | " u'p',\n", 216 | " u'r',\n", 217 | " u't',\n", 218 | " u'u',\n", 219 | " u'v'}" 220 | ] 221 | }, 222 | "execution_count": 35, 223 | "metadata": {}, 224 | "output_type": "execute_result" 225 | } 226 | ], 227 | "source": [ 228 | "human_vocab" 229 | ] 230 | }, 231 | { 232 | "cell_type": "code", 233 | "execution_count": null, 234 | "metadata": {}, 235 | "outputs": [], 236 | "source": [] 237 | }, 238 | { 239 | "cell_type": "code", 240 | "execution_count": null, 241 | "metadata": {}, 242 | "outputs": [], 243 | "source": [] 244 | }, 245 | { 246 | "cell_type": "code", 247 | "execution_count": 1, 248 | "metadata": {}, 249 | "outputs": [], 250 | "source": [ 251 | "import random\n", 252 | "import json\n", 253 | "import os" 254 | ] 255 | }, 256 | { 257 | "cell_type": "code", 258 | "execution_count": 18, 259 | "metadata": {}, 260 | "outputs": [ 261 | { 262 | "name": "stdout", 263 | "output_type": "stream", 264 | "text": [ 265 | "/home/root1/anuj_work/Github_repos/DeepNets/Keras/Attention\n" 266 | ] 267 | } 268 | ], 269 | "source": [ 
270 | "#datadir = os.path.dirname(__file__)\n", 271 | "cwd = os.getcwd()\n", 272 | "print cwd" 273 | ] 274 | }, 275 | { 276 | "cell_type": "code", 277 | "execution_count": 22, 278 | "metadata": {}, 279 | "outputs": [ 280 | { 281 | "name": "stdout", 282 | "output_type": "stream", 283 | "text": [ 284 | "/home/root1/anuj_work/Github_repos/DeepNets/Keras/Attention/data\n" 285 | ] 286 | } 287 | ], 288 | "source": [ 289 | "DATA_FOLDER = os.path.join(cwd, 'data')\n", 290 | "print DATA_FOLDER" 291 | ] 292 | }, 293 | { 294 | "cell_type": "code", 295 | "execution_count": 23, 296 | "metadata": {}, 297 | "outputs": [], 298 | "source": [ 299 | "from faker import Faker\n", 300 | "import babel\n", 301 | "from babel.dates import format_date" 302 | ] 303 | }, 304 | { 305 | "cell_type": "code", 306 | "execution_count": 24, 307 | "metadata": {}, 308 | "outputs": [], 309 | "source": [ 310 | "fake = Faker()\n", 311 | "fake.seed(230517)\n", 312 | "random.seed(230517)" 313 | ] 314 | }, 315 | { 316 | "cell_type": "code", 317 | "execution_count": 25, 318 | "metadata": {}, 319 | "outputs": [], 320 | "source": [] 321 | }, 322 | { 323 | "cell_type": "code", 324 | "execution_count": 26, 325 | "metadata": {}, 326 | "outputs": [], 327 | "source": [ 328 | "# change this if you want it to work with only a single language\n", 329 | "LOCALES = ['en_US']\n", 330 | "# LOCALES = babel.localedata.locale_identifiers()" 331 | ] 332 | }, 333 | { 334 | "cell_type": "code", 335 | "execution_count": 27, 336 | "metadata": {}, 337 | "outputs": [], 338 | "source": [ 339 | "def create_date():\n", 340 | " \"\"\"\n", 341 | " Creates some fake dates \n", 342 | " :returns: tuple containing \n", 343 | " 1. human formatted string\n", 344 | " 2. machine formatted string\n", 345 | " 3. 
date object.\n", 346 | " \"\"\"\n", 347 | " dt = fake.date_object()\n", 348 | "\n", 349 | " # wrapping this in a try catch because\n", 350 | " # the locale 'vo' and format 'full' will fail\n", 351 | " try:\n", 352 | " human = format_date(dt,format=random.choice(FORMATS), locale=random.choice(LOCALES))\n", 353 | "\n", 354 | " case_change = random.randint(0,3) # 1/2 chance of case change\n", 355 | " if case_change == 1:\n", 356 | " human = human.upper()\n", 357 | " elif case_change == 2:\n", 358 | " human = human.lower()\n", 359 | "\n", 360 | " machine = dt.isoformat()\n", 361 | " except AttributeError as e:\n", 362 | " # print(e)\n", 363 | " return None, None, None\n", 364 | "\n", 365 | " return human, machine, dt" 366 | ] 367 | }, 368 | { 369 | "cell_type": "code", 370 | "execution_count": 78, 371 | "metadata": {}, 372 | "outputs": [], 373 | "source": [ 374 | "def create_dataset(dataset_name, n_examples, vocabulary=False):\n", 375 | " \"\"\"\n", 376 | " Creates a csv dataset with n_examples and optional vocabulary\n", 377 | " :param dataset_name: name of the file to save as\n", 378 | " :n_examples: the number of examples to generate\n", 379 | " :vocabulary: if true, will also save the vocabulary\n", 380 | " \"\"\"\n", 381 | " \n", 382 | " # Vocabulary of data - vocab for human dates format and for machine dates format\n", 383 | " human_vocab = set()\n", 384 | " machine_vocab = set() \n", 385 | "\n", 386 | " with open(dataset_name, 'w+') as f:\n", 387 | " for i in range(n_examples):\n", 388 | " h, m, _ = create_date()\n", 389 | " if h is not None:\n", 390 | " f.write('\"'+h + '\",\"' + m + '\"\\n')\n", 391 | " \n", 392 | " # add cracters sets of h, m to respective vocabs\n", 393 | " human_vocab.update(tuple(h))\n", 394 | " machine_vocab.update(tuple(m))\n", 395 | "#\"\"\"\n", 396 | " \n", 397 | " if vocabulary:\n", 398 | " \n", 399 | " #create mapping of integer to vocabs\n", 400 | " int2human = dict(enumerate(human_vocab))\n", 401 | " 
int2human.update({len(int2human): '',\n", 402 | " len(int2human)+1: ''})\n", 403 | " \n", 404 | " int2machine = dict(enumerate(machine_vocab))\n", 405 | " int2machine.update({len(int2machine):'',\n", 406 | " len(int2machine)+1:''})\n", 407 | " \n", 408 | " # create mapping from vocab to integer\n", 409 | " human2int = {v: k for k, v in int2human.items()}\n", 410 | " machine2int = {v: k for k, v in int2machine.items()}\n", 411 | "\n", 412 | " #dump the dictionaries in the data folder\n", 413 | " with open(os.path.join(DATA_FOLDER, 'human_vocab.json'), 'w') as f:\n", 414 | " json.dump(human2int, f)\n", 415 | " with open(os.path.join(DATA_FOLDER, 'machine_vocab.json'), 'w') as f:\n", 416 | " json.dump(machine2int, f)\n", 417 | " \n", 418 | "#\"\"\" " 419 | ] 420 | }, 421 | { 422 | "cell_type": "code", 423 | "execution_count": 79, 424 | "metadata": {}, 425 | "outputs": [ 426 | { 427 | "data": { 428 | "text/plain": [ 429 | "'/home/root1/anuj_work/Github_repos/DeepNets/Keras/Attention/data'" 430 | ] 431 | }, 432 | "execution_count": 79, 433 | "metadata": {}, 434 | "output_type": "execute_result" 435 | } 436 | ], 437 | "source": [ 438 | "DATA_FOLDER" 439 | ] 440 | }, 441 | { 442 | "cell_type": "code", 443 | "execution_count": 80, 444 | "metadata": {}, 445 | "outputs": [ 446 | { 447 | "data": { 448 | "text/plain": [ 449 | "'/home/root1/anuj_work/Github_repos/DeepNets/Keras/Attention/data/training.csv'" 450 | ] 451 | }, 452 | "execution_count": 80, 453 | "metadata": {}, 454 | "output_type": "execute_result" 455 | } 456 | ], 457 | "source": [ 458 | "os.path.join(DATA_FOLDER, 'training.csv')" 459 | ] 460 | }, 461 | { 462 | "cell_type": "code", 463 | "execution_count": 81, 464 | "metadata": {}, 465 | "outputs": [], 466 | "source": [ 467 | "create_dataset(os.path.join(DATA_FOLDER, 'training.csv'), 500000, vocabulary=True)" 468 | ] 469 | }, 470 | { 471 | "cell_type": "code", 472 | "execution_count": 82, 473 | "metadata": {}, 474 | "outputs": [], 475 | "source": [ 476 | 
"create_dataset(os.path.join(DATA_FOLDER, 'validation.csv'), 1000)" 477 | ] 478 | }, 479 | { 480 | "cell_type": "code", 481 | "execution_count": null, 482 | "metadata": {}, 483 | "outputs": [], 484 | "source": [] 485 | } 486 | ], 487 | "metadata": { 488 | "kernelspec": { 489 | "display_name": "Python 2", 490 | "language": "python", 491 | "name": "python2" 492 | }, 493 | "language_info": { 494 | "codemirror_mode": { 495 | "name": "ipython", 496 | "version": 2 497 | }, 498 | "file_extension": ".py", 499 | "mimetype": "text/x-python", 500 | "name": "python", 501 | "nbconvert_exporter": "python", 502 | "pygments_lexer": "ipython2", 503 | "version": "2.7.12" 504 | } 505 | }, 506 | "nbformat": 4, 507 | "nbformat_minor": 2 508 | } 509 | -------------------------------------------------------------------------------- /Keras/Attention/Reader.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": null, 20 | "metadata": {}, 21 | "outputs": [], 22 | "source": [] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": null, 27 | "metadata": {}, 28 | "outputs": [], 29 | "source": [] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": null, 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": 1, 41 | "metadata": {}, 42 | "outputs": [ 43 | { 44 | "name": "stderr", 45 | "output_type": "stream", 46 | "text": [ 47 | "Using TensorFlow backend.\n", 48 | "/home/root1/.virtualenv/demos/local/lib/python2.7/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is 
deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n", 49 | " from ._conv import register_converters as _register_converters\n" 50 | ] 51 | } 52 | ], 53 | "source": [ 54 | "import json\n", 55 | "import csv\n", 56 | "import random\n", 57 | "\n", 58 | "import numpy as np\n", 59 | "from keras.utils.np_utils import to_categorical\n", 60 | "\n", 61 | "random.seed(1984)\n", 62 | "\n", 63 | "INPUT_PADDING = 50\n", 64 | "OUTPUT_PADDING = 100" 65 | ] 66 | }, 67 | { 68 | "cell_type": "code", 69 | "execution_count": 2, 70 | "metadata": {}, 71 | "outputs": [], 72 | "source": [ 73 | "class Vocabulary(object):\n", 74 | "\n", 75 | " def __init__(self, vocabulary_file, padding=None):\n", 76 | " \"\"\"\n", 77 | " Creates a vocabulary from a file\n", 78 | " :param vocabulary_file: the path to the vocabulary\n", 79 | " \"\"\"\n", 80 | " \n", 81 | " # set vocabulary file, padding and reverse vocabulary\n", 82 | " \n", 83 | " self.vocabulary_file = vocabulary_file\n", 84 | " with open(vocabulary_file, 'r') as f:\n", 85 | " self.vocabulary = json.load(f)\n", 86 | "\n", 87 | " self.padding = padding\n", 88 | " self.reverse_vocabulary = {v: k for k, v in self.vocabulary.items()}\n", 89 | "\n", 90 | " def size(self):\n", 91 | " \"\"\"\n", 92 | " returns the size of the vocabulary\n", 93 | " \"\"\"\n", 94 | " return len(self.vocabulary.keys())\n", 95 | "\n", 96 | " def string_to_int(self, text):\n", 97 | " \"\"\"\n", 98 | " Converts a string into it's character integer \n", 99 | " representation\n", 100 | " :param text: text to convert\n", 101 | " \"\"\"\n", 102 | " # get the characters\n", 103 | " characters = list(text)\n", 104 | "\n", 105 | " # integer representation\n", 106 | " integers = []\n", 107 | "\n", 108 | " # pick first k characters where k = self.padding\n", 109 | " if self.padding and len(characters) >= self.padding:\n", 110 | " # truncate if too long\n", 111 | " characters = characters[:self.padding - 1]\n", 112 | " \n", 113 | " # append 
special characters\n", 114 | " characters.append('')\n", 115 | " \n", 116 | " # append the integer equivalent\n", 117 | " for c in characters:\n", 118 | " if c in self.vocabulary:\n", 119 | " integers.append(self.vocabulary[c])\n", 120 | " else:\n", 121 | " integers.append(self.vocabulary[''])\n", 122 | "\n", 123 | "\n", 124 | " # pad the data if its shorter\n", 125 | " if self.padding and len(integers) < self.padding:\n", 126 | " integers.extend([self.vocabulary['']]\n", 127 | " * (self.padding - len(integers)))\n", 128 | "\n", 129 | " if len(integers) != self.padding:\n", 130 | " print(text)\n", 131 | " raise AttributeError('Length of text was not padding.')\n", 132 | " return integers\n", 133 | "\n", 134 | " def int_to_string(self, integers):\n", 135 | " \"\"\"\n", 136 | " Decodes a list of integers\n", 137 | " into it's string representation\n", 138 | " \"\"\"\n", 139 | " characters = []\n", 140 | " for i in integers:\n", 141 | " characters.append(self.reverse_vocabulary[i])\n", 142 | "\n", 143 | " return characters\n", 144 | "\n" 145 | ] 146 | }, 147 | { 148 | "cell_type": "code", 149 | "execution_count": 3, 150 | "metadata": {}, 151 | "outputs": [], 152 | "source": [ 153 | "class Data(object):\n", 154 | "\n", 155 | " def __init__(self, file_name, input_vocabulary, output_vocabulary):\n", 156 | " \"\"\"\n", 157 | " Creates an object that gets data from a file\n", 158 | " :param file_name: name of the file to read from\n", 159 | " :param vocabulary: the Vocabulary object to use\n", 160 | " :param batch_size: the number of datapoints to return\n", 161 | " :param padding: the amount of padding to apply to \n", 162 | " a short string\n", 163 | " \"\"\"\n", 164 | " \n", 165 | " # set vocab files and data file\n", 166 | " self.input_vocabulary = input_vocabulary\n", 167 | " self.output_vocabulary = output_vocabulary\n", 168 | " self.file_name = file_name\n", 169 | "\n", 170 | " def load(self):\n", 171 | " \"\"\"\n", 172 | " Loads data from a file\n", 173 | " 
\"\"\"\n", 174 | " self.inputs = []\n", 175 | " self.targets = []\n", 176 | "\n", 177 | " # load the data\n", 178 | " with open(self.file_name, 'r') as f:\n", 179 | " reader = csv.reader(f)\n", 180 | " for row in reader:\n", 181 | " self.inputs.append(row[0])\n", 182 | " self.targets.append(row[1])\n", 183 | "\n", 184 | " def transform(self):\n", 185 | " \"\"\"\n", 186 | " Transforms the data as necessary\n", 187 | " \"\"\"\n", 188 | " # @TODO: use `pool.map_async` here?\n", 189 | " \n", 190 | " # convert string to int\n", 191 | " self.inputs = np.array(list(map(self.input_vocabulary.string_to_int, self.inputs)))\n", 192 | " self.targets = map(self.output_vocabulary.string_to_int, self.targets)\n", 193 | " \n", 194 | " #output is a seq of integers - we represent each integer as 1-hopt encoding\n", 195 | " self.targets = np.array(\n", 196 | " list(map(\n", 197 | " lambda x: to_categorical(\n", 198 | " x,\n", 199 | " num_classes=self.output_vocabulary.size()),\n", 200 | " self.targets)))\n", 201 | " \n", 202 | " # noit sure what exactly is this for \n", 203 | " assert len(self.inputs.shape) == 2, 'Inputs could not properly be encoded'\n", 204 | " assert len(self.targets.shape) == 3, 'Targets could not properly be encoded'\n", 205 | "\n", 206 | " def generator(self, batch_size):\n", 207 | " \"\"\"\n", 208 | " Creates a generator that can be used in `model.fit_generator()`\n", 209 | " Batches are generated randomly.\n", 210 | " :param batch_size: the number of instances to include per batch\n", 211 | " \"\"\"\n", 212 | " instance_id = range(len(self.inputs))\n", 213 | " \n", 214 | " while True:\n", 215 | " try:\n", 216 | " \n", 217 | " batch_ids = random.sample(instance_id, batch_size) # random chose a batch\n", 218 | " yield (np.array(self.inputs[batch_ids], dtype=int), np.array(self.targets[batch_ids]))\n", 219 | " \n", 220 | " except Exception as e:\n", 221 | " print('EXCEPTION OMG')\n", 222 | " print(e)\n", 223 | " yield None, None" 224 | ] 225 | }, 226 | { 227 | 
"cell_type": "code", 228 | "execution_count": 9, 229 | "metadata": {}, 230 | "outputs": [], 231 | "source": [ 232 | "import os\n", 233 | "cwd = os.getcwd()\n", 234 | "DATA_FOLDER = os.path.join(cwd, 'data')\n", 235 | "input_vocab_file_path = os.path.join(DATA_FOLDER, 'human_vocab.json')\n", 236 | "output_vocab_file_path = os.path.join(DATA_FOLDER, 'machine_vocab.json')" 237 | ] 238 | }, 239 | { 240 | "cell_type": "code", 241 | "execution_count": 11, 242 | "metadata": {}, 243 | "outputs": [], 244 | "source": [ 245 | "input_vocab = Vocabulary(input_vocab_file_path, padding=50)\n", 246 | "output_vocab = Vocabulary(output_vocab_file_path, padding=12)" 247 | ] 248 | }, 249 | { 250 | "cell_type": "code", 251 | "execution_count": 12, 252 | "metadata": {}, 253 | "outputs": [], 254 | "source": [ 255 | "sample_csv_file_path = os.path.join(DATA_FOLDER, 'training.csv')\n", 256 | "\n", 257 | "ds = Data(sample_csv_file_path, input_vocab, output_vocab)\n", 258 | "ds.load()\n", 259 | "ds.transform()" 260 | ] 261 | }, 262 | { 263 | "cell_type": "code", 264 | "execution_count": 13, 265 | "metadata": {}, 266 | "outputs": [ 267 | { 268 | "name": "stdout", 269 | "output_type": "stream", 270 | "text": [ 271 | "(500000, 50)\n", 272 | "(500000, 12, 13)\n", 273 | "(3, 50)\n", 274 | "(3, 12, 13)\n" 275 | ] 276 | } 277 | ], 278 | "source": [ 279 | "print(ds.inputs.shape)\n", 280 | "print(ds.targets.shape)\n", 281 | "\n", 282 | "g = ds.generator(32)\n", 283 | "\n", 284 | "\n", 285 | "print(ds.inputs[[5,10, 12]].shape)\n", 286 | "print(ds.targets[[5,10,12]].shape)" 287 | ] 288 | }, 289 | { 290 | "cell_type": "code", 291 | "execution_count": 14, 292 | "metadata": {}, 293 | "outputs": [ 294 | { 295 | "data": { 296 | "text/plain": [ 297 | "" 298 | ] 299 | }, 300 | "execution_count": 14, 301 | "metadata": {}, 302 | "output_type": "execute_result" 303 | } 304 | ], 305 | "source": [ 306 | "g." 
307 | ] 308 | }, 309 | { 310 | "cell_type": "code", 311 | "execution_count": null, 312 | "metadata": {}, 313 | "outputs": [], 314 | "source": [] 315 | } 316 | ], 317 | "metadata": { 318 | "kernelspec": { 319 | "display_name": "Python 2", 320 | "language": "python", 321 | "name": "python2" 322 | }, 323 | "language_info": { 324 | "codemirror_mode": { 325 | "name": "ipython", 326 | "version": 2 327 | }, 328 | "file_extension": ".py", 329 | "mimetype": "text/x-python", 330 | "name": "python", 331 | "nbconvert_exporter": "python", 332 | "pygments_lexer": "ipython2", 333 | "version": "2.7.12" 334 | } 335 | }, 336 | "nbformat": 4, 337 | "nbformat_minor": 2 338 | } 339 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/1_MNIST_Hello_World_of_DL.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "%load_ext autoreload\n", 10 | "%autoreload 2" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 2, 16 | "metadata": {}, 17 | "outputs": [ 18 | { 19 | "name": "stderr", 20 | "output_type": "stream", 21 | "text": [ 22 | "c:\\users\\a00439512\\appdata\\local\\continuum\\anaconda3\\envs\\demo\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. 
In future, it will be treated as `np.float64 == np.dtype(float).type`.\n", 23 | " from ._conv import register_converters as _register_converters\n", 24 | "Using TensorFlow backend.\n" 25 | ] 26 | } 27 | ], 28 | "source": [ 29 | "#from keras import layers, models\n", 30 | "from keras.datasets import mnist\n", 31 | "import numpy as np" 32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": {}, 37 | "source": [ 38 | "# load dataset" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": 3, 44 | "metadata": {}, 45 | "outputs": [], 46 | "source": [ 47 | "(X_train, y_train), (X_test, y_test) = mnist.load_data()" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": 4, 53 | "metadata": {}, 54 | "outputs": [ 55 | { 56 | "name": "stdout", 57 | "output_type": "stream", 58 | "text": [ 59 | "(60000, 28, 28)\n", 60 | "(10000, 28, 28)\n", 61 | "(60000,)\n", 62 | "(10000,)\n" 63 | ] 64 | } 65 | ], 66 | "source": [ 67 | "print(X_train.shape)\n", 68 | "print(X_test.shape)\n", 69 | "\n", 70 | "print(y_train.shape)\n", 71 | "print(y_test.shape)\n", 72 | "\n" 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": 5, 78 | "metadata": {}, 79 | "outputs": [ 80 | { 81 | "name": "stdout", 82 | "output_type": "stream", 83 | "text": [ 84 | "\n", 85 | "\n" 86 | ] 87 | } 88 | ], 89 | "source": [ 90 | "print(type(X_train))\n", 91 | "print(type(y_train))" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": 6, 97 | "metadata": {}, 98 | "outputs": [ 99 | { 100 | "name": "stdout", 101 | "output_type": "stream", 102 | "text": [ 103 | "\n", 104 | "(28, 28)\n", 105 | "\n", 106 | "\n", 107 | "[[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", 108 | " 0 0 0 0 0 0 0 0 0 0]\n", 109 | " [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", 110 | " 0 0 0 0 0 0 0 0 0 0]\n", 111 | " [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", 112 | " 0 0 0 0 0 0 0 0 0 0]\n", 113 | " [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", 114 | " 0 0 0 0 0 0 0 0 0 0]\n", 115 | 
" [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", 116 | " 0 0 0 0 0 0 0 0 0 0]\n", 117 | " [ 0 0 0 0 0 0 0 0 0 0 0 0 3 18 18 18 126 136\n", 118 | " 175 26 166 255 247 127 0 0 0 0]\n", 119 | " [ 0 0 0 0 0 0 0 0 30 36 94 154 170 253 253 253 253 253\n", 120 | " 225 172 253 242 195 64 0 0 0 0]\n", 121 | " [ 0 0 0 0 0 0 0 49 238 253 253 253 253 253 253 253 253 251\n", 122 | " 93 82 82 56 39 0 0 0 0 0]\n", 123 | " [ 0 0 0 0 0 0 0 18 219 253 253 253 253 253 198 182 247 241\n", 124 | " 0 0 0 0 0 0 0 0 0 0]\n", 125 | " [ 0 0 0 0 0 0 0 0 80 156 107 253 253 205 11 0 43 154\n", 126 | " 0 0 0 0 0 0 0 0 0 0]\n", 127 | " [ 0 0 0 0 0 0 0 0 0 14 1 154 253 90 0 0 0 0\n", 128 | " 0 0 0 0 0 0 0 0 0 0]\n", 129 | " [ 0 0 0 0 0 0 0 0 0 0 0 139 253 190 2 0 0 0\n", 130 | " 0 0 0 0 0 0 0 0 0 0]\n", 131 | " [ 0 0 0 0 0 0 0 0 0 0 0 11 190 253 70 0 0 0\n", 132 | " 0 0 0 0 0 0 0 0 0 0]\n", 133 | " [ 0 0 0 0 0 0 0 0 0 0 0 0 35 241 225 160 108 1\n", 134 | " 0 0 0 0 0 0 0 0 0 0]\n", 135 | " [ 0 0 0 0 0 0 0 0 0 0 0 0 0 81 240 253 253 119\n", 136 | " 25 0 0 0 0 0 0 0 0 0]\n", 137 | " [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 45 186 253 253\n", 138 | " 150 27 0 0 0 0 0 0 0 0]\n", 139 | " [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 16 93 252\n", 140 | " 253 187 0 0 0 0 0 0 0 0]\n", 141 | " [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 249\n", 142 | " 253 249 64 0 0 0 0 0 0 0]\n", 143 | " [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 46 130 183 253\n", 144 | " 253 207 2 0 0 0 0 0 0 0]\n", 145 | " [ 0 0 0 0 0 0 0 0 0 0 0 0 39 148 229 253 253 253\n", 146 | " 250 182 0 0 0 0 0 0 0 0]\n", 147 | " [ 0 0 0 0 0 0 0 0 0 0 24 114 221 253 253 253 253 201\n", 148 | " 78 0 0 0 0 0 0 0 0 0]\n", 149 | " [ 0 0 0 0 0 0 0 0 23 66 213 253 253 253 253 198 81 2\n", 150 | " 0 0 0 0 0 0 0 0 0 0]\n", 151 | " [ 0 0 0 0 0 0 18 171 219 253 253 253 253 195 80 9 0 0\n", 152 | " 0 0 0 0 0 0 0 0 0 0]\n", 153 | " [ 0 0 0 0 55 172 226 253 253 253 253 244 133 11 0 0 0 0\n", 154 | " 0 0 0 0 0 0 0 0 0 0]\n", 155 | " [ 0 0 0 0 136 253 253 253 212 135 132 16 0 0 0 0 0 0\n", 156 | " 0 0 0 0 0 0 
0 0 0 0]\n", 157 | " [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", 158 | " 0 0 0 0 0 0 0 0 0 0]\n", 159 | " [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", 160 | " 0 0 0 0 0 0 0 0 0 0]\n", 161 | " [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n", 162 | " 0 0 0 0 0 0 0 0 0 0]]\n" 163 | ] 164 | } 165 | ], 166 | "source": [ 167 | "print(type(X_train[0]))\n", 168 | "print(X_train[0].shape)\n", 169 | "\n", 170 | "print(type(X_train[0][0]))\n", 171 | "print(type(X_train[0][0][0]))\n", 172 | "\n", 173 | "print(X_train[0])" 174 | ] 175 | }, 176 | { 177 | "cell_type": "markdown", 178 | "metadata": {}, 179 | "source": [ 180 | "# Format dataset in correct shape" 181 | ] 182 | }, 183 | { 184 | "cell_type": "code", 185 | "execution_count": 7, 186 | "metadata": {}, 187 | "outputs": [ 188 | { 189 | "name": "stdout", 190 | "output_type": "stream", 191 | "text": [ 192 | "(60000, 784)\n", 193 | "(10000, 784)\n" 194 | ] 195 | } 196 | ], 197 | "source": [ 198 | "X_train = X_train.reshape(-1, 28*28)\n", 199 | "X_test = X_test.reshape(-1, 28*28)\n", 200 | "\n", 201 | "print(X_train.shape)\n", 202 | "print(X_test.shape)\n", 203 | "\n", 204 | "X_train = X_train.astype('float32')\n", 205 | "X_test = X_test.astype('float32')\n", 206 | "\n", 207 | "X_train = X_train/255\n", 208 | "X_test = X_test/255" 209 | ] 210 | }, 211 | { 212 | "cell_type": "code", 213 | "execution_count": 8, 214 | "metadata": {}, 215 | "outputs": [ 216 | { 217 | "name": "stdout", 218 | "output_type": "stream", 219 | "text": [ 220 | "\n", 221 | "(784,)\n", 222 | "\n", 223 | "[0. 0. 0. 0. 0. 0.\n", 224 | " 0. 0. 0. 0. 0. 0.\n", 225 | " 0. 0. 0. 0. 0. 0.\n", 226 | " 0. 0. 0. 0. 0. 0.\n", 227 | " 0. 0. 0. 0. 0. 0.\n", 228 | " 0. 0. 0. 0. 0. 0.\n", 229 | " 0. 0. 0. 0. 0. 0.\n", 230 | " 0. 0. 0. 0. 0. 0.\n", 231 | " 0. 0. 0. 0. 0. 0.\n", 232 | " 0. 0. 0. 0. 0. 0.\n", 233 | " 0. 0. 0. 0. 0. 0.\n", 234 | " 0. 0. 0. 0. 0. 0.\n", 235 | " 0. 0. 0. 0. 0. 0.\n", 236 | " 0. 0. 0. 0. 0. 0.\n", 237 | " 0. 0. 0. 0. 0. 0.\n", 238 | " 0. 0. 0. 0. 0. 
0.\n", 239 | " 0. 0. 0. 0. 0. 0.\n", 240 | " 0. 0. 0. 0. 0. 0.\n", 241 | " 0. 0. 0. 0. 0. 0.\n", 242 | " 0. 0. 0. 0. 0. 0.\n", 243 | " 0. 0. 0. 0. 0. 0.\n", 244 | " 0. 0. 0. 0. 0. 0.\n", 245 | " 0. 0. 0. 0. 0. 0.\n", 246 | " 0. 0. 0. 0. 0. 0.\n", 247 | " 0. 0. 0. 0. 0. 0.\n", 248 | " 0. 0. 0.01176471 0.07058824 0.07058824 0.07058824\n", 249 | " 0.49411765 0.53333336 0.6862745 0.10196079 0.6509804 1.\n", 250 | " 0.96862745 0.49803922 0. 0. 0. 0.\n", 251 | " 0. 0. 0. 0. 0. 0.\n", 252 | " 0. 0. 0.11764706 0.14117648 0.36862746 0.6039216\n", 253 | " 0.6666667 0.99215686 0.99215686 0.99215686 0.99215686 0.99215686\n", 254 | " 0.88235295 0.6745098 0.99215686 0.9490196 0.7647059 0.2509804\n", 255 | " 0. 0. 0. 0. 0. 0.\n", 256 | " 0. 0. 0. 0. 0. 0.19215687\n", 257 | " 0.93333334 0.99215686 0.99215686 0.99215686 0.99215686 0.99215686\n", 258 | " 0.99215686 0.99215686 0.99215686 0.9843137 0.3647059 0.32156864\n", 259 | " 0.32156864 0.21960784 0.15294118 0. 0. 0.\n", 260 | " 0. 0. 0. 0. 0. 0.\n", 261 | " 0. 0. 0. 0.07058824 0.85882354 0.99215686\n", 262 | " 0.99215686 0.99215686 0.99215686 0.99215686 0.7764706 0.7137255\n", 263 | " 0.96862745 0.94509804 0. 0. 0. 0.\n", 264 | " 0. 0. 0. 0. 0. 0.\n", 265 | " 0. 0. 0. 0. 0. 0.\n", 266 | " 0. 0. 0.3137255 0.6117647 0.41960785 0.99215686\n", 267 | " 0.99215686 0.8039216 0.04313726 0. 0.16862746 0.6039216\n", 268 | " 0. 0. 0. 0. 0. 0.\n", 269 | " 0. 0. 0. 0. 0. 0.\n", 270 | " 0. 0. 0. 0. 0. 0.\n", 271 | " 0. 0.05490196 0.00392157 0.6039216 0.99215686 0.3529412\n", 272 | " 0. 0. 0. 0. 0. 0.\n", 273 | " 0. 0. 0. 0. 0. 0.\n", 274 | " 0. 0. 0. 0. 0. 0.\n", 275 | " 0. 0. 0. 0. 0. 0.\n", 276 | " 0. 0.54509807 0.99215686 0.74509805 0.00784314 0.\n", 277 | " 0. 0. 0. 0. 0. 0.\n", 278 | " 0. 0. 0. 0. 0. 0.\n", 279 | " 0. 0. 0. 0. 0. 0.\n", 280 | " 0. 0. 0. 0. 0. 0.04313726\n", 281 | " 0.74509805 0.99215686 0.27450982 0. 0. 0.\n", 282 | " 0. 0. 0. 0. 0. 0.\n", 283 | " 0. 0. 0. 0. 0. 0.\n", 284 | " 0. 0. 0. 0. 0. 0.\n", 285 | " 0. 0. 0. 0. 
0.13725491 0.94509804\n", 286 | " 0.88235295 0.627451 0.42352942 0.00392157 0. 0.\n", 287 | " 0. 0. 0. 0. 0. 0.\n", 288 | " 0. 0. 0. 0. 0. 0.\n", 289 | " 0. 0. 0. 0. 0. 0.\n", 290 | " 0. 0. 0. 0.31764707 0.9411765 0.99215686\n", 291 | " 0.99215686 0.46666667 0.09803922 0. 0. 0.\n", 292 | " 0. 0. 0. 0. 0. 0.\n", 293 | " 0. 0. 0. 0. 0. 0.\n", 294 | " 0. 0. 0. 0. 0. 0.\n", 295 | " 0. 0. 0.1764706 0.7294118 0.99215686 0.99215686\n", 296 | " 0.5882353 0.10588235 0. 0. 0. 0.\n", 297 | " 0. 0. 0. 0. 0. 0.\n", 298 | " 0. 0. 0. 0. 0. 0.\n", 299 | " 0. 0. 0. 0. 0. 0.\n", 300 | " 0. 0.0627451 0.3647059 0.9882353 0.99215686 0.73333335\n", 301 | " 0. 0. 0. 0. 0. 0.\n", 302 | " 0. 0. 0. 0. 0. 0.\n", 303 | " 0. 0. 0. 0. 0. 0.\n", 304 | " 0. 0. 0. 0. 0. 0.\n", 305 | " 0. 0.9764706 0.99215686 0.9764706 0.2509804 0.\n", 306 | " 0. 0. 0. 0. 0. 0.\n", 307 | " 0. 0. 0. 0. 0. 0.\n", 308 | " 0. 0. 0. 0. 0. 0.\n", 309 | " 0. 0. 0.18039216 0.50980395 0.7176471 0.99215686\n", 310 | " 0.99215686 0.8117647 0.00784314 0. 0. 0.\n", 311 | " 0. 0. 0. 0. 0. 0.\n", 312 | " 0. 0. 0. 0. 0. 0.\n", 313 | " 0. 0. 0. 0. 0.15294118 0.5803922\n", 314 | " 0.8980392 0.99215686 0.99215686 0.99215686 0.98039216 0.7137255\n", 315 | " 0. 0. 0. 0. 0. 0.\n", 316 | " 0. 0. 0. 0. 0. 0.\n", 317 | " 0. 0. 0. 0. 0. 0.\n", 318 | " 0.09411765 0.44705883 0.8666667 0.99215686 0.99215686 0.99215686\n", 319 | " 0.99215686 0.7882353 0.30588236 0. 0. 0.\n", 320 | " 0. 0. 0. 0. 0. 0.\n", 321 | " 0. 0. 0. 0. 0. 0.\n", 322 | " 0. 0. 0.09019608 0.25882354 0.8352941 0.99215686\n", 323 | " 0.99215686 0.99215686 0.99215686 0.7764706 0.31764707 0.00784314\n", 324 | " 0. 0. 0. 0. 0. 0.\n", 325 | " 0. 0. 0. 0. 0. 0.\n", 326 | " 0. 0. 0. 0. 0.07058824 0.67058825\n", 327 | " 0.85882354 0.99215686 0.99215686 0.99215686 0.99215686 0.7647059\n", 328 | " 0.3137255 0.03529412 0. 0. 0. 0.\n", 329 | " 0. 0. 0. 0. 0. 0.\n", 330 | " 0. 0. 0. 0. 0. 
0.\n", 331 | " 0.21568628 0.6745098 0.8862745 0.99215686 0.99215686 0.99215686\n", 332 | " 0.99215686 0.95686275 0.52156866 0.04313726 0. 0.\n", 333 | " 0. 0. 0. 0. 0. 0.\n", 334 | " 0. 0. 0. 0. 0. 0.\n", 335 | " 0. 0. 0. 0. 0.53333336 0.99215686\n", 336 | " 0.99215686 0.99215686 0.83137256 0.5294118 0.5176471 0.0627451\n", 337 | " 0. 0. 0. 0. 0. 0.\n", 338 | " 0. 0. 0. 0. 0. 0.\n", 339 | " 0. 0. 0. 0. 0. 0.\n", 340 | " 0. 0. 0. 0. 0. 0.\n", 341 | " 0. 0. 0. 0. 0. 0.\n", 342 | " 0. 0. 0. 0. 0. 0.\n", 343 | " 0. 0. 0. 0. 0. 0.\n", 344 | " 0. 0. 0. 0. 0. 0.\n", 345 | " 0. 0. 0. 0. 0. 0.\n", 346 | " 0. 0. 0. 0. 0. 0.\n", 347 | " 0. 0. 0. 0. 0. 0.\n", 348 | " 0. 0. 0. 0. 0. 0.\n", 349 | " 0. 0. 0. 0. 0. 0.\n", 350 | " 0. 0. 0. 0. 0. 0.\n", 351 | " 0. 0. 0. 0. 0. 0.\n", 352 | " 0. 0. 0. 0. 0. 0.\n", 353 | " 0. 0. 0. 0. ]\n" 354 | ] 355 | } 356 | ], 357 | "source": [ 358 | "print(type(X_train[0]))\n", 359 | "print(X_train[0].shape)\n", 360 | "\n", 361 | "print(type(X_train[0][0]))\n", 362 | "#print(type(X_train[0][0][0]))\n", 363 | "\n", 364 | "print(X_train[0])" 365 | ] 366 | }, 367 | { 368 | "cell_type": "code", 369 | "execution_count": 9, 370 | "metadata": {}, 371 | "outputs": [], 372 | "source": [ 373 | "from keras.utils import to_categorical" 374 | ] 375 | }, 376 | { 377 | "cell_type": "code", 378 | "execution_count": 10, 379 | "metadata": {}, 380 | "outputs": [ 381 | { 382 | "name": "stdout", 383 | "output_type": "stream", 384 | "text": [ 385 | "[0 1 2 3 4 5 6 7 8 9]\n", 386 | "[0 1 2 3 4 5 6 7 8 9]\n", 387 | "[5 0 4 1 9]\n", 388 | "[7 2 1 0 4]\n" 389 | ] 390 | } 391 | ], 392 | "source": [ 393 | "print(np.unique(y_train))\n", 394 | "print(np.unique(y_test))\n", 395 | "\n", 396 | "print(y_train[:5])\n", 397 | "print(y_test[:5])" 398 | ] 399 | }, 400 | { 401 | "cell_type": "code", 402 | "execution_count": 11, 403 | "metadata": {}, 404 | "outputs": [], 405 | "source": [ 406 | "y_train = to_categorical(y_train)\n", 407 | "y_test = to_categorical(y_test)" 408 | ] 409 | 
}, 410 | { 411 | "cell_type": "code", 412 | "execution_count": 12, 413 | "metadata": {}, 414 | "outputs": [ 415 | { 416 | "name": "stdout", 417 | "output_type": "stream", 418 | "text": [ 419 | "[0. 1.]\n", 420 | "[0. 1.]\n", 421 | "[[0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]\n", 422 | " [1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n", 423 | " [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]\n", 424 | " [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]\n", 425 | " [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]]\n", 426 | "[[0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]\n", 427 | " [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]\n", 428 | " [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]\n", 429 | " [1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n", 430 | " [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]]\n" 431 | ] 432 | } 433 | ], 434 | "source": [ 435 | "print(np.unique(y_train))\n", 436 | "print(np.unique(y_test))\n", 437 | "\n", 438 | "print(y_train[:5])\n", 439 | "print(y_test[:5])" 440 | ] 441 | }, 442 | { 443 | "cell_type": "markdown", 444 | "metadata": {}, 445 | "source": [ 446 | "# Build a basic network" 447 | ] 448 | }, 449 | { 450 | "cell_type": "code", 451 | "execution_count": 13, 452 | "metadata": {}, 453 | "outputs": [], 454 | "source": [ 455 | "from keras.models import Sequential\n", 456 | "from keras.layers import Dense" 457 | ] 458 | }, 459 | { 460 | "cell_type": "code", 461 | "execution_count": 18, 462 | "metadata": {}, 463 | "outputs": [], 464 | "source": [ 465 | "model = Sequential()\n", 466 | "model.add(Dense(512, activation='relu', input_shape=(28*28,)))\n", 467 | "model.add(Dense(10, activation='softmax'))" 468 | ] 469 | }, 470 | { 471 | "cell_type": "code", 472 | "execution_count": 19, 473 | "metadata": {}, 474 | "outputs": [], 475 | "source": [ 476 | "model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])" 477 | ] 478 | }, 479 | { 480 | "cell_type": "markdown", 481 | "metadata": {}, 482 | "source": [ 483 | "# Train the model" 484 | ] 485 | }, 486 | { 487 | "cell_type": "code", 488 | "execution_count": 20, 489 | "metadata": {}, 490 | "outputs": [ 491 | { 492 | 
"name": "stdout", 493 | "output_type": "stream", 494 | "text": [ 495 | "Epoch 1/5\n", 496 | "60000/60000 [==============================] - 18s 308us/step - loss: 0.0344 - acc: 0.9884\n", 497 | "Epoch 2/5\n", 498 | "60000/60000 [==============================] - 17s 282us/step - loss: 0.0147 - acc: 0.9951\n", 499 | "Epoch 3/5\n", 500 | "60000/60000 [==============================] - 16s 272us/step - loss: 0.0099 - acc: 0.9967\n", 501 | "Epoch 4/5\n", 502 | "60000/60000 [==============================] - 17s 286us/step - loss: 0.0073 - acc: 0.9975\n", 503 | "Epoch 5/5\n", 504 | "60000/60000 [==============================] - 16s 271us/step - loss: 0.0055 - acc: 0.9982\n" 505 | ] 506 | } 507 | ], 508 | "source": [ 509 | "history = model.fit(X_train, y_train, epochs=5, batch_size=32)" 510 | ] 511 | }, 512 | { 513 | "cell_type": "code", 514 | "execution_count": 22, 515 | "metadata": {}, 516 | "outputs": [ 517 | { 518 | "name": "stdout", 519 | "output_type": "stream", 520 | "text": [ 521 | "10000/10000 [==============================] - 1s 69us/step\n", 522 | "0.013999633967231785\n", 523 | "0.9955599971771241\n" 524 | ] 525 | } 526 | ], 527 | "source": [ 528 | "(test_loss, test_accuracy) = model.evaluate(X_test, y_test)\n", 529 | "print(test_loss)\n", 530 | "print(test_accuracy)" 531 | ] 532 | }, 533 | { 534 | "cell_type": "code", 535 | "execution_count": null, 536 | "metadata": {}, 537 | "outputs": [], 538 | "source": [] 539 | } 540 | ], 541 | "metadata": { 542 | "kernelspec": { 543 | "display_name": "Python 3", 544 | "language": "python", 545 | "name": "python3" 546 | }, 547 | "language_info": { 548 | "codemirror_mode": { 549 | "name": "ipython", 550 | "version": 3 551 | }, 552 | "file_extension": ".py", 553 | "mimetype": "text/x-python", 554 | "name": "python", 555 | "nbconvert_exporter": "python", 556 | "pygments_lexer": "ipython3", 557 | "version": "3.5.4" 558 | } 559 | }, 560 | "nbformat": 4, 561 | "nbformat_minor": 2 562 | } 563 | 
-------------------------------------------------------------------------------- /Keras/Keras_from_scratch/2_Sequential()_vs_Functional().ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Sequential vs Functional : MNIST\n", 8 | " 2.1 first look at a neural network" 9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": 1, 14 | "metadata": {}, 15 | "outputs": [], 16 | "source": [ 17 | "%load_ext autoreload\n", 18 | "%autoreload 2" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 2, 24 | "metadata": {}, 25 | "outputs": [ 26 | { 27 | "name": "stderr", 28 | "output_type": "stream", 29 | "text": [ 30 | "c:\\users\\a00439512\\appdata\\local\\continuum\\anaconda3\\envs\\demo\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. 
In future, it will be treated as `np.float64 == np.dtype(float).type`.\n", 31 | " from ._conv import register_converters as _register_converters\n", 32 | "Using TensorFlow backend.\n" 33 | ] 34 | } 35 | ], 36 | "source": [ 37 | "from keras.datasets import mnist\n", 38 | "import numpy as np" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": 3, 44 | "metadata": {}, 45 | "outputs": [], 46 | "source": [ 47 | "(x_train, y_train), (x_test, y_test) = mnist.load_data()" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": null, 53 | "metadata": {}, 54 | "outputs": [], 55 | "source": [] 56 | }, 57 | { 58 | "cell_type": "code", 59 | "execution_count": 4, 60 | "metadata": {}, 61 | "outputs": [], 62 | "source": [ 63 | "x_train = x_train.reshape((-1, 28*28))\n", 64 | "x_train = x_train.astype('float32')\n", 65 | "x_train = x_train / 255" 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": 5, 71 | "metadata": {}, 72 | "outputs": [], 73 | "source": [ 74 | "x_test = x_test.reshape((-1, 28*28))\n", 75 | "x_test = x_test.astype('float32')\n", 76 | "x_test = x_test / 255" 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": 6, 82 | "metadata": {}, 83 | "outputs": [], 84 | "source": [ 85 | "from keras.utils import to_categorical\n", 86 | "\n", 87 | "y_train = to_categorical(y_train)\n", 88 | "y_test = to_categorical(y_test)" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": null, 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": 7, 101 | "metadata": {}, 102 | "outputs": [], 103 | "source": [ 104 | "from keras.layers import Dense\n", 105 | "from keras import layers\n", 106 | "from keras import models" 107 | ] 108 | }, 109 | { 110 | "cell_type": "code", 111 | "execution_count": 8, 112 | "metadata": {}, 113 | "outputs": [], 114 | "source": [ 115 | "input_tensor = layers.Input(shape=(784,))\n", 116 | "L1 = 
layers.Dense(512, activation='relu')(input_tensor)\n", 117 | "L2 = layers.Dense(10, activation='softmax')(L1)" 118 | ] 119 | }, 120 | { 121 | "cell_type": "code", 122 | "execution_count": 9, 123 | "metadata": {}, 124 | "outputs": [], 125 | "source": [ 126 | "model = models.Model(inputs=input_tensor, outputs=L2)" 127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": 10, 132 | "metadata": {}, 133 | "outputs": [], 134 | "source": [ 135 | "model.compile(optimizer='rmsprop',\n", 136 | " loss='categorical_crossentropy',\n", 137 | " metrics=['accuracy'])" 138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": 11, 143 | "metadata": {}, 144 | "outputs": [ 145 | { 146 | "name": "stdout", 147 | "output_type": "stream", 148 | "text": [ 149 | "Epoch 1/5\n", 150 | "60000/60000 [==============================] - 15s 247us/step - loss: 0.2028 - acc: 0.9408\n", 151 | "Epoch 2/5\n", 152 | "60000/60000 [==============================] - 17s 277us/step - loss: 0.0902 - acc: 0.9741\n", 153 | "Epoch 3/5\n", 154 | "60000/60000 [==============================] - 15s 242us/step - loss: 0.0667 - acc: 0.9816\n", 155 | "Epoch 4/5\n", 156 | "60000/60000 [==============================] - 16s 264us/step - loss: 0.0509 - acc: 0.9858\n", 157 | "Epoch 5/5\n", 158 | "60000/60000 [==============================] - 16s 259us/step - loss: 0.0419 - acc: 0.9885\n" 159 | ] 160 | }, 161 | { 162 | "data": { 163 | "text/plain": [ 164 | "" 165 | ] 166 | }, 167 | "execution_count": 11, 168 | "metadata": {}, 169 | "output_type": "execute_result" 170 | } 171 | ], 172 | "source": [ 173 | "model.fit(x_train, y_train, epochs=5, batch_size=32)" 174 | ] 175 | }, 176 | { 177 | "cell_type": "code", 178 | "execution_count": null, 179 | "metadata": {}, 180 | "outputs": [], 181 | "source": [] 182 | } 183 | ], 184 | "metadata": { 185 | "kernelspec": { 186 | "display_name": "Python 3", 187 | "language": "python", 188 | "name": "python3" 189 | }, 190 | "language_info": { 191 | 
"codemirror_mode": { 192 | "name": "ipython", 193 | "version": 3 194 | }, 195 | "file_extension": ".py", 196 | "mimetype": "text/x-python", 197 | "name": "python", 198 | "nbconvert_exporter": "python", 199 | "pygments_lexer": "ipython3", 200 | "version": "3.5.4" 201 | } 202 | }, 203 | "nbformat": 4, 204 | "nbformat_minor": 2 205 | } 206 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/3_IMDB_sentiment.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 3.4 IMDB Sentiment analysis\n", 8 | "\n", 9 | " 3.4 Classifying movie reviews: a binary classification example" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "metadata": { 16 | "collapsed": true 17 | }, 18 | "outputs": [], 19 | "source": [] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 1, 24 | "metadata": { 25 | "collapsed": true 26 | }, 27 | "outputs": [], 28 | "source": [ 29 | "%load_ext autoreload\n", 30 | "%autoreload 2" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 2, 36 | "metadata": { 37 | "collapsed": false 38 | }, 39 | "outputs": [ 40 | { 41 | "name": "stderr", 42 | "output_type": "stream", 43 | "text": [ 44 | "Using TensorFlow backend.\n", 45 | "/home/root1/.virtualenv/demos/local/lib/python2.7/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. 
In future, it will be treated as `np.float64 == np.dtype(float).type`.\n", 46 | " from ._conv import register_converters as _register_converters\n" 47 | ] 48 | } 49 | ], 50 | "source": [ 51 | "from keras.datasets import imdb\n", 52 | "import numpy as np" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": 3, 58 | "metadata": { 59 | "collapsed": true 60 | }, 61 | "outputs": [], 62 | "source": [ 63 | "(x_train, y_train), (x_test, y_test) = imdb.load_data()" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": 5, 69 | "metadata": { 70 | "collapsed": false 71 | }, 72 | "outputs": [ 73 | { 74 | "name": "stdout", 75 | "output_type": "stream", 76 | "text": [ 77 | "25000\n", 78 | "88586\n" 79 | ] 80 | } 81 | ], 82 | "source": [ 83 | "# find largest word index\n", 84 | "\n", 85 | "ll = [max(s) for s in x_train]\n", 86 | "print len(ll)\n", 87 | "#max()\n", 88 | "\n", 89 | "print max(ll)" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 6, 95 | "metadata": { 96 | "collapsed": true 97 | }, 98 | "outputs": [], 99 | "source": [ 100 | "word_to_index = imdb.get_word_index()\n", 101 | "index_to_word = dict((value,key) for (key, value) in word_to_index.items())" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": 7, 107 | "metadata": { 108 | "collapsed": true 109 | }, 110 | "outputs": [], 111 | "source": [ 112 | "decoded_review = ' '.join([index_to_word.get(i-3, '?') for i in x_train[0]])" 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": 8, 118 | "metadata": { 119 | "collapsed": false 120 | }, 121 | "outputs": [ 122 | { 123 | "name": "stdout", 124 | "output_type": "stream", 125 | "text": [ 126 | "? 
this film was just brilliant casting location scenery story direction everyone's really suited the part they played and you could just imagine being there robert redford's is an amazing actor and now the same being director norman's father came from the same scottish island as myself so i loved the fact there was a real connection with this film the witty remarks throughout the film were great it was just brilliant so much that i bought the film as soon as it was released for retail and would recommend it to everyone to watch and the fly fishing was amazing really cried at the end it was so sad and you know what they say if you cry at a film it must have been good and this definitely was also congratulations to the two little boy's that played the part's of norman and paul they were just brilliant children are often left out of the praising list i think because the stars that play them all grown up are such a big profile for the whole film but these children are amazing and should be praised for what they have done don't you think the whole story was so lovely because it was true and was someone's life after all that was shared with us all\n" 127 | ] 128 | } 129 | ], 130 | "source": [ 131 | "print decoded_review" 132 | ] 133 | }, 134 | { 135 | "cell_type": "code", 136 | "execution_count": 9, 137 | "metadata": { 138 | "collapsed": false 139 | }, 140 | "outputs": [ 141 | { 142 | "name": "stdout", 143 | "output_type": "stream", 144 | "text": [ 145 | "2494\n" 146 | ] 147 | } 148 | ], 149 | "source": [ 150 | "#print x_train[3972]\n", 151 | "print len(x_train[17934])" 152 | ] 153 | }, 154 | { 155 | "cell_type": "markdown", 156 | "metadata": {}, 157 | "source": [ 158 | "## data transformation" 159 | ] 160 | }, 161 | { 162 | "cell_type": "code", 163 | "execution_count": 10, 164 | "metadata": { 165 | "collapsed": true 166 | }, 167 | "outputs": [], 168 | "source": [ 169 | "def vectorize_seqeuces(seqs, dimension=10000):\n", 170 | " \n", 171 | " result = np.zeros((len(seqs), 
dimension))\n", 172 | " \n", 173 | " for i, seq in enumerate(seqs):\n", 174 | " result[i, seq] = 1\n", 175 | " return result\n" 176 | ] 177 | }, 178 | { 179 | "cell_type": "code", 180 | "execution_count": 11, 181 | "metadata": { 182 | "collapsed": true 183 | }, 184 | "outputs": [], 185 | "source": [ 186 | "x_train = vectorize_seqeuces(x_train, dimension=89000)" 187 | ] 188 | }, 189 | { 190 | "cell_type": "code", 191 | "execution_count": 12, 192 | "metadata": { 193 | "collapsed": false 194 | }, 195 | "outputs": [ 196 | { 197 | "data": { 198 | "text/plain": [ 199 | "(25000, 89000)" 200 | ] 201 | }, 202 | "execution_count": 12, 203 | "metadata": {}, 204 | "output_type": "execute_result" 205 | } 206 | ], 207 | "source": [ 208 | "x_train.shape" 209 | ] 210 | }, 211 | { 212 | "cell_type": "code", 213 | "execution_count": 13, 214 | "metadata": { 215 | "collapsed": true 216 | }, 217 | "outputs": [], 218 | "source": [ 219 | "x_test = vectorize_seqeuces(x_test, dimension=89000)" 220 | ] 221 | }, 222 | { 223 | "cell_type": "code", 224 | "execution_count": 14, 225 | "metadata": { 226 | "collapsed": false 227 | }, 228 | "outputs": [ 229 | { 230 | "data": { 231 | "text/plain": [ 232 | "(25000, 89000)" 233 | ] 234 | }, 235 | "execution_count": 14, 236 | "metadata": {}, 237 | "output_type": "execute_result" 238 | } 239 | ], 240 | "source": [ 241 | "x_test.shape" 242 | ] 243 | }, 244 | { 245 | "cell_type": "code", 246 | "execution_count": 16, 247 | "metadata": { 248 | "collapsed": false 249 | }, 250 | "outputs": [ 251 | { 252 | "name": "stdout", 253 | "output_type": "stream", 254 | "text": [ 255 | "set([0, 1])\n", 256 | "set([0, 1])\n" 257 | ] 258 | } 259 | ], 260 | "source": [ 261 | "print set(y_train)\n", 262 | "print set(y_test)" 263 | ] 264 | }, 265 | { 266 | "cell_type": "code", 267 | "execution_count": 22, 268 | "metadata": { 269 | "collapsed": true 270 | }, 271 | "outputs": [], 272 | "source": [ 273 | "#define model\n", 274 | "\n", 275 | "from keras.layers import Dense\n", 
276 | "from keras import models\n", 277 | "\n", 278 | "model = models.Sequential()\n", 279 | "model.add(Dense(16, activation='relu', input_shape=(89000,)))\n", 280 | "model.add(Dense(16, activation='relu'))\n", 281 | "model.add(Dense(1, activation='sigmoid'))" 282 | ] 283 | }, 284 | { 285 | "cell_type": "code", 286 | "execution_count": 23, 287 | "metadata": { 288 | "collapsed": false 289 | }, 290 | "outputs": [ 291 | { 292 | "name": "stdout", 293 | "output_type": "stream", 294 | "text": [ 295 | "WARNING:tensorflow:From /home/root1/.virtualenv/demos/local/lib/python2.7/site-packages/keras/backend/tensorflow_backend.py:1153: calling reduce_mean (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\n", 296 | "Instructions for updating:\n", 297 | "keep_dims is deprecated, use keepdims instead\n" 298 | ] 299 | } 300 | ], 301 | "source": [ 302 | "model.compile(optimizer='adam',\n", 303 | " loss='binary_crossentropy',\n", 304 | " metrics=(['accuracy']))" 305 | ] 306 | }, 307 | { 308 | "cell_type": "code", 309 | "execution_count": 25, 310 | "metadata": { 311 | "collapsed": false 312 | }, 313 | "outputs": [ 314 | { 315 | "name": "stdout", 316 | "output_type": "stream", 317 | "text": [ 318 | "Epoch 1/5\n", 319 | "25000/25000 [==============================] - 30s - loss: 0.3158 - acc: 0.8724 \n", 320 | "Epoch 2/5\n", 321 | "25000/25000 [==============================] - 26s - loss: 0.1101 - acc: 0.9633 \n", 322 | "Epoch 3/5\n", 323 | "25000/25000 [==============================] - 26s - loss: 0.0477 - acc: 0.9846 \n", 324 | "Epoch 4/5\n", 325 | "25000/25000 [==============================] - 27s - loss: 0.0227 - acc: 0.9931 \n", 326 | "Epoch 5/5\n", 327 | "25000/25000 [==============================] - 27s - loss: 0.0111 - acc: 0.9971 \n" 328 | ] 329 | }, 330 | { 331 | "data": { 332 | "text/plain": [ 333 | "" 334 | ] 335 | }, 336 | "execution_count": 25, 337 | "metadata": {}, 338 | "output_type": "execute_result" 339 | } 
340 | ], 341 | "source": [ 342 | "model.fit(x_train, y_train, epochs=5, batch_size=32)" 343 | ] 344 | }, 345 | { 346 | "cell_type": "code", 347 | "execution_count": 26, 348 | "metadata": { 349 | "collapsed": false 350 | }, 351 | "outputs": [ 352 | { 353 | "name": "stdout", 354 | "output_type": "stream", 355 | "text": [ 356 | "24928/25000 [============================>.] - ETA: 0s" 357 | ] 358 | }, 359 | { 360 | "data": { 361 | "text/plain": [ 362 | "[0.755559324336052, 0.8484]" 363 | ] 364 | }, 365 | "execution_count": 26, 366 | "metadata": {}, 367 | "output_type": "execute_result" 368 | } 369 | ], 370 | "source": [ 371 | "model.evaluate(x_test, y_test)" 372 | ] 373 | }, 374 | { 375 | "cell_type": "code", 376 | "execution_count": null, 377 | "metadata": { 378 | "collapsed": true 379 | }, 380 | "outputs": [], 381 | "source": [] 382 | } 383 | ], 384 | "metadata": { 385 | "kernelspec": { 386 | "display_name": "Python 2", 387 | "language": "python", 388 | "name": "python2" 389 | }, 390 | "language_info": { 391 | "codemirror_mode": { 392 | "name": "ipython", 393 | "version": 2 394 | }, 395 | "file_extension": ".py", 396 | "mimetype": "text/x-python", 397 | "name": "python", 398 | "nbconvert_exporter": "python", 399 | "pygments_lexer": "ipython2", 400 | "version": "2.7.10" 401 | } 402 | }, 403 | "nbformat": 4, 404 | "nbformat_minor": 2 405 | } 406 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/4_multiclass_classification.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 3.5 multiclass classification example\n", 8 | " 3.5 Reuters newswires" 9 | ] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": 1, 14 | "metadata": { 15 | "collapsed": true 16 | }, 17 | "outputs": [], 18 | "source": [ 19 | "%load_ext autoreload \n", 20 | "%autoreload 2" 21 | ] 22 | }, 23 | { 24 | 
"cell_type": "code", 25 | "execution_count": 13, 26 | "metadata": { 27 | "collapsed": true 28 | }, 29 | "outputs": [], 30 | "source": [ 31 | "from keras.datasets import reuters\n", 32 | "import numpy as np" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": 4, 38 | "metadata": { 39 | "collapsed": true 40 | }, 41 | "outputs": [], 42 | "source": [ 43 | "(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=10000)" 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": 5, 49 | "metadata": {}, 50 | "outputs": [ 51 | { 52 | "name": "stdout", 53 | "output_type": "stream", 54 | "text": [ 55 | "Downloading data from https://s3.amazonaws.com/text-datasets/reuters_word_index.json\n" 56 | ] 57 | } 58 | ], 59 | "source": [ 60 | "word_to_index = reuters.get_word_index()\n", 61 | "\n", 62 | "index_to_word = dict([(value, key) for (key, value) in word_to_index.items()])" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": 8, 68 | "metadata": {}, 69 | "outputs": [ 70 | { 71 | "data": { 72 | "text/plain": [ 73 | "u'? ? ? 
said as a result of its december acquisition of space co it expects earnings per share in 1987 of 1 15 to 1 30 dlrs per share up from 70 cts in 1986 the company said pretax net should rise to nine to 10 mln dlrs from six mln dlrs in 1986 and rental operation revenues to 19 to 22 mln dlrs from 12 5 mln dlrs it said cash flow per share this year should be 2 50 to three dlrs reuter 3'" 74 | ] 75 | }, 76 | "execution_count": 8, 77 | "metadata": {}, 78 | "output_type": "execute_result" 79 | } 80 | ], 81 | "source": [ 82 | "' '.join(index_to_word.get(i-3,'?') for i in x_train[0])" 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "metadata": {}, 88 | "source": [ 89 | "## data modification" 90 | ] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 9, 95 | "metadata": {}, 96 | "outputs": [ 97 | { 98 | "data": { 99 | "text/plain": [ 100 | "9999" 101 | ] 102 | }, 103 | "execution_count": 9, 104 | "metadata": {}, 105 | "output_type": "execute_result" 106 | } 107 | ], 108 | "source": [ 109 | "max([max(ll) for ll in x_train])" 110 | ] 111 | }, 112 | { 113 | "cell_type": "code", 114 | "execution_count": 17, 115 | "metadata": { 116 | "collapsed": true 117 | }, 118 | "outputs": [], 119 | "source": [ 120 | "def vectorize_seq(seqs, dim=10000):\n", 121 | " \n", 122 | " result = np.zeros((len(seqs), dim))\n", 123 | " \n", 124 | " for i, s in enumerate(seqs):\n", 125 | " result[i,s] = 1\n", 126 | " \n", 127 | " return result\n", 128 | "\n" 129 | ] 130 | }, 131 | { 132 | "cell_type": "code", 133 | "execution_count": 18, 134 | "metadata": { 135 | "collapsed": true 136 | }, 137 | "outputs": [], 138 | "source": [ 139 | "x_train = vectorize_seq(x_train)\n", 140 | "x_test = vectorize_seq(x_test)" 141 | ] 142 | }, 143 | { 144 | "cell_type": "code", 145 | "execution_count": 19, 146 | "metadata": {}, 147 | "outputs": [ 148 | { 149 | "name": "stdout", 150 | "output_type": "stream", 151 | "text": [ 152 | "set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45])\n", 153 | "set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45])\n" 154 | ] 155 | } 156 | ], 157 | "source": [ 158 | "print set(y_train)\n", 159 | "print set(y_test)" 160 | ] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "execution_count": 20, 165 | "metadata": { 166 | "collapsed": true 167 | }, 168 | "outputs": [], 169 | "source": [ 170 | "from keras.utils import to_categorical" 171 | ] 172 | }, 173 | { 174 | "cell_type": "code", 175 | "execution_count": 21, 176 | "metadata": { 177 | "collapsed": true 178 | }, 179 | "outputs": [], 180 | "source": [ 181 | "y_train = to_categorical(y_train)\n", 182 | "y_test = to_categorical(y_test)" 183 | ] 184 | }, 185 | { 186 | "cell_type": "markdown", 187 | "metadata": {}, 188 | "source": [ 189 | "## Network" 190 | ] 191 | }, 192 | { 193 | "cell_type": "code", 194 | "execution_count": 22, 195 | "metadata": { 196 | "collapsed": true 197 | }, 198 | "outputs": [], 199 | "source": [ 200 | "from keras.models import Sequential\n", 201 | "from keras.layers import Dense" 202 | ] 203 | }, 204 | { 205 | "cell_type": "code", 206 | "execution_count": 24, 207 | "metadata": { 208 | "collapsed": true 209 | }, 210 | "outputs": [], 211 | "source": [ 212 | "model = Sequential()\n", 213 | "model.add(Dense(64, activation='relu', input_shape=(10000,)))\n", 214 | "model.add(Dense(64, activation='relu'))\n", 215 | "model.add(Dense(46, activation='softmax'))" 216 | ] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "execution_count": 25, 221 | "metadata": {}, 222 | "outputs": [ 223 | { 224 | "name": "stdout", 225 | "output_type": "stream", 226 | "text": [ 227 | "WARNING:tensorflow:From /home/root1/.virtualenv/demos/local/lib/python2.7/site-packages/keras/backend/tensorflow_backend.py:2578: calling reduce_sum (from 
tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\n", 228 | "Instructions for updating:\n", 229 | "keep_dims is deprecated, use keepdims instead\n", 230 | "WARNING:tensorflow:From /home/root1/.virtualenv/demos/local/lib/python2.7/site-packages/keras/backend/tensorflow_backend.py:1153: calling reduce_mean (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\n", 231 | "Instructions for updating:\n", 232 | "keep_dims is deprecated, use keepdims instead\n" 233 | ] 234 | } 235 | ], 236 | "source": [ 237 | "model.compile(optimizer='adam',\n", 238 | " loss='categorical_crossentropy', \n", 239 | " metrics = (['accuracy']))" 240 | ] 241 | }, 242 | { 243 | "cell_type": "code", 244 | "execution_count": 26, 245 | "metadata": {}, 246 | "outputs": [ 247 | { 248 | "name": "stdout", 249 | "output_type": "stream", 250 | "text": [ 251 | "Epoch 1/5\n", 252 | "8982/8982 [==============================] - 4s - loss: 1.5006 - acc: 0.6720 \n", 253 | "Epoch 2/5\n", 254 | "8982/8982 [==============================] - 4s - loss: 0.6396 - acc: 0.8563 \n", 255 | "Epoch 3/5\n", 256 | "8982/8982 [==============================] - 4s - loss: 0.3471 - acc: 0.9226 \n", 257 | "Epoch 4/5\n", 258 | "8982/8982 [==============================] - 3s - loss: 0.2382 - acc: 0.9446 \n", 259 | "Epoch 5/5\n", 260 | "8982/8982 [==============================] - 3s - loss: 0.1914 - acc: 0.9503 \n" 261 | ] 262 | }, 263 | { 264 | "data": { 265 | "text/plain": [ 266 | "" 267 | ] 268 | }, 269 | "execution_count": 26, 270 | "metadata": {}, 271 | "output_type": "execute_result" 272 | } 273 | ], 274 | "source": [ 275 | "model.fit(x_train, y_train, batch_size=32, epochs=5)" 276 | ] 277 | }, 278 | { 279 | "cell_type": "code", 280 | "execution_count": 27, 281 | "metadata": {}, 282 | "outputs": [ 283 | { 284 | "name": "stdout", 285 | "output_type": "stream", 286 | "text": [ 287 | "1888/2246 
[========================>.....] - ETA: 0s" 288 | ] 289 | }, 290 | { 291 | "data": { 292 | "text/plain": [ 293 | "[0.9990636005214782, 0.7898486197950154]" 294 | ] 295 | }, 296 | "execution_count": 27, 297 | "metadata": {}, 298 | "output_type": "execute_result" 299 | } 300 | ], 301 | "source": [ 302 | "model.evaluate(x_test, y_test)" 303 | ] 304 | }, 305 | { 306 | "cell_type": "code", 307 | "execution_count": null, 308 | "metadata": { 309 | "collapsed": true 310 | }, 311 | "outputs": [], 312 | "source": [] 313 | } 314 | ], 315 | "metadata": { 316 | "kernelspec": { 317 | "display_name": "Python 3", 318 | "language": "python", 319 | "name": "python3" 320 | }, 321 | "language_info": { 322 | "codemirror_mode": { 323 | "name": "ipython", 324 | "version": 3 325 | }, 326 | "file_extension": ".py", 327 | "mimetype": "text/x-python", 328 | "name": "python", 329 | "nbconvert_exporter": "python", 330 | "pygments_lexer": "ipython3", 331 | "version": "3.5.4" 332 | } 333 | }, 334 | "nbformat": 4, 335 | "nbformat_minor": 2 336 | } 337 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/5_Intro to CNN.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Ch 5 : Introduction Code" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "%load_ext autoreload\n", 17 | "%autoreload 2" 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": 2, 23 | "metadata": {}, 24 | "outputs": [ 25 | { 26 | "name": "stderr", 27 | "output_type": "stream", 28 | "text": [ 29 | "c:\\users\\a00439512\\appdata\\local\\continuum\\anaconda3\\envs\\demo\\lib\\site-packages\\h5py\\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. 
In future, it will be treated as `np.float64 == np.dtype(float).type`.\n", 30 | " from ._conv import register_converters as _register_converters\n", 31 | "Using TensorFlow backend.\n" 32 | ] 33 | } 34 | ], 35 | "source": [ 36 | "from keras.datasets import mnist\n", 37 | "import numpy as np" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": null, 43 | "metadata": {}, 44 | "outputs": [], 45 | "source": [] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": 3, 50 | "metadata": {}, 51 | "outputs": [], 52 | "source": [ 53 | "(x_train, y_train), (x_test,y_test) = mnist.load_data()" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": 4, 59 | "metadata": {}, 60 | "outputs": [ 61 | { 62 | "name": "stdout", 63 | "output_type": "stream", 64 | "text": [ 65 | "(60000, 28, 28)\n", 66 | "(10000, 28, 28)\n" 67 | ] 68 | } 69 | ], 70 | "source": [ 71 | "print(x_train.shape)\n", 72 | "print(x_test.shape)" 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": 5, 78 | "metadata": {}, 79 | "outputs": [], 80 | "source": [ 81 | "x_train = x_train.reshape((-1, 28, 28, 1))\n", 82 | "x_test = x_test.reshape((-1, 28, 28, 1))\n", 83 | "\n", 84 | "\n", 85 | "x_train = x_train.astype('float32') / 255\n", 86 | "x_test = x_test.astype('float32') / 255" 87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": 6, 92 | "metadata": {}, 93 | "outputs": [ 94 | { 95 | "name": "stdout", 96 | "output_type": "stream", 97 | "text": [ 98 | "(60000, 28, 28, 1)\n", 99 | "(10000, 28, 28, 1)\n" 100 | ] 101 | } 102 | ], 103 | "source": [ 104 | "print(x_train.shape)\n", 105 | "print(x_test.shape)" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": 7, 111 | "metadata": {}, 112 | "outputs": [], 113 | "source": [ 114 | "from keras.utils import to_categorical" 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": 8, 120 | "metadata": {}, 121 | "outputs": [], 122 | "source": [ 123 | 
"y_train = to_categorical(y_train)\n", 124 | "y_test = to_categorical(y_test)" 125 | ] 126 | }, 127 | { 128 | "cell_type": "code", 129 | "execution_count": null, 130 | "metadata": {}, 131 | "outputs": [], 132 | "source": [] 133 | }, 134 | { 135 | "cell_type": "code", 136 | "execution_count": null, 137 | "metadata": {}, 138 | "outputs": [], 139 | "source": [] 140 | }, 141 | { 142 | "cell_type": "code", 143 | "execution_count": 9, 144 | "metadata": {}, 145 | "outputs": [], 146 | "source": [ 147 | "from keras.models import Sequential\n", 148 | "from keras import layers\n" 149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": 10, 154 | "metadata": {}, 155 | "outputs": [], 156 | "source": [ 157 | "model = Sequential()\n", 158 | "model.add(layers.Conv2D(filters=32, kernel_size=(3,3), activation='relu', input_shape=(28,28,1)))\n", 159 | "model.add(layers.MaxPool2D(2,2))\n", 160 | "model.add(layers.Conv2D(filters=64, kernel_size=(3,3), activation='relu'))\n", 161 | "model.add(layers.MaxPool2D(2,2))\n", 162 | "model.add(layers.Conv2D(64, kernel_size=(3,3), activation='relu'))\n", 163 | "model.add(layers.Flatten())\n", 164 | "model.add(layers.Dense(128, activation='relu'))\n", 165 | "model.add(layers.Dense(10, activation='softmax'))" 166 | ] 167 | }, 168 | { 169 | "cell_type": "code", 170 | "execution_count": 11, 171 | "metadata": {}, 172 | "outputs": [ 173 | { 174 | "name": "stdout", 175 | "output_type": "stream", 176 | "text": [ 177 | "_________________________________________________________________\n", 178 | "Layer (type) Output Shape Param # \n", 179 | "=================================================================\n", 180 | "conv2d_1 (Conv2D) (None, 26, 26, 32) 320 \n", 181 | "_________________________________________________________________\n", 182 | "max_pooling2d_1 (MaxPooling2 (None, 13, 13, 32) 0 \n", 183 | "_________________________________________________________________\n", 184 | "conv2d_2 (Conv2D) (None, 11, 11, 64) 18496 \n", 185 | 
"_________________________________________________________________\n", 186 | "max_pooling2d_2 (MaxPooling2 (None, 5, 5, 64) 0 \n", 187 | "_________________________________________________________________\n", 188 | "conv2d_3 (Conv2D) (None, 3, 3, 64) 36928 \n", 189 | "_________________________________________________________________\n", 190 | "flatten_1 (Flatten) (None, 576) 0 \n", 191 | "_________________________________________________________________\n", 192 | "dense_1 (Dense) (None, 128) 73856 \n", 193 | "_________________________________________________________________\n", 194 | "dense_2 (Dense) (None, 10) 1290 \n", 195 | "=================================================================\n", 196 | "Total params: 130,890\n", 197 | "Trainable params: 130,890\n", 198 | "Non-trainable params: 0\n", 199 | "_________________________________________________________________\n" 200 | ] 201 | } 202 | ], 203 | "source": [ 204 | "model.summary()" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": 12, 210 | "metadata": {}, 211 | "outputs": [], 212 | "source": [ 213 | "model.compile(optimizer='adam',\n", 214 | " loss='categorical_crossentropy',\n", 215 | " metrics= (['accuracy']))" 216 | ] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "execution_count": null, 221 | "metadata": {}, 222 | "outputs": [ 223 | { 224 | "name": "stdout", 225 | "output_type": "stream", 226 | "text": [ 227 | "Epoch 1/5\n", 228 | "60000/60000 [==============================] - 58s 970us/step - loss: 0.1398 - acc: 0.9566\n", 229 | "Epoch 2/5\n", 230 | "60000/60000 [==============================] - 58s 962us/step - loss: 0.0452 - acc: 0.9860\n", 231 | "Epoch 3/5\n", 232 | "52512/60000 [=========================>....] 
- ETA: 7s - loss: 0.0320 - acc: 0.9904" 233 | ] 234 | } 235 | ], 236 | "source": [ 237 | "model.fit(x_train, y_train, epochs=5, batch_size=32)" 238 | ] 239 | }, 240 | { 241 | "cell_type": "code", 242 | "execution_count": null, 243 | "metadata": {}, 244 | "outputs": [], 245 | "source": [ 246 | "model.evaluate(x_test, y_test)" 247 | ] 248 | }, 249 | { 250 | "cell_type": "code", 251 | "execution_count": null, 252 | "metadata": {}, 253 | "outputs": [], 254 | "source": [] 255 | } 256 | ], 257 | "metadata": { 258 | "kernelspec": { 259 | "display_name": "Python 3", 260 | "language": "python", 261 | "name": "python3" 262 | }, 263 | "language_info": { 264 | "codemirror_mode": { 265 | "name": "ipython", 266 | "version": 3 267 | }, 268 | "file_extension": ".py", 269 | "mimetype": "text/x-python", 270 | "name": "python", 271 | "nbconvert_exporter": "python", 272 | "pygments_lexer": "ipython3", 273 | "version": "3.5.4" 274 | } 275 | }, 276 | "nbformat": 4, 277 | "nbformat_minor": 2 278 | } 279 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/Advanced/Advanced_01.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "### Functional API vs Sequential" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 2, 13 | "metadata": {}, 14 | "outputs": [ 15 | { 16 | "name": "stderr", 17 | "output_type": "stream", 18 | "text": [ 19 | "Using TensorFlow backend.\n" 20 | ] 21 | } 22 | ], 23 | "source": [ 24 | "from keras.layers import Dense\n", 25 | "from keras import Sequential" 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": 3, 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "model = Sequential()\n", 35 | "model.add(Dense(32, activation='relu', input_shape=(512,)))\n", 36 | "model.add(Dense(64, activation='relu'))\n", 37 | "model.add(Dense(1, 
activation='sigmoid'))" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": 4, 43 | "metadata": {}, 44 | "outputs": [ 45 | { 46 | "name": "stdout", 47 | "output_type": "stream", 48 | "text": [ 49 | "_________________________________________________________________\n", 50 | "Layer (type) Output Shape Param # \n", 51 | "=================================================================\n", 52 | "dense_1 (Dense) (None, 32) 16416 \n", 53 | "_________________________________________________________________\n", 54 | "dense_2 (Dense) (None, 64) 2112 \n", 55 | "_________________________________________________________________\n", 56 | "dense_3 (Dense) (None, 1) 65 \n", 57 | "=================================================================\n", 58 | "Total params: 18,593\n", 59 | "Trainable params: 18,593\n", 60 | "Non-trainable params: 0\n", 61 | "_________________________________________________________________\n" 62 | ] 63 | } 64 | ], 65 | "source": [ 66 | "model.summary()" 67 | ] 68 | }, 69 | { 70 | "cell_type": "code", 71 | "execution_count": 5, 72 | "metadata": {}, 73 | "outputs": [], 74 | "source": [ 75 | "model.compile(optimizer='rmsprop', loss='categorical_crossentropy')" 76 | ] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": 6, 81 | "metadata": {}, 82 | "outputs": [], 83 | "source": [ 84 | "from keras import Input\n", 85 | "from keras import Model" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": 7, 91 | "metadata": {}, 92 | "outputs": [], 93 | "source": [ 94 | "input_tensor = Input(shape=(512,))\n", 95 | "dense1 = Dense(32, activation='relu')(input_tensor)\n", 96 | "dense2 = Dense(64, activation='relu')(dense1)\n", 97 | "output_tensor = Dense(1, activation='sigmoid')(dense2)" 98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": 8, 103 | "metadata": {}, 104 | "outputs": [], 105 | "source": [ 106 | "model1 = Model(input_tensor, output_tensor)" 107 | ] 108 | }, 109 | { 110 | 
"cell_type": "code", 111 | "execution_count": 9, 112 | "metadata": {}, 113 | "outputs": [ 114 | { 115 | "name": "stdout", 116 | "output_type": "stream", 117 | "text": [ 118 | "_________________________________________________________________\n", 119 | "Layer (type) Output Shape Param # \n", 120 | "=================================================================\n", 121 | "input_1 (InputLayer) (None, 512) 0 \n", 122 | "_________________________________________________________________\n", 123 | "dense_4 (Dense) (None, 32) 16416 \n", 124 | "_________________________________________________________________\n", 125 | "dense_5 (Dense) (None, 64) 2112 \n", 126 | "_________________________________________________________________\n", 127 | "dense_6 (Dense) (None, 1) 65 \n", 128 | "=================================================================\n", 129 | "Total params: 18,593\n", 130 | "Trainable params: 18,593\n", 131 | "Non-trainable params: 0\n", 132 | "_________________________________________________________________\n" 133 | ] 134 | } 135 | ], 136 | "source": [ 137 | "model1.summary()" 138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": 10, 143 | "metadata": {}, 144 | "outputs": [], 145 | "source": [ 146 | "model1.compile(optimizer='rmsprop', loss='categorical_crossentropy')" 147 | ] 148 | } 149 | ], 150 | "metadata": { 151 | "kernelspec": { 152 | "display_name": "Python 2", 153 | "language": "python", 154 | "name": "python2" 155 | }, 156 | "language_info": { 157 | "codemirror_mode": { 158 | "name": "ipython", 159 | "version": 2 160 | }, 161 | "file_extension": ".py", 162 | "mimetype": "text/x-python", 163 | "name": "python", 164 | "nbconvert_exporter": "python", 165 | "pygments_lexer": "ipython2", 166 | "version": "2.7.15" 167 | } 168 | }, 169 | "nbformat": 4, 170 | "nbformat_minor": 2 171 | } 172 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/Advanced/Advanced_02.ipynb: 
-------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "### Multi-input model\n", 8 | "Functional API implementation of a two-input question-answering model\n", 9 | "\n", 10 | "This takes in a question, a piece of text as inputs and gives out 1 word answer" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 1, 16 | "metadata": {}, 17 | "outputs": [ 18 | { 19 | "name": "stderr", 20 | "output_type": "stream", 21 | "text": [ 22 | "Using TensorFlow backend.\n" 23 | ] 24 | } 25 | ], 26 | "source": [ 27 | "from keras import Input, layers\n", 28 | "from keras import Model" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 2, 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [ 37 | "text_vocab_size = 10000\n", 38 | "question_vocab_size = 10000\n", 39 | "answer_vocab_size = 500" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": 3, 45 | "metadata": {}, 46 | "outputs": [], 47 | "source": [ 48 | "text_input = Input((None,), dtype='int32', name='text')\n", 49 | "embedded_text = layers.Embedding(text_vocab_size, 64)(text_input)\n", 50 | "encoded_text = layers.LSTM(32)(embedded_text)" 51 | ] 52 | }, 53 | { 54 | "cell_type": "code", 55 | "execution_count": null, 56 | "metadata": {}, 57 | "outputs": [], 58 | "source": [] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": 4, 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "question_input = Input((None,), dtype='int32', name='question')\n", 67 | "embedded_question = layers.Embedding(question_vocab_size, 32)(question_input)\n", 68 | "encoded_question = layers.LSTM(16)(embedded_question)" 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": null, 74 | "metadata": {}, 75 | "outputs": [], 76 | "source": [] 77 | }, 78 | { 79 | "cell_type": "code", 80 | "execution_count": 5, 81 | "metadata": {}, 82 | 
"outputs": [], 83 | "source": [ 84 | "concatenated = layers.concatenate([encoded_text, encoded_question], axis=-1)" 85 | ] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "execution_count": 6, 90 | "metadata": {}, 91 | "outputs": [], 92 | "source": [ 93 | "answer = layers.Dense(answer_vocab_size, activation='softmax')(concatenated)" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": 7, 99 | "metadata": {}, 100 | "outputs": [], 101 | "source": [ 102 | "model = Model([text_input, question_input], answer)" 103 | ] 104 | }, 105 | { 106 | "cell_type": "code", 107 | "execution_count": 8, 108 | "metadata": {}, 109 | "outputs": [], 110 | "source": [ 111 | "model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])" 112 | ] 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": 9, 117 | "metadata": {}, 118 | "outputs": [], 119 | "source": [ 120 | "import numpy as np" 121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "execution_count": 10, 126 | "metadata": {}, 127 | "outputs": [], 128 | "source": [ 129 | "num_samples = 1000\n", 130 | "max_len = 100" 131 | ] 132 | }, 133 | { 134 | "cell_type": "code", 135 | "execution_count": 11, 136 | "metadata": {}, 137 | "outputs": [], 138 | "source": [ 139 | "text = np.random.randint(1, text_vocab_size, size=(num_samples, max_len))" 140 | ] 141 | }, 142 | { 143 | "cell_type": "code", 144 | "execution_count": 12, 145 | "metadata": {}, 146 | "outputs": [], 147 | "source": [ 148 | "question = np.random.randint(1, question_vocab_size, size=(num_samples, max_len))" 149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": 13, 154 | "metadata": {}, 155 | "outputs": [], 156 | "source": [ 157 | "answers = np.random.randint(0, 1, size=(num_samples, answer_vocab_size))" 158 | ] 159 | }, 160 | { 161 | "cell_type": "code", 162 | "execution_count": 14, 163 | "metadata": {}, 164 | "outputs": [ 165 | { 166 | "name": "stdout", 167 | "output_type": "stream", 168 | "text": 
[ 169 | "Epoch 1/10\n", 170 | "1000/1000 [==============================] - 13s 13ms/step - loss: 0.0000e+00 - acc: 0.0020\n", 171 | "Epoch 2/10\n", 172 | "1000/1000 [==============================] - 10s 10ms/step - loss: 0.0000e+00 - acc: 0.0020\n", 173 | "Epoch 3/10\n", 174 | "1000/1000 [==============================] - 9s 9ms/step - loss: 0.0000e+00 - acc: 0.0020\n", 175 | "Epoch 4/10\n", 176 | "1000/1000 [==============================] - 10s 10ms/step - loss: 0.0000e+00 - acc: 0.0020\n", 177 | "Epoch 5/10\n", 178 | "1000/1000 [==============================] - 10s 10ms/step - loss: 0.0000e+00 - acc: 0.0020\n", 179 | "Epoch 6/10\n", 180 | "1000/1000 [==============================] - 10s 10ms/step - loss: 0.0000e+00 - acc: 0.0020\n", 181 | "Epoch 7/10\n", 182 | "1000/1000 [==============================] - 9s 9ms/step - loss: 0.0000e+00 - acc: 0.0020\n", 183 | "Epoch 8/10\n", 184 | "1000/1000 [==============================] - 10s 10ms/step - loss: 0.0000e+00 - acc: 0.0020\n", 185 | "Epoch 9/10\n", 186 | "1000/1000 [==============================] - 10s 10ms/step - loss: 0.0000e+00 - acc: 0.0020\n", 187 | "Epoch 10/10\n", 188 | "1000/1000 [==============================] - 10s 10ms/step - loss: 0.0000e+00 - acc: 0.0020\n" 189 | ] 190 | }, 191 | { 192 | "data": { 193 | "text/plain": [ 194 | "" 195 | ] 196 | }, 197 | "execution_count": 14, 198 | "metadata": {}, 199 | "output_type": "execute_result" 200 | } 201 | ], 202 | "source": [ 203 | "model.fit({'text': text, 'question': question}, answers, epochs=10, batch_size=128)" 204 | ] 205 | }, 206 | { 207 | "cell_type": "markdown", 208 | "metadata": {}, 209 | "source": [ 210 | "### Multi-output model\n", 211 | "Functional API implementation of a three-output model\n", 212 | "\n", 213 | "This takes social media post and tries to predict age, gender, and income " 214 | ] 215 | }, 216 | { 217 | "cell_type": "code", 218 | "execution_count": 16, 219 | "metadata": {}, 220 | "outputs": [], 221 | "source": [ 222 | "from 
keras import layers, Input, Model" 223 | ] 224 | }, 225 | { 226 | "cell_type": "code", 227 | "execution_count": 21, 228 | "metadata": {}, 229 | "outputs": [], 230 | "source": [ 231 | "vacob_size = 50000\n", 232 | "num_income_groups = 10" 233 | ] 234 | }, 235 | { 236 | "cell_type": "code", 237 | "execution_count": 22, 238 | "metadata": {}, 239 | "outputs": [], 240 | "source": [ 241 | "input_post = Input(shape=(None,), dtype='int32', name='posts')\n", 242 | "\n", 243 | "embedded_post = layers.Embedding(vacob_size, 32)(input_post)\n", 244 | "\n", 245 | "x = layers.Conv1D(128, 5, activation='relu')(embedded_post)\n", 246 | "x = layers.MaxPooling1D(5)(x)\n", 247 | "x = layers.Conv1D(256, 5, activation='relu')(x)\n", 248 | "x = layers.Conv1D(256, 5, activation='relu')(x)\n", 249 | "x = layers.MaxPooling1D(5)(x)\n", 250 | "x = layers.Conv1D(256, 5, activation='relu')(x)\n", 251 | "x = layers.Conv1D(256, 5, activation='relu')(x)\n", 252 | "x = layers.GlobalMaxPooling1D()(x)\n", 253 | "\n", 254 | "x = layers.Dense(128, activation='relu')(x)\n", 255 | "\n", 256 | "age_prediction = layers.Dense(1, name='age')(x)\n", 257 | "income_prediction = layers.Dense(num_income_groups, activation='softmax', name='income')(x)\n", 258 | "gender_prediction = layers.Dense(1, activation='sigmoid', name='gender')(x)\n", 259 | "\n", 260 | "model = Model(input_post, [age_prediction, income_prediction, gender_prediction])" 261 | ] 262 | }, 263 | { 264 | "cell_type": "code", 265 | "execution_count": 25, 266 | "metadata": {}, 267 | "outputs": [], 268 | "source": [ 269 | "## specify different loss functions for different heads of the network\n", 270 | "\n", 271 | "loss_dict = {\n", 272 | " 'age': 'mse',\n", 273 | " 'income': 'categorical_crossentropy',\n", 274 | " 'gender': 'binary_crossentropy'}\n", 275 | "\n", 276 | "loss_weights={\n", 277 | " 'age': 0.25,\n", 278 | " 'income': 1.,\n", 279 | " 'gender': 10.}\n", 280 | "\n", 281 | "model.compile(optimizer='rmsprop', loss=loss_dict, 
loss_weights=loss_weights)" 282 | ] 283 | }, 284 | { 285 | "cell_type": "code", 286 | "execution_count": 26, 287 | "metadata": {}, 288 | "outputs": [ 289 | { 290 | "ename": "NameError", 291 | "evalue": "name 'age_targets' is not defined", 292 | "output_type": "error", 293 | "traceback": [ 294 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 295 | "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", 296 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m target_dict = {\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0;34m'age'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mage_targets\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0;34m'income'\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mincome_targets\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m 'gender': gender_targets}\n\u001b[1;32m 5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", 297 | "\u001b[0;31mNameError\u001b[0m: name 'age_targets' is not defined" 298 | ] 299 | } 300 | ], 301 | "source": [ 302 | "# age_targets, income_targets, and gender_targets are assumed to be Numpy arrays.\n", 303 | "\n", 304 | "target_dict = {\n", 305 | " 'age': age_targets,\n", 306 | " 'income': income_targets,\n", 307 | " 'gender': gender_targets}\n", 308 | "\n", 309 | "model.fit(posts, target_dict, epochs=10, batch_size=64)" 310 | ] 311 | }, 312 | { 313 | "cell_type": "code", 314 | "execution_count": null, 315 | "metadata": {}, 316 | "outputs": [], 317 | "source": [] 318 | } 319 | ], 320 | "metadata": { 321 | "kernelspec": { 322 | "display_name": "Python 2", 323 | "language": "python", 324 | "name": "python2" 325 | }, 326 | "language_info": { 327 | "codemirror_mode": { 328 | "name": "ipython", 329 | "version": 2 330 | }, 331 | "file_extension": ".py", 332 | "mimetype": "text/x-python", 333 | "name": "python", 334 | "nbconvert_exporter": "python", 335 | 
"pygments_lexer": "ipython2", 336 | "version": "2.7.15" 337 | } 338 | }, 339 | "nbformat": 4, 340 | "nbformat_minor": 2 341 | } 342 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/Advanced/Advanced_03.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "### learn keras callbacks" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 2, 13 | "metadata": {}, 14 | "outputs": [ 15 | { 16 | "name": "stderr", 17 | "output_type": "stream", 18 | "text": [ 19 | "Using TensorFlow backend.\n" 20 | ] 21 | } 22 | ], 23 | "source": [ 24 | "import keras" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": 3, 30 | "metadata": {}, 31 | "outputs": [], 32 | "source": [ 33 | "ES = keras.callbacks.EarlyStopping(monitor=['acc'], patience=1)\n", 34 | "\n", 35 | "MC = keras.callbacks.ModelCheckpoint(filepath='my_model.h5', monitor='val_loss',\n", 36 | " save_best_only=True)\n", 37 | "\n", 38 | "callback_list = [ES,MC]" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": null, 44 | "metadata": {}, 45 | "outputs": [], 46 | "source": [ 47 | "model.fit(X, y , epochs=10, batch_size=32, validation_split=0.2)" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": null, 53 | "metadata": {}, 54 | "outputs": [], 55 | "source": [ 56 | "keras.callbacks.ReduceLROnPlateau(\n", 57 | "monitor='val_loss'\n", 58 | "factor=0.1, # multiplies existsing LR with 0.1\n", 59 | "patience=10, # callback is triggered after the validation loss has stopped improving for 10 epochs.\n", 60 | ")" 61 | ] 62 | }, 63 | { 64 | "cell_type": "markdown", 65 | "metadata": {}, 66 | "source": [ 67 | "### WRITING YOUR OWN CALLBACK\n", 68 | "\n", 69 | "simple example of a custom callback that saves to disk (as Numpy arrays) the\n", 70 | "activations of every layer of the 
model at the end of every epoch, computed on the\n", 71 | "first sample of the validation set:" 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": 5, 77 | "metadata": {}, 78 | "outputs": [], 79 | "source": [ 80 | "import keras\n", 81 | "import numpy as np" 82 | ] 83 | }, 84 | { 85 | "cell_type": "code", 86 | "execution_count": 6, 87 | "metadata": {}, 88 | "outputs": [], 89 | "source": [ 90 | "class ActivationLogger(keras.callbacks.Callback):\n", 91 | " \n", 92 | " def set_model(self, model):\n", 93 | " self.model = model\n", 94 | " layer_outputs = [layer.output for layer in model.layers]\n", 95 | " self.activations_model = keras.models.Model(model.input, layer_outputs)\n", 96 | " \n", 97 | " def on_epoch_end(self, epoch, logs=None):\n", 98 | " if self.validation_data is None:\n", 99 | " raise RuntimeError('Requires validation_data.')\n", 100 | " validation_sample = self.validation_data[0][0:1]\n", 101 | " activations = self.activations_model.predict(validation_sample)\n", 102 | " f = open('activations_at_epoch_' + str(epoch) + '.npz', 'w')\n", 103 | " np.savez(f, activations)\n", 104 | " f.close()" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": null, 110 | "metadata": {}, 111 | "outputs": [], 112 | "source": [] 113 | } 114 | ], 115 | "metadata": { 116 | "kernelspec": { 117 | "display_name": "Python 2", 118 | "language": "python", 119 | "name": "python2" 120 | }, 121 | "language_info": { 122 | "codemirror_mode": { 123 | "name": "ipython", 124 | "version": 2 125 | }, 126 | "file_extension": ".py", 127 | "mimetype": "text/x-python", 128 | "name": "python", 129 | "nbconvert_exporter": "python", 130 | "pygments_lexer": "ipython2", 131 | "version": "2.7.15" 132 | } 133 | }, 134 | "nbformat": 4, 135 | "nbformat_minor": 2 136 | } 137 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/Advanced/Advanced_04.ipynb: 
-------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "hyperas - optimizing hyper-parameters in Keras\n", 8 | "\n", 9 | "https://github.com/maxpumperla/hyperas" 10 | ] 11 | } 12 | ], 13 | "metadata": { 14 | "kernelspec": { 15 | "display_name": "Python 2", 16 | "language": "python", 17 | "name": "python2" 18 | }, 19 | "language_info": { 20 | "codemirror_mode": { 21 | "name": "ipython", 22 | "version": 2 23 | }, 24 | "file_extension": ".py", 25 | "mimetype": "text/x-python", 26 | "name": "python", 27 | "nbconvert_exporter": "python", 28 | "pygments_lexer": "ipython2", 29 | "version": "2.7.15" 30 | } 31 | }, 32 | "nbformat": 4, 33 | "nbformat_minor": 2 34 | } 35 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/Generative/Generative_01.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "### Generate Text using LSTMs" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [] 16 | } 17 | ], 18 | "metadata": { 19 | "kernelspec": { 20 | "display_name": "Python 2", 21 | "language": "python", 22 | "name": "python2" 23 | }, 24 | "language_info": { 25 | "codemirror_mode": { 26 | "name": "ipython", 27 | "version": 2 28 | }, 29 | "file_extension": ".py", 30 | "mimetype": "text/x-python", 31 | "name": "python", 32 | "nbconvert_exporter": "python", 33 | "pygments_lexer": "ipython2", 34 | "version": "2.7.15" 35 | } 36 | }, 37 | "nbformat": 4, 38 | "nbformat_minor": 2 39 | } 40 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/Generative/Generative_02.ipynb: -------------------------------------------------------------------------------- 1 
| { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "### Generate Deep Dreams" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [] 16 | } 17 | ], 18 | "metadata": { 19 | "kernelspec": { 20 | "display_name": "Python 2", 21 | "language": "python", 22 | "name": "python2" 23 | }, 24 | "language_info": { 25 | "codemirror_mode": { 26 | "name": "ipython", 27 | "version": 2 28 | }, 29 | "file_extension": ".py", 30 | "mimetype": "text/x-python", 31 | "name": "python", 32 | "nbconvert_exporter": "python", 33 | "pygments_lexer": "ipython2", 34 | "version": "2.7.15" 35 | } 36 | }, 37 | "nbformat": 4, 38 | "nbformat_minor": 2 39 | } 40 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/Generative/Generative_03.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "### Neural Style Transfer" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [] 16 | } 17 | ], 18 | "metadata": { 19 | "kernelspec": { 20 | "display_name": "Python 2", 21 | "language": "python", 22 | "name": "python2" 23 | }, 24 | "language_info": { 25 | "codemirror_mode": { 26 | "name": "ipython", 27 | "version": 2 28 | }, 29 | "file_extension": ".py", 30 | "mimetype": "text/x-python", 31 | "name": "python", 32 | "nbconvert_exporter": "python", 33 | "pygments_lexer": "ipython2", 34 | "version": "2.7.15" 35 | } 36 | }, 37 | "nbformat": 4, 38 | "nbformat_minor": 2 39 | } 40 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/Generative/Generative_04.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | 
"cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "### VAE" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [] 16 | } 17 | ], 18 | "metadata": { 19 | "kernelspec": { 20 | "display_name": "Python 2", 21 | "language": "python", 22 | "name": "python2" 23 | }, 24 | "language_info": { 25 | "codemirror_mode": { 26 | "name": "ipython", 27 | "version": 2 28 | }, 29 | "file_extension": ".py", 30 | "mimetype": "text/x-python", 31 | "name": "python", 32 | "nbconvert_exporter": "python", 33 | "pygments_lexer": "ipython2", 34 | "version": "2.7.15" 35 | } 36 | }, 37 | "nbformat": 4, 38 | "nbformat_minor": 2 39 | } 40 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/Generative/Generative_05.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "### GANs" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [] 16 | } 17 | ], 18 | "metadata": { 19 | "kernelspec": { 20 | "display_name": "Python 2", 21 | "language": "python", 22 | "name": "python2" 23 | }, 24 | "language_info": { 25 | "codemirror_mode": { 26 | "name": "ipython", 27 | "version": 2 28 | }, 29 | "file_extension": ".py", 30 | "mimetype": "text/x-python", 31 | "name": "python", 32 | "nbconvert_exporter": "python", 33 | "pygments_lexer": "ipython2", 34 | "version": "2.7.15" 35 | } 36 | }, 37 | "nbformat": 4, 38 | "nbformat_minor": 2 39 | } 40 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/Text/Text_05.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "### Implement a 
LSTM using numpy" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": null, 20 | "metadata": {}, 21 | "outputs": [], 22 | "source": [] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": null, 27 | "metadata": {}, 28 | "outputs": [], 29 | "source": [] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": null, 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": null, 41 | "metadata": {}, 42 | "outputs": [], 43 | "source": [ 44 | "i_t = activation(dot(state_t, Ui) + dot(input_t, Wi) + bi)\n", 45 | "f_t = activation(dot(state_t, Uf) + dot(input_t, Wf) + bf)\n", 46 | "k_t = activation(dot(state_t, Uk) + dot(input_t, Wk) + bk)" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": null, 52 | "metadata": {}, 53 | "outputs": [], 54 | "source": [] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [] 62 | }, 63 | { 64 | "cell_type": "markdown", 65 | "metadata": {}, 66 | "source": [ 67 | "### LSTM using keras" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": 10, 73 | "metadata": {}, 74 | "outputs": [], 75 | "source": [ 76 | "#from keras.datasets import imdb\n", 77 | "from keras import Sequential\n", 78 | "from keras.layers import Embedding, LSTM, Dense" 79 | ] 80 | }, 81 | { 82 | "cell_type": "code", 83 | "execution_count": 11, 84 | "metadata": {}, 85 | "outputs": [], 86 | "source": [ 87 | "vocab_size = 10000\n", 88 | "embedding_dim = 32" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": 12, 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "model = Sequential()\n", 98 | "model.add(Embedding(vocab_size, embedding_dim))\n", 99 | "model.add(LSTM(64))\n", 100 | "model.add(Dense(1, 
activation='sigmoid'))" 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": 13, 106 | "metadata": {}, 107 | "outputs": [], 108 | "source": [ 109 | "model.compile(optimizer='adam', metrics=['acc'], loss='binary_crossentropy')" 110 | ] 111 | }, 112 | { 113 | "cell_type": "code", 114 | "execution_count": 14, 115 | "metadata": {}, 116 | "outputs": [], 117 | "source": [ 118 | "from keras.datasets import imdb\n", 119 | "from keras.preprocessing import sequence" 120 | ] 121 | }, 122 | { 123 | "cell_type": "code", 124 | "execution_count": 15, 125 | "metadata": {}, 126 | "outputs": [], 127 | "source": [ 128 | "max_features = 10000\n", 129 | "maxlen = 500\n", 130 | "batch_size = 32" 131 | ] 132 | }, 133 | { 134 | "cell_type": "code", 135 | "execution_count": 16, 136 | "metadata": {}, 137 | "outputs": [], 138 | "source": [ 139 | "(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=maxlen)" 140 | ] 141 | }, 142 | { 143 | "cell_type": "code", 144 | "execution_count": 17, 145 | "metadata": {}, 146 | "outputs": [ 147 | { 148 | "name": "stdout", 149 | "output_type": "stream", 150 | "text": [ 151 | "(25000, 'train sequences')\n", 152 | "(25000, 'test sequences')\n" 153 | ] 154 | } 155 | ], 156 | "source": [ 157 | "print(len(X_train), 'train sequences')\n", 158 | "print(len(X_test), 'test sequences')" 159 | ] 160 | }, 161 | { 162 | "cell_type": "code", 163 | "execution_count": 18, 164 | "metadata": {}, 165 | "outputs": [ 166 | { 167 | "name": "stdout", 168 | "output_type": "stream", 169 | "text": [ 170 | "Pad sequences (samples x time)\n" 171 | ] 172 | } 173 | ], 174 | "source": [ 175 | "print('Pad sequences (samples x time)')" 176 | ] 177 | }, 178 | { 179 | "cell_type": "code", 180 | "execution_count": 19, 181 | "metadata": {}, 182 | "outputs": [], 183 | "source": [ 184 | "X_train = sequence.pad_sequences(X_train, maxlen=maxlen)\n", 185 | "X_test = sequence.pad_sequences(X_test, maxlen=maxlen)" 186 | ] 187 | }, 188 | { 189 | "cell_type": "code", 
190 | "execution_count": 20, 191 | "metadata": {}, 192 | "outputs": [ 193 | { 194 | "name": "stdout", 195 | "output_type": "stream", 196 | "text": [ 197 | "('input_train shape:', (25000, 500))\n", 198 | "('input_test shape:', (25000, 500))\n" 199 | ] 200 | } 201 | ], 202 | "source": [ 203 | "print('input_train shape:', X_train.shape)\n", 204 | "print('input_test shape:', X_test.shape)" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": null, 210 | "metadata": {}, 211 | "outputs": [], 212 | "source": [] 213 | }, 214 | { 215 | "cell_type": "code", 216 | "execution_count": 21, 217 | "metadata": {}, 218 | "outputs": [ 219 | { 220 | "name": "stdout", 221 | "output_type": "stream", 222 | "text": [ 223 | "Train on 20000 samples, validate on 5000 samples\n", 224 | "Epoch 1/10\n", 225 | "20000/20000 [==============================] - 1670s 83ms/step - loss: 0.5528 - acc: 0.7074 - val_loss: 0.4653 - val_acc: 0.7786\n", 226 | "Epoch 2/10\n", 227 | "11776/20000 [================>.............] 
- ETA: 10:54 - loss: 0.4116 - acc: 0.8171" 228 | ] 229 | }, 230 | { 231 | "ename": "KeyboardInterrupt", 232 | "evalue": "", 233 | "output_type": "error", 234 | "traceback": [ 235 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 236 | "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", 237 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mhistory\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_train\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0my_train\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mepochs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m10\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m128\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalidation_split\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0.2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", 238 | "\u001b[0;32m/Users/airwoot/anaconda3/envs/testing_anaconda/lib/python2.7/site-packages/keras/engine/training.pyc\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)\u001b[0m\n\u001b[1;32m 1035\u001b[0m \u001b[0minitial_epoch\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minitial_epoch\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1036\u001b[0m \u001b[0msteps_per_epoch\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msteps_per_epoch\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1037\u001b[0;31m validation_steps=validation_steps)\n\u001b[0m\u001b[1;32m 1038\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1039\u001b[0m def evaluate(self, x=None, y=None,\n", 239 | 
"\u001b[0;32m/Users/airwoot/anaconda3/envs/testing_anaconda/lib/python2.7/site-packages/keras/engine/training_arrays.pyc\u001b[0m in \u001b[0;36mfit_loop\u001b[0;34m(model, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)\u001b[0m\n\u001b[1;32m 197\u001b[0m \u001b[0mins_batch\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mins_batch\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtoarray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 198\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 199\u001b[0;31m \u001b[0mouts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mins_batch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 200\u001b[0m \u001b[0mouts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mto_list\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mouts\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 201\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0ml\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mo\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mzip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mout_labels\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mouts\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 240 | "\u001b[0;32m/Users/airwoot/anaconda3/envs/testing_anaconda/lib/python2.7/site-packages/keras/backend/tensorflow_backend.pyc\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, inputs)\u001b[0m\n\u001b[1;32m 2664\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_legacy_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2665\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 
2666\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2667\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2668\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mpy_any\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mis_tensor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mx\u001b[0m \u001b[0;32min\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 241 | "\u001b[0;32m/Users/airwoot/anaconda3/envs/testing_anaconda/lib/python2.7/site-packages/keras/backend/tensorflow_backend.pyc\u001b[0m in \u001b[0;36m_call\u001b[0;34m(self, inputs)\u001b[0m\n\u001b[1;32m 2634\u001b[0m \u001b[0msymbol_vals\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2635\u001b[0m session)\n\u001b[0;32m-> 2636\u001b[0;31m \u001b[0mfetched\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_callable_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0marray_vals\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2637\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mfetched\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moutputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2638\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", 242 | "\u001b[0;32m/Users/airwoot/anaconda3/envs/testing_anaconda/lib/python2.7/site-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1380\u001b[0m ret = tf_session.TF_SessionRunCallable(\n\u001b[1;32m 1381\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_handle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstatus\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1382\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 1383\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1384\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 243 | "\u001b[0;31mKeyboardInterrupt\u001b[0m: " 244 | ] 245 | } 246 | ], 247 | "source": [ 248 | "history = model.fit(X_train, y_train, epochs=10, batch_size=128, validation_split=0.2)" 249 | ] 250 | }, 251 | { 252 | "cell_type": "code", 253 | "execution_count": null, 254 | "metadata": {}, 255 | "outputs": [], 256 | "source": [ 257 | "import matplotlib.pyplot as plt\n", 258 | "\n", 259 | "acc = history.history['acc']\n", 260 | "val_acc = history.history['val_acc']\n", 261 | "loss = history.history['loss']\n", 262 | "val_loss = history.history['val_loss']\n", 263 | "\n", 264 | "epochs = range(1, len(acc) + 1)\n", 265 | "\n", 266 | "plt.plot(epochs, acc, 'bo', label='Training acc')\n", 267 | "plt.plot(epochs, val_acc, 'b', label='Validation acc')\n", 268 | "\n", 269 | "plt.title('Training and validation accuracy')\n", 270 | "plt.legend()\n", 271 | "\n", 272 | "\n", 273 | "plt.figure()\n", 274 | "plt.plot(epochs, loss, 'bo', label='Training loss')\n", 275 | "plt.plot(epochs, val_loss, 'b', label='Validation loss')\n", 276 | "plt.title('Training and validation loss')\n", 277 | "plt.legend()\n", 278 | "\n", 279 | "\n", 280 | "plt.show()" 281 | ] 282 | }, 283 | { 
284 | "cell_type": "code", 285 | "execution_count": null, 286 | "metadata": {}, 287 | "outputs": [], 288 | "source": [] 289 | } 290 | ], 291 | "metadata": { 292 | "kernelspec": { 293 | "display_name": "Python 2", 294 | "language": "python", 295 | "name": "python2" 296 | }, 297 | "language_info": { 298 | "codemirror_mode": { 299 | "name": "ipython", 300 | "version": 2 301 | }, 302 | "file_extension": ".py", 303 | "mimetype": "text/x-python", 304 | "name": "python", 305 | "nbconvert_exporter": "python", 306 | "pygments_lexer": "ipython2", 307 | "version": "2.7.15" 308 | } 309 | }, 310 | "nbformat": 4, 311 | "nbformat_minor": 2 312 | } 313 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/Vision/03_CNN.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "### In the last notebook, we saw how not to load all the data in RAM, but rather to stream it from hard disk. \n", 8 | "\n", 9 | "A common trick in CV is to augment the data: take images, and transform them to add more datapoints. One way is to transform them upfront, but this requires additional storage. Instead we augment data as we stream it. This removes the need for additional storage. 
" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 4, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "%load_ext autoreload\n", 19 | "%autoreload 2\n", 20 | "%matplotlib inline" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 5, 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "import os" 30 | ] 31 | }, 32 | { 33 | "cell_type": "markdown", 34 | "metadata": {}, 35 | "source": [ 36 | "### Set data paths" 37 | ] 38 | }, 39 | { 40 | "cell_type": "code", 41 | "execution_count": 19, 42 | "metadata": {}, 43 | "outputs": [], 44 | "source": [ 45 | "root_dir = \"/\"\n", 46 | "users = os.path.join(root_dir, \"Users\")\n", 47 | "airwoot = os.path.join(users, \"airwoot\")\n", 48 | "documents = os.path.join(airwoot, \"Documents\")\n", 49 | "anuj = os.path.join(documents, \"Anuj\")\n", 50 | "Warehouse = os.path.join(anuj, \"Warehouse\")\n" 51 | ] 52 | }, 53 | { 54 | "cell_type": "code", 55 | "execution_count": 20, 56 | "metadata": {}, 57 | "outputs": [], 58 | "source": [ 59 | "src_folder = os.path.join(Warehouse, \"train\")\n", 60 | "\n", 61 | "cat_src_folder = os.path.join(src_folder, \"CAT\")\n", 62 | "dog_src_folder = os.path.join(src_folder, \"DOG\")" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": 21, 68 | "metadata": {}, 69 | "outputs": [], 70 | "source": [ 71 | "dest_data_folder = os.path.join(Warehouse, \"cats_and_dogs\")\n", 72 | "\n", 73 | "train_dir = os.path.join(dest_data_folder, \"train\")\n", 74 | "cat_train_dir = os.path.join(train_dir, \"cat\")\n", 75 | "dog_train_dir = os.path.join(train_dir, \"dog\")\n", 76 | "\n", 77 | "\n", 78 | "test_dir = os.path.join(dest_data_folder, \"test\")\n", 79 | "cat_test_dir = os.path.join(test_dir, \"cat\")\n", 80 | "dog_test_dir = os.path.join(test_dir, \"dog\")\n", 81 | "\n", 82 | "\n", 83 | "validation_dir = os.path.join(dest_data_folder, \"validation\")\n", 84 | "cat_validation_dir = os.path.join(validation_dir, \"cat\")\n", 85 
| "dog_validation_dir = os.path.join(validation_dir, \"dog\")\n" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "execution_count": 24, 91 | "metadata": {}, 92 | "outputs": [ 93 | { 94 | "name": "stdout", 95 | "output_type": "stream", 96 | "text": [ 97 | "2000\n", 98 | "2000\n", 99 | "1000\n", 100 | "1000\n", 101 | "1000\n", 102 | "1000\n" 103 | ] 104 | } 105 | ], 106 | "source": [ 107 | "### sanity checks\n", 108 | "\n", 109 | "print(len(os.listdir(cat_train_dir)))\n", 110 | "print(len(os.listdir(dog_train_dir)))\n", 111 | "\n", 112 | "print(len(os.listdir(cat_test_dir)))\n", 113 | "print(len(os.listdir(dog_test_dir)))\n", 114 | "\n", 115 | "print(len(os.listdir(cat_validation_dir)))\n", 116 | "print(len(os.listdir(dog_validation_dir)))" 117 | ] 118 | }, 119 | { 120 | "cell_type": "markdown", 121 | "metadata": {}, 122 | "source": [ 123 | "### Set the data streams " 124 | ] 125 | }, 126 | { 127 | "cell_type": "code", 128 | "execution_count": 25, 129 | "metadata": {}, 130 | "outputs": [], 131 | "source": [ 132 | "from keras.preprocessing.image import ImageDataGenerator" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": 27, 138 | "metadata": {}, 139 | "outputs": [], 140 | "source": [ 141 | "train_IDG = ImageDataGenerator(\n", 142 | " rescale=1.0/255,\n", 143 | " rotation_range=40,\n", 144 | " horizontal_flip= True,\n", 145 | " vertical_flip = True,\n", 146 | " width_shift_range=0.2,\n", 147 | " height_shift_range=0.2,\n", 148 | " shear_range=0.2,\n", 149 | " zoom_range=0.2)\n", 150 | "\n", 151 | "validation_IDG = ImageDataGenerator(\n", 152 | " rescale=1.0/255,\n", 153 | " rotation_range=40,\n", 154 | " horizontal_flip= True,\n", 155 | " vertical_flip = True,\n", 156 | " width_shift_range=0.2,\n", 157 | " height_shift_range=0.2,\n", 158 | " shear_range=0.2,\n", 159 | " zoom_range=0.2)" 160 | ] 161 | }, 162 | { 163 | "cell_type": "code", 164 | "execution_count": 28, 165 | "metadata": {}, 166 | "outputs": [ 167 | { 168 | "name": 
"stdout", 169 | "output_type": "stream", 170 | "text": [ 171 | "Found 4000 images belonging to 2 classes.\n", 172 | "Found 2000 images belonging to 2 classes.\n" 173 | ] 174 | } 175 | ], 176 | "source": [ 177 | "train_DG = train_IDG.flow_from_directory(train_dir, target_size=(150, 150),\n", 178 | " batch_size=32,\n", 179 | " class_mode='binary')\n", 180 | "\n", 181 | "validation_DG = validation_IDG.flow_from_directory(validation_dir, target_size=(150, 150),\n", 182 | " batch_size=32,\n", 183 | " class_mode='binary')" 184 | ] 185 | }, 186 | { 187 | "cell_type": "markdown", 188 | "metadata": {}, 189 | "source": [ 190 | "### Define the model" 191 | ] 192 | }, 193 | { 194 | "cell_type": "code", 195 | "execution_count": 29, 196 | "metadata": {}, 197 | "outputs": [], 198 | "source": [ 199 | "from keras import Sequential\n", 200 | "from keras import layers" 201 | ] 202 | }, 203 | { 204 | "cell_type": "code", 205 | "execution_count": 30, 206 | "metadata": {}, 207 | "outputs": [], 208 | "source": [ 209 | "model = Sequential()\n", 210 | "\n", 211 | "model.add(layers.Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=(150, 150, 3)))\n", 212 | "model.add(layers.MaxPooling2D(2,2))\n", 213 | "\n", 214 | "model.add(layers.Conv2D(64, kernel_size=(3,3), activation='relu'))\n", 215 | "model.add(layers.MaxPooling2D(2,2))\n", 216 | "\n", 217 | "model.add(layers.Conv2D(128, kernel_size=(3,3), activation='relu'))\n", 218 | "model.add(layers.MaxPooling2D(2,2))\n", 219 | "\n", 220 | "model.add(layers.Conv2D(128, kernel_size=(3,3), activation='relu'))\n", 221 | "model.add(layers.MaxPooling2D(2,2))\n", 222 | "\n", 223 | "model.add(layers.Flatten())\n", 224 | "\n", 225 | "model.add(layers.Dense(512, activation='relu'))\n", 226 | "model.add(layers.Dense(1, activation='sigmoid'))" 227 | ] 228 | }, 229 | { 230 | "cell_type": "code", 231 | "execution_count": 31, 232 | "metadata": {}, 233 | "outputs": [], 234 | "source": [ 235 | "model.compile(optimizer='adam', 
loss='binary_crossentropy', metrics=['acc'])" 236 | ] 237 | }, 238 | { 239 | "cell_type": "markdown", 240 | "metadata": {}, 241 | "source": [ 242 | "### training time" 243 | ] 244 | }, 245 | { 246 | "cell_type": "code", 247 | "execution_count": 33, 248 | "metadata": {}, 249 | "outputs": [ 250 | { 251 | "name": "stdout", 252 | "output_type": "stream", 253 | "text": [ 254 | "Epoch 1/2\n", 255 | "5/5 [==============================] - 808s 162s/step - loss: 0.7244 - acc: 0.4312 - val_loss: 0.6938 - val_acc: 0.4890\n", 256 | "Epoch 2/2\n", 257 | "5/5 [==============================] - 732s 146s/step - loss: 0.6958 - acc: 0.5187 - val_loss: 0.6933 - val_acc: 0.4970\n" 258 | ] 259 | } 260 | ], 261 | "source": [ 262 | "history = model.fit_generator(\n", 263 | " generator=train_DG, \n", 264 | " validation_data = validation_DG,\n", 265 | " steps_per_epoch = 5,\n", 266 | " epochs = 2,\n", 267 | " validation_steps = 50\n", 268 | " )" 269 | ] 270 | }, 271 | { 272 | "cell_type": "markdown", 273 | "metadata": {}, 274 | "source": [ 275 | "### Plotting" 276 | ] 277 | }, 278 | { 279 | "cell_type": "code", 280 | "execution_count": 41, 281 | "metadata": {}, 282 | "outputs": [], 283 | "source": [ 284 | "from matplotlib import pyplot as plt" 285 | ] 286 | }, 287 | { 288 | "cell_type": "code", 289 | "execution_count": 43, 290 | "metadata": {}, 291 | "outputs": [ 292 | { 293 | "data": { 294 | "text/plain": [ 295 | "['acc', 'loss', 'val_acc', 'val_loss']" 296 | ] 297 | }, 298 | "execution_count": 43, 299 | "metadata": {}, 300 | "output_type": "execute_result" 301 | } 302 | ], 303 | "source": [ 304 | "history.history.keys()" 305 | ] 306 | }, 307 | { 308 | "cell_type": "code", 309 | "execution_count": 44, 310 | "metadata": {}, 311 | "outputs": [], 312 | "source": [ 313 | "train_acc = history.history['acc']\n", 314 | "validation_acc = history.history['val_acc']\n", 315 | "\n", 316 | "train_loss = history.history['loss']\n", 317 | "validation_loss = history.history['val_loss']" 318 | ] 319 | 
}, 320 | { 321 | "cell_type": "code", 322 | "execution_count": 46, 323 | "metadata": {}, 324 | "outputs": [], 325 | "source": [ 326 | "epochs_list = range(1, (len(train_acc)+1) )" 327 | ] 328 | }, 329 | { 330 | "cell_type": "code", 331 | "execution_count": 53, 332 | "metadata": {}, 333 | "outputs": [ 334 | { 335 | "data": { 336 | "text/plain": [ 337 | "[]" 338 | ] 339 | }, 340 | "execution_count": 53, 341 | "metadata": {}, 342 | "output_type": "execute_result" 343 | }, 344 | { 345 | "data": { 346 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX4AAAD8CAYAAABw1c+bAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAIABJREFUeJzt3X1w1dW97/H3Nw/kCQiB8CBPBa1KijwkRHCKFqijpUhFPVCsWgtTDgcOFvG0ncu950xvtXbGTr0eZI7iRYuOPVoPxaLejtZTbSi21ZYESAwP8qBoAhQQSEhIAtlh3T9+O9k7DyQ7yU72zt6f14yT/H6/tX57/YLz2b+9fmuvZc45REQkfiREugEiItK7FPwiInFGwS8iEmcU/CIicUbBLyISZxT8IiJxRsEvIhJnFPwiInFGwS8iEmeSIt2AtmRnZ7tx48ZFuhkiIn1GUVHR5865oaGUjcrgHzduHIWFhZFuhohIn2Fmn4ZaVl09IiJxRsEvIhJnFPwiInFGwS8iEmcU/CIicUbBLyISZxT8IiJxJirH8XfVT/74E/ol9mNQ6iCy0rIYlDqo1X/9EvtFupkiIhEVM8HvnONnf/4Z5+vPt1suPTm96U0gK7X1m0Ob+/xvIpkpmSQmJPbSFYmI9IyYCX4zo+p/VlHrq6WiroKKugrO1p5t+r1pX13zfcerj7Pv831N25fcpXZfZ0C/AW1+mgjlTWRAygASTL1rIhJZMRP84IV/enI66cnpjBwwstP1nXNUX6xu9ebQ6k3kQmD704pPKa4rpqKugsoLle2eP8ESyEzJbP1pIqXtTxgt30DSk9Mxs67+eUREgBgL/u4yMwakDGBAygDGZo7tdP2GSw2cu3Cu3U8YZ2vPUnEhsH3g9IGmN5GOuqmSEpI69Qmj5RtIalJqV/80IhJDFPxhlJiQSFZaFllpWV2qX99QT+WFyja7qdr8FFJ3lrJzZU3bdb66ds+fkpgSUjdVW28qg1IHkZyY3KXrEpHoouCPIsmJyWSnZ5Odnt2l+nW+uva7qFq8iZyuOc3hM4eb9vku+do9f0ZyRtufKFLa/oQR/CYyMGWgHoyLRAkFfwxJTUplRP8RjOg/otN1nXPU1Ne0/wmj8U3E31V1rOoYe07uaTrucO2+xsCUgV3uphrQb4Ceb4iEiYJfAO/5Rka/DDL6ZTBq4KhO17/kLnkPxkMYSdW475OKT5q2z1041+75Eyyhze6nULqpstKySEtK0xuHiJ+CX8IiwRIYmDKQgSkD+QJf6HT9hksNTc83Qh2O+9HnHzXtq6mvaff8yQnJXe6mGpQ6iJSklK7+aUSijoJfokJiQiKD0wYzOG1wl+pfbLhIZV1lyN1UjUNxG8tfbLjY7vlTk1K73E2VmZKpB+MSVRT8EhP6JfZjaMZQhmaEtORoK40PxkMdTXWq5hQHzxxsqtPgGto9f/9+/bvcTTUwZaC++CdhpeAXofsPxs/Xn+9UN9XRc0fZc3IPZ+vOUllX2e6DccOaHoy3+kSR0nE3Vf9+/fV8Q5pR8It0k5nRv19/+vfrz+
iBoztd/5K7RNWFqk51UzUOw62oq6DqYlW750+0RDJTM0P+hNFynx6Mx56Qgt/M5gJPAonAc865x1oc/yFwb9A5c4ChzrkzHdUViXcJlkBmaiaZqZldejDuu+Rrer4RSjdVRV0Fxz8/3rSvowfjjTPeduZb48FvIpoRN/qYc+2PvTazROAAcAtQDuwAvuWc23uZ8t8AHnLOfbWzdRvl5+e7wsLCzl6LiHTBBd+FZiOqOjMk92ztWeov1bd7/rSktLY/TYTQTZWZmklSgjomQmFmRc65/FDKhvIXnQ4ccs597D/5K8AC4HLh/S3gV12sKyK9LCUphWFJwxiWMazTdZ1zgQfjIUxuWFFXwYnqE01DcSvqKjp8MD6g34C2P010MLlh4zfG9WC8tVCCfxRQFrRdDsxoq6CZpQNzgQc6W1dE+h4zIy05jbTkNK4YcEWn6zfOiNuZbqqyc2V8ePJDztae7XBGXMPITM3scjdVRnJGTD7fCPdnqG8Af3bOnelsRTNbDiwHGDu28zNjikjfEzwj7pjMMZ2u33CpgaqLVZ3qpmochltRV0H1xep2z59oiV3uphqUOoi05LSu/ml6VCjBfxQI/hcZ7d/XlrsJdPN0qq5zbiOwEbw+/hDaJSJxLjEhsSlku6LxwXhn1uA4eu5o0/5aX227509JTOlUN9XgtMHkjwypm75bQgn+HcDVZjYeL7TvBu5pWcjMMoFZwH2drSsiEglJCUkMSR/CkPQhXap/wXehU91UjXNUNb6JtHwwPixjGCd+cCIcl9auDoPfOeczsweAt/GGZG5yzu0xsxX+48/4i94J/Ldz7nxHdcN9EU2mTIHaoHfgln1zfXk7mtrS2e1oakt3t6OpLd3djqa2dHc7Qq+dAgwHhrdbPhVsJDCy2XHnHD7n40LDRS74LlDnq+Nieu/MCdXhcM5I6PJwzqVL4cIF7/eW19WXt6OpLZ3djqa2dHc7mtrS3e1oakt3t6OpLZ3dbnls2DA4fJiuCPdwzr7j+ecj3QIRkainAa4iInFGwS8iEmcU/CIicUbBLyISZxT8IiJxRsEvIhJnFPwiInFGwS8iEmcU/CIicUbBLyISZxT8IiJxRsEvIhJnFPwiInFGwS8iEmcU/CIicUbBLyISZxT8IiJxRsEvIhJnFPwiInFGwS8iEmcU/CIicUbBLyISZxT8IiJxRsEvIhJnQgp+M5trZh+Z2SEzW3uZMrPNbLeZ7TGzPwbtP2JmH/qPFYar4SIi0jVJHRUws0TgKeAWoBzYYWZvOOf2BpUZBDwNzHXOfWZmw1qcZo5z7vMwtltERLoolDv+6cAh59zHzrmLwCvAghZl7gF+45z7DMA5dzK8zRQRkXAJJfhHAWVB2+X+fcGuAbLMbJuZFZnZ/UHHHPCOf//yy72ImS03s0IzKzx16lSo7RcRkU7qsKunE+eZBtwMpAHvm9kHzrkDwI3OuaP+7p/fm9l+59z2lidwzm0ENgLk5+e7MLVLRERaCOWO/ygwJmh7tH9fsHLgbefceX9f/nZgCoBz7qj/50lgK17XkYiIREgowb8DuNrMxptZP+Bu4I0WZV4HbjSzJDNLB2YA+8wsw8wGAJhZBnArUBq+5ouISGd12NXjnPOZ2QPA20AisMk5t8fMVviPP+Oc22dmvwNKgEvAc865UjO7EthqZo2v9bJz7nc9dTEiItIxcy76utPz8/NdYaGG/IuIhMrMipxz+aGU1Td3RUTijIJfRCTOKPhFROKMgl9EJM4o+EVE4oyCX0Qkzij4RUTijIJfRCTOKPhFROKMgl9EJM6Ea1pmEemj6uvrKS8vp66uLtJNkRCkpqYyevRokpOTu3wOBb9InCsvL2fAgAGMGzcO/4SKEqWcc5w+fZry8nLGjx/f5fOoq0ckztXV1TFkyBCFfh9gZgwZMqTbn84U/CKi0O9DwvFvpeAXEYkzCn4RiaiKigqefvrpTtebN28eFRUVPdCi2KfgF5GIulzw+3y+du
u9+eabDBo0qKeaFdM0qkdEAtasgd27w3vOqVNh3brLHl67di2HDx9m6tSpJCcnk5qaSlZWFvv37+fAgQPccccdlJWVUVdXx4MPPsjy5csBGDduHIWFhVRXV/P1r3+dG2+8kb/85S+MGjWK119/nbS0tDZf79lnn2Xjxo1cvHiRL37xi/zyl78kPT2dEydOsGLFCj7++GMANmzYwJe//GVefPFFHn/8ccyMyZMn88tf/jK8f58I0B2/iETUY489xlVXXcXu3bv5+c9/zs6dO3nyySc5cOAAAJs2baKoqIjCwkLWr1/P6dOnW53j4MGDrFq1ij179jBo0CBeffXVy77eXXfdxY4dOyguLiYnJ4df/OIXAKxevZpZs2ZRXFzMzp07mThxInv27OHRRx/lD3/4A8XFxTz55JM980foZbrjF5GAdu7Me8v06dObjVFfv349W7duBaCsrIyDBw8yZMiQZnXGjx/P1KlTAZg2bRpHjhy57PlLS0v5t3/7NyoqKqiuruZrX/saAH/4wx948cUXAUhMTCQzM5MXX3yRRYsWkZ2dDcDgwYPDdp2RpOAXkaiSkZHR9Pu2bdt45513eP/990lPT2f27NltjmFPSUlp+j0xMZHa2trLnn/JkiW89tprTJkyhRdeeIFt27aFtf19gbp6RCSiBgwYQFVVVZvHKisrycrKIj09nf379/PBBx90+/Wqqqq44oorqK+v56WXXmraf/PNN7NhwwYAGhoaqKys5Ktf/Sq//vWvm7qXzpw50+3XjwYKfhGJqCFDhjBz5kyuu+46fvjDHzY7NnfuXHw+Hzk5Oaxdu5Ybbrih26/3k5/8hBkzZjBz5kwmTJjQtP/JJ5+koKCASZMmMW3aNPbu3cvEiRP513/9V2bNmsWUKVP4l3/5l26/fjQw51yk29BKfn6+KywsjHQzROLCvn37yMnJiXQzpBPa+jczsyLnXH4o9UO64zezuWb2kZkdMrO1lykz28x2m9keM/tjZ+qKiEjv6fDhrpklAk8BtwDlwA4ze8M5tzeozCDgaWCuc+4zMxsWal0RkZ6watUq/vznPzfb9+CDD7J06dIItSh6hDKqZzpwyDn3MYCZvQIsAILD+x7gN865zwCccyc7UVdEJOyeeuqpSDchaoXS1TMKKAvaLvfvC3YNkGVm28ysyMzu70RdERHpReEax58ETANuBtKA982sU+OuzGw5sBxg7NixYWqWiIi0FMod/1FgTND2aP++YOXA28658865z4HtwJQQ6wLgnNvonMt3zuUPHTo01PaLiEgnhRL8O4CrzWy8mfUD7gbeaFHmdeBGM0sys3RgBrAvxLoiItKLOgx+55wPeAB4Gy/MNzvn9pjZCjNb4S+zD/gdUAL8DXjOOVd6ubo9cykiEg/69+8PwLFjx1i4cGGbZWbPnk1H3wVat24dNTU1TdvxNL9/SH38zrk3gTdb7HumxfbPgZ+HUldEpLtGjhzJli1bulx/3bp13HfffaSnpwPe/P7xQpO0iUiTCEzHz9q1axkzZgyrVq0C4Mc//jFJSUkUFBRw9uxZ6uvrefTRR1mwYEGzekeOHGH+/PmUlpZSW1vL0qVLKS4uZsKECc0maVu5ciU7duygtraWhQsX8vDDD7N+/XqOHTvGnDlzyM7OpqCgoGl+/+zsbJ544gk2bdoEwLJly1izZg1HjhyJmXn/NVePiETU4sWL2bx5c9P25s2b+c53vsPWrVvZuXMnBQUFfP/736e96WU2bNhAeno6+/bt4+GHH6aoqKjp2E9/+lMKCwspKSnhj3/8IyUlJaxevZqRI0dSUFBAQUFBs3MVFRXx/PPP89e//pUPPviAZ599ll27dgGxM++/7vhFpEkkpuPPzc3l5MmTHDt2jFOnTpGVlcWIESN46KGH2L59OwkJCRw9epQTJ04wYsSINs+xfft2Vq9eDcDkyZOZPHly07HNmzezceNGfD4fx48fZ+/evc2Ot/SnP/2JO++8s2l66L
vuuov33nuP22+/PWbm/Vfwi0jELVq0iC1btvD3v/+dxYsX89JLL3Hq1CmKiopITk5m3Lhxbc7D35FPPvmExx9/nB07dpCVlcWSJUu6dJ5GsTLvv7p6RCTiFi9ezCuvvMKWLVtYtGgRlZWVDBs2jOTkZAoKCvj000/brf+Vr3yFl19+GfDutEtKSgA4d+4cGRkZZGZmcuLECd56662mOpdbB+Cmm27itddeo6amhvPnz7N161ZuuummTl9TNM/7r+AXkYibOHEiVVVVjBo1iiuuuIJ7772XwsJCJk2axIsvvths3vy2rFy5kurqanJycvjRj37EtGnTAJgyZQq5ublMmDCBe+65h5kzZzbVWb58OXPnzmXOnDnNzpWXl8eSJUuYPn06M2bMYNmyZeTm5nb6mqJ53n/Nxy8S5zQff9/TK/Pxi4hI7NDDXRGRbuiL8/4r+EVEuqEvzvuvrh4RkTij4BcRiTMKfhGROKPgFxGJMwp+EYmoiooKnn766U7X6+r8+UuWLOnWdM6xQMEvIhF1ueD3+Xzt1nvzzTcZNGhQTzUrpmk4p4g0WfO7Nez+e3gn5J86Yirr5l5+2s+1a9dy+PBhpk6dSnJyMqmpqWRlZbF//34OHDjAHXfcQVlZGXV1dTz44IMsX74coGn+/Orq6k7Nkx/s3Xff5Qc/+AE+n4/rr7+eDRs2kJKSwtq1a3njjTdISkri1ltv5fHHH+fXv/41Dz/8cNOMmtu3bw/b36i3KfhFJKIee+wxSktL2b17N9u2beO2226jtLSU8ePHA7Bp0yYGDx5MbW0t119/Pf/wD//AkCFDmp3j4MGD/OpXv+LZZ5/lm9/8Jq+++ir33Xdfu69bV1fHkiVLePfdd7nmmmu4//772bBhA9/+9rfZunUr+/fvx8yaupMeeeQR3n77bUaNGtXnl2hU8ItIk/buzHvL9OnTm0IfYP369WzduhWAsrIyDh482Cr4OzNPfqOPPvqI8ePHc8011wDwne98h6eeeooHHniA1NRUvvvd7zJ//nzmz58PwMyZM1myZAnf/OY3ueuuu8JxqRGjPn4RiSqNC6AAbNu2jXfeeYf333+f4uJicnNz25xPv+U8+R09H2hPUlISf/vb31i4cCG//e1vmTt3LgDPPPMMjz76KGVlZUybNq1pCuW+SHf8IhJRl5sXH6CyspKsrCzS09PZv38/H3zwQdhe99prr+XIkSMcOnSoaU3cWbNmUV1dTU1NDfPmzWPmzJlceeWVABw+fJgZM2YwY8YM3nrrLcrKylp98ugrFPwiElFDhgxh5syZXHfddaSlpTF8+PCmY3PnzuWZZ54hJyeHa6+9lhtuuCFsr5uamsrzzz/PokWLmh7urlixgjNnzrBgwQLq6upwzvHEE08A8MMf/pCDBw/inOPmm29mypQpYWtLb9N8/CJxTvPx9z2aj19ERDpFXT0iEpP64jz5vSWk4DezucCTQCLwnHPusRbHZwOvA5/4d/3GOfeI/9gRoApoAHyhfhQREemOvjhPfm/pMPjNLBF4CrgFKAd2mNkbzrm9LYq+55ybf5nTzHHOfd69poqISDiE0sc/HTjknPvYOXcReAVY0LPNEhGRnhJK8I8CyoK2y/37WvqymZWY2VtmNjFovwPeMbMiM1vejbaKiEgYhOvh7k5grHOu2szmAa8BV/uP3eicO2pmw4Dfm9l+51yr2Y38bwrLAcaOHRumZomISEuh3PEfBcYEbY/272vinDvnnKv2//4mkGxm2f7to/6fJ4GteF1HrTjnNjrn8p1z+UOHDu30hYhIfOjfvz8Ax44dY+HChW2WmT17Nh19F2jdunXU1NQ0bXd1fv/LieZ5/0MJ/h3A1WY23sz6AXcDbwQXMLMRZmb+36f7z3vazDLMbIB/fwZwK1AazgsQkfg0cuTIbgVry+CPp/n9O+zqcc75zOwB4G284ZybnHN7zGyF//gzwEJgpZn5gFrgbuecM7PhwFb/e0IS8LJz7n
c9dC0i0l1r1sDu8M7Hz9SpsK79+fjHjBnDqlWrAPjxj39MUlISBQUFnD17lvr6eh599FEWLGg+puTIkSPMnz+f0tJSamtrWbp0KcXFxUyYMIHa2tqmcitXrmTHjh3U1taycOFCHn74YdavX8+xY8eYM2cO2dnZFBQUNM3vn52dzRNPPMGmTZsAWLZsGWvWrOHIkSMxM+9/SH38/u6bN1vseybo9/8A/qONeh8DfXdCCxHpcYsXL2bNmjVNwb9582befvttVq9ezcCBA/n888+54YYbuP322/HfRLayYcMG0tPT2bdvHyUlJeTl5TUd++lPf8rgwYNpaGjg5ptvpqSkhNWrV/PEE09QUFBAdnZ2s3MVFRXx/PPP89e//hXnHDNmzGDWrFlkZWXFzLz/+uauiAS0c2feU3Jzczl58iTHjh3j1KlTZGVlMWLECB566CG2b99OQkICR48e5cSJE4wYMaLNc2zfvp3Vq1cDMHnyZCZPntx0bPPmzWzcuBGfz8fx48fZu3dvs+Mt/elPf+LOO+9smh76rrvu4r333uP222+PmXn/NVePiETcokWL2LJlC//1X//F4sWLeemllzh16hRFRUXs3r2b4cOHtzkPf0c++eQTHn/8cd59911KSkq47bbbunSeRrEy77+CX0QibvHixbzyyits2bKFRYsWUVlZybBhw0hOTqagoIBPP/203fpf+cpXePnllwEoLS2lpKQEgHPnzpGRkUFmZiYnTpzgrbfeaqpzuXUAbrrpJl577TVqamo4f/48W7du5aabburytQXP+w80m/e/srKSefPm8e///u8UFxcDgXn/H3nkEYYOHUpZWVl7p+8SdfWISMRNnDiRqqoqRo0axRVXXMG9997LN77xDSZNmkR+fj4TJkxot/7KlStZunQpOTk55OTkMG3aNACmTJlCbm4uEyZMYMyYMcycObOpzvLly5k7dy4jR46koKCgaX9eXh5Llixh+nRv5PmyZcvIzc0NqVunLdE477/m4xeJc5qPv+/RfPwiItIp6uoREemGvjjvv4JfRHDOXXaMvLSvt+f9D0f3vLp6ROJcamoqp0+fDkugSM9yznH69GlSU1O7dR7d8YvEudGjR1NeXs6pU6ci3RQJQWpqKqNHj+7WORT8InEuOTmZ8ePHR7oZ0ovU1SMiEmcU/CIicUbBLyISZxT8IiJxRg93RUQi4dIlOHwYdu6EXbu8n7W18N57Pf7SCn4RkZ7m88G+fYGA37nTW+mscXbQ5GS47jrIzwfnoIe/TKfgFxEJp7o6+PDDQMjv2gUlJd5+gLQ0bznK+++H3FzIy4OJE6Ffv15rooJfRKSrqqqguDhwF79rF+zZAw0N3vHMTC/YV60KhPw110BiYkSbreAXEQnF6dPNu2p27YKDB72uGYDhw71gnz/f+5mXB+PG9Xi3TVco+EVEgjkHx483D/idO+GzzwJlvvAFL9jvuy8Q8ldcEbk2d5KCX0Til3PwySetQ/7kSe+4mdc1M3MmPPCAF/BTp8KQIZFtdzcp+EUkPjQ0wEcfNQ/4XbugstI7npTkPWSdNy9wFz9lCvTvH9l29wAFv4jEngsXvIeswSFfXOyNkwdITfVC/VvfCoT8xIne/jig4BeRvu38+cDImsaQ37MH6uu94wMHet0z//RPXsDn5sKECd4dfpwK6crNbC7wJJAIPOece6zF8dnA68An/l2/cc49EkpdEZGQnT3rhXvw6JqPPgqMrMnO9sJ97txAyF95JSRodppgHQa/mSUCTwG3AOXADjN7wzm3t0XR95xz87tYV0Skub//vfld/M6dcORI4PiYMV6w3313YIz8qFFROXwy2oRyxz8dOOSc+xjAzF4BFgChhHd36opIPHAOPv209UPX48cDZb74RZg+HVas8EI+NxeGDo1cm/u4UIJ/FFAWtF0OzGij3JfNrAQ4CvzAObenE3VFJB40NHhfemr5RaizZ73jiYmQkwO33BK4i5861eunl7
AJ19ONncBY51y1mc0DXgOu7swJzGw5sBxg7NixYWqWiETMxYuwd2/zkC8u9h7GAqSkwKRJsGhRIOQnTfLmsokxznnztNXXt/8TvPe9nhZK8B8FxgRtj/bva+KcOxf0+5tm9rSZZYdSN6jeRmAjQH5+vgup9SISHWpqvInIgrtqPvzQC3/wxsJPnQrf/W4g5HNyvFkp8WYorq/3B2BFxwEZ/LMzZSNVp3Hqno4MH+492uhpoQT/DuBqMxuPF9p3A/cEFzCzEcAJ55wzs+l4C7ycBio6qisi7XPOC47uhlDYQq/mIvWnKvCdrqT+zDl8Feepr76Aj0TquQ5fYj716ZnUZ/bH1y+D+uQ0fJZM/RHDdwjqX2p9fheBW72kJO99p/Fn8O/t/czI6HydUF8nPb2Xrr2jAs45n5k9ALyNNyRzk3Nuj5mt8B9/BlgIrDQzH1AL3O2cc0CbdXvoWiROXboUfXd44azT2AXQmxIS/GGUdIkkayDZ1ZN06QLJvlqSfbUk4SOZepKSUknOGEjSqFSSM9NIzuxP+oAUkpKsS6HaW3USE+N78I+5SLzVdiA/P98VFhZGuhkxofFuMRoCrKfqXLrU+3/XpKToCLCw1UlyJJ04SvKHO0kq2UnCbn+f/NGgntkrrwyMjW/8OXx47//xpU1mVuScyw+lbPx+dc0v+KFLNIddd+r2NrOuh1BqavQHZZ+/W2xc8m9Hi4nJTp/2jickeN9snT07MJ3B1KkwaFBEmy3hE1PBP2eON2CgMwEZ6kOXcEpM7FoIpaR4z8ii/U4ywmtMSDCfz1vyLzjgWy75N2kS3HFHIOQnT+69zmaJiJgK/gEDvJFg0XBXeLljiYn69rj0kMYl/4JDvqTEm7AMvDBvXPKvsauml5f8k+gQU8H/xhuRboFIL6mq8u7cg0N+797AR9hBg7xgb5xDPjc3Kpb8k+gQU8EvEpM+/7z1xGQHDwaONy75d/vtgZCP0iX/JDoo+EWihXNw7FjrOWvaWvLv/vsDo2v60JJ/Eh0U/CKR4Bx8/HHrOWvaWvLve98LTEw2eHBk2y0xQcEv0tN8Pm/O+OCQ37279ZJ/t90W6KqJ0SX/JDoo+EXC6cIFKC1t3lUTvORfWpo3XPKeewJdNddd543VFeklCn6Rrgpe8q8x5EtLA3MsDBzohXvjHPJ5eXDttd4dvkgE6f9AkVA0LvkXHPLBS/4NHdp8yb+8PBg/Xl/akKik4Bdp6fjx5l01bS35l5fnLfnXGPIjR2r4pPQZCn6JX8FL/gWHfPCE6FdfDTNmeN01jQ9es7Mj12aRMFDwS3xoXPIvOORbLvn3pS/BrbcG7uKnTNGSfxKTFPwSexqX/Gs5MVlNjXc8JcUbWbNoUeAuPkaX/BNpi4Jf+rbGJf+CQ760tPmSf7m5sGxZIOSDlvwTiUcKfuk7Kiq8O/fg0TX79wdWYhkyxAv2NWsCIf/FL2pkjUgLCn6JTidPNr+L37nTm+Kg0ahRXrAvXBgYIz9mjEbWiIRAwS+R5RyUlbWemKytJf+WLQvMWaMl/0S+nG4AAAAHyUlEQVS6TMEvvefSJTh0qPUXoVou+TdnTqCrRkv+iYSdgl96Rn29t+Rf8F38rl1QXe0db1zy7847A101WvJPpFco+KX7amu9Jf+CQ76tJf+WLAmE/Je+pCX/RCJEwS+dc+5c64nJWi75l5cXWPIvL8/79quW/BOJGgp+ubzGJf+CQz54yb8RI5ov+ZeX560QpZE1IlFNwS/Nl/wLHl1TVhYoM25cYMm/xgevWvJPpE8KKfjNbC7wJJAIPOece+wy5a4H3gfuds5t8e87AlQBDYDPOZcfhnZLVzUu+dcy5E+d8o6beXPG33hj4C5+6lQt+ScSQzoMfjNLBJ4CbgHKgR1m9oZzbm8b5X4G/Hcbp5njnPs8DO2Vzmhc8q/lGPlz57zjSUne6k/z5wdCfvJkLfknEuNCueOfDhxyzn0MYGavAAuAvS3KfQ94Fbg+rC
2U0DQu+Rcc8sXFUFfnHU9L82abvPfeQMhPnKgl/0TiUCjBPwoI6uylHJgRXMDMRgF3AnNoHfwOeMfMGoD/65zb2NaLmNlyYDnA2LFjQ2p83KquDoysaQz5PXtaL/m3cmWgP15L/omIX7iSYB3wP5xzl6z1iI4bnXNHzWwY8Hsz2++c296ykP8NYSNAfn6+C1O7+r4zZwJffmrslz9woPWSf/PmBUJeS/6JSDtCCf6jwJig7dH+fcHygVf8oZ8NzDMzn3PuNefcUQDn3Ekz24rXddQq+AVvyb+WE5N9+mng+NixXrDfc0/gi1Ba8k9EOimU4N8BXG1m4/EC/27gnuACzrnxjb+b2QvAb51zr5lZBpDgnKvy/34r8Ei4Gt9nOeet4dpyjHzLJf9uuAH++Z8DE5NpyT8RCYMOg9855zOzB4C38YZzbnLO7TGzFf7jz7RTfTiw1f9JIAl42Tn3u+43uw9paPC6ZlqGfEWFd7xxyb+vfS1wF68l/0SkB5lz0dednp+f7woLCyPdjM67eNF7yBoc8sXFrZf8a+yLz8vzhlNqyT8R6SYzKwr1e1Ia5tFVNTVeqAePj//wQ29WSggs+feP/xgI+QkTtOSfiEScgj8UjUv+BXfVtFzyLy8PHnooMEb+qqs0skZEopKCv6UTJ5rfxbe15F9enrfkX2OXjZb8E5E+JH6DP3jJv+CQP3YsUOaqq2DaNG/Jv8aQHzYscm0WEQmD+Aj+xiX/Wob8mTPe8YQEyMmBr361+cRkmZmRbbeISA+IveBvXPIvOOB37w4s+devn7fk3113Be7iteSfiMSR2An+ixe9qYSDl/zLyPDGxC9ZEgh5LfknInEudoK/Xz+45hqYNSsQ8lryT0SkldgJfoD//M9It0BEJOppoLmISJxR8IuIxBkFv4hInFHwi4jEGQW/iEicUfCLiMQZBb+ISJxR8IuIxJmoXIHLzE4Bn3ZYsG3ZwOdhbE5foGuOffF2vaBr7qwvOOeGhlIwKoO/O8ysMNTlx2KFrjn2xdv1gq65J6mrR0Qkzij4RUTiTCwG/8ZINyACdM2xL96uF3TNPSbm+vhFRKR9sXjHLyIi7eiTwW9mm8zspJmVXua4mdl6MztkZiVmltfbbQy3EK75Xv+1fmhmfzGzKb3dxnDr6JqDyl1vZj4zW9hbbespoVyzmc02s91mtsfM/tib7Qu3EP6/zjSz/2dmxf7rXdrbbQw3MxtjZgVmttd/TQ+2UaZHM6xPBj/wAjC3neNfB672/7cc2NALbeppL9D+NX8CzHLOTQJ+Qmz0j75A+9eMmSUCPwP+uzca1AteoJ1rNrNBwNPA7c65icCiXmpXT3mB9v+NVwF7nXNTgNnA/zGzvr52qg/4vnPuS8ANwCoz+1KLMj2aYX0y+J1z24Ez7RRZALzoPB8Ag8zsit5pXc/o6Jqdc39xzp31b34AjO6VhvWgEP6dAb4HvAqc7PkW9bwQrvke4DfOuc/85fv0dYdwvQ4YYGYG9PeX9fVG23qKc+64c26n//cqYB8wqkWxHs2wPhn8IRgFlAVtl9P6DxvLvgu8FelG9DQzGwXcSWx8ogvVNUCWmW0zsyIzuz/SDeph/wHkAMeAD4EHnXOXItuk8DGzcUAu8NcWh3o0w2JrzV3BzObgBf+NkW5LL1gH/A/n3CXvhjAuJAHTgJuBNOB9M/vAOXcgss3qMV8DdgNfBa4Cfm9m7znnzkW2Wd1nZv3xPq2u6e3ridXgPwqMCdoe7d8X08xsMvAc8HXn3OlIt6cX5AOv+EM/G5hnZj7n3GuRbVaPKgdOO+fOA+fNbDswBYjV4F8KPOa8ceeHzOwTYALwt8g2q3vMLBkv9F9yzv2mjSI9mmGx2tXzBnC//8n4DUClc+54pBvVk8xsLPAb4NsxfPfXjHNuvHNunHNuHLAF+OcYD32A14EbzSzJzNKBGXh9xLHqM7xPN5jZcOBa4OOItqib/M8rfgHsc849cZliPZphffKO38
x+hfeEP9vMyoH/DSQDOOeeAd4E5gGHgBq8u4Y+LYRr/hEwBHjafwfs6+sTXIVwzTGno2t2zu0zs98BJcAl4DnnXLvDXaNZCP/GPwFeMLMPAcPr2uvrM3bOBL4NfGhmu/37/hcwFnonw/TNXRGROBOrXT0iInIZCn4RkTij4BcRiTMKfhGROKPgFxGJMwp+EZE4o+AXEYkzCn4RkTjz/wEMkOXHIUk/eAAAAABJRU5ErkJggg==\n", 347 | "text/plain": [ 348 | "" 349 | ] 350 | }, 351 | "metadata": { 352 | "needs_background": "light" 353 | }, 354 | "output_type": "display_data" 355 | } 356 | ], 357 | "source": [ 358 | "plt.plot(epochs_list, train_acc, 'r', label='train_acc')\n", 359 | "plt.plot(epochs_list, validation_acc, 'b', label='validation_acc')\n", 360 | "\n", 361 | "plt.plot(epochs_list, train_loss, 'g', label='train_loss')\n", 362 | "plt.plot(epochs_list, validation_loss, 'r', label='validation_loss')\n", 363 | "\n", 364 | "plt.legend()\n", 365 | "plt.plot()\n" 366 | ] 367 | }, 368 | { 369 | "cell_type": "code", 370 | "execution_count": null, 371 | "metadata": {}, 372 | "outputs": [], 373 | "source": [] 374 | } 375 | ], 376 | "metadata": { 377 | "kernelspec": { 378 | "display_name": "Python 2", 379 | "language": "python", 380 | "name": "python2" 381 | }, 382 | "language_info": { 383 | "codemirror_mode": { 384 | "name": "ipython", 385 | "version": 2 386 | }, 387 | "file_extension": ".py", 388 | "mimetype": "text/x-python", 389 | "name": "python", 390 | "nbconvert_exporter": "python", 391 | "pygments_lexer": "ipython2", 392 | "version": "2.7.15" 393 | } 394 | }, 395 | "nbformat": 4, 396 | "nbformat_minor": 2 397 | } 398 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/Vision/04_CNN.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "Until now, we trained CV models from scratch. That is mostly time consuming. \n", 8 | "\n", 9 | "So instead we will use pretrained models. 
This is what this notebook is about" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 1, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "%load_ext autoreload\n", 19 | "%autoreload 2\n", 20 | "%matplotlib inline" 21 | ] 22 | }, 23 | { 24 | "cell_type": "markdown", 25 | "metadata": {}, 26 | "source": [ 27 | "### Load pretrained model" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": 2, 33 | "metadata": {}, 34 | "outputs": [ 35 | { 36 | "name": "stderr", 37 | "output_type": "stream", 38 | "text": [ 39 | "Using TensorFlow backend.\n" 40 | ] 41 | } 42 | ], 43 | "source": [ 44 | "from keras.applications import VGG16" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": 3, 50 | "metadata": {}, 51 | "outputs": [], 52 | "source": [ 53 | "base_model = VGG16(include_top=False, weights='imagenet', input_shape=(150, 150, 3))" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": 4, 59 | "metadata": {}, 60 | "outputs": [ 61 | { 62 | "name": "stdout", 63 | "output_type": "stream", 64 | "text": [ 65 | "_________________________________________________________________\n", 66 | "Layer (type) Output Shape Param # \n", 67 | "=================================================================\n", 68 | "input_1 (InputLayer) (None, 150, 150, 3) 0 \n", 69 | "_________________________________________________________________\n", 70 | "block1_conv1 (Conv2D) (None, 150, 150, 64) 1792 \n", 71 | "_________________________________________________________________\n", 72 | "block1_conv2 (Conv2D) (None, 150, 150, 64) 36928 \n", 73 | "_________________________________________________________________\n", 74 | "block1_pool (MaxPooling2D) (None, 75, 75, 64) 0 \n", 75 | "_________________________________________________________________\n", 76 | "block2_conv1 (Conv2D) (None, 75, 75, 128) 73856 \n", 77 | "_________________________________________________________________\n", 78 | "block2_conv2 (Conv2D) (None, 
75, 75, 128) 147584 \n", 79 | "_________________________________________________________________\n", 80 | "block2_pool (MaxPooling2D) (None, 37, 37, 128) 0 \n", 81 | "_________________________________________________________________\n", 82 | "block3_conv1 (Conv2D) (None, 37, 37, 256) 295168 \n", 83 | "_________________________________________________________________\n", 84 | "block3_conv2 (Conv2D) (None, 37, 37, 256) 590080 \n", 85 | "_________________________________________________________________\n", 86 | "block3_conv3 (Conv2D) (None, 37, 37, 256) 590080 \n", 87 | "_________________________________________________________________\n", 88 | "block3_pool (MaxPooling2D) (None, 18, 18, 256) 0 \n", 89 | "_________________________________________________________________\n", 90 | "block4_conv1 (Conv2D) (None, 18, 18, 512) 1180160 \n", 91 | "_________________________________________________________________\n", 92 | "block4_conv2 (Conv2D) (None, 18, 18, 512) 2359808 \n", 93 | "_________________________________________________________________\n", 94 | "block4_conv3 (Conv2D) (None, 18, 18, 512) 2359808 \n", 95 | "_________________________________________________________________\n", 96 | "block4_pool (MaxPooling2D) (None, 9, 9, 512) 0 \n", 97 | "_________________________________________________________________\n", 98 | "block5_conv1 (Conv2D) (None, 9, 9, 512) 2359808 \n", 99 | "_________________________________________________________________\n", 100 | "block5_conv2 (Conv2D) (None, 9, 9, 512) 2359808 \n", 101 | "_________________________________________________________________\n", 102 | "block5_conv3 (Conv2D) (None, 9, 9, 512) 2359808 \n", 103 | "_________________________________________________________________\n", 104 | "block5_pool (MaxPooling2D) (None, 4, 4, 512) 0 \n", 105 | "=================================================================\n", 106 | "Total params: 14,714,688\n", 107 | "Trainable params: 14,714,688\n", 108 | "Non-trainable params: 0\n", 109 | 
"_________________________________________________________________\n" 110 | ] 111 | } 112 | ], 113 | "source": [ 114 | "base_model.summary()" 115 | ] 116 | }, 117 | { 118 | "cell_type": "markdown", 119 | "metadata": {}, 120 | "source": [ 121 | "### Set data paths" 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": 5, 127 | "metadata": {}, 128 | "outputs": [], 129 | "source": [ 130 | "import os" 131 | ] 132 | }, 133 | { 134 | "cell_type": "code", 135 | "execution_count": 6, 136 | "metadata": {}, 137 | "outputs": [], 138 | "source": [ 139 | "root_dir = \"/\"\n", 140 | "users = os.path.join(root_dir, \"Users\")\n", 141 | "airwoot = os.path.join(users, \"airwoot\")\n", 142 | "documents = os.path.join(airwoot, \"Documents\")\n", 143 | "anuj = os.path.join(documents, \"Anuj\")\n", 144 | "Warehouse = os.path.join(anuj, \"Warehouse\")\n", 145 | "\n", 146 | "\n", 147 | "src_folder = os.path.join(Warehouse, \"train\")\n", 148 | "\n", 149 | "cat_src_folder = os.path.join(src_folder, \"CAT\")\n", 150 | "dog_src_folder = os.path.join(src_folder, \"DOG\")\n", 151 | "\n", 152 | "dest_data_folder = os.path.join(Warehouse, \"cats_and_dogs\")\n", 153 | "\n", 154 | "train_dir = os.path.join(dest_data_folder, \"train\")\n", 155 | "cat_train_dir = os.path.join(train_dir, \"cat\")\n", 156 | "dog_train_dir = os.path.join(train_dir, \"dog\")\n", 157 | "\n", 158 | "\n", 159 | "test_dir = os.path.join(dest_data_folder, \"test\")\n", 160 | "cat_test_dir = os.path.join(test_dir, \"cat\")\n", 161 | "dog_test_dir = os.path.join(test_dir, \"dog\")\n", 162 | "\n", 163 | "\n", 164 | "validation_dir = os.path.join(dest_data_folder, \"validation\")\n", 165 | "cat_validation_dir = os.path.join(validation_dir, \"cat\")\n", 166 | "dog_validation_dir = os.path.join(validation_dir, \"dog\")" 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": 7, 172 | "metadata": {}, 173 | "outputs": [ 174 | { 175 | "name": "stdout", 176 | "output_type": "stream", 177 | 
"text": [ 178 | "2000\n", 179 | "2000\n", 180 | "1000\n", 181 | "1000\n", 182 | "1000\n", 183 | "1000\n" 184 | ] 185 | } 186 | ], 187 | "source": [ 188 | "### sanity checks\n", 189 | "\n", 190 | "print(len(os.listdir(cat_train_dir)))\n", 191 | "print(len(os.listdir(dog_train_dir)))\n", 192 | "\n", 193 | "print(len(os.listdir(cat_test_dir)))\n", 194 | "print(len(os.listdir(dog_test_dir)))\n", 195 | "\n", 196 | "print(len(os.listdir(cat_validation_dir)))\n", 197 | "print(len(os.listdir(dog_validation_dir)))" 198 | ] 199 | }, 200 | { 201 | "cell_type": "markdown", 202 | "metadata": {}, 203 | "source": [ 204 | "### Extract features from pretrained model" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": 8, 210 | "metadata": {}, 211 | "outputs": [], 212 | "source": [ 213 | "from keras.preprocessing.image import ImageDataGenerator\n", 214 | "import numpy as np" 215 | ] 216 | }, 217 | { 218 | "cell_type": "code", 219 | "execution_count": 9, 220 | "metadata": {}, 221 | "outputs": [ 222 | { 223 | "name": "stdout", 224 | "output_type": "stream", 225 | "text": [ 226 | "Found 4000 images belonging to 2 classes.\n" 227 | ] 228 | } 229 | ], 230 | "source": [ 231 | "train_IDG = ImageDataGenerator(rescale=1.0/255)\n", 232 | "\n", 233 | "train_DG = train_IDG.flow_from_directory(\n", 234 | " train_dir,\n", 235 | " target_size = (150, 150),\n", 236 | " batch_size = 20,\n", 237 | " class_mode = 'binary')\n", 238 | "\n", 239 | "#since we are not doing any training, we dont need validation data" 240 | ] 241 | }, 242 | { 243 | "cell_type": "code", 244 | "execution_count": 10, 245 | "metadata": {}, 246 | "outputs": [], 247 | "source": [ 248 | "# write a func to push data via network\n", 249 | "\n", 250 | "batch_size_ = 20\n", 251 | "\n", 252 | "def extract_features(data_dir, no_of_data_points):\n", 253 | " \n", 254 | " features = np.zeros(shape=(no_of_data_points, 4, 4, 512))\n", 255 | " labels = np.zeros(shape=(no_of_data_points,))\n", 256 | " \n", 257 | " 
DataGen = ImageDataGenerator(rescale=1.0/255)\n", 258 | " DG = DataGen.flow_from_directory(\n", 259 | " data_dir, \n", 260 | " target_size = (150, 150), \n", 261 | " batch_size = batch_size_, \n", 262 | " class_mode='binary')\n", 263 | " \n", 264 | " i = 0\n", 265 | " \n", 266 | " for (batch_data, batch_label) in DG:\n", 267 | " \n", 268 | " base_features = base_model.predict(batch_data)\n", 269 | " \n", 270 | " features[i*batch_size_:(i+1)*batch_size_] = base_features\n", 271 | " labels[i*batch_size_ : (i+1)*batch_size_] = batch_label\n", 272 | " \n", 273 | " i = i+1\n", 274 | " \n", 275 | " if (i%100 == 0):\n", 276 | " print (i*batch_size)\n", 277 | " \n", 278 | " if (i*batch_size_) >= no_of_data_points:\n", 279 | " break;\n", 280 | " \n", 281 | " return (features, labels)\n", 282 | " " 283 | ] 284 | }, 285 | { 286 | "cell_type": "code", 287 | "execution_count": null, 288 | "metadata": {}, 289 | "outputs": [ 290 | { 291 | "name": "stdout", 292 | "output_type": "stream", 293 | "text": [ 294 | "Found 4000 images belonging to 2 classes.\n" 295 | ] 296 | } 297 | ], 298 | "source": [ 299 | "train_features, train_labels = extract_features(train_dir, 2000)\n", 300 | "validation_features, validation_labels = extract_features(validation_dir, 1000)\n", 301 | "test_features, test_labels = extract_features(test_dir, 1000)" 302 | ] 303 | }, 304 | { 305 | "cell_type": "code", 306 | "execution_count": null, 307 | "metadata": {}, 308 | "outputs": [], 309 | "source": [ 310 | "print(train_features.shape)\n", 311 | "print(train_labels.shape)\n", 312 | "\n", 313 | "print(validation_features.shape)\n", 314 | "print(validation_labels.shape)\n", 315 | "\n", 316 | "print(test_features.shape)\n", 317 | "print(test_labels.shape)" 318 | ] 319 | }, 320 | { 321 | "cell_type": "code", 322 | "execution_count": null, 323 | "metadata": {}, 324 | "outputs": [], 325 | "source": [ 326 | "# flatten out the feature sets\n", 327 | "\n", 328 | "train_features = np.reshape(train_features, (2000, 
-1))\n", 329 | "validation_features = np.reshape(validation_features, (1000, -1))\n", 330 | "test_features = np.reshape(test_features, (1000, -1))" 331 | ] 332 | }, 333 | { 334 | "cell_type": "code", 335 | "execution_count": null, 336 | "metadata": {}, 337 | "outputs": [], 338 | "source": [ 339 | "print(train_features.shape)\n", 340 | "\n", 341 | "print(validation_features.shape)\n", 342 | "\n", 343 | "print(test_features.shape)" 344 | ] 345 | }, 346 | { 347 | "cell_type": "markdown", 348 | "metadata": {}, 349 | "source": [ 350 | "### Simple feed forward" 351 | ] 352 | }, 353 | { 354 | "cell_type": "code", 355 | "execution_count": null, 356 | "metadata": {}, 357 | "outputs": [], 358 | "source": [ 359 | "from keras import models\n", 360 | "from keras import layers\n", 361 | "from keras import optimizers" 362 | ] 363 | }, 364 | { 365 | "cell_type": "code", 366 | "execution_count": null, 367 | "metadata": {}, 368 | "outputs": [], 369 | "source": [ 370 | "model = models.Sequential()\n", 371 | "\n", 372 | "model.add(layers.Dense(256, activation='relu', input_shape=(4 * 4 * 512,)))\n", 373 | "model.add(layers.Dropout(0.5))\n", 374 | "model.add(layers.Dense(1, activation='sigmoid'))" 375 | ] 376 | }, 377 | { 378 | "cell_type": "code", 379 | "execution_count": null, 380 | "metadata": {}, 381 | "outputs": [], 382 | "source": [ 383 | "model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])" 384 | ] 385 | }, 386 | { 387 | "cell_type": "code", 388 | "execution_count": null, 389 | "metadata": {}, 390 | "outputs": [], 391 | "source": [ 392 | "history = model.fit(train_features, train_labels,\n", 393 | " epochs=3,\n", 394 | " batch_size=20,\n", 395 | " validation_data=(validation_features, validation_labels))" 396 | ] 397 | }, 398 | { 399 | "cell_type": "code", 400 | "execution_count": null, 401 | "metadata": {}, 402 | "outputs": [], 403 | "source": [ 404 | "import matplotlib.pyplot as plt\n", 405 | "\n", 406 | "acc = history.history['acc']\n", 407 | "val_acc 
= history.history['val_acc']\n", 408 | "loss = history.history['loss']\n", 409 | "val_loss = history.history['val_loss']\n", 410 | "\n", 411 | "epochs = range(1, len(acc) + 1)\n", 412 | "plt.plot(epochs, acc, 'bo', label='Training acc')\n", 413 | "plt.plot(epochs, val_acc, 'b', label='Validation acc')\n", 414 | "plt.title('Training and validation accuracy')\n", 415 | "plt.legend()\n", 416 | "\n", 417 | "plt.figure()\n", 418 | "plt.plot(epochs, loss, 'bo', label='Training loss')\n", 419 | "plt.plot(epochs, val_loss, 'b', label='Validation loss')\n", 420 | "plt.title('Training and validation loss')\n", 421 | "plt.legend()\n", 422 | " \n", 423 | "plt.show()" 424 | ] 425 | }, 426 | { 427 | "cell_type": "code", 428 | "execution_count": null, 429 | "metadata": {}, 430 | "outputs": [], 431 | "source": [] 432 | } 433 | ], 434 | "metadata": { 435 | "kernelspec": { 436 | "display_name": "Python 2", 437 | "language": "python", 438 | "name": "python2" 439 | }, 440 | "language_info": { 441 | "codemirror_mode": { 442 | "name": "ipython", 443 | "version": 2 444 | }, 445 | "file_extension": ".py", 446 | "mimetype": "text/x-python", 447 | "name": "python", 448 | "nbconvert_exporter": "python", 449 | "pygments_lexer": "ipython2", 450 | "version": "2.7.15" 451 | } 452 | }, 453 | "nbformat": 4, 454 | "nbformat_minor": 2 455 | } 456 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/Vision/CNN_1.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "%load_ext autoreload\n", 17 | "%autoreload 2" 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": 2, 23 | "metadata": {}, 24 | "outputs": [ 25 | { 26 | "name": 
"stderr", 27 | "output_type": "stream", 28 | "text": [ 29 | "Using TensorFlow backend.\n" 30 | ] 31 | } 32 | ], 33 | "source": [ 34 | "from keras.datasets import mnist" 35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": 3, 40 | "metadata": {}, 41 | "outputs": [], 42 | "source": [ 43 | "(X_train, y_train), (X_test, y_test) = mnist.load_data()" 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": 4, 49 | "metadata": {}, 50 | "outputs": [], 51 | "source": [ 52 | "X_train = X_train.reshape((-1, 28, 28, 1))\n", 53 | "X_train = X_train.astype('float32')/ 255" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": 5, 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "X_test = X_test.reshape((-1, 28, 28, 1))\n", 63 | "X_test = X_test.astype('float32') / 255" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": 6, 69 | "metadata": {}, 70 | "outputs": [], 71 | "source": [ 72 | "from keras.utils import to_categorical\n" 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": 7, 78 | "metadata": {}, 79 | "outputs": [], 80 | "source": [ 81 | "y_train = to_categorical(y_train)\n", 82 | "y_test = to_categorical(y_test)" 83 | ] 84 | }, 85 | { 86 | "cell_type": "code", 87 | "execution_count": null, 88 | "metadata": {}, 89 | "outputs": [], 90 | "source": [] 91 | }, 92 | { 93 | "cell_type": "code", 94 | "execution_count": 8, 95 | "metadata": {}, 96 | "outputs": [], 97 | "source": [ 98 | "from keras import Sequential\n", 99 | "from keras import layers" 100 | ] 101 | }, 102 | { 103 | "cell_type": "code", 104 | "execution_count": 9, 105 | "metadata": {}, 106 | "outputs": [], 107 | "source": [ 108 | "model = Sequential()" 109 | ] 110 | }, 111 | { 112 | "cell_type": "code", 113 | "execution_count": 10, 114 | "metadata": {}, 115 | "outputs": [], 116 | "source": [ 117 | "model.add(layers.Conv2D(filters=32, kernel_size=(3,3), activation='relu', input_shape=(28, 28, 1)))\n", 118 | 
"model.add(layers.Conv2D(filters=16, kernel_size=(3,3), activation='relu'))\n", 119 | "\n", 120 | "model.add(layers.Flatten())\n", 121 | "\n", 122 | "model.add(layers.Dense(32, activation='relu'))\n", 123 | "model.add(layers.Dense(10, activation='softmax'))" 124 | ] 125 | }, 126 | { 127 | "cell_type": "code", 128 | "execution_count": 11, 129 | "metadata": {}, 130 | "outputs": [], 131 | "source": [ 132 | "model.compile(optimizer='adam',\n", 133 | " loss='categorical_crossentropy',\n", 134 | " metrics=['accuracy'])" 135 | ] 136 | }, 137 | { 138 | "cell_type": "code", 139 | "execution_count": 12, 140 | "metadata": {}, 141 | "outputs": [ 142 | { 143 | "name": "stdout", 144 | "output_type": "stream", 145 | "text": [ 146 | "_________________________________________________________________\n", 147 | "Layer (type) Output Shape Param # \n", 148 | "=================================================================\n", 149 | "conv2d_1 (Conv2D) (None, 26, 26, 32) 320 \n", 150 | "_________________________________________________________________\n", 151 | "conv2d_2 (Conv2D) (None, 24, 24, 16) 4624 \n", 152 | "_________________________________________________________________\n", 153 | "flatten_1 (Flatten) (None, 9216) 0 \n", 154 | "_________________________________________________________________\n", 155 | "dense_1 (Dense) (None, 32) 294944 \n", 156 | "_________________________________________________________________\n", 157 | "dense_2 (Dense) (None, 10) 330 \n", 158 | "=================================================================\n", 159 | "Total params: 300,218\n", 160 | "Trainable params: 300,218\n", 161 | "Non-trainable params: 0\n", 162 | "_________________________________________________________________\n" 163 | ] 164 | } 165 | ], 166 | "source": [ 167 | "model.summary()" 168 | ] 169 | }, 170 | { 171 | "cell_type": "code", 172 | "execution_count": 13, 173 | "metadata": {}, 174 | "outputs": [ 175 | { 176 | "name": "stdout", 177 | "output_type": "stream", 178 | "text": [ 
179 | "Epoch 1/1\n", 180 | "60000/60000 [==============================] - 113s 2ms/step - loss: 0.1418 - acc: 0.9571\n" 181 | ] 182 | } 183 | ], 184 | "source": [ 185 | "history = model.fit(X_train, y_train, batch_size=32, epochs=1)" 186 | ] 187 | }, 188 | { 189 | "cell_type": "code", 190 | "execution_count": null, 191 | "metadata": {}, 192 | "outputs": [], 193 | "source": [] 194 | } 195 | ], 196 | "metadata": { 197 | "kernelspec": { 198 | "display_name": "Python 3", 199 | "language": "python", 200 | "name": "python3" 201 | }, 202 | "language_info": { 203 | "codemirror_mode": { 204 | "name": "ipython", 205 | "version": 3 206 | }, 207 | "file_extension": ".py", 208 | "mimetype": "text/x-python", 209 | "name": "python", 210 | "nbconvert_exporter": "python", 211 | "pygments_lexer": "ipython3", 212 | "version": "3.5.4" 213 | } 214 | }, 215 | "nbformat": 4, 216 | "nbformat_minor": 2 217 | } 218 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/Vision/CNN_5.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "## Adding a densely connected classifier on top of the convolutional base" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": null, 22 | "metadata": {}, 23 | "outputs": [], 24 | "source": [ 25 | "from keras import models\n", 26 | "from keras import layers\n", 27 | "\n", 28 | "model = models.Sequential()\n", 29 | "\n", 30 | "model.add(conv_base)\n", 31 | "model.add(layers.Flatten())\n", 32 | "model.add(layers.Dense(256, activation='relu'))\n", 33 | "model.add(layers.Dense(1, activation='sigmoid'))" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": null, 39 | 
"metadata": {}, 40 | "outputs": [], 41 | "source": [ 42 | "print('This is the number of trainable weights before freezing the conv base:', len(model.trainable_weights))" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": null, 48 | "metadata": {}, 49 | "outputs": [], 50 | "source": [ 51 | "conv_base.trainable = False" 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "execution_count": null, 57 | "metadata": {}, 58 | "outputs": [], 59 | "source": [ 60 | "print('This is the number of trainable weights after freezing the conv base:', len(model.trainable_weights))" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "metadata": {}, 67 | "outputs": [], 68 | "source": [ 69 | "from keras.preprocessing.image import ImageDataGenerator\n", 70 | "from keras import optimizers" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": null, 76 | "metadata": {}, 77 | "outputs": [], 78 | "source": [ 79 | "\n", 80 | "train_datagen = ImageDataGenerator(\n", 81 | "rescale=1./255,\n", 82 | "rotation_range=40,\n", 83 | "width_shift_range=0.2,\n", 84 | "height_shift_range=0.2,\n", 85 | "shear_range=0.2,\n", 86 | "zoom_range=0.2,\n", 87 | "horizontal_flip=True,\n", 88 | "fill_mode='nearest')" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": null, 94 | "metadata": {}, 95 | "outputs": [], 96 | "source": [ 97 | "\n", 98 | "train_generator = train_datagen.flow_from_directory(\n", 99 | "train_dir,\n", 100 | "target_size=(150, 150),\n", 101 | "batch_size=20,\n", 102 | "class_mode='binary')" 103 | ] 104 | }, 105 | { 106 | "cell_type": "code", 107 | "execution_count": null, 108 | "metadata": {}, 109 | "outputs": [], 110 | "source": [ 111 | "test_datagen = ImageDataGenerator(rescale=1./255)" 112 | ] 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": null, 117 | "metadata": {}, 118 | "outputs": [], 119 | "source": [ 120 | "validation_generator = test_datagen.flow_from_directory(\n", 121 
| "validation_dir,\n", 122 | "target_size=(150, 150),\n", 123 | "batch_size=20,\n", 124 | "class_mode='binary')" 125 | ] 126 | }, 127 | { 128 | "cell_type": "code", 129 | "execution_count": null, 130 | "metadata": {}, 131 | "outputs": [], 132 | "source": [ 133 | "model.compile(loss='binary_crossentropy',\n", 134 | "optimizer=optimizers.RMSprop(lr=2e-5),\n", 135 | "metrics=['acc'])" 136 | ] 137 | }, 138 | { 139 | "cell_type": "code", 140 | "execution_count": null, 141 | "metadata": {}, 142 | "outputs": [], 143 | "source": [ 144 | "history = model.fit_generator(\n", 145 | "train_generator,\n", 146 | "steps_per_epoch=100,\n", 147 | "epochs=30,\n", 148 | "validation_data=validation_generator,\n", 149 | "validation_steps=50)" 150 | ] 151 | }, 152 | { 153 | "cell_type": "code", 154 | "execution_count": null, 155 | "metadata": {}, 156 | "outputs": [], 157 | "source": [] 158 | }, 159 | { 160 | "cell_type": "code", 161 | "execution_count": null, 162 | "metadata": {}, 163 | "outputs": [], 164 | "source": [] 165 | } 166 | ], 167 | "metadata": { 168 | "kernelspec": { 169 | "display_name": "Python 3", 170 | "language": "python", 171 | "name": "python3" 172 | }, 173 | "language_info": { 174 | "codemirror_mode": { 175 | "name": "ipython", 176 | "version": 3 177 | }, 178 | "file_extension": ".py", 179 | "mimetype": "text/x-python", 180 | "name": "python", 181 | "nbconvert_exporter": "python", 182 | "pygments_lexer": "ipython3", 183 | "version": "3.5.4" 184 | } 185 | }, 186 | "nbformat": 4, 187 | "nbformat_minor": 2 188 | } 189 | -------------------------------------------------------------------------------- /Keras/Keras_from_scratch/Vision/README.md: -------------------------------------------------------------------------------- 1 | 01 Basic CNN notebook 2 | 02 In notebook 01, we loaded the entire data in one go in the RAM. Here we will load chunk by chunk and then train the model. 
For this we use data generators 3 | 03 Pull data from folders on the fly and augment it in real time 4 | 04_1 Use pretrained network to extract features and then feed these features to a FFN 5 | 05 We use pretrained network to extract features on the fly and then pass them to Dense layers 6 | 06 We train not just dense layers but also fine tune last part of the pretrained network 7 | 8 | 9 | Left overs: 10 | 07 Feature visualization 11 | 08 Filter visualization (Gradient ascent) 12 | 09 Class activation Maps (CAM) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DeepNets -------------------------------------------------------------------------------- /Tensorflow/Learn_TF/BR_RBZ+Ch_4.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Ch4 of TF book by BR and RBZ" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [] 16 | } 17 | ], 18 | "metadata": { 19 | "kernelspec": { 20 | "display_name": "Python 3", 21 | "language": "python", 22 | "name": "python3" 23 | }, 24 | "language_info": { 25 | "codemirror_mode": { 26 | "name": "ipython", 27 | "version": 3 28 | }, 29 | "file_extension": ".py", 30 | "mimetype": "text/x-python", 31 | "name": "python", 32 | "nbconvert_exporter": "python", 33 | "pygments_lexer": "ipython3", 34 | "version": "3.5.4" 35 | } 36 | }, 37 | "nbformat": 4, 38 | "nbformat_minor": 2 39 | } 40 | -------------------------------------------------------------------------------- /Tensorflow/Learn_TF/BR_RBZ+Ch_5.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Ch5 of TF book by BR 
and RBZ" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [] 16 | } 17 | ], 18 | "metadata": { 19 | "kernelspec": { 20 | "display_name": "Python 3", 21 | "language": "python", 22 | "name": "python3" 23 | }, 24 | "language_info": { 25 | "codemirror_mode": { 26 | "name": "ipython", 27 | "version": 3 28 | }, 29 | "file_extension": ".py", 30 | "mimetype": "text/x-python", 31 | "name": "python", 32 | "nbconvert_exporter": "python", 33 | "pygments_lexer": "ipython3", 34 | "version": "3.5.4" 35 | } 36 | }, 37 | "nbformat": 4, 38 | "nbformat_minor": 2 39 | } 40 | -------------------------------------------------------------------------------- /Tensorflow/Learn_TF/Ch-1.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 2, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": "stdout", 10 | "output_type": "stream", 11 | "text": [ 12 | "The autoreload extension is already loaded. 
To reload it, use:\n", 13 | " %reload_ext autoreload\n" 14 | ] 15 | } 16 | ], 17 | "source": [ 18 | "%load_ext autoreload \n", 19 | "%autoreload 2\n", 20 | "#%matplotlib inline" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 3, 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "import tensorflow as tf\n", 30 | "import numpy as np" 31 | ] 32 | }, 33 | { 34 | "cell_type": "markdown", 35 | "metadata": { 36 | "collapsed": true 37 | }, 38 | "source": [ 39 | "# create tensors" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": 5, 45 | "metadata": {}, 46 | "outputs": [], 47 | "source": [ 48 | "a = tf.zeros([2,3])\n", 49 | "b = tf.ones([2,3])\n", 50 | "c = tf.fill([2,3], 42)" 51 | ] 52 | }, 53 | { 54 | "cell_type": "code", 55 | "execution_count": 9, 56 | "metadata": {}, 57 | "outputs": [], 58 | "source": [ 59 | "sess = tf.Session()\n", 60 | "init_op = tf.global_variables_initializer()\n", 61 | "sess.run(init_op)" 62 | ] 63 | }, 64 | { 65 | "cell_type": "code", 66 | "execution_count": 13, 67 | "metadata": {}, 68 | "outputs": [ 69 | { 70 | "data": { 71 | "text/plain": [ 72 | "array([[0., 0., 0.],\n", 73 | " [0., 0., 0.]], dtype=float32)" 74 | ] 75 | }, 76 | "execution_count": 13, 77 | "metadata": {}, 78 | "output_type": "execute_result" 79 | } 80 | ], 81 | "source": [ 82 | "a.eval(session=sess)" 83 | ] 84 | }, 85 | { 86 | "cell_type": "code", 87 | "execution_count": 15, 88 | "metadata": {}, 89 | "outputs": [ 90 | { 91 | "data": { 92 | "text/plain": [ 93 | "array([[42, 42, 42],\n", 94 | " [42, 42, 42]], dtype=int32)" 95 | ] 96 | }, 97 | "execution_count": 15, 98 | "metadata": {}, 99 | "output_type": "execute_result" 100 | } 101 | ], 102 | "source": [ 103 | "c.eval(session=sess)" 104 | ] 105 | }, 106 | { 107 | "cell_type": "code", 108 | "execution_count": null, 109 | "metadata": {}, 110 | "outputs": [], 111 | "source": [] 112 | } 113 | ], 114 | "metadata": { 115 | "kernelspec": { 116 | "display_name": "Python 2", 117 | 
"language": "python", 118 | "name": "python2" 119 | }, 120 | "language_info": { 121 | "codemirror_mode": { 122 | "name": "ipython", 123 | "version": 2 124 | }, 125 | "file_extension": ".py", 126 | "mimetype": "text/x-python", 127 | "name": "python", 128 | "nbconvert_exporter": "python", 129 | "pygments_lexer": "ipython2", 130 | "version": "2.7.10" 131 | } 132 | }, 133 | "nbformat": 4, 134 | "nbformat_minor": 1 135 | } 136 | -------------------------------------------------------------------------------- /Tensorflow/Learn_TF/Ch__1.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Ch1 from the book McClure " 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "%load_ext autoreload\n", 17 | "%autoreload 2\n", 18 | "%matplotlib inline" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 2, 24 | "metadata": {}, 25 | "outputs": [], 26 | "source": [ 27 | "import tensorflow as tf\n", 28 | "import numpy as np" 29 | ] 30 | }, 31 | { 32 | "cell_type": "markdown", 33 | "metadata": {}, 34 | "source": [ 35 | "1.1\n", 36 | " + Its all about tensors\n", 37 | " + Declare tensors as variables and or feed them in as placeholders" 38 | ] 39 | }, 40 | { 41 | "cell_type": "code", 42 | "execution_count": 4, 43 | "metadata": {}, 44 | "outputs": [ 45 | { 46 | "data": { 47 | "text/plain": [ 48 | "" 49 | ] 50 | }, 51 | "execution_count": 4, 52 | "metadata": {}, 53 | "output_type": "execute_result" 54 | } 55 | ], 56 | "source": [ 57 | "# Since we are experimenting, we will use TensorFlow interactively\n", 58 | "\n", 59 | "tf.InteractiveSession()" 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": 5, 65 | "metadata": {}, 66 | "outputs": [], 67 | "source": [ 68 | "a = tf.zeros([3,4])" 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | 
"execution_count": 6, 74 | "metadata": {}, 75 | "outputs": [ 76 | { 77 | "data": { 78 | "text/plain": [ 79 | "array([[0., 0., 0., 0.],\n", 80 | " [0., 0., 0., 0.],\n", 81 | " [0., 0., 0., 0.]], dtype=float32)" 82 | ] 83 | }, 84 | "execution_count": 6, 85 | "metadata": {}, 86 | "output_type": "execute_result" 87 | } 88 | ], 89 | "source": [ 90 | "# TensorFlow returns a reference to the desired tensor rather than the value of the tensor itself. \n", 91 | "# To force the value of the tensor to be returned, we will use the method tf.Tensor.eval() of tensor objects\n", 92 | "\n", 93 | "a.eval()" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": 7, 99 | "metadata": {}, 100 | "outputs": [ 101 | { 102 | "data": { 103 | "text/plain": [ 104 | "TensorShape([Dimension(3), Dimension(4)])" 105 | ] 106 | }, 107 | "execution_count": 7, 108 | "metadata": {}, 109 | "output_type": "execute_result" 110 | } 111 | ], 112 | "source": [ 113 | "a.shape" 114 | ] 115 | }, 116 | { 117 | "cell_type": "code", 118 | "execution_count": 8, 119 | "metadata": {}, 120 | "outputs": [ 121 | { 122 | "data": { 123 | "text/plain": [ 124 | "array([0., 0.], dtype=float32)" 125 | ] 126 | }, 127 | "execution_count": 8, 128 | "metadata": {}, 129 | "output_type": "execute_result" 130 | } 131 | ], 132 | "source": [ 133 | "a = tf.zeros(2)\n", 134 | "a.eval()" 135 | ] 136 | }, 137 | { 138 | "cell_type": "code", 139 | "execution_count": 13, 140 | "metadata": {}, 141 | "outputs": [ 142 | { 143 | "data": { 144 | "text/plain": [ 145 | "array([0, 0], dtype=int32)" 146 | ] 147 | }, 148 | "execution_count": 13, 149 | "metadata": {}, 150 | "output_type": "execute_result" 151 | } 152 | ], 153 | "source": [ 154 | "a = tf.zeros(2, dtype=tf.int32)\n", 155 | "a.eval()" 156 | ] 157 | }, 158 | { 159 | "cell_type": "code", 160 | "execution_count": 3, 161 | "metadata": {}, 162 | "outputs": [], 163 | "source": [ 164 | "# fixed tensors\n", 165 | "\n", 166 | "a = tf.zeros([3,4])\n", 167 | "b = tf.ones([2,3])\n", 
168 | "c = tf.zeros_like(a)\n", 169 | "d = tf.ones_like(b)" 170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "execution_count": null, 175 | "metadata": {}, 176 | "outputs": [], 177 | "source": [] 178 | } 179 | ], 180 | "metadata": { 181 | "kernelspec": { 182 | "display_name": "Python 2", 183 | "language": "python", 184 | "name": "python2" 185 | }, 186 | "language_info": { 187 | "codemirror_mode": { 188 | "name": "ipython", 189 | "version": 2 190 | }, 191 | "file_extension": ".py", 192 | "mimetype": "text/x-python", 193 | "name": "python", 194 | "nbconvert_exporter": "python", 195 | "pygments_lexer": "ipython2", 196 | "version": "2.7.10" 197 | } 198 | }, 199 | "nbformat": 4, 200 | "nbformat_minor": 2 201 | } 202 | -------------------------------------------------------------------------------- /Tensorflow/Learn_TF/notebook-ch1.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "# Config the matlotlib backend as plotting inline in IPython\n", 10 | "#%matplotlib inline\n", 11 | "%load_ext autoreload\n", 12 | "%autoreload 2" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 2, 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "# start afresh\n", 22 | "# book TF ML cookbook by Nick McClure" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": 3, 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "import tensorflow as tf" 32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": {}, 37 | "source": [ 38 | "### create some basic tensors" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": 14, 44 | "metadata": {}, 45 | "outputs": [], 46 | "source": [ 47 | "# fixed tensors\n", 48 | "\n", 49 | "zero_tsr = tf.zeros([3,3])\n", 50 | "one_tsr = tf.ones([4,4])" 51 | ] 52 | }, 53 | { 54 | "cell_type": 
"code", 55 | "execution_count": 15, 56 | "metadata": {}, 57 | "outputs": [], 58 | "source": [ 59 | "filled_tsr = tf.fill([5,5], 42)" 60 | ] 61 | }, 62 | { 63 | "cell_type": "code", 64 | "execution_count": 16, 65 | "metadata": {}, 66 | "outputs": [], 67 | "source": [ 68 | "constant_tsr = tf.constant([1,2,3])" 69 | ] 70 | }, 71 | { 72 | "cell_type": "code", 73 | "execution_count": 17, 74 | "metadata": {}, 75 | "outputs": [], 76 | "source": [ 77 | "sess = tf.Session()\n", 78 | "init_op = tf.global_variables_initializer()\n", 79 | "sess.run(init_op)" 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": 18, 85 | "metadata": {}, 86 | "outputs": [ 87 | { 88 | "data": { 89 | "text/plain": [ 90 | "array([[0., 0., 0.],\n", 91 | " [0., 0., 0.],\n", 92 | " [0., 0., 0.]], dtype=float32)" 93 | ] 94 | }, 95 | "execution_count": 18, 96 | "metadata": {}, 97 | "output_type": "execute_result" 98 | } 99 | ], 100 | "source": [ 101 | "zero_tsr.eval(session=sess)\n" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": 19, 107 | "metadata": {}, 108 | "outputs": [ 109 | { 110 | "data": { 111 | "text/plain": [ 112 | "array([[1., 1., 1., 1.],\n", 113 | " [1., 1., 1., 1.],\n", 114 | " [1., 1., 1., 1.],\n", 115 | " [1., 1., 1., 1.]], dtype=float32)" 116 | ] 117 | }, 118 | "execution_count": 19, 119 | "metadata": {}, 120 | "output_type": "execute_result" 121 | } 122 | ], 123 | "source": [ 124 | "one_tsr.eval(session=sess)" 125 | ] 126 | }, 127 | { 128 | "cell_type": "code", 129 | "execution_count": 20, 130 | "metadata": {}, 131 | "outputs": [ 132 | { 133 | "data": { 134 | "text/plain": [ 135 | "array([[42, 42, 42, 42, 42],\n", 136 | " [42, 42, 42, 42, 42],\n", 137 | " [42, 42, 42, 42, 42],\n", 138 | " [42, 42, 42, 42, 42],\n", 139 | " [42, 42, 42, 42, 42]], dtype=int32)" 140 | ] 141 | }, 142 | "execution_count": 20, 143 | "metadata": {}, 144 | "output_type": "execute_result" 145 | } 146 | ], 147 | "source": [ 148 | "filled_tsr.eval(session=sess)" 149 | 
] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": 21, 154 | "metadata": {}, 155 | "outputs": [ 156 | { 157 | "data": { 158 | "text/plain": [ 159 | "array([1, 2, 3], dtype=int32)" 160 | ] 161 | }, 162 | "execution_count": 21, 163 | "metadata": {}, 164 | "output_type": "execute_result" 165 | } 166 | ], 167 | "source": [ 168 | "constant_tsr.eval(session=sess)" 169 | ] 170 | }, 171 | { 172 | "cell_type": "code", 173 | "execution_count": null, 174 | "metadata": {}, 175 | "outputs": [], 176 | "source": [ 177 | "# tensors with similar shape" 178 | ] 179 | }, 180 | { 181 | "cell_type": "code", 182 | "execution_count": null, 183 | "metadata": {}, 184 | "outputs": [], 185 | "source": [ 186 | "zero_similar = tf.zeros_like(zero_tsr)\n", 187 | "one_similar = tf.ones_like(one_tsr)" 188 | ] 189 | }, 190 | { 191 | "cell_type": "code", 192 | "execution_count": null, 193 | "metadata": {}, 194 | "outputs": [], 195 | "source": [ 196 | "#sequence tensors" 197 | ] 198 | }, 199 | { 200 | "cell_type": "code", 201 | "execution_count": null, 202 | "metadata": {}, 203 | "outputs": [], 204 | "source": [ 205 | "linear_tsr = tf.linspace(start=0.0, stop=5.0, num=5)" 206 | ] 207 | }, 208 | { 209 | "cell_type": "code", 210 | "execution_count": null, 211 | "metadata": {}, 212 | "outputs": [], 213 | "source": [ 214 | "integer_seq = tf.range(start=6, limit=12, delta=3)" 215 | ] 216 | }, 217 | { 218 | "cell_type": "code", 219 | "execution_count": null, 220 | "metadata": {}, 221 | "outputs": [], 222 | "source": [ 223 | "# random tensors" 224 | ] 225 | }, 226 | { 227 | "cell_type": "code", 228 | "execution_count": null, 229 | "metadata": {}, 230 | "outputs": [], 231 | "source": [ 232 | "random_tsr = tf.random_uniform([5,4], minval=0.0, maxval=1.0)" 233 | ] 234 | }, 235 | { 236 | "cell_type": "code", 237 | "execution_count": null, 238 | "metadata": {}, 239 | "outputs": [], 240 | "source": [ 241 | "random_tsr = tf.random_normal([5,4], mean=0.0, stddev=1.0)" 242 | ] 243 | }, 244 | { 
245 | "cell_type": "code", 246 | "execution_count": null, 247 | "metadata": {}, 248 | "outputs": [], 249 | "source": [ 250 | "random_tsr = tf.truncated_normal([5,4], mean=0.0, stddev=1.0)" 251 | ] 252 | }, 253 | { 254 | "cell_type": "code", 255 | "execution_count": null, 256 | "metadata": {}, 257 | "outputs": [], 258 | "source": [ 259 | "shuffled_output = tf.random_shuffle(input_tsr)\n", 260 | "tf.random_crop(input_tensor, [dim1, dim2, dim3])" 261 | ] 262 | }, 263 | { 264 | "cell_type": "markdown", 265 | "metadata": {}, 266 | "source": [ 267 | "### creating variables" 268 | ] 269 | }, 270 | { 271 | "cell_type": "code", 272 | "execution_count": null, 273 | "metadata": {}, 274 | "outputs": [], 275 | "source": [ 276 | "# takes a tensor and returns a variable\n", 277 | "my_var = tf.Variable(zero_similar)" 278 | ] 279 | }, 280 | { 281 | "cell_type": "code", 282 | "execution_count": null, 283 | "metadata": {}, 284 | "outputs": [], 285 | "source": [ 286 | "# convert numpy lists into tensor\n", 287 | "ll = [1,2,3]\n", 288 | "\n", 289 | "ll1 = tf.convert_to_tensor(ll)" 290 | ] 291 | }, 292 | { 293 | "cell_type": "code", 294 | "execution_count": null, 295 | "metadata": {}, 296 | "outputs": [], 297 | "source": [ 298 | "# variable vs placeholder\n", 299 | "# variable has data\n", 300 | "# placeholder gets data at runtime - just need type and shape\n" 301 | ] 302 | }, 303 | { 304 | "cell_type": "code", 305 | "execution_count": null, 306 | "metadata": {}, 307 | "outputs": [], 308 | "source": [ 309 | "my_var = tf.Variable(tf.zeros([2,3]))\n", 310 | "\n", 311 | "sess = tf.Session()\n", 312 | "initialize_op = tf.global_variables_initializer() # initializes all variables not tensors. 
tensors are \n", 313 | "\n", 314 | "sess.run(initialize_op)" 315 | ] 316 | }, 317 | { 318 | "cell_type": "code", 319 | "execution_count": null, 320 | "metadata": {}, 321 | "outputs": [], 322 | "source": [ 323 | "my_var.eval()" 324 | ] 325 | }, 326 | { 327 | "cell_type": "code", 328 | "execution_count": null, 329 | "metadata": {}, 330 | "outputs": [], 331 | "source": [ 332 | "import numpy as np\n", 333 | "\n", 334 | "sess = tf.Session()\n", 335 | "\n", 336 | "x = tf.placeholder(tf.float32, shape=[2,2])\n", 337 | "y = tf.identity(x)\n", 338 | "\n", 339 | "x_vals = np.random.rand(2,2)\n", 340 | "\n", 341 | "sess.run(y, feed_dict={x:x_vals})\n" 342 | ] 343 | }, 344 | { 345 | "cell_type": "code", 346 | "execution_count": null, 347 | "metadata": {}, 348 | "outputs": [], 349 | "source": [ 350 | "sess = tf.Session()\n", 351 | "first_var = tf.Variable(tf.zeros([2,3]))\n", 352 | "sess.run(first_var.initializer)\n", 353 | "#first_var.eval()\n", 354 | "second_var = tf.Variable(tf.zeros_like(first_var))\n", 355 | "print(sess.run(second_var.initializer))\n", 356 | "print(type(first_var))" 357 | ] 358 | }, 359 | { 360 | "cell_type": "code", 361 | "execution_count": null, 362 | "metadata": {}, 363 | "outputs": [], 364 | "source": [ 365 | "identity_matrix = tf.diag([1.0, 1.0, 1.0])\n", 366 | "print(sess.run(identity_matrix))" 367 | ] 368 | }, 369 | { 370 | "cell_type": "code", 371 | "execution_count": null, 372 | "metadata": {}, 373 | "outputs": [], 374 | "source": [ 375 | "type(identity_matrix)" 376 | ] 377 | }, 378 | { 379 | "cell_type": "code", 380 | "execution_count": null, 381 | "metadata": {}, 382 | "outputs": [], 383 | "source": [ 384 | "# KEY point - you can print a tensor not a variable !!" 
385 | ] 386 | }, 387 | { 388 | "cell_type": "code", 389 | "execution_count": null, 390 | "metadata": {}, 391 | "outputs": [], 392 | "source": [ 393 | "print(sess.run(tf.nn.relu([-3, 3, 10])))" 394 | ] 395 | }, 396 | { 397 | "cell_type": "code", 398 | "execution_count": null, 399 | "metadata": {}, 400 | "outputs": [], 401 | "source": [ 402 | "print(sess.run(tf.nn.sigmoid([-10.0, 3.0, 10.0])))" 403 | ] 404 | }, 405 | { 406 | "cell_type": "code", 407 | "execution_count": null, 408 | "metadata": {}, 409 | "outputs": [], 410 | "source": [] 411 | }, 412 | { 413 | "cell_type": "code", 414 | "execution_count": null, 415 | "metadata": {}, 416 | "outputs": [], 417 | "source": [ 418 | "# load some datasets" 419 | ] 420 | }, 421 | { 422 | "cell_type": "code", 423 | "execution_count": 9, 424 | "metadata": {}, 425 | "outputs": [ 426 | { 427 | "name": "stdout", 428 | "output_type": "stream", 429 | "text": [ 430 | "150\n", 431 | "150\n", 432 | "0\n", 433 | "[5.1 3.5 1.4 0.2]\n", 434 | "set([0, 1, 2])\n" 435 | ] 436 | } 437 | ], 438 | "source": [ 439 | "from sklearn import datasets\n", 440 | "\n", 441 | "iris = datasets.load_iris()\n", 442 | "\n", 443 | "print len(iris.data)\n", 444 | "\n", 445 | "print(len(iris.target))\n", 446 | "\n", 447 | "print((iris.target[0]))\n", 448 | "\n", 449 | "print((iris.data[0]))\n", 450 | "\n", 451 | "print(set(iris.target))" 452 | ] 453 | }, 454 | { 455 | "cell_type": "code", 456 | "execution_count": 14, 457 | "metadata": {}, 458 | "outputs": [ 459 | { 460 | "name": "stdout", 461 | "output_type": "stream", 462 | "text": [ 463 | "[]\n" 464 | ] 465 | } 466 | ], 467 | "source": [ 468 | "import requests\n", 469 | "\n", 470 | "birthdata_url = 'https://www.umass.edu/statdata/statdata/data/lowbwt.data'\n", 471 | "birth_file = requests.get(birthdata_url)\n", 472 | "\n", 473 | "birth_data = birth_file.text.split('\\r\\n')[5:]\n", 474 | "\n", 475 | "print birth_data" 476 | ] 477 | }, 478 | { 479 | "cell_type": "code", 480 | "execution_count": null, 481 | 
"metadata": {}, 482 | "outputs": [], 483 | "source": [] 484 | } 485 | ], 486 | "metadata": { 487 | "kernelspec": { 488 | "display_name": "Python 2", 489 | "language": "python", 490 | "name": "python2" 491 | }, 492 | "language_info": { 493 | "codemirror_mode": { 494 | "name": "ipython", 495 | "version": 2 496 | }, 497 | "file_extension": ".py", 498 | "mimetype": "text/x-python", 499 | "name": "python", 500 | "nbconvert_exporter": "python", 501 | "pygments_lexer": "ipython2", 502 | "version": "2.7.10" 503 | } 504 | }, 505 | "nbformat": 4, 506 | "nbformat_minor": 2 507 | } 508 | -------------------------------------------------------------------------------- /Tensorflow/Learn_TF/notebook-ch2.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "# Config the matlotlib backend as plotting inline in IPython\n", 10 | "%matplotlib inline\n", 11 | "%load_ext autoreload\n", 12 | "%autoreload 2" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": null, 18 | "metadata": {}, 19 | "outputs": [], 20 | "source": [ 21 | "# start afresh\n", 22 | "# book TF ML cookbook by Nick McClure" 23 | ] 24 | }, 25 | { 26 | "cell_type": "code", 27 | "execution_count": 1, 28 | "metadata": {}, 29 | "outputs": [], 30 | "source": [ 31 | "import tensorflow as tf\n", 32 | "import numpy as np" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": 5, 38 | "metadata": {}, 39 | "outputs": [ 40 | { 41 | "name": "stdout", 42 | "output_type": "stream", 43 | "text": [ 44 | "6.0\n", 45 | "9.0\n", 46 | "12.0\n" 47 | ] 48 | } 49 | ], 50 | "source": [ 51 | "x_vals = np.array([2,3,4])\n", 52 | "x_data = tf.placeholder(tf.float32)\n", 53 | "m_const = tf.constant(3.0)\n", 54 | "\n", 55 | "my_product = tf.multiply(x_data, m_const)\n", 56 | "\n", 57 | "sess = tf.Session()\n", 58 | "\n", 59 | "for val in 
x_vals:\n", 60 | " print(sess.run(my_product, feed_dict={x_data:val}))" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": 22, 66 | "metadata": {}, 67 | "outputs": [ 68 | { 69 | "name": "stdout", 70 | "output_type": "stream", 71 | "text": [ 72 | "(3, 5)\n", 73 | "(5, 1)\n", 74 | "(3, 1)\n", 75 | "(3, 1)\n", 76 | "(1, 1)\n", 77 | "(3, 1)\n", 78 | "(3, 1)\n", 79 | "(1, 1)\n", 80 | "(3, 1)\n" 81 | ] 82 | } 83 | ], 84 | "source": [ 85 | "my_array = np.array([[1.,3.,5.,7.,9.],[-2.,0.,2.,4.,6.],[-6.,-3.,0.,3.,6.]])\n", 86 | "\n", 87 | "x_vals = np.array([my_array, my_array+1])\n", 88 | "\n", 89 | "x_data = tf.placeholder(tf.float32, shape=(3,5))\n", 90 | "\n", 91 | "m1 = tf.constant([[1.], [0.], [-1.], [2.], [4.]])\n", 92 | "m2 = tf.constant([[2.]])\n", 93 | "a1 = tf.constant([[10.]])\n", 94 | "\n", 95 | "print(x_data.shape)\n", 96 | "print(m1.shape)\n", 97 | "prod1 = tf.matmul(x_data, m1)\n", 98 | "print(prod1.shape)\n", 99 | "\n", 100 | "prod2 = tf.matmul(prod1, m2)\n", 101 | "print(prod1.shape)\n", 102 | "print(m2.shape)\n", 103 | "print(prod2.shape)\n", 104 | "\n", 105 | "add1 = tf.add(prod2, a1)\n", 106 | "print(prod2.shape)\n", 107 | "print(a1.shape)\n", 108 | "print(add1.shape)\n", 109 | "\n", 110 | "\n" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": 23, 116 | "metadata": {}, 117 | "outputs": [ 118 | { 119 | "name": "stdout", 120 | "output_type": "stream", 121 | "text": [ 122 | "(2, 3, 5)\n", 123 | "[[102.]\n", 124 | " [ 66.]\n", 125 | " [ 58.]]\n", 126 | "[[114.]\n", 127 | " [ 78.]\n", 128 | " [ 70.]]\n" 129 | ] 130 | } 131 | ], 132 | "source": [ 133 | "print x_vals.shape\n", 134 | "\n", 135 | "for x_val in x_vals:\n", 136 | " print(sess.run(add1, feed_dict={x_data:x_val}))" 137 | ] 138 | }, 139 | { 140 | "cell_type": "code", 141 | "execution_count": null, 142 | "metadata": {}, 143 | "outputs": [], 144 | "source": [ 145 | "x_data = tf.placeholder(tf.float32, shape=(3, None))" 146 | ] 147 | }, 148 | { 149 | 
"cell_type": "code", 150 | "execution_count": null, 151 | "metadata": {}, 152 | "outputs": [], 153 | "source": [] 154 | }, 155 | { 156 | "cell_type": "code", 157 | "execution_count": 24, 158 | "metadata": {}, 159 | "outputs": [], 160 | "source": [ 161 | "import tensorflow as tf\n", 162 | "import numpy as np\n", 163 | "\n", 164 | "sess = tf.Session()\n", 165 | "\n", 166 | "x_shape = [1,4,4,1]\n", 167 | "\n", 168 | "x_val = np.random.uniform(size=x_shape)\n", 169 | "\n", 170 | "x_data = tf.placeholder(tf.float32, shape=x_shape)\n", 171 | "\n", 172 | "\n" 173 | ] 174 | }, 175 | { 176 | "cell_type": "code", 177 | "execution_count": 25, 178 | "metadata": {}, 179 | "outputs": [], 180 | "source": [ 181 | "my_filter = tf.constant(0.25, shape=[2,2,1,1])\n", 182 | "my_strides = [1,2,2,1]" 183 | ] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "execution_count": 28, 188 | "metadata": {}, 189 | "outputs": [ 190 | { 191 | "data": { 192 | "text/plain": [ 193 | "TensorShape([Dimension(1), Dimension(2), Dimension(2), Dimension(1)])" 194 | ] 195 | }, 196 | "execution_count": 28, 197 | "metadata": {}, 198 | "output_type": "execute_result" 199 | } 200 | ], 201 | "source": [ 202 | "# Given an input tensor of shape [batch, in_height, in_width, in_channels] and a filter / kernel tensor of \n", 203 | "# shape [filter_height, filter_width, in_channels, out_channels], this op performs the following:\n", 204 | "\n", 205 | "#Flattens the filter to a 2-D matrix with shape [filter_height * filter_width * in_channels, output_channels].\n", 206 | "#Extracts image patches from the input tensor to form a virtual tensor of shape [batch, out_height, out_width, filter_height * filter_width * in_channels].\n", 207 | "#For each patch, right-multiplies the filter matrix and the image patch vector.\n", 208 | "\n", 209 | "mov_avg_layer = tf.nn.conv2d(x_data, my_filter, my_strides, padding = 'SAME', name='moving_avg_window')\n", 210 | "\n", 211 | "mov_avg_layer.shape" 212 | ] 213 | }, 214 | { 215 | 
"cell_type": "code", 216 | "execution_count": 29, 217 | "metadata": {}, 218 | "outputs": [], 219 | "source": [ 220 | "def custom_layer(input_matrix):\n", 221 | " input_matrix_squeezed = tf.squeeze(input_matrix)\n", 222 | " A = tf.constant([[1.,2.], [-1.,3.]])\n", 223 | " b = tf.constant(1., shape=[2,2])\n", 224 | " \n", 225 | " temp1 = tf.matmul(A, input_matrix_squeezed)\n", 226 | " temp = tf.add(temp1,b)\n", 227 | " \n", 228 | " return(tf.sigmoid(temp))" 229 | ] 230 | }, 231 | { 232 | "cell_type": "code", 233 | "execution_count": null, 234 | "metadata": {}, 235 | "outputs": [], 236 | "source": [] 237 | }, 238 | { 239 | "cell_type": "code", 240 | "execution_count": 30, 241 | "metadata": {}, 242 | "outputs": [], 243 | "source": [ 244 | "with tf.name_scope('Custom_Layer') as scope:\n", 245 | " custom_layer1 = custom_layer(mov_avg_layer)\n", 246 | " " 247 | ] 248 | }, 249 | { 250 | "cell_type": "code", 251 | "execution_count": 31, 252 | "metadata": {}, 253 | "outputs": [ 254 | { 255 | "name": "stdout", 256 | "output_type": "stream", 257 | "text": [ 258 | "[[0.8951406 0.8800743 ]\n", 259 | " [0.87575036 0.785013 ]]\n" 260 | ] 261 | } 262 | ], 263 | "source": [ 264 | "print(sess.run(custom_layer1, feed_dict={x_data:x_val}))" 265 | ] 266 | }, 267 | { 268 | "cell_type": "code", 269 | "execution_count": null, 270 | "metadata": {}, 271 | "outputs": [], 272 | "source": [] 273 | } 274 | ], 275 | "metadata": { 276 | "kernelspec": { 277 | "display_name": "Python 3", 278 | "language": "python", 279 | "name": "python3" 280 | }, 281 | "language_info": { 282 | "codemirror_mode": { 283 | "name": "ipython", 284 | "version": 3 285 | }, 286 | "file_extension": ".py", 287 | "mimetype": "text/x-python", 288 | "name": "python", 289 | "nbconvert_exporter": "python", 290 | "pygments_lexer": "ipython3", 291 | "version": "3.5.4" 292 | } 293 | }, 294 | "nbformat": 4, 295 | "nbformat_minor": 2 296 | } 297 | -------------------------------------------------------------------------------- 
/requirements.txt: -------------------------------------------------------------------------------- 1 | boto==2.39.0 2 | bz2file==0.98 3 | cycler==0.10.0 4 | gensim==0.12.4 5 | httpretty==0.8.10 6 | Keras==0.3.2 7 | matplotlib==1.5.1 8 | numpy==1.10.4 9 | pandas==0.17.1 10 | pyparsing==2.1.0 11 | python-dateutil==2.4.2 12 | pytz==2015.7 13 | PyYAML==3.11 14 | requests==2.8.1 15 | scikit-learn==0.17.1 16 | scipy==0.17.0 17 | seaborn==0.7.0 18 | six==1.10.0 19 | sklearn==0.0 20 | smart-open==1.3.2 21 | statsmodels==0.6.1 22 | Theano==0.7.0 23 | -------------------------------------------------------------------------------- /requirements1.txt: -------------------------------------------------------------------------------- 1 | backports-abc==0.5 2 | backports.functools-lru-cache==1.4 3 | backports.shutil-get-terminal-size==1.0.0 4 | backports.weakref==1.0.post1 5 | bleach==1.5.0 6 | certifi==2017.11.5 7 | configparser==3.5.0 8 | cycler==0.10.0 9 | decorator==4.1.2 10 | entrypoints==0.2.3 11 | enum34==1.1.6 12 | funcsigs==1.0.2 13 | functools32==3.2.3.post2 14 | futures==3.2.0 15 | graphviz==0.8.2 16 | h5py==2.7.1 17 | html5lib==0.9999999 18 | ipykernel==4.7.0 19 | ipython==5.5.0 20 | ipython-genutils==0.2.0 21 | ipywidgets==7.1.0 22 | Jinja2==2.10 23 | jsonschema==2.6.0 24 | jupyter==1.0.0 25 | jupyter-client==5.2.1 26 | jupyter-console==5.2.0 27 | jupyter-core==4.4.0 28 | Keras==2.0.0 29 | Markdown==2.6.11 30 | MarkupSafe==1.0 31 | matplotlib==2.1.1 32 | mistune==0.8.3 33 | mock==2.0.0 34 | nbconvert==5.3.1 35 | nbformat==4.4.0 36 | notebook==5.2.2 37 | numpy==1.14.0 38 | pandas==0.22.0 39 | pandocfilters==1.4.2 40 | pathlib2==2.3.0 41 | pbr==3.1.1 42 | pexpect==4.3.1 43 | pickleshare==0.7.4 44 | Pillow==5.0.0 45 | prompt-toolkit==1.0.15 46 | protobuf==3.5.1 47 | ptyprocess==0.5.2 48 | pydot-ng==1.0.0 49 | Pygments==2.2.0 50 | pyparsing==2.2.0 51 | python-dateutil==2.6.1 52 | pytz==2017.3 53 | PyYAML==3.12 54 | pyzmq==16.0.3 55 | qtconsole==4.3.1 56 | 
request==0.0.22 57 | scandir==1.6 58 | scikit-learn==0.19.1 59 | scipy==1.0.0 60 | simplegeneric==0.8.1 61 | singledispatch==3.4.0.3 62 | six==1.11.0 63 | sklearn==0.0 64 | subprocess32==3.2.7 65 | tensorflow==1.4.1 66 | tensorflow-tensorboard==0.4.0rc3 67 | terminado==0.8.1 68 | testpath==0.3.1 69 | tornado==4.5.3 70 | traitlets==4.3.2 71 | wcwidth==0.1.7 72 | Werkzeug==0.14.1 73 | widgetsnbextension==3.1.0 74 | --------------------------------------------------------------------------------