└── fcc_cat_dog.ipynb /fcc_cat_dog.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": { 7 | "id": "la_Oz6oLlub6" 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "try:\n", 12 | " # This command only works in Colab.\n", 13 | " %tensorflow_version 2.x\n", 14 | "except Exception:\n", 15 | " pass\n", 16 | "import tensorflow as tf\n", 17 | "\n", 18 | "from tensorflow.keras.models import Sequential\n", 19 | "from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D\n", 20 | "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n", 21 | "\n", 22 | "import os\n", 23 | "import numpy as np\n", 24 | "import matplotlib.pyplot as plt" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": null, 30 | "metadata": { 31 | "id": "jaF8r6aOl48C" 32 | }, 33 | "outputs": [], 34 | "source": [ 35 | "# Get project files\n", 36 | "!wget https://cdn.freecodecamp.org/project-data/cats-and-dogs/cats_and_dogs.zip\n", 37 | "\n", 38 | "!unzip cats_and_dogs.zip\n", 39 | "\n", 40 | "PATH = 'cats_and_dogs'\n", 41 | "\n", 42 | "train_dir = os.path.join(PATH, 'train')\n", 43 | "validation_dir = os.path.join(PATH, 'validation')\n", 44 | "test_dir = os.path.join(PATH, 'test')\n", 45 | "\n", 46 | "# Get number of files in each directory. The train and validation directories\n", 47 | "# each have the subdirectories \"dogs\" and \"cats\".\n", 48 | "total_train = sum([len(files) for r, d, files in os.walk(train_dir)])\n", 49 | "total_val = sum([len(files) for r, d, files in os.walk(validation_dir)])\n", 50 | "total_test = len(os.listdir(test_dir))\n", 51 | "\n", 52 | "# Variables for pre-processing and training.\n", 53 | "batch_size = 128\n", 54 | "epochs = 15\n", 55 | "IMG_HEIGHT = 150\n", 56 | "IMG_WIDTH = 150" 57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": null, 62 | "metadata": { 63 | "id": "EOJFeEfumns6" 64 | }, 65 | "outputs": [], 66 | "source": [ 67 | "# 3\n", 68 | "train_image_generator = None\n", 69 | "validation_image_generator = None\n", 70 | "test_image_generator = None\n", 71 | "\n", 72 | "train_data_gen = None\n", 73 | "val_data_gen = None\n", 74 | "test_data_gen = None" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | "execution_count": null, 80 | "metadata": { 81 | "id": "TP0WA8j1mt7Q" 82 | }, 83 | "outputs": [], 84 | "source": [ 85 | "# 4\n", 86 | "def plotImages(images_arr, probabilities = False):\n", 87 | " fig, axes = plt.subplots(len(images_arr), 1, figsize=(5,len(images_arr) * 3))\n", 88 | " if probabilities is False:\n", 89 | " for img, ax in zip( images_arr, axes):\n", 90 | " ax.imshow(img)\n", 91 | " ax.axis('off')\n", 92 | " else:\n", 93 | " for img, probability, ax in zip( images_arr, probabilities, axes):\n", 94 | " ax.imshow(img)\n", 95 | " ax.axis('off')\n", 96 | " if probability > 0.5:\n", 97 | " ax.set_title(\"%.2f\" % (probability*100) + \"% dog\")\n", 98 | " else:\n", 99 | " ax.set_title(\"%.2f\" % ((1-probability)*100) + \"% cat\")\n", 100 | " plt.show()\n", 101 | "\n", 102 | "sample_training_images, _ = next(train_data_gen)\n", 103 | "plotImages(sample_training_images[:5])\n" 104 | ] 105 | }, 106 | { 107 | "cell_type": "code", 108 | "execution_count": null, 109 | "metadata": { 110 | "id": "-32RRLY_3voj" 111 | }, 112 | "outputs": [], 113 | "source": [ 114 | "# 5\n", 115 | "train_image_generator = None\n" 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": null, 121
| "metadata": { 122 | "id": "pkwq2LFvqabS" 123 | }, 124 | "outputs": [], 125 | "source": [ 126 | "# 6\n", 127 | "train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,\n", 128 | " directory=train_dir,\n", 129 | " target_size=(IMG_HEIGHT, IMG_WIDTH),\n", 130 | " class_mode='binary')\n", 131 | "\n", 132 | "augmented_images = [train_data_gen[0][0][0] for i in range(5)]\n", 133 | "\n", 134 | "plotImages(augmented_images)" 135 | ] 136 | }, 137 | { 138 | "cell_type": "code", 139 | "execution_count": null, 140 | "metadata": { 141 | "id": "k8aZkwMam4UY" 142 | }, 143 | "outputs": [], 144 | "source": [ 145 | "# 7\n", 146 | "model = Sequential()\n", 147 | "\n", 148 | "\n", 149 | "\n", 150 | "\n", 151 | "\n", 152 | "\n", 153 | "\n", 154 | "\n", 155 | "model.summary()" 156 | ] 157 | }, 158 | { 159 | "cell_type": "code", 160 | "execution_count": null, 161 | "metadata": { 162 | "id": "1niQDz5x6K7y" 163 | }, 164 | "outputs": [], 165 | "source": [ 166 | "# 8\n", 167 | "history = None" 168 | ] 169 | }, 170 | { 171 | "cell_type": "code", 172 | "execution_count": null, 173 | "metadata": { 174 | "id": "5xS51mB56OAC" 175 | }, 176 | "outputs": [], 177 | "source": [ 178 | "# 9\n", 179 | "acc = history.history['accuracy']\n", 180 | "val_acc = history.history['val_accuracy']\n", 181 | "\n", 182 | "loss = history.history['loss']\n", 183 | "val_loss = history.history['val_loss']\n", 184 | "\n", 185 | "epochs_range = range(epochs)\n", 186 | "\n", 187 | "plt.figure(figsize=(8, 8))\n", 188 | "plt.subplot(1, 2, 1)\n", 189 | "plt.plot(epochs_range, acc, label='Training Accuracy')\n", 190 | "plt.plot(epochs_range, val_acc, label='Validation Accuracy')\n", 191 | "plt.legend(loc='lower right')\n", 192 | "plt.title('Training and Validation Accuracy')\n", 193 | "\n", 194 | "plt.subplot(1, 2, 2)\n", 195 | "plt.plot(epochs_range, loss, label='Training Loss')\n", 196 | "plt.plot(epochs_range, val_loss, label='Validation Loss')\n", 197 | "plt.legend(loc='upper right')\n", 198 | "plt.title('Training and Validation Loss')\n", 199 | "plt.show()" 200 | ] 201 | }, 202 | { 203 | "cell_type": "code", 204 | "execution_count": null, 205 | "metadata": { 206 | "id": "vYrSifOit2aK" 207 | }, 208 | "outputs": [], 209 | "source": [] 210 | }, 211 | { 212 | "cell_type": "code", 213 | "execution_count": null, 214 | "metadata": {}, 215 | "outputs": [], 216 | "source": [] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "execution_count": null, 221 | "metadata": { 222 | "id": "4IH86Ux_u7TZ" 223 | }, 224 | "outputs": [], 225 | "source": [ 226 | "# 11\n", 227 | "answers = [1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0,\n", 228 | " 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0,\n", 229 | " 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1,\n", 230 | " 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, \n", 231 | " 0, 0, 0, 0, 0, 0]\n", 232 | "\n", 233 | "correct = 0\n", 234 | "\n", 235 | "for probability, answer in zip(probabilities, answers):\n", 236 | " if round(probability) == answer:\n", 237 | " correct +=1\n", 238 | "\n", 239 | "percentage_identified = (correct / len(answers)) * 100\n", 240 | "\n", 241 | "passed_challenge = percentage_identified >= 63\n", 242 | "\n", 243 | "print(f\"Your model correctly identified {round(percentage_identified, 2)}% of the images of cats and dogs.\")\n", 244 | "\n", 245 | "if passed_challenge:\n", 246 | " print(\"You passed the challenge!\")\n", 247 | "else:\n", 248 | " print(\"You haven't passed yet. Your model should identify at least 63% of the images. Keep trying. 
You will get it!\")" 249 | ] 250 | } 251 | ], 252 | "metadata": { 253 | "colab": { 254 | "collapsed_sections": [], 255 | "name": "fcc_cat_dog.ipynb", 256 | "provenance": [] 257 | }, 258 | "kernelspec": { 259 | "display_name": "Python 3", 260 | "name": "python3" 261 | } 262 | }, 263 | "nbformat": 4, 264 | "nbformat_minor": 0 265 | } 266 | --------------------------------------------------------------------------------
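The generator cells (cells 3, 5 and 6) are left as None placeholders for the learner to fill in. Below is a minimal sketch of one possible fill, not the official freeCodeCamp solution: it assumes the directory layout created by cats_and_dogs.zip (train/ and validation/ each containing cats/ and dogs/ subdirectories, plus a flat test/ folder of unlabeled images) and reuses the variables defined in cell 2.

# Cell 3 (sketch): rescale-only generators plus the three directory iterators.
train_image_generator = ImageDataGenerator(rescale=1./255)
validation_image_generator = ImageDataGenerator(rescale=1./255)
test_image_generator = ImageDataGenerator(rescale=1./255)

train_data_gen = train_image_generator.flow_from_directory(
    directory=train_dir, batch_size=batch_size,
    target_size=(IMG_HEIGHT, IMG_WIDTH), class_mode='binary')
val_data_gen = validation_image_generator.flow_from_directory(
    directory=validation_dir, batch_size=batch_size,
    target_size=(IMG_HEIGHT, IMG_WIDTH), class_mode='binary')
# The test images sit directly in test/ (total_test counts plain files), so
# point flow_from_directory at PATH and name 'test' as the single class
# folder; shuffle=False keeps predictions aligned with cell 11's answers.
test_data_gen = test_image_generator.flow_from_directory(
    directory=PATH, classes=['test'], batch_size=batch_size,
    target_size=(IMG_HEIGHT, IMG_WIDTH), class_mode=None, shuffle=False)

# Cell 5 (sketch): recreate the training generator with random augmentation;
# cell 6 in the notebook then rebuilds train_data_gen from it.
train_image_generator = ImageDataGenerator(
    rescale=1./255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)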
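Cells 7 and 8 likewise expect the learner to build and train the network. A hedged sketch of one workable fill follows; the layer sizes are illustrative choices rather than values prescribed by the project, and the compile settings are picked so that the history keys read by cell 9 ('accuracy', 'val_accuracy', 'loss', 'val_loss') exist.

# Cell 7 (sketch): a small CNN ending in a single sigmoid unit, which matches
# class_mode='binary' in the training and validation generators above.
model = Sequential([
    Conv2D(32, 3, activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    MaxPooling2D(),
    Conv2D(64, 3, activation='relu'),
    MaxPooling2D(),
    Conv2D(128, 3, activation='relu'),
    MaxPooling2D(),
    Dropout(0.3),
    Flatten(),
    Dense(256, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.summary()

# Cell 8 (sketch): train and keep the History object that cell 9 plots.
history = model.fit(train_data_gen,
                    steps_per_epoch=total_train // batch_size,
                    epochs=epochs,
                    validation_data=val_data_gen,
                    validation_steps=total_val // batch_size)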
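Cell 11 compares a `probabilities` sequence against its hard-coded answers, but the notebook never defines that variable; the empty cell just before it (step 10) is where predictions belong. A sketch under the assumptions made above (test_data_gen built with class_mode=None and shuffle=False, and batch_size larger than the 50 test images):

# Step 10 (sketch): predict a dog-probability for each test image and
# visualise the results with the plotImages helper from cell 4.
probabilities = model.predict(test_data_gen).flatten()

# With class_mode=None the iterator yields images only, and the first batch
# holds the whole test set because batch_size (128) exceeds the 50 images.
test_images = test_data_gen[0]
plotImages(test_images, probabilities)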