├── Faceapp_with_cyclegan.ipynb ├── LICENSE └── README.md /Faceapp_with_cyclegan.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "kernelspec": { 6 | "display_name": "Python 3", 7 | "language": "python", 8 | "name": "python3" 9 | }, 10 | "language_info": { 11 | "codemirror_mode": { 12 | "name": "ipython", 13 | "version": 3 14 | }, 15 | "file_extension": ".py", 16 | "mimetype": "text/x-python", 17 | "name": "python", 18 | "nbconvert_exporter": "python", 19 | "pygments_lexer": "ipython3", 20 | "version": "3.6.8" 21 | }, 22 | "colab": { 23 | "name": "Faceapp with cyclegan.ipynb", 24 | "provenance": [] 25 | } 26 | }, 27 | "cells": [ 28 | { 29 | "cell_type": "markdown", 30 | "metadata": { 31 | "id": "Z6JknUuqL6r_", 32 | "colab_type": "text" 33 | }, 34 | "source": [ 35 | "Download data" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "metadata": { 41 | "id": "rqB8gqq5L6sD", 42 | "colab_type": "code", 43 | "colab": {} 44 | }, 45 | "source": [ 46 | "import requests\n", 47 | "\n", 48 | "def download_file_from_google_drive(id, destination):\n", 49 | " URL = \"https://docs.google.com/uc?export=download\"\n", 50 | "\n", 51 | " session = requests.Session()\n", 52 | "\n", 53 | " response = session.get(URL, params = { 'id' : id }, stream = True)\n", 54 | " token = get_confirm_token(response)\n", 55 | "\n", 56 | " if token:\n", 57 | " params = { 'id' : id, 'confirm' : token }\n", 58 | " response = session.get(URL, params = params, stream = True)\n", 59 | "\n", 60 | " save_response_content(response, destination) \n", 61 | "\n", 62 | "def get_confirm_token(response):\n", 63 | " for key, value in response.cookies.items():\n", 64 | " if key.startswith('download_warning'):\n", 65 | " return value\n", 66 | "\n", 67 | " return None\n", 68 | "\n", 69 | "def save_response_content(response, destination):\n", 70 | " CHUNK_SIZE = 32768\n", 71 | "\n", 72 | " with open(destination, \"wb\") as f:\n", 73 | " for chunk in response.iter_content(CHUNK_SIZE):\n", 74 | " if chunk: # filter out keep-alive new chunks\n", 75 | " f.write(chunk)\n", 76 | "\n", 77 | "if __name__ == \"__main__\":\n", 78 | " file_id = 'TAKE ID FROM SHAREABLE LINK'\n", 79 | " destination = 'DESTINATION FILE ON YOUR DISK'\n", 80 | " download_file_from_google_drive(\"0BxYys69jI14kYVM3aVhKS1VhRUk\", \"UTKFace.tar.gz\")" 81 | ], 82 | "execution_count": 0, 83 | "outputs": [] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "metadata": { 88 | "id": "jG__fcyyL6sL", 89 | "colab_type": "text" 90 | }, 91 | "source": [ 92 | "Install dependancies" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "metadata": { 98 | "id": "zAMUuuzLL6sM", 99 | "colab_type": "code", 100 | "colab": {}, 101 | "outputId": "21f9b245-d402-44b2-8542-5af6de834b6e" 102 | }, 103 | "source": [ 104 | "!pip install opencv-python\n", 105 | "!apt update && apt install -y libsm6 libxext6 libxrender-dev" 106 | ], 107 | "execution_count": 0, 108 | "outputs": [ 109 | { 110 | "output_type": "stream", 111 | "text": [ 112 | "Requirement already satisfied: opencv-python in /usr/local/lib/python3.6/dist-packages (4.1.1.26)\n", 113 | "Requirement already satisfied: numpy>=1.11.3 in /usr/local/lib/python3.6/dist-packages (from opencv-python) (1.17.0)\n", 114 | "\u001b[33mWARNING: You are using pip version 19.2.1, however version 19.3.1 is available.\n", 115 | "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n", 116 | "Hit:1 http://security.ubuntu.com/ubuntu bionic-security InRelease\n", 117 | "Hit:2 http://archive.ubuntu.com/ubuntu bionic InRelease \u001b[0m\n", 118 | "Hit:3 http://ppa.launchpad.net/deadsnakes/ppa/ubuntu bionic InRelease \u001b[0m\n", 119 | "Hit:4 http://archive.ubuntu.com/ubuntu bionic-updates InRelease \u001b[33m\n", 120 | "Hit:5 http://archive.ubuntu.com/ubuntu bionic-backports InRelease\n", 121 | "Reading package lists... Done\u001b[0m\u001b[33m \u001b[33m\u001b[33m\n", 122 | "Building dependency tree \n", 123 | "Reading state information... Done\n", 124 | "56 packages can be upgraded. Run 'apt list --upgradable' to see them.\n", 125 | "Reading package lists... Done\n", 126 | "Building dependency tree \n", 127 | "Reading state information... Done\n", 128 | "libsm6 is already the newest version (2:1.2.2-1).\n", 129 | "libxext6 is already the newest version (2:1.3.3-1).\n", 130 | "libxrender-dev is already the newest version (1:0.9.10-1).\n", 131 | "0 upgraded, 0 newly installed, 0 to remove and 56 not upgraded.\n" 132 | ], 133 | "name": "stdout" 134 | } 135 | ] 136 | }, 137 | { 138 | "cell_type": "code", 139 | "metadata": { 140 | "id": "2u_PltrlL6sR", 141 | "colab_type": "code", 142 | "colab": {}, 143 | "outputId": "678d1c71-d2e1-4f8f-ed8b-95b43140c286" 144 | }, 145 | "source": [ 146 | "!pip install requests" 147 | ], 148 | "execution_count": 0, 149 | "outputs": [ 150 | { 151 | "output_type": "stream", 152 | "text": [ 153 | "Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (2.22.0)\n", 154 | "Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests) (1.25.6)\n", 155 | "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests) (2019.9.11)\n", 156 | "Requirement already satisfied: chardet<3.1.0,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests) (3.0.4)\n", 157 | "Requirement already satisfied: idna<2.9,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests) (2.8)\n", 158 | "\u001b[33mWARNING: You are using pip version 19.2.1, however version 19.3.1 is available.\n", 159 | "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n" 160 | ], 161 | "name": "stdout" 162 | } 163 | ] 164 | }, 165 | { 166 | "cell_type": "code", 167 | "metadata": { 168 | "id": "ggA3CBobL6sV", 169 | "colab_type": "code", 170 | "colab": {}, 171 | "outputId": "2a621fcb-9c29-44a7-87b1-eaa302e69b1d" 172 | }, 173 | "source": [ 174 | "!pip install keras" 175 | ], 176 | "execution_count": 0, 177 | "outputs": [ 178 | { 179 | "output_type": "stream", 180 | "text": [ 181 | "Requirement already satisfied: keras in /usr/local/lib/python3.6/dist-packages (2.3.1)\n", 182 | "Requirement already satisfied: numpy>=1.9.1 in /usr/local/lib/python3.6/dist-packages (from keras) (1.17.0)\n", 183 | "Requirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from keras) (1.12.0)\n", 184 | "Requirement already satisfied: pyyaml in /usr/local/lib/python3.6/dist-packages (from keras) (5.1.2)\n", 185 | "Requirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from keras) (2.9.0)\n", 186 | "Requirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from keras) (1.1.0)\n", 187 | "Requirement already satisfied: scipy>=0.14 in /usr/local/lib/python3.6/dist-packages (from keras) (1.0.0)\n", 188 | "Requirement already satisfied: keras-applications>=1.0.6 in /usr/local/lib/python3.6/dist-packages (from keras) (1.0.8)\n", 189 | "\u001b[33mWARNING: You are using pip version 19.2.1, however version 19.3.1 is available.\n", 190 | "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n" 191 | ], 192 | "name": "stdout" 193 | } 194 | ] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "metadata": { 199 | "id": "HGD_V5WTL6sZ", 200 | "colab_type": "code", 201 | "colab": {}, 202 | "outputId": "d5429567-4cc5-4761-c1d9-2a4d9f5d1a25" 203 | }, 204 | "source": [ 205 | "!pip install imageio" 206 | ], 207 | "execution_count": 0, 208 | "outputs": [ 209 | { 210 | "output_type": "stream", 211 | "text": [ 212 | "Requirement already satisfied: imageio in /usr/local/lib/python3.6/dist-packages (2.6.1)\n", 213 | "Requirement already satisfied: pillow in /usr/local/lib/python3.6/dist-packages (from imageio) (6.2.1)\n", 214 | "Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from imageio) (1.17.0)\n", 215 | "\u001b[33mWARNING: You are using pip version 19.2.1, however version 19.3.1 is available.\n", 216 | "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n" 217 | ], 218 | "name": "stdout" 219 | } 220 | ] 221 | }, 222 | { 223 | "cell_type": "code", 224 | "metadata": { 225 | "id": "kH2Y-2hUL6sf", 226 | "colab_type": "code", 227 | "colab": {}, 228 | "outputId": "e8f49b50-2fb8-413b-c7fe-a018a261576a" 229 | }, 230 | "source": [ 231 | "pip install scikit-image" 232 | ], 233 | "execution_count": 0, 234 | "outputs": [ 235 | { 236 | "output_type": "stream", 237 | "text": [ 238 | "Requirement already satisfied: scikit-image in /usr/local/lib/python3.6/dist-packages (0.16.2)\n", 239 | "Requirement already satisfied: PyWavelets>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image) (1.1.1)\n", 240 | "Requirement already satisfied: imageio>=2.3.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image) (2.6.1)\n", 241 | "Requirement already satisfied: pillow>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image) (6.2.1)\n", 242 | "Requirement already satisfied: scipy>=0.19.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image) (1.0.0)\n", 243 | "Requirement already satisfied: matplotlib!=3.0.0,>=2.0.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image) (3.1.1)\n", 244 | "Requirement already satisfied: networkx>=2.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image) (2.4)\n", 245 | "Requirement already satisfied: numpy>=1.13.3 in /usr/local/lib/python3.6/dist-packages (from PyWavelets>=0.4.0->scikit-image) (1.17.0)\n", 246 | "Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image) (2.8.0)\n", 247 | "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image) (1.1.0)\n", 248 | "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image) (0.10.0)\n", 249 | "Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.0.0,>=2.0.0->scikit-image) (2.4.2)\n", 250 | "Requirement already satisfied: decorator>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from networkx>=2.0->scikit-image) (4.4.0)\n", 251 | "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.1->matplotlib!=3.0.0,>=2.0.0->scikit-image) (1.12.0)\n", 252 | "Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from kiwisolver>=1.0.1->matplotlib!=3.0.0,>=2.0.0->scikit-image) (41.0.1)\n", 253 | "\u001b[33mWARNING: You are using pip version 19.2.1, however version 19.3.1 is available.\n", 254 | "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n", 255 | "Note: you may need to restart the kernel to use updated packages.\n" 256 | ], 257 | "name": "stdout" 258 | } 259 | ] 260 | }, 261 | { 262 | "cell_type": "code", 263 | "metadata": { 264 | "id": "evmDo4lEL6si", 265 | "colab_type": "code", 266 | "colab": {}, 267 | "outputId": "efed6cee-164e-4fd9-8ec2-d745735e42db" 268 | }, 269 | "source": [ 270 | "pip install git+https://www.github.com/keras-team/keras-contrib.git" 271 | ], 272 | "execution_count": 0, 273 | "outputs": [ 274 | { 275 | "output_type": "stream", 276 | "text": [ 277 | "Collecting git+https://www.github.com/keras-team/keras-contrib.git\n", 278 | " Cloning https://www.github.com/keras-team/keras-contrib.git to /tmp/pip-req-build-cb02u0tq\n", 279 | " Running command git clone -q https://www.github.com/keras-team/keras-contrib.git /tmp/pip-req-build-cb02u0tq\n", 280 | "Requirement already satisfied (use --upgrade to upgrade): keras-contrib==2.0.8 from git+https://www.github.com/keras-team/keras-contrib.git in /usr/local/lib/python3.6/dist-packages\n", 281 | "Requirement already satisfied: keras in /usr/local/lib/python3.6/dist-packages (from keras-contrib==2.0.8) (2.3.1)\n", 282 | "Requirement already satisfied: numpy>=1.9.1 in /usr/local/lib/python3.6/dist-packages (from keras->keras-contrib==2.0.8) (1.17.0)\n", 283 | "Requirement already satisfied: scipy>=0.14 in /usr/local/lib/python3.6/dist-packages (from keras->keras-contrib==2.0.8) (1.0.0)\n", 284 | "Requirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.6/dist-packages (from keras->keras-contrib==2.0.8) (1.12.0)\n", 285 | "Requirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from keras->keras-contrib==2.0.8) (2.9.0)\n", 286 | "Requirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from keras->keras-contrib==2.0.8) (1.1.0)\n", 287 | "Requirement already satisfied: pyyaml in /usr/local/lib/python3.6/dist-packages (from keras->keras-contrib==2.0.8) (5.1.2)\n", 288 | "Requirement already satisfied: keras-applications>=1.0.6 in /usr/local/lib/python3.6/dist-packages (from keras->keras-contrib==2.0.8) (1.0.8)\n", 289 | "Building wheels for collected packages: keras-contrib\n", 290 | " Building wheel for keras-contrib (setup.py) ... \u001b[?25ldone\n", 291 | "\u001b[?25h Created wheel for keras-contrib: filename=keras_contrib-2.0.8-cp36-none-any.whl size=101066 sha256=68de7423387edc16657b45e20a18f8851d35a22fd6318ec7a9ab9e27fb81f4b4\n", 292 | " Stored in directory: /tmp/pip-ephem-wheel-cache-766jbcky/wheels/11/27/c8/4ed56de7b55f4f61244e2dc6ef3cdbaff2692527a2ce6502ba\n", 293 | "Successfully built keras-contrib\n", 294 | "\u001b[33mWARNING: You are using pip version 19.2.1, however version 19.3.1 is available.\n", 295 | "You should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n", 296 | "Note: you may need to restart the kernel to use updated packages.\n" 297 | ], 298 | "name": "stdout" 299 | } 300 | ] 301 | }, 302 | { 303 | "cell_type": "markdown", 304 | "metadata": { 305 | "id": "70ivFjysL6sn", 306 | "colab_type": "text" 307 | }, 308 | "source": [ 309 | "Arrange data" 310 | ] 311 | }, 312 | { 313 | "cell_type": "code", 314 | "metadata": { 315 | "id": "MylXMrwfL6sp", 316 | "colab_type": "code", 317 | "colab": {}, 318 | "outputId": "62d52642-59aa-41d1-d245-f034c29765f5" 319 | }, 320 | "source": [ 321 | "!gunzip UTKFace.tar.gz\n", 322 | "!tar -xf UTKFace.tar" 323 | ], 324 | "execution_count": 0, 325 | "outputs": [ 326 | { 327 | "output_type": "stream", 328 | "text": [ 329 | "gzip: UTKFace.tar already exists; do you wish to overwrite (y or n)? ^C\n" 330 | ], 331 | "name": "stdout" 332 | } 333 | ] 334 | }, 335 | { 336 | "cell_type": "code", 337 | "metadata": { 338 | "id": "yJPg5-F9L6sv", 339 | "colab_type": "code", 340 | "colab": {}, 341 | "outputId": "ff72b216-fa17-4cd6-a12b-76a2382e11ef" 342 | }, 343 | "source": [ 344 | "!mkdir data\n", 345 | "!mkdir data/trainA data/trainB " 346 | ], 347 | "execution_count": 0, 348 | "outputs": [ 349 | { 350 | "output_type": "stream", 351 | "text": [ 352 | "mkdir: cannot create directory ‘data’: File exists\n", 353 | "mkdir: cannot create directory ‘data/trainA’: File exists\n", 354 | "mkdir: cannot create directory ‘data/trainB’: File exists\n" 355 | ], 356 | "name": "stdout" 357 | } 358 | ] 359 | }, 360 | { 361 | "cell_type": "code", 362 | "metadata": { 363 | "id": "EUosNM-SL6s0", 364 | "colab_type": "code", 365 | "colab": {}, 366 | "outputId": "17e57906-284b-44ee-8585-6ef8f22aeec2" 367 | }, 368 | "source": [ 369 | "import os, shutil\n", 370 | "images = os.listdir(\"UTKFace\")\n", 371 | "for f in images:\n", 372 | " try:\n", 373 | " val = f.split(\"_\")\n", 374 | " age = int(val[0])\n", 375 | " race = int(val[2])\n", 376 | " if(age >=20 and age<=30):\n", 377 | " shutil.copy(\"UTKFace/\"+f, \"data/trainA\")\n", 378 | " if(age >=50 and age<=60):\n", 379 | " shutil.copy(\"UTKFace/\"+f, \"data/trainB\") \n", 380 | " except:\n", 381 | " print(f)" 382 | ], 383 | "execution_count": 0, 384 | "outputs": [ 385 | { 386 | "output_type": "stream", 387 | "text": [ 388 | "61_1_20170109142408075.jpg.chip.jpg\n", 389 | "39_1_20170116174525125.jpg.chip.jpg\n", 390 | "61_1_20170109150557335.jpg.chip.jpg\n" 391 | ], 392 | "name": "stdout" 393 | } 394 | ] 395 | }, 396 | { 397 | "cell_type": "markdown", 398 | "metadata": { 399 | "id": "_kaDdigyL6s3", 400 | "colab_type": "text" 401 | }, 402 | "source": [ 403 | "Dataloader and preprocessing for input images" 404 | ] 405 | }, 406 | { 407 | "cell_type": "code", 408 | "metadata": { 409 | "id": "1BJOKj9gL6s4", 410 | "colab_type": "code", 411 | "colab": {} 412 | }, 413 | "source": [ 414 | "import scipy\n", 415 | "from glob import glob\n", 416 | "import numpy as np\n", 417 | "from skimage.transform import resize\n", 418 | "import imageio\n", 419 | "\n", 420 | "class DataLoader():\n", 421 | " def __init__(self, dataset_name, img_res=(128, 128)):\n", 422 | " self.dataset_name = dataset_name\n", 423 | " self.img_res = img_res\n", 424 | "\n", 425 | " def load_data(self, domain, batch_size=1, is_testing=False):\n", 426 | " path = glob('./%s/%s%s/*' % (self.dataset_name, \"train\", domain))\n", 427 | " #data_type = \"train%s\" % domain if not is_testing else \"test%s\" % domain\n", 428 | " \n", 429 | " \n", 430 | " batch_images = np.random.choice(path, size=batch_size)\n", 431 | " imgs = []\n", 432 | " print(domain, batch_images)\n", 433 | " for img_path in batch_images:\n", 434 | " img = self.imread(img_path)\n", 435 | " if not is_testing:\n", 436 | " img = resize(img, self.img_res)\n", 437 | "\n", 438 | " if np.random.random() > 0.5:\n", 439 | " img = np.fliplr(img)\n", 440 | " else:\n", 441 | " img = resize(img, self.img_res)\n", 442 | " imgs.append(img)\n", 443 | "\n", 444 | " imgs = np.array(imgs)/127.5 - 1.\n", 445 | "\n", 446 | " return imgs\n", 447 | "\n", 448 | " def load_batch(self, batch_size=1, is_testing=False):\n", 449 | " path_A = glob('./%s/%sA/*' % (self.dataset_name, \"train\"))\n", 450 | " path_B = glob('./%s/%sB/*' % (self.dataset_name, \"train\"))\n", 451 | " print(len(path_A), len(path_B))\n", 452 | " self.n_batches = int(min(len(path_A), len(path_B)) / batch_size) \n", 453 | " total_samples = self.n_batches * batch_size\n", 454 | "\n", 455 | " # Sample n_batches * batch_size from each path list so that model sees all\n", 456 | " # samples from both domains\n", 457 | " path_A = np.random.choice(path_A, total_samples, replace=False)\n", 458 | " path_B = np.random.choice(path_B, total_samples, replace=False)\n", 459 | "\n", 460 | " for i in range(self.n_batches-1):\n", 461 | " batch_A = path_A[i*batch_size:(i+1)*batch_size]\n", 462 | " batch_B = path_B[i*batch_size:(i+1)*batch_size]\n", 463 | " imgs_A, imgs_B = [], []\n", 464 | " for img_A, img_B in zip(batch_A, batch_B): \n", 465 | " img_A = self.imread(img_A)\n", 466 | " img_B = self.imread(img_B)\n", 467 | "\n", 468 | " img_A = resize(img_A, self.img_res)\n", 469 | " img_B = resize(img_B, self.img_res)\n", 470 | "\n", 471 | " if not is_testing and np.random.random() > 0.5:\n", 472 | " img_A = np.fliplr(img_A)\n", 473 | " img_B = np.fliplr(img_B)\n", 474 | "\n", 475 | " imgs_A.append(img_A)\n", 476 | " imgs_B.append(img_B)\n", 477 | "\n", 478 | " imgs_A = np.array(imgs_A)/127.5 - 1.\n", 479 | " imgs_B = np.array(imgs_B)/127.5 - 1.\n", 480 | "\n", 481 | " yield imgs_A, imgs_B\n", 482 | "\n", 483 | " def load_img(self, path):\n", 484 | " img = self.imread(path)\n", 485 | " img = resize(img, self.img_res)\n", 486 | " img = img/127.5 - 1.\n", 487 | " return img[np.newaxis, :, :, :]\n", 488 | " \n", 489 | " def get_img(self, img):\n", 490 | " img = resize(img, self.img_res)\n", 491 | " img = img/127.5 - 1.\n", 492 | " return img\n", 493 | " \n", 494 | " def revert_img(self, img, new_res):\n", 495 | " img = resize(img, new_res)\n", 496 | " img = (img)*0.5 + 0.5\n", 497 | " img = img*255\n", 498 | " img = img.astype(np.float32)\n", 499 | " return img \n", 500 | "\n", 501 | " def imread(self, path):\n", 502 | " return imageio.imread(path, as_gray=False, pilmode=\"RGB\").astype(np.float)\n", 503 | " \n", 504 | "def revert_img(img, new_res):\n", 505 | " img = (img)*0.5 + 0.5\n", 506 | " img = img*255\n", 507 | " img = resize(img, new_res)\n", 508 | " img = img.astype(np.float32)\n", 509 | " return img" 510 | ], 511 | "execution_count": 0, 512 | "outputs": [] 513 | }, 514 | { 515 | "cell_type": "markdown", 516 | "metadata": { 517 | "id": "G0a3FVVNL6s7", 518 | "colab_type": "text" 519 | }, 520 | "source": [ 521 | "CycleGan code\n", 522 | "Follow the tricks mentioned in the article" 523 | ] 524 | }, 525 | { 526 | "cell_type": "code", 527 | "metadata": { 528 | "id": "Kv7Y7zIZL6s-", 529 | "colab_type": "code", 530 | "colab": {} 531 | }, 532 | "source": [ 533 | "from __future__ import print_function, division\n", 534 | "import scipy, os\n", 535 | "import scipy.misc\n", 536 | "\n", 537 | "from keras.datasets import mnist\n", 538 | "from keras_contrib.layers.normalization.instancenormalization import InstanceNormalization\n", 539 | "from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Concatenate\n", 540 | "from keras.layers import BatchNormalization, Activation, ZeroPadding2D\n", 541 | "from keras.layers.advanced_activations import LeakyReLU\n", 542 | "from keras.activations import relu \n", 543 | "from keras.layers.convolutional import UpSampling2D, Conv2D, Conv2DTranspose\n", 544 | "from keras.models import Sequential, Model\n", 545 | "from keras.optimizers import Adam\n", 546 | "import datetime\n", 547 | "import matplotlib.pyplot as plt\n", 548 | "import sys\n", 549 | "import numpy as np\n", 550 | "import os\n", 551 | "import keras\n", 552 | "import shutil, os, random\n", 553 | "from keras.models import load_model\n", 554 | "\n", 555 | "class CycleGAN():\n", 556 | " def __init__(self):\n", 557 | " # Input shape\n", 558 | " self.img_rows = 256\n", 559 | " self.img_cols = 256\n", 560 | " self.channels = 3\n", 561 | " self.img_shape = (self.img_rows, self.img_cols, self.channels) \n", 562 | "\n", 563 | " # Configure data loader\n", 564 | " self.dataset_name = 'data'\n", 565 | " self.data_loader = DataLoader(dataset_name=self.dataset_name,\n", 566 | " img_res=(self.img_rows, self.img_cols))\n", 567 | "\n", 568 | "\n", 569 | " # Calculate output shape of D (PatchGAN)\n", 570 | " patch = int(self.img_rows / 2**4)\n", 571 | " self.disc_patch = (patch, patch, 1)\n", 572 | "\n", 573 | " # Number of filters in the first layer of G and D\n", 574 | " self.gf = 64\n", 575 | " self.df = 64\n", 576 | "\n", 577 | " # Loss weights\n", 578 | " self.lambda_cycle = 0.1 # Cycle-consistency loss\n", 579 | " self.lambda_id = 0.1 * self.lambda_cycle # Identity loss\n", 580 | "\n", 581 | " optimizer = Adam(0.0002, 0.5)\n", 582 | " pdir = \"/content/drive/My Drive/keras_combined_gan/\"\n", 583 | " # Build and compile the discriminators\n", 584 | " self.d_A = self.build_discriminator()\n", 585 | " self.d_A.compile(loss='mse',\n", 586 | " optimizer=optimizer,\n", 587 | " metrics=['accuracy'])\n", 588 | " \n", 589 | " \n", 590 | " self.d_B = self.build_discriminator()\n", 591 | " self.d_B.compile(loss='mse',\n", 592 | " optimizer=optimizer,\n", 593 | " metrics=['accuracy'])\n", 594 | " \n", 595 | "\n", 596 | " #-------------------------\n", 597 | " # Construct Computational\n", 598 | " # Graph of Generators\n", 599 | " #-------------------------\n", 600 | "\n", 601 | " # Build the generators\n", 602 | " self.g_AB = self.build_generator()\n", 603 | " self.g_BA = self.build_generator()\n", 604 | "\n", 605 | " # Input images from both domains\n", 606 | " img_A = Input(shape=self.img_shape)\n", 607 | " img_B = Input(shape=self.img_shape)\n", 608 | "\n", 609 | " # Translate images to the other domain\n", 610 | " fake_B = self.g_AB(img_A)\n", 611 | " fake_A = self.g_BA(img_B)\n", 612 | " # Translate images back to original domain\n", 613 | " reconstr_A = self.g_BA(fake_B)\n", 614 | " reconstr_B = self.g_AB(fake_A)\n", 615 | " # Identity mapping of images\n", 616 | " img_A_id = self.g_BA(img_A)\n", 617 | " img_B_id = self.g_AB(img_B)\n", 618 | "\n", 619 | " # For the combined model we will only train the generators\n", 620 | " self.d_A.trainable = False\n", 621 | " self.d_B.trainable = False \n", 622 | "\n", 623 | " # Discriminators determines validity of translated images\n", 624 | " valid_A = self.d_A(fake_A)\n", 625 | " valid_B = self.d_B(fake_B) \n", 626 | "\n", 627 | " # Combined model trains generators to fool discriminators\n", 628 | " self.combined = Model(inputs=[img_A, img_B],\n", 629 | " outputs=[ valid_A, valid_B,\n", 630 | " reconstr_A, reconstr_B,\n", 631 | " img_A_id, img_B_id ])\n", 632 | " self.combined.compile(loss=['mse', 'mse',\n", 633 | " 'mae', 'mae',\n", 634 | " 'mae', 'mae'],\n", 635 | " loss_weights=[ 1, 1,\n", 636 | " self.lambda_cycle, self.lambda_cycle,\n", 637 | " self.lambda_id, self.lambda_id ],\n", 638 | " optimizer=optimizer)\n", 639 | "\n", 640 | " def build_generator(self):\n", 641 | " \"\"\"Resnet Generator\"\"\"\n", 642 | "\n", 643 | " def conv2d(layer_input, filters=16, strides=1, name=None, f_size=4):\n", 644 | " d = Conv2D(filters, kernel_size=f_size, strides=strides, padding='same', name=name)(layer_input)\n", 645 | " d = InstanceNormalization(name=name+\"_bn\")(d)\n", 646 | " d = Activation('relu')(d)\n", 647 | " return d\n", 648 | " \n", 649 | " def residual(layer_input, filters=16, strides=1, name=None, f_size=3):\n", 650 | " d = conv2d(layer_input, filters=filters, strides=strides, name=name, f_size=f_size)\n", 651 | " d = Conv2D(filters, kernel_size=f_size, strides=strides, padding='same', name=name+\"_2\")(d)\n", 652 | " d = InstanceNormalization(name=name+\"_bn2\")(d)\n", 653 | " d = keras.layers.add([d, layer_input])\n", 654 | " return d\n", 655 | "\n", 656 | " def conv2d_transpose(layer_input, filters=16, strides=1, name=None, f_size=4):\n", 657 | " u = Conv2DTranspose(filters, strides=strides, name=name, kernel_size=f_size, padding='same')(layer_input)\n", 658 | " u = InstanceNormalization(name=name+\"_bn\")(u)\n", 659 | " u = Activation('relu')(u)\n", 660 | " return u\n", 661 | "\n", 662 | " # Image input\n", 663 | " c0 = Input(shape=self.img_shape)\n", 664 | " c1 = conv2d(c0, filters=self.gf, strides=1, name=\"g_e1\", f_size=7)\n", 665 | " c2 = conv2d(c1, filters=self.gf*2, strides=2, name=\"g_e2\", f_size=3)\n", 666 | " c3 = conv2d(c2, filters=self.gf*4, strides=2, name=\"g_e3\", f_size=3)\n", 667 | " \n", 668 | " r1 = residual(c3, filters=self.gf*4, name='g_r1')\n", 669 | " r2 = residual(r1, self.gf*4, name='g_r2')\n", 670 | " r3 = residual(r2, self.gf*4, name='g_r3')\n", 671 | " r4 = residual(r3, self.gf*4, name='g_r4')\n", 672 | " r5 = residual(r4, self.gf*4, name='g_r5')\n", 673 | " r6 = residual(r5, self.gf*4, name='g_r6')\n", 674 | " r7 = residual(r6, self.gf*4, name='g_r7')\n", 675 | " r8 = residual(r7, self.gf*4, name='g_r8')\n", 676 | " r9 = residual(r8, self.gf*4, name='g_r9')\n", 677 | " \n", 678 | " d1 = conv2d_transpose(r9, filters=self.gf*2, f_size=3, strides=2, name='g_d1_dc')\n", 679 | " d2 = conv2d_transpose(d1, filters=self.gf, f_size=3, strides=2, name='g_d2_dc')\n", 680 | " \n", 681 | " output_img = Conv2D(self.channels, kernel_size=7, strides=1, padding='same', activation='tanh')(d2)\n", 682 | "\n", 683 | " return Model(inputs=[c0], outputs=[output_img])\n", 684 | "\n", 685 | " def build_discriminator(self):\n", 686 | "\n", 687 | " def d_layer(layer_input, filters, f_size=4, normalization=True):\n", 688 | " \"\"\"Discriminator layer\"\"\"\n", 689 | " d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)\n", 690 | " d = LeakyReLU(alpha=0.2)(d)\n", 691 | " if normalization:\n", 692 | " d = InstanceNormalization()(d)\n", 693 | " return d\n", 694 | "\n", 695 | " img = Input(shape=self.img_shape)\n", 696 | "\n", 697 | " d1 = d_layer(img, self.df, normalization=False)\n", 698 | " d2 = d_layer(d1, self.df*2)\n", 699 | " d3 = d_layer(d2, self.df*4)\n", 700 | " d4 = d_layer(d3, self.df*8)\n", 701 | "\n", 702 | " validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(d4)\n", 703 | "\n", 704 | " return Model(img, validity)\n", 705 | "\n", 706 | " def train(self, epochs, batch_size=1, sample_interval=50):\n", 707 | "\n", 708 | " start_time = datetime.datetime.now()\n", 709 | "\n", 710 | " # Adversarial loss ground truths\n", 711 | " valid = np.ones((batch_size,) + self.disc_patch)\n", 712 | " fake = np.zeros((batch_size,) + self.disc_patch)\n", 713 | " for epoch in range(epochs):\n", 714 | " for batch_i, (imgs_A, imgs_B) in enumerate(self.data_loader.load_batch(batch_size)):\n", 715 | "\n", 716 | " # ----------------------\n", 717 | " # Train Discriminators\n", 718 | " # ----------------------\n", 719 | "\n", 720 | " # Translate images to opposite domain \n", 721 | " fake_B = self.g_AB.predict([imgs_A])\n", 722 | " fake_A = self.g_BA.predict([imgs_B])\n", 723 | "\n", 724 | " # Train the discriminators (original images = real / translated = Fake)\n", 725 | " dA_loss_real = self.d_A.train_on_batch(imgs_A, valid)\n", 726 | " dA_loss_fake = self.d_A.train_on_batch(fake_A, fake) \n", 727 | " dA_loss = 0.5 * np.add(dA_loss_real, dA_loss_fake)\n", 728 | "\n", 729 | " dB_loss_real = self.d_B.train_on_batch(imgs_B, valid) \n", 730 | " dB_loss_fake = self.d_B.train_on_batch(fake_B, fake) \n", 731 | " dB_loss = 0.5 * np.add(dB_loss_real, dB_loss_fake)\n", 732 | "\n", 733 | " # Total disciminator loss\n", 734 | " d_loss = 0.5 * np.add(dA_loss, dB_loss)\n", 735 | " \n", 736 | " # ------------------\n", 737 | " # Train Generators\n", 738 | " # ------------------\n", 739 | "\n", 740 | " # Train the generators\n", 741 | " g_loss = self.combined.train_on_batch([imgs_A, imgs_B],\n", 742 | " [valid, valid,\n", 743 | " imgs_A, imgs_B,\n", 744 | " imgs_A, imgs_B]) \n", 745 | " \n", 746 | " elapsed_time = datetime.datetime.now() - start_time\n", 747 | " if batch_i%50==0:\n", 748 | " # Plot the progress\n", 749 | " print (\"[Age Epoch %d/%d] [Batch %d/%d] [D loss: %f, acc: %3d%%] [G loss: %05f, adv: %05f, recon: %05f, id: %05f] time: %s \" \\\n", 750 | " % ( epoch, epochs,\n", 751 | " batch_i, self.data_loader.n_batches,\n", 752 | " d_loss[0], 100*d_loss[1],\n", 753 | " g_loss[0],\n", 754 | " np.mean(g_loss[1:3]),\n", 755 | " np.mean(g_loss[3:5]),\n", 756 | " np.mean(g_loss[5:6]),\n", 757 | " elapsed_time)) \n", 758 | " \n", 759 | " \n", 760 | " # If at save interval => save generated image samples\n", 761 | " if batch_i % sample_interval == 0:\n", 762 | " self.sample_images(epoch, batch_i) \n", 763 | " \n", 764 | " \n", 765 | " def sample_images(self, epoch, batch_i):\n", 766 | " os.makedirs('images/%s' % self.dataset_name, exist_ok=True)\n", 767 | " r, c = 2, 3\n", 768 | " imgs_A = self.data_loader.load_data(domain=\"A\", batch_size=1, is_testing=False)\n", 769 | " imgs_B = self.data_loader.load_data(domain=\"B\", batch_size=1, is_testing=False)\n", 770 | "\n", 771 | " # Translate images to the other domain\n", 772 | " fake_B = self.g_AB.predict([imgs_A])\n", 773 | " fake_A = self.g_BA.predict([imgs_B])\n", 774 | " # Translate back to original domain\n", 775 | " reconstr_A = self.g_BA.predict([fake_B])\n", 776 | " reconstr_B = self.g_AB.predict([fake_A])\n", 777 | "\n", 778 | " gen_imgs = np.concatenate([imgs_A, fake_B, reconstr_A, imgs_B, fake_A, reconstr_B])\n", 779 | "\n", 780 | " # Rescale images 0 - 1\n", 781 | " gen_imgs = 0.5 * gen_imgs + 0.5\n", 782 | "\n", 783 | " titles = ['Original', 'Translated', 'Reconstructed']\n", 784 | " fig, axs = plt.subplots(r, c)\n", 785 | " cnt = 0\n", 786 | " for i in range(r):\n", 787 | " for j in range(c):\n", 788 | " axs[i,j].imshow(gen_imgs[cnt])\n", 789 | " axs[i, j].set_title(titles[j])\n", 790 | " axs[i,j].axis('off')\n", 791 | " cnt += 1\n", 792 | " fig.savefig(\"images/%s/%d_%d.png\" % (self.dataset_name, epoch, batch_i))\n", 793 | " plt.close()\n", 794 | " \n", 795 | " def run_20_to_50(self, image):\n", 796 | " imgs_A = self.data_loader.load_data(domain=\"A\", batch_size=1, is_testing=True) \n", 797 | " fake_B = self.g_AB.predict(imgs_A)\n", 798 | "\n", 799 | "gan = CycleGAN()\n", 800 | "gan.train(epochs=50, batch_size=2, sample_interval=10)" 801 | ], 802 | "execution_count": 0, 803 | "outputs": [] 804 | }, 805 | { 806 | "cell_type": "markdown", 807 | "metadata": { 808 | "id": "0L0X8aDAL6tn", 809 | "colab_type": "text" 810 | }, 811 | "source": [ 812 | "Face Detection" 813 | ] 814 | }, 815 | { 816 | "cell_type": "code", 817 | "metadata": { 818 | "id": "86WqP9NLL6to", 819 | "colab_type": "code", 820 | "colab": {} 821 | }, 822 | "source": [ 823 | "!wget https://github.com/spmallick/learnopencv/raw/master/FaceDetectionComparison/models/opencv_face_detector_uint8.pb\n", 824 | "!wget https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/opencv_face_detector.pbtxt" 825 | ], 826 | "execution_count": 0, 827 | "outputs": [] 828 | }, 829 | { 830 | "cell_type": "code", 831 | "metadata": { 832 | "id": "4EDqZeEXL6tt", 833 | "colab_type": "code", 834 | "colab": {} 835 | }, 836 | "source": [ 837 | "!wget https://statics.sportskeeda.com/editor/2018/09/4c606-1536825356-800.jpg \n", 838 | "!mv 4c606-1536825356-800.jpg big3.jpg" 839 | ], 840 | "execution_count": 0, 841 | "outputs": [] 842 | }, 843 | { 844 | "cell_type": "code", 845 | "metadata": { 846 | "id": "KRY5G-0PL6t3", 847 | "colab_type": "code", 848 | "colab": {} 849 | }, 850 | "source": [ 851 | "import cv2\n", 852 | "%matplotlib inline \n", 853 | "from matplotlib import pyplot as plt\n", 854 | "from PIL import Image\n", 855 | "def detectFaceOpenCVDnn(net, frame):\n", 856 | " frameOpencvDnn = frame.copy()\n", 857 | " frameHeight = frameOpencvDnn.shape[0]\n", 858 | " frameWidth = frameOpencvDnn.shape[1]\n", 859 | " blob = cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (frameHeight, frameWidth), [104, 117, 123], False, False)\n", 860 | "\n", 861 | " net.setInput(blob)\n", 862 | " detections = net.forward()\n", 863 | " bboxes = []\n", 864 | " for i in range(detections.shape[2]):\n", 865 | " confidence = detections[0, 0, i, 2]\n", 866 | " if confidence > conf_threshold:\n", 867 | " x1 = int(detections[0, 0, i, 3] * frameWidth)\n", 868 | " y1 = int(detections[0, 0, i, 4] * frameHeight)\n", 869 | " x2 = int(detections[0, 0, i, 5] * frameWidth)\n", 870 | " y2 = int(detections[0, 0, i, 6] * frameHeight)\n", 871 | " bboxes.append([x1, y1, x2, y2])\n", 872 | " if not(x1<30 or y1<30 or x2>frameWidth-30 or y2>frameHeight-30):\n", 873 | " y1, y2 = y1-20, y2+20\n", 874 | " x1, x2 = x1-20, x2+20\n", 875 | " else:\n", 876 | " continue\n", 877 | " crop_img = frameOpencvDnn[y1:y2, x1:x2]\n", 878 | " crop_img = cv2.cvtColor(crop_img, cv2.COLOR_BGR2RGB).astype(\"float32\")\n", 879 | " cv2.imwrite(\"cropped\"+str(i)+\".jpg\", crop_img)\n", 880 | " inp = np.array([gan.data_loader.get_img(crop_img)])\n", 881 | " old_img = gan.g_AB.predict([inp])\n", 882 | " new_img = revert_img(old_img[0], (y2-y1, x2-x1))\n", 883 | " new_img = cv2.cvtColor(new_img, cv2.COLOR_RGB2BGR).astype(\"float32\")\n", 884 | " frameOpencvDnn[y1:y2, x1:x2] = new_img\n", 885 | " scipy.misc.imsave(\"old\"+str(i)+\".jpg\", new_img)\n", 886 | " return frameOpencvDnn, bboxes\n", 887 | " \n", 888 | "conf_threshold = 0.8\n", 889 | "modelFile = \"opencv_face_detector_uint8.pb\"\n", 890 | "configFile = \"opencv_face_detector.pbtxt\"\n", 891 | "net = cv2.dnn.readNetFromTensorflow(modelFile, configFile)\n", 892 | "frame = cv2.imread(\"big3.jpg\")\n", 893 | "outOpencvDnn, bboxes = detectFaceOpenCVDnn(net,frame,0)\n", 894 | "cv2.imwrite(\"big3_old.jpg\", outOpencvDnn)\n", 895 | "outOpencvDnn, bboxes = detectFaceOpenCVDnn(net,frame,1)\n", 896 | "cv2.imwrite(\"big3_black.jpg\", outOpencvDnn)" 897 | ], 898 | "execution_count": 0, 899 | "outputs": [] 900 | }, 901 | { 902 | "cell_type": "code", 903 | "metadata": { 904 | "id": "uAkPXun9L6uB", 905 | "colab_type": "code", 906 | "colab": {} 907 | }, 908 | "source": [ 909 | "" 910 | ], 911 | "execution_count": 0, 912 | "outputs": [] 913 | } 914 | ] 915 | } -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Erik Linder-Norén 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # FaceApp-with-Deep-Learning 2 | 3 | Compatible with Tensorflow 1.14 4 | 5 | Corresponding article :- https://blog.paperspace.com/use-cyclegan-age-conversion-keras-python/ 6 | --------------------------------------------------------------------------------