├── .devcontainer └── devcontainer.json ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ └── main.yml ├── .gitignore ├── .vscode └── settings.json ├── CONTRIBUTING.md ├── GAN.ipynb ├── LICENSE ├── NOTICE ├── README.md ├── fake_samples_epoch_000.png ├── real_samples.png └── requirements.txt /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "extensions": [ 3 | "GitHub.github-vscode-theme", 4 | "ms-python.python" // Includes Jupyter extension 5 | // Additional Extensions Here 6 | ], 7 | "onCreateCommand" : "[ -f requirements.txt ] && pip install -r requirements.txt; echo PS1='\"$ \"' >> ~/.bashrc", //Set Terminal Prompt to $ 8 | } 9 | 10 | // DevContainer Reference: https://code.visualstudio.com/docs/remote/devcontainerjson-reference 11 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Codeowners for these exercise files: 2 | # * (asterisk) denotes "all files and folders" 3 | # Example: * @producer @instructor 4 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 7 | 8 | ## Issue Overview 9 | 10 | 11 | ## Describe your environment 12 | 13 | 14 | ## Steps to Reproduce 15 | 16 | 1. 17 | 2. 18 | 3. 19 | 4. 20 | 21 | ## Expected Behavior 22 | 23 | 24 | ## Current Behavior 25 | 26 | 27 | ## Possible Solution 28 | 29 | 30 | ## Screenshots / Video 31 | 32 | 33 | ## Related Issues 34 | 35 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Copy To Branches 2 | on: 3 | workflow_dispatch: 4 | jobs: 5 | copy-to-branches: 6 | runs-on: ubuntu-latest 7 | steps: 8 | - uses: actions/checkout@v2 9 | with: 10 | fetch-depth: 0 11 | - name: Copy To Branches Action 12 | uses: planetoftheweb/copy-to-branches@v1.2 13 | env: 14 | key: main 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | node_modules 3 | .tmp 4 | npm-debug.log 5 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "editor.bracketPairColorization.enabled": true, 3 | "editor.cursorBlinking": "solid", 4 | "editor.fontFamily": "ui-monospace, Menlo, Monaco, 'Cascadia Mono', 'Segoe UI Mono', 'Roboto Mono', 'Oxygen Mono', 'Ubuntu Monospace', 'Source Code Pro', 'Fira Mono', 'Droid Sans Mono', 'Courier New', monospace", 5 | "editor.fontLigatures": false, 6 | "editor.fontSize": 22, 7 | "editor.formatOnPaste": true, 8 | "editor.formatOnSave": true, 9 | "editor.lineNumbers": "on", 10 | "editor.matchBrackets": "always", 11 | "editor.minimap.enabled": false, 12 | "editor.smoothScrolling": true, 13 | "editor.tabSize": 2, 14 | "editor.useTabStops": true, 15 | "emmet.triggerExpansionOnTab": true, 16 | 
"explorer.openEditors.visible": 0, 17 | "files.autoSave": "afterDelay", 18 | "screencastMode.onlyKeyboardShortcuts": true, 19 | "terminal.integrated.fontSize": 18, 20 | "workbench.activityBar.visible": true, 21 | "workbench.colorTheme": "Visual Studio Dark", 22 | "workbench.fontAliasing": "antialiased", 23 | "workbench.statusBar.visible": true 24 | } 25 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | 2 | Contribution Agreement 3 | ====================== 4 | 5 | This repository does not accept pull requests (PRs). All pull requests will be closed. 6 | 7 | However, if any contributions (through pull requests, issues, feedback or otherwise) are provided, as a contributor, you represent that the code you submit is your original work or that of your employer (in which case you represent you have the right to bind your employer). By submitting code (or otherwise providing feedback), you (and, if applicable, your employer) are licensing the submitted code (and/or feedback) to LinkedIn and the open source community subject to the BSD 2-Clause license. 8 | -------------------------------------------------------------------------------- /GAN.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "# Import the required libraries\n", 10 | "# For this example we will use pytorch to manage the construction of the neural networks and the training\n", 11 | "# torchvision is a module that is part of pytorch that supports vision datasets and it will be where we will source the mnist - handwritten digits - data\n", 12 | "\n", 13 | "from __future__ import print_function\n", 14 | "import argparse\n", 15 | "import os\n", 16 | "import random\n", 17 | "import torch\n", 18 | "import torch.nn as nn\n", 19 | "import torch.nn.parallel\n", 20 | "import torch.backends.cudnn as cudnn\n", 21 | "import torch.optim as optim\n", 22 | "import torch.utils.data\n", 23 | "import torchvision.datasets as dset\n", 24 | "import torchvision.transforms as transforms\n", 25 | "import torchvision.utils as vutils\n", 26 | "\n" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 2, 32 | "metadata": {}, 33 | "outputs": [ 34 | { 35 | "name": "stdout", 36 | "output_type": "stream", 37 | "text": [ 38 | "Random Seed: 40\n" 39 | ] 40 | }, 41 | { 42 | "data": { 43 | "text/plain": [ 44 | "" 45 | ] 46 | }, 47 | "execution_count": 2, 48 | "metadata": {}, 49 | "output_type": "execute_result" 50 | } 51 | ], 52 | "source": [ 53 | "# Setting a seed will determine which data elements are selected. To replicate results keep the same seed.\n", 54 | "manualSeed = random.randint(1, 10000)\n", 55 | "print(\"Random Seed: \", manualSeed)\n", 56 | "random.seed(manualSeed)\n", 57 | "torch.manual_seed(manualSeed)\n" 58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": 17, 63 | "metadata": {}, 64 | "outputs": [ 65 | { 66 | "data": { 67 | "text/plain": [ 68 | "False" 69 | ] 70 | }, 71 | "execution_count": 17, 72 | "metadata": {}, 73 | "output_type": "execute_result" 74 | } 75 | ], 76 | "source": [ 77 | "# This is a check if there is a gpu available for training. 
78 | "torch.cuda.is_available()\n"
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": 7,
84 | "metadata": {},
85 | "outputs": [],
86 | "source": [
87 | "# Since we assume the GPU is not available, set the device to the CPU and define the model and training parameters\n",
88 | "cudnn.benchmark = True\n",
89 | "device = torch.device(\"cpu\")\n",
90 | "ngpu = 0\n",
91 | "# This is the width of the latent vector the generator samples from\n",
92 | "nz = 100\n",
93 | "# This is the base feature-map size of the generator\n",
94 | "ngf = 64\n",
95 | "# This is the base feature-map size of the discriminator\n",
96 | "ndf = 64\n",
97 | "# This is the number of color channels - MNIST is grayscale (1); color datasets have 3\n",
98 | "nc = 1\n",
99 | "# The number of samples to process per pass (the batch size)\n",
100 | "batch_size = 64\n",
101 | "# The number of CPU worker processes used to load the dataset\n",
102 | "workers = 4"
103 | ]
104 | },
105 | {
106 | "cell_type": "code",
107 | "execution_count": 8,
108 | "metadata": {},
109 | "outputs": [],
110 | "source": [
111 | "dataset = dset.MNIST(root='data', download=True,\n",
112 | "                     transform=transforms.Compose([\n",
113 | "                         transforms.Resize(64),\n",
114 | "                         transforms.ToTensor(),\n",
115 | "                         transforms.Normalize((0.5,), (0.5,)),\n",
116 | "                     ]))\n",
117 | "\n",
118 | "dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,\n",
119 | "                                         shuffle=True, num_workers=int(workers))\n"
120 | ]
121 | },
122 | {
123 | "cell_type": "code",
124 | "execution_count": 9,
125 | "metadata": {},
126 | "outputs": [],
127 | "source": [
128 | "\n",
129 | "\n",
130 | "# custom weights initialization called on netG and netD\n",
131 | "# The weights need to be initialised to suitable values, based on the layer type, before training. They could also be loaded from a previous training run.\n",
132 | "def weights_init(m):\n",
133 | "    classname = m.__class__.__name__\n",
134 | "    if classname.find('Conv') != -1:\n",
135 | "        torch.nn.init.normal_(m.weight, 0.0, 0.02)\n",
136 | "    elif classname.find('BatchNorm') != -1:\n",
137 | "        torch.nn.init.normal_(m.weight, 1.0, 0.02)\n",
138 | "        torch.nn.init.zeros_(m.bias)\n"
139 | ]
140 | },
141 | {
142 | "cell_type": "code",
143 | "execution_count": 10,
144 | "metadata": {},
145 | "outputs": [
146 | {
147 | "name": "stdout",
148 | "output_type": "stream",
149 | "text": [
150 | "Generator(\n",
151 | "  (main): Sequential(\n",
152 | "    (0): ConvTranspose2d(100, 512, kernel_size=(4, 4), stride=(1, 1), bias=False)\n",
153 | "    (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
154 | "    (2): ReLU(inplace=True)\n",
155 | "    (3): ConvTranspose2d(512, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n",
156 | "    (4): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
157 | "    (5): ReLU(inplace=True)\n",
158 | "    (6): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n",
159 | "    (7): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
160 | "    (8): ReLU(inplace=True)\n",
161 | "    (9): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n",
162 | "    (10): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
163 | "    (11): ReLU(inplace=True)\n",
164 | "    (12): ConvTranspose2d(64, 1, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n",
165 | "    (13): Tanh()\n",
166 | "  )\n",
167 | ")\n"
168 | ]
169 | }
170 | ],
171 | "source": [
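"# How the upsampling works: a ConvTranspose2d output size is (in - 1)*stride - 2*padding + kernel,\n",
"# so the first layer maps the 1x1 latent input to 4x4, and each stride-2 layer then doubles it:\n",
"# 4x4 -> 8x8 -> 16x16 -> 32x32 -> 64x64.\n",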
| "# This is the bulk of the neural network definition for the Generator.\n", 173 | "# The init sets up the layers and connecting activation functions.\n", 174 | "# The forward function processes the data through the layers\n", 175 | "class Generator(nn.Module):\n", 176 | " def __init__(self, ngpu):\n", 177 | " super(Generator, self).__init__()\n", 178 | " self.ngpu = ngpu\n", 179 | " self.main = nn.Sequential(\n", 180 | " # input is Z, going into a convolution\n", 181 | " nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),\n", 182 | " nn.BatchNorm2d(ngf * 8),\n", 183 | " nn.ReLU(True),\n", 184 | " # state size. (ngf*8) x 4 x 4\n", 185 | " nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n", 186 | " nn.BatchNorm2d(ngf * 4),\n", 187 | " nn.ReLU(True),\n", 188 | " # state size. (ngf*4) x 8 x 8\n", 189 | " nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n", 190 | " nn.BatchNorm2d(ngf * 2),\n", 191 | " nn.ReLU(True),\n", 192 | " # state size. (ngf*2) x 16 x 16\n", 193 | " nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n", 194 | " nn.BatchNorm2d(ngf),\n", 195 | " nn.ReLU(True),\n", 196 | " # state size. (ngf) x 32 x 32\n", 197 | " nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n", 198 | " nn.Tanh()\n", 199 | " # state size. (nc) x 64 x 64\n", 200 | " )\n", 201 | "\n", 202 | " def forward(self, input):\n", 203 | " if input.is_cuda and self.ngpu > 1:\n", 204 | " output = nn.parallel.data_parallel(\n", 205 | " self.main, input, range(self.ngpu))\n", 206 | " else:\n", 207 | " output = self.main(input)\n", 208 | " return output\n", 209 | "\n", 210 | "\n", 211 | "netG = Generator(ngpu).to(device)\n", 212 | "netG.apply(weights_init)\n", 213 | "print(netG)\n" 214 | ] 215 | }, 216 | { 217 | "cell_type": "code", 218 | "execution_count": 11, 219 | "metadata": {}, 220 | "outputs": [ 221 | { 222 | "name": "stdout", 223 | "output_type": "stream", 224 | "text": [ 225 | "Discriminator(\n", 226 | " (main): Sequential(\n", 227 | " (0): Conv2d(1, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n", 228 | " (1): LeakyReLU(negative_slope=0.2, inplace=True)\n", 229 | " (2): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n", 230 | " (3): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 231 | " (4): LeakyReLU(negative_slope=0.2, inplace=True)\n", 232 | " (5): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n", 233 | " (6): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 234 | " (7): LeakyReLU(negative_slope=0.2, inplace=True)\n", 235 | " (8): Conv2d(256, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n", 236 | " (9): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n", 237 | " (10): LeakyReLU(negative_slope=0.2, inplace=True)\n", 238 | " (11): Conv2d(512, 1, kernel_size=(4, 4), stride=(1, 1), bias=False)\n", 239 | " (12): Sigmoid()\n", 240 | " )\n", 241 | ")\n" 242 | ] 243 | } 244 | ], 245 | "source": [ 246 | "# This is the bulk of the neural network definition for the Discrimator.\n", 247 | "# The init sets up the layers and connecting activation functions.\n", 248 | "# The forward function processes the data through the layers\n", 249 | "class Discriminator(nn.Module):\n", 250 | " def __init__(self, ngpu):\n", 251 | " super(Discriminator, self).__init__()\n", 252 | " self.ngpu = ngpu\n", 253 | " self.main = nn.Sequential(\n", 254 | " # input is (nc) x 64 x 64\n", 255 | " 
255 | "            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),\n",
256 | "            nn.LeakyReLU(0.2, inplace=True),\n",
257 | "            # state size. (ndf) x 32 x 32\n",
258 | "            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n",
259 | "            nn.BatchNorm2d(ndf * 2),\n",
260 | "            nn.LeakyReLU(0.2, inplace=True),\n",
261 | "            # state size. (ndf*2) x 16 x 16\n",
262 | "            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n",
263 | "            nn.BatchNorm2d(ndf * 4),\n",
264 | "            nn.LeakyReLU(0.2, inplace=True),\n",
265 | "            # state size. (ndf*4) x 8 x 8\n",
266 | "            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),\n",
267 | "            nn.BatchNorm2d(ndf * 8),\n",
268 | "            nn.LeakyReLU(0.2, inplace=True),\n",
269 | "            # state size. (ndf*8) x 4 x 4\n",
270 | "            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),\n",
271 | "            nn.Sigmoid()\n",
272 | "        )\n",
273 | "\n",
274 | "    def forward(self, input):\n",
275 | "        if input.is_cuda and self.ngpu > 1:\n",
276 | "            output = nn.parallel.data_parallel(\n",
277 | "                self.main, input, range(self.ngpu))\n",
278 | "        else:\n",
279 | "            output = self.main(input)\n",
280 | "\n",
281 | "        return output.view(-1, 1).squeeze(1)\n",
282 | "    \n",
283 | "netD = Discriminator(ngpu).to(device)\n",
284 | "netD.apply(weights_init)\n",
285 | "print(netD)"
286 | ]
287 | },
288 | {
289 | "cell_type": "code",
290 | "execution_count": 12,
291 | "metadata": {},
292 | "outputs": [],
293 | "source": [
294 | "# Set the loss function, using PyTorch's built-in binary cross-entropy module\n",
295 | "criterion = nn.BCELoss()\n",
296 | "\n",
297 | "# Set up a fixed batch of latent noise to sample from when saving example images.\n",
298 | "# Label real samples as 1 and fake samples as 0\n",
299 | "fixed_noise = torch.randn(64, nz, 1, 1, device=device)\n",
300 | "real_label = 1\n",
301 | "fake_label = 0\n",
302 | "\n",
303 | "# Create the optimizers, which adjust each network's weights during training to improve the training process\n",
304 | "optimizerD = optim.Adam(netD.parameters(), lr=0.0005, betas=(0.5, 0.999))\n",
305 | "optimizerG = optim.Adam(netG.parameters(), lr=0.0005, betas=(0.5, 0.999))\n"
306 | ]
307 | },
308 | {
309 | "cell_type": "code",
310 | "execution_count": 13,
311 | "metadata": {},
312 | "outputs": [
313 | {
314 | "name": "stdout",
315 | "output_type": "stream",
316 | "text": [
317 | "[0/1][0/938] Loss_D: 1.5835 Loss_G: 7.6377 D(x): 0.4592 D(G(z)): 0.4420 / 0.0008\n",
318 | "[0/1][1/938] Loss_D: 6.3707 Loss_G: 12.9269 D(x): 0.9999 D(G(z)): 0.9966 / 0.0000\n",
319 | "[0/1][2/938] Loss_D: 0.6768 Loss_G: 16.8669 D(x): 0.9834 D(G(z)): 0.4200 / 0.0000\n",
320 | "[0/1][3/938] Loss_D: 0.6830 Loss_G: 6.9758 D(x): 0.7238 D(G(z)): 0.0103 / 0.0019\n",
321 | "[0/1][4/938] Loss_D: 5.0312 Loss_G: 22.7755 D(x): 0.9792 D(G(z)): 0.9849 / 0.0000\n",
322 | "[0/1][5/938] Loss_D: 0.8028 Loss_G: 20.7876 D(x): 0.7737 D(G(z)): 0.0000 / 0.0000\n",
323 | "[0/1][6/938] Loss_D: 0.1013 Loss_G: 6.8544 D(x): 0.9420 D(G(z)): 0.0100 / 0.0024\n",
324 | "[0/1][7/938] Loss_D: 4.5736 Loss_G: 27.5072 D(x): 0.9956 D(G(z)): 0.9841 / 0.0000\n",
325 | "[0/1][8/938] Loss_D: 0.2590 Loss_G: 30.5974 D(x): 0.8658 D(G(z)): 0.0000 / 0.0000\n",
326 | "[0/1][9/938] Loss_D: 0.5414 Loss_G: 27.9687 D(x): 0.7860 D(G(z)): 0.0000 / 0.0000\n",
327 | "[0/1][10/938] Loss_D: 0.0722 Loss_G: 12.9074 D(x): 0.9690 D(G(z)): 0.0000 / 0.0000\n",
328 | "[0/1][11/938] Loss_D: 3.3673 Loss_G: 34.2867 D(x): 0.9949 D(G(z)): 0.9281 / 0.0000\n",
329 | "[0/1][12/938] Loss_D: 0.4987 Loss_G: 39.9006 D(x): 0.8213 D(G(z)): 0.0000 / 0.0000\n",
330 | "[0/1][13/938] Loss_D: 0.3143 Loss_G: 41.1334 D(x): 0.8446 D(G(z)): 0.0000 / 0.0000\n",
331 | "[0/1][14/938] Loss_D: 0.0682 Loss_G: 41.4898 D(x): 0.9650 D(G(z)): 0.0000 / 0.0000\n", 332 | "[0/1][15/938] Loss_D: 0.0004 Loss_G: 41.6900 D(x): 0.9996 D(G(z)): 0.0000 / 0.0000\n", 333 | "[0/1][16/938] Loss_D: 0.0001 Loss_G: 41.7032 D(x): 0.9999 D(G(z)): 0.0000 / 0.0000\n", 334 | "[0/1][17/938] Loss_D: 0.0002 Loss_G: 41.6041 D(x): 0.9998 D(G(z)): 0.0000 / 0.0000\n", 335 | "[0/1][18/938] Loss_D: 0.0096 Loss_G: 41.7073 D(x): 0.9928 D(G(z)): 0.0000 / 0.0000\n", 336 | "[0/1][19/938] Loss_D: 0.0000 Loss_G: 41.6242 D(x): 1.0000 D(G(z)): 0.0000 / 0.0000\n", 337 | "[0/1][20/938] Loss_D: 0.0012 Loss_G: 41.6705 D(x): 0.9988 D(G(z)): 0.0000 / 0.0000\n", 338 | "[0/1][21/938] Loss_D: 0.0003 Loss_G: 41.6945 D(x): 0.9997 D(G(z)): 0.0000 / 0.0000\n", 339 | "[0/1][22/938] Loss_D: 0.0000 Loss_G: 41.6677 D(x): 1.0000 D(G(z)): 0.0000 / 0.0000\n", 340 | "[0/1][23/938] Loss_D: 0.0003 Loss_G: 41.7906 D(x): 0.9997 D(G(z)): 0.0000 / 0.0000\n" 341 | ] 342 | } 343 | ], 344 | "source": [ 345 | "# This is the engine of the code base - explicitly taking the objects created above \n", 346 | "# (The generator, discrimator and the dataset) and connecting them together to learn.\n", 347 | "\n", 348 | "for epoch in range(1):\n", 349 | " for i, data in enumerate(dataloader, 0):\n", 350 | " ############################\n", 351 | " # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))\n", 352 | " ###########################\n", 353 | " # train with real\n", 354 | " \n", 355 | " # Set the descrimator to forget any gradients.\n", 356 | " netD.zero_grad()\n", 357 | " # Get a sample of real handwritten digits and label them as 1 - all real\n", 358 | " real_cpu = data[0].to(device)\n", 359 | " batch_size = real_cpu.size(0)\n", 360 | " label = torch.full((batch_size,), real_label, dtype=real_cpu.dtype, device=device)\n", 361 | " # Pass the sample through the discrimator\n", 362 | " output = netD(real_cpu)\n", 363 | " # measure the error\n", 364 | " errD_real = criterion(output, label)\n", 365 | " # Calculate the gradients of each layer of the network\n", 366 | " errD_real.backward()\n", 367 | " # Get the average of the output across the batch\n", 368 | " D_x = output.mean().item()\n", 369 | "\n", 370 | " # train with fake\n", 371 | " noise = torch.randn(batch_size, nz, 1, 1, device=device)\n", 372 | " # pass the noise through the generator layers\n", 373 | " fake = netG(noise)\n", 374 | " # set the labels to all 0 - fake\n", 375 | " label.fill_(fake_label)\n", 376 | " # ask the discrimator to judge the fake images\n", 377 | " output = netD(fake.detach())\n", 378 | " # measure the error\n", 379 | " errD_fake = criterion(output, label)\n", 380 | " # Calculate the gradients \n", 381 | " errD_fake.backward()\n", 382 | " # Get the average output across the batch again\n", 383 | " D_G_z1 = output.mean().item()\n", 384 | " # Get the error\n", 385 | " errD = errD_real + errD_fake\n", 386 | " # Run the optimizer to update the weights\n", 387 | " optimizerD.step()\n", 388 | "\n", 389 | " ############################\n", 390 | " # (2) Update G network: maximize log(D(G(z)))\n", 391 | " ###########################\n", 392 | " # Set the gradients of the generator to zero\n", 393 | " netG.zero_grad()\n", 394 | " label.fill_(real_label) # fake labels are real for generator cost\n", 395 | " # get the judgements from the discrimator of the generator output is fake\n", 396 | " output = netD(fake)\n", 397 | " # calculate the error\n", 398 | " errG = criterion(output, label)\n", 399 | " # update the gradients\n", 400 | " 
errG.backward()\n", 401 | " # Get the average of the output across the batch\n", 402 | " D_G_z2 = output.mean().item()\n", 403 | " # update the weights\n", 404 | " optimizerG.step()\n", 405 | "\n", 406 | " print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'\n", 407 | " % (epoch, 1, i, len(dataloader), errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))\n", 408 | " # every 100 steps save a real sample and a fake sample for comparison\n", 409 | " if i % 100 == 0:\n", 410 | " vutils.save_image(real_cpu,'real_samples.png',normalize=True)\n", 411 | " fake = netG(fixed_noise)\n", 412 | " vutils.save_image(fake.detach(),'fake_samples_epoch_%03d.png' % epoch, normalize=True)\n" 413 | ] 414 | }, 415 | { 416 | "cell_type": "code", 417 | "execution_count": 1, 418 | "metadata": {}, 419 | "outputs": [], 420 | "source": [ 421 | "\n", 422 | "\n" 423 | ] 424 | }, 425 | { 426 | "cell_type": "code", 427 | "execution_count": null, 428 | "metadata": {}, 429 | "outputs": [], 430 | "source": [] 431 | }, 432 | { 433 | "cell_type": "code", 434 | "execution_count": null, 435 | "metadata": {}, 436 | "outputs": [], 437 | "source": [ 438 | "\n" 439 | ] 440 | }, 441 | { 442 | "cell_type": "code", 443 | "execution_count": null, 444 | "metadata": {}, 445 | "outputs": [], 446 | "source": [] 447 | } 448 | ], 449 | "metadata": { 450 | "kernelspec": { 451 | "display_name": "Python 3 (ipykernel)", 452 | "language": "python", 453 | "name": "python3" 454 | }, 455 | "language_info": { 456 | "codemirror_mode": { 457 | "name": "ipython", 458 | "version": 3 459 | }, 460 | "file_extension": ".py", 461 | "mimetype": "text/x-python", 462 | "name": "python", 463 | "nbconvert_exporter": "python", 464 | "pygments_lexer": "ipython3", 465 | "version": "3.10.4" 466 | }, 467 | "orig_nbformat": 4, 468 | "vscode": { 469 | "interpreter": { 470 | "hash": "3ad933181bd8a04b432d3370b9dc3b0662ad032c4dfaa4e4f1596c548f763858" 471 | } 472 | } 473 | }, 474 | "nbformat": 4, 475 | "nbformat_minor": 2 476 | } 477 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | LinkedIn Learning Exercise Files License Agreement 2 | ================================================== 3 | 4 | This License Agreement (the "Agreement") is a binding legal agreement 5 | between you (as an individual or entity, as applicable) and LinkedIn 6 | Corporation (“LinkedIn”). By downloading or using the LinkedIn Learning 7 | exercise files in this repository (“Licensed Materials”), you agree to 8 | be bound by the terms of this Agreement. If you do not agree to these 9 | terms, do not download or use the Licensed Materials. 10 | 11 | 1. License. 12 | - a. Subject to the terms of this Agreement, LinkedIn hereby grants LinkedIn 13 | members during their LinkedIn Learning subscription a non-exclusive, 14 | non-transferable copyright license, for internal use only, to 1) make a 15 | reasonable number of copies of the Licensed Materials, and 2) make 16 | derivative works of the Licensed Materials for the sole purpose of 17 | practicing skills taught in LinkedIn Learning courses. 18 | - b. Distribution. 
Unless otherwise noted in the Licensed Materials, subject 19 | to the terms of this Agreement, LinkedIn hereby grants LinkedIn members 20 | with a LinkedIn Learning subscription a non-exclusive, non-transferable 21 | copyright license to distribute the Licensed Materials, except the 22 | Licensed Materials may not be included in any product or service (or 23 | otherwise used) to instruct or educate others. 24 | 25 | 2. Restrictions and Intellectual Property. 26 | - a. You may not to use, modify, copy, make derivative works of, publish, 27 | distribute, rent, lease, sell, sublicense, assign or otherwise transfer the 28 | Licensed Materials, except as expressly set forth above in Section 1. 29 | - b. Linkedin (and its licensors) retains its intellectual property rights 30 | in the Licensed Materials. Except as expressly set forth in Section 1, 31 | LinkedIn grants no licenses. 32 | - c. You indemnify LinkedIn and its licensors and affiliates for i) any 33 | alleged infringement or misappropriation of any intellectual property rights 34 | of any third party based on modifications you make to the Licensed Materials, 35 | ii) any claims arising from your use or distribution of all or part of the 36 | Licensed Materials and iii) a breach of this Agreement. You will defend, hold 37 | harmless, and indemnify LinkedIn and its affiliates (and our and their 38 | respective employees, shareholders, and directors) from any claim or action 39 | brought by a third party, including all damages, liabilities, costs and 40 | expenses, including reasonable attorneys’ fees, to the extent resulting from, 41 | alleged to have resulted from, or in connection with: (a) your breach of your 42 | obligations herein; or (b) your use or distribution of any Licensed Materials. 43 | 44 | 3. Open source. This code may include open source software, which may be 45 | subject to other license terms as provided in the files. 46 | 47 | 4. Warranty Disclaimer. LINKEDIN PROVIDES THE LICENSED MATERIALS ON AN “AS IS” 48 | AND “AS AVAILABLE” BASIS. LINKEDIN MAKES NO REPRESENTATION OR WARRANTY, 49 | WHETHER EXPRESS OR IMPLIED, ABOUT THE LICENSED MATERIALS, INCLUDING ANY 50 | REPRESENTATION THAT THE LICENSED MATERIALS WILL BE FREE OF ERRORS, BUGS OR 51 | INTERRUPTIONS, OR THAT THE LICENSED MATERIALS ARE ACCURATE, COMPLETE OR 52 | OTHERWISE VALID. TO THE FULLEST EXTENT PERMITTED BY LAW, LINKEDIN AND ITS 53 | AFFILIATES DISCLAIM ANY IMPLIED OR STATUTORY WARRANTY OR CONDITION, INCLUDING 54 | ANY IMPLIED WARRANTY OR CONDITION OF MERCHANTABILITY OR FITNESS FOR A 55 | PARTICULAR PURPOSE, AVAILABILITY, SECURITY, TITLE AND/OR NON-INFRINGEMENT. 56 | YOUR USE OF THE LICENSED MATERIALS IS AT YOUR OWN DISCRETION AND RISK, AND 57 | YOU WILL BE SOLELY RESPONSIBLE FOR ANY DAMAGE THAT RESULTS FROM USE OF THE 58 | LICENSED MATERIALS TO YOUR COMPUTER SYSTEM OR LOSS OF DATA. NO ADVICE OR 59 | INFORMATION, WHETHER ORAL OR WRITTEN, OBTAINED BY YOU FROM US OR THROUGH OR 60 | FROM THE LICENSED MATERIALS WILL CREATE ANY WARRANTY OR CONDITION NOT 61 | EXPRESSLY STATED IN THESE TERMS. 62 | 63 | 5. Limitation of Liability. LINKEDIN SHALL NOT BE LIABLE FOR ANY INDIRECT, 64 | INCIDENTAL, SPECIAL, PUNITIVE, CONSEQUENTIAL OR EXEMPLARY DAMAGES, INCLUDING 65 | BUT NOT LIMITED TO, DAMAGES FOR LOSS OF PROFITS, GOODWILL, USE, DATA OR OTHER 66 | INTANGIBLE LOSSES . IN NO EVENT WILL LINKEDIN'S AGGREGATE LIABILITY TO YOU 67 | EXCEED $100. THIS LIMITATION OF LIABILITY SHALL: 68 | - i. 
APPLY REGARDLESS OF WHETHER (A) YOU BASE YOUR CLAIM ON CONTRACT, TORT, 69 | STATUTE, OR ANY OTHER LEGAL THEORY, (B) WE KNEW OR SHOULD HAVE KNOWN ABOUT 70 | THE POSSIBILITY OF SUCH DAMAGES, OR (C) THE LIMITED REMEDIES PROVIDED IN THIS 71 | SECTION FAIL OF THEIR ESSENTIAL PURPOSE; AND 72 | - ii. NOT APPLY TO ANY DAMAGE THAT LINKEDIN MAY CAUSE YOU INTENTIONALLY OR 73 | KNOWINGLY IN VIOLATION OF THESE TERMS OR APPLICABLE LAW, OR AS OTHERWISE 74 | MANDATED BY APPLICABLE LAW THAT CANNOT BE DISCLAIMED IN THESE TERMS. 75 | 76 | 6. Termination. This Agreement automatically terminates upon your breach of 77 | this Agreement or termination of your LinkedIn Learning subscription. On 78 | termination, all licenses granted under this Agreement will terminate 79 | immediately and you will delete the Licensed Materials. Sections 2-7 of this 80 | Agreement survive any termination of this Agreement. LinkedIn may discontinue 81 | the availability of some or all of the Licensed Materials at any time for any 82 | reason. 83 | 84 | 7. Miscellaneous. This Agreement will be governed by and construed in 85 | accordance with the laws of the State of California without regard to conflict 86 | of laws principles. The exclusive forum for any disputes arising out of or 87 | relating to this Agreement shall be an appropriate federal or state court 88 | sitting in the County of Santa Clara, State of California. If LinkedIn does 89 | not act to enforce a breach of this Agreement, that does not mean that 90 | LinkedIn has waived its right to enforce this Agreement. The Agreement does 91 | not create a partnership, agency relationship, or joint venture between the 92 | parties. Neither party has the power or authority to bind the other or to 93 | create any obligation or responsibility on behalf of the other. You may not, 94 | without LinkedIn’s prior written consent, assign or delegate any rights or 95 | obligations under these terms, including in connection with a change of 96 | control. Any purported assignment and delegation shall be ineffective. The 97 | Agreement shall bind and inure to the benefit of the parties, their respective 98 | successors and permitted assigns. If any provision of the Agreement is 99 | unenforceable, that provision will be modified to render it enforceable to the 100 | extent possible to give effect to the parties’ intentions and the remaining 101 | provisions will not be affected. This Agreement is the only agreement between 102 | you and LinkedIn regarding the Licensed Materials, and supersedes all prior 103 | agreements relating to the Licensed Materials. 104 | 105 | Last Updated: March 2019 106 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Copyright 2023 LinkedIn Corporation 2 | All Rights Reserved. 3 | 4 | Licensed under the LinkedIn Learning Exercise File License (the "License"). 5 | See LICENSE in the project root for license information. 6 | 7 | Please note, this project may automatically load third party code from external 8 | repositories (for example, NPM modules, Composer packages, or other dependencies). 9 | If so, such third party code may be subject to other license terms than as set 10 | forth above. In addition, such third party code may also depend on and load 11 | multiple tiers of dependencies. Please review the applicable licenses of the 12 | additional dependencies. 
13 | -------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # Introduction to Generative Adversarial Networks (GANs)
2 | This is the repository for the LinkedIn Learning course Introduction to Generative Adversarial Networks (GANs). The full course is available from [LinkedIn Learning][lil-course-url].
3 | 
4 | ![Introduction to Generative Adversarial Networks (GANs)][lil-thumbnail-url]
5 | 
6 | Recently, you’ve probably seen the impacts of large-scale generative art, generative text, and generative movies. Do you want to understand the basics of how this type of AI works? In this course, Martin Kemka, founder of the machine learning production house Northraine, introduces you to a very important component in the world of generative AI: Generative Adversarial Networks (GANs). Learn about the history of GANs, including where they came from and how they changed over the last decade. Find out how to train a model as you examine the model architecture and how the structure of multiple models works together. Get hands-on experience training a simple model in Jupyter Notebook. Plus, get insights on the current state of generative AI and thoughts on where it’s going next.
7 | 
8 | 
9 | 
10 | ## Instructions
11 | This repository has branches for each of the videos in the course. You can use the branch pop-up menu in GitHub to switch to a specific branch and take a look at the course at that stage, or you can add `/tree/BRANCH_NAME` to the URL to go to the branch you want to access.
12 | 
13 | ## Branches
14 | The branches are structured to correspond to the videos in the course. The naming convention is `CHAPTER#_MOVIE#`. As an example, the branch named `02_03` corresponds to the second chapter and the third video in that chapter.
15 | Some branches will have a beginning and an end state. These are marked with the letters `b` for "beginning" and `e` for "end". The `b` branch contains the code as it is at the beginning of the movie. The `e` branch contains the code as it is at the end of the movie. The `main` branch holds the final state of the code as it is at the end of the course.
16 | 
17 | When switching from one exercise files branch to the next after making changes to the files, you may get a message like this:
18 | 
19 |     error: Your local changes to the following files would be overwritten by checkout: [files]
20 |     Please commit your changes or stash them before you switch branches.
21 |     Aborting
22 | 
23 | To resolve this issue:
24 | 
25 |     Add changes to git using this command: git add .
26 |     Commit changes using this command: git commit -m "some message"
27 | 
28 | 
29 | ### Instructor
30 | 
31 | Martin Kemka 
32 | 
33 | Machine learning developer and data scientist
34 | 
35 | 
36 | 
37 | Check out my other courses on [LinkedIn Learning](https://www.linkedin.com/learning/instructors/martin-kemka).
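## Generating samples after training

Once you have run every cell in `GAN.ipynb`, you can keep sampling new digits from the trained generator. The snippet below is a minimal sketch, assuming the notebook's `netG`, `nz`, and `device` objects are still defined in your session (the output filename here is arbitrary):

    import torch
    import torchvision.utils as vutils

    netG.eval()                # inference mode: use running batch-norm statistics
    with torch.no_grad():      # no gradients are needed for sampling
        noise = torch.randn(64, nz, 1, 1, device=device)  # 64 fresh latent vectors
        samples = netG(noise)  # shape (64, 1, 64, 64), values in [-1, 1]
    vutils.save_image(samples, 'generated_digits.png', normalize=True)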
38 | 39 | [lil-course-url]: https://www.linkedin.com/learning/introduction-to-generative-adversarial-networks-gans?dApp=59033956 40 | [lil-thumbnail-url]: https://media.licdn.com/dms/image/D560DAQHvholt6Ju2vw/learning-public-crop_675_1200/0/1680211769471?e=2147483647&v=beta&t=jvDjzCtoh3zaew8AGTccbLni0mxkU7LiVlf1Wg7-G94 41 | -------------------------------------------------------------------------------- /fake_samples_epoch_000.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinkedInLearning/generative-ai-foundations-generative-adversarial-networks-gans-4380315/1e5f4588c1f5dab43153d2e1bb7f71c1935c22e1/fake_samples_epoch_000.png -------------------------------------------------------------------------------- /real_samples.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/LinkedInLearning/generative-ai-foundations-generative-adversarial-networks-gans-4380315/1e5f4588c1f5dab43153d2e1bb7f71c1935c22e1/real_samples.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # Specify Python package requirements for your project here (e.g., Mako==1.1.1). If your project doesn't require these, you can leave this file unchanged or delete it. 2 | --------------------------------------------------------------------------------
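The notebook itself only imports `torch` and `torchvision`, so a minimal requirements.txt for this repository could list just those two packages (left unpinned here, since the course does not pin versions):

    torch
    torchvision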