├── .github └── workflows │ └── gh_publish.yaml ├── README.md ├── research ├── .gitignore ├── batch.ipynb ├── main.ipynb ├── make_collage.ipynb ├── requirements.txt ├── src │ ├── collage.py │ ├── collage_save.py │ ├── face_loss.py │ ├── gan.py │ ├── notebook_utils.py │ ├── palette.py │ ├── pytorch_utils.py │ └── transform_utils.py └── upload_site_data.ipynb └── website ├── .gitignore ├── .vscode └── extensions.json ├── README.md ├── package-lock.json ├── package.json ├── postcss.config.js ├── rollup.config.js ├── src ├── client.ts ├── components │ ├── ImageData.svelte │ └── Nav.svelte ├── data.ts ├── dataTypes.ts ├── global.d.ts ├── io_utils.ts ├── routes │ ├── _error.svelte │ ├── _layout.svelte │ ├── about.svelte │ ├── exhibits │ │ ├── [slug] │ │ │ └── index.svelte │ │ ├── _exhibitMini.svelte │ │ ├── _exhibitMiniAutoPlay.svelte │ │ └── index.svelte │ └── index.svelte ├── server.ts ├── service-worker.ts ├── template.html └── url_utils.ts ├── static ├── favicon.ico ├── global.css ├── logo-192.png ├── logo-512.png ├── manifest.json └── sm-base.css ├── tailwind.config.js ├── tailwind.pcss └── tsconfig.json /.github/workflows/gh_publish.yaml: -------------------------------------------------------------------------------- 1 | # This is a basic workflow to help you get started with Actions 2 | 3 | name: web 4 | 5 | # Controls when the action will run. Triggers the workflow on push or pull request 6 | # events but only for the master branch 7 | on: 8 | push: 9 | branches: [ "main" ] 10 | jobs: 11 | publish: 12 | # The type of runner that the job will run on 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it 17 | - uses: actions/checkout@v2 18 | - uses: actions/setup-node@v1 19 | with: 20 | node-version: '12' 21 | - run: cd website && npm install && npm run export 22 | - name: Deploy to GitHub Pages 23 | uses: peaceiris/actions-gh-pages@v3 24 | with: 25 | publish_dir: website/__sapper__/export 26 | publish_branch: gh-pages 27 | github_token: ${{ secrets.GITHUB_TOKEN }} 28 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | source code for https://derivative.works 2 | 3 | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/tals/derivative-works/blob/main/research/make_collage.ipynb) 4 | 5 | 6 | ## About 7 | Derivative-Works is an experiment in using machine learning to create image collages. 8 | 9 | The algorithm cuts out shapes from images and rearranges them to create a face. 10 | 11 | #### Created By 12 | [Joel](https://www.joelsimon.net/) & [Tal](https://twitter.com/eiopa) 13 | 14 | 15 | ## Source Materials 16 | All of the reference images are in the public domain, created in Artbreeder using BigGAN and StyleGAN. 17 | 18 | ## Method 19 | 1. A patch generator (DCGAN) trained on Perlin noise was taken from a previous project. It creates a high diversity of shapes and is fully-differentiable. 20 | 2. There are a fixed number of patches that each has a corresponding latent vector and transformation matrices. These transformations control where in the reference image the patch is cut from and where in the canvas it is placed. 21 | 3. These variables are then optimized (using Adam) to do feature inversion over a face classifier (DLIB’s CNN model). 
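
A condensed sketch of step 3's loop, pieced together from `research/main.ipynb` / `research/batch.ipynb` (paths and the exact hyperparameters here are illustrative, not canonical — the notebooks vary the learning rate and loss weights between runs):

```python
import torch
import torch.nn.functional as F
from src.gan import Generator
from src.collage import Collager
from src.face_loss import DlibFaceLoss
from src.palette import load_images
from src.pytorch_utils import augment

img_size = 512
mask_generator = Generator(img_size=128, latent_size=100, channels=1).cuda()
mask_generator.load_state_dict(torch.load("./saved_models/dcgan_gen_128"))
mask_generator.eval()

img_paths = ["./datasets/ab_biggan/example.jpeg"]  # hypothetical path; any reference images work
palette_imgs = F.interpolate(load_images(img_paths, 1024), size=(img_size, img_size), mode="bilinear")
collager = Collager(palette_imgs, mask_generator, img_size, patch_per_img=10)
face_loss = DlibFaceLoss(filter_index=1)

# The only free variables: patch latents Z plus the cut/paste angles, translations, scales, ordering.
collage_data = collager.makeRandom(trans_scale=.2)
for x in collage_data:
    x.requires_grad_(True)
Z = collage_data[0]

n_steps, lr = 600, 1e-2
opt = torch.optim.Adam(collage_data, lr=lr)
scheduler = torch.optim.lr_scheduler.OneCycleLR(opt, max_lr=lr, total_steps=n_steps)

for step in range(n_steps):
    opt.zero_grad()
    img, _ = collager(*collage_data)        # render the collage (roughly [-1, 1] range)
    aug = augment(img, n=3)                 # random augmentations for a more robust signal
    fl = face_loss((aug + 1) * .5).mean()   # feature inversion: maximize the face detector's response
    loss = fl + .25 * Z.norm() - .01 * img.mean()
    loss.backward(retain_graph=True)
    opt.step()
    scheduler.step()
```
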
22 | 23 | The primary difference between this method and vanilla inversion is the input medium: instead of optimizing pixels directly, we optimize parameters. This simple technique lead to a variety of textures and compositions and the videos show the actual optimizations. 24 | 25 | ## Repo 26 | ### /website 27 | The website listed above. Uses svelte + typescript + sapper + tailwind 28 | 29 | ### /research 30 | See make_collage.ipynb to make your own 31 | 32 | Exporting the data for the site is a little buggy 😇 33 | 34 | 35 | ## Special Thanks 36 | - [@jacobgil](https://github.com/jacobgil/) for his [dlib port + feature inversion repo](https://github.com/jacobgil/dlib_facedetector_pytorch) which made things work :) 37 | - [@reiinakano](https://github.com/reiinakano) for [Neural Painters](https://github.com/reiinakano/neural-painters), which was helpful during our research 38 | -------------------------------------------------------------------------------- /research/.gitignore: -------------------------------------------------------------------------------- 1 | datasets/ 2 | saved_models/ 3 | saved_videos/ 4 | results/ 5 | __pycache__ 6 | .ipynb_checkpoints/ -------------------------------------------------------------------------------- /research/batch.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 2, 6 | "metadata": { 7 | "scrolled": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "%matplotlib inline\n", 12 | "%load_ext autoreload\n", 13 | "%autoreload 2" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": 3, 19 | "metadata": { 20 | "scrolled": true 21 | }, 22 | "outputs": [], 23 | "source": [ 24 | "import os, sys, math, random\n", 25 | "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n", 26 | "sys.path.insert(0, \"/home/joel/Repos/dlib_facedetector_pytorch\")\n", 27 | "import numpy as np\n", 28 | "import kornia\n", 29 | "import torch\n", 30 | "import torch.nn as nn\n", 31 | "import matplotlib.pyplot as plt\n", 32 | "from PIL import Image\n", 33 | "from torchvision import transforms\n", 34 | "import torch.nn.functional as F\n", 35 | "from tqdm.autonotebook import tqdm\n", 36 | "from pytorch_pretrained_biggan import BigGAN, convert_to_images, save_as_images\n", 37 | "from pathlib import Path\n", 38 | "from datetime import datetime\n", 39 | "import random, json\n", 40 | "\n", 41 | "from src.notebook_utils import imshow, imgrid, pltshow, draw_tensors\n", 42 | "from src.face_loss import DlibFaceLoss\n", 43 | "from src.pytorch_utils import augment\n", 44 | "from src.palette import random_biggan, load_directory, load_images\n", 45 | "from src.collage import Collager\n", 46 | "from src.collage_save import CollageSaver" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": 4, 52 | "metadata": { 53 | "scrolled": true 54 | }, 55 | "outputs": [], 56 | "source": [ 57 | "img_size = 512" 58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": 5, 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "face_loss = DlibFaceLoss(filter_index=1)" 67 | ] 68 | }, 69 | { 70 | "cell_type": "markdown", 71 | "metadata": {}, 72 | "source": [ 73 | "# Mask generator" 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": 6, 79 | "metadata": { 80 | "scrolled": true 81 | }, 82 | "outputs": [], 83 | "source": [ 84 | "from src.gan import Generator\n", 85 | "mask_generator = Generator(img_size=128, latent_size=100, 
channels=1).cuda()\n", 86 | "# https://drive.google.com/file/d/1IhoB6lxbKxL66F0X99ntL-t3-XKnxDPZ/view?usp=sharing\n", 87 | "model_path = './saved_models/dcgan_gen_128'\n", 88 | "mask_generator.load_state_dict(torch.load(model_path))\n", 89 | "mask_generator.eval()\n", 90 | "None" 91 | ] 92 | }, 93 | { 94 | "cell_type": "markdown", 95 | "metadata": {}, 96 | "source": [ 97 | "# Make or load the palette" 98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": 10, 103 | "metadata": { 104 | "scrolled": true 105 | }, 106 | "outputs": [], 107 | "source": [ 108 | "img_paths = []\n", 109 | "# for img_dir in ('./datasets/ab_biggan/', './datasets/sci-bio-art/', './datasets/eyes_closed/'):\n", 110 | "# img_paths += [ os.path.join(img_dir, n) for n in os.listdir(img_dir) ] \n", 111 | "\n", 112 | "img_paths = [\n", 113 | " './datasets/ab_biggan/2db513d411406270f1ee_hires.jpeg',\n", 114 | " './datasets/ab_biggan/2304b8bce5b78c75893e_hires.jpeg',\n", 115 | "]\n", 116 | " \n", 117 | "all_palette_imgs_large = load_images(img_paths, 1024)\n", 118 | "all_palette_imgs = F.interpolate(\n", 119 | " all_palette_imgs_large,\n", 120 | " size=(img_size, img_size),\n", 121 | " mode='bilinear'\n", 122 | ")\n", 123 | "# print(len(all_palette_imgs_large))" 124 | ] 125 | }, 126 | { 127 | "cell_type": "markdown", 128 | "metadata": {}, 129 | "source": [ 130 | "# Optimization" 131 | ] 132 | }, 133 | { 134 | "cell_type": "code", 135 | "execution_count": null, 136 | "metadata": { 137 | "scrolled": false 138 | }, 139 | "outputs": [], 140 | "source": [ 141 | "n_steps=600\n", 142 | "lr=2e-2\n", 143 | "\n", 144 | "while True:\n", 145 | "# n_refs = random.randint(1, 2)\n", 146 | " n_refs = random.choice((1, 2, 3, 4))\n", 147 | " indices = random.sample(range(len(all_palette_imgs_large)), n_refs)\n", 148 | " patch_per_img = random.randint(8, 28) // n_refs\n", 149 | " palette_imgs_large = all_palette_imgs_large[indices]\n", 150 | " palette_imgs = all_palette_imgs[indices]\n", 151 | " collager = Collager(palette_imgs, mask_generator, img_size, patch_per_img)\n", 152 | " \n", 153 | " draw_tensors(F.interpolate(palette_imgs, size=(200, 200)))\n", 154 | " \n", 155 | " frames = []\n", 156 | " collage_data = collager.makeRandom(trans_scale=.2)\n", 157 | " params = collage_data\n", 158 | " Z = collage_data[0]\n", 159 | "\n", 160 | " for x in collage_data:\n", 161 | " x.requires_grad_(True)\n", 162 | "\n", 163 | " opt = torch.optim.Adam(params, lr=lr)\n", 164 | " scheduler = torch.optim.lr_scheduler.OneCycleLR(opt, max_lr=lr, total_steps=n_steps)\n", 165 | "\n", 166 | " pbar = tqdm(total=n_steps)\n", 167 | " loss_history = []\n", 168 | "\n", 169 | " for i in range(n_steps):\n", 170 | " percent = i / n_steps\n", 171 | " pbar.update() \n", 172 | " opt.zero_grad()\n", 173 | " fl = torch.zeros(1)\n", 174 | " norm_loss = .25 * Z.norm()\n", 175 | " img, _ = collager(*collage_data)\n", 176 | " aug = augment(img, n=3)\n", 177 | " fl = face_loss(((aug+1)*.5)).mean()\n", 178 | " loss = fl + norm_loss - .01*img.mean()\n", 179 | " loss_history.append(loss.detach().cpu().item())\n", 180 | " loss.backward(retain_graph=True)\n", 181 | " opt.step()\n", 182 | " scheduler.step()\n", 183 | " pbar.set_description(f\"fl: {fl.item():.3f}\")\n", 184 | " frames.append(\n", 185 | " np.array(convert_to_images(img.detach().cpu())[0])\n", 186 | " )\n", 187 | " # Export results\n", 188 | " saver = CollageSaver()\n", 189 | " saver.save_palette(palette_imgs)\n", 190 | " print(saver.path)\n", 191 | " saver.save_video(frames)\n", 192 | "\n", 193 | " 
draw_tensors(img)\n", 194 | "\n", 195 | " export_collager = Collager(palette_imgs_large, mask_generator, 1024, patch_per_img)\n", 196 | " with torch.no_grad():\n", 197 | " hires, data = export_collager(*collage_data, return_data=True) \n", 198 | " saver.save(hires, data, final=True)\n", 199 | "\n", 200 | " with open(saver.path / 'image_names.txt', 'w') as outfile:\n", 201 | " json.dump([img_paths[i] for i in indices], outfile)\n" 202 | ] 203 | }, 204 | { 205 | "cell_type": "code", 206 | "execution_count": null, 207 | "metadata": { 208 | "scrolled": true 209 | }, 210 | "outputs": [], 211 | "source": [ 212 | "!ls ./results" 213 | ] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "execution_count": null, 218 | "metadata": {}, 219 | "outputs": [], 220 | "source": [] 221 | } 222 | ], 223 | "metadata": { 224 | "kernelspec": { 225 | "display_name": "Python 3", 226 | "language": "python", 227 | "name": "python3" 228 | }, 229 | "language_info": { 230 | "codemirror_mode": { 231 | "name": "ipython", 232 | "version": 3 233 | }, 234 | "file_extension": ".py", 235 | "mimetype": "text/x-python", 236 | "name": "python", 237 | "nbconvert_exporter": "python", 238 | "pygments_lexer": "ipython3", 239 | "version": "3.6.9" 240 | }, 241 | "toc": { 242 | "base_numbering": 1, 243 | "nav_menu": {}, 244 | "number_sections": true, 245 | "sideBar": true, 246 | "skip_h1_title": false, 247 | "title_cell": "Table of Contents", 248 | "title_sidebar": "Contents", 249 | "toc_cell": false, 250 | "toc_position": {}, 251 | "toc_section_display": true, 252 | "toc_window_display": false 253 | } 254 | }, 255 | "nbformat": 4, 256 | "nbformat_minor": 2 257 | } 258 | -------------------------------------------------------------------------------- /research/main.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": { 7 | "scrolled": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "%matplotlib inline\n", 12 | "%load_ext autoreload\n", 13 | "%autoreload 2" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "metadata": { 20 | "scrolled": true 21 | }, 22 | "outputs": [], 23 | "source": [ 24 | "import os, sys, math, json, random\n", 25 | "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n", 26 | "sys.path.insert(0, \"/home/joel/Repos/dlib_facedetector_pytorch\")\n", 27 | "import numpy as np\n", 28 | "import kornia\n", 29 | "import torch\n", 30 | "import torch.nn as nn\n", 31 | "import matplotlib.pyplot as plt\n", 32 | "from PIL import Image\n", 33 | "from torchvision import transforms\n", 34 | "import torch.nn.functional as F\n", 35 | "from tqdm.autonotebook import tqdm\n", 36 | "from pytorch_pretrained_biggan import BigGAN, convert_to_images, save_as_images\n", 37 | "from src.notebook_utils import imshow, imgrid, pltshow, draw_tensors\n", 38 | "from src.face_loss import DlibFaceLoss\n", 39 | "from src.pytorch_utils import augment\n", 40 | "from src.palette import random_biggan, load_directory, load_images\n", 41 | "from src.collage import Collager\n", 42 | "from src.collage_save import CollageSaver" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": null, 48 | "metadata": { 49 | "scrolled": true 50 | }, 51 | "outputs": [], 52 | "source": [ 53 | "img_size = 512" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "face_loss = DlibFaceLoss(filter_index=1)" 63 | ] 64 | 
}, 65 | { 66 | "cell_type": "markdown", 67 | "metadata": {}, 68 | "source": [ 69 | "# Mask generator" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": null, 75 | "metadata": { 76 | "scrolled": true 77 | }, 78 | "outputs": [], 79 | "source": [ 80 | "from src.gan import Generator\n", 81 | "mask_generator = Generator(img_size=128, latent_size=100, channels=1).cuda()\n", 82 | "# https://drive.google.com/file/d/1IhoB6lxbKxL66F0X99ntL-t3-XKnxDPZ/view?usp=sharing\n", 83 | "model_path = './saved_models/dcgan_gen_128'\n", 84 | "mask_generator.load_state_dict(torch.load(model_path))\n", 85 | "mask_generator.eval()\n", 86 | "None" 87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": null, 92 | "metadata": { 93 | "scrolled": true 94 | }, 95 | "outputs": [], 96 | "source": [ 97 | "masks = mask_generator(torch.randn(10, 100).cuda())\n", 98 | "pltshow(np.hstack(masks[:, 0].detach().cpu().numpy()))" 99 | ] 100 | }, 101 | { 102 | "cell_type": "markdown", 103 | "metadata": {}, 104 | "source": [ 105 | "# Make or load the palette" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": null, 111 | "metadata": {}, 112 | "outputs": [], 113 | "source": [ 114 | "img_sets = [\n", 115 | " [ \n", 116 | " './datasets/ab_biggan/2db513d411406270f1ee_hires.jpeg',\n", 117 | " './datasets/ab_biggan/2012fae76a825b03405d8f46_hires.jpeg'\n", 118 | " ],\n", 119 | " [\n", 120 | "# './datasets/ab_biggan/2db513d411406270f1ee_hires.jpeg',\n", 121 | " './datasets/ab_biggan/444d1f1c0ce6e5842c61_hires.jpeg',\n", 122 | " './datasets/ab_biggan/60009772d97f8a8fa0fe_hires.jpeg',\n", 123 | " \n", 124 | "# './datasets/ab_biggan/a81e9cae83db1804b094_hires.jpeg'\n", 125 | " ],\n", 126 | " [\n", 127 | " './datasets/sci-bio-art/7c78e087c6fd92a0e884_hires.jpeg',\n", 128 | " './datasets/sci-bio-art/285cf1ce669edeab9041_hires.jpeg',\n", 129 | " ]\n", 130 | "]" 131 | ] 132 | }, 133 | { 134 | "cell_type": "code", 135 | "execution_count": null, 136 | "metadata": { 137 | "scrolled": true 138 | }, 139 | "outputs": [], 140 | "source": [ 141 | "USE_BIGGAN = False\n", 142 | "if USE_BIGGAN:\n", 143 | " n_refs = 24*2\n", 144 | " biggan = BigGAN.from_pretrained(f'biggan-deep-{img_size}').cuda()\n", 145 | " palette = random_biggan(n_refs, img_size, biggan, seed=1, truncation=.4)\n", 146 | "else:\n", 147 | "# palette_imgs_large = torch.cat([\n", 148 | "# load_directory('./datasets/eyes_closed/', 1024),\n", 149 | "# load_directory('./datasets/artbreeder/', img_size)\n", 150 | "# ])\n", 151 | " img_names = img_sets[2]\n", 152 | " palette_imgs_large = load_images(img_names, 1024)\n", 153 | " palette_imgs = F.interpolate(palette_imgs_large, size=(img_size, img_size), mode='bilinear')" 154 | ] 155 | }, 156 | { 157 | "cell_type": "code", 158 | "execution_count": null, 159 | "metadata": {}, 160 | "outputs": [], 161 | "source": [ 162 | "patch_per_img = 20 // palette_imgs.shape[0]" 163 | ] 164 | }, 165 | { 166 | "cell_type": "code", 167 | "execution_count": null, 168 | "metadata": { 169 | "scrolled": false 170 | }, 171 | "outputs": [], 172 | "source": [ 173 | "draw_tensors(palette_imgs)" 174 | ] 175 | }, 176 | { 177 | "cell_type": "code", 178 | "execution_count": null, 179 | "metadata": {}, 180 | "outputs": [], 181 | "source": [ 182 | "collager = Collager(palette_imgs, mask_generator, img_size, patch_per_img)" 183 | ] 184 | }, 185 | { 186 | "cell_type": "markdown", 187 | "metadata": {}, 188 | "source": [ 189 | "# View Random" 190 | ] 191 | }, 192 | { 193 | "cell_type": "code", 194 | "execution_count": 
null, 195 | "metadata": { 196 | "scrolled": false 197 | }, 198 | "outputs": [], 199 | "source": [ 200 | "with torch.no_grad():\n", 201 | " imgs = [\n", 202 | " collager(*collager.makeRandom(seed=i, trans_scale=.2))[0]\n", 203 | " for i in range(2)\n", 204 | " ] \n", 205 | " draw_tensors(torch.stack(imgs).squeeze())" 206 | ] 207 | }, 208 | { 209 | "cell_type": "markdown", 210 | "metadata": {}, 211 | "source": [ 212 | "# Optimization" 213 | ] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "execution_count": null, 218 | "metadata": { 219 | "scrolled": false 220 | }, 221 | "outputs": [], 222 | "source": [ 223 | "n_steps=600\n", 224 | "seed=None\n", 225 | "lr=1e-2\n", 226 | "frames = []\n", 227 | "#save_every_step = False\n", 228 | "if seed is not None:\n", 229 | " random.seed(seed)\n", 230 | " np.random.seed(seed)\n", 231 | " torch.manual_seed(seed)\n", 232 | " torch.backends.cudnn.deterministic = True\n", 233 | " torch.backends.cudnn.benchmark = False\n", 234 | " \n", 235 | "collage_data = collager.makeRandom(seed=seed, trans_scale=.2)\n", 236 | "params = collage_data\n", 237 | "Z = collage_data[0]\n", 238 | "for x in collage_data:\n", 239 | " x.requires_grad_(True)\n", 240 | "opt = torch.optim.Adam(params, lr=lr)\n", 241 | "scheduler = torch.optim.lr_scheduler.OneCycleLR(opt, max_lr=lr, total_steps=n_steps)\n", 242 | "pbar = tqdm(total=n_steps)\n", 243 | "loss_history = []\n", 244 | "\n", 245 | "for i in range(n_steps):\n", 246 | " percent = i / n_steps\n", 247 | " pbar.update() \n", 248 | " opt.zero_grad()\n", 249 | " fl = torch.zeros(1)\n", 250 | " norm_loss = .25 * Z.norm()\n", 251 | " img, data = collager(*collage_data, return_data=False)\n", 252 | " aug = augment(img, n=3)\n", 253 | " fl = face_loss(((aug+1)*.5)).mean()\n", 254 | " loss = fl + norm_loss - .001*img.mean()\n", 255 | " loss_history.append(loss.detach().cpu().item())\n", 256 | " loss.backward(retain_graph=True)\n", 257 | " opt.step()\n", 258 | " scheduler.step()\n", 259 | " #if save_every_step:\n", 260 | " # data = export_collager(*collage_data, return_data=True)\n", 261 | " # saver.save(*data, final=True)\n", 262 | " pbar.set_description(f\"fl: {fl.item():.3f}\")\n", 263 | " frames.append(np.array(convert_to_images(img.detach().cpu())[0]))\n", 264 | " if i % 50 == 0 and i > 0 or i == n_steps-1:\n", 265 | " draw_tensors(img)" 266 | ] 267 | }, 268 | { 269 | "cell_type": "code", 270 | "execution_count": null, 271 | "metadata": {}, 272 | "outputs": [], 273 | "source": [ 274 | "opt_img, opt_collage_data, opt_history = \\\n", 275 | " img.detach(), tuple(x.detach() for x in collage_data), loss_history\n", 276 | "_= plt.plot(opt_history)" 277 | ] 278 | }, 279 | { 280 | "cell_type": "markdown", 281 | "metadata": {}, 282 | "source": [ 283 | "# Export video, highres image and masks" 284 | ] 285 | }, 286 | { 287 | "cell_type": "code", 288 | "execution_count": null, 289 | "metadata": {}, 290 | "outputs": [], 291 | "source": [ 292 | "saver = CollageSaver()\n", 293 | "saver.save_palette(palette_imgs)\n", 294 | "print(saver.path)\n", 295 | "saver.save_video(frames)\n", 296 | "\n", 297 | "# Regenerate at 2x scale.\n", 298 | "export_collager = Collager(palette_imgs_large, mask_generator, 1024, patch_per_img)\n", 299 | "with torch.no_grad():\n", 300 | " hires, data = export_collager(*collage_data, return_data=True) \n", 301 | " saver.save(hires, data, final=True)\n", 302 | "\n", 303 | "if img_names:\n", 304 | " with open(saver.path / 'image_names.txt', 'w') as outfile:\n", 305 | " json.dump(img_names, outfile)" 306 | ] 307 | }, 308 | { 
309 | "cell_type": "code", 310 | "execution_count": null, 311 | "metadata": {}, 312 | "outputs": [], 313 | "source": [] 314 | } 315 | ], 316 | "metadata": { 317 | "kernelspec": { 318 | "display_name": "Python 3", 319 | "language": "python", 320 | "name": "python3" 321 | }, 322 | "language_info": { 323 | "codemirror_mode": { 324 | "name": "ipython", 325 | "version": 3 326 | }, 327 | "file_extension": ".py", 328 | "mimetype": "text/x-python", 329 | "name": "python", 330 | "nbconvert_exporter": "python", 331 | "pygments_lexer": "ipython3", 332 | "version": "3.6.9" 333 | }, 334 | "toc": { 335 | "base_numbering": 1, 336 | "nav_menu": {}, 337 | "number_sections": true, 338 | "sideBar": true, 339 | "skip_h1_title": false, 340 | "title_cell": "Table of Contents", 341 | "title_sidebar": "Contents", 342 | "toc_cell": false, 343 | "toc_position": {}, 344 | "toc_section_display": true, 345 | "toc_window_display": false 346 | } 347 | }, 348 | "nbformat": 4, 349 | "nbformat_minor": 2 350 | } 351 | -------------------------------------------------------------------------------- /research/make_collage.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "id": "jdm1tr5igN6G" 7 | }, 8 | "source": [ 9 | "# Setup the environment" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "metadata": { 16 | "colab": { 17 | "base_uri": "https://localhost:8080/", 18 | "height": 310 19 | }, 20 | "id": "Pe0Ao34mxFyk", 21 | "outputId": "e911feb9-1054-4d25-80b6-aeaf6a723759" 22 | }, 23 | "outputs": [], 24 | "source": [ 25 | "!pip install kornia==0.4.1 tqdm==4.45.0 sk-video==1.1.10\n", 26 | "\n", 27 | "# colab broke something with urllib3/botocore\n", 28 | "!pip install -U urllib3 botocore" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": null, 34 | "metadata": {}, 35 | "outputs": [], 36 | "source": [ 37 | "%load_ext autoreload\n", 38 | "%autoreload 2" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": null, 44 | "metadata": { 45 | "ExecuteTime": { 46 | "end_time": "2020-10-14T21:39:31.413013Z", 47 | "start_time": "2020-10-14T21:39:26.626076Z" 48 | }, 49 | "colab": { 50 | "base_uri": "https://localhost:8080/", 51 | "height": 748 52 | }, 53 | "id": "wMm2sfLKgWz5", 54 | "outputId": "6c8139ba-44f7-41aa-929e-36cc1a3fb24c" 55 | }, 56 | "outputs": [], 57 | "source": [ 58 | "!git clone https://github.com/tals/derivative-works\n", 59 | "!git clone https://github.com/jacobgil/dlib_facedetector_pytorch\n", 60 | " \n", 61 | " # Used for BigGAN source image generation and there's still some bad dependencies on it\n", 62 | "!pip install pytorch_pretrained_biggan" 63 | ] 64 | }, 65 | { 66 | "cell_type": "code", 67 | "execution_count": null, 68 | "metadata": { 69 | "id": "L2BAXRjyhQpa" 70 | }, 71 | "outputs": [], 72 | "source": [ 73 | "import sys\n", 74 | "import os\n", 75 | "sys.path.insert(0, os.path.join(os.getcwd(), \"derivative-works/research\"))\n", 76 | "sys.path.insert(0, os.path.join(os.getcwd(), \"dlib_facedetector_pytorch\"))" 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": null, 82 | "metadata": { 83 | "id": "7iEwy_9MgMIE", 84 | "scrolled": true 85 | }, 86 | "outputs": [], 87 | "source": [ 88 | "%matplotlib inline\n", 89 | "%load_ext autoreload\n", 90 | "%autoreload 2" 91 | ] 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": null, 96 | "metadata": { 97 | "ExecuteTime": { 98 | "end_time": 
"2020-10-14T21:41:50.866189Z", 99 | "start_time": "2020-10-14T21:41:48.709605Z" 100 | }, 101 | "id": "zvrZDaXcgMIH", 102 | "scrolled": true 103 | }, 104 | "outputs": [], 105 | "source": [ 106 | "import os, sys, math, json, random\n", 107 | "import numpy as np\n", 108 | "import kornia\n", 109 | "import torch\n", 110 | "import torch.nn as nn\n", 111 | "import matplotlib.pyplot as plt\n", 112 | "from PIL import Image\n", 113 | "from torchvision import transforms\n", 114 | "import torch.nn.functional as F\n", 115 | "from tqdm.autonotebook import tqdm\n", 116 | "from pytorch_pretrained_biggan import BigGAN, convert_to_images\n", 117 | "from src.notebook_utils import imshow, imgrid, pltshow, draw_tensors\n", 118 | "from src.pytorch_utils import augment\n", 119 | "from src.palette import random_biggan, load_directory, load_images\n", 120 | "from src.collage import Collager\n", 121 | "from src.collage_save import CollageSaver" 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": null, 127 | "metadata": { 128 | "id": "CqWsrKbygMIJ", 129 | "scrolled": true 130 | }, 131 | "outputs": [], 132 | "source": [ 133 | "img_size = 512" 134 | ] 135 | }, 136 | { 137 | "cell_type": "markdown", 138 | "metadata": { 139 | "id": "dAS6gIYQy6Ea" 140 | }, 141 | "source": [ 142 | "# Face Loss" 143 | ] 144 | }, 145 | { 146 | "cell_type": "code", 147 | "execution_count": null, 148 | "metadata": { 149 | "id": "eS8VmilQy4qO" 150 | }, 151 | "outputs": [], 152 | "source": [ 153 | "import os, sys\n", 154 | "import dlib_torch_converter\n", 155 | "import torch\n", 156 | "import torch.nn as nn\n", 157 | "import torch.nn.functional as F\n", 158 | "from PIL import Image\n", 159 | "import torchvision.transforms as transforms\n", 160 | "from pathlib import Path\n", 161 | "\n", 162 | "MODEL_PATH = Path(dlib_torch_converter.__file__).parent / 'face.xml'\n", 163 | "assert MODEL_PATH.exists()\n", 164 | "\n", 165 | "class DlibFaceLoss:\n", 166 | " def __init__(self, filter_index=1, target_image_path=None):\n", 167 | " self.filter_index = filter_index\n", 168 | " self.dlib_model = dlib_torch_converter.get_model(str(MODEL_PATH)).eval().cuda()\n", 169 | " self.model = nn.Sequential(*[self.dlib_model._modules[i] \\\n", 170 | " for i in list(self.dlib_model._modules.keys())[:-2]])\n", 171 | " self.model.eval()\n", 172 | " self.model.zero_grad()\n", 173 | " self.target_activations = None\n", 174 | " if target_image_path:\n", 175 | " target_ten = transforms.ToTensor()(Image.open(target_image_path)).unsqueeze(0)\n", 176 | " target_ten = F.interpolate(target_ten, size=(128, 128), mode='bilinear')\n", 177 | " self.target_activations = self.model(target_ten.cuda()).detach()\n", 178 | "\n", 179 | " def __call__(self, img_tensors):\n", 180 | " # [0, 1] input range\n", 181 | " self.model.zero_grad()\n", 182 | " img_tensors = F.interpolate(img_tensors, size=(128, 128), mode='bilinear')\n", 183 | "\n", 184 | " out = self.model(img_tensors)\n", 185 | " size = out.size(2)\n", 186 | " if self.target_activations is not None:\n", 187 | " # loss = torch.dist(out[0, :5], self.target_activations[0, :5])\n", 188 | " loss = torch.dist(out, self.target_activations)\n", 189 | " else:\n", 190 | " # Take the middle pixel in the image.\n", 191 | " if self.filter_index == 'all':\n", 192 | " loss = -out[:, :, size//2, size//2]\n", 193 | " else:\n", 194 | " loss = -out[:, self.filter_index, size//2, size//2]\n", 195 | " return loss\n", 196 | "\n", 197 | "face_loss = DlibFaceLoss()" 198 | ] 199 | }, 200 | { 201 | "cell_type": "markdown", 202 | 
"metadata": { 203 | "id": "n0njj-hZgMIN" 204 | }, 205 | "source": [ 206 | "# Mask generator" 207 | ] 208 | }, 209 | { 210 | "cell_type": "code", 211 | "execution_count": null, 212 | "metadata": { 213 | "colab": { 214 | "base_uri": "https://localhost:8080/", 215 | "height": 82 216 | }, 217 | "id": "pJ9qJcyogMIO", 218 | "outputId": "a1ef3f38-f026-4523-9dce-96fc5fe85fae", 219 | "scrolled": true 220 | }, 221 | "outputs": [], 222 | "source": [ 223 | "from src.gan import Generator\n", 224 | "mask_generator = Generator(img_size=128, latent_size=100, channels=1).cuda()\n", 225 | "!gdown \"https://drive.google.com/u/0/uc?id=1IhoB6lxbKxL66F0X99ntL-t3-XKnxDPZ&export=download\"\n", 226 | "\n", 227 | "model_path = 'deriv_works_dcgan_gen_128'\n", 228 | "mask_generator.load_state_dict(torch.load(model_path))\n", 229 | "mask_generator.eval()\n", 230 | "None" 231 | ] 232 | }, 233 | { 234 | "cell_type": "code", 235 | "execution_count": null, 236 | "metadata": { 237 | "colab": { 238 | "base_uri": "https://localhost:8080/", 239 | "height": 63 240 | }, 241 | "id": "abuOna2fgMIQ", 242 | "outputId": "d03d2329-2f61-493d-b074-fc3af35d59f3", 243 | "scrolled": true 244 | }, 245 | "outputs": [], 246 | "source": [ 247 | "masks = mask_generator(torch.randn(10, 100).cuda())\n", 248 | "pltshow(np.hstack(masks[:, 0].detach().cpu().numpy()))" 249 | ] 250 | }, 251 | { 252 | "cell_type": "markdown", 253 | "metadata": { 254 | "id": "l3HZqSuxgMIU" 255 | }, 256 | "source": [ 257 | "# Make or load the palette" 258 | ] 259 | }, 260 | { 261 | "cell_type": "code", 262 | "execution_count": null, 263 | "metadata": { 264 | "colab": { 265 | "base_uri": "https://localhost:8080/", 266 | "height": 82 267 | }, 268 | "id": "uNkZt7YwgMIU", 269 | "outputId": "fc4946ee-b9ca-4de0-9484-e31057756680" 270 | }, 271 | "outputs": [], 272 | "source": [ 273 | "import subprocess\n", 274 | "def download_urls(urls):\n", 275 | " p = subprocess.run(['wget', '-i', \"-\", \"-P\", \"dataset\"], input=\"\\n\".join(urls), universal_newlines=True)\n", 276 | " return [f\"dataset/{x.split('/')[-1]}\" for x in urls]\n", 277 | "\n", 278 | "urls = [\n", 279 | " \"https://artbreeder.b-cdn.net/imgs/afc622a41966e3482a17.jpeg\",\n", 280 | " \"https://artbreeder.b-cdn.net/imgs/e8f11a059e51ce49f1fb.jpeg\",\n", 281 | " \"https://artbreeder.b-cdn.net/imgs/f9c1c5f14783165a5536.jpeg\",\n", 282 | " \"https://artbreeder.b-cdn.net/imgs/fb6d9b30088fb6a2aedfdbea.jpeg\",\n", 283 | "]\n", 284 | "dataset = download_urls(urls)\n", 285 | "dataset" 286 | ] 287 | }, 288 | { 289 | "cell_type": "code", 290 | "execution_count": null, 291 | "metadata": { 292 | "colab": { 293 | "base_uri": "https://localhost:8080/", 294 | "height": 70 295 | }, 296 | "id": "-5cpN5CJgMIW", 297 | "outputId": "043ef191-a885-47f6-c88b-856e0dfe88c8", 298 | "scrolled": true 299 | }, 300 | "outputs": [], 301 | "source": [ 302 | "USE_BIGGAN = False\n", 303 | "if USE_BIGGAN:\n", 304 | " n_refs = 24*2\n", 305 | " biggan = BigGAN.from_pretrained(f'biggan-deep-{img_size}').cuda()\n", 306 | " palette = random_biggan(n_refs, img_size, biggan, seed=1, truncation=.4)\n", 307 | "else:\n", 308 | " img_names = dataset\n", 309 | " palette_imgs_large = load_images(img_names, 1024)\n", 310 | " palette_imgs = F.interpolate(palette_imgs_large, size=(img_size, img_size), mode='bilinear')" 311 | ] 312 | }, 313 | { 314 | "cell_type": "code", 315 | "execution_count": null, 316 | "metadata": { 317 | "id": "mWu3ABZfgMIY" 318 | }, 319 | "outputs": [], 320 | "source": [ 321 | "patch_per_img = 20 // palette_imgs.shape[0]" 322 | ] 323 | }, 324 | 
{ 325 | "cell_type": "code", 326 | "execution_count": null, 327 | "metadata": { 328 | "colab": { 329 | "base_uri": "https://localhost:8080/", 330 | "height": 435 331 | }, 332 | "id": "IEpQ5QRmgMIa", 333 | "outputId": "a482e584-632b-49a4-9e39-8a769de1dd66", 334 | "scrolled": false 335 | }, 336 | "outputs": [], 337 | "source": [ 338 | "draw_tensors(palette_imgs)" 339 | ] 340 | }, 341 | { 342 | "cell_type": "code", 343 | "execution_count": null, 344 | "metadata": { 345 | "id": "wHBb42cFgMIc" 346 | }, 347 | "outputs": [], 348 | "source": [ 349 | "collager = Collager(palette_imgs, mask_generator, img_size, patch_per_img)" 350 | ] 351 | }, 352 | { 353 | "cell_type": "markdown", 354 | "metadata": { 355 | "id": "8ePJ9HWwgMIe" 356 | }, 357 | "source": [ 358 | "# View Random" 359 | ] 360 | }, 361 | { 362 | "cell_type": "code", 363 | "execution_count": null, 364 | "metadata": { 365 | "colab": { 366 | "base_uri": "https://localhost:8080/", 367 | "height": 581 368 | }, 369 | "id": "uDAVSiK4gMIe", 370 | "outputId": "d652c218-104b-4e49-c1ad-562488f844bc", 371 | "scrolled": false 372 | }, 373 | "outputs": [], 374 | "source": [ 375 | "%pdb on\n", 376 | "with torch.no_grad():\n", 377 | " imgs = [\n", 378 | " collager(*collager.makeRandom(seed=i, trans_scale=.2))[0]\n", 379 | " for i in range(2)\n", 380 | " ] \n", 381 | " draw_tensors(torch.stack(imgs).squeeze())" 382 | ] 383 | }, 384 | { 385 | "cell_type": "markdown", 386 | "metadata": { 387 | "id": "kSik-2oigMIg" 388 | }, 389 | "source": [ 390 | "# Optimization" 391 | ] 392 | }, 393 | { 394 | "cell_type": "code", 395 | "execution_count": null, 396 | "metadata": { 397 | "colab": { 398 | "base_uri": "https://localhost:8080/", 399 | "height": 1000, 400 | "referenced_widgets": [ 401 | "7c9b01f24c1a45fb9b34c165fa738357", 402 | "d37e3f083f7449db8f73e271f5e86322", 403 | "84362c7f65794bceacbe505edb29d31e", 404 | "bf1a99e665f042219874cdb2d8b1d981", 405 | "302b1d7c1b1e400ea7e87828dd3387ff", 406 | "a03ffb797d9d40b189bf71aafa651721", 407 | "325114a6bdf24faaa4e7990d3e55ea87", 408 | "c77447ae65c646adabbf1accce65ec0d" 409 | ] 410 | }, 411 | "id": "AYsHjFYbgMIg", 412 | "outputId": "2db524f2-462a-405b-b838-495da86e2395", 413 | "scrolled": true 414 | }, 415 | "outputs": [], 416 | "source": [ 417 | "n_steps=600\n", 418 | "seed=8\n", 419 | "lr=2e-2\n", 420 | "frames = []\n", 421 | "#save_every_step = False\n", 422 | "if seed is not None:\n", 423 | " random.seed(seed)\n", 424 | " np.random.seed(seed)\n", 425 | " torch.manual_seed(seed)\n", 426 | " torch.backends.cudnn.deterministic = True\n", 427 | " \n", 428 | "collage_data = collager.makeRandom(seed=seed, trans_scale=.2)\n", 429 | "params = collage_data\n", 430 | "Z = collage_data[0]\n", 431 | "for x in collage_data:\n", 432 | " x.requires_grad_(True)\n", 433 | "opt = torch.optim.Adam(params, lr=lr)\n", 434 | "scheduler = torch.optim.lr_scheduler.OneCycleLR(opt, max_lr=lr, total_steps=n_steps)\n", 435 | "pbar = tqdm(total=n_steps)\n", 436 | "loss_history = []\n", 437 | "\n", 438 | "for i in range(n_steps):\n", 439 | " percent = i / n_steps\n", 440 | " pbar.update() \n", 441 | " opt.zero_grad()\n", 442 | " fl = torch.zeros(1)\n", 443 | " norm_loss = .25 * Z.norm()\n", 444 | " img, data = collager(*collage_data, return_data=False)\n", 445 | " aug = augment(img, n=3)\n", 446 | " fl = face_loss(((aug+1)*.5)).mean()\n", 447 | " loss = fl + norm_loss - .01*img.mean()\n", 448 | " loss_history.append(loss.detach().cpu().item())\n", 449 | " loss.backward(retain_graph=True)\n", 450 | " opt.step()\n", 451 | " scheduler.step()\n", 452 | 
" #if save_every_step:\n", 453 | " # data = export_collager(*collage_data, return_data=True)\n", 454 | " # saver.save(*data, final=True)\n", 455 | " pbar.set_description(f\"fl: {fl.item():.3f}\")\n", 456 | " frames.append(np.array(convert_to_images(img.detach().cpu())[0]))\n", 457 | " if i % 50 == 0 and i > 0 or i == n_steps-1:\n", 458 | " draw_tensors(img)" 459 | ] 460 | }, 461 | { 462 | "cell_type": "code", 463 | "execution_count": null, 464 | "metadata": { 465 | "colab": { 466 | "base_uri": "https://localhost:8080/", 467 | "height": 265 468 | }, 469 | "id": "laKogxHZgMIi", 470 | "outputId": "306fcefb-3be6-4424-9ca7-ccbb24f89312" 471 | }, 472 | "outputs": [], 473 | "source": [ 474 | "opt_img, opt_collage_data, opt_history = \\\n", 475 | " img.detach(), tuple(x.detach() for x in collage_data), loss_history\n", 476 | "_= plt.plot(opt_history)" 477 | ] 478 | }, 479 | { 480 | "cell_type": "markdown", 481 | "metadata": { 482 | "id": "r66Nglh1gMIk" 483 | }, 484 | "source": [ 485 | "# Export video, highres image and masks" 486 | ] 487 | }, 488 | { 489 | "cell_type": "code", 490 | "execution_count": null, 491 | "metadata": { 492 | "id": "HvyOTtqcgMIl", 493 | "outputId": "4cbde9ac-00f5-4b7b-bc69-93737e21b334" 494 | }, 495 | "outputs": [], 496 | "source": [ 497 | "saver = CollageSaver()\n", 498 | "saver.save_palette(palette_imgs)\n", 499 | "print(saver.path)\n", 500 | "saver.save_video(frames)\n", 501 | "\n", 502 | "# Regenerate at 2x scale.\n", 503 | "export_collager = Collager(palette_imgs_large, mask_generator, 1024, patch_per_img)\n", 504 | "with torch.no_grad():\n", 505 | " hires, data = export_collager(*collage_data, return_data=True) \n", 506 | " saver.save(hires, data, final=True)\n", 507 | "\n", 508 | "if img_names:\n", 509 | " with open(saver.path / 'image_names.txt', 'w') as outfile:\n", 510 | " json.dump(img_names, outfile)" 511 | ] 512 | }, 513 | { 514 | "cell_type": "code", 515 | "execution_count": null, 516 | "metadata": {}, 517 | "outputs": [], 518 | "source": [ 519 | "# View video\n", 520 | "from IPython.display import HTML\n", 521 | "from base64 import b64encode\n", 522 | "mp4 = open(saver.path / (saver.key+'.mp4'),'rb').read()\n", 523 | "data_url = \"data:video/mp4;base64,\" + b64encode(mp4).decode()\n", 524 | "HTML(\"\"\"\"\"\" % data_url)" 525 | ] 526 | } 527 | ], 528 | "metadata": { 529 | "accelerator": "GPU", 530 | "colab": { 531 | "collapsed_sections": [], 532 | "name": "collage_maker.ipynb", 533 | "provenance": [] 534 | }, 535 | "kernelspec": { 536 | "display_name": "Python 3", 537 | "language": "python", 538 | "name": "python3" 539 | }, 540 | "language_info": { 541 | "codemirror_mode": { 542 | "name": "ipython", 543 | "version": 3 544 | }, 545 | "file_extension": ".py", 546 | "mimetype": "text/x-python", 547 | "name": "python", 548 | "nbconvert_exporter": "python", 549 | "pygments_lexer": "ipython3", 550 | "version": "3.7.7" 551 | }, 552 | "toc": { 553 | "base_numbering": 1, 554 | "nav_menu": {}, 555 | "number_sections": true, 556 | "sideBar": true, 557 | "skip_h1_title": false, 558 | "title_cell": "Table of Contents", 559 | "title_sidebar": "Contents", 560 | "toc_cell": false, 561 | "toc_position": {}, 562 | "toc_section_display": true, 563 | "toc_window_display": false 564 | }, 565 | "varInspector": { 566 | "cols": { 567 | "lenName": 16, 568 | "lenType": 16, 569 | "lenVar": 40 570 | }, 571 | "kernels_config": { 572 | "python": { 573 | "delete_cmd_postfix": "", 574 | "delete_cmd_prefix": "del ", 575 | "library": "var_list.py", 576 | "varRefreshCmd": 
"print(var_dic_list())" 577 | }, 578 | "r": { 579 | "delete_cmd_postfix": ") ", 580 | "delete_cmd_prefix": "rm(", 581 | "library": "var_list.r", 582 | "varRefreshCmd": "cat(var_dic_list()) " 583 | } 584 | }, 585 | "types_to_exclude": [ 586 | "module", 587 | "function", 588 | "builtin_function_or_method", 589 | "instance", 590 | "_Feature" 591 | ], 592 | "window_display": false 593 | }, 594 | "widgets": { 595 | "application/vnd.jupyter.widget-state+json": { 596 | "302b1d7c1b1e400ea7e87828dd3387ff": { 597 | "model_module": "@jupyter-widgets/controls", 598 | "model_name": "ProgressStyleModel", 599 | "state": { 600 | "_model_module": "@jupyter-widgets/controls", 601 | "_model_module_version": "1.5.0", 602 | "_model_name": "ProgressStyleModel", 603 | "_view_count": null, 604 | "_view_module": "@jupyter-widgets/base", 605 | "_view_module_version": "1.2.0", 606 | "_view_name": "StyleView", 607 | "bar_color": null, 608 | "description_width": "initial" 609 | } 610 | }, 611 | "325114a6bdf24faaa4e7990d3e55ea87": { 612 | "model_module": "@jupyter-widgets/controls", 613 | "model_name": "DescriptionStyleModel", 614 | "state": { 615 | "_model_module": "@jupyter-widgets/controls", 616 | "_model_module_version": "1.5.0", 617 | "_model_name": "DescriptionStyleModel", 618 | "_view_count": null, 619 | "_view_module": "@jupyter-widgets/base", 620 | "_view_module_version": "1.2.0", 621 | "_view_name": "StyleView", 622 | "description_width": "" 623 | } 624 | }, 625 | "7c9b01f24c1a45fb9b34c165fa738357": { 626 | "model_module": "@jupyter-widgets/controls", 627 | "model_name": "HBoxModel", 628 | "state": { 629 | "_dom_classes": [], 630 | "_model_module": "@jupyter-widgets/controls", 631 | "_model_module_version": "1.5.0", 632 | "_model_name": "HBoxModel", 633 | "_view_count": null, 634 | "_view_module": "@jupyter-widgets/controls", 635 | "_view_module_version": "1.5.0", 636 | "_view_name": "HBoxView", 637 | "box_style": "", 638 | "children": [ 639 | "IPY_MODEL_84362c7f65794bceacbe505edb29d31e", 640 | "IPY_MODEL_bf1a99e665f042219874cdb2d8b1d981" 641 | ], 642 | "layout": "IPY_MODEL_d37e3f083f7449db8f73e271f5e86322" 643 | } 644 | }, 645 | "84362c7f65794bceacbe505edb29d31e": { 646 | "model_module": "@jupyter-widgets/controls", 647 | "model_name": "FloatProgressModel", 648 | "state": { 649 | "_dom_classes": [], 650 | "_model_module": "@jupyter-widgets/controls", 651 | "_model_module_version": "1.5.0", 652 | "_model_name": "FloatProgressModel", 653 | "_view_count": null, 654 | "_view_module": "@jupyter-widgets/controls", 655 | "_view_module_version": "1.5.0", 656 | "_view_name": "ProgressView", 657 | "bar_style": "", 658 | "description": "fl: -15.883: 100%", 659 | "description_tooltip": null, 660 | "layout": "IPY_MODEL_a03ffb797d9d40b189bf71aafa651721", 661 | "max": 600, 662 | "min": 0, 663 | "orientation": "horizontal", 664 | "style": "IPY_MODEL_302b1d7c1b1e400ea7e87828dd3387ff", 665 | "value": 600 666 | } 667 | }, 668 | "a03ffb797d9d40b189bf71aafa651721": { 669 | "model_module": "@jupyter-widgets/base", 670 | "model_name": "LayoutModel", 671 | "state": { 672 | "_model_module": "@jupyter-widgets/base", 673 | "_model_module_version": "1.2.0", 674 | "_model_name": "LayoutModel", 675 | "_view_count": null, 676 | "_view_module": "@jupyter-widgets/base", 677 | "_view_module_version": "1.2.0", 678 | "_view_name": "LayoutView", 679 | "align_content": null, 680 | "align_items": null, 681 | "align_self": null, 682 | "border": null, 683 | "bottom": null, 684 | "display": null, 685 | "flex": null, 686 | "flex_flow": null, 687 | 
"grid_area": null, 688 | "grid_auto_columns": null, 689 | "grid_auto_flow": null, 690 | "grid_auto_rows": null, 691 | "grid_column": null, 692 | "grid_gap": null, 693 | "grid_row": null, 694 | "grid_template_areas": null, 695 | "grid_template_columns": null, 696 | "grid_template_rows": null, 697 | "height": null, 698 | "justify_content": null, 699 | "justify_items": null, 700 | "left": null, 701 | "margin": null, 702 | "max_height": null, 703 | "max_width": null, 704 | "min_height": null, 705 | "min_width": null, 706 | "object_fit": null, 707 | "object_position": null, 708 | "order": null, 709 | "overflow": null, 710 | "overflow_x": null, 711 | "overflow_y": null, 712 | "padding": null, 713 | "right": null, 714 | "top": null, 715 | "visibility": null, 716 | "width": null 717 | } 718 | }, 719 | "bf1a99e665f042219874cdb2d8b1d981": { 720 | "model_module": "@jupyter-widgets/controls", 721 | "model_name": "HTMLModel", 722 | "state": { 723 | "_dom_classes": [], 724 | "_model_module": "@jupyter-widgets/controls", 725 | "_model_module_version": "1.5.0", 726 | "_model_name": "HTMLModel", 727 | "_view_count": null, 728 | "_view_module": "@jupyter-widgets/controls", 729 | "_view_module_version": "1.5.0", 730 | "_view_name": "HTMLView", 731 | "description": "", 732 | "description_tooltip": null, 733 | "layout": "IPY_MODEL_c77447ae65c646adabbf1accce65ec0d", 734 | "placeholder": "​", 735 | "style": "IPY_MODEL_325114a6bdf24faaa4e7990d3e55ea87", 736 | "value": " 600/600 [03:03<00:00, 3.29it/s]" 737 | } 738 | }, 739 | "c77447ae65c646adabbf1accce65ec0d": { 740 | "model_module": "@jupyter-widgets/base", 741 | "model_name": "LayoutModel", 742 | "state": { 743 | "_model_module": "@jupyter-widgets/base", 744 | "_model_module_version": "1.2.0", 745 | "_model_name": "LayoutModel", 746 | "_view_count": null, 747 | "_view_module": "@jupyter-widgets/base", 748 | "_view_module_version": "1.2.0", 749 | "_view_name": "LayoutView", 750 | "align_content": null, 751 | "align_items": null, 752 | "align_self": null, 753 | "border": null, 754 | "bottom": null, 755 | "display": null, 756 | "flex": null, 757 | "flex_flow": null, 758 | "grid_area": null, 759 | "grid_auto_columns": null, 760 | "grid_auto_flow": null, 761 | "grid_auto_rows": null, 762 | "grid_column": null, 763 | "grid_gap": null, 764 | "grid_row": null, 765 | "grid_template_areas": null, 766 | "grid_template_columns": null, 767 | "grid_template_rows": null, 768 | "height": null, 769 | "justify_content": null, 770 | "justify_items": null, 771 | "left": null, 772 | "margin": null, 773 | "max_height": null, 774 | "max_width": null, 775 | "min_height": null, 776 | "min_width": null, 777 | "object_fit": null, 778 | "object_position": null, 779 | "order": null, 780 | "overflow": null, 781 | "overflow_x": null, 782 | "overflow_y": null, 783 | "padding": null, 784 | "right": null, 785 | "top": null, 786 | "visibility": null, 787 | "width": null 788 | } 789 | }, 790 | "d37e3f083f7449db8f73e271f5e86322": { 791 | "model_module": "@jupyter-widgets/base", 792 | "model_name": "LayoutModel", 793 | "state": { 794 | "_model_module": "@jupyter-widgets/base", 795 | "_model_module_version": "1.2.0", 796 | "_model_name": "LayoutModel", 797 | "_view_count": null, 798 | "_view_module": "@jupyter-widgets/base", 799 | "_view_module_version": "1.2.0", 800 | "_view_name": "LayoutView", 801 | "align_content": null, 802 | "align_items": null, 803 | "align_self": null, 804 | "border": null, 805 | "bottom": null, 806 | "display": null, 807 | "flex": null, 808 | "flex_flow": null, 809 | 
"grid_area": null, 810 | "grid_auto_columns": null, 811 | "grid_auto_flow": null, 812 | "grid_auto_rows": null, 813 | "grid_column": null, 814 | "grid_gap": null, 815 | "grid_row": null, 816 | "grid_template_areas": null, 817 | "grid_template_columns": null, 818 | "grid_template_rows": null, 819 | "height": null, 820 | "justify_content": null, 821 | "justify_items": null, 822 | "left": null, 823 | "margin": null, 824 | "max_height": null, 825 | "max_width": null, 826 | "min_height": null, 827 | "min_width": null, 828 | "object_fit": null, 829 | "object_position": null, 830 | "order": null, 831 | "overflow": null, 832 | "overflow_x": null, 833 | "overflow_y": null, 834 | "padding": null, 835 | "right": null, 836 | "top": null, 837 | "visibility": null, 838 | "width": null 839 | } 840 | } 841 | } 842 | } 843 | }, 844 | "nbformat": 4, 845 | "nbformat_minor": 1 846 | } 847 | -------------------------------------------------------------------------------- /research/requirements.txt: -------------------------------------------------------------------------------- 1 | # media 2 | Pillow==6.0.0 3 | sk-video==1.1.10 4 | 5 | # torch 6 | torch==1.6.0 7 | torchvision==0.7.0 8 | kornia==0.4.0 9 | 10 | # misc 11 | tqdm==4.45.0 12 | -------------------------------------------------------------------------------- /research/src/collage.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | import torch.nn.functional as F 4 | from src.transform_utils import transform_imgs, create_trans_matrix 5 | 6 | class Collager: 7 | def __init__(self, palette_imgs, mask_generator, img_size, patch_per_img=1): 8 | self.latent_size=100 # for shape generator 9 | self.patch_per_img = patch_per_img 10 | self.palette_imgs = palette_imgs 11 | self.mask_generator = mask_generator 12 | self.img_size = img_size 13 | self.n_patches = palette_imgs.shape[0] * patch_per_img 14 | 15 | def makeRandom(self, trans_scale=.2, seed=None): 16 | if seed is not None: 17 | torch.random.manual_seed(seed) 18 | n = self.n_patches 19 | Z = torch.randn(n, self.latent_size).cuda()/2 # Latent to create the patch 20 | 21 | # these are spans of , so shape is [2, N, ...] 22 | angles = torch.randn(2, n).cuda() 23 | translations = torch.randn((2, n, 2)).cuda() * trans_scale 24 | scales = torch.randn(2, n, 1).repeat(1, 1, 2).cuda() 25 | ordering = torch.randn(n).cuda() 26 | return Z, angles, translations, scales, ordering 27 | 28 | def _process_transforms(self, angle_raw, scale_raw, translation_raw): 29 | # Assume all input values are in normal distributions. 30 | scale = torch.sigmoid(scale_raw) # Scale is [0, 1.0] 31 | angle = torch.tanh(angle_raw) * 180 #[-180, 180] range 32 | translation = torch.tanh(translation_raw) * self.img_size * .5 33 | 34 | scale = torch.stack(( 35 | .25 + .75*scale[0], # Dont go too small or it gets blurry. 36 | scale[1] * .75 37 | )) 38 | 39 | return angle, scale, translation 40 | 41 | def __call__(self, Z, angle, translation, scale, ordering, debug=False, return_data=False): 42 | img_size = self.img_size 43 | masks = self.mask_generator(Z) # Create each mask shape. 
44 | masks = F.interpolate(masks, size=(img_size, img_size), mode='bilinear') 45 | masks = (masks + 1) / 2 # [0, 1] range 46 | 47 | angle, scale, translation = self._process_transforms(angle, scale, translation) 48 | 49 | M_pre = create_trans_matrix( 50 | img_size, angle[0], translation[0], scale[0] 51 | ) 52 | M_post = create_trans_matrix( 53 | img_size, 54 | angle[1]-angle[0], 55 | translation[1]-translation[0], 56 | scale[1] / scale[0] 57 | ) 58 | 59 | masks_pre = transform_imgs(M_pre, masks) 60 | masks_post = transform_imgs(M_post, masks_pre) 61 | patch_pre = masks_pre * torch.cat(self.patch_per_img*[self.palette_imgs]) 62 | 63 | patch_post = transform_imgs(M_post, patch_pre) 64 | collage = torch.ones((1, 3, img_size, img_size)).cuda() 65 | 66 | # for i in range(len(Z)): 67 | for i in ordering.argsort(): # this works?? :0 68 | collage *= (1-masks_post[i]) #Empty the spot the new image will go. 69 | collage += patch_post[i] 70 | 71 | if debug: 72 | from src.notebook_utils import pltshow, draw_tensors 73 | pltshow(np.hstack(masks[:5, 0].detach().cpu().numpy())) 74 | pltshow(np.hstack(masks_pre[:5, 0].detach().cpu().numpy())) 75 | pltshow(np.hstack(masks_post[:5, 0].detach().cpu().numpy())) 76 | draw_tensors(patch_pre[:5]) 77 | draw_tensors(patch_post[:5]) 78 | 79 | if return_data: 80 | with torch.no_grad(): 81 | lut = torch.zeros(self.img_size, self.img_size).cuda().byte() 82 | overlay = torch.ones(self.img_size, self.img_size).cuda().byte() 83 | for i in ordering.argsort(): 84 | write_mask = masks_post[i].round().squeeze().byte() 85 | lut *= (1-write_mask) 86 | lut += write_mask*overlay*(i+1) 87 | data = (lut, masks, masks_pre, masks_post, angle, scale, translation, ordering) 88 | return collage, data 89 | 90 | return collage, None 91 | -------------------------------------------------------------------------------- /research/src/collage_save.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import random, string, json 3 | from pathlib import Path 4 | from datetime import datetime 5 | from concurrent.futures import ThreadPoolExecutor, wait, as_completed 6 | from pytorch_pretrained_biggan import convert_to_images 7 | from tqdm.auto import tqdm 8 | from PIL import Image 9 | import skvideo 10 | import skvideo.io 11 | 12 | def masks_to_pil(masks): 13 | masks = ((masks+1)/2 * 255) 14 | masks = masks.byte().cpu().numpy() 15 | masks = [Image.fromarray(x.squeeze()) for x in masks] 16 | return masks 17 | 18 | class CollageSaver: 19 | def __init__(self, ffmpeg_path='/usr/bin/ffmpeg'): 20 | self.key = ''.join([random.choice(string.ascii_lowercase) for i in range(6)]) 21 | prefix = datetime.utcnow().strftime("%Y-%m-%d-%H-%M") + f"-{self.key}" 22 | self.path = Path(f"./results/{prefix}") 23 | self.path.mkdir(exist_ok=False, parents=True) 24 | 25 | self.palette_dir = self.path / "palette" 26 | self.palette_dir.mkdir(exist_ok=True) 27 | 28 | self.masks_dir = self.path / "masks" 29 | self.masks_dir.mkdir(exist_ok=True) 30 | 31 | # self.canvas_dir = self.path / "canvas" 32 | # self.canvas_dir.mkdir(exist_ok=True) 33 | 34 | # self.transform_dir = self.path / "transform" 35 | # self.transform_dir.mkdir(exist_ok=True) 36 | 37 | self.futures = set() 38 | self.executor = ThreadPoolExecutor(16) 39 | skvideo.setFFmpegPath(ffmpeg_path) 40 | 41 | def run_async(self, f, *args): 42 | f = self.executor.submit(f, *args) 43 | self.futures.add(f) 44 | f.add_done_callback(self.futures.remove) 45 | 46 | def save_palette(self, palette_imgs: torch.Tensor): 47 | imgs 
= convert_to_images(palette_imgs.cpu()) 48 | for i,x in enumerate(imgs): 49 | self.run_async(x.save, self.palette_dir / f"{i:04}.jpg") 50 | 51 | def save(self, img, data, final=False): 52 | lut, masks, masks_pre, masks_post, angle, scale, translation, ordering = data 53 | if final: 54 | self.save_canvas_final(img) 55 | self.save_lookup_table_final(lut) 56 | self.save_masks_final(masks_pre, masks_post) 57 | self.save_transforms_final(angle, scale, translation) 58 | with open(self.path / 'ordering.json', 'w') as outfile: 59 | ordering = ordering.detach().cpu().numpy().tolist() 60 | json.dump(ordering, outfile) 61 | 62 | def save_canvas_final(self, canvas: torch.Tensor): 63 | canvas = convert_to_images(canvas.detach().cpu())[0] 64 | self.run_async(canvas.save, self.path / f"{self.key}.jpg") 65 | 66 | def save_lookup_table_final(self, img: torch.Tensor): 67 | x = Image.fromarray(img.cpu().numpy(), "L") 68 | self.run_async(x.save, self.path / f"lut_{self.key}.png") 69 | 70 | def save_masks_final(self, masks_palette, masks_canvas): 71 | masks_canvas = masks_to_pil(masks_canvas.detach()) 72 | masks_palette = masks_to_pil(masks_palette.detach()) 73 | 74 | for i, (m1, m2) in enumerate(zip(masks_canvas, masks_palette)): 75 | self.run_async(m1.save, self.masks_dir / f"{i:04}_canvas.png") 76 | self.run_async(m2.save, self.masks_dir / f"{i:04}_palett.png") 77 | 78 | def save_transforms_final(self, angle, scale, translation): 79 | angle = angle.cpu().numpy().tolist() 80 | scale = scale.cpu().numpy().tolist() 81 | translation = translation.cpu().numpy().tolist() 82 | with open(self.path / 'transforms.json', 'w') as outfile: 83 | json.dump({'angle': angle, 'scale': scale, 'translation': translation}, outfile) 84 | 85 | def save_video(self, frames:list): 86 | skvideo.io.vwrite(self.path/f'{self.key}.mp4', frames, outputdict={ 87 | '-vcodec': 'libx264', 88 | '-pix_fmt': 'yuv420p', 89 | '-crf': '25', 90 | '-r':'60', 91 | }) 92 | 93 | def join(self): 94 | pbar = tqdm(total=len(self.futures)) 95 | for _ in as_completed(self.futures): 96 | pbar.update() 97 | 98 | ############################################################################ 99 | # def save_canvas(self, step: int, canvas: torch.Tensor): 100 | # canvas = convert_to_images(canvas.detach().cpu())[0] 101 | # self.run_async(canvas.save, self.canvas_dir / f"{step:04}.jpg") 102 | 103 | # def save_lookup_table(self, step: int, img: torch.Tensor): 104 | # x = Image.fromarray(img.cpu().numpy(), "L") 105 | # self.run_async(x.save, self.masks_dir / f"lut_{step:04}.png") 106 | 107 | # def save_masks(self, 108 | # step: int, 109 | # masks: torch.Tensor, 110 | # mask_pallete: torch.Tensor, 111 | # mask_canvas: torch.Tensor 112 | # ): 113 | # step_img_mask_dir = self.masks_dir / f"img_mask_{step:04}" 114 | # step_img_mask_dir.mkdir(exist_ok=True, parents=True) 115 | # masks = masks_to_pil(masks.detach()) 116 | # for i, x in enumerate(masks): 117 | # self.run_async(x.save, step_img_mask_dir / f"{i:02}.png") 118 | 119 | # step_canvas_mask_dir = self.masks_dir / f"canvas_mask_{step:04}" 120 | # step_canvas_mask_dir.mkdir(exist_ok=True, parents=True) 121 | # mask_pallete = masks_to_pil(mask_pallete.detach()) 122 | # for i, x in enumerate(mask_pallete): 123 | # self.run_async(x.save, step_canvas_mask_dir / f"{i:02}.png") 124 | 125 | # torch.save(transforms, self.masks_dir / f"transforms_{step:04}.pt") 126 | 127 | -------------------------------------------------------------------------------- /research/src/face_loss.py: 
-------------------------------------------------------------------------------- 1 | import os, sys 2 | import dlib_torch_converter 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | from PIL import Image 7 | import torchvision.transforms as transforms 8 | from pathlib import Path 9 | 10 | MODEL_PATH = Path(dlib_torch_converter.__file__).parent / 'face.xml' 11 | 12 | class DlibFaceLoss: 13 | def __init__(self, filter_index=1, target_image_path=None): 14 | self.filter_index = filter_index 15 | self.dlib_model = dlib_torch_converter.get_model(str(MODEL_PATH)).eval().cuda() 16 | self.model = nn.Sequential(*[self.dlib_model._modules[i] \ 17 | for i in list(self.dlib_model._modules.keys())[:-2]]) 18 | self.model.eval() 19 | self.model.zero_grad() 20 | self.target_activations = None 21 | if target_image_path: 22 | target_ten = transforms.ToTensor()(Image.open(target_image_path)).unsqueeze(0) 23 | target_ten = F.interpolate(target_ten, size=(128, 128), mode='bilinear') 24 | self.target_activations = self.model(target_ten.cuda()).detach() 25 | 26 | def __call__(self, img_tensors): 27 | # [0, 1] input range 28 | self.model.zero_grad() 29 | img_tensors = F.interpolate(img_tensors, size=(128, 128), mode='bilinear') 30 | 31 | out = self.model(img_tensors) 32 | size = out.size(2) 33 | if self.target_activations is not None: 34 | # loss = torch.dist(out[0, :5], self.target_activations[0, :5]) 35 | loss = torch.dist(out, self.target_activations) 36 | else: 37 | # Take the middle pixel in the image. 38 | if self.filter_index == 'all': 39 | loss = -out[:, :, size//2, size//2] 40 | else: 41 | loss = -out[:, self.filter_index, size//2, size//2] 42 | return loss 43 | 44 | class IdentityLoss: 45 | def __init__(self, target, image_size=256, margin=86): 46 | """ Target is a path to an image or directory of images. 47 | """ 48 | from facenet_pytorch import MTCNN, InceptionResnetV1, fixed_image_standardization 49 | self.mtcnn = MTCNN(image_size=image_size, margin=margin) 50 | self.facenet = InceptionResnetV1(pretrained='vggface2').eval().cuda() 51 | 52 | # Create target embedding. 
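        # (For each target image: MTCNN detects and crops the face, the FaceNet/InceptionResnetV1
        # model embeds it, and the per-image embeddings are averaged into a single identity target;
        # images where no face is detected are silently skipped.)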
53 | self.target_img_embeddings = [] 54 | self.target_img_tensors = [] 55 | if os.path.isdir(target): 56 | paths = [ os.path.join(target, n) for n in os.listdir(target) ] 57 | else: 58 | paths = [target] 59 | for name in paths: 60 | try: 61 | img = Image.open(name).convert('RGB') 62 | img_cropped = self.mtcnn(img) 63 | self.target_img_tensors.append(img_cropped) 64 | if img_cropped is not None: 65 | self.target_img_embeddings.append( 66 | self.facenet(img_cropped.cuda().unsqueeze(0)).detach() 67 | ) 68 | except Exception as e: 69 | # print(e) 70 | pass 71 | print(len(self.target_img_embeddings), 'images found with faces.') 72 | self.target_emb = torch.stack(self.target_img_embeddings).squeeze().mean(axis=0).cuda() 73 | 74 | def _get_emb(self, img_tensors): 75 | self.facenet.zero_grad() 76 | self.facenet.eval() 77 | img_tensors = F.interpolate(img_tensors, size=(256, 256), mode='bilinear') 78 | img_embs = self.facenet(fixed_image_standardization(img_tensors*255)) 79 | return img_embs 80 | 81 | def __call__(self, img_tensors): 82 | # [0, 1] input range 83 | img_embs = self._get_emb(img_tensors) 84 | distances = (img_embs - self.target_emb).norm(dim=1) 85 | return distances 86 | 87 | # class DiscriminatorLoss: 88 | # def __init__(self): 89 | # sys.path.append('/home/joel/Repos/stylegan2-pytorch') 90 | # from model import Discriminator 91 | # self.disc = Discriminator(256).eval().cuda() 92 | # checkpoint = torch.load('/home/joel/Repos/stylegan2-pytorch/550000.pt') 93 | # self.disc.load_state_dict(checkpoint['d'], strict=False) 94 | # def __call__(self, img_tensors): 95 | # # [-1, 1] image range 96 | # img_tensors = F.interpolate(img_tensors, size=(256, 256), mode='bilinear') 97 | # scores = self.disc(img_tensors.cuda()) 98 | # return scores -------------------------------------------------------------------------------- /research/src/gan.py: -------------------------------------------------------------------------------- 1 | # https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/cgan/cgan.py 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import torch 5 | 6 | class Generator(nn.Module): 7 | def __init__(self, img_size, latent_size, channels=1): 8 | super(Generator, self).__init__() 9 | self.img_size = img_size 10 | self.init_size = img_size // 4 11 | self.latent_size = latent_size 12 | self.l1 = nn.Sequential(nn.Linear(latent_size, 128 * self.init_size ** 2)) 13 | 14 | self.conv_blocks = nn.Sequential( 15 | nn.BatchNorm2d(128), 16 | nn.Upsample(scale_factor=2), 17 | nn.Conv2d(128, 128, 3, stride=1, padding=1), 18 | nn.BatchNorm2d(128, 0.8), 19 | nn.LeakyReLU(0.2, inplace=True), 20 | nn.Upsample(scale_factor=2), 21 | nn.Conv2d(128, 64, 3, stride=1, padding=1), 22 | nn.BatchNorm2d(64, 0.8), 23 | nn.LeakyReLU(0.2, inplace=True), 24 | nn.Conv2d(64, channels, 3, stride=1, padding=1), 25 | nn.Tanh(), 26 | ) 27 | 28 | def forward(self, z): 29 | out = self.l1(z) 30 | out = out.view(out.shape[0], 128, self.init_size, self.init_size) 31 | img = self.conv_blocks(out) 32 | return img 33 | 34 | class Discriminator(nn.Module): 35 | def __init__(self, img_size, channels=1): 36 | super(Discriminator, self).__init__() 37 | self.img_size = img_size 38 | self.channels = channels 39 | def discriminator_block(in_filters, out_filters, bn=True): 40 | block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)] 41 | if bn: 42 | block.append(nn.BatchNorm2d(out_filters, 0.8)) 43 | return block 44 | 45 | self.model = nn.Sequential( 46 
| *discriminator_block(channels, 16, bn=False), 47 | *discriminator_block(16, 32), 48 | *discriminator_block(32, 64), 49 | *discriminator_block(64, 128), 50 | ) 51 | 52 | # The height and width of downsampled image 53 | ds_size = self.img_size // 2 ** 4 54 | self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1), nn.Sigmoid()) 55 | 56 | def forward(self, img): 57 | out = self.model(img) 58 | out = out.view(out.shape[0], -1) 59 | validity = self.adv_layer(out) 60 | 61 | return validity 62 | 63 | def weights_init_normal(m): 64 | classname = m.__class__.__name__ 65 | if classname.find("Conv") != -1: 66 | torch.nn.init.normal_(m.weight.data, 0.0, 0.02) 67 | elif classname.find("BatchNorm2d") != -1: 68 | torch.nn.init.normal_(m.weight.data, 1.0, 0.02) 69 | torch.nn.init.constant_(m.bias.data, 0.0) -------------------------------------------------------------------------------- /research/src/notebook_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import PIL 3 | import PIL.Image 4 | import IPython.display 5 | from io import BytesIO 6 | import matplotlib.pyplot as plt 7 | import cv2 8 | from pytorch_pretrained_biggan import convert_to_images 9 | 10 | 11 | def imgrid(imarray, cols=5, pad=1): 12 | if type(imarray) == list: 13 | imarray = np.array(imarray, dtype='uint8') 14 | """Turn an imarray into a grid.""" 15 | if imarray.dtype != np.uint8: 16 | raise ValueError("imgrid input imarray must be uint8") 17 | 18 | pad = int(pad) 19 | assert pad >= 0 20 | cols = int(cols) 21 | assert cols >= 1 22 | 23 | # print(imarray.shape) 24 | gray = False 25 | if len(imarray.shape) == 3: 26 | gray = True 27 | imarray = imarray[:, :, :, None] 28 | N, H, W, C = imarray.shape 29 | rows = N // cols + int(N % cols != 0) 30 | batch_pad = rows * cols - N 31 | assert batch_pad >= 0 32 | post_pad = [batch_pad, pad, pad, 0] 33 | pad_arg = [[0, p] for p in post_pad] 34 | imarray = np.pad(imarray, pad_arg, "constant", constant_values=255) 35 | H += pad 36 | W += pad 37 | grid = ( 38 | imarray.reshape(rows, cols, H, W, C) 39 | .transpose(0, 2, 1, 3, 4) 40 | .reshape(rows * H, cols * W, C) 41 | ) 42 | if pad: 43 | grid = grid[:-pad, :-pad] 44 | if gray: 45 | grid = grid[:,:,0] 46 | return grid 47 | 48 | 49 | def imshow(a, max_size=None, scale=False): 50 | a = np.asarray(a, dtype=np.uint8) 51 | 52 | if max_size: 53 | scale = min(max_size / a.shape[0],max_size / a.shape[1]) 54 | if scale < 1: 55 | a = cv2.resize(a, None, fx=scale, fy=scale) 56 | 57 | image = PIL.Image.fromarray(a, "L" if len(a.shape) == 2 else "RGB") 58 | buffered = BytesIO() 59 | image.save(buffered, format="JPEG", quality=90) 60 | im_data = buffered.getvalue() 61 | 62 | disp = IPython.display.display(IPython.display.Image(im_data)) 63 | return disp 64 | 65 | def half(img): 66 | return cv2.resize(img,None,fx=0.5,fy=0.5) 67 | 68 | def pltshow(img): 69 | plt.figure() 70 | plt.axis('off') 71 | plt.imshow(img) 72 | plt.show() 73 | 74 | def draw_tensors(t): 75 | imgs = [ np.array(img) for img in convert_to_images(t.cpu()) ] 76 | imshow(imgrid(imgs, cols=min(len(imgs), 5), pad=0)) -------------------------------------------------------------------------------- /research/src/palette.py: -------------------------------------------------------------------------------- 1 | import os 2 | import random 3 | import torch 4 | import torch.nn as nn 5 | from PIL import Image 6 | from torchvision import transforms, datasets 7 | from pytorch_pretrained_biggan import truncated_noise_sample 8 | to_tensor 
= transforms.ToTensor() 9 | 10 | def random_biggan_latents(n_ref, truncation, img_size, n_classes=1000, seed=1234): 11 | noise_vectors = truncated_noise_sample(truncation=truncation, batch_size=n_ref, seed=seed) 12 | noise_vectors = torch.from_numpy(noise_vectors).cuda() 13 | class_vectors = torch.zeros(n_ref, n_classes).cuda() 14 | random.seed(seed) 15 | for i in range(n_ref): 16 | class_vectors[i, random.randrange(0, n_classes)] = 1.0 17 | return noise_vectors, class_vectors 18 | 19 | def random_biggan(n, img_size, model, seed=1, truncation=.4): 20 | pz, pc = random_biggan_latents(n, truncation, img_size, seed=seed) 21 | palette_imgs = torch.stack([ 22 | model(pz[i:i+1], pc[i:i+1], truncation).detach()[0] 23 | for i in range(pz.shape[0]) 24 | ]) 25 | return palette_imgs 26 | 27 | def load_images(img_paths, img_size): 28 | palette_imgs = [ 29 | Image.open(path).resize((img_size, img_size)).convert('RGB') 30 | for path in img_paths 31 | ] 32 | palette_imgs = torch.stack([ 33 | (to_tensor(img)-.5)*2 34 | for img in palette_imgs 35 | ]).cuda() 36 | return palette_imgs 37 | 38 | def load_directory(img_dir, img_size): 39 | img_paths = [ os.path.join(img_dir, n) for n in os.listdir(img_dir) ] 40 | return load_images(img_paths, img_size) 41 | -------------------------------------------------------------------------------- /research/src/pytorch_utils.py: -------------------------------------------------------------------------------- 1 | import sys, random 2 | import kornia 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | 7 | # Transformations from https://github.com/reiinakano/neural-painters-pytorch 8 | class RandomScale(nn.Module): 9 | """Module for randomly scaling an image""" 10 | def __init__(self, scales): 11 | """ 12 | :param scales: list of scales to randomly choose from e.g. [0.8, 1.0, 1.2] will randomly scale an image by 13 | 0.8, 1.0, or 1.2 14 | """ 15 | super(RandomScale, self).__init__() 16 | 17 | self.scales = scales 18 | 19 | def forward(self, x: torch.Tensor): 20 | scale = self.scales[random.randint(0, len(self.scales)-1)] 21 | return F.interpolate(x, scale_factor=scale, mode='bilinear') 22 | 23 | 24 | class RandomCrop(nn.Module): 25 | """Module for randomly cropping an image""" 26 | def __init__(self, size: int): 27 | """ 28 | :param size: How much to crop from both sides. e.g. 8 will remove 8 pixels in both x and y directions. 29 | """ 30 | super(RandomCrop, self).__init__() 31 | self.size = size 32 | 33 | def forward(self, x: torch.Tensor): 34 | batch_size, _, h, w = x.shape 35 | h_move = random.randint(0, self.size) 36 | w_move = random.randint(0, self.size) 37 | return x[:, :, h_move:h-self.size+h_move, w_move:w-self.size+w_move] 38 | 39 | 40 | class RandomRotate(nn.Module): 41 | """Module for randomly rotating an image""" 42 | def __init__(self, angle=10, same_throughout_batch=False): 43 | """ 44 | :param angle: Angle in degrees 45 | :param same_throughout_batch: Degree of rotation, although random, is kept the same throughout a single batch. 
46 | """ 47 | super(RandomRotate, self).__init__() 48 | self.angle=angle 49 | self.same_throughout_batch = same_throughout_batch 50 | 51 | def forward(self, img: torch.tensor): 52 | b, _, h, w = img.shape 53 | # create transformation (rotation) 54 | if not self.same_throughout_batch: 55 | angle = torch.randn(b, device=img.device) * self.angle 56 | else: 57 | angle = torch.randn(1, device=img.device) * self.angle 58 | angle = angle.repeat(b) 59 | center = torch.ones(b, 2, device=img.device) 60 | center[..., 0] = img.shape[3] / 2 # x 61 | center[..., 1] = img.shape[2] / 2 # y 62 | # define the scale factor 63 | scale = torch.ones(b, 2, device=img.device) 64 | M = kornia.get_rotation_matrix2d(center, angle, scale) 65 | img_warped = kornia.warp_affine(img, M, dsize=(h, w)) 66 | return img_warped 67 | 68 | # Define image augmentations 69 | padder = nn.ConstantPad2d(12, 0.5) 70 | rand_crop_8 = RandomCrop(8) 71 | rand_scale = RandomScale([1 + (i-5)/50. for i in range(11)]) 72 | random_rotater = RandomRotate(angle=5, same_throughout_batch=False) 73 | rand_crop_4 = RandomCrop(4) 74 | 75 | def augment(img, n=1): 76 | imgs = img.repeat(n, 1, 1, 1) 77 | return rand_crop_4(random_rotater(rand_crop_8(rand_scale(padder(imgs))))) -------------------------------------------------------------------------------- /research/src/transform_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import kornia 3 | 4 | def transform_imgs_white(M, I): 5 | # Transform the images while preserving white background. 6 | return 1-kornia.warp_affine( 7 | 1-I, M, 8 | dsize=I.shape[-2:], 9 | padding_mode='zeros' 10 | ) 11 | 12 | def transform_imgs(M, I): 13 | # Transform the images. 14 | return kornia.warp_affine( 15 | I, M, 16 | dsize=I.shape[-2:], 17 | padding_mode='zeros' 18 | ) 19 | 20 | def mask_images_white(masks, imgs): 21 | return 1-(((1-imgs) * masks)) 22 | 23 | def create_trans_matrix(img_size, angles, translations, scales): 24 | # Return a transformation matrix, size=(n, 2, 3) 25 | n = angles.shape[0] 26 | center = torch.ones(n, 2).cuda() 27 | center[..., 1] = img_size / 2 # y 28 | center[..., 0] = img_size / 2 # x 29 | M = kornia.get_rotation_matrix2d(center, angles, scales) 30 | M[..., 2] += translations 31 | return M -------------------------------------------------------------------------------- /research/upload_site_data.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 11, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import os" 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": 12, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "good = [\n", 19 | " [\n", 20 | " 'batch-1',\n", 21 | " [ '2020-10-06-08-22-dzdsdu', '2020-10-06-05-04-hcadfm', '2020-10-06-07-56-uvdsrj'],\n", 22 | " ],\n", 23 | " [\n", 24 | " 'batch-2',\n", 25 | " ['2020-10-06-21-10-zygkme', '2020-10-06-20-25-exwcgm', '2020-10-06-20-15-dugvqd',\n", 26 | " '2020-10-06-19-29-oxxoix', '2020-10-06-19-26-wmlfjf', '2020-10-06-19-19-wwswcs',\n", 27 | " '2020-10-06-19-08-hlflgw', '2020-10-06-18-33-hinvyo', '2020-10-06-18-13-nhchsg',\n", 28 | " '2020-10-06-17-24-blhrnd']\n", 29 | " ],\n", 30 | " [\n", 31 | " 'batch-3',\n", 32 | " ['2020-10-08-12-03-dqfjsy', '2020-10-08-13-45-swkatk',\n", 33 | " '2020-10-08-06-10-cfaysi', '2020-10-08-02-23-qewwai', '2020-10-08-07-22-zjstpy',\n", 34 | " '2020-10-08-13-14-xjdsbx', 
'2020-10-08-05-22-aufwvm',\n", 35 | " '2020-10-08-04-47-putftz', '2020-10-08-06-42-idzkjw', '2020-10-08-02-20-rofyii', \n", 36 | " '2020-10-08-11-08-yaeiyp', '2020-10-08-12-15-jancmb', '2020-10-08-04-04-kqflhs',\n", 37 | " '2020-10-08-04-14-ntmrkr', '2020-10-08-00-53-rvhbhj', '2020-10-08-15-22-irpgmi',\n", 38 | " '2020-10-08-13-29-spthzg', '2020-10-08-07-11-tzvxyl', '2020-10-08-10-11-yzplzk',\n", 39 | " '2020-10-08-15-48-bgfdgh', '2020-10-08-14-56-mwxfvg', '2020-10-08-10-36-plldjc',\n", 40 | " '2020-10-08-09-41-mdiyof', '2020-10-08-14-21-sqdmbi', '2020-10-08-02-49-ufinbn',\n", 41 | " '2020-10-08-12-44-xihhcm', '2020-10-08-12-26-tpezky', '2020-10-08-01-54-rhxgkt',\n", 42 | " '2020-10-08-09-34-bemcbu', '2020-10-08-04-10-pjngma', \n", 43 | " '2020-10-08-14-59-jfmdyu', '2020-10-08-05-27-vaqlcs', '2020-10-08-01-56-wwuycr',\n", 44 | " '2020-10-08-01-52-axeqoz', '2020-10-08-08-13-qaxbta', '2020-10-08-01-36-najvrz',\n", 45 | " '2020-10-08-15-02-mrmxzr', '2020-10-08-03-31-zxexrp', '2020-10-08-03-47-kugzvx']\n", 46 | " ]\n", 47 | "]" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": null, 53 | "metadata": { 54 | "scrolled": true 55 | }, 56 | "outputs": [], 57 | "source": [ 58 | "for group_dir, dirnames in good:\n", 59 | " for dirname in dirnames:\n", 60 | " \n", 61 | " key = dirname.split('-')[-1]\n", 62 | " #if key != 'hcadfm':\n", 63 | " # continue\n", 64 | " print(dirname)\n", 65 | " path = f'./results/{group_dir}/{dirname}'\n", 66 | " \n", 67 | " fast_vid = f'{path}/{key}_fast.mp4'\n", 68 | " if not os.path.exists(fast_vid):\n", 69 | " !ffmpeg -y -i {path}/{key}.mp4 -filter:v \"setpts=0.16666666666*PTS\" {fast_vid}\n", 70 | " \n", 71 | " fast_small_vid = f'{path}/{key}_fast_small.mp4'\n", 72 | " if not os.path.exists(fast_small_vid):\n", 73 | " !ffmpeg -y -i {path}/{key}.mp4 -filter:v \"setpts=PTS/6,scale=256:-1\" {fast_small_vid}\n", 74 | " \n", 75 | " small_img = f'{path}/{key}_small.jpg'\n", 76 | " if not os.path.exists(small_img):\n", 77 | " !convert {path}/{key}.jpg -resize 350x350 {small_img}\n", 78 | " \n", 79 | " !ffmpeg -y -i {path}/{key}.mp4 -frames:v 1 -vf scale=350:-2 -q:v 3 {path}/{key}_frame0.jpg\n", 80 | " \n", 81 | " s3_path = f's3://derivative-works/results/{group_dir}/{dirname}'\n", 82 | " !aws s3 sync ./results/{group_dir}/{dirname} {s3_path} --acl public-read --quiet\n", 83 | " print('-'*80)\n", 84 | "# break" 85 | ] 86 | }, 87 | { 88 | "cell_type": "markdown", 89 | "metadata": {}, 90 | "source": [ 91 | "# Create data json" 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": null, 97 | "metadata": {}, 98 | "outputs": [], 99 | "source": [ 100 | "import json\n", 101 | "data = []\n", 102 | "for group_dir, dirnames in good:\n", 103 | " for dirname in dirnames:\n", 104 | " key = dirname.split('-')[-1]\n", 105 | " path = f'./results/{group_dir}/{dirname}'\n", 106 | " with open(path+'/image_names.txt', 'r') as f:\n", 107 | " image_names = json.load(f)\n", 108 | " data.append({\n", 109 | " 'key': key,\n", 110 | " 'path': f'{group_dir}/{dirname}',\n", 111 | " 'palette_keys': [s.split('/')[-1].split('_hires')[0] for s in image_names]\n", 112 | " })\n" 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": null, 118 | "metadata": { 119 | "scrolled": false 120 | }, 121 | "outputs": [], 122 | "source": [ 123 | "data" 124 | ] 125 | }, 126 | { 127 | "cell_type": "code", 128 | "execution_count": null, 129 | "metadata": {}, 130 | "outputs": [], 131 | "source": [] 132 | } 133 | ], 134 | "metadata": { 135 | "kernelspec": { 136 | 
"display_name": "Python 3", 137 | "language": "python", 138 | "name": "python3" 139 | }, 140 | "language_info": { 141 | "codemirror_mode": { 142 | "name": "ipython", 143 | "version": 3 144 | }, 145 | "file_extension": ".py", 146 | "mimetype": "text/x-python", 147 | "name": "python", 148 | "nbconvert_exporter": "python", 149 | "pygments_lexer": "ipython3", 150 | "version": "3.6.9" 151 | }, 152 | "toc": { 153 | "base_numbering": 1, 154 | "nav_menu": {}, 155 | "number_sections": true, 156 | "sideBar": true, 157 | "skip_h1_title": false, 158 | "title_cell": "Table of Contents", 159 | "title_sidebar": "Contents", 160 | "toc_cell": false, 161 | "toc_position": {}, 162 | "toc_section_display": true, 163 | "toc_window_display": false 164 | } 165 | }, 166 | "nbformat": 4, 167 | "nbformat_minor": 2 168 | } 169 | -------------------------------------------------------------------------------- /website/.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | /node_modules/ 3 | /src/node_modules/@sapper/ 4 | yarn-error.log 5 | /__sapper__/ 6 | -------------------------------------------------------------------------------- /website/.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | {"recommendations": ["svelte.svelte-vscode"]} -------------------------------------------------------------------------------- /website/README.md: -------------------------------------------------------------------------------- 1 | ## Install requirements 2 | ```bash 3 | $ npm install 4 | $ npm run tailwind:dev 5 | ``` 6 | 7 | ## Running 8 | ```bash 9 | $ npm run dev 10 | ``` 11 | 12 | # Preparing for "prod" (github pages etc) 13 | See https://sapper.svelte.dev/docs#Exporting 14 | 15 | ```bash 16 | $ npm run export 17 | ``` 18 | -------------------------------------------------------------------------------- /website/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "TODO", 3 | "description": "TODO", 4 | "version": "0.0.1", 5 | "scripts": { 6 | "dev": "sapper dev", 7 | "build": "sapper build --legacy", 8 | "export": "npm run tailwind:prod && sapper export --legacy", 9 | "start": "node __sapper__/build", 10 | "validate": "svelte-check --ignore src/node_modules/@sapper", 11 | "tailwind:dev": "tailwindcss build tailwind.pcss -o static/sm-base.css", 12 | "tailwind:prod": "postcss static/sm-base.css -o static/sm-base.css" 13 | }, 14 | "dependencies": { 15 | "compression": "^1.7.1", 16 | "polka": "next", 17 | "sirv": "^1.0.0", 18 | "tailwindcss": "^1.8.10", 19 | "@types/lodash": "^4.14.161", 20 | "lodash": "^4.17.20" 21 | }, 22 | "devDependencies": { 23 | "@babel/core": "^7.0.0", 24 | "@babel/plugin-syntax-dynamic-import": "^7.0.0", 25 | "@babel/plugin-transform-runtime": "^7.0.0", 26 | "@babel/preset-env": "^7.0.0", 27 | "@babel/runtime": "^7.0.0", 28 | "@rollup/plugin-babel": "^5.0.0", 29 | "@rollup/plugin-commonjs": "^14.0.0", 30 | "@rollup/plugin-node-resolve": "^8.0.0", 31 | "@rollup/plugin-replace": "^2.2.0", 32 | "@rollup/plugin-typescript": "^6.0.0", 33 | "@tsconfig/svelte": "^1.0.10", 34 | "@types/compression": "^1.7.0", 35 | "@types/node": "^14.11.1", 36 | "@types/polka": "^0.5.1", 37 | "cssnano": "^4.1.10", 38 | "postcss": "^8.1.1", 39 | "postcss-cli": "^8.0.0", 40 | "rollup": "^2.3.4", 41 | "rollup-plugin-consts": "^1.0.2", 42 | "rollup-plugin-svelte": "^6.0.0", 43 | "rollup-plugin-terser": "^7.0.0", 44 | "sapper": "^0.28.0", 45 | "svelte": 
"^3.17.3", 46 | "svelte-check": "^1.0.46", 47 | "svelte-preprocess": "^4.3.0", 48 | "tslib": "^2.0.1", 49 | "typescript": "^4.0.3" 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /website/postcss.config.js: -------------------------------------------------------------------------------- 1 | /* 2 | Our postcss configuration 3 | We mostly use this to generate a minimized version of our tailwind css for production 4 | 5 | To run use yarn build-css 6 | 7 | */ 8 | 9 | const purgecss = require("@fullhuman/postcss-purgecss"); 10 | 11 | module.exports = { 12 | plugins: [ 13 | purgecss({ 14 | content: [ 15 | "src/**/*.svelte", 16 | "public/*.html", 17 | "src/**/*.html", 18 | "src/**/*.js", 19 | "src/**/*.ts", 20 | ], 21 | // extractor from: 22 | // https://github.com/tailwindcss/discuss/issues/254#issuecomment-517918397 23 | defaultExtractor: (content) => { 24 | const regExp = new RegExp(/[A-Za-z0-9-_:/\.]+/g); 25 | 26 | const matchedTokens = []; 27 | 28 | let match = regExp.exec(content); 29 | 30 | while (match) { 31 | if (match[0].startsWith("class:")) { 32 | matchedTokens.push(match[0].substring(6)); 33 | } else { 34 | matchedTokens.push(match[0]); 35 | } 36 | 37 | match = regExp.exec(content); 38 | } 39 | return matchedTokens; 40 | }, 41 | }), 42 | // ... 43 | require("cssnano")({ 44 | preset: "default", 45 | }), 46 | ], 47 | }; 48 | -------------------------------------------------------------------------------- /website/rollup.config.js: -------------------------------------------------------------------------------- 1 | import resolve from '@rollup/plugin-node-resolve'; 2 | import replace from '@rollup/plugin-replace'; 3 | import commonjs from '@rollup/plugin-commonjs'; 4 | import svelte from 'rollup-plugin-svelte'; 5 | import babel from '@rollup/plugin-babel'; 6 | import { terser } from 'rollup-plugin-terser'; 7 | import sveltePreprocess from 'svelte-preprocess'; 8 | import typescript from '@rollup/plugin-typescript'; 9 | import config from 'sapper/config/rollup.js'; 10 | import pkg from './package.json'; 11 | 12 | const mode = process.env.NODE_ENV; 13 | const dev = mode === 'development'; 14 | const legacy = !!process.env.SAPPER_LEGACY_BUILD; 15 | const onwarn = (warning, onwarn) => 16 | (warning.code === 'MISSING_EXPORT' && /'preload'/.test(warning.message)) || 17 | (warning.code === 'CIRCULAR_DEPENDENCY' && /[/\\]@sapper[/\\]/.test(warning.message)) || 18 | (warning.code === 'THIS_IS_UNDEFINED') || 19 | onwarn(warning); 20 | 21 | export default { 22 | client: { 23 | input: config.client.input().replace(/.js$/, '.ts'), 24 | output: config.client.output(), 25 | plugins: [ 26 | replace({ 27 | 'process.browser': true, 28 | 'process.env.NODE_ENV': JSON.stringify(mode), 29 | 'process.CDN_ROOT': JSON.stringify("http://localhost:4000/"), 30 | }), 31 | svelte({ 32 | dev, 33 | hydratable: true, 34 | preprocess: sveltePreprocess(), 35 | emitCss: true 36 | }), 37 | resolve({ 38 | browser: true, 39 | dedupe: ['svelte'] 40 | }), 41 | commonjs(), 42 | typescript({ sourceMap: dev }), 43 | 44 | legacy && babel({ 45 | extensions: ['.js', '.mjs', '.html', '.svelte'], 46 | babelHelpers: 'runtime', 47 | exclude: ['node_modules/@babel/**'], 48 | presets: [ 49 | ['@babel/preset-env', { 50 | targets: '> 0.25%, not dead' 51 | }] 52 | ], 53 | plugins: [ 54 | '@babel/plugin-syntax-dynamic-import', 55 | ['@babel/plugin-transform-runtime', { 56 | useESModules: true 57 | }] 58 | ] 59 | }), 60 | 61 | !dev && terser({ 62 | module: true 63 | }) 64 | ], 65 | 66 | 
preserveEntrySignatures: false, 67 | onwarn, 68 | }, 69 | 70 | server: { 71 | input: { server: config.server.input().server.replace(/.js$/, ".ts") }, 72 | output: config.server.output(), 73 | plugins: [ 74 | replace({ 75 | 'process.browser': false, 76 | 'process.env.NODE_ENV': JSON.stringify(mode) 77 | }), 78 | svelte({ 79 | generate: 'ssr', 80 | hydratable: true, 81 | preprocess: sveltePreprocess(), 82 | dev 83 | }), 84 | resolve({ 85 | dedupe: ['svelte'] 86 | }), 87 | commonjs(), 88 | typescript({ sourceMap: dev }) 89 | ], 90 | external: Object.keys(pkg.dependencies).concat(require('module').builtinModules), 91 | 92 | preserveEntrySignatures: 'strict', 93 | onwarn, 94 | }, 95 | 96 | serviceworker: { 97 | input: config.serviceworker.input().replace(/.js$/, '.ts'), 98 | output: config.serviceworker.output(), 99 | plugins: [ 100 | resolve(), 101 | replace({ 102 | 'process.browser': true, 103 | 'process.env.NODE_ENV': JSON.stringify(mode) 104 | }), 105 | commonjs(), 106 | typescript({ sourceMap: dev }), 107 | !dev && terser() 108 | ], 109 | 110 | preserveEntrySignatures: false, 111 | onwarn, 112 | } 113 | }; 114 | -------------------------------------------------------------------------------- /website/src/client.ts: -------------------------------------------------------------------------------- 1 | import * as sapper from '@sapper/app'; 2 | 3 | sapper.start({ 4 | target: document.querySelector('#sapper') 5 | }); -------------------------------------------------------------------------------- /website/src/components/ImageData.svelte: -------------------------------------------------------------------------------- 1 | 25 | 26 |