├── .gitignore
├── .replit
├── CycleGAN.ipynb
├── LICENSE
├── README.md
├── data
│   ├── __init__.py
│   ├── aligned_dataset.py
│   ├── base_dataset.py
│   ├── colorization_dataset.py
│   ├── image_folder.py
│   ├── single_dataset.py
│   ├── template_dataset.py
│   └── unaligned_dataset.py
├── datasets
│   ├── bibtex
│   │   ├── cityscapes.tex
│   │   ├── facades.tex
│   │   ├── handbags.tex
│   │   ├── shoes.tex
│   │   └── transattr.tex
│   ├── combine_A_and_B.py
│   ├── download_cyclegan_dataset.sh
│   ├── download_pix2pix_dataset.sh
│   ├── make_dataset_aligned.py
│   └── prepare_cityscapes_dataset.py
├── docs
│   ├── Dockerfile
│   ├── README_es.md
│   ├── datasets.md
│   ├── docker.md
│   ├── overview.md
│   ├── qa.md
│   └── tips.md
├── environment.yml
├── imgs
│   ├── edges2cats.jpg
│   └── horse2zebra.gif
├── models
│   ├── __init__.py
│   ├── base_model.py
│   ├── colorization_model.py
│   ├── cycle_gan_model.py
│   ├── networks.py
│   ├── pix2pix_model.py
│   ├── template_model.py
│   └── test_model.py
├── options
│   ├── __init__.py
│   ├── base_options.py
│   ├── test_options.py
│   └── train_options.py
├── pix2pix.ipynb
├── requirements.txt
├── scripts
│   ├── conda_deps.sh
│   ├── download_cyclegan_model.sh
│   ├── download_pix2pix_model.sh
│   ├── edges
│   │   ├── PostprocessHED.m
│   │   └── batch_hed.py
│   ├── eval_cityscapes
│   │   ├── caffemodel
│   │   │   └── deploy.prototxt
│   │   ├── cityscapes.py
│   │   ├── download_fcn8s.sh
│   │   ├── evaluate.py
│   │   └── util.py
│   ├── install_deps.sh
│   ├── test_before_push.py
│   ├── test_colorization.sh
│   ├── test_cyclegan.sh
│   ├── test_pix2pix.sh
│   ├── test_single.sh
│   ├── train_colorization.sh
│   ├── train_cyclegan.sh
│   └── train_pix2pix.sh
├── test.py
├── train.py
└── util
    ├── __init__.py
    ├── get_data.py
    ├── html.py
    ├── image_pool.py
    ├── util.py
    └── visualizer.py
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | debug*
3 | datasets/
4 | checkpoints/
5 | results/
6 | build/
7 | dist/
8 | *.png
9 | torch.egg-info/
10 | */**/__pycache__
11 | torch/version.py
12 | torch/csrc/generic/TensorMethods.cpp
13 | torch/lib/*.so*
14 | torch/lib/*.dylib*
15 | torch/lib/*.h
16 | torch/lib/build
17 | torch/lib/tmp_install
18 | torch/lib/include
19 | torch/lib/torch_shm_manager
20 | torch/csrc/cudnn/cuDNN.cpp
21 | torch/csrc/nn/THNN.cwrap
22 | torch/csrc/nn/THNN.cpp
23 | torch/csrc/nn/THCUNN.cwrap
24 | torch/csrc/nn/THCUNN.cpp
25 | torch/csrc/nn/THNN_generic.cwrap
26 | torch/csrc/nn/THNN_generic.cpp
27 | torch/csrc/nn/THNN_generic.h
28 | docs/src/**/*
29 | test/data/legacy_modules.t7
30 | test/data/gpu_tensors.pt
31 | test/htmlcov
32 | test/.coverage
33 | */*.pyc
34 | */**/*.pyc
35 | */**/**/*.pyc
36 | */**/**/**/*.pyc
37 | */**/**/**/**/*.pyc
38 | */*.so*
39 | */**/*.so*
40 | */**/*.dylib*
41 | test/data/legacy_serialized.pt
42 | *~
43 | .idea
44 |
45 | #Ignore Wandb
46 | wandb/
47 |
--------------------------------------------------------------------------------
/.replit:
--------------------------------------------------------------------------------
1 | language = "python3"
2 | run = "
"
--------------------------------------------------------------------------------
/CycleGAN.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "colab_type": "text",
7 | "id": "view-in-github"
8 | },
9 | "source": [
10 | "
"
11 | ]
12 | },
13 | {
14 | "cell_type": "markdown",
15 | "metadata": {
16 | "colab_type": "text",
17 | "id": "5VIGyIus8Vr7"
18 | },
19 | "source": [
20 | "Take a look at the [repository](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) for more information"
21 | ]
22 | },
23 | {
24 | "cell_type": "markdown",
25 | "metadata": {
26 | "colab_type": "text",
27 | "id": "7wNjDKdQy35h"
28 | },
29 | "source": [
30 | "# Install"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": null,
36 | "metadata": {
37 | "colab": {},
38 | "colab_type": "code",
39 | "id": "TRm-USlsHgEV"
40 | },
41 | "outputs": [],
42 | "source": [
43 | "!git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix"
44 | ]
45 | },
46 | {
47 | "cell_type": "code",
48 | "execution_count": null,
49 | "metadata": {
50 | "colab": {},
51 | "colab_type": "code",
52 | "id": "Pt3igws3eiVp"
53 | },
54 | "outputs": [],
55 | "source": [
56 | "import os\n",
57 | "os.chdir('pytorch-CycleGAN-and-pix2pix/')"
58 | ]
59 | },
60 | {
61 | "cell_type": "code",
62 | "execution_count": null,
63 | "metadata": {
64 | "colab": {},
65 | "colab_type": "code",
66 | "id": "z1EySlOXwwoa"
67 | },
68 | "outputs": [],
69 | "source": [
70 | "!pip install -r requirements.txt"
71 | ]
72 | },
73 | {
74 | "cell_type": "markdown",
75 | "metadata": {
76 | "colab_type": "text",
77 | "id": "8daqlgVhw29P"
78 | },
79 | "source": [
80 | "# Datasets\n",
81 | "\n",
82 | "Download one of the official datasets with:\n",
83 | "\n",
84 | "- `bash ./datasets/download_cyclegan_dataset.sh [apple2orange, summer2winter_yosemite, horse2zebra, monet2photo, cezanne2photo, ukiyoe2photo, vangogh2photo, maps, cityscapes, facades, iphone2dslr_flower, ae_photos]`\n",
85 | "\n",
86 | "Or use your own dataset by creating the appropriate folders and adding in the images.\n",
87 | "\n",
88 | "- Create a dataset folder under `/dataset` for your dataset.\n",
89 | "- Create subfolders `testA`, `testB`, `trainA`, and `trainB` under your dataset's folder. Place any images you want to transform from a to b (cat2dog) in the `testA` folder, images you want to transform from b to a (dog2cat) in the `testB` folder, and do the same for the `trainA` and `trainB` folders."
90 | ]
91 | },
92 | {
93 | "cell_type": "code",
94 | "execution_count": null,
95 | "metadata": {
96 | "colab": {},
97 | "colab_type": "code",
98 | "id": "vrdOettJxaCc"
99 | },
100 | "outputs": [],
101 | "source": [
102 | "!bash ./datasets/download_cyclegan_dataset.sh horse2zebra"
103 | ]
104 | },
105 | {
106 | "cell_type": "markdown",
107 | "metadata": {
108 | "colab_type": "text",
109 | "id": "gdUz4116xhpm"
110 | },
111 | "source": [
112 | "# Pretrained models\n",
113 | "\n",
114 | "Download one of the official pretrained models with:\n",
115 | "\n",
116 | "- `bash ./scripts/download_cyclegan_model.sh [apple2orange, orange2apple, summer2winter_yosemite, winter2summer_yosemite, horse2zebra, zebra2horse, monet2photo, style_monet, style_cezanne, style_ukiyoe, style_vangogh, sat2map, map2sat, cityscapes_photo2label, cityscapes_label2photo, facades_photo2label, facades_label2photo, iphone2dslr_flower]`\n",
117 | "\n",
118 | "Or add your own pretrained model to `./checkpoints/{NAME}_pretrained/latest_net_G.pt`"
119 | ]
120 | },
121 | {
122 | "cell_type": "code",
123 | "execution_count": null,
124 | "metadata": {
125 | "colab": {},
126 | "colab_type": "code",
127 | "id": "B75UqtKhxznS"
128 | },
129 | "outputs": [],
130 | "source": [
131 | "!bash ./scripts/download_cyclegan_model.sh horse2zebra"
132 | ]
133 | },
134 | {
135 | "cell_type": "markdown",
136 | "metadata": {
137 | "colab_type": "text",
138 | "id": "yFw1kDQBx3LN"
139 | },
140 | "source": [
141 | "# Training\n",
142 | "\n",
143 | "- `python train.py --dataroot ./datasets/horse2zebra --name horse2zebra --model cycle_gan`\n",
144 | "\n",
145 | "Change the `--dataroot` and `--name` to your own dataset's path and model's name. Use `--gpu_ids 0,1,..` to train on multiple GPUs and `--batch_size` to change the batch size. I've found that a batch size of 16 fits onto 4 V100s and can finish training an epoch in ~90s.\n",
146 | "\n",
147 | "Once your model has trained, copy over the last checkpoint to a format that the testing model can automatically detect:\n",
148 | "\n",
149 | "Use `cp ./checkpoints/horse2zebra/latest_net_G_A.pth ./checkpoints/horse2zebra/latest_net_G.pth` if you want to transform images from class A to class B and `cp ./checkpoints/horse2zebra/latest_net_G_B.pth ./checkpoints/horse2zebra/latest_net_G.pth` if you want to transform images from class B to class A.\n"
150 | ]
151 | },
152 | {
153 | "cell_type": "code",
154 | "execution_count": null,
155 | "metadata": {
156 | "colab": {},
157 | "colab_type": "code",
158 | "id": "0sp7TCT2x9dB"
159 | },
160 | "outputs": [],
161 | "source": [
162 | "!python train.py --dataroot ./datasets/horse2zebra --name horse2zebra --model cycle_gan --display_id -1"
163 | ]
164 | },
165 | {
166 | "cell_type": "markdown",
167 | "metadata": {
168 | "colab_type": "text",
169 | "id": "9UkcaFZiyASl"
170 | },
171 | "source": [
172 | "# Testing\n",
173 | "\n",
174 | "- `python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout`\n",
175 | "\n",
176 | "Change the `--dataroot` and `--name` to be consistent with your trained model's configuration.\n",
177 | "\n",
178 | "> from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix:\n",
179 | "> The option --model test is used for generating results of CycleGAN only for one side. This option will automatically set --dataset_mode single, which only loads the images from one set. On the contrary, using --model cycle_gan requires loading and generating results in both directions, which is sometimes unnecessary. The results will be saved at ./results/. Use --results_dir {directory_path_to_save_result} to specify the results directory.\n",
180 | "\n",
181 | "> For your own experiments, you might want to specify --netG, --norm, --no_dropout to match the generator architecture of the trained model."
182 | ]
183 | },
184 | {
185 | "cell_type": "code",
186 | "execution_count": null,
187 | "metadata": {
188 | "colab": {},
189 | "colab_type": "code",
190 | "id": "uCsKkEq0yGh0"
191 | },
192 | "outputs": [],
193 | "source": [
194 | "!python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout"
195 | ]
196 | },
197 | {
198 | "cell_type": "markdown",
199 | "metadata": {
200 | "colab_type": "text",
201 | "id": "OzSKIPUByfiN"
202 | },
203 | "source": [
204 | "# Visualize"
205 | ]
206 | },
207 | {
208 | "cell_type": "code",
209 | "execution_count": null,
210 | "metadata": {
211 | "colab": {},
212 | "colab_type": "code",
213 | "id": "9Mgg8raPyizq"
214 | },
215 | "outputs": [],
216 | "source": [
217 | "import matplotlib.pyplot as plt\n",
218 | "\n",
219 | "img = plt.imread('./results/horse2zebra_pretrained/test_latest/images/n02381460_1010_fake.png')\n",
220 | "plt.imshow(img)"
221 | ]
222 | },
223 | {
224 | "cell_type": "code",
225 | "execution_count": null,
226 | "metadata": {
227 | "colab": {},
228 | "colab_type": "code",
229 | "id": "0G3oVH9DyqLQ"
230 | },
231 | "outputs": [],
232 | "source": [
233 | "import matplotlib.pyplot as plt\n",
234 | "\n",
235 | "img = plt.imread('./results/horse2zebra_pretrained/test_latest/images/n02381460_1010_real.png')\n",
236 | "plt.imshow(img)"
237 | ]
238 | }
239 | ],
240 | "metadata": {
241 | "accelerator": "GPU",
242 | "colab": {
243 | "collapsed_sections": [],
244 | "include_colab_link": true,
245 | "name": "CycleGAN",
246 | "provenance": []
247 | },
248 | "environment": {
249 | "name": "tf2-gpu.2-3.m74",
250 | "type": "gcloud",
251 | "uri": "gcr.io/deeplearning-platform-release/tf2-gpu.2-3:m74"
252 | },
253 | "kernelspec": {
254 | "display_name": "Python 3",
255 | "language": "python",
256 | "name": "python3"
257 | },
258 | "language_info": {
259 | "codemirror_mode": {
260 | "name": "ipython",
261 | "version": 3
262 | },
263 | "file_extension": ".py",
264 | "mimetype": "text/x-python",
265 | "name": "python",
266 | "nbconvert_exporter": "python",
267 | "pygments_lexer": "ipython3",
268 | "version": "3.7.10"
269 | }
270 | },
271 | "nbformat": 4,
272 | "nbformat_minor": 4
273 | }
274 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
2 | All rights reserved.
3 |
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are met:
6 |
7 | * Redistributions of source code must retain the above copyright notice, this
8 | list of conditions and the following disclaimer.
9 |
10 | * Redistributions in binary form must reproduce the above copyright notice,
11 | this list of conditions and the following disclaimer in the documentation
12 | and/or other materials provided with the distribution.
13 |
14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 |
25 |
26 | --------------------------- LICENSE FOR pix2pix --------------------------------
27 | BSD License
28 |
29 | For pix2pix software
30 | Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu
31 | All rights reserved.
32 |
33 | Redistribution and use in source and binary forms, with or without
34 | modification, are permitted provided that the following conditions are met:
35 |
36 | * Redistributions of source code must retain the above copyright notice, this
37 | list of conditions and the following disclaimer.
38 |
39 | * Redistributions in binary form must reproduce the above copyright notice,
40 | this list of conditions and the following disclaimer in the documentation
41 | and/or other materials provided with the distribution.
42 |
43 | ----------------------------- LICENSE FOR DCGAN --------------------------------
44 | BSD License
45 |
46 | For dcgan.torch software
47 |
48 | Copyright (c) 2015, Facebook, Inc. All rights reserved.
49 |
50 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
51 |
52 | Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
53 |
54 | Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
55 |
56 | Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
57 |
58 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 |
--------------------------------------------------------------------------------
/data/__init__.py:
--------------------------------------------------------------------------------
1 | """This package includes all the modules related to data loading and preprocessing
2 |
3 | To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
4 | You need to implement four functions:
5 | -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
6 | -- <__len__>: return the size of dataset.
7 | -- <__getitem__>: get a data point from data loader.
8 | -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
9 |
10 | Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
11 | See our template dataset class 'template_dataset.py' for more details.
12 | """
13 | import importlib
14 | import torch.utils.data
15 | from data.base_dataset import BaseDataset
16 |
17 |
18 | def find_dataset_using_name(dataset_name):
19 | """Import the module "data/[dataset_name]_dataset.py".
20 |
21 | In the file, the class called DatasetNameDataset() will
22 | be instantiated. It has to be a subclass of BaseDataset,
23 | and the class-name match is case-insensitive.
24 | """
25 | dataset_filename = "data." + dataset_name + "_dataset"
26 | datasetlib = importlib.import_module(dataset_filename)
27 |
28 | dataset = None
29 | target_dataset_name = dataset_name.replace('_', '') + 'dataset'
30 | for name, cls in datasetlib.__dict__.items():
31 | if name.lower() == target_dataset_name.lower() \
32 | and issubclass(cls, BaseDataset):
33 | dataset = cls
34 |
35 | if dataset is None:
36 | raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
37 |
38 | return dataset
39 |
40 |
41 | def get_option_setter(dataset_name):
42 | """Return the static method of the dataset class."""
43 | dataset_class = find_dataset_using_name(dataset_name)
44 | return dataset_class.modify_commandline_options
45 |
46 |
47 | def create_dataset(opt):
48 | """Create a dataset given the option.
49 |
50 | This function wraps the class CustomDatasetDataLoader.
51 | This is the main interface between this package and 'train.py'/'test.py'
52 |
53 | Example:
54 | >>> from data import create_dataset
55 | >>> dataset = create_dataset(opt)
56 | """
57 | data_loader = CustomDatasetDataLoader(opt)
58 | dataset = data_loader.load_data()
59 | return dataset
60 |
61 |
62 | class CustomDatasetDataLoader():
63 | """Wrapper class of Dataset class that performs multi-threaded data loading"""
64 |
65 | def __init__(self, opt):
66 | """Initialize this class
67 |
68 | Step 1: create a dataset instance given the name [dataset_mode]
69 | Step 2: create a multi-threaded data loader.
70 | """
71 | self.opt = opt
72 | dataset_class = find_dataset_using_name(opt.dataset_mode)
73 | self.dataset = dataset_class(opt)
74 | print("dataset [%s] was created" % type(self.dataset).__name__)
75 | self.dataloader = torch.utils.data.DataLoader(
76 | self.dataset,
77 | batch_size=opt.batch_size,
78 | shuffle=not opt.serial_batches,
79 | num_workers=int(opt.num_threads))
80 |
81 | def load_data(self):
82 | return self
83 |
84 | def __len__(self):
85 | """Return the number of data in the dataset"""
86 | return min(len(self.dataset), self.opt.max_dataset_size)
87 |
88 | def __iter__(self):
89 | """Return a batch of data"""
90 | for i, data in enumerate(self.dataloader):
91 | if i * self.opt.batch_size >= self.opt.max_dataset_size:
92 | break
93 | yield data
94 |
--------------------------------------------------------------------------------
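
Note on usage: the docstring above describes the plug-in convention (a file `data/dummy_dataset.py` defining `DummyDataset` becomes selectable via `--dataset_mode dummy`), and `create_dataset` is the entry point used by `train.py`/`test.py`. Below is a minimal, hypothetical driver sketch; the `Namespace` fields are only the ones this package actually reads, and the concrete values (dataroot, load/crop sizes) are illustrative rather than defaults taken from this file.

```python
# Hedged sketch: drive data.create_dataset with a hand-built options object
# instead of TrainOptions/TestOptions. Field names mirror what
# CustomDatasetDataLoader and the dataset classes read from `opt`.
from argparse import Namespace
from data import create_dataset

opt = Namespace(
    dataset_mode='unaligned',            # -> data/unaligned_dataset.py, class UnalignedDataset
    dataroot='./datasets/horse2zebra',   # illustrative path
    phase='train',
    direction='AtoB',
    input_nc=3, output_nc=3,
    preprocess='resize_and_crop',
    load_size=286, crop_size=256,        # illustrative sizes
    no_flip=False,
    serial_batches=False,                # False -> shuffled batches
    batch_size=1,
    num_threads=4,
    max_dataset_size=float('inf'),
)

dataset = create_dataset(opt)            # wraps the dataset in a DataLoader
print('number of images:', len(dataset))
for i, batch in enumerate(dataset):      # each batch is a dict, e.g. 'A', 'B', 'A_paths', 'B_paths'
    print(batch['A'].shape, batch['B'].shape)
    break
```
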
/data/aligned_dataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | from data.base_dataset import BaseDataset, get_params, get_transform
3 | from data.image_folder import make_dataset
4 | from PIL import Image
5 |
6 |
7 | class AlignedDataset(BaseDataset):
8 | """A dataset class for paired image dataset.
9 |
10 | It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
11 | During test time, you need to prepare a directory '/path/to/data/test'.
12 | """
13 |
14 | def __init__(self, opt):
15 | """Initialize this dataset class.
16 |
17 | Parameters:
18 | opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
19 | """
20 | BaseDataset.__init__(self, opt)
21 | self.dir_AB = os.path.join(opt.dataroot, opt.phase) # get the image directory
22 | self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size)) # get image paths
23 | assert(self.opt.load_size >= self.opt.crop_size)   # crop_size should not be larger than load_size
24 | self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
25 | self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc
26 |
27 | def __getitem__(self, index):
28 | """Return a data point and its metadata information.
29 |
30 | Parameters:
31 | index - - a random integer for data indexing
32 |
33 | Returns a dictionary that contains A, B, A_paths and B_paths
34 | A (tensor) - - an image in the input domain
35 | B (tensor) - - its corresponding image in the target domain
36 | A_paths (str) - - image paths
37 | B_paths (str) - - image paths (same as A_paths)
38 | """
39 | # read an image given a random integer index
40 | AB_path = self.AB_paths[index]
41 | AB = Image.open(AB_path).convert('RGB')
42 | # split AB image into A and B
43 | w, h = AB.size
44 | w2 = int(w / 2)
45 | A = AB.crop((0, 0, w2, h))
46 | B = AB.crop((w2, 0, w, h))
47 |
48 | # apply the same transform to both A and B
49 | transform_params = get_params(self.opt, A.size)
50 | A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
51 | B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
52 |
53 | A = A_transform(A)
54 | B = B_transform(B)
55 |
56 | return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}
57 |
58 | def __len__(self):
59 | """Return the total number of images in the dataset."""
60 | return len(self.AB_paths)
61 |
--------------------------------------------------------------------------------
/data/base_dataset.py:
--------------------------------------------------------------------------------
1 | """This module implements an abstract base class (ABC) 'BaseDataset' for datasets.
2 |
3 | It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.
4 | """
5 | import random
6 | import numpy as np
7 | import torch.utils.data as data
8 | from PIL import Image
9 | import torchvision.transforms as transforms
10 | from abc import ABC, abstractmethod
11 |
12 |
13 | class BaseDataset(data.Dataset, ABC):
14 | """This class is an abstract base class (ABC) for datasets.
15 |
16 | To create a subclass, you need to implement the following four functions:
17 | -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
18 | -- <__len__>: return the size of dataset.
19 | -- <__getitem__>: get a data point.
20 | -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
21 | """
22 |
23 | def __init__(self, opt):
24 | """Initialize the class; save the options in the class
25 |
26 | Parameters:
27 | opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
28 | """
29 | self.opt = opt
30 | self.root = opt.dataroot
31 |
32 | @staticmethod
33 | def modify_commandline_options(parser, is_train):
34 | """Add new dataset-specific options, and rewrite default values for existing options.
35 |
36 | Parameters:
37 | parser -- original option parser
38 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
39 |
40 | Returns:
41 | the modified parser.
42 | """
43 | return parser
44 |
45 | @abstractmethod
46 | def __len__(self):
47 | """Return the total number of images in the dataset."""
48 | return 0
49 |
50 | @abstractmethod
51 | def __getitem__(self, index):
52 | """Return a data point and its metadata information.
53 |
54 | Parameters:
55 | index - - a random integer for data indexing
56 |
57 | Returns:
58 | a dictionary of data with their names. It usually contains the data itself and its metadata information.
59 | """
60 | pass
61 |
62 |
63 | def get_params(opt, size):
64 | w, h = size
65 | new_h = h
66 | new_w = w
67 | if opt.preprocess == 'resize_and_crop':
68 | new_h = new_w = opt.load_size
69 | elif opt.preprocess == 'scale_width_and_crop':
70 | new_w = opt.load_size
71 | new_h = opt.load_size * h // w
72 |
73 | x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
74 | y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
75 |
76 | flip = random.random() > 0.5
77 |
78 | return {'crop_pos': (x, y), 'flip': flip}
79 |
80 |
81 | def get_transform(opt, params=None, grayscale=False, method=transforms.InterpolationMode.BICUBIC, convert=True):
82 | transform_list = []
83 | if grayscale:
84 | transform_list.append(transforms.Grayscale(1))
85 | if 'resize' in opt.preprocess:
86 | osize = [opt.load_size, opt.load_size]
87 | transform_list.append(transforms.Resize(osize, method))
88 | elif 'scale_width' in opt.preprocess:
89 | transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method)))
90 |
91 | if 'crop' in opt.preprocess:
92 | if params is None:
93 | transform_list.append(transforms.RandomCrop(opt.crop_size))
94 | else:
95 | transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))
96 |
97 | if opt.preprocess == 'none':
98 | transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))
99 |
100 | if not opt.no_flip:
101 | if params is None:
102 | transform_list.append(transforms.RandomHorizontalFlip())
103 | elif params['flip']:
104 | transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
105 |
106 | if convert:
107 | transform_list += [transforms.ToTensor()]
108 | if grayscale:
109 | transform_list += [transforms.Normalize((0.5,), (0.5,))]
110 | else:
111 | transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
112 | return transforms.Compose(transform_list)
113 |
114 |
115 | def __transforms2pil_resize(method):
116 | mapper = {transforms.InterpolationMode.BILINEAR: Image.BILINEAR,
117 | transforms.InterpolationMode.BICUBIC: Image.BICUBIC,
118 | transforms.InterpolationMode.NEAREST: Image.NEAREST,
119 | transforms.InterpolationMode.LANCZOS: Image.LANCZOS,}
120 | return mapper[method]
121 |
122 |
123 | def __make_power_2(img, base, method=transforms.InterpolationMode.BICUBIC):
124 | method = __transforms2pil_resize(method)
125 | ow, oh = img.size
126 | h = int(round(oh / base) * base)
127 | w = int(round(ow / base) * base)
128 | if h == oh and w == ow:
129 | return img
130 |
131 | __print_size_warning(ow, oh, w, h)
132 | return img.resize((w, h), method)
133 |
134 |
135 | def __scale_width(img, target_size, crop_size, method=transforms.InterpolationMode.BICUBIC):
136 | method = __transforms2pil_resize(method)
137 | ow, oh = img.size
138 | if ow == target_size and oh >= crop_size:
139 | return img
140 | w = target_size
141 | h = int(max(target_size * oh / ow, crop_size))
142 | return img.resize((w, h), method)
143 |
144 |
145 | def __crop(img, pos, size):
146 | ow, oh = img.size
147 | x1, y1 = pos
148 | tw = th = size
149 | if (ow > tw or oh > th):
150 | return img.crop((x1, y1, x1 + tw, y1 + th))
151 | return img
152 |
153 |
154 | def __flip(img, flip):
155 | if flip:
156 | return img.transpose(Image.FLIP_LEFT_RIGHT)
157 | return img
158 |
159 |
160 | def __print_size_warning(ow, oh, w, h):
161 | """Print warning information about image size(only print once)"""
162 | if not hasattr(__print_size_warning, 'has_printed'):
163 | print("The image size needs to be a multiple of 4. "
164 | "The loaded image size was (%d, %d), so it was adjusted to "
165 | "(%d, %d). This adjustment will be done to all images "
166 | "whose sizes are not multiples of 4" % (ow, oh, w, h))
167 | __print_size_warning.has_printed = True
168 |
--------------------------------------------------------------------------------
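
A small illustration of the contract between `get_params` and `get_transform` above: the random crop position and flip decision are drawn once, so passing the same `params` dict to two `get_transform` calls applies an identical crop and flip to a pair of images (this is what `AlignedDataset` relies on). The `opt` below is a stand-in `Namespace` carrying only the fields these helpers read, and the file paths are placeholders.

```python
# Sketch only: shared random parameters give paired images the same crop and flip.
from argparse import Namespace
from PIL import Image
from data.base_dataset import get_params, get_transform

opt = Namespace(preprocess='resize_and_crop', load_size=286, crop_size=256, no_flip=False)

A = Image.open('path/to/A.jpg').convert('RGB')   # placeholder image paths
B = Image.open('path/to/B.jpg').convert('RGB')

params = get_params(opt, A.size)                 # one crop position + one flip decision
A_t = get_transform(opt, params)(A)              # same spatial transform for both,
B_t = get_transform(opt, params)(B)              # so pixel correspondence is preserved
print(A_t.shape, B_t.shape)                      # e.g. torch.Size([3, 256, 256]) each
```
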
/data/colorization_dataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | from data.base_dataset import BaseDataset, get_transform
3 | from data.image_folder import make_dataset
4 | from skimage import color  # requires skimage
5 | from PIL import Image
6 | import numpy as np
7 | import torchvision.transforms as transforms
8 |
9 |
10 | class ColorizationDataset(BaseDataset):
11 | """This dataset class can load a set of natural images in RGB, and convert RGB format into (L, ab) pairs in Lab color space.
12 |
13 | This dataset is required by pix2pix-based colorization model ('--model colorization')
14 | """
15 | @staticmethod
16 | def modify_commandline_options(parser, is_train):
17 | """Add new dataset-specific options, and rewrite default values for existing options.
18 |
19 | Parameters:
20 | parser -- original option parser
21 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
22 |
23 | Returns:
24 | the modified parser.
25 |
26 | By default, the number of channels for input image is 1 (L) and
27 | the number of channels for output image is 2 (ab). The direction is from A to B
28 | """
29 | parser.set_defaults(input_nc=1, output_nc=2, direction='AtoB')
30 | return parser
31 |
32 | def __init__(self, opt):
33 | """Initialize this dataset class.
34 |
35 | Parameters:
36 | opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
37 | """
38 | BaseDataset.__init__(self, opt)
39 | self.dir = os.path.join(opt.dataroot, opt.phase)
40 | self.AB_paths = sorted(make_dataset(self.dir, opt.max_dataset_size))
41 | assert(opt.input_nc == 1 and opt.output_nc == 2 and opt.direction == 'AtoB')
42 | self.transform = get_transform(self.opt, convert=False)
43 |
44 | def __getitem__(self, index):
45 | """Return a data point and its metadata information.
46 |
47 | Parameters:
48 | index - - a random integer for data indexing
49 |
50 | Returns a dictionary that contains A, B, A_paths and B_paths
51 | A (tensor) - - the L channel of an image
52 | B (tensor) - - the ab channels of the same image
53 | A_paths (str) - - image paths
54 | B_paths (str) - - image paths (same as A_paths)
55 | """
56 | path = self.AB_paths[index]
57 | im = Image.open(path).convert('RGB')
58 | im = self.transform(im)
59 | im = np.array(im)
60 | lab = color.rgb2lab(im).astype(np.float32)
61 | lab_t = transforms.ToTensor()(lab)
62 | A = lab_t[[0], ...] / 50.0 - 1.0
63 | B = lab_t[[1, 2], ...] / 110.0
64 | return {'A': A, 'B': B, 'A_paths': path, 'B_paths': path}
65 |
66 | def __len__(self):
67 | """Return the total number of images in the dataset."""
68 | return len(self.AB_paths)
69 |
--------------------------------------------------------------------------------
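
For reference, the normalization above maps L from [0, 100] to [-1, 1] and ab from roughly [-110, 110] to [-1, 1]. Below is a hedged sketch of the inverse mapping, useful when turning a predicted `B` tensor back into a viewable RGB image; the repository's own visualization code may do this differently.

```python
# Sketch: undo the (L, ab) scaling used in ColorizationDataset.__getitem__ above.
import numpy as np
import torch
from skimage import color


def lab_tensors_to_rgb(A, B):
    """A: 1xHxW L tensor in [-1, 1]; B: 2xHxW ab tensor scaled by 1/110."""
    L = (A + 1.0) * 50.0                      # inverse of A = L / 50 - 1
    ab = B * 110.0                            # inverse of B = ab / 110
    lab = torch.cat([L, ab], dim=0)           # 3xHxW Lab image
    lab = lab.permute(1, 2, 0).cpu().numpy().astype(np.float64)
    return color.lab2rgb(lab)                 # HxWx3 float RGB in [0, 1]
```
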
/data/image_folder.py:
--------------------------------------------------------------------------------
1 | """A modified image folder class
2 |
3 | We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
4 | so that this class can load images from both current directory and its subdirectories.
5 | """
6 |
7 | import torch.utils.data as data
8 |
9 | from PIL import Image
10 | import os
11 |
12 | IMG_EXTENSIONS = [
13 | '.jpg', '.JPG', '.jpeg', '.JPEG',
14 | '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
15 | '.tif', '.TIF', '.tiff', '.TIFF',
16 | ]
17 |
18 |
19 | def is_image_file(filename):
20 | return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
21 |
22 |
23 | def make_dataset(dir, max_dataset_size=float("inf")):
24 | images = []
25 | assert os.path.isdir(dir), '%s is not a valid directory' % dir
26 |
27 | for root, _, fnames in sorted(os.walk(dir)):
28 | for fname in fnames:
29 | if is_image_file(fname):
30 | path = os.path.join(root, fname)
31 | images.append(path)
32 | return images[:min(max_dataset_size, len(images))]
33 |
34 |
35 | def default_loader(path):
36 | return Image.open(path).convert('RGB')
37 |
38 |
39 | class ImageFolder(data.Dataset):
40 |
41 | def __init__(self, root, transform=None, return_paths=False,
42 | loader=default_loader):
43 | imgs = make_dataset(root)
44 | if len(imgs) == 0:
45 | raise(RuntimeError("Found 0 images in: " + root + "\n"
46 | "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
47 |
48 | self.root = root
49 | self.imgs = imgs
50 | self.transform = transform
51 | self.return_paths = return_paths
52 | self.loader = loader
53 |
54 | def __getitem__(self, index):
55 | path = self.imgs[index]
56 | img = self.loader(path)
57 | if self.transform is not None:
58 | img = self.transform(img)
59 | if self.return_paths:
60 | return img, path
61 | else:
62 | return img
63 |
64 | def __len__(self):
65 | return len(self.imgs)
66 |
--------------------------------------------------------------------------------
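
A brief, hypothetical usage of the `ImageFolder` class above (the root path is a placeholder); per the module docstring it walks subdirectories via `make_dataset`, and with `return_paths=True` each item also carries the file it was loaded from.

```python
# Hypothetical usage sketch for data.image_folder.ImageFolder.
import torchvision.transforms as transforms
from data.image_folder import ImageFolder

dataset = ImageFolder(root='./datasets/horse2zebra/trainA',  # placeholder root
                      transform=transforms.ToTensor(),
                      return_paths=True)
img, path = dataset[0]          # image tensor plus its source file path
print(len(dataset), path, img.shape)
```
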
/data/single_dataset.py:
--------------------------------------------------------------------------------
1 | from data.base_dataset import BaseDataset, get_transform
2 | from data.image_folder import make_dataset
3 | from PIL import Image
4 |
5 |
6 | class SingleDataset(BaseDataset):
7 | """This dataset class can load a set of images specified by the path --dataroot /path/to/data.
8 |
9 | It can be used for generating CycleGAN results only for one side with the model option '--model test'.
10 | """
11 |
12 | def __init__(self, opt):
13 | """Initialize this dataset class.
14 |
15 | Parameters:
16 | opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
17 | """
18 | BaseDataset.__init__(self, opt)
19 | self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
20 | input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
21 | self.transform = get_transform(opt, grayscale=(input_nc == 1))
22 |
23 | def __getitem__(self, index):
24 | """Return a data point and its metadata information.
25 |
26 | Parameters:
27 | index - - a random integer for data indexing
28 |
29 | Returns a dictionary that contains A and A_paths
30 | A(tensor) - - an image in one domain
31 | A_paths(str) - - the path of the image
32 | """
33 | A_path = self.A_paths[index]
34 | A_img = Image.open(A_path).convert('RGB')
35 | A = self.transform(A_img)
36 | return {'A': A, 'A_paths': A_path}
37 |
38 | def __len__(self):
39 | """Return the total number of images in the dataset."""
40 | return len(self.A_paths)
41 |
--------------------------------------------------------------------------------
/data/template_dataset.py:
--------------------------------------------------------------------------------
1 | """Dataset class template
2 |
3 | This module provides a template for users to implement custom datasets.
4 | You can specify '--dataset_mode template' to use this dataset.
5 | The class name should be consistent with both the filename and its dataset_mode option.
6 | The filename should be <dataset_mode>_dataset.py
7 | The class name should be <Dataset_mode>Dataset
8 | You need to implement the following functions:
9 | -- <modify_commandline_options>: Add dataset-specific options and rewrite default values for existing options.
10 | -- <__init__>: Initialize this dataset class.
11 | -- <__getitem__>: Return a data point and its metadata information.
12 | -- <__len__>: Return the number of images.
13 | """
14 | from data.base_dataset import BaseDataset, get_transform
15 | # from data.image_folder import make_dataset
16 | # from PIL import Image
17 |
18 |
19 | class TemplateDataset(BaseDataset):
20 | """A template dataset class for you to implement custom datasets."""
21 | @staticmethod
22 | def modify_commandline_options(parser, is_train):
23 | """Add new dataset-specific options, and rewrite default values for existing options.
24 |
25 | Parameters:
26 | parser -- original option parser
27 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
28 |
29 | Returns:
30 | the modified parser.
31 | """
32 | parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option')
33 | parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0) # specify dataset-specific default values
34 | return parser
35 |
36 | def __init__(self, opt):
37 | """Initialize this dataset class.
38 |
39 | Parameters:
40 | opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
41 |
42 | A few things can be done here.
43 | - save the options (have been done in BaseDataset)
44 | - get image paths and meta information of the dataset.
45 | - define the image transformation.
46 | """
47 | # save the option and dataset root
48 | BaseDataset.__init__(self, opt)
49 | # get the image paths of your dataset;
50 | self.image_paths = [] # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root
51 | # define the default transform function. You can use <base_dataset.get_transform>; You can also define your custom transform function
52 | self.transform = get_transform(opt)
53 |
54 | def __getitem__(self, index):
55 | """Return a data point and its metadata information.
56 |
57 | Parameters:
58 | index -- a random integer for data indexing
59 |
60 | Returns:
61 | a dictionary of data with their names. It usually contains the data itself and its metadata information.
62 |
63 | Step 1: get a random image path: e.g., path = self.image_paths[index]
64 | Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB').
65 | Step 3: convert your data to a PyTorch tensor. You can use helper functions such as self.transform. e.g., data = self.transform(image)
66 | Step 4: return a data point as a dictionary.
67 | """
68 | path = 'temp' # needs to be a string
69 | data_A = None # needs to be a tensor
70 | data_B = None # needs to be a tensor
71 | return {'data_A': data_A, 'data_B': data_B, 'path': path}
72 |
73 | def __len__(self):
74 | """Return the total number of images."""
75 | return len(self.image_paths)
76 |
--------------------------------------------------------------------------------
/data/unaligned_dataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | from data.base_dataset import BaseDataset, get_transform
3 | from data.image_folder import make_dataset
4 | from PIL import Image
5 | import random
6 |
7 |
8 | class UnalignedDataset(BaseDataset):
9 | """
10 | This dataset class can load unaligned/unpaired datasets.
11 |
12 | It requires two directories to host training images from domain A '/path/to/data/trainA'
13 | and from domain B '/path/to/data/trainB' respectively.
14 | You can train the model with the dataset flag '--dataroot /path/to/data'.
15 | Similarly, you need to prepare two directories:
16 | '/path/to/data/testA' and '/path/to/data/testB' during test time.
17 | """
18 |
19 | def __init__(self, opt):
20 | """Initialize this dataset class.
21 |
22 | Parameters:
23 | opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
24 | """
25 | BaseDataset.__init__(self, opt)
26 | self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA'
27 | self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B') # create a path '/path/to/data/trainB'
28 |
29 | self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'
30 | self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'
31 | self.A_size = len(self.A_paths) # get the size of dataset A
32 | self.B_size = len(self.B_paths) # get the size of dataset B
33 | btoA = self.opt.direction == 'BtoA'
34 | input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image
35 | output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image
36 | self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
37 | self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
38 |
39 | def __getitem__(self, index):
40 | """Return a data point and its metadata information.
41 |
42 | Parameters:
43 | index (int) -- a random integer for data indexing
44 |
45 | Returns a dictionary that contains A, B, A_paths and B_paths
46 | A (tensor) -- an image in the input domain
47 | B (tensor) -- its corresponding image in the target domain
48 | A_paths (str) -- image paths
49 | B_paths (str) -- image paths
50 | """
51 | A_path = self.A_paths[index % self.A_size]  # make sure index is within the range
52 | if self.opt.serial_batches:   # keep a fixed pairing: take B in the same order as A
53 | index_B = index % self.B_size
54 | else: # randomize the index for domain B to avoid fixed pairs.
55 | index_B = random.randint(0, self.B_size - 1)
56 | B_path = self.B_paths[index_B]
57 | A_img = Image.open(A_path).convert('RGB')
58 | B_img = Image.open(B_path).convert('RGB')
59 | # apply image transformation
60 | A = self.transform_A(A_img)
61 | B = self.transform_B(B_img)
62 |
63 | return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
64 |
65 | def __len__(self):
66 | """Return the total number of images in the dataset.
67 |
68 | As we have two datasets with potentially different numbers of images,
69 | we take the maximum of the two.
70 | """
71 | return max(self.A_size, self.B_size)
72 |
--------------------------------------------------------------------------------
/datasets/bibtex/cityscapes.tex:
--------------------------------------------------------------------------------
1 | @inproceedings{Cordts2016Cityscapes,
2 | title={The Cityscapes Dataset for Semantic Urban Scene Understanding},
3 | author={Cordts, Marius and Omran, Mohamed and Ramos, Sebastian and Rehfeld, Timo and Enzweiler, Markus and Benenson, Rodrigo and Franke, Uwe and Roth, Stefan and Schiele, Bernt},
4 | booktitle={Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
5 | year={2016}
6 | }
7 |
--------------------------------------------------------------------------------
/datasets/bibtex/facades.tex:
--------------------------------------------------------------------------------
1 | @INPROCEEDINGS{Tylecek13,
2 | author = {Radim Tyle{\v c}ek and Radim {\v S}{\' a}ra},
3 | title = {Spatial Pattern Templates for Recognition of Objects with Regular Structure},
4 | booktitle = {Proc. GCPR},
5 | year = {2013},
6 | address = {Saarbrucken, Germany},
7 | }
8 |
--------------------------------------------------------------------------------
/datasets/bibtex/handbags.tex:
--------------------------------------------------------------------------------
1 | @inproceedings{zhu2016generative,
2 | title={Generative Visual Manipulation on the Natural Image Manifold},
3 | author={Zhu, Jun-Yan and Kr{\"a}henb{\"u}hl, Philipp and Shechtman, Eli and Efros, Alexei A.},
4 | booktitle={Proceedings of European Conference on Computer Vision (ECCV)},
5 | year={2016}
6 | }
7 |
8 | @InProceedings{xie15hed,
9 | author = {"Xie, Saining and Tu, Zhuowen"},
10 | Title = {Holistically-Nested Edge Detection},
11 | Booktitle = "Proceedings of IEEE International Conference on Computer Vision",
12 | Year = {2015},
13 | }
14 |
--------------------------------------------------------------------------------
/datasets/bibtex/shoes.tex:
--------------------------------------------------------------------------------
1 | @InProceedings{fine-grained,
2 | author = {A. Yu and K. Grauman},
3 | title = {{F}ine-{G}rained {V}isual {C}omparisons with {L}ocal {L}earning},
4 | booktitle = {Computer Vision and Pattern Recognition (CVPR)},
5 | month = {June},
6 | year = {2014}
7 | }
8 |
9 | @InProceedings{xie15hed,
10 | author = {"Xie, Saining and Tu, Zhuowen"},
11 | Title = {Holistically-Nested Edge Detection},
12 | Booktitle = "Proceedings of IEEE International Conference on Computer Vision",
13 | Year = {2015},
14 | }
15 |
--------------------------------------------------------------------------------
/datasets/bibtex/transattr.tex:
--------------------------------------------------------------------------------
1 | @article {Laffont14,
2 | title = {Transient Attributes for High-Level Understanding and Editing of Outdoor Scenes},
3 | author = {Pierre-Yves Laffont and Zhile Ren and Xiaofeng Tao and Chao Qian and James Hays},
4 | journal = {ACM Transactions on Graphics (proceedings of SIGGRAPH)},
5 | volume = {33},
6 | number = {4},
7 | year = {2014}
8 | }
9 |
--------------------------------------------------------------------------------
/datasets/combine_A_and_B.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import cv2
4 | import argparse
5 | from multiprocessing import Pool
6 |
7 |
8 | def image_write(path_A, path_B, path_AB):
9 | im_A = cv2.imread(path_A, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
10 | im_B = cv2.imread(path_B, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
11 | im_AB = np.concatenate([im_A, im_B], 1)
12 | cv2.imwrite(path_AB, im_AB)
13 |
14 |
15 | parser = argparse.ArgumentParser('create image pairs')
16 | parser.add_argument('--fold_A', dest='fold_A', help='input directory for image A', type=str, default='../dataset/50kshoes_edges')
17 | parser.add_argument('--fold_B', dest='fold_B', help='input directory for image B', type=str, default='../dataset/50kshoes_jpg')
18 | parser.add_argument('--fold_AB', dest='fold_AB', help='output directory', type=str, default='../dataset/test_AB')
19 | parser.add_argument('--num_imgs', dest='num_imgs', help='number of images', type=int, default=1000000)
20 | parser.add_argument('--use_AB', dest='use_AB', help='if true: (0001_A, 0001_B) to (0001_AB)', action='store_true')
21 | parser.add_argument('--no_multiprocessing', dest='no_multiprocessing', help='If used, chooses single CPU execution instead of parallel execution', action='store_true',default=False)
22 | args = parser.parse_args()
23 |
24 | for arg in vars(args):
25 | print('[%s] = ' % arg, getattr(args, arg))
26 |
27 | splits = os.listdir(args.fold_A)
28 |
29 | if not args.no_multiprocessing:
30 | pool=Pool()
31 |
32 | for sp in splits:
33 | img_fold_A = os.path.join(args.fold_A, sp)
34 | img_fold_B = os.path.join(args.fold_B, sp)
35 | img_list = os.listdir(img_fold_A)
36 | if args.use_AB:
37 | img_list = [img_path for img_path in img_list if '_A.' in img_path]
38 |
39 | num_imgs = min(args.num_imgs, len(img_list))
40 | print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list)))
41 | img_fold_AB = os.path.join(args.fold_AB, sp)
42 | if not os.path.isdir(img_fold_AB):
43 | os.makedirs(img_fold_AB)
44 | print('split = %s, number of images = %d' % (sp, num_imgs))
45 | for n in range(num_imgs):
46 | name_A = img_list[n]
47 | path_A = os.path.join(img_fold_A, name_A)
48 | if args.use_AB:
49 | name_B = name_A.replace('_A.', '_B.')
50 | else:
51 | name_B = name_A
52 | path_B = os.path.join(img_fold_B, name_B)
53 | if os.path.isfile(path_A) and os.path.isfile(path_B):
54 | name_AB = name_A
55 | if args.use_AB:
56 | name_AB = name_AB.replace('_A.', '.') # remove _A
57 | path_AB = os.path.join(img_fold_AB, name_AB)
58 | if not args.no_multiprocessing:
59 | pool.apply_async(image_write, args=(path_A, path_B, path_AB))
60 | else:
61 | im_A = cv2.imread(path_A, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
62 | im_B = cv2.imread(path_B, 1) # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
63 | im_AB = np.concatenate([im_A, im_B], 1)
64 | cv2.imwrite(path_AB, im_AB)
65 | if not args.no_multiprocessing:
66 | pool.close()
67 | pool.join()
68 |
--------------------------------------------------------------------------------
/datasets/download_cyclegan_dataset.sh:
--------------------------------------------------------------------------------
1 | FILE=$1
2 |
3 | if [[ $FILE != "ae_photos" && $FILE != "apple2orange" && $FILE != "summer2winter_yosemite" && $FILE != "horse2zebra" && $FILE != "monet2photo" && $FILE != "cezanne2photo" && $FILE != "ukiyoe2photo" && $FILE != "vangogh2photo" && $FILE != "maps" && $FILE != "cityscapes" && $FILE != "facades" && $FILE != "iphone2dslr_flower" && $FILE != "mini" && $FILE != "mini_pix2pix" && $FILE != "mini_colorization" ]]; then
4 | echo "Available datasets are: apple2orange, summer2winter_yosemite, horse2zebra, monet2photo, cezanne2photo, ukiyoe2photo, vangogh2photo, maps, cityscapes, facades, iphone2dslr_flower, ae_photos"
5 | exit 1
6 | fi
7 |
8 | if [[ $FILE == "cityscapes" ]]; then
9 | echo "Due to license issue, we cannot provide the Cityscapes dataset from our repository. Please download the Cityscapes dataset from https://cityscapes-dataset.com, and use the script ./datasets/prepare_cityscapes_dataset.py."
10 | echo "You need to download gtFine_trainvaltest.zip and leftImg8bit_trainvaltest.zip. For further instruction, please read ./datasets/prepare_cityscapes_dataset.py"
11 | exit 1
12 | fi
13 |
14 | echo "Specified [$FILE]"
15 | URL=http://efrosgans.eecs.berkeley.edu/cyclegan/datasets/$FILE.zip
16 | ZIP_FILE=./datasets/$FILE.zip
17 | TARGET_DIR=./datasets/$FILE/
18 | wget -N $URL -O $ZIP_FILE
19 | mkdir $TARGET_DIR
20 | unzip $ZIP_FILE -d ./datasets/
21 | rm $ZIP_FILE
22 |
--------------------------------------------------------------------------------
/datasets/download_pix2pix_dataset.sh:
--------------------------------------------------------------------------------
1 | FILE=$1
2 |
3 | if [[ $FILE != "cityscapes" && $FILE != "night2day" && $FILE != "edges2handbags" && $FILE != "edges2shoes" && $FILE != "facades" && $FILE != "maps" ]]; then
4 | echo "Available datasets are cityscapes, night2day, edges2handbags, edges2shoes, facades, maps"
5 | exit 1
6 | fi
7 |
8 | if [[ $FILE == "cityscapes" ]]; then
9 | echo "Due to license issue, we cannot provide the Cityscapes dataset from our repository. Please download the Cityscapes dataset from https://cityscapes-dataset.com, and use the script ./datasets/prepare_cityscapes_dataset.py."
10 | echo "You need to download gtFine_trainvaltest.zip and leftImg8bit_trainvaltest.zip. For further instruction, please read ./datasets/prepare_cityscapes_dataset.py"
11 | exit 1
12 | fi
13 |
14 | echo "Specified [$FILE]"
15 |
16 | URL=http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/$FILE.tar.gz
17 | TAR_FILE=./datasets/$FILE.tar.gz
18 | TARGET_DIR=./datasets/$FILE/
19 | wget -N $URL -O $TAR_FILE
20 | mkdir -p $TARGET_DIR
21 | tar -zxvf $TAR_FILE -C ./datasets/
22 | rm $TAR_FILE
23 |
--------------------------------------------------------------------------------
/datasets/make_dataset_aligned.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from PIL import Image
4 |
5 |
6 | def get_file_paths(folder):
7 | image_file_paths = []
8 | for root, dirs, filenames in os.walk(folder):
9 | filenames = sorted(filenames)
10 | for filename in filenames:
11 | input_path = os.path.abspath(root)
12 | file_path = os.path.join(input_path, filename)
13 | if filename.endswith('.png') or filename.endswith('.jpg'):
14 | image_file_paths.append(file_path)
15 |
16 | break # prevent descending into subfolders
17 | return image_file_paths
18 |
19 |
20 | def align_images(a_file_paths, b_file_paths, target_path):
21 | if not os.path.exists(target_path):
22 | os.makedirs(target_path)
23 |
24 | for i in range(len(a_file_paths)):
25 | img_a = Image.open(a_file_paths[i])
26 | img_b = Image.open(b_file_paths[i])
27 | assert(img_a.size == img_b.size)
28 |
29 | aligned_image = Image.new("RGB", (img_a.size[0] * 2, img_a.size[1]))
30 | aligned_image.paste(img_a, (0, 0))
31 | aligned_image.paste(img_b, (img_a.size[0], 0))
32 | aligned_image.save(os.path.join(target_path, '{:04d}.jpg'.format(i)))
33 |
34 |
35 | if __name__ == '__main__':
36 | import argparse
37 | parser = argparse.ArgumentParser()
38 | parser.add_argument(
39 | '--dataset-path',
40 | dest='dataset_path',
41 | help='Which folder to process (it should have subfolders testA, testB, trainA and trainB)'
42 | )
43 | args = parser.parse_args()
44 |
45 | dataset_folder = args.dataset_path
46 | print(dataset_folder)
47 |
48 | test_a_path = os.path.join(dataset_folder, 'testA')
49 | test_b_path = os.path.join(dataset_folder, 'testB')
50 | test_a_file_paths = get_file_paths(test_a_path)
51 | test_b_file_paths = get_file_paths(test_b_path)
52 | assert(len(test_a_file_paths) == len(test_b_file_paths))
53 | test_path = os.path.join(dataset_folder, 'test')
54 |
55 | train_a_path = os.path.join(dataset_folder, 'trainA')
56 | train_b_path = os.path.join(dataset_folder, 'trainB')
57 | train_a_file_paths = get_file_paths(train_a_path)
58 | train_b_file_paths = get_file_paths(train_b_path)
59 | assert(len(train_a_file_paths) == len(train_b_file_paths))
60 | train_path = os.path.join(dataset_folder, 'train')
61 |
62 | align_images(test_a_file_paths, test_b_file_paths, test_path)
63 | align_images(train_a_file_paths, train_b_file_paths, train_path)
64 |
--------------------------------------------------------------------------------
/datasets/prepare_cityscapes_dataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | import glob
3 | from PIL import Image
4 |
5 | help_msg = """
6 | The dataset can be downloaded from https://cityscapes-dataset.com.
7 | Please download the datasets [gtFine_trainvaltest.zip] and [leftImg8bit_trainvaltest.zip] and unzip them.
8 | gtFine contains the semantic segmentations. Use --gtFine_dir to specify the path to the unzipped gtFine_trainvaltest directory.
9 | leftImg8bit contains the dashcam photographs. Use --leftImg8bit_dir to specify the path to the unzipped leftImg8bit_trainvaltest directory.
10 | The processed images will be placed at --output_dir.
11 |
12 | Example usage:
13 |
14 | python prepare_cityscapes_dataset.py --gtFine_dir ./gtFine/ --leftImg8bit_dir ./leftImg8bit --output_dir ./datasets/cityscapes/
15 | """
16 |
17 | def load_resized_img(path):
18 | return Image.open(path).convert('RGB').resize((256, 256))
19 |
20 | def check_matching_pair(segmap_path, photo_path):
21 | segmap_identifier = os.path.basename(segmap_path).replace('_gtFine_color', '')
22 | photo_identifier = os.path.basename(photo_path).replace('_leftImg8bit', '')
23 |
24 | assert segmap_identifier == photo_identifier, \
25 | "[%s] and [%s] don't seem to be matching. Aborting." % (segmap_path, photo_path)
26 |
27 |
28 | def process_cityscapes(gtFine_dir, leftImg8bit_dir, output_dir, phase):
29 | save_phase = 'test' if phase == 'val' else 'train'
30 | savedir = os.path.join(output_dir, save_phase)
31 | os.makedirs(savedir, exist_ok=True)
32 | os.makedirs(savedir + 'A', exist_ok=True)
33 | os.makedirs(savedir + 'B', exist_ok=True)
34 | print("Directory structure prepared at %s" % output_dir)
35 |
36 | segmap_expr = os.path.join(gtFine_dir, phase) + "/*/*_color.png"
37 | segmap_paths = glob.glob(segmap_expr)
38 | segmap_paths = sorted(segmap_paths)
39 |
40 | photo_expr = os.path.join(leftImg8bit_dir, phase) + "/*/*_leftImg8bit.png"
41 | photo_paths = glob.glob(photo_expr)
42 | photo_paths = sorted(photo_paths)
43 |
44 | assert len(segmap_paths) == len(photo_paths), \
45 | "%d images that match [%s], and %d images that match [%s]. Aborting." % (len(segmap_paths), segmap_expr, len(photo_paths), photo_expr)
46 |
47 | for i, (segmap_path, photo_path) in enumerate(zip(segmap_paths, photo_paths)):
48 | check_matching_pair(segmap_path, photo_path)
49 | segmap = load_resized_img(segmap_path)
50 | photo = load_resized_img(photo_path)
51 |
52 | # data for pix2pix where the two images are placed side-by-side
53 | sidebyside = Image.new('RGB', (512, 256))
54 | sidebyside.paste(segmap, (256, 0))
55 | sidebyside.paste(photo, (0, 0))
56 | savepath = os.path.join(savedir, "%d.jpg" % i)
57 | sidebyside.save(savepath, format='JPEG', subsampling=0, quality=100)
58 |
59 | # data for cyclegan where the two images are stored at two distinct directories
60 | savepath = os.path.join(savedir + 'A', "%d_A.jpg" % i)
61 | photo.save(savepath, format='JPEG', subsampling=0, quality=100)
62 | savepath = os.path.join(savedir + 'B', "%d_B.jpg" % i)
63 | segmap.save(savepath, format='JPEG', subsampling=0, quality=100)
64 |
65 | if i % (len(segmap_paths) // 10) == 0:
66 | print("%d / %d: last image saved at %s, " % (i, len(segmap_paths), savepath))
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 | if __name__ == '__main__':
78 | import argparse
79 | parser = argparse.ArgumentParser()
80 | parser.add_argument('--gtFine_dir', type=str, required=True,
81 | help='Path to the Cityscapes gtFine directory.')
82 | parser.add_argument('--leftImg8bit_dir', type=str, required=True,
83 | help='Path to the Cityscapes leftImg8bit_trainvaltest directory.')
84 | parser.add_argument('--output_dir', type=str, required=True,
85 | default='./datasets/cityscapes',
86 | help='Directory the output images will be written to.')
87 | opt = parser.parse_args()
88 |
89 | print(help_msg)
90 |
91 | print('Preparing Cityscapes Dataset for val phase')
92 | process_cityscapes(opt.gtFine_dir, opt.leftImg8bit_dir, opt.output_dir, "val")
93 | print('Preparing Cityscapes Dataset for train phase')
94 | process_cityscapes(opt.gtFine_dir, opt.leftImg8bit_dir, opt.output_dir, "train")
95 |
96 | print('Done')
97 |
98 |
99 |
100 |
--------------------------------------------------------------------------------
/docs/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:10.1-base
2 |
3 | #Nvidia Public GPG Key
4 | RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
5 |
6 | RUN apt update && apt install -y wget unzip curl bzip2 git
7 | RUN curl -LO http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh
8 | RUN bash Miniconda3-latest-Linux-x86_64.sh -p /miniconda -b
9 | RUN rm Miniconda3-latest-Linux-x86_64.sh
10 | ENV PATH=/miniconda/bin:${PATH}
11 | RUN conda update -y conda
12 |
13 | RUN conda install -y pytorch torchvision -c pytorch
14 | RUN mkdir /workspace/ && cd /workspace/ && git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix.git && cd pytorch-CycleGAN-and-pix2pix && pip install -r requirements.txt
15 |
16 | WORKDIR /workspace
17 |
--------------------------------------------------------------------------------
/docs/datasets.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | ### CycleGAN Datasets
4 | Download the CycleGAN datasets using the following script. Some of the datasets are collected by other researchers. Please cite their papers if you use the data.
5 | ```bash
6 | bash ./datasets/download_cyclegan_dataset.sh dataset_name
7 | ```
8 | - `facades`: 400 images from the [CMP Facades dataset](http://cmp.felk.cvut.cz/~tylecr1/facade). [[Citation](../datasets/bibtex/facades.tex)]
9 | - `cityscapes`: 2975 images from the [Cityscapes training set](https://www.cityscapes-dataset.com). [[Citation](../datasets/bibtex/cityscapes.tex)]. Note: Due to license issues, we cannot directly provide the Cityscapes dataset. Please download the Cityscapes dataset from [https://cityscapes-dataset.com](https://cityscapes-dataset.com) and use the script `./datasets/prepare_cityscapes_dataset.py`.
10 | - `maps`: 1096 training images scraped from Google Maps.
11 | - `horse2zebra`: 939 horse images and 1177 zebra images downloaded from [ImageNet](http://www.image-net.org) using keywords `wild horse` and `zebra`
12 | - `apple2orange`: 996 apple images and 1020 orange images downloaded from [ImageNet](http://www.image-net.org) using keywords `apple` and `navel orange`.
13 | - `summer2winter_yosemite`: 1273 summer Yosemite images and 854 winter Yosemite images were downloaded using Flickr API. See more details in our paper.
14 | - `monet2photo`, `vangogh2photo`, `ukiyoe2photo`, `cezanne2photo`: The art images were downloaded from [Wikiart](https://www.wikiart.org/). The real photos are downloaded from Flickr using the combination of the tags *landscape* and *landscapephotography*. The training set size of each class is Monet:1074, Cezanne:584, Van Gogh:401, Ukiyo-e:1433, Photographs:6853.
15 | - `iphone2dslr_flower`: both classes of images were downloaded from Flickr. The training set size of each class is iPhone:1813, DSLR:3316. See more details in our paper.
16 |
17 | To train a model on your own datasets, you need to create a data folder with two subdirectories `trainA` and `trainB` that contain images from domain A and B. You can test your model on your training set by setting `--phase train` in `test.py`. You can also create subdirectories `testA` and `testB` if you have test data.
18 |
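As a quick sketch of the layout and commands described above (the dataset name `mydata` and the experiment name `mydata_cyclegan` are placeholders):
```bash
# expected layout under --dataroot
#   ./datasets/mydata/trainA/   images from domain A
#   ./datasets/mydata/trainB/   images from domain B
#   ./datasets/mydata/testA/    optional hold-out images from domain A
#   ./datasets/mydata/testB/    optional hold-out images from domain B
python train.py --dataroot ./datasets/mydata --name mydata_cyclegan --model cycle_gan
python test.py --dataroot ./datasets/mydata --name mydata_cyclegan --model cycle_gan --phase train   # evaluate on the training set
```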
19 | You should **not** expect our method to work on just any random combination of input and output datasets (e.g. `cats<->keyboards`). From our experiments, we find it works better if two datasets share similar visual content. For example, `landscape painting<->landscape photographs` works much better than `portrait painting <-> landscape photographs`. `zebras<->horses` achieves compelling results while `cats<->dogs` completely fails.
20 |
21 | ### pix2pix datasets
22 | Download the pix2pix datasets using the following script. Some of the datasets are collected by other researchers. Please cite their papers if you use the data.
23 | ```bash
24 | bash ./datasets/download_pix2pix_dataset.sh dataset_name
25 | ```
26 | - `facades`: 400 images from [CMP Facades dataset](http://cmp.felk.cvut.cz/~tylecr1/facade). [[Citation](../datasets/bibtex/facades.tex)]
27 | - `cityscapes`: 2975 images from the [Cityscapes training set](https://www.cityscapes-dataset.com). [[Citation](../datasets/bibtex/cityscapes.tex)]
28 | - `maps`: 1096 training images scraped from Google Maps
29 | - `edges2shoes`: 50k training images from [UT Zappos50K dataset](http://vision.cs.utexas.edu/projects/finegrained/utzap50k). Edges are computed by [HED](https://github.com/s9xie/hed) edge detector + post-processing. [[Citation](../datasets/bibtex/shoes.tex)]
30 | - `edges2handbags`: 137K Amazon Handbag images from [iGAN project](https://github.com/junyanz/iGAN). Edges are computed by [HED](https://github.com/s9xie/hed) edge detector + post-processing. [[Citation](../datasets/bibtex/handbags.tex)]
31 | - `night2day`: around 20K natural scene images from [Transient Attributes dataset](http://transattr.cs.brown.edu/) [[Citation](../datasets/bibtex/transattr.tex)]. To train a `day2night` pix2pix model, you need to add `--direction BtoA`.
32 |
33 | We provide a python script to generate pix2pix training data in the form of pairs of images {A,B}, where A and B are two different depictions of the same underlying scene. For example, these might be pairs {label map, photo} or {bw image, color image}. Then we can learn to translate A to B or B to A:
34 |
35 | Create folder `/path/to/data` with subfolders `A` and `B`. `A` and `B` should each have their own subfolders `train`, `val`, `test`, etc. In `/path/to/data/A/train`, put training images in style A. In `/path/to/data/B/train`, put the corresponding images in style B. Repeat same for other data splits (`val`, `test`, etc).
36 |
37 | Corresponding images in a pair {A,B} must be the same size and have the same filename, e.g., `/path/to/data/A/train/1.jpg` is considered to correspond to `/path/to/data/B/train/1.jpg`.
38 |
39 | Once the data is formatted this way, call:
40 | ```bash
41 | python datasets/combine_A_and_B.py --fold_A /path/to/data/A --fold_B /path/to/data/B --fold_AB /path/to/data
42 | ```
43 |
44 | This will combine each pair of images (A,B) into a single image file, ready for training.
45 |
--------------------------------------------------------------------------------
/docs/docker.md:
--------------------------------------------------------------------------------
1 | # Docker image with pytorch-CycleGAN-and-pix2pix
2 |
3 | We provide both Dockerfile and pre-built Docker container that can run this code repo.
4 |
5 | ## Prerequisite
6 |
7 | - Install [docker-ce](https://docs.docker.com/install/linux/docker-ce/ubuntu/)
8 | - Install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker#quickstart)
9 |
10 | ## Running the pre-built Docker image
11 |
12 | - Pull the pre-built Docker image
13 |
14 | ```bash
15 | docker pull taesungp/pytorch-cyclegan-and-pix2pix
16 | ```
17 |
18 | - Start an interactive docker session. `-p 8097:8097` option is needed if you want to run `visdom` server on the Docker container.
19 |
20 | ```bash
21 | nvidia-docker run -it -p 8097:8097 taesungp/pytorch-cyclegan-and-pix2pix
22 | ```
23 |
24 | - Now you are in the Docker environment. Go to our code repo and start running things.
25 | ```bash
26 | cd /workspace/pytorch-CycleGAN-and-pix2pix
27 | bash datasets/download_pix2pix_dataset.sh facades
28 | python -m visdom.server &
29 | bash scripts/train_pix2pix.sh
30 | ```
31 |
32 | ## Running with Dockerfile
33 |
34 | We also posted the [Dockerfile](Dockerfile). To build the image yourself, download the Dockerfile in this directory and run
35 | ```bash
36 | docker build -t [target_tag] .
37 | ```
38 | in the directory that contains the Dockerfile.
39 |
--------------------------------------------------------------------------------
/docs/overview.md:
--------------------------------------------------------------------------------
1 | ## Overview of Code Structure
2 | To help users better understand and use our codebase, we briefly overview the functionality and implementation of each package and each module. Please see the documentation in each file for more details. If you have questions, you may find useful information in [training/test tips](tips.md) and [frequently asked questions](qa.md).
3 |
4 | [train.py](../train.py) is a general-purpose training script. It works for various models (with option `--model`: e.g., `pix2pix`, `cycle_gan`, `colorization`) and different datasets (with option `--dataset_mode`: e.g., `aligned`, `unaligned`, `single`, `colorization`). See the main [README](../README.md) and [training/test tips](tips.md) for more details.
5 |
6 | [test.py](../test.py) is a general-purpose test script. Once you have trained your model with `train.py`, you can use this script to test the model. It will load a saved model from `--checkpoints_dir` and save the results to `--results_dir`. See the main [README](../README.md) and [training/test tips](tips.md) for more details.
7 |
8 |
9 | [data](../data) directory contains all the modules related to data loading and preprocessing. To add a custom dataset class called `dummy`, you need to add a file called `dummy_dataset.py` and define a subclass `DummyDataset` inherited from `BaseDataset`. You need to implement four functions: `__init__` (initialize the class; you need to first call `BaseDataset.__init__(self, opt)`), `__len__` (return the size of the dataset), `__getitem__` (get a data point), and optionally `modify_commandline_options` (add dataset-specific options and set default options). Now you can use the dataset class by specifying flag `--dataset_mode dummy`. See our template dataset [class](../data/template_dataset.py) for an example. Below we explain each file in detail; a minimal example dataset class follows the list.
10 |
11 | * [\_\_init\_\_.py](../data/__init__.py) implements the interface between this package and training and test scripts. `train.py` and `test.py` call `from data import create_dataset` and `dataset = create_dataset(opt)` to create a dataset given the option `opt`.
12 | * [base_dataset.py](../data/base_dataset.py) implements an abstract base class ([ABC](https://docs.python.org/3/library/abc.html)) for datasets. It also includes common transformation functions (e.g., `get_transform`, `__scale_width`), which can be later used in subclasses.
13 | * [image_folder.py](../data/image_folder.py) implements an image folder class. We modify the official PyTorch image folder [code](https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py) so that this class can load images from both the current directory and its subdirectories.
14 | * [template_dataset.py](../data/template_dataset.py) provides a dataset template with detailed documentation. Check out this file if you plan to implement your own dataset.
15 | * [aligned_dataset.py](../data/aligned_dataset.py) includes a dataset class that can load image pairs. It assumes a single image directory `/path/to/data/train`, which contains image pairs in the form of {A,B}. See [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md#prepare-your-own-datasets-for-pix2pix) on how to prepare aligned datasets. During test time, you need to prepare a directory `/path/to/data/test` as test data.
16 | * [unaligned_dataset.py](../data/unaligned_dataset.py) includes a dataset class that can load unaligned/unpaired datasets. It assumes that two directories to host training images from domain A `/path/to/data/trainA` and from domain B `/path/to/data/trainB` respectively. Then you can train the model with the dataset flag `--dataroot /path/to/data`. Similarly, you need to prepare two directories `/path/to/data/testA` and `/path/to/data/testB` during test time.
17 | * [single_dataset.py](../data/single_dataset.py) includes a dataset class that can load a set of single images specified by the path `--dataroot /path/to/data`. It can be used for generating CycleGAN results only for one side with the model option `--model test`.
18 | * [colorization_dataset.py](../data/colorization_dataset.py) implements a dataset class that can load a set of natural images in RGB and convert them into (L, ab) pairs in [Lab](https://en.wikipedia.org/wiki/CIELAB_color_space) color space. It is required by the pix2pix-based colorization model (`--model colorization`).
19 |
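For concreteness, here is a minimal, hypothetical `data/dummy_dataset.py` sketch of the functions described above. It simply loads single images and is close in spirit to [single_dataset.py](../data/single_dataset.py) and the template class; the directory layout it assumes is illustrative:

```python
# data/dummy_dataset.py -- hypothetical minimal example, not part of the repository
import os.path
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image


class DummyDataset(BaseDataset):
    def __init__(self, opt):
        BaseDataset.__init__(self, opt)                                        # required first call
        self.paths = sorted(make_dataset(os.path.join(opt.dataroot, opt.phase)))
        self.transform = get_transform(opt)                                    # common resize/crop/normalize pipeline

    def __len__(self):
        return len(self.paths)                                                 # size of the dataset

    def __getitem__(self, index):
        path = self.paths[index]
        image = self.transform(Image.open(path).convert('RGB'))
        return {'A': image, 'A_paths': path}                                   # one data point
```

With this file in place, `--dataset_mode dummy` selects the class by name.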
20 |
21 | [models](../models) directory contains modules related to objective functions, optimizations, and network architectures. To add a custom model class called `dummy`, you need to add a file called `dummy_model.py` and define a subclass `DummyModel` inherited from `BaseModel`. You need to implement four functions: `__init__` (initialize the class; you need to first call `BaseModel.__init__(self, opt)`), `set_input` (unpack data from dataset and apply preprocessing), `forward` (generate intermediate results), `optimize_parameters` (calculate loss, gradients, and update network weights), and optionally `modify_commandline_options` (add model-specific options and set default options). Now you can use the model class by specifying flag `--model dummy`. See our template model [class](../models/template_model.py) for an example. Below we explain each file in detail; a minimal example model class follows the list.
22 |
23 | * [\_\_init\_\_.py](../models/__init__.py) implements the interface between this package and training and test scripts. `train.py` and `test.py` call `from models import create_model` and `model = create_model(opt)` to create a model given the option `opt`. You also need to call `model.setup(opt)` to properly initialize the model.
24 | * [base_model.py](../models/base_model.py) implements an abstract base class ([ABC](https://docs.python.org/3/library/abc.html)) for models. It also includes commonly used helper functions (e.g., `setup`, `test`, `update_learning_rate`, `save_networks`, `load_networks`), which can be later used in subclasses.
25 | * [template_model.py](../models/template_model.py) provides a model template with detailed documentation. Check out this file if you plan to implement your own model.
26 | * [pix2pix_model.py](../models/pix2pix_model.py) implements the pix2pix [model](https://phillipi.github.io/pix2pix/), for learning a mapping from input images to output images given paired data. The model training requires `--dataset_mode aligned` dataset. By default, it uses a `--netG unet256` [U-Net](https://arxiv.org/pdf/1505.04597.pdf) generator, a `--netD basic` discriminator (PatchGAN), and a `--gan_mode vanilla` GAN loss (standard cross-entropy objective).
27 | * [colorization_model.py](../models/colorization_model.py) implements a subclass of `Pix2PixModel` for image colorization (black & white image to colorful image). The model training requires the `--dataset_mode colorization` dataset. It trains a pix2pix model, mapping from L channel to ab channels in [Lab](https://en.wikipedia.org/wiki/CIELAB_color_space) color space. By default, the `colorization` dataset will automatically set `--input_nc 1` and `--output_nc 2`.
28 | * [cycle_gan_model.py](../models/cycle_gan_model.py) implements the CycleGAN [model](https://junyanz.github.io/CycleGAN/), for learning image-to-image translation without paired data. The model training requires `--dataset_mode unaligned` dataset. By default, it uses a `--netG resnet_9blocks` ResNet generator, a `--netD basic` discriminator (PatchGAN introduced by pix2pix), and a least-square GANs [objective](https://arxiv.org/abs/1611.04076) (`--gan_mode lsgan`).
29 | * [networks.py](../models/networks.py) module implements network architectures (both generators and discriminators), as well as normalization layers, initialization methods, optimization scheduler (i.e., learning rate policy), and GAN objective function (`vanilla`, `lsgan`, `wgangp`).
30 | * [test_model.py](../models/test_model.py) implements a model that can be used to generate CycleGAN results for only one direction. This model will automatically set `--dataset_mode single`, which only loads the images from one set. See the test [instruction](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix#apply-a-pre-trained-model-cyclegan) for more details.
31 |
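Similarly, here is a minimal, hypothetical `models/dummy_model.py` sketch, close in spirit to the template model. The single generator and plain L1 objective are illustrative assumptions, not the repository's actual models:

```python
# models/dummy_model.py -- hypothetical minimal example, not part of the repository
import torch
from models.base_model import BaseModel
from models import networks


class DummyModel(BaseModel):
    def __init__(self, opt):
        BaseModel.__init__(self, opt)                          # required first call
        self.loss_names = ['G_L1']                             # losses to plot/save
        self.visual_names = ['real_A', 'fake_B', 'real_B']     # images to display/save
        self.model_names = ['G']                               # networks to save/load
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
                                      opt.norm, not opt.no_dropout, opt.init_type,
                                      opt.init_gain, self.gpu_ids)
        if self.isTrain:
            self.criterionL1 = torch.nn.L1Loss()
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)

    def set_input(self, input):
        self.real_A = input['A'].to(self.device)               # unpack data from the dataloader
        self.real_B = input['B'].to(self.device)
        self.image_paths = input['A_paths']

    def forward(self):
        self.fake_B = self.netG(self.real_A)                   # intermediate results

    def optimize_parameters(self):
        self.forward()
        self.optimizer_G.zero_grad()
        self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B)
        self.loss_G_L1.backward()
        self.optimizer_G.step()
```

Running `train.py` with `--model dummy --dataset_mode aligned` would then pick up this class.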
32 | [options](../options) directory includes our option modules: training options, test options, and basic options (used in both training and test). `TrainOptions` and `TestOptions` are both subclasses of `BaseOptions`. They will reuse the options defined in `BaseOptions`.
33 | * [\_\_init\_\_.py](../options/__init__.py) is required to make Python treat the directory `options` as containing packages.
34 | * [base_options.py](../options/base_options.py) includes options that are used in both training and test. It also implements a few helper functions such as parsing, printing, and saving the options. It also gathers additional options defined in `modify_commandline_options` functions in both dataset class and model class.
35 | * [train_options.py](../options/train_options.py) includes options that are only used during training time.
36 | * [test_options.py](../options/test_options.py) includes options that are only used during test time.
37 |
38 |
39 | [util](../util) directory includes a miscellaneous collection of useful helper functions.
40 | * [\_\_init\_\_.py](../util/__init__.py) is required to make Python treat the directory `util` as containing packages.
41 | * [get_data.py](../util/get_data.py) provides a Python script for downloading CycleGAN and pix2pix datasets. Alternatively, you can use the bash scripts [download_pix2pix_dataset.sh](../datasets/download_pix2pix_dataset.sh) and [download_cyclegan_dataset.sh](../datasets/download_cyclegan_dataset.sh).
42 | * [html.py](../util/html.py) implements a module that saves images into a single HTML file. It consists of functions such as `add_header` (add a text header to the HTML file), `add_images` (add a row of images to the HTML file), `save` (save the HTML to the disk). It is based on `dominate`, a Python library for creating and manipulating HTML documents using a DOM API.
43 | * [image_pool.py](../util/image_pool.py) implements an image buffer that stores previously generated images. This buffer enables us to update discriminators using a history of generated images rather than the ones produced by the latest generators. The original idea was discussed in this [paper](http://openaccess.thecvf.com/content_cvpr_2017/papers/Shrivastava_Learning_From_Simulated_CVPR_2017_paper.pdf). The size of the buffer is controlled by the flag `--pool_size`.
44 | * [visualizer.py](../util/visualizer.py) includes several functions that can display/save images and print/save logging information. It uses a Python library `visdom` for display and a Python library `dominate` (wrapped in `HTML`) for creating HTML files with images.
45 | * [util.py](../util/util.py) consists of simple helper functions such as `tensor2im` (convert a tensor array to a numpy image array), `diagnose_network` (calculate and print the mean of average absolute value of gradients), and `mkdirs` (create multiple directories).
46 |
--------------------------------------------------------------------------------
/docs/tips.md:
--------------------------------------------------------------------------------
1 | ## Training/test Tips
2 | #### Training/test options
3 | Please see `options/train_options.py` and `options/base_options.py` for the training flags; see `options/test_options.py` and `options/base_options.py` for the test flags. There are some model-specific flags as well, which are added in the model files, such as `--lambda_A` option in `model/cycle_gan_model.py`. The default values of these options are also adjusted in the model files.
4 | #### CPU/GPU (default `--gpu_ids 0`)
5 | Please set `--gpu_ids -1` to use CPU mode; set `--gpu_ids 0,1,2` for multi-GPU mode. You need a large batch size (e.g., `--batch_size 32`) to benefit from multiple GPUs.
6 |
7 | #### Visualization
8 | During training, the current results can be viewed using two methods. First, if you set `--display_id` > 0, the results and loss plot will appear on a local graphics web server launched by [visdom](https://github.com/facebookresearch/visdom). To do this, you should have `visdom` installed and a server running via the command `python -m visdom.server`. The default server URL is `http://localhost:8097`. `display_id` corresponds to the window ID that is displayed on the `visdom` server. The `visdom` display functionality is turned on by default. To avoid the extra overhead of communicating with `visdom`, set `--display_id -1`. Second, the intermediate results are saved to `[opt.checkpoints_dir]/[opt.name]/web/` as an HTML file. To avoid this, set `--no_html`.
9 |
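A short sketch of the two modes (dataset and experiment names are placeholders):
```bash
python -m visdom.server &    # serves the plots at http://localhost:8097
python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan --display_id 1
# or skip visdom entirely and rely on the HTML snapshots under ./checkpoints/maps_cyclegan/web/
python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan --display_id -1
```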
10 | #### Preprocessing
11 | Images can be resized and cropped in different ways using the `--preprocess` option. The default option `'resize_and_crop'` resizes the image to be of size `(opt.load_size, opt.load_size)` and does a random crop of size `(opt.crop_size, opt.crop_size)`. `'crop'` skips the resizing step and only performs random cropping. `'scale_width'` resizes the image to have width `opt.crop_size` while keeping the aspect ratio. `'scale_width_and_crop'` first resizes the image to have width `opt.load_size` and then does random cropping of size `(opt.crop_size, opt.crop_size)`. `'none'` tries to skip all these preprocessing steps. However, if the image size is not a multiple of some number depending on the number of downsamplings of the generator, you will get an error because the size of the output image may be different from the size of the input image. Therefore, the `'none'` option still tries to adjust the image size to be a multiple of 4. You might need a bigger adjustment if you change the generator architecture. Please see `data/base_dataset.py` to see how all of this is implemented.
12 |
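For example (paths and names are placeholders):
```bash
# default behavior: resize to 286x286, then take a random 256x256 crop
python train.py --dataroot ./datasets/maps --name maps_pix2pix --model pix2pix --preprocess resize_and_crop --load_size 286 --crop_size 256
# keep the aspect ratio: scale width to 512, then take random 256x256 crops
python train.py --dataroot ./datasets/maps --name maps_pix2pix --model pix2pix --preprocess scale_width_and_crop --load_size 512 --crop_size 256
```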
13 | #### Fine-tuning/resume training
14 | To fine-tune a pre-trained model or resume a previous training run, use the `--continue_train` flag. The program will then load the model based on `epoch`. By default, the program will initialize the epoch count as 1. Set `--epoch_count <int>` to specify a different starting epoch count.
15 |
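A sketch of resuming a run whose latest saved checkpoint was epoch 10 (dataset and experiment names are placeholders):
```bash
python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan \
    --continue_train --epoch 10 --epoch_count 11
```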
16 |
17 | #### Prepare your own datasets for CycleGAN
18 | You need to create two directories to host images from domain A `/path/to/data/trainA` and from domain B `/path/to/data/trainB`. Then you can train the model with the dataset flag `--dataroot /path/to/data`. Optionally, you can create hold-out test datasets at `/path/to/data/testA` and `/path/to/data/testB` to test your model on unseen images.
19 |
20 | #### Prepare your own datasets for pix2pix
21 | Pix2pix's training requires paired data. We provide a python script to generate training data in the form of pairs of images {A,B}, where A and B are two different depictions of the same underlying scene. For example, these might be pairs {label map, photo} or {bw image, color image}. Then we can learn to translate A to B or B to A:
22 |
23 | Create folder `/path/to/data` with subdirectories `A` and `B`. `A` and `B` should each have their own subdirectories `train`, `val`, `test`, etc. In `/path/to/data/A/train`, put training images in style A. In `/path/to/data/B/train`, put the corresponding images in style B. Repeat same for other data splits (`val`, `test`, etc).
24 |
25 | Corresponding images in a pair {A,B} must be the same size and have the same filename, e.g., `/path/to/data/A/train/1.jpg` is considered to correspond to `/path/to/data/B/train/1.jpg`.
26 |
27 | Once the data is formatted this way, call:
28 | ```bash
29 | python datasets/combine_A_and_B.py --fold_A /path/to/data/A --fold_B /path/to/data/B --fold_AB /path/to/data
30 | ```
31 |
32 | This will combine each pair of images (A,B) into a single image file, ready for training.
33 |
34 |
35 | #### About image size
36 | Since the generator architecture in CycleGAN involves a series of downsampling / upsampling operations, the size of the input and output image may not match if the input image size is not a multiple of 4. As a result, you may get a runtime error because the L1 identity loss cannot be enforced with images of different size. Therefore, we slightly resize the image to become multiples of 4 even with `--preprocess none` option. For the same reason, `--crop_size` needs to be a multiple of 4.
37 |
38 | #### Training/Testing with high res images
39 | CycleGAN is quite memory-intensive as four networks (two generators and two discriminators) need to be loaded on one GPU, so a large image cannot be entirely loaded. In this case, we recommend training with cropped images. For example, to generate 1024px results, you can train with `--preprocess scale_width_and_crop --load_size 1024 --crop_size 360`, and test with `--preprocess scale_width --load_size 1024`. This makes sure that training and test are performed at the same scale. At test time, you can afford higher resolution because you don’t need to load all networks.
40 |
41 | #### Training/Testing with rectangular images
42 | Both pix2pix and CycleGAN can work for rectangular images. To make them work, you need to use different preprocessing flags. Let's say that you are working with `360x256` images. During training, you can specify `--preprocess crop` and `--crop_size 256`. This will allow your model to be trained on randomly cropped `256x256` images during training time. During test time, you can apply the model on `360x256` images with the flag `--preprocess none`.
43 |
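A sketch of that workflow for `360x256` frames (dataset and experiment names are placeholders):
```bash
# train on random 256x256 crops of the rectangular frames
python train.py --dataroot ./datasets/myrect --name rect_cyclegan --model cycle_gan --preprocess crop --crop_size 256
# test on the full 360x256 frames
python test.py --dataroot ./datasets/myrect --name rect_cyclegan --model cycle_gan --preprocess none
```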
44 | There are practical restrictions regarding image sizes for each generator architecture. `unet256` only supports images whose width and height are divisible by 256. For `unet128`, the width and height need to be divisible by 128. For `resnet_6blocks` and `resnet_9blocks`, the width and height need to be divisible by 4.
45 |
46 | #### About loss curve
47 | Unfortunately, the loss curve does not reveal much information in training GANs, and CycleGAN is no exception. To check whether the training has converged or not, we recommend periodically generating a few samples and looking at them.
48 |
49 | #### About batch size
50 | For all experiments in the paper, we set the batch size to be 1. If you have enough GPU memory, you can use a larger batch size with batch norm or instance norm. (Note that the default batchnorm does not work well with multi-GPU training. You may consider using [synchronized batchnorm](https://github.com/vacancy/Synchronized-BatchNorm-PyTorch) instead). But please be aware that a larger batch size can affect the training. In particular, even with Instance Normalization, different batch sizes can lead to different results. Moreover, increasing `--crop_size` may be a good alternative to increasing the batch size.
51 |
52 |
53 | #### Notes on Colorization
54 | There is no need to run `combine_A_and_B.py` for colorization. Instead, you need to prepare natural images and set `--dataset_mode colorization` and `--model colorization` in the script. The program will automatically convert each RGB image into Lab color space, and create the `L -> ab` image pair during training. Also set `--input_nc 1` and `--output_nc 2`. The training and test directories should be organized as `/your/data/train` and `/your/data/test`. See the example scripts `scripts/train_colorization.sh` and `scripts/test_colorization.sh` for more details.
55 |
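A sketch following the bundled scripts (the dataroot is a placeholder for your folder of natural images):
```bash
python train.py --dataroot ./datasets/colorization --name color_pix2pix --model colorization --dataset_mode colorization
python test.py --dataroot ./datasets/colorization --name color_pix2pix --model colorization --dataset_mode colorization
```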
56 | #### Notes on Extracting Edges
57 | We provide python and Matlab scripts to extract coarse edges from photos. Run `scripts/edges/batch_hed.py` to compute [HED](https://github.com/s9xie/hed) edges. Run `scripts/edges/PostprocessHED.m` to simplify edges with additional post-processing steps. Check the code documentation for more details.
58 |
59 | #### Evaluating Labels2Photos on Cityscapes
60 | We provide scripts for running the evaluation of the Labels2Photos task on the Cityscapes **validation** set. We assume that you have installed `caffe` (and `pycaffe`) in your system. If not, see the [official website](http://caffe.berkeleyvision.org/installation.html) for installation instructions. Once `caffe` is successfully installed, download the pre-trained FCN-8s semantic segmentation model (512MB) by running
61 | ```bash
62 | bash ./scripts/eval_cityscapes/download_fcn8s.sh
63 | ```
64 | Then make sure `./scripts/eval_cityscapes/` is in your system's python path. If not, run the following command to add it
65 | ```bash
66 | export PYTHONPATH=${PYTHONPATH}:./scripts/eval_cityscapes/
67 | ```
68 | Now you can run the following command to evaluate your predictions:
69 | ```bash
70 | python ./scripts/eval_cityscapes/evaluate.py --cityscapes_dir /path/to/original/cityscapes/dataset/ --result_dir /path/to/your/predictions/ --output_dir /path/to/output/directory/
71 | ```
72 | Images stored under `--result_dir` should contain your model predictions on the Cityscapes **validation** split, and have the original Cityscapes naming convention (e.g., `frankfurt_000001_038418_leftImg8bit.png`). The script will output a text file under `--output_dir` containing the metric.
73 |
74 | **Further notes**: Our pre-trained FCN model is **not** supposed to work on Cityscapes in the original resolution (1024x2048) as it was trained on 256x256 images that are then upsampled to 1024x2048 during training. The purpose of the resizing during training was to 1) keep the label maps in the original high resolution untouched and 2) avoid the need to change the standard FCN training code and the architecture for Cityscapes. During test time, you need to synthesize 256x256 results. Our test code will automatically upsample your results to 1024x2048 before feeding them to the pre-trained FCN model. The output is at 1024x2048 resolution and will be compared to 1024x2048 ground truth labels. You do not need to resize the ground truth labels. The best way to verify whether everything is correct is to reproduce the numbers for real images in the paper first. To do this, you need to resize the original/real Cityscapes images (**not** labels) to 256x256 and feed them to the evaluation code.
75 |
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: pytorch-CycleGAN-and-pix2pix
2 | channels:
3 | - pytorch
4 | - defaults
5 | dependencies:
6 | - python=3.8
7 | - pytorch=1.8.1
8 | - scipy
9 | - pip
10 | - pip:
11 | - dominate==2.6.0
12 | - torchvision==0.9.1
13 | - Pillow==8.0.1
14 | - numpy==1.19.2
15 | - visdom==0.1.8
16 | - wandb==0.12.18
17 |
18 |
--------------------------------------------------------------------------------
/imgs/edges2cats.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junyanz/pytorch-CycleGAN-and-pix2pix/c3268edd50ec37a81600c9b981841f48929671b8/imgs/edges2cats.jpg
--------------------------------------------------------------------------------
/imgs/horse2zebra.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junyanz/pytorch-CycleGAN-and-pix2pix/c3268edd50ec37a81600c9b981841f48929671b8/imgs/horse2zebra.gif
--------------------------------------------------------------------------------
/models/__init__.py:
--------------------------------------------------------------------------------
1 | """This package contains modules related to objective functions, optimizations, and network architectures.
2 |
3 | To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
4 | You need to implement the following five functions:
5 | -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
6 | -- <set_input>: unpack data from dataset and apply preprocessing.
7 | -- <forward>: produce intermediate results.
8 | -- <optimize_parameters>: calculate loss, gradients, and update network weights.
9 | -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
10 |
11 | In the function <__init__>, you need to define four lists:
12 | -- self.loss_names (str list): specify the training losses that you want to plot and save.
13 | -- self.model_names (str list): define networks used in our training.
14 | -- self.visual_names (str list): specify the images that you want to display and save.
15 | -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
16 |
17 | Now you can use the model class by specifying flag '--model dummy'.
18 | See our template model class 'template_model.py' for more details.
19 | """
20 |
21 | import importlib
22 | from models.base_model import BaseModel
23 |
24 |
25 | def find_model_using_name(model_name):
26 | """Import the module "models/[model_name]_model.py".
27 |
28 | In the file, the class called [model_name]Model() will
29 | be instantiated. It has to be a subclass of BaseModel,
30 | and it is case-insensitive.
31 | """
32 | model_filename = "models." + model_name + "_model"
33 | modellib = importlib.import_module(model_filename)
34 | model = None
35 | target_model_name = model_name.replace('_', '') + 'model'
36 | for name, cls in modellib.__dict__.items():
37 | if name.lower() == target_model_name.lower() \
38 | and issubclass(cls, BaseModel):
39 | model = cls
40 |
41 | if model is None:
42 | print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
43 | exit(0)
44 |
45 | return model
46 |
47 |
48 | def get_option_setter(model_name):
49 | """Return the static method of the model class."""
50 | model_class = find_model_using_name(model_name)
51 | return model_class.modify_commandline_options
52 |
53 |
54 | def create_model(opt):
55 | """Create a model given the option.
56 |
57 | This function instantiates the model class specified by `opt.model`.
58 | This is the main interface between this package and 'train.py'/'test.py'
59 |
60 | Example:
61 | >>> from models import create_model
62 | >>> model = create_model(opt)
63 | """
64 | model = find_model_using_name(opt.model)
65 | instance = model(opt)
66 | print("model [%s] was created" % type(instance).__name__)
67 | return instance
68 |
--------------------------------------------------------------------------------
/models/base_model.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | from collections import OrderedDict
4 | from abc import ABC, abstractmethod
5 | from . import networks
6 |
7 |
8 | class BaseModel(ABC):
9 | """This class is an abstract base class (ABC) for models.
10 | To create a subclass, you need to implement the following five functions:
11 | -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
12 | -- <set_input>: unpack data from dataset and apply preprocessing.
13 | -- <forward>: produce intermediate results.
14 | -- <optimize_parameters>: calculate losses, gradients, and update network weights.
15 | -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
16 | """
17 |
18 | def __init__(self, opt):
19 | """Initialize the BaseModel class.
20 |
21 | Parameters:
22 | opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
23 |
24 | When creating your custom class, you need to implement your own initialization.
25 | In this function, you should first call <BaseModel.__init__(self, opt)>
26 | Then, you need to define four lists:
27 | -- self.loss_names (str list): specify the training losses that you want to plot and save.
28 | -- self.model_names (str list): define networks used in our training.
29 | -- self.visual_names (str list): specify the images that you want to display and save.
30 | -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
31 | """
32 | self.opt = opt
33 | self.gpu_ids = opt.gpu_ids
34 | self.isTrain = opt.isTrain
35 | self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU
36 | self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir
37 | if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark.
38 | torch.backends.cudnn.benchmark = True
39 | self.loss_names = []
40 | self.model_names = []
41 | self.visual_names = []
42 | self.optimizers = []
43 | self.image_paths = []
44 | self.metric = 0 # used for learning rate policy 'plateau'
45 |
46 | @staticmethod
47 | def modify_commandline_options(parser, is_train):
48 | """Add new model-specific options, and rewrite default values for existing options.
49 |
50 | Parameters:
51 | parser -- original option parser
52 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
53 |
54 | Returns:
55 | the modified parser.
56 | """
57 | return parser
58 |
59 | @abstractmethod
60 | def set_input(self, input):
61 | """Unpack input data from the dataloader and perform necessary pre-processing steps.
62 |
63 | Parameters:
64 | input (dict): includes the data itself and its metadata information.
65 | """
66 | pass
67 |
68 | @abstractmethod
69 | def forward(self):
70 | """Run forward pass; called by both functions and ."""
71 | pass
72 |
73 | @abstractmethod
74 | def optimize_parameters(self):
75 | """Calculate losses, gradients, and update network weights; called in every training iteration"""
76 | pass
77 |
78 | def setup(self, opt):
79 | """Load and print networks; create schedulers
80 |
81 | Parameters:
82 | opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
83 | """
84 | if self.isTrain:
85 | self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers]
86 | if not self.isTrain or opt.continue_train:
87 | load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch
88 | self.load_networks(load_suffix)
89 | self.print_networks(opt.verbose)
90 |
91 | def eval(self):
92 | """Make models eval mode during test time"""
93 | for name in self.model_names:
94 | if isinstance(name, str):
95 | net = getattr(self, 'net' + name)
96 | net.eval()
97 |
98 | def test(self):
99 | """Forward function used in test time.
100 |
101 | This function wraps <forward> function in no_grad() so we don't save intermediate steps for backprop
102 | It also calls <compute_visuals> to produce additional visualization results
103 | """
104 | with torch.no_grad():
105 | self.forward()
106 | self.compute_visuals()
107 |
108 | def compute_visuals(self):
109 | """Calculate additional output images for visdom and HTML visualization"""
110 | pass
111 |
112 | def get_image_paths(self):
113 | """ Return image paths that are used to load current data"""
114 | return self.image_paths
115 |
116 | def update_learning_rate(self):
117 | """Update learning rates for all the networks; called at the end of every epoch"""
118 | old_lr = self.optimizers[0].param_groups[0]['lr']
119 | for scheduler in self.schedulers:
120 | if self.opt.lr_policy == 'plateau':
121 | scheduler.step(self.metric)
122 | else:
123 | scheduler.step()
124 |
125 | lr = self.optimizers[0].param_groups[0]['lr']
126 | print('learning rate %.7f -> %.7f' % (old_lr, lr))
127 |
128 | def get_current_visuals(self):
129 | """Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
130 | visual_ret = OrderedDict()
131 | for name in self.visual_names:
132 | if isinstance(name, str):
133 | visual_ret[name] = getattr(self, name)
134 | return visual_ret
135 |
136 | def get_current_losses(self):
137 | """Return traning losses / errors. train.py will print out these errors on console, and save them to a file"""
138 | errors_ret = OrderedDict()
139 | for name in self.loss_names:
140 | if isinstance(name, str):
141 | errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number
142 | return errors_ret
143 |
144 | def save_networks(self, epoch):
145 | """Save all the networks to the disk.
146 |
147 | Parameters:
148 | epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
149 | """
150 | for name in self.model_names:
151 | if isinstance(name, str):
152 | save_filename = '%s_net_%s.pth' % (epoch, name)
153 | save_path = os.path.join(self.save_dir, save_filename)
154 | net = getattr(self, 'net' + name)
155 |
156 | if len(self.gpu_ids) > 0 and torch.cuda.is_available():
157 | torch.save(net.module.cpu().state_dict(), save_path)
158 | net.cuda(self.gpu_ids[0])
159 | else:
160 | torch.save(net.cpu().state_dict(), save_path)
161 |
162 | def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):
163 | """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)"""
164 | key = keys[i]
165 | if i + 1 == len(keys): # at the end, pointing to a parameter/buffer
166 | if module.__class__.__name__.startswith('InstanceNorm') and \
167 | (key == 'running_mean' or key == 'running_var'):
168 | if getattr(module, key) is None:
169 | state_dict.pop('.'.join(keys))
170 | if module.__class__.__name__.startswith('InstanceNorm') and \
171 | (key == 'num_batches_tracked'):
172 | state_dict.pop('.'.join(keys))
173 | else:
174 | self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)
175 |
176 | def load_networks(self, epoch):
177 | """Load all the networks from the disk.
178 |
179 | Parameters:
180 | epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name)
181 | """
182 | for name in self.model_names:
183 | if isinstance(name, str):
184 | load_filename = '%s_net_%s.pth' % (epoch, name)
185 | load_path = os.path.join(self.save_dir, load_filename)
186 | net = getattr(self, 'net' + name)
187 | if isinstance(net, torch.nn.DataParallel):
188 | net = net.module
189 | print('loading the model from %s' % load_path)
190 | # if you are using PyTorch newer than 0.4 (e.g., built from
191 | # GitHub source), you can remove str() on self.device
192 | state_dict = torch.load(load_path, map_location=str(self.device))
193 | if hasattr(state_dict, '_metadata'):
194 | del state_dict._metadata
195 |
196 | # patch InstanceNorm checkpoints prior to 0.4
197 | for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop
198 | self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))
199 | net.load_state_dict(state_dict)
200 |
201 | def print_networks(self, verbose):
202 | """Print the total number of parameters in the network and (if verbose) network architecture
203 |
204 | Parameters:
205 | verbose (bool) -- if verbose: print the network architecture
206 | """
207 | print('---------- Networks initialized -------------')
208 | for name in self.model_names:
209 | if isinstance(name, str):
210 | net = getattr(self, 'net' + name)
211 | num_params = 0
212 | for param in net.parameters():
213 | num_params += param.numel()
214 | if verbose:
215 | print(net)
216 | print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
217 | print('-----------------------------------------------')
218 |
219 | def set_requires_grad(self, nets, requires_grad=False):
220 | """Set requies_grad=Fasle for all the networks to avoid unnecessary computations
221 | Parameters:
222 | nets (network list) -- a list of networks
223 | requires_grad (bool) -- whether the networks require gradients or not
224 | """
225 | if not isinstance(nets, list):
226 | nets = [nets]
227 | for net in nets:
228 | if net is not None:
229 | for param in net.parameters():
230 | param.requires_grad = requires_grad
231 |
--------------------------------------------------------------------------------
/models/colorization_model.py:
--------------------------------------------------------------------------------
1 | from .pix2pix_model import Pix2PixModel
2 | import torch
3 | from skimage import color # used for lab2rgb
4 | import numpy as np
5 |
6 |
7 | class ColorizationModel(Pix2PixModel):
8 | """This is a subclass of Pix2PixModel for image colorization (black & white image -> colorful images).
9 |
10 | The model training requires the '--dataset_mode colorization' dataset.
11 | It trains a pix2pix model, mapping from L channel to ab channels in Lab color space.
12 | By default, the colorization dataset will automatically set '--input_nc 1' and '--output_nc 2'.
13 | """
14 | @staticmethod
15 | def modify_commandline_options(parser, is_train=True):
16 | """Add new dataset-specific options, and rewrite default values for existing options.
17 |
18 | Parameters:
19 | parser -- original option parser
20 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
21 |
22 | Returns:
23 | the modified parser.
24 |
25 | By default, we use 'colorization' dataset for this model.
26 | See the original pix2pix paper (https://arxiv.org/pdf/1611.07004.pdf) and colorization results (Figure 9 in the paper)
27 | """
28 | Pix2PixModel.modify_commandline_options(parser, is_train)
29 | parser.set_defaults(dataset_mode='colorization')
30 | return parser
31 |
32 | def __init__(self, opt):
33 | """Initialize the class.
34 |
35 | Parameters:
36 | opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
37 |
38 | For visualization, we set 'visual_names' as 'real_A' (input real image),
39 | 'real_B_rgb' (ground truth RGB image), and 'fake_B_rgb' (predicted RGB image)
40 | We convert the Lab image 'real_B' (inherited from Pix2PixModel) to an RGB image 'real_B_rgb'.
41 | We convert the Lab image 'fake_B' (inherited from Pix2PixModel) to an RGB image 'fake_B_rgb'.
42 | """
43 | # reuse the pix2pix model
44 | Pix2PixModel.__init__(self, opt)
45 | # specify the images to be visualized.
46 | self.visual_names = ['real_A', 'real_B_rgb', 'fake_B_rgb']
47 |
48 | def lab2rgb(self, L, AB):
49 | """Convert an Lab tensor image to a RGB numpy output
50 | Parameters:
51 | L (1-channel tensor array): L channel images (range: [-1, 1], torch tensor array)
52 | AB (2-channel tensor array): ab channel images (range: [-1, 1], torch tensor array)
53 |
54 | Returns:
55 | rgb (RGB numpy image): rgb output images (range: [0, 255], numpy array)
56 | """
57 | AB2 = AB * 110.0
58 | L2 = (L + 1.0) * 50.0
59 | Lab = torch.cat([L2, AB2], dim=1)
60 | Lab = Lab[0].data.cpu().float().numpy()
61 | Lab = np.transpose(Lab.astype(np.float64), (1, 2, 0))
62 | rgb = color.lab2rgb(Lab) * 255
63 | return rgb
64 |
65 | def compute_visuals(self):
66 | """Calculate additional output images for visdom and HTML visualization"""
67 | self.real_B_rgb = self.lab2rgb(self.real_A, self.real_B)
68 | self.fake_B_rgb = self.lab2rgb(self.real_A, self.fake_B)
69 |
--------------------------------------------------------------------------------
/models/cycle_gan_model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import itertools
3 | from util.image_pool import ImagePool
4 | from .base_model import BaseModel
5 | from . import networks
6 |
7 |
8 | class CycleGANModel(BaseModel):
9 | """
10 | This class implements the CycleGAN model, for learning image-to-image translation without paired data.
11 |
12 | The model training requires '--dataset_mode unaligned' dataset.
13 | By default, it uses a '--netG resnet_9blocks' ResNet generator,
14 | a '--netD basic' discriminator (PatchGAN introduced by pix2pix),
15 | and a least-square GANs objective ('--gan_mode lsgan').
16 |
17 | CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf
18 | """
19 | @staticmethod
20 | def modify_commandline_options(parser, is_train=True):
21 | """Add new dataset-specific options, and rewrite default values for existing options.
22 |
23 | Parameters:
24 | parser -- original option parser
25 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
26 |
27 | Returns:
28 | the modified parser.
29 |
30 | For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses.
31 | A (source domain), B (target domain).
32 | Generators: G_A: A -> B; G_B: B -> A.
33 | Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A.
34 | Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper)
35 | Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper)
36 | Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper)
37 | Dropout is not used in the original CycleGAN paper.
38 | """
39 | parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout
40 | if is_train:
41 | parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')
42 | parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')
43 | parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1')
44 |
45 | return parser
46 |
47 | def __init__(self, opt):
48 | """Initialize the CycleGAN class.
49 |
50 | Parameters:
51 | opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
52 | """
53 | BaseModel.__init__(self, opt)
54 | # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
55 | self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
56 | # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
57 | visual_names_A = ['real_A', 'fake_B', 'rec_A']
58 | visual_names_B = ['real_B', 'fake_A', 'rec_B']
59 | if self.isTrain and self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_B=G_B(A) and idt_A=G_A(B)
60 | visual_names_A.append('idt_B')
61 | visual_names_B.append('idt_A')
62 |
63 | self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B
64 | # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
65 | if self.isTrain:
66 | self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
67 | else: # during test time, only load Gs
68 | self.model_names = ['G_A', 'G_B']
69 |
70 | # define networks (both Generators and discriminators)
71 | # The naming is different from those used in the paper.
72 | # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
73 | self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
74 | not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
75 | self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
76 | not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
77 |
78 | if self.isTrain: # define discriminators
79 | self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
80 | opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
81 | self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
82 | opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
83 |
84 | if self.isTrain:
85 | if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels
86 | assert(opt.input_nc == opt.output_nc)
87 | self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
88 | self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images
89 | # define loss functions
90 | self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss.
91 | self.criterionCycle = torch.nn.L1Loss()
92 | self.criterionIdt = torch.nn.L1Loss()
93 | # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
94 | self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
95 | self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
96 | self.optimizers.append(self.optimizer_G)
97 | self.optimizers.append(self.optimizer_D)
98 |
99 | def set_input(self, input):
100 | """Unpack input data from the dataloader and perform necessary pre-processing steps.
101 |
102 | Parameters:
103 | input (dict): include the data itself and its metadata information.
104 |
105 | The option 'direction' can be used to swap domain A and domain B.
106 | """
107 | AtoB = self.opt.direction == 'AtoB'
108 | self.real_A = input['A' if AtoB else 'B'].to(self.device)
109 | self.real_B = input['B' if AtoB else 'A'].to(self.device)
110 | self.image_paths = input['A_paths' if AtoB else 'B_paths']
111 |
112 | def forward(self):
113 | """Run forward pass; called by both functions and ."""
114 | self.fake_B = self.netG_A(self.real_A) # G_A(A)
115 | self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A))
116 | self.fake_A = self.netG_B(self.real_B) # G_B(B)
117 | self.rec_B = self.netG_A(self.fake_A) # G_A(G_B(B))
118 |
119 | def backward_D_basic(self, netD, real, fake):
120 | """Calculate GAN loss for the discriminator
121 |
122 | Parameters:
123 | netD (network) -- the discriminator D
124 | real (tensor array) -- real images
125 | fake (tensor array) -- images generated by a generator
126 |
127 | Return the discriminator loss.
128 | We also call loss_D.backward() to calculate the gradients.
129 | """
130 | # Real
131 | pred_real = netD(real)
132 | loss_D_real = self.criterionGAN(pred_real, True)
133 | # Fake
134 | pred_fake = netD(fake.detach())
135 | loss_D_fake = self.criterionGAN(pred_fake, False)
136 | # Combined loss and calculate gradients
137 | loss_D = (loss_D_real + loss_D_fake) * 0.5
138 | loss_D.backward()
139 | return loss_D
140 |
141 | def backward_D_A(self):
142 | """Calculate GAN loss for discriminator D_A"""
143 | fake_B = self.fake_B_pool.query(self.fake_B)
144 | self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)
145 |
146 | def backward_D_B(self):
147 | """Calculate GAN loss for discriminator D_B"""
148 | fake_A = self.fake_A_pool.query(self.fake_A)
149 | self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)
150 |
151 | def backward_G(self):
152 | """Calculate the loss for generators G_A and G_B"""
153 | lambda_idt = self.opt.lambda_identity
154 | lambda_A = self.opt.lambda_A
155 | lambda_B = self.opt.lambda_B
156 | # Identity loss
157 | if lambda_idt > 0:
158 | # G_A should be identity if real_B is fed: ||G_A(B) - B||
159 | self.idt_A = self.netG_A(self.real_B)
160 | self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
161 | # G_B should be identity if real_A is fed: ||G_B(A) - A||
162 | self.idt_B = self.netG_B(self.real_A)
163 | self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
164 | else:
165 | self.loss_idt_A = 0
166 | self.loss_idt_B = 0
167 |
168 | # GAN loss D_A(G_A(A))
169 | self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
170 | # GAN loss D_B(G_B(B))
171 | self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
172 | # Forward cycle loss || G_B(G_A(A)) - A||
173 | self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
174 | # Backward cycle loss || G_A(G_B(B)) - B||
175 | self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
176 | # combined loss and calculate gradients
177 | self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B
178 | self.loss_G.backward()
179 |
180 | def optimize_parameters(self):
181 | """Calculate losses, gradients, and update network weights; called in every training iteration"""
182 | # forward
183 | self.forward() # compute fake images and reconstruction images.
184 | # G_A and G_B
185 | self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs
186 | self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero
187 | self.backward_G() # calculate gradients for G_A and G_B
188 | self.optimizer_G.step() # update G_A and G_B's weights
189 | # D_A and D_B
190 | self.set_requires_grad([self.netD_A, self.netD_B], True)
191 | self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero
192 | self.backward_D_A() # calculate gradients for D_A
193 | self.backward_D_B()      # calculate gradients for D_B
194 | self.optimizer_D.step() # update D_A and D_B's weights
195 |
--------------------------------------------------------------------------------
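
A minimal illustrative sketch of how the generator objective assembled in backward_G above fits together: the two GAN terms, the two cycle-consistency terms weighted by lambda_A/lambda_B, and the identity terms weighted additionally by lambda_identity. Random tensors stand in for the outputs of netG_A/netG_B and the discriminators, and the weights are the repository's usual defaults, so treat this as a sketch rather than the model code itself.

    import torch
    import torch.nn as nn

    lambda_A, lambda_B, lambda_idt = 10.0, 10.0, 0.5      # typical default weights
    criterion_gan = nn.MSELoss()                          # lsgan-style objective (the default gan_mode)
    criterion_cycle, criterion_idt = nn.L1Loss(), nn.L1Loss()

    # Stand-ins for the tensors produced in forward()/backward_G().
    real_A, real_B = torch.rand(1, 3, 64, 64), torch.rand(1, 3, 64, 64)
    rec_A, rec_B = torch.rand(1, 3, 64, 64), torch.rand(1, 3, 64, 64)
    idt_A, idt_B = torch.rand(1, 3, 64, 64), torch.rand(1, 3, 64, 64)
    pred_fake_B, pred_fake_A = torch.rand(1, 1, 8, 8), torch.rand(1, 1, 8, 8)  # D_A(G_A(A)), D_B(G_B(B))

    loss_G_A = criterion_gan(pred_fake_B, torch.ones_like(pred_fake_B))
    loss_G_B = criterion_gan(pred_fake_A, torch.ones_like(pred_fake_A))
    loss_cycle_A = criterion_cycle(rec_A, real_A) * lambda_A
    loss_cycle_B = criterion_cycle(rec_B, real_B) * lambda_B
    loss_idt_A = criterion_idt(idt_A, real_B) * lambda_B * lambda_idt
    loss_idt_B = criterion_idt(idt_B, real_A) * lambda_A * lambda_idt
    loss_G = loss_G_A + loss_G_B + loss_cycle_A + loss_cycle_B + loss_idt_A + loss_idt_B
    print(float(loss_G))
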
/models/pix2pix_model.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from .base_model import BaseModel
3 | from . import networks
4 |
5 |
6 | class Pix2PixModel(BaseModel):
7 | """ This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.
8 |
9 | The model training requires '--dataset_mode aligned' dataset.
10 | By default, it uses a '--netG unet_256' U-Net generator,
11 | a '--netD basic' discriminator (PatchGAN),
12 | and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the original GAN paper).
13 |
14 | pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
15 | """
16 | @staticmethod
17 | def modify_commandline_options(parser, is_train=True):
18 | """Add new dataset-specific options, and rewrite default values for existing options.
19 |
20 | Parameters:
21 | parser -- original option parser
22 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
23 |
24 | Returns:
25 | the modified parser.
26 |
27 | For pix2pix, we do not use an image buffer.
28 | The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
29 | By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
30 | """
31 | # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
32 | parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')
33 | if is_train:
34 | parser.set_defaults(pool_size=0, gan_mode='vanilla')
35 | parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
36 |
37 | return parser
38 |
39 | def __init__(self, opt):
40 | """Initialize the pix2pix class.
41 |
42 | Parameters:
43 | opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
44 | """
45 | BaseModel.__init__(self, opt)
46 | # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
47 | self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
48 | # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
49 | self.visual_names = ['real_A', 'fake_B', 'real_B']
50 | # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
51 | if self.isTrain:
52 | self.model_names = ['G', 'D']
53 | else: # during test time, only load G
54 | self.model_names = ['G']
55 | # define networks (both generator and discriminator)
56 | self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
57 | not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
58 |
59 | if self.isTrain:  # define a discriminator; conditional GANs need to take both input and output images; therefore, #channels for D is input_nc + output_nc
60 | self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
61 | opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
62 |
63 | if self.isTrain:
64 | # define loss functions
65 | self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
66 | self.criterionL1 = torch.nn.L1Loss()
67 | # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
68 | self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
69 | self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
70 | self.optimizers.append(self.optimizer_G)
71 | self.optimizers.append(self.optimizer_D)
72 |
73 | def set_input(self, input):
74 | """Unpack input data from the dataloader and perform necessary pre-processing steps.
75 |
76 | Parameters:
77 | input (dict): include the data itself and its metadata information.
78 |
79 | The option 'direction' can be used to swap images in domain A and domain B.
80 | """
81 | AtoB = self.opt.direction == 'AtoB'
82 | self.real_A = input['A' if AtoB else 'B'].to(self.device)
83 | self.real_B = input['B' if AtoB else 'A'].to(self.device)
84 | self.image_paths = input['A_paths' if AtoB else 'B_paths']
85 |
86 | def forward(self):
87 | """Run forward pass; called by both functions and ."""
88 | self.fake_B = self.netG(self.real_A) # G(A)
89 |
90 | def backward_D(self):
91 | """Calculate GAN loss for the discriminator"""
92 | # Fake; stop backprop to the generator by detaching fake_B
93 | fake_AB = torch.cat((self.real_A, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator
94 | pred_fake = self.netD(fake_AB.detach())
95 | self.loss_D_fake = self.criterionGAN(pred_fake, False)
96 | # Real
97 | real_AB = torch.cat((self.real_A, self.real_B), 1)
98 | pred_real = self.netD(real_AB)
99 | self.loss_D_real = self.criterionGAN(pred_real, True)
100 | # combine loss and calculate gradients
101 | self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
102 | self.loss_D.backward()
103 |
104 | def backward_G(self):
105 | """Calculate GAN and L1 loss for the generator"""
106 | # First, G(A) should fake the discriminator
107 | fake_AB = torch.cat((self.real_A, self.fake_B), 1)
108 | pred_fake = self.netD(fake_AB)
109 | self.loss_G_GAN = self.criterionGAN(pred_fake, True)
110 | # Second, G(A) = B
111 | self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
112 | # combine loss and calculate gradients
113 | self.loss_G = self.loss_G_GAN + self.loss_G_L1
114 | self.loss_G.backward()
115 |
116 | def optimize_parameters(self):
117 | self.forward() # compute fake images: G(A)
118 | # update D
119 | self.set_requires_grad(self.netD, True) # enable backprop for D
120 | self.optimizer_D.zero_grad() # set D's gradients to zero
121 | self.backward_D() # calculate gradients for D
122 | self.optimizer_D.step() # update D's weights
123 | # update G
124 | self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G
125 | self.optimizer_G.zero_grad() # set G's gradients to zero
126 | self.backward_G()                   # calculate gradients for G
127 | self.optimizer_G.step() # update G's weights
128 |
--------------------------------------------------------------------------------
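
A small illustrative sketch of the two ideas used above: the conditional discriminator sees the input and output stacked along the channel dimension (so it expects input_nc + output_nc channels), and the generator objective combines the GAN term with an L1 term weighted by --lambda_L1. Random tensors stand in for the dataloader output and the networks, so this only demonstrates the arithmetic.

    import torch
    import torch.nn as nn

    lambda_L1 = 100.0
    real_A, real_B = torch.rand(1, 3, 256, 256), torch.rand(1, 3, 256, 256)
    fake_B = torch.rand(1, 3, 256, 256)            # would be netG(real_A)

    fake_AB = torch.cat((real_A, fake_B), 1)       # conditional pair fed to D
    print(fake_AB.shape)                           # torch.Size([1, 6, 256, 256]) -> input_nc + output_nc

    pred_fake = torch.rand(1, 1, 30, 30)           # would be netD(fake_AB), a PatchGAN score map
    criterion_gan = nn.BCEWithLogitsLoss()         # 'vanilla' GAN loss, the pix2pix default
    criterion_l1 = nn.L1Loss()
    loss_G = criterion_gan(pred_fake, torch.ones_like(pred_fake)) + criterion_l1(fake_B, real_B) * lambda_L1
    print(float(loss_G))
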
/models/template_model.py:
--------------------------------------------------------------------------------
1 | """Model class template
2 |
3 | This module provides a template for users to implement custom models.
4 | You can specify '--model template' to use this model.
5 | The class name should be consistent with both the filename and its model option.
6 | The filename should be <model>_model.py
7 | The class name should be <Model>Model
8 | It implements a simple image-to-image translation baseline based on regression loss.
9 | Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss:
10 | min_<netG> ||netG(data_A) - data_B||_1
11 | You need to implement the following functions:
12 | <modify_commandline_options>: Add model-specific options and rewrite default values for existing options.
13 | <__init__>: Initialize this model class.
14 | <set_input>: Unpack input data and perform data pre-processing.
15 | <forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>.
16 | <optimize_parameters>: Update network weights; it will be called in every training iteration.
17 | """
18 | import torch
19 | from .base_model import BaseModel
20 | from . import networks
21 |
22 |
23 | class TemplateModel(BaseModel):
24 | @staticmethod
25 | def modify_commandline_options(parser, is_train=True):
26 | """Add new model-specific options and rewrite default values for existing options.
27 |
28 | Parameters:
29 | parser -- the option parser
30 | is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options.
31 |
32 | Returns:
33 | the modified parser.
34 | """
35 | parser.set_defaults(dataset_mode='aligned') # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset.
36 | if is_train:
37 | parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss') # You can define new arguments for this model.
38 |
39 | return parser
40 |
41 | def __init__(self, opt):
42 | """Initialize this model class.
43 |
44 | Parameters:
45 | opt -- training/test options
46 |
47 | A few things can be done here.
48 | - (required) call the initialization function of BaseModel
49 | - define loss function, visualization images, model names, and optimizers
50 | """
51 | BaseModel.__init__(self, opt) # call the initialization method of BaseModel
52 | # specify the training losses you want to print out. The program will call base_model.get_current_losses to plot the losses to the console and save them to the disk.
53 | self.loss_names = ['G']
54 | # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images.
55 | self.visual_names = ['data_A', 'data_B', 'output']
56 | # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks.
57 | # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them.
58 | self.model_names = ['G']
59 | # define networks; you can use opt.isTrain to specify different behaviors for training and test.
60 | self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids)
61 | if self.isTrain: # only defined during training time
62 | # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss.
63 | # We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device)
64 | self.criterionLoss = torch.nn.L1Loss()
65 | # define and initialize optimizers. You can define one optimizer for each network.
66 | # If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
67 | self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
68 | self.optimizers = [self.optimizer]
69 |
70 | # Our program will automatically call <BaseModel.setup> to define schedulers, load networks, and print networks
71 |
72 | def set_input(self, input):
73 | """Unpack input data from the dataloader and perform necessary pre-processing steps.
74 |
75 | Parameters:
76 | input: a dictionary that contains the data itself and its metadata information.
77 | """
78 | AtoB = self.opt.direction == 'AtoB'  # use <direction> to swap data_A and data_B
79 | self.data_A = input['A' if AtoB else 'B'].to(self.device) # get image data A
80 | self.data_B = input['B' if AtoB else 'A'].to(self.device) # get image data B
81 | self.image_paths = input['A_paths' if AtoB else 'B_paths'] # get image paths
82 |
83 | def forward(self):
84 | """Run forward pass. This will be called by both functions and ."""
85 | self.output = self.netG(self.data_A) # generate output image given the input data_A
86 |
87 | def backward(self):
88 | """Calculate losses, gradients, and update network weights; called in every training iteration"""
89 | # calculate the intermediate results if necessary; here self.output has been computed during function <forward>
90 | # calculate loss given the input and intermediate results
91 | self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression
92 | self.loss_G.backward() # calculate gradients of network G w.r.t. loss_G
93 |
94 | def optimize_parameters(self):
95 | """Update network weights; it will be called in every training iteration."""
96 | self.forward() # first call forward to calculate intermediate results
97 | self.optimizer.zero_grad() # clear network G's existing gradients
98 | self.backward() # calculate gradients for network G
99 | self.optimizer.step()        # update network G's weights
100 |
--------------------------------------------------------------------------------
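
The template above boils down to an L1 regression baseline: minimize ||netG(data_A) - data_B||_1 scaled by --lambda_regression. The toy sketch below reproduces one optimize_parameters step with a single convolution standing in for networks.define_G and random tensors standing in for the aligned dataloader, so it is illustrative rather than the repository's training loop.

    import torch
    import torch.nn as nn

    lambda_regression = 1.0
    netG = nn.Conv2d(3, 3, kernel_size=3, padding=1)     # toy stand-in for networks.define_G
    optimizer = torch.optim.Adam(netG.parameters(), lr=0.0002, betas=(0.5, 0.999))
    criterion = nn.L1Loss()

    data_A = torch.rand(1, 3, 64, 64)    # would come from the aligned dataloader ('A')
    data_B = torch.rand(1, 3, 64, 64)    # paired target ('B')

    output = netG(data_A)                                    # forward()
    loss_G = criterion(output, data_B) * lambda_regression   # backward()
    optimizer.zero_grad()
    loss_G.backward()
    optimizer.step()                                         # optimize_parameters()
    print(float(loss_G))
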
/models/test_model.py:
--------------------------------------------------------------------------------
1 | from .base_model import BaseModel
2 | from . import networks
3 |
4 |
5 | class TestModel(BaseModel):
6 | """ This TesteModel can be used to generate CycleGAN results for only one direction.
7 | This model will automatically set '--dataset_mode single', which only loads the images from one collection.
8 |
9 | See the test instruction for more details.
10 | """
11 | @staticmethod
12 | def modify_commandline_options(parser, is_train=True):
13 | """Add new dataset-specific options, and rewrite default values for existing options.
14 |
15 | Parameters:
16 | parser -- original option parser
17 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
18 |
19 | Returns:
20 | the modified parser.
21 |
22 | The model can only be used during test time. It requires '--dataset_mode single'.
23 | You need to specify the network using the option '--model_suffix'.
24 | """
25 | assert not is_train, 'TestModel cannot be used during training time'
26 | parser.set_defaults(dataset_mode='single')
27 | parser.add_argument('--model_suffix', type=str, default='', help='In checkpoints_dir, [epoch]_net_G[model_suffix].pth will be loaded as the generator.')
28 |
29 | return parser
30 |
31 | def __init__(self, opt):
32 | """Initialize the pix2pix class.
33 |
34 | Parameters:
35 | opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
36 | """
37 | assert(not opt.isTrain)
38 | BaseModel.__init__(self, opt)
39 | # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
40 | self.loss_names = []
41 | # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
42 | self.visual_names = ['real', 'fake']
43 | # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
44 | self.model_names = ['G' + opt.model_suffix] # only generator is needed.
45 | self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
46 | opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
47 |
48 | # assigns the model to self.netG_[suffix] so that it can be loaded
49 | # please see <BaseModel.load_networks>
50 | setattr(self, 'netG' + opt.model_suffix, self.netG) # store netG in self.
51 |
52 | def set_input(self, input):
53 | """Unpack input data from the dataloader and perform necessary pre-processing steps.
54 |
55 | Parameters:
56 | input: a dictionary that contains the data itself and its metadata information.
57 |
58 | We need to use the 'single_dataset' dataset mode. It only loads images from one domain.
59 | """
60 | self.real = input['A'].to(self.device)
61 | self.image_paths = input['A_paths']
62 |
63 | def forward(self):
64 | """Run forward pass."""
65 | self.fake = self.netG(self.real) # G(real)
66 |
67 | def optimize_parameters(self):
68 | """No optimization for test model."""
69 | pass
70 |
--------------------------------------------------------------------------------
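
The setattr call above is what makes --model_suffix work: BaseModel derives each checkpoint filename from the entries in self.model_names, so registering the generator under 'G' + suffix makes it look for [epoch]_net_G[model_suffix].pth inside the experiment's checkpoints directory (as the help string above says). A tiny sketch of that naming; the actual lookup happens inside BaseModel.load_networks, which is not shown here.

    # e.g. testing the A->B generator of a CycleGAN run with --model_suffix "_A"
    epoch, model_suffix = 'latest', '_A'
    name = 'G' + model_suffix                        # entry placed in self.model_names
    load_filename = '%s_net_%s.pth' % (epoch, name)
    print(load_filename)                             # latest_net_G_A.pth
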
/options/__init__.py:
--------------------------------------------------------------------------------
1 | """This package options includes option modules: training options, test options, and basic options (used in both training and test)."""
2 |
--------------------------------------------------------------------------------
/options/base_options.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | from util import util
4 | import torch
5 | import models
6 | import data
7 |
8 |
9 | class BaseOptions():
10 | """This class defines options used during both training and test time.
11 |
12 | It also implements several helper functions such as parsing, printing, and saving the options.
13 | It also gathers additional options defined in functions in both dataset class and model class.
14 | """
15 |
16 | def __init__(self):
17 | """Reset the class; indicates the class hasn't been initailized"""
18 | self.initialized = False
19 |
20 | def initialize(self, parser):
21 | """Define the common options that are used in both training and test."""
22 | # basic parameters
23 | parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
24 | parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
25 | parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0, or 0,1,2, or 0,2. use -1 for CPU')
26 | parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
27 | # model parameters
28 | parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
29 | parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
30 | parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
31 | parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
32 | parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
33 | parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
34 | parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
35 | parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
36 | parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
37 | parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
38 | parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
39 | parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
40 | # dataset parameters
41 | parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
42 | parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
43 | parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
44 | parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
45 | parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
46 | parser.add_argument('--load_size', type=int, default=286, help='scale images to this size')
47 | parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size')
48 | parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
49 | parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
50 | parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
51 | parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
52 | # additional parameters
53 | parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
54 | parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
55 | parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
56 | parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
57 | # wandb parameters
58 | parser.add_argument('--use_wandb', action='store_true', help='if specified, then init wandb logging')
59 | parser.add_argument('--wandb_project_name', type=str, default='CycleGAN-and-pix2pix', help='specify wandb project name')
60 | self.initialized = True
61 | return parser
62 |
63 | def gather_options(self):
64 | """Initialize our parser with basic options(only once).
65 | Add additional model-specific and dataset-specific options.
66 | These options are defined in the function
67 | in model and dataset classes.
68 | """
69 | if not self.initialized: # check if it has been initialized
70 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
71 | parser = self.initialize(parser)
72 |
73 | # get the basic options
74 | opt, _ = parser.parse_known_args()
75 |
76 | # modify model-related parser options
77 | model_name = opt.model
78 | model_option_setter = models.get_option_setter(model_name)
79 | parser = model_option_setter(parser, self.isTrain)
80 | opt, _ = parser.parse_known_args() # parse again with new defaults
81 |
82 | # modify dataset-related parser options
83 | dataset_name = opt.dataset_mode
84 | dataset_option_setter = data.get_option_setter(dataset_name)
85 | parser = dataset_option_setter(parser, self.isTrain)
86 |
87 | # save and return the parser
88 | self.parser = parser
89 | return parser.parse_args()
90 |
91 | def print_options(self, opt):
92 | """Print and save options
93 |
94 | It will print both current options and default values (if different).
95 | It will save options to a text file: [checkpoints_dir] / opt.txt
96 | """
97 | message = ''
98 | message += '----------------- Options ---------------\n'
99 | for k, v in sorted(vars(opt).items()):
100 | comment = ''
101 | default = self.parser.get_default(k)
102 | if v != default:
103 | comment = '\t[default: %s]' % str(default)
104 | message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
105 | message += '----------------- End -------------------'
106 | print(message)
107 |
108 | # save to the disk
109 | expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
110 | util.mkdirs(expr_dir)
111 | file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
112 | with open(file_name, 'wt') as opt_file:
113 | opt_file.write(message)
114 | opt_file.write('\n')
115 |
116 | def parse(self):
117 | """Parse our options, create checkpoints directory suffix, and set up gpu device."""
118 | opt = self.gather_options()
119 | opt.isTrain = self.isTrain # train or test
120 |
121 | # process opt.suffix
122 | if opt.suffix:
123 | suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
124 | opt.name = opt.name + suffix
125 |
126 | self.print_options(opt)
127 |
128 | # set gpu ids
129 | str_ids = opt.gpu_ids.split(',')
130 | opt.gpu_ids = []
131 | for str_id in str_ids:
132 | id = int(str_id)
133 | if id >= 0:
134 | opt.gpu_ids.append(id)
135 | if len(opt.gpu_ids) > 0:
136 | torch.cuda.set_device(opt.gpu_ids[0])
137 |
138 | self.opt = opt
139 | return self.opt
140 |
--------------------------------------------------------------------------------
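
gather_options above relies on two-stage parsing: the basic options are parsed first with parse_known_args (ignoring flags that are not defined yet), then the chosen model and dataset classes get a chance to add options and override defaults before the final parse. A standalone sketch of that pattern follows; the pix2pix_option_setter below is a simplified, hypothetical stand-in for what models.get_option_setter returns.

    import argparse

    def pix2pix_option_setter(parser, is_train=True):
        # simplified stand-in for a model's <modify_commandline_options>
        parser.set_defaults(norm='batch', dataset_mode='aligned')
        if is_train:
            parser.add_argument('--lambda_L1', type=float, default=100.0)
        return parser

    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, default='cycle_gan')
    parser.add_argument('--norm', type=str, default='instance')
    parser.add_argument('--dataset_mode', type=str, default='unaligned')

    opt, _ = parser.parse_known_args(['--model', 'pix2pix'])  # first pass: ignore unknown flags
    parser = pix2pix_option_setter(parser, is_train=True)      # model-specific additions/overrides
    opt = parser.parse_args(['--model', 'pix2pix'])            # parse again with the new defaults
    print(opt.norm, opt.dataset_mode, opt.lambda_L1)           # batch aligned 100.0
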
/options/test_options.py:
--------------------------------------------------------------------------------
1 | from .base_options import BaseOptions
2 |
3 |
4 | class TestOptions(BaseOptions):
5 | """This class includes test options.
6 |
7 | It also includes shared options defined in BaseOptions.
8 | """
9 |
10 | def initialize(self, parser):
11 | parser = BaseOptions.initialize(self, parser) # define shared options
12 | parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
13 | parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
14 | parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
15 | # Dropout and BatchNorm have different behavior during training and test.
16 | parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
17 | parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')
18 | # rewrite default values
19 | parser.set_defaults(model='test')
20 | # To avoid cropping, the load_size should be the same as crop_size
21 | parser.set_defaults(load_size=parser.get_default('crop_size'))
22 | self.isTrain = False
23 | return parser
24 |
--------------------------------------------------------------------------------
/options/train_options.py:
--------------------------------------------------------------------------------
1 | from .base_options import BaseOptions
2 |
3 |
4 | class TrainOptions(BaseOptions):
5 | """This class includes training options.
6 |
7 | It also includes shared options defined in BaseOptions.
8 | """
9 |
10 | def initialize(self, parser):
11 | parser = BaseOptions.initialize(self, parser)
12 | # visdom and HTML visualization parameters
13 | parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
14 | parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
15 | parser.add_argument('--display_id', type=int, default=1, help='window id of the web display')
16 | parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
17 | parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
18 | parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display')
19 | parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
20 | parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
21 | parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
22 | # network saving and loading parameters
23 | parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
24 | parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
25 | parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration')
26 | parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
27 | parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
28 | parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
29 | # training parameters
30 | parser.add_argument('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')
31 | parser.add_argument('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero')
32 | parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
33 | parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
34 | parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
35 | parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
36 | parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
37 | parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
38 |
39 | self.isTrain = True
40 | return parser
41 |
--------------------------------------------------------------------------------
/pix2pix.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "colab_type": "text",
7 | "id": "view-in-github"
8 | },
9 | "source": [
10 | "
"
11 | ]
12 | },
13 | {
14 | "cell_type": "markdown",
15 | "metadata": {
16 | "colab_type": "text",
17 | "id": "7wNjDKdQy35h"
18 | },
19 | "source": [
20 | "# Install"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": null,
26 | "metadata": {
27 | "colab": {},
28 | "colab_type": "code",
29 | "id": "TRm-USlsHgEV"
30 | },
31 | "outputs": [],
32 | "source": [
33 | "!git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix"
34 | ]
35 | },
36 | {
37 | "cell_type": "code",
38 | "execution_count": null,
39 | "metadata": {
40 | "colab": {},
41 | "colab_type": "code",
42 | "id": "Pt3igws3eiVp"
43 | },
44 | "outputs": [],
45 | "source": [
46 | "import os\n",
47 | "os.chdir('pytorch-CycleGAN-and-pix2pix/')"
48 | ]
49 | },
50 | {
51 | "cell_type": "code",
52 | "execution_count": null,
53 | "metadata": {
54 | "colab": {},
55 | "colab_type": "code",
56 | "id": "z1EySlOXwwoa"
57 | },
58 | "outputs": [],
59 | "source": [
60 | "!pip install -r requirements.txt"
61 | ]
62 | },
63 | {
64 | "cell_type": "markdown",
65 | "metadata": {
66 | "colab_type": "text",
67 | "id": "8daqlgVhw29P"
68 | },
69 | "source": [
70 | "# Datasets\n",
71 | "\n",
72 | "Download one of the official datasets with:\n",
73 | "\n",
74 | "- `bash ./datasets/download_pix2pix_dataset.sh [cityscapes, night2day, edges2handbags, edges2shoes, facades, maps]`\n",
75 | "\n",
76 | "Or use your own dataset by creating the appropriate folders and adding in the images. Follow the instructions [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/datasets.md#pix2pix-datasets)."
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": null,
82 | "metadata": {
83 | "colab": {},
84 | "colab_type": "code",
85 | "id": "vrdOettJxaCc"
86 | },
87 | "outputs": [],
88 | "source": [
89 | "!bash ./datasets/download_pix2pix_dataset.sh facades"
90 | ]
91 | },
92 | {
93 | "cell_type": "markdown",
94 | "metadata": {
95 | "colab_type": "text",
96 | "id": "gdUz4116xhpm"
97 | },
98 | "source": [
99 | "# Pretrained models\n",
100 | "\n",
101 | "Download one of the official pretrained models with:\n",
102 | "\n",
103 | "- `bash ./scripts/download_pix2pix_model.sh [edges2shoes, sat2map, map2sat, facades_label2photo, and day2night]`\n",
104 | "\n",
105 | "Or add your own pretrained model to `./checkpoints/{NAME}_pretrained/latest_net_G.pt`"
106 | ]
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": null,
111 | "metadata": {
112 | "colab": {},
113 | "colab_type": "code",
114 | "id": "GC2DEP4M0OsS"
115 | },
116 | "outputs": [],
117 | "source": [
118 | "!bash ./scripts/download_pix2pix_model.sh facades_label2photo"
119 | ]
120 | },
121 | {
122 | "cell_type": "markdown",
123 | "metadata": {
124 | "colab_type": "text",
125 | "id": "yFw1kDQBx3LN"
126 | },
127 | "source": [
128 | "# Training\n",
129 | "\n",
130 | "- `python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA`\n",
131 | "\n",
132 | "Change the `--dataroot` and `--name` to your own dataset's path and model's name. Use `--gpu_ids 0,1,..` to train on multiple GPUs and `--batch_size` to change the batch size. Add `--direction BtoA` if you want to train a model to transfrom from class B to A."
133 | ]
134 | },
135 | {
136 | "cell_type": "code",
137 | "execution_count": null,
138 | "metadata": {
139 | "colab": {},
140 | "colab_type": "code",
141 | "id": "0sp7TCT2x9dB"
142 | },
143 | "outputs": [],
144 | "source": [
145 | "!python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA --display_id -1"
146 | ]
147 | },
148 | {
149 | "cell_type": "markdown",
150 | "metadata": {
151 | "colab_type": "text",
152 | "id": "9UkcaFZiyASl"
153 | },
154 | "source": [
155 | "# Testing\n",
156 | "\n",
157 | "- `python test.py --dataroot ./datasets/facades --direction BtoA --model pix2pix --name facades_pix2pix`\n",
158 | "\n",
159 | "Change the `--dataroot`, `--name`, and `--direction` to be consistent with your trained model's configuration and how you want to transform images.\n",
160 | "\n",
161 | "> from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix:\n",
162 | "> Note that we specified --direction BtoA as Facades dataset's A to B direction is photos to labels.\n",
163 | "\n",
164 | "> If you would like to apply a pre-trained model to a collection of input images (rather than image pairs), please use --model test option. See ./scripts/test_single.sh for how to apply a model to Facade label maps (stored in the directory facades/testB).\n",
165 | "\n",
166 | "> See a list of currently available models at ./scripts/download_pix2pix_model.sh"
167 | ]
168 | },
169 | {
170 | "cell_type": "code",
171 | "execution_count": null,
172 | "metadata": {
173 | "colab": {},
174 | "colab_type": "code",
175 | "id": "mey7o6j-0368"
176 | },
177 | "outputs": [],
178 | "source": [
179 | "!ls checkpoints/"
180 | ]
181 | },
182 | {
183 | "cell_type": "code",
184 | "execution_count": null,
185 | "metadata": {
186 | "colab": {},
187 | "colab_type": "code",
188 | "id": "uCsKkEq0yGh0"
189 | },
190 | "outputs": [],
191 | "source": [
192 | "!python test.py --dataroot ./datasets/facades --direction BtoA --model pix2pix --name facades_label2photo_pretrained --use_wandb"
193 | ]
194 | },
195 | {
196 | "cell_type": "markdown",
197 | "metadata": {
198 | "colab_type": "text",
199 | "id": "OzSKIPUByfiN"
200 | },
201 | "source": [
202 | "# Visualize"
203 | ]
204 | },
205 | {
206 | "cell_type": "code",
207 | "execution_count": null,
208 | "metadata": {
209 | "colab": {},
210 | "colab_type": "code",
211 | "id": "9Mgg8raPyizq"
212 | },
213 | "outputs": [],
214 | "source": [
215 | "import matplotlib.pyplot as plt\n",
216 | "\n",
217 | "img = plt.imread('./results/facades_label2photo_pretrained/test_latest/images/100_fake_B.png')\n",
218 | "plt.imshow(img)"
219 | ]
220 | },
221 | {
222 | "cell_type": "code",
223 | "execution_count": null,
224 | "metadata": {
225 | "colab": {},
226 | "colab_type": "code",
227 | "id": "0G3oVH9DyqLQ"
228 | },
229 | "outputs": [],
230 | "source": [
231 | "img = plt.imread('./results/facades_label2photo_pretrained/test_latest/images/100_real_A.png')\n",
232 | "plt.imshow(img)"
233 | ]
234 | },
235 | {
236 | "cell_type": "code",
237 | "execution_count": null,
238 | "metadata": {
239 | "colab": {},
240 | "colab_type": "code",
241 | "id": "ErK5OC1j1LH4"
242 | },
243 | "outputs": [],
244 | "source": [
245 | "img = plt.imread('./results/facades_label2photo_pretrained/test_latest/images/100_real_B.png')\n",
246 | "plt.imshow(img)"
247 | ]
248 | }
249 | ],
250 | "metadata": {
251 | "accelerator": "GPU",
252 | "colab": {
253 | "collapsed_sections": [],
254 | "include_colab_link": true,
255 | "name": "pix2pix",
256 | "provenance": []
257 | },
258 | "environment": {
259 | "name": "tf2-gpu.2-3.m74",
260 | "type": "gcloud",
261 | "uri": "gcr.io/deeplearning-platform-release/tf2-gpu.2-3:m74"
262 | },
263 | "kernelspec": {
264 | "display_name": "Python 3",
265 | "language": "python",
266 | "name": "python3"
267 | },
268 | "language_info": {
269 | "codemirror_mode": {
270 | "name": "ipython",
271 | "version": 3
272 | },
273 | "file_extension": ".py",
274 | "mimetype": "text/x-python",
275 | "name": "python",
276 | "nbconvert_exporter": "python",
277 | "pygments_lexer": "ipython3",
278 | "version": "3.7.10"
279 | }
280 | },
281 | "nbformat": 4,
282 | "nbformat_minor": 4
283 | }
284 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | torch>=1.4.0
2 | torchvision>=0.5.0
3 | dominate>=2.4.0
4 | visdom>=0.1.8.8
5 | wandb
6 |
--------------------------------------------------------------------------------
/scripts/conda_deps.sh:
--------------------------------------------------------------------------------
1 | set -ex
2 | conda install numpy pyyaml mkl mkl-include setuptools cmake cffi typing
3 | conda install pytorch torchvision -c pytorch # add cuda90 if CUDA 9
4 | conda install visdom dominate -c conda-forge # install visdom and dominate
5 |
--------------------------------------------------------------------------------
/scripts/download_cyclegan_model.sh:
--------------------------------------------------------------------------------
1 | FILE=$1
2 |
3 | echo "Note: available models are apple2orange, orange2apple, summer2winter_yosemite, winter2summer_yosemite, horse2zebra, zebra2horse, monet2photo, style_monet, style_cezanne, style_ukiyoe, style_vangogh, sat2map, map2sat, cityscapes_photo2label, cityscapes_label2photo, facades_photo2label, facades_label2photo, iphone2dslr_flower"
4 |
5 | echo "Specified [$FILE]"
6 |
7 | mkdir -p ./checkpoints/${FILE}_pretrained
8 | MODEL_FILE=./checkpoints/${FILE}_pretrained/latest_net_G.pth
9 | URL=http://efrosgans.eecs.berkeley.edu/cyclegan/pretrained_models/$FILE.pth
10 |
11 | wget -N $URL -O $MODEL_FILE
12 |
--------------------------------------------------------------------------------
/scripts/download_pix2pix_model.sh:
--------------------------------------------------------------------------------
1 | FILE=$1
2 |
3 | echo "Note: available models are edges2shoes, sat2map, map2sat, facades_label2photo, and day2night"
4 | echo "Specified [$FILE]"
5 |
6 | mkdir -p ./checkpoints/${FILE}_pretrained
7 | MODEL_FILE=./checkpoints/${FILE}_pretrained/latest_net_G.pth
8 | URL=http://efrosgans.eecs.berkeley.edu/pix2pix/models-pytorch/$FILE.pth
9 |
10 | wget -N $URL -O $MODEL_FILE
11 |
--------------------------------------------------------------------------------
/scripts/edges/PostprocessHED.m:
--------------------------------------------------------------------------------
1 | %%% Prerequisites
2 | % You need to get the cpp file edgesNmsMex.cpp from https://raw.githubusercontent.com/pdollar/edges/master/private/edgesNmsMex.cpp
3 | % and compile it in Matlab: mex edgesNmsMex.cpp
4 | % You also need to download and install Piotr's Computer Vision Matlab Toolbox: https://pdollar.github.io/toolbox/
5 |
6 | %%% parameters
7 | % hed_mat_dir: the hed mat file directory (the output of 'batch_hed.py')
8 | % edge_dir: the output HED edges directory
9 | % image_width: resize the edge map to [image_width, image_width]
10 | % threshold: threshold for image binarization (default 25.0/255.0)
11 | % small_edge: remove small edges (default 5)
12 |
13 | function [] = PostprocessHED(hed_mat_dir, edge_dir, image_width, threshold, small_edge)
14 |
15 | if ~exist(edge_dir, 'dir')
16 | mkdir(edge_dir);
17 | end
18 | fileList = dir(fullfile(hed_mat_dir, '*.mat'));
19 | nFiles = numel(fileList);
20 | fprintf('find %d mat files\n', nFiles);
21 |
22 | for n = 1 : nFiles
23 | if mod(n, 1000) == 0
24 | fprintf('process %d/%d images\n', n, nFiles);
25 | end
26 | fileName = fileList(n).name;
27 | filePath = fullfile(hed_mat_dir, fileName);
28 | jpgName = strrep(fileName, '.mat', '.jpg');
29 | edge_path = fullfile(edge_dir, jpgName);
30 |
31 | if ~exist(edge_path, 'file')
32 | E = GetEdge(filePath);
33 | E = imresize(E,[image_width,image_width]);
34 | E_simple = SimpleEdge(E, threshold, small_edge);
35 | E_simple = uint8(E_simple*255);
36 | imwrite(E_simple, edge_path, 'Quality',100);
37 | end
38 | end
39 | end
40 |
41 |
42 |
43 |
44 | function [E] = GetEdge(filePath)
45 | load(filePath);
46 | E = 1-edge_predict;
47 | end
48 |
49 | function [E4] = SimpleEdge(E, threshold, small_edge)
50 | if nargin <= 1
51 | threshold = 25.0/255.0;
52 | end
53 |
54 | if nargin <= 2
55 | small_edge = 5;
56 | end
57 |
58 | if ndims(E) == 3
59 | E = E(:,:,1);
60 | end
61 |
62 | E1 = 1 - E;
63 | E2 = EdgeNMS(E1);
64 | E3 = double(E2>=max(eps,threshold));
65 | E3 = bwmorph(E3,'thin',inf);
66 | E4 = bwareaopen(E3, small_edge);
67 | E4=1-E4;
68 | end
69 |
70 | function [E_nms] = EdgeNMS( E )
71 | E=single(E);
72 | [Ox,Oy] = gradient2(convTri(E,4));
73 | [Oxx,~] = gradient2(Ox);
74 | [Oxy,Oyy] = gradient2(Oy);
75 | O = mod(atan(Oyy.*sign(-Oxy)./(Oxx+1e-5)),pi);
76 | E_nms = edgesNmsMex(E,O,1,5,1.01,1);
77 | end
78 |
--------------------------------------------------------------------------------
/scripts/edges/batch_hed.py:
--------------------------------------------------------------------------------
1 | # HED batch processing script; modified from https://github.com/s9xie/hed/blob/master/examples/hed/HED-tutorial.ipynb
2 | # Step 1: download the hed repo: https://github.com/s9xie/hed
3 | # Step 2: download the models and protoxt, and put them under {caffe_root}/examples/hed/
4 | # Step 3: put this script under {caffe_root}/examples/hed/
5 | # Step 4: run the following script:
6 | # python batch_hed.py --images_dir=/data/to/path/photos/ --hed_mat_dir=/data/to/path/hed_mat_files/
7 | # The code sometimes crashes after computation is done. Error looks like "Check failed: ... driver shutting down". You can just kill the job.
8 | # For large images, it may produce GPU memory issues, so it is better to resize the images before running this script.
9 | # Step 5: run the MATLAB post-processing script "PostprocessHED.m"
10 |
11 |
12 | import caffe
13 | import numpy as np
14 | from PIL import Image
15 | import os
16 | import argparse
17 | import sys
18 | import scipy.io as sio
19 |
20 |
21 | def parse_args():
22 | parser = argparse.ArgumentParser(description='batch processing: photos->edges')
23 | parser.add_argument('--caffe_root', dest='caffe_root', help='caffe root', default='../../', type=str)
24 | parser.add_argument('--caffemodel', dest='caffemodel', help='caffemodel', default='./hed_pretrained_bsds.caffemodel', type=str)
25 | parser.add_argument('--prototxt', dest='prototxt', help='caffe prototxt file', default='./deploy.prototxt', type=str)
26 | parser.add_argument('--images_dir', dest='images_dir', help='directory to store input photos', type=str)
27 | parser.add_argument('--hed_mat_dir', dest='hed_mat_dir', help='directory to store output hed edges in mat file', type=str)
28 | parser.add_argument('--border', dest='border', help='padding border', type=int, default=128)
29 | parser.add_argument('--gpu_id', dest='gpu_id', help='gpu id', type=int, default=1)
30 | args = parser.parse_args()
31 | return args
32 |
33 |
34 | args = parse_args()
35 | for arg in vars(args):
36 | print('[%s] =' % arg, getattr(args, arg))
37 | # Make sure that caffe is on the python path:
38 | caffe_root = args.caffe_root # this file is expected to be in {caffe_root}/examples/hed/
39 | sys.path.insert(0, caffe_root + 'python')
40 |
41 |
42 | if not os.path.exists(args.hed_mat_dir):
43 | print('create output directory %s' % args.hed_mat_dir)
44 | os.makedirs(args.hed_mat_dir)
45 |
46 | imgList = os.listdir(args.images_dir)
47 | nImgs = len(imgList)
48 | print('#images = %d' % nImgs)
49 |
50 | caffe.set_mode_gpu()
51 | caffe.set_device(args.gpu_id)
52 | # load net
53 | net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
54 | # pad border
55 | border = args.border
56 |
57 | for i in range(nImgs):
58 | if i % 500 == 0:
59 | print('processing image %d/%d' % (i, nImgs))
60 | im = Image.open(os.path.join(args.images_dir, imgList[i]))
61 |
62 | in_ = np.array(im, dtype=np.float32)
63 | in_ = np.pad(in_, ((border, border), (border, border), (0, 0)), 'reflect')
64 |
65 | in_ = in_[:, :, 0:3]
66 | in_ = in_[:, :, ::-1]
67 | in_ -= np.array((104.00698793, 116.66876762, 122.67891434))
68 | in_ = in_.transpose((2, 0, 1))
69 | # note: GPU mode is set above via caffe.set_mode_gpu() and caffe.set_device(); use caffe.set_mode_cpu() instead if testing with CPU
70 |
71 | # shape for input (data blob is N x C x H x W), set data
72 | net.blobs['data'].reshape(1, *in_.shape)
73 | net.blobs['data'].data[...] = in_
74 | # run net and take argmax for prediction
75 | net.forward()
76 | fuse = net.blobs['sigmoid-fuse'].data[0][0, :, :]
77 | # get rid of the border
78 | fuse = fuse[(border + 35):(-border + 35), (border + 35):(-border + 35)]
79 | # save hed file to the disk
80 | name, ext = os.path.splitext(imgList[i])
81 | sio.savemat(os.path.join(args.hed_mat_dir, name + '.mat'), {'edge_predict': fuse})
82 |
--------------------------------------------------------------------------------
/scripts/eval_cityscapes/cityscapes.py:
--------------------------------------------------------------------------------
1 | # The following code is modified from https://github.com/shelhamer/clockwork-fcn
2 | import sys
3 | import os
4 | import glob
5 | import numpy as np
6 | from PIL import Image
7 |
8 |
9 | class cityscapes:
10 | def __init__(self, data_path):
11 | # data_path something like /data2/cityscapes
12 | self.dir = data_path
13 | self.classes = ['road', 'sidewalk', 'building', 'wall', 'fence',
14 | 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain',
15 | 'sky', 'person', 'rider', 'car', 'truck',
16 | 'bus', 'train', 'motorcycle', 'bicycle']
17 | self.mean = np.array((72.78044, 83.21195, 73.45286), dtype=np.float32)
18 | # import cityscapes label helper and set up label mappings
19 | sys.path.insert(0, '{}/scripts/helpers/'.format(self.dir))
20 | labels = __import__('labels')
21 | self.id2trainId = {label.id: label.trainId for label in labels.labels} # dictionary mapping from raw IDs to train IDs
22 | self.trainId2color = {label.trainId: label.color for label in labels.labels} # dictionary mapping train IDs to colors as 3-tuples
23 |
24 | def get_dset(self, split):
25 | '''
26 | List images as (city, id) for the specified split
27 |
28 | TODO(shelhamer) generate splits from cityscapes itself, instead of
29 | relying on these separately made text files.
30 | '''
31 | if split == 'train':
32 | dataset = open('{}/ImageSets/segFine/train.txt'.format(self.dir)).read().splitlines()
33 | else:
34 | dataset = open('{}/ImageSets/segFine/val.txt'.format(self.dir)).read().splitlines()
35 | return [(item.split('/')[0], item.split('/')[1]) for item in dataset]
36 |
37 | def load_image(self, split, city, idx):
38 | im = Image.open('{}/leftImg8bit_sequence/{}/{}/{}_leftImg8bit.png'.format(self.dir, split, city, idx))
39 | return im
40 |
41 | def assign_trainIds(self, label):
42 | """
43 | Map the given label IDs to the train IDs appropriate for training
44 | Use the label mapping provided in labels.py from the cityscapes scripts
45 | """
46 | label = np.array(label, dtype=np.float32)
47 | if sys.version_info[0] < 3:
48 | for k, v in self.id2trainId.iteritems():
49 | label[label == k] = v
50 | else:
51 | for k, v in self.id2trainId.items():
52 | label[label == k] = v
53 | return label
54 |
55 | def load_label(self, split, city, idx):
56 | """
57 | Load label image as 1 x height x width integer array of label indices.
58 | The leading singleton dimension is required by the loss.
59 | """
60 | label = Image.open('{}/gtFine/{}/{}/{}_gtFine_labelIds.png'.format(self.dir, split, city, idx))
61 | label = self.assign_trainIds(label) # get proper labels for eval
62 | label = np.array(label, dtype=np.uint8)
63 | label = label[np.newaxis, ...]
64 | return label
65 |
66 | def preprocess(self, im):
67 | """
68 | Preprocess loaded image (by load_image) for Caffe:
69 | - cast to float
70 | - switch channels RGB -> BGR
71 | - subtract mean
72 | - transpose to channel x height x width order
73 | """
74 | in_ = np.array(im, dtype=np.float32)
75 | in_ = in_[:, :, ::-1]
76 | in_ -= self.mean
77 | in_ = in_.transpose((2, 0, 1))
78 | return in_
79 |
80 | def palette(self, label):
81 | '''
82 | Map trainIds to colors as specified in labels.py
83 | '''
84 | if label.ndim == 3:
85 | label = label[0]
86 | color = np.empty((label.shape[0], label.shape[1], 3))
87 | if sys.version_info[0] < 3:
88 | for k, v in self.trainId2color.iteritems():
89 | color[label == k, :] = v
90 | else:
91 | for k, v in self.trainId2color.items():
92 | color[label == k, :] = v
93 | return color
94 |
95 | def make_boundaries(label, thickness=None):
96 | """
97 | Input is an image label, output is a numpy array mask encoding the boundaries of the objects
98 | Extract pixels at the true boundary by dilation - erosion of label.
99 | Don't just pick the void label as it is not exclusive to the boundaries.
100 | """
101 | assert(thickness is not None)
102 | import skimage.morphology as skm
103 | void = 255
104 | mask = np.logical_and(label > 0, label != void)[0]
105 | selem = skm.disk(thickness)
106 | boundaries = np.logical_xor(skm.dilation(mask, selem),
107 | skm.erosion(mask, selem))
108 | return boundaries
109 |
110 | def list_label_frames(self, split):
111 | """
112 | Select labeled frames from a split for evaluation
113 | collected as (city, shot, idx) tuples
114 | """
115 | def file2idx(f):
116 | """Helper to convert file path into frame ID"""
117 | city, shot, frame = (os.path.basename(f).split('_')[:3])
118 | return "_".join([city, shot, frame])
119 | frames = []
120 | cities = [os.path.basename(f) for f in glob.glob('{}/gtFine/{}/*'.format(self.dir, split))]
121 | for c in cities:
122 | files = sorted(glob.glob('{}/gtFine/{}/{}/*labelIds.png'.format(self.dir, split, c)))
123 | frames.extend([file2idx(f) for f in files])
124 | return frames
125 |
126 | def collect_frame_sequence(self, split, idx, length):
127 | """
128 | Collect sequence of frames preceding (and including) a labeled frame
129 | as a list of Images.
130 |
131 | Note: 19 preceding frames are provided for each labeled frame.
132 | """
133 | SEQ_LEN = length
134 | city, shot, frame = idx.split('_')
135 | frame = int(frame)
136 | frame_seq = []
137 | for i in range(frame - SEQ_LEN, frame + 1):
138 | frame_path = '{0}/leftImg8bit_sequence/val/{1}/{1}_{2}_{3:0>6d}_leftImg8bit.png'.format(
139 | self.dir, city, shot, i)
140 | frame_seq.append(Image.open(frame_path))
141 | return frame_seq
142 |
--------------------------------------------------------------------------------
/scripts/eval_cityscapes/download_fcn8s.sh:
--------------------------------------------------------------------------------
1 | URL=http://efrosgans.eecs.berkeley.edu/pix2pix_extra/fcn-8s-cityscapes.caffemodel
2 | OUTPUT_FILE=./scripts/eval_cityscapes/caffemodel/fcn-8s-cityscapes.caffemodel
3 | wget -N $URL -O $OUTPUT_FILE
4 |
--------------------------------------------------------------------------------
/scripts/eval_cityscapes/evaluate.py:
--------------------------------------------------------------------------------
1 | import os
2 | import caffe
3 | import argparse
4 | import numpy as np
5 | import scipy.misc
6 | from PIL import Image
7 | from util import segrun, fast_hist, get_scores
8 | from cityscapes import cityscapes
9 |
10 | parser = argparse.ArgumentParser()
11 | parser.add_argument("--cityscapes_dir", type=str, required=True, help="Path to the original cityscapes dataset")
12 | parser.add_argument("--result_dir", type=str, required=True, help="Path to the generated images to be evaluated")
13 | parser.add_argument("--output_dir", type=str, required=True, help="Where to save the evaluation results")
14 | parser.add_argument("--caffemodel_dir", type=str, default='./scripts/eval_cityscapes/caffemodel/', help="Where the FCN-8s caffemodel stored")
15 | parser.add_argument("--gpu_id", type=int, default=0, help="Which gpu id to use")
16 | parser.add_argument("--split", type=str, default='val', help="Data split to be evaluated")
17 | parser.add_argument("--save_output_images", type=int, default=0, help="Whether to save the FCN output images")
18 | args = parser.parse_args()
19 |
20 |
21 | def main():
22 | if not os.path.isdir(args.output_dir):
23 | os.makedirs(args.output_dir)
24 | if args.save_output_images > 0:
25 | output_image_dir = args.output_dir + 'image_outputs/'
26 | if not os.path.isdir(output_image_dir):
27 | os.makedirs(output_image_dir)
28 | CS = cityscapes(args.cityscapes_dir)
29 | n_cl = len(CS.classes)
30 | label_frames = CS.list_label_frames(args.split)
31 | caffe.set_device(args.gpu_id)
32 | caffe.set_mode_gpu()
33 | net = caffe.Net(args.caffemodel_dir + '/deploy.prototxt',
34 | args.caffemodel_dir + 'fcn-8s-cityscapes.caffemodel',
35 | caffe.TEST)
36 |
37 | hist_perframe = np.zeros((n_cl, n_cl))
38 | for i, idx in enumerate(label_frames):
39 | if i % 10 == 0:
40 | print('Evaluating: %d/%d' % (i, len(label_frames)))
41 | city = idx.split('_')[0]
42 | # idx is city_shot_frame
43 | label = CS.load_label(args.split, city, idx)
44 | im_file = args.result_dir + '/' + idx + '_leftImg8bit.png'
45 | im = np.array(Image.open(im_file))
46 | im = scipy.misc.imresize(im, (label.shape[1], label.shape[2]))
47 | # im = np.array(Image.fromarray(im).resize((label.shape[1], label.shape[2]))) # Note: scipy.misc.imresize is deprecated, but we still use it for reproducibility.
48 | out = segrun(net, CS.preprocess(im))
49 | hist_perframe += fast_hist(label.flatten(), out.flatten(), n_cl)
50 | if args.save_output_images > 0:
51 | label_im = CS.palette(label)
52 | pred_im = CS.palette(out)
53 | scipy.misc.imsave(output_image_dir + '/' + str(i) + '_pred.jpg', pred_im)
54 | scipy.misc.imsave(output_image_dir + '/' + str(i) + '_gt.jpg', label_im)
55 | scipy.misc.imsave(output_image_dir + '/' + str(i) + '_input.jpg', im)
56 |
57 | mean_pixel_acc, mean_class_acc, mean_class_iou, per_class_acc, per_class_iou = get_scores(hist_perframe)
58 | with open(args.output_dir + '/evaluation_results.txt', 'w') as f:
59 | f.write('Mean pixel accuracy: %f\n' % mean_pixel_acc)
60 | f.write('Mean class accuracy: %f\n' % mean_class_acc)
61 | f.write('Mean class IoU: %f\n' % mean_class_iou)
62 | f.write('************ Per class numbers below ************\n')
63 | for i, cl in enumerate(CS.classes):
64 | while len(cl) < 15:
65 | cl = cl + ' '
66 | f.write('%s: acc = %f, iou = %f\n' % (cl, per_class_acc[i], per_class_iou[i]))
67 |
68 |
69 | main()
70 |
--------------------------------------------------------------------------------
/scripts/eval_cityscapes/util.py:
--------------------------------------------------------------------------------
1 | # The following code is modified from https://github.com/shelhamer/clockwork-fcn
2 | import numpy as np
3 |
4 |
5 | def get_out_scoremap(net):
6 | return net.blobs['score'].data[0].argmax(axis=0).astype(np.uint8)
7 |
8 |
9 | def feed_net(net, in_):
10 | """
11 | Load prepared input into net.
12 | """
13 | net.blobs['data'].reshape(1, *in_.shape)
14 | net.blobs['data'].data[...] = in_
15 |
16 |
17 | def segrun(net, in_):
18 | feed_net(net, in_)
19 | net.forward()
20 | return get_out_scoremap(net)
21 |
22 |
23 | def fast_hist(a, b, n):
24 | k = np.where((a >= 0) & (a < n))[0]
25 | bc = np.bincount(n * a[k].astype(int) + b[k], minlength=n**2)
26 | if len(bc) != n**2:
27 | # ignore this example if dimension mismatch
28 | return 0
29 | return bc.reshape(n, n)
30 |
31 |
32 | def get_scores(hist):
33 | # Mean pixel accuracy
34 | acc = np.diag(hist).sum() / (hist.sum() + 1e-12)
35 |
36 | # Per class accuracy
37 | cl_acc = np.diag(hist) / (hist.sum(1) + 1e-12)
38 |
39 | # Per class IoU
40 | iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist) + 1e-12)
41 |
42 | return acc, np.nanmean(cl_acc), np.nanmean(iu), cl_acc, iu
43 |
--------------------------------------------------------------------------------
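
fast_hist above builds an n x n confusion matrix (rows = ground truth, columns = prediction) from flattened label arrays, and get_scores derives pixel accuracy, per-class accuracy, and per-class IoU from it. A tiny worked example with two classes and four pixels, reusing the same histogram logic so the numbers can be checked by hand:

    import numpy as np

    def fast_hist(a, b, n):
        # same histogram construction as in the file above
        k = np.where((a >= 0) & (a < n))[0]
        bc = np.bincount(n * a[k].astype(int) + b[k], minlength=n ** 2)
        return bc.reshape(n, n)

    labels = np.array([0, 0, 1, 1])   # ground truth
    preds = np.array([0, 1, 1, 1])    # predictions
    hist = fast_hist(labels, preds, 2)
    print(hist)                       # [[1 1], [0 2]]
    iou = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist) + 1e-12)
    print(iou)                        # class 0: 0.5, class 1: ~0.667
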
/scripts/install_deps.sh:
--------------------------------------------------------------------------------
1 | set -ex
2 | pip install visdom
3 | pip install dominate
4 |
--------------------------------------------------------------------------------
/scripts/test_before_push.py:
--------------------------------------------------------------------------------
1 | # Simple script to make sure basic usage
2 | # such as training, testing, saving and loading
3 | # runs without errors.
4 | import os
5 |
6 |
7 | def run(command):
8 | print(command)
9 | exit_status = os.system(command)
10 | if exit_status > 0:
11 | exit(1)
12 |
13 |
14 | if __name__ == '__main__':
15 | # download mini datasets
16 | if not os.path.exists('./datasets/mini'):
17 | run('bash ./datasets/download_cyclegan_dataset.sh mini')
18 |
19 | if not os.path.exists('./datasets/mini_pix2pix'):
20 | run('bash ./datasets/download_cyclegan_dataset.sh mini_pix2pix')
21 |
22 | # pretrained cyclegan model
23 | if not os.path.exists('./checkpoints/horse2zebra_pretrained/latest_net_G.pth'):
24 | run('bash ./scripts/download_cyclegan_model.sh horse2zebra')
25 |     run('python test.py --model test --dataroot ./datasets/mini --name horse2zebra_pretrained --no_dropout --num_test 1')
26 |
27 | # pretrained pix2pix model
28 | if not os.path.exists('./checkpoints/facades_label2photo_pretrained/latest_net_G.pth'):
29 | run('bash ./scripts/download_pix2pix_model.sh facades_label2photo')
30 | if not os.path.exists('./datasets/facades'):
31 | run('bash ./datasets/download_pix2pix_dataset.sh facades')
32 | run('python test.py --dataroot ./datasets/facades/ --direction BtoA --model pix2pix --name facades_label2photo_pretrained --num_test 1')
33 |
34 | # cyclegan train/test
35 | run('python train.py --model cycle_gan --name temp_cyclegan --dataroot ./datasets/mini --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 10 --print_freq 1 --display_id -1')
36 | run('python test.py --model test --name temp_cyclegan --dataroot ./datasets/mini --num_test 1 --model_suffix "_A" --no_dropout')
37 |
38 | # pix2pix train/test
39 | run('python train.py --model pix2pix --name temp_pix2pix --dataroot ./datasets/mini_pix2pix --n_epochs 1 --n_epochs_decay 5 --save_latest_freq 10 --display_id -1')
40 | run('python test.py --model pix2pix --name temp_pix2pix --dataroot ./datasets/mini_pix2pix --num_test 1')
41 |
42 | # template train/test
43 | run('python train.py --model template --name temp2 --dataroot ./datasets/mini_pix2pix --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 10 --display_id -1')
44 | run('python test.py --model template --name temp2 --dataroot ./datasets/mini_pix2pix --num_test 1')
45 |
46 | # colorization train/test (optional)
47 | if not os.path.exists('./datasets/mini_colorization'):
48 | run('bash ./datasets/download_cyclegan_dataset.sh mini_colorization')
49 |
50 | run('python train.py --model colorization --name temp_color --dataroot ./datasets/mini_colorization --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 5 --display_id -1')
51 | run('python test.py --model colorization --name temp_color --dataroot ./datasets/mini_colorization --num_test 1')
52 |
--------------------------------------------------------------------------------
/scripts/test_colorization.sh:
--------------------------------------------------------------------------------
1 | set -ex
2 | python test.py --dataroot ./datasets/colorization --name color_pix2pix --model colorization
3 |
--------------------------------------------------------------------------------
/scripts/test_cyclegan.sh:
--------------------------------------------------------------------------------
1 | set -ex
2 | python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan --phase test --no_dropout
3 |
--------------------------------------------------------------------------------
/scripts/test_pix2pix.sh:
--------------------------------------------------------------------------------
1 | set -ex
2 | python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --netG unet_256 --direction BtoA --dataset_mode aligned --norm batch
3 |
--------------------------------------------------------------------------------
/scripts/test_single.sh:
--------------------------------------------------------------------------------
1 | set -ex
2 | python test.py --dataroot ./datasets/facades/testB/ --name facades_pix2pix --model test --netG unet_256 --direction BtoA --dataset_mode single --norm batch
3 |
--------------------------------------------------------------------------------
/scripts/train_colorization.sh:
--------------------------------------------------------------------------------
1 | set -ex
2 | python train.py --dataroot ./datasets/colorization --name color_pix2pix --model colorization
3 |
--------------------------------------------------------------------------------
/scripts/train_cyclegan.sh:
--------------------------------------------------------------------------------
1 | set -ex
2 | python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan --pool_size 50 --no_dropout
3 |
--------------------------------------------------------------------------------
/scripts/train_pix2pix.sh:
--------------------------------------------------------------------------------
1 | set -ex
2 | python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --netG unet_256 --direction BtoA --lambda_L1 100 --dataset_mode aligned --norm batch --pool_size 0
3 |
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
1 | """General-purpose test script for image-to-image translation.
2 |
3 | Once you have trained your model with train.py, you can use this script to test the model.
4 | It will load a saved model from '--checkpoints_dir' and save the results to '--results_dir'.
5 |
6 | It first creates a model and a dataset given the options. It will hard-code some parameters.
7 | It then runs inference for '--num_test' images and saves the results to an HTML file.
8 |
9 | Example (You need to train models first or download pre-trained models from our website):
10 | Test a CycleGAN model (both sides):
11 | python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
12 |
13 | Test a CycleGAN model (one side only):
14 | python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout
15 |
16 | The option '--model test' is used for generating CycleGAN results only for one side.
17 | This option will automatically set '--dataset_mode single', which only loads the images from one set.
18 | In contrast, using '--model cycle_gan' requires loading and generating results in both directions,
19 | which is sometimes unnecessary. The results will be saved at ./results/.
20 | Use '--results_dir <directory_path_to_save_result>' to specify the results directory.
21 |
22 | Test a pix2pix model:
23 | python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
24 |
25 | See options/base_options.py and options/test_options.py for more test options.
26 | See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
27 | See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
28 | """
29 | import os
30 | from options.test_options import TestOptions
31 | from data import create_dataset
32 | from models import create_model
33 | from util.visualizer import save_images
34 | from util import html
35 |
36 | try:
37 | import wandb
38 | except ImportError:
39 | print('Warning: wandb package cannot be found. The option "--use_wandb" will result in error.')
40 |
41 |
42 | if __name__ == '__main__':
43 | opt = TestOptions().parse() # get test options
44 | # hard-code some parameters for test
45 | opt.num_threads = 0 # test code only supports num_threads = 0
46 | opt.batch_size = 1 # test code only supports batch_size = 1
47 | opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
48 | opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
49 |     opt.display_id = -1   # no visdom display; the test code saves the results to an HTML file.
50 | dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
51 | model = create_model(opt) # create a model given opt.model and other options
52 | model.setup(opt) # regular setup: load and print networks; create schedulers
53 |
54 | # initialize logger
55 | if opt.use_wandb:
56 | wandb_run = wandb.init(project=opt.wandb_project_name, name=opt.name, config=opt) if not wandb.run else wandb.run
57 | wandb_run._label(repo='CycleGAN-and-pix2pix')
58 |
59 | # create a website
60 | web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(opt.phase, opt.epoch)) # define the website directory
61 | if opt.load_iter > 0: # load_iter is 0 by default
62 | web_dir = '{:s}_iter{:d}'.format(web_dir, opt.load_iter)
63 | print('creating web directory', web_dir)
64 | webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))
65 | # test with eval mode. This only affects layers like batchnorm and dropout.
66 |     # For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment with and without eval() mode.
67 | # For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout.
68 | if opt.eval:
69 | model.eval()
70 | for i, data in enumerate(dataset):
71 | if i >= opt.num_test: # only apply our model to opt.num_test images.
72 | break
73 | model.set_input(data) # unpack data from data loader
74 | model.test() # run inference
75 | visuals = model.get_current_visuals() # get image results
76 | img_path = model.get_image_paths() # get image paths
77 | if i % 5 == 0: # save images to an HTML file
78 | print('processing (%04d)-th image... %s' % (i, img_path))
79 | save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize, use_wandb=opt.use_wandb)
80 | webpage.save() # save the HTML
81 |
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | """General-purpose training script for image-to-image translation.
2 |
3 | This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and
4 | different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization).
5 | You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model').
6 |
7 | It first creates a model, dataset, and visualizer given the options.
8 | It then does standard network training. During training, it also visualizes/saves the images, prints/saves the loss plot, and saves the models.
9 | The script supports continue/resume training. Use '--continue_train' to resume your previous training.
10 |
11 | Example:
12 | Train a CycleGAN model:
13 | python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan
14 | Train a pix2pix model:
15 | python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA
16 |
17 | See options/base_options.py and options/train_options.py for more training options.
18 | See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md
19 | See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md
20 | """
21 | import time
22 | from options.train_options import TrainOptions
23 | from data import create_dataset
24 | from models import create_model
25 | from util.visualizer import Visualizer
26 |
27 | if __name__ == '__main__':
28 | opt = TrainOptions().parse() # get training options
29 | dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options
30 | dataset_size = len(dataset) # get the number of images in the dataset.
31 | print('The number of training images = %d' % dataset_size)
32 |
33 | model = create_model(opt) # create a model given opt.model and other options
34 | model.setup(opt) # regular setup: load and print networks; create schedulers
35 | visualizer = Visualizer(opt) # create a visualizer that display/save images and plots
36 | total_iters = 0 # the total number of training iterations
37 |
38 |     for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1):    # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
39 | epoch_start_time = time.time() # timer for entire epoch
40 | iter_data_time = time.time() # timer for data loading per iteration
41 | epoch_iter = 0 # the number of training iterations in current epoch, reset to 0 every epoch
42 | visualizer.reset() # reset the visualizer: make sure it saves the results to HTML at least once every epoch
43 | model.update_learning_rate() # update learning rates in the beginning of every epoch.
44 | for i, data in enumerate(dataset): # inner loop within one epoch
45 | iter_start_time = time.time() # timer for computation per iteration
46 | if total_iters % opt.print_freq == 0:
47 | t_data = iter_start_time - iter_data_time
48 |
49 | total_iters += opt.batch_size
50 | epoch_iter += opt.batch_size
51 | model.set_input(data) # unpack data from dataset and apply preprocessing
52 | model.optimize_parameters() # calculate loss functions, get gradients, update network weights
53 |
54 |             if total_iters % opt.display_freq == 0:   # display images on visdom and save images to an HTML file
55 | save_result = total_iters % opt.update_html_freq == 0
56 | model.compute_visuals()
57 | visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
58 |
59 | if total_iters % opt.print_freq == 0: # print training losses and save logging information to the disk
60 | losses = model.get_current_losses()
61 | t_comp = (time.time() - iter_start_time) / opt.batch_size
62 | visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
63 | if opt.display_id > 0:
64 | visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
65 |
66 |             if total_iters % opt.save_latest_freq == 0:   # cache our latest model every <save_latest_freq> iterations
67 | print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
68 | save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
69 | model.save_networks(save_suffix)
70 |
71 | iter_data_time = time.time()
72 |         if epoch % opt.save_epoch_freq == 0:              # cache our model every <save_epoch_freq> epochs
73 | print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
74 | model.save_networks('latest')
75 | model.save_networks(epoch)
76 |
77 | print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time))
78 |
--------------------------------------------------------------------------------
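One detail of the training loop above that is easy to miss: total_iters counts images rather than iterations (it advances by opt.batch_size), so --print_freq, --display_freq and --save_latest_freq are expressed in images. A standalone sketch with hypothetical values shows the cadence; note that if batch_size does not evenly divide a frequency, that gate never fires exactly, which is worth keeping in mind when choosing these options.

# Standalone illustration, not repository code.
batch_size = 4
print_freq = 100        # print losses every 100 images
save_latest_freq = 500  # save the 'latest' checkpoint every 500 images

total_iters = 0
for step in range(1, 251):         # 250 hypothetical training steps
    total_iters += batch_size      # mirrors train.py: counts images, not steps
    if total_iters % print_freq == 0:
        print('log losses at %d images' % total_iters)
    if total_iters % save_latest_freq == 0:
        print('save latest checkpoint at %d images' % total_iters)
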
/util/__init__.py:
--------------------------------------------------------------------------------
1 | """This package includes a miscellaneous collection of useful helper functions."""
2 |
--------------------------------------------------------------------------------
/util/get_data.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import os
3 | import tarfile
4 | import requests
5 | from warnings import warn
6 | from zipfile import ZipFile
7 | from bs4 import BeautifulSoup
8 | from os.path import abspath, isdir, join, basename
9 |
10 |
11 | class GetData(object):
12 | """A Python script for downloading CycleGAN or pix2pix datasets.
13 |
14 | Parameters:
15 | technique (str) -- One of: 'cyclegan' or 'pix2pix'.
16 | verbose (bool) -- If True, print additional information.
17 |
18 | Examples:
19 | >>> from util.get_data import GetData
20 | >>> gd = GetData(technique='cyclegan')
21 | >>> new_data_path = gd.get(save_path='./datasets') # options will be displayed.
22 |
23 |     Alternatively, you can use the bash scripts 'datasets/download_pix2pix_dataset.sh'
24 |     and 'datasets/download_cyclegan_dataset.sh'.
25 | """
26 |
27 | def __init__(self, technique='cyclegan', verbose=True):
28 | url_dict = {
29 | 'pix2pix': 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/',
30 | 'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets'
31 | }
32 | self.url = url_dict.get(technique.lower())
33 | self._verbose = verbose
34 |
35 | def _print(self, text):
36 | if self._verbose:
37 | print(text)
38 |
39 | @staticmethod
40 | def _get_options(r):
41 | soup = BeautifulSoup(r.text, 'lxml')
42 | options = [h.text for h in soup.find_all('a', href=True)
43 | if h.text.endswith(('.zip', 'tar.gz'))]
44 | return options
45 |
46 | def _present_options(self):
47 | r = requests.get(self.url)
48 | options = self._get_options(r)
49 | print('Options:\n')
50 | for i, o in enumerate(options):
51 | print("{0}: {1}".format(i, o))
52 | choice = input("\nPlease enter the number of the "
53 | "dataset above you wish to download:")
54 | return options[int(choice)]
55 |
56 | def _download_data(self, dataset_url, save_path):
57 | if not isdir(save_path):
58 | os.makedirs(save_path)
59 |
60 | base = basename(dataset_url)
61 | temp_save_path = join(save_path, base)
62 |
63 | with open(temp_save_path, "wb") as f:
64 | r = requests.get(dataset_url)
65 | f.write(r.content)
66 |
67 | if base.endswith('.tar.gz'):
68 | obj = tarfile.open(temp_save_path)
69 | elif base.endswith('.zip'):
70 | obj = ZipFile(temp_save_path, 'r')
71 | else:
72 | raise ValueError("Unknown File Type: {0}.".format(base))
73 |
74 | self._print("Unpacking Data...")
75 | obj.extractall(save_path)
76 | obj.close()
77 | os.remove(temp_save_path)
78 |
79 | def get(self, save_path, dataset=None):
80 | """
81 |
82 | Download a dataset.
83 |
84 | Parameters:
85 | save_path (str) -- A directory to save the data to.
86 | dataset (str) -- (optional). A specific dataset to download.
87 | Note: this must include the file extension.
88 | If None, options will be presented for you
89 | to choose from.
90 |
91 | Returns:
92 | save_path_full (str) -- the absolute path to the downloaded data.
93 |
94 | """
95 | if dataset is None:
96 | selected_dataset = self._present_options()
97 | else:
98 | selected_dataset = dataset
99 |
100 | save_path_full = join(save_path, selected_dataset.split('.')[0])
101 |
102 | if isdir(save_path_full):
103 |             warn("\n'{0}' already exists. Skipping download.".format(
104 | save_path_full))
105 | else:
106 | self._print('Downloading Data...')
107 | url = "{0}/{1}".format(self.url, selected_dataset)
108 | self._download_data(url, save_path=save_path)
109 |
110 | return abspath(save_path_full)
111 |
--------------------------------------------------------------------------------
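A non-interactive call is often more convenient than the prompt in _present_options. A usage sketch, not part of the repository; the archive name 'facades.tar.gz' is only an illustration of the 'must include the file extension' rule, and the real names come from the server listing:

from util.get_data import GetData

gd = GetData(technique='pix2pix', verbose=True)
path = gd.get(save_path='./datasets', dataset='facades.tar.gz')  # hypothetical archive name
print(path)  # absolute path to the extracted dataset, e.g. ./datasets/facades
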
/util/html.py:
--------------------------------------------------------------------------------
1 | import dominate
2 | from dominate.tags import meta, h3, table, tr, td, p, a, img, br
3 | import os
4 |
5 |
6 | class HTML:
7 | """This HTML class allows us to save images and write texts into a single HTML file.
8 |
9 |     It consists of functions such as <add_header> (add a text header to the HTML file),
10 |     <add_images> (add a row of images to the HTML file), and <save> (save the HTML to the disk).
11 | It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API.
12 | """
13 |
14 | def __init__(self, web_dir, title, refresh=0):
15 | """Initialize the HTML classes
16 |
17 | Parameters:
18 |             web_dir (str) -- a directory that stores the webpage. HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir>/images/
19 |             title (str)   -- the webpage name
20 |             refresh (int) -- how often the website refreshes itself; if 0, no refreshing
21 |         """
22 |         self.title = title
23 |         self.web_dir = web_dir
24 |         self.img_dir = os.path.join(self.web_dir, 'images')
25 |         if not os.path.exists(self.web_dir):
26 |             os.makedirs(self.web_dir)
27 |         if not os.path.exists(self.img_dir):
28 |             os.makedirs(self.img_dir)
29 | 
30 |         self.doc = dominate.document(title=title)
31 |         if refresh > 0:
32 | with self.doc.head:
33 | meta(http_equiv="refresh", content=str(refresh))
34 |
35 | def get_image_dir(self):
36 | """Return the directory that stores images"""
37 | return self.img_dir
38 |
39 | def add_header(self, text):
40 | """Insert a header to the HTML file
41 |
42 | Parameters:
43 | text (str) -- the header text
44 | """
45 | with self.doc:
46 | h3(text)
47 |
48 | def add_images(self, ims, txts, links, width=400):
49 | """add images to the HTML file
50 |
51 | Parameters:
52 | ims (str list) -- a list of image paths
53 | txts (str list) -- a list of image names shown on the website
54 | links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page
55 | """
56 | self.t = table(border=1, style="table-layout: fixed;") # Insert a table
57 | self.doc.add(self.t)
58 | with self.t:
59 | with tr():
60 | for im, txt, link in zip(ims, txts, links):
61 | with td(style="word-wrap: break-word;", halign="center", valign="top"):
62 | with p():
63 | with a(href=os.path.join('images', link)):
64 | img(style="width:%dpx" % width, src=os.path.join('images', im))
65 | br()
66 | p(txt)
67 |
68 | def save(self):
69 |         """save the current content to the HTML file"""
70 | html_file = '%s/index.html' % self.web_dir
71 | f = open(html_file, 'wt')
72 | f.write(self.doc.render())
73 | f.close()
74 |
75 |
76 | if __name__ == '__main__': # we show an example usage here.
77 | html = HTML('web/', 'test_html')
78 | html.add_header('hello world')
79 |
80 | ims, txts, links = [], [], []
81 | for n in range(4):
82 | ims.append('image_%d.png' % n)
83 | txts.append('text_%d' % n)
84 | links.append('image_%d.png' % n)
85 | html.add_images(ims, txts, links)
86 | html.save()
87 |
--------------------------------------------------------------------------------
/util/image_pool.py:
--------------------------------------------------------------------------------
1 | import random
2 | import torch
3 |
4 |
5 | class ImagePool():
6 | """This class implements an image buffer that stores previously generated images.
7 |
8 | This buffer enables us to update discriminators using a history of generated images
9 | rather than the ones produced by the latest generators.
10 | """
11 |
12 | def __init__(self, pool_size):
13 | """Initialize the ImagePool class
14 |
15 | Parameters:
16 | pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created
17 | """
18 | self.pool_size = pool_size
19 | if self.pool_size > 0: # create an empty pool
20 | self.num_imgs = 0
21 | self.images = []
22 |
23 | def query(self, images):
24 | """Return an image from the pool.
25 |
26 | Parameters:
27 | images: the latest generated images from the generator
28 |
29 | Returns images from the buffer.
30 |
31 |         With probability 0.5, the buffer will return the input images unchanged.
32 |         With probability 0.5, the buffer will return images previously stored in the buffer,
33 |         and insert the current images into the buffer.
34 | """
35 | if self.pool_size == 0: # if the buffer size is 0, do nothing
36 | return images
37 | return_images = []
38 | for image in images:
39 | image = torch.unsqueeze(image.data, 0)
40 | if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer
41 | self.num_imgs = self.num_imgs + 1
42 | self.images.append(image)
43 | return_images.append(image)
44 | else:
45 | p = random.uniform(0, 1)
46 | if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer
47 | random_id = random.randint(0, self.pool_size - 1) # randint is inclusive
48 | tmp = self.images[random_id].clone()
49 | self.images[random_id] = image
50 | return_images.append(tmp)
51 | else: # by another 50% chance, the buffer will return the current image
52 | return_images.append(image)
53 | return_images = torch.cat(return_images, 0) # collect all the images and return
54 | return return_images
55 |
--------------------------------------------------------------------------------
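As the class docstring says, the pool exists so that the discriminator is updated on a history of generated images rather than only the newest ones. A sketch of that wiring, not repository code; the tensors below are stand-ins for real batches:

import torch
from util.image_pool import ImagePool

pool = ImagePool(pool_size=50)

def fakes_for_discriminator(fake):
    # detach first so no gradient flows back into the generator through the pool
    return pool.query(fake.detach())

fake = torch.randn(2, 3, 256, 256)       # stand-in for a generated batch
fake_for_D = fakes_for_discriminator(fake)
print(fake_for_D.shape)                  # same shape as the input batch
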
/util/util.py:
--------------------------------------------------------------------------------
1 | """This module contains simple helper functions """
2 | from __future__ import print_function
3 | import torch
4 | import numpy as np
5 | from PIL import Image
6 | import os
7 |
8 |
9 | def tensor2im(input_image, imtype=np.uint8):
10 |     """Converts a Tensor array into a numpy image array.
11 |
12 | Parameters:
13 | input_image (tensor) -- the input image tensor array
14 | imtype (type) -- the desired type of the converted numpy array
15 | """
16 | if not isinstance(input_image, np.ndarray):
17 | if isinstance(input_image, torch.Tensor): # get the data from a variable
18 | image_tensor = input_image.data
19 | else:
20 | return input_image
21 | image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array
22 | if image_numpy.shape[0] == 1: # grayscale to RGB
23 | image_numpy = np.tile(image_numpy, (3, 1, 1))
24 |             image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling
25 | else: # if it is a numpy array, do nothing
26 | image_numpy = input_image
27 | return image_numpy.astype(imtype)
28 |
29 |
30 | def diagnose_network(net, name='network'):
31 | """Calculate and print the mean of average absolute(gradients)
32 |
33 | Parameters:
34 | net (torch network) -- Torch network
35 | name (str) -- the name of the network
36 | """
37 | mean = 0.0
38 | count = 0
39 | for param in net.parameters():
40 | if param.grad is not None:
41 | mean += torch.mean(torch.abs(param.grad.data))
42 | count += 1
43 | if count > 0:
44 | mean = mean / count
45 | print(name)
46 | print(mean)
47 |
48 |
49 | def save_image(image_numpy, image_path, aspect_ratio=1.0):
50 | """Save a numpy image to the disk
51 |
52 | Parameters:
53 | image_numpy (numpy array) -- input numpy array
54 | image_path (str) -- the path of the image
55 | """
56 |
57 | image_pil = Image.fromarray(image_numpy)
58 | h, w, _ = image_numpy.shape
59 |
60 | if aspect_ratio > 1.0:
61 | image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
62 | if aspect_ratio < 1.0:
63 | image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
64 | image_pil.save(image_path)
65 |
66 |
67 | def print_numpy(x, val=True, shp=False):
68 | """Print the mean, min, max, median, std, and size of a numpy array
69 |
70 | Parameters:
71 | val (bool) -- if print the values of the numpy array
72 | shp (bool) -- if print the shape of the numpy array
73 | """
74 | x = x.astype(np.float64)
75 | if shp:
76 | print('shape,', x.shape)
77 | if val:
78 | x = x.flatten()
79 | print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
80 | np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))
81 |
82 |
83 | def mkdirs(paths):
84 | """create empty directories if they don't exist
85 |
86 | Parameters:
87 | paths (str list) -- a list of directory paths
88 | """
89 | if isinstance(paths, list) and not isinstance(paths, str):
90 | for path in paths:
91 | mkdir(path)
92 | else:
93 | mkdir(paths)
94 |
95 |
96 | def mkdir(path):
97 | """create a single empty directory if it didn't exist
98 |
99 | Parameters:
100 | path (str) -- a single directory path
101 | """
102 | if not os.path.exists(path):
103 | os.makedirs(path)
104 |
--------------------------------------------------------------------------------
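tensor2im and save_image above form the whole path from network output to a file on disk: the first maps a [-1, 1] tensor to an H x W x 3 uint8 array, the second writes it with PIL. A minimal sketch, not repository code; the output directory name is hypothetical:

import torch
from util.util import tensor2im, save_image, mkdir

fake = torch.rand(1, 3, 256, 256) * 2 - 1   # stand-in for a generator output in [-1, 1]
im = tensor2im(fake)                        # 256 x 256 x 3 uint8 numpy array
mkdir('./demo_results')                     # hypothetical output directory
save_image(im, './demo_results/fake.png')
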
/util/visualizer.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import os
3 | import sys
4 | import ntpath
5 | import time
6 | from . import util, html
7 | from subprocess import Popen, PIPE
8 |
9 |
10 | try:
11 | import wandb
12 | except ImportError:
13 | print('Warning: wandb package cannot be found. The option "--use_wandb" will result in error.')
14 |
15 | if sys.version_info[0] == 2:
16 | VisdomExceptionBase = Exception
17 | else:
18 | VisdomExceptionBase = ConnectionError
19 |
20 |
21 | def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256, use_wandb=False):
22 | """Save images to the disk.
23 |
24 | Parameters:
25 |         webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
26 | visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
27 | image_path (str) -- the string is used to create image paths
28 | aspect_ratio (float) -- the aspect ratio of saved images
29 | width (int) -- the images will be resized to width x width
30 |
31 | This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
32 | """
33 | image_dir = webpage.get_image_dir()
34 | short_path = ntpath.basename(image_path[0])
35 | name = os.path.splitext(short_path)[0]
36 |
37 | webpage.add_header(name)
38 | ims, txts, links = [], [], []
39 | ims_dict = {}
40 | for label, im_data in visuals.items():
41 | im = util.tensor2im(im_data)
42 | image_name = '%s_%s.png' % (name, label)
43 | save_path = os.path.join(image_dir, image_name)
44 | util.save_image(im, save_path, aspect_ratio=aspect_ratio)
45 | ims.append(image_name)
46 | txts.append(label)
47 | links.append(image_name)
48 | if use_wandb:
49 | ims_dict[label] = wandb.Image(im)
50 | webpage.add_images(ims, txts, links, width=width)
51 | if use_wandb:
52 | wandb.log(ims_dict)
53 |
54 |
55 | class Visualizer():
56 | """This class includes several functions that can display/save images and print/save logging information.
57 |
58 | It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.
59 | """
60 |
61 | def __init__(self, opt):
62 | """Initialize the Visualizer class
63 |
64 | Parameters:
65 | opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
66 | Step 1: Cache the training/test options
67 | Step 2: connect to a visdom server
68 |         Step 3: create an HTML object for saving HTML files
69 | Step 4: create a logging file to store training losses
70 | """
71 | self.opt = opt # cache the option
72 | self.display_id = opt.display_id
73 | self.use_html = opt.isTrain and not opt.no_html
74 | self.win_size = opt.display_winsize
75 | self.name = opt.name
76 | self.port = opt.display_port
77 | self.saved = False
78 | self.use_wandb = opt.use_wandb
79 | self.wandb_project_name = opt.wandb_project_name
80 | self.current_epoch = 0
81 | self.ncols = opt.display_ncols
82 |
83 |         if self.display_id > 0:  # connect to a visdom server given <display_port> and <display_server>
84 | import visdom
85 | self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)
86 | if not self.vis.check_connection():
87 | self.create_visdom_connections()
88 |
89 | if self.use_wandb:
90 | self.wandb_run = wandb.init(project=self.wandb_project_name, name=opt.name, config=opt) if not wandb.run else wandb.run
91 | self.wandb_run._label(repo='CycleGAN-and-pix2pix')
92 |
93 |         if self.use_html:  # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/
94 | self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
95 | self.img_dir = os.path.join(self.web_dir, 'images')
96 | print('create web directory %s...' % self.web_dir)
97 | util.mkdirs([self.web_dir, self.img_dir])
98 | # create a logging file to store training losses
99 | self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
100 | with open(self.log_name, "a") as log_file:
101 | now = time.strftime("%c")
102 | log_file.write('================ Training Loss (%s) ================\n' % now)
103 |
104 | def reset(self):
105 | """Reset the self.saved status"""
106 | self.saved = False
107 |
108 | def create_visdom_connections(self):
109 | """If the program could not connect to Visdom server, this function will start a new server at port < self.port > """
110 | cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port
111 | print('\n\nCould not connect to Visdom server. \n Trying to start a server....')
112 | print('Command: %s' % cmd)
113 | Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
114 |
115 | def display_current_results(self, visuals, epoch, save_result):
116 | """Display current results on visdom; save current results to an HTML file.
117 |
118 | Parameters:
119 | visuals (OrderedDict) - - dictionary of images to display or save
120 | epoch (int) - - the current epoch
121 | save_result (bool) - - if save the current results to an HTML file
122 | """
123 | if self.display_id > 0: # show images in the browser using visdom
124 | ncols = self.ncols
125 | if ncols > 0: # show all the images in one visdom panel
126 | ncols = min(ncols, len(visuals))
127 | h, w = next(iter(visuals.values())).shape[:2]
128 |                 table_css = """<style>
129 |                         table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}
130 |                         table td {width: % dpx; height: % dpx; padding: 4px; outline: 4px solid black}
131 |                         </style>""" % (w, h)  # create a table css
132 | # create a table of images.
133 | title = self.name
134 | label_html = ''
135 | label_html_row = ''
136 | images = []
137 | idx = 0
138 | for label, image in visuals.items():
139 | image_numpy = util.tensor2im(image)
140 |                     label_html_row += '<td>%s</td>' % label
141 | images.append(image_numpy.transpose([2, 0, 1]))
142 | idx += 1
143 | if idx % ncols == 0:
144 |                         label_html += '<tr>%s</tr>' % label_html_row
145 | label_html_row = ''
146 | white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
147 | while idx % ncols != 0:
148 | images.append(white_image)
149 |                     label_html_row += '<td></td>'
150 | idx += 1
151 | if label_html_row != '':
152 |                     label_html += '<tr>%s</tr>' % label_html_row
153 | try:
154 | self.vis.images(images, nrow=ncols, win=self.display_id + 1,
155 | padding=2, opts=dict(title=title + ' images'))
156 |                     label_html = '<table>%s</table>' % label_html
157 | self.vis.text(table_css + label_html, win=self.display_id + 2,
158 | opts=dict(title=title + ' labels'))
159 | except VisdomExceptionBase:
160 | self.create_visdom_connections()
161 |
162 | else: # show each image in a separate visdom panel;
163 | idx = 1
164 | try:
165 | for label, image in visuals.items():
166 | image_numpy = util.tensor2im(image)
167 | self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),
168 | win=self.display_id + idx)
169 | idx += 1
170 | except VisdomExceptionBase:
171 | self.create_visdom_connections()
172 |
173 | if self.use_wandb:
174 | columns = [key for key, _ in visuals.items()]
175 | columns.insert(0, 'epoch')
176 | result_table = wandb.Table(columns=columns)
177 | table_row = [epoch]
178 | ims_dict = {}
179 | for label, image in visuals.items():
180 | image_numpy = util.tensor2im(image)
181 | wandb_image = wandb.Image(image_numpy)
182 | table_row.append(wandb_image)
183 | ims_dict[label] = wandb_image
184 | self.wandb_run.log(ims_dict)
185 | if epoch != self.current_epoch:
186 | self.current_epoch = epoch
187 | result_table.add_data(*table_row)
188 | self.wandb_run.log({"Result": result_table})
189 |
190 | if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.
191 | self.saved = True
192 | # save images to the disk
193 | for label, image in visuals.items():
194 | image_numpy = util.tensor2im(image)
195 | img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
196 | util.save_image(image_numpy, img_path)
197 |
198 | # update website
199 | webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1)
200 | for n in range(epoch, 0, -1):
201 | webpage.add_header('epoch [%d]' % n)
202 | ims, txts, links = [], [], []
203 |
204 |                 for label, image in visuals.items():
205 | image_numpy = util.tensor2im(image)
206 | img_path = 'epoch%.3d_%s.png' % (n, label)
207 | ims.append(img_path)
208 | txts.append(label)
209 | links.append(img_path)
210 | webpage.add_images(ims, txts, links, width=self.win_size)
211 | webpage.save()
212 |
213 | def plot_current_losses(self, epoch, counter_ratio, losses):
214 | """display the current losses on visdom display: dictionary of error labels and values
215 |
216 | Parameters:
217 | epoch (int) -- current epoch
218 | counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1
219 | losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
220 | """
221 | if not hasattr(self, 'plot_data'):
222 | self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}
223 | self.plot_data['X'].append(epoch + counter_ratio)
224 | self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])
225 | try:
226 | self.vis.line(
227 | X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
228 | Y=np.array(self.plot_data['Y']),
229 | opts={
230 | 'title': self.name + ' loss over time',
231 | 'legend': self.plot_data['legend'],
232 | 'xlabel': 'epoch',
233 | 'ylabel': 'loss'},
234 | win=self.display_id)
235 | except VisdomExceptionBase:
236 | self.create_visdom_connections()
237 | if self.use_wandb:
238 | self.wandb_run.log(losses)
239 |
240 | # losses: same format as |losses| of plot_current_losses
241 | def print_current_losses(self, epoch, iters, losses, t_comp, t_data):
242 | """print current losses on console; also save the losses to the disk
243 |
244 | Parameters:
245 | epoch (int) -- current epoch
246 | iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
247 | losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
248 | t_comp (float) -- computational time per data point (normalized by batch_size)
249 | t_data (float) -- data loading time per data point (normalized by batch_size)
250 | """
251 | message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
252 | for k, v in losses.items():
253 | message += '%s: %.3f ' % (k, v)
254 |
255 | print(message) # print the message
256 | with open(self.log_name, "a") as log_file:
257 | log_file.write('%s\n' % message) # save the message
258 |
--------------------------------------------------------------------------------