├── .gitignore ├── LICENSE ├── README.md ├── data ├── __init__.py ├── base_dataset.py ├── unaligned_coco_dataset.py └── unaligned_dataset.py ├── docs ├── Dockerfile ├── README_es.md ├── datasets.md ├── docker.md ├── overview.md ├── qa.md └── tips.md ├── imgs ├── edges2cats.jpg ├── forkgan.jpg └── horse2zebra.gif ├── install.sh ├── label_converter.py ├── loss_log_to_plot.py ├── models ├── __init__.py ├── base_model.py ├── fork_gan_model.py └── networks.py ├── options ├── __init__.py ├── base_options.py ├── test_options.py └── train_options.py ├── run.sh ├── svgs ├── 185bbe3a42b0f2df9493da909528a50c.svg ├── 1b4342b577c22a565bf4275ad350b51c.svg ├── 21fd4e8eecd6bdf1a4d3d6bd1fb8d733.svg ├── 421472f4ff7fdf1fcbb80a776f953e28.svg ├── 4f806e64be00e75b9da6946fa8b30ab9.svg ├── 55d3f040c4b762956ca1504da10e73cf.svg ├── 572d909dcb75f1d90e402fdb7fcbfefc.svg ├── 84df98c65d88c6adf15d4645ffa25e47.svg ├── 91aac9730317276af725abd8cef04ca9.svg ├── 929ed909014029a206f344a28aa47d15.svg ├── 9493f58d962b918a014f0611cbd7a2c8.svg ├── 9592e060056326cb8915a4d5f7f08906.svg ├── a057a67da77082c6678b0161bfe9361f.svg ├── a3a832b2fe92d672bd55cda4001fbb7c.svg ├── bc30d82546823adc821898ae820607df.svg ├── cbfb1b2a33b28eab8a3e59464768e810.svg ├── d4c4b525c4ba39454b0f939d81d6a2f4.svg ├── e5d134f35dc4949fab12ec64d186248a.svg ├── e93accfc68a4cbd2d241d8cc770c7ae0.svg └── ef3441dc1d8817d7e91591b8b57cea97.svg ├── test.py ├── train.py └── util ├── __init__.py ├── get_data.py ├── html.py ├── image_pool.py ├── util.py └── visualizer.py /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | debug* 3 | datasets/ 4 | checkpoints/ 5 | results/ 6 | build/ 7 | dist/ 8 | *.png 9 | torch.egg-info/ 10 | */**/__pycache__ 11 | torch/version.py 12 | torch/csrc/generic/TensorMethods.cpp 13 | torch/lib/*.so* 14 | torch/lib/*.dylib* 15 | torch/lib/*.h 16 | torch/lib/build 17 | torch/lib/tmp_install 18 | torch/lib/include 19 | torch/lib/torch_shm_manager 20 | torch/csrc/cudnn/cuDNN.cpp 21 | torch/csrc/nn/THNN.cwrap 22 | torch/csrc/nn/THNN.cpp 23 | torch/csrc/nn/THCUNN.cwrap 24 | torch/csrc/nn/THCUNN.cpp 25 | torch/csrc/nn/THNN_generic.cwrap 26 | torch/csrc/nn/THNN_generic.cpp 27 | torch/csrc/nn/THNN_generic.h 28 | docs/src/**/* 29 | test/data/legacy_modules.t7 30 | test/data/gpu_tensors.pt 31 | test/htmlcov 32 | test/.coverage 33 | */*.pyc 34 | */**/*.pyc 35 | */**/**/*.pyc 36 | */**/**/**/*.pyc 37 | */**/**/**/**/*.pyc 38 | */*.so* 39 | */**/*.so* 40 | */**/*.dylib* 41 | test/data/legacy_serialized.pt 42 | *~ 43 | .idea 44 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017, Jun-Yan Zhu and Taesung Park 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 
13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 15 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | 25 | 26 | --------------------------- LICENSE FOR pix2pix -------------------------------- 27 | BSD License 28 | 29 | For pix2pix software 30 | Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu 31 | All rights reserved. 32 | 33 | Redistribution and use in source and binary forms, with or without 34 | modification, are permitted provided that the following conditions are met: 35 | 36 | * Redistributions of source code must retain the above copyright notice, this 37 | list of conditions and the following disclaimer. 38 | 39 | * Redistributions in binary form must reproduce the above copyright notice, 40 | this list of conditions and the following disclaimer in the documentation 41 | and/or other materials provided with the distribution. 42 | 43 | ----------------------------- LICENSE FOR DCGAN -------------------------------- 44 | BSD License 45 | 46 | For dcgan.torch software 47 | 48 | Copyright (c) 2015, Facebook, Inc. All rights reserved. 49 | 50 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 51 | 52 | Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 53 | 54 | Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 55 | 56 | Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 57 | 58 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
59 | -------------------------------------------------------------------------------- /data/__init__.py: -------------------------------------------------------------------------------- 1 | """This package includes all the modules related to data loading and preprocessing 2 | 3 | To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset. 4 | You need to implement four functions: 5 | -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt). 6 | -- <__len__>: return the size of dataset. 7 | -- <__getitem__>: get a data point from data loader. 8 | -- : (optionally) add dataset-specific options and set default options. 9 | 10 | Now you can use the dataset class by specifying flag '--dataset_mode dummy'. 11 | See our template dataset class 'template_dataset.py' for more details. 12 | """ 13 | import importlib 14 | import torch.utils.data 15 | from data.base_dataset import BaseDataset 16 | from torch.utils.data.distributed import DistributedSampler 17 | 18 | 19 | def find_dataset_using_name(dataset_name): 20 | """Import the module "data/[dataset_name]_dataset.py". 21 | 22 | In the file, the class called DatasetNameDataset() will 23 | be instantiated. It has to be a subclass of BaseDataset, 24 | and it is case-insensitive. 25 | """ 26 | dataset_filename = "data." + dataset_name + "_dataset" 27 | datasetlib = importlib.import_module(dataset_filename) 28 | 29 | dataset = None 30 | target_dataset_name = dataset_name.replace('_', '') + 'dataset' 31 | for name, cls in datasetlib.__dict__.items(): 32 | if name.lower() == target_dataset_name.lower(): #and issubclass(cls, BaseDataset): 33 | dataset = cls 34 | 35 | if dataset is None: 36 | raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name)) 37 | 38 | return dataset 39 | 40 | 41 | def get_option_setter(dataset_name): 42 | """Return the static method of the dataset class.""" 43 | dataset_class = find_dataset_using_name(dataset_name) 44 | return dataset_class.modify_commandline_options 45 | 46 | 47 | def create_dataset(opt): 48 | """Create a dataset given the option. 49 | 50 | This function wraps the class CustomDatasetDataLoader. 51 | This is the main interface between this package and 'train.py'/'test.py' 52 | 53 | Example: 54 | >>> from data import create_dataset 55 | >>> dataset = create_dataset(opt) 56 | """ 57 | data_loader = CustomDatasetDataLoader(opt) 58 | dataset = data_loader.load_data() 59 | return dataset 60 | 61 | 62 | class CustomDatasetDataLoader(): 63 | """Wrapper class of Dataset class that performs multi-threaded data loading""" 64 | 65 | def __init__(self, opt): 66 | """Initialize this class 67 | 68 | Step 1: create a dataset instance given the name [dataset_mode] 69 | Step 2: create a multi-threaded data loader. 
70 | """ 71 | self.opt = opt 72 | dataset_class = find_dataset_using_name(opt.dataset_mode) 73 | self.dataset = dataset_class(opt) 74 | print("dataset [%s] was created" % type(self.dataset).__name__) 75 | 76 | self.sampler = None 77 | if opt.distributed: 78 | self.sampler = DistributedSampler(self.dataset, num_replicas=opt.ngpus, rank=opt.gpu, shuffle=True, drop_last=False) 79 | self.dataloader = torch.utils.data.DataLoader( 80 | self.dataset, 81 | batch_size=opt.batch_size, 82 | num_workers=int(opt.num_threads), 83 | sampler=self.sampler) 84 | else: 85 | self.dataloader = torch.utils.data.DataLoader( 86 | self.dataset, 87 | batch_size=opt.batch_size, 88 | shuffle=not opt.serial_batches, 89 | num_workers=int(opt.num_threads), 90 | sampler=self.sampler) 91 | 92 | def load_data(self): 93 | return self 94 | 95 | def __len__(self): 96 | """Return the number of data in the dataset""" 97 | return min(len(self.dataset), self.opt.max_dataset_size) 98 | 99 | def __iter__(self): 100 | """Return a batch of data""" 101 | for i, data in enumerate(self.dataloader): 102 | if i * self.opt.batch_size * self.opt.ngpus >= self.opt.max_dataset_size: 103 | break 104 | yield data 105 | 106 | def set_epoch(self, epoch): 107 | self.sampler.set_epoch(epoch) 108 | -------------------------------------------------------------------------------- /data/base_dataset.py: -------------------------------------------------------------------------------- 1 | """This module implements an abstract base class (ABC) 'BaseDataset' for datasets. 2 | 3 | It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses. 4 | """ 5 | import random 6 | import numpy as np 7 | import torch.utils.data as data 8 | from PIL import Image 9 | import torchvision.transforms as transforms 10 | from abc import ABC, abstractmethod 11 | 12 | class BaseDataset(data.Dataset, ABC): 13 | """This class is an abstract base class (ABC) for datasets. 14 | 15 | To create a subclass, you need to implement the following four functions: 16 | -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt). 17 | -- <__len__>: return the size of dataset. 18 | -- <__getitem__>: get a data point. 19 | -- : (optionally) add dataset-specific options and set default options. 20 | """ 21 | 22 | def __init__(self, opt): 23 | """Initialize the class; save the options in the class 24 | 25 | Parameters: 26 | opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions 27 | """ 28 | self.opt = opt 29 | self.root = opt.dataroot 30 | 31 | @staticmethod 32 | def modify_commandline_options(parser, is_train): 33 | """Add new dataset-specific options, and rewrite default values for existing options. 34 | 35 | Parameters: 36 | parser -- original option parser 37 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. 38 | 39 | Returns: 40 | the modified parser. 41 | """ 42 | return parser 43 | 44 | @abstractmethod 45 | def __len__(self): 46 | """Return the total number of images in the dataset.""" 47 | return 0 48 | 49 | @abstractmethod 50 | def __getitem__(self, index): 51 | """Return a data point and its metadata information. 52 | 53 | Parameters: 54 | index - - a random integer for data indexing 55 | 56 | Returns: 57 | a dictionary of data with their names. It ususally contains the data itself and its metadata information. 
58 | """ 59 | pass 60 | 61 | 62 | def get_params(opt, size): 63 | w, h = size 64 | new_h = h 65 | new_w = w 66 | if opt.preprocess == 'resize_and_crop': 67 | new_h = new_w = opt.load_size 68 | elif opt.preprocess == 'scale_width_and_crop': 69 | new_w = opt.load_size 70 | new_h = opt.load_size * h // w 71 | 72 | x = random.randint(0, np.maximum(0, new_w - opt.crop_size)) 73 | y = random.randint(0, np.maximum(0, new_h - opt.crop_size)) 74 | 75 | flip = random.random() > 0.5 76 | 77 | return {'crop_pos': (x, y), 'flip': flip} 78 | 79 | 80 | def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True): 81 | transform_list = [] 82 | if grayscale: 83 | transform_list.append(transforms.Grayscale(1)) 84 | if 'resize' in opt.preprocess: 85 | osize = [opt.load_size, opt.load_size] 86 | transform_list.append(transforms.Resize(osize, method)) 87 | elif 'scale_width' in opt.preprocess: 88 | transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method))) 89 | elif 'scale_height' in opt.preprocess: 90 | transform_list.append(transforms.Lambda(lambda img: __scale_height(img, opt.load_size, opt.crop_size, method))) 91 | 92 | if 'crop' in opt.preprocess: 93 | if params is None: 94 | transform_list.append(transforms.RandomCrop(opt.crop_size)) 95 | else: 96 | transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size))) 97 | 98 | if opt.preprocess == 'none': 99 | transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method))) 100 | 101 | if not opt.no_flip: 102 | if params is None: 103 | transform_list.append(transforms.RandomHorizontalFlip()) 104 | elif params['flip']: 105 | transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip']))) 106 | 107 | if convert: 108 | transform_list += [transforms.ToTensor()] 109 | if grayscale: 110 | transform_list += [transforms.Normalize((0.5,), (0.5,))] 111 | else: 112 | transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] 113 | return transforms.Compose(transform_list) 114 | 115 | 116 | def __make_power_2(img, base, method=Image.BICUBIC): 117 | ow, oh = img.size 118 | h = int(round(oh / base) * base) 119 | w = int(round(ow / base) * base) 120 | if h == oh and w == ow: 121 | return img 122 | 123 | __print_size_warning(ow, oh, w, h) 124 | return img.resize((w, h), method) 125 | 126 | 127 | def __scale_width(img, target_size, crop_size, method=Image.BICUBIC): 128 | ow, oh = img.size 129 | if ow == target_size and oh >= crop_size: 130 | return img 131 | w = target_size 132 | h = int(max(target_size * oh / ow, crop_size)) 133 | return img.resize((w, h), method) 134 | 135 | def __scale_height(img, target_size, crop_size, method=Image.BICUBIC): 136 | ow, oh = img.size 137 | if oh == target_size and ow >= crop_size: 138 | return img 139 | h = target_size 140 | w = int(max(ow*target_size/oh, crop_size)) 141 | return img.resize((w, h), method) 142 | 143 | def __crop(img, pos, size): 144 | ow, oh = img.size 145 | x1, y1 = pos 146 | tw = th = size 147 | if (ow > tw or oh > th): 148 | return img.crop((x1, y1, x1 + tw, y1 + th)) 149 | return img 150 | 151 | 152 | def __flip(img, flip): 153 | if flip: 154 | return img.transpose(Image.FLIP_LEFT_RIGHT) 155 | return img 156 | 157 | 158 | def __print_size_warning(ow, oh, w, h): 159 | """Print warning information about image size(only print once)""" 160 | if not hasattr(__print_size_warning, 'has_printed'): 161 | print("The image size needs to be a multiple 
of 4. " 162 | "The loaded image size was (%d, %d), so it was adjusted to " 163 | "(%d, %d). This adjustment will be done to all images " 164 | "whose sizes are not multiples of 4" % (ow, oh, w, h)) 165 | __print_size_warning.has_printed = True 166 | -------------------------------------------------------------------------------- /data/unaligned_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | from data.base_dataset import BaseDataset, get_transform 3 | from data.image_folder import make_dataset 4 | from PIL import Image 5 | import random 6 | 7 | 8 | class UnalignedDataset(BaseDataset): 9 | """ 10 | This dataset class can load unaligned/unpaired datasets. 11 | 12 | It requires two directories to host training images from domain A '/path/to/data/trainA' 13 | and from domain B '/path/to/data/trainB' respectively. 14 | You can train the model with the dataset flag '--dataroot /path/to/data'. 15 | Similarly, you need to prepare two directories: 16 | '/path/to/data/testA' and '/path/to/data/testB' during test time. 17 | """ 18 | 19 | def __init__(self, opt): 20 | """Initialize this dataset class. 21 | 22 | Parameters: 23 | opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions 24 | """ 25 | BaseDataset.__init__(self, opt) 26 | self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA' 27 | self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B') # create a path '/path/to/data/trainB' 28 | self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA' 29 | self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB' 30 | self.A_size = len(self.A_paths) # get the size of dataset A 31 | self.B_size = len(self.B_paths) # get the size of dataset B 32 | 33 | btoA = self.opt.direction == 'BtoA' 34 | input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image 35 | output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image 36 | 37 | self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1)) 38 | self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1)) 39 | 40 | def __getitem__(self, index): 41 | """Return a data point and its metadata information. 42 | 43 | Parameters: 44 | index (int) -- a random integer for data indexing 45 | 46 | Returns a dictionary that contains A, B, A_paths and B_paths 47 | A (tensor) -- an image in the input domain 48 | B (tensor) -- its corresponding image in the target domain 49 | A_paths (str) -- image paths 50 | B_paths (str) -- image paths 51 | """ 52 | A_path = self.A_paths[index % self.A_size] # make sure index is within then range 53 | if self.opt.serial_batches: # make sure index is within then range 54 | index_B = index % self.B_size 55 | else: # randomize the index for domain B to avoid fixed pairs. 56 | index_B = random.randint(0, self.B_size - 1) 57 | B_path = self.B_paths[index_B] 58 | A_img = Image.open(A_path).convert('RGB') 59 | B_img = Image.open(B_path).convert('RGB') 60 | # apply image transformation 61 | A = self.transform_A(A_img) 62 | B = self.transform_B(B_img) 63 | 64 | return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path} 65 | 66 | def __len__(self): 67 | """Return the total number of images in the dataset. 
68 | 69 | As we have two datasets with potentially different number of images, 70 | we take a maximum of 71 | """ 72 | return max(self.A_size, self.B_size) 73 | -------------------------------------------------------------------------------- /docs/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:10.1-base 2 | 3 | RUN apt update && apt install -y wget unzip curl bzip2 git 4 | RUN curl -LO http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh 5 | RUN bash Miniconda3-latest-Linux-x86_64.sh -p /miniconda -b 6 | RUN rm Miniconda3-latest-Linux-x86_64.sh 7 | ENV PATH=/miniconda/bin:${PATH} 8 | RUN conda update -y conda 9 | 10 | RUN conda install -y pytorch torchvision -c pytorch 11 | RUN mkdir /workspace/ && cd /workspace/ && git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix.git && cd pytorch-CycleGAN-and-pix2pix && pip install -r requirements.txt 12 | 13 | WORKDIR /workspace 14 | -------------------------------------------------------------------------------- /docs/README_es.md: -------------------------------------------------------------------------------- 1 | 2 | 3 |
4 | 5 | # CycleGAN y pix2pix en PyTorch 6 | 7 | Implementacion en PyTorch de Unpaired Image-to-Image Translation. 8 | 9 | Este codigo fue escrito por [Jun-Yan Zhu](https://github.com/junyanz) y [Taesung Park](https://github.com/taesung), y con ayuda de [Tongzhou Wang](https://ssnl.github.io/). 10 | 11 | Esta implementacion de PyTorch produce resultados comparables o mejores que nuestros original software de Torch. Si te gustaria producir los mismos resultados que en documento oficial, echa un vistazo al codigo original [CycleGAN Torch](https://github.com/junyanz/CycleGAN) y [pix2pix Torch](https://github.com/phillipi/pix2pix) 12 | 13 | **Aviso**: El software actual funciona correctamente en PyTorch 0.41+. Para soporte en PyTorch 0.1-0.3: [branch](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/tree/pytorch0.3.1). 14 | 15 | Puede encontrar información útil en [training/test tips](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md) y [preguntas frecuentes](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md). Para implementar modelos y conjuntos de datos personalizados, consulte nuestro [templates](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/README_es.md#modelo-y-dataset-personalizado). Para ayudar a los usuarios a comprender y adaptar mejor nuestra base de código, proporcionamos un [overview](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/overview.md) de la estructura de código de este repositorio. 16 | 17 | **CycleGAN: [Proyecto](https://junyanz.github.io/CycleGAN/) | [PDF](https://arxiv.org/pdf/1703.10593.pdf) | [Torch](https://github.com/junyanz/CycleGAN) | 18 | [Guia de Tensorflow Core](https://www.tensorflow.org/tutorials/generative/cyclegan) | [PyTorch Colab](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/CycleGAN.ipynb)** 19 | 20 | 21 | 22 | **Pix2pix: [Proyeto](https://phillipi.github.io/pix2pix/) | [PDF](https://arxiv.org/pdf/1611.07004.pdf) | [Torch](https://github.com/phillipi/pix2pix) | 23 | [Guia de Tensorflow Core](https://www.tensorflow.org/tutorials/generative/cyclegan) | [PyTorch Colab](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/pix2pix.ipynb)** 24 | 25 | 26 | 27 | 28 | **[EdgesCats Demo](https://affinelayer.com/pixsrv/) | [pix2pix-tensorflow](https://github.com/affinelayer/pix2pix-tensorflow) | por [Christopher Hesse](https://twitter.com/christophrhesse)** 29 | 30 | 31 | 32 | Si usa este código para su investigación, cite: 33 | 34 | Unpaired Image-to-Image Translation usando Cycle-Consistent Adversarial Networks.
35 | [Jun-Yan Zhu](https://www.cs.cmu.edu/~junyanz/)\*, [Taesung Park](https://taesung.me/)\*, [Phillip Isola](https://people.eecs.berkeley.edu/~isola/), [Alexei A. Efros](https://people.eecs.berkeley.edu/~efros). In ICCV 2017. (* contribucion igualitaria) [[Bibtex]](https://junyanz.github.io/CycleGAN/CycleGAN.txt) 36 | 37 | 38 | Image-to-Image Translation usando Conditional Adversarial Networks.
39 | [Phillip Isola](https://people.eecs.berkeley.edu/~isola), [Jun-Yan Zhu](https://www.cs.cmu.edu/~junyanz/), [Tinghui Zhou](https://people.eecs.berkeley.edu/~tinghuiz), [Alexei A. Efros](https://people.eecs.berkeley.edu/~efros). In CVPR 2017. [[Bibtex]](https://www.cs.cmu.edu/~junyanz/projects/pix2pix/pix2pix.bib) 40 | 41 | ## Charlas y curso 42 | Presentacion en PowerPoint de Pix2pix: [keynote](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/pix2pix.key) | [pdf](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/pix2pix.pdf), 43 | Presentacion en PowerPoint de CycleGAN: [pptx](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/CycleGAN.pptx) | [pdf](http://efrosgans.eecs.berkeley.edu/CVPR18_slides/CycleGAN.pdf) 44 | 45 | Asignación del curso CycleGAN [codigo](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/assignments/a4-code.zip) y [handout](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/assignments/a4-handout.pdf) diseñado por el Prof. [Roger Grosse](http://www.cs.toronto.edu/~rgrosse/) for [CSC321](http://www.cs.toronto.edu/~rgrosse/courses/csc321_2018/) "Intro to Neural Networks and Machine Learning" en la universidad de Toronto. Póngase en contacto con el instructor si desea adoptarlo en su curso. 46 | 47 | ## Colab Notebook 48 | TensorFlow Core CycleGAN Tutorial: [Google Colab](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/cyclegan.ipynb) | [Codigo](https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/cyclegan.ipynb) 49 | 50 | Guia de TensorFlow Core pix2pix : [Google Colab](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/pix2pix.ipynb) | [Codigo](https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/pix2pix.ipynb) 51 | 52 | PyTorch Colab notebook: [CycleGAN](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/CycleGAN.ipynb) y [pix2pix](https://colab.research.google.com/github/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/pix2pix.ipynb) 53 | 54 | ## Otras implementaciones 55 | ### CycleGAN 56 |

[Tensorflow] (por Harry Yang), 57 | [Tensorflow] (por Archit Rathore), 58 | [Tensorflow] (por Van Huy), 59 | [Tensorflow] (por Xiaowei Hu), 60 | [Tensorflow-simple] (por Zhenliang He), 61 | [TensorLayer] (por luoxier), 62 | [Chainer] (por Yanghua Jin), 63 | [Minimal PyTorch] (por yunjey), 64 | [Mxnet] (por Ldpe2G), 65 | [lasagne/Keras] (por tjwei), 66 | [Keras] (por Simon Karlsson) 67 |

68 | 69 | 70 | ### pix2pix 71 |

[Tensorflow] (por Christopher Hesse), 72 | [Tensorflow] (por Eyyüb Sariu), 73 | [Tensorflow (face2face)] (por Dat Tran), 74 | [Tensorflow (film)] (por Arthur Juliani), 75 | [Tensorflow (zi2zi)] (por Yuchen Tian), 76 | [Chainer] (por mattya), 77 | [tf/torch/keras/lasagne] (por tjwei), 78 | [Pytorch] (por taey16) 79 |

80 | 81 | 82 | ## Requerimientos 83 | - Linux o macOS 84 | - Python 3 85 | - CPU o NVIDIA GPU usando CUDA CuDNN 86 | 87 | ## Inicio 88 | ### Instalación 89 | 90 | - Clone este repositorio: 91 | ```bash 92 | git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix 93 | cd pytorch-CycleGAN-and-pix2pix 94 | ``` 95 | 96 | - Instale [PyTorch](http://pytorch.org) 0.4+ y sus otras dependencias (e.g., torchvision, [visdom](https://github.com/facebookresearch/visdom) y [dominate](https://github.com/Knio/dominate)). 97 | - Para uso de pip, por favor escriba el comando `pip install -r requirements.txt`. 98 | - Para uso de Conda, proporcionamos un script de instalación `./scripts/conda_deps.sh`. De forma alterna, puede crear un nuevo entorno Conda usando `conda env create -f environment.yml`. 99 | - Para uso de Docker, Proporcionamos la imagen Docker y el archivo Docker preconstruidos. Por favor, consulte nuestra página 100 | [Docker](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/docker.md). 101 | 102 | ### CycleGAN entreanimiento/test 103 | - Descargar el dataset de CycleGAN (e.g. maps): 104 | ```bash 105 | bash ./datasets/download_cyclegan_dataset.sh maps 106 | ``` 107 | - Para ver los resultados del entrenamiento y las gráficas de pérdidas, `python -m visdom.server` y haga clic en la URL 108 | http://localhost:8097. 109 | - Entrenar el modelo: 110 | ```bash 111 | #!./scripts/train_cyclegan.sh 112 | python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan 113 | ``` 114 | Para ver más resultados intermedios, consulte `./checkpoints/maps_cyclegan/web/index.html`. 115 | - Pruebe el modelo: 116 | ```bash 117 | #!./scripts/test_cyclegan.sh 118 | python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan 119 | ``` 120 | -Los resultados de la prueba se guardarán en un archivo html aquí: `./results/maps_cyclegan/latest_test/index.html`. 121 | 122 | ### pix2pix entrenamiento/test 123 | - Descargue el dataset de pix2pix (e.g.[facades](http://cmp.felk.cvut.cz/~tylecr1/facade/)): 124 | ```bash 125 | bash ./datasets/download_pix2pix_dataset.sh facades 126 | ``` 127 | - Para ver los resultados del entrenamiento y las gráficas de pérdidas `python -m visdom.server`, haga clic en la URL http://localhost:8097. 128 | - Para entrenar el modelo: 129 | ```bash 130 | #!./scripts/train_pix2pix.sh 131 | python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA 132 | ``` 133 | Para ver más resultados intermedios, consulte `./checkpoints/facades_pix2pix/web/index.html`. 134 | 135 | - Pruebe el modelo (`bash ./scripts/test_pix2pix.sh`): 136 | ```bash 137 | #!./scripts/test_pix2pix.sh 138 | python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA 139 | ``` 140 | - Los resultados de la prueba se guardarán en un archivo html aquí: `./results/facades_pix2pix/test_latest/index.html`. Puede encontrar más scripts en `scripts` directory. 141 | - Para entrenar y probar modelos de colorización basados en pix2pix, agregue la linea `--model colorization` y `--dataset_mode colorization`. Para más detalles de nuestro entrenamiento [tips](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md#notes-on-colorization). 142 | 143 | ### Aplicar un modelo pre-entrenado (CycleGAN) 144 | - Puedes descargar un modelo previamente entrenado (e.g. 
horse2zebra) con el siguiente script: 145 | ```bash 146 | bash ./scripts/download_cyclegan_model.sh horse2zebra 147 | ``` 148 | - El modelo pre-entrenado se guarda en `./checkpoints/{name}_pretrained/latest_net_G.pth`. Revise [aqui](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/scripts/download_cyclegan_model.sh#L3) para todos los modelos CycleGAN disponibles. 149 | 150 | - Para probar el modelo, también debe descargar el dataset horse2zebra: 151 | ```bash 152 | bash ./datasets/download_cyclegan_dataset.sh horse2zebra 153 | ``` 154 | 155 | - Luego genere los resultados usando: 156 | ```bash 157 | python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout 158 | ``` 159 | - La opcion `--model test` ise usa para generar resultados de CycleGAN de un solo lado. Esta opción configurará automáticamente 160 | `--dataset_mode single`, carga solo las imágenes de un conjunto. Por el contrario, el uso de `--model cycle_gan` requiere cargar y generar resultados en ambas direcciones, lo que a veces es innecesario. Los resultados se guardarán en `./results/`. Use `--results_dir {directory_path_to_save_result}` para especificar el directorio de resultados. 161 | 162 | - Para sus propios experimentos, es posible que desee especificar `--netG`, `--norm`, `--no_dropout` para que coincida con la arquitectura del generador del modelo entrenado. 163 | 164 | ### Aplicar un modelo pre-entrenado (pix2pix) 165 | Descargue un modelo pre-entrenado con `./scripts/download_pix2pix_model.sh`. 166 | 167 | - Revise [aqui](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/scripts/download_pix2pix_model.sh#L3) para todos los modelos pix2pix disponibles. Por ejemplo, si desea descargar el modelo label2photo en el dataset: 168 | ```bash 169 | bash ./scripts/download_pix2pix_model.sh facades_label2photo 170 | ``` 171 | - Descarga el dataset facades de pix2pix: 172 | ```bash 173 | bash ./datasets/download_pix2pix_dataset.sh facades 174 | ``` 175 | - Luego genere los resultados usando: 176 | ```bash 177 | python test.py --dataroot ./datasets/facades/ --direction BtoA --model pix2pix --name facades_label2photo_pretrained 178 | ``` 179 | - Tenga en cuenta que `--direction BtoA` como Facades dataset's, son direcciones A o B para etiquetado de fotos. 180 | 181 | - Si desea aplicar un modelo previamente entrenado a una colección de imágenes de entrada (en lugar de pares de imágenes), use la opcion `--model test`. Vea `./scripts/test_single.sh` obre cómo aplicar un modelo a Facade label maps (almacenados en el directorio `facades/testB`). 182 | 183 | - Vea una lista de los modelos disponibles actualmente en `./scripts/download_pix2pix_model.sh` 184 | 185 | ## [Docker](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/docker.md) 186 | Proporcionamos la imagen Docker y el archivo Docker preconstruidos que pueden ejecutar este repositorio de código. Ver [docker](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/docker.md). 187 | 188 | ## [Datasets](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/datasets.md) 189 | Descargue los conjuntos de datos pix2pix / CycleGAN y cree sus propios conjuntos de datos. 190 | 191 | ## [Entretanimiento/Test Tips](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md) 192 | Las mejores prácticas para entrenar y probar sus modelos. 
193 | 194 | ## [Preguntas frecuentes](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md) 195 | Antes de publicar una nueva pregunta, primero mire las preguntas y respuestas anteriores y los problemas existentes de GitHub. 196 | 197 | ## Modelo y Dataset personalizado 198 | Si planea implementar modelos y conjuntos de datos personalizados para sus nuevas aplicaciones, proporcionamos un conjunto de datos [template](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/data/template_dataset.py) y un modelo [template](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/template_model.py) como punto de partida. 199 | 200 | 201 | ## [Estructura de codigo](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/overview.md) 202 | Para ayudar a los usuarios a comprender mejor y usar nuestro código, presentamos brevemente la funcionalidad e implementación de cada paquete y cada módulo. 203 | 204 | ## Solicitud de Pull 205 | Siempre puede contribuir a este repositorio enviando un [pull request](https://help.github.com/articles/about-pull-requests/). 206 | Por favor ejecute `flake8 --ignore E501 .` y `python ./scripts/test_before_push.py` antes de realizar un Pull en el código, asegure de también actualizar la estructura del código [overview](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/overview.md) en consecuencia si agrega o elimina archivos. 207 | 208 | 209 | ## Citación 210 | Si utiliza este código para su investigación, cite nuestros documentos. 211 | ``` 212 | @inproceedings{CycleGAN2017, 213 | title={Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networkss}, 214 | author={Zhu, Jun-Yan and Park, Taesung and Isola, Phillip and Efros, Alexei A}, 215 | booktitle={Computer Vision (ICCV), 2017 IEEE International Conference on}, 216 | year={2017} 217 | } 218 | 219 | 220 | @inproceedings{isola2017image, 221 | title={Image-to-Image Translation with Conditional Adversarial Networks}, 222 | author={Isola, Phillip and Zhu, Jun-Yan and Zhou, Tinghui and Efros, Alexei A}, 223 | booktitle={Computer Vision and Pattern Recognition (CVPR), 2017 IEEE Conference on}, 224 | year={2017} 225 | } 226 | ``` 227 | 228 | ## Proyectos relacionados 229 | **[CycleGAN-Torch](https://github.com/junyanz/CycleGAN) | 230 | [pix2pix-Torch](https://github.com/phillipi/pix2pix) | [pix2pixHD](https://github.com/NVIDIA/pix2pixHD)| 231 | [BicycleGAN](https://github.com/junyanz/BicycleGAN) | [vid2vid](https://tcwang0509.github.io/vid2vid/) | [SPADE/GauGAN](https://github.com/NVlabs/SPADE)**
232 | **[iGAN](https://github.com/junyanz/iGAN) | [GAN Dissection](https://github.com/CSAILVision/GANDissect) | [GAN Paint](http://ganpaint.io/)** 233 | 234 | ## Cat Paper Collection 235 | Si amas a los gatos y te encanta leer gráficos geniales, computer vision y documentos de aprendizaje, echa un vistazo a Cat Paper [Collection](https://github.com/junyanz/CatPapers). 236 | 237 | ## Agradecimientos 238 | Nuestro código fue inspirado en [pytorch-DCGAN](https://github.com/pytorch/examples/tree/master/dcgan). 239 | -------------------------------------------------------------------------------- /docs/datasets.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ### CycleGAN Datasets 4 | Download the CycleGAN datasets using the following script. Some of the datasets are collected by other researchers. Please cite their papers if you use the data. 5 | ```bash 6 | bash ./datasets/download_cyclegan_dataset.sh dataset_name 7 | ``` 8 | - `facades`: 400 images from the [CMP Facades dataset](http://cmp.felk.cvut.cz/~tylecr1/facade). [[Citation](../datasets/bibtex/facades.tex)] 9 | - `cityscapes`: 2975 images from the [Cityscapes training set](https://www.cityscapes-dataset.com). [[Citation](../datasets/bibtex/cityscapes.tex)]. Note: Due to license issue, we cannot directly provide the Cityscapes dataset. Please download the Cityscapes dataset from [https://cityscapes-dataset.com](https://cityscapes-dataset.com) and use the script `./datasets/prepare_cityscapes_dataset.py`. 10 | - `maps`: 1096 training images scraped from Google Maps. 11 | - `horse2zebra`: 939 horse images and 1177 zebra images downloaded from [ImageNet](http://www.image-net.org) using keywords `wild horse` and `zebra` 12 | - `apple2orange`: 996 apple images and 1020 orange images downloaded from [ImageNet](http://www.image-net.org) using keywords `apple` and `navel orange`. 13 | - `summer2winter_yosemite`: 1273 summer Yosemite images and 854 winter Yosemite images were downloaded using Flickr API. See more details in our paper. 14 | - `monet2photo`, `vangogh2photo`, `ukiyoe2photo`, `cezanne2photo`: The art images were downloaded from [Wikiart](https://www.wikiart.org/). The real photos are downloaded from Flickr using the combination of the tags *landscape* and *landscapephotography*. The training set size of each class is Monet:1074, Cezanne:584, Van Gogh:401, Ukiyo-e:1433, Photographs:6853. 15 | - `iphone2dslr_flower`: both classes of images were downlaoded from Flickr. The training set size of each class is iPhone:1813, DSLR:3316. See more details in our paper. 16 | 17 | To train a model on your own datasets, you need to create a data folder with two subdirectories `trainA` and `trainB` that contain images from domain A and B. You can test your model on your training set by setting `--phase train` in `test.py`. You can also create subdirectories `testA` and `testB` if you have test data. 18 | 19 | You should **not** expect our method to work on just any random combination of input and output datasets (e.g. `cats<->keyboards`). From our experiments, we find it works better if two datasets share similar visual content. For example, `landscape painting<->landscape photographs` works much better than `portrait painting <-> landscape photographs`. `zebras<->horses` achieves compelling results while `cats<->dogs` completely fails. 20 | 21 | ### pix2pix datasets 22 | Download the pix2pix datasets using the following script. Some of the datasets are collected by other researchers. 
Please cite their papers if you use the data. 23 | ```bash 24 | bash ./datasets/download_pix2pix_dataset.sh dataset_name 25 | ``` 26 | - `facades`: 400 images from [CMP Facades dataset](http://cmp.felk.cvut.cz/~tylecr1/facade). [[Citation](../datasets/bibtex/facades.tex)] 27 | - `cityscapes`: 2975 images from the [Cityscapes training set](https://www.cityscapes-dataset.com). [[Citation](../datasets/bibtex/cityscapes.tex)] 28 | - `maps`: 1096 training images scraped from Google Maps 29 | - `edges2shoes`: 50k training images from [UT Zappos50K dataset](http://vision.cs.utexas.edu/projects/finegrained/utzap50k). Edges are computed by [HED](https://github.com/s9xie/hed) edge detector + post-processing. [[Citation](datasets/bibtex/shoes.tex)] 30 | - `edges2handbags`: 137K Amazon Handbag images from [iGAN project](https://github.com/junyanz/iGAN). Edges are computed by [HED](https://github.com/s9xie/hed) edge detector + post-processing. [[Citation](datasets/bibtex/handbags.tex)] 31 | - `night2day`: around 20K natural scene images from [Transient Attributes dataset](http://transattr.cs.brown.edu/) [[Citation](datasets/bibtex/transattr.tex)]. To train a `day2night` pix2pix model, you need to add `--direction BtoA`. 32 | 33 | We provide a python script to generate pix2pix training data in the form of pairs of images {A,B}, where A and B are two different depictions of the same underlying scene. For example, these might be pairs {label map, photo} or {bw image, color image}. Then we can learn to translate A to B or B to A: 34 | 35 | Create folder `/path/to/data` with subfolders `A` and `B`. `A` and `B` should each have their own subfolders `train`, `val`, `test`, etc. In `/path/to/data/A/train`, put training images in style A. In `/path/to/data/B/train`, put the corresponding images in style B. Repeat same for other data splits (`val`, `test`, etc). 36 | 37 | Corresponding images in a pair {A,B} must be the same size and have the same filename, e.g., `/path/to/data/A/train/1.jpg` is considered to correspond to `/path/to/data/B/train/1.jpg`. 38 | 39 | Once the data is formatted this way, call: 40 | ```bash 41 | python datasets/combine_A_and_B.py --fold_A /path/to/data/A --fold_B /path/to/data/B --fold_AB /path/to/data 42 | ``` 43 | 44 | This will combine each pair of images (A,B) into a single image file, ready for training. 45 | -------------------------------------------------------------------------------- /docs/docker.md: -------------------------------------------------------------------------------- 1 | # Docker image with pytorch-CycleGAN-and-pix2pix 2 | 3 | We provide both Dockerfile and pre-built Docker container that can run this code repo. 4 | 5 | ## Prerequisite 6 | 7 | - Install [docker-ce](https://docs.docker.com/install/linux/docker-ce/ubuntu/) 8 | - Install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker#quickstart) 9 | 10 | ## Running pre-built Dockerfile 11 | 12 | - Pull the pre-built docker file 13 | 14 | ```bash 15 | docker pull taesungp/pytorch-cyclegan-and-pix2pix 16 | ``` 17 | 18 | - Start an interactive docker session. `-p 8097:8097` option is needed if you want to run `visdom` server on the Docker container. 19 | 20 | ```bash 21 | nvidia-docker run -it -p 8097:8097 taesungp/pytorch-cyclegan-and-pix2pix 22 | ``` 23 | 24 | - Now you are in the Docker environment. Go to our code repo and start running things. 
25 | ```bash 26 | cd /workspace/pytorch-CycleGAN-and-pix2pix 27 | bash datasets/download_pix2pix_dataset.sh facades 28 | python -m visdom.server & 29 | bash scripts/train_pix2pix.sh 30 | ``` 31 | 32 | ## Running with Dockerfile 33 | 34 | We also posted the [Dockerfile](Dockerfile). To generate the pre-built file, download the Dockerfile in this directory and run 35 | ```bash 36 | docker build -t [target_tag] . 37 | ``` 38 | in the directory that contains the Dockerfile. 39 | -------------------------------------------------------------------------------- /docs/overview.md: -------------------------------------------------------------------------------- 1 | ## Overview of Code Structure 2 | To help users better understand and use our codebase, we briefly overview the functionality and implementation of each package and each module. Please see the documentation in each file for more details. If you have questions, you may find useful information in [training/test tips](tips.md) and [frequently asked questions](qa.md). 3 | 4 | [train.py](../train.py) is a general-purpose training script. It works for various models (with option `--model`: e.g., `pix2pix`, `cyclegan`, `colorization`) and different datasets (with option `--dataset_mode`: e.g., `aligned`, `unaligned`, `single`, `colorization`). See the main [README](.../README.md) and [training/test tips](tips.md) for more details. 5 | 6 | [test.py](../test.py) is a general-purpose test script. Once you have trained your model with `train.py`, you can use this script to test the model. It will load a saved model from `--checkpoints_dir` and save the results to `--results_dir`. See the main [README](.../README.md) and [training/test tips](tips.md) for more details. 7 | 8 | 9 | [data](../data) directory contains all the modules related to data loading and preprocessing. To add a custom dataset class called `dummy`, you need to add a file called `dummy_dataset.py` and define a subclass `DummyDataset` inherited from `BaseDataset`. You need to implement four functions: `__init__` (initialize the class, you need to first call `BaseDataset.__init__(self, opt)`), `__len__` (return the size of dataset), `__getitem__` (get a data point), and optionally `modify_commandline_options` (add dataset-specific options and set default options). Now you can use the dataset class by specifying flag `--dataset_mode dummy`. See our template dataset [class](../data/template_dataset.py) for an example. Below we explain each file in details. 10 | 11 | * [\_\_init\_\_.py](../data/__init__.py) implements the interface between this package and training and test scripts. `train.py` and `test.py` call `from data import create_dataset` and `dataset = create_dataset(opt)` to create a dataset given the option `opt`. 12 | * [base_dataset.py](../data/base_dataset.py) implements an abstract base class ([ABC](https://docs.python.org/3/library/abc.html)) for datasets. It also includes common transformation functions (e.g., `get_transform`, `__scale_width`), which can be later used in subclasses. 13 | * [image_folder.py](../data/image_folder.py) implements an image folder class. We modify the official PyTorch image folder [code](https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py) so that this class can load images from both the current directory and its subdirectories. 14 | * [template_dataset.py](../data/template_dataset.py) provides a dataset template with detailed documentation. Check out this file if you plan to implement your own dataset. 
15 | * [aligned_dataset.py](../data/aligned_dataset.py) includes a dataset class that can load image pairs. It assumes a single image directory `/path/to/data/train`, which contains image pairs in the form of {A,B}. See [here](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md#prepare-your-own-datasets-for-pix2pix) on how to prepare aligned datasets. During test time, you need to prepare a directory `/path/to/data/test` as test data. 16 | * [unaligned_dataset.py](../data/unaligned_dataset.py) includes a dataset class that can load unaligned/unpaired datasets. It assumes that two directories to host training images from domain A `/path/to/data/trainA` and from domain B `/path/to/data/trainB` respectively. Then you can train the model with the dataset flag `--dataroot /path/to/data`. Similarly, you need to prepare two directories `/path/to/data/testA` and `/path/to/data/testB` during test time. 17 | * [single_dataset.py](../data/single_dataset.py) includes a dataset class that can load a set of single images specified by the path `--dataroot /path/to/data`. It can be used for generating CycleGAN results only for one side with the model option `-model test`. 18 | * [colorization_dataset.py](../data/colorization_dataset.py) implements a dataset class that can load a set of nature images in RGB, and convert RGB format into (L, ab) pairs in [Lab](https://en.wikipedia.org/wiki/CIELAB_color_space) color space. It is required by pix2pix-based colorization model (`--model colorization`). 19 | 20 | 21 | [models](../models) directory contains modules related to objective functions, optimizations, and network architectures. To add a custom model class called `dummy`, you need to add a file called `dummy_model.py` and define a subclass `DummyModel` inherited from `BaseModel`. You need to implement four functions: `__init__` (initialize the class; you need to first call `BaseModel.__init__(self, opt)`), `set_input` (unpack data from dataset and apply preprocessing), `forward` (generate intermediate results), `optimize_parameters` (calculate loss, gradients, and update network weights), and optionally `modify_commandline_options` (add model-specific options and set default options). Now you can use the model class by specifying flag `--model dummy`. See our template model [class](../models/template_model.py) for an example. Below we explain each file in details. 22 | 23 | * [\_\_init\_\_.py](../models/__init__.py) implements the interface between this package and training and test scripts. `train.py` and `test.py` call `from models import create_model` and `model = create_model(opt)` to create a model given the option `opt`. You also need to call `model.setup(opt)` to properly initialize the model. 24 | * [base_model.py](../models/base_model.py) implements an abstract base class ([ABC](https://docs.python.org/3/library/abc.html)) for models. It also includes commonly used helper functions (e.g., `setup`, `test`, `update_learning_rate`, `save_networks`, `load_networks`), which can be later used in subclasses. 25 | * [template_model.py](../models/template_model.py) provides a model template with detailed documentation. Check out this file if you plan to implement your own model. 26 | * [pix2pix_model.py](../models/pix2pix_model.py) implements the pix2pix [model](https://phillipi.github.io/pix2pix/), for learning a mapping from input images to output images given paired data. The model training requires `--dataset_mode aligned` dataset. 
By default, it uses a `--netG unet256` [U-Net](https://arxiv.org/pdf/1505.04597.pdf) generator, a `--netD basic` discriminator (PatchGAN), and a `--gan_mode vanilla` GAN loss (standard cross-entropy objective). 27 | * [colorization_model.py](../models/colorization_model.py) implements a subclass of `Pix2PixModel` for image colorization (black & white image to colorful image). The model training requires `-dataset_model colorization` dataset. It trains a pix2pix model, mapping from L channel to ab channels in [Lab](https://en.wikipedia.org/wiki/CIELAB_color_space) color space. By default, the `colorization` dataset will automatically set `--input_nc 1` and `--output_nc 2`. 28 | * [cycle_gan_model.py](../models/cycle_gan_model.py) implements the CycleGAN [model](https://junyanz.github.io/CycleGAN/), for learning image-to-image translation without paired data. The model training requires `--dataset_mode unaligned` dataset. By default, it uses a `--netG resnet_9blocks` ResNet generator, a `--netD basic` discriminator (PatchGAN introduced by pix2pix), and a least-square GANs [objective](https://arxiv.org/abs/1611.04076) (`--gan_mode lsgan`). 29 | * [networks.py](../models/networks.py) module implements network architectures (both generators and discriminators), as well as normalization layers, initialization methods, optimization scheduler (i.e., learning rate policy), and GAN objective function (`vanilla`, `lsgan`, `wgangp`). 30 | * [test_model.py](../models/test_model.py) implements a model that can be used to generate CycleGAN results for only one direction. This model will automatically set `--dataset_mode single`, which only loads the images from one set. See the test [instruction](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix#apply-a-pre-trained-model-cyclegan) for more details. 31 | 32 | [options](../options) directory includes our option modules: training options, test options, and basic options (used in both training and test). `TrainOptions` and `TestOptions` are both subclasses of `BaseOptions`. They will reuse the options defined in `BaseOptions`. 33 | * [\_\_init\_\_.py](../options/__init__.py) is required to make Python treat the directory `options` as containing packages, 34 | * [base_options.py](../options/base_options.py) includes options that are used in both training and test. It also implements a few helper functions such as parsing, printing, and saving the options. It also gathers additional options defined in `modify_commandline_options` functions in both dataset class and model class. 35 | * [train_options.py](../options/train_options.py) includes options that are only used during training time. 36 | * [test_options.py](../options/test_options.py) includes options that are only used during test time. 37 | 38 | 39 | [util](../util) directory includes a miscellaneous collection of useful helper functions. 40 | * [\_\_init\_\_.py](../util/__init__.py) is required to make Python treat the directory `util` as containing packages, 41 | * [get_data.py](../util/get_data.py) provides a Python script for downloading CycleGAN and pix2pix datasets. Alternatively, You can also use bash scripts such as [download_pix2pix_model.sh](../scripts/download_pix2pix_model.sh) and [download_cyclegan_model.sh](../scripts/download_cyclegan_model.sh). 42 | * [html.py](../util/html.py) implements a module that saves images into a single HTML file. 
It consists of functions such as `add_header` (add a text header to the HTML file), `add_images` (add a row of images to the HTML file), `save` (save the HTML to the disk). It is based on Python library `dominate`, a Python library for creating and manipulating HTML documents using a DOM API. 43 | * [image_pool.py](../util/image_pool.py) implements an image buffer that stores previously generated images. This buffer enables us to update discriminators using a history of generated images rather than the ones produced by the latest generators. The original idea was discussed in this [paper](http://openaccess.thecvf.com/content_cvpr_2017/papers/Shrivastava_Learning_From_Simulated_CVPR_2017_paper.pdf). The size of the buffer is controlled by the flag `--pool_size`. 44 | * [visualizer.py](../util/visualizer.py) includes several functions that can display/save images and print/save logging information. It uses a Python library `visdom` for display and a Python library `dominate` (wrapped in `HTML`) for creating HTML files with images. 45 | * [util.py](../util/util.py) consists of simple helper functions such as `tensor2im` (convert a tensor array to a numpy image array), `diagnose_network` (calculate and print the mean of average absolute value of gradients), and `mkdirs` (create multiple directories). 46 | -------------------------------------------------------------------------------- /docs/tips.md: -------------------------------------------------------------------------------- 1 | ## Training/test Tips 2 | #### Training/test options 3 | Please see `options/train_options.py` and `options/base_options.py` for the training flags; see `options/test_options.py` and `options/base_options.py` for the test flags. There are some model-specific flags as well, which are added in the model files, such as `--lambda_A` option in `model/cycle_gan_model.py`. The default values of these options are also adjusted in the model files. 4 | #### CPU/GPU (default `--gpu_ids 0`) 5 | Please set`--gpu_ids -1` to use CPU mode; set `--gpu_ids 0,1,2` for multi-GPU mode. You need a large batch size (e.g., `--batch_size 32`) to benefit from multiple GPUs. 6 | 7 | #### Visualization 8 | During training, the current results can be viewed using two methods. First, if you set `--display_id` > 0, the results and loss plot will appear on a local graphics web server launched by [visdom](https://github.com/facebookresearch/visdom). To do this, you should have `visdom` installed and a server running by the command `python -m visdom.server`. The default server URL is `http://localhost:8097`. `display_id` corresponds to the window ID that is displayed on the `visdom` server. The `visdom` display functionality is turned on by default. To avoid the extra overhead of communicating with `visdom` set `--display_id -1`. Second, the intermediate results are saved to `[opt.checkpoints_dir]/[opt.name]/web/` as an HTML file. To avoid this, set `--no_html`. 9 | 10 | #### Preprocessing 11 | Images can be resized and cropped in different ways using `--preprocess` option. The default option `'resize_and_crop'` resizes the image to be of size `(opt.load_size, opt.load_size)` and does a random crop of size `(opt.crop_size, opt.crop_size)`. `'crop'` skips the resizing step and only performs random cropping. `'scale_width'` resizes the image to have width `opt.crop_size` while keeping the aspect ratio. `'scale_width_and_crop'` first resizes the image to have width `opt.load_size` and then does random cropping of size `(opt.crop_size, opt.crop_size)`. 
`'none'` tries to skip all these preprocessing steps. However, if the image size is not a multiple of some number (which depends on the number of downsampling operations in the generator), you will get an error because the size of the output image may differ from the size of the input image. Therefore, the `'none'` option still tries to adjust the image size to be a multiple of 4. You might need a bigger adjustment if you change the generator architecture. Please see `data/base_dataset.py` to see how all of this is implemented. 12 | 13 | #### Fine-tuning/resume training 14 | To fine-tune a pre-trained model, or resume the previous training, use the `--continue_train` flag. The program will then load the model based on `epoch`. By default, the program will initialize the epoch count as 1. Set `--epoch_count <int>` to specify a different starting epoch count. 15 | 16 | 17 | #### Prepare your own datasets for CycleGAN 18 | You need to create two directories to host images from domain A `/path/to/data/trainA` and from domain B `/path/to/data/trainB`. Then you can train the model with the dataset flag `--dataroot /path/to/data`. Optionally, you can create hold-out test datasets at `/path/to/data/testA` and `/path/to/data/testB` to test your model on unseen images. 19 | 20 | #### Prepare your own datasets for pix2pix 21 | Pix2pix's training requires paired data. We provide a Python script to generate training data in the form of pairs of images {A,B}, where A and B are two different depictions of the same underlying scene. For example, these might be pairs {label map, photo} or {bw image, color image}. Then we can learn to translate A to B or B to A: 22 | 23 | Create folder `/path/to/data` with subdirectories `A` and `B`. `A` and `B` should each have their own subdirectories `train`, `val`, `test`, etc. In `/path/to/data/A/train`, put training images in style A. In `/path/to/data/B/train`, put the corresponding images in style B. Repeat the same for other data splits (`val`, `test`, etc.). 24 | 25 | Corresponding images in a pair {A,B} must be the same size and have the same filename, e.g., `/path/to/data/A/train/1.jpg` is considered to correspond to `/path/to/data/B/train/1.jpg`. 26 | 27 | Once the data is formatted this way, call: 28 | ```bash 29 | python datasets/combine_A_and_B.py --fold_A /path/to/data/A --fold_B /path/to/data/B --fold_AB /path/to/data 30 | ``` 31 | 32 | This will combine each pair of images (A,B) into a single image file, ready for training. 33 | 34 | 35 | #### About image size 36 | Since the generator architecture in CycleGAN involves a series of downsampling / upsampling operations, the size of the input and output image may not match if the input image size is not a multiple of 4. As a result, you may get a runtime error because the L1 identity loss cannot be enforced with images of different sizes. Therefore, we slightly resize the image to be a multiple of 4 even with the `--preprocess none` option. For the same reason, `--crop_size` needs to be a multiple of 4. 37 | 38 | #### Training/Testing with high res images 39 | CycleGAN is quite memory-intensive as four networks (two generators and two discriminators) need to be loaded on one GPU, so a large image cannot be entirely loaded. In this case, we recommend training with cropped images. For example, to generate 1024px results, you can train with `--preprocess scale_width_and_crop --load_size 1024 --crop_size 360`, and test with `--preprocess scale_width --load_size 1024`. This ensures that training and testing are carried out at the same scale.
At test time, you can afford higher resolution because you don’t need to load all networks. 40 | 41 | #### Training/Testing with rectangular images 42 | Both pix2pix and CycleGAN can work with rectangular images. To make them work, you need to use different preprocessing flags. Let's say that you are working with `360x256` images. During training, you can specify `--preprocess crop` and `--crop_size 256`. This will train your model on randomly cropped `256x256` patches. During test time, you can apply the model on `360x256` images with the flag `--preprocess none`. 43 | 44 | There are practical restrictions regarding image sizes for each generator architecture. `unet256` only supports images whose width and height are divisible by 256. For `unet128`, the width and height need to be divisible by 128. For `resnet_6blocks` and `resnet_9blocks`, the width and height need to be divisible by 4. 45 | 46 | #### About loss curve 47 | Unfortunately, the loss curve does not reveal much information when training GANs, and CycleGAN is no exception. To check whether the training has converged or not, we recommend periodically generating a few samples and looking at them. 48 | 49 | #### About batch size 50 | For all experiments in the paper, we set the batch size to 1. If memory allows, you can use a higher batch size with batch norm or instance norm. (Note that the default batchnorm does not work well with multi-GPU training. You may consider using [synchronized batchnorm](https://github.com/vacancy/Synchronized-BatchNorm-PyTorch) instead.) But please be aware that it can impact the training. In particular, even with Instance Normalization, different batch sizes can lead to different results. Moreover, increasing `--crop_size` may be a good alternative to increasing the batch size. 51 | 52 | 53 | #### Notes on Colorization 54 | There is no need to run `combine_A_and_B.py` for colorization. Instead, you need to prepare natural images and set `--dataset_mode colorization` and `--model colorization` in the script. The program will automatically convert each RGB image into Lab color space and create `L -> ab` image pairs during training. Also set `--input_nc 1` and `--output_nc 2`. The training and test directories should be organized as `/your/data/train` and `/your/data/test`. See the example scripts `scripts/train_colorization.sh` and `scripts/test_colorization` for more details. 55 | 56 | #### Notes on Extracting Edges 57 | We provide Python and MATLAB scripts to extract coarse edges from photos. Run `scripts/edges/batch_hed.py` to compute [HED](https://github.com/s9xie/hed) edges. Run `scripts/edges/PostprocessHED.m` to simplify edges with additional post-processing steps. Check the code documentation for more details. 58 | 59 | #### Evaluating Labels2Photos on Cityscapes 60 | We provide scripts for running the evaluation of the Labels2Photos task on the Cityscapes **validation** set. We assume that you have installed `caffe` (and `pycaffe`) on your system. If not, see the [official website](http://caffe.berkeleyvision.org/installation.html) for installation instructions. Once `caffe` is successfully installed, download the pre-trained FCN-8s semantic segmentation model (512MB) by running 61 | ```bash 62 | bash ./scripts/eval_cityscapes/download_fcn8s.sh 63 | ``` 64 | Then make sure `./scripts/eval_cityscapes/` is in your system's Python path.
If not, run the following command to add it: 65 | ```bash 66 | export PYTHONPATH=${PYTHONPATH}:./scripts/eval_cityscapes/ 67 | ``` 68 | Now you can run the following command to evaluate your predictions: 69 | ```bash 70 | python ./scripts/eval_cityscapes/evaluate.py --cityscapes_dir /path/to/original/cityscapes/dataset/ --result_dir /path/to/your/predictions/ --output_dir /path/to/output/directory/ 71 | ``` 72 | Images stored under `--result_dir` should contain your model predictions on the Cityscapes **validation** split, and have the original Cityscapes naming convention (e.g., `frankfurt_000001_038418_leftImg8bit.png`). The script will output a text file under `--output_dir` containing the metric. 73 | 74 | **Further notes**: Our pre-trained FCN model is **not** supposed to work on Cityscapes at the original resolution (1024x2048), as it was trained on 256x256 images that are then upsampled to 1024x2048 during training. The purpose of the resizing during training was to 1) keep the label maps in the original high resolution untouched and 2) avoid the need to change the standard FCN training code and architecture for Cityscapes. During test time, you need to synthesize 256x256 results. Our test code will automatically upsample your results to 1024x2048 before feeding them to the pre-trained FCN model. The output is at 1024x2048 resolution and will be compared to 1024x2048 ground truth labels. You do not need to resize the ground truth labels. The best way to verify that everything is correct is to first reproduce the numbers for real images reported in the paper. To achieve this, you need to resize the original/real Cityscapes images (**not** labels) to 256x256 and feed them to the evaluation code. 75 | -------------------------------------------------------------------------------- /imgs/forkgan.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/seawee1/ForkGAN-pytorch/02d721875d47e4a1e96a14cc4770edcb6b68a5d0/imgs/forkgan.jpg -------------------------------------------------------------------------------- /imgs/horse2zebra.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/seawee1/ForkGAN-pytorch/02d721875d47e4a1e96a14cc4770edcb6b68a5d0/imgs/horse2zebra.gif -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | conda install pytorch=1.7 torchvision torchaudio cudatoolkit=11.0 -c pytorch 2 | pip install visdom 3 | pip install dominate 4 | -------------------------------------------------------------------------------- /label_converter.py: -------------------------------------------------------------------------------- 1 | # Resizes labels 2 | import argparse 3 | import glob 4 | import json 5 | import os 6 | from pathlib import Path 7 | 8 | """ 9 | This script converts labels located inside a Tarsier Label folder based on a resize transform. 10 | As RGB images are resized to a height of 512 pixels, labels have to be transformed accordingly to be used for EagleNet training.
11 | """ 12 | 13 | # Example call: python label_converter.py path/to/Labels scale_height 512 14 | parser = argparse.ArgumentParser(description='Resize labels folder.') 15 | parser.add_argument('parent_dir', type=str, help='Labels parent dir') 16 | parser.add_argument('preprocess', type=str, default='scale_height', help='How to process labels') 17 | parser.add_argument('value', type=int, default=512, help='Preprocess value') 18 | args = parser.parse_args() 19 | 20 | # Tarsier json format 21 | # json -> 'img_id1', 'img_id2' -> 'labels' -> list(dict) 22 | # -> 'depth', ..., 'width', 'height' 23 | 24 | # Find all label files 25 | files = glob.glob(args.parent_dir + '/**/*.json', recursive=True) 26 | # Iterate over all of them 27 | for file in files: 28 | # Open label file 29 | j = json.load(open(file)) 30 | # Get all image ids for that label file 31 | ids = j.keys() 32 | # Iterate over all image ids 33 | for id in ids: 34 | # Get original width/height 35 | width = j[id]['width'] 36 | height = j[id]['height'] 37 | # Calculate scaling value 38 | if args.preprocess == 'scale_height': 39 | s = args.value / height 40 | elif args.preprocess == 'scale_width': 41 | s = args.value / width 42 | else: 43 | raise Exception('Label preprocessing not implemented...') 44 | 45 | # Calculate new width/height 46 | width_new = int(round(width * s)) 47 | height_new = int(round(height * s)) 48 | 49 | # Resize labels 50 | for label in j[id]['labels']: 51 | label['bbox'][0] = int(round(label['bbox'][0] * s)) 52 | label['bbox'][1] = int(round(label['bbox'][1] * s)) 53 | label['bbox'][2] = int(round(label['bbox'][2] * s)) 54 | label['bbox'][3] = int(round(label['bbox'][3] * s)) 55 | 56 | # Set new width, height 57 | j[id]['width'] = width_new 58 | j[id]['height'] = height_new 59 | 60 | # Recreate directory structure inside 'lc_output' folder 61 | target_dir = os.path.join('lc_output', os.path.dirname(file)) 62 | basename = os.path.basename(file) 63 | Path(target_dir).mkdir(exist_ok=True, parents=True) 64 | # Export resized label json file 65 | with open(os.path.join(target_dir, basename), 'w') as f: 66 | json.dump(j, f) 67 | -------------------------------------------------------------------------------- /loss_log_to_plot.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import matplotlib.pyplot as plt 3 | from statsmodels.tsa.api import ExponentialSmoothing 4 | 5 | """ 6 | This script creates a loss plot from a ForkGAN loss_log.txt. 
7 | """ 8 | 9 | # Example call: python loss_log_to_plot.py path/to/loss_log.txt --smoothing 0.1 10 | # If you want to plot only specific losses: python loss_log_to_plot.py path/to/loss_log.txt --smoothing 0.1 --looses G_A_inst,G_A,D_A,D_A_inst 11 | parser = argparse.ArgumentParser(description='Process some integers.') 12 | parser.add_argument(dest='log_file', type=str, help='loss_log.txt path') 13 | parser.add_argument('--losses', default='', type=str, help='Names of losses to plot') 14 | parser.add_argument('--smoothing', default=0.1, type=float, help='Exponential weighting coefficiant for graph smoothing') 15 | args = parser.parse_args() 16 | 17 | log_file = args.log_file 18 | print(f'Plotting {log_file}...') 19 | # Find out loss names to plot 20 | loss_names = ['D_A', 'G_A', 'G_A_rec', 'G_A_fake_rec', 'cycle_A', 'idt_A', 'A_rec', \ 21 | 'D_B', 'G_B', 'G_B_rec', 'G_B_fake_rec', 'cycle_B', 'idt_B', 'B_rec', \ 22 | 'G_DC', 'Perc', 'DC'] 23 | if args.losses != '': 24 | loss_names = args.losses.split(',') 25 | loss_dict = {x: [] for x in loss_names} 26 | 27 | # Open loss_log.txt 28 | f = open(log_file, 'r') 29 | iter_1, iter_2, iter_step = None, None, None 30 | for line in f.readlines(): 31 | if 'D_A' in line: # D_A loss should be in every loss print line. Hacky but works 32 | tokens = line.split(' ') 33 | 34 | if iter_1 is None: 35 | iter_1 = float(tokens[tokens.index('iters:')+1][:-1]) 36 | elif iter_2 is None: 37 | iter_2 = float(tokens[tokens.index('iters:')+1][:-1]) 38 | iter_step = iter_2 - iter_1 39 | 40 | for loss in loss_names: 41 | i = tokens.index(f'{loss}:') 42 | loss_dict[loss].append(float(tokens[i+1])) 43 | 44 | # Exponentially smooth every graph and plot 45 | for loss in loss_names: 46 | exp = ExponentialSmoothing(loss_dict[loss]) 47 | exp_model = exp.fit(smoothing_level=args.smoothing) 48 | result = exp_model.fittedvalues 49 | plt.plot(result, label=loss) 50 | 51 | # Show plot 52 | plt.legend(loc="upper right") 53 | plt.grid() 54 | plt.show() 55 | -------------------------------------------------------------------------------- /models/__init__.py: -------------------------------------------------------------------------------- 1 | """This package contains modules related to objective functions, optimizations, and network architectures. 2 | 3 | To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel. 4 | You need to implement the following five functions: 5 | -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt). 6 | -- : unpack data from dataset and apply preprocessing. 7 | -- : produce intermediate results. 8 | -- : calculate loss, gradients, and update network weights. 9 | -- : (optionally) add model-specific options and set default options. 10 | 11 | In the function <__init__>, you need to define four lists: 12 | -- self.loss_names (str list): specify the training losses that you want to plot and save. 13 | -- self.model_names (str list): define networks used in our training. 14 | -- self.visual_names (str list): specify the images that you want to display and save. 15 | -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an usage. 16 | 17 | Now you can use the model class by specifying flag '--model dummy'. 18 | See our template model class 'template_model.py' for more details. 
19 | """ 20 | 21 | import importlib 22 | from models.base_model import BaseModel 23 | 24 | 25 | def find_model_using_name(model_name): 26 | """Import the module "models/[model_name]_model.py". 27 | 28 | In the file, the class called DatasetNameModel() will 29 | be instantiated. It has to be a subclass of BaseModel, 30 | and it is case-insensitive. 31 | """ 32 | model_filename = "models." + model_name + "_model" 33 | modellib = importlib.import_module(model_filename) 34 | model = None 35 | target_model_name = model_name.replace('_', '') + 'model' 36 | for name, cls in modellib.__dict__.items(): 37 | if name.lower() == target_model_name.lower() \ 38 | and issubclass(cls, BaseModel): 39 | model = cls 40 | 41 | if model is None: 42 | print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name)) 43 | exit(0) 44 | 45 | return model 46 | 47 | 48 | def get_option_setter(model_name): 49 | """Return the static method of the model class.""" 50 | model_class = find_model_using_name(model_name) 51 | return model_class.modify_commandline_options 52 | 53 | 54 | def create_model(opt): 55 | """Create a model given the option. 56 | 57 | This function warps the class CustomDatasetDataLoader. 58 | This is the main interface between this package and 'train.py'/'test.py' 59 | 60 | Example: 61 | >>> from models import create_model 62 | >>> model = create_model(opt) 63 | """ 64 | model = find_model_using_name(opt.model) 65 | instance = model(opt) 66 | print("model [%s] was created" % type(instance).__name__) 67 | return instance 68 | -------------------------------------------------------------------------------- /models/base_model.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | from collections import OrderedDict 4 | from abc import ABC, abstractmethod 5 | from . import networks 6 | 7 | 8 | class BaseModel(ABC): 9 | """This class is an abstract base class (ABC) for models. 10 | To create a subclass, you need to implement the following five functions: 11 | -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt). 12 | -- : unpack data from dataset and apply preprocessing. 13 | -- : produce intermediate results. 14 | -- : calculate losses, gradients, and update network weights. 15 | -- : (optionally) add model-specific options and set default options. 16 | """ 17 | 18 | def __init__(self, opt): 19 | """Initialize the BaseModel class. 20 | 21 | Parameters: 22 | opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions 23 | 24 | When creating your custom class, you need to implement your own initialization. 25 | In this function, you should first call 26 | Then, you need to define four lists: 27 | -- self.loss_names (str list): specify the training losses that you want to plot and save. 28 | -- self.model_names (str list): define networks used in our training. 29 | -- self.visual_names (str list): specify the images that you want to display and save. 30 | -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example. 
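For instance, a sketch of how two generators could share a single optimizer (illustrative only; netG_A, netG_B, and the Adam hyper-parameters are assumptions, not code from this file):

    import itertools
    self.optimizer_G = torch.optim.Adam(
        itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
        lr=opt.lr, betas=(opt.beta1, 0.999))
    self.optimizers.append(self.optimizer_G)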
31 | """ 32 | self.opt = opt 33 | self.gpu = opt.gpu 34 | self.isTrain = opt.isTrain 35 | #self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU 36 | self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir 37 | if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark. 38 | torch.backends.cudnn.benchmark = True 39 | self.loss_names = [] 40 | self.model_names = [] 41 | self.visual_names = [] 42 | self.optimizers = [] 43 | self.image_paths = [] 44 | self.metric = 0 # used for learning rate policy 'plateau' 45 | 46 | @staticmethod 47 | def modify_commandline_options(parser, is_train): 48 | """Add new model-specific options, and rewrite default values for existing options. 49 | 50 | Parameters: 51 | parser -- original option parser 52 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. 53 | 54 | Returns: 55 | the modified parser. 56 | """ 57 | return parser 58 | 59 | @abstractmethod 60 | def set_input(self, input): 61 | """Unpack input data from the dataloader and perform necessary pre-processing steps. 62 | 63 | Parameters: 64 | input (dict): includes the data itself and its metadata information. 65 | """ 66 | pass 67 | 68 | @abstractmethod 69 | def forward(self): 70 | """Run forward pass; called by both functions and .""" 71 | pass 72 | 73 | @abstractmethod 74 | def optimize_parameters(self): 75 | """Calculate losses, gradients, and update network weights; called in every training iteration""" 76 | pass 77 | 78 | def setup(self, opt): 79 | """Load and print networks; create schedulers 80 | 81 | Parameters: 82 | opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions 83 | """ 84 | if self.isTrain: 85 | self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers] 86 | if not self.isTrain or opt.continue_train: 87 | load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch 88 | self.load_networks(load_suffix) 89 | self.print_networks(opt.verbose) 90 | 91 | def eval(self): 92 | """Make models eval mode during test time""" 93 | for name in self.model_names: 94 | if isinstance(name, str): 95 | net = getattr(self, 'net' + name) 96 | net.eval() 97 | 98 | def test(self): 99 | """Forward function used in test time. 
100 | 101 | This function wraps the <forward> function in no_grad() so we don't save intermediate steps for backprop 102 | It also calls <compute_visuals> to produce additional visualization results 103 | """ 104 | with torch.no_grad(): 105 | self.forward() 106 | self.compute_visuals() 107 | 108 | def compute_visuals(self): 109 | """Calculate additional output images for visdom and HTML visualization""" 110 | pass 111 | 112 | def get_image_paths(self): 113 | """ Return image paths that are used to load current data""" 114 | return self.image_paths 115 | 116 | def update_learning_rate(self): 117 | """Update learning rates for all the networks; called at the end of every epoch""" 118 | old_lr = self.optimizers[0].param_groups[0]['lr'] 119 | for scheduler in self.schedulers: 120 | if self.opt.lr_policy == 'plateau': 121 | scheduler.step(self.metric) 122 | else: 123 | scheduler.step() 124 | 125 | lr = self.optimizers[0].param_groups[0]['lr'] 126 | print('learning rate %.7f -> %.7f' % (old_lr, lr)) 127 | 128 | def get_current_visuals(self): 129 | """Return visualization images. train.py will display these images with visdom, and save the images to an HTML file""" 130 | visual_ret = OrderedDict() 131 | for name in self.visual_names: 132 | if isinstance(name, str): 133 | visual_ret[name] = getattr(self, name) 134 | return visual_ret 135 | 136 | def get_current_losses(self): 137 | """Return training losses / errors. train.py will print out these errors on console, and save them to a file""" 138 | errors_ret = OrderedDict() 139 | for name in self.loss_names: 140 | if isinstance(name, str): 141 | errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number 142 | return errors_ret 143 | 144 | def save_networks(self, epoch): 145 | """Save all the networks to the disk. 146 | 147 | Parameters: 148 | epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) 149 | """ 150 | for name in self.model_names: 151 | if isinstance(name, str): 152 | save_filename = '%s_net_%s.pth' % (epoch, name) 153 | save_path = os.path.join(self.save_dir, save_filename) 154 | net = getattr(self, 'net' + name) 155 | torch.save(net.cpu().state_dict(), save_path) 156 | net.cuda('cuda:' + str(self.gpu)) 157 | 158 | if hasattr(self, 'optimizer_names'): 159 | for name in self.optimizer_names: 160 | save_filename = '%s_%s.pth' % (epoch, name) 161 | save_path = os.path.join(self.save_dir, save_filename) 162 | optim = getattr(self, name) 163 | torch.save(optim.state_dict(), save_path) 164 | 165 | if hasattr(self, 'scaler'): 166 | scaler = getattr(self, 'scaler') 167 | save_filename = '%s_scaler.pth' % (epoch) 168 | save_path = os.path.join(self.save_dir, save_filename) 169 | torch.save(scaler.state_dict(), save_path) 170 | 171 | def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): 172 | """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)""" 173 | key = keys[i] 174 | if i + 1 == len(keys): # at the end, pointing to a parameter/buffer 175 | if module.__class__.__name__.startswith('InstanceNorm') and \ 176 | (key == 'running_mean' or key == 'running_var'): 177 | if getattr(module, key) is None: 178 | state_dict.pop('.'.join(keys)) 179 | if module.__class__.__name__.startswith('InstanceNorm') and \ 180 | (key == 'num_batches_tracked'): 181 | state_dict.pop('.'.join(keys)) 182 | else: 183 | self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) 184 | 185 | def load_networks(self, epoch): 186 | """Load all the networks from the disk.
187 | 188 | Parameters: 189 | epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) 190 | """ 191 | for name in self.model_names: 192 | if isinstance(name, str): 193 | load_filename = '%s_net_%s.pth' % (epoch, name) 194 | load_path = os.path.join(self.save_dir, load_filename) 195 | if not os.path.isfile(load_path): 196 | print('%s not found... skipping...' % load_path) 197 | continue 198 | net = getattr(self, 'net' + name) 199 | print('loading the model from %s' % load_path) 200 | # if you are using PyTorch newer than 0.4 (e.g., built from 201 | # GitHub source), you can remove str() on self.device 202 | state_dict = torch.load(load_path, map_location=str('cuda:' + str(self.gpu))) 203 | if hasattr(state_dict, '_metadata'): 204 | del state_dict._metadata 205 | 206 | # patch InstanceNorm checkpoints prior to 0.4 207 | for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop 208 | self.__patch_instance_norm_state_dict(state_dict, net, key.split('.')) 209 | net.load_state_dict(state_dict) 210 | 211 | if hasattr(self, 'optimizer_names'): 212 | for name in self.optimizer_names: 213 | load_filename = '%s_%s.pth' % (epoch, name) 214 | load_path = os.path.join(self.save_dir, load_filename) 215 | if not os.path.isfile(load_path): 216 | print('%s not found... skipping...' % load_path) 217 | continue 218 | optim = getattr(self, name) 219 | state_dict = torch.load(load_path, map_location=str('cuda:' + str(self.gpu))) 220 | if hasattr(state_dict, '_metadata'): 221 | del state_dict._metadata 222 | optim.load_state_dict(state_dict) 223 | 224 | if hasattr(self, 'scaler'): 225 | scaler = getattr(self, 'scaler') 226 | load_filename = '%s_scaler.pth' % (epoch) 227 | load_path = os.path.join(self.save_dir, load_filename) 228 | if not os.path.isfile(load_path): 229 | print('%s not found... skipping...' 
% load_path) 230 | else: 231 | print('loading the model from %s' % load_path) 232 | state_dict = torch.load(load_path) 233 | scaler.load_state_dict(state_dict) 234 | 235 | def print_networks(self, verbose): 236 | """Print the total number of parameters in the network and (if verbose) network architecture 237 | 238 | Parameters: 239 | verbose (bool) -- if verbose: print the network architecture 240 | """ 241 | print('---------- Networks initialized -------------') 242 | for name in self.model_names: 243 | if isinstance(name, str): 244 | net = getattr(self, 'net' + name) 245 | num_params = 0 246 | for param in net.parameters(): 247 | num_params += param.numel() 248 | if verbose: 249 | print(net) 250 | print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6)) 251 | print('-----------------------------------------------') 252 | 253 | def set_requires_grad(self, nets, requires_grad=False): 254 | """Set requires_grad=False for all the networks to avoid unnecessary computations 255 | Parameters: 256 | nets (network list) -- a list of networks 257 | requires_grad (bool) -- whether the networks require gradients or not 258 | """ 259 | if not isinstance(nets, list): 260 | nets = [nets] 261 | for net in nets: 262 | if net is not None: 263 | for param in net.parameters(): 264 | param.requires_grad = requires_grad 265 | 266 | def make_data_parallel(self): 267 | """Make models data parallel""" 268 | if len(self.gpu_ids) == 0: 269 | return 270 | 271 | for name in self.model_names: 272 | if isinstance(name, str): 273 | net = getattr(self, 'net' + name) 274 | net = torch.nn.DataParallel(net, self.gpu_ids) # multi-GPUs 275 | setattr(self, 'net' + name, net) 276 | -------------------------------------------------------------------------------- /options/__init__.py: -------------------------------------------------------------------------------- 1 | """This package includes option modules: training options, test options, and basic options (used in both training and test).""" 2 | -------------------------------------------------------------------------------- /options/base_options.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | from util import util 4 | import torch 5 | import models 6 | import data 7 | import sys 8 | 9 | 10 | class BaseOptions(): 11 | """This class defines options used during both training and test time. 12 | 13 | It also implements several helper functions such as parsing, printing, and saving the options. 14 | It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class. 15 | """ 16 | 17 | def __init__(self): 18 | """Reset the class; indicates the class hasn't been initialized""" 19 | self.initialized = False 20 | 21 | def initialize(self, parser): 22 | """Define the common options that are used in both training and test.""" 23 | # basic parameters 24 | parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)') 25 | parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 26 | parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here') 27 | # model parameters 28 | parser.add_argument('--model', type=str, default='cycle_gan', help='chooses which model to use.
[cycle_gan | pix2pix | test | colorization | fork_gan]') 29 | parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale') 30 | parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale') 31 | parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer') 32 | parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer') 33 | parser.add_argument('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel | msX]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator') 34 | parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]') 35 | parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers') 36 | parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]') 37 | parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]') 38 | parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 39 | parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator') 40 | # dataset parameters 41 | parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | unaligned_coco | aligned | single | colorization]') 42 | parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA') 43 | parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') 44 | parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data') 45 | parser.add_argument('--batch_size', type=int, default=1, help='input batch size') 46 | parser.add_argument('--load_size', type=int, default=286, help='scale images to this size') 47 | parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size') 48 | parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.') 49 | parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]') 50 | parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation') 51 | parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML') 52 | # additional parameters 53 | parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') 54 | parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? 
if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]') 55 | parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information') 56 | parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}') 57 | parser.add_argument('--local_rank', type=int, default=None) 58 | self.initialized = True 59 | return parser 60 | 61 | def gather_options(self): 62 | """Initialize our parser with basic options(only once). 63 | Add additional model-specific and dataset-specific options. 64 | These options are defined in the function 65 | in model and dataset classes. 66 | """ 67 | if not self.initialized: # check if it has been initialized 68 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) 69 | parser = self.initialize(parser) 70 | 71 | # get the basic options 72 | opt, _ = parser.parse_known_args() 73 | 74 | # modify model-related parser options 75 | model_name = opt.model 76 | model_option_setter = models.get_option_setter(model_name) 77 | parser = model_option_setter(parser, self.isTrain) 78 | opt, _ = parser.parse_known_args() # parse again with new defaults 79 | 80 | # modify dataset-related parser options 81 | dataset_name = opt.dataset_mode 82 | dataset_option_setter = data.get_option_setter(dataset_name) 83 | parser = dataset_option_setter(parser, self.isTrain) 84 | 85 | # save and return the parser 86 | self.parser = parser 87 | return parser.parse_args() 88 | 89 | def print_options(self, opt): 90 | """Print and save options 91 | 92 | It will print both current options and default values(if different). 93 | It will save options into a text file / [checkpoints_dir] / opt.txt 94 | """ 95 | message = '' 96 | message += '----------------- Options ---------------\n' 97 | for k, v in sorted(vars(opt).items()): 98 | comment = '' 99 | default = self.parser.get_default(k) 100 | if v != default: 101 | comment = '\t[default: %s]' % str(default) 102 | message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) 103 | message += '----------------- End -------------------' 104 | print(message) 105 | 106 | # save to the disk 107 | expr_dir = os.path.join(opt.checkpoints_dir, opt.name) 108 | util.mkdirs(expr_dir) 109 | file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase)) 110 | with open(file_name, 'wt') as opt_file: 111 | opt_file.write(message) 112 | opt_file.write('\n') 113 | 114 | def parse(self): 115 | """Parse our options, create checkpoints directory suffix""" 116 | opt = self.gather_options() 117 | opt.isTrain = self.isTrain # train or test 118 | 119 | # process opt.suffix 120 | if opt.suffix: 121 | suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else '' 122 | opt.name = opt.name + suffix 123 | 124 | # GPU stuff 125 | # TODO: CPU not working so far 126 | opt.gpu = 0 127 | opt.distributed = False 128 | opt.ngpus = 1 129 | if opt.local_rank is not None: 130 | opt.gpu = opt.local_rank 131 | opt.distributed = True 132 | # Watch out: This implementation will always assume that all available GPUs will be used for training 133 | #opt.ngpus = torch.cuda.device_count() 134 | #opt.batch_size = int(opt.batch_size / opt.ngpus) 135 | #opt.num_threads = int(opt.num_threads / opt.ngpus) 136 | torch.cuda.set_device(opt.gpu) 137 | 138 | # Block print for other threads than GPU-ID 0 thread 139 | if opt.gpu != 0: 140 | blockPrint() 141 | 142 | self.print_options(opt) 
143 | self.opt = opt 144 | return self.opt 145 | 146 | 147 | def blockPrint(): 148 | sys.stdout = open(os.devnull, 'w') 149 | 150 | def enablePrint(): 151 | sys.stdout = sys.__stdout__ 152 | -------------------------------------------------------------------------------- /options/test_options.py: -------------------------------------------------------------------------------- 1 | from .base_options import BaseOptions 2 | import sys 3 | 4 | class TestOptions(BaseOptions): 5 | """This class includes test options. 6 | 7 | It also includes shared options defined in BaseOptions. 8 | """ 9 | 10 | def initialize(self, parser): 11 | parser = BaseOptions.initialize(self, parser) # define shared options 12 | parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.') 13 | parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images') 14 | parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc') 15 | # Dropout and BatchNorm have different behavior during training and test. 16 | parser.add_argument('--eval', action='store_true', help='use eval mode during test time.') 17 | parser.add_argument('--num_test', type=int, default=sys.maxsize, help='how many test images to run') 18 | 19 | parser.add_argument('--coco_test', action='store_true', help='translate a COCO dataset') 20 | # rewrite default values 21 | parser.set_defaults(model='test') 22 | # To avoid cropping, the load_size should be the same as crop_size 23 | parser.set_defaults(load_size=parser.get_default('crop_size')) 24 | self.isTrain = False 25 | return parser 26 | -------------------------------------------------------------------------------- /options/train_options.py: -------------------------------------------------------------------------------- 1 | from .base_options import BaseOptions 2 | 3 | 4 | class TrainOptions(BaseOptions): 5 | """This class includes training options. 6 | 7 | It also includes shared options defined in BaseOptions.
8 | """ 9 | 10 | def initialize(self, parser): 11 | parser = BaseOptions.initialize(self, parser) 12 | # visdom and HTML visualization parameters 13 | parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen') 14 | parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.') 15 | parser.add_argument('--display_id', type=int, default=1, help='window id of the web display') 16 | parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display') 17 | parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")') 18 | parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display') 19 | parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html') 20 | parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console') 21 | parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/') 22 | # network saving and loading parameters 23 | parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results') 24 | parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 25 | parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration') 26 | parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') 27 | parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 28 | parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') 29 | # training parameters 30 | parser.add_argument('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate') 31 | parser.add_argument('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero') 32 | parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') 33 | parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam') 34 | parser.add_argument('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.') 35 | parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images') 36 | parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. 
[linear | step | plateau | cosine]') 37 | parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 38 | self.isTrain = True 39 | return parser 40 | 41 | def parse(self): 42 | opt = BaseOptions.parse(self) 43 | return opt -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | # Standard Experiment 2 | #python -m torch.distributed.launch --nproc_per_node=8 train.py --dataroot ./datasets/dataset --name model_name --model fork_gan --load_size 512 --crop_size 512 --preprocess scale_height_and_crop --input_nc 1 --output_nc 1 --display_freq 100 --batch_size 8 --netD ms3 --lambda_identity 0.0 --num_threads 16 --lr 0.0006 --n_epochs 10 --n_epochs_decay 10 --save_latest_freq 1000 --display_freq 100 --display_id -1 --continue_train 3 | #python -m torch.distributed.launch --nproc_per_node=8 train.py --dataroot ./datasets/dataset --name model_name --model fork_gan --load_size 512 --crop_size 512 --preprocess scale_height_and_crop --input_nc 1 --output_nc 1 --display_freq 100 --batch_size 8 --netD ms3 --lambda_identity 0.0 --num_threads 16 --lr 0.0001 --n_epochs 10 --n_epochs_decay 10 --save_latest_freq 1000 --display_freq 100 --display_id -1 --norm none --save_epoch_freq 1 --continue_train 4 | 5 | # Train (no inst) 6 | #python -m torch.distributed.launch --nproc_per_node=8 train.py --dataroot ./datasets/dataset --name model_name --model fork_gan --load_size 512 --crop_size 512 --preprocess scale_height_and_crop --input_nc 1 --output_nc 1 --display_freq 100 --batch_size 8 --netD ms3 --lambda_identity 0.0 --num_threads 16 --lr 0.0001 --n_epochs 10 --n_epochs_decay 10 --save_latest_freq 1000 --display_freq 100 --display_id -1 --norm none --save_epoch_freq 1 --continue_train 7 | 8 | # Train (inst basic) 9 | #python -m torch.distributed.launch --nproc_per_node=1 train.py --dataroot ./datasets/dataset --name model_name --model fork_gan --load_size 512 --crop_size 512 --preprocess scale_height_and_crop --input_nc 1 --output_nc 1 --display_freq 100 --batch_size 1 --netD ms3 --lambda_identity 0.0 --num_threads 4 --lr 0.0001 --n_epochs 10 --n_epochs_decay 10 --save_latest_freq 1000 --display_freq 100 --display_id -1 --norm none --save_epoch_freq 1 --continue_train --dataset_mode unaligned_coco --instance_level --continue_train --epoch latest --coco_imagedir path/to/coco/imagedir 10 | 11 | # Test 12 | #CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --nproc_per_node=1 test.py --dataroot ./datasets/dataset --name model_name --model fork_gan --load_size 512 --crop_size 512 --preprocess scale_height --input_nc 1 --output_nc 1 --netD ms3 --norm none --coco_imagedir path/to/coco/imagedir --dataset_mode unaligned_coco --batch_size 8 --epoch latest --num_threads 8 13 | 14 | CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --nproc_per_node=1 test.py --dataroot ./datasets/dataset --name model_name --model fork_gan --load_size 512 --crop_size 512 --preprocess scale_height --input_nc 1 --output_nc 1 --netD ms3 --norm none --coco_imagedir path/to/coco/imagedir --dataset_mode unaligned_coco --batch_size 8 --epoch latest --num_threads 8 --results_dir results_inst 15 | -------------------------------------------------------------------------------- /svgs/185bbe3a42b0f2df9493da909528a50c.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 
14 | 15 | 16 | -------------------------------------------------------------------------------- /svgs/1b4342b577c22a565bf4275ad350b51c.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /svgs/21fd4e8eecd6bdf1a4d3d6bd1fb8d733.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /svgs/421472f4ff7fdf1fcbb80a776f953e28.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /svgs/4f806e64be00e75b9da6946fa8b30ab9.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /svgs/55d3f040c4b762956ca1504da10e73cf.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /svgs/572d909dcb75f1d90e402fdb7fcbfefc.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /svgs/84df98c65d88c6adf15d4645ffa25e47.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /svgs/91aac9730317276af725abd8cef04ca9.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /svgs/929ed909014029a206f344a28aa47d15.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /svgs/9493f58d962b918a014f0611cbd7a2c8.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /svgs/9592e060056326cb8915a4d5f7f08906.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /svgs/a057a67da77082c6678b0161bfe9361f.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /svgs/a3a832b2fe92d672bd55cda4001fbb7c.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /svgs/bc30d82546823adc821898ae820607df.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 
-------------------------------------------------------------------------------- /svgs/cbfb1b2a33b28eab8a3e59464768e810.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /svgs/d4c4b525c4ba39454b0f939d81d6a2f4.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /svgs/e5d134f35dc4949fab12ec64d186248a.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /svgs/e93accfc68a4cbd2d241d8cc770c7ae0.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /svgs/ef3441dc1d8817d7e91591b8b57cea97.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | """General-purpose test script for image-to-image translation. 2 | 3 | Once you have trained your model with train.py, you can use this script to test the model. 4 | It will load a saved model from '--checkpoints_dir' and save the results to '--results_dir'. 5 | 6 | It first creates model and dataset given the option. It will hard-code some parameters. 7 | It then runs inference for '--num_test' images and save results to an HTML file. 8 | 9 | Example (You need to train models first or download pre-trained models from our website): 10 | Test a CycleGAN model (both sides): 11 | python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan 12 | 13 | Test a CycleGAN model (one side only): 14 | python test.py --dataroot datasets/horse2zebra/testA --name horse2zebra_pretrained --model test --no_dropout 15 | 16 | The option '--model test' is used for generating CycleGAN results only for one side. 17 | This option will automatically set '--dataset_mode single', which only loads the images from one set. 18 | On the contrary, using '--model cycle_gan' requires loading and generating results in both directions, 19 | which is sometimes unnecessary. The results will be saved at ./results/. 20 | Use '--results_dir ' to specify the results directory. 21 | 22 | Test a pix2pix model: 23 | python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA 24 | 25 | See options/base_options.py and options/test_options.py for more test options. 
26 | See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md 27 | See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md 28 | """ 29 | import os 30 | from options.test_options import TestOptions 31 | from data import create_dataset 32 | from models import create_model 33 | from util.visualizer import save_images, save_images_dataset 34 | from util import html 35 | import torch 36 | 37 | 38 | if __name__ == '__main__': 39 | opt = TestOptions().parse() # get test options 40 | if opt.distributed: 41 | torch.distributed.init_process_group(backend='nccl', init_method='env://') 42 | opt.world_size = torch.distributed.get_world_size() 43 | 44 | print(opt.gpu) 45 | 46 | # hard-code some parameters for test 47 | opt.num_threads = 0 # test code only supports num_threads = 0 48 | opt.batch_size = 1 # test code only supports batch_size = 1 49 | opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed. 50 | opt.no_flip = True # no flip; comment this line if results on flipped images are needed. 51 | opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file. 52 | dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options 53 | model = create_model(opt) # create a model given opt.model and other options 54 | model.setup(opt) # regular setup: load and print networks; create schedulers 55 | if opt.dataset_mode == 'unaligned_coco': 56 | if opt.eval: 57 | model.eval() 58 | for i, data in enumerate(dataset): 59 | if i >= opt.num_test: # only apply our model to opt.num_test images. 60 | break 61 | model.set_input(data) # unpack data from data loader 62 | img_path = model.get_image_paths()[0] # get image paths 63 | if os.path.isfile(os.path.join(opt.results_dir, img_path)): 64 | print('Already present, skipping...') 65 | continue 66 | model.test() # run inference 67 | visuals = model.get_current_visuals() # get image results 68 | visuals = {'fake_B': visuals['fake_B']} 69 | 70 | if i % 5 == 0: # save images to an HTML file 71 | print('processing (%04d)-th image... %s' % (i, img_path)) 72 | save_images_dataset(opt.results_dir, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize) 73 | else: 74 | # create a website 75 | web_dir = os.path.join(opt.results_dir, opt.name, '{}_{}'.format(opt.phase, opt.epoch)) # define the website directory 76 | if opt.load_iter > 0: # load_iter is 0 by default 77 | web_dir = '{:s}_iter{:d}'.format(web_dir, opt.load_iter) 78 | print('creating web directory', web_dir) 79 | webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch)) 80 | # test with eval mode. This only affects layers like batchnorm and dropout. 81 | # For [pix2pix]: we use batchnorm and dropout in the original pix2pix. You can experiment it with and without eval() mode. 82 | # For [CycleGAN]: It should not affect CycleGAN as CycleGAN uses instancenorm without dropout. 83 | if opt.eval: 84 | model.eval() 85 | for i, data in enumerate(dataset): 86 | if i >= opt.num_test: # only apply our model to opt.num_test images. 
87 | break 88 | model.set_input(data) # unpack data from data loader 89 | model.test() # run inference 90 | visuals = model.get_current_visuals() # get image results 91 | img_path = model.get_image_paths() # get image paths 92 | if i % 5 == 0: # save images to an HTML file 93 | print('processing (%04d)-th image... %s' % (i, img_path)) 94 | save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize) 95 | webpage.save() # save the HTML 96 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | """General-purpose training script for image-to-image translation. 2 | 3 | This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and 4 | different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization). 5 | You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model'). 6 | 7 | It first creates model, dataset, and visualizer given the option. 8 | It then does standard network training. During the training, it also visualize/save the images, print/save the loss plot, and save models. 9 | The script supports continue/resume training. Use '--continue_train' to resume your previous training. 10 | 11 | Example: 12 | Train a CycleGAN model: 13 | python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan 14 | Train a pix2pix model: 15 | python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA 16 | 17 | See options/base_options.py and options/train_options.py for more training options. 18 | See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md 19 | See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md 20 | """ 21 | import time 22 | from options.train_options import TrainOptions 23 | from data import create_dataset 24 | from models import create_model 25 | from util.visualizer import Visualizer 26 | import torch 27 | from util.util import save_image_grid, save_image_grid_inst 28 | from pathlib import Path 29 | import os 30 | from PIL import ImageDraw 31 | 32 | if __name__ == '__main__': 33 | opt = TrainOptions().parse() # get training options 34 | if opt.distributed: 35 | torch.distributed.init_process_group(backend='nccl', init_method='env://') 36 | opt.world_size = torch.distributed.get_world_size() 37 | opt.ngpus = opt.world_size 38 | opt.batch_size = int(opt.batch_size / opt.ngpus) 39 | opt.num_threads = int(opt.num_threads / opt.ngpus) 40 | 41 | dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options 42 | dataset_size = len(dataset) # get the number of images in the dataset. 
43 |     print('The number of training images = %d' % (dataset_size))
44 |     model = create_model(opt)      # create a model given opt.model and other options
45 |     model.setup(opt)               # regular setup: load and print networks; create schedulers
46 |     if opt.gpu == 0:
47 |         visualizer = Visualizer(opt)   # create a visualizer that displays/saves images and plots
48 |     total_iters = 0                # the total number of training iterations
49 | 
50 |     test_dir = os.path.join(opt.checkpoints_dir, opt.name, 'test_results')
51 |     Path(test_dir).mkdir(exist_ok=True)
52 | 
53 |     for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1):    # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
54 |         epoch_start_time = time.time()  # timer for entire epoch
55 |         iter_data_time = time.time()    # timer for data loading per iteration
56 |         epoch_iter = 0                  # the number of training iterations in current epoch, reset to 0 every epoch
57 |         if opt.gpu == 0:
58 |             visualizer.reset()          # reset the visualizer: make sure it saves the results to HTML at least once every epoch
59 |         model.update_learning_rate()    # update learning rates in the beginning of every epoch.
60 | 
61 |         if opt.distributed:
62 |             dataset.set_epoch(epoch)
63 | 
64 |         for i, data in enumerate(dataset):  # inner loop within one epoch
65 |             iter_start_time = time.time()  # timer for computation per iteration
66 |             if total_iters % opt.print_freq == 0:
67 |                 t_data = iter_start_time - iter_data_time
68 | 
69 |             total_iters += opt.batch_size * opt.ngpus
70 |             epoch_iter += opt.batch_size * opt.ngpus
71 |             model.set_input(data)         # unpack data from dataset and apply preprocessing
72 |             model.optimize_parameters()   # calculate loss functions, get gradients, update network weights
73 | 
74 |             if total_iters % opt.display_freq == 0 and opt.gpu == 0:   # display images on visdom and save images to an HTML file
75 |                 save_result = total_iters % opt.update_html_freq == 0
76 |                 model.compute_visuals()
77 |                 visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)
78 |                 save_image_grid(model.get_current_visuals(), os.path.join(test_dir, f'{epoch}_{epoch_iter}.jpg'))
79 |                 if opt.instance_level != 'none':
80 |                     save_image_grid_inst(model.get_current_visuals(), os.path.join(test_dir, f'{epoch}_{epoch_iter}_inst.jpg'))
81 | 
82 |             if total_iters % opt.print_freq == 0 and opt.gpu == 0:    # print training losses and save logging information to the disk
83 |                 losses = model.get_current_losses()
84 |                 t_comp = (time.time() - iter_start_time) / opt.batch_size
85 |                 visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
86 |                 if opt.display_id > 0:
87 |                     visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)
88 | 
89 |             if total_iters % opt.save_latest_freq == 0 and opt.gpu == 0:   # cache our latest model every <save_latest_freq> iterations
90 |                 print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
91 |                 save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
92 |                 model.save_networks(save_suffix)
93 |             iter_data_time = time.time()
94 | 
95 |         if epoch % opt.save_epoch_freq == 0 and opt.gpu == 0:              # cache our model every <save_epoch_freq> epochs
96 |             print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
97 |             model.save_networks('latest')
98 |             model.save_networks(epoch)
99 |         print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.n_epochs + opt.n_epochs_decay, time.time() - epoch_start_time))
100 | 
101 |     if opt.distributed:
102 |         torch.distributed.destroy_process_group()
103 | 
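A note on the frequency checks in the inner loop above: total_iters advances by opt.batch_size * opt.ngpus per step, and each action only fires when total_iters lands exactly on a multiple of its frequency, so values of --display_freq, --print_freq or --save_latest_freq that are not multiples of that step size fire less often than intended. A minimal, self-contained sketch of the same gating pattern (the numbers are illustrative, not the repo's defaults):

# Sketch of the frequency-gated bookkeeping used in train.py's inner loop.
batch_size, ngpus = 2, 4                      # per-process batch size and number of processes
print_freq, save_latest_freq = 80, 400        # hypothetical frequencies (multiples of 2 * 4 = 8)

total_iters = 0
for step in range(1, 101):
    total_iters += batch_size * ngpus         # samples seen across all processes
    if total_iters % print_freq == 0:         # fires only on exact multiples of print_freq
        print('step %d: would log losses at total_iters=%d' % (step, total_iters))
    if total_iters % save_latest_freq == 0:
        print('step %d: would save the "latest" checkpoint' % step)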
-------------------------------------------------------------------------------- /util/__init__.py: -------------------------------------------------------------------------------- 1 | """This package includes a miscellaneous collection of useful helper functions.""" 2 | -------------------------------------------------------------------------------- /util/get_data.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import os 3 | import tarfile 4 | import requests 5 | from warnings import warn 6 | from zipfile import ZipFile 7 | from bs4 import BeautifulSoup 8 | from os.path import abspath, isdir, join, basename 9 | 10 | 11 | class GetData(object): 12 | """A Python script for downloading CycleGAN or pix2pix datasets. 13 | 14 | Parameters: 15 | technique (str) -- One of: 'cyclegan' or 'pix2pix'. 16 | verbose (bool) -- If True, print additional information. 17 | 18 | Examples: 19 | >>> from util.get_data import GetData 20 | >>> gd = GetData(technique='cyclegan') 21 | >>> new_data_path = gd.get(save_path='./datasets') # options will be displayed. 22 | 23 | Alternatively, You can use bash scripts: 'scripts/download_pix2pix_model.sh' 24 | and 'scripts/download_cyclegan_model.sh'. 25 | """ 26 | 27 | def __init__(self, technique='cyclegan', verbose=True): 28 | url_dict = { 29 | 'pix2pix': 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/', 30 | 'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets' 31 | } 32 | self.url = url_dict.get(technique.lower()) 33 | self._verbose = verbose 34 | 35 | def _print(self, text): 36 | if self._verbose: 37 | print(text) 38 | 39 | @staticmethod 40 | def _get_options(r): 41 | soup = BeautifulSoup(r.text, 'lxml') 42 | options = [h.text for h in soup.find_all('a', href=True) 43 | if h.text.endswith(('.zip', 'tar.gz'))] 44 | return options 45 | 46 | def _present_options(self): 47 | r = requests.get(self.url) 48 | options = self._get_options(r) 49 | print('Options:\n') 50 | for i, o in enumerate(options): 51 | print("{0}: {1}".format(i, o)) 52 | choice = input("\nPlease enter the number of the " 53 | "dataset above you wish to download:") 54 | return options[int(choice)] 55 | 56 | def _download_data(self, dataset_url, save_path): 57 | if not isdir(save_path): 58 | os.makedirs(save_path) 59 | 60 | base = basename(dataset_url) 61 | temp_save_path = join(save_path, base) 62 | 63 | with open(temp_save_path, "wb") as f: 64 | r = requests.get(dataset_url) 65 | f.write(r.content) 66 | 67 | if base.endswith('.tar.gz'): 68 | obj = tarfile.open(temp_save_path) 69 | elif base.endswith('.zip'): 70 | obj = ZipFile(temp_save_path, 'r') 71 | else: 72 | raise ValueError("Unknown File Type: {0}.".format(base)) 73 | 74 | self._print("Unpacking Data...") 75 | obj.extractall(save_path) 76 | obj.close() 77 | os.remove(temp_save_path) 78 | 79 | def get(self, save_path, dataset=None): 80 | """ 81 | 82 | Download a dataset. 83 | 84 | Parameters: 85 | save_path (str) -- A directory to save the data to. 86 | dataset (str) -- (optional). A specific dataset to download. 87 | Note: this must include the file extension. 88 | If None, options will be presented for you 89 | to choose from. 90 | 91 | Returns: 92 | save_path_full (str) -- the absolute path to the downloaded data. 
93 | 
94 |         """
95 |         if dataset is None:
96 |             selected_dataset = self._present_options()
97 |         else:
98 |             selected_dataset = dataset
99 | 
100 |         save_path_full = join(save_path, selected_dataset.split('.')[0])
101 | 
102 |         if isdir(save_path_full):
103 |             warn("\n'{0}' already exists. Voiding Download.".format(
104 |                 save_path_full))
105 |         else:
106 |             self._print('Downloading Data...')
107 |             url = "{0}/{1}".format(self.url, selected_dataset)
108 |             self._download_data(url, save_path=save_path)
109 | 
110 |         return abspath(save_path_full)
111 | 
--------------------------------------------------------------------------------
/util/html.py:
--------------------------------------------------------------------------------
1 | import dominate
2 | from dominate.tags import meta, h3, table, tr, td, p, a, img, br
3 | import os
4 | 
5 | 
6 | class HTML:
7 |     """This HTML class allows us to save images and write texts into a single HTML file.
8 | 
9 |     It consists of functions such as <add_header> (add a text header to the HTML file),
10 |     <add_images> (add a row of images to the HTML file), and <save> (save the HTML to the disk).
11 |     It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API.
12 |     """
13 | 
14 |     def __init__(self, web_dir, title, refresh=0):
15 |         """Initialize the HTML classes
16 | 
17 |         Parameters:
18 |             web_dir (str) -- a directory that stores the webpage. HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir>/images/
19 |             title (str)   -- the webpage name
20 |             refresh (int) -- how often the website refreshes itself; if 0, no refreshing
21 |         """
22 |         self.title = title
23 |         self.web_dir = web_dir
24 |         self.img_dir = os.path.join(self.web_dir, 'images')
25 |         if not os.path.exists(self.web_dir):
26 |             os.makedirs(self.web_dir)
27 |         if not os.path.exists(self.img_dir):
28 |             os.makedirs(self.img_dir)
29 | 
30 |         self.doc = dominate.document(title=title)
31 |         if refresh > 0:
32 |             with self.doc.head:
33 |                 meta(http_equiv="refresh", content=str(refresh))
34 | 
35 |     def get_image_dir(self):
36 |         """Return the directory that stores images"""
37 |         return self.img_dir
38 | 
39 |     def add_header(self, text):
40 |         """Insert a header to the HTML file
41 | 
42 |         Parameters:
43 |             text (str) -- the header text
44 |         """
45 |         with self.doc:
46 |             h3(text)
47 | 
48 |     def add_images(self, ims, txts, links, width=400):
49 |         """add images to the HTML file
50 | 
51 |         Parameters:
52 |             ims (str list)   -- a list of image paths
53 |             txts (str list)  -- a list of image names shown on the website
54 |             links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page
55 |         """
56 |         self.t = table(border=1, style="table-layout: fixed;")  # Insert a table
57 |         self.doc.add(self.t)
58 |         with self.t:
59 |             with tr():
60 |                 for im, txt, link in zip(ims, txts, links):
61 |                     with td(style="word-wrap: break-word;", halign="center", valign="top"):
62 |                         with p():
63 |                             with a(href=os.path.join('images', link)):
64 |                                 img(style="width:%dpx" % width, src=os.path.join('images', im))
65 |                             br()
66 |                             p(txt)
67 | 
68 |     def save(self):
69 |         """save the current content to the HTML file"""
70 |         html_file = '%s/index.html' % self.web_dir
71 |         f = open(html_file, 'wt')
72 |         f.write(self.doc.render())
73 |         f.close()
74 | 
75 | 
76 | if __name__ == '__main__':  # we show an example usage here.
77 |     html = HTML('web/', 'test_html')
78 |     html.add_header('hello world')
79 | 
80 |     ims, txts, links = [], [], []
81 |     for n in range(4):
82 |         ims.append('image_%d.png' % n)
83 |         txts.append('text_%d' % n)
84 |         links.append('image_%d.png' % n)
85 |     html.add_images(ims, txts, links)
86 |     html.save()
87 | 
--------------------------------------------------------------------------------
/util/image_pool.py:
--------------------------------------------------------------------------------
1 | import random
2 | import torch
3 | 
4 | 
5 | class ImagePool():
6 |     """This class implements an image buffer that stores previously generated images.
7 | 8 | This buffer enables us to update discriminators using a history of generated images 9 | rather than the ones produced by the latest generators. 10 | """ 11 | 12 | def __init__(self, pool_size): 13 | """Initialize the ImagePool class 14 | 15 | Parameters: 16 | pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created 17 | """ 18 | self.pool_size = pool_size 19 | if self.pool_size > 0: # create an empty pool 20 | self.num_imgs = 0 21 | self.images = [] 22 | 23 | def query(self, images): 24 | """Return an image from the pool. 25 | 26 | Parameters: 27 | images: the latest generated images from the generator 28 | 29 | Returns images from the buffer. 30 | 31 | By 50/100, the buffer will return input images. 32 | By 50/100, the buffer will return images previously stored in the buffer, 33 | and insert the current images to the buffer. 34 | """ 35 | if self.pool_size == 0: # if the buffer size is 0, do nothing 36 | return images 37 | return_images = [] 38 | for image in images: 39 | if image.dim() == 3: 40 | image = torch.unsqueeze(image.data, 0) 41 | if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer 42 | self.num_imgs = self.num_imgs + 1 43 | self.images.append(image) 44 | return_images.append(image) 45 | else: 46 | p = random.uniform(0, 1) 47 | if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer 48 | random_id = random.randint(0, self.pool_size - 1) # randint is inclusive 49 | tmp = self.images[random_id].clone() 50 | self.images[random_id] = image 51 | return_images.append(tmp) 52 | else: # by another 50% chance, the buffer will return the current image 53 | return_images.append(image) 54 | if isinstance(images, torch.Tensor): # Return Tensor if queried with Tensor. Else return list. 55 | return_images = torch.cat(return_images, 0) # collect all the images and return 56 | return return_images 57 | -------------------------------------------------------------------------------- /util/util.py: -------------------------------------------------------------------------------- 1 | """This module contains simple helper functions """ 2 | from __future__ import print_function 3 | import torch 4 | import numpy as np 5 | from PIL import Image 6 | import os 7 | 8 | 9 | def tensor2im(input_image, imtype=np.uint8): 10 | """"Converts a Tensor array into a numpy image array. 
11 | 12 | Parameters: 13 | input_image (tensor) -- the input image tensor array 14 | imtype (type) -- the desired type of the converted numpy array 15 | """ 16 | if not isinstance(input_image, np.ndarray): 17 | if isinstance(input_image, torch.Tensor): # get the data from a variable 18 | image_tensor = input_image.data 19 | else: 20 | return input_image 21 | image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array 22 | if image_numpy.shape[0] == 1: # grayscale to RGB 23 | image_numpy = np.tile(image_numpy, (3, 1, 1)) 24 | image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 # post-processing: tranpose and scaling 25 | else: # if it is a numpy array, do nothing 26 | image_numpy = input_image 27 | return image_numpy.astype(imtype) 28 | 29 | 30 | def diagnose_network(net, name='network'): 31 | """Calculate and print the mean of average absolute(gradients) 32 | 33 | Parameters: 34 | net (torch network) -- Torch network 35 | name (str) -- the name of the network 36 | """ 37 | mean = 0.0 38 | count = 0 39 | for param in net.parameters(): 40 | if param.grad is not None: 41 | mean += torch.mean(torch.abs(param.grad.data)) 42 | count += 1 43 | if count > 0: 44 | mean = mean / count 45 | print(name) 46 | print(mean) 47 | 48 | 49 | def save_image(image_numpy, image_path, aspect_ratio=1.0): 50 | """Save a numpy image to the disk 51 | 52 | Parameters: 53 | image_numpy (numpy array) -- input numpy array 54 | image_path (str) -- the path of the image 55 | """ 56 | 57 | image_pil = Image.fromarray(image_numpy) 58 | h, w, _ = image_numpy.shape 59 | 60 | if aspect_ratio > 1.0: 61 | image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC) 62 | if aspect_ratio < 1.0: 63 | image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC) 64 | image_pil.save(image_path) 65 | 66 | def save_image_grid(image_dict, image_path): 67 | A = np.concatenate(( 68 | tensor2im(image_dict['real_A']), 69 | tensor2im(image_dict['fake_B']), 70 | tensor2im(image_dict['rec_A']), 71 | tensor2im(image_dict['rec_fake_B']), 72 | tensor2im(image_dict['fake_A_']), 73 | ), axis=1) 74 | 75 | B = np.concatenate(( 76 | tensor2im(image_dict['real_B']), 77 | tensor2im(image_dict['fake_A']), 78 | tensor2im(image_dict['rec_B']), 79 | tensor2im(image_dict['rec_fake_A']), 80 | tensor2im(image_dict['fake_B_']), 81 | ), axis=1) 82 | 83 | AB = np.concatenate((A,B), axis=0) 84 | save_image(AB, image_path) 85 | 86 | def save_image_grid_inst(image_dict, image_path, size=(64, 64)): 87 | A = np.concatenate(( 88 | tensor2im(image_dict['real_A_inst']), 89 | tensor2im(image_dict['fake_B_inst']), 90 | tensor2im(image_dict['rec_A_inst']), 91 | tensor2im(image_dict['rec_fake_B_inst']), 92 | tensor2im(image_dict['fake_A__inst']), 93 | ), axis=1) 94 | 95 | B = np.concatenate(( 96 | tensor2im(image_dict['real_B_inst']), 97 | tensor2im(image_dict['fake_A_inst']), 98 | tensor2im(image_dict['rec_B_inst']), 99 | tensor2im(image_dict['rec_fake_A_inst']), 100 | tensor2im(image_dict['fake_B__inst']), 101 | ), axis=1) 102 | 103 | A = Image.fromarray(A) 104 | B = Image.fromarray(B) 105 | AB = Image.new('RGB', (max(A.width, B.width), A.height + B.height)) 106 | AB.paste(A, (0, 0)) 107 | AB.paste(B, (0, A.height)) 108 | AB.save(image_path) 109 | 110 | def resize_np(np_image, size): 111 | im = Image.fromarray(np_image) 112 | im = im.resize(size) 113 | return im.numpy() 114 | 115 | def print_numpy(x, val=True, shp=False): 116 | """Print the mean, min, max, median, std, and size of a numpy array 117 | 118 | 
Parameters: 119 | val (bool) -- if print the values of the numpy array 120 | shp (bool) -- if print the shape of the numpy array 121 | """ 122 | x = x.astype(np.float64) 123 | if shp: 124 | print('shape,', x.shape) 125 | if val: 126 | x = x.flatten() 127 | print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % ( 128 | np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x))) 129 | 130 | 131 | def mkdirs(paths): 132 | """create empty directories if they don't exist 133 | 134 | Parameters: 135 | paths (str list) -- a list of directory paths 136 | """ 137 | if isinstance(paths, list) and not isinstance(paths, str): 138 | for path in paths: 139 | mkdir(path) 140 | else: 141 | mkdir(paths) 142 | 143 | 144 | def mkdir(path): 145 | """create a single empty directory if it didn't exist 146 | 147 | Parameters: 148 | path (str) -- a single directory path 149 | """ 150 | if not os.path.exists(path): 151 | os.makedirs(path) 152 | -------------------------------------------------------------------------------- /util/visualizer.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import sys 4 | import ntpath 5 | import time 6 | from . import util, html 7 | from subprocess import Popen, PIPE 8 | from pathlib import Path 9 | 10 | 11 | if sys.version_info[0] == 2: 12 | VisdomExceptionBase = Exception 13 | else: 14 | VisdomExceptionBase = ConnectionError 15 | 16 | 17 | def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256): 18 | """Save images to the disk. 19 | 20 | Parameters: 21 | webpage (the HTML class) -- the HTML webpage class that stores these imaegs (see html.py for more details) 22 | visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs 23 | image_path (str) -- the string is used to create image paths 24 | aspect_ratio (float) -- the aspect ratio of saved images 25 | width (int) -- the images will be resized to width x width 26 | 27 | This function will save images stored in 'visuals' to the HTML file specified by 'webpage'. 28 | """ 29 | image_dir = webpage.get_image_dir() 30 | short_path = ntpath.basename(image_path[0]) 31 | name = os.path.splitext(short_path)[0] 32 | 33 | webpage.add_header(name) 34 | ims, txts, links = [], [], [] 35 | 36 | for label, im_data in visuals.items(): 37 | im = util.tensor2im(im_data) 38 | image_name = '%s_%s.png' % (name, label) 39 | save_path = os.path.join(image_dir, image_name) 40 | util.save_image(im, save_path, aspect_ratio=aspect_ratio) 41 | ims.append(image_name) 42 | txts.append(label) 43 | links.append(image_name) 44 | webpage.add_images(ims, txts, links, width=width) 45 | 46 | # Only works with COCO dataset 47 | def save_images_dataset(result_dir, visuals, image_path, aspect_ratio=1.0, width=256): 48 | save_path = os.path.join(result_dir, image_path) 49 | Path(os.path.dirname(save_path)).mkdir(parents=True, exist_ok=True) 50 | 51 | for label, im_data in visuals.items(): 52 | im = util.tensor2im(im_data) 53 | util.save_image(im, save_path, aspect_ratio=aspect_ratio) 54 | 55 | class Visualizer(): 56 | """This class includes several functions that can display/save images and print/save logging information. 57 | 58 | It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images. 
59 | """ 60 | 61 | def __init__(self, opt): 62 | """Initialize the Visualizer class 63 | 64 | Parameters: 65 | opt -- stores all the experiment flags; needs to be a subclass of BaseOptions 66 | Step 1: Cache the training/test options 67 | Step 2: connect to a visdom server 68 | Step 3: create an HTML object for saveing HTML filters 69 | Step 4: create a logging file to store training losses 70 | """ 71 | self.opt = opt # cache the option 72 | self.display_id = opt.display_id 73 | self.use_html = opt.isTrain and not opt.no_html 74 | self.win_size = opt.display_winsize 75 | self.name = opt.name 76 | self.port = opt.display_port 77 | self.saved = False 78 | if self.display_id > 0: # connect to a visdom server given and 79 | import visdom 80 | self.ncols = opt.display_ncols 81 | self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env) 82 | if not self.vis.check_connection(): 83 | self.create_visdom_connections() 84 | 85 | if self.use_html: # create an HTML object at /web/; images will be saved under /web/images/ 86 | self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web') 87 | self.img_dir = os.path.join(self.web_dir, 'images') 88 | print('create web directory %s...' % self.web_dir) 89 | util.mkdirs([self.web_dir, self.img_dir]) 90 | # create a logging file to store training losses 91 | self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt') 92 | with open(self.log_name, "a") as log_file: 93 | now = time.strftime("%c") 94 | log_file.write('================ Training Loss (%s) ================\n' % now) 95 | 96 | def reset(self): 97 | """Reset the self.saved status""" 98 | self.saved = False 99 | 100 | def create_visdom_connections(self): 101 | """If the program could not connect to Visdom server, this function will start a new server at port < self.port > """ 102 | cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port 103 | print('\n\nCould not connect to Visdom server. \n Trying to start a server....') 104 | print('Command: %s' % cmd) 105 | Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) 106 | 107 | def display_current_results(self, visuals, epoch, save_result): 108 | """Display current results on visdom; save current results to an HTML file. 109 | 110 | Parameters: 111 | visuals (OrderedDict) - - dictionary of images to display or save 112 | epoch (int) - - the current epoch 113 | save_result (bool) - - if save the current results to an HTML file 114 | """ 115 | if self.display_id > 0: # show images in the browser using visdom 116 | ncols = self.ncols 117 | if ncols > 0: # show all the images in one visdom panel 118 | ncols = min(ncols, len(visuals)) 119 | h, w = next(iter(visuals.values())).shape[:2] 120 | table_css = """""" % (w, h) # create a table css 124 | # create a table of images. 
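# The block below converts each visual to an HWC numpy image (then CHW for visdom),
# builds one label cell per image for an HTML label table, and closes a table row after
# every `ncols` entries; the final row is padded with all-white tiles so vis.images()
# can render the visuals as a fixed-width grid, with the label table sent via vis.text().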
125 | title = self.name 126 | label_html = '' 127 | label_html_row = '' 128 | images = [] 129 | idx = 0 130 | for label, image in visuals.items(): 131 | image_numpy = util.tensor2im(image) 132 | label_html_row += '%s' % label 133 | images.append(image_numpy.transpose([2, 0, 1])) 134 | idx += 1 135 | if idx % ncols == 0: 136 | label_html += '%s' % label_html_row 137 | label_html_row = '' 138 | white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255 139 | while idx % ncols != 0: 140 | images.append(white_image) 141 | label_html_row += '' 142 | idx += 1 143 | if label_html_row != '': 144 | label_html += '%s' % label_html_row 145 | try: 146 | self.vis.images(images, nrow=ncols, win=self.display_id + 1, 147 | padding=2, opts=dict(title=title + ' images')) 148 | label_html = '%s
' % label_html 149 | self.vis.text(table_css + label_html, win=self.display_id + 2, 150 | opts=dict(title=title + ' labels')) 151 | except VisdomExceptionBase: 152 | self.create_visdom_connections() 153 | 154 | else: # show each image in a separate visdom panel; 155 | idx = 1 156 | try: 157 | for label, image in visuals.items(): 158 | image_numpy = util.tensor2im(image) 159 | self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label), 160 | win=self.display_id + idx) 161 | idx += 1 162 | except VisdomExceptionBase: 163 | self.create_visdom_connections() 164 | 165 | if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved. 166 | self.saved = True 167 | # save images to the disk 168 | for label, image in visuals.items(): 169 | image_numpy = util.tensor2im(image) 170 | img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label)) 171 | util.save_image(image_numpy, img_path) 172 | 173 | # update website 174 | webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1) 175 | for n in range(epoch, 0, -1): 176 | webpage.add_header('epoch [%d]' % n) 177 | ims, txts, links = [], [], [] 178 | 179 | for label, image_numpy in visuals.items(): 180 | image_numpy = util.tensor2im(image) 181 | img_path = 'epoch%.3d_%s.png' % (n, label) 182 | ims.append(img_path) 183 | txts.append(label) 184 | links.append(img_path) 185 | webpage.add_images(ims, txts, links, width=self.win_size) 186 | webpage.save() 187 | 188 | def plot_current_losses(self, epoch, counter_ratio, losses): 189 | """display the current losses on visdom display: dictionary of error labels and values 190 | 191 | Parameters: 192 | epoch (int) -- current epoch 193 | counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1 194 | losses (OrderedDict) -- training losses stored in the format of (name, float) pairs 195 | """ 196 | if not hasattr(self, 'plot_data'): 197 | self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())} 198 | self.plot_data['X'].append(epoch + counter_ratio) 199 | self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']]) 200 | try: 201 | self.vis.line( 202 | X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1), 203 | Y=np.array(self.plot_data['Y']), 204 | opts={ 205 | 'title': self.name + ' loss over time', 206 | 'legend': self.plot_data['legend'], 207 | 'xlabel': 'epoch', 208 | 'ylabel': 'loss'}, 209 | win=self.display_id) 210 | except VisdomExceptionBase: 211 | self.create_visdom_connections() 212 | 213 | # losses: same format as |losses| of plot_current_losses 214 | def print_current_losses(self, epoch, iters, losses, t_comp, t_data): 215 | """print current losses on console; also save the losses to the disk 216 | 217 | Parameters: 218 | epoch (int) -- current epoch 219 | iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch) 220 | losses (OrderedDict) -- training losses stored in the format of (name, float) pairs 221 | t_comp (float) -- computational time per data point (normalized by batch_size) 222 | t_data (float) -- data loading time per data point (normalized by batch_size) 223 | """ 224 | message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data) 225 | for k, v in losses.items(): 226 | message += '%s: %.3f ' % (k, v) 227 | 228 | print(message) # print the message 229 | with open(self.log_name, "a") as log_file: 230 | log_file.write('%s\n' % message) # save the 
message 231 | --------------------------------------------------------------------------------
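Usage note for util/image_pool.py above: the pool is meant to be queried when updating the discriminators, so they train against a history of generated images rather than only the most recent ones. A minimal sketch, assuming a pool of size 50 and using random tensors as stand-ins for generator outputs (the commented-out discriminator step is hypothetical, not this repo's actual API):

import torch
from util.image_pool import ImagePool

pool = ImagePool(50)                          # keeps up to 50 previously generated images

for _ in range(3):
    fake_B = torch.randn(1, 3, 256, 256)      # stand-in for a generator output
    fake_B_for_D = pool.query(fake_B)         # once the pool is full, query() returns an older
                                              # image 50% of the time and stores the new one
    # loss_D = criterion(netD(fake_B_for_D.detach()), target_fake)   # hypothetical D update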