├── scripts ├── install_deps.sh ├── test_colorization.sh ├── train_colorization.sh ├── test_cyclegan.sh ├── train_cyclegan.sh ├── test_pix2pix.sh ├── test_single.sh ├── train_pix2pix.sh ├── conda_deps.sh ├── eval_cityscapes │ ├── download_fcn8s.sh │ ├── util.py │ ├── evaluate.py │ ├── cityscapes.py │ └── caffemodel │ │ └── deploy.prototxt ├── download_pix2pix_model.sh ├── train.sh ├── download_cyclegan_model.sh ├── edges │ ├── PostprocessHED.m │ └── batch_hed.py └── test_before_push.py ├── imgs ├── tiger2lion.jpg └── selfie2anime-danbooru.jpg ├── requirements.txt ├── util ├── __init__.py ├── image_pool.py ├── html.py ├── get_data.py ├── util.py └── visualizer.py ├── options ├── __init__.py ├── test_options.py ├── options.py ├── train_options.py └── base_options.py ├── .replit ├── .gitignore ├── data ├── single_dataset.py ├── image_folder.py ├── aligned_dataset.py ├── colorization_dataset.py ├── unaligned_dataset.py ├── template_dataset.py ├── base_dataset.py └── __init__.py ├── environment.yml ├── README.md ├── models ├── colorization_model.py ├── __init__.py ├── test_model.py ├── misc.py ├── template_model.py ├── loss.py ├── pix2pix_model.py ├── cycle_gan_model.py ├── base_model.py └── irw_gan_model.py ├── LICENSE └── main.py /scripts/install_deps.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | pip install visdom 3 | pip install dominate 4 | -------------------------------------------------------------------------------- /imgs/tiger2lion.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mid-Push/IrwGAN/HEAD/imgs/tiger2lion.jpg -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | torch>=1.4.0 2 | torchvision>=0.5.0 3 | dominate>=2.4.0 4 | visdom>=0.1.8.8 5 | -------------------------------------------------------------------------------- /util/__init__.py: -------------------------------------------------------------------------------- 1 | """This package includes a miscellaneous collection of useful helper functions.""" 2 | -------------------------------------------------------------------------------- /imgs/selfie2anime-danbooru.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mid-Push/IrwGAN/HEAD/imgs/selfie2anime-danbooru.jpg -------------------------------------------------------------------------------- /scripts/test_colorization.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | python test.py --dataroot ./datasets/colorization --name color_pix2pix --model colorization 3 | -------------------------------------------------------------------------------- /scripts/train_colorization.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | python train.py --dataroot ./datasets/colorization --name color_pix2pix --model colorization 3 | -------------------------------------------------------------------------------- /scripts/test_cyclegan.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | python test.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan --phase test --no_dropout 3 | -------------------------------------------------------------------------------- /scripts/train_cyclegan.sh: 
-------------------------------------------------------------------------------- 1 | set -ex 2 | python train.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan --pool_size 50 --no_dropout 3 | -------------------------------------------------------------------------------- /options/__init__.py: -------------------------------------------------------------------------------- 1 | """This package options includes option modules: training options, test options, and basic options (used in both training and test).""" 2 | -------------------------------------------------------------------------------- /scripts/test_pix2pix.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | python test.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --netG unet_256 --direction BtoA --dataset_mode aligned --norm batch 3 | -------------------------------------------------------------------------------- /scripts/test_single.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | python test.py --dataroot ./datasets/facades/testB/ --name facades_pix2pix --model test --netG unet_256 --direction BtoA --dataset_mode single --norm batch 3 | -------------------------------------------------------------------------------- /scripts/train_pix2pix.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | python train.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --netG unet_256 --direction BtoA --lambda_L1 100 --dataset_mode aligned --norm batch --pool_size 0 3 | -------------------------------------------------------------------------------- /scripts/conda_deps.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | conda install numpy pyyaml mkl mkl-include setuptools cmake cffi typing 3 | conda install pytorch torchvision -c pytorch # add cuda90 if CUDA 9 4 | conda install visdom dominate -c conda-forge # install visdom and dominate 5 | -------------------------------------------------------------------------------- /scripts/eval_cityscapes/download_fcn8s.sh: -------------------------------------------------------------------------------- 1 | URL=http://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/fcn-8s-cityscapes/fcn-8s-cityscapes.caffemodel 2 | OUTPUT_FILE=./scripts/eval_cityscapes/caffemodel/fcn-8s-cityscapes.caffemodel 3 | wget -N $URL -O $OUTPUT_FILE 4 | -------------------------------------------------------------------------------- /scripts/download_pix2pix_model.sh: -------------------------------------------------------------------------------- 1 | FILE=$1 2 | 3 | echo "Note: available models are edges2shoes, sat2map, map2sat, facades_label2photo, and day2night" 4 | echo "Specified [$FILE]" 5 | 6 | mkdir -p ./checkpoints/${FILE}_pretrained 7 | MODEL_FILE=./checkpoints/${FILE}_pretrained/latest_net_G.pth 8 | URL=http://efrosgans.eecs.berkeley.edu/pix2pix/models-pytorch/$FILE.pth 9 | 10 | wget -N $URL -O $MODEL_FILE 11 | -------------------------------------------------------------------------------- /scripts/train.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | dataset=$1 3 | config=$2 4 | for seed in 123 5 | do 6 | if [ $config = 1 ]; then 7 | python train.py --beta_mode=C --dataroot=../datasets/$dataset --lambda_irw_A=0.0 --lambda_irw_B=0.0 8 | elif [ $config = 2 ]; then 9 | python train.py 
--beta_mode=AB --dataroot=../datasets/$dataset --threshold=0.1 --lambda_irw_A=1.0 --lambda_irw_B=1.0 10 | elif [ $config = 3 ]; then 11 | python train.py --beta_mode=AB --dataroot=../datasets/$dataset --threshold=0.1 --lambda_irw_A=0.0 --lambda_irw_B=0.0 12 | fi 13 | done 14 | -------------------------------------------------------------------------------- /scripts/download_cyclegan_model.sh: -------------------------------------------------------------------------------- 1 | FILE=$1 2 | 3 | echo "Note: available models are apple2orange, orange2apple, summer2winter_yosemite, winter2summer_yosemite, horse2zebra, zebra2horse, monet2photo, style_monet, style_cezanne, style_ukiyoe, style_vangogh, sat2map, map2sat, cityscapes_photo2label, cityscapes_label2photo, facades_photo2label, facades_label2photo, iphone2dslr_flower" 4 | 5 | echo "Specified [$FILE]" 6 | 7 | mkdir -p ./checkpoints/${FILE}_pretrained 8 | MODEL_FILE=./checkpoints/${FILE}_pretrained/latest_net_G.pth 9 | URL=http://efrosgans.eecs.berkeley.edu/cyclegan/pretrained_models/$FILE.pth 10 | 11 | wget -N $URL -O $MODEL_FILE 12 | -------------------------------------------------------------------------------- /.replit: -------------------------------------------------------------------------------- 1 | language = "python3" 2 | run = "
" -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | debug* 3 | datasets/ 4 | checkpoints/ 5 | logs/ 6 | runs/ 7 | results/ 8 | build/ 9 | dist/ 10 | *.png 11 | torch.egg-info/ 12 | */**/__pycache__ 13 | torch/version.py 14 | torch/csrc/generic/TensorMethods.cpp 15 | torch/lib/*.so* 16 | torch/lib/*.dylib* 17 | torch/lib/*.h 18 | torch/lib/build 19 | torch/lib/tmp_install 20 | torch/lib/include 21 | torch/lib/torch_shm_manager 22 | torch/csrc/cudnn/cuDNN.cpp 23 | torch/csrc/nn/THNN.cwrap 24 | torch/csrc/nn/THNN.cpp 25 | torch/csrc/nn/THCUNN.cwrap 26 | torch/csrc/nn/THCUNN.cpp 27 | torch/csrc/nn/THNN_generic.cwrap 28 | torch/csrc/nn/THNN_generic.cpp 29 | torch/csrc/nn/THNN_generic.h 30 | docs/src/**/* 31 | test/data/legacy_modules.t7 32 | test/data/gpu_tensors.pt 33 | test/htmlcov 34 | test/.coverage 35 | */*.pyc 36 | */**/*.pyc 37 | */**/**/*.pyc 38 | */**/**/**/*.pyc 39 | */**/**/**/**/*.pyc 40 | */*.so* 41 | */**/*.so* 42 | */**/*.dylib* 43 | test/data/legacy_serialized.pt 44 | *~ 45 | .idea 46 | -------------------------------------------------------------------------------- /scripts/eval_cityscapes/util.py: -------------------------------------------------------------------------------- 1 | # The following code is modified from https://github.com/shelhamer/clockwork-fcn 2 | import numpy as np 3 | 4 | 5 | def get_out_scoremap(net): 6 | return net.blobs['score'].data[0].argmax(axis=0).astype(np.uint8) 7 | 8 | 9 | def feed_net(net, in_): 10 | """ 11 | Load prepared input into net. 12 | """ 13 | net.blobs['data'].reshape(1, *in_.shape) 14 | net.blobs['data'].data[...] = in_ 15 | 16 | 17 | def segrun(net, in_): 18 | feed_net(net, in_) 19 | net.forward() 20 | return get_out_scoremap(net) 21 | 22 | 23 | def fast_hist(a, b, n): 24 | k = np.where((a >= 0) & (a < n))[0] 25 | bc = np.bincount(n * a[k].astype(int) + b[k], minlength=n**2) 26 | if len(bc) != n**2: 27 | # ignore this example if dimension mismatch 28 | return 0 29 | return bc.reshape(n, n) 30 | 31 | 32 | def get_scores(hist): 33 | # Mean pixel accuracy 34 | acc = np.diag(hist).sum() / (hist.sum() + 1e-12) 35 | 36 | # Per class accuracy 37 | cl_acc = np.diag(hist) / (hist.sum(1) + 1e-12) 38 | 39 | # Per class IoU 40 | iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist) + 1e-12) 41 | 42 | return acc, np.nanmean(cl_acc), np.nanmean(iu), cl_acc, iu 43 | -------------------------------------------------------------------------------- /options/test_options.py: -------------------------------------------------------------------------------- 1 | from .base_options import BaseOptions 2 | 3 | 4 | class TestOptions(BaseOptions): 5 | """This class includes test options. 6 | 7 | It also includes shared options defined in BaseOptions. 8 | """ 9 | 10 | def initialize(self, parser): 11 | parser = BaseOptions.initialize(self, parser) # define shared options 12 | parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.') 13 | parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images') 14 | parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc') 15 | # Dropout and Batchnorm has different behavioir during training and test. 
--------------------------------------------------------------------------------
/options/test_options.py:
--------------------------------------------------------------------------------
1 | from .base_options import BaseOptions
2 | 
3 | 
4 | class TestOptions(BaseOptions):
5 |     """This class includes test options.
6 | 
7 |     It also includes shared options defined in BaseOptions.
8 |     """
9 | 
10 |     def initialize(self, parser):
11 |         parser = BaseOptions.initialize(self, parser)  # define shared options
12 |         parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
13 |         parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
14 |         parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
15 |         # Dropout and BatchNorm have different behavior during training and test.
16 |         parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
17 |         parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')
18 |         # rewrite default values
19 |         parser.set_defaults(model='test')
20 |         # To avoid cropping, the load_size should be the same as crop_size
21 |         parser.set_defaults(load_size=parser.get_default('crop_size'))
22 |         self.isTrain = False
23 |         return parser
24 | 
--------------------------------------------------------------------------------
/data/single_dataset.py:
--------------------------------------------------------------------------------
1 | from data.base_dataset import BaseDataset, get_transform
2 | from data.image_folder import make_dataset
3 | from PIL import Image
4 | 
5 | 
6 | class SingleDataset(BaseDataset):
7 |     """This dataset class can load a set of images specified by the path --dataroot /path/to/data.
8 | 
9 |     It can be used for generating CycleGAN results only for one side with the model option '--model test'.
10 |     """
11 | 
12 |     def __init__(self, opt):
13 |         """Initialize this dataset class.
14 | 
15 |         Parameters:
16 |             opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
17 |         """
18 |         BaseDataset.__init__(self, opt)
19 |         self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
20 |         input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
21 |         self.transform = get_transform(opt, grayscale=(input_nc == 1))
22 | 
23 |     def __getitem__(self, index):
24 |         """Return a data point and its metadata information.
25 | 
26 |         Parameters:
27 |             index - - a random integer for data indexing
28 | 
29 |         Returns a dictionary that contains A and A_paths
30 |             A(tensor) - - an image in one domain
31 |             A_paths(str) - - the path of the image
32 |         """
33 |         A_path = self.A_paths[index]
34 |         A_img = Image.open(A_path).convert('RGB')
35 |         A = self.transform(A_img)
36 |         return {'A': A, 'A_paths': A_path}
37 | 
38 |     def __len__(self):
39 |         """Return the total number of images in the dataset."""
40 |         return len(self.A_paths)
41 | 
--------------------------------------------------------------------------------
/data/image_folder.py:
--------------------------------------------------------------------------------
1 | """A modified image folder class
2 | 
3 | We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
4 | so that this class can load images from both the current directory and its subdirectories.
5 | """ 6 | 7 | import torch.utils.data as data 8 | 9 | from PIL import Image 10 | import os 11 | 12 | IMG_EXTENSIONS = [ 13 | '.jpg', '.JPG', '.jpeg', '.JPEG', 14 | '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', 15 | '.tif', '.TIF', '.tiff', '.TIFF', 16 | ] 17 | 18 | 19 | def is_image_file(filename): 20 | return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) 21 | 22 | 23 | def make_dataset(dir, max_dataset_size=float("inf")): 24 | images = [] 25 | assert os.path.isdir(dir), '%s is not a valid directory' % dir 26 | 27 | for root, _, fnames in sorted(os.walk(dir)): 28 | for fname in fnames: 29 | if is_image_file(fname): 30 | path = os.path.join(root, fname) 31 | images.append(path) 32 | return images[:min(max_dataset_size, len(images))] 33 | 34 | 35 | def default_loader(path): 36 | return Image.open(path).convert('RGB') 37 | 38 | 39 | class ImageFolder(data.Dataset): 40 | 41 | def __init__(self, root, transform=None, return_paths=False, 42 | loader=default_loader): 43 | imgs = make_dataset(root) 44 | if len(imgs) == 0: 45 | raise(RuntimeError("Found 0 images in: " + root + "\n" 46 | "Supported image extensions are: " + ",".join(IMG_EXTENSIONS))) 47 | 48 | self.root = root 49 | self.imgs = imgs 50 | self.transform = transform 51 | self.return_paths = return_paths 52 | self.loader = loader 53 | 54 | def __getitem__(self, index): 55 | path = self.imgs[index] 56 | img = self.loader(path) 57 | if self.transform is not None: 58 | img = self.transform(img) 59 | if self.return_paths: 60 | return img, path 61 | else: 62 | return img 63 | 64 | def __len__(self): 65 | return len(self.imgs) 66 | -------------------------------------------------------------------------------- /scripts/edges/PostprocessHED.m: -------------------------------------------------------------------------------- 1 | %%% Prerequisites 2 | % You need to get the cpp file edgesNmsMex.cpp from https://raw.githubusercontent.com/pdollar/edges/master/private/edgesNmsMex.cpp 3 | % and compile it in Matlab: mex edgesNmsMex.cpp 4 | % You also need to download and install Piotr's Computer Vision Matlab Toolbox: https://pdollar.github.io/toolbox/ 5 | 6 | %%% parameters 7 | % hed_mat_dir: the hed mat file directory (the output of 'batch_hed.py') 8 | % edge_dir: the output HED edges directory 9 | % image_width: resize the edge map to [image_width, image_width] 10 | % threshold: threshold for image binarization (default 25.0/255.0) 11 | % small_edge: remove small edges (default 5) 12 | 13 | function [] = PostprocessHED(hed_mat_dir, edge_dir, image_width, threshold, small_edge) 14 | 15 | if ~exist(edge_dir, 'dir') 16 | mkdir(edge_dir); 17 | end 18 | fileList = dir(fullfile(hed_mat_dir, '*.mat')); 19 | nFiles = numel(fileList); 20 | fprintf('find %d mat files\n', nFiles); 21 | 22 | for n = 1 : nFiles 23 | if mod(n, 1000) == 0 24 | fprintf('process %d/%d images\n', n, nFiles); 25 | end 26 | fileName = fileList(n).name; 27 | filePath = fullfile(hed_mat_dir, fileName); 28 | jpgName = strrep(fileName, '.mat', '.jpg'); 29 | edge_path = fullfile(edge_dir, jpgName); 30 | 31 | if ~exist(edge_path, 'file') 32 | E = GetEdge(filePath); 33 | E = imresize(E,[image_width,image_width]); 34 | E_simple = SimpleEdge(E, threshold, small_edge); 35 | E_simple = uint8(E_simple*255); 36 | imwrite(E_simple, edge_path, 'Quality',100); 37 | end 38 | end 39 | end 40 | 41 | 42 | 43 | 44 | function [E] = GetEdge(filePath) 45 | load(filePath); 46 | E = 1-edge_predict; 47 | end 48 | 49 | function [E4] = SimpleEdge(E, threshold, small_edge) 50 | if nargin 
<= 1
51 |     threshold = 25.0/255.0;
52 | end
53 | 
54 | if nargin <= 2
55 |     small_edge = 5;
56 | end
57 | 
58 | if ndims(E) == 3
59 |     E = E(:,:,1);
60 | end
61 | 
62 | E1 = 1 - E;
63 | E2 = EdgeNMS(E1);
64 | E3 = double(E2>=max(eps,threshold));
65 | E3 = bwmorph(E3,'thin',inf);
66 | E4 = bwareaopen(E3, small_edge);
67 | E4=1-E4;
68 | end
69 | 
70 | function [E_nms] = EdgeNMS( E )
71 | E=single(E);
72 | [Ox,Oy] = gradient2(convTri(E,4));
73 | [Oxx,~] = gradient2(Ox);
74 | [Oxy,Oyy] = gradient2(Oy);
75 | O = mod(atan(Oyy.*sign(-Oxy)./(Oxx+1e-5)),pi);
76 | E_nms = edgesNmsMex(E,O,1,5,1.01,1);
77 | end
78 | 
--------------------------------------------------------------------------------
/util/image_pool.py:
--------------------------------------------------------------------------------
1 | import random
2 | import torch
3 | 
4 | 
5 | class ImagePool():
6 |     """This class implements an image buffer that stores previously generated images.
7 | 
8 |     This buffer enables us to update discriminators using a history of generated images
9 |     rather than the ones produced by the latest generators.
10 |     """
11 | 
12 |     def __init__(self, pool_size):
13 |         """Initialize the ImagePool class
14 | 
15 |         Parameters:
16 |             pool_size (int) -- the size of image buffer, if pool_size=0, no buffer will be created
17 |         """
18 |         self.pool_size = pool_size
19 |         if self.pool_size > 0:  # create an empty pool
20 |             self.num_imgs = 0
21 |             self.images = []
22 | 
23 |     def query(self, images):
24 |         """Return an image from the pool.
25 | 
26 |         Parameters:
27 |             images: the latest generated images from the generator
28 | 
29 |         Returns images from the buffer.
30 | 
31 |         With probability 0.5, the buffer returns the input images.
32 |         With probability 0.5, the buffer returns images previously stored in the buffer,
33 |         and inserts the current images into the buffer.
34 |         """
35 |         if self.pool_size == 0:  # if the buffer size is 0, do nothing
36 |             return images
37 |         return_images = []
38 |         for image in images:
39 |             image = torch.unsqueeze(image.data, 0)
40 |             if self.num_imgs < self.pool_size:  # if the buffer is not full; keep inserting current images to the buffer
41 |                 self.num_imgs = self.num_imgs + 1
42 |                 self.images.append(image)
43 |                 return_images.append(image)
44 |             else:
45 |                 p = random.uniform(0, 1)
46 |                 if p > 0.5:  # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer
47 |                     random_id = random.randint(0, self.pool_size - 1)  # randint is inclusive
48 |                     tmp = self.images[random_id].clone()
49 |                     self.images[random_id] = image
50 |                     return_images.append(tmp)
51 |                 else:  # by another 50% chance, the buffer will return the current image
52 |                     return_images.append(image)
53 |         return_images = torch.cat(return_images, 0)  # collect all the images and return
54 |         return return_images
55 | 
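A short usage sketch for the buffer above (not part of the repo; shapes and pool size are arbitrary):

```python
# Fill a 2-slot pool, then query again: each image in the third batch is
# returned as-is with probability 0.5, or swapped with a stored one.
import torch
from util.image_pool import ImagePool

pool = ImagePool(pool_size=2)
out1 = pool.query(torch.zeros(1, 3, 4, 4))        # pool not full: stored and returned
out2 = pool.query(torch.ones(1, 3, 4, 4))         # pool now full
out3 = pool.query(torch.full((1, 3, 4, 4), 2.0))  # 50/50: new image vs. buffered image
print(out3.shape)  # torch.Size([1, 3, 4, 4])
```

This is why CycleGAN-style training passes discriminator inputs through the pool: the discriminator sees a history of generated images rather than only the latest batch.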
--------------------------------------------------------------------------------
/data/aligned_dataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | from data.base_dataset import BaseDataset, get_params, get_transform
3 | from data.image_folder import make_dataset
4 | from PIL import Image
5 | 
6 | 
7 | class AlignedDataset(BaseDataset):
8 |     """A dataset class for paired image dataset.
9 | 
10 |     It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
11 |     During test time, you need to prepare a directory '/path/to/data/test'.
12 |     """
13 | 
14 |     def __init__(self, opt):
15 |         """Initialize this dataset class.
16 | 
17 |         Parameters:
18 |             opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
19 |         """
20 |         BaseDataset.__init__(self, opt)
21 |         self.dir_AB = os.path.join(opt.dataroot, opt.phase)  # get the image directory
22 |         self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size))  # get image paths
23 |         assert(self.opt.load_size >= self.opt.crop_size)  # crop_size should be smaller than the size of loaded image
24 |         self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
25 |         self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc
26 | 
27 |     def __getitem__(self, index):
28 |         """Return a data point and its metadata information.
29 | 
30 |         Parameters:
31 |             index - - a random integer for data indexing
32 | 
33 |         Returns a dictionary that contains A, B, A_paths and B_paths
34 |             A (tensor) - - an image in the input domain
35 |             B (tensor) - - its corresponding image in the target domain
36 |             A_paths (str) - - image paths
37 |             B_paths (str) - - image paths (same as A_paths)
38 |         """
39 |         # read an image given a random integer index
40 |         AB_path = self.AB_paths[index]
41 |         AB = Image.open(AB_path).convert('RGB')
42 |         # split AB image into A and B
43 |         w, h = AB.size
44 |         w2 = int(w / 2)
45 |         A = AB.crop((0, 0, w2, h))
46 |         B = AB.crop((w2, 0, w, h))
47 | 
48 |         # apply the same transform to both A and B
49 |         transform_params = get_params(self.opt, A.size)
50 |         A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
51 |         B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
52 | 
53 |         A = A_transform(A)
54 |         B = B_transform(B)
55 | 
56 |         return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}
57 | 
58 |     def __len__(self):
59 |         """Return the total number of images in the dataset."""
60 |         return len(self.AB_paths)
61 | 
--------------------------------------------------------------------------------
/scripts/test_before_push.py:
--------------------------------------------------------------------------------
1 | # Simple script to make sure basic usage
2 | # such as training, testing, saving and loading
3 | # runs without errors.
4 | import os 5 | 6 | 7 | def run(command): 8 | print(command) 9 | exit_status = os.system(command) 10 | if exit_status > 0: 11 | exit(1) 12 | 13 | 14 | if __name__ == '__main__': 15 | # download mini datasets 16 | if not os.path.exists('./datasets/mini'): 17 | run('bash ./datasets/download_cyclegan_dataset.sh mini') 18 | 19 | if not os.path.exists('./datasets/mini_pix2pix'): 20 | run('bash ./datasets/download_cyclegan_dataset.sh mini_pix2pix') 21 | 22 | # pretrained cyclegan model 23 | if not os.path.exists('./checkpoints/horse2zebra_pretrained/latest_net_G.pth'): 24 | run('bash ./scripts/download_cyclegan_model.sh horse2zebra') 25 | run('python test.py --model test --dataroot ./datasets/mini --name horse2zebra_pretrained --no_dropout --num_test 1 --no_dropout') 26 | 27 | # pretrained pix2pix model 28 | if not os.path.exists('./checkpoints/facades_label2photo_pretrained/latest_net_G.pth'): 29 | run('bash ./scripts/download_pix2pix_model.sh facades_label2photo') 30 | if not os.path.exists('./datasets/facades'): 31 | run('bash ./datasets/download_pix2pix_dataset.sh facades') 32 | run('python test.py --dataroot ./datasets/facades/ --direction BtoA --model pix2pix --name facades_label2photo_pretrained --num_test 1') 33 | 34 | # cyclegan train/test 35 | run('python train.py --model cycle_gan --name temp_cyclegan --dataroot ./datasets/mini --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 10 --print_freq 1 --display_id -1') 36 | run('python test.py --model test --name temp_cyclegan --dataroot ./datasets/mini --num_test 1 --model_suffix "_A" --no_dropout') 37 | 38 | # pix2pix train/test 39 | run('python train.py --model pix2pix --name temp_pix2pix --dataroot ./datasets/mini_pix2pix --n_epochs 1 --n_epochs_decay 5 --save_latest_freq 10 --display_id -1') 40 | run('python test.py --model pix2pix --name temp_pix2pix --dataroot ./datasets/mini_pix2pix --num_test 1') 41 | 42 | # template train/test 43 | run('python train.py --model template --name temp2 --dataroot ./datasets/mini_pix2pix --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 10 --display_id -1') 44 | run('python test.py --model template --name temp2 --dataroot ./datasets/mini_pix2pix --num_test 1') 45 | 46 | # colorization train/test (optional) 47 | if not os.path.exists('./datasets/mini_colorization'): 48 | run('bash ./datasets/download_cyclegan_dataset.sh mini_colorization') 49 | 50 | run('python train.py --model colorization --name temp_color --dataroot ./datasets/mini_colorization --n_epochs 1 --n_epochs_decay 0 --save_latest_freq 5 --display_id -1') 51 | run('python test.py --model colorization --name temp_color --dataroot ./datasets/mini_colorization --num_test 1') 52 | -------------------------------------------------------------------------------- /data/colorization_dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | from data.base_dataset import BaseDataset, get_transform 3 | from data.image_folder import make_dataset 4 | from skimage import color # require skimage 5 | from PIL import Image 6 | import numpy as np 7 | import torchvision.transforms as transforms 8 | 9 | 10 | class ColorizationDataset(BaseDataset): 11 | """This dataset class can load a set of natural images in RGB, and convert RGB format into (L, ab) pairs in Lab color space. 
12 | 13 | This dataset is required by pix2pix-based colorization model ('--model colorization') 14 | """ 15 | @staticmethod 16 | def modify_commandline_options(parser, is_train): 17 | """Add new dataset-specific options, and rewrite default values for existing options. 18 | 19 | Parameters: 20 | parser -- original option parser 21 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. 22 | 23 | Returns: 24 | the modified parser. 25 | 26 | By default, the number of channels for input image is 1 (L) and 27 | the number of channels for output image is 2 (ab). The direction is from A to B 28 | """ 29 | parser.set_defaults(input_nc=1, output_nc=2, direction='AtoB') 30 | return parser 31 | 32 | def __init__(self, opt): 33 | """Initialize this dataset class. 34 | 35 | Parameters: 36 | opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions 37 | """ 38 | BaseDataset.__init__(self, opt) 39 | self.dir = os.path.join(opt.dataroot, opt.phase) 40 | self.AB_paths = sorted(make_dataset(self.dir, opt.max_dataset_size)) 41 | assert(opt.input_nc == 1 and opt.output_nc == 2 and opt.direction == 'AtoB') 42 | self.transform = get_transform(self.opt, convert=False) 43 | 44 | def __getitem__(self, index): 45 | """Return a data point and its metadata information. 46 | 47 | Parameters: 48 | index - - a random integer for data indexing 49 | 50 | Returns a dictionary that contains A, B, A_paths and B_paths 51 | A (tensor) - - the L channel of an image 52 | B (tensor) - - the ab channels of the same image 53 | A_paths (str) - - image paths 54 | B_paths (str) - - image paths (same as A_paths) 55 | """ 56 | path = self.AB_paths[index] 57 | im = Image.open(path).convert('RGB') 58 | im = self.transform(im) 59 | im = np.array(im) 60 | lab = color.rgb2lab(im).astype(np.float32) 61 | lab_t = transforms.ToTensor()(lab) 62 | A = lab_t[[0], ...] / 50.0 - 1.0 63 | B = lab_t[[1, 2], ...] 
/ 110.0 64 | return {'A': A, 'B': B, 'A_paths': path, 'B_paths': path} 65 | 66 | def __len__(self): 67 | """Return the total number of images in the dataset.""" 68 | return len(self.AB_paths) 69 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: project 2 | channels: 3 | - pytorch 4 | - defaults 5 | dependencies: 6 | - _libgcc_mutex=0.1=main 7 | - blas=1.0=mkl 8 | - ca-certificates=2020.1.1=0 9 | - certifi=2020.6.20=py36_0 10 | - cffi=1.14.0=py36he30daa8_1 11 | - cudatoolkit=10.0.130=0 12 | - freetype=2.10.2=h5ab3b9f_0 13 | - intel-openmp=2020.1=217 14 | - jpeg=9b=h024ee3a_2 15 | - ld_impl_linux-64=2.33.1=h53a641e_7 16 | - libedit=3.1.20191231=h7b6447c_0 17 | - libffi=3.3=he6710b0_1 18 | - libgcc-ng=9.1.0=hdf63c60_0 19 | - libgfortran-ng=7.3.0=hdf63c60_0 20 | - libpng=1.6.37=hbc83047_0 21 | - libstdcxx-ng=9.1.0=hdf63c60_0 22 | - libtiff=4.1.0=h2733197_1 23 | - lz4-c=1.9.2=he6710b0_0 24 | - mkl=2020.1=217 25 | - mkl-service=2.3.0=py36he904b0f_0 26 | - mkl_fft=1.1.0=py36h23d657b_0 27 | - mkl_random=1.1.1=py36h0573a6f_0 28 | - ncurses=6.2=he6710b0_1 29 | - ninja=1.9.0=py36hfd86e86_0 30 | - numpy-base=1.18.5=py36hde5b4d6_0 31 | - olefile=0.46=py36_0 32 | - openssl=1.1.1g=h7b6447c_0 33 | - pillow=7.1.2=py36hb39fc2d_0 34 | - pip=20.1.1=py36_1 35 | - pycparser=2.20=py_0 36 | - python=3.6.10=h7579374_2 37 | - pytorch=1.2.0=py3.6_cuda10.0.130_cudnn7.6.2_0 38 | - readline=8.0=h7b6447c_0 39 | - setuptools=47.3.1=py36_0 40 | - six=1.15.0=py_0 41 | - sqlite=3.32.3=h62c20be_0 42 | - tk=8.6.10=hbc83047_0 43 | - wheel=0.34.2=py36_0 44 | - xz=5.2.5=h7b6447c_0 45 | - zlib=1.2.11=h7b6447c_3 46 | - zstd=1.4.4=h0b5b093_3 47 | - pip: 48 | - absl-py==0.9.0 49 | - astor==0.8.1 50 | - chardet==3.0.4 51 | - cycler==0.10.0 52 | - decorator==4.4.2 53 | - dominate==2.5.1 54 | - future==0.18.2 55 | - gast==0.3.3 56 | - grpcio==1.30.0 57 | - h5py==2.10.0 58 | - idna==2.9 59 | - imageio==2.8.0 60 | - importlib-metadata==1.7.0 61 | - joblib==0.15.1 62 | - jsonpatch==1.26 63 | - jsonpointer==2.0 64 | - keras-applications==1.0.8 65 | - keras-preprocessing==1.1.2 66 | - kiwisolver==1.2.0 67 | - lmdb==0.98 68 | - markdown==3.2.2 69 | - matplotlib==3.2.2 70 | - mock==4.0.2 71 | - msgpack==1.0.0 72 | - networkx==2.4 73 | - numpy==1.16.4 74 | - opencv-python==4.2.0.34 75 | - pandas==1.0.3 76 | - progressbar==2.5 77 | - protobuf==3.12.2 78 | - pyarrow==0.17.1 79 | - pyparsing==2.4.7 80 | - python-dateutil==2.8.1 81 | - pytz==2020.1 82 | - pywavelets==1.1.1 83 | - pyyaml==5.3.1 84 | - pyzmq==19.0.1 85 | - requests==2.24.0 86 | - scikit-image==0.17.2 87 | - scikit-learn==0.23.1 88 | - scipy==0.19.0 89 | - sklearn==0.0 90 | - tensorboard==1.13.1 91 | - tensorboardx==2.0 92 | - tensorflow==1.13.1 93 | - tensorflow-estimator==1.13.0 94 | - termcolor==1.1.0 95 | - threadpoolctl==2.1.0 96 | - tifffile==2020.6.3 97 | - torch==1.5.1 98 | - torchfile==0.1.0 99 | - torchvision==0.6.1 100 | - tornado==6.0.4 101 | - tqdm==4.45.0 102 | - urllib3==1.25.9 103 | - visdom==0.1.8.9 104 | - websocket-client==0.57.0 105 | - werkzeug==1.0.1 106 | - zipp==3.1.0 107 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # IrwGAN (ICCV2021) 2 | ## [Unaligned Image-to-Image Translation by Learning to 
Reweight](https://openaccess.thecvf.com/content/ICCV2021/papers/Xie_Unaligned_Image-to-Image_Translation_by_Learning_to_Reweight_ICCV_2021_paper.pdf)
3 | 
4 | ### [Update] 12/15/2021 All datasets are released; trained models and generated images of IrwGAN are released.
5 | 
6 | ### [Update] 11/16/2021 Code is pushed; selfie2anime-danbooru dataset released.
7 | 
8 | 
9 | ## Dataset
10 | 
11 | **[selfie2anime-danbooru](https://drive.google.com/file/d/1jWjBygCJo5xrorIRJ8g5TprY69nnQuHY/view?usp=sharing)**
12 | |
13 | **[selfie-horse2zebra-dog](https://drive.google.com/file/d/1e6GmypJfLB-1cNm_GoiMV4tYLFZdys9P/view?usp=sharing)**
14 | |
15 | **[horse-cat2dog-anime](https://drive.google.com/file/d/1we6yjKSPYnyXpRl_si7brsNMIf0WOoOz/view?usp=sharing)**
16 | |
17 | **[beetle-tiger2lion-sealion](https://drive.google.com/file/d/1CCfJdkJcrMv6OXQeH3vwjMM4v5l7erSO/view?usp=sharing)**
18 | 
19 | ### Trained Models and Generated Images
20 | 
21 | - **selfie2anime-danbooru   [IrwGAN](https://drive.google.com/file/d/1y1Y153FeregoPG6U_ZomJ63yQ73-Vajt/view?usp=sharing) | [Baseline] | [CycleGAN] |
22 | [MUNIT] | [GcGAN] | [NICE-GAN]**
23 | - **selfie-horse2zebra-dog   [IrwGAN](https://drive.google.com/file/d/1SyFDdSpviXvPQka9wX-Xpj_TavfGkugj/view?usp=sharing) | [Baseline] | [CycleGAN] |
24 | [MUNIT] | [GcGAN] | [NICE-GAN]**
25 | - **horse-cat2dog-anime     [IrwGAN](https://drive.google.com/file/d/1L_n4k8BaC7yXDSMuD0NTqKz97HmfikHW/view?usp=sharing) | [Baseline] | [CycleGAN] |
26 | [MUNIT] | [GcGAN] | [NICE-GAN]**
27 | - **beetle-tiger2lion-sealion [IrwGAN](https://drive.google.com/file/d/1IkhZ2-ywJTbdjzZmMgmAWkbEShCajp5-/view?usp=sharing) | [Baseline] | [CycleGAN] |
28 | [MUNIT] | [GcGAN] | [NICE-GAN]**
29 | 
30 | ### Basic Usage
31 | 
32 | - Training:
33 | ```bash
34 | python main.py --dataroot=datasets/selfie2anime-danbooru
35 | ```
36 | - Resume:
37 | ```bash
38 | python main.py --dataroot=datasets/selfie2anime-danbooru --phase=resume
39 | ```
40 | - Test:
41 | ```bash
42 | python main.py --dataroot=datasets/selfie2anime-danbooru --phase=test
43 | ```
44 | - Beta Mode: use `--beta_mode=A` if domain A is unaligned, `--beta_mode=B` if domain B is unaligned, and `--beta_mode=AB` if both domains are unaligned.
45 | - Effective Sample Size: `lambda_nos_A` and `lambda_nos_B` control how many samples are selected. The higher the weight, the more samples are selected. We use `1.0` across all experiments. (A combined example is given at the end of this README.)
46 | 
47 | 
48 | ### Example Results
49 | 
50 | ![selfie2anime-danbooru results](imgs/selfie2anime-danbooru.jpg)
51 | 
52 | ![tiger2lion results](imgs/tiger2lion.jpg)
53 | 
54 | ## Citation
55 | If you use this code for your research, please cite our [paper](https://openaccess.thecvf.com/content/ICCV2021/papers/Xie_Unaligned_Image-to-Image_Translation_by_Learning_to_Reweight_ICCV_2021_paper.pdf):
56 | 
57 | ```
58 | @inproceedings{xie2021unaligned,
59 |   title={Unaligned Image-to-Image Translation by Learning to Reweight},
60 |   author={Xie, Shaoan and Gong, Mingming and Xu, Yanwu and Zhang, Kun},
61 |   booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
62 |   pages={14174--14184},
63 |   year={2021}
64 | }
65 | ```
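### Combined Example

A sketch combining the options above (the flag names are the ones documented in Basic Usage; the values are illustrative):

```bash
# Both domains unaligned, default effective-sample-size weights
python main.py --dataroot=datasets/selfie2anime-danbooru --beta_mode=AB --lambda_nos_A=1.0 --lambda_nos_B=1.0
```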
--------------------------------------------------------------------------------
/models/colorization_model.py:
--------------------------------------------------------------------------------
1 | from .pix2pix_model import Pix2PixModel
2 | import torch
3 | from skimage import color  # used for lab2rgb
4 | import numpy as np
5 | 
6 | 
7 | class ColorizationModel(Pix2PixModel):
8 |     """This is a subclass of Pix2PixModel for image colorization (black & white image -> colorful images).
9 | 
10 |     The model training requires the '--dataset_mode colorization' dataset.
11 |     It trains a pix2pix model, mapping from L channel to ab channels in Lab color space.
12 |     By default, the colorization dataset will automatically set '--input_nc 1' and '--output_nc 2'.
13 |     """
14 |     @staticmethod
15 |     def modify_commandline_options(parser, is_train=True):
16 |         """Add new dataset-specific options, and rewrite default values for existing options.
17 | 
18 |         Parameters:
19 |             parser -- original option parser
20 |             is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
21 | 
22 |         Returns:
23 |             the modified parser.
24 | 
25 |         By default, we use the 'colorization' dataset for this model.
26 |         See the original pix2pix paper (https://arxiv.org/pdf/1611.07004.pdf) and colorization results (Figure 9 in the paper)
27 |         """
28 |         Pix2PixModel.modify_commandline_options(parser, is_train)
29 |         parser.set_defaults(dataset_mode='colorization')
30 |         return parser
31 | 
32 |     def __init__(self, opt):
33 |         """Initialize the class.
34 | 
35 |         Parameters:
36 |             opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
37 | 
38 |         For visualization, we set 'visual_names' as 'real_A' (input real image),
39 |         'real_B_rgb' (ground truth RGB image), and 'fake_B_rgb' (predicted RGB image).
40 |         We convert the Lab image 'real_B' (inherited from Pix2PixModel) to an RGB image 'real_B_rgb'.
41 |         We convert the Lab image 'fake_B' (inherited from Pix2PixModel) to an RGB image 'fake_B_rgb'.
42 |         """
43 |         # reuse the pix2pix model
44 |         Pix2PixModel.__init__(self, opt)
45 |         # specify the images to be visualized.
46 |         self.visual_names = ['real_A', 'real_B_rgb', 'fake_B_rgb']
47 | 
48 |     def lab2rgb(self, L, AB):
49 |         """Convert an Lab tensor image to an RGB numpy output
50 |         Parameters:
51 |             L  (1-channel tensor array): L channel images (range: [-1, 1], torch tensor array)
52 |             AB (2-channel tensor array): ab channel images (range: [-1, 1], torch tensor array)
53 | 
54 |         Returns:
55 |             rgb (RGB numpy image): rgb output images (range: [0, 255], numpy array)
56 |         """
57 |         AB2 = AB * 110.0
58 |         L2 = (L + 1.0) * 50.0
59 |         Lab = torch.cat([L2, AB2], dim=1)
60 |         Lab = Lab[0].data.cpu().float().numpy()
61 |         Lab = np.transpose(Lab.astype(np.float64), (1, 2, 0))
62 |         rgb = color.lab2rgb(Lab) * 255
63 |         return rgb
64 | 
65 |     def compute_visuals(self):
66 |         """Calculate additional output images for visdom and HTML visualization"""
67 |         self.real_B_rgb = self.lab2rgb(self.real_A, self.real_B)
68 |         self.fake_B_rgb = self.lab2rgb(self.real_A, self.fake_B)
69 | 
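A small consistency check (not a file in the repo): the constants in lab2rgb above exactly invert the normalization applied by data/colorization_dataset.py, which maps A = L/50 - 1 and B = ab/110.

```python
# Round-trip sketch of the Lab scaling shared by the dataset and the model.
import torch

L_orig = torch.tensor([75.0])          # an L value in [0, 100]
ab_orig = torch.tensor([20.0, -30.0])  # ab values in roughly [-110, 110]

A = L_orig / 50.0 - 1.0  # dataset-side normalization -> [-1, 1]
B = ab_orig / 110.0      # dataset-side normalization -> roughly [-1, 1]

# model-side inverse, as in lab2rgb: AB2 = AB * 110, L2 = (L + 1) * 50
assert torch.allclose((A + 1.0) * 50.0, L_orig)
assert torch.allclose(B * 110.0, ab_orig)
```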
--------------------------------------------------------------------------------
/models/__init__.py:
--------------------------------------------------------------------------------
1 | """This package contains modules related to objective functions, optimizations, and network architectures.
2 | 
3 | To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
4 | You need to implement the following five functions:
5 |     -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
6 |     -- <set_input>: unpack data from dataset and apply preprocessing.
7 |     -- <forward>: produce intermediate results.
8 |     -- <optimize_parameters>: calculate loss, gradients, and update network weights.
9 |     -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
10 | 
11 | In the function <__init__>, you need to define four lists:
12 |     -- self.loss_names (str list): specify the training losses that you want to plot and save.
13 |     -- self.model_names (str list): define networks used in our training.
14 |     -- self.visual_names (str list): specify the images that you want to display and save.
15 |     -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.
16 | 
17 | Now you can use the model class by specifying flag '--model dummy'.
18 | See our template model class 'template_model.py' for more details.
19 | """
20 | 
21 | import importlib
22 | from models.base_model import BaseModel
23 | 
24 | 
25 | def find_model_using_name(model_name):
26 |     """Import the module "models/[model_name]_model.py".
27 | 
28 |     In the file, the class called DatasetNameModel() will
29 |     be instantiated. It has to be a subclass of BaseModel,
30 |     and it is case-insensitive.
31 |     """
32 |     model_filename = "models." + model_name + "_model"
33 |     modellib = importlib.import_module(model_filename)
34 |     model = None
35 |     target_model_name = model_name.replace('_', '') + 'model'
36 |     for name, cls in modellib.__dict__.items():
37 |         if name.lower() == target_model_name.lower() \
38 |            and issubclass(cls, BaseModel):
39 |             model = cls
40 | 
41 |     if model is None:
42 |         print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
43 |         exit(0)
44 | 
45 |     return model
46 | 
47 | 
48 | def get_option_setter(model_name):
49 |     """Return the static method <modify_commandline_options> of the model class."""
50 |     model_class = find_model_using_name(model_name)
51 |     return model_class.modify_commandline_options
52 | 
53 | 
54 | def create_model(opt):
55 |     """Create a model given the option.
56 | 
57 |     This function instantiates the model class specified by opt.model.
58 |     This is the main interface between this package and 'train.py'/'test.py'
59 | 
60 |     Example:
61 |         >>> from models import create_model
62 |         >>> model = create_model(opt)
63 |     """
64 |     model = find_model_using_name(opt.model)
65 |     instance = model(opt)
66 |     print("model [%s] was created" % type(instance).__name__)
67 |     return instance
68 | 
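As a concrete illustration of the recipe in the package docstring above, here is a minimal sketch of the hypothetical 'dummy' model (not a file in this repo; the L1 objective and the `--lambda_dummy` option are placeholders, and it assumes the define_G signature used elsewhere in this package):

```python
# models/dummy_model.py (hypothetical): subclass BaseModel, implement the
# five functions, and define the four lists in __init__.
import torch
from .base_model import BaseModel
from . import networks


class DummyModel(BaseModel):
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        parser.add_argument('--lambda_dummy', type=float, default=1.0, help='weight for the dummy loss')
        return parser

    def __init__(self, opt):
        BaseModel.__init__(self, opt)
        self.loss_names = ['G']               # losses to plot/save
        self.model_names = ['G']              # networks to save/load by name
        self.visual_names = ['real', 'fake']  # images to display/save
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
                                      opt.norm, not opt.no_dropout, opt.init_type,
                                      opt.init_gain, self.gpu_ids)
        if self.isTrain:
            self.criterion = torch.nn.L1Loss()
            self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr)
            self.optimizers = [self.optimizer]

    def set_input(self, input):
        self.real = input['A'].to(self.device)
        self.target = input['B'].to(self.device)

    def forward(self):
        self.fake = self.netG(self.real)

    def optimize_parameters(self):
        self.forward()
        self.loss_G = self.criterion(self.fake, self.target) * self.opt.lambda_dummy
        self.optimizer.zero_grad()
        self.loss_G.backward()
        self.optimizer.step()
```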
24 | """ 25 | assert not is_train, 'TestModel cannot be used during training time' 26 | parser.set_defaults(dataset_mode='single') 27 | parser.add_argument('--model_suffix', type=str, default='', help='In checkpoints_dir, [epoch]_net_G[model_suffix].pth will be loaded as the generator.') 28 | 29 | return parser 30 | 31 | def __init__(self, opt): 32 | """Initialize the pix2pix class. 33 | 34 | Parameters: 35 | opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions 36 | """ 37 | assert(not opt.isTrain) 38 | BaseModel.__init__(self, opt) 39 | # specify the training losses you want to print out. The training/test scripts will call 40 | self.loss_names = [] 41 | # specify the images you want to save/display. The training/test scripts will call 42 | self.visual_names = ['real', 'fake'] 43 | # specify the models you want to save to the disk. The training/test scripts will call and 44 | self.model_names = ['G' + opt.model_suffix] # only generator is needed. 45 | self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, 46 | opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) 47 | 48 | # assigns the model to self.netG_[suffix] so that it can be loaded 49 | # please see 50 | setattr(self, 'netG' + opt.model_suffix, self.netG) # store netG in self. 51 | 52 | def set_input(self, input): 53 | """Unpack input data from the dataloader and perform necessary pre-processing steps. 54 | 55 | Parameters: 56 | input: a dictionary that contains the data itself and its metadata information. 57 | 58 | We need to use 'single_dataset' dataset mode. It only load images from one domain. 59 | """ 60 | self.real = input['A'].to(self.device) 61 | self.image_paths = input['A_paths'] 62 | 63 | def forward(self): 64 | """Run forward pass.""" 65 | self.fake = self.netG(self.real) # G(real) 66 | 67 | def optimize_parameters(self): 68 | """No optimization for test model.""" 69 | pass 70 | -------------------------------------------------------------------------------- /scripts/eval_cityscapes/evaluate.py: -------------------------------------------------------------------------------- 1 | import os 2 | import caffe 3 | import argparse 4 | import numpy as np 5 | import scipy.misc 6 | from PIL import Image 7 | from util import segrun, fast_hist, get_scores 8 | from cityscapes import cityscapes 9 | 10 | parser = argparse.ArgumentParser() 11 | parser.add_argument("--cityscapes_dir", type=str, required=True, help="Path to the original cityscapes dataset") 12 | parser.add_argument("--result_dir", type=str, required=True, help="Path to the generated images to be evaluated") 13 | parser.add_argument("--output_dir", type=str, required=True, help="Where to save the evaluation results") 14 | parser.add_argument("--caffemodel_dir", type=str, default='./scripts/eval_cityscapes/caffemodel/', help="Where the FCN-8s caffemodel stored") 15 | parser.add_argument("--gpu_id", type=int, default=0, help="Which gpu id to use") 16 | parser.add_argument("--split", type=str, default='val', help="Data split to be evaluated") 17 | parser.add_argument("--save_output_images", type=int, default=0, help="Whether to save the FCN output images") 18 | args = parser.parse_args() 19 | 20 | 21 | def main(): 22 | if not os.path.isdir(args.output_dir): 23 | os.makedirs(args.output_dir) 24 | if args.save_output_images > 0: 25 | output_image_dir = args.output_dir + 'image_outputs/' 26 | if not os.path.isdir(output_image_dir): 27 | os.makedirs(output_image_dir) 28 | CS = 
cityscapes(args.cityscapes_dir) 29 | n_cl = len(CS.classes) 30 | label_frames = CS.list_label_frames(args.split) 31 | caffe.set_device(args.gpu_id) 32 | caffe.set_mode_gpu() 33 | net = caffe.Net(args.caffemodel_dir + '/deploy.prototxt', 34 | args.caffemodel_dir + 'fcn-8s-cityscapes.caffemodel', 35 | caffe.TEST) 36 | 37 | hist_perframe = np.zeros((n_cl, n_cl)) 38 | for i, idx in enumerate(label_frames): 39 | if i % 10 == 0: 40 | print('Evaluating: %d/%d' % (i, len(label_frames))) 41 | city = idx.split('_')[0] 42 | # idx is city_shot_frame 43 | label = CS.load_label(args.split, city, idx) 44 | im_file = args.result_dir + '/' + idx + '_leftImg8bit.png' 45 | im = np.array(Image.open(im_file)) 46 | im = scipy.misc.imresize(im, (label.shape[1], label.shape[2])) 47 | out = segrun(net, CS.preprocess(im)) 48 | hist_perframe += fast_hist(label.flatten(), out.flatten(), n_cl) 49 | if args.save_output_images > 0: 50 | label_im = CS.palette(label) 51 | pred_im = CS.palette(out) 52 | scipy.misc.imsave(output_image_dir + '/' + str(i) + '_pred.jpg', pred_im) 53 | scipy.misc.imsave(output_image_dir + '/' + str(i) + '_gt.jpg', label_im) 54 | scipy.misc.imsave(output_image_dir + '/' + str(i) + '_input.jpg', im) 55 | 56 | mean_pixel_acc, mean_class_acc, mean_class_iou, per_class_acc, per_class_iou = get_scores(hist_perframe) 57 | with open(args.output_dir + '/evaluation_results.txt', 'w') as f: 58 | f.write('Mean pixel accuracy: %f\n' % mean_pixel_acc) 59 | f.write('Mean class accuracy: %f\n' % mean_class_acc) 60 | f.write('Mean class IoU: %f\n' % mean_class_iou) 61 | f.write('************ Per class numbers below ************\n') 62 | for i, cl in enumerate(CS.classes): 63 | while len(cl) < 15: 64 | cl = cl + ' ' 65 | f.write('%s: acc = %f, iou = %f\n' % (cl, per_class_acc[i], per_class_iou[i])) 66 | 67 | 68 | main() 69 | -------------------------------------------------------------------------------- /util/html.py: -------------------------------------------------------------------------------- 1 | import dominate 2 | from dominate.tags import meta, h3, table, tr, td, p, a, img, br 3 | import os 4 | 5 | 6 | class HTML: 7 | """This HTML class allows us to save images and write texts into a single HTML file. 8 | 9 | It consists of functions such as (add a text header to the HTML file), 10 | (add a row of images to the HTML file), and (save the HTML to the disk). 11 | It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API. 12 | """ 13 | 14 | def __init__(self, web_dir, title, refresh=0): 15 | """Initialize the HTML classes 16 | 17 | Parameters: 18 | web_dir (str) -- a directory that stores the webpage. 
HTML file will be created at <web_dir>/index.html; images will be saved at <web_dir>/images/
19 |             title (str)   -- the webpage name
20 |             refresh (int) -- how often the website refreshes itself; if 0, no refreshing
21 |         """
22 |         self.title = title
23 |         self.web_dir = web_dir
24 |         self.img_dir = os.path.join(self.web_dir, 'images')
25 |         if not os.path.exists(self.web_dir):
26 |             os.makedirs(self.web_dir)
27 |         if not os.path.exists(self.img_dir):
28 |             os.makedirs(self.img_dir)
29 | 
30 |         self.doc = dominate.document(title=title)
31 |         if refresh > 0:
32 |             with self.doc.head:
33 |                 meta(http_equiv="refresh", content=str(refresh))
34 | 
35 |     def get_image_dir(self):
36 |         """Return the directory that stores images"""
37 |         return self.img_dir
38 | 
39 |     def add_header(self, text):
40 |         """Insert a header to the HTML file
41 | 
42 |         Parameters:
43 |             text (str) -- the header text
44 |         """
45 |         with self.doc:
46 |             h3(text)
47 | 
48 |     def add_images(self, ims, txts, links, width=400):
49 |         """add images to the HTML file
50 | 
51 |         Parameters:
52 |             ims (str list)   -- a list of image paths
53 |             txts (str list)  -- a list of image names shown on the website
54 |             links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page
55 |         """
56 |         self.t = table(border=1, style="table-layout: fixed;")  # Insert a table
57 |         self.doc.add(self.t)
58 |         with self.t:
59 |             with tr():
60 |                 for im, txt, link in zip(ims, txts, links):
61 |                     with td(style="word-wrap: break-word;", halign="center", valign="top"):
62 |                         with p():
63 |                             with a(href=os.path.join('images', link)):
64 |                                 img(style="width:%dpx" % width, src=os.path.join('images', im))
65 |                             br()
66 |                             p(txt)
67 | 
68 |     def save(self):
69 |         """save the current content to the HTML file"""
70 |         html_file = '%s/index.html' % self.web_dir
71 |         f = open(html_file, 'wt')
72 |         f.write(self.doc.render())
73 |         f.close()
74 | 
75 | 
76 | if __name__ == '__main__':  # we show an example usage here.
77 |     html = HTML('web/', 'test_html')
78 |     html.add_header('hello world')
79 | 
80 |     ims, txts, links = [], [], []
81 |     for n in range(4):
82 |         ims.append('image_%d.png' % n)
83 |         txts.append('text_%d' % n)
84 |         links.append('image_%d.png' % n)
85 |     html.add_images(ims, txts, links)
86 |     html.save()
87 | 
--------------------------------------------------------------------------------
/data/unaligned_dataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | from data.base_dataset import BaseDataset, get_transform
3 | from data.image_folder import make_dataset
4 | from PIL import Image
5 | import random
6 | 
7 | 
8 | class UnalignedDataset(BaseDataset):
9 |     """
10 |     This dataset class can load unaligned/unpaired datasets.
11 | 
12 |     It requires two directories to host training images from domain A '/path/to/data/trainA'
13 |     and from domain B '/path/to/data/trainB' respectively.
14 |     You can train the model with the dataset flag '--dataroot /path/to/data'.
15 |     Similarly, you need to prepare two directories:
16 |     '/path/to/data/testA' and '/path/to/data/testB' during test time.
17 |     """
18 | 
19 |     def __init__(self, opt):
20 |         """Initialize this dataset class.
21 | 
22 |         Parameters:
23 |             opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
24 |         """
25 |         BaseDataset.__init__(self, opt)
26 |         self.dir_A = os.path.join(opt.dataroot, 'trainA')  # create a path '/path/to/data/trainA'
27 |         self.dir_B = os.path.join(opt.dataroot, 'trainB')  # create a path '/path/to/data/trainB'
28 | 
29 |         self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size))  # load images from '/path/to/data/trainA'
30 |         self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size))  # load images from '/path/to/data/trainB'
31 | 
32 |         self.A_size = len(self.A_paths)  # get the size of dataset A
33 |         self.B_size = len(self.B_paths)  # get the size of dataset B
34 |         btoA = self.opt.direction == 'BtoA'
35 |         input_nc = self.opt.output_nc if btoA else self.opt.input_nc  # get the number of channels of input image
36 |         output_nc = self.opt.input_nc if btoA else self.opt.output_nc  # get the number of channels of output image
37 |         self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
38 |         self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
39 | 
40 |     def __getitem__(self, index):
41 |         """Return a data point and its metadata information.
42 | 
43 |         Parameters:
44 |             index (int) -- a random integer for data indexing
45 | 
46 |         Returns a dictionary that contains A, B, A_paths and B_paths
47 |             A (tensor)    -- an image in the input domain
48 |             B (tensor)    -- its corresponding image in the target domain
49 |             A_paths (str) -- image paths
50 |             B_paths (str) -- image paths
51 |         """
52 |         A_path = self.A_paths[index % self.A_size]  # make sure index is within the range
53 |         if self.opt.serial_batches:  # use a fixed, ordered pairing for domain B
54 |             index_B = index % self.B_size
55 |         else:  # randomize the index for domain B to avoid fixed pairs.
56 |             index_B = random.randint(0, self.B_size - 1)
57 |         B_path = self.B_paths[index_B]
58 |         A_img = Image.open(A_path).convert('RGB')
59 |         B_img = Image.open(B_path).convert('RGB')
60 |         # apply image transformation
61 |         A = self.transform_A(A_img)
62 |         B = self.transform_B(B_img)
63 | 
64 |         return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}
65 | 
66 |     def __len__(self):
67 |         """Return the total number of images in the dataset.
68 | 
69 |         As we have two datasets with potentially different numbers of images,
70 |         we take the maximum of the two.
71 |         """
72 |         return max(self.A_size, self.B_size)
73 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
2 | All rights reserved.
3 | 
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are met:
6 | 
7 | * Redistributions of source code must retain the above copyright notice, this
8 |   list of conditions and the following disclaimer.
9 | 
10 | * Redistributions in binary form must reproduce the above copyright notice,
11 |   this list of conditions and the following disclaimer in the documentation
12 |   and/or other materials provided with the distribution.
13 | 
14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17 | DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | 25 | 26 | --------------------------- LICENSE FOR pix2pix -------------------------------- 27 | BSD License 28 | 29 | For pix2pix software 30 | Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu 31 | All rights reserved. 32 | 33 | Redistribution and use in source and binary forms, with or without 34 | modification, are permitted provided that the following conditions are met: 35 | 36 | * Redistributions of source code must retain the above copyright notice, this 37 | list of conditions and the following disclaimer. 38 | 39 | * Redistributions in binary form must reproduce the above copyright notice, 40 | this list of conditions and the following disclaimer in the documentation 41 | and/or other materials provided with the distribution. 42 | 43 | ----------------------------- LICENSE FOR DCGAN -------------------------------- 44 | BSD License 45 | 46 | For dcgan.torch software 47 | 48 | Copyright (c) 2015, Facebook, Inc. All rights reserved. 49 | 50 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 51 | 52 | Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 53 | 54 | Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 55 | 56 | Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 57 | 58 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 59 | -------------------------------------------------------------------------------- /data/template_dataset.py: -------------------------------------------------------------------------------- 1 | """Dataset class template 2 | 3 | This module provides a template for users to implement custom datasets. 4 | You can specify '--dataset_mode template' to use this dataset. 5 | The class name should be consistent with both the filename and its dataset_mode option. 
6 | The filename should be <dataset_mode>_dataset.py 7 | The class name should be <DatasetMode>Dataset 8 | You need to implement the following functions: 9 | -- <modify_commandline_options>: Add dataset-specific options and rewrite default values for existing options. 10 | -- <__init__>: Initialize this dataset class. 11 | -- <__getitem__>: Return a data point and its metadata information. 12 | -- <__len__>: Return the number of images. 13 | """ 14 | from data.base_dataset import BaseDataset, get_transform 15 | # from data.image_folder import make_dataset 16 | # from PIL import Image 17 | 18 | 19 | class TemplateDataset(BaseDataset): 20 | """A template dataset class for you to implement custom datasets.""" 21 | @staticmethod 22 | def modify_commandline_options(parser, is_train): 23 | """Add new dataset-specific options, and rewrite default values for existing options. 24 | 25 | Parameters: 26 | parser -- original option parser 27 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. 28 | 29 | Returns: 30 | the modified parser. 31 | """ 32 | parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option') 33 | parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0) # specify dataset-specific default values 34 | return parser 35 | 36 | def __init__(self, opt): 37 | """Initialize this dataset class. 38 | 39 | Parameters: 40 | opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions 41 | 42 | A few things can be done here. 43 | - save the options (has been done in BaseDataset) 44 | - get image paths and meta information of the dataset. 45 | - define the image transformation. 46 | """ 47 | # save the option and dataset root 48 | BaseDataset.__init__(self, opt) 49 | # get the image paths of your dataset; 50 | self.image_paths = [] # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root 51 | # define the default transform function. You can use <base_dataset.get_transform>; you can also define your custom transform function 52 | self.transform = get_transform(opt) 53 | 54 | def __getitem__(self, index): 55 | """Return a data point and its metadata information. 56 | 57 | Parameters: 58 | index -- a random integer for data indexing 59 | 60 | Returns: 61 | a dictionary of data with their names. It usually contains the data itself and its metadata information. 62 | 63 | Step 1: get a random image path: e.g., path = self.image_paths[index] 64 | Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB'). 65 | Step 3: convert your data to a PyTorch tensor. You can use helper functions such as self.transform. e.g., data = self.transform(image) 66 | Step 4: return a data point as a dictionary.
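A minimal sketch of these four steps for a single-image dataset (the names path, image, and data are illustrative, not prescribed by the template):

    path = self.image_paths[index]               # Step 1: pick an image path
    image = Image.open(path).convert('RGB')      # Step 2: load the image from disk
    data = self.transform(image)                 # Step 3: PIL image -> tensor
    return {'data': data, 'path': path}          # Step 4: return a dictionary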
67 | """ 68 | path = 'temp' # needs to be a string 69 | data_A = None # needs to be a tensor 70 | data_B = None # needs to be a tensor 71 | return {'data_A': data_A, 'data_B': data_B, 'path': path} 72 | 73 | def __len__(self): 74 | """Return the total number of images.""" 75 | return len(self.image_paths) 76 | -------------------------------------------------------------------------------- /scripts/edges/batch_hed.py: -------------------------------------------------------------------------------- 1 | # HED batch processing script; modified from https://github.com/s9xie/hed/blob/master/examples/hed/HED-tutorial.ipynb 2 | # Step 1: download the hed repo: https://github.com/s9xie/hed 3 | # Step 2: download the models and protoxt, and put them under {caffe_root}/examples/hed/ 4 | # Step 3: put this script under {caffe_root}/examples/hed/ 5 | # Step 4: run the following script: 6 | # python batch_hed.py --images_dir=/data/to/path/photos/ --hed_mat_dir=/data/to/path/hed_mat_files/ 7 | # The code sometimes crashes after computation is done. Error looks like "Check failed: ... driver shutting down". You can just kill the job. 8 | # For large images, it will produce gpu memory issue. Therefore, you better resize the images before running this script. 9 | # Step 5: run the MATLAB post-processing script "PostprocessHED.m" 10 | 11 | 12 | import caffe 13 | import numpy as np 14 | from PIL import Image 15 | import os 16 | import argparse 17 | import sys 18 | import scipy.io as sio 19 | 20 | 21 | def parse_args(): 22 | parser = argparse.ArgumentParser(description='batch proccesing: photos->edges') 23 | parser.add_argument('--caffe_root', dest='caffe_root', help='caffe root', default='../../', type=str) 24 | parser.add_argument('--caffemodel', dest='caffemodel', help='caffemodel', default='./hed_pretrained_bsds.caffemodel', type=str) 25 | parser.add_argument('--prototxt', dest='prototxt', help='caffe prototxt file', default='./deploy.prototxt', type=str) 26 | parser.add_argument('--images_dir', dest='images_dir', help='directory to store input photos', type=str) 27 | parser.add_argument('--hed_mat_dir', dest='hed_mat_dir', help='directory to store output hed edges in mat file', type=str) 28 | parser.add_argument('--border', dest='border', help='padding border', type=int, default=128) 29 | parser.add_argument('--gpu_id', dest='gpu_id', help='gpu id', type=int, default=1) 30 | args = parser.parse_args() 31 | return args 32 | 33 | 34 | args = parse_args() 35 | for arg in vars(args): 36 | print('[%s] =' % arg, getattr(args, arg)) 37 | # Make sure that caffe is on the python path: 38 | caffe_root = args.caffe_root # this file is expected to be in {caffe_root}/examples/hed/ 39 | sys.path.insert(0, caffe_root + 'python') 40 | 41 | 42 | if not os.path.exists(args.hed_mat_dir): 43 | print('create output directory %s' % args.hed_mat_dir) 44 | os.makedirs(args.hed_mat_dir) 45 | 46 | imgList = os.listdir(args.images_dir) 47 | nImgs = len(imgList) 48 | print('#images = %d' % nImgs) 49 | 50 | caffe.set_mode_gpu() 51 | caffe.set_device(args.gpu_id) 52 | # load net 53 | net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST) 54 | # pad border 55 | border = args.border 56 | 57 | for i in range(nImgs): 58 | if i % 500 == 0: 59 | print('processing image %d/%d' % (i, nImgs)) 60 | im = Image.open(os.path.join(args.images_dir, imgList[i])) 61 | 62 | in_ = np.array(im, dtype=np.float32) 63 | in_ = np.pad(in_, ((border, border), (border, border), (0, 0)), 'reflect') 64 | 65 | in_ = in_[:, :, 0:3] 66 | in_ = in_[:, :, ::-1] 
67 | in_ -= np.array((104.00698793, 116.66876762, 122.67891434)) 68 | in_ = in_.transpose((2, 0, 1)) 69 | # if testing with CPU, remove the caffe.set_mode_gpu() and caffe.set_device() calls above 70 | 71 | # shape for input (data blob is N x C x H x W), set data 72 | net.blobs['data'].reshape(1, *in_.shape) 73 | net.blobs['data'].data[...] = in_ 74 | # run the net and extract the fused edge prediction 75 | net.forward() 76 | fuse = net.blobs['sigmoid-fuse'].data[0][0, :, :] 77 | # get rid of the border 78 | fuse = fuse[(border+35):(-border+35), (border+35):(-border+35)] 79 | # save hed file to the disk 80 | name, ext = os.path.splitext(imgList[i]) 81 | sio.savemat(os.path.join(args.hed_mat_dir, name + '.mat'), {'edge_predict': fuse}) 82 | -------------------------------------------------------------------------------- /util/get_data.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import os 3 | import tarfile 4 | import requests 5 | from warnings import warn 6 | from zipfile import ZipFile 7 | from bs4 import BeautifulSoup 8 | from os.path import abspath, isdir, join, basename 9 | 10 | 11 | class GetData(object): 12 | """A Python script for downloading CycleGAN or pix2pix datasets. 13 | 14 | Parameters: 15 | technique (str) -- One of: 'cyclegan' or 'pix2pix'. 16 | verbose (bool) -- If True, print additional information. 17 | 18 | Examples: 19 | >>> from util.get_data import GetData 20 | >>> gd = GetData(technique='cyclegan') 21 | >>> new_data_path = gd.get(save_path='./datasets') # options will be displayed. 22 | 23 | Alternatively, you can use the bash scripts 'scripts/download_pix2pix_model.sh' 24 | and 'scripts/download_cyclegan_model.sh' (note that these download pretrained models, not datasets). 25 | """ 26 | 27 | def __init__(self, technique='cyclegan', verbose=True): 28 | url_dict = { 29 | 'pix2pix': 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/', 30 | 'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets' 31 | } 32 | self.url = url_dict.get(technique.lower()) 33 | self._verbose = verbose 34 | 35 | def _print(self, text): 36 | if self._verbose: 37 | print(text) 38 | 39 | @staticmethod 40 | def _get_options(r): 41 | soup = BeautifulSoup(r.text, 'lxml') 42 | options = [h.text for h in soup.find_all('a', href=True) 43 | if h.text.endswith(('.zip', 'tar.gz'))] 44 | return options 45 | 46 | def _present_options(self): 47 | r = requests.get(self.url) 48 | options = self._get_options(r) 49 | print('Options:\n') 50 | for i, o in enumerate(options): 51 | print("{0}: {1}".format(i, o)) 52 | choice = input("\nPlease enter the number of the " 53 | "dataset above you wish to download:") 54 | return options[int(choice)] 55 | 56 | def _download_data(self, dataset_url, save_path): 57 | if not isdir(save_path): 58 | os.makedirs(save_path) 59 | 60 | base = basename(dataset_url) 61 | temp_save_path = join(save_path, base) 62 | 63 | with open(temp_save_path, "wb") as f: 64 | r = requests.get(dataset_url) 65 | f.write(r.content) 66 | 67 | if base.endswith('.tar.gz'): 68 | obj = tarfile.open(temp_save_path) 69 | elif base.endswith('.zip'): 70 | obj = ZipFile(temp_save_path, 'r') 71 | else: 72 | raise ValueError("Unknown File Type: {0}.".format(base)) 73 | 74 | self._print("Unpacking Data...") 75 | obj.extractall(save_path) 76 | obj.close() 77 | os.remove(temp_save_path) 78 | 79 | def get(self, save_path, dataset=None): 80 | """ 81 | 82 | Download a dataset. 83 | 84 | Parameters: 85 | save_path (str) -- A directory to save the data to. 86 | dataset (str) -- (optional). A specific dataset to download.
87 | Note: this must include the file extension. 88 | If None, options will be presented for you 89 | to choose from. 90 | 91 | Returns: 92 | save_path_full (str) -- the absolute path to the downloaded data. 93 | 94 | """ 95 | if dataset is None: 96 | selected_dataset = self._present_options() 97 | else: 98 | selected_dataset = dataset 99 | 100 | save_path_full = join(save_path, selected_dataset.split('.')[0]) 101 | 102 | if isdir(save_path_full): 103 | warn("\n'{0}' already exists. Skipping download.".format( 104 | save_path_full)) 105 | else: 106 | self._print('Downloading Data...') 107 | url = "{0}/{1}".format(self.url, selected_dataset) 108 | self._download_data(url, save_path=save_path) 109 | 110 | return abspath(save_path_full) 111 | -------------------------------------------------------------------------------- /options/options.py: -------------------------------------------------------------------------------- 1 | from .base_options import BaseOptions 2 | from util.util import str2bool 3 | 4 | class Options(BaseOptions): 5 | """This class includes training options. 6 | 7 | It also includes shared options defined in BaseOptions. 8 | """ 9 | 10 | def initialize(self, parser): 11 | parser = BaseOptions.initialize(self, parser) 12 | # visdom and HTML visualization parameters 13 | parser.add_argument('--display_freq', type=int, default=1000, help='frequency of showing training results on screen') 14 | parser.add_argument('--display_size', type=int, default=16, help='number of images shown in each visualization grid') 15 | parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.') 16 | parser.add_argument('--display_id', type=int, default=-1, help='window id of the web display') 17 | parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display') 18 | parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")') 19 | parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display') 20 | parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html') 21 | parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console') 22 | parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/') 23 | # network saving and loading parameters 24 | parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results') 25 | parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 26 | parser.add_argument('--save_by_iter', action='store_true', help='whether to save the model by iteration') 27 | parser.add_argument('--epoch_count', type=int, default=0, help='the starting epoch count; we save the model by <epoch_count>, <epoch_count>+<save_epoch_freq>, ...') 28 | parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') 29 | # training parameters 30 | parser.add_argument('--n_epochs', type=int, default=50, help='number of epochs with the initial learning rate') 31 | parser.add_argument('--n_epochs_decay', type=int, default=50, help='number of epochs to linearly decay learning rate to zero') 32 | parser.add_argument('--iterations_per_epoch', type=int, default=10000, help='number of
iterations in an epoch') 33 | parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') 34 | parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate for adam') 35 | parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight_decay for adam') 36 | parser.add_argument('--gan_type', type=str, default='lsgan', help='the type of GAN objective. [vanilla | lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.') 37 | parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images') 38 | parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]') 39 | parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 40 | parser.add_argument('--drop_last', type=int, default=1, help='whether to drop the last incomplete batch so that every batch holds exactly batch_size samples') 41 | 42 | parser.add_argument('--beta_mode', type=str, default='AB', help='the type of beta_mode [A|B|AB|C]') 43 | parser.add_argument('--sn', type=str2bool, default='False', help='whether to apply spectral normalization to the discriminator') 44 | parser.add_argument('--threshold', type=float, default=0.1, help='threshold on beta (betas below it are zeroed in the discriminator loss)') 45 | parser.add_argument('--used_time', type=float, default=0, help='training time already elapsed (used when resuming)') 46 | 47 | self.isTrain = True 48 | return parser 49 | -------------------------------------------------------------------------------- /options/train_options.py: -------------------------------------------------------------------------------- 1 | from .base_options import BaseOptions 2 | from util.util import str2bool 3 | 4 | class TrainOptions(BaseOptions): 5 | """This class includes training options. 6 | 7 | It also includes shared options defined in BaseOptions.
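    Typical usage (an illustrative sketch; parse() is implemented in BaseOptions and also merges model- and dataset-specific options):

        opt = TrainOptions().parse()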
8 | """ 9 | 10 | def initialize(self, parser): 11 | parser = BaseOptions.initialize(self, parser) 12 | # visdom and HTML visualization parameters 13 | parser.add_argument('--display_freq', type=int, default=1000, help='frequency of showing training results on screen') 14 | parser.add_argument('--display_size', type=int, default=16, help='frequency of showing training results on screen') 15 | parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.') 16 | parser.add_argument('--display_id', type=int, default=-1, help='window id of the web display') 17 | parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display') 18 | parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")') 19 | parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display') 20 | parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html') 21 | parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console') 22 | parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/') 23 | # network saving and loading parameters 24 | parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results') 25 | parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 26 | parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration') 27 | parser.add_argument('--resume', action='store_true', default=False, help='continue training: load the latest model') 28 | parser.add_argument('--epoch_count', type=int, default=0, help='the starting epoch count, we save the model by , +, ...') 29 | parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') 30 | # training parameters 31 | parser.add_argument('--n_epochs', type=int, default=50, help='number of epochs with the initial learning rate') 32 | parser.add_argument('--n_epochs_decay', type=int, default=50, help='number of epochs to linearly decay learning rate to zero') 33 | parser.add_argument('--iterations_per_epoch', type=int, default=10000, help='number of iterations in an epoch') 34 | parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') 35 | parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate for adam') 36 | parser.add_argument('--weight_decay', type=float, default=0.0001, help='weight_decay for adam') 37 | parser.add_argument('--gan_type', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.') 38 | parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images') 39 | parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. 
[linear | step | plateau | cosine]') 40 | parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 41 | parser.add_argument('--drop_last', type=int, default=1, help='whether to drop the last incomplete batch so that every batch holds exactly batch_size samples') 42 | 43 | parser.add_argument('--beta_mode', type=str, default='AB', help='the type of beta_mode [A|B|AB|C]') 44 | parser.add_argument('--sn', type=str2bool, default='False', help='whether to apply spectral normalization to the discriminator') 45 | parser.add_argument('--threshold', type=float, default=0.1, help='threshold on beta (betas below it are zeroed in the discriminator loss)') 46 | parser.add_argument('--used_time', type=float, default=0, help='training time already elapsed (used when resuming)') 47 | 48 | self.isTrain = True 49 | return parser 50 | -------------------------------------------------------------------------------- /models/misc.py: -------------------------------------------------------------------------------- 1 | import os 2 | from util import util 3 | import torch 4 | import numpy as np 5 | import torch.nn.functional as F 6 | from PIL import Image, ImageDraw, ImageFont 7 | import matplotlib 8 | matplotlib.use('Agg') 9 | import matplotlib.pyplot as plt 10 | import torch_fidelity 11 | 12 | def scatter_beta(clean_betas, noise_betas, path): 13 | from matplotlib import rc 14 | rc('text', usetex=True) 15 | plt.rc('xtick', labelsize=25) 16 | plt.rc('ytick', labelsize=25) 17 | #plt.scatter(np.ones_like(clean_betas), clean_betas, c='r', label='clean') 18 | #plt.scatter(np.zeros_like(noise_betas), noise_betas, c='g', label='noise') 19 | weights = np.ones_like(clean_betas) / (len(clean_betas)+len(noise_betas)) 20 | n, bins, patches = plt.hist(clean_betas, bins=np.linspace(0, 200, 10), weights=weights, facecolor='r', label='aligned') 21 | print(n, bins, patches) 22 | weights = np.ones_like(noise_betas) / (len(noise_betas)+len(clean_betas)) 23 | n, bins, patches = plt.hist(noise_betas, bins=np.linspace(0, 200, 10), weights=weights, facecolor='g', label='unaligned') 24 | print(n, bins, patches) 25 | #plt.hist(np.concatenate([clean_betas,noise_betas],0), facecolor='g', label='unaligned') 26 | plt.xlabel('Value of %s'%(r'$\beta$'), fontsize=25) 27 | plt.ylabel('Probability', fontsize=25) 28 | plt.title('Distribution of %s'%(r'$\beta$'), fontsize=25) 29 | plt.grid(True) 30 | plt.legend(fontsize=25) 31 | plt.savefig(path, bbox_inches='tight') 32 | plt.close() 33 | 34 | 35 | def save_train_image_grid(tensor_images, betas, path, display_size=10, image_size=128): 36 | assert len(betas) == len(tensor_images) 37 | if display_size > len(tensor_images): 38 | display_size = len(tensor_images) 39 | images = [] 40 | # row-wise 41 | for image in (tensor_images): 42 | image = image.unsqueeze(0) 43 | #images.append(util.to_data( 44 | # F.interpolate(image, scale_factor=image_size / image.size(-1), recompute_scale_factor=True))) 45 | images.append(util.to_data( 46 | F.interpolate(image, scale_factor=image_size / image.size(-1)))) 47 | grid_size = [display_size, len(images)//display_size] 48 | images = np.concatenate(images, 0) 49 | img = util.convert_to_pil_image(util.create_image_grid(images, grid_size)) 50 | draw = ImageDraw.Draw(img) 51 | try: 52 | font = ImageFont.truetype('/usr/share/fonts/gnu-free/FreeSerif.ttf', 25) 53 | except: 54 | font = ImageFont.load_default() 55 | grid_w, grid_h = grid_size 56 | for idx in range(len(images)): 57 | x = (idx % grid_w) * image_size 58 | y = (idx // grid_w) * image_size 59 | name = '%4.3f' % betas[idx] 60 | color = 'rgb(255, 0, 0)' # red 61 |
draw.text((x, y), name, fill=color, font=font) 62 | # save the edited image 63 | img.save(path) 64 | 65 | def save_image_grid(tensor_image_list, path, display_size, image_size=128): 66 | images = [] 67 | # row-wise 68 | """ 69 | for i in range(display_size): 70 | for j in range(len(tensor_image_list)): 71 | image = tensor_image_list[j][i].unsqueeze(0) 72 | #if j == len(tensor_image_list)//2: 73 | # insert blank between a2b and b2a 74 | # images.append(util.to_data(torch.ones([1,3,image_size,image_size]))) 75 | images.append(util.to_data(F.interpolate(image, scale_factor=image_size/image.size(-1), recompute_scale_factor=True ) )) 76 | grid_size = [len(images)//display_size, display_size] 77 | """ 78 | num_images = 0 79 | for i in range(len(tensor_image_list)): 80 | for j in range(display_size): 81 | image = tensor_image_list[i][j] 82 | #images.append(util.to_data(F.interpolate(image, scale_factor=image_size/image.size(-1), recompute_scale_factor=True ) )) 83 | images.append(util.to_data(F.interpolate(image, scale_factor=image_size/image.size(-1)) )) 84 | num_images += 1 85 | assert len(images) == num_images 86 | grid_size = [display_size, num_images//display_size] 87 | 88 | images = np.concatenate(images, 0) 89 | util.save_image_grid(images, path, grid_size=grid_size) 90 | 91 | 92 | 93 | def make_dirs(dirs): 94 | for dir in dirs: 95 | if not os.path.exists(dir): 96 | os.makedirs(dir) 97 | 98 | 99 | def test_fid(a_loader, net_a2b, b_loader, net_b2a, run_dir, opt): 100 | net_a2b.eval() 101 | net_b2a.eval() 102 | fake_a_path = os.path.join(run_dir, 'fakeA') 103 | fake_b_path = os.path.join(run_dir, 'fakeB') 104 | real_a_path = os.path.join(opt.dataroot, 'testA') 105 | real_b_path = os.path.join(opt.dataroot, 'testB') 106 | make_dirs([fake_a_path, fake_b_path]) 107 | with torch.no_grad(): 108 | for i, item in enumerate(a_loader): 109 | data = item['A'].cuda() 110 | path = item['A_paths'][0].split('/')[-1].split('.')[0]+'.png' 111 | with torch.no_grad(): 112 | fake_b = net_a2b(data).detach() 113 | file_name = os.path.join(fake_b_path, path) 114 | util.save_image(util.tensor2im(fake_b), file_name) 115 | 116 | for i, item in enumerate(b_loader): 117 | data = item['A'].cuda() 118 | path = item['A_paths'][0].split('/')[-1].split('.')[0]+'.png' 119 | with torch.no_grad(): 120 | fake_a = net_b2a(data).detach() 121 | file_name = os.path.join(fake_a_path, path) 122 | util.save_image(util.tensor2im(fake_a), file_name) 123 | 124 | eval_args = {'fid': True, 'kid': True, 'kid_subset_size': 50, 'kid_subsets': 10, 'verbose': False, 'cuda': True} 125 | metric_dict_AB = torch_fidelity.calculate_metrics(input1=real_b_path, input2=fake_b_path, **eval_args) 126 | metric_dict_BA = torch_fidelity.calculate_metrics(input1=real_a_path, input2=fake_a_path, **eval_args) 127 | net_a2b.train() 128 | net_b2a.train() 129 | return metric_dict_AB, metric_dict_BA 130 | -------------------------------------------------------------------------------- /data/base_dataset.py: -------------------------------------------------------------------------------- 1 | """This module implements an abstract base class (ABC) 'BaseDataset' for datasets. 2 | 3 | It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses. 
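How a subclass typically uses these helpers (an illustrative sketch; unaligned_dataset.py does essentially this):

    transform = get_transform(opt, grayscale=False)
    tensor = transform(Image.open(path).convert('RGB'))   # PIL image -> normalized tensor in [-1, 1]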
4 | """ 5 | import random 6 | import numpy as np 7 | import torch.utils.data as data 8 | from PIL import Image 9 | import torchvision.transforms as transforms 10 | from abc import ABC, abstractmethod 11 | 12 | 13 | class BaseDataset(data.Dataset, ABC): 14 | """This class is an abstract base class (ABC) for datasets. 15 | 16 | To create a subclass, you need to implement the following four functions: 17 | -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt). 18 | -- <__len__>: return the size of dataset. 19 | -- <__getitem__>: get a data point. 20 | -- : (optionally) add dataset-specific options and set default options. 21 | """ 22 | 23 | def __init__(self, opt): 24 | """Initialize the class; save the options in the class 25 | 26 | Parameters: 27 | opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions 28 | """ 29 | self.opt = opt 30 | self.root = opt.dataroot 31 | 32 | @staticmethod 33 | def modify_commandline_options(parser, is_train): 34 | """Add new dataset-specific options, and rewrite default values for existing options. 35 | 36 | Parameters: 37 | parser -- original option parser 38 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. 39 | 40 | Returns: 41 | the modified parser. 42 | """ 43 | return parser 44 | 45 | @abstractmethod 46 | def __len__(self): 47 | """Return the total number of images in the dataset.""" 48 | return 0 49 | 50 | @abstractmethod 51 | def __getitem__(self, index): 52 | """Return a data point and its metadata information. 53 | 54 | Parameters: 55 | index - - a random integer for data indexing 56 | 57 | Returns: 58 | a dictionary of data with their names. It ususally contains the data itself and its metadata information. 
59 | """ 60 | pass 61 | 62 | 63 | def get_params(opt, size): 64 | w, h = size 65 | new_h = h 66 | new_w = w 67 | if opt.preprocess == 'resize_and_crop': 68 | new_h = new_w = opt.load_size 69 | elif opt.preprocess == 'scale_width_and_crop': 70 | new_w = opt.load_size 71 | new_h = opt.load_size * h // w 72 | 73 | x = random.randint(0, np.maximum(0, new_w - opt.crop_size)) 74 | y = random.randint(0, np.maximum(0, new_h - opt.crop_size)) 75 | 76 | flip = random.random() > 0.5 77 | 78 | return {'crop_pos': (x, y), 'flip': flip} 79 | 80 | 81 | def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True): 82 | transform_list = [] 83 | if grayscale: 84 | transform_list.append(transforms.Grayscale(1)) 85 | if 'resize' in opt.preprocess: 86 | osize = [opt.load_size, opt.load_size] 87 | transform_list.append(transforms.Resize(osize, method)) 88 | elif 'scale_width' in opt.preprocess: 89 | transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, opt.crop_size, method))) 90 | 91 | if 'crop' in opt.preprocess: 92 | if params is None: 93 | transform_list.append(transforms.RandomCrop(opt.crop_size)) 94 | else: 95 | transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size))) 96 | 97 | if opt.preprocess == 'none': 98 | transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method))) 99 | 100 | if not opt.no_flip: 101 | if params is None: 102 | transform_list.append(transforms.RandomHorizontalFlip()) 103 | elif params['flip']: 104 | transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip']))) 105 | 106 | if convert: 107 | transform_list += [transforms.ToTensor()] 108 | if grayscale: 109 | transform_list += [transforms.Normalize((0.5,), (0.5,))] 110 | else: 111 | transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] 112 | return transforms.Compose(transform_list) 113 | 114 | 115 | def __make_power_2(img, base, method=Image.BICUBIC): 116 | ow, oh = img.size 117 | h = int(round(oh / base) * base) 118 | w = int(round(ow / base) * base) 119 | if h == oh and w == ow: 120 | return img 121 | 122 | __print_size_warning(ow, oh, w, h) 123 | return img.resize((w, h), method) 124 | 125 | 126 | def __scale_width(img, target_size, crop_size, method=Image.BICUBIC): 127 | ow, oh = img.size 128 | if ow == target_size and oh >= crop_size: 129 | return img 130 | w = target_size 131 | h = int(max(target_size * oh / ow, crop_size)) 132 | return img.resize((w, h), method) 133 | 134 | 135 | def __crop(img, pos, size): 136 | ow, oh = img.size 137 | x1, y1 = pos 138 | tw = th = size 139 | if (ow > tw or oh > th): 140 | return img.crop((x1, y1, x1 + tw, y1 + th)) 141 | return img 142 | 143 | 144 | def __flip(img, flip): 145 | if flip: 146 | return img.transpose(Image.FLIP_LEFT_RIGHT) 147 | return img 148 | 149 | 150 | def __print_size_warning(ow, oh, w, h): 151 | """Print warning information about image size(only print once)""" 152 | if not hasattr(__print_size_warning, 'has_printed'): 153 | print("The image size needs to be a multiple of 4. " 154 | "The loaded image size was (%d, %d), so it was adjusted to " 155 | "(%d, %d). 
This adjustment will be done to all images " 156 | "whose sizes are not multiples of 4" % (ow, oh, w, h)) 157 | __print_size_warning.has_printed = True 158 | -------------------------------------------------------------------------------- /models/template_model.py: -------------------------------------------------------------------------------- 1 | """Model class template 2 | 3 | This module provides a template for users to implement custom models. 4 | You can specify '--model template' to use this model. 5 | The class name should be consistent with both the filename and its model option. 6 | The filename should be <model>_model.py 7 | The class name should be <Model>Model 8 | It implements a simple image-to-image translation baseline based on regression loss. 9 | Given input-output pairs (data_A, data_B), it learns a network netG that can minimize the following L1 loss: 10 | min_<netG> ||netG(data_A) - data_B||_1 11 | You need to implement the following functions: 12 | <modify_commandline_options>: Add model-specific options and rewrite default values for existing options. 13 | <__init__>: Initialize this model class. 14 | <set_input>: Unpack input data and perform data pre-processing. 15 | <forward>: Run forward pass. This will be called by both <optimize_parameters> and <test>. 16 | <optimize_parameters>: Update network weights; it will be called in every training iteration. 17 | """ 18 | import torch 19 | from .base_model import BaseModel 20 | from . import networks 21 | 22 | 23 | class TemplateModel(BaseModel): 24 | @staticmethod 25 | def modify_commandline_options(parser, is_train=True): 26 | """Add new model-specific options and rewrite default values for existing options. 27 | 28 | Parameters: 29 | parser -- the option parser 30 | is_train -- if it is training phase or test phase. You can use this flag to add training-specific or test-specific options. 31 | 32 | Returns: 33 | the modified parser. 34 | """ 35 | parser.set_defaults(dataset_mode='aligned') # You can rewrite default values for this model. For example, this model usually uses aligned dataset as its dataset. 36 | if is_train: 37 | parser.add_argument('--lambda_regression', type=float, default=1.0, help='weight for the regression loss') # You can define new arguments for this model. 38 | 39 | return parser 40 | 41 | def __init__(self, opt): 42 | """Initialize this model class. 43 | 44 | Parameters: 45 | opt -- training/test options 46 | 47 | A few things can be done here. 48 | - (required) call the initialization function of BaseModel 49 | - define loss function, visualization images, model names, and optimizers 50 | """ 51 | BaseModel.__init__(self, opt) # call the initialization method of BaseModel 52 | # specify the training losses you want to print out. The program will call base_model.get_current_losses to print the losses to the console and save them to the disk. 53 | self.loss_names = ['G'] # get_current_losses prepends 'loss_', so 'G' refers to self.loss_G defined in backward() 54 | # specify the images you want to save and display. The program will call base_model.get_current_visuals to save and display these images. 55 | self.visual_names = ['data_A', 'data_B', 'output'] 56 | # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks to save and load networks. 57 | # you can use opt.isTrain to specify different behaviors for training and test. For example, some networks will not be used during test, and you don't need to load them. 58 | self.model_names = ['G'] 59 | # define networks; you can use opt.isTrain to specify different behaviors for training and test.
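        # For example (an illustrative sketch, mirroring pix2pix_model.py later in this dump):
        #     if self.isTrain:
        #         self.netD = networks.define_D(...)   # a discriminator is only needed during training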
60 | self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, gpu_ids=self.gpu_ids) 61 | if self.isTrain: # only defined during training time 62 | # define your loss functions. You can use losses provided by torch.nn such as torch.nn.L1Loss. 63 | # We also provide a GANLoss class "networks.GANLoss". self.criterionGAN = networks.GANLoss().to(self.device) 64 | self.criterionLoss = torch.nn.L1Loss() 65 | # define and initialize optimizers. You can define one optimizer for each network. 66 | # If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example. 67 | self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999)) 68 | self.optimizers = [self.optimizer] 69 | 70 | # Our program will automatically call <BaseModel.setup> to define schedulers, load networks, and print networks 71 | 72 | def set_input(self, input): 73 | """Unpack input data from the dataloader and perform necessary pre-processing steps. 74 | 75 | Parameters: 76 | input: a dictionary that contains the data itself and its metadata information. 77 | """ 78 | AtoB = self.opt.direction == 'AtoB' # use <direction> to swap data_A and data_B 79 | self.data_A = input['A' if AtoB else 'B'].to(self.device) # get image data A 80 | self.data_B = input['B' if AtoB else 'A'].to(self.device) # get image data B 81 | self.image_paths = input['A_paths' if AtoB else 'B_paths'] # get image paths 82 | 83 | def forward(self): 84 | """Run forward pass. This will be called by both functions <optimize_parameters> and <test>.""" 85 | self.output = self.netG(self.data_A) # generate output image given the input data_A 86 | 87 | def backward(self): 88 | """Calculate losses, gradients, and update network weights; called in every training iteration""" 89 | # calculate the intermediate results if necessary; here self.output has been computed during function <forward> 90 | # calculate loss given the input and intermediate results 91 | self.loss_G = self.criterionLoss(self.output, self.data_B) * self.opt.lambda_regression 92 | self.loss_G.backward() # calculate gradients of network G w.r.t.
loss_G 93 | 94 | def optimize_parameters(self): 95 | """Update network weights; it will be called in every training iteration.""" 96 | self.forward() # first call forward to calculate intermediate results 97 | self.optimizer.zero_grad() # clear network G's existing gradients 98 | self.backward() # calculate gradients for network G 99 | self.optimizer.step() # update network G's weights 100 | -------------------------------------------------------------------------------- /scripts/eval_cityscapes/cityscapes.py: -------------------------------------------------------------------------------- 1 | # The following code is modified from https://github.com/shelhamer/clockwork-fcn 2 | import sys 3 | import os 4 | import glob 5 | import numpy as np 6 | from PIL import Image 7 | 8 | 9 | class cityscapes: 10 | def __init__(self, data_path): 11 | # data_path something like /data2/cityscapes 12 | self.dir = data_path 13 | self.classes = ['road', 'sidewalk', 'building', 'wall', 'fence', 14 | 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 15 | 'sky', 'person', 'rider', 'car', 'truck', 16 | 'bus', 'train', 'motorcycle', 'bicycle'] 17 | self.mean = np.array((72.78044, 83.21195, 73.45286), dtype=np.float32) 18 | # import cityscapes label helper and set up label mappings 19 | sys.path.insert(0, '{}/scripts/helpers/'.format(self.dir)) 20 | labels = __import__('labels') 21 | self.id2trainId = {label.id: label.trainId for label in labels.labels} # dictionary mapping from raw IDs to train IDs 22 | self.trainId2color = {label.trainId: label.color for label in labels.labels} # dictionary mapping train IDs to colors as 3-tuples 23 | 24 | def get_dset(self, split): 25 | ''' 26 | List images as (city, id) for the specified split 27 | 28 | TODO(shelhamer) generate splits from cityscapes itself, instead of 29 | relying on these separately made text files. 30 | ''' 31 | if split == 'train': 32 | dataset = open('{}/ImageSets/segFine/train.txt'.format(self.dir)).read().splitlines() 33 | else: 34 | dataset = open('{}/ImageSets/segFine/val.txt'.format(self.dir)).read().splitlines() 35 | return [(item.split('/')[0], item.split('/')[1]) for item in dataset] 36 | 37 | def load_image(self, split, city, idx): 38 | im = Image.open('{}/leftImg8bit_sequence/{}/{}/{}_leftImg8bit.png'.format(self.dir, split, city, idx)) 39 | return im 40 | 41 | def assign_trainIds(self, label): 42 | """ 43 | Map the given label IDs to the train IDs appropriate for training 44 | Use the label mapping provided in labels.py from the cityscapes scripts 45 | """ 46 | label = np.array(label, dtype=np.float32) 47 | if sys.version_info[0] < 3: 48 | for k, v in self.id2trainId.iteritems(): 49 | label[label == k] = v 50 | else: 51 | for k, v in self.id2trainId.items(): 52 | label[label == k] = v 53 | return label 54 | 55 | def load_label(self, split, city, idx): 56 | """ 57 | Load label image as 1 x height x width integer array of label indices. 58 | The leading singleton dimension is required by the loss. 59 | """ 60 | label = Image.open('{}/gtFine/{}/{}/{}_gtFine_labelIds.png'.format(self.dir, split, city, idx)) 61 | label = self.assign_trainIds(label) # get proper labels for eval 62 | label = np.array(label, dtype=np.uint8) 63 | label = label[np.newaxis, ...]
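        # label now has shape (1, H, W): the leading singleton dimension mentioned in the docstring above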
64 | return label 65 | 66 | def preprocess(self, im): 67 | """ 68 | Preprocess loaded image (by load_image) for Caffe: 69 | - cast to float 70 | - switch channels RGB -> BGR 71 | - subtract mean 72 | - transpose to channel x height x width order 73 | """ 74 | in_ = np.array(im, dtype=np.float32) 75 | in_ = in_[:, :, ::-1] 76 | in_ -= self.mean 77 | in_ = in_.transpose((2, 0, 1)) 78 | return in_ 79 | 80 | def palette(self, label): 81 | ''' 82 | Map trainIds to colors as specified in labels.py 83 | ''' 84 | if label.ndim == 3: 85 | label = label[0] 86 | color = np.empty((label.shape[0], label.shape[1], 3)) 87 | if sys.version_info[0] < 3: 88 | for k, v in self.trainId2color.iteritems(): 89 | color[label == k, :] = v 90 | else: 91 | for k, v in self.trainId2color.items(): 92 | color[label == k, :] = v 93 | return color 94 | 95 | def make_boundaries(label, thickness=None): 96 | """ 97 | Input is an image label, output is a numpy array mask encoding the boundaries of the objects 98 | Extract pixels at the true boundary by dilation - erosion of label. 99 | Don't just pick the void label as it is not exclusive to the boundaries. 100 | """ 101 | assert(thickness is not None) 102 | import skimage.morphology as skm 103 | void = 255 104 | mask = np.logical_and(label > 0, label != void)[0] 105 | selem = skm.disk(thickness) 106 | boundaries = np.logical_xor(skm.dilation(mask, selem), 107 | skm.erosion(mask, selem)) 108 | return boundaries 109 | 110 | def list_label_frames(self, split): 111 | """ 112 | Select labeled frames from a split for evaluation 113 | collected as (city, shot, idx) tuples 114 | """ 115 | def file2idx(f): 116 | """Helper to convert file path into frame ID""" 117 | city, shot, frame = (os.path.basename(f).split('_')[:3]) 118 | return "_".join([city, shot, frame]) 119 | frames = [] 120 | cities = [os.path.basename(f) for f in glob.glob('{}/gtFine/{}/*'.format(self.dir, split))] 121 | for c in cities: 122 | files = sorted(glob.glob('{}/gtFine/{}/{}/*labelIds.png'.format(self.dir, split, c))) 123 | frames.extend([file2idx(f) for f in files]) 124 | return frames 125 | 126 | def collect_frame_sequence(self, split, idx, length): 127 | """ 128 | Collect sequence of frames preceding (and including) a labeled frame 129 | as a list of Images. 130 | 131 | Note: 19 preceding frames are provided for each labeled frame. 132 | """ 133 | SEQ_LEN = length 134 | city, shot, frame = idx.split('_') 135 | frame = int(frame) 136 | frame_seq = [] 137 | for i in range(frame - SEQ_LEN, frame + 1): 138 | frame_path = '{0}/leftImg8bit_sequence/val/{1}/{1}_{2}_{3:0>6d}_leftImg8bit.png'.format( 139 | self.dir, city, shot, i) 140 | frame_seq.append(Image.open(frame_path)) 141 | return frame_seq 142 | -------------------------------------------------------------------------------- /models/loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import numpy as np 5 | 6 | def get_gan_loss(gan_type): 7 | if gan_type == 'lsgan': 8 | return IRW_MS_LSGANLoss 9 | elif gan_type == 'logistic': 10 | return IRW_MS_LogisticLoss 11 | elif gan_type == 'wgangp': 12 | return IRW_MS_WGANGPLoss 13 | else: 14 | raise ValueError('gan_type: %s is not supported...' 
% gan_type) 15 | 16 | class IRW_MS_WGANGPLoss: 17 | def __init__(self, dis, threshold, wgan_lambda=10, wgan_target=1.0, epsilon=1e-3): 18 | self.wgan_lambda = wgan_lambda 19 | self.wgan_target = wgan_target 20 | self.eps = epsilon 21 | self.dis = dis 22 | self.threshold = threshold 23 | 24 | def gen_loss(self, fake, beta_fake, **kwargs): 25 | dis_fake = self.dis(fake, **kwargs) 26 | loss = 0 27 | for dis_f in dis_fake: 28 | loss += (-torch.mean(beta_fake * dis_f)) 29 | return loss 30 | 31 | def dis_loss(self, real, beta_real, fake, beta_fake, **kwargs): 32 | dis_real = self.dis(real, **kwargs) 33 | dis_fake = self.dis(fake, **kwargs) 34 | beta_real = torch.nn.functional.threshold(beta_real, self.threshold, 0.0) 35 | loss = 0 36 | i = 0 37 | for dis_r, dis_f in zip(dis_real, dis_fake): 38 | loss += (dis_f - beta_real * dis_r) 39 | # gradient penalty 40 | size = len(real) 41 | eps = torch.rand(size, 1, 1, 1).to(real.device) 42 | x_hat = eps * real.data + (1-eps) * fake.data 43 | x_hat.requires_grad = True 44 | dis_hat = self.dis(x_hat, **kwargs)[i] 45 | size = np.prod(dis_hat.size()[1:]) 46 | grad_x_hat = torch.autograd.grad(dis_hat.sum()/size, inputs=x_hat, create_graph=True)[0] 47 | grad_penalty = ((grad_x_hat.view(grad_x_hat.size(0), -1).norm(2, dim=1)-self.wgan_target)**2) 48 | grad_penalty = self.eps * grad_penalty/(self.wgan_target**2) 49 | # additional epsilon penalty by NVIDIA 50 | epsilon_penalty = self.eps * (dis_r**2) 51 | loss = torch.mean(loss + grad_penalty + epsilon_penalty) 52 | i += 1 53 | return loss 54 | 55 | class IRW_MS_LogisticLoss: 56 | def __init__(self, dis, threshold, G_saturate=False, D_gp='r1', gamma=10): 57 | self.sat = G_saturate 58 | self.gp = D_gp 59 | self.gamma = gamma 60 | self.dis = dis 61 | self.threshold = threshold 62 | 63 | def gen_loss(self, fake, beta_fake, **kwargs): 64 | dis_fake = self.dis(fake, **kwargs) 65 | loss_fake = 0 66 | for dis_f in dis_fake: 67 | if self.sat: 68 | loss_fake += beta_fake * (-F.softplus(dis_f)) 69 | else: 70 | loss_fake += beta_fake * (F.softplus(-dis_f)) 71 | return torch.mean(loss_fake) 72 | 73 | def dis_loss(self, real, beta_real, fake, beta_fake, **kwargs): 74 | real.requires_grad = True 75 | dis_fake = self.dis(fake, **kwargs) 76 | dis_real = self.dis(real, **kwargs) 77 | loss = 0 78 | beta_real = torch.nn.functional.threshold(beta_real, self.threshold, 0.0) 79 | for dis_r, dis_f in zip(dis_real, dis_fake): 80 | loss += F.softplus(dis_f) 81 | loss += beta_real * F.softplus(-dis_r) 82 | if self.gp in ['r1', 'r2']: 83 | if self.gp == 'r1': 84 | size = np.prod(dis_r.size()[1:]) 85 | grad = torch.autograd.grad(outputs=dis_r.sum()/size, inputs=real, create_graph=True)[0] 86 | elif self.gp == 'r2': 87 | size = np.prod(dis_f.size()[1:]) 88 | grad = torch.autograd.grad(outputs=dis_f.sum()/size, inputs=fake, create_graph=True)[0] 89 | grad_penalty = (grad.view(grad.size(0), -1).norm(2, dim=1)**2) 90 | loss += grad_penalty * self.gamma/2.0 91 | return torch.mean(loss) 92 | 93 | class IRW_MS_LSGANLoss: 94 | def __init__(self, dis, threshold): 95 | self.dis = dis 96 | self.threshold = threshold # if beta < threshold, beta is set to 0 -------------------------------------------------------------------------------- /models/pix2pix_model.py: -------------------------------------------------------------------------------- 47 | self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake'] 48 | # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals> 49 | self.visual_names = ['real_A', 'fake_B', 'real_B'] 50 | # specify the models you want to save to the disk.
The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks> 51 | if self.isTrain: 52 | self.model_names = ['G', 'D'] 53 | else: # during test time, only load G 54 | self.model_names = ['G'] 55 | # define networks (both generator and discriminator) 56 | self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm, 57 | not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) 58 | 59 | if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; therefore, #channels for D is input_nc + output_nc 60 | self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD, 61 | opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) 62 | 63 | if self.isTrain: 64 | # define loss functions 65 | self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) 66 | self.criterionL1 = torch.nn.L1Loss() 67 | # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>. 68 | self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999)) 69 | self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999)) 70 | self.optimizers.append(self.optimizer_G) 71 | self.optimizers.append(self.optimizer_D) 72 | 73 | def set_input(self, input): 74 | """Unpack input data from the dataloader and perform necessary pre-processing steps. 75 | 76 | Parameters: 77 | input (dict): include the data itself and its metadata information. 78 | 79 | The option 'direction' can be used to swap images in domain A and domain B. 80 | """ 81 | AtoB = self.opt.direction == 'AtoB' 82 | self.real_A = input['A' if AtoB else 'B'].to(self.device) 83 | self.real_B = input['B' if AtoB else 'A'].to(self.device) 84 | self.image_paths = input['A_paths' if AtoB else 'B_paths'] 85 | 86 | def forward(self): 87 | """Run forward pass; called by both functions <optimize_parameters> and <test>.""" 88 | self.fake_B = self.netG(self.real_A) # G(A) 89 | 90 | def backward_D(self): 91 | """Calculate GAN loss for the discriminator""" 92 | # Fake; stop backprop to the generator by detaching fake_B 93 | fake_AB = torch.cat((self.real_A, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator 94 | pred_fake = self.netD(fake_AB.detach()) 95 | self.loss_D_fake = self.criterionGAN(pred_fake, False) 96 | # Real 97 | real_AB = torch.cat((self.real_A, self.real_B), 1) 98 | pred_real = self.netD(real_AB) 99 | self.loss_D_real = self.criterionGAN(pred_real, True) 100 | # combine loss and calculate gradients 101 | self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5 102 | self.loss_D.backward() 103 | 104 | def backward_G(self): 105 | """Calculate GAN and L1 loss for the generator""" 106 | # First, G(A) should fake the discriminator 107 | fake_AB = torch.cat((self.real_A, self.fake_B), 1) 108 | pred_fake = self.netD(fake_AB) 109 | self.loss_G_GAN = self.criterionGAN(pred_fake, True) 110 | # Second, G(A) = B 111 | self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1 112 | # combine loss and calculate gradients 113 | self.loss_G = self.loss_G_GAN + self.loss_G_L1 114 | self.loss_G.backward() 115 | 116 | def optimize_parameters(self): 117 | self.forward() # compute fake images: G(A) 118 | # update D 119 | self.set_requires_grad(self.netD, True) # enable backprop for D 120 | self.optimizer_D.zero_grad() # set D's gradients to zero 121 | self.backward_D() # calculate gradients for D 122 | self.optimizer_D.step() # update D's weights 123 | # update G 124 |
self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G 125 | self.optimizer_G.zero_grad() # set G's gradients to zero 126 | self.backward_G() # calculate gradients for G 127 | self.optimizer_G.step() # update G's weights 128 | -------------------------------------------------------------------------------- /data/__init__.py: -------------------------------------------------------------------------------- 1 | """This package includes all the modules related to data loading and preprocessing 2 | 3 | To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset. 4 | You need to implement four functions: 5 | -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt). 6 | -- <__len__>: return the size of dataset. 7 | -- <__getitem__>: get a data point from data loader. 8 | -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options. 9 | 10 | Now you can use the dataset class by specifying flag '--dataset_mode dummy'. 11 | See our template dataset class 'template_dataset.py' for more details. 12 | """ 13 | import importlib 14 | import torch.utils.data 15 | from data.base_dataset import BaseDataset 16 | import copy 17 | import os 18 | 19 | 20 | def find_dataset_using_name(dataset_name): 21 | """Import the module "data/[dataset_name]_dataset.py". 22 | 23 | In the file, the class called DatasetNameDataset() will 24 | be instantiated. It has to be a subclass of BaseDataset, 25 | and it is case-insensitive. 26 | """ 27 | dataset_filename = "data." + dataset_name + "_dataset" 28 | datasetlib = importlib.import_module(dataset_filename) 29 | 30 | dataset = None 31 | target_dataset_name = dataset_name.replace('_', '') + 'dataset' 32 | for name, cls in datasetlib.__dict__.items(): 33 | if name.lower() == target_dataset_name.lower() \ 34 | and issubclass(cls, BaseDataset): 35 | dataset = cls 36 | 37 | if dataset is None: 38 | raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name)) 39 | 40 | return dataset 41 | 42 | 43 | def get_option_setter(dataset_name): 44 | """Return the static method <modify_commandline_options> of the dataset class.""" 45 | dataset_class = find_dataset_using_name(dataset_name) 46 | return dataset_class.modify_commandline_options 47 | 48 | 49 | def create_dataset(opt, inf=True): 50 | """Create a dataset given the option. 51 | 52 | This function wraps the class CustomDatasetDataLoader. 53 | This is the main interface between this package and 'train.py'/'test.py' 54 | 55 | Example: 56 | >>> from data import create_dataset 57 | >>> dataset = create_dataset(opt) 58 | """ 59 | data_loader = CustomDatasetDataLoader(opt) 60 | dataset = data_loader.load_data() 61 | if inf: 62 | dataset = InfiniteDataLoader(dataset) 63 | else: 64 | dataset = dataset.dataloader 65 | return dataset 66 | 67 | class InfiniteDataLoader: 68 | def __init__(self, dataset): 69 | self.data_loader = dataset.dataloader 70 | self.dataset_iterator = iter(self.data_loader) 71 | self.wrap_dataset = dataset 72 | self.dataset = self.data_loader.dataset 73 | 74 | def next(self): 75 | try: 76 | batch = next(self.dataset_iterator) 77 | except StopIteration: 78 | # Dataset exhausted, use a new fresh iterator.
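                # (a fresh iterator also reshuffles the data, because the underlying DataLoader is built with shuffle=not opt.serial_batches)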
79 | self.dataset_iterator = iter(self.data_loader) 80 | batch = next(self.dataset_iterator) 81 | return batch 82 | 83 | def __len__(self): 84 | return len(self.wrap_dataset) 85 | 86 | class CustomDatasetDataLoader(): 87 | """Wrapper class of Dataset class that performs multi-threaded data loading""" 88 | 89 | def __init__(self, opt): 90 | """Initialize this class 91 | 92 | Step 1: create a dataset instance given the name [dataset_mode] 93 | Step 2: create a multi-threaded data loader. 94 | """ 95 | self.opt = opt 96 | dataset_class = find_dataset_using_name(opt.dataset_mode) 97 | self.dataset = dataset_class(opt) 98 | drop_last = opt.drop_last > 0 99 | #print("dataset [%s] was created" % type(self.dataset).__name__) 100 | self.dataloader = torch.utils.data.DataLoader( 101 | self.dataset, 102 | batch_size=opt.batch_size, 103 | shuffle=not opt.serial_batches, 104 | num_workers=int(opt.num_threads), 105 | drop_last=drop_last) 106 | 107 | def load_data(self): 108 | return self 109 | 110 | def __len__(self): 111 | """Return the number of data in the dataset""" 112 | return min(len(self.dataset), self.opt.max_dataset_size) 113 | 114 | def __iter__(self): 115 | """Return a batch of data""" 116 | for i, data in enumerate(self.dataloader): 117 | if i * self.opt.batch_size >= self.opt.max_dataset_size: 118 | break 119 | yield data 120 | 121 | def get_test_loaders(train_opt): 122 | opt = copy.deepcopy(train_opt) 123 | opt.num_threads = 0 # test code only supports num_threads = 0 124 | opt.batch_size = 1 # test code only supports batch_size = 1 125 | opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed. 126 | opt.no_flip = True # no flip; comment this line if results on flipped images are needed. 127 | opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file. 128 | opt.drop_last = False 129 | opt.phase = 'test' 130 | opt.dataset_mode = 'single' 131 | opt.load_size = opt.crop_size 132 | opt_A = copy.deepcopy(opt) 133 | opt_A.dataroot = os.path.join(opt_A.dataroot, 'testA') 134 | opt_B = copy.deepcopy(opt) 135 | opt_B.dataroot = os.path.join(opt_B.dataroot, 'testB') 136 | dataloader_A = create_dataset(opt_A, inf=False) # create a dataset given opt.dataset_mode and other options 137 | dataloader_B = create_dataset(opt_B, inf=False) # create a dataset given opt.dataset_mode and other options 138 | return dataloader_A, dataloader_B 139 | 140 | 141 | def get_fix_train_loaders(train_opt): 142 | opt = copy.deepcopy(train_opt) 143 | opt.num_threads = 0 # test code only supports num_threads = 0 144 | opt.batch_size = 1 # test code only supports batch_size = 1 145 | opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed. 146 | opt.no_flip = True # no flip; comment this line if results on flipped images are needed. 147 | opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file.
148 | opt.drop_last = False 149 | opt.phase = 'test' 150 | opt.dataset_mode = 'single' 151 | opt.load_size = opt.crop_size 152 | opt_A = copy.deepcopy(opt) 153 | opt_A.dataroot = os.path.join(opt_A.dataroot, 'trainA') 154 | opt_B = copy.deepcopy(opt) 155 | opt_B.dataroot = os.path.join(opt_B.dataroot, 'trainB') 156 | dataloader_A = create_dataset(opt_A, inf=False) # create a dataset given opt.dataset_mode and other options 157 | dataloader_B = create_dataset(opt_B, inf=False) # create a dataset given opt.dataset_mode and other options 158 | return dataloader_A, dataloader_B 159 | 160 | 161 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | """General-purpose training script for image-to-image translation. 2 | 3 | This script works for various models (with option '--model': e.g., pix2pix, cyclegan, colorization) and 4 | different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization). 5 | You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model'). 6 | 7 | It first creates the dataset, model, and visualizer given the options. 8 | It then does standard network training. During training, it also visualizes/saves images, prints/saves the loss plot, and saves models. 9 | The script supports continuing/resuming a previous training run; see the handling of '--phase resume' at the bottom of this file. 10 | 11 | Example: 12 | Train a CycleGAN model: 13 | python main.py --dataroot ./datasets/maps --name maps_cyclegan --model cycle_gan 14 | Train a pix2pix model: 15 | python main.py --dataroot ./datasets/facades --name facades_pix2pix --model pix2pix --direction BtoA 16 | 17 | See options/base_options.py and options/train_options.py for more training options.
18 | See training and test tips at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/tips.md 19 | See frequently asked questions at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/docs/qa.md 20 | """ 21 | import time 22 | from options.options import Options 23 | from data import create_dataset, get_test_loaders 24 | from models import create_model 25 | from models import misc 26 | from util.visualizer import Visualizer 27 | from util.util import format_time 28 | from util import util 29 | import tensorboardX as tensorboard 30 | import os 31 | import numpy as np 32 | import torch 33 | import copy 34 | 35 | def test(model, opt, test_loader_a, test_loader_b): 36 | # test 37 | print('[*] testing start!') 38 | dict_a, dict_b = misc.test_fid(test_loader_a, model.gen_a2b, test_loader_b, model.gen_b2a, model.run_dir, opt) 39 | print('[Evaluation] A2B: ') 40 | print(dict_a) 41 | print('[Evaluation] B2A: ') 42 | print(dict_b) 43 | print('[*] testing finished!') 44 | 45 | def training_loop(model, opt, dataset, test_loader_a, test_loader_b): 46 | 47 | #----------------------------------------------------------- 48 | # fix images for visualization 49 | fix_a = torch.stack([test_loader_a.dataset[i]['A'] for i in range(opt.display_size)]).cuda() # fixed test data 50 | fix_b = torch.stack([test_loader_b.dataset[i]['A'] for i in range(opt.display_size)]).cuda() 51 | fix_train_a = torch.stack([dataset.dataset[i]['A'] for i in range(opt.batch_size)]).cuda() # fixed with different runs 52 | fix_train_b = torch.stack([dataset.dataset[i]['B'] for i in range(opt.batch_size)]).cuda() # fixed with same run 53 | #----------------------------------------------------------- 54 | visualizer = Visualizer(opt) # create a visualizer that displays/saves images and plots 55 | train_writer = tensorboard.SummaryWriter(os.path.join(model.run_dir, 'log')) # setup train writer 56 | #----------------------------------------------------------- 57 | cur_iters = opt.epoch_count * opt.iterations_per_epoch 58 | used_time = opt.used_time 59 | start_time = time.time() - used_time 60 | total_iters = (opt.n_epochs + opt.n_epochs_decay) * opt.iterations_per_epoch 61 | print('[*] training start!\n') 62 | for epoch in range(opt.epoch_count+1, opt.n_epochs + opt.n_epochs_decay + 1): # outer loop for different epochs; we save the model every <save_epoch_freq> epochs 63 | visualizer.reset() # reset the visualizer: make sure it saves the results to HTML at least once every epoch 64 | 65 | for _ in range(opt.iterations_per_epoch//opt.batch_size): # inner loop within one epoch 66 | data = dataset.next() 67 | cur_iters += opt.batch_size 68 | 69 | model.set_input(data) # unpack data from dataset and apply preprocessing 70 | input_images, fake_images, betas = model.optimize_parameters() # calculate loss functions, get gradients, update network weights 71 | 72 | if cur_iters % opt.print_freq == 0: # print training losses and save logging information to the disk 73 | used_time = (time.time() - start_time) 74 | util.write_loss(cur_iters, model, train_writer, prefix='training') 75 | visualizer.print_current_losses(cur_iters, total_iters, used_time, model.get_current_losses()) 76 | 77 | if cur_iters % opt.display_freq == 0: # display images on visdom and save images to an HTML file 78 | 79 | model.compute_visuals() 80 | visualizer.display_current_results(model.get_current_visuals(), epoch, save_result=True) 81 | # evaluation 82 | test_images = model.test(fix_a, fix_b)
83 | test_path = os.path.join(model.run_dir, 'img', 'fix-test_image-%03d.jpg' % epoch) 84 | misc.save_image_grid(test_images, test_path, opt.display_size) 85 | train_path = os.path.join(model.run_dir, 'img', 'fix-train_image-%03d.jpg' % epoch) 86 | fix_images, fix_betas = model.get_betas(fix_train_a, fix_train_b) 87 | misc.save_train_image_grid(fix_images, fix_betas, train_path) 88 | train_path = os.path.join(model.run_dir, 'img', 'train_image-%03d.jpg' % epoch) 89 | misc.save_train_image_grid(input_images, betas, train_path) 90 | train_path = os.path.join(model.run_dir, 'img', 'train_image_fake-%03d.jpg' % epoch) 91 | misc.save_train_image_grid(fake_images, betas, train_path) 92 | 93 | model.update_learning_rate() # update learning rates at the end of every epoch 94 | if epoch % opt.save_epoch_freq == 0: # cache our model every <save_epoch_freq> epochs 95 | model.save_networks('latest', used_time) 96 | model.save_networks(epoch, used_time) 97 | 98 | print('[*] training finished!') 99 | 100 | 101 | if __name__ == '__main__': 102 | opt = Options().parse() # get training options 103 | #----------------------------------------------------------- 104 | dataset = create_dataset(opt) # create a dataset given opt.dataset_mode and other options 105 | opt.trainA_size = dataset.dataset.A_size # get the number of training images in domain A 106 | opt.trainB_size = dataset.dataset.B_size # get the number of training images in domain B 107 | test_loader_a, test_loader_b = get_test_loaders(opt) # get test loaders with hard-coded evaluation options 108 | opt.testA_size = len(test_loader_a.dataset) # get the number of test images in domain A 109 | opt.testB_size = len(test_loader_b.dataset) # get the number of test images in domain B 110 | #----------------------------------------------------------- 111 | model = create_model(opt) # create a model given opt.model and other options 112 | model.setup(opt) # regular setup: load and print networks; create schedulers; update opt.epoch_count 113 | #----------------------------------------------------------- 114 | if opt.phase == 'train' or opt.phase == 'resume': 115 | training_loop(model, opt, dataset, test_loader_a, test_loader_b) 116 | elif opt.phase == 'test': 117 | test(model, opt, test_loader_a, test_loader_b) 118 | else: 119 | raise ValueError('opt.phase %s is not recognized' % opt.phase) 120 | 121 | 122 |
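The iteration bookkeeping in training_loop above reduces to simple arithmetic; for concreteness, a sketch with illustrative numbers (not taken from any real run):

    n_epochs, n_epochs_decay = 100, 100     # constant-lr phase + linear-decay phase
    iterations_per_epoch = 1000
    total_iters = (n_epochs + n_epochs_decay) * iterations_per_epoch   # 200000
    epoch_count = 50                        # restored by model.setup() when resuming
    cur_iters = epoch_count * iterations_per_epoch                     # resume at 50000
    # start_time = time.time() - used_time keeps the reported wall clock continuous across restarts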
-------------------------------------------------------------------------------- /options/base_options.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | from util import util 4 | import torch 5 | import models 6 | import data 7 | 8 | 9 | class BaseOptions(): 10 | """This class defines options used during both training and test time. 11 | 12 | It also implements several helper functions such as parsing, printing, and saving the options. 13 | It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class. 14 | """ 15 | 16 | def __init__(self): 17 | """Reset the class; indicates the class hasn't been initialized""" 18 | self.initialized = False 19 | self.important_args = [] 20 | self.print_args = [] 21 | 22 | def initialize(self, parser): 23 | """Define the common options that are used in both training and test.""" 24 | # basic parameters 25 | parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)') 26 | parser.add_argument('--name', type=str, default=None, help='name of the experiment. It decides where to store samples and models') 27 | parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 28 | parser.add_argument('--result_dir', type=str, default='./results', help='models and results are saved here') 29 | # model parameters 30 | parser.add_argument('--model', type=str, default='irw_gan', help='chooses which model to use. [irw_gan | cycle_gan | pix2pix | test | colorization]') 31 | parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale') 32 | parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale') 33 | parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer') 34 | parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer') 35 | parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers') 36 | parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.') 37 | parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator') 38 | # dataset parameters 39 | parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]') 40 | parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA') 41 | parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') 42 | parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data') 43 | parser.add_argument('--load_size', type=int, default=286, help='scale images to this size') 44 | parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size') 45 | parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
46 | parser.add_argument('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]') 47 | parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation') 48 | parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML') 49 | # additional parameters 50 | parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information') 51 | parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}') 52 | parser.add_argument('--seed', type=int, default=None, help='random seed') 53 | # important parameters 54 | parser.add_argument('--normG', type=str, default='in', help='instance normalization or batch normalization [instance | batch | none]') 55 | parser.add_argument('--normD', type=str, default='none', help='instance normalization or batch normalization [instance | batch | none]') 56 | parser.add_argument('--initG', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]') 57 | parser.add_argument('--initD', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]') 58 | parser.add_argument('--batch_size', type=int, default=20, help='input batch size') 59 | parser.add_argument('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]') 60 | parser.add_argument('--netD', type=str, default='gl', help='specify discriminator architecture [basic | n_layers | pixel | local | global | gl | ms]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator') 61 | self.initialized = True 62 | return parser 63 | 64 | def gather_options(self): 65 | """Initialize our parser with basic options (only once). 66 | Add additional model-specific and dataset-specific options. 67 | These options are defined in the <modify_commandline_options> function 68 | in model and dataset classes. 69 | """ 70 | if not self.initialized: # check if it has been initialized 71 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) 72 | parser = self.initialize(parser) 73 | 74 | # get the basic options 75 | opt, _ = parser.parse_known_args() 76 | # modify model-related parser options 77 | model_name = opt.model 78 | model_option_setter = models.get_option_setter(model_name) 79 | parser = model_option_setter(parser, self.isTrain) 80 | opt, _ = parser.parse_known_args() # parse again with new defaults 81 | 82 | # modify dataset-related parser options 83 | dataset_name = opt.dataset_mode 84 | dataset_option_setter = data.get_option_setter(dataset_name) 85 | parser = dataset_option_setter(parser, self.isTrain) 86 | 87 | # save and return the parser 88 | self.parser = parser 89 | opt = parser.parse_args() 90 | return opt 91 |
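To make the two-stage parsing above concrete, here is a sketch of the hook it relies on (MyModel and --lambda_example are hypothetical; the pattern matches cycle_gan_model.py further below):

    class MyModel(BaseModel):
        @staticmethod
        def modify_commandline_options(parser, is_train=True):
            # invoked by gather_options() between the two parse_known_args() passes
            parser.add_argument('--lambda_example', type=float, default=1.0)
            parser.set_defaults(batch_size=8)   # a model may also override existing defaults
            return parser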
92 | def print_options(self, opt): 93 | """Print and save options 94 | It will print both current options and default values (if different). 95 | It will save options into a text file: [result_dir] / opt.txt 96 | """ 97 | message = '' 98 | message += '----------------- Options ---------------\n' 99 | for k, v in sorted(vars(opt).items()): 100 | comment = '' 101 | default = self.parser.get_default(k) 102 | if v != default: 103 | comment = '\t[default: %s]' % str(default) 104 | message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) 105 | message += '----------------- End -------------------' 106 | print(message) 107 | 108 | def parse(self): 109 | """Parse our options, create checkpoints directory suffix, and set up gpu device.""" 110 | opt = self.gather_options() 111 | opt.isTrain = self.isTrain # train or test 112 | # process opt.suffix 113 | if opt.suffix: 114 | suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else '' 115 | opt.name = opt.name + suffix 116 | 117 | #self.print_options(opt) 118 | 119 | # set gpu ids 120 | str_ids = opt.gpu_ids.split(',') 121 | opt.gpu_ids = [] 122 | for str_id in str_ids: 123 | id = int(str_id) 124 | if id >= 0: 125 | opt.gpu_ids.append(id) 126 | if len(opt.gpu_ids) > 0: 127 | torch.cuda.set_device(opt.gpu_ids[0]) 128 | 129 | self.opt = opt 130 | return self.opt 131 | -------------------------------------------------------------------------------- /util/util.py: -------------------------------------------------------------------------------- 1 | """This module contains simple helper functions """ 2 | from __future__ import print_function 3 | import torch 4 | import numpy as np 5 | from PIL import Image 6 | import os 7 | from typing import Any, List, Tuple, Union 8 | import sys 9 | import random 10 | 11 | def set_seed(seed=None): 12 | if seed is not None: 13 | torch.manual_seed(seed) 14 | np.random.seed(seed) 15 | torch.backends.cudnn.deterministic = True 16 | random.seed(seed) 17 | torch.cuda.manual_seed_all(seed) 18 | torch.backends.cudnn.benchmark = False # benchmark must stay off, or cudnn autotuning breaks the determinism requested above 19 | 20 | class Logger(object): 21 | """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file.""" 22 | 23 | def __init__(self, file_name: str = None, file_mode: str = "a", should_flush: bool = True, append=False): 24 | self.file = None 25 | 26 | if append: # note: the `append` flag supersedes the file_mode argument 27 | file_mode = 'a' 28 | else: 29 | file_mode = 'w' 30 | self.file = open(file_name, file_mode) 31 | 32 | self.should_flush = should_flush 33 | self.stdout = sys.stdout 34 | self.stderr = sys.stderr 35 | 36 | sys.stdout = self 37 | sys.stderr = self 38 | 39 | def __enter__(self) -> "Logger": 40 | return self 41 | 42 | def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: 43 | self.close() 44 | 45 | def write(self, text: str) -> None: 46 | """Write text to stdout (and a file) and optionally flush.""" 47 | if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash 48 | return 49 | 50 | if self.file is not None: 51 | self.file.write(text) 52 | 53 | self.stdout.write(text) 54 | 55 | if self.should_flush: 56 | self.flush() 57 | 58 | def flush(self) -> None: 59 | """Flush written text to both stdout and a file, if open.""" 60 | if self.file is not None: 61 | self.file.flush() 62 | 63 | self.stdout.flush() 64 | 65 | def close(self) -> None: 66 | """Flush, close possible files, and remove stdout/stderr mirroring.""" 67 | self.flush() 68 | 69 | # if using multiple loggers, prevent closing in wrong order 70 | if sys.stdout is self: 71 | sys.stdout = self.stdout 72 | if sys.stderr is self: 73 | sys.stderr = self.stderr 74 | 75 | if self.file is not None: 76 | self.file.close() 77 |
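Because Logger installs itself as sys.stdout/sys.stderr and undoes that in close(), it can guard a run as a context manager; a small sketch (the file name is illustrative):

    with Logger(file_name='training_log.txt', append=False):
        print('this line reaches both the console and training_log.txt')
    # on exit, sys.stdout and sys.stderr are restored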
78 | def write_loss(iterations, trainer, train_writer, prefix): 79 | members = [attr for attr in dir(trainer) \ 80 | if not callable(getattr(trainer, attr)) and not attr.startswith("__") and ( 81 | 'loss' in attr or 'grad' in attr or 'nwd' in attr ) and 'name' not in attr and 'pool' not in attr] 82 | for m in members: 83 | train_writer.add_scalar(prefix+'/'+m, getattr(trainer, m), iterations + 1) 84 | 85 | 86 | def format_time(seconds: Union[int, float]) -> str: 87 | """Convert the seconds to human readable string with days, hours, minutes and seconds.""" 88 | s = int(np.rint(seconds)) 89 | 90 | if s < 60: 91 | return "{0}s".format(s) 92 | elif s < 60 * 60: 93 | return "{0}m {1:02}s".format(s // 60, s % 60) 94 | elif s < 24 * 60 * 60: 95 | return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60) 96 | else: 97 | return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60) 98 | 99 | 100 | def tensor2im(input_image, imtype=np.uint8): 101 | """Converts a Tensor array into a numpy image array. 102 | 103 | Parameters: 104 | input_image (tensor) -- the input image tensor array 105 | imtype (type) -- the desired type of the converted numpy array 106 | """ 107 | if not isinstance(input_image, np.ndarray): 108 | if isinstance(input_image, torch.Tensor): # get the data from a variable 109 | image_tensor = input_image.data 110 | else: 111 | return input_image 112 | image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array 113 | if image_numpy.shape[0] == 1: # grayscale to RGB 114 | image_numpy = np.tile(image_numpy, (3, 1, 1)) 115 | image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 # post-processing: transpose and scaling 116 | else: # if it is a numpy array, do nothing 117 | image_numpy = input_image 118 | return image_numpy.astype(imtype) 119 | 120 | def str2bool(x): 121 | return x.lower() in ('true',) # note: ('true') without the comma is a plain string, which would turn this into a substring test 122 | 123 | def diagnose_network(net, name='network'): 124 | """Calculate and print the mean of average absolute gradients 125 | 126 | Parameters: 127 | net (torch network) -- Torch network 128 | name (str) -- the name of the network 129 | """ 130 | mean = 0.0 131 | count = 0 132 | for param in net.parameters(): 133 | if param.grad is not None: 134 | mean += torch.mean(torch.abs(param.grad.data)) 135 | count += 1 136 | if count > 0: 137 | mean = mean / count 138 | print(name) 139 | print(mean) 140 | 141 | 142 | def save_image(image_numpy, image_path, aspect_ratio=1.0): 143 | """Save a numpy image to the disk 144 | 145 | Parameters: 146 | image_numpy (numpy array) -- input numpy array 147 | image_path (str) -- the path of the image 148 | """ 149 | 150 | image_pil = Image.fromarray(image_numpy) 151 | h, w, _ = image_numpy.shape 152 | 153 | if aspect_ratio > 1.0: 154 | image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC) 155 | if aspect_ratio < 1.0: 156 | image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC) 157 | image_pil.save(image_path) 158 | 159 | 160 | def print_numpy(x, val=True, shp=False): 161 | """Print the mean, min, max, median, std, and size of a numpy array 162 | 163 | Parameters: 164 | val (bool) -- if print the values of the numpy array 165 | shp (bool) -- if print the shape of the numpy array 166 | """ 167 | x = x.astype(np.float64) 168 | if shp: 169 | print('shape,', x.shape) 170 | if val: 171 | x = x.flatten() 172 | print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % ( 173 | np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x))) 174 | 175 |
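A quick sketch of the helpers above (fake_B is a made-up [-1, 1] tensor; the output path is illustrative):

    fake_B = torch.tanh(torch.randn(1, 3, 256, 256))   # NCHW, values in [-1, 1]
    img = tensor2im(fake_B)                            # HWC uint8 array in [0, 255]
    save_image(img, 'fake_B.png')
    print(format_time(3661))                           # -> '1h 01m 01s'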
176 | def get_model_list(dirname, key, exclude='latest'): 177 | if os.path.exists(dirname) is False: 178 | return None 179 | gen_models = [os.path.join(dirname, f) for f in os.listdir(dirname) if 180 | os.path.isfile(os.path.join(dirname, f)) and key in f and ".pt" in f and exclude not in f] 181 | if len(gen_models) == 0: # the comprehension never yields None; guard against an empty directory before indexing [-1] 182 | return None 183 | gen_models.sort() 184 | last_model_name = gen_models[-1] 185 | return last_model_name 186 | 187 | 188 | def mkdirs(paths): 189 | """create empty directories if they don't exist 190 | 191 | Parameters: 192 | paths (str list) -- a list of directory paths 193 | """ 194 | if isinstance(paths, list) and not isinstance(paths, str): 195 | for path in paths: 196 | mkdir(path) 197 | else: 198 | mkdir(paths) 199 | 200 | 201 | def mkdir(path): 202 | """create a single empty directory if it didn't exist 203 | 204 | Parameters: 205 | path (str) -- a single directory path 206 | """ 207 | if not os.path.exists(path): 208 | os.makedirs(path) 209 | 210 | def adjust_dynamic_range(data, drange_in, drange_out): 211 | if drange_in != drange_out: 212 | scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (np.float32(drange_in[1]) - np.float32(drange_in[0])) 213 | bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale) 214 | data = data * scale + bias 215 | return data 216 | 217 | def create_image_grid(images, grid_size=None): 218 | assert images.ndim == 3 or images.ndim == 4 219 | num, img_w, img_h = images.shape[0], images.shape[-1], images.shape[-2] 220 | 221 | if grid_size is not None: 222 | grid_w, grid_h = tuple(grid_size) 223 | else: 224 | grid_w = max(int(np.ceil(np.sqrt(num))), 1) 225 | grid_h = max((num - 1) // grid_w + 1, 1) 226 | 227 | grid = np.zeros(list(images.shape[1:-2]) + [grid_h * img_h, grid_w * img_w], dtype=images.dtype) 228 | for idx in range(num): 229 | x = (idx % grid_w) * img_w 230 | y = (idx // grid_w) * img_h 231 | grid[..., y : y + img_h, x : x + img_w] = images[idx] 232 | return grid 233 | 234 | def convert_to_pil_image(image, drange=[-1,1]): 235 | assert image.ndim == 2 or image.ndim == 3 236 | if image.ndim == 3: 237 | if image.shape[0] == 1: 238 | image = image[0] # grayscale CHW => HW 239 | elif image.shape[1] > image.shape[0]: 240 | image = image.transpose(1, 2, 0) # CHW -> HWC 241 | 242 | image = adjust_dynamic_range(image, drange, [0,255]) 243 | image = np.rint(image).clip(0, 255).astype(np.uint8) 244 | fmt = 'RGB' if image.ndim == 3 else 'L' 245 | return Image.fromarray(image, fmt) 246 | 247 | def save_images(image, filename, drange=[-1,1], quality=95): 248 | img = convert_to_pil_image(image, drange) 249 | if '.jpg' in filename: 250 | img.save(filename, "JPEG", quality=quality, optimize=True) 251 | else: 252 | img.save(filename) 253 | def to_var(x): 254 | """Converts numpy to variable.""" 255 | if torch.cuda.is_available(): 256 | x = x.cuda() 257 | return torch.autograd.Variable(x) 258 | 259 | def to_data(x): 260 | """Converts variable to numpy.""" 261 | if torch.cuda.is_available(): 262 | x = x.cpu() 263 | return x.data.numpy() 264 | def save_image_grid(images, filename, drange=[-1,1], grid_size=None): 265 | convert_to_pil_image(create_image_grid(images, grid_size), drange).save(filename)
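A usage sketch for the grid helpers above (the batch is random data, purely for illustration):

    images = np.random.uniform(-1, 1, size=(8, 3, 64, 64)).astype(np.float32)   # NCHW in [-1, 1]
    save_image_grid(images, 'grid.jpg', drange=[-1, 1], grid_size=(4, 2))       # 4 columns x 2 rows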
-------------------------------------------------------------------------------- /models/cycle_gan_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import itertools 3 | from util.image_pool import ImagePool 4 | from .base_model import BaseModel 5 | from . import networks 6 | 7 | 8 | class CycleGANModel(BaseModel): 9 | """ 10 | This class implements the CycleGAN model, for learning image-to-image translation without paired data. 11 | 12 | The model training requires '--dataset_mode unaligned' dataset. 13 | By default, it uses a '--netG resnet_9blocks' ResNet generator, 14 | a '--netD basic' discriminator (PatchGAN introduced by pix2pix), 15 | and a least-squares GAN objective ('--gan_mode lsgan'). 16 | 17 | CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf 18 | """ 19 | @staticmethod 20 | def modify_commandline_options(parser, is_train=True): 21 | """Add new model-specific options, and rewrite default values for existing options. 22 | 23 | Parameters: 24 | parser -- original option parser 25 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. 26 | 27 | Returns: 28 | the modified parser. 29 | 30 | For CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses. 31 | A (source domain), B (target domain). 32 | Generators: G_A: A -> B; G_B: B -> A. 33 | Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A. 34 | Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper) 35 | Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper) 36 | Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper) 37 | Dropout is not used in the original CycleGAN paper. 38 | """ 39 | parser.set_defaults(no_dropout=True) # default CycleGAN did not use dropout 40 | if is_train: 41 | parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)') 42 | parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)') 43 | parser.add_argument('--lambda_identity', type=float, default=0.5, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1') 44 | 45 | return parser 46 | 47 | def __init__(self, opt): 48 | """Initialize the CycleGAN class. 49 | 50 | Parameters: 51 | opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions 52 | """ 53 | BaseModel.__init__(self, opt) 54 | # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses> 55 | self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B'] 56 | # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals> 57 | visual_names_A = ['real_A', 'fake_B', 'rec_A'] 58 | visual_names_B = ['real_B', 'fake_A', 'rec_B'] 59 | if self.isTrain and self.opt.lambda_identity > 0.0: # if identity loss is used, we also visualize idt_A=G_A(B) and idt_B=G_B(A) 60 | visual_names_A.append('idt_B') 61 | visual_names_B.append('idt_A') 62 | 63 | self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B 64 | # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
65 | if self.isTrain: 66 | self.model_names = ['G_A', 'G_B', 'D_A', 'D_B'] 67 | else: # during test time, only load Gs 68 | self.model_names = ['G_A', 'G_B'] 69 | 70 | # define networks (both Generators and discriminators) 71 | # The naming is different from those used in the paper. 72 | # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X) 73 | self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm, 74 | not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) 75 | self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm, 76 | not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) 77 | 78 | if self.isTrain: # define discriminators 79 | self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD, 80 | opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) 81 | self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD, 82 | opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) 83 | 84 | if self.isTrain: 85 | if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels 86 | assert(opt.input_nc == opt.output_nc) 87 | self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images 88 | self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images 89 | # define loss functions 90 | self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss. 91 | self.criterionCycle = torch.nn.L1Loss() 92 | self.criterionIdt = torch.nn.L1Loss() 93 | # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>. 94 | self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999)) 95 | self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999)) 96 | self.optimizers.append(self.optimizer_G) 97 | self.optimizers.append(self.optimizer_D) 98 | 99 | def set_input(self, input): 100 | """Unpack input data from the dataloader and perform necessary pre-processing steps. 101 | 102 | Parameters: 103 | input (dict): include the data itself and its metadata information. 104 | 105 | The option 'direction' can be used to swap domain A and domain B. 106 | """ 107 | AtoB = self.opt.direction == 'AtoB' 108 | self.real_A = input['A' if AtoB else 'B'].to(self.device) 109 | self.real_B = input['B' if AtoB else 'A'].to(self.device) 110 | self.image_paths = input['A_paths' if AtoB else 'B_paths'] 111 | 112 | def forward(self): 113 | """Run forward pass; called by both functions <optimize_parameters> and <test>.""" 114 | self.fake_B = self.netG_A(self.real_A) # G_A(A) 115 | self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A)) 116 | self.fake_A = self.netG_B(self.real_B) # G_B(B) 117 | self.rec_B = self.netG_A(self.fake_A) # G_A(G_B(B)) 118 | 119 | def backward_D_basic(self, netD, real, fake): 120 | """Calculate GAN loss for the discriminator 121 | 122 | Parameters: 123 | netD (network) -- the discriminator D 124 | real (tensor array) -- real images 125 | fake (tensor array) -- images generated by a generator 126 | 127 | Return the discriminator loss. 128 | We also call loss_D.backward() to calculate the gradients.
129 | """ 130 | # Real 131 | pred_real = netD(real) 132 | loss_D_real = self.criterionGAN(pred_real, True) 133 | # Fake 134 | pred_fake = netD(fake.detach()) 135 | loss_D_fake = self.criterionGAN(pred_fake, False) 136 | # Combined loss and calculate gradients 137 | loss_D = (loss_D_real + loss_D_fake) * 0.5 138 | loss_D.backward() 139 | return loss_D 140 | 141 | def backward_D_A(self): 142 | """Calculate GAN loss for discriminator D_A""" 143 | fake_B = self.fake_B_pool.query(self.fake_B) 144 | self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B) 145 | 146 | def backward_D_B(self): 147 | """Calculate GAN loss for discriminator D_B""" 148 | fake_A = self.fake_A_pool.query(self.fake_A) 149 | self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A) 150 | 151 | def backward_G(self): 152 | """Calculate the loss for generators G_A and G_B""" 153 | lambda_idt = self.opt.lambda_identity 154 | lambda_A = self.opt.lambda_A 155 | lambda_B = self.opt.lambda_B 156 | # Identity loss 157 | if lambda_idt > 0: 158 | # G_A should be identity if real_B is fed: ||G_A(B) - B|| 159 | self.idt_A = self.netG_A(self.real_B) 160 | self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt 161 | # G_B should be identity if real_A is fed: ||G_B(A) - A|| 162 | self.idt_B = self.netG_B(self.real_A) 163 | self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt 164 | else: 165 | self.loss_idt_A = 0 166 | self.loss_idt_B = 0 167 | 168 | # GAN loss D_A(G_A(A)) 169 | self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True) 170 | # GAN loss D_B(G_B(B)) 171 | self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True) 172 | # Forward cycle loss || G_B(G_A(A)) - A|| 173 | self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A 174 | # Backward cycle loss || G_A(G_B(B)) - B|| 175 | self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B 176 | # combined loss and calculate gradients 177 | self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B 178 | self.loss_G.backward() 179 | 180 | def optimize_parameters(self): 181 | """Calculate losses, gradients, and update network weights; called in every training iteration""" 182 | # forward 183 | self.forward() # compute fake images and reconstruction images. 184 | # G_A and G_B 185 | self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs 186 | self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero 187 | self.backward_G() # calculate gradients for G_A and G_B 188 | self.optimizer_G.step() # update G_A and G_B's weights 189 | # D_A and D_B 190 | self.set_requires_grad([self.netD_A, self.netD_B], True) 191 | self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero 192 | self.backward_D_A() # calculate gradients for D_A 193 | self.backward_D_B() # calculate graidents for D_B 194 | self.optimizer_D.step() # update D_A and D_B's weights 195 | -------------------------------------------------------------------------------- /util/visualizer.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import sys 4 | import ntpath 5 | import time 6 | from . 
-------------------------------------------------------------------------------- /util/visualizer.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import sys 4 | import ntpath 5 | import time 6 | from . import util, html 7 | from subprocess import Popen, PIPE 8 | 9 | if sys.version_info[0] == 2: 10 | VisdomExceptionBase = Exception 11 | else: 12 | VisdomExceptionBase = ConnectionError 13 | 14 | 15 | def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256): 16 | """Save images to the disk. 17 | 18 | Parameters: 19 | webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details) 20 | visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs 21 | image_path (str) -- the string is used to create image paths 22 | aspect_ratio (float) -- the aspect ratio of saved images 23 | width (int) -- the images will be resized to width x width 24 | 25 | This function will save images stored in 'visuals' to the HTML file specified by 'webpage'. 26 | """ 27 | image_dir = webpage.get_image_dir() 28 | short_path = ntpath.basename(image_path[0]) 29 | name = os.path.splitext(short_path)[0] 30 | 31 | webpage.add_header(name) 32 | ims, txts, links = [], [], [] 33 | 34 | for label, im_data in visuals.items(): 35 | im = util.tensor2im(im_data) 36 | image_name = '%s_%s.png' % (name, label) 37 | save_path = os.path.join(image_dir, image_name) 38 | util.save_image(im, save_path, aspect_ratio=aspect_ratio) 39 | ims.append(image_name) 40 | txts.append(label) 41 | links.append(image_name) 42 | webpage.add_images(ims, txts, links, width=width) 43 | 44 | 45 | class Visualizer(): 46 | """This class includes several functions that can display/save images and print/save logging information. 47 | 48 | It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images. 49 | """ 50 | 51 | def __init__(self, opt): 52 | """Initialize the Visualizer class 53 | 54 | Parameters: 55 | opt -- stores all the experiment flags; needs to be a subclass of BaseOptions 56 | Step 1: Cache the training/test options 57 | Step 2: connect to a visdom server 58 | Step 3: create an HTML object for saving HTML files 59 | Step 4: create a logging file to store training losses 60 | """ 61 | self.opt = opt # cache the option 62 | self.display_id = opt.display_id 63 | self.use_html = opt.isTrain and not opt.no_html 64 | self.win_size = opt.display_winsize 65 | self.name = opt.name 66 | self.port = opt.display_port 67 | self.saved = False 68 | if self.display_id > 0: # connect to a visdom server given <display_port> and <display_server> 69 | import visdom 70 | self.ncols = opt.display_ncols 71 | self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env) 72 | if not self.vis.check_connection(): 73 | self.create_visdom_connections() 74 | 75 | if self.use_html: # create an HTML object at <run_dir>/web/; images will be saved under <run_dir>/web/images/ 76 | self.web_dir = os.path.join(opt.run_dir, 'web') 77 | self.img_dir = os.path.join(self.web_dir, 'images') 78 | #print('create web directory %s...' % self.web_dir) 79 | util.mkdirs([self.web_dir, self.img_dir]) 80 | # create a logging file to store training losses 81 | #with open(self.log_name, "a") as log_file: 82 | # now = time.strftime("%c") 83 | # log_file.write('================ Training Loss (%s) ================\n' % now) 84 | self.betas = [[]] 85 |
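The per-epoch protocol main.py follows with this class, sketched for illustration (n_epochs, cur_iters, total_iters, and elapsed are placeholders):

    vis = Visualizer(opt)
    for epoch in range(1, n_epochs + 1):
        vis.reset()   # guarantees at least one HTML save per epoch
        ...
        vis.display_current_results(model.get_current_visuals(), epoch, save_result=True)
        vis.print_current_losses(cur_iters, total_iters, elapsed, model.get_current_losses())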
86 | def reset(self): 87 | """Reset the self.saved status""" 88 | self.saved = False 89 | 90 | def create_visdom_connections(self): 91 | """If the program could not connect to Visdom server, this function will start a new server at port < self.port >""" 92 | cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port 93 | print('\n\nCould not connect to Visdom server. \n Trying to start a server....') 94 | print('Command: %s' % cmd) 95 | Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) 96 | 97 | def display_current_results(self, visuals, epoch, save_result, betas=None): 98 | """Display current results on visdom; save current results to an HTML file. 99 | 100 | Parameters: 101 | visuals (OrderedDict) -- dictionary of images to display or save 102 | epoch (int) -- the current epoch 103 | save_result (bool) -- if save the current results to an HTML file 104 | """ 105 | if betas is not None: 106 | if len(self.betas) <= epoch: 107 | self.betas.append(betas) 108 | else: 109 | self.betas[epoch] = betas 110 | 111 | if self.display_id > 0: # show images in the browser using visdom 112 | ncols = self.ncols 113 | if ncols > 0: # show all the images in one visdom panel 114 | ncols = min(ncols, len(visuals)) 115 | h, w = next(iter(visuals.values())).shape[:2] 116 | table_css = """<style> 117 | table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center} 118 | table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black} 119 | </style>""" % (w, h) # create a table css 120 | # create a table of images. 121 | title = self.name 122 | label_html = '' 123 | label_html_row = '' 124 | images = [] 125 | idx = 0 126 | for label, image in visuals.items(): 127 | image_numpy = util.tensor2im(image) 128 | label_html_row += '<td>%s</td>' % label 129 | images.append(image_numpy.transpose([2, 0, 1])) 130 | idx += 1 131 | if idx % ncols == 0: 132 | label_html += '<tr>%s</tr>' % label_html_row 133 | label_html_row = '' 134 | white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255 135 | while idx % ncols != 0: 136 | images.append(white_image) 137 | label_html_row += '<td></td>' 138 | idx += 1 139 | if label_html_row != '': 140 | label_html += '<tr>%s</tr>' % label_html_row 141 | try: 142 | self.vis.images(images, nrow=ncols, win=self.display_id + 1, 143 | padding=2, opts=dict(title=title + ' images'))
144 | label_html = '<table>%s</table>' % label_html 145 | self.vis.text(table_css + label_html, win=self.display_id + 2, 146 | opts=dict(title=title + ' labels')) 147 | except VisdomExceptionBase: 148 | self.create_visdom_connections() 149 | 150 | else: # show each image in a separate visdom panel; 151 | idx = 1 152 | try: 153 | for label, image in visuals.items(): 154 | image_numpy = util.tensor2im(image) 155 | self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label), 156 | win=self.display_id + idx) 157 | idx += 1 158 | except VisdomExceptionBase: 159 | self.create_visdom_connections() 160 | 161 | if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved. 162 | self.saved = True 163 | # save images to the disk 164 | i = -1 165 | for label, image in visuals.items(): 166 | i += 1 167 | image_numpy = util.tensor2im(image) 168 | img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label)) 169 | util.save_image(image_numpy, img_path) 170 | 171 | # update website 172 | webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=200) 173 | for n in range(epoch, 0, -1): 174 | webpage.add_header('epoch [%d]' % n) 175 | ims, txts, links = [], [], [] 176 | if betas is not None: 177 | betas_epoch = self.betas[n] 178 | i = -1 179 | for label, image_numpy in visuals.items(): 180 | i += 1 181 | image_numpy = util.tensor2im(image_numpy) # convert the stored visual itself, not the stale `image` loop variable from above 182 | img_path = 'epoch%.3d_%s.png' % (n, label) 183 | ims.append(img_path) 184 | if betas is not None and betas_epoch[i] != '': 185 | txts.append(label+' '+betas_epoch[i]) 186 | else: 187 | txts.append(label) 188 | links.append(img_path) 189 | webpage.add_images(ims, txts, links, width=self.win_size) 190 | webpage.save() 191 | 192 | def plot_current_losses(self, epoch, counter_ratio, losses): 193 | """display the current losses on visdom display: dictionary of error labels and values 194 | 195 | Parameters: 196 | epoch (int) -- current epoch 197 | counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1 198 | losses (OrderedDict) -- training losses stored in the format of (name, float) pairs 199 | """ 200 | if not hasattr(self, 'plot_data'): 201 | self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())} 202 | self.plot_data['X'].append(epoch + counter_ratio) 203 | self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']]) 204 | try: 205 | self.vis.line( 206 | X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1), 207 | Y=np.array(self.plot_data['Y']), 208 | opts={ 209 | 'title': self.name + ' loss over time', 210 | 'legend': self.plot_data['legend'], 211 | 'xlabel': 'epoch', 212 | 'ylabel': 'loss'}, 213 | win=self.display_id) 214 | except VisdomExceptionBase: 215 | self.create_visdom_connections() 216 |
217 | # losses: same format as |losses| of plot_current_losses 218 | def print_current_losses(self, cur_iters, total_iters, t_comp, losses): 219 | """print current losses on console; also save the losses to the disk 220 | 221 | Parameters: 222 | cur_iters (int) -- current cumulative training iteration 223 | total_iters (int) -- total number of training iterations 224 | t_comp (float) -- elapsed training time in seconds 225 | losses (OrderedDict) -- training losses stored in the format of (name, float) pairs 226 | 227 | """ 228 | message = '(cur_iters: [%d/%07d], time: %-s) ' % (cur_iters, total_iters, util.format_time(t_comp)) 229 | for k, v in losses.items(): 230 | message += '%s: %.2f ' % (k, v) 231 | print(message) # print the message 232 | #with open(self.log_name, "a") as log_file: 233 | # log_file.write('%s\n' % message) # save the message 234 | -------------------------------------------------------------------------------- /models/base_model.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | from collections import OrderedDict 4 | from abc import ABC, abstractmethod 5 | from . import networks 6 | import util.util as util 7 | 8 | class BaseModel(ABC): 9 | """This class is an abstract base class (ABC) for models. 10 | To create a subclass, you need to implement the following five functions: 11 | -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt). 12 | -- <set_input>: unpack data from dataset and apply preprocessing. 13 | -- <forward>: produce intermediate results. 14 | -- <optimize_parameters>: calculate losses, gradients, and update network weights. 15 | -- <modify_commandline_options>: (optionally) add model-specific options and set default options. 16 | """ 17 |
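A minimal conforming subclass, sketched for illustration (IdentityModel is hypothetical; note that print_information is also abstract in this fork, so it must be overridden too):

    class IdentityModel(BaseModel):
        def __init__(self, opt):
            BaseModel.__init__(self, opt)
            self.loss_names, self.model_names, self.visual_names = [], [], []
        def set_input(self, input):
            self.real_A = input['A'].to(self.device)
        def forward(self):
            self.fake_B = self.real_A      # pass-through, just to satisfy the contract
        def optimize_parameters(self):
            self.forward()                 # nothing to optimize in this stub
        def print_information(self, opt):
            pass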
64 | """ 65 | pass 66 | 67 | @abstractmethod 68 | def forward(self): 69 | """Run forward pass; called by both functions and .""" 70 | pass 71 | 72 | @abstractmethod 73 | def optimize_parameters(self): 74 | """Calculate losses, gradients, and update network weights; called in every training iteration""" 75 | pass 76 | @property 77 | def model_name(self): 78 | return '' 79 | @abstractmethod 80 | def print_information(self, opt): 81 | pass 82 | def setup(self, opt): 83 | opt.run_dir = os.path.join(opt.result_dir, self.model_name) 84 | self.run_dir = opt.run_dir 85 | util.mkdirs(self.run_dir) 86 | util.mkdirs(os.path.join(self.run_dir, 'fakeB')) 87 | util.mkdirs(os.path.join(self.run_dir, 'fakeA')) 88 | util.mkdirs(os.path.join(self.run_dir, 'img')) 89 | util.mkdirs(os.path.join(self.run_dir, 'model')) 90 | util.mkdirs(os.path.join(self.run_dir, 'log')) 91 | eval_log = os.path.join(self.run_dir, 'metric-fid.txt') 92 | if opt.phase == 'train': 93 | f = open(eval_log, 'w') 94 | f.writelines('\n###################################\n') 95 | f.writelines('########### training ##############\n') 96 | f.writelines('###################################\n') 97 | f.close() 98 | if opt.phase == 'test': 99 | f = open(eval_log, 'a') 100 | f.writelines('\n###################################\n') 101 | f.writelines('############## test ###############\n') 102 | f.writelines('###################################\n') 103 | f.close() 104 | 105 | if opt.phase == 'train' or opt.phase == 'resume': 106 | log_name = os.path.join(self.run_dir, 'log', 'training_log.txt') 107 | self.logger = util.Logger(log_name, append=(opt.phase == 'resume')) 108 | 109 | self.print_information(opt) 110 | self.setup_networks(opt) 111 | 112 | def setup_networks(self, opt): 113 | """Load and print networks; create schedulers 114 | 115 | Parameters: 116 | opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions 117 | """ 118 | if opt.phase == 'train': 119 | # we already initialized the networks while constructing them 120 | pass 121 | 122 | if opt.phase == 'test': 123 | # load the latest model for testing 124 | if opt.epoch_count>0: 125 | print('[*] resuming from %d' %opt.epoch_count) 126 | load_path = os.path.join(self.run_dir, 'model', 'network-snapshot-%03d.pth' % opt.epoch_count) 127 | #load_path = os.path.join('results/selfie2anime-danbooru_lsgan_20_B_gl_1.0_thr0.1', 'model', 'network-snapshot-latest.pth') 128 | self.load_networks(load_path) 129 | 130 | if opt.phase == 'resume': 131 | # automatically load the model if resume flag is True 132 | latest_model_name = util.get_model_list(os.path.join(self.run_dir, 'model'), key='network', exclude='latest') 133 | self.load_networks(latest_model_name) 134 | opt.epoch_count = int(os.path.basename(latest_model_name).split('.')[0].split('-')[-1]) # setup the epoch_count to start with 135 | 136 | # put opt later as we load_networks will load optimizer as well 137 | if opt.phase == 'train' or opt.phase == 'resume': 138 | self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers] 139 | # we do not have to save lr_scheduler as it will be reflected in opt.epoch_count 140 | self.print_networks(opt.verbose) 141 | 142 | def eval(self): 143 | """Make models eval mode during test time""" 144 | for name in self.model_names: 145 | if isinstance(name, str): 146 | net = getattr(self, 'net' + name) 147 | net.eval() 148 | 149 | def test(self): 150 | """Forward function used in test time. 
151 | 152 | This function wraps the <forward> function in no_grad() so we don't save intermediate steps for backprop 153 | It also calls <compute_visuals> to produce additional visualization results 154 | """ 155 | with torch.no_grad(): 156 | self.forward() 157 | self.compute_visuals() 158 | 159 | def compute_visuals(self): 160 | """Calculate additional output images for visdom and HTML visualization""" 161 | pass 162 | 163 | def get_image_paths(self): 164 | """ Return image paths that are used to load current data""" 165 | return self.image_paths 166 | 167 | def update_learning_rate(self): 168 | """Update learning rates for all the networks; called at the end of every epoch""" 169 | old_lr = self.optimizers[0].param_groups[0]['lr'] 170 | for scheduler in self.schedulers: 171 | if self.opt.lr_policy == 'plateau': 172 | scheduler.step(self.metric) 173 | else: 174 | scheduler.step() 175 | 176 | lr = self.optimizers[0].param_groups[0]['lr'] 177 | print('learning rate %.7f -> %.7f' % (old_lr, lr)) 178 | 179 | def get_current_visuals(self): 180 | """Return visualization images. train.py will display these images with visdom, and save the images to an HTML""" 181 | visual_ret = OrderedDict() 182 | for name in self.visual_names: 183 | if isinstance(name, str): 184 | visual_ret[name] = getattr(self, name) 185 | return visual_ret 186 | 187 | def get_current_losses(self): 188 | """Return training losses / errors. train.py will print out these errors on console, and save them to a file""" 189 | errors_ret = OrderedDict() 190 | for name in self.loss_names: 191 | if isinstance(name, str): 192 | errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number 193 | return errors_ret 194 |
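The test-time calling convention these methods define, sketched (assumes a constructed model whose networks are already loaded):

    model.eval()                            # switch all registered nets to eval mode
    model.test()                            # forward() under torch.no_grad()
    visuals = model.get_current_visuals()   # OrderedDict: visual name -> image tensor
    paths = model.get_image_paths()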
195 | def save_networks(self, epoch, used_time): 196 | """Save all the networks (and optimizers) to the disk. 197 | 198 | Parameters: 199 | epoch (int or str) -- current epoch, or 'latest'; used in the file name 'network-snapshot-%s.pth'. used_time (float) is stored alongside so a resumed run keeps its wall clock. 200 | """ 201 | 202 | model_dict = {} 203 | if isinstance(epoch, str): 204 | save_filename = 'network-snapshot-%s.pth' % (epoch) 205 | else: 206 | save_filename = 'network-snapshot-%03d.pth' % (epoch) 207 | save_path = os.path.join(self.run_dir, 'model', save_filename) 208 | for name in self.model_names: 209 | if isinstance(name, str): 210 | net = getattr(self, name) 211 | if len(self.gpu_ids) > 0 and torch.cuda.is_available(): 212 | model_dict[name] = net.module.cpu().state_dict() 213 | net.cuda(self.gpu_ids[0]) # move the net back onto the GPU after snapshotting 214 | else: 215 | model_dict[name] = net.cpu().state_dict() 216 | # without CUDA the net stays on CPU (calling net.cuda() here, as the code previously did, would fail) 217 | 218 | # save optimizers 219 | for name in self.opt_names: 220 | if isinstance(name, str): 221 | opt = getattr(self, name) 222 | model_dict[name] = opt.state_dict() 223 | model_dict['used_time'] = used_time 224 | torch.save(model_dict, save_path) 225 | 226 | def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): 227 | """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)""" 228 | key = keys[i] 229 | if i + 1 == len(keys): # at the end, pointing to a parameter/buffer 230 | if module.__class__.__name__.startswith('InstanceNorm') and \ 231 | (key == 'running_mean' or key == 'running_var'): 232 | if getattr(module, key) is None: 233 | state_dict.pop('.'.join(keys)) 234 | if module.__class__.__name__.startswith('InstanceNorm') and \ 235 | (key == 'num_batches_tracked'): 236 | state_dict.pop('.'.join(keys)) 237 | else: 238 | self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) 239 | 240 | def load_networks(self, latest_model_name): 241 | """Load all the networks from the disk. 242 | 243 | Parameters: 244 | latest_model_name (str) -- full path of the checkpoint file to load 245 | """ 246 | load_path = latest_model_name 247 | model = torch.load(load_path, map_location=str(self.device)) 248 | print('Loading from %s' % load_path) 249 | print('Loading models...') 250 | for name in self.model_names: 251 | if isinstance(name, str): 252 | net = getattr(self, name) 253 | if isinstance(net, torch.nn.DataParallel): 254 | net = net.module 255 | net.load_state_dict(model[name]) 256 | # load optimizers 257 | if len(self.opt_names) > 0: 258 | print('Loading optimizers...') 259 | for name in self.opt_names: 260 | if isinstance(name, str): 261 | opt = getattr(self, name) 262 | opt.load_state_dict(model[name]) 263 | self.opt.used_time = model['used_time'] 264 | 265 | def print_networks(self, verbose): 266 | """Print the total number of parameters in the network and (if verbose) network architecture 267 | 268 | Parameters: 269 | verbose (bool) -- if verbose: print the network architecture 270 | """ 271 | if self.opt.phase == 'train': 272 | print('---------- Networks initialized -------------') 273 | else: 274 | print('-------------- Networks loaded ----------------') 275 | for name in self.model_names: 276 | if isinstance(name, str): 277 | net = getattr(self, name) 278 | num_params = 0 279 | for param in net.parameters(): 280 | num_params += param.numel() 281 | if verbose: 282 | print(net) 283 | print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6)) 284 | print('-----------------------------------------------') 285 | 286 | 287 | def set_requires_grad(self, nets, requires_grad=False): 288 | """Set requires_grad=False for all the networks to avoid unnecessary computations 289 | Parameters: 290 |
nets (network list) -- a list of networks 291 | requires_grad (bool) -- whether the networks require gradients or not 292 | """ 293 | if not isinstance(nets, list): 294 | nets = [nets] 295 | for net in nets: 296 | if net is not None: 297 | for param in net.parameters(): 298 | param.requires_grad = requires_grad 299 | -------------------------------------------------------------------------------- /scripts/eval_cityscapes/caffemodel/deploy.prototxt: -------------------------------------------------------------------------------- 1 | layer { 2 | name: "data" 3 | type: "Input" 4 | top: "data" 5 | input_param { 6 | shape { 7 | dim: 1 8 | dim: 3 9 | dim: 500 10 | dim: 500 11 | } 12 | } 13 | } 14 | layer { 15 | name: "conv1_1" 16 | type: "Convolution" 17 | bottom: "data" 18 | top: "conv1_1" 19 | param { 20 | lr_mult: 1 21 | decay_mult: 1 22 | } 23 | param { 24 | lr_mult: 2 25 | decay_mult: 0 26 | } 27 | convolution_param { 28 | num_output: 64 29 | pad: 100 30 | kernel_size: 3 31 | stride: 1 32 | weight_filler { 33 | type: "gaussian" 34 | std: 0.01 35 | } 36 | bias_filler { 37 | type: "constant" 38 | value: 0 39 | } 40 | } 41 | } 42 | layer { 43 | name: "relu1_1" 44 | type: "ReLU" 45 | bottom: "conv1_1" 46 | top: "conv1_1" 47 | } 48 | layer { 49 | name: "conv1_2" 50 | type: "Convolution" 51 | bottom: "conv1_1" 52 | top: "conv1_2" 53 | param { 54 | lr_mult: 1 55 | decay_mult: 1 56 | } 57 | param { 58 | lr_mult: 2 59 | decay_mult: 0 60 | } 61 | convolution_param { 62 | num_output: 64 63 | pad: 1 64 | kernel_size: 3 65 | stride: 1 66 | weight_filler { 67 | type: "gaussian" 68 | std: 0.01 69 | } 70 | bias_filler { 71 | type: "constant" 72 | value: 0 73 | } 74 | } 75 | } 76 | layer { 77 | name: "relu1_2" 78 | type: "ReLU" 79 | bottom: "conv1_2" 80 | top: "conv1_2" 81 | } 82 | layer { 83 | name: "pool1" 84 | type: "Pooling" 85 | bottom: "conv1_2" 86 | top: "pool1" 87 | pooling_param { 88 | pool: MAX 89 | kernel_size: 2 90 | stride: 2 91 | } 92 | } 93 | layer { 94 | name: "conv2_1" 95 | type: "Convolution" 96 | bottom: "pool1" 97 | top: "conv2_1" 98 | param { 99 | lr_mult: 1 100 | decay_mult: 1 101 | } 102 | param { 103 | lr_mult: 2 104 | decay_mult: 0 105 | } 106 | convolution_param { 107 | num_output: 128 108 | pad: 1 109 | kernel_size: 3 110 | stride: 1 111 | weight_filler { 112 | type: "gaussian" 113 | std: 0.01 114 | } 115 | bias_filler { 116 | type: "constant" 117 | value: 0 118 | } 119 | } 120 | } 121 | layer { 122 | name: "relu2_1" 123 | type: "ReLU" 124 | bottom: "conv2_1" 125 | top: "conv2_1" 126 | } 127 | layer { 128 | name: "conv2_2" 129 | type: "Convolution" 130 | bottom: "conv2_1" 131 | top: "conv2_2" 132 | param { 133 | lr_mult: 1 134 | decay_mult: 1 135 | } 136 | param { 137 | lr_mult: 2 138 | decay_mult: 0 139 | } 140 | convolution_param { 141 | num_output: 128 142 | pad: 1 143 | kernel_size: 3 144 | stride: 1 145 | weight_filler { 146 | type: "gaussian" 147 | std: 0.01 148 | } 149 | bias_filler { 150 | type: "constant" 151 | value: 0 152 | } 153 | } 154 | } 155 | layer { 156 | name: "relu2_2" 157 | type: "ReLU" 158 | bottom: "conv2_2" 159 | top: "conv2_2" 160 | } 161 | layer { 162 | name: "pool2" 163 | type: "Pooling" 164 | bottom: "conv2_2" 165 | top: "pool2" 166 | pooling_param { 167 | pool: MAX 168 | kernel_size: 2 169 | stride: 2 170 | } 171 | } 172 | layer { 173 | name: "conv3_1" 174 | type: "Convolution" 175 | bottom: "pool2" 176 | top: "conv3_1" 177 | param { 178 | lr_mult: 1 179 | decay_mult: 1 180 | } 181 | param { 182 | lr_mult: 2 183 | decay_mult: 0 184 | } 185 | 
convolution_param { 186 | num_output: 256 187 | pad: 1 188 | kernel_size: 3 189 | stride: 1 190 | weight_filler { 191 | type: "gaussian" 192 | std: 0.01 193 | } 194 | bias_filler { 195 | type: "constant" 196 | value: 0 197 | } 198 | } 199 | } 200 | layer { 201 | name: "relu3_1" 202 | type: "ReLU" 203 | bottom: "conv3_1" 204 | top: "conv3_1" 205 | } 206 | layer { 207 | name: "conv3_2" 208 | type: "Convolution" 209 | bottom: "conv3_1" 210 | top: "conv3_2" 211 | param { 212 | lr_mult: 1 213 | decay_mult: 1 214 | } 215 | param { 216 | lr_mult: 2 217 | decay_mult: 0 218 | } 219 | convolution_param { 220 | num_output: 256 221 | pad: 1 222 | kernel_size: 3 223 | stride: 1 224 | weight_filler { 225 | type: "gaussian" 226 | std: 0.01 227 | } 228 | bias_filler { 229 | type: "constant" 230 | value: 0 231 | } 232 | } 233 | } 234 | layer { 235 | name: "relu3_2" 236 | type: "ReLU" 237 | bottom: "conv3_2" 238 | top: "conv3_2" 239 | } 240 | layer { 241 | name: "conv3_3" 242 | type: "Convolution" 243 | bottom: "conv3_2" 244 | top: "conv3_3" 245 | param { 246 | lr_mult: 1 247 | decay_mult: 1 248 | } 249 | param { 250 | lr_mult: 2 251 | decay_mult: 0 252 | } 253 | convolution_param { 254 | num_output: 256 255 | pad: 1 256 | kernel_size: 3 257 | stride: 1 258 | weight_filler { 259 | type: "gaussian" 260 | std: 0.01 261 | } 262 | bias_filler { 263 | type: "constant" 264 | value: 0 265 | } 266 | } 267 | } 268 | layer { 269 | name: "relu3_3" 270 | type: "ReLU" 271 | bottom: "conv3_3" 272 | top: "conv3_3" 273 | } 274 | layer { 275 | name: "pool3" 276 | type: "Pooling" 277 | bottom: "conv3_3" 278 | top: "pool3" 279 | pooling_param { 280 | pool: MAX 281 | kernel_size: 2 282 | stride: 2 283 | } 284 | } 285 | layer { 286 | name: "conv4_1" 287 | type: "Convolution" 288 | bottom: "pool3" 289 | top: "conv4_1" 290 | param { 291 | lr_mult: 1 292 | decay_mult: 1 293 | } 294 | param { 295 | lr_mult: 2 296 | decay_mult: 0 297 | } 298 | convolution_param { 299 | num_output: 512 300 | pad: 1 301 | kernel_size: 3 302 | stride: 1 303 | weight_filler { 304 | type: "gaussian" 305 | std: 0.01 306 | } 307 | bias_filler { 308 | type: "constant" 309 | value: 0 310 | } 311 | } 312 | } 313 | layer { 314 | name: "relu4_1" 315 | type: "ReLU" 316 | bottom: "conv4_1" 317 | top: "conv4_1" 318 | } 319 | layer { 320 | name: "conv4_2" 321 | type: "Convolution" 322 | bottom: "conv4_1" 323 | top: "conv4_2" 324 | param { 325 | lr_mult: 1 326 | decay_mult: 1 327 | } 328 | param { 329 | lr_mult: 2 330 | decay_mult: 0 331 | } 332 | convolution_param { 333 | num_output: 512 334 | pad: 1 335 | kernel_size: 3 336 | stride: 1 337 | weight_filler { 338 | type: "gaussian" 339 | std: 0.01 340 | } 341 | bias_filler { 342 | type: "constant" 343 | value: 0 344 | } 345 | } 346 | } 347 | layer { 348 | name: "relu4_2" 349 | type: "ReLU" 350 | bottom: "conv4_2" 351 | top: "conv4_2" 352 | } 353 | layer { 354 | name: "conv4_3" 355 | type: "Convolution" 356 | bottom: "conv4_2" 357 | top: "conv4_3" 358 | param { 359 | lr_mult: 1 360 | decay_mult: 1 361 | } 362 | param { 363 | lr_mult: 2 364 | decay_mult: 0 365 | } 366 | convolution_param { 367 | num_output: 512 368 | pad: 1 369 | kernel_size: 3 370 | stride: 1 371 | weight_filler { 372 | type: "gaussian" 373 | std: 0.01 374 | } 375 | bias_filler { 376 | type: "constant" 377 | value: 0 378 | } 379 | } 380 | } 381 | layer { 382 | name: "relu4_3" 383 | type: "ReLU" 384 | bottom: "conv4_3" 385 | top: "conv4_3" 386 | } 387 | layer { 388 | name: "pool4" 389 | type: "Pooling" 390 | bottom: "conv4_3" 391 | top: "pool4" 392 | 
pooling_param { 393 | pool: MAX 394 | kernel_size: 2 395 | stride: 2 396 | } 397 | } 398 | layer { 399 | name: "conv5_1" 400 | type: "Convolution" 401 | bottom: "pool4" 402 | top: "conv5_1" 403 | param { 404 | lr_mult: 1 405 | decay_mult: 1 406 | } 407 | param { 408 | lr_mult: 2 409 | decay_mult: 0 410 | } 411 | convolution_param { 412 | num_output: 512 413 | pad: 1 414 | kernel_size: 3 415 | stride: 1 416 | weight_filler { 417 | type: "gaussian" 418 | std: 0.01 419 | } 420 | bias_filler { 421 | type: "constant" 422 | value: 0 423 | } 424 | } 425 | } 426 | layer { 427 | name: "relu5_1" 428 | type: "ReLU" 429 | bottom: "conv5_1" 430 | top: "conv5_1" 431 | } 432 | layer { 433 | name: "conv5_2" 434 | type: "Convolution" 435 | bottom: "conv5_1" 436 | top: "conv5_2" 437 | param { 438 | lr_mult: 1 439 | decay_mult: 1 440 | } 441 | param { 442 | lr_mult: 2 443 | decay_mult: 0 444 | } 445 | convolution_param { 446 | num_output: 512 447 | pad: 1 448 | kernel_size: 3 449 | stride: 1 450 | weight_filler { 451 | type: "gaussian" 452 | std: 0.01 453 | } 454 | bias_filler { 455 | type: "constant" 456 | value: 0 457 | } 458 | } 459 | } 460 | layer { 461 | name: "relu5_2" 462 | type: "ReLU" 463 | bottom: "conv5_2" 464 | top: "conv5_2" 465 | } 466 | layer { 467 | name: "conv5_3" 468 | type: "Convolution" 469 | bottom: "conv5_2" 470 | top: "conv5_3" 471 | param { 472 | lr_mult: 1 473 | decay_mult: 1 474 | } 475 | param { 476 | lr_mult: 2 477 | decay_mult: 0 478 | } 479 | convolution_param { 480 | num_output: 512 481 | pad: 1 482 | kernel_size: 3 483 | stride: 1 484 | weight_filler { 485 | type: "gaussian" 486 | std: 0.01 487 | } 488 | bias_filler { 489 | type: "constant" 490 | value: 0 491 | } 492 | } 493 | } 494 | layer { 495 | name: "relu5_3" 496 | type: "ReLU" 497 | bottom: "conv5_3" 498 | top: "conv5_3" 499 | } 500 | layer { 501 | name: "pool5" 502 | type: "Pooling" 503 | bottom: "conv5_3" 504 | top: "pool5" 505 | pooling_param { 506 | pool: MAX 507 | kernel_size: 2 508 | stride: 2 509 | } 510 | } 511 | layer { 512 | name: "fc6_cs" 513 | type: "Convolution" 514 | bottom: "pool5" 515 | top: "fc6_cs" 516 | param { 517 | lr_mult: 1 518 | decay_mult: 1 519 | } 520 | param { 521 | lr_mult: 2 522 | decay_mult: 0 523 | } 524 | convolution_param { 525 | num_output: 4096 526 | pad: 0 527 | kernel_size: 7 528 | stride: 1 529 | weight_filler { 530 | type: "gaussian" 531 | std: 0.01 532 | } 533 | bias_filler { 534 | type: "constant" 535 | value: 0 536 | } 537 | } 538 | } 539 | layer { 540 | name: "relu6_cs" 541 | type: "ReLU" 542 | bottom: "fc6_cs" 543 | top: "fc6_cs" 544 | } 545 | layer { 546 | name: "fc7_cs" 547 | type: "Convolution" 548 | bottom: "fc6_cs" 549 | top: "fc7_cs" 550 | param { 551 | lr_mult: 1 552 | decay_mult: 1 553 | } 554 | param { 555 | lr_mult: 2 556 | decay_mult: 0 557 | } 558 | convolution_param { 559 | num_output: 4096 560 | pad: 0 561 | kernel_size: 1 562 | stride: 1 563 | weight_filler { 564 | type: "gaussian" 565 | std: 0.01 566 | } 567 | bias_filler { 568 | type: "constant" 569 | value: 0 570 | } 571 | } 572 | } 573 | layer { 574 | name: "relu7_cs" 575 | type: "ReLU" 576 | bottom: "fc7_cs" 577 | top: "fc7_cs" 578 | } 579 | layer { 580 | name: "score_fr" 581 | type: "Convolution" 582 | bottom: "fc7_cs" 583 | top: "score_fr" 584 | param { 585 | lr_mult: 1 586 | decay_mult: 1 587 | } 588 | param { 589 | lr_mult: 2 590 | decay_mult: 0 591 | } 592 | convolution_param { 593 | num_output: 20 594 | pad: 0 595 | kernel_size: 1 596 | weight_filler { 597 | type: "xavier" 598 | } 599 | bias_filler { 
600 | type: "constant" 601 | } 602 | } 603 | } 604 | layer { 605 | name: "upscore2" 606 | type: "Deconvolution" 607 | bottom: "score_fr" 608 | top: "upscore2" 609 | param { 610 | lr_mult: 1 611 | } 612 | convolution_param { 613 | num_output: 20 614 | bias_term: false 615 | kernel_size: 4 616 | stride: 2 617 | weight_filler { 618 | type: "xavier" 619 | } 620 | bias_filler { 621 | type: "constant" 622 | } 623 | } 624 | } 625 | layer { 626 | name: "score_pool4" 627 | type: "Convolution" 628 | bottom: "pool4" 629 | top: "score_pool4" 630 | param { 631 | lr_mult: 1 632 | decay_mult: 1 633 | } 634 | param { 635 | lr_mult: 2 636 | decay_mult: 0 637 | } 638 | convolution_param { 639 | num_output: 20 640 | pad: 0 641 | kernel_size: 1 642 | weight_filler { 643 | type: "xavier" 644 | } 645 | bias_filler { 646 | type: "constant" 647 | } 648 | } 649 | } 650 | layer { 651 | name: "score_pool4c" 652 | type: "Crop" 653 | bottom: "score_pool4" 654 | bottom: "upscore2" 655 | top: "score_pool4c" 656 | crop_param { 657 | axis: 2 658 | offset: 5 659 | } 660 | } 661 | layer { 662 | name: "fuse_pool4" 663 | type: "Eltwise" 664 | bottom: "upscore2" 665 | bottom: "score_pool4c" 666 | top: "fuse_pool4" 667 | eltwise_param { 668 | operation: SUM 669 | } 670 | } 671 | layer { 672 | name: "upscore_pool4" 673 | type: "Deconvolution" 674 | bottom: "fuse_pool4" 675 | top: "upscore_pool4" 676 | param { 677 | lr_mult: 1 678 | } 679 | convolution_param { 680 | num_output: 20 681 | bias_term: false 682 | kernel_size: 4 683 | stride: 2 684 | weight_filler { 685 | type: "xavier" 686 | } 687 | bias_filler { 688 | type: "constant" 689 | } 690 | } 691 | } 692 | layer { 693 | name: "score_pool3" 694 | type: "Convolution" 695 | bottom: "pool3" 696 | top: "score_pool3" 697 | param { 698 | lr_mult: 1 699 | decay_mult: 1 700 | } 701 | param { 702 | lr_mult: 2 703 | decay_mult: 0 704 | } 705 | convolution_param { 706 | num_output: 20 707 | pad: 0 708 | kernel_size: 1 709 | weight_filler { 710 | type: "xavier" 711 | } 712 | bias_filler { 713 | type: "constant" 714 | } 715 | } 716 | } 717 | layer { 718 | name: "score_pool3c" 719 | type: "Crop" 720 | bottom: "score_pool3" 721 | bottom: "upscore_pool4" 722 | top: "score_pool3c" 723 | crop_param { 724 | axis: 2 725 | offset: 9 726 | } 727 | } 728 | layer { 729 | name: "fuse_pool3" 730 | type: "Eltwise" 731 | bottom: "upscore_pool4" 732 | bottom: "score_pool3c" 733 | top: "fuse_pool3" 734 | eltwise_param { 735 | operation: SUM 736 | } 737 | } 738 | layer { 739 | name: "upscore8" 740 | type: "Deconvolution" 741 | bottom: "fuse_pool3" 742 | top: "upscore8" 743 | param { 744 | lr_mult: 1 745 | } 746 | convolution_param { 747 | num_output: 20 748 | bias_term: false 749 | kernel_size: 16 750 | stride: 8 751 | weight_filler { 752 | type: "xavier" 753 | } 754 | bias_filler { 755 | type: "constant" 756 | } 757 | } 758 | } 759 | layer { 760 | name: "score" 761 | type: "Crop" 762 | bottom: "upscore8" 763 | bottom: "data" 764 | top: "score" 765 | crop_param { 766 | axis: 2 767 | offset: 31 768 | } 769 | } 770 | -------------------------------------------------------------------------------- /models/irw_gan_model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import itertools 3 | from util.image_pool import ImagePool 4 | from .base_model import BaseModel 5 | from . import networks 6 | from . 
import loss 7 | from util.util import to_data 8 | import os 9 | 10 | class IRWGANModel(BaseModel): 11 | """ 12 | This class implements the IrwGAN model, an importance-reweighting extension of CycleGAN, for learning image-to-image translation from unaligned data without paired supervision. 13 | 14 | The model training requires the '--dataset_mode unaligned' dataset. 15 | By default, it uses a '--netG resnet_9blocks' ResNet generator, 16 | a '--netD basic' discriminator (the PatchGAN introduced by pix2pix), 17 | and a least-squares GAN objective ('--gan_mode lsgan'). 18 | 19 | CycleGAN paper: https://arxiv.org/pdf/1703.10593.pdf 20 | """ 21 | @staticmethod 22 | def modify_commandline_options(parser, is_train=True): 23 | """Add new model-specific options, and rewrite default values for existing options. 24 | 25 | Parameters: 26 | parser -- original option parser 27 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. 28 | 29 | Returns: 30 | the modified parser. 31 | 32 | As in CycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B, and lambda_identity for the following losses. 33 | A (source domain), B (target domain). 34 | Generators: G_A: A -> B; G_B: B -> A. 35 | Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A. 36 | Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| (Eqn. (2) in the paper) 37 | Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| (Eqn. (2) in the paper) 38 | Identity loss (optional): lambda_identity * (||G_A(B) - B|| * lambda_B + ||G_B(A) - A|| * lambda_A) (Sec 5.2 "Photo generation from paintings" in the paper) 39 | Dropout is not used in the original CycleGAN paper. 40 | """ 41 | parser.set_defaults(no_dropout=True) # the original CycleGAN did not use dropout 42 | if is_train: 43 | parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)') 44 | parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)') 45 | parser.add_argument('--lambda_identity', type=float, default=1.0, help='use identity mapping. Setting lambda_identity other than 0 has an effect of scaling the weight of the identity mapping loss. For example, if the weight of the identity loss should be 10 times smaller than the weight of the reconstruction loss, please set lambda_identity = 0.1') 46 | parser.add_argument('--lambda_nos_A', type=float, default=1.0, help='weight for controlling the sparsity of beta for domain A') 47 | parser.add_argument('--lambda_nos_B', type=float, default=1.0, help='weight for controlling the sparsity of beta for domain B') 48 | 49 | return parser 50 | 51 | def __init__(self, opt): 52 | """Initialize the IRWGANModel class. 53 | 54 | Parameters: 55 | opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions 56 | """ 57 | BaseModel.__init__(self, opt) 58 | # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses> 59 | self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B'] 60 | if opt.beta_mode in ['A', 'AB']: 61 | self.loss_names += ['nos_A'] 62 | if opt.beta_mode in ['B', 'AB']: 63 | self.loss_names += ['nos_B'] 64 | 65 | # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals> 66 | visual_names_A = ['real_A', 'fake_B', 'cycle_A', 'idt_A'] 67 | visual_names_B = ['real_B', 'fake_A', 'cycle_B', 'idt_B'] 68 | 69 | self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B 70 | # specify the models you want to save to the disk.
The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>. 71 | if opt.phase == 'train' or opt.phase == 'resume' or opt.phase == 'test': 72 | self.model_names = ['gen_a2b', 'gen_b2a', 'dis_a', 'dis_b', 'beta_net_a', 'beta_net_b'] 73 | self.opt_names = ['optimizer_G', 'optimizer_D', 'optimizer_B'] 74 | else: # for any other phase, only load the generators and beta nets 75 | self.model_names = ['gen_a2b', 'gen_b2a', 'beta_net_a', 'beta_net_b'] 76 | self.opt_names = [] 77 | 78 | # define networks (both generators and discriminators) 79 | # The naming is different from that used in the paper. 80 | # Code (vs. paper): gen_a2b (G), gen_b2a (F), dis_a (D_Y), dis_b (D_X) 81 | self.gen_a2b = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.normG, 82 | not opt.no_dropout, opt.initG, opt.init_gain, self.gpu_ids) 83 | self.gen_b2a = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.normG, 84 | not opt.no_dropout, opt.initG, opt.init_gain, self.gpu_ids) 85 | self.beta_net_a = networks.define_BetaNet(opt.input_nc, opt.ndf, 4, opt.normG, opt.initG, opt.init_gain, 86 | self.gpu_ids, is_use=('A' in opt.beta_mode)) 87 | self.beta_net_b = networks.define_BetaNet(opt.input_nc, opt.ndf, 4, opt.normG, opt.initG, opt.init_gain, 88 | self.gpu_ids, is_use=('B' in opt.beta_mode)) 89 | 90 | if opt.phase == 'train' or opt.phase == 'resume' or opt.phase == 'test': # define discriminators 91 | self.dis_a = networks.define_D(opt.output_nc, opt.ndf, opt.netD, 92 | opt.n_layers_D, opt.normD, opt.sn, opt.initD, opt.init_gain, self.gpu_ids) 93 | self.dis_b = networks.define_D(opt.input_nc, opt.ndf, opt.netD, 94 | opt.n_layers_D, opt.normD, opt.sn, opt.initD, opt.init_gain, self.gpu_ids) 95 | 96 | if opt.phase == 'train' or opt.phase == 'resume' or opt.phase == 'test': 97 | if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels 98 | assert(opt.input_nc == opt.output_nc) 99 | # define loss functions; each criterion weights its per-sample terms by beta, so low-beta samples are effectively ignored.
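# NOTE: the concrete criterion construction (self.gan_criterion_a2b, self.gan_criterion_b2a, self.criterionCycle, self.criterionIdt) is elided from this listing. A hedged sketch of the interface the call sites below assume -- not the repository's actual `loss` module:
#
#     def weighted_l1(pred, target, beta):
#         per_sample = (pred - target).abs().mean(dim=[1, 2, 3])  # L1 per image
#         return (beta * per_sample).mean()                       # importance-reweighted by beta
#
# self.criterionCycle and self.criterionIdt behave like weighted_l1 above, and each GAN criterion exposes dis_loss(real, beta_real, fake, beta_fake) plus gen_loss(fake, beta), the latter returning both the beta-weighted loss and the unweighted per-sample loss that later trains the beta networks.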
105 | self.optimizer_G = torch.optim.Adam(itertools.chain(self.gen_a2b.parameters(), self.gen_b2a.parameters()), 106 | lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay) 107 | self.optimizer_D = torch.optim.Adam(itertools.chain(self.dis_a.parameters(), self.dis_b.parameters()), 108 | lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay) 109 | self.optimizer_B = torch.optim.Adam(itertools.chain(self.beta_net_a.parameters(), self.beta_net_b.parameters()), 110 | lr=opt.lr, betas=(opt.beta1, 0.999), weight_decay=opt.weight_decay) 111 | self.optimizers.append(self.optimizer_G) 112 | self.optimizers.append(self.optimizer_D) 113 | self.optimizers.append(self.optimizer_B) 114 | 115 | def print_information(self, opt): 116 | print('#### Information ####') 117 | print('# task: %s' % opt.task) 118 | print('# phase: %s' % opt.phase) 119 | print('# gan_type: %s' % opt.gan_type) 120 | print('# netD: %s' % opt.netD) 121 | print('# trainA_size: %d' % opt.trainA_size) 122 | print('# trainB_size: %d' % opt.trainB_size) 123 | print('# testA_size: %d' % opt.testA_size) 124 | print('# testB_size: %d' % opt.testB_size) 125 | print() 126 | print('#### Weight ####') 127 | print('# lambda_A: %.1f' % opt.lambda_A) 128 | print('# lambda_B: %.1f' % opt.lambda_B) 129 | print('# lambda_identity: %.1f' % opt.lambda_identity) 130 | print() 131 | print('#### Model Specific ####') 132 | print('# beta_mode: %s' % opt.beta_mode) 133 | print('# threshold: %s' % opt.threshold) 134 | print('# batch_size: %d' % opt.batch_size) 135 | print('# lambda_nos_A: %.1f' % opt.lambda_nos_A) 136 | print('# lambda_nos_B: %.1f' % opt.lambda_nos_B) 137 | print() 138 | 139 | @property 140 | def model_name(self): 141 | opt = self.opt 142 | opt.task = self.opt.dataroot.strip('/').split('/')[-1] 143 | sn = '_sn' if self.opt.sn else '' 144 | name = "IrwGAN_{}_{}_{}_{}_{}".format(opt.task, opt.gan_type, opt.batch_size, opt.beta_mode, opt.netD) 145 | if opt.beta_mode in ['A', 'AB']: 146 | name += "_{}".format(opt.lambda_nos_A) 147 | if opt.beta_mode in ['B', 'AB']: 148 | name += "_{}".format(opt.lambda_nos_B) 149 | if opt.beta_mode in ['A', 'B', 'AB']: 150 | name += '_thr{}'.format(opt.threshold) 151 | name += sn 152 | return name 153 | 154 | def set_input(self, input): 155 | """Unpack input data from the dataloader and perform necessary pre-processing steps. 156 | 157 | Parameters: 158 | input (dict): include the data itself and its metadata information. 159 | 160 | Batches from domain A and domain B, together with their image paths, are stored on the model.
161 | """ 162 | self.real_As = input['A'].to(self.device) 163 | self.real_Bs = input['B'].to(self.device) 164 | self.image_paths_A = input['A_paths'] 165 | self.image_paths_B = input['B_paths'] 166 | 167 | def test(self, x_A, x_B): 168 | """Run the forward pass on test images with gradients disabled and collect the real, fake, cycle, and identity outputs.""" 169 | self.gen_a2b.eval() 170 | self.gen_b2a.eval() 171 | real_A_pool = [] 172 | real_B_pool = [] 173 | fake_B_pool = [] 174 | fake_A_pool = [] 175 | cycle_A_pool = [] 176 | cycle_B_pool = [] 177 | idt_A_pool = [] 178 | idt_B_pool = [] 179 | with torch.no_grad(): 180 | for i in range(len(x_A)): 181 | real_A = x_A[i:i+1]; real_A_pool.append(real_A) 182 | real_B = x_B[i:i+1]; real_B_pool.append(real_B) 183 | fake_B = self.gen_a2b(real_A).detach(); fake_B_pool.append(fake_B) 184 | cycle_A = self.gen_b2a(fake_B).detach(); cycle_A_pool.append(cycle_A) 185 | idt_A = self.gen_b2a(real_A).detach(); idt_A_pool.append(idt_A) 186 | fake_A = self.gen_b2a(real_B).detach(); fake_A_pool.append(fake_A) 187 | cycle_B = self.gen_a2b(fake_A).detach(); cycle_B_pool.append(cycle_B) 188 | idt_B = self.gen_a2b(real_B).detach(); idt_B_pool.append(idt_B) 189 | 190 | return [real_A_pool, fake_B_pool, cycle_A_pool, idt_A_pool, real_B_pool, fake_A_pool, cycle_B_pool, idt_B_pool] 191 | 192 | def forward(self, x_A, x_B): 193 | """Run forward pass; called by <optimize_parameters>.""" 194 | self.real_A = x_A 195 | self.real_B = x_B 196 | self.fake_B = self.gen_a2b(self.real_A) # G_A(A) 197 | self.cycle_A = self.gen_b2a(self.fake_B) # G_B(G_A(A)) 198 | self.idt_A = self.gen_b2a(self.real_A) # G_B(A) 199 | self.fake_A = self.gen_b2a(self.real_B) # G_B(B) 200 | self.cycle_B = self.gen_a2b(self.fake_A) # G_A(G_B(B)) 201 | self.idt_B = self.gen_a2b(self.real_B) # G_A(B) 202 | 203 | def compute_loss_D(self, real_a, real_b, fake_a, fake_b, beta_a, beta_b): 204 | self.loss_D_A = self.gan_criterion_a2b.dis_loss(real_b, beta_b, fake_b, beta_a) 205 | self.loss_D_B = self.gan_criterion_b2a.dis_loss(real_a, beta_a, fake_a, beta_b) 206 | self.loss_D = self.loss_D_A + self.loss_D_B 207 | return self.loss_D 208 | 209 | def compute_loss_G(self, beta_a, beta_b): 210 | """ 211 | Calculate the loss for generators G_A and G_B. 212 | Note that only the unweighted generator loss is reused later to update the beta networks.
213 | """ 214 | lambda_idt = self.opt.lambda_identity 215 | lambda_A = self.opt.lambda_A 216 | lambda_B = self.opt.lambda_B 217 | 218 | # G_B should be identity if real_A is fed: ||G_B(A) - A|| 219 | self.loss_idt_A = self.criterionIdt(self.idt_A, self.real_A, beta_a.detach()) * lambda_A * lambda_idt 220 | # G_A should be identity if real_B is fed: ||G_A(B) - B|| 221 | self.loss_idt_B = self.criterionIdt(self.idt_B, self.real_B, beta_b.detach()) * lambda_B * lambda_idt 222 | # GAN loss D_A(G_A(A)) 223 | self.loss_G_A, self.loss_unweight_A = self.gan_criterion_a2b.gen_loss(self.fake_B, beta_a.detach()) 224 | # GAN loss D_B(G_B(B)) 225 | self.loss_G_B, self.loss_unweight_B = self.gan_criterion_b2a.gen_loss(self.fake_A, beta_b.detach()) 226 | # Forward cycle loss ||G_B(G_A(A)) - A|| 227 | self.loss_cycle_A = self.criterionCycle(self.cycle_A, self.real_A, beta_a.detach()) * lambda_A 228 | # Backward cycle loss ||G_A(G_B(B)) - B|| 229 | self.loss_cycle_B = self.criterionCycle(self.cycle_B, self.real_B, beta_b.detach()) * lambda_B 230 | # combined loss and calculate gradients 231 | self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_cycle_A + self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B 232 | return self.loss_G 233 | 234 | def optimize_parameters(self): 235 | """ 236 | Three steps: 237 | train G_A and G_B 238 | train D_A and D_B 239 | train beta_net_A and beta_net_B 240 | """ 241 | 242 | #------------------------------------------------------------------ 243 | # train G_A and G_B 244 | self.set_requires_grad([self.dis_a, self.dis_b, self.beta_net_a, self.beta_net_b], False) # Ds and beta nets require no gradients when optimizing Gs 245 | self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero 246 | self.fake_A_pool = []; self.fake_B_pool = [] 247 | self.beta_a_pool = []; self.beta_b_pool = [] 248 | self.loss_unweight_A_pool = []; self.loss_unweight_B_pool = [] 249 | self.loss_G_A_pool = []; self.loss_G_B_pool = [] 250 | batch_size = self.opt.batch_size 251 | # use gradient accumulation to mimic batch-wise training 252 | beta_as = self.beta_net_a(self.real_As).detach() 253 | beta_bs = self.beta_net_b(self.real_Bs).detach() 254 | for i in range(batch_size): 255 | x_A = self.real_As[i:i+1]; x_B = self.real_Bs[i:i+1] 256 | self.beta_a = beta_as[i]; self.beta_b = beta_bs[i] 257 | # forward and backward loss 258 | self.forward(x_A, x_B) # compute fake images and reconstruction images.
259 | loss_G = self.compute_loss_G(self.beta_a, self.beta_b) / batch_size # calculate gradients for G_A and G_B 260 | loss_G.backward() # backward for G_A, G_B update 261 | self.fake_B_pool.append(self.fake_B.detach()) # buffer for training D 262 | self.fake_A_pool.append(self.fake_A.detach()) 263 | self.beta_a_pool.append(self.beta_a.detach()) 264 | self.beta_b_pool.append(self.beta_b.detach()) 265 | self.loss_unweight_A_pool.append(self.loss_unweight_A) # buffer for training beta_net 266 | self.loss_unweight_B_pool.append(self.loss_unweight_B) 267 | self.optimizer_G.step() # update G_A and G_B's weights 268 | 269 | #------------------------------------------------------------------ 270 | # train D_A and D_B 271 | self.set_requires_grad([self.dis_a, self.dis_b], True) 272 | self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero 273 | for i in range(batch_size): 274 | loss_D = self.compute_loss_D(self.real_As[i:i+1], self.real_Bs[i:i+1], 275 | self.fake_A_pool[i], self.fake_B_pool[i], 276 | self.beta_a_pool[i], self.beta_b_pool[i]) / batch_size # calculate gradients for D_A and D_B 277 | loss_D.backward() 278 | self.optimizer_D.step() # update D_A and D_B's weights 279 | 280 | #------------------------------------------------------------------ 281 | # train beta_net_A, beta_net_B 282 | self.loss_nos_A = 0 283 | self.loss_nos_B = 0 284 | if self.opt.beta_mode in ['A', 'B', 'AB']: 285 | self.set_requires_grad([self.beta_net_a, self.beta_net_b], True) 286 | self.optimizer_B.zero_grad() 287 | beta_as = self.beta_net_a(self.real_As.detach()) 288 | beta_bs = self.beta_net_b(self.real_Bs.detach()) 289 | self.loss_beta_A = 0 290 | self.loss_beta_B = 0 291 | for i in range(batch_size): 292 | beta_a = beta_as[i] 293 | beta_b = beta_bs[i] 294 | self.loss_beta_A += (beta_a * self.loss_unweight_A_pool[i]/batch_size) 295 | self.loss_beta_B += (beta_b * self.loss_unweight_B_pool[i]/batch_size) 296 | self.loss_nos_A = torch.norm(beta_as) / batch_size # norm of beta acts as the sparsity penalty 297 | self.loss_nos_B = torch.norm(beta_bs) / batch_size 298 | nos_loss = self.opt.lambda_nos_A * self.loss_nos_A + self.opt.lambda_nos_B * self.loss_nos_B 299 | self.loss_beta = self.loss_beta_A + self.loss_beta_B + nos_loss 300 | self.loss_beta.backward() 301 | self.optimizer_B.step() 302 | 303 | #------------------------------------------------------------------ 304 | # save input images and betas 305 | images = torch.cat([self.real_As.detach(), self.real_Bs.detach()], 0) 306 | fake_images = torch.cat(self.fake_B_pool + self.fake_A_pool, dim=0) 307 | betas = self.beta_a_pool + self.beta_b_pool 308 | return images, fake_images, betas 309 | 310 | def get_betas(self, x_as, x_bs): 311 | self.beta_net_a.eval() 312 | self.beta_net_b.eval() 313 | with torch.no_grad(): 314 | beta_as = self.beta_net_a(x_as).detach() 315 | beta_bs = self.beta_net_b(x_bs).detach() 316 | self.beta_net_a.train() 317 | self.beta_net_b.train() 318 | images = torch.cat([x_as, x_bs], 0) 319 | betas = torch.cat([beta_as, beta_bs], 0) 320 | return images, betas 321 | 322 | def debug(self, x_as, x_bs): 323 | with torch.no_grad(): 324 | print('Debugging trainA...') 325 | for i in range(len(x_as)): 326 | x = x_as[i].unsqueeze(0) 327 | dis_fake_b = self.dis_b(self.gen_a2b(x)) 328 | dis_raw_a = self.dis_b(x) 329 | if len(dis_fake_b) == 1: 330 | print(dis_raw_a[0].mean().item(), 331 | dis_fake_b[0].mean().item()) 332 | else: 333 | print(dis_raw_a[0].mean().item(), dis_raw_a[1].mean().item(), 334 | dis_fake_b[0].mean().item(), dis_fake_b[1].mean().item()) 335 | print('Debugging trainB...') 336 |
for i in range(len(x_bs)): 337 | x = x_bs[i].unsqueeze(0) 338 | dis_fake_a = self.dis_a(self.gen_b2a(x)) 339 | dis_raw_b = self.dis_a(x) 340 | if len(dis_fake_a) == 1: 341 | print(dis_raw_b[0].mean().item(), 342 | dis_fake_a[0].mean().item()) 343 | else: 344 | print(dis_raw_b[0].mean().item(), dis_raw_b[1].mean().item(), 345 | dis_fake_a[0].mean().item(), dis_fake_a[1].mean().item()) 346 | 347 | 348 | --------------------------------------------------------------------------------
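# Usage sketch (annotation, not repository code): one way the optimize_parameters() loop above can be driven. TrainOptions and create_dataset follow the layout of this repo's options/ and data/ packages, but their exact behavior here is assumed, and n_epochs is a hypothetical schedule variable.
#
#     from options.train_options import TrainOptions
#     from data import create_dataset
#     from models.irw_gan_model import IRWGANModel
#
#     opt = TrainOptions().parse()       # e.g. --dataroot ../datasets/selfie2anime --beta_mode AB
#     dataset = create_dataset(opt)      # yields dicts with 'A', 'B', 'A_paths', 'B_paths'
#     model = IRWGANModel(opt)
#     model.print_networks(verbose=False)
#     for epoch in range(n_epochs):
#         for data in dataset:
#             model.set_input(data)                               # stash real_As / real_Bs
#             images, fakes, betas = model.optimize_parameters()  # G step, D step, beta step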