├── util
│   ├── __init__.py
│   ├── image_pool.py
│   ├── html.py
│   ├── util.py
│   ├── get_data.py
│   └── visualizer.py
├── data
│   ├── image_folder.py
│   ├── aligned_dataset.py
│   ├── template_dataset.py
│   ├── __init__.py
│   └── base_dataset.py
├── models
│   ├── __init__.py
│   ├── paired_cycle_gan_eye_model.py
│   ├── paired_cycle_gan_mouth_model.py
│   ├── base_model.py
│   └── networks.py
├── options
│   ├── __init__.py
│   ├── test_options.py
│   ├── train_options.py
│   └── base_options.py
├── datasets
│   ├── imagesize.py
│   ├── combine_A_and_B.py
│   ├── make_dataset_aligned.py
│   ├── remove.py
│   ├── face_parse.py
│   └── poisson_blending.py
├── README.md
├── .gitignore
├── test.py
└── train.py

/util/__init__.py:
--------------------------------------------------------------------------------
"""This package includes a miscellaneous collection of useful helper functions."""
--------------------------------------------------------------------------------
/options/__init__.py:
--------------------------------------------------------------------------------
"""This package includes the option modules: training options, test options, and basic options (used in both training and test)."""
--------------------------------------------------------------------------------
/datasets/imagesize.py:
--------------------------------------------------------------------------------
"""Print the size of every image under each split of a dataset directory (a quick sanity check)."""
from PIL import Image
import os, os.path as OP

print("current dir:\t", OP.abspath(os.getcwd()))
print("app dir:\t", OP.abspath(OP.dirname(__file__)))


def imagesize(input_dir_B):
    splits = os.listdir(input_dir_B)

    for sp in splits:
        img_fold_B = os.path.join(input_dir_B, sp)
        img_list = os.listdir(img_fold_B)
        num_imgs = len(img_list)
        print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list)))
        for n in range(num_imgs):
            file = img_list[n]
            path_A = os.path.join(img_fold_B, file)
            img = Image.open(path_A)
            print(img.size)


# imagesize('./dataset/B_parse/test')
# imagesize('./dataset/B_parse/val')
imagesize('./dataset/AB_parse/eyes')
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Note
This is an implementation I wrote when I was getting started with computer vision and deep learning, and I had nearly forgotten that I had this repository.
The implementation is NOT complete, and I never finished training a model with satisfying results, partly because I only had a GTX 960.
I will try to improve it once I have finished my master's thesis, that is to say in November at the earliest.

# PairedCycleGan
This is an attempt at implementing the paper 'PairedCycleGAN: Asymmetric Style Transfer for Applying and Removing Makeup' in PyTorch.
https://adoberesearch.ctlprojects.com/wp-content/uploads/2018/04/CVPR2018_Paper3623_Chang.pdf

The code is based on junyanz's pytorch-CycleGAN-and-pix2pix project: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix.

I mainly modified the 'models' and 'datasets' parts.

P.S.: The code is far from perfect and may not match the paper exactly, but the core ideas are the same. I'm still working on it.
--------------------------------------------------------------------------------
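Example usage (a sketch: the full set of flags lives in options/base_options.py, which is not shown in this dump; the commands below mirror the test example given in test.py and assume a dataset prepared under ./datasets/dataset/AB_parse/eyes):

    python train.py --dataroot ./datasets/dataset/AB_parse/eyes --name eyes --model paired_cycle_gan_eye
    python test.py --dataroot ./datasets/dataset/AB_parse/eyes --name eyes --model paired_cycle_gan_eye

--------------------------------------------------------------------------------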
/options/test_options.py:
--------------------------------------------------------------------------------
from .base_options import BaseOptions


class TestOptions(BaseOptions):
    """This class includes test options.

    It also includes shared options defined in BaseOptions.
    """

    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)  # define shared options
        parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.')
        parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
        parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        # Dropout and BatchNorm have different behavior during training and test.
        parser.add_argument('--eval', action='store_true', help='use eval mode during test time.')
        parser.add_argument('--num_test', type=int, default=50, help='how many test images to run')
        # rewrite default values
        parser.set_defaults(model='test')
        # To avoid cropping, the load_size should be the same as crop_size
        parser.set_defaults(load_size=parser.get_default('crop_size'))
        self.isTrain = False
        return parser
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
--------------------------------------------------------------------------------
/data/image_folder.py:
--------------------------------------------------------------------------------
"""A modified image folder class

We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
so that this class can load images from both the current directory and its subdirectories.
"""

import torch.utils.data as data

from PIL import Image
import os
import os.path

IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]


def is_image_file(filename):
    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)


def make_dataset(dir, max_dataset_size=float("inf")):
    images = []
    assert os.path.isdir(dir), '%s is not a valid directory' % dir

    for root, _, fnames in sorted(os.walk(dir)):
        for fname in fnames:
            if is_image_file(fname):
                path = os.path.join(root, fname)
                images.append(path)
    return images[:min(max_dataset_size, len(images))]


def default_loader(path):
    return Image.open(path).convert('RGB')


class ImageFolder(data.Dataset):

    def __init__(self, root, transform=None, return_paths=False,
                 loader=default_loader):
        imgs = make_dataset(root)
        if len(imgs) == 0:
            raise RuntimeError("Found 0 images in: " + root + "\n"
                               "Supported image extensions are: " +
                               ",".join(IMG_EXTENSIONS))

        self.root = root
        self.imgs = imgs
        self.transform = transform
        self.return_paths = return_paths
        self.loader = loader

    def __getitem__(self, index):
        path = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.return_paths:
            return img, path
        else:
            return img

    def __len__(self):
        return len(self.imgs)
--------------------------------------------------------------------------------
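A minimal usage sketch for the ImageFolder class above (the root path is a hypothetical example; any directory tree containing images works, since make_dataset() walks subdirectories recursively):

    import torchvision.transforms as transforms
    from data.image_folder import ImageFolder

    dataset = ImageFolder('./dataset/A/train', transform=transforms.ToTensor(), return_paths=True)
    img, path = dataset[0]  # a (C, H, W) tensor and its file path, because return_paths=True
    print(len(dataset), path, img.shape)

--------------------------------------------------------------------------------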
/datasets/combine_A_and_B.py:
--------------------------------------------------------------------------------
import os
import numpy as np
import cv2
import argparse

parser = argparse.ArgumentParser('create image pairs')
parser.add_argument('--fold_A', dest='fold_A', help='input directory for image A', type=str, default='./dataset/A')
parser.add_argument('--fold_B', dest='fold_B', help='input directory for image B', type=str, default='./dataset/B')
parser.add_argument('--fold_AB', dest='fold_AB', help='output directory', type=str, default='./dataset/AB')
parser.add_argument('--num_imgs', dest='num_imgs', help='number of images', type=int, default=100000)
parser.add_argument('--use_AB', dest='use_AB', help='if true: (001_A, 001_B) to (001_AB)', action='store_true')
args = parser.parse_args()

for arg in vars(args):
    print('[%s] = ' % arg, getattr(args, arg))

splits = os.listdir(args.fold_A)

for sp in splits:
    img_fold_A = os.path.join(args.fold_A, sp)
    img_fold_B = os.path.join(args.fold_B, sp)
    img_list = os.listdir(img_fold_A)
    if args.use_AB:
        img_list = [img_path for img_path in img_list if '_A.' in img_path]

    num_imgs = min(args.num_imgs, len(img_list))
    print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list)))
    img_fold_AB = os.path.join(args.fold_AB, sp)
    if not os.path.isdir(img_fold_AB):
        os.makedirs(img_fold_AB)
    print('split = %s, number of images = %d' % (sp, num_imgs))
    for n in range(num_imgs):
        name_A = img_list[n]
        path_A = os.path.join(img_fold_A, name_A)
        if args.use_AB:
            name_B = name_A.replace('_A.', '_B.')
        else:
            name_B = name_A
        path_B = os.path.join(img_fold_B, name_B)
        if os.path.isfile(path_A) and os.path.isfile(path_B):
            name_AB = name_A
            if args.use_AB:
                name_AB = name_AB.replace('_A.', '.')  # remove _A
            path_AB = os.path.join(img_fold_AB, name_AB)
            im_A = cv2.imread(path_A, 1)  # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
            im_B = cv2.imread(path_B, 1)  # python2: cv2.CV_LOAD_IMAGE_COLOR; python3: cv2.IMREAD_COLOR
            im_AB = np.concatenate([im_A, im_B], 1)
            cv2.imwrite(path_AB, im_AB)
--------------------------------------------------------------------------------
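Typical invocation (a sketch, using the script's own defaults; it expects each split under --fold_A and --fold_B to contain same-named, same-height images, and writes the side-by-side A|B pairs to --fold_AB):

    python datasets/combine_A_and_B.py --fold_A ./dataset/A --fold_B ./dataset/B --fold_AB ./dataset/AB

--------------------------------------------------------------------------------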
34 | """ 35 | if self.pool_size == 0: # if the buffer size is 0, do nothing 36 | return images 37 | return_images = [] 38 | for image in images: 39 | image = torch.unsqueeze(image.data, 0) 40 | if self.num_imgs < self.pool_size: # if the buffer is not full; keep inserting current images to the buffer 41 | self.num_imgs = self.num_imgs + 1 42 | self.images.append(image) 43 | return_images.append(image) 44 | else: 45 | p = random.uniform(0, 1) 46 | if p > 0.5: # by 50% chance, the buffer will return a previously stored image, and insert the current image into the buffer 47 | random_id = random.randint(0, self.pool_size - 1) # randint is inclusive 48 | tmp = self.images[random_id].clone() 49 | self.images[random_id] = image 50 | return_images.append(tmp) 51 | else: # by another 50% chance, the buffer will return the current image 52 | return_images.append(image) 53 | return_images = torch.cat(return_images, 0) # collect all the images and return 54 | return return_images 55 | -------------------------------------------------------------------------------- /datasets/make_dataset_aligned.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from PIL import Image 4 | 5 | 6 | def get_file_paths(folder): 7 | image_file_paths = [] 8 | for root, dirs, filenames in os.walk(folder): 9 | filenames = sorted(filenames) 10 | for filename in filenames: 11 | input_path = os.path.abspath(root) 12 | file_path = os.path.join(input_path, filename) 13 | if filename.endswith('.png') or filename.endswith('.jpg'): 14 | image_file_paths.append(file_path) 15 | 16 | break # prevent descending into subfolders 17 | return image_file_paths 18 | 19 | 20 | def align_images(a_file_paths, b_file_paths, target_path): 21 | if not os.path.exists(target_path): 22 | os.makedirs(target_path) 23 | 24 | for i in range(len(a_file_paths)): 25 | img_a = Image.open(a_file_paths[i]) 26 | img_b = Image.open(b_file_paths[i]) 27 | assert(img_a.size == img_b.size) 28 | 29 | aligned_image = Image.new("RGB", (img_a.size[0] * 2, img_a.size[1])) 30 | aligned_image.paste(img_a, (0, 0)) 31 | aligned_image.paste(img_b, (img_a.size[0], 0)) 32 | aligned_image.save(os.path.join(target_path, '{:04d}.jpg'.format(i))) 33 | 34 | 35 | if __name__ == '__main__': 36 | import argparse 37 | parser = argparse.ArgumentParser() 38 | parser.add_argument( 39 | '--dataset-path', 40 | dest='dataset_path', 41 | help='Which folder to process (it should have subfolders testA, testB, trainA and trainB' 42 | ) 43 | args = parser.parse_args() 44 | 45 | dataset_folder = args.dataset_path 46 | print(dataset_folder) 47 | 48 | # test_a_path = os.path.join(dataset_folder, 'testA') 49 | # test_b_path = os.path.join(dataset_folder, 'testB') 50 | # test_a_file_paths = get_file_paths(test_a_path) 51 | # test_b_file_paths = get_file_paths(test_b_path) 52 | # assert(len(test_a_file_paths) == len(test_b_file_paths)) 53 | # test_path = os.path.join(dataset_folder, 'test') 54 | 55 | train_a_path = os.path.join(dataset_folder, 'trainA') 56 | train_b_path = os.path.join(dataset_folder, 'trainB') 57 | train_a_file_paths = get_file_paths(train_a_path) 58 | train_b_file_paths = get_file_paths(train_b_path) 59 | assert(len(train_a_file_paths) == len(train_b_file_paths)) 60 | train_path = os.path.join(dataset_folder, 'train') 61 | 62 | align_images(test_a_file_paths, test_b_file_paths, test_path) 63 | align_images(train_a_file_paths, train_b_file_paths, train_path) 64 | 
/datasets/make_dataset_aligned.py:
--------------------------------------------------------------------------------
import os

from PIL import Image


def get_file_paths(folder):
    image_file_paths = []
    for root, dirs, filenames in os.walk(folder):
        filenames = sorted(filenames)
        for filename in filenames:
            input_path = os.path.abspath(root)
            file_path = os.path.join(input_path, filename)
            if filename.endswith('.png') or filename.endswith('.jpg'):
                image_file_paths.append(file_path)

        break  # prevent descending into subfolders
    return image_file_paths


def align_images(a_file_paths, b_file_paths, target_path):
    if not os.path.exists(target_path):
        os.makedirs(target_path)

    for i in range(len(a_file_paths)):
        img_a = Image.open(a_file_paths[i])
        img_b = Image.open(b_file_paths[i])
        assert(img_a.size == img_b.size)

        aligned_image = Image.new("RGB", (img_a.size[0] * 2, img_a.size[1]))
        aligned_image.paste(img_a, (0, 0))
        aligned_image.paste(img_b, (img_a.size[0], 0))
        aligned_image.save(os.path.join(target_path, '{:04d}.jpg'.format(i)))


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--dataset-path',
        dest='dataset_path',
        help='Which folder to process (it should have subfolders testA, testB, trainA and trainB)'
    )
    args = parser.parse_args()

    dataset_folder = args.dataset_path
    print(dataset_folder)

    # test_a_path = os.path.join(dataset_folder, 'testA')
    # test_b_path = os.path.join(dataset_folder, 'testB')
    # test_a_file_paths = get_file_paths(test_a_path)
    # test_b_file_paths = get_file_paths(test_b_path)
    # assert(len(test_a_file_paths) == len(test_b_file_paths))
    # test_path = os.path.join(dataset_folder, 'test')

    train_a_path = os.path.join(dataset_folder, 'trainA')
    train_b_path = os.path.join(dataset_folder, 'trainB')
    train_a_file_paths = get_file_paths(train_a_path)
    train_b_file_paths = get_file_paths(train_b_path)
    assert(len(train_a_file_paths) == len(train_b_file_paths))
    train_path = os.path.join(dataset_folder, 'train')

    # The test split is disabled above, so this call must stay commented out as
    # well; otherwise it raises a NameError on test_a_file_paths.
    # align_images(test_a_file_paths, test_b_file_paths, test_path)
    align_images(train_a_file_paths, train_b_file_paths, train_path)
--------------------------------------------------------------------------------
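Typical invocation (a sketch; the dataset path is a hypothetical example, and the folder must contain trainA and trainB subfolders with equally many same-size images; the aligned pairs are written to a train subfolder):

    python datasets/make_dataset_aligned.py --dataset-path ./dataset/makeup

--------------------------------------------------------------------------------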
/datasets/remove.py:
--------------------------------------------------------------------------------
# Move the parsed face-region images into their corresponding folders

import os
import os.path as osp
import shutil
from PIL import Image


def IsSubString(SubStrList, Str):
    """Return True only if every substring in SubStrList occurs in Str."""
    flag = True
    for substr in SubStrList:
        if not (substr in Str):
            flag = False
    return flag


# FileList = []
input_dir_test = './dataset/A_parse/test/'


splits = os.listdir(input_dir_test)

for sp in splits:
    # move data to the corresponding folder
    img_fold_test = os.path.join(input_dir_test, sp)
    img_list = os.listdir(img_fold_test)
    for fn in img_list:
        if IsSubString(['left_eye'], fn):
            fullfilename = os.path.join(img_fold_test, fn)
            shutil.move(fullfilename, './dataset/A_parse/eyes/test/left')
        elif IsSubString(['right_eye'], fn):
            fullfilename = os.path.join(img_fold_test, fn)
            shutil.move(fullfilename, './dataset/A_parse/eyes/test/right')
        if IsSubString(['mouth'], fn):
            fullfilename = os.path.join(img_fold_test, fn)
            shutil.move(fullfilename, './dataset/A_parse/mouth/test')
        elif IsSubString(['nose'], fn):
            fullfilename = os.path.join(img_fold_test, fn)
            shutil.move(fullfilename, './dataset/A_parse/nose/test')


# search for complete parsed images
# for n in range(20):
#     m = "%02d" % n
#     SubStrList = [str(m)]
#     img_fold_B = os.path.join(input_dir_B, sp)
#     img_list = os.listdir(img_fold_B)
#     for fn in img_list:
#         # print (SubStrList)
#         if IsSubString(SubStrList, fn):
#             fullfilename = os.path.join(img_fold_B, fn)
#             FileList.append(fullfilename)
#     print (len(FileList))
#     if len(FileList) != 4:
#         for files in FileList:
#             os.remove(files)
#         FileList = []
#     else:
#         FileList = []

# remove images not of size (120, 120)
# img_fold_B = os.path.join(input_dir_B, sp)
# img_list = os.listdir(img_fold_B)
# for fn in img_list:
#     image_path = osp.join(img_fold_B, fn)
#     image = Image.open(image_path)
#     if image.size != (120, 120):
#         os.remove(image_path)
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
"""General-purpose test script for image-to-image translation.

Once you have trained your model with train.py, you can use this script to test the model.
It will load a saved model from --checkpoints_dir and save the results to --results_dir.

It first creates a model and a dataset given the options, hard-coding some parameters.
It then runs inference for --num_test images and saves the results to an HTML file.

Example (you need to train models first or download pre-trained models from our website):
    Test a PairedCycleGAN model (both sides):
        python test.py --dataroot ./datasets/dataset/AB_parse/eyes --name eyes --model paired_cycle_gan_eye
"""
import os
from options.test_options import TestOptions
from data import create_dataset
from models import create_model
from util.visualizer import save_images
from util import html


if __name__ == '__main__':
    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0   # test code only supports num_threads = 0
    opt.batch_size = 1    # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line if results on randomly chosen images are needed.
    opt.no_flip = True    # no flip; comment this line if results on flipped images are needed.
    opt.display_id = -1   # no visdom display; the test code saves the results to an HTML file.
    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    model = create_model(opt)      # create a model given opt.model and other options
    model.setup(opt)               # regular setup: load and print networks; create schedulers
    # create a website
    web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.epoch))  # define the website directory
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.epoch))

    if opt.eval:
        model.eval()
    for i, data in enumerate(dataset):
        if i >= opt.num_test:  # only apply our model to opt.num_test images.
            break
        model.set_input(data)  # unpack data from data loader
        model.test()           # run inference
        visuals = model.get_current_visuals()  # get image results
        img_path = model.get_image_paths()     # get image paths
        if i % 5 == 0:  # save images to an HTML file
            print('processing (%04d)-th image... %s' % (i, img_path))
        save_images(webpage, visuals, img_path, aspect_ratio=opt.aspect_ratio, width=opt.display_winsize)
    webpage.save()  # save the HTML
--------------------------------------------------------------------------------
/data/aligned_dataset.py:
--------------------------------------------------------------------------------
import os.path
from data.base_dataset import BaseDataset, get_params, get_transform
from data.image_folder import make_dataset
from PIL import Image
import numpy as np


class AlignedDataset(BaseDataset):
    """A dataset class for paired image dataset.

    It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
    During test time, you need to prepare a directory '/path/to/data/test'.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        # self.dir_AB_eye = os.path.join(opt.dataroot_eye, opt.phase)  # get the image directory
        # self.dir_AB_mouth = os.path.join(opt.dataroot_mouth, opt.phase)  # get the image directory
        self.dir_AB = os.path.join(opt.dataroot, opt.phase)  # get the image directory

        # self.AB_paths_eye = sorted(make_dataset(self.dir_AB_eye, opt.max_dataset_size))  # get image paths
        # self.AB_paths_mouth = sorted(make_dataset(self.dir_AB_mouth, opt.max_dataset_size))  # get image paths
        self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size))  # get image paths

        assert(self.opt.load_size >= self.opt.crop_size)  # crop_size should be smaller than the size of the loaded image
        self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
        self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index - - a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor) - - an image in the input domain
            B (tensor) - - its corresponding image in the target domain
            A_paths (str) - - image paths
            B_paths (str) - - image paths (same as A_paths)
        """
        # read an image given a random integer index
        AB_path = self.AB_paths[index]
        AB = Image.open(AB_path).convert('RGB')
        # split the AB image into A and B
        w, h = AB.size
        w2 = int(w / 2)
        A = AB.crop((0, 0, w2, h))
        B = AB.crop((w2, 0, w, h))
        A_im = np.array(A)
        B_im = np.array(B)
        # apply the same transform to both A and B
        transform_params = get_params(self.opt, A.size)
        A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
        B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))

        A = A_transform(A)
        B = B_transform(B)

        return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path, 'A_im': A_im, 'B_im': B_im}

    def __len__(self):
        """Return the total number of images in the dataset."""
        return len(self.AB_paths)
--------------------------------------------------------------------------------
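A minimal illustration of the A|B split performed in __getitem__ above (the image path is hypothetical; each file is a side-by-side pair as produced by datasets/combine_A_and_B.py):

    from PIL import Image

    AB = Image.open('./dataset/AB/train/0001.jpg').convert('RGB')
    w, h = AB.size
    A = AB.crop((0, 0, w // 2, h))   # left half: input domain
    B = AB.crop((w // 2, 0, w, h))   # right half: target domain
    print(A.size, B.size)

--------------------------------------------------------------------------------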
/models/__init__.py:
--------------------------------------------------------------------------------
"""This package contains modules related to objective functions, optimizations, and network architectures.

To add a custom model class called 'dummy', you need to add a file called 'dummy_model.py' and define a subclass DummyModel inherited from BaseModel.
You need to implement the following five functions:
    -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt).
    -- <set_input>: unpack data from dataset and apply preprocessing.
    -- <forward>: produce intermediate results.
    -- <optimize_parameters>: calculate loss, gradients, and update network weights.
    -- <modify_commandline_options>: (optionally) add model-specific options and set default options.

In the function <__init__>, you need to define four lists:
    -- self.loss_names (str list): specify the training losses that you want to plot and save.
    -- self.model_names (str list): define the networks used in our training.
    -- self.visual_names (str list): specify the images that you want to display and save.
    -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example.

Now you can use the model class by specifying flag '--model dummy'.
See our template model class 'template_model.py' for more details.
"""

import importlib
from models.base_model import BaseModel


def find_model_using_name(model_name):
    """Import the module "models/[model_name]_model.py".

    In the file, the class called DatasetNameModel() will
    be instantiated. It has to be a subclass of BaseModel,
    and it is case-insensitive.
    """
    model_filename = "models." + model_name + "_model"
    modellib = importlib.import_module(model_filename)
    model = None
    target_model_name = model_name.replace('_', '') + 'model'
    for name, cls in modellib.__dict__.items():
        if name.lower() == target_model_name.lower() \
           and issubclass(cls, BaseModel):
            model = cls

    if model is None:
        print("In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase." % (model_filename, target_model_name))
        exit(0)

    return model


def get_option_setter(model_name):
    """Return the static method <modify_commandline_options> of the model class."""
    model_class = find_model_using_name(model_name)
    return model_class.modify_commandline_options


def create_model(opt):
    """Create a model given the option.

    This function wraps the model class.
    This is the main interface between this package and 'train.py'/'test.py'

    Example:
        >>> from models import create_model
        >>> model = create_model(opt)
    """
    model = find_model_using_name(opt.model)
    instance = model(opt)
    print("model [%s] was created" % type(instance).__name__)
    return instance
--------------------------------------------------------------------------------
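A sketch of the 'dummy' model described above (hypothetical code for a file models/dummy_model.py; it assumes that networks.define_G and the opt fields keep the signatures of the upstream pytorch-CycleGAN-and-pix2pix networks.py and base_options.py, neither of which is shown in this dump):

    import torch
    from .base_model import BaseModel
    from . import networks


    class DummyModel(BaseModel):
        """A minimal one-generator model that regresses A onto B with an L1 loss."""

        def __init__(self, opt):
            BaseModel.__init__(self, opt)         # first call the base initializer
            self.loss_names = ['G']               # plotted/saved as loss_G
            self.model_names = ['G']              # saved/loaded as netG
            self.visual_names = ['real', 'fake']  # images to display and save
            self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
                                          opt.norm, not opt.no_dropout, opt.init_type,
                                          opt.init_gain, self.gpu_ids)
            if self.isTrain:
                self.criterion = torch.nn.L1Loss()
                self.optimizer = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
                self.optimizers = [self.optimizer]

        def set_input(self, input):
            self.real = input['A'].to(self.device)
            self.target = input['B'].to(self.device)

        def forward(self):
            self.fake = self.netG(self.real)

        def optimize_parameters(self):
            self.forward()
            self.optimizer.zero_grad()
            self.loss_G = self.criterion(self.fake, self.target)
            self.loss_G.backward()
            self.optimizer.step()

After adding the file, the class is selected with '--model dummy'.

--------------------------------------------------------------------------------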
8 | """ 9 | 10 | def initialize(self, parser): 11 | parser = BaseOptions.initialize(self, parser) 12 | # visdom and HTML visualization parameters 13 | parser.add_argument('--display_freq', type=int, default=400, help='frequency of showing training results on screen') 14 | parser.add_argument('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.') 15 | parser.add_argument('--display_id', type=int, default=1, help='window id of the web display') 16 | parser.add_argument('--display_server', type=str, default="http://localhost", help='visdom server of the web display') 17 | parser.add_argument('--display_env', type=str, default='main', help='visdom display environment name (default is "main")') 18 | parser.add_argument('--display_port', type=int, default=8097, help='visdom port of the web display') 19 | parser.add_argument('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html') 20 | parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console') 21 | parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/') 22 | # network saving and loading parameters 23 | parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results') 24 | parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 25 | parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration') 26 | parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') 27 | parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by , +, ...') 28 | parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') 29 | # training parameters 30 | parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate') 31 | parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero') 32 | parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') 33 | parser.add_argument('--lr', type=float, default=0.0005, help='initial learning rate for adam') 34 | parser.add_argument('--gan_mode', type=str, default='vanilla', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.') 35 | parser.add_argument('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images') 36 | parser.add_argument('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]') 37 | parser.add_argument('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations') 38 | 39 | self.isTrain = True 40 | return parser 41 | -------------------------------------------------------------------------------- /util/html.py: -------------------------------------------------------------------------------- 1 | import dominate 2 | from dominate.tags import meta, h3, table, tr, td, p, a, img, br 3 | import os 4 | 5 | 6 | class HTML: 7 | """This HTML class allows us to save images and write texts into a single HTML file. 
8 | 9 | It consists of functions such as (add a text header to the HTML file), 10 | (add a row of images to the HTML file), and (save the HTML to the disk). 11 | It is based on Python library 'dominate', a Python library for creating and manipulating HTML documents using a DOM API. 12 | """ 13 | 14 | def __init__(self, web_dir, title, refresh=0): 15 | """Initialize the HTML classes 16 | 17 | Parameters: 18 | web_dir (str) -- a directory that stores the webpage. HTML file will be created at /index.html; images will be saved at 0: 32 | with self.doc.head: 33 | meta(http_equiv="refresh", content=str(refresh)) 34 | 35 | def get_image_dir(self): 36 | """Return the directory that stores images""" 37 | return self.img_dir 38 | 39 | def add_header(self, text): 40 | """Insert a header to the HTML file 41 | 42 | Parameters: 43 | text (str) -- the header text 44 | """ 45 | with self.doc: 46 | h3(text) 47 | 48 | def add_images(self, ims, txts, links, width=400): 49 | """add images to the HTML file 50 | 51 | Parameters: 52 | ims (str list) -- a list of image paths 53 | txts (str list) -- a list of image names shown on the website 54 | links (str list) -- a list of hyperref links; when you click an image, it will redirect you to a new page 55 | """ 56 | self.t = table(border=1, style="table-layout: fixed;") # Insert a table 57 | self.doc.add(self.t) 58 | with self.t: 59 | with tr(): 60 | for im, txt, link in zip(ims, txts, links): 61 | with td(style="word-wrap: break-word;", halign="center", valign="top"): 62 | with p(): 63 | with a(href=os.path.join('images', link)): 64 | img(style="width:%dpx" % width, src=os.path.join('images', im)) 65 | br() 66 | p(txt) 67 | 68 | def save(self): 69 | """save the current content to the HMTL file""" 70 | html_file = '%s/index.html' % self.web_dir 71 | f = open(html_file, 'wt') 72 | f.write(self.doc.render()) 73 | f.close() 74 | 75 | 76 | if __name__ == '__main__': # we show an example usage here. 77 | html = HTML('web/', 'test_html') 78 | html.add_header('hello world') 79 | 80 | ims, txts, links = [], [], [] 81 | for n in range(4): 82 | ims.append('image_%d.png' % n) 83 | txts.append('text_%d' % n) 84 | links.append('image_%d.png' % n) 85 | html.add_images(ims, txts, links) 86 | html.save() 87 | -------------------------------------------------------------------------------- /util/util.py: -------------------------------------------------------------------------------- 1 | """This module contains simple helper functions """ 2 | from __future__ import print_function 3 | import torch 4 | import numpy as np 5 | from PIL import Image 6 | import os 7 | 8 | 9 | def tensor2im(input_image, imtype=np.uint8): 10 | """"Converts a Tensor array into a numpy image array. 
/util/util.py:
--------------------------------------------------------------------------------
"""This module contains simple helper functions """
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os


def tensor2im(input_image, imtype=np.uint8):
    """Converts a Tensor array into a numpy image array.

    Parameters:
        input_image (tensor) -- the input image tensor array
        imtype (type)        -- the desired type of the converted numpy array
    """
    if not isinstance(input_image, np.ndarray):
        if isinstance(input_image, torch.Tensor):  # get the data from a variable
            image_tensor = input_image.data
        else:
            return input_image
        image_numpy = image_tensor[0].cpu().float().numpy()  # convert it into a numpy array
        if image_numpy.shape[0] == 1:  # grayscale to RGB
            image_numpy = np.tile(image_numpy, (3, 1, 1))
        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: transpose and scaling
    else:  # if it is a numpy array, do nothing
        image_numpy = input_image
    return image_numpy.astype(imtype)


def diagnose_network(net, name='network'):
    """Calculate and print the mean of the average absolute value of gradients

    Parameters:
        net (torch network) -- Torch network
        name (str)          -- the name of the network
    """
    mean = 0.0
    count = 0
    for param in net.parameters():
        if param.grad is not None:
            mean += torch.mean(torch.abs(param.grad.data))
            count += 1
    if count > 0:
        mean = mean / count
    print(name)
    print(mean)


def save_image(image_numpy, image_path, aspect_ratio=1.0):
    """Save a numpy image to the disk

    Parameters:
        image_numpy (numpy array) -- input numpy array
        image_path (str)          -- the path of the image
    """

    image_pil = Image.fromarray(image_numpy)
    h, w, _ = image_numpy.shape

    if aspect_ratio > 1.0:
        image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
    if aspect_ratio < 1.0:
        image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
    image_pil.save(image_path)


def print_numpy(x, val=True, shp=False):
    """Print the mean, min, max, median, std, and size of a numpy array

    Parameters:
        val (bool) -- if print the values of the numpy array
        shp (bool) -- if print the shape of the numpy array
    """
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        x = x.flatten()
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (
            np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))


def mkdirs(paths):
    """Create empty directories if they don't exist

    Parameters:
        paths (str list) -- a list of directory paths
    """
    if isinstance(paths, list) and not isinstance(paths, str):
        for path in paths:
            mkdir(path)
    else:
        mkdir(paths)


def mkdir(path):
    """Create a single empty directory if it didn't exist

    Parameters:
        path (str) -- a single directory path
    """
    if not os.path.exists(path):
        os.makedirs(path)
--------------------------------------------------------------------------------
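A quick sketch of tensor2im/save_image on a dummy batch in the networks' output range [-1, 1] (the output filename is hypothetical):

    import torch
    from util.util import tensor2im, save_image

    t = torch.rand(1, 3, 8, 8) * 2 - 1   # fake 1x3x8x8 batch in [-1, 1]
    arr = tensor2im(t)                   # uint8 HxWxC array in [0, 255]
    print(arr.shape, arr.dtype)          # (8, 8, 3) uint8
    save_image(arr, 'dummy.png')

--------------------------------------------------------------------------------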
/datasets/face_parse.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 17 2019
@author: jingyu
"""
# basic code is taken from https://github.com/Shuvrajit9904/PairedCycleGAN-tf

from imutils import face_utils
import numpy as np
import imutils
import dlib
import cv2
from collections import OrderedDict
import os
import re


mouth_idx = np.arange(48, 68)
right_eyebrow_idx = np.arange(17, 22)
left_eyebrow_idx = np.arange(22, 27)
right_eye_idx = np.arange(36, 42)
left_eye_idx = np.arange(42, 48)
nose_idx = np.arange(27, 35)


FACIAL_LANDMARKS_IDXS = OrderedDict([
    ("mouth", mouth_idx),
    ("right_eye_eyebrow", np.append(right_eyebrow_idx, right_eye_idx)),
    ("left_eye_eyebrow", np.append(left_eyebrow_idx, left_eye_idx)),
    ("nose", nose_idx),
])

shape_pred = './dataset/shape_predictor_68_face_landmarks.dat'

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(shape_pred)


def parse_save(image, gray, file, rects, predictor, FACIAL_LANDMARKS_IDXS, output_dir):

    p = re.compile('(.*).png')
    out_file_init = p.match(file).group(1)

    for (i, rect) in enumerate(rects):

        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        for (name, idx_arr) in FACIAL_LANDMARKS_IDXS.items():

            clone = image.copy()

            # crop a circular region around each facial part
            (x, y), radius = cv2.minEnclosingCircle(np.array([shape[idx_arr]]))
            center = (int(x), int(y))
            radius = int(radius) + 12

            mask = np.zeros(clone.shape, dtype=np.uint8)
            mask = cv2.circle(mask, center, radius, (255, 255, 255), -1, 8, 0)

            result_array = clone & mask
            y_min = max(0, center[1] - radius)
            x_min = max(0, center[0] - radius)
            result_array = result_array[y_min:center[1] + radius,
                                        x_min:center[0] + radius, :]

            out_file_name = output_dir + out_file_init + '_' + name + '.png'
            if name == 'right_eye_eyebrow':
                # mirror the right eye so that both eyes share the same orientation
                cv2.imwrite(out_file_name, cv2.flip(result_array, 1))
            else:
                cv2.imwrite(out_file_name, result_array)
            # output = face_utils.visualize_facial_landmarks(image, shape)
            # cv2.imshow("Image", output)


input_dir = './dataset/A/test'
output_dir = './dataset/A_parse/test/test'

splits = os.listdir(input_dir)
for sp in splits:
    img_fold = os.path.join(input_dir, sp)
    img_list = os.listdir(img_fold)
    num_imgs = len(img_list)
    print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list)))
    for n in range(num_imgs):
        file = img_list[n]
        path = os.path.join(img_fold, file)
        if os.path.isfile(path):
            image = cv2.imread(path, 1)
            # image = np.array(image)
            image = imutils.resize(image, width=512)
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

            rects = detector(gray, 1)

            print(file)
            # pass gray explicitly; the original version read it from the global scope
            parse_save(image, gray, file, rects, predictor, FACIAL_LANDMARKS_IDXS, output_dir)
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------

import time
from options.train_options import TrainOptions
from data import create_dataset
from models import create_model
from util.visualizer import Visualizer

if __name__ == '__main__':
    opt = TrainOptions().parse()   # get training options
    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    dataset_size = len(dataset)    # get the number of images in the dataset.
    print('The number of training images = %d' % dataset_size)

    model = create_model(opt)      # create a model given opt.model and other options
    model.setup(opt)               # regular setup: load and print networks; create schedulers
    visualizer = Visualizer(opt)   # create a visualizer that displays/saves images and plots
    total_iters = 0                # the total number of training iterations

    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):  # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
        epoch_start_time = time.time()  # timer for the entire epoch
        iter_data_time = time.time()    # timer for data loading per iteration
        epoch_iter = 0                  # the number of training iterations in the current epoch, reset to 0 every epoch

        for i, data in enumerate(dataset):  # inner loop within one epoch
            iter_start_time = time.time()   # timer for computation per iteration
            if total_iters % opt.print_freq == 0:
                t_data = iter_start_time - iter_data_time
            visualizer.reset()
            total_iters += opt.batch_size
            epoch_iter += opt.batch_size
            model.set_input(data)        # unpack data from dataset and apply preprocessing
            model.optimize_parameters()  # calculate loss functions, get gradients, update network weights

            if total_iters % opt.display_freq == 0:  # display images on visdom and save images to an HTML file
                save_result = total_iters % opt.update_html_freq == 0
                model.compute_visuals()
                visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)

            if total_iters % opt.print_freq == 0:  # print training losses and save logging information to the disk
                losses = model.get_current_losses()
                t_comp = (time.time() - iter_start_time) / opt.batch_size
                visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
                if opt.display_id > 0:
                    visualizer.plot_current_losses(epoch, float(epoch_iter) / dataset_size, losses)

            if total_iters % opt.save_latest_freq == 0:  # cache our latest model every <save_latest_freq> iterations
                print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
                save_suffix = 'iter_%d' % total_iters if opt.save_by_iter else 'latest'
                model.save_networks(save_suffix)

            iter_data_time = time.time()
        if epoch % opt.save_epoch_freq == 0:  # cache our model every <save_epoch_freq> epochs
            print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
            model.save_networks('latest')
            model.save_networks(epoch)

        print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
        model.update_learning_rate()  # update learning rates at the end of every epoch.
--------------------------------------------------------------------------------
/data/template_dataset.py:
--------------------------------------------------------------------------------
"""Dataset class template

This module provides a template for users to implement custom datasets.
You can specify '--dataset_mode template' to use this dataset.
The class name should be consistent with both the filename and its dataset_mode option.
The filename should be <dataset_mode>_dataset.py
The class name should be <DatasetMode>Dataset
You need to implement the following functions:
    -- <modify_commandline_options>: Add dataset-specific options and rewrite default values for existing options.
    -- <__init__>: Initialize this dataset class.
    -- <__getitem__>: Return a data point and its metadata information.
    -- <__len__>: Return the number of images.
"""
from data.base_dataset import BaseDataset, get_transform
# from data.image_folder import make_dataset
# from PIL import Image


class TemplateDataset(BaseDataset):
    """A template dataset class for you to implement custom datasets."""
    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser          -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        parser.add_argument('--new_dataset_option', type=float, default=1.0, help='new dataset option')
        parser.set_defaults(max_dataset_size=10, new_dataset_option=2.0)  # specify dataset-specific default values
        return parser

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions

        A few things can be done here.
        - save the options (has been done in BaseDataset)
        - get image paths and meta information of the dataset.
        - define the image transformation.
        """
        # save the option and dataset root
        BaseDataset.__init__(self, opt)
        # get the image paths of your dataset;
        self.image_paths = []  # You can call sorted(make_dataset(self.root, opt.max_dataset_size)) to get all the image paths under the directory self.root
        # define the default transform function. You can use <base_dataset.get_transform>; you can also define your custom transform function
        self.transform = get_transform(opt)

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index -- a random integer for data indexing

        Returns:
            a dictionary of data with their names. It usually contains the data itself and its metadata information.

        Step 1: get a random image path: e.g., path = self.image_paths[index]
        Step 2: load your data from the disk: e.g., image = Image.open(path).convert('RGB').
        Step 3: convert your data to a PyTorch tensor. You can use helper functions such as self.transform. e.g., data = self.transform(image)
        Step 4: return a data point as a dictionary.
        """
        path = 'temp'    # needs to be a string
        data_A = None    # needs to be a tensor
        data_B = None    # needs to be a tensor
        return {'data_A': data_A, 'data_B': data_B, 'path': path}

    def __len__(self):
        """Return the total number of images."""
        return len(self.image_paths)
--------------------------------------------------------------------------------
/data/__init__.py:
--------------------------------------------------------------------------------
"""This package includes all the modules related to data loading and preprocessing

To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
You need to implement four functions:
    -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
    -- <__len__>: return the size of dataset.
    -- <__getitem__>: get a data point from the data loader.
    -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.

Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
See our template dataset class 'template_dataset.py' for more details.
"""
import importlib
import torch.utils.data
from data.base_dataset import BaseDataset


def find_dataset_using_name(dataset_name):
    """Import the module "data/[dataset_name]_dataset.py".

    In the file, the class called DatasetNameDataset() will
    be instantiated. It has to be a subclass of BaseDataset,
    and it is case-insensitive.
    """
    dataset_filename = "data." + dataset_name + "_dataset"
    datasetlib = importlib.import_module(dataset_filename)

    dataset = None
    target_dataset_name = dataset_name.replace('_', '') + 'dataset'
    for name, cls in datasetlib.__dict__.items():
        if name.lower() == target_dataset_name.lower() \
           and issubclass(cls, BaseDataset):
            dataset = cls

    if dataset is None:
        raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))

    return dataset


def get_option_setter(dataset_name):
    """Return the static method <modify_commandline_options> of the dataset class."""
    dataset_class = find_dataset_using_name(dataset_name)
    return dataset_class.modify_commandline_options


def create_dataset(opt):
    """Create a dataset given the option.

    This function wraps the class CustomDatasetDataLoader.
    This is the main interface between this package and 'train.py'/'test.py'

    Example:
        >>> from data import create_dataset
        >>> dataset = create_dataset(opt)
    """
    data_loader = CustomDatasetDataLoader(opt)
    dataset = data_loader.load_data()
    return dataset


class CustomDatasetDataLoader():
    """Wrapper class of Dataset class that performs multi-threaded data loading"""

    def __init__(self, opt):
        """Initialize this class

        Step 1: create a dataset instance given the name [dataset_mode]
        Step 2: create a multi-threaded data loader.
        """
        self.opt = opt
        dataset_class = find_dataset_using_name(opt.dataset_mode)
        self.dataset = dataset_class(opt)
        print("dataset [%s] was created" % type(self.dataset).__name__)
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=opt.batch_size,
            shuffle=not opt.serial_batches,
            num_workers=int(opt.num_threads))

    def load_data(self):
        return self

    def __len__(self):
        """Return the number of data in the dataset"""
        return min(len(self.dataset), self.opt.max_dataset_size)

    def __iter__(self):
        """Return a batch of data"""
        for i, data in enumerate(self.dataloader):
            if i * self.opt.batch_size >= self.opt.max_dataset_size:
                break
            yield data
--------------------------------------------------------------------------------
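A sketch of the 'dummy' dataset described above (hypothetical code for a file data/dummy_dataset.py, built only from the helpers shown in this package):

    from data.base_dataset import BaseDataset, get_transform
    from data.image_folder import make_dataset
    from PIL import Image


    class DummyDataset(BaseDataset):
        """A minimal single-image dataset following the recipe above."""

        def __init__(self, opt):
            BaseDataset.__init__(self, opt)  # saves opt and sets self.root = opt.dataroot
            self.paths = sorted(make_dataset(self.root, opt.max_dataset_size))
            self.transform = get_transform(opt)

        def __getitem__(self, index):
            path = self.paths[index]
            img = Image.open(path).convert('RGB')
            return {'A': self.transform(img), 'A_paths': path}

        def __len__(self):
            return len(self.paths)

It would then be selected with '--dataset_mode dummy'.

--------------------------------------------------------------------------------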
16 | verbose (bool) -- If True, print additional information. 17 | 18 | Examples: 19 | >>> from util.get_data import GetData 20 | >>> gd = GetData(technique='cyclegan') 21 | >>> new_data_path = gd.get(save_path='./datasets') # options will be displayed. 22 | 23 | Alternatively, You can use bash scripts: 'scripts/download_pix2pix_model.sh' 24 | and 'scripts/download_cyclegan_model.sh'. 25 | """ 26 | 27 | def __init__(self, technique='cyclegan', verbose=True): 28 | url_dict = { 29 | 'pix2pix': 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/', 30 | 'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets' 31 | } 32 | self.url = url_dict.get(technique.lower()) 33 | self._verbose = verbose 34 | 35 | def _print(self, text): 36 | if self._verbose: 37 | print(text) 38 | 39 | @staticmethod 40 | def _get_options(r): 41 | soup = BeautifulSoup(r.text, 'lxml') 42 | options = [h.text for h in soup.find_all('a', href=True) 43 | if h.text.endswith(('.zip', 'tar.gz'))] 44 | return options 45 | 46 | def _present_options(self): 47 | r = requests.get(self.url) 48 | options = self._get_options(r) 49 | print('Options:\n') 50 | for i, o in enumerate(options): 51 | print("{0}: {1}".format(i, o)) 52 | choice = input("\nPlease enter the number of the " 53 | "dataset above you wish to download:") 54 | return options[int(choice)] 55 | 56 | def _download_data(self, dataset_url, save_path): 57 | if not isdir(save_path): 58 | os.makedirs(save_path) 59 | 60 | base = basename(dataset_url) 61 | temp_save_path = join(save_path, base) 62 | 63 | with open(temp_save_path, "wb") as f: 64 | r = requests.get(dataset_url) 65 | f.write(r.content) 66 | 67 | if base.endswith('.tar.gz'): 68 | obj = tarfile.open(temp_save_path) 69 | elif base.endswith('.zip'): 70 | obj = ZipFile(temp_save_path, 'r') 71 | else: 72 | raise ValueError("Unknown File Type: {0}.".format(base)) 73 | 74 | self._print("Unpacking Data...") 75 | obj.extractall(save_path) 76 | obj.close() 77 | os.remove(temp_save_path) 78 | 79 | def get(self, save_path, dataset=None): 80 | """ 81 | 82 | Download a dataset. 83 | 84 | Parameters: 85 | save_path (str) -- A directory to save the data to. 86 | dataset (str) -- (optional). A specific dataset to download. 87 | Note: this must include the file extension. 88 | If None, options will be presented for you 89 | to choose from. 90 | 91 | Returns: 92 | save_path_full (str) -- the absolute path to the downloaded data. 93 | 94 | """ 95 | if dataset is None: 96 | selected_dataset = self._present_options() 97 | else: 98 | selected_dataset = dataset 99 | 100 | save_path_full = join(save_path, selected_dataset.split('.')[0]) 101 | 102 | if isdir(save_path_full): 103 | warn("\n'{0}' already exists. Voiding Download.".format( 104 | save_path_full)) 105 | else: 106 | self._print('Downloading Data...') 107 | url = "{0}/{1}".format(self.url, selected_dataset) 108 | self._download_data(url, save_path=save_path) 109 | 110 | return abspath(save_path_full) 111 | -------------------------------------------------------------------------------- /data/base_dataset.py: -------------------------------------------------------------------------------- 1 | """This module implements an abstract base class (ABC) 'BaseDataset' for datasets. 2 | 3 | It also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses. 
4 | """ 5 | import random 6 | import numpy as np 7 | import torch.utils.data as data 8 | from PIL import Image 9 | import torchvision.transforms as transforms 10 | from abc import ABC, abstractmethod 11 | 12 | 13 | class BaseDataset(data.Dataset, ABC): 14 | """This class is an abstract base class (ABC) for datasets. 15 | 16 | To create a subclass, you need to implement the following four functions: 17 | -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt). 18 | -- <__len__>: return the size of dataset. 19 | -- <__getitem__>: get a data point. 20 | -- : (optionally) add dataset-specific options and set default options. 21 | """ 22 | 23 | def __init__(self, opt): 24 | """Initialize the class; save the options in the class 25 | 26 | Parameters: 27 | opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions 28 | """ 29 | self.opt = opt 30 | self.root = opt.dataroot 31 | 32 | @staticmethod 33 | def modify_commandline_options(parser, is_train): 34 | """Add new dataset-specific options, and rewrite default values for existing options. 35 | 36 | Parameters: 37 | parser -- original option parser 38 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. 39 | 40 | Returns: 41 | the modified parser. 42 | """ 43 | return parser 44 | 45 | @abstractmethod 46 | def __len__(self): 47 | """Return the total number of images in the dataset.""" 48 | return 0 49 | 50 | @abstractmethod 51 | def __getitem__(self, index): 52 | """Return a data point and its metadata information. 53 | 54 | Parameters: 55 | index - - a random integer for data indexing 56 | 57 | Returns: 58 | a dictionary of data with their names. It ususally contains the data itself and its metadata information. 
59 | """ 60 | pass 61 | 62 | 63 | def get_params(opt, size): 64 | w, h = size 65 | new_h = h 66 | new_w = w 67 | if opt.preprocess == 'resize_and_crop': 68 | new_h = new_w = opt.load_size 69 | elif opt.preprocess == 'scale_width_and_crop': 70 | new_w = opt.load_size 71 | new_h = opt.load_size * h // w 72 | 73 | x = random.randint(0, np.maximum(0, new_w - opt.crop_size)) 74 | y = random.randint(0, np.maximum(0, new_h - opt.crop_size)) 75 | 76 | flip = random.random() > 0.5 77 | 78 | return {'crop_pos': (x, y), 'flip': flip} 79 | 80 | 81 | def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True): 82 | transform_list = [] 83 | if grayscale: 84 | transform_list.append(transforms.Grayscale(1)) 85 | if 'resize' in opt.preprocess: 86 | osize = [opt.load_size, opt.load_size] 87 | transform_list.append(transforms.Resize(osize, method)) 88 | elif 'scale_width' in opt.preprocess: 89 | transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, method))) 90 | 91 | if 'crop' in opt.preprocess: 92 | if params is None: 93 | transform_list.append(transforms.RandomCrop(opt.crop_size)) 94 | else: 95 | transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size))) 96 | 97 | if opt.preprocess == 'none': 98 | transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method))) 99 | 100 | if not opt.no_flip: 101 | if params is None: 102 | transform_list.append(transforms.RandomHorizontalFlip()) 103 | elif params['flip']: 104 | transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip']))) 105 | 106 | if convert: 107 | transform_list += [transforms.ToTensor()] 108 | if grayscale: 109 | transform_list += [transforms.Normalize((0.5,), (0.5,))] 110 | else: 111 | transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] 112 | return transforms.Compose(transform_list) 113 | 114 | 115 | def __make_power_2(img, base, method=Image.BICUBIC): 116 | ow, oh = img.size 117 | h = int(round(oh / base) * base) 118 | w = int(round(ow / base) * base) 119 | if (h == oh) and (w == ow): 120 | return img 121 | 122 | __print_size_warning(ow, oh, w, h) 123 | return img.resize((w, h), method) 124 | 125 | 126 | def __scale_width(img, target_width, method=Image.BICUBIC): 127 | ow, oh = img.size 128 | if (ow == target_width): 129 | return img 130 | w = target_width 131 | h = int(target_width * oh / ow) 132 | return img.resize((w, h), method) 133 | 134 | 135 | def __crop(img, pos, size): 136 | ow, oh = img.size 137 | x1, y1 = pos 138 | tw = th = size 139 | if (ow > tw or oh > th): 140 | return img.crop((x1, y1, x1 + tw, y1 + th)) 141 | return img 142 | 143 | 144 | def __flip(img, flip): 145 | if flip: 146 | return img.transpose(Image.FLIP_LEFT_RIGHT) 147 | return img 148 | 149 | 150 | def __print_size_warning(ow, oh, w, h): 151 | """Print warning information about image size(only print once)""" 152 | if not hasattr(__print_size_warning, 'has_printed'): 153 | print("The image size needs to be a multiple of 4. " 154 | "The loaded image size was (%d, %d), so it was adjusted to " 155 | "(%d, %d). 
This adjustment will be done to all images " 156 | "whose sizes are not multiples of 4" % (ow, oh, w, h)) 157 | __print_size_warning.has_printed = True 158 | -------------------------------------------------------------------------------- /options/base_options.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | from util import util 4 | import torch 5 | import models 6 | import data 7 | 8 | 9 | class BaseOptions(): 10 | """This class defines options used during both training and test time. 11 | 12 | It also implements several helper functions such as parsing, printing, and saving the options. 13 | It also gathers additional options defined in functions in both dataset class and model class. 14 | """ 15 | 16 | def __init__(self): 17 | """Reset the class; indicates the class hasn't been initailized""" 18 | self.initialized = False 19 | 20 | def initialize(self, parser): 21 | """Define the common options that are used in both training and test.""" 22 | # basic parameters 23 | parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)') 24 | parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models') 25 | parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU') 26 | parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here') 27 | # model parameters 28 | parser.add_argument('--model', type=str, default='paired_cycle_gan', help='chooses which model to use.') 29 | parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale') 30 | parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale') 31 | parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in the last conv layer') 32 | parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer') 33 | parser.add_argument('--netD', type=str, default='n_layers', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator') 34 | parser.add_argument('--netG', type=str, default='resnet_3blocks', help='specify generator architecture ') 35 | parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers') 36 | parser.add_argument('--norm', type=str, default='batch', help='instance normalization or batch normalization [instance | batch | none]') 37 | parser.add_argument('--init_type', type=str, default='normal', help='network initialization ') 38 | parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal.') 39 | parser.add_argument('--no_dropout', action='store_false', help='no dropout for the generator') 40 | # dataset parameters 41 | parser.add_argument('--dataset_mode', type=str, default='aligned', help='chooses how datasets are loaded. 
') 42 | parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA') 43 | parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') 44 | parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data') 45 | parser.add_argument('--batch_size', type=int, default=1, help='input batch size') 46 | parser.add_argument('--load_size', type=int, default=512, help='scale images to this size') 47 | parser.add_argument('--crop_size', type=int, default=256, help='then crop to this size') 48 | parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.') 49 | parser.add_argument('--preprocess', type=str, default='none', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]') 50 | parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation') 51 | parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML') 52 | # additional parameters 53 | parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') 54 | parser.add_argument('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]') 55 | parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information') 56 | parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}') 57 | self.initialized = True 58 | return parser 59 | 60 | def gather_options(self): 61 | """Initialize our parser with basic options(only once). 62 | Add additional model-specific and dataset-specific options. 63 | These options are defined in the function 64 | in model and dataset classes. 65 | """ 66 | if not self.initialized: # check if it has been initialized 67 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) 68 | parser = self.initialize(parser) 69 | 70 | # get the basic options 71 | opt, _ = parser.parse_known_args() 72 | 73 | # modify model-related parser options 74 | model_name = opt.model 75 | model_option_setter = models.get_option_setter(model_name) 76 | parser = model_option_setter(parser, self.isTrain) 77 | opt, _ = parser.parse_known_args() # parse again with new defaults 78 | 79 | # modify dataset-related parser options 80 | dataset_name = opt.dataset_mode 81 | dataset_option_setter = data.get_option_setter(dataset_name) 82 | parser = dataset_option_setter(parser, self.isTrain) 83 | 84 | # save and return the parser 85 | self.parser = parser 86 | return parser.parse_args() 87 | 88 | def print_options(self, opt): 89 | """Print and save options 90 | 91 | It will print both current options and default values(if different). 
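A note on the flow above: gather_options deliberately calls parse_known_args before the final parse_args so that model- and dataset-specific flags, which are not registered yet on the first pass, do not abort parsing. Condensed to its essentials, the pattern looks like this (a sketch, not the exact code of this file):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--model', default='paired_cycle_gan')
    opt, _ = parser.parse_known_args()   # first pass: tolerate unknown flags
    # ...the chosen model/dataset classes now register their own flags...
    parser.add_argument('--lambda_A', type=float, default=0.2)
    opt = parser.parse_args()            # final pass: every flag must be known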
92 | It will save options into a text file / [checkpoints_dir] / opt.txt 93 | """ 94 | message = '' 95 | message += '----------------- Options ---------------\n' 96 | for k, v in sorted(vars(opt).items()): 97 | comment = '' 98 | default = self.parser.get_default(k) 99 | if v != default: 100 | comment = '\t[default: %s]' % str(default) 101 | message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) 102 | message += '----------------- End -------------------' 103 | print(message) 104 | 105 | # save to the disk 106 | expr_dir = os.path.join(opt.checkpoints_dir, opt.name) 107 | util.mkdirs(expr_dir) 108 | file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase)) 109 | with open(file_name, 'wt') as opt_file: 110 | opt_file.write(message) 111 | opt_file.write('\n') 112 | 113 | def parse(self): 114 | """Parse our options, create checkpoints directory suffix, and set up gpu device.""" 115 | opt = self.gather_options() 116 | opt.isTrain = self.isTrain # train or test 117 | 118 | # process opt.suffix 119 | if opt.suffix: 120 | suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else '' 121 | opt.name = opt.name + suffix 122 | 123 | self.print_options(opt) 124 | 125 | # set gpu ids 126 | str_ids = opt.gpu_ids.split(',') 127 | opt.gpu_ids = [] 128 | for str_id in str_ids: 129 | id = int(str_id) 130 | if id >= 0: 131 | opt.gpu_ids.append(id) 132 | if len(opt.gpu_ids) > 0: 133 | torch.cuda.set_device(opt.gpu_ids[0]) 134 | 135 | self.opt = opt 136 | return self.opt 137 | -------------------------------------------------------------------------------- /models/paired_cycle_gan_eye_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Sat Nov 17 2019 5 | @author: jingyu 6 | """ 7 | 8 | # Basic Code is taken from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix 9 | 10 | 11 | import torch 12 | import itertools 13 | from util.image_pool import ImagePool 14 | from .base_model import BaseModel 15 | from . import networks 16 | from util.util import tensor2im 17 | 18 | 19 | class PairedCycleGANEyeModel(BaseModel): 20 | """ 21 | This class implements the PairedCycleGAN model, for learning image-to-image translation with paired data. 22 | Especially for eyes training. 23 | 24 | The model training requires '--dataset_mode aligned' dataset. 25 | By default, it uses a '--netG resnet_3blocks' DilatedResNet generator, 26 | a '--netD basic' discriminator (PatchGAN introduced by pix2pix), 27 | 28 | """ 29 | @staticmethod 30 | def modify_commandline_options(parser, is_train=True): 31 | """Add new dataset-specific options, and rewrite default values for existing options. 32 | 33 | Parameters: 34 | parser -- original option parser 35 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. 36 | 37 | Returns: 38 | the modified parser. 39 | 40 | For PairedCycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B for the following losses. 41 | A (source domain), B (target domain). 42 | Generators: G_A: A -> B; G_B: B -> A. 43 | Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A. 44 | Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| 45 | Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| 46 | Dropout is used in the original PairedCycleGAN paper. 
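Concretely, once these defaults are registered, a training run for this model could be launched roughly as follows; the dataroot path and experiment name are placeholders, and '--model paired_cycle_gan_eye' assumes models/__init__.py resolves model names the same way data/__init__.py resolves dataset names:

    # hypothetical invocation, written as a comment sketch:
    #   python train.py --dataroot ./datasets/dataset --name makeup_eye --model paired_cycle_gan_eye
    # after option gathering this yields:
    #   opt.no_dropout   == False     (set_defaults above re-enables dropout)
    #   opt.dataset_mode == 'aligned'
    #   opt.lambda_A == 0.2, opt.lambda_B == 0.2, opt.lambda_idt == 1.0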
47 | """ 48 | parser.set_defaults(no_dropout=False, dataset_mode='aligned') # default CycleGAN did not use dropout 49 | if is_train: 50 | parser.add_argument('--lambda_A', type=float, default=0.2, help='weight for GAN loss (A -> B -> A)') 51 | parser.add_argument('--lambda_B', type=float, default=0.2, help='weight for GAN loss (B -> A -> B)') 52 | parser.add_argument('--lambda_idt', type=float, default=1, help='weight for loss of identity loss')# identiy loss is 1 in the original paper 53 | 54 | return parser 55 | 56 | def __init__(self, opt): 57 | """Initialize the PairedCycleGAN class. 58 | 59 | Parameters: 60 | opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions 61 | """ 62 | BaseModel.__init__(self, opt) 63 | # specify the training losses you want to print out. The training/test scripts will call 64 | self.loss_names = ['D_A', 'G_A', 'idt', 'D_B', 'G_B', 'style'] 65 | # specify the images you want to save/display. The training/test scripts will call 66 | visual_names_A = ['real_A', 'fake_B', 'rec_A'] 67 | visual_names_B = ['real_B', 'fake_A', 'rec_B'] 68 | # if self.isTrain and self.opt.lambda_identity > 0.0: # if identity loss is used, we also visualize idt_B=G_A(B) ad idt_A=G_A(B) 69 | # visual_names_A.append('idt_B') 70 | # visual_names_B.append('idt_A') 71 | 72 | self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B 73 | # specify the models you want to save to the disk. The training/test scripts will call and . 74 | if self.isTrain: 75 | self.model_names = ['G_A', 'G_B', 'D_A', 'D_B'] 76 | else: # during test time, only load Gs 77 | self.model_names = ['G_A', 'G_B'] 78 | 79 | # define networks (both Generators and discriminators) 80 | # The naming is different from those used in the paper. 81 | # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X) 82 | self.netG_A = networks.define_G(opt.input_nc*2, opt.output_nc, opt.ngf, opt.netG, opt.norm, 83 | not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) 84 | self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm, 85 | not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) 86 | 87 | if self.isTrain: # define discriminators 88 | self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD, 89 | opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) 90 | self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD, 91 | opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) 92 | 93 | if self.isTrain: 94 | if opt.lambda_idt > 0.0: # only works when input and output images have the same number of channels 95 | assert(opt.input_nc == opt.output_nc) 96 | self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images 97 | self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images 98 | # define loss functions 99 | self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss. 100 | self.criterionCycle = torch.nn.L1Loss() 101 | self.criterionIdt = torch.nn.L1Loss() 102 | # initialize optimizers; schedulers will be automatically created by function . 
103 | self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999)) 104 | self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999)) 105 | self.optimizers.append(self.optimizer_G) 106 | self.optimizers.append(self.optimizer_D) 107 | 108 | def set_input(self, input): 109 | """Unpack input data from the dataloader and perform necessary pre-processing steps. 110 | 111 | Parameters: 112 | input (dict): include the data itself and its metadata information. 113 | 114 | The option 'direction' can be used to swap domain A and domain B. 115 | """ 116 | AtoB = self.opt.direction == 'AtoB' 117 | self.real_A = input['A' if AtoB else 'B'].to(self.device) 118 | self.real_B = input['B' if AtoB else 'A'].to(self.device) 119 | self.image_paths = input['A_paths' if AtoB else 'B_paths'] 120 | 121 | 122 | def forward(self): 123 | """Run forward pass; called by both functions and .""" 124 | self.real_AB = torch.cat((self.real_A, self.real_B), 1) 125 | self.fake_B = self.netG_A(self.real_AB) # G_A(A,B) 126 | self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A,B)) 127 | self.fake_A = self.netG_B(self.real_B) # G_B(B) 128 | self.fake_AB = torch.cat((self.fake_A, self.fake_B),1) #G_B(B),G_A(A,B) 129 | self.rec_B = self.netG_A(self.fake_AB) # G_A(G_B(B), G_A(A,B)) 130 | 131 | def backward_D_basic(self, netD, real, fake): 132 | """Calculate GAN loss for the discriminator 133 | 134 | Parameters: 135 | netD (network) -- the discriminator D 136 | real (tensor array) -- real images 137 | fake (tensor array) -- images generated by a generator 138 | 139 | Return the discriminator loss. 140 | We also call loss_D.backward() to calculate the gradients. 
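One detail of the forward pass above that is easy to miss: G_A consumes the channel-wise concatenation of real_A and real_B, which is why define_G was called with opt.input_nc*2 for netG_A while netG_B takes a plain image. A quick, self-contained shape check (dummy tensors, illustrative only):

    import torch

    input_nc = output_nc = 3
    real_A = torch.randn(1, input_nc, 256, 256)   # source face (e.g., no makeup)
    real_B = torch.randn(1, output_nc, 256, 256)  # reference face (e.g., makeup)
    real_AB = torch.cat((real_A, real_B), 1)      # shape (1, 6, 256, 256)
    assert real_AB.shape[1] == input_nc * 2       # matches netG_A's expected input channels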
141 | Discriminator's losses are not changed from the original CycleGAN model 142 | """ 143 | # Real 144 | pred_real = netD(real) 145 | loss_D_real = self.criterionGAN(pred_real, True) 146 | # Fake 147 | pred_fake = netD(fake.detach()) 148 | loss_D_fake = self.criterionGAN(pred_fake, False) 149 | # Combined loss and calculate gradients 150 | loss_D = (loss_D_real + loss_D_fake) * 0.5 151 | loss_D.backward() 152 | return loss_D 153 | 154 | def backward_D_A(self): 155 | """Calculate GAN loss for discriminator D_A""" 156 | fake_B = self.fake_B_pool.query(self.fake_B) 157 | self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B) 158 | 159 | def backward_D_B(self): 160 | """Calculate GAN loss for discriminator D_B""" 161 | fake_A = self.fake_A_pool.query(self.fake_A) 162 | self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A) 163 | 164 | def backward_G(self): 165 | """Calculate the loss for generators G_A and G_B""" 166 | lambda_idt = self.opt.lambda_idt 167 | lambda_A = self.opt.lambda_A 168 | lambda_B = self.opt.lambda_B 169 | 170 | 171 | # GAN loss D_A(G_A(A,B)) 172 | self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)*lambda_A # Adversarial loss for G in paper PairedCycleGAN 173 | # GAN loss D_B(G_B(B)) 174 | self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)*lambda_B # Adversarial loss for F in paper PairedCycleGAN 175 | # Identity loss : || G_B(G_A(A,B)) - A|| 176 | self.loss_idt = self.criterionCycle(self.rec_A, self.real_A)*lambda_idt 177 | # Style loss : || G_A(G_B(B),G_A(A,B)) - B|| 178 | self.loss_style = self.criterionIdt(self.rec_B, self.real_B) 179 | 180 | # combined loss and calculate gradients 181 | self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_idt + self.loss_style 182 | self.loss_G.backward() 183 | 184 | def optimize_parameters(self): 185 | """Calculate losses, gradients, and update network weights; called in every training iteration""" 186 | # forward 187 | self.forward() # compute fake images and reconstruction images. 188 | # G_A and G_B 189 | self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs 190 | self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero 191 | self.backward_G() # calculate gradients for G_A and G_B 192 | self.optimizer_G.step() # update G_A and G_B's weights 193 | # D_A and D_B 194 | self.set_requires_grad([self.netD_A, self.netD_B], True) 195 | self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero 196 | self.backward_D_A() # calculate gradients for D_A 197 | self.backward_D_B() # calculate gradients for D_B 198 | self.optimizer_D.step() # update D_A and D_B's weights 199 | -------------------------------------------------------------------------------- /models/paired_cycle_gan_mouth_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Sat Nov 17 2019 5 | @author: jingyu 6 | """ 7 | 8 | # Basic Code is taken from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix 9 | 10 | 11 | import torch 12 | import itertools 13 | from util.image_pool import ImagePool 14 | from .base_model import BaseModel 15 | from . import networks 16 | from util.util import tensor2im 17 | 18 | 19 | class PairedCycleGANMouthModel(BaseModel): 20 | """ 21 | This class implements the PairedCycleGAN model, for learning image-to-image translation with paired data. 22 | Especially for mouth training.
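For reference, the generator objective assembled in backward_G above (and repeated verbatim in this mouth variant) is loss_G = lambda_A * GAN(D_A(G_A(A,B))) + lambda_B * GAN(D_B(G_B(B))) + lambda_idt * ||G_B(G_A(A,B)) - A||_1 + ||G_A(G_B(B), G_A(A,B)) - B||_1. With the defaults above that is a weight of 0.2 on each adversarial term, weight 1 on the identity (forward-cycle) term, and an unweighted style (backward-cycle) term; note that although the last term is computed with self.criterionIdt, it is the style loss, and both reconstruction terms are plain L1 distances.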
23 | 24 | The model training requires '--dataset_mode aligned' dataset. 25 | By default, it uses a '--netG resnet_3blocks' DilatedResNet generator, 26 | a '--netD basic' discriminator (PatchGAN introduced by pix2pix), 27 | 28 | """ 29 | @staticmethod 30 | def modify_commandline_options(parser, is_train=True): 31 | """Add new dataset-specific options, and rewrite default values for existing options. 32 | 33 | Parameters: 34 | parser -- original option parser 35 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. 36 | 37 | Returns: 38 | the modified parser. 39 | 40 | For PairedCycleGAN, in addition to GAN losses, we introduce lambda_A, lambda_B for the following losses. 41 | A (source domain), B (target domain). 42 | Generators: G_A: A -> B; G_B: B -> A. 43 | Discriminators: D_A: G_A(A) vs. B; D_B: G_B(B) vs. A. 44 | Forward cycle loss: lambda_A * ||G_B(G_A(A)) - A|| 45 | Backward cycle loss: lambda_B * ||G_A(G_B(B)) - B|| 46 | Dropout is used in the original PairedCycleGAN paper. 47 | """ 48 | parser.set_defaults(no_dropout=False, dataset_mode='aligned') # default CycleGAN did not use dropout 49 | if is_train: 50 | parser.add_argument('--lambda_A', type=float, default=0.2, help='weight for GAN loss (A -> B -> A)') 51 | parser.add_argument('--lambda_B', type=float, default=0.2, help='weight for GAN loss (B -> A -> B)') 52 | parser.add_argument('--lambda_idt', type=float, default=1, help='weight for loss of identity loss')# identiy loss is 1 in the original paper 53 | 54 | return parser 55 | 56 | def __init__(self, opt): 57 | """Initialize the PairedCycleGAN class. 58 | 59 | Parameters: 60 | opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions 61 | """ 62 | BaseModel.__init__(self, opt) 63 | # specify the training losses you want to print out. The training/test scripts will call 64 | self.loss_names = ['D_A', 'G_A', 'idt', 'D_B', 'G_B', 'style'] 65 | # specify the images you want to save/display. The training/test scripts will call 66 | visual_names_A = ['real_A', 'fake_B', 'rec_A'] 67 | visual_names_B = ['real_B', 'fake_A', 'rec_B'] 68 | # if self.isTrain and self.opt.lambda_identity > 0.0: # if identity loss is used, we also visualize idt_B=G_A(B) ad idt_A=G_A(B) 69 | # visual_names_A.append('idt_B') 70 | # visual_names_B.append('idt_A') 71 | 72 | self.visual_names = visual_names_A + visual_names_B # combine visualizations for A and B 73 | # specify the models you want to save to the disk. The training/test scripts will call and . 74 | if self.isTrain: 75 | self.model_names = ['G_A', 'G_B', 'D_A', 'D_B'] 76 | else: # during test time, only load Gs 77 | self.model_names = ['G_A', 'G_B'] 78 | 79 | # define networks (both Generators and discriminators) 80 | # The naming is different from those used in the paper. 81 | # Code (vs. 
paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X) 82 | self.netG_A = networks.define_G(opt.input_nc*2, opt.output_nc, opt.ngf, opt.netG, opt.norm, 83 | not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) 84 | self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm, 85 | not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids) 86 | 87 | if self.isTrain: # define discriminators 88 | self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD, 89 | opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) 90 | self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD, 91 | opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids) 92 | 93 | if self.isTrain: 94 | if opt.lambda_idt > 0.0: # only works when input and output images have the same number of channels 95 | assert(opt.input_nc == opt.output_nc) 96 | self.fake_A_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images 97 | self.fake_B_pool = ImagePool(opt.pool_size) # create image buffer to store previously generated images 98 | # define loss functions 99 | self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device) # define GAN loss. 100 | self.criterionCycle = torch.nn.L1Loss() 101 | self.criterionIdt = torch.nn.L1Loss() 102 | # initialize optimizers; schedulers will be automatically created by function . 103 | self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999)) 104 | self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999)) 105 | self.optimizers.append(self.optimizer_G) 106 | self.optimizers.append(self.optimizer_D) 107 | 108 | def set_input(self, input): 109 | """Unpack input data from the dataloader and perform necessary pre-processing steps. 110 | 111 | Parameters: 112 | input (dict): include the data itself and its metadata information. 113 | 114 | The option 'direction' can be used to swap domain A and domain B. 115 | """ 116 | AtoB = self.opt.direction == 'AtoB' 117 | self.real_A = input['A' if AtoB else 'B'].to(self.device) 118 | self.real_B = input['B' if AtoB else 'A'].to(self.device) 119 | self.image_paths = input['A_paths' if AtoB else 'B_paths'] 120 | 121 | 122 | def forward(self): 123 | """Run forward pass; called by both functions and .""" 124 | self.real_AB = torch.cat((self.real_A, self.real_B), 1) 125 | self.fake_B = self.netG_A(self.real_AB) # G_A(A,B) 126 | self.rec_A = self.netG_B(self.fake_B) # G_B(G_A(A,B)) 127 | self.fake_A = self.netG_B(self.real_B) # G_B(B) 128 | self.fake_AB = torch.cat((self.fake_A, self.fake_B),1) #G_B(B),G_A(A,B) 129 | self.rec_B = self.netG_A(self.fake_AB) # G_A(G_B(B), G_A(A,B)) 130 | 131 | def backward_D_basic(self, netD, real, fake): 132 | """Calculate GAN loss for the discriminator 133 | 134 | Parameters: 135 | netD (network) -- the discriminator D 136 | real (tensor array) -- real images 137 | fake (tensor array) -- images generated by a generator 138 | 139 | Return the discriminator loss. 140 | We also call loss_D.backward() to calculate the gradients. 
141 | Discriminator's losses are not changed from the original CycleGAN model 142 | """ 143 | # Real 144 | pred_real = netD(real) 145 | loss_D_real = self.criterionGAN(pred_real, True) 146 | # Fake 147 | pred_fake = netD(fake.detach()) 148 | loss_D_fake = self.criterionGAN(pred_fake, False) 149 | # Combined loss and calculate gradients 150 | loss_D = (loss_D_real + loss_D_fake) * 0.5 151 | loss_D.backward() 152 | return loss_D 153 | 154 | def backward_D_A(self): 155 | """Calculate GAN loss for discriminator D_A""" 156 | fake_B = self.fake_B_pool.query(self.fake_B) 157 | self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B) 158 | 159 | def backward_D_B(self): 160 | """Calculate GAN loss for discriminator D_B""" 161 | fake_A = self.fake_A_pool.query(self.fake_A) 162 | self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A) 163 | 164 | def backward_G(self): 165 | """Calculate the loss for generators G_A and G_B""" 166 | lambda_idt = self.opt.lambda_idt 167 | lambda_A = self.opt.lambda_A 168 | lambda_B = self.opt.lambda_B 169 | 170 | 171 | # GAN loss D_A(G_A(A,B)) 172 | self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)*lambda_A # Adversarial loss for G in paper PairedCycleGAN 173 | # GAN loss D_B(G_B(B)) 174 | self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)*lambda_B # Adversarial loss for F in paper PairedCycleGAN 175 | # Identity loss : || G_B(G_A(A,B)) - A|| 176 | self.loss_idt = self.criterionCycle(self.rec_A, self.real_A)*lambda_idt 177 | # Style loss : || G_A(G_B(B),G_A(A,B)) - B|| 178 | self.loss_style = self.criterionIdt(self.rec_B, self.real_B) 179 | 180 | # combined loss and calculate gradients 181 | self.loss_G = self.loss_G_A + self.loss_G_B + self.loss_idt + self.loss_style 182 | self.loss_G.backward() 183 | 184 | def optimize_parameters(self): 185 | """Calculate losses, gradients, and update network weights; called in every training iteration""" 186 | # forward 187 | self.forward() # compute fake images and reconstruction images. 188 | # G_A and G_B 189 | self.set_requires_grad([self.netD_A, self.netD_B], False) # Ds require no gradients when optimizing Gs 190 | self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero 191 | self.backward_G() # calculate gradients for G_A and G_B 192 | self.optimizer_G.step() # update G_A and G_B's weights 193 | # D_A and D_B 194 | self.set_requires_grad([self.netD_A, self.netD_B], True) 195 | self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero 196 | self.backward_D_A() # calculate gradients for D_A 197 | self.backward_D_B() # calculate gradients for D_B 198 | self.optimizer_D.step() # update D_A and D_B's weights 199 | -------------------------------------------------------------------------------- /models/base_model.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | from collections import OrderedDict 4 | from abc import ABC, abstractmethod 5 | from . import networks 6 | 7 | 8 | class BaseModel(ABC): 9 | """This class is an abstract base class (ABC) for models. 10 | To create a subclass, you need to implement the following five functions: 11 | -- <__init__>: initialize the class; first call BaseModel.__init__(self, opt). 12 | -- <set_input>: unpack data from dataset and apply preprocessing. 13 | -- <forward>: produce intermediate results. 14 | -- <optimize_parameters>: calculate losses, gradients, and update network weights. 15 | -- <modify_commandline_options>: (optionally) add model-specific options and set default options.
16 | """ 17 | 18 | def __init__(self, opt): 19 | """Initialize the BaseModel class. 20 | 21 | Parameters: 22 | opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions 23 | 24 | When creating your custom class, you need to implement your own initialization. 25 | In this fucntion, you should first call 26 | Then, you need to define four lists: 27 | -- self.loss_names (str list): specify the training losses that you want to plot and save. 28 | -- self.model_names (str list): specify the images that you want to display and save. 29 | -- self.visual_names (str list): define networks used in our training. 30 | -- self.optimizers (optimizer list): define and initialize optimizers. You can define one optimizer for each network. If two networks are updated at the same time, you can use itertools.chain to group them. See cycle_gan_model.py for an example. 31 | """ 32 | self.opt = opt 33 | self.gpu_ids = opt.gpu_ids 34 | self.isTrain = opt.isTrain 35 | self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') # get device name: CPU or GPU 36 | self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) # save all the checkpoints to save_dir 37 | if opt.preprocess != 'scale_width': # with [scale_width], input images might have different sizes, which hurts the performance of cudnn.benchmark. 38 | torch.backends.cudnn.benchmark = True 39 | self.loss_names = [] 40 | self.model_names = [] 41 | self.visual_names = [] 42 | self.optimizers = [] 43 | self.image_paths = [] 44 | self.metric = 0 # used for learning rate policy 'plateau' 45 | 46 | @staticmethod 47 | def modify_commandline_options(parser, is_train): 48 | """Add new model-specific options, and rewrite default values for existing options. 49 | 50 | Parameters: 51 | parser -- original option parser 52 | is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options. 53 | 54 | Returns: 55 | the modified parser. 56 | """ 57 | return parser 58 | 59 | @abstractmethod 60 | def set_input(self, input): 61 | """Unpack input data from the dataloader and perform necessary pre-processing steps. 62 | 63 | Parameters: 64 | input (dict): includes the data itself and its metadata information. 65 | """ 66 | pass 67 | 68 | @abstractmethod 69 | def forward(self): 70 | """Run forward pass; called by both functions and .""" 71 | pass 72 | 73 | @abstractmethod 74 | def optimize_parameters(self): 75 | """Calculate losses, gradients, and update network weights; called in every training iteration""" 76 | pass 77 | 78 | def setup(self, opt): 79 | """Load and print networks; create schedulers 80 | 81 | Parameters: 82 | opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions 83 | """ 84 | if self.isTrain: 85 | self.schedulers = [networks.get_scheduler(optimizer, opt) for optimizer in self.optimizers] 86 | if not self.isTrain or opt.continue_train: 87 | load_suffix = 'iter_%d' % opt.load_iter if opt.load_iter > 0 else opt.epoch 88 | self.load_networks(load_suffix) 89 | self.print_networks(opt.verbose) 90 | 91 | def eval(self): 92 | """Make models eval mode during test time""" 93 | for name in self.model_names: 94 | if isinstance(name, str): 95 | net = getattr(self, 'net' + name) 96 | net.eval() 97 | 98 | def test(self): 99 | """Forward function used in test time. 
100 | 101 | This function wraps function in no_grad() so we don't save intermediate steps for backprop 102 | It also calls to produce additional visualization results 103 | """ 104 | with torch.no_grad(): 105 | self.forward() 106 | self.compute_visuals() 107 | 108 | def compute_visuals(self): 109 | """Calculate additional output images for visdom and HTML visualization""" 110 | pass 111 | 112 | def get_image_paths(self): 113 | """ Return image paths that are used to load current data""" 114 | return self.image_paths 115 | 116 | def update_learning_rate(self): 117 | """Update learning rates for all the networks; called at the end of every epoch""" 118 | for scheduler in self.schedulers: 119 | if self.opt.lr_policy == 'plateau': 120 | scheduler.step(self.metric) 121 | else: 122 | scheduler.step() 123 | 124 | lr = self.optimizers[0].param_groups[0]['lr'] 125 | print('learning rate = %.7f' % lr) 126 | 127 | def get_current_visuals(self): 128 | """Return visualization images. train.py will display these images with visdom, and save the images to a HTML""" 129 | visual_ret = OrderedDict() 130 | for name in self.visual_names: 131 | if isinstance(name, str): 132 | visual_ret[name] = getattr(self, name) 133 | return visual_ret 134 | 135 | def get_current_losses(self): 136 | """Return traning losses / errors. train.py will print out these errors on console, and save them to a file""" 137 | errors_ret = OrderedDict() 138 | for name in self.loss_names: 139 | if isinstance(name, str): 140 | errors_ret[name] = float(getattr(self, 'loss_' + name)) # float(...) works for both scalar tensor and float number 141 | return errors_ret 142 | 143 | def save_networks(self, epoch): 144 | """Save all the networks to the disk. 145 | 146 | Parameters: 147 | epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) 148 | """ 149 | for name in self.model_names: 150 | if isinstance(name, str): 151 | save_filename = '%s_net_%s.pth' % (epoch, name) 152 | save_path = os.path.join(self.save_dir, save_filename) 153 | net = getattr(self, 'net' + name) 154 | 155 | if len(self.gpu_ids) > 0 and torch.cuda.is_available(): 156 | torch.save(net.module.cpu().state_dict(), save_path) 157 | net.cuda(self.gpu_ids[0]) 158 | else: 159 | torch.save(net.cpu().state_dict(), save_path) 160 | 161 | def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0): 162 | """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)""" 163 | key = keys[i] 164 | if i + 1 == len(keys): # at the end, pointing to a parameter/buffer 165 | if module.__class__.__name__.startswith('InstanceNorm') and \ 166 | (key == 'running_mean' or key == 'running_var'): 167 | if getattr(module, key) is None: 168 | state_dict.pop('.'.join(keys)) 169 | if module.__class__.__name__.startswith('InstanceNorm') and \ 170 | (key == 'num_batches_tracked'): 171 | state_dict.pop('.'.join(keys)) 172 | else: 173 | self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) 174 | 175 | def load_networks(self, epoch): 176 | """Load all the networks from the disk. 
177 | 178 | Parameters: 179 | epoch (int) -- current epoch; used in the file name '%s_net_%s.pth' % (epoch, name) 180 | """ 181 | for name in self.model_names: 182 | if isinstance(name, str): 183 | load_filename = '%s_net_%s.pth' % (epoch, name) 184 | load_path = os.path.join(self.save_dir, load_filename) 185 | net = getattr(self, 'net' + name) 186 | if isinstance(net, torch.nn.DataParallel): 187 | net = net.module 188 | print('loading the model from %s' % load_path) 189 | # if you are using PyTorch newer than 0.4 (e.g., built from 190 | # GitHub source), you can remove str() on self.device 191 | state_dict = torch.load(load_path, map_location=str(self.device)) 192 | if hasattr(state_dict, '_metadata'): 193 | del state_dict._metadata 194 | 195 | # patch InstanceNorm checkpoints prior to 0.4 196 | for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop 197 | self.__patch_instance_norm_state_dict(state_dict, net, key.split('.')) 198 | net.load_state_dict(state_dict) 199 | 200 | def print_networks(self, verbose): 201 | """Print the total number of parameters in the network and (if verbose) network architecture 202 | 203 | Parameters: 204 | verbose (bool) -- if verbose: print the network architecture 205 | """ 206 | print('---------- Networks initialized -------------') 207 | for name in self.model_names: 208 | if isinstance(name, str): 209 | net = getattr(self, 'net' + name) 210 | num_params = 0 211 | for param in net.parameters(): 212 | num_params += param.numel() 213 | if verbose: 214 | print(net) 215 | print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6)) 216 | print('-----------------------------------------------') 217 | 218 | def set_requires_grad(self, nets, requires_grad=False): 219 | """Set requires_grad=False for all the networks to avoid unnecessary computations 220 | Parameters: 221 | nets (network list) -- a list of networks 222 | requires_grad (bool) -- whether the networks require gradients or not 223 | """ 224 | if not isinstance(nets, list): 225 | nets = [nets] 226 | for net in nets: 227 | if net is not None: 228 | for param in net.parameters(): 229 | param.requires_grad = requires_grad 230 | -------------------------------------------------------------------------------- /util/visualizer.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import sys 4 | import ntpath 5 | import time 6 | from . import util, html 7 | from subprocess import Popen, PIPE 8 | 9 | 10 | if sys.version_info[0] == 2: 11 | VisdomExceptionBase = Exception 12 | else: 13 | VisdomExceptionBase = ConnectionError 14 | 15 | 16 | def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256): 17 | """Save images to the disk. 18 | 19 | Parameters: 20 | webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details) 21 | visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs 22 | image_path (str) -- the string is used to create image paths 23 | aspect_ratio (float) -- the aspect ratio of saved images 24 | width (int) -- the images will be resized to width x width 25 | 26 | This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
27 | """ 28 | image_dir = webpage.get_image_dir() 29 | short_path = ntpath.basename(image_path[0]) 30 | name = os.path.splitext(short_path)[0] 31 | 32 | webpage.add_header(name) 33 | ims, txts, links = [], [], [] 34 | 35 | for label, im_data in visuals.items(): 36 | im = util.tensor2im(im_data) 37 | image_name = '%s_%s.png' % (name, label) 38 | save_path = os.path.join(image_dir, image_name) 39 | util.save_image(im, save_path, aspect_ratio=aspect_ratio) 40 | ims.append(image_name) 41 | txts.append(label) 42 | links.append(image_name) 43 | webpage.add_images(ims, txts, links, width=width) 44 | 45 | 46 | class Visualizer(): 47 | """This class includes several functions that can display/save images and print/save logging information. 48 | 49 | It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images. 50 | """ 51 | 52 | def __init__(self, opt): 53 | """Initialize the Visualizer class 54 | 55 | Parameters: 56 | opt -- stores all the experiment flags; needs to be a subclass of BaseOptions 57 | Step 1: Cache the training/test options 58 | Step 2: connect to a visdom server 59 | Step 3: create an HTML object for saveing HTML filters 60 | Step 4: create a logging file to store training losses 61 | """ 62 | self.opt = opt # cache the option 63 | self.display_id = opt.display_id 64 | self.use_html = opt.isTrain and not opt.no_html 65 | self.win_size = opt.display_winsize 66 | self.name = opt.name 67 | self.port = opt.display_port 68 | self.saved = False 69 | if self.display_id > 0: # connect to a visdom server given and 70 | import visdom 71 | self.ncols = opt.display_ncols 72 | self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env) 73 | if not self.vis.check_connection(): 74 | self.create_visdom_connections() 75 | 76 | if self.use_html: # create an HTML object at /web/; images will be saved under /web/images/ 77 | self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web') 78 | self.img_dir = os.path.join(self.web_dir, 'images') 79 | print('create web directory %s...' % self.web_dir) 80 | util.mkdirs([self.web_dir, self.img_dir]) 81 | # create a logging file to store training losses 82 | self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt') 83 | with open(self.log_name, "a") as log_file: 84 | now = time.strftime("%c") 85 | log_file.write('================ Training Loss (%s) ================\n' % now) 86 | 87 | def reset(self): 88 | """Reset the self.saved status""" 89 | self.saved = False 90 | 91 | def create_visdom_connections(self): 92 | """If the program could not connect to Visdom server, this function will start a new server at port < self.port > """ 93 | cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port 94 | print('\n\nCould not connect to Visdom server. \n Trying to start a server....') 95 | print('Command: %s' % cmd) 96 | Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) 97 | 98 | def display_current_results(self, visuals, epoch, save_result): 99 | """Display current results on visdom; save current results to an HTML file. 
100 | 101 | Parameters: 102 | visuals (OrderedDict) - - dictionary of images to display or save 103 | epoch (int) - - the current epoch 104 | save_result (bool) - - if save the current results to an HTML file 105 | """ 106 | if self.display_id > 0: # show images in the browser using visdom 107 | ncols = self.ncols 108 | if ncols > 0: # show all the images in one visdom panel 109 | ncols = min(ncols, len(visuals)) 110 | h, w = next(iter(visuals.values())).shape[:2] 111 | table_css = """<style> 112 | table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center} 113 | table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black} 114 | </style>""" % (w, h) # create a table css 115 | # create a table of images. 116 | title = self.name 117 | label_html = '' 118 | label_html_row = '' 119 | images = [] 120 | idx = 0 121 | for label, image in visuals.items(): 122 | image_numpy = util.tensor2im(image) 123 | label_html_row += '<td>%s</td>' % label 124 | images.append(image_numpy.transpose([2, 0, 1])) 125 | idx += 1 126 | if idx % ncols == 0: 127 | label_html += '<tr>%s</tr>' % label_html_row 128 | label_html_row = '' 129 | white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255 130 | while idx % ncols != 0: 131 | images.append(white_image) 132 | label_html_row += '<td></td>' 133 | idx += 1 134 | if label_html_row != '': 135 | label_html += '<tr>%s</tr>' % label_html_row 136 | try: 137 | self.vis.images(images, nrow=ncols, win=self.display_id + 1, 138 | padding=2, opts=dict(title=title + ' images')) 139 | label_html = '<table>%s</table>
' % label_html 140 | self.vis.text(table_css + label_html, win=self.display_id + 2, 141 | opts=dict(title=title + ' labels')) 142 | except VisdomExceptionBase: 143 | self.create_visdom_connections() 144 | 145 | else: # show each image in a separate visdom panel; 146 | idx = 1 147 | try: 148 | for label, image in visuals.items(): 149 | image_numpy = util.tensor2im(image) 150 | self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label), 151 | win=self.display_id + idx) 152 | idx += 1 153 | except VisdomExceptionBase: 154 | self.create_visdom_connections() 155 | 156 | if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved. 157 | self.saved = True 158 | # save images to the disk 159 | for label, image in visuals.items(): 160 | image_numpy = util.tensor2im(image) 161 | img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label)) 162 | util.save_image(image_numpy, img_path) 163 | 164 | # update website 165 | webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=1) 166 | for n in range(epoch, 0, -1): 167 | webpage.add_header('epoch [%d]' % n) 168 | ims, txts, links = [], [], [] 169 | 170 | for label, image_numpy in visuals.items(): 171 | image_numpy = util.tensor2im(image) 172 | img_path = 'epoch%.3d_%s.png' % (n, label) 173 | ims.append(img_path) 174 | txts.append(label) 175 | links.append(img_path) 176 | webpage.add_images(ims, txts, links, width=self.win_size) 177 | webpage.save() 178 | 179 | def plot_current_losses(self, epoch, counter_ratio, losses): 180 | """display the current losses on visdom display: dictionary of error labels and values 181 | 182 | Parameters: 183 | epoch (int) -- current epoch 184 | counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1 185 | losses (OrderedDict) -- training losses stored in the format of (name, float) pairs 186 | """ 187 | if not hasattr(self, 'plot_data'): 188 | self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())} 189 | self.plot_data['X'].append(epoch + counter_ratio) 190 | self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']]) 191 | try: 192 | self.vis.line( 193 | X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1), 194 | Y=np.array(self.plot_data['Y']), 195 | opts={ 196 | 'title': self.name + ' loss over time', 197 | 'legend': self.plot_data['legend'], 198 | 'xlabel': 'epoch', 199 | 'ylabel': 'loss'}, 200 | win=self.display_id) 201 | except VisdomExceptionBase: 202 | self.create_visdom_connections() 203 | 204 | # losses: same format as |losses| of plot_current_losses 205 | def print_current_losses(self, epoch, iters, losses, t_comp, t_data): 206 | """print current losses on console; also save the losses to the disk 207 | 208 | Parameters: 209 | epoch (int) -- current epoch 210 | iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch) 211 | losses (OrderedDict) -- training losses stored in the format of (name, float) pairs 212 | t_comp (float) -- computational time per data point (normalized by batch_size) 213 | t_data (float) -- data loading time per data point (normalized by batch_size) 214 | """ 215 | message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data) 216 | for k, v in losses.items(): 217 | message += '%s: %.3f ' % (k, v) 218 | 219 | print(message) # print the message 220 | with open(self.log_name, "a") as log_file: 221 | log_file.write('%s\n' % message) # save the 
message 222 | -------------------------------------------------------------------------------- /datasets/poisson_blending.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Python implementation of Poisson image blending 3 | It is for blending after training, but it can also be used to produce a warped image which could serve as the ground truth for the discriminator. 4 | ''' 5 | 6 | # Basic Code is taken from https://github.com/yskmt/pb 7 | 8 | 9 | 10 | import numpy as np 11 | from skimage import data, io 12 | import scipy.sparse 13 | from scipy.sparse import coo_matrix 14 | 15 | import matplotlib.pyplot as plt 16 | from PIL import Image 17 | import pdb 18 | import cv2 19 | import os 20 | import imutils 21 | import dlib 22 | import scipy.sparse.linalg # needed so scipy.sparse.linalg.spsolve resolves below (replaces a duplicate 'import os') 23 | import os.path as osp 24 | import re 25 | from imutils import face_utils 26 | from collections import OrderedDict 27 | 28 | 29 | def create_mask(img_mask, img_target, img_src, offset=(0, 0)): 30 | ''' 31 | Takes the np.array from the grayscale image 32 | ''' 33 | 34 | # crop img_mask and img_src to fit to the img_target 35 | hm, wm = img_mask.shape 36 | ht, wt, nl = img_target.shape 37 | 38 | hd0 = max(0, -offset[0]) 39 | wd0 = max(0, -offset[1]) 40 | 41 | hd1 = hm - max(hm + offset[0] - ht, 0) 42 | wd1 = wm - max(wm + offset[1] - wt, 0) 43 | 44 | mask = np.zeros((hm, wm)) 45 | mask[img_mask > 0] = 1 46 | mask[img_mask == 0] = 0 47 | 48 | mask = mask[hd0:hd1, wd0:wd1] 49 | src = img_src[hd0:hd1, wd0:wd1] 50 | 51 | # fix offset 52 | offset_adj = (max(offset[0], 0), max(offset[1], 0)) 53 | 54 | # remove edge from the mask so that we don't have to check the 55 | # edge condition 56 | mask[:, -1] = 0 57 | mask[:, 0] = 0 58 | mask[-1, :] = 0 59 | mask[0, :] = 0 60 | 61 | return mask, src, offset_adj 62 | 63 | 64 | def get_gradient_sum(img, i, j, h, w): 65 | """ 66 | Return the sum of the gradient of the source image. 67 | * 3D array for RGB 68 | """ 69 | 70 | v_sum = np.array([0.0, 0.0, 0.0]) 71 | v_sum = img[i, j] * 4 \ 72 | - img[i + 1, j] - img[i - 1, j] - img[i, j + 1] - img[i, j - 1] 73 | 74 | return v_sum 75 | 76 | 77 | def get_mixed_gradient_sum(img_src, img_target, i, j, h, w, ofs, 78 | c=1.0): 79 | """ 80 | Return the sum of the gradient of the source image.
81 | * 3D array for RGB 82 | c(>=0): larger, the more important the target image gradient is 83 | """ 84 | 85 | v_sum = np.array([0.0, 0.0, 0.0]) 86 | nb = np.array([[1, 0], [-1, 0], [0, 1], [0, -1]]) 87 | 88 | for kk in range(4): 89 | 90 | fp = img_src[i, j] - img_src[i + nb[kk, 0], j + nb[kk, 1]] 91 | gp = img_target[i + ofs[0], j + ofs[1]] \ 92 | - img_target[i + nb[kk, 0] + ofs[0], j + nb[kk, 1] + ofs[1]] 93 | 94 | # if np.linalg.norm(fp) > np.linalg.norm(gp): 95 | # v_sum += fp 96 | # else: 97 | # v_sum += gp 98 | 99 | v_sum += np.array([fp[0] if abs(fp[0] * c) > abs(gp[0]) else gp[0], 100 | fp[1] if abs(fp[1] * c) > abs(gp[1]) else gp[1], 101 | fp[2] if abs(fp[2] * c) > abs(gp[2]) else gp[2]]) 102 | 103 | return v_sum 104 | 105 | 106 | def poisson_blend(img_mask, img_src, img_target, method='mix', c=1.0, 107 | offset_adj=(0,0)): 108 | 109 | hm, wm = img_mask.shape 110 | region_size = hm * wm 111 | 112 | F = np.zeros((region_size, 3)) 113 | A = scipy.sparse.identity(region_size, format='lil') 114 | 115 | get_k = lambda i, j: i + j * hm 116 | 117 | # plane insertion 118 | if method in ['target', 'src']: 119 | for i in range(hm): 120 | for j in range(wm): 121 | k = get_k(i, j) 122 | 123 | # ignore the edge case (# of neighboor is always 4) 124 | if img_mask[i, j] == 1: 125 | 126 | if method == 'target': 127 | F[k] = img_target[i + offset_adj[0], j + offset_adj[1]] 128 | elif method == 'src': 129 | F[k] = img_src[i, j] 130 | else: 131 | F[k] = img_target[i + offset_adj[0], j + offset_adj[1]] 132 | 133 | # poisson blending 134 | else: 135 | if method == 'mix': 136 | grad_func = lambda ii, jj: get_mixed_gradient_sum( 137 | img_src, img_target, ii, jj, hm, wm, offset_adj, c=c) 138 | else: 139 | grad_func = lambda ii, jj: get_gradient_sum( 140 | img_src, ii, jj, hm, wm) 141 | 142 | for i in range(hm): 143 | for j in range(wm): 144 | k = get_k(i, j) 145 | 146 | # ignore the edge case (# of neighboor is always 4) 147 | if img_mask[i, j] == 1: 148 | f_star = np.array([0.0, 0.0, 0.0]) 149 | 150 | if img_mask[i - 1, j] == 1: 151 | A[k, k - 1] = -1 152 | else: 153 | f_star += img_target[i - 1 + 154 | offset_adj[0], j + offset_adj[1]] 155 | 156 | if img_mask[i + 1, j] == 1: 157 | A[k, k + 1] = -1 158 | else: 159 | f_star += img_target[i + 1 + 160 | offset_adj[0], j + offset_adj[1]] 161 | 162 | if img_mask[i, j - 1] == 1: 163 | A[k, k - hm] = -1 164 | else: 165 | f_star += img_target[i + 166 | offset_adj[0], j - 1 + offset_adj[1]] 167 | 168 | if img_mask[i, j + 1] == 1: 169 | A[k, k + hm] = -1 170 | else: 171 | f_star += img_target[i + 172 | offset_adj[0], j + 1 + offset_adj[1]] 173 | 174 | A[k, k] = 4 175 | F[k] = grad_func(i, j) + f_star 176 | 177 | else: 178 | F[k] = img_target[i + offset_adj[0], j + offset_adj[1]] 179 | 180 | A = A.tocsr() 181 | 182 | img_pro = np.empty_like(img_target.astype(np.uint8)) 183 | img_pro[:] = img_target.astype(np.uint8) 184 | 185 | for l in range(3): 186 | # x = pyamg.solve(A, F[:, l], verb=True, tol=1e-15, maxiter=100) 187 | x = scipy.sparse.linalg.spsolve(A, F[:, l]) 188 | x[x > 255] = 255 189 | x[x < 0] = 0 190 | x = np.array(x, img_pro.dtype) 191 | 192 | img_pro[offset_adj[0]:offset_adj[0] + hm, 193 | offset_adj[1]:offset_adj[1] + wm, l]\ 194 | = x.reshape(hm, wm, order='F') 195 | 196 | return img_pro 197 | 198 | 199 | 200 | ### create mask images 201 | 202 | # mouth_idx = np.arange(48, 68) 203 | # right_eyebrow_idx = np.arange(17, 22) 204 | # left_eyebrow_idx = np.arange(22, 27) 205 | # right_eye_idx = np.arange(36,42) 206 | # left_eye_idx = np.arange(42, 48) 
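Stepping back from the commented-out landmark section for a moment: the linear system that poisson_blend assembles above is the standard discrete Poisson equation. For every pixel p inside the mask region, the row for index k = i + j*hm reads 4*f(p) - sum of f(q) over masked 4-neighbors q = (sum of source gradients at p) + (sum of target values t(q) over unmasked 4-neighbors), while pixels outside the mask keep their identity row f(p) = t(p). The sparse system A*f = F is then solved once per RGB channel with scipy.sparse.linalg.spsolve, clipped to [0, 255], and written back in column-major ('F') order to match that k indexing.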
### create mask images
#
# The commented-out code below generates circular masks around facial
# regions from dlib's 68-point landmarks; it is kept for reference
# (cf. datasets/face_parse.py).

# mouth_idx = np.arange(48, 68)
# right_eyebrow_idx = np.arange(17, 22)
# left_eyebrow_idx = np.arange(22, 27)
# right_eye_idx = np.arange(36, 42)
# left_eye_idx = np.arange(42, 48)
# nose_idx = np.arange(27, 35)


# FACIAL_LANDMARKS_IDXS = OrderedDict([
#     ("mouth", mouth_idx),
#     ("right_eye_eyebrow", np.append(right_eyebrow_idx, right_eye_idx)),
#     ("left_eye_eyebrow", np.append(left_eyebrow_idx, left_eye_idx)),
# ])

# shape_pred = './datasets/dataset/shape_predictor_68_face_landmarks.dat'

# detector = dlib.get_frontal_face_detector()
# predictor = dlib.shape_predictor(shape_pred)


# def parse_save(image, file, rects, predictor, FACIAL_LANDMARKS_IDXS, output_dir):
#
#     p = re.compile('(.*).png')
#     out_file_init = p.match(file).group(1)
#
#     for (i, rect) in enumerate(rects):
#
#         shape = predictor(gray, rect)   # NOTE: relies on a global `gray`
#         shape = face_utils.shape_to_np(shape)
#
#         for (name, idx_arr) in FACIAL_LANDMARKS_IDXS.items():
#
#             clone = image.copy()
#
#             (x, y), radius = cv2.minEnclosingCircle(np.array([shape[idx_arr]]))
#             center = (int(x), int(y))
#             radius = int(radius) + 12
#
#             mask = np.zeros(clone.shape, dtype=np.uint8)
#             mask = cv2.circle(mask, center, radius, (255, 255, 255), -1, 8, 0)
#
#             # result_array = clone & mask
#             # y_min = max(0, center[1] - radius)
#             # x_min = max(0, center[0] - radius)
#             # result_array = result_array[y_min:center[1] + radius,
#             #                             x_min:center[0] + radius, :]
#
#             out_file_name = output_dir + out_file_init + '_' + name + '.png'
#
#             cv2.imwrite(out_file_name, mask)

# input_dir = './datasets/dataset/B/test'
# output_dir = './datasets/dataset/B_mask/test/test'
# # def face_parse(input_dir, output_dir):
# splits = os.listdir(input_dir)
# for sp in splits:
#     img_fold = os.path.join(input_dir, sp)
#     img_list = os.listdir(img_fold)
#     num_imgs = len(img_list)
#     print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list)))
#     for n in range(num_imgs):
#         file = img_list[n]
#         path = os.path.join(img_fold, file)
#         if os.path.isfile(path):
#             image = cv2.imread(path, 1)
#             image = imutils.resize(image, width=512)
#             gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#
#             rects = detector(gray, 1)
#
#             print(file)
#             parse_save(image, file, rects, predictor, FACIAL_LANDMARKS_IDXS, output_dir)
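
# A minimal runnable sketch of the commented-out mask generation above; the
# function name, the `pad` parameter and the predictor path are assumptions.
def make_region_masks(image_path, predictor_path, output_dir, pad=12):
    """Detect one face and save a circular mask per facial region."""
    regions = OrderedDict([
        ("mouth", np.arange(48, 68)),
        ("right_eye_eyebrow", np.append(np.arange(17, 22), np.arange(36, 42))),
        ("left_eye_eyebrow", np.append(np.arange(22, 27), np.arange(42, 48))),
    ])
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(predictor_path)
    image = imutils.resize(cv2.imread(image_path, 1), width=512)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    stem = osp.splitext(osp.basename(image_path))[0]
    for rect in detector(gray, 1):
        shape = face_utils.shape_to_np(predictor(gray, rect))
        for name, idx in regions.items():
            (x, y), radius = cv2.minEnclosingCircle(shape[idx].astype(np.int32))
            mask = np.zeros(image.shape, dtype=np.uint8)
            cv2.circle(mask, (int(x), int(y)), int(radius) + pad,
                       (255, 255, 255), -1, 8, 0)
            cv2.imwrite(osp.join(output_dir, '%s_%s.png' % (stem, name)), mask)
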
def pb(path_src):

    offset = (0, 0)

    # # split AB image into A and B
    # AB = Image.open(path_src).convert('RGB')
    #
    # w, h = AB.size
    # w2 = int(w / 2)
    # A = AB.crop((0, 0, w2, h))
    # B = AB.crop((w2, 0, w, h))
    # path_A_split = './datasets/dataset/AB_split/A.png'
    # path_B_split = './datasets/dataset/AB_split/B.png'
    # A.save(path_A_split, 'PNG')
    # B.save(path_B_split, 'PNG')

    # NOTE: the paths below are hard-coded for a demo run, so the
    # path_src argument is effectively overridden here.
    path_src = './datasets/dataset/B/test/test/001.png'
    path_target = './datasets/dataset/A/test/test/004.png'
    path_mask = './datasets/dataset/B_mask/test/test001_left_eye_eyebrow.png'
    img_src = io.imread(path_src).astype(np.float64)
    img_src = imutils.resize(img_src, width=512)
    img_target = io.imread(path_target)
    img_target = imutils.resize(img_target, width=512)

    # blend the left eye/eyebrow region
    img_mask = io.imread(path_mask, as_gray=True)
    img_mask, img_src, offset_adj \
        = create_mask(img_mask.astype(np.float64),
                      img_target, img_src, offset=offset)

    img_pro = poisson_blend(img_mask, img_src, img_target,
                            method='normal', offset_adj=offset_adj)

    # blend the right eye/eyebrow region into the previous result
    path_mask = './datasets/dataset/B_mask/test/test001_right_eye_eyebrow.png'
    img_target = img_pro
    img_mask = io.imread(path_mask, as_gray=True)
    img_mask, img_src, offset_adj \
        = create_mask(img_mask.astype(np.float64),
                      img_target, img_src, offset=offset)

    img_pro = poisson_blend(img_mask, img_src, img_target,
                            method='normal', offset_adj=offset_adj)

    # blend the mouth region into the previous result
    path_mask = './datasets/dataset/B_mask/test/test001_mouth.png'
    img_target = img_pro
    img_mask = io.imread(path_mask, as_gray=True)
    img_mask, img_src, offset_adj \
        = create_mask(img_mask.astype(np.float64),
                      img_target, img_src, offset=offset)

    img_pro = poisson_blend(img_mask, img_src, img_target,
                            method='normal', offset_adj=offset_adj)

    plt.imshow(img_pro)
    plt.show()
    path_pro = './datasets/dataset/fake_B_warp/A_01.png'
    io.imsave(path_pro, img_pro)


pb('./datasets/dataset/B/test/test/001.png')
--------------------------------------------------------------------------------
/models/networks.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 17 2019
@author: jingyu
"""

# Basic code is taken from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix

import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler

BatchNorm = nn.BatchNorm2d
###############################################################################
# Helper Functions
###############################################################################


class Identity(nn.Module):
    def forward(self, x):
        return x


def get_norm_layer(norm_type='instance'):
    """Return a normalization layer

    Parameters:
        norm_type (str) -- the name of the normalization layer: batch | instance | none

    For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
    For InstanceNorm, we do not use learnable affine parameters and do not track running statistics.
    """
    if norm_type == 'batch':
        norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
    elif norm_type == 'instance':
        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
    elif norm_type == 'none':
        norm_layer = lambda x: Identity()
    else:
        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
    return norm_layer


def get_scheduler(optimizer, opt):
    """Return a learning rate scheduler

    Parameters:
        optimizer          -- the optimizer of the network
        opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
                              opt.lr_policy is the name of the learning rate policy: linear | step | plateau | cosine

    For 'linear', we keep the same learning rate for the first opt.niter epochs
    and linearly decay the rate to zero over the next opt.niter_decay epochs.
    For the other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
    See https://pytorch.org/docs/stable/optim.html for more details.
    """
    if opt.lr_policy == 'linear':
        def lambda_rule(epoch):
            lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
            return lr_l
        scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
    elif opt.lr_policy == 'step':
        scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
    elif opt.lr_policy == 'plateau':
        scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
    elif opt.lr_policy == 'cosine':
        scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0)
    else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
    return scheduler
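
# A short hedged sketch of get_scheduler with the 'linear' policy; the opt
# fields below (lr_policy, epoch_count, niter, niter_decay) mirror the ones
# this function reads and are assumed to come from TrainOptions.
def _scheduler_demo():
    import argparse
    opt = argparse.Namespace(lr_policy='linear', epoch_count=1,
                             niter=100, niter_decay=100)
    net = nn.Conv2d(3, 3, 3)
    optimizer = torch.optim.Adam(net.parameters(), lr=2e-4)
    scheduler = get_scheduler(optimizer, opt)
    for _ in range(5):  # lr stays at 2e-4 for the first opt.niter epochs
        optimizer.step()
        scheduler.step()
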
59 | """ 60 | if opt.lr_policy == 'linear': 61 | def lambda_rule(epoch): 62 | lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1) 63 | return lr_l 64 | scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule) 65 | elif opt.lr_policy == 'step': 66 | scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1) 67 | elif opt.lr_policy == 'plateau': 68 | scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5) 69 | elif opt.lr_policy == 'cosine': 70 | scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.niter, eta_min=0) 71 | else: 72 | return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy) 73 | return scheduler 74 | 75 | 76 | def init_weights(net, init_type='normal', init_gain=0.02): 77 | """Initialize network weights. 78 | 79 | Parameters: 80 | net (network) -- network to be initialized 81 | init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal 82 | init_gain (float) -- scaling factor for normal, xavier and orthogonal. 83 | 84 | We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might 85 | work better for some applications. Feel free to try yourself. 86 | """ 87 | def init_func(m): # define the initialization function 88 | classname = m.__class__.__name__ 89 | if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): 90 | if init_type == 'normal': 91 | init.normal_(m.weight.data, 0.0, init_gain) 92 | else: 93 | raise NotImplementedError('initialization method [%s] is not implemented' % init_type) 94 | if hasattr(m, 'bias') and m.bias is not None: 95 | init.constant_(m.bias.data, 0.0) 96 | elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies. 97 | init.normal_(m.weight.data, 1.0, init_gain) 98 | init.constant_(m.bias.data, 0.0) 99 | 100 | print('initialize network with %s' % init_type) 101 | net.apply(init_func) # apply the initialization function 102 | 103 | 104 | def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]): 105 | """Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights 106 | Parameters: 107 | net (network) -- the network to be initialized 108 | init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal 109 | gain (float) -- scaling factor for normal, xavier and orthogonal. 110 | gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2 111 | 112 | Return an initialized network. 113 | """ 114 | if len(gpu_ids) > 0: 115 | assert(torch.cuda.is_available()) 116 | net.to(gpu_ids[0]) 117 | net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs 118 | init_weights(net, init_type, init_gain=init_gain) 119 | return net 120 | 121 | 122 | def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]): 123 | """Create a generator 124 | Parameters: 125 | input_nc (int) -- the number of channels in input images 126 | output_nc (int) -- the number of channels in output images 127 | ngf (int) -- the number of filters in the last conv layer 128 | netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128 129 | norm (str) -- the name of normalization layers used in the network: batch | instance | none 130 | use_dropout (bool) -- if use dropout layers. 
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Create a generator

    Parameters:
        input_nc (int)     -- the number of channels in input images
        output_nc (int)    -- the number of channels in output images
        ngf (int)          -- the number of filters in the last conv layer
        netG (str)         -- the architecture's name; currently only resnet_3blocks is implemented
        norm (str)         -- the name of normalization layers used in the network: batch | instance | none
        use_dropout (bool) -- if use dropout layers.
        init_type (str)    -- the name of our initialization method.
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Returns a generator

    The current implementation provides one type of generator:
        Dilated-Resnet-based generator: [resnet_3blocks] (with 3 dilated Resnet blocks).
        It consists of several dilated Resnet blocks between a few downsampling/upsampling operations.

    The generator has been initialized by init_net. It uses ReLU for non-linearity.
    """
    net = None
    norm_layer = get_norm_layer(norm_type=norm)

    if netG == 'resnet_3blocks':
        net = DResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
    else:
        raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
    return init_net(net, init_type, init_gain, gpu_ids)


def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
    """Create a discriminator

    Parameters:
        input_nc (int)     -- the number of channels in input images
        ndf (int)          -- the number of filters in the first conv layer
        netD (str)         -- the architecture's name: n_layers | pixel
        n_layers_D (int)   -- the number of conv layers in the discriminator; effective when netD=='n_layers'
        norm (str)         -- the type of normalization layers used in the network.
        init_type (str)    -- the name of the initialization method.
        init_gain (float)  -- scaling factor for normal, xavier and orthogonal.
        gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2

    Returns a discriminator

    [n_layers]: With this mode, you can specify the number of conv layers in the
        discriminator with the parameter n_layers_D.
    [pixel]: a 1x1 PixelGAN discriminator that classifies whether each pixel is real or not.
        It encourages greater color diversity but has no effect on spatial statistics.

    The discriminator has been initialized by init_net. It uses Leaky ReLU for non-linearity.
    """
    net = None
    norm_layer = get_norm_layer(norm_type=norm)

    if netD == 'n_layers':  # flexible number of layers
        net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
    elif netD == 'pixel':   # classify if each pixel is real or fake
        net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
    else:
        raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
    return init_net(net, init_type, init_gain, gpu_ids)
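
# A hedged construction sketch: resnet_3blocks, n_layers and pixel are the
# only names these factories accept; shapes below assume 256x256 inputs and
# a discriminator fed with a (real, fake) channel concatenation.
def _factory_demo():
    netG = define_G(input_nc=3, output_nc=3, ngf=64, netG='resnet_3blocks',
                    norm='instance')
    netD = define_D(input_nc=6, ndf=64, netD='n_layers', n_layers_D=3)
    real = torch.randn(1, 3, 256, 256)
    fake = netG(real)                         # -> (1, 3, 256, 256)
    score = netD(torch.cat([real, fake], 1))  # -> a patch prediction map
    return fake, score
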
201 | """ 202 | 203 | def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0): 204 | """ Initialize the GANLoss class. 205 | 206 | Note: Do not use sigmoid as the last layer of Discriminator. 207 | LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss. 208 | """ 209 | super(GANLoss, self).__init__() 210 | self.register_buffer('real_label', torch.tensor(target_real_label)) 211 | self.register_buffer('fake_label', torch.tensor(target_fake_label)) 212 | self.gan_mode = gan_mode 213 | if gan_mode == 'lsgan': 214 | self.loss = nn.MSELoss() 215 | elif gan_mode == 'vanilla': 216 | self.loss = nn.BCEWithLogitsLoss() 217 | else: 218 | raise NotImplementedError('gan mode %s not implemented' % gan_mode) 219 | 220 | def get_target_tensor(self, prediction, target_is_real): 221 | """Create label tensors with the same size as the input. 222 | 223 | Parameters: 224 | prediction (tensor) - - tpyically the prediction from a discriminator 225 | target_is_real (bool) - - if the ground truth label is for real images or fake images 226 | 227 | Returns: 228 | A label tensor filled with ground truth label, and with the size of the input 229 | """ 230 | 231 | if target_is_real: 232 | target_tensor = self.real_label 233 | else: 234 | target_tensor = self.fake_label 235 | return target_tensor.expand_as(prediction) 236 | 237 | def __call__(self, prediction, target_is_real): 238 | """Calculate loss given Discriminator's output and ground truth labels. 239 | 240 | Parameters: 241 | prediction (tensor) - - tpyically the prediction output from a discriminator 242 | target_is_real (bool) - - if the ground truth label is for real images or fake images 243 | 244 | Returns: 245 | the calculated loss. 246 | """ 247 | target_tensor = self.get_target_tensor(prediction, target_is_real) 248 | loss = self.loss(prediction, target_tensor) 249 | return loss 250 | 251 | 252 | def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0): 253 | """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028 254 | 255 | Arguments: 256 | netD (network) -- discriminator network 257 | real_data (tensor array) -- real images 258 | fake_data (tensor array) -- generated images from the generator 259 | device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') 260 | type (str) -- if we mix real and fake data or not [real | fake | mixed]. 261 | constant (float) -- the constant used in formula ( | |gradient||_2 - constant)^2 262 | lambda_gp (float) -- weight for this loss 263 | 264 | Returns the gradient penalty loss 265 | """ 266 | if lambda_gp > 0.0: 267 | if type == 'real': # either use real images, fake images, or a linear interpolation of two. 
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
    """Calculate the gradient penalty loss, used in the WGAN-GP paper https://arxiv.org/abs/1704.00028

    Arguments:
        netD (network)           -- discriminator network
        real_data (tensor array) -- real images
        fake_data (tensor array) -- generated images from the generator
        device (str)             -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
        type (str)               -- if we mix real and fake data or not [real | fake | mixed].
        constant (float)         -- the constant used in the formula (||gradient||_2 - constant)^2
        lambda_gp (float)        -- weight for this loss

    Returns the gradient penalty loss
    """
    if lambda_gp > 0.0:
        if type == 'real':  # either use real images, fake images, or a linear interpolation of the two.
            interpolatesv = real_data
        elif type == 'fake':
            interpolatesv = fake_data
        elif type == 'mixed':
            alpha = torch.rand(real_data.shape[0], 1, device=device)
            alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
            interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
        else:
            raise NotImplementedError('{} not implemented'.format(type))
        interpolatesv.requires_grad_(True)
        disc_interpolates = netD(interpolatesv)
        gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
                                        grad_outputs=torch.ones(disc_interpolates.size()).to(device),
                                        create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)  # flatten the data
        gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp  # added eps
        return gradient_penalty, gradients
    else:
        return 0.0, None
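
# Hedged WGAN-GP sketch: the penalty is simply added to the discriminator
# loss before backward(); netD, real and fake here are placeholders.
def _gp_demo(netD, real, fake):
    gp, _ = cal_gradient_penalty(netD, real, fake, real.device,
                                 type='mixed', lambda_gp=10.0)
    return gp  # add this term to loss_D
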
class DResnetGenerator(nn.Module):
    """Dilated-Resnet-based generator that consists of dilated Resnet blocks between a few downsampling/upsampling operations.

    Code and ideas adapted from Jun-Yan Zhu's CycleGAN project (https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix).
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=True, padding_type='reflect'):
        """
        Parameters:
            input_nc (int)     -- the number of channels in input images
            output_nc (int)    -- the number of channels in output images
            ngf (int)          -- the number of filters in the last conv layer
            norm_layer         -- normalization layer
            use_dropout (bool) -- if use dropout layers
            padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
        """

        super(DResnetGenerator, self).__init__()
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        model = [nn.ReflectionPad2d(3),
                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                 norm_layer(ngf),
                 nn.ReLU(True)]

        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2 ** i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                      norm_layer(ngf * mult * 2),
                      nn.ReLU(True)]

        mult = 2 ** n_downsampling

        # add dilated Resnet blocks with increasing dilation (1, 2, 4)
        model += [DResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias, dilation=1)]
        model += [DResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias, dilation=2)]
        model += [DResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias, dilation=4)]

        for i in range(n_downsampling):  # add upsampling layers
            mult = 2 ** (n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                         kernel_size=3, stride=2,
                                         padding=1, output_padding=1,
                                         bias=use_bias),
                      norm_layer(int(ngf * mult / 2)),
                      nn.ReLU(True)]
        model += [nn.ReflectionPad2d(2)]
        model += [nn.Conv2d(ngf, ngf, kernel_size=3, padding=0),
                  norm_layer(ngf),
                  nn.ReLU(True)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=3, padding=0)]
        model += [nn.Tanh()]

        self.model = nn.Sequential(*model)

    def forward(self, input):
        """Standard forward"""
        return self.model(input)


class DResnetBlock(nn.Module):
    """Define a dilated Resnet block"""

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias, dilation):
        """Initialize the Resnet block

        A Resnet block is a conv block with skip connections.
        We construct a conv block with the build_conv_block function,
        and implement skip connections in the forward function.
        Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
        """
        super(DResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, dilation)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias, dilation):
        """
        Construct a dilated convolutional block.

        Parameters:
            dim (int)          -- the number of channels in the conv layer.
            padding_type (str) -- the name of padding layer: only 'reflect' is implemented here
            norm_layer         -- normalization layer
            use_dropout (bool) -- if use dropout layers.
            use_bias (bool)    -- if the conv layer uses bias or not
            dilation           -- the dilation of the conv block

        Returns a dilated conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
        """
        conv_block = []

        # ReflectionPad2d(1) plus padding=dilation-1 gives a total padding of
        # `dilation`, so each 3x3 dilated conv preserves the spatial size.
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=dilation - 1, bias=use_bias, dilation=dilation), norm_layer(dim), nn.ReLU(True)]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=dilation - 1, bias=use_bias, dilation=dilation), norm_layer(dim)]
        return nn.Sequential(*conv_block)

    def forward(self, x):
        """Forward function (with skip connections)"""
        out = x + self.conv_block(x)  # add skip connections
        return out
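
# Hedged shape check: with dilations 1/2/4 each block keeps the spatial size,
# so DResnetGenerator maps HxW inputs to HxW outputs (demo name is mine).
def _generator_shape_demo():
    net = DResnetGenerator(input_nc=3, output_nc=3, ngf=64,
                           norm_layer=nn.InstanceNorm2d, use_dropout=False)
    out = net(torch.randn(1, 3, 128, 128))
    assert out.shape == (1, 3, 128, 128)
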
class NLayerDiscriminator(nn.Module):
    """Defines a PatchGAN discriminator"""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
        """Construct a PatchGAN discriminator

        Parameters:
            input_nc (int) -- the number of channels in input images
            ndf (int)      -- the number of filters in the last conv layer
            n_layers (int) -- the number of conv layers in the discriminator
            norm_layer     -- normalization layer
        """
        super(NLayerDiscriminator, self).__init__()
        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        kw = 4
        padw = 1
        sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):  # gradually increase the number of filters
            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2, True)
            ]

        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 8)
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True)
        ]

        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]  # output a 1-channel prediction map
        self.model = nn.Sequential(*sequence)

    def forward(self, input):
        """Standard forward."""
        return self.model(input)


class PixelDiscriminator(nn.Module):
    """Defines a 1x1 PatchGAN discriminator (pixelGAN)"""

    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
        """Construct a 1x1 PatchGAN discriminator

        Parameters:
            input_nc (int) -- the number of channels in input images
            ndf (int)      -- the number of filters in the last conv layer
            norm_layer     -- normalization layer
        """
        super(PixelDiscriminator, self).__init__()
        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        self.net = [
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
            norm_layer(ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]

        self.net = nn.Sequential(*self.net)

    def forward(self, input):
        """Standard forward."""
        return self.net(input)
--------------------------------------------------------------------------------