├── .gitignore
├── CODE_OF_CONDUCT.md
├── LICENSE
├── README.md
├── SECURITY.md
├── data
├── __init__.py
├── ade20k_dataset.py
├── ade20k_ref_test.txt
├── base_dataset.py
├── celebahq_dataset.py
├── celebahq_ref_test.txt
├── celebahqedge_dataset.py
├── deepfashion_dataset.py
├── deepfashion_ref_test.txt
├── deepfashion_self_pair.txt
├── flickr_dataset.py
├── image_folder.py
└── pix2pix_dataset.py
├── imgs
├── DeepFashion
│ ├── img
│ │ └── WOMEN
│ │ │ ├── Blouses_Shirts
│ │ │ └── id_00007636
│ │ │ │ └── 01_1_front.jpg
│ │ │ ├── Cardigans
│ │ │ ├── id_00006266
│ │ │ │ └── 02_2_side.jpg
│ │ │ └── id_00006462
│ │ │ │ └── 02_7_additional.jpg
│ │ │ ├── Dresses
│ │ │ ├── id_00000203
│ │ │ │ └── 03_1_front.jpg
│ │ │ ├── id_00000244
│ │ │ │ └── 02_1_front.jpg
│ │ │ ├── id_00002791
│ │ │ │ └── 01_2_side.jpg
│ │ │ └── id_00006245
│ │ │ │ └── 03_2_side.jpg
│ │ │ └── Tees_Tanks
│ │ │ └── id_00007059
│ │ │ └── 03_3_back.jpg
│ ├── pose
│ │ └── WOMEN
│ │ │ ├── Blouses_Shirts
│ │ │ └── id_00007636
│ │ │ │ ├── 01_1_front_candidate.txt
│ │ │ │ └── 01_1_front_subset.txt
│ │ │ ├── Cardigans
│ │ │ ├── id_00006266
│ │ │ │ ├── 02_2_side_candidate.txt
│ │ │ │ └── 02_2_side_subset.txt
│ │ │ └── id_00006462
│ │ │ │ ├── 02_7_additional_candidate.txt
│ │ │ │ └── 02_7_additional_subset.txt
│ │ │ ├── Dresses
│ │ │ ├── id_00000203
│ │ │ │ ├── 03_1_front_candidate.txt
│ │ │ │ └── 03_1_front_subset.txt
│ │ │ ├── id_00000244
│ │ │ │ ├── 02_1_front_candidate.txt
│ │ │ │ └── 02_1_front_subset.txt
│ │ │ ├── id_00002791
│ │ │ │ ├── 01_2_side_candidate.txt
│ │ │ │ └── 01_2_side_subset.txt
│ │ │ └── id_00006245
│ │ │ │ ├── 03_2_side_candidate.txt
│ │ │ │ └── 03_2_side_subset.txt
│ │ │ └── Tees_Tanks
│ │ │ └── id_00007059
│ │ │ ├── 03_3_back_candidate.txt
│ │ │ └── 03_3_back_subset.txt
│ └── val.txt
├── ade20k
│ ├── training
│ │ ├── ADE_train_00004996.jpg
│ │ ├── ADE_train_00004996.png
│ │ ├── ADE_train_00007201.jpg
│ │ ├── ADE_train_00007201.png
│ │ ├── ADE_train_00007860.jpg
│ │ ├── ADE_train_00007860.png
│ │ ├── ADE_train_00010793.jpg
│ │ ├── ADE_train_00010793.png
│ │ ├── ADE_train_00015171.jpg
│ │ ├── ADE_train_00015171.png
│ │ ├── ADE_train_00017763.jpg
│ │ └── ADE_train_00017763.png
│ └── validation
│ │ ├── ADE_val_00001232.jpg
│ │ ├── ADE_val_00001232.png
│ │ ├── ADE_val_00001349.jpg
│ │ ├── ADE_val_00001349.png
│ │ ├── ADE_val_00001517.jpg
│ │ ├── ADE_val_00001517.png
│ │ ├── ADE_val_00001599.jpg
│ │ ├── ADE_val_00001599.png
│ │ ├── ADE_val_00001697.jpg
│ │ ├── ADE_val_00001697.png
│ │ ├── ADE_val_00001996.jpg
│ │ └── ADE_val_00001996.png
├── celebahq
│ ├── CelebA-HQ-img
│ │ ├── 23071.jpg
│ │ ├── 25807.jpg
│ │ ├── 26768.jpg
│ │ ├── 26969.jpg
│ │ ├── 29883.jpg
│ │ ├── 29922.jpg
│ │ ├── 5487.jpg
│ │ └── 6897.jpg
│ ├── CelebAMask-HQ-mask-anno
│ │ ├── 2
│ │ │ ├── 05487_l_eye.png
│ │ │ └── 05487_r_eye.png
│ │ ├── 3
│ │ │ ├── 06897_l_eye.png
│ │ │ └── 06897_r_eye.png
│ │ ├── 11
│ │ │ ├── 23071_l_eye.png
│ │ │ └── 23071_r_eye.png
│ │ ├── 12
│ │ │ ├── 25807_l_eye.png
│ │ │ └── 25807_r_eye.png
│ │ ├── 13
│ │ │ ├── 26768_l_eye.png
│ │ │ ├── 26768_r_eye.png
│ │ │ ├── 26969_l_eye.png
│ │ │ └── 26969_r_eye.png
│ │ ├── 14
│ │ │ ├── 29883_l_eye.png
│ │ │ ├── 29883_r_eye.png
│ │ │ ├── 29922_l_eye.png
│ │ │ └── 29922_r_eye.png
│ │ └── all_parts_except_glasses
│ │ │ ├── 05487.png
│ │ │ ├── 06897.png
│ │ │ ├── 23071.png
│ │ │ ├── 25807.png
│ │ │ ├── 26768.png
│ │ │ ├── 26969.png
│ │ │ ├── 29883.png
│ │ │ └── 29922.png
│ └── val.txt
├── celebahqedge
│ ├── CelebA-HQ-img
│ │ ├── 14721.jpg
│ │ ├── 15064.jpg
│ │ ├── 22233.jpg
│ │ ├── 26397.jpg
│ │ ├── 29318.jpg
│ │ ├── 29539.jpg
│ │ ├── 29642.jpg
│ │ └── 2994.jpg
│ ├── CelebAMask-HQ-mask-anno
│ │ ├── 1
│ │ │ ├── 02994_cloth.png
│ │ │ ├── 02994_ear_r.png
│ │ │ ├── 02994_hair.png
│ │ │ ├── 02994_l_brow.png
│ │ │ ├── 02994_l_eye.png
│ │ │ ├── 02994_l_lip.png
│ │ │ ├── 02994_mouth.png
│ │ │ ├── 02994_neck.png
│ │ │ ├── 02994_nose.png
│ │ │ ├── 02994_r_brow.png
│ │ │ ├── 02994_r_ear.png
│ │ │ ├── 02994_r_eye.png
│ │ │ ├── 02994_skin.png
│ │ │ └── 02994_u_lip.png
│ │ ├── 7
│ │ │ ├── 14721_hair.png
│ │ │ ├── 14721_l_brow.png
│ │ │ ├── 14721_l_eye.png
│ │ │ ├── 14721_l_lip.png
│ │ │ ├── 14721_mouth.png
│ │ │ ├── 14721_neck.png
│ │ │ ├── 14721_nose.png
│ │ │ ├── 14721_r_brow.png
│ │ │ ├── 14721_r_eye.png
│ │ │ ├── 14721_skin.png
│ │ │ ├── 14721_u_lip.png
│ │ │ ├── 15064_ear_r.png
│ │ │ ├── 15064_hair.png
│ │ │ ├── 15064_l_brow.png
│ │ │ ├── 15064_l_ear.png
│ │ │ ├── 15064_l_eye.png
│ │ │ ├── 15064_l_lip.png
│ │ │ ├── 15064_mouth.png
│ │ │ ├── 15064_neck.png
│ │ │ ├── 15064_nose.png
│ │ │ ├── 15064_r_brow.png
│ │ │ ├── 15064_r_ear.png
│ │ │ ├── 15064_r_eye.png
│ │ │ ├── 15064_skin.png
│ │ │ └── 15064_u_lip.png
│ │ ├── 11
│ │ │ ├── 22233_cloth.png
│ │ │ ├── 22233_ear_r.png
│ │ │ ├── 22233_hair.png
│ │ │ ├── 22233_l_brow.png
│ │ │ ├── 22233_l_eye.png
│ │ │ ├── 22233_l_lip.png
│ │ │ ├── 22233_neck.png
│ │ │ ├── 22233_nose.png
│ │ │ ├── 22233_r_brow.png
│ │ │ ├── 22233_r_ear.png
│ │ │ ├── 22233_r_eye.png
│ │ │ ├── 22233_skin.png
│ │ │ └── 22233_u_lip.png
│ │ ├── 13
│ │ │ ├── 26397_cloth.png
│ │ │ ├── 26397_hair.png
│ │ │ ├── 26397_l_brow.png
│ │ │ ├── 26397_l_ear.png
│ │ │ ├── 26397_l_eye.png
│ │ │ ├── 26397_l_lip.png
│ │ │ ├── 26397_mouth.png
│ │ │ ├── 26397_neck.png
│ │ │ ├── 26397_nose.png
│ │ │ ├── 26397_r_brow.png
│ │ │ ├── 26397_r_ear.png
│ │ │ ├── 26397_r_eye.png
│ │ │ ├── 26397_skin.png
│ │ │ └── 26397_u_lip.png
│ │ └── 14
│ │ │ ├── 29318_hair.png
│ │ │ ├── 29318_l_brow.png
│ │ │ ├── 29318_l_eye.png
│ │ │ ├── 29318_l_lip.png
│ │ │ ├── 29318_mouth.png
│ │ │ ├── 29318_neck.png
│ │ │ ├── 29318_nose.png
│ │ │ ├── 29318_r_brow.png
│ │ │ ├── 29318_r_eye.png
│ │ │ ├── 29318_skin.png
│ │ │ ├── 29318_u_lip.png
│ │ │ ├── 29539_hair.png
│ │ │ ├── 29539_l_brow.png
│ │ │ ├── 29539_l_ear.png
│ │ │ ├── 29539_l_eye.png
│ │ │ ├── 29539_l_lip.png
│ │ │ ├── 29539_neck.png
│ │ │ ├── 29539_nose.png
│ │ │ ├── 29539_r_brow.png
│ │ │ ├── 29539_r_eye.png
│ │ │ ├── 29539_skin.png
│ │ │ ├── 29539_u_lip.png
│ │ │ ├── 29642_cloth.png
│ │ │ ├── 29642_hair.png
│ │ │ ├── 29642_l_brow.png
│ │ │ ├── 29642_l_ear.png
│ │ │ ├── 29642_l_eye.png
│ │ │ ├── 29642_l_lip.png
│ │ │ ├── 29642_neck.png
│ │ │ ├── 29642_nose.png
│ │ │ ├── 29642_r_brow.png
│ │ │ ├── 29642_r_ear.png
│ │ │ ├── 29642_r_eye.png
│ │ │ ├── 29642_skin.png
│ │ │ └── 29642_u_lip.png
│ └── val.txt
├── demo.gif
└── teaser.png
├── models
├── __init__.py
├── networks
│ ├── ContextualLoss.py
│ ├── __init__.py
│ ├── architecture.py
│ ├── base_network.py
│ ├── correspondence.py
│ ├── discriminator.py
│ ├── generator.py
│ ├── loss.py
│ └── normalization.py
└── pix2pix_model.py
├── options
├── __init__.py
├── base_options.py
├── test_options.py
└── train_options.py
├── output
└── test
│ ├── ade20k
│ └── 0.png
│ ├── celebahq
│ └── 0.png
│ ├── celebahqedge
│ └── 0.png
│ ├── deepfashion
│ └── 0.png
│ └── teaser.png
├── requirements.txt
├── test.py
├── train.py
├── trainers
├── __init__.py
└── pix2pix_trainer.py
└── util
├── __init__.py
├── color150.mat
├── iter_counter.py
├── mask_to_edge.py
└── util.py
/.gitignore:
-------------------------------------------------------------------------------- 1 | saved_files/ 2 | scripts/ 3 | __pycache__/ 4 | models/networks/Synchronized-BatchNorm-PyTorch/ 5 | models/networks/sync_batchnorm/ 6 | *.pyc 7 | *.pth 8 | *.npy 9 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report). 
14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc). 16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd). 40 | 41 | -------------------------------------------------------------------------------- /data/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import importlib 7 | import torch.utils.data 8 | from data.base_dataset import BaseDataset 9 | 10 | 11 | def find_dataset_using_name(dataset_name): 12 | # Given the option --dataset [datasetname], 13 | # the file "data/datasetname_dataset.py" 14 | # will be imported. 15 | dataset_filename = "data." + dataset_name + "_dataset" 16 | datasetlib = importlib.import_module(dataset_filename) 17 | 18 | # In the file, the class called DatasetNameDataset() will 19 | # be instantiated. It has to be a subclass of BaseDataset, 20 | # and it is case-insensitive. 21 | dataset = None 22 | target_dataset_name = dataset_name.replace('_', '') + 'dataset' 23 | for name, cls in datasetlib.__dict__.items(): 24 | if name.lower() == target_dataset_name.lower() \ 25 | and issubclass(cls, BaseDataset): 26 | dataset = cls 27 | 28 | if dataset is None: 29 | raise ValueError("In %s.py, there should be a subclass of BaseDataset " 30 | "with class name that matches %s in lowercase."
% 31 | (dataset_filename, target_dataset_name)) 32 | 33 | return dataset 34 | 35 | 36 | def get_option_setter(dataset_name): 37 | dataset_class = find_dataset_using_name(dataset_name) 38 | return dataset_class.modify_commandline_options 39 | 40 | 41 | def create_dataloader(opt): 42 | dataset = find_dataset_using_name(opt.dataset_mode) 43 | instance = dataset() 44 | instance.initialize(opt) 45 | print("dataset [%s] of size %d was created" % 46 | (type(instance).__name__, len(instance))) 47 | dataloader = torch.utils.data.DataLoader( 48 | instance, 49 | batch_size=opt.batchSize, 50 | shuffle=not opt.serial_batches, 51 | num_workers=int(opt.nThreads), 52 | drop_last=opt.isTrain 53 | ) 54 | return dataloader 55 | -------------------------------------------------------------------------------- /data/ade20k_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 3 | 4 | import os 5 | from data.pix2pix_dataset import Pix2pixDataset 6 | from data.image_folder import make_dataset 7 | 8 | 9 | class ADE20KDataset(Pix2pixDataset): 10 | 11 | @staticmethod 12 | def modify_commandline_options(parser, is_train): 13 | parser = Pix2pixDataset.modify_commandline_options(parser, is_train) 14 | parser.set_defaults(preprocess_mode='resize_and_crop') 15 | if is_train: 16 | parser.set_defaults(load_size=286) 17 | else: 18 | parser.set_defaults(load_size=256) 19 | parser.set_defaults(crop_size=256) 20 | parser.set_defaults(display_winsize=256) 21 | parser.set_defaults(label_nc=150) 22 | parser.set_defaults(contain_dontcare_label=True) 23 | parser.set_defaults(cache_filelist_read=False) 24 | parser.set_defaults(cache_filelist_write=False) 25 | return parser 26 | 27 | def get_paths(self, opt): 28 | root = opt.dataroot 29 | phase = 'val' if opt.phase == 'test' else 'train' 30 | subfolder = 'validation' if opt.phase == 'test' else 'training' 31 | cache = False if opt.phase == 'test' else True 32 | all_images = sorted(make_dataset(root + '/' + subfolder, recursive=True, read_cache=cache, write_cache=False)) 33 | image_paths = [] 34 | label_paths = [] 35 | for p in all_images: 36 | if '_%s_' % phase not in p: 37 | continue 38 | if p.endswith('.jpg'): 39 | image_paths.append(p) 40 | elif p.endswith('.png'): 41 | label_paths.append(p) 42 | 43 | return label_paths, image_paths 44 | 45 | def get_ref(self, opt): 46 | extra = '_test' if opt.phase == 'test' else '' 47 | with open('./data/ade20k_ref{}.txt'.format(extra)) as fd: 48 | lines = fd.readlines() 49 | ref_dict = {} 50 | for i in range(len(lines)): 51 | items = lines[i].strip().split(',') 52 | key = items[0] 53 | if opt.phase == 'test': 54 | val = items[1:] 55 | else: 56 | val = [items[1], items[-1]] 57 | ref_dict[key] = val 58 | train_test_folder = ('training', 'validation') 59 | return ref_dict, train_test_folder 60 | 61 | -------------------------------------------------------------------------------- /data/ade20k_ref_test.txt: -------------------------------------------------------------------------------- 1 | ADE_val_00001232.jpg,ADE_train_00004996.jpg 2 | ADE_val_00001517.jpg,ADE_train_00007201.jpg 3 | ADE_val_00001697.jpg,ADE_train_00015171.jpg 4 | ADE_val_00001599.jpg,ADE_train_00017763.jpg 5 | ADE_val_00001996.jpg,ADE_train_00010793.jpg 6 | ADE_val_00001349.jpg,ADE_train_00007860.jpg -------------------------------------------------------------------------------- /data/base_dataset.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import torch.utils.data as data 7 | from PIL import Image 8 | import torchvision.transforms as transforms 9 | import numpy as np 10 | import random 11 | 12 | 13 | class BaseDataset(data.Dataset): 14 | def __init__(self): 15 | super(BaseDataset, self).__init__() 16 | 17 | @staticmethod 18 | def modify_commandline_options(parser, is_train): 19 | return parser 20 | 21 | def initialize(self, opt): 22 | pass 23 | 24 | 25 | def get_params(opt, size): 26 | w, h = size 27 | new_h = h 28 | new_w = w 29 | if opt.preprocess_mode == 'resize_and_crop': 30 | new_h = new_w = opt.load_size 31 | elif opt.preprocess_mode == 'scale_width_and_crop': 32 | new_w = opt.load_size 33 | new_h = opt.load_size * h // w 34 | elif opt.preprocess_mode == 'scale_shortside_and_crop': 35 | ss, ls = min(w, h), max(w, h) # shortside and longside 36 | width_is_shorter = w == ss 37 | ls = int(opt.load_size * ls / ss) 38 | new_w, new_h = (ss, ls) if width_is_shorter else (ls, ss) 39 | 40 | x = random.randint(0, np.maximum(0, new_w - opt.crop_size)) 41 | y = random.randint(0, np.maximum(0, new_h - opt.crop_size)) 42 | 43 | flip = random.random() > 0.5 44 | return {'crop_pos': (x, y), 'flip': flip} 45 | 46 | 47 | def get_transform(opt, params, method=Image.BICUBIC, normalize=True, toTensor=True): 48 | transform_list = [] 49 | if opt.dataset_mode == 'flickr' and method == Image.NEAREST: 50 | transform_list.append(transforms.Lambda(lambda img: __add1(img))) 51 | if 'resize' in opt.preprocess_mode: 52 | osize = [opt.load_size, opt.load_size] 53 | transform_list.append(transforms.Resize(osize, interpolation=method)) 54 | elif 'scale_width' in opt.preprocess_mode: 55 | transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, method))) 56 | elif 'scale_shortside' in opt.preprocess_mode: 57 | transform_list.append(transforms.Lambda(lambda img: __scale_shortside(img, opt.load_size, method))) 58 | 59 | if 'crop' in opt.preprocess_mode: 60 | transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size))) 61 | 62 | if opt.preprocess_mode == 'none': 63 | base = 32 64 | transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method))) 65 | 66 | if opt.preprocess_mode == 'fixed': 67 | w = opt.crop_size 68 | h = round(opt.crop_size / opt.aspect_ratio) 69 | transform_list.append(transforms.Lambda(lambda img: __resize(img, w, h, method))) 70 | 71 | if opt.isTrain and not opt.no_flip: 72 | transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip']))) 73 | 74 | if opt.isTrain and 'rotate' in params.keys(): 75 | transform_list.append(transforms.Lambda(lambda img: __rotate(img, params['rotate'], method))) 76 | 77 | if toTensor: 78 | transform_list += [transforms.ToTensor()] 79 | 80 | if normalize: 81 | transform_list += [transforms.Normalize((0.5, 0.5, 0.5), 82 | (0.5, 0.5, 0.5))] 83 | return transforms.Compose(transform_list) 84 | 85 | def normalize(): 86 | return transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) 87 | 88 | 89 | def __resize(img, w, h, method=Image.BICUBIC): 90 | return img.resize((w, h), method) 91 | 92 | 93 | def __make_power_2(img, base, method=Image.BICUBIC): 94 | ow, oh = img.size 95 | h = int(round(oh / base) * base) 96 | w = int(round(ow / 
base) * base) 97 | if (h == oh) and (w == ow): 98 | return img 99 | return img.resize((w, h), method) 100 | 101 | 102 | def __scale_width(img, target_width, method=Image.BICUBIC): 103 | ow, oh = img.size 104 | if (ow == target_width): 105 | return img 106 | w = target_width 107 | h = int(target_width * oh / ow) 108 | return img.resize((w, h), method) 109 | 110 | 111 | def __scale_shortside(img, target_width, method=Image.BICUBIC): 112 | ow, oh = img.size 113 | ss, ls = min(ow, oh), max(ow, oh) # shortside and longside 114 | width_is_shorter = ow == ss 115 | if (ss == target_width): 116 | return img 117 | ls = int(target_width * ls / ss) 118 | nw, nh = (ss, ls) if width_is_shorter else (ls, ss) 119 | return img.resize((nw, nh), method) 120 | 121 | 122 | def __crop(img, pos, size): 123 | ow, oh = img.size 124 | x1, y1 = pos 125 | tw = th = size 126 | return img.crop((x1, y1, x1 + tw, y1 + th)) 127 | 128 | 129 | def __flip(img, flip): 130 | if flip: 131 | return img.transpose(Image.FLIP_LEFT_RIGHT) 132 | return img 133 | def __rotate(img, deg, method=Image.BICUBIC): 134 | return img.rotate(deg, resample=method) 135 | 136 | def __add1(img): 137 | return Image.fromarray(np.array(img) + 1) -------------------------------------------------------------------------------- /data/celebahq_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 3 | 4 | import os 5 | import torch 6 | import numpy as np 7 | from PIL import Image 8 | from data.pix2pix_dataset import Pix2pixDataset 9 | from data.base_dataset import get_params, get_transform 10 | 11 | class CelebAHQDataset(Pix2pixDataset): 12 | #hair, skin, l_brow, r_brow, l_eye, r_eye, l_ear, r_ear, nose, u_lip, mouth, l_lip, neck, 13 | #cloth, hat, eye_g, ear_r, neck_l 14 | @staticmethod 15 | def modify_commandline_options(parser, is_train): 16 | parser = Pix2pixDataset.modify_commandline_options(parser, is_train) 17 | parser.set_defaults(preprocess_mode='resize_and_crop') 18 | parser.set_defaults(no_pairing_check=True) 19 | if is_train: 20 | parser.set_defaults(load_size=286) 21 | else: 22 | parser.set_defaults(load_size=256) 23 | parser.set_defaults(crop_size=256) 24 | parser.set_defaults(display_winsize=256) 25 | parser.set_defaults(label_nc=19) 26 | parser.set_defaults(contain_dontcare_label=False) 27 | parser.set_defaults(cache_filelist_read=False) 28 | parser.set_defaults(cache_filelist_write=False) 29 | return parser 30 | 31 | def get_paths(self, opt): 32 | if opt.phase == 'train': 33 | fd = open(os.path.join(opt.dataroot, 'train.txt')) 34 | lines = fd.readlines() 35 | fd.close() 36 | elif opt.phase == 'test': 37 | fd = open(os.path.join(opt.dataroot, 'val.txt')) 38 | lines = fd.readlines() 39 | fd.close() 40 | 41 | image_paths = [] 42 | label_paths = [] 43 | for i in range(len(lines)): 44 | image_paths.append(os.path.join(opt.dataroot, 'CelebA-HQ-img', lines[i].strip() + '.jpg')) 45 | label_paths.append(os.path.join(opt.dataroot, 'CelebAMask-HQ-mask-anno', 'all_parts_except_glasses', lines[i].strip().zfill(5) + '.png')) 46 | 47 | return label_paths, image_paths 48 | 49 | def get_ref(self, opt): 50 | extra = '' 51 | if opt.phase == 'test': 52 | extra = '_test' 53 | with open('./data/celebahq_ref{}.txt'.format(extra)) as fd: 54 | lines = fd.readlines() 55 | ref_dict = {} 56 | for i in range(len(lines)): 57 | items = lines[i].strip().split(',') 58 | key = items[0] 59 | if opt.phase == 'test': 60 | val = items[1:] 61 | else: 62 | val
= [items[1], items[-1]] 63 | ref_dict[key] = val 64 | train_test_folder = ('', '') 65 | return ref_dict, train_test_folder 66 | 67 | def get_label_tensor(self, path): 68 | # parts = ['skin', 'hair', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'nose', 'u_lip', 'mouth', 'l_lip', 'neck', 69 | # 'cloth', 'hat', 'eye_g', 'ear_r', 'neck_l'] 70 | label_except_glasses = Image.open(path).convert('L') 71 | root, name = path.replace('\\', '/').split('all_parts_except_glasses/') 72 | idx = name.split('.')[0] 73 | subfolder = str(int(idx) // 2000) 74 | if os.path.exists(os.path.join(root, subfolder, idx + '_eye_g.png')): 75 | glasses = Image.open(os.path.join(root, subfolder, idx + '_eye_g.png')).convert('L') 76 | else: 77 | glasses = Image.fromarray(np.zeros(label_except_glasses.size, dtype=np.uint8)) 78 | 79 | params = get_params(self.opt, label_except_glasses.size) 80 | transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False) 81 | label_except_glasses_tensor = transform_label(label_except_glasses) * 255.0 82 | glasses_tensor = transform_label(glasses) 83 | label_tensor = torch.cat((label_except_glasses_tensor, glasses_tensor), dim=0) 84 | return label_tensor, params 85 | 86 | def imgpath_to_labelpath(self, path): 87 | root, name = path.split('CelebA-HQ-img/') 88 | label_path = os.path.join(root, 'CelebAMask-HQ-mask-anno', 'all_parts_except_glasses', name.split('.')[0].zfill(5) + '.png') 89 | return label_path 90 | -------------------------------------------------------------------------------- /data/celebahq_ref_test.txt: -------------------------------------------------------------------------------- 1 | 29922.jpg,26768.jpg 2 | 29883.jpg,5487.jpg 3 | 26969.jpg,6897.jpg 4 | 25807.jpg,23071.jpg 5 | 29642.jpg,26397.jpg 6 | 29539.jpg,14721.jpg 7 | 29318.jpg,15064.jpg 8 | 22233.jpg,2994.jpg -------------------------------------------------------------------------------- /data/celebahqedge_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 
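# A minimal, self-contained sketch of the neighbor-difference trick used by
# get_edges() below: a pixel becomes an edge wherever a part mask differs from
# its horizontal or vertical neighbor, tracing a thin outline around each
# facial part. The toy 6x6 mask here is a made-up value for illustration.
if __name__ == '__main__':
    import numpy as np
    t = np.zeros((6, 6), dtype=np.uint8)
    t[2:5, 2:5] = 255                                       # a square stand-in for a facial-part mask
    edge = np.zeros((6, 6), dtype=np.uint8)
    edge[:, 1:] = edge[:, 1:] | (t[:, 1:] != t[:, :-1])     # horizontal label changes
    edge[:, :-1] = edge[:, :-1] | (t[:, 1:] != t[:, :-1])
    edge[1:, :] = edge[1:, :] | (t[1:, :] != t[:-1, :])     # vertical label changes
    edge[:-1, :] = edge[:-1, :] | (t[1:, :] != t[:-1, :])
    print(edge)                                             # 1s trace both sides of the square's outline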
3 | 4 | import os 5 | import cv2 6 | import torch 7 | import numpy as np 8 | from PIL import Image 9 | from skimage import feature 10 | from data.pix2pix_dataset import Pix2pixDataset 11 | from data.base_dataset import get_params, get_transform 12 | 13 | class CelebAHQEdgeDataset(Pix2pixDataset): 14 | #hair, skin, l_brow, r_brow, l_eye, r_eye, l_ear, r_ear, nose, u_lip, mouth, l_lip, neck, 15 | @staticmethod 16 | def modify_commandline_options(parser, is_train): 17 | parser = Pix2pixDataset.modify_commandline_options(parser, is_train) 18 | parser.set_defaults(preprocess_mode='resize_and_crop') 19 | parser.set_defaults(no_pairing_check=True) 20 | if is_train: 21 | parser.set_defaults(load_size=286) 22 | else: 23 | parser.set_defaults(load_size=256) 24 | parser.set_defaults(crop_size=256) 25 | parser.set_defaults(display_winsize=256) 26 | parser.set_defaults(label_nc=15) 27 | parser.set_defaults(contain_dontcare_label=False) 28 | parser.set_defaults(cache_filelist_read=False) 29 | parser.set_defaults(cache_filelist_write=False) 30 | return parser 31 | 32 | def get_paths(self, opt): 33 | if opt.phase == 'train': 34 | fd = open(os.path.join(opt.dataroot, 'train.txt')) 35 | lines = fd.readlines() 36 | fd.close() 37 | elif opt.phase == 'test': 38 | fd = open(os.path.join(opt.dataroot, 'val.txt')) 39 | lines = fd.readlines() 40 | fd.close() 41 | 42 | image_paths = [] 43 | label_paths = [] 44 | for i in range(len(lines)): 45 | image_paths.append(os.path.join(opt.dataroot, 'CelebA-HQ-img', lines[i].strip() + '.jpg')) 46 | subfolder = str(int(lines[i].strip()) // 2000) 47 | label_paths.append(os.path.join(opt.dataroot, 'CelebAMask-HQ-mask-anno', subfolder, lines[i].strip().zfill(5) + '_{}.png')) 48 | 49 | return label_paths, image_paths 50 | 51 | def get_ref(self, opt): 52 | extra = '' 53 | if opt.phase == 'test': 54 | extra = '_test' 55 | with open('./data/celebahq_ref{}.txt'.format(extra)) as fd: 56 | lines = fd.readlines() 57 | ref_dict = {} 58 | for i in range(len(lines)): 59 | items = lines[i].strip().split(',') 60 | key = items[0] 61 | if opt.phase == 'test': 62 | val = items[1:] 63 | else: 64 | val = [items[1], items[-1]] 65 | ref_dict[key] = val 66 | train_test_folder = ('', '') 67 | return ref_dict, train_test_folder 68 | 69 | def get_edges(self, edge, t): 70 | edge[:,1:] = edge[:,1:] | (t[:,1:] != t[:,:-1]) 71 | edge[:,:-1] = edge[:,:-1] | (t[:,1:] != t[:,:-1]) 72 | edge[1:,:] = edge[1:,:] | (t[1:,:] != t[:-1,:]) 73 | edge[:-1,:] = edge[:-1,:] | (t[1:,:] != t[:-1,:]) 74 | return edge 75 | 76 | def get_label_tensor(self, path): 77 | inner_parts = ['skin', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'nose', 'u_lip', 'mouth', 'l_lip', 'eye_g', 'hair'] 78 | img_path = self.labelpath_to_imgpath(path) 79 | img = Image.open(img_path).resize((self.opt.load_size, self.opt.load_size), resample=Image.BILINEAR) 80 | params = get_params(self.opt, img.size) 81 | transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False) 82 | transform_img = get_transform(self.opt, params, method=Image.BILINEAR, normalize=False) 83 | 84 | inner_label = np.ones(img.size, dtype=np.uint8) 85 | edges = np.zeros(img.size, dtype=np.uint8) 86 | tensors_dist = 0 87 | e = 1 88 | for part in inner_parts: 89 | edge = np.zeros(img.size, dtype=np.uint8) # this holds the distance transform map of each facial part 90 | if os.path.exists(path.format(part)): 91 | part_label = Image.open(path.format(part)).convert('L').resize((self.opt.load_size, self.opt.load_size), resample=Image.NEAREST) 92 |
part_label = np.array(part_label) 93 | if part == 'hair': 94 | inner_label[part_label == 255] = 1 95 | else: 96 | inner_label[part_label == 255] = 0 97 | edges = self.get_edges(edges, part_label) 98 | edge = self.get_edges(edge, part_label) 99 | im_dist = cv2.distanceTransform(255-edge*255, cv2.DIST_L1, 3) 100 | im_dist = np.clip((im_dist / 3), 0, 255).astype(np.uint8) 101 | tensor_dist = transform_img(Image.fromarray(im_dist)) 102 | tensors_dist = tensor_dist if e == 1 else torch.cat([tensors_dist, tensor_dist]) 103 | e += 1 104 | 105 | # canny edge for background 106 | canny_edges = feature.canny(np.array(img.convert('L'))) 107 | canny_edges = canny_edges * inner_label 108 | 109 | edges_all = edges + canny_edges 110 | edges_all[edges_all > 1] = 1 111 | tensor_edges_all = transform_label(Image.fromarray(edges_all * 255)) 112 | edges[edges > 1] = 1 113 | tensor_edges = transform_label(Image.fromarray(edges * 255)) 114 | 115 | label_tensor = torch.cat((tensor_edges_all, tensors_dist, tensor_edges), dim=0) 116 | return label_tensor, params 117 | 118 | def imgpath_to_labelpath(self, path): 119 | root, name = path.split('CelebA-HQ-img/') 120 | subfolder = str(int(name.split('.')[0]) // 2000) 121 | label_path = os.path.join(root, 'CelebAMask-HQ-mask-anno', subfolder, name.split('.')[0].zfill(5) + '_{}.png') 122 | return label_path 123 | 124 | def labelpath_to_imgpath(self, path): 125 | root= path.replace('\\', '/').split('CelebAMask-HQ-mask-anno/')[0] 126 | name = os.path.basename(path).split('_')[0] 127 | img_path = os.path.join(root, 'CelebA-HQ-img', str(int(name)) + '.jpg') 128 | return img_path 129 | 130 | # In ADE20k, 'unknown' label is of value 0. 131 | # Change the 'unknown' label to the last label to match other datasets. 132 | # def postprocess(self, input_dict): 133 | # label = input_dict['label'] 134 | # label = label - 1 135 | # label[label == -1] = self.opt.label_nc 136 | # input_dict['label'] = label 137 | # if input_dict['label_ref'] is not None: 138 | # label_ref = input_dict['label_ref'] 139 | # label_ref = label_ref - 1 140 | # label_ref[label_ref == -1] = self.opt.label_nc 141 | # input_dict['label_ref'] = label_ref 142 | -------------------------------------------------------------------------------- /data/deepfashion_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 
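# A minimal sketch of the OpenPose file format that get_label_tensor() below
# consumes: each row of a *_candidate.txt file is (x, y, confidence, id), and
# the first 18 entries of a *_subset.txt row map body-joint slots to row
# indices in candidate, with -1 marking an undetected joint. The two keypoints
# here are made-up values for illustration only.
if __name__ == '__main__':
    import numpy as np
    candidate = np.array([[129., 40., 0.98, 0.],    # row 0: e.g. a nose detected at (129, 40)
                          [131., 85., 0.91, 1.]])   # row 1: e.g. a neck detected at (131, 85)
    subset = np.array([0., 1.] + [-1.] * 16)        # joint slots 0 and 1 found, the rest missing
    for i in range(18):
        index = int(subset[i])
        if index == -1:
            continue                                # skip joints OpenPose did not find
        x, y = candidate[index][0:2]                # the point later drawn with cv2.circle
        print('joint %d at (%d, %d)' % (i, x, y))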
3 | 4 | import os 5 | import cv2 6 | import torch 7 | import numpy as np 8 | import math 9 | import random 10 | from PIL import Image 11 | from skimage import feature 12 | from data.pix2pix_dataset import Pix2pixDataset 13 | from data.base_dataset import get_params, get_transform 14 | 15 | class DeepFashionDataset(Pix2pixDataset): 16 | @staticmethod 17 | def modify_commandline_options(parser, is_train): 18 | parser = Pix2pixDataset.modify_commandline_options(parser, is_train) 19 | parser.set_defaults(preprocess_mode='resize_and_crop') 20 | parser.set_defaults(no_pairing_check=True) 21 | if is_train: 22 | parser.set_defaults(load_size=286) 23 | else: 24 | parser.set_defaults(load_size=256) 25 | parser.set_defaults(crop_size=256) 26 | parser.set_defaults(display_winsize=256) 27 | parser.set_defaults(label_nc=20) 28 | parser.set_defaults(contain_dontcare_label=False) 29 | parser.set_defaults(cache_filelist_read=False) 30 | parser.set_defaults(cache_filelist_write=False) 31 | return parser 32 | 33 | def get_paths(self, opt): 34 | #root = os.path.dirname(opt.dataroot) if opt.hdfs else opt.dataroot 35 | root = opt.dataroot 36 | if opt.phase == 'train': 37 | fd = open(os.path.join(root, 'train.txt')) 38 | lines = fd.readlines() 39 | fd.close() 40 | elif opt.phase == 'test': 41 | fd = open(os.path.join(root, 'val.txt')) 42 | lines = fd.readlines() 43 | fd.close() 44 | 45 | image_paths = [] 46 | label_paths = [] 47 | for i in range(len(lines)): 48 | image_paths.append(os.path.join(opt.dataroot, lines[i].strip().replace('\\', '/'))) 49 | label_path = lines[i].strip().replace('img', 'pose').replace('.jpg', '_{}.txt').replace('\\', '/') 50 | label_paths.append(os.path.join(opt.dataroot, label_path)) 51 | 52 | return label_paths, image_paths 53 | 54 | def get_ref_video_like(self, opt): 55 | pair_path = './data/deepfashion_self_pair.txt' 56 | with open(pair_path) as fd: 57 | self_pair = fd.readlines() 58 | self_pair = [it.strip() for it in self_pair] 59 | key_name = {} 60 | for it in self_pair: 61 | items = it.split(',') 62 | key_name[items[0]] = items[1:] 63 | ref_name = './data/deepfashion_ref_test.txt' if opt.phase == 'test' else './data/deepfashion_ref.txt' 64 | with open(ref_name) as fd: 65 | ref = fd.readlines() 66 | ref = [it.strip() for it in ref] 67 | ref_dict = {} 68 | #split = 'DeepFashion.zip@/' if opt.hdfs else 'DeepFashion/' 69 | split = 'DeepFashion/' 70 | for i in range(len(ref)): 71 | items = ref[i].strip().split(',') 72 | if items[0] in key_name.keys(): 73 | #ref_dict[items[0].replace('\\', '/')] = [random.choice(key_name[items[0]]).replace('\\', '/'), random.choice(self.image_paths).split(split)[-1]] 74 | ref_dict[items[0].replace('\\', '/')] = [it.replace('\\', '/') for it in key_name[items[0]]] + [it.split(split)[-1] for it in random.sample(self.image_paths, min(len(self.image_paths), 20))] 75 | else: 76 | ref_dict[items[0].replace('\\', '/')] = [items[-1].replace('\\', '/')] + [it.split(split)[-1] for it in random.sample(self.image_paths, min(len(self.image_paths), 20))] 77 | train_test_folder = ('', '') 78 | return ref_dict, train_test_folder 79 | 80 | def get_ref_vgg(self, opt): 81 | extra = '' 82 | if opt.phase == 'test': 83 | extra = '_test' 84 | with open('./data/deepfashion_ref{}.txt'.format(extra)) as fd: 85 | lines = fd.readlines() 86 | ref_dict = {} 87 | for i in range(len(lines)): 88 | items = lines[i].strip().split(',') 89 | key = items[0].replace('\\', '/') 90 | if opt.phase == 'test': 91 | val = [it.replace('\\', '/') for it in items[1:]] 92 | else: 93 | val = 
[items[-1].replace('\\', '/'), random.choice(self.image_paths).split('DeepFashion/')[-1].replace('\\', '/')] 94 | ref_dict[key] = val 95 | train_test_folder = ('', '') 96 | return ref_dict, train_test_folder 97 | 98 | def get_ref(self, opt): 99 | if opt.video_like: 100 | return self.get_ref_video_like(opt) 101 | else: 102 | return self.get_ref_vgg(opt) 103 | 104 | def get_edges(self, edge, t): 105 | edge[:,1:] = edge[:,1:] | (t[:,1:] != t[:,:-1]) 106 | edge[:,:-1] = edge[:,:-1] | (t[:,1:] != t[:,:-1]) 107 | edge[1:,:] = edge[1:,:] | (t[1:,:] != t[:-1,:]) 108 | edge[:-1,:] = edge[:-1,:] | (t[1:,:] != t[:-1,:]) 109 | return edge 110 | 111 | def get_label_tensor(self, path): 112 | candidate = np.loadtxt(path.format('candidate')) 113 | subset = np.loadtxt(path.format('subset')) 114 | stickwidth = 4 115 | limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \ 116 | [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \ 117 | [1, 16], [16, 18], [3, 17], [6, 18]] 118 | 119 | colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \ 120 | [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \ 121 | [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]] 122 | 123 | img_path = self.labelpath_to_imgpath(path) 124 | img = cv2.imread(img_path) 125 | canvas = np.zeros_like(img) 126 | for i in range(18): 127 | index = int(subset[i]) 128 | if index == -1: 129 | continue 130 | x, y = candidate[index][0:2] 131 | cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1) 132 | joints = [] 133 | for i in range(17): 134 | index = subset[np.array(limbSeq[i]) - 1] 135 | cur_canvas = canvas.copy() 136 | if -1 in index: 137 | joints.append(np.zeros_like(cur_canvas[:, :, 0])) 138 | continue 139 | Y = candidate[index.astype(int), 0] 140 | X = candidate[index.astype(int), 1] 141 | mX = np.mean(X) 142 | mY = np.mean(Y) 143 | length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5 144 | angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1])) 145 | polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1) 146 | cv2.fillConvexPoly(cur_canvas, polygon, colors[i]) 147 | canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0) 148 | 149 | joint = np.zeros_like(cur_canvas[:, :, 0]) 150 | cv2.fillConvexPoly(joint, polygon, 255) 151 | joint = cv2.addWeighted(joint, 0.4, joint, 0.6, 0) 152 | joints.append(joint) 153 | 154 | pose = Image.fromarray(cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB)).resize((self.opt.load_size, self.opt.load_size), resample=Image.NEAREST) 155 | params = get_params(self.opt, pose.size) 156 | transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False) 157 | transform_img = get_transform(self.opt, params, method=Image.BILINEAR, normalize=False) 158 | tensors_dist = 0 159 | e = 1 160 | for i in range(len(joints)): 161 | im_dist = cv2.distanceTransform(255-joints[i], cv2.DIST_L1, 3) 162 | im_dist = np.clip((im_dist / 3), 0, 255).astype(np.uint8) 163 | tensor_dist = transform_img(Image.fromarray(im_dist)) 164 | tensors_dist = tensor_dist if e == 1 else torch.cat([tensors_dist, tensor_dist]) 165 | e += 1 166 | 167 | tensor_pose = transform_label(pose) 168 | label_tensor = torch.cat((tensor_pose, tensors_dist), dim=0) 169 | return label_tensor, params 170 | 171 | def imgpath_to_labelpath(self, path): 172 | label_path = path.replace('\\', '/').replace('/img/', '/pose/').replace('.jpg', '_{}.txt') 
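        # e.g. 'img/WOMEN/Dresses/id_00000244/02_1_front.jpg' maps to
        # 'pose/WOMEN/Dresses/id_00000244/02_1_front_{}.txt'; get_label_tensor()
        # later fills the '{}' placeholder with 'candidate' and 'subset'.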
173 | return label_path 174 | 175 | def labelpath_to_imgpath(self, path): 176 | img_path = path.replace('\\', '/').replace('/pose/', '/img/').replace('_{}.txt', '.jpg') 177 | return img_path 178 | -------------------------------------------------------------------------------- /data/deepfashion_ref_test.txt: -------------------------------------------------------------------------------- 1 | img\WOMEN\Dresses\id_00000244\02_1_front.jpg,img\WOMEN\Tees_Tanks\id_00007059\03_3_back.jpg 2 | img\WOMEN\Dresses\id_00000203\03_1_front.jpg,img\WOMEN\Dresses\id_00002791\01_2_side.jpg 3 | img\WOMEN\Cardigans\id_00006462\02_7_additional.jpg,img\WOMEN\Dresses\id_00006245\03_2_side.jpg 4 | img\WOMEN\Cardigans\id_00006266\02_2_side.jpg,img\WOMEN\Blouses_Shirts\id_00007636\01_1_front.jpg -------------------------------------------------------------------------------- /data/deepfashion_self_pair.txt: -------------------------------------------------------------------------------- 1 | img\MEN\Denim\id_00000265\01_1_front.jpg,img\MEN\Denim\id_00000265\01_2_side.jpg,img\MEN\Denim\id_00000265\01_7_additional.jpg -------------------------------------------------------------------------------- /data/flickr_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 3 | 4 | import os 5 | from data.pix2pix_dataset import Pix2pixDataset 6 | 7 | class FlickrDataset(Pix2pixDataset): 8 | 9 | @staticmethod 10 | def modify_commandline_options(parser, is_train): 11 | parser = Pix2pixDataset.modify_commandline_options(parser, is_train) 12 | parser.set_defaults(preprocess_mode='resize_and_crop') 13 | if is_train: 14 | parser.set_defaults(load_size=286) 15 | else: 16 | parser.set_defaults(load_size=256) 17 | parser.set_defaults(crop_size=256) 18 | parser.set_defaults(display_winsize=256) 19 | parser.set_defaults(label_nc=150) 20 | parser.set_defaults(contain_dontcare_label=True) 21 | parser.set_defaults(cache_filelist_read=False) 22 | parser.set_defaults(cache_filelist_write=False) 23 | return parser 24 | 25 | def get_paths(self, opt): 26 | root = os.path.join(opt.dataroot, 'test/images') if opt.phase == 'test' else os.path.join(opt.dataroot, 'images') 27 | root_mask = root.replace('images', 'mask') 28 | 29 | image_paths = sorted(os.listdir(root)) 30 | image_paths = [os.path.join(root, it) for it in image_paths] 31 | label_paths = sorted(os.listdir(root_mask)) 32 | label_paths = [os.path.join(root_mask, it) for it in label_paths] 33 | 34 | return label_paths, image_paths 35 | 36 | def get_ref(self, opt): 37 | extra = '_test_from_train' if opt.phase == 'test' else '' 38 | with open('./data/flickr_ref{}.txt'.format(extra)) as fd: 39 | lines = fd.readlines() 40 | ref_dict = {} 41 | for i in range(len(lines)): 42 | items = lines[i].strip().split(',') 43 | key = items[0] 44 | if opt.phase == 'test': 45 | val = items[1:] 46 | else: 47 | val = [items[1], items[-1]] 48 | ref_dict[key] = val 49 | train_test_folder = ('', 'test') 50 | return ref_dict, train_test_folder 51 | 52 | def imgpath_to_labelpath(self, path): 53 | path_ref_label = path.replace('images', 'mask') 54 | return path_ref_label 55 | # In ADE20k, 'unknown' label is of value 0. 56 | # Change the 'unknown' label to the last label to match other datasets. 
57 | # def postprocess(self, input_dict): 58 | # label = input_dict['label'] 59 | # label = label - 1 60 | # label[label == -1] = self.opt.label_nc 61 | # input_dict['label'] = label 62 | # if input_dict['label_ref'] is not None: 63 | # label_ref = input_dict['label_ref'] 64 | # label_ref = label_ref - 1 65 | # label_ref[label_ref == -1] = self.opt.label_nc 66 | # input_dict['label_ref'] = label_ref 67 | -------------------------------------------------------------------------------- /data/image_folder.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import torch.utils.data as data 7 | from PIL import Image 8 | import os 9 | 10 | IMG_EXTENSIONS = [ 11 | '.jpg', '.JPG', '.jpeg', '.JPEG', 12 | '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff', '.webp' 13 | ] 14 | 15 | 16 | def is_image_file(filename): 17 | return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) 18 | 19 | 20 | def make_dataset_rec(dir, images): 21 | assert os.path.isdir(dir), '%s is not a valid directory' % dir 22 | 23 | for root, dnames, fnames in sorted(os.walk(dir, followlinks=True)): 24 | for fname in fnames: 25 | if is_image_file(fname): 26 | path = os.path.join(root, fname) 27 | images.append(path) 28 | 29 | 30 | def make_dataset(dir, recursive=False, read_cache=False, write_cache=False): 31 | images = [] 32 | 33 | if read_cache: 34 | possible_filelist = os.path.join(dir, 'files.list') 35 | if os.path.isfile(possible_filelist): 36 | with open(possible_filelist, 'r') as f: 37 | images = f.read().splitlines() 38 | return images 39 | 40 | if recursive: 41 | make_dataset_rec(dir, images) 42 | else: 43 | assert os.path.isdir(dir) or os.path.islink(dir), '%s is not a valid directory' % dir 44 | 45 | for root, dnames, fnames in sorted(os.walk(dir)): 46 | for fname in fnames: 47 | if is_image_file(fname): 48 | path = os.path.join(root, fname) 49 | images.append(path) 50 | 51 | if write_cache: 52 | filelist_cache = os.path.join(dir, 'files.list') 53 | with open(filelist_cache, 'w') as f: 54 | for path in images: 55 | f.write("%s\n" % path) 56 | print('wrote filelist cache at %s' % filelist_cache) 57 | 58 | return images 59 | 60 | 61 | def default_loader(path): 62 | return Image.open(path).convert('RGB') 63 | 64 | 65 | class ImageFolder(data.Dataset): 66 | 67 | def __init__(self, root, transform=None, return_paths=False, 68 | loader=default_loader): 69 | imgs = make_dataset(root) 70 | if len(imgs) == 0: 71 | raise(RuntimeError("Found 0 images in: " + root + "\n" 72 | "Supported image extensions are: " + 73 | ",".join(IMG_EXTENSIONS))) 74 | 75 | self.root = root 76 | self.imgs = imgs 77 | self.transform = transform 78 | self.return_paths = return_paths 79 | self.loader = loader 80 | 81 | def __getitem__(self, index): 82 | path = self.imgs[index] 83 | img = self.loader(path) 84 | if self.transform is not None: 85 | img = self.transform(img) 86 | if self.return_paths: 87 | return img, path 88 | else: 89 | return img 90 | 91 | def __len__(self): 92 | return len(self.imgs) 93 | -------------------------------------------------------------------------------- /data/pix2pix_dataset.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 
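# A minimal sketch of how __getitem__() below picks the reference exemplar:
# one uniform draw chooses between a real reference and the image itself, and
# within the real-reference branch a smaller threshold selects the hard
# exemplar (val[1]) over the easy one (val[0]). The probability values and the
# ref_dict entry here are illustrative assumptions, not repository defaults.
if __name__ == '__main__':
    import random
    real_reference_probability, hard_reference_probability = 0.7, 0.2
    ref_dict = {'0.jpg': ['easy_ref.jpg', 'hard_ref.jpg']}
    random_p = random.random()
    if random_p < real_reference_probability:
        val = ref_dict['0.jpg']
        path_ref = val[1] if random_p < hard_reference_probability else val[0]
    else:
        path_ref = '0.jpg'                      # fall back to using the image as its own reference
    print(path_ref)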
3 | 4 | from data.base_dataset import BaseDataset, get_params, get_transform 5 | import torch 6 | import torchvision.transforms as transforms 7 | from PIL import Image 8 | import util.util as util 9 | import os 10 | import random 11 | #from scipy.ndimage.filters import gaussian_filter 12 | 13 | 14 | class Pix2pixDataset(BaseDataset): 15 | @staticmethod 16 | def modify_commandline_options(parser, is_train): 17 | parser.add_argument('--no_pairing_check', action='store_true', 18 | help='If specified, skip sanity check of correct label-image file pairing') 19 | return parser 20 | 21 | def initialize(self, opt): 22 | self.opt = opt 23 | 24 | label_paths, image_paths = self.get_paths(opt) 25 | 26 | if opt.dataset_mode != 'celebahq' and opt.dataset_mode != 'deepfashion': 27 | util.natural_sort(label_paths) 28 | util.natural_sort(image_paths) 29 | 30 | label_paths = label_paths[:opt.max_dataset_size] 31 | image_paths = image_paths[:opt.max_dataset_size] 32 | 33 | if not opt.no_pairing_check: 34 | for path1, path2 in zip(label_paths, image_paths): 35 | assert self.paths_match(path1, path2), \ 36 | "The label-image pair (%s, %s) do not look like the right pair because the filenames are quite different. Are you sure about the pairing? Please see data/pix2pix_dataset.py to see what is going on, and use --no_pairing_check to bypass this." % (path1, path2) 37 | 38 | self.label_paths = label_paths 39 | self.image_paths = image_paths 40 | 41 | size = len(self.label_paths) 42 | self.dataset_size = size 43 | 44 | self.real_reference_probability = 1 if opt.phase == 'test' else opt.real_reference_probability 45 | self.hard_reference_probability = 0 if opt.phase == 'test' else opt.hard_reference_probability 46 | self.ref_dict, self.train_test_folder = self.get_ref(opt) 47 | 48 | def get_paths(self, opt): 49 | label_paths = [] 50 | image_paths = [] 51 | assert False, "A subclass of Pix2pixDataset must override self.get_paths(self, opt)" 52 | return label_paths, image_paths 53 | 54 | def paths_match(self, path1, path2): 55 | filename1_without_ext = os.path.splitext(os.path.basename(path1))[0] 56 | filename2_without_ext = os.path.splitext(os.path.basename(path2))[0] 57 | return filename1_without_ext == filename2_without_ext 58 | 59 | def get_label_tensor(self, path): 60 | label = Image.open(path) 61 | params1 = get_params(self.opt, label.size) 62 | transform_label = get_transform(self.opt, params1, method=Image.NEAREST, normalize=False) 63 | label_tensor = transform_label(label) * 255.0 64 | label_tensor[label_tensor == 255] = self.opt.label_nc # 'unknown' is opt.label_nc 65 | return label_tensor, params1 66 | 67 | def __getitem__(self, index): 68 | # Label Image 69 | label_path = self.label_paths[index] 70 | label_tensor, params1 = self.get_label_tensor(label_path) 71 | 72 | # input image (real images) 73 | image_path = self.image_paths[index] 74 | if not self.opt.no_pairing_check: 75 | assert self.paths_match(label_path, image_path), \ 76 | "The label_path %s and image_path %s don't match." 
% \ 77 | (label_path, image_path) 78 | image = Image.open(image_path) 79 | image = image.convert('RGB') 80 | 81 | transform_image = get_transform(self.opt, params1) 82 | image_tensor = transform_image(image) 83 | 84 | ref_tensor = 0 85 | label_ref_tensor = 0 86 | 87 | random_p = random.random() 88 | if random_p < self.real_reference_probability or self.opt.phase == 'test': 89 | key = image_path.replace('\\', '/').split('DeepFashion/')[-1] if self.opt.dataset_mode == 'deepfashion' else os.path.basename(image_path) 90 | val = self.ref_dict[key] 91 | if random_p < self.hard_reference_probability: 92 | path_ref = val[1] #hard reference 93 | else: 94 | path_ref = val[0] #easy reference 95 | if self.opt.dataset_mode == 'deepfashion': 96 | path_ref = os.path.join(self.opt.dataroot, path_ref) 97 | else: 98 | path_ref = os.path.dirname(image_path).replace(self.train_test_folder[1], self.train_test_folder[0]) + '/' + path_ref 99 | 100 | image_ref = Image.open(path_ref).convert('RGB') 101 | if self.opt.dataset_mode != 'deepfashion': 102 | path_ref_label = path_ref.replace('.jpg', '.png') 103 | path_ref_label = self.imgpath_to_labelpath(path_ref_label) 104 | else: 105 | path_ref_label = self.imgpath_to_labelpath(path_ref) 106 | 107 | label_ref_tensor, params = self.get_label_tensor(path_ref_label) 108 | transform_image = get_transform(self.opt, params) 109 | ref_tensor = transform_image(image_ref) 110 | #ref_tensor = self.reference_transform(image_ref) 111 | self_ref_flag = torch.zeros_like(ref_tensor) 112 | else: 113 | pair = False 114 | if self.opt.dataset_mode == 'deepfashion' and self.opt.video_like: 115 | # if self.opt.hdfs: 116 | # key = image_path.split('DeepFashion.zip@/')[-1] 117 | # else: 118 | # key = image_path.split('DeepFashion/')[-1] 119 | key = image_path.replace('\\', '/').split('DeepFashion/')[-1] 120 | val = self.ref_dict[key] 121 | ref_name = val[0] 122 | key_name = key 123 | if os.path.dirname(ref_name) == os.path.dirname(key_name) and os.path.basename(ref_name).split('_')[0] == os.path.basename(key_name).split('_')[0]: 124 | path_ref = os.path.join(self.opt.dataroot, ref_name) 125 | image_ref = Image.open(path_ref).convert('RGB') 126 | label_ref_path = self.imgpath_to_labelpath(path_ref) 127 | label_ref_tensor, params = self.get_label_tensor(label_ref_path) 128 | transform_image = get_transform(self.opt, params) 129 | ref_tensor = transform_image(image_ref) 130 | pair = True 131 | if not pair: 132 | label_ref_tensor, params = self.get_label_tensor(label_path) 133 | transform_image = get_transform(self.opt, params) 134 | ref_tensor = transform_image(image) 135 | #ref_tensor = self.reference_transform(image) 136 | self_ref_flag = torch.ones_like(ref_tensor) 137 | 138 | input_dict = {'label': label_tensor, 139 | 'image': image_tensor, 140 | 'path': image_path, 141 | 'self_ref': self_ref_flag, 142 | 'ref': ref_tensor, 143 | 'label_ref': label_ref_tensor 144 | } 145 | 146 | # Give subclasses a chance to modify the final output 147 | self.postprocess(input_dict) 148 | 149 | return input_dict 150 | 151 | def postprocess(self, input_dict): 152 | return input_dict 153 | 154 | def __len__(self): 155 | return self.dataset_size 156 | 157 | def get_ref(self, opt): 158 | pass 159 | 160 | def imgpath_to_labelpath(self, path): 161 | return path -------------------------------------------------------------------------------- /imgs/DeepFashion/img/WOMEN/Blouses_Shirts/id_00007636/01_1_front.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/DeepFashion/img/WOMEN/Blouses_Shirts/id_00007636/01_1_front.jpg -------------------------------------------------------------------------------- /imgs/DeepFashion/img/WOMEN/Cardigans/id_00006266/02_2_side.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/DeepFashion/img/WOMEN/Cardigans/id_00006266/02_2_side.jpg -------------------------------------------------------------------------------- /imgs/DeepFashion/img/WOMEN/Cardigans/id_00006462/02_7_additional.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/DeepFashion/img/WOMEN/Cardigans/id_00006462/02_7_additional.jpg -------------------------------------------------------------------------------- /imgs/DeepFashion/img/WOMEN/Dresses/id_00000203/03_1_front.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/DeepFashion/img/WOMEN/Dresses/id_00000203/03_1_front.jpg -------------------------------------------------------------------------------- /imgs/DeepFashion/img/WOMEN/Dresses/id_00000244/02_1_front.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/DeepFashion/img/WOMEN/Dresses/id_00000244/02_1_front.jpg -------------------------------------------------------------------------------- /imgs/DeepFashion/img/WOMEN/Dresses/id_00002791/01_2_side.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/DeepFashion/img/WOMEN/Dresses/id_00002791/01_2_side.jpg -------------------------------------------------------------------------------- /imgs/DeepFashion/img/WOMEN/Dresses/id_00006245/03_2_side.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/DeepFashion/img/WOMEN/Dresses/id_00006245/03_2_side.jpg -------------------------------------------------------------------------------- /imgs/DeepFashion/img/WOMEN/Tees_Tanks/id_00007059/03_3_back.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/DeepFashion/img/WOMEN/Tees_Tanks/id_00007059/03_3_back.jpg -------------------------------------------------------------------------------- /imgs/DeepFashion/pose/WOMEN/Blouses_Shirts/id_00007636/01_1_front_candidate.txt: -------------------------------------------------------------------------------- 1 | 1.290000000000000000e+02 4.000000000000000000e+01 9.781419634819030762e-01 0.000000000000000000e+00 2 | 1.310000000000000000e+02 8.500000000000000000e+01 9.124031066894531250e-01 1.000000000000000000e+00 3 | 9.800000000000000000e+01 8.400000000000000000e+01 8.668758273124694824e-01 2.000000000000000000e+00 4 | 9.200000000000000000e+01 1.500000000000000000e+02 8.552884459495544434e-01 3.000000000000000000e+00 5 | 9.800000000000000000e+01 
2.060000000000000000e+02 8.713585734367370605e-01 4.000000000000000000e+00 6 | 1.650000000000000000e+02 8.700000000000000000e+01 8.727692961692810059e-01 5.000000000000000000e+00 7 | 1.700000000000000000e+02 1.550000000000000000e+02 8.835062384605407715e-01 6.000000000000000000e+00 8 | 1.790000000000000000e+02 2.140000000000000000e+02 8.572984337806701660e-01 7.000000000000000000e+00 9 | 1.100000000000000000e+02 1.940000000000000000e+02 6.707262396812438965e-01 8.000000000000000000e+00 10 | 1.550000000000000000e+02 1.950000000000000000e+02 6.938654780387878418e-01 9.000000000000000000e+00 11 | 1.210000000000000000e+02 3.400000000000000000e+01 9.170883893966674805e-01 1.000000000000000000e+01 12 | 1.380000000000000000e+02 3.200000000000000000e+01 9.513149261474609375e-01 1.100000000000000000e+01 13 | 1.130000000000000000e+02 3.900000000000000000e+01 7.142572999000549316e-01 1.200000000000000000e+01 14 | 1.500000000000000000e+02 3.600000000000000000e+01 8.690021634101867676e-01 1.300000000000000000e+01 15 | -------------------------------------------------------------------------------- /imgs/DeepFashion/pose/WOMEN/Blouses_Shirts/id_00007636/01_1_front_subset.txt: -------------------------------------------------------------------------------- 1 | 0.000000000000000000e+00 1.000000000000000000e+00 2.000000000000000000e+00 3.000000000000000000e+00 4.000000000000000000e+00 5.000000000000000000e+00 6.000000000000000000e+00 7.000000000000000000e+00 8.000000000000000000e+00 -1.000000000000000000e+00 -1.000000000000000000e+00 9.000000000000000000e+00 -1.000000000000000000e+00 -1.000000000000000000e+00 1.000000000000000000e+01 1.100000000000000000e+01 1.200000000000000000e+01 1.300000000000000000e+01 2.457221853691778790e+01 1.400000000000000000e+01 2 | -------------------------------------------------------------------------------- /imgs/DeepFashion/pose/WOMEN/Cardigans/id_00006266/02_2_side_candidate.txt: -------------------------------------------------------------------------------- 1 | 1.600000000000000000e+02 3.600000000000000000e+01 9.836565256118774414e-01 0.000000000000000000e+00 2 | 1.350000000000000000e+02 7.600000000000000000e+01 6.573168039321899414e-01 1.000000000000000000e+00 3 | 1.280000000000000000e+02 7.900000000000000000e+01 8.179771304130554199e-01 2.000000000000000000e+00 4 | 1.310000000000000000e+02 1.520000000000000000e+02 7.068420648574829102e-01 3.000000000000000000e+00 5 | 1.410000000000000000e+02 2.150000000000000000e+02 8.571766018867492676e-01 4.000000000000000000e+00 6 | 1.410000000000000000e+02 7.500000000000000000e+01 5.557610988616943359e-01 5.000000000000000000e+00 7 | 1.500000000000000000e+02 1.500000000000000000e+02 1.888840943574905396e-01 6.000000000000000000e+00 8 | 1.350000000000000000e+02 2.000000000000000000e+02 6.253219246864318848e-01 7.000000000000000000e+00 9 | 1.490000000000000000e+02 1.950000000000000000e+02 4.419538974761962891e-01 8.000000000000000000e+00 10 | 1.520000000000000000e+02 2.600000000000000000e+01 9.701329469680786133e-01 9.000000000000000000e+00 11 | 1.660000000000000000e+02 2.900000000000000000e+01 9.409177899360656738e-01 1.000000000000000000e+01 12 | 1.340000000000000000e+02 2.400000000000000000e+01 9.528830647468566895e-01 1.100000000000000000e+01 13 | -------------------------------------------------------------------------------- /imgs/DeepFashion/pose/WOMEN/Cardigans/id_00006266/02_2_side_subset.txt: -------------------------------------------------------------------------------- 1 | 0.000000000000000000e+00 
1.000000000000000000e+00 2.000000000000000000e+00 3.000000000000000000e+00 4.000000000000000000e+00 5.000000000000000000e+00 6.000000000000000000e+00 -1.000000000000000000e+00 7.000000000000000000e+00 -1.000000000000000000e+00 -1.000000000000000000e+00 8.000000000000000000e+00 -1.000000000000000000e+00 -1.000000000000000000e+00 9.000000000000000000e+00 1.000000000000000000e+01 1.100000000000000000e+01 -1.000000000000000000e+00 1.751898223298882229e+01 1.200000000000000000e+01 2 | -------------------------------------------------------------------------------- /imgs/DeepFashion/pose/WOMEN/Cardigans/id_00006462/02_7_additional_candidate.txt: -------------------------------------------------------------------------------- 1 | 1.490000000000000000e+02 2.600000000000000000e+01 9.639234542846679688e-01 0.000000000000000000e+00 2 | 1.470000000000000000e+02 5.200000000000000000e+01 9.432929754257202148e-01 1.000000000000000000e+00 3 | 1.260000000000000000e+02 5.000000000000000000e+01 9.349830150604248047e-01 2.000000000000000000e+00 4 | 1.190000000000000000e+02 8.900000000000000000e+01 8.650190234184265137e-01 3.000000000000000000e+00 5 | 1.210000000000000000e+02 1.250000000000000000e+02 9.393053054809570312e-01 4.000000000000000000e+00 6 | 1.680000000000000000e+02 5.200000000000000000e+01 9.098938107490539551e-01 5.000000000000000000e+00 7 | 1.680000000000000000e+02 9.000000000000000000e+01 8.949556350708007812e-01 6.000000000000000000e+00 8 | 1.660000000000000000e+02 1.250000000000000000e+02 8.962563872337341309e-01 7.000000000000000000e+00 9 | 1.290000000000000000e+02 1.210000000000000000e+02 7.583002448081970215e-01 8.000000000000000000e+00 10 | 1.300000000000000000e+02 1.740000000000000000e+02 8.903183937072753906e-01 9.000000000000000000e+00 11 | 1.590000000000000000e+02 2.150000000000000000e+02 8.965585231781005859e-01 1.000000000000000000e+01 12 | 1.540000000000000000e+02 1.220000000000000000e+02 8.122842907905578613e-01 1.100000000000000000e+01 13 | 1.490000000000000000e+02 1.760000000000000000e+02 9.031013846397399902e-01 1.200000000000000000e+01 14 | 1.400000000000000000e+02 2.250000000000000000e+02 8.052030205726623535e-01 1.300000000000000000e+01 15 | 1.450000000000000000e+02 2.100000000000000000e+01 8.995509147644042969e-01 1.400000000000000000e+01 16 | 1.520000000000000000e+02 2.100000000000000000e+01 8.971232771873474121e-01 1.500000000000000000e+01 17 | 1.380000000000000000e+02 2.300000000000000000e+01 8.110531568527221680e-01 1.600000000000000000e+01 18 | 1.590000000000000000e+02 2.400000000000000000e+01 6.379645466804504395e-01 1.700000000000000000e+01 19 | -------------------------------------------------------------------------------- /imgs/DeepFashion/pose/WOMEN/Cardigans/id_00006462/02_7_additional_subset.txt: -------------------------------------------------------------------------------- 1 | 0.000000000000000000e+00 1.000000000000000000e+00 2.000000000000000000e+00 3.000000000000000000e+00 4.000000000000000000e+00 5.000000000000000000e+00 6.000000000000000000e+00 7.000000000000000000e+00 8.000000000000000000e+00 9.000000000000000000e+00 1.000000000000000000e+01 1.100000000000000000e+01 1.200000000000000000e+01 1.300000000000000000e+01 1.400000000000000000e+01 1.500000000000000000e+01 1.600000000000000000e+01 1.700000000000000000e+01 3.231099844985619995e+01 1.800000000000000000e+01 2 | -------------------------------------------------------------------------------- /imgs/DeepFashion/pose/WOMEN/Dresses/id_00000203/03_1_front_candidate.txt: 
-------------------------------------------------------------------------------- 1 | 1.300000000000000000e+02 3.100000000000000000e+01 9.783996343612670898e-01 0.000000000000000000e+00 2 | 1.280000000000000000e+02 6.900000000000000000e+01 9.581637382507324219e-01 1.000000000000000000e+00 3 | 9.800000000000000000e+01 6.700000000000000000e+01 8.994741439819335938e-01 2.000000000000000000e+00 4 | 9.300000000000000000e+01 1.210000000000000000e+02 9.282154440879821777e-01 3.000000000000000000e+00 5 | 1.050000000000000000e+02 1.640000000000000000e+02 9.353148937225341797e-01 4.000000000000000000e+00 6 | 1.570000000000000000e+02 7.100000000000000000e+01 8.891844749450683594e-01 5.000000000000000000e+00 7 | 1.600000000000000000e+02 1.240000000000000000e+02 9.018204212188720703e-01 6.000000000000000000e+00 8 | 1.720000000000000000e+02 1.710000000000000000e+02 9.431709647178649902e-01 7.000000000000000000e+00 9 | 1.110000000000000000e+02 1.690000000000000000e+02 7.091249227523803711e-01 8.000000000000000000e+00 10 | 1.130000000000000000e+02 2.550000000000000000e+02 6.363827586174011230e-01 9.000000000000000000e+00 11 | 1.500000000000000000e+02 1.690000000000000000e+02 7.744201421737670898e-01 1.000000000000000000e+01 12 | 1.440000000000000000e+02 2.550000000000000000e+02 6.030450463294982910e-01 1.100000000000000000e+01 13 | 1.230000000000000000e+02 2.600000000000000000e+01 9.613689184188842773e-01 1.200000000000000000e+01 14 | 1.370000000000000000e+02 2.500000000000000000e+01 9.662402272224426270e-01 1.300000000000000000e+01 15 | 1.130000000000000000e+02 2.900000000000000000e+01 8.398466706275939941e-01 1.400000000000000000e+01 16 | 1.450000000000000000e+02 2.800000000000000000e+01 6.678193211555480957e-01 1.500000000000000000e+01 17 | -------------------------------------------------------------------------------- /imgs/DeepFashion/pose/WOMEN/Dresses/id_00000203/03_1_front_subset.txt: -------------------------------------------------------------------------------- 1 | 0.000000000000000000e+00 1.000000000000000000e+00 2.000000000000000000e+00 3.000000000000000000e+00 4.000000000000000000e+00 5.000000000000000000e+00 6.000000000000000000e+00 7.000000000000000000e+00 8.000000000000000000e+00 9.000000000000000000e+00 -1.000000000000000000e+00 1.000000000000000000e+01 1.100000000000000000e+01 -1.000000000000000000e+00 1.200000000000000000e+01 1.300000000000000000e+01 1.400000000000000000e+01 1.500000000000000000e+01 2.821650014703974918e+01 1.600000000000000000e+01 2 | -------------------------------------------------------------------------------- /imgs/DeepFashion/pose/WOMEN/Dresses/id_00000244/02_1_front_candidate.txt: -------------------------------------------------------------------------------- 1 | 1.270000000000000000e+02 3.300000000000000000e+01 9.341222643852233887e-01 0.000000000000000000e+00 2 | 1.320000000000000000e+02 7.400000000000000000e+01 9.348551630973815918e-01 1.000000000000000000e+00 3 | 1.020000000000000000e+02 7.300000000000000000e+01 8.929463624954223633e-01 2.000000000000000000e+00 4 | 1.000000000000000000e+02 1.330000000000000000e+02 9.158918857574462891e-01 3.000000000000000000e+00 5 | 8.600000000000000000e+01 1.830000000000000000e+02 8.827364444732666016e-01 4.000000000000000000e+00 6 | 1.620000000000000000e+02 7.300000000000000000e+01 9.297263622283935547e-01 5.000000000000000000e+00 7 | 1.680000000000000000e+02 1.320000000000000000e+02 8.879975080490112305e-01 6.000000000000000000e+00 8 | 1.820000000000000000e+02 1.840000000000000000e+02 9.119865894317626953e-01 
7.000000000000000000e+00 9 | 1.240000000000000000e+02 1.790000000000000000e+02 6.546844840049743652e-01 8.000000000000000000e+00 10 | 1.430000000000000000e+02 2.550000000000000000e+02 2.148336321115493774e-01 9.000000000000000000e+00 11 | 1.600000000000000000e+02 1.750000000000000000e+02 6.687886118888854980e-01 1.000000000000000000e+01 12 | 1.600000000000000000e+02 2.550000000000000000e+02 1.844243109226226807e-01 1.100000000000000000e+01 13 | 1.220000000000000000e+02 2.700000000000000000e+01 9.203147888183593750e-01 1.200000000000000000e+01 14 | 1.350000000000000000e+02 2.600000000000000000e+01 9.571847915649414062e-01 1.300000000000000000e+01 15 | 1.500000000000000000e+02 2.900000000000000000e+01 9.829818010330200195e-01 1.400000000000000000e+01 16 | -------------------------------------------------------------------------------- /imgs/DeepFashion/pose/WOMEN/Dresses/id_00000244/02_1_front_subset.txt: -------------------------------------------------------------------------------- 1 | 0.000000000000000000e+00 1.000000000000000000e+00 2.000000000000000000e+00 3.000000000000000000e+00 4.000000000000000000e+00 5.000000000000000000e+00 6.000000000000000000e+00 7.000000000000000000e+00 8.000000000000000000e+00 9.000000000000000000e+00 -1.000000000000000000e+00 1.000000000000000000e+01 1.100000000000000000e+01 -1.000000000000000000e+00 1.200000000000000000e+01 1.300000000000000000e+01 -1.000000000000000000e+00 1.400000000000000000e+01 2.587078630340811713e+01 1.500000000000000000e+01 2 | -------------------------------------------------------------------------------- /imgs/DeepFashion/pose/WOMEN/Dresses/id_00002791/01_2_side_candidate.txt: -------------------------------------------------------------------------------- 1 | 1.170000000000000000e+02 3.000000000000000000e+01 9.378154873847961426e-01 0.000000000000000000e+00 2 | 1.450000000000000000e+02 8.000000000000000000e+01 6.306344866752624512e-01 1.000000000000000000e+00 3 | 1.410000000000000000e+02 7.800000000000000000e+01 5.642595291137695312e-01 2.000000000000000000e+00 4 | 1.470000000000000000e+02 8.100000000000000000e+01 8.000036478042602539e-01 3.000000000000000000e+00 5 | 1.470000000000000000e+02 1.370000000000000000e+02 7.605579495429992676e-01 4.000000000000000000e+00 6 | 1.370000000000000000e+02 1.880000000000000000e+02 8.629501461982727051e-01 5.000000000000000000e+00 7 | 1.470000000000000000e+02 1.770000000000000000e+02 5.010345578193664551e-01 6.000000000000000000e+00 8 | 1.520000000000000000e+02 2.470000000000000000e+02 4.303085803985595703e-01 7.000000000000000000e+00 9 | 1.520000000000000000e+02 2.550000000000000000e+02 4.303106963634490967e-01 8.000000000000000000e+00 10 | 1.360000000000000000e+02 1.780000000000000000e+02 7.079824805259704590e-01 9.000000000000000000e+00 11 | 1.200000000000000000e+02 2.550000000000000000e+02 4.430750012397766113e-01 1.000000000000000000e+01 12 | 1.460000000000000000e+02 2.550000000000000000e+02 1.226684600114822388e-01 1.100000000000000000e+01 13 | 1.190000000000000000e+02 2.500000000000000000e+01 1.930258870124816895e-01 1.200000000000000000e+01 14 | 1.250000000000000000e+02 2.500000000000000000e+01 9.556379914283752441e-01 1.300000000000000000e+01 15 | 1.430000000000000000e+02 3.500000000000000000e+01 9.292136430740356445e-01 1.400000000000000000e+01 16 | -------------------------------------------------------------------------------- /imgs/DeepFashion/pose/WOMEN/Dresses/id_00002791/01_2_side_subset.txt: -------------------------------------------------------------------------------- 1 | 
0.000000000000000000e+00 1.000000000000000000e+00 2.000000000000000000e+00 -1.000000000000000000e+00 -1.000000000000000000e+00 3.000000000000000000e+00 4.000000000000000000e+00 5.000000000000000000e+00 6.000000000000000000e+00 7.000000000000000000e+00 -1.000000000000000000e+00 9.000000000000000000e+00 1.000000000000000000e+01 -1.000000000000000000e+00 1.200000000000000000e+01 1.300000000000000000e+01 -1.000000000000000000e+00 1.400000000000000000e+01 1.762618939380657679e+01 1.300000000000000000e+01 2 | -------------------------------------------------------------------------------- /imgs/DeepFashion/pose/WOMEN/Dresses/id_00006245/03_2_side_candidate.txt: -------------------------------------------------------------------------------- 1 | 1.130000000000000000e+02 3.300000000000000000e+01 8.916585445404052734e-01 0.000000000000000000e+00 2 | 1.250000000000000000e+02 7.300000000000000000e+01 8.936308026313781738e-01 1.000000000000000000e+00 3 | 1.060000000000000000e+02 7.100000000000000000e+01 8.008211851119995117e-01 2.000000000000000000e+00 4 | 1.040000000000000000e+02 1.390000000000000000e+02 5.845063924789428711e-01 3.000000000000000000e+00 5 | 8.600000000000000000e+01 1.850000000000000000e+02 6.952582001686096191e-01 4.000000000000000000e+00 6 | 1.420000000000000000e+02 7.300000000000000000e+01 8.249495029449462891e-01 5.000000000000000000e+00 7 | 1.820000000000000000e+02 1.270000000000000000e+02 9.636043310165405273e-01 6.000000000000000000e+00 8 | 1.380000000000000000e+02 1.630000000000000000e+02 9.008183479309082031e-01 7.000000000000000000e+00 9 | 9.900000000000000000e+01 1.770000000000000000e+02 6.262004971504211426e-01 8.000000000000000000e+00 10 | 8.900000000000000000e+01 2.550000000000000000e+02 3.811275362968444824e-01 9.000000000000000000e+00 11 | 1.220000000000000000e+02 1.790000000000000000e+02 6.284172534942626953e-01 1.000000000000000000e+01 12 | 1.160000000000000000e+02 2.550000000000000000e+02 2.910405695438385010e-01 1.100000000000000000e+01 13 | 1.060000000000000000e+02 2.700000000000000000e+01 9.608835577964782715e-01 1.200000000000000000e+01 14 | 1.200000000000000000e+02 2.600000000000000000e+01 9.427283406257629395e-01 1.300000000000000000e+01 15 | 1.000000000000000000e+02 3.400000000000000000e+01 1.226811334490776062e-01 1.400000000000000000e+01 16 | 1.360000000000000000e+02 2.700000000000000000e+01 8.473211526870727539e-01 1.500000000000000000e+01 17 | -------------------------------------------------------------------------------- /imgs/DeepFashion/pose/WOMEN/Dresses/id_00006245/03_2_side_subset.txt: -------------------------------------------------------------------------------- 1 | 0.000000000000000000e+00 1.000000000000000000e+00 2.000000000000000000e+00 3.000000000000000000e+00 4.000000000000000000e+00 5.000000000000000000e+00 6.000000000000000000e+00 7.000000000000000000e+00 8.000000000000000000e+00 9.000000000000000000e+00 -1.000000000000000000e+00 1.000000000000000000e+01 1.100000000000000000e+01 -1.000000000000000000e+00 1.200000000000000000e+01 1.300000000000000000e+01 1.400000000000000000e+01 1.500000000000000000e+01 2.419357599188928276e+01 1.600000000000000000e+01 2 | -------------------------------------------------------------------------------- /imgs/DeepFashion/pose/WOMEN/Tees_Tanks/id_00007059/03_3_back_candidate.txt: -------------------------------------------------------------------------------- 1 | 1.190000000000000000e+02 3.900000000000000000e+01 1.235611811280250549e-01 0.000000000000000000e+00 2 | 1.480000000000000000e+02 
8.000000000000000000e+01 9.321066141128540039e-01 1.000000000000000000e+00 3 | 1.840000000000000000e+02 8.000000000000000000e+01 9.020690321922302246e-01 2.000000000000000000e+00 4 | 1.850000000000000000e+02 1.450000000000000000e+02 8.811913728713989258e-01 3.000000000000000000e+00 5 | 1.780000000000000000e+02 2.010000000000000000e+02 7.621915340423583984e-01 4.000000000000000000e+00 6 | 1.130000000000000000e+02 8.000000000000000000e+01 8.418201208114624023e-01 5.000000000000000000e+00 7 | 1.060000000000000000e+02 1.460000000000000000e+02 8.889629244804382324e-01 6.000000000000000000e+00 8 | 8.900000000000000000e+01 1.980000000000000000e+02 8.734655380249023438e-01 7.000000000000000000e+00 9 | 1.610000000000000000e+02 1.970000000000000000e+02 6.344254612922668457e-01 8.000000000000000000e+00 10 | 1.160000000000000000e+02 1.920000000000000000e+02 6.594762206077575684e-01 9.000000000000000000e+00 11 | 1.200000000000000000e+02 3.400000000000000000e+01 2.521242499351501465e-01 1.000000000000000000e+01 12 | 1.630000000000000000e+02 3.300000000000000000e+01 2.763812839984893799e-01 1.100000000000000000e+01 13 | 1.280000000000000000e+02 3.700000000000000000e+01 8.409863710403442383e-01 1.200000000000000000e+01 14 | -------------------------------------------------------------------------------- /imgs/DeepFashion/pose/WOMEN/Tees_Tanks/id_00007059/03_3_back_subset.txt: -------------------------------------------------------------------------------- 1 | -1.000000000000000000e+00 1.000000000000000000e+00 2.000000000000000000e+00 3.000000000000000000e+00 4.000000000000000000e+00 5.000000000000000000e+00 6.000000000000000000e+00 7.000000000000000000e+00 8.000000000000000000e+00 -1.000000000000000000e+00 -1.000000000000000000e+00 9.000000000000000000e+00 -1.000000000000000000e+00 -1.000000000000000000e+00 -1.000000000000000000e+00 1.000000000000000000e+01 1.100000000000000000e+01 1.200000000000000000e+01 1.769423181688729940e+01 1.200000000000000000e+01 2 | -------------------------------------------------------------------------------- /imgs/DeepFashion/val.txt: -------------------------------------------------------------------------------- 1 | img\WOMEN\Dresses\id_00000244\02_1_front.jpg 2 | img\WOMEN\Dresses\id_00000203\03_1_front.jpg 3 | img\WOMEN\Cardigans\id_00006462\02_7_additional.jpg 4 | img\WOMEN\Cardigans\id_00006266\02_2_side.jpg -------------------------------------------------------------------------------- /imgs/ade20k/training/ADE_train_00004996.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/training/ADE_train_00004996.jpg -------------------------------------------------------------------------------- /imgs/ade20k/training/ADE_train_00004996.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/training/ADE_train_00004996.png -------------------------------------------------------------------------------- /imgs/ade20k/training/ADE_train_00007201.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/training/ADE_train_00007201.jpg -------------------------------------------------------------------------------- /imgs/ade20k/training/ADE_train_00007201.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/training/ADE_train_00007201.png -------------------------------------------------------------------------------- /imgs/ade20k/training/ADE_train_00007860.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/training/ADE_train_00007860.jpg -------------------------------------------------------------------------------- /imgs/ade20k/training/ADE_train_00007860.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/training/ADE_train_00007860.png -------------------------------------------------------------------------------- /imgs/ade20k/training/ADE_train_00010793.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/training/ADE_train_00010793.jpg -------------------------------------------------------------------------------- /imgs/ade20k/training/ADE_train_00010793.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/training/ADE_train_00010793.png -------------------------------------------------------------------------------- /imgs/ade20k/training/ADE_train_00015171.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/training/ADE_train_00015171.jpg -------------------------------------------------------------------------------- /imgs/ade20k/training/ADE_train_00015171.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/training/ADE_train_00015171.png -------------------------------------------------------------------------------- /imgs/ade20k/training/ADE_train_00017763.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/training/ADE_train_00017763.jpg -------------------------------------------------------------------------------- /imgs/ade20k/training/ADE_train_00017763.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/training/ADE_train_00017763.png -------------------------------------------------------------------------------- /imgs/ade20k/validation/ADE_val_00001232.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/validation/ADE_val_00001232.jpg -------------------------------------------------------------------------------- /imgs/ade20k/validation/ADE_val_00001232.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/validation/ADE_val_00001232.png -------------------------------------------------------------------------------- /imgs/ade20k/validation/ADE_val_00001349.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/validation/ADE_val_00001349.jpg -------------------------------------------------------------------------------- /imgs/ade20k/validation/ADE_val_00001349.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/validation/ADE_val_00001349.png -------------------------------------------------------------------------------- /imgs/ade20k/validation/ADE_val_00001517.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/validation/ADE_val_00001517.jpg -------------------------------------------------------------------------------- /imgs/ade20k/validation/ADE_val_00001517.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/validation/ADE_val_00001517.png -------------------------------------------------------------------------------- /imgs/ade20k/validation/ADE_val_00001599.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/validation/ADE_val_00001599.jpg -------------------------------------------------------------------------------- /imgs/ade20k/validation/ADE_val_00001599.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/validation/ADE_val_00001599.png -------------------------------------------------------------------------------- /imgs/ade20k/validation/ADE_val_00001697.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/validation/ADE_val_00001697.jpg -------------------------------------------------------------------------------- /imgs/ade20k/validation/ADE_val_00001697.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/validation/ADE_val_00001697.png -------------------------------------------------------------------------------- /imgs/ade20k/validation/ADE_val_00001996.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/validation/ADE_val_00001996.jpg -------------------------------------------------------------------------------- /imgs/ade20k/validation/ADE_val_00001996.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/ade20k/validation/ADE_val_00001996.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebA-HQ-img/23071.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebA-HQ-img/23071.jpg -------------------------------------------------------------------------------- /imgs/celebahq/CelebA-HQ-img/25807.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebA-HQ-img/25807.jpg -------------------------------------------------------------------------------- /imgs/celebahq/CelebA-HQ-img/26768.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebA-HQ-img/26768.jpg -------------------------------------------------------------------------------- /imgs/celebahq/CelebA-HQ-img/26969.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebA-HQ-img/26969.jpg -------------------------------------------------------------------------------- /imgs/celebahq/CelebA-HQ-img/29883.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebA-HQ-img/29883.jpg -------------------------------------------------------------------------------- /imgs/celebahq/CelebA-HQ-img/29922.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebA-HQ-img/29922.jpg -------------------------------------------------------------------------------- /imgs/celebahq/CelebA-HQ-img/5487.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebA-HQ-img/5487.jpg -------------------------------------------------------------------------------- /imgs/celebahq/CelebA-HQ-img/6897.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebA-HQ-img/6897.jpg -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/11/23071_l_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/11/23071_l_eye.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/11/23071_r_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/11/23071_r_eye.png 
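
The DeepFashion pose files dumped above appear to follow the OpenPose candidate/subset convention: each row of a *_candidate.txt holds x, y, a detection confidence, and a running keypoint id, while each row of a *_subset.txt holds 18 candidate indices (one per body joint, -1 when the joint was not detected) followed by an overall pose score and the number of detected joints. A minimal parsing sketch under that reading (load_pose is an illustrative helper, not a function from this repo):

import numpy as np

def load_pose(candidate_path, subset_path):
    # Candidate rows: x, y, confidence, keypoint id (column meaning
    # inferred from the dumped values, not from the CoCosNet code).
    candidate = np.loadtxt(candidate_path).reshape(-1, 4)
    # Subset rows: 18 candidate indices (-1 = joint missing), then the
    # overall score and the number of detected joints (also inferred).
    subset = np.loadtxt(subset_path).reshape(-1, 20)
    people = []
    for person in subset:
        joints = np.full((18, 3), -1.0)  # per joint: x, y, confidence
        for joint_id, cand_id in enumerate(person[:18].astype(int)):
            if cand_id >= 0:
                joints[joint_id] = candidate[cand_id, :3]
        people.append({"joints": joints,
                       "score": person[18],
                       "n_joints": int(person[19])})
    return people

On 01_1_front above this yields one person with 14 of 18 joints and a score of about 24.6, matching the last two columns of its subset row.
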
-------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/12/25807_l_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/12/25807_l_eye.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/12/25807_r_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/12/25807_r_eye.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/13/26768_l_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/13/26768_l_eye.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/13/26768_r_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/13/26768_r_eye.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/13/26969_l_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/13/26969_l_eye.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/13/26969_r_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/13/26969_r_eye.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/14/29883_l_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/14/29883_l_eye.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/14/29883_r_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/14/29883_r_eye.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/14/29922_l_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/14/29922_l_eye.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/14/29922_r_eye.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/14/29922_r_eye.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/2/05487_l_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/2/05487_l_eye.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/2/05487_r_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/2/05487_r_eye.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/3/06897_l_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/3/06897_l_eye.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/3/06897_r_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/3/06897_r_eye.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/all_parts_except_glasses/05487.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/all_parts_except_glasses/05487.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/all_parts_except_glasses/06897.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/all_parts_except_glasses/06897.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/all_parts_except_glasses/23071.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/all_parts_except_glasses/23071.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/all_parts_except_glasses/25807.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/all_parts_except_glasses/25807.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/all_parts_except_glasses/26768.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/all_parts_except_glasses/26768.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/all_parts_except_glasses/26969.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/all_parts_except_glasses/26969.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/all_parts_except_glasses/29883.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/all_parts_except_glasses/29883.png -------------------------------------------------------------------------------- /imgs/celebahq/CelebAMask-HQ-mask-anno/all_parts_except_glasses/29922.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahq/CelebAMask-HQ-mask-anno/all_parts_except_glasses/29922.png -------------------------------------------------------------------------------- /imgs/celebahq/val.txt: -------------------------------------------------------------------------------- 1 | 29922 2 | 29883 3 | 26969 4 | 25807 -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebA-HQ-img/14721.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebA-HQ-img/14721.jpg -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebA-HQ-img/15064.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebA-HQ-img/15064.jpg -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebA-HQ-img/22233.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebA-HQ-img/22233.jpg -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebA-HQ-img/26397.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebA-HQ-img/26397.jpg -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebA-HQ-img/29318.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebA-HQ-img/29318.jpg -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebA-HQ-img/29539.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebA-HQ-img/29539.jpg -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebA-HQ-img/29642.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebA-HQ-img/29642.jpg -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebA-HQ-img/2994.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebA-HQ-img/2994.jpg -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_cloth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_cloth.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_ear_r.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_ear_r.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_hair.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_hair.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_l_brow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_l_brow.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_l_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_l_eye.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_l_lip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_l_lip.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_mouth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_mouth.png 
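
The CelebAMask-HQ layout reproduced in both the celebahq and celebahqedge trees follows a path rule the dumped ids confirm: annotation masks live in numbered subfolders of 2,000 images each (subfolder = image id // 2000), and filenames zero-pad the id to five digits, so image 5487 maps to 2/05487_l_eye.png and image 23071 to 11/23071_l_eye.png. A small sketch of that rule (mask_path is an illustrative helper, not a function from this repo):

from pathlib import Path

def mask_path(root, image_id, part):
    # Subfolder convention visible in the dump: 2,000 ids per folder.
    folder = image_id // 2000            # e.g. 23071 -> 11, 5487 -> 2
    name = f"{image_id:05d}_{part}.png"  # ids are zero-padded to 5 digits
    return Path(root) / "CelebAMask-HQ-mask-anno" / str(folder) / name

# mask_path("imgs/celebahq", 23071, "r_eye")
# -> imgs/celebahq/CelebAMask-HQ-mask-anno/11/23071_r_eye.png
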
-------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_neck.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_neck.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_nose.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_nose.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_r_brow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_r_brow.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_r_ear.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_r_ear.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_r_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_r_eye.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_skin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_skin.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_u_lip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/1/02994_u_lip.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_cloth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_cloth.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_ear_r.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_ear_r.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_hair.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_hair.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_l_brow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_l_brow.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_l_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_l_eye.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_l_lip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_l_lip.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_neck.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_neck.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_nose.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_nose.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_r_brow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_r_brow.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_r_ear.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_r_ear.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_r_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_r_eye.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_skin.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_skin.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_u_lip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/11/22233_u_lip.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_cloth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_cloth.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_hair.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_hair.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_l_brow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_l_brow.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_l_ear.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_l_ear.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_l_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_l_eye.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_l_lip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_l_lip.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_mouth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_mouth.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_neck.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_neck.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_nose.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_nose.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_r_brow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_r_brow.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_r_ear.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_r_ear.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_r_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_r_eye.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_skin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_skin.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_u_lip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/13/26397_u_lip.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_hair.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_hair.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_l_brow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_l_brow.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_l_eye.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_l_eye.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_l_lip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_l_lip.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_mouth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_mouth.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_neck.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_neck.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_nose.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_nose.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_r_brow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_r_brow.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_r_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_r_eye.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_skin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_skin.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_u_lip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29318_u_lip.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_hair.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_hair.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_l_brow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_l_brow.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_l_ear.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_l_ear.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_l_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_l_eye.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_l_lip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_l_lip.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_neck.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_neck.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_nose.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_nose.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_r_brow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_r_brow.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_r_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_r_eye.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_skin.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_skin.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_u_lip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29539_u_lip.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_cloth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_cloth.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_hair.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_hair.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_l_brow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_l_brow.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_l_ear.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_l_ear.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_l_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_l_eye.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_l_lip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_l_lip.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_neck.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_neck.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_nose.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_nose.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_r_brow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_r_brow.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_r_ear.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_r_ear.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_r_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_r_eye.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_skin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_skin.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_u_lip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/14/29642_u_lip.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_hair.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_hair.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_l_brow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_l_brow.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_l_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_l_eye.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_l_lip.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_l_lip.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_mouth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_mouth.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_neck.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_neck.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_nose.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_nose.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_r_brow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_r_brow.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_r_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_r_eye.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_skin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_skin.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_u_lip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/14721_u_lip.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_ear_r.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_ear_r.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_hair.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_hair.png 
-------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_l_brow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_l_brow.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_l_ear.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_l_ear.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_l_eye.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_l_eye.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_l_lip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_l_lip.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_mouth.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_mouth.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_neck.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_neck.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_nose.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_nose.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_r_brow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_r_brow.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_r_ear.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_r_ear.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_r_eye.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_r_eye.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_skin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_skin.png -------------------------------------------------------------------------------- /imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_u_lip.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/celebahqedge/CelebAMask-HQ-mask-anno/7/15064_u_lip.png -------------------------------------------------------------------------------- /imgs/celebahqedge/val.txt: -------------------------------------------------------------------------------- 1 | 29642 2 | 29539 3 | 29318 4 | 22233 -------------------------------------------------------------------------------- /imgs/demo.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/demo.gif -------------------------------------------------------------------------------- /imgs/teaser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/imgs/teaser.png -------------------------------------------------------------------------------- /models/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import importlib 7 | import torch 8 | 9 | 10 | def find_model_using_name(model_name): 11 | # Given the option --model [modelname], 12 | # the file "models/modelname_model.py" 13 | # will be imported. 14 | model_filename = "models." + model_name + "_model" 15 | modellib = importlib.import_module(model_filename) 16 | 17 | # In the file, the class called ModelNameModel() will 18 | # be instantiated. It has to be a subclass of torch.nn.Module, 19 | # and it is case-insensitive. 20 | model = None 21 | target_model_name = model_name.replace('_', '') + 'model' 22 | for name, cls in modellib.__dict__.items(): 23 | if name.lower() == target_model_name.lower() \ 24 | and issubclass(cls, torch.nn.Module): 25 | model = cls 26 | 27 | if model is None: 28 | print("In %s.py, there should be a subclass of torch.nn.Module with class name that matches %s in lowercase." 
% (model_filename, target_model_name)) 29 | exit(0) 30 | 31 | return model 32 | 33 | 34 | def get_option_setter(model_name): 35 | model_class = find_model_using_name(model_name) 36 | return model_class.modify_commandline_options 37 | 38 | 39 | def create_model(opt): 40 | model = find_model_using_name(opt.model) 41 | instance = model(opt) 42 | print("model [%s] was created" % (type(instance).__name__)) 43 | 44 | return instance 45 | -------------------------------------------------------------------------------- /models/networks/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import torch 7 | from models.networks.base_network import BaseNetwork 8 | from models.networks.loss import * 9 | from models.networks.discriminator import * 10 | from models.networks.generator import * 11 | #from models.networks.encoder import * 12 | from models.networks.ContextualLoss import * 13 | from models.networks.correspondence import * 14 | #from models.networks.progressive_sub_net import * 15 | import util.util as util 16 | 17 | 18 | def find_network_using_name(target_network_name, filename, add=True): 19 | target_class_name = target_network_name + filename if add else target_network_name 20 | module_name = 'models.networks.' + filename 21 | network = util.find_class_in_module(target_class_name, module_name) 22 | 23 | assert issubclass(network, BaseNetwork), \ 24 | "Class %s should be a subclass of BaseNetwork" % network 25 | 26 | return network 27 | 28 | 29 | def modify_commandline_options(parser, is_train): 30 | opt, _ = parser.parse_known_args() 31 | 32 | netG_cls = find_network_using_name(opt.netG, 'generator') 33 | parser = netG_cls.modify_commandline_options(parser, is_train) 34 | if is_train: 35 | netD_cls = find_network_using_name(opt.netD, 'discriminator') 36 | parser = netD_cls.modify_commandline_options(parser, is_train) 37 | # netE_cls = find_network_using_name('conv', 'encoder') 38 | # parser = netE_cls.modify_commandline_options(parser, is_train) 39 | 40 | return parser 41 | 42 | 43 | def create_network(cls, opt, stage1=False): 44 | if stage1: 45 | net = cls(opt, stage1=True) 46 | else: 47 | net = cls(opt) 48 | net.print_network() 49 | if len(opt.gpu_ids) > 0: 50 | assert(torch.cuda.is_available()) 51 | net.cuda() 52 | net.init_weights(opt.init_type, opt.init_variance) 53 | return net 54 | 55 | 56 | def define_G(opt): 57 | netG_cls = find_network_using_name(opt.netG, 'generator') 58 | return create_network(netG_cls, opt) 59 | 60 | def define_G_stage1(opt): 61 | netG_cls = find_network_using_name(opt.netG, 'generator') 62 | return create_network(netG_cls, opt, stage1=True) 63 | 64 | def define_D(opt): 65 | netD_cls = find_network_using_name(opt.netD, 'discriminator') 66 | return create_network(netD_cls, opt) 67 | 68 | def define_D_stage1(opt): 69 | netD_cls = find_network_using_name(opt.netD, 'discriminator') 70 | return create_network(netD_cls, opt, stage1=True) 71 | 72 | def define_DomainClassifier(opt): 73 | netDomainclassifier = find_network_using_name('DomainClassifier', 'generator', add=False) 74 | return create_network(netDomainclassifier, opt) 75 | 76 | def define_Corr(opt): 77 | netCoor_cls = find_network_using_name('novgg', 'correspondence') 78 | return create_network(netCoor_cls, opt) 79 | 
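(Editorial note, not part of the original repository.) Both factories above resolve classes from strings at runtime: create_model turns --model foo into the module models.foo_model and matches a torch.nn.Module subclass by lowercased name, and find_network_using_name does the same under models/networks/. A minimal sketch of the contract create_model expects, using a hypothetical 'colorize' model; the file name, class name, and fields below are illustrative assumptions only:

# models/colorize_model.py (hypothetical)
import torch.nn as nn

class ColorizeModel(nn.Module):
    # matched because 'ColorizeModel'.lower() == 'colorize'.replace('_', '') + 'model'
    @staticmethod
    def modify_commandline_options(parser, is_train):
        # hook consumed by get_option_setter(model_name) during option parsing
        return parser

    def __init__(self, opt):
        super().__init__()
        self.opt = opt

With this file in place, create_model(opt) with opt.model == 'colorize' imports models.colorize_model, finds ColorizeModel case-insensitively, and instantiates it with opt.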
-------------------------------------------------------------------------------- /models/networks/architecture.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 3 | 4 | import torch 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | import torchvision 8 | import torch.nn.utils.spectral_norm as spectral_norm 9 | from models.networks.normalization import SPADE, equal_lr, SPADE_TwoPath 10 | 11 | 12 | # ResNet block that uses SPADE. 13 | # It differs from the ResNet block of pix2pixHD in that 14 | # it takes in the segmentation map as input, learns the skip connection if necessary, 15 | # and applies normalization first and then convolution. 16 | # This is a fairly standard residual-block design for unconditional or 17 | # class-conditional GANs. 18 | # The code was inspired by https://github.com/LMescheder/GAN_stability. 19 | class SPADEResnetBlock(nn.Module): 20 | def __init__(self, fin, fout, opt, use_se=False, dilation=1): 21 | super().__init__() 22 | # Attributes 23 | self.learned_shortcut = (fin != fout) 24 | fmiddle = min(fin, fout) 25 | self.opt = opt 26 | self.pad_type = 'nozero' 27 | self.use_se = use_se 28 | 29 | # create conv layers 30 | if self.pad_type != 'zero': 31 | self.pad = nn.ReflectionPad2d(dilation) 32 | self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=0, dilation=dilation) 33 | self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=0, dilation=dilation) 34 | else: 35 | self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=dilation, dilation=dilation) 36 | self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=dilation, dilation=dilation) 37 | if self.learned_shortcut: 38 | self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False) 39 | 40 | # apply spectral norm if specified 41 | if 'spectral' in opt.norm_G: 42 | if opt.eqlr_sn: 43 | self.conv_0 = equal_lr(self.conv_0) 44 | self.conv_1 = equal_lr(self.conv_1) 45 | if self.learned_shortcut: 46 | self.conv_s = equal_lr(self.conv_s) 47 | else: 48 | self.conv_0 = spectral_norm(self.conv_0) 49 | self.conv_1 = spectral_norm(self.conv_1) 50 | if self.learned_shortcut: 51 | self.conv_s = spectral_norm(self.conv_s) 52 | 53 | # define normalization layers 54 | spade_config_str = opt.norm_G.replace('spectral', '') 55 | if 'spade_ic' in opt: 56 | ic = opt.spade_ic 57 | else: 58 | ic = 0 + (3 if 'warp' in opt.CBN_intype else 0) + (opt.semantic_nc if 'mask' in opt.CBN_intype else 0) 59 | 60 | self.norm_0 = SPADE(spade_config_str, fin, ic, PONO=opt.PONO, use_apex=opt.apex) 61 | self.norm_1 = SPADE(spade_config_str, fmiddle, ic, PONO=opt.PONO, use_apex=opt.apex) 62 | if self.learned_shortcut: 63 | self.norm_s = SPADE(spade_config_str, fin, ic, PONO=opt.PONO, use_apex=opt.apex) 64 | 65 | if use_se: 66 | self.se_layer = SELayer(fout) 67 | 68 | # note the resnet block with SPADE also takes in |seg|, 69 | # the semantic segmentation map as input 70 | def forward(self, x, seg1): 71 | x_s = self.shortcut(x, seg1) 72 | if self.pad_type != 'zero': 73 | dx = self.conv_0(self.pad(self.actvn(self.norm_0(x, seg1)))) 74 | dx = self.conv_1(self.pad(self.actvn(self.norm_1(dx, seg1)))) 75 | if self.use_se: 76 | dx = self.se_layer(dx) 77 | else: 78 | dx = self.conv_0(self.actvn(self.norm_0(x, seg1))) 79 | dx = self.conv_1(self.actvn(self.norm_1(dx, seg1))) 80 | if self.use_se: 81 | dx = self.se_layer(dx) 82 | 83 | out = x_s + dx 84 | 85 | return out
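# (Editorial note, not in the original source: a shape trace under assumed sizes.
# With fin=1024, fout=512 and CBN_intype containing both 'warp' and 'mask',
# ic = 3 + opt.semantic_nc; forward then takes x of shape (B, 1024, H, W) and
# seg1 of shape (B, ic, Hs, Ws). Each SPADE layer resizes seg1 to (H, W)
# internally, and because fin != fout the 1x1 conv_s learns the channel
# projection for the shortcut, so out has shape (B, 512, H, W).)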
86 | 87 | def shortcut(self, x, seg1): 88 | if self.learned_shortcut: 89 | x_s = self.conv_s(self.norm_s(x, seg1)) 90 | else: 91 | x_s = x 92 | return x_s 93 | 94 | def actvn(self, x): 95 | return F.leaky_relu(x, 2e-1) 96 | 97 | class Attention(nn.Module): 98 | def __init__(self, ch, use_sn): 99 | super(Attention, self).__init__() 100 | # Channel multiplier 101 | self.ch = ch 102 | self.theta = nn.Conv2d(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False) 103 | self.phi = nn.Conv2d(self.ch, self.ch // 8, kernel_size=1, padding=0, bias=False) 104 | self.g = nn.Conv2d(self.ch, self.ch // 2, kernel_size=1, padding=0, bias=False) 105 | self.o = nn.Conv2d(self.ch // 2, self.ch, kernel_size=1, padding=0, bias=False) 106 | if use_sn: 107 | self.theta = spectral_norm(self.theta) 108 | self.phi = spectral_norm(self.phi) 109 | self.g = spectral_norm(self.g) 110 | self.o = spectral_norm(self.o) 111 | # Learnable gain parameter 112 | self.gamma = nn.Parameter(torch.tensor(0.), requires_grad=True) 113 | 114 | def forward(self, x, y=None): 115 | # Apply convs 116 | theta = self.theta(x) 117 | phi = F.max_pool2d(self.phi(x), [2,2]) 118 | g = F.max_pool2d(self.g(x), [2,2]) 119 | # Perform reshapes 120 | theta = theta.view(-1, self.ch // 8, x.shape[2] * x.shape[3]) 121 | phi = phi.view(-1, self.ch // 8, x.shape[2] * x.shape[3] // 4) 122 | g = g.view(-1, self.ch // 2, x.shape[2] * x.shape[3] // 4) 123 | # Matmul and softmax to get attention maps 124 | beta = F.softmax(torch.bmm(theta.transpose(1, 2), phi), -1) 125 | # Attention map times g path 126 | o = self.o(torch.bmm(g, beta.transpose(1,2)).view(-1, self.ch // 2, x.shape[2], x.shape[3])) 127 | return self.gamma * o + x 128 | 129 | # ResNet block used in pix2pixHD 130 | # We keep the same architecture as pix2pixHD.
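# (Editorial note: unlike SPADEResnetBlock above, this block is unconditional;
# forward takes only x, the channel count stays fixed at dim, and the skip is
# the identity, so no learned shortcut is needed.)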
131 | class ResnetBlock(nn.Module): 132 | def __init__(self, dim, norm_layer, activation=nn.ReLU(False), kernel_size=3): 133 | super().__init__() 134 | 135 | pw = (kernel_size - 1) // 2 136 | self.conv_block = nn.Sequential( 137 | nn.ReflectionPad2d(pw), 138 | norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size)), 139 | activation, 140 | nn.ReflectionPad2d(pw), 141 | norm_layer(nn.Conv2d(dim, dim, kernel_size=kernel_size)) 142 | ) 143 | 144 | def forward(self, x): 145 | y = self.conv_block(x) 146 | out = x + y 147 | return out 148 | 149 | 150 | # VGG architecture, used for the perceptual loss using a pretrained VGG network 151 | class VGG19(torch.nn.Module): 152 | def __init__(self, requires_grad=False): 153 | super().__init__() 154 | vgg_pretrained_features = torchvision.models.vgg19(pretrained=True).features 155 | self.slice1 = torch.nn.Sequential() 156 | self.slice2 = torch.nn.Sequential() 157 | self.slice3 = torch.nn.Sequential() 158 | self.slice4 = torch.nn.Sequential() 159 | self.slice5 = torch.nn.Sequential() 160 | for x in range(2): 161 | self.slice1.add_module(str(x), vgg_pretrained_features[x]) #r11 162 | for x in range(2, 7): 163 | self.slice2.add_module(str(x), vgg_pretrained_features[x]) #r21 164 | for x in range(7, 12): 165 | self.slice3.add_module(str(x), vgg_pretrained_features[x]) #r31 166 | for x in range(12, 21): 167 | self.slice4.add_module(str(x), vgg_pretrained_features[x]) #r41 168 | for x in range(21, 30): 169 | self.slice5.add_module(str(x), vgg_pretrained_features[x]) #r51 170 | if not requires_grad: 171 | for param in self.parameters(): 172 | param.requires_grad = False 173 | 174 | def forward(self, X): 175 | h_relu1 = self.slice1(X) 176 | h_relu2 = self.slice2(h_relu1) 177 | h_relu3 = self.slice3(h_relu2) 178 | h_relu4 = self.slice4(h_relu3) 179 | h_relu5 = self.slice5(h_relu4) 180 | out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5] 181 | return out 182 | 183 | class SELayer(nn.Module): 184 | def __init__(self, channel, reduction=16): 185 | super(SELayer, self).__init__() 186 | self.avg_pool = nn.AdaptiveAvgPool2d(1) 187 | self.fc = nn.Sequential( 188 | nn.Linear(channel, channel // reduction, bias=False), 189 | nn.ReLU(inplace=True), 190 | nn.Linear(channel // reduction, channel, bias=False), 191 | nn.Sigmoid() 192 | ) 193 | 194 | def forward(self, x): 195 | b, c, _, _ = x.size() 196 | y = self.avg_pool(x).view(b, c) 197 | y = self.fc(y).view(b, c, 1, 1) 198 | return x * y.expand_as(x) -------------------------------------------------------------------------------- /models/networks/base_network.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import torch.nn as nn 7 | from torch.nn import init 8 | 9 | 10 | class BaseNetwork(nn.Module): 11 | def __init__(self): 12 | super(BaseNetwork, self).__init__() 13 | 14 | @staticmethod 15 | def modify_commandline_options(parser, is_train): 16 | return parser 17 | 18 | def print_network(self): 19 | if isinstance(self, list): 20 | self = self[0] 21 | num_params = 0 22 | for param in self.parameters(): 23 | num_params += param.numel() 24 | print('Network [%s] was created. Total number of parameters: %.1f million. ' 25 | 'To see the architecture, do print(network).'
26 | % (type(self).__name__, num_params / 1000000)) 27 | 28 | def init_weights(self, init_type='normal', gain=0.02): 29 | def init_func(m): 30 | classname = m.__class__.__name__ 31 | if classname.find('BatchNorm2d') != -1: 32 | if hasattr(m, 'weight') and m.weight is not None: 33 | init.normal_(m.weight.data, 1.0, gain) 34 | if hasattr(m, 'bias') and m.bias is not None: 35 | init.constant_(m.bias.data, 0.0) 36 | elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): 37 | if init_type == 'normal': 38 | init.normal_(m.weight.data, 0.0, gain) 39 | elif init_type == 'xavier': 40 | init.xavier_normal_(m.weight.data, gain=gain) 41 | elif init_type == 'xavier_uniform': 42 | init.xavier_uniform_(m.weight.data, gain=1.0) 43 | elif init_type == 'kaiming': 44 | init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') 45 | elif init_type == 'orthogonal': 46 | init.orthogonal_(m.weight.data, gain=gain) 47 | elif init_type == 'none': # uses pytorch's default init method 48 | m.reset_parameters() 49 | else: 50 | raise NotImplementedError('initialization method [%s] is not implemented' % init_type) 51 | if hasattr(m, 'bias') and m.bias is not None: 52 | init.constant_(m.bias.data, 0.0) 53 | 54 | self.apply(init_func) 55 | 56 | # propagate to children 57 | for m in self.children(): 58 | if hasattr(m, 'init_weights'): 59 | m.init_weights(init_type, gain) 60 | -------------------------------------------------------------------------------- /models/networks/discriminator.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
4 | """ 5 | 6 | import torch 7 | import torch.nn as nn 8 | import numpy as np 9 | import torch.nn.functional as F 10 | from models.networks.base_network import BaseNetwork 11 | from models.networks.normalization import get_nonspade_norm_layer, equal_lr 12 | from models.networks.architecture import Attention 13 | import util.util as util 14 | 15 | 16 | class MultiscaleDiscriminator(BaseNetwork): 17 | @staticmethod 18 | def modify_commandline_options(parser, is_train): 19 | parser.add_argument('--netD_subarch', type=str, default='n_layer', 20 | help='architecture of each discriminator') 21 | parser.add_argument('--num_D', type=int, default=2, 22 | help='number of discriminators to be used in multiscale') 23 | opt, _ = parser.parse_known_args() 24 | 25 | # define properties of each discriminator of the multiscale discriminator 26 | subnetD = util.find_class_in_module(opt.netD_subarch + 'discriminator', 27 | 'models.networks.discriminator') 28 | subnetD.modify_commandline_options(parser, is_train) 29 | 30 | return parser 31 | 32 | def __init__(self, opt, stage1=False): 33 | super().__init__() 34 | self.opt = opt 35 | self.stage1 = stage1 36 | 37 | for i in range(opt.num_D): 38 | subnetD = self.create_single_discriminator(opt) 39 | self.add_module('discriminator_%d' % i, subnetD) 40 | 41 | def create_single_discriminator(self, opt): 42 | subarch = opt.netD_subarch 43 | if subarch == 'n_layer': 44 | netD = NLayerDiscriminator(opt, stage1=self.stage1) 45 | else: 46 | raise ValueError('unrecognized discriminator subarchitecture %s' % subarch) 47 | return netD 48 | 49 | def downsample(self, input): 50 | return F.avg_pool2d(input, kernel_size=3, 51 | stride=2, padding=[1, 1], 52 | count_include_pad=False) 53 | 54 | # Returns list of lists of discriminator outputs. 55 | # The final result is of size opt.num_D x opt.n_layers_D 56 | def forward(self, input): 57 | result = [] 58 | segs = [] 59 | cam_logits = [] 60 | get_intermediate_features = not self.opt.no_ganFeat_loss 61 | for name, D in self.named_children(): 62 | out, cam_logit = D(input) 63 | cam_logits.append(cam_logit) 64 | if not get_intermediate_features: 65 | out = [out] 66 | result.append(out) 67 | input = self.downsample(input) 68 | 69 | return result, segs, cam_logits 70 | 71 | 72 | # Defines the PatchGAN discriminator with the specified arguments. 
73 | class NLayerDiscriminator(BaseNetwork): 74 | @staticmethod 75 | def modify_commandline_options(parser, is_train): 76 | parser.add_argument('--n_layers_D', type=int, default=4, 77 | help='# layers in each discriminator') 78 | return parser 79 | 80 | def __init__(self, opt, stage1=False): 81 | super().__init__() 82 | self.opt = opt 83 | self.stage1 = stage1 84 | 85 | kw = 4 86 | #padw = int(np.ceil((kw - 1.0) / 2)) 87 | padw = int((kw - 1.0) / 2) 88 | nf = opt.ndf 89 | input_nc = self.compute_D_input_nc(opt) 90 | 91 | norm_layer = get_nonspade_norm_layer(opt, opt.norm_D) 92 | sequence = [[nn.Conv2d(input_nc, nf, kernel_size=kw, stride=2, padding=padw), 93 | nn.LeakyReLU(0.2, False)]] 94 | 95 | for n in range(1, opt.n_layers_D): 96 | nf_prev = nf 97 | nf = min(nf * 2, 512) 98 | stride = 1 if n == opt.n_layers_D - 1 else 2 99 | if (((not stage1) and opt.use_attention) or (stage1 and opt.use_attention_st1)) and n == opt.n_layers_D - 1: 100 | self.attn = Attention(nf_prev, 'spectral' in opt.norm_D) 101 | if n == opt.n_layers_D - 1 and (not stage1): 102 | dec = [] 103 | nc_dec = nf_prev 104 | for _ in range(opt.n_layers_D - 1): 105 | dec += [nn.Upsample(scale_factor=2), 106 | norm_layer(nn.Conv2d(nc_dec, int(nc_dec//2), kernel_size=3, stride=1, padding=1)), 107 | nn.LeakyReLU(0.2, False)] 108 | nc_dec = int(nc_dec // 2) 109 | dec += [nn.Conv2d(nc_dec, opt.semantic_nc, kernel_size=3, stride=1, padding=1)] 110 | self.dec = nn.Sequential(*dec) 111 | sequence += [[norm_layer(nn.Conv2d(nf_prev, nf, kernel_size=kw, 112 | stride=stride, padding=padw)), 113 | nn.LeakyReLU(0.2, False) 114 | ]] 115 | sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]] 116 | 117 | if opt.D_cam > 0: 118 | mult = min(2 ** (opt.n_layers_D - 1), 8) 119 | if opt.eqlr_sn: 120 | self.gap_fc = equal_lr(nn.Linear(opt.ndf * mult, 1, bias=False)) 121 | self.gmp_fc = equal_lr(nn.Linear(opt.ndf * mult, 1, bias=False)) 122 | else: 123 | self.gap_fc = nn.utils.spectral_norm(nn.Linear(opt.ndf * mult, 1, bias=False)) 124 | self.gmp_fc = nn.utils.spectral_norm(nn.Linear(opt.ndf * mult, 1, bias=False)) 125 | self.conv1x1 = nn.Conv2d(opt.ndf * mult * 2, opt.ndf * mult, kernel_size=1, stride=1, bias=True) 126 | self.leaky_relu = nn.LeakyReLU(0.2, True) 127 | 128 | # We divide the layers into groups to extract intermediate layer outputs 129 | for n in range(len(sequence)): 130 | self.add_module('model' + str(n), nn.Sequential(*sequence[n])) 131 | 132 | def compute_D_input_nc(self, opt): 133 | input_nc = opt.label_nc + opt.output_nc 134 | if opt.contain_dontcare_label: 135 | input_nc += 1 136 | return input_nc 137 | 138 | def forward(self, input): 139 | results = [input] 140 | seg = None 141 | cam_logit = None 142 | for name, submodel in self.named_children(): 143 | if 'model' not in name: 144 | continue 145 | if name == 'model3': 146 | if ((not self.stage1) and self.opt.use_attention) or (self.stage1 and self.opt.use_attention_st1): 147 | x = self.attn(results[-1]) 148 | else: 149 | x = results[-1] 150 | else: 151 | x = results[-1] 152 | intermediate_output = submodel(x) 153 | if self.opt.D_cam > 0 and name == 'model3': 154 | gap = F.adaptive_avg_pool2d(intermediate_output, 1) 155 | gap_logit = self.gap_fc(gap.view(intermediate_output.shape[0], -1)) 156 | gap_weight = list(self.gap_fc.parameters())[0] 157 | gap = intermediate_output * gap_weight.unsqueeze(2).unsqueeze(3) 158 | 159 | gmp = F.adaptive_max_pool2d(intermediate_output, 1) 160 | gmp_logit = self.gmp_fc(gmp.view(intermediate_output.shape[0], -1)) 161 | 
gmp_weight = list(self.gmp_fc.parameters())[0] 162 | gmp = intermediate_output * gmp_weight.unsqueeze(2).unsqueeze(3) 163 | 164 | cam_logit = torch.cat([gap_logit, gmp_logit], 1) 165 | intermediate_output = torch.cat([gap, gmp], 1) 166 | intermediate_output = self.leaky_relu(self.conv1x1(intermediate_output)) 167 | results.append(intermediate_output) 168 | 169 | get_intermediate_features = not self.opt.no_ganFeat_loss 170 | if get_intermediate_features: 171 | retu = results[1:] 172 | else: 173 | retu = results[-1] 174 | if seg is None: 175 | return retu, cam_logit 176 | else: 177 | return retu, seg, cam_logit 178 | -------------------------------------------------------------------------------- /models/networks/generator.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 3 | #08.09 change pad 4 | 5 | import numpy as np 6 | import torch 7 | import torch.nn as nn 8 | import torch.nn.functional as F 9 | from torch.autograd import Function 10 | from models.networks.base_network import BaseNetwork 11 | from models.networks.normalization import get_nonspade_norm_layer, equal_lr 12 | from models.networks.architecture import ResnetBlock as ResnetBlock 13 | from models.networks.architecture import SPADEResnetBlock as SPADEResnetBlock 14 | from models.networks.architecture import Attention 15 | from models.networks.sync_batchnorm import SynchronizedBatchNorm2d, SynchronizedBatchNorm1d 16 | 17 | class SPADEGenerator(BaseNetwork): 18 | @staticmethod 19 | def modify_commandline_options(parser, is_train): 20 | parser.set_defaults(norm_G='spectralspadesyncbatch3x3') 21 | return parser 22 | 23 | def __init__(self, opt): 24 | super().__init__() 25 | self.opt = opt 26 | nf = opt.ngf 27 | 28 | self.sw, self.sh = self.compute_latent_vector_size(opt) 29 | 30 | ic = 0 + (3 if 'warp' in self.opt.CBN_intype else 0) + (self.opt.semantic_nc if 'mask' in self.opt.CBN_intype else 0) 31 | self.fc = nn.Conv2d(ic, 16 * nf, 3, padding=1) 32 | if opt.eqlr_sn: 33 | self.fc = equal_lr(self.fc) 34 | 35 | self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt) 36 | 37 | self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt) 38 | self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt) 39 | 40 | self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt) 41 | self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt) 42 | if opt.use_attention: 43 | self.attn = Attention(4 * nf, 'spectral' in opt.norm_G) 44 | self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt) 45 | self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt) 46 | 47 | final_nc = nf 48 | 49 | self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1) 50 | self.up = nn.Upsample(scale_factor=2) 51 | 52 | def compute_latent_vector_size(self, opt): 53 | num_up_layers = 5 54 | 55 | sw = opt.crop_size // (2**num_up_layers) 56 | sh = round(sw / opt.aspect_ratio) 57 | 58 | return sw, sh 59 | 60 | def forward(self, input, warp_out=None): 61 | seg = input if warp_out is None else warp_out 62 | 63 | # we downsample segmap and run convolution 64 | x = F.interpolate(seg, size=(self.sh, self.sw)) 65 | x = self.fc(x) 66 | 67 | x = self.head_0(x, seg) 68 | 69 | x = self.up(x) 70 | x = self.G_middle_0(x, seg) 71 | 72 | x = self.G_middle_1(x, seg) 73 | 74 | x = self.up(x) 75 | x = self.up_0(x, seg) 76 | x = self.up(x) 77 | x = self.up_1(x, seg) 78 | 79 | x = self.up(x) 80 | if self.opt.use_attention: 81 | x = self.attn(x) 82 | x = self.up_2(x, seg) 83 | x = self.up(x) 84 | x = self.up_3(x, seg) 85 | 
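# (Editorial note: for a crop_size of 256, compute_latent_vector_size starts
# the trunk from an 8x8 map (256 // 2**5), and the five self.up calls above
# double the resolution back to 256x256, so conv_img below maps the final
# (B, nf, 256, 256) features to a 3-channel RGB image in [-1, 1] via tanh.)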
86 | x = self.conv_img(F.leaky_relu(x, 2e-1)) 87 | x = F.tanh(x) 88 | 89 | return x 90 | 91 | class AdaptiveFeatureGenerator(BaseNetwork): 92 | @staticmethod 93 | def modify_commandline_options(parser, is_train): 94 | parser.set_defaults(norm_G='spectralspadesyncbatch3x3') 95 | parser.add_argument('--num_upsampling_layers', 96 | choices=('normal', 'more', 'most'), default='normal', 97 | help="If 'more', adds upsampling layer between the two middle resnet blocks. If 'most', also add one more upsampling + resnet layer at the end of the generator") 98 | 99 | return parser 100 | 101 | def __init__(self, opt): 102 | # TODO: kernel=4, concat noise, or change architecture to vgg feature pyramid 103 | super().__init__() 104 | self.opt = opt 105 | kw = 3 106 | pw = int(np.ceil((kw - 1.0) / 2)) 107 | ndf = opt.ngf 108 | norm_layer = get_nonspade_norm_layer(opt, opt.norm_E) 109 | self.layer1 = norm_layer(nn.Conv2d(opt.spade_ic, ndf, kw, stride=1, padding=pw)) 110 | self.layer2 = norm_layer(nn.Conv2d(ndf * 1, ndf * 2, opt.adaptor_kernel, stride=2, padding=pw)) 111 | self.layer3 = norm_layer(nn.Conv2d(ndf * 2, ndf * 4, kw, stride=1, padding=pw)) 112 | if opt.warp_stride == 2: 113 | self.layer4 = norm_layer(nn.Conv2d(ndf * 4, ndf * 8, kw, stride=1, padding=pw)) 114 | else: 115 | self.layer4 = norm_layer(nn.Conv2d(ndf * 4, ndf * 8, opt.adaptor_kernel, stride=2, padding=pw)) 116 | self.layer5 = norm_layer(nn.Conv2d(ndf * 8, ndf * 8, kw, stride=1, padding=pw)) 117 | 118 | self.actvn = nn.LeakyReLU(0.2, False) 119 | self.opt = opt 120 | 121 | nf = opt.ngf 122 | 123 | self.head_0 = SPADEResnetBlock(8 * nf, 8 * nf, opt, use_se=opt.adaptor_se) 124 | if opt.adaptor_nonlocal: 125 | self.attn = Attention(8 * nf, False) 126 | self.G_middle_0 = SPADEResnetBlock(8 * nf, 8 * nf, opt, use_se=opt.adaptor_se) 127 | self.G_middle_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt, use_se=opt.adaptor_se) 128 | 129 | if opt.adaptor_res_deeper: 130 | self.deeper0 = SPADEResnetBlock(4 * nf, 4 * nf, opt) 131 | if opt.dilation_conv: 132 | self.deeper1 = SPADEResnetBlock(4 * nf, 4 * nf, opt, dilation=2) 133 | self.deeper2 = SPADEResnetBlock(4 * nf, 4 * nf, opt, dilation=4) 134 | self.degridding0 = norm_layer(nn.Conv2d(ndf * 4, ndf * 4, 3, stride=1, padding=2, dilation=2)) 135 | self.degridding1 = norm_layer(nn.Conv2d(ndf * 4, ndf * 4, 3, stride=1, padding=1)) 136 | else: 137 | self.deeper1 = SPADEResnetBlock(4 * nf, 4 * nf, opt) 138 | self.deeper2 = SPADEResnetBlock(4 * nf, 4 * nf, opt) 139 | 140 | def forward(self, input, seg): 141 | x = self.layer1(input) 142 | x = self.layer2(self.actvn(x)) 143 | x = self.layer3(self.actvn(x)) 144 | x = self.layer4(self.actvn(x)) 145 | x = self.layer5(self.actvn(x)) 146 | 147 | x = self.head_0(x, seg) 148 | if self.opt.adaptor_nonlocal: 149 | x = self.attn(x) 150 | x = self.G_middle_0(x, seg) 151 | x = self.G_middle_1(x, seg) 152 | if self.opt.adaptor_res_deeper: 153 | x = self.deeper0(x, seg) 154 | x = self.deeper1(x, seg) 155 | x = self.deeper2(x, seg) 156 | if self.opt.dilation_conv: 157 | x = self.degridding0(x) 158 | x = self.degridding1(x) 159 | 160 | return x 161 | 162 | class ReverseGenerator(BaseNetwork): 163 | def __init__(self, opt, ic, oc, size): 164 | super().__init__() 165 | self.opt = opt 166 | self.downsample = True if size == 256 else False 167 | nf = opt.ngf 168 | opt.spade_ic = ic 169 | if opt.warp_reverseG_s: 170 | self.backbone_0 = SPADEResnetBlock(4 * nf, 4 * nf, opt) 171 | else: 172 | self.backbone_0 = SPADEResnetBlock(4 * nf, 8 * nf, opt) 173 | self.backbone_1 = 
SPADEResnetBlock(8 * nf, 8 * nf, opt) 174 | self.backbone_2 = SPADEResnetBlock(8 * nf, 8 * nf, opt) 175 | self.backbone_3 = SPADEResnetBlock(8 * nf, 4 * nf, opt) 176 | self.backbone_4 = SPADEResnetBlock(4 * nf, 2 * nf, opt) 177 | self.backbone_5 = SPADEResnetBlock(2 * nf, nf, opt) 178 | del opt.spade_ic 179 | if self.downsample: 180 | kw = 3 181 | pw = int(np.ceil((kw - 1.0) / 2)) 182 | ndf = opt.ngf 183 | norm_layer = get_nonspade_norm_layer(opt, opt.norm_E) 184 | self.layer1 = norm_layer(nn.Conv2d(ic, ndf, kw, stride=1, padding=pw)) 185 | self.layer2 = norm_layer(nn.Conv2d(ndf * 1, ndf * 2, 4, stride=2, padding=pw)) 186 | self.layer3 = norm_layer(nn.Conv2d(ndf * 2, ndf * 4, kw, stride=1, padding=pw)) 187 | self.layer4 = norm_layer(nn.Conv2d(ndf * 4, ndf * 4, 4, stride=2, padding=pw)) 188 | self.up = nn.Upsample(scale_factor=2) 189 | self.actvn = nn.LeakyReLU(0.2, False) 190 | self.conv_img = nn.Conv2d(nf, oc, 3, padding=1) 191 | 192 | def forward(self, x): 193 | input = x 194 | if self.downsample: 195 | x = self.layer1(input) 196 | x = self.layer2(self.actvn(x)) 197 | x = self.layer3(self.actvn(x)) 198 | x = self.layer4(self.actvn(x)) 199 | x = self.backbone_0(x, input) 200 | if not self.opt.warp_reverseG_s: 201 | x = self.backbone_1(x, input) 202 | x = self.backbone_2(x, input) 203 | x = self.backbone_3(x, input) 204 | if self.downsample: 205 | x = self.up(x) 206 | x = self.backbone_4(x, input) 207 | if self.downsample: 208 | x = self.up(x) 209 | x = self.backbone_5(x, input) 210 | x = self.conv_img(F.leaky_relu(x, 2e-1)) 211 | x = F.tanh(x) 212 | return x 213 | 214 | class DomainClassifier(BaseNetwork): 215 | def __init__(self, opt): 216 | super().__init__() 217 | nf = opt.ngf 218 | kw = 4 if opt.domain_rela else 3 219 | pw = int((kw - 1.0) / 2) 220 | self.feature = nn.Sequential(nn.Conv2d(4 * nf, 2 * nf, kw, stride=2, padding=pw), 221 | SynchronizedBatchNorm2d(2 * nf, affine=True), 222 | nn.LeakyReLU(0.2, False), 223 | nn.Conv2d(2 * nf, nf, kw, stride=2, padding=pw), 224 | SynchronizedBatchNorm2d(nf, affine=True), 225 | nn.LeakyReLU(0.2, False), 226 | nn.Conv2d(nf, int(nf // 2), kw, stride=2, padding=pw), 227 | SynchronizedBatchNorm2d(int(nf // 2), affine=True), 228 | nn.LeakyReLU(0.2, False)) #32*8*8 229 | model = [nn.Linear(int(nf // 2) * 8 * 8, 100), 230 | SynchronizedBatchNorm1d(100, affine=True), 231 | nn.ReLU()] 232 | if opt.domain_rela: 233 | model += [nn.Linear(100, 1)] 234 | else: 235 | model += [nn.Linear(100, 2), 236 | nn.LogSoftmax(dim=1)] 237 | self.classifier = nn.Sequential(*model) 238 | 239 | def forward(self, x): 240 | x = self.feature(x) 241 | x = self.classifier(x.view(x.shape[0], -1)) 242 | return x 243 | 244 | class ReverseLayerF(Function): 245 | 246 | @staticmethod 247 | def forward(ctx, x, alpha): 248 | ctx.alpha = alpha 249 | 250 | return x.view_as(x) 251 | 252 | @staticmethod 253 | def backward(ctx, grad_output): 254 | output = grad_output.neg() * ctx.alpha 255 | 256 | return output, None 257 | 258 | 259 | class EMA(): 260 | def __init__(self, mu): 261 | self.mu = mu 262 | self.shadow = {} 263 | self.original = {} 264 | 265 | def register(self, name, val): 266 | self.shadow[name] = val.clone() 267 | 268 | def __call__(self, model): 269 | for name, param in model.named_parameters(): 270 | if param.requires_grad: 271 | assert name in self.shadow 272 | decay = self.mu 273 | new_average = (1.0 - decay) * param.data + decay * self.shadow[name] 274 | self.shadow[name] = new_average.clone() 275 | 276 | def assign(self, model): 277 | for name, param in 
model.named_parameters(): 278 | if param.requires_grad: 279 | assert name in self.shadow 280 | self.original[name] = param.data.clone() 281 | param.data = self.shadow[name] 282 | 283 | def resume(self, model): 284 | for name, param in model.named_parameters(): 285 | if param.requires_grad: 286 | assert name in self.shadow 287 | param.data = self.original[name] 288 | -------------------------------------------------------------------------------- /models/networks/loss.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 3 | 4 | import torch 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | from models.networks.architecture import VGG19 8 | from models.networks.correspondence import VGG19_feature_color_torchversion 9 | 10 | 11 | # Defines the GAN loss which uses either LSGAN or the regular GAN. 12 | # When LSGAN is used, it is basically same as MSELoss, 13 | # but it abstracts away the need to create the target label tensor 14 | # that has the same size as the input 15 | class GANLoss(nn.Module): 16 | def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0, 17 | tensor=torch.FloatTensor, opt=None): 18 | super(GANLoss, self).__init__() 19 | self.real_label = target_real_label 20 | self.fake_label = target_fake_label 21 | self.real_label_tensor = None 22 | self.fake_label_tensor = None 23 | self.zero_tensor = None 24 | self.Tensor = tensor 25 | self.gan_mode = gan_mode 26 | self.opt = opt 27 | if gan_mode == 'ls': 28 | pass 29 | elif gan_mode == 'original': 30 | pass 31 | elif gan_mode == 'w': 32 | pass 33 | elif gan_mode == 'hinge': 34 | pass 35 | else: 36 | raise ValueError('Unexpected gan_mode {}'.format(gan_mode)) 37 | 38 | def get_target_tensor(self, input, target_is_real): 39 | if target_is_real: 40 | if self.real_label_tensor is None: 41 | self.real_label_tensor = self.Tensor(1).fill_(self.real_label) 42 | self.real_label_tensor.requires_grad_(False) 43 | return self.real_label_tensor.expand_as(input) 44 | else: 45 | if self.fake_label_tensor is None: 46 | self.fake_label_tensor = self.Tensor(1).fill_(self.fake_label) 47 | self.fake_label_tensor.requires_grad_(False) 48 | return self.fake_label_tensor.expand_as(input) 49 | 50 | def get_zero_tensor(self, input): 51 | if self.zero_tensor is None: 52 | self.zero_tensor = self.Tensor(1).fill_(0) 53 | self.zero_tensor.requires_grad_(False) 54 | return self.zero_tensor.expand_as(input) 55 | 56 | def loss(self, input, target_is_real, for_discriminator=True): 57 | if self.gan_mode == 'original': # cross entropy loss 58 | target_tensor = self.get_target_tensor(input, target_is_real) 59 | loss = F.binary_cross_entropy_with_logits(input, target_tensor) 60 | return loss 61 | elif self.gan_mode == 'ls': 62 | target_tensor = self.get_target_tensor(input, target_is_real) 63 | return F.mse_loss(input, target_tensor) 64 | elif self.gan_mode == 'hinge': 65 | if for_discriminator: 66 | if target_is_real: 67 | minval = torch.min(input - 1, self.get_zero_tensor(input)) 68 | loss = -torch.mean(minval) 69 | else: 70 | minval = torch.min(-input - 1, self.get_zero_tensor(input)) 71 | loss = -torch.mean(minval) 72 | else: 73 | assert target_is_real, "The generator's hinge loss must be aiming for real" 74 | loss = -torch.mean(input) 75 | return loss 76 | else: 77 | # wgan 78 | if target_is_real: 79 | return -input.mean() 80 | else: 81 | return input.mean() 82 | 83 | def __call__(self, input, target_is_real, 
for_discriminator=True): 84 | # computing loss is a bit complicated because |input| may not be 85 | # a tensor, but list of tensors in case of multiscale discriminator 86 | if isinstance(input, list): 87 | loss = 0 88 | for pred_i in input: 89 | if isinstance(pred_i, list): 90 | pred_i = pred_i[-1] 91 | loss_tensor = self.loss(pred_i, target_is_real, for_discriminator) 92 | bs = 1 if len(loss_tensor.size()) == 0 else loss_tensor.size(0) 93 | new_loss = torch.mean(loss_tensor.view(bs, -1), dim=1) 94 | loss += new_loss 95 | return loss / len(input) 96 | else: 97 | return self.loss(input, target_is_real, for_discriminator) 98 | 99 | 100 | # Perceptual loss that uses a pretrained VGG network 101 | class VGGLoss(nn.Module): 102 | def __init__(self, gpu_ids, vgg_normal_correct=False): 103 | super(VGGLoss, self).__init__() 104 | self.vgg_normal_correct = vgg_normal_correct 105 | if vgg_normal_correct: 106 | self.vgg = VGG19_feature_color_torchversion(vgg_normal_correct=True).cuda() 107 | else: 108 | self.vgg = VGG19().cuda() 109 | self.criterion = nn.L1Loss() 110 | self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0] 111 | 112 | def forward(self, x, y): 113 | if self.vgg_normal_correct: 114 | x_vgg, y_vgg = self.vgg(x, ['r11', 'r21', 'r31', 'r41', 'r51'], preprocess=True), self.vgg(y, ['r11', 'r21', 'r31', 'r41', 'r51'], preprocess=True) 115 | else: 116 | x_vgg, y_vgg = self.vgg(x), self.vgg(y) 117 | loss = 0 118 | for i in range(len(x_vgg)): 119 | loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach()) 120 | return loss 121 | 122 | 123 | # KL Divergence loss used in VAE with an image encoder 124 | class KLDLoss(nn.Module): 125 | def forward(self, mu, logvar): 126 | return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) 127 | -------------------------------------------------------------------------------- /models/networks/normalization.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 
3 | 4 | import re 5 | import sys 6 | import numpy as np 7 | import torch 8 | import torch.nn as nn 9 | import torch.nn.functional as F 10 | from models.networks.sync_batchnorm import SynchronizedBatchNorm2d 11 | import torch.nn.utils.spectral_norm as spectral_norm 12 | try: 13 | import apex 14 | from apex import amp 15 | except: 16 | print('apex not found') 17 | pass 18 | 19 | # Returns a function that creates a normalization function 20 | # that does not condition on semantic map 21 | def get_nonspade_norm_layer(opt, norm_type='instance'): 22 | # helper function to get # output channels of the previous layer 23 | def get_out_channel(layer): 24 | if hasattr(layer, 'out_channels'): 25 | return getattr(layer, 'out_channels') 26 | return layer.weight.size(0) 27 | 28 | # this function will be returned 29 | def add_norm_layer(layer): 30 | nonlocal norm_type 31 | if norm_type.startswith('spectral'): 32 | if opt.eqlr_sn: 33 | layer = equal_lr(layer) 34 | else: 35 | layer = spectral_norm(layer) 36 | subnorm_type = norm_type[len('spectral'):] 37 | 38 | if subnorm_type == 'none' or len(subnorm_type) == 0: 39 | return layer 40 | 41 | # remove bias in the previous layer, which is meaningless 42 | # since it has no effect after normalization 43 | if getattr(layer, 'bias', None) is not None: 44 | delattr(layer, 'bias') 45 | layer.register_parameter('bias', None) 46 | 47 | if subnorm_type == 'batch': 48 | norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True) 49 | elif subnorm_type == 'sync_batch': 50 | if opt.apex: 51 | norm_layer = apex.parallel.SyncBatchNorm(get_out_channel(layer), affine=True) 52 | else: 53 | norm_layer = SynchronizedBatchNorm2d(get_out_channel(layer), affine=True) 54 | elif subnorm_type == 'instance': 55 | norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False) 56 | else: 57 | raise ValueError('normalization layer %s is not recognized' % subnorm_type) 58 | 59 | return nn.Sequential(layer, norm_layer) 60 | 61 | return add_norm_layer 62 | 63 | def PositionalNorm2d(x, epsilon=1e-5): 64 | # x: B*C*W*H normalize in C dim 65 | mean = x.mean(dim=1, keepdim=True) 66 | std = x.var(dim=1, keepdim=True).add(epsilon).sqrt() 67 | output = (x - mean) / std 68 | return output 69 | 70 | # Creates SPADE normalization layer based on the given configuration 71 | # SPADE consists of two steps. First, it normalizes the activations using 72 | # your favorite normalization method, such as Batch Norm or Instance Norm. 73 | # Second, it applies scale and bias to the normalized output, conditioned on 74 | # the segmentation map. 75 | # The format of |config_text| is spade(norm)(ks), where 76 | # (norm) specifies the type of parameter-free normalization. 77 | # (e.g. syncbatch, batch, instance) 78 | # (ks) specifies the size of kernel in the SPADE module (e.g. 3x3) 79 | # Example |config_text| will be spadesyncbatch3x3, or spadeinstance5x5. 
80 | # Also, the other arguments are 81 | # |norm_nc|: the #channels of the normalized activations, hence the output dim of SPADE 82 | # |label_nc|: the #channels of the input semantic map, hence the input dim of SPADE 83 | class SPADE(nn.Module): 84 | def __init__(self, config_text, norm_nc, label_nc, PONO=False, use_apex=False): 85 | super().__init__() 86 | 87 | assert config_text.startswith('spade') 88 | parsed = re.search('spade(\D+)(\d)x\d', config_text) 89 | param_free_norm_type = str(parsed.group(1)) 90 | ks = int(parsed.group(2)) 91 | self.pad_type = 'nozero' 92 | 93 | if PONO: 94 | self.param_free_norm = PositionalNorm2d 95 | elif param_free_norm_type == 'instance': 96 | self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False) 97 | elif param_free_norm_type == 'syncbatch': 98 | if use_apex: 99 | self.param_free_norm = apex.parallel.SyncBatchNorm(norm_nc, affine=False) 100 | else: 101 | self.param_free_norm = SynchronizedBatchNorm2d(norm_nc, affine=False) 102 | elif param_free_norm_type == 'batch': 103 | self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False) 104 | else: 105 | raise ValueError('%s is not a recognized param-free norm type in SPADE' 106 | % param_free_norm_type) 107 | 108 | # The dimension of the intermediate embedding space. Yes, hardcoded. 109 | nhidden = 128 110 | 111 | pw = ks // 2 112 | if self.pad_type != 'zero': 113 | self.mlp_shared = nn.Sequential( 114 | nn.ReflectionPad2d(pw), 115 | nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=0), 116 | nn.ReLU() 117 | ) 118 | self.pad = nn.ReflectionPad2d(pw) 119 | self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0) 120 | self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0) 121 | else: 122 | self.mlp_shared = nn.Sequential( 123 | nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw), 124 | nn.ReLU() 125 | ) 126 | self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw) 127 | self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw) 128 | 129 | def forward(self, x, segmap, similarity_map=None): 130 | 131 | # Part 1. generate parameter-free normalized activations 132 | normalized = self.param_free_norm(x) 133 | 134 | # Part 2. 
produce scaling and bias conditioned on semantic map 135 | segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest') 136 | actv = self.mlp_shared(segmap) 137 | if self.pad_type != 'zero': 138 | gamma = self.mlp_gamma(self.pad(actv)) 139 | beta = self.mlp_beta(self.pad(actv)) 140 | else: 141 | gamma = self.mlp_gamma(actv) 142 | beta = self.mlp_beta(actv) 143 | 144 | if similarity_map is not None: 145 | similarity_map = F.interpolate(similarity_map, size=gamma.size()[2:], mode='nearest') 146 | gamma = gamma * similarity_map 147 | beta = beta * similarity_map 148 | # apply scale and bias 149 | out = normalized * (1 + gamma) + beta 150 | 151 | return out 152 | 153 | class SPADE_TwoPath(nn.Module): 154 | def __init__(self, config_text, norm_nc, label_nc_example, label_nc_imagine, PONO=False, use_apex=False): 155 | super().__init__() 156 | 157 | assert config_text.startswith('spade') 158 | parsed = re.search('spade(\D+)(\d)x\d', config_text) 159 | param_free_norm_type = str(parsed.group(1)) 160 | ks = int(parsed.group(2)) 161 | self.pad_type = 'nozero' 162 | 163 | if PONO: 164 | self.param_free_norm = PositionalNorm2d 165 | elif param_free_norm_type == 'instance': 166 | self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False) 167 | elif param_free_norm_type == 'syncbatch': 168 | if use_apex: 169 | self.param_free_norm = apex.parallel.SyncBatchNorm(norm_nc, affine=False) 170 | else: 171 | self.param_free_norm = SynchronizedBatchNorm2d(norm_nc, affine=False) 172 | elif param_free_norm_type == 'batch': 173 | self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False) 174 | else: 175 | raise ValueError('%s is not a recognized param-free norm type in SPADE' 176 | % param_free_norm_type) 177 | 178 | # The dimension of the intermediate embedding space. Yes, hardcoded. 179 | nhidden = 128 180 | 181 | pw = ks // 2 182 | if self.pad_type != 'zero': 183 | self.mlp_shared_example = nn.Sequential( 184 | nn.ReflectionPad2d(pw), 185 | nn.Conv2d(label_nc_example, nhidden, kernel_size=ks, padding=0), 186 | nn.ReLU() 187 | ) 188 | self.pad = nn.ReflectionPad2d(pw) 189 | self.mlp_gamma_example = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0) 190 | self.mlp_beta_example = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0) 191 | 192 | self.mlp_shared_imagine = nn.Sequential( 193 | nn.ReflectionPad2d(pw), 194 | nn.Conv2d(label_nc_imagine, nhidden, kernel_size=ks, padding=0), 195 | nn.ReLU() 196 | ) 197 | self.mlp_gamma_imagine = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0) 198 | self.mlp_beta_imagine = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0) 199 | else: 200 | self.mlp_shared_example = nn.Sequential( 201 | nn.Conv2d(label_nc_example, nhidden, kernel_size=ks, padding=pw), 202 | nn.ReLU() 203 | ) 204 | self.mlp_gamma_example = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw) 205 | self.mlp_beta_example = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw) 206 | 207 | self.mlp_shared_imagine = nn.Sequential( 208 | nn.Conv2d(label_nc_imagine, nhidden, kernel_size=ks, padding=pw), 209 | nn.ReLU() 210 | ) 211 | self.mlp_gamma_imagine = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw) 212 | self.mlp_beta_imagine = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw) 213 | 214 | def forward(self, x, warpmap, segmap, similarity_map): 215 | similarity_map = similarity_map.detach() 216 | # Part 1. generate parameter-free normalized activations 217 | normalized = self.param_free_norm(x) 218 | 219 | # Part 2. 
produce scaling and bias conditioned on semantic map 220 | segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest') 221 | warpmap = F.interpolate(warpmap, size=x.size()[2:], mode='nearest') 222 | actv_example = self.mlp_shared_example(warpmap) 223 | actv_imagine = self.mlp_shared_imagine(segmap) 224 | if self.pad_type != 'zero': 225 | gamma_example = self.mlp_gamma_example(self.pad(actv_example)) 226 | beta_example = self.mlp_beta_example(self.pad(actv_example)) 227 | gamma_imagine = self.mlp_gamma_imagine(self.pad(actv_imagine)) 228 | beta_imagine = self.mlp_beta_imagine(self.pad(actv_imagine)) 229 | else: 230 | gamma_example = self.mlp_gamma_example(actv_example) 231 | beta_example = self.mlp_beta_example(actv_example) 232 | gamma_imagine = self.mlp_gamma_imagine(actv_imagine) 233 | beta_imagine = self.mlp_beta_imagine(actv_imagine) 234 | 235 | similarity_map = F.interpolate(similarity_map, size=x.size()[2:], mode='nearest') 236 | gamma = gamma_example * similarity_map + gamma_imagine * (1 - similarity_map) 237 | beta = beta_example * similarity_map + beta_imagine * (1 - similarity_map) 238 | # apply scale and bias 239 | out = normalized * (1 + gamma) + beta 240 | 241 | return out 242 | 243 | class EqualLR: 244 | def __init__(self, name): 245 | self.name = name 246 | def compute_weight(self, module): 247 | weight = getattr(module, self.name + '_orig') 248 | fan_in = weight.data.size(1) * weight.data[0][0].numel() 249 | return weight * np.sqrt(2 / fan_in) 250 | 251 | @staticmethod 252 | def apply(module, name): 253 | fn = EqualLR(name) 254 | weight = getattr(module, name) 255 | del module._parameters[name] 256 | module.register_parameter(name + '_orig', nn.Parameter(weight.data)) 257 | module.register_forward_pre_hook(fn) 258 | return fn 259 | def __call__(self, module, input): 260 | weight = self.compute_weight(module) 261 | setattr(module, self.name, weight) 262 | 263 | 264 | def equal_lr(module, name='weight'): 265 | EqualLR.apply(module, name) 266 | return module -------------------------------------------------------------------------------- /options/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. -------------------------------------------------------------------------------- /options/base_options.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | import sys 7 | import argparse 8 | import os 9 | from util import util 10 | import torch 11 | import models 12 | import data 13 | import pickle 14 | 15 | 16 | class BaseOptions(): 17 | def __init__(self): 18 | self.initialized = False 19 | 20 | def initialize(self, parser): 21 | # experiment specifics 22 | parser.add_argument('--name', type=str, default='label2coco', help='name of the experiment. It decides where to store samples and models') 23 | 24 | parser.add_argument('--gpu_ids', type=str, default='0,1,2,3', help='gpu ids: e.g. 0 0,1,2, 0,2. 
use -1 for CPU') 25 | parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here') 26 | parser.add_argument('--model', type=str, default='pix2pix', help='which model to use') 27 | parser.add_argument('--norm_G', type=str, default='spectralinstance', help='instance normalization or batch normalization') 28 | parser.add_argument('--norm_D', type=str, default='spectralinstance', help='instance normalization or batch normalization') 29 | parser.add_argument('--norm_E', type=str, default='spectralinstance', help='instance normalization or batch normalization') 30 | parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') 31 | 32 | # input/output sizes 33 | parser.add_argument('--batchSize', type=int, default=4, help='input batch size') 34 | parser.add_argument('--preprocess_mode', type=str, default='scale_width_and_crop', help='scaling and cropping of images at load time.', choices=("resize_and_crop", "crop", "scale_width", "scale_width_and_crop", "scale_shortside", "scale_shortside_and_crop", "fixed", "none")) 35 | parser.add_argument('--load_size', type=int, default=256, help='Scale images to this size. The final image will be cropped to --crop_size.') 36 | parser.add_argument('--crop_size', type=int, default=256, help='Crop to the width of crop_size (after initially scaling the images to load_size).') 37 | parser.add_argument('--aspect_ratio', type=float, default=1.0, help='The ratio width/height. The final height of the loaded image will be crop_size/aspect_ratio') 38 | parser.add_argument('--label_nc', type=int, default=182, help='# of input label classes without unknown class. If you have unknown class as class label, specify --contain_dontcare_label.') 39 | parser.add_argument('--contain_dontcare_label', action='store_true', help='if the label map contains dontcare label (dontcare=255)') 40 | parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels') 41 | 42 | # for setting inputs 43 | parser.add_argument('--dataroot', type=str, default='/mnt/blob/Dataset/ADEChallengeData2016/images') 44 | parser.add_argument('--dataset_mode', type=str, default='ade20k') 45 | parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly') 46 | parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation') 47 | parser.add_argument('--nThreads', default=16, type=int, help='# threads for loading data') 48 | parser.add_argument('--max_dataset_size', type=int, default=sys.maxsize, help='Maximum number of samples allowed per dataset.
If the dataset directory contains more than max_dataset_size, only a subset is loaded.') 49 | parser.add_argument('--load_from_opt_file', action='store_true', help='load the options from checkpoints and use that as default') 50 | parser.add_argument('--cache_filelist_write', action='store_true', help='saves the current filelist into a text file, so that it loads faster') 51 | parser.add_argument('--cache_filelist_read', action='store_true', help='reads from the file list cache') 52 | 53 | # for displays 54 | parser.add_argument('--display_winsize', type=int, default=400, help='display window size') 55 | 56 | # for generator 57 | parser.add_argument('--netG', type=str, default='spade', help='selects model to use for netG (pix2pixhd | spade)') 58 | parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer') 59 | parser.add_argument('--init_type', type=str, default='xavier', help='network initialization [normal|xavier|kaiming|orthogonal]') 60 | parser.add_argument('--init_variance', type=float, default=0.02, help='variance of the initialization distribution') 61 | parser.add_argument('--z_dim', type=int, default=256, 62 | help="dimension of the latent z vector") 63 | 64 | # for instance-wise features 65 | parser.add_argument('--CBN_intype', type=str, default='warp_mask', help='type of CBN input for framework, warp/mask/warp_mask') 66 | parser.add_argument('--maskmix', action='store_true', help='use mask in correspondence net') 67 | parser.add_argument('--use_attention', action='store_true', help='add nonlocal block in G and D') 68 | parser.add_argument('--warp_mask_losstype', type=str, default='none', help='type of warped mask loss, none/direct/cycle') 69 | parser.add_argument('--show_warpmask', action='store_true', help='save warp mask') 70 | parser.add_argument('--match_kernel', type=int, default=3, help='correspondence matrix match kernel size') 71 | parser.add_argument('--adaptor_kernel', type=int, default=3, help='kernel size in domain adaptor') 72 | parser.add_argument('--PONO', action='store_true', help='use positional normalization') 73 | parser.add_argument('--PONO_C', action='store_true', help='use C normalization in corr module') 74 | parser.add_argument('--eqlr_sn', action='store_true', help='if true, use equalized learning rate, else use spectral norm') 75 | parser.add_argument('--vgg_normal_correct', action='store_true', help='if true, correct vgg normalization and replace vgg FM model with ctx model') 76 | parser.add_argument('--weight_domainC', type=float, default=0.0, help='weight of domain classification loss for domain adaptation') 77 | parser.add_argument('--domain_rela', action='store_true', help='if true, use relativistic loss in domain classifier') 78 | parser.add_argument('--use_ema', action='store_true', help='if true, use EMA in G') 79 | parser.add_argument('--ema_beta', type=float, default=0.999, help='beta in ema setting') 80 | parser.add_argument('--warp_cycle_w', type=float, default=0.0, help='push warp cycle to ref') 81 | parser.add_argument('--two_cycle', action='store_true', help='input to ref and back') 82 | parser.add_argument('--apex', action='store_true', help='if true, use apex') 83 | parser.add_argument('--warp_bilinear', action='store_true', help='if true, upsample warp by bilinear interpolation') 84 | parser.add_argument('--adaptor_res_deeper', action='store_true', help='if true, use 6 res blocks in domain adaptor') 85 | parser.add_argument('--adaptor_nonlocal', action='store_true', help='if true, use nonlocal block in domain adaptor') 86 |
parser.add_argument('--adaptor_se', action='store_true', help='if true, use se layer in domain adaptor') 87 | parser.add_argument('--dilation_conv', action='store_true', help='if true, use dilation conv in domain adaptor when adaptor_res_deeper is True') 88 | parser.add_argument('--use_coordconv', action='store_true', help='if true, use coordconv in CorrNet') 89 | parser.add_argument('--warp_patch', action='store_true', help='use corr matrix to warp 4*4 patch') 90 | parser.add_argument('--warp_stride', type=int, default=4, help='corr matrix 256 / warp_stride') 91 | parser.add_argument('--mask_noise', action='store_true', help='use noise with mask') 92 | parser.add_argument('--noise_for_mask', action='store_true', help='replace mask with noise') 93 | parser.add_argument('--video_like', action='store_true', help='useful in deepfashion') 94 | self.initialized = True 95 | return parser 96 | 97 | def gather_options(self): 98 | # initialize parser with basic options 99 | if not self.initialized: 100 | parser = argparse.ArgumentParser( 101 | formatter_class=argparse.ArgumentDefaultsHelpFormatter) 102 | parser = self.initialize(parser) 103 | 104 | # get the basic options 105 | opt, unknown = parser.parse_known_args() 106 | 107 | # modify model-related parser options 108 | model_name = opt.model 109 | model_option_setter = models.get_option_setter(model_name) 110 | parser = model_option_setter(parser, self.isTrain) 111 | 112 | # modify dataset-related parser options 113 | dataset_mode = opt.dataset_mode 114 | dataset_option_setter = data.get_option_setter(dataset_mode) 115 | parser = dataset_option_setter(parser, self.isTrain) 116 | 117 | opt, unknown = parser.parse_known_args() 118 | 119 | # if there is opt_file, load it. 120 | # The previous default options will be overwritten 121 | if opt.load_from_opt_file: 122 | parser = self.update_options_from_file(parser, opt) 123 | 124 | opt = parser.parse_args() 125 | self.parser = parser 126 | return opt 127 | 128 | def print_options(self, opt): 129 | message = '' 130 | message += '----------------- Options ---------------\n' 131 | for k, v in sorted(vars(opt).items()): 132 | comment = '' 133 | default = self.parser.get_default(k) 134 | if v != default: 135 | comment = '\t[default: %s]' % str(default) 136 | message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment) 137 | message += '----------------- End -------------------' 138 | print(message) 139 | 140 | def option_file_path(self, opt, makedir=False): 141 | expr_dir = os.path.join(opt.checkpoints_dir, opt.name) 142 | if makedir: 143 | util.mkdirs(expr_dir) 144 | file_name = os.path.join(expr_dir, 'opt') 145 | return file_name 146 | 147 | def save_options(self, opt): 148 | file_name = self.option_file_path(opt, makedir=True) 149 | with open(file_name + '.txt', 'wt') as opt_file: 150 | for k, v in sorted(vars(opt).items()): 151 | comment = '' 152 | default = self.parser.get_default(k) 153 | if v != default: 154 | comment = '\t[default: %s]' % str(default) 155 | opt_file.write('{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)) 156 | 157 | with open(file_name + '.pkl', 'wb') as opt_file: 158 | pickle.dump(opt, opt_file) 159 | 160 | def update_options_from_file(self, parser, opt): 161 | new_opt = self.load_options(opt) 162 | for k, v in sorted(vars(opt).items()): 163 | if hasattr(new_opt, k) and v != getattr(new_opt, k): 164 | new_val = getattr(new_opt, k) 165 | parser.set_defaults(**{k: new_val}) 166 | return parser 167 | 168 | def load_options(self, opt): 169 | file_name = 
self.option_file_path(opt, makedir=False) 170 | new_opt = pickle.load(open(file_name + '.pkl', 'rb')) 171 | return new_opt 172 | 173 | def parse(self, save=False): 174 | 175 | opt = self.gather_options() #gather options from base, train, dataset, model 176 | opt.isTrain = self.isTrain # train or test 177 | 178 | self.print_options(opt) 179 | if opt.isTrain: 180 | self.save_options(opt) 181 | 182 | # Set semantic_nc based on the option. 183 | # This will be convenient in many places 184 | opt.semantic_nc = opt.label_nc + \ 185 | (1 if opt.contain_dontcare_label else 0) 186 | 187 | # set gpu ids 188 | str_ids = opt.gpu_ids.split(',') 189 | opt.gpu_ids = [] 190 | for str_id in str_ids: 191 | id = int(str_id) 192 | if id >= 0: 193 | opt.gpu_ids.append(id) 194 | if len(opt.gpu_ids) > 0: 195 | torch.cuda.set_device(opt.gpu_ids[0]) 196 | 197 | assert len(opt.gpu_ids) == 0 or opt.batchSize % len(opt.gpu_ids) == 0, \ 198 | "Batch size %d is wrong. It must be a multiple of # GPUs %d." \ 199 | % (opt.batchSize, len(opt.gpu_ids)) 200 | 201 | self.opt = opt 202 | return self.opt 203 | -------------------------------------------------------------------------------- /options/test_options.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 4 | """ 5 | 6 | from .base_options import BaseOptions 7 | 8 | 9 | class TestOptions(BaseOptions): 10 | def initialize(self, parser): 11 | BaseOptions.initialize(self, parser) 12 | parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.') 13 | parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') 14 | parser.add_argument('--how_many', type=int, default=float("inf"), help='how many test images to run') 15 | parser.add_argument('--save_per_img', action='store_true', help='if specified, save per image') 16 | parser.add_argument('--show_corr', action='store_true', help='if specified, save bilinear upsample correspondence') 17 | 18 | parser.set_defaults(preprocess_mode='scale_width_and_crop', crop_size=256, load_size=256, display_winsize=256) 19 | parser.set_defaults(serial_batches=True) 20 | parser.set_defaults(no_flip=True) 21 | parser.set_defaults(phase='test') 22 | self.isTrain = False 23 | return parser 24 | -------------------------------------------------------------------------------- /options/train_options.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
4 | """ 5 | 6 | from .base_options import BaseOptions 7 | 8 | 9 | class TrainOptions(BaseOptions): 10 | def initialize(self, parser): 11 | BaseOptions.initialize(self, parser) 12 | # for displays 13 | parser.add_argument('--display_freq', type=int, default=2000, help='frequency of showing training results on screen') 14 | parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console') 15 | parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results') 16 | parser.add_argument('--save_epoch_freq', type=int, default=10, help='frequency of saving checkpoints at the end of epochs') 17 | 18 | # for training 19 | parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') 20 | parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') 21 | parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate. This is NOT the total #epochs. Totla #epochs is niter + niter_decay') 22 | parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero') 23 | parser.add_argument('--optimizer', type=str, default='adam') 24 | parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') 25 | parser.add_argument('--beta2', type=float, default=0.999, help='momentum term of adam') 26 | parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam') 27 | parser.add_argument('--D_steps_per_G', type=int, default=1, help='number of discriminator iterations per generator iterations.') 28 | 29 | # for discriminators 30 | parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer') 31 | parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss') 32 | parser.add_argument('--lambda_vgg', type=float, default=10.0, help='weight for vgg loss') 33 | parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss') 34 | parser.add_argument('--gan_mode', type=str, default='hinge', help='(ls|original|hinge)') 35 | parser.add_argument('--netD', type=str, default='multiscale', help='(n_layers|multiscale|image)') 36 | parser.add_argument('--no_TTUR', action='store_true', help='Use TTUR training scheme') 37 | 38 | parser.add_argument('--which_perceptual', type=str, default='5_2', help='relu5_2 or relu4_2') 39 | parser.add_argument('--weight_perceptual', type=float, default=0.01) 40 | parser.add_argument('--weight_mask', type=float, default=0.0, help='weight of warped mask loss, used in direct/cycle') 41 | parser.add_argument('--real_reference_probability', type=float, default=0.7, help='self-supervised training probability') 42 | parser.add_argument('--hard_reference_probability', type=float, default=0.2, help='hard reference training probability') 43 | parser.add_argument('--weight_gan', type=float, default=10.0, help='weight of all loss in stage1') 44 | parser.add_argument('--novgg_featpair', type=float, default=10.0, help='in no vgg setting, use pair feat loss in domain adaptation') 45 | parser.add_argument('--D_cam', type=float, default=0.0, help='weight of CAM loss in D') 46 | parser.add_argument('--warp_self_w', type=float, default=0.0, help='push warp self to ref') 47 | parser.add_argument('--fm_ratio', type=float, 
default=0.1, help='vgg feature matching loss weight compared with the ctx loss') 48 | parser.add_argument('--use_22ctx', action='store_true', help='if true, also use 2-2 in ctx loss') 49 | parser.add_argument('--ctx_w', type=float, default=1.0, help='ctx loss weight') 50 | parser.add_argument('--mask_epoch', type=int, default=-1, help='useful when noise_for_mask is true: train the first mask_epoch epochs with mask, the remaining epochs with noise') 51 | 52 | self.isTrain = True 53 | return parser 54 | -------------------------------------------------------------------------------- /output/test/ade20k/0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/output/test/ade20k/0.png -------------------------------------------------------------------------------- /output/test/celebahq/0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/output/test/celebahq/0.png -------------------------------------------------------------------------------- /output/test/celebahqedge/0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/output/test/celebahqedge/0.png -------------------------------------------------------------------------------- /output/test/deepfashion/0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/output/test/deepfashion/0.png -------------------------------------------------------------------------------- /output/test/teaser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/output/test/teaser.png -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | matplotlib 2 | torchvision>=0.2.2 3 | scikit-image==0.14.2 4 | opencv-python 5 | torch>=1.0.0 6 | scipy -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License.
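# ---------------------------------------------------------------------------
# Editor's note: TrainOptions above defaults --gan_mode to 'hinge'. A minimal
# sketch of the hinge adversarial objective that name conventionally denotes
# (the project's actual implementation lives in models/networks/loss.py,
# only partially shown in this dump):
import torch
import torch.nn.functional as F

def hinge_d_loss(pred_real, pred_fake):
    # discriminator: push real scores above +1 and fake scores below -1
    return F.relu(1. - pred_real).mean() + F.relu(1. + pred_fake).mean()

def hinge_g_loss(pred_fake):
    # generator: raise the discriminator's score on generated samples
    return -pred_fake.mean()

print(hinge_d_loss(torch.ones(8), -torch.ones(8)).item())  # 0.0, fully separated
print(hinge_g_loss(torch.ones(8)).item())                  # -1.0
# ---------------------------------------------------------------------------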
3 | 4 | import os 5 | from collections import OrderedDict 6 | import torch 7 | import torchvision.utils as vutils 8 | import torch.nn.functional as F 9 | import data 10 | import numpy as np 11 | from util.util import masktorgb 12 | from options.test_options import TestOptions 13 | from models.pix2pix_model import Pix2PixModel 14 | 15 | opt = TestOptions().parse() 16 | 17 | torch.manual_seed(0) 18 | dataloader = data.create_dataloader(opt) 19 | dataloader.dataset[0] 20 | 21 | model = Pix2PixModel(opt) 22 | model.eval() 23 | 24 | save_root = os.path.join(os.path.dirname(opt.checkpoints_dir), 'output') 25 | 26 | # test 27 | for i, data_i in enumerate(dataloader): 28 | print('{} / {}'.format(i, len(dataloader))) 29 | if i * opt.batchSize >= opt.how_many: 30 | break 31 | imgs_num = data_i['label'].shape[0] 32 | #data_i['stage1'] = torch.ones_like(data_i['stage1']) 33 | 34 | out = model(data_i, mode='inference') 35 | if opt.save_per_img: 36 | root = save_root + '/test_per_img/' 37 | if not os.path.exists(root + opt.name): 38 | os.makedirs(root + opt.name) 39 | imgs = out['fake_image'].data.cpu() 40 | try: 41 | imgs = (imgs + 1) / 2 42 | for b in range(imgs.shape[0]): # use b, not i, so the outer dataloader index is not clobbered 43 | if opt.dataset_mode == 'deepfashion': 44 | name = data_i['path'][b].split('Dataset/DeepFashion/')[-1].replace('/', '_') 45 | else: 46 | name = os.path.basename(data_i['path'][b]) 47 | vutils.save_image(imgs[b:b+1], root + opt.name + '/' + name, 48 | nrow=1, padding=0, normalize=False) 49 | except OSError as err: 50 | print(err) 51 | else: 52 | if not os.path.exists(save_root + '/test/' + opt.name): 53 | os.makedirs(save_root + '/test/' + opt.name) 54 | 55 | if opt.dataset_mode == 'deepfashion': 56 | label = data_i['label'][:,:3,:,:] 57 | elif opt.dataset_mode == 'celebahqedge': 58 | label = data_i['label'].expand(-1, 3, -1, -1).float() 59 | else: 60 | label = masktorgb(data_i['label'].cpu().numpy()) 61 | label = torch.from_numpy(label).float() / 128 - 1 62 | 63 | imgs = torch.cat((label.cpu(), data_i['ref'].cpu(), out['fake_image'].data.cpu()), 0) 64 | try: 65 | imgs = (imgs + 1) / 2 66 | vutils.save_image(imgs, save_root + '/test/' + opt.name + '/' + str(i) + '.png', 67 | nrow=imgs_num, padding=0, normalize=False) 68 | except OSError as err: 69 | print(err) 70 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License.
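# ---------------------------------------------------------------------------
# Editor's note on test.py (above): a hypothetical invocation, using only
# options defined in options/base_options.py and options/test_options.py;
# the experiment name and data path are placeholders:
#
#   python test.py --name ade20k_exp --dataset_mode ade20k \
#       --dataroot /path/to/ADEChallengeData2016/images \
#       --gpu_ids 0 --batchSize 4 --which_epoch latest --save_per_img
#
# With --save_per_img set, one PNG per generated image lands under
# <dirname(checkpoints_dir)>/output/test_per_img/<name>/; otherwise the
# label/reference/fake triplets of each batch are tiled into a single PNG
# under .../output/test/<name>/, as the loop above shows.
# ---------------------------------------------------------------------------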
3 | 4 | import os 5 | import numpy as np 6 | import torch 7 | import torchvision.utils as vutils 8 | import sys 9 | from collections import OrderedDict 10 | from options.train_options import TrainOptions 11 | import data 12 | from util.iter_counter import IterationCounter 13 | from util.util import print_current_errors 14 | from trainers.pix2pix_trainer import Pix2PixTrainer 15 | import torch.nn.functional as F 16 | 17 | # parse options 18 | opt = TrainOptions().parse() 19 | 20 | # print options to help debugging 21 | print(' '.join(sys.argv)) 22 | 23 | #torch.manual_seed(0) 24 | # load the dataset 25 | dataloader = data.create_dataloader(opt) 26 | len_dataloader = len(dataloader) 27 | dataloader.dataset[11] 28 | 29 | # create tool for counting iterations 30 | iter_counter = IterationCounter(opt, len(dataloader)) 31 | 32 | # create trainer for our model 33 | trainer = Pix2PixTrainer(opt, resume_epoch=iter_counter.first_epoch) 34 | 35 | save_root = os.path.join(os.path.dirname(opt.checkpoints_dir), 'output', opt.name) 36 | for epoch in iter_counter.training_epochs(): 37 | opt.epoch = epoch 38 | if not opt.maskmix: 39 | print('inject nothing') 40 | elif opt.maskmix and opt.noise_for_mask and epoch > opt.mask_epoch: 41 | print('inject noise') 42 | else: 43 | print('inject mask') 44 | print('real_reference_probability is :{}'.format(dataloader.dataset.real_reference_probability)) 45 | print('hard_reference_probability is :{}'.format(dataloader.dataset.hard_reference_probability)) 46 | iter_counter.record_epoch_start(epoch) 47 | for i, data_i in enumerate(dataloader, start=iter_counter.epoch_iter): 48 | iter_counter.record_one_iteration() 49 | #use for Domain adaptation loss 50 | p = min(float(i + (epoch - 1) * len_dataloader) / 50 / len_dataloader, 1) 51 | alpha = 2. / (1. 
+ np.exp(-10 * p)) - 1 52 | # Training 53 | # train generator 54 | if i % opt.D_steps_per_G == 0: 55 | trainer.run_generator_one_step(data_i, alpha=alpha) 56 | 57 | # train discriminator 58 | trainer.run_discriminator_one_step(data_i) 59 | 60 | if iter_counter.needs_printing(): 61 | losses = trainer.get_latest_losses() 62 | try: 63 | print_current_errors(opt, epoch, iter_counter.epoch_iter, 64 | losses, iter_counter.time_per_iter) 65 | except OSError as err: 66 | print(err) 67 | 68 | if iter_counter.needs_displaying(): 69 | if not os.path.exists(save_root): # save_root already ends with opt.name 70 | os.makedirs(save_root) 71 | imgs_num = data_i['label'].shape[0] 72 | if opt.dataset_mode == 'celebahq': 73 | data_i['label'] = data_i['label'][:,::2,:,:] 74 | elif opt.dataset_mode == 'celebahqedge': 75 | data_i['label'] = data_i['label'][:,:1,:,:] 76 | elif opt.dataset_mode == 'deepfashion': 77 | data_i['label'] = data_i['label'][:,:3,:,:] 78 | if data_i['label'].shape[1] == 3: 79 | label = data_i['label'] 80 | else: 81 | label = data_i['label'].expand(-1, 3, -1, -1).float() / data_i['label'].max() 82 | 83 | cycleshow = None 84 | if opt.warp_cycle_w > 0: 85 | cycleshow = trainer.out['warp_cycle'] if opt.warp_patch else F.interpolate(trainer.out['warp_cycle'], scale_factor=opt.warp_stride) 86 | if opt.two_cycle: 87 | cycleshow = torch.cat((cycleshow, F.interpolate(trainer.out['warp_i2r'], scale_factor=opt.warp_stride), F.interpolate(trainer.out['warp_i2r2i'], scale_factor=opt.warp_stride)), 0) 88 | 89 | if cycleshow is not None: 90 | imgs = torch.cat((label.cpu(), data_i['ref'].cpu(), trainer.out['warp_out'].cpu(), cycleshow.cpu(), trainer.get_latest_generated().data.cpu(), data_i['image'].cpu()), 0) 91 | else: 92 | imgs = torch.cat((label.cpu(), data_i['ref'].cpu(), trainer.out['warp_out'].cpu(), trainer.get_latest_generated().data.cpu(), data_i['image'].cpu()), 0) 93 | 94 | try: 95 | vutils.save_image(imgs, os.path.join(save_root, str(epoch) + '_' + str(iter_counter.total_steps_so_far) + '.png'), 96 | nrow=imgs_num, padding=0, normalize=True) 97 | except OSError as err: 98 | print(err) 99 | 100 | if iter_counter.needs_saving(): 101 | print('saving the latest model (epoch %d, total_steps %d)' % 102 | (epoch, iter_counter.total_steps_so_far)) 103 | try: 104 | trainer.save('latest') 105 | iter_counter.record_current_iter() 106 | except OSError as err: 107 | print(err) 108 | 109 | trainer.update_learning_rate(epoch) 110 | iter_counter.record_epoch_end() 111 | 112 | if epoch % opt.save_epoch_freq == 0 or \ 113 | epoch == iter_counter.total_epochs: 114 | print('saving the model at the end of epoch %d, iters %d' % 115 | (epoch, iter_counter.total_steps_so_far)) 116 | try: 117 | trainer.save('latest') 118 | trainer.save(epoch) 119 | except OSError as err: 120 | print(err) 121 | 122 | print('Training finished successfully.') 123 | -------------------------------------------------------------------------------- /trainers/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. -------------------------------------------------------------------------------- /trainers/pix2pix_trainer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License.
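# ---------------------------------------------------------------------------
# Editor's note on train.py (above): the domain-adaptation weight follows the
# standard DANN ramp-up, alpha = 2/(1 + exp(-10*p)) - 1, which rises smoothly
# from 0 to ~1 as the training progress p goes from 0 to 1 (here p saturates
# after 50 epochs). A small sketch of the schedule:
import numpy as np
for p in (0.0, 0.1, 0.5, 1.0):
    alpha = 2. / (1. + np.exp(-10 * p)) - 1
    print('p=%.1f -> alpha=%.3f' % (p, alpha))  # 0.000, 0.462, 0.987, 1.000
# ---------------------------------------------------------------------------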
3 | 4 | import os 5 | import copy 6 | import sys 7 | import torch 8 | from models.networks.sync_batchnorm import DataParallelWithCallback 9 | from models.pix2pix_model import Pix2PixModel 10 | from models.networks.generator import EMA 11 | import util.util as util 12 | 13 | class Pix2PixTrainer(): 14 | """ 15 | Trainer creates the model and optimizers, and uses them to 16 | update the weights of the network while reporting losses 17 | and the latest visuals to visualize the progress in training. 18 | """ 19 | 20 | def __init__(self, opt, resume_epoch=0): 21 | self.opt = opt 22 | self.pix2pix_model = Pix2PixModel(opt) 23 | if len(opt.gpu_ids) > 1: 24 | self.pix2pix_model = DataParallelWithCallback(self.pix2pix_model, 25 | device_ids=opt.gpu_ids) 26 | self.pix2pix_model_on_one_gpu = self.pix2pix_model.module 27 | else: 28 | self.pix2pix_model.to(opt.gpu_ids[0]) 29 | self.pix2pix_model_on_one_gpu = self.pix2pix_model 30 | 31 | if opt.use_ema: 32 | self.netG_ema = EMA(opt.ema_beta) 33 | for name, param in self.pix2pix_model_on_one_gpu.net['netG'].named_parameters(): 34 | if param.requires_grad: 35 | self.netG_ema.register(name, param.data) 36 | self.netCorr_ema = EMA(opt.ema_beta) 37 | for name, param in self.pix2pix_model_on_one_gpu.net['netCorr'].named_parameters(): 38 | if param.requires_grad: 39 | self.netCorr_ema.register(name, param.data) 40 | 41 | self.generated = None 42 | if opt.isTrain: 43 | self.optimizer_G, self.optimizer_D = \ 44 | self.pix2pix_model_on_one_gpu.create_optimizers(opt) 45 | self.old_lr = opt.lr 46 | if opt.continue_train and opt.which_epoch == 'latest': 47 | checkpoint = torch.load(os.path.join(opt.checkpoints_dir, opt.name, 'optimizer.pth')) 48 | self.optimizer_G.load_state_dict(checkpoint['G']) 49 | self.optimizer_D.load_state_dict(checkpoint['D']) 50 | self.last_data, self.last_netCorr, self.last_netG, self.last_optimizer_G = None, None, None, None 51 | 52 | def run_generator_one_step(self, data, alpha=1): 53 | self.optimizer_G.zero_grad() 54 | g_losses, out = self.pix2pix_model(data, mode='generator', alpha=alpha) 55 | g_loss = sum(g_losses.values()).mean() 56 | g_loss.backward() 57 | self.optimizer_G.step() 58 | self.g_losses = g_losses 59 | self.out = out 60 | if self.opt.use_ema: 61 | self.netG_ema(self.pix2pix_model_on_one_gpu.net['netG']) 62 | self.netCorr_ema(self.pix2pix_model_on_one_gpu.net['netCorr']) 63 | 64 | def run_discriminator_one_step(self, data): 65 | self.optimizer_D.zero_grad() 66 | GforD = {} 67 | GforD['fake_image'] = self.out['fake_image'] 68 | GforD['adaptive_feature_seg'] = self.out['adaptive_feature_seg'] 69 | GforD['adaptive_feature_img'] = self.out['adaptive_feature_img'] 70 | d_losses = self.pix2pix_model(data, mode='discriminator', GforD=GforD) 71 | d_loss = sum(d_losses.values()).mean() 72 | d_loss.backward() 73 | self.optimizer_D.step() 74 | self.d_losses = d_losses 75 | 76 | def get_latest_losses(self): 77 | return {**self.g_losses, **self.d_losses} 78 | 79 | def get_latest_generated(self): 80 | return self.out['fake_image'] 81 | 82 | # NOTE: update_learning_rate(epoch) is defined in the helper-function 83 | # section below. 84 | 85 | def save(self, epoch): 86 | self.pix2pix_model_on_one_gpu.save(epoch) 87 | if self.opt.use_ema: 88 | self.netG_ema.assign(self.pix2pix_model_on_one_gpu.net['netG']) 89 | util.save_network(self.pix2pix_model_on_one_gpu.net['netG'], 'G_ema', epoch, self.opt) 90 | self.netG_ema.resume(self.pix2pix_model_on_one_gpu.net['netG']) 91 | 92 | self.netCorr_ema.assign(self.pix2pix_model_on_one_gpu.net['netCorr']) 93 |
util.save_network(self.pix2pix_model_on_one_gpu.net['netCorr'], 'netCorr_ema', epoch, self.opt) 94 | self.netCorr_ema.resume(self.pix2pix_model_on_one_gpu.net['netCorr']) 95 | if epoch == 'latest': 96 | torch.save({'G': self.optimizer_G.state_dict(), 97 | 'D': self.optimizer_D.state_dict(), 98 | 'lr': self.old_lr, 99 | }, os.path.join(self.opt.checkpoints_dir, self.opt.name, 'optimizer.pth')) 100 | 101 | ################################################################## 102 | # Helper functions 103 | ################################################################## 104 | 105 | def update_learning_rate(self, epoch): 106 | if epoch > self.opt.niter: 107 | lrd = self.opt.lr / self.opt.niter_decay 108 | new_lr = self.old_lr - lrd 109 | else: 110 | new_lr = self.old_lr 111 | 112 | if new_lr != self.old_lr: 113 | if self.opt.no_TTUR: 114 | new_lr_G = new_lr 115 | new_lr_D = new_lr 116 | else: 117 | new_lr_G = new_lr / 2 118 | new_lr_D = new_lr * 2 119 | 120 | for param_group in self.optimizer_D.param_groups: 121 | param_group['lr'] = new_lr_D 122 | for param_group in self.optimizer_G.param_groups: 123 | param_group['lr'] = new_lr_G 124 | print('update learning rate: %f -> %f' % (self.old_lr, new_lr)) 125 | self.old_lr = new_lr 126 | 127 | def update_fixed_params(self): 128 | for param in self.pix2pix_model_on_one_gpu.net['netCorr'].parameters(): 129 | param.requires_grad = True 130 | G_params = [{'params': self.pix2pix_model_on_one_gpu.net['netG'].parameters(), 'lr': self.opt.lr*0.5}] 131 | G_params += [{'params': self.pix2pix_model_on_one_gpu.net['netCorr'].parameters(), 'lr': self.opt.lr*0.5}] 132 | if self.opt.no_TTUR: 133 | beta1, beta2 = self.opt.beta1, self.opt.beta2 134 | G_lr = self.opt.lr 135 | else: 136 | beta1, beta2 = 0, 0.9 137 | G_lr = self.opt.lr / 2 138 | 139 | self.optimizer_G = torch.optim.Adam(G_params, lr=G_lr, betas=(beta1, beta2), eps=1e-3) -------------------------------------------------------------------------------- /util/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. -------------------------------------------------------------------------------- /util/color150.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/CoCosNet/de0c1bbd3389e0ae4631997513d7ddca32ce4432/util/color150.mat -------------------------------------------------------------------------------- /util/iter_counter.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2019 NVIDIA Corporation. All rights reserved. 3 | Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode). 
4 | """ 5 | 6 | import os 7 | import time 8 | import numpy as np 9 | 10 | 11 | # Helper class that keeps track of training iterations 12 | class IterationCounter(): 13 | def __init__(self, opt, dataset_size): 14 | self.opt = opt 15 | self.dataset_size = dataset_size 16 | 17 | self.first_epoch = 1 18 | self.total_epochs = opt.niter + opt.niter_decay 19 | self.epoch_iter = 0 # iter number within each epoch 20 | self.iter_record_path = os.path.join(self.opt.checkpoints_dir, self.opt.name, 'iter.txt') 21 | if opt.isTrain and opt.continue_train: 22 | try: 23 | self.first_epoch, self.epoch_iter = np.loadtxt( 24 | self.iter_record_path, delimiter=',', dtype=int) 25 | print('Resuming from epoch %d at iteration %d' % (self.first_epoch, self.epoch_iter)) 26 | except: 27 | print('Could not load iteration record at %s. Starting from beginning.' % 28 | self.iter_record_path) 29 | 30 | self.total_steps_so_far = (self.first_epoch - 1) * dataset_size + self.epoch_iter 31 | 32 | # return the iterator of epochs for the training 33 | def training_epochs(self): 34 | return range(self.first_epoch, self.total_epochs + 1) 35 | 36 | def record_epoch_start(self, epoch): 37 | self.epoch_start_time = time.time() 38 | self.epoch_iter = 0 39 | self.last_iter_time = time.time() 40 | self.current_epoch = epoch 41 | 42 | def record_one_iteration(self): 43 | current_time = time.time() 44 | 45 | # the last remaining batch is dropped (see data/__init__.py), 46 | # so we can assume batch size is always opt.batchSize 47 | self.time_per_iter = (current_time - self.last_iter_time) / self.opt.batchSize 48 | self.last_iter_time = current_time 49 | self.total_steps_so_far += self.opt.batchSize 50 | self.epoch_iter += self.opt.batchSize 51 | 52 | def record_epoch_end(self): 53 | current_time = time.time() 54 | self.time_per_epoch = current_time - self.epoch_start_time 55 | print('End of epoch %d / %d \t Time Taken: %d sec' % 56 | (self.current_epoch, self.total_epochs, self.time_per_epoch)) 57 | if self.current_epoch % self.opt.save_epoch_freq == 0: 58 | np.savetxt(self.iter_record_path, (self.current_epoch + 1, 0), 59 | delimiter=',', fmt='%d') 60 | print('Saved current iteration count at %s.' % self.iter_record_path) 61 | 62 | def record_current_iter(self): 63 | np.savetxt(self.iter_record_path, (self.current_epoch, self.epoch_iter), 64 | delimiter=',', fmt='%d') 65 | print('Saved current iteration count at %s.' % self.iter_record_path) 66 | 67 | def needs_saving(self): 68 | return (self.total_steps_so_far % self.opt.save_latest_freq) < self.opt.batchSize 69 | 70 | def needs_printing(self): 71 | return (self.total_steps_so_far % self.opt.print_freq) < self.opt.batchSize 72 | 73 | def needs_displaying(self): 74 | return (self.total_steps_so_far % self.opt.display_freq) < self.opt.batchSize 75 | -------------------------------------------------------------------------------- /util/mask_to_edge.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Microsoft Corporation. 2 | # Licensed under the MIT License. 
3 | 4 | import os 5 | import cv2 6 | from PIL import Image 7 | import numpy as np 8 | from skimage import feature 9 | # parts = ['skin', 'hair', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'nose', 'u_lip', 'mouth', 'l_lip', 'neck', 10 | # 'cloth', 'hat', 'eye_g', 'ear_r', 'neck_l'] 11 | inner_parts = ['skin', 'l_brow', 'r_brow', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'nose', 'u_lip', 'mouth', 'l_lip', 'eye_g', 'hair'] 12 | root = 'C:/Data/CelebAMask-HQ' 13 | 14 | def get_edges(edge, t): 15 | edge[:,1:] = edge[:,1:] | (t[:,1:] != t[:,:-1]) 16 | edge[:,:-1] = edge[:,:-1] | (t[:,1:] != t[:,:-1]) 17 | edge[1:,:] = edge[1:,:] | (t[1:,:] != t[:-1,:]) 18 | edge[:-1,:] = edge[:-1,:] | (t[1:,:] != t[:-1,:]) 19 | return edge 20 | 21 | for i in range(30000): 22 | img = Image.open(os.path.join(root, 'CelebA-HQ-img', str(i) + '.jpg')).resize((512, 512), resample=Image.BILINEAR) 23 | inner_label = np.ones(img.size, dtype=np.uint8) 24 | edges = np.zeros(img.size, dtype=np.uint8) 25 | subfolder = str(i // 2000) 26 | for part in inner_parts: 27 | edge = np.zeros(img.size, dtype=np.uint8) #this for distance transform map on each facial part 28 | path = os.path.join(root, 'CelebAMask-HQ-mask-anno', subfolder, str(i).zfill(5) + '_' + part + '.png') 29 | if os.path.exists(path): 30 | part_label = Image.open(path).convert('L') 31 | part_label = np.array(part_label) 32 | if part == 'hair': 33 | inner_label[part_label == 255] = 1 34 | else: 35 | inner_label[part_label == 255] = 0 36 | edges = get_edges(edges, part_label) 37 | edge = get_edges(edge, part_label) 38 | im_dist = cv2.distanceTransform(255-edge*255, cv2.DIST_L1, 3) 39 | im_dist = np.clip((im_dist / 3), 0, 255).astype(np.uint8) 40 | #Image.fromarray(im_dist).save(os.path.join(root, 'CelebAMask-HQ-mask-anno', 'parsing_edges', str(i).zfill(5) + '_{}.png'.format(part))) 41 | 42 | # canny edge for background 43 | canny_edges = feature.canny(np.array(img.convert('L'))) 44 | canny_edges = canny_edges * inner_label 45 | 46 | edges += canny_edges 47 | Image.fromarray(edges * 255).save(os.path.join(root, 'CelebAMask-HQ-mask-anno', 'parsing_edges', str(i).zfill(5) + '.png')) --------------------------------------------------------------------------------
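# ---------------------------------------------------------------------------
# Editor's note on util/mask_to_edge.py (above): get_edges marks a pixel
# wherever a label value differs from its horizontal or vertical neighbour,
# so every boundary is drawn on both of its sides. A tiny self-contained
# check with a 4x4 mask holding a 2x2 foreground block:
import numpy as np

def get_edges(edge, t):
    edge[:, 1:] = edge[:, 1:] | (t[:, 1:] != t[:, :-1])
    edge[:, :-1] = edge[:, :-1] | (t[:, 1:] != t[:, :-1])
    edge[1:, :] = edge[1:, :] | (t[1:, :] != t[:-1, :])
    edge[:-1, :] = edge[:-1, :] | (t[1:, :] != t[:-1, :])
    return edge

t = np.zeros((4, 4), dtype=np.uint8)
t[1:3, 1:3] = 255
print(get_edges(np.zeros_like(t), t))  # 1s everywhere except the four corners
# ---------------------------------------------------------------------------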