├── appearance_generation
├── data
│   ├── image_folder.py
│   ├── ov_test_dataset.py
│   └── ov_train_dataset.py
├── models
│   ├── base_model.py
│   ├── models.py
│   ├── networks.py
│   └── ov_pix2pixHD_model.py
├── options
│   ├── base_options.py
│   ├── test_options.py
│   └── train_options.py
├── results
│   └── test_results
│   │   ├── images
│   │   ├── 0_N00098_generated_parse_map.png
│   │   ├── 0_N00098_query_img.png
│   │   ├── 0_N00098_ref_image.png
│   │   ├── 0_N00098_synthesized_image.png
│   │   ├── 1_N00155_generated_parse_map.png
│   │   ├── 1_N00155_query_img.png
│   │   ├── 1_N00155_ref_image.png
│   │   ├── 1_N00155_synthesized_image.png
│   │   ├── 2_N00383_generated_parse_map.png
│   │   ├── 2_N00383_query_img.png
│   │   ├── 2_N00383_ref_image.png
│   │   ├── 2_N00383_synthesized_image.png
│   │   ├── 3_N00807_generated_parse_map.png
│   │   ├── 3_N00807_query_img.png
│   │   ├── 3_N00807_ref_image.png
│   │   ├── 3_N00807_synthesized_image.png
│   │   ├── 4_N01497_generated_parse_map.png
│   │   ├── 4_N01497_query_img.png
│   │   ├── 4_N01497_ref_image.png
│   │   └── 4_N01497_synthesized_image.png
│   │   └── index.html
├── test.py
├── train.py
└── util
│   ├── html.py
│   ├── image_pool.py
│   ├── util.py
│   └── visualizer.py
├── inference
├── data
│   ├── image_folder.py
│   └── ov_test_dataset.py
├── datasets
│   ├── fashion_compatibility
│   │   ├── test_densepose
│   │   │   ├── 0_N00098.npy
│   │   │   ├── 1_N00115.npy
│   │   │   ├── 2_N00383.npy
│   │   │   ├── 3_N00807.npy
│   │   │   └── 4_N01517.npy
│   │   ├── test_query_img
│   │   │   ├── 0_N00098.jpg
│   │   │   ├── 1_N00155.jpg
│   │   │   ├── 2_N00383.jpg
│   │   │   ├── 3_N00807.jpg
│   │   │   └── 4_N01497.jpg
│   │   ├── test_query_label
│   │   │   ├── 0_N00098.png
│   │   │   ├── 1_N00155.png
│   │   │   ├── 2_N00383.png
│   │   │   ├── 3_N00807.png
│   │   │   └── 4_N01497.png
│   │   ├── test_query_ref_label
│   │   │   ├── 0_N00098_synthesized_image_edgemap.png
│   │   │   ├── 1_N00155_synthesized_image_edgemap.png
│   │   │   ├── 2_N00383_synthesized_image_edgemap.png
│   │   │   ├── 3_N00807_synthesized_image_edgemap.png
│   │   │   └── 4_N01497_synthesized_image_edgemap.png
│   │   ├── test_ref_img
│   │   │   ├── 0_N00119.jpg
│   │   │   ├── 1_N00154.jpg
│   │   │   ├── 2_N00382.jpg
│   │   │   ├── 3_N00798.jpg
│   │   │   └── 4_N01801.jpg
│   │   └── test_ref_label
│   │   │   ├── 0_N00119.png
│   │   │   ├── 1_N00154.png
│   │   │   ├── 2_N00382.png
│   │   │   ├── 3_N00798.png
│   │   │   └── 4_N01801.png
│   ├── test_data
│   │   ├── test_densepose
│   │   │   ├── 00_Y0121E0K6-J11@9.npy
│   │   │   ├── 0_4BE21E07Q-K11@10.npy
│   │   │   ├── 0_N00098.npy
│   │   │   ├── 1_B0N21E061-Q11@7.npy
│   │   │   ├── 1_N00115.npy
│   │   │   ├── 2_AM421E00G-K11@8.npy
│   │   │   ├── 2_N00383.npy
│   │   │   ├── 3_31021E00B-A11@8.npy
│   │   │   ├── 3_N00807.npy
│   │   │   ├── 4_AM421E00N-A11@11.npy
│   │   │   ├── 4_N01517.npy
│   │   │   └── 5_ARC21E00O-E11@14.npy
│   │   ├── test_query_img
│   │   │   ├── 00_Y0121E0K6-J11@9.jpg
│   │   │   ├── 0_4BE21E07Q-K11@10.jpg
│   │   │   ├── 0_N00098.jpg
│   │   │   ├── 1_B0N21E061-Q11@7.jpg
│   │   │   ├── 1_N00155.jpg
│   │   │   ├── 2_AM421E00G-K11@8.jpg
│   │   │   ├── 2_N00383.jpg
│   │   │   ├── 3_31021E00B-A11@8.jpg
│   │   │   ├── 3_N00807.jpg
│   │   │   ├── 4_AM421E00N-A11@11.jpg
│   │   │   ├── 4_N01497.jpg
│   │   │   └── 5_ARC21E00O-E11@14.jpg
│   │   ├── test_query_label
│   │   │   ├── 00_Y0121E0K6-J11@9.png
│   │   │   ├── 0_4BE21E07Q-K11@10.png
│   │   │   ├── 0_N00098.png
│   │   │   ├── 1_B0N21E061-Q11@7.png
│   │   │   ├── 1_N00155.png
│   │   │   ├── 2_AM421E00G-K11@8.png
│   │   │   ├── 2_N00383.png
│   │   │   ├── 3_31021E00B-A11@8.png
│   │   │   ├── 3_N00807.png
│   │   │   ├── 4_AM421E00N-A11@11.png
│   │   │   ├── 4_N01497.png
│   │   │   └── 5_ARC21E00O-E11@14.png
│   │   ├── test_ref_img
│   │   │   ├── 00_Y0121E0K7-A11@4.jpg
│   │   │   ├── 0_31021E00B-Q11@12.jpg
│   │   │   ├── 0_N00119.jpg
│   │   │   ├── 1_4BE21E07T-I11@9.jpg
│   │   │   ├── 1_N00154.jpg
│   │   │   ├── 2_AN621D0PD-A11@10.jpg
│   │   │   ├── 2_N00382.jpg
│   │   │   ├── 3_B0N21E062-M11@7.jpg
│   │   │   ├── 3_N00798.jpg
│   │   │   ├── 4_DP521E1CL-C11@10.jpg
│   │   │   ├── 4_N01801.jpg
│   │   │   └── 5_A0F21E034-M11@10.jpg
│   │   └── test_ref_label
│   │   │   ├── 00_Y0121E0K7-A11@4.png
│   │   │   ├── 0_31021E00B-Q11@12.png
│   │   │   ├── 0_N00119.png
│   │   │   ├── 1_4BE21E07T-I11@9.png
│   │   │   ├── 1_N00154.png
│   │   │   ├── 2_AN621D0PD-A11@10.png
│   │   │   ├── 2_N00382.png
│   │   │   ├── 3_B0N21E062-M11@7.png
│   │   │   ├── 3_N00798.png
│   │   │   ├── 4_DP521E1CL-C11@10.png
│   │   │   ├── 4_N01801.png
│   │   │   └── 5_A0F21E034-M11@10.png
│   └── zalando_data
│   │   ├── test_densepose
│   │   ├── 00_Y0121E0K6-J11@9.npy
│   │   ├── 0_4BE21E07Q-K11@10.npy
│   │   ├── 1_B0N21E061-Q11@7.npy
│   │   ├── 2_AM421E00G-K11@8.npy
│   │   ├── 3_31021E00B-A11@8.npy
│   │   ├── 4_AM421E00N-A11@11.npy
│   │   └── 5_ARC21E00O-E11@14.npy
│   │   ├── test_query_img
│   │   ├── 00_Y0121E0K6-J11@9.jpg
│   │   ├── 0_4BE21E07Q-K11@10.jpg
│   │   ├── 1_B0N21E061-Q11@7.jpg
│   │   ├── 2_AM421E00G-K11@8.jpg
│   │   ├── 3_31021E00B-A11@8.jpg
│   │   ├── 4_AM421E00N-A11@11.jpg
│   │   └── 5_ARC21E00O-E11@14.jpg
│   │   ├── test_query_label
│   │   ├── 00_Y0121E0K6-J11@9.png
│   │   ├── 0_4BE21E07Q-K11@10.png
│   │   ├── 1_B0N21E061-Q11@7.png
│   │   ├── 2_AM421E00G-K11@8.png
│   │   ├── 3_31021E00B-A11@8.png
│   │   ├── 4_AM421E00N-A11@11.png
│   │   └── 5_ARC21E00O-E11@14.png
│   │   ├── test_query_ref_label
│   │   ├── 00_Y0121E0K6-J11@9_synthesized_Simage.jpg
│   │   ├── 00_Y0121E0K6-J11@9_synthesized_image_edgemap.jpg
│   │   ├── 00_Y0121E0K6-J11@9_synthesized_image_edgemap.png
│   │   ├── 0_4BE21E07Q-K11@10_synthesized_Simage.jpg
│   │   ├── 0_4BE21E07Q-K11@10_synthesized_image_edgemap.jpg
│   │   ├── 0_4BE21E07Q-K11@10_synthesized_image_edgemap.png
│   │   ├── 1_B0N21E061-Q11@7_synthesized_Simage.jpg
│   │   ├── 1_B0N21E061-Q11@7_synthesized_image_edgemap.jpg
│   │   ├── 1_B0N21E061-Q11@7_synthesized_image_edgemap.png
│   │   ├── 2_AM421E00G-K11@8_synthesized_Simage.jpg
│   │   ├── 2_AM421E00G-K11@8_synthesized_image_edgemap.jpg
│   │   ├── 2_AM421E00G-K11@8_synthesized_image_edgemap.png
│   │   ├── 3_31021E00B-A11@8_synthesized_Simage.jpg
│   │   ├── 3_31021E00B-A11@8_synthesized_image_edgemap.jpg
│   │   ├── 3_31021E00B-A11@8_synthesized_image_edgemap.png
│   │   ├── 4_AM421E00N-A11@11_synthesized_Simage.jpg
│   │   ├── 4_AM421E00N-A11@11_synthesized_image_edgemap.jpg
│   │   ├── 4_AM421E00N-A11@11_synthesized_image_edgemap.png
│   │   ├── 5_ARC21E00O-E11@14_synthesized_image_edgemap.jpg
│   │   └── 5_ARC21E00O-E11@14_synthesized_image_edgemap.png
│   │   ├── test_ref_img
│   │   ├── 00_Y0121E0K7-A11@4.jpg
│   │   ├── 0_31021E00B-Q11@12.jpg
│   │   ├── 1_4BE21E07T-I11@9.jpg
│   │   ├── 2_AN621D0PD-A11@10.jpg
│   │   ├── 3_B0N21E062-M11@7.jpg
│   │   ├── 4_DP521E1CL-C11@10.jpg
│   │   └── 5_A0F21E034-M11@10.jpg
│   │   └── test_ref_label
│   │   ├── 00_Y0121E0K7-A11@4.png
│   │   ├── 0_31021E00B-Q11@12.png
│   │   ├── 1_4BE21E07T-I11@9.png
│   │   ├── 2_AN621D0PD-A11@10.png
│   │   ├── 3_B0N21E062-M11@7.png
│   │   ├── 4_DP521E1CL-C11@10.png
│   │   └── 5_A0F21E034-M11@10.png
├── models
│   ├── base_model.py
│   ├── models.py
│   ├── networks.py
│   ├── ov_pix2pixHD_model.py
│   ├── ov_pix2pixHD_model_online.py
│   ├── pix2pixHD_model.py
│   └── ui_model.py
├── options
│   ├── base_options.py
│   ├── test_options.py
│   └── train_options.py
├── results
│   ├── zalando_final_app
│   │   └── test_latest
│   │   │   ├── images
│   │   │   ├── 00_Y0121E0K6-J11@9_generated_parse_map.jpg
│   │   │   ├── 00_Y0121E0K6-J11@9_query_img.jpg
│   │   │   ├── 00_Y0121E0K6-J11@9_real_image.jpg
│   │   │   ├── 00_Y0121E0K6-J11@9_synthesized_image.jpg
│   │   │   ├── 0_4BE21E07Q-K11@10_generated_parse_map.jpg
│   │   │   ├── 0_4BE21E07Q-K11@10_query_img.jpg
│   │   │   ├── 0_4BE21E07Q-K11@10_real_image.jpg
│   │   │   ├── 0_4BE21E07Q-K11@10_synthesized_image.jpg
│   │   │   ├── 1_B0N21E061-Q11@7_generated_parse_map.jpg
│   │   │   ├── 1_B0N21E061-Q11@7_query_img.jpg
│   │   │   ├── 1_B0N21E061-Q11@7_real_image.jpg
│   │   │   ├── 1_B0N21E061-Q11@7_synthesized_image.jpg
│   │   │   ├── 2_AM421E00G-K11@8_generated_parse_map.jpg
│   │   │   ├── 2_AM421E00G-K11@8_query_img.jpg
│   │   │   ├── 2_AM421E00G-K11@8_real_image.jpg
│   │   │   ├── 2_AM421E00G-K11@8_synthesized_image.jpg
│   │   │   ├── 3_31021E00B-A11@8_generated_parse_map.jpg
│   │   │   ├── 3_31021E00B-A11@8_query_img.jpg
│   │   │   ├── 3_31021E00B-A11@8_real_image.jpg
│   │   │   ├── 3_31021E00B-A11@8_synthesized_image.jpg
│   │   │   ├── 4_AM421E00N-A11@11_generated_parse_map.jpg
│   │   │   ├── 4_AM421E00N-A11@11_query_img.jpg
│   │   │   ├── 4_AM421E00N-A11@11_real_image.jpg
│   │   │   ├── 4_AM421E00N-A11@11_synthesized_image.jpg
│   │   │   ├── 5_ARC21E00O-E11@14_generated_parse_map.jpg
│   │   │   ├── 5_ARC21E00O-E11@14_query_img.jpg
│   │   │   ├── 5_ARC21E00O-E11@14_real_image.jpg
│   │   │   └── 5_ARC21E00O-E11@14_synthesized_image.jpg
│   │   │   └── index.html
│   └── zalando_final_shape
│   │   └── test_latest
│   │   ├── images
│   │   ├── 00_Y0121E0K6-J11@9_query.jpg
│   │   ├── 00_Y0121E0K6-J11@9_query_ref_mixed.jpg
│   │   ├── 00_Y0121E0K6-J11@9_ref.jpg
│   │   ├── 00_Y0121E0K6-J11@9_synthesized_Simage.jpg
│   │   ├── 00_Y0121E0K6-J11@9_synthesized_image_edgemap.jpg
│   │   ├── 0_4BE21E07Q-K11@10_query.jpg
│   │   ├── 0_4BE21E07Q-K11@10_query_ref_mixed.jpg
│   │   ├── 0_4BE21E07Q-K11@10_ref.jpg
│   │   ├── 0_4BE21E07Q-K11@10_synthesized_Simage.jpg
│   │   ├── 0_4BE21E07Q-K11@10_synthesized_image_edgemap.jpg
│   │   ├── 1_B0N21E061-Q11@7_query.jpg
│   │   ├── 1_B0N21E061-Q11@7_query_ref_mixed.jpg
│   │   ├── 1_B0N21E061-Q11@7_ref.jpg
│   │   ├── 1_B0N21E061-Q11@7_synthesized_Simage.jpg
│   │   ├── 1_B0N21E061-Q11@7_synthesized_image_edgemap.jpg
│   │   ├── 2_AM421E00G-K11@8_query.jpg
│   │   ├── 2_AM421E00G-K11@8_query_ref_mixed.jpg
│   │   ├── 2_AM421E00G-K11@8_ref.jpg
│   │   ├── 2_AM421E00G-K11@8_synthesized_Simage.jpg
│   │   ├── 2_AM421E00G-K11@8_synthesized_image_edgemap.jpg
│   │   ├── 3_31021E00B-A11@8_query.jpg
│   │   ├── 3_31021E00B-A11@8_query_ref_mixed.jpg
│   │   ├── 3_31021E00B-A11@8_ref.jpg
│   │   ├── 3_31021E00B-A11@8_synthesized_Simage.jpg
│   │   ├── 3_31021E00B-A11@8_synthesized_image_edgemap.jpg
│   │   ├── 4_AM421E00N-A11@11_query.jpg
│   │   ├── 4_AM421E00N-A11@11_query_ref_mixed.jpg
│   │   ├── 4_AM421E00N-A11@11_ref.jpg
│   │   ├── 4_AM421E00N-A11@11_synthesized_Simage.jpg
│   │   ├── 4_AM421E00N-A11@11_synthesized_image_edgemap.jpg
│   │   ├── 5_ARC21E00O-E11@14_query.jpg
│   │   ├── 5_ARC21E00O-E11@14_query_ref_mixed.jpg
│   │   ├── 5_ARC21E00O-E11@14_ref.jpg
│   │   ├── 5_ARC21E00O-E11@14_synthesized_Simage.jpg
│   │   └── 5_ARC21E00O-E11@14_synthesized_image_edgemap.jpg
│   │   └── index.html
├── test.ipynb
└── util
│   ├── html.py
│   ├── image_pool.py
│   ├── util.py
│   └── visualizer.py
├── readme.md
├── requirements.txt
└── shape_generation
├── data
├── image_folder.py
├── ov_test_dataset.py
├── ov_train_dataset.py
└── ov_train_dataset_denspose_pkl.py
├── models
├── base_model.py
├── models.py
├── networks.py
└── ov_pix2pixHD_model.py
├── options
├── base_options.py
├── test_options.py
└── train_options.py
├── results
└── test_results
│   ├── images
│   ├── 31021E00B-A11@8_query.jpg
│   ├── 31021E00B-A11@8_ref.jpg
│   ├── 31021E00B-A11@8_synthesized_image.jpg
│   ├── 4BE21E07Q-K11@10_query.jpg
│   ├── 4BE21E07Q-K11@10_ref.jpg
│   ├── 4BE21E07Q-K11@10_synthesized_image.jpg
│   ├── B0N21E061-Q11@7_query.jpg
│   ├── B0N21E061-Q11@7_ref.jpg
│   └── B0N21E061-Q11@7_synthesized_image.jpg
│   └── index.html
├── test.py
├── train.py
└── util
├── __init__.py
├── __pycache__
├── __init__.cpython-38.pyc
├── html.cpython-38.pyc
├── image_pool.cpython-38.pyc
├── util.cpython-36.pyc
├── util.cpython-38.pyc
└── visualizer.cpython-38.pyc
├── html.py
├── image_pool.py
├── util.py
└── visualizer.py

--------------------------------------------------------------------------------
/appearance_generation/data/image_folder.py:
--------------------------------------------------------------------------------
###############################################################################
# Code from
# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
# Modified the original code so that it also loads images from the current
# directory as well as the subdirectories
###############################################################################
import torch.utils.data as data
from PIL import Image
import os

IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff'
]


def is_image_file(filename):
    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)


def make_dataset(dir):
    images = []
    assert os.path.isdir(dir), '%s is not a valid directory' % dir

    for root, _, fnames in sorted(os.walk(dir)):
        for fname in fnames:
            if is_image_file(fname):
                path = os.path.join(root, fname)
                images.append(path)

    return images


def default_loader(path):
    return Image.open(path).convert('RGB')


class ImageFolder(data.Dataset):

    def __init__(self, root, transform=None, return_paths=False,
                 loader=default_loader):
        imgs = make_dataset(root)
        if len(imgs) == 0:
            raise(RuntimeError("Found 0 images in: " + root + "\n"
                               "Supported image extensions are: " +
                               ",".join(IMG_EXTENSIONS)))

        self.root = root
        self.imgs = imgs
        self.transform = transform
        self.return_paths = return_paths
        self.loader = loader

    def __getitem__(self, index):
        path = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.return_paths:
            return img, path
        else:
            return img

    def __len__(self):
        return len(self.imgs)
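A minimal usage sketch of this loader (not part of the repository; the directory path and the plain ToTensor transform are assumptions for illustration):

# Hypothetical usage of ImageFolder; point `root` at any folder of images.
from torchvision import transforms
from torch.utils.data import DataLoader
from data.image_folder import ImageFolder

dataset = ImageFolder(root='./datasets/test_query_img',   # assumed path
                      transform=transforms.ToTensor(),
                      return_paths=True)
loader = DataLoader(dataset, batch_size=1, shuffle=False)
for img, path in loader:
    print(path[0], img.shape)  # e.g. ('./datasets/test_query_img/a.jpg', torch.Size([1, 3, H, W]))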
--------------------------------------------------------------------------------
/appearance_generation/data/ov_test_dataset.py:
--------------------------------------------------------------------------------
from torch.utils.data.dataset import Dataset

from data.image_folder import make_dataset

import os
from PIL import Image
from glob import glob as glob
import numpy as np
import random
import torch


class TestDataset(Dataset):
    def __init__(self, opt, augment):

        self.opt = opt
        self.root = opt.dataroot
        self.transforms = augment

        # query label (label maps)
        dir_query_label = '_query_label'
        self.dir_query_label = os.path.join(
            opt.dataroot, opt.phase + dir_query_label)
        self.query_label_paths = sorted(make_dataset(self.dir_query_label))

        # ref label (label maps)
        dir_ref_label = '_ref_label'
        self.dir_ref_label = os.path.join(
            opt.dataroot, opt.phase + dir_ref_label)
        self.ref_label_paths = sorted(make_dataset(self.dir_ref_label))

        # query img (RGB images)
        dir_query_img = '_query_img'
        self.dir_query_img = os.path.join(
            opt.dataroot, opt.phase + dir_query_img)
        self.query_img_paths = sorted(make_dataset(self.dir_query_img))

        # ref img (RGB images)
        dir_ref_img = '_ref_img'
        self.dir_ref_img = os.path.join(
            opt.dataroot, opt.phase + dir_ref_img)
        self.ref_img_paths = sorted(make_dataset(self.dir_ref_img))

        # generated segmentation from shape_generation (label maps)
        dir_query_ref_label = '_query_ref_label'
        self.dir_query_ref_label = os.path.join(
            opt.dataroot, opt.phase + dir_query_ref_label)
        self.query_ref_label_paths = sorted(
            make_dataset(self.dir_query_ref_label))

    def custom_transform(self, input_image, per_channel_transform=True, input_type="densepose"):
        if per_channel_transform:
            num_channel_img = input_image.shape[0]
            tform_input_image_np = np.zeros(
                shape=input_image.shape, dtype=input_image.dtype)
            if input_type == "densepose":
                # channels 0-24 are the densepose part masks and are kept
                # as-is; any remaining channels go through the transform
                for i in range(num_channel_img):
                    if i > 24:
                        tform_input_image_np[i] = self.transforms['1'](
                            input_image[i].astype('uint8'))
                    else:
                        tform_input_image_np[i] = input_image[i]
            return torch.from_numpy(tform_input_image_np)

    def __getitem__(self, index):

        # query label (label maps)
        query_label_path = self.query_label_paths[index]
        query_label_parse = self.parsing_embedding(query_label_path, 'seg')
        query_label_parse = torch.from_numpy(query_label_parse)  # channel(20), H, W

        query_label_seg_mask = Image.open(query_label_path)
        query_label_seg_mask = np.array(query_label_seg_mask)
        query_label_seg_mask = torch.tensor(query_label_seg_mask, dtype=torch.long)

        # ref label (label maps)
        ref_label_path = self.ref_label_paths[index]
        ref_label_parse = self.parsing_embedding(ref_label_path, 'seg')
        ref_label_parse = torch.from_numpy(
            ref_label_parse)  # channel(20), H, W

        ref_label_seg_mask = Image.open(ref_label_path)
        ref_label_seg_mask = np.array(ref_label_seg_mask)
        ref_label_seg_mask = torch.tensor(ref_label_seg_mask, dtype=torch.long)

        # query img (RGB images)
        query_img_path = self.query_img_paths[index]
        query_img = Image.open(query_img_path)
        query_img = self.transforms['1'](query_img)

        # ref img (RGB images)
        ref_img_path = self.ref_img_paths[index]
        ref_img = Image.open(ref_img_path)
        ref_img = self.transforms['1'](ref_img)

        # generated segmentation from shape_generation (label maps)
        query_ref_label_path = self.query_ref_label_paths[index]
        query_ref_label_parse = self.parsing_embedding(query_ref_label_path, 'seg')  # channel(20), H, W
        C_tensor = torch.from_numpy(query_ref_label_parse)

        query_ref_label_seg_mask = Image.open(query_ref_label_path)
        query_ref_label_seg_mask = np.array(query_ref_label_seg_mask)
        query_ref_label_seg_mask = torch.tensor(query_ref_label_seg_mask, dtype=torch.long)

        input_dict = {
            'query_parse_map': query_label_parse,
            'ref_parse_map': ref_label_parse,
            'query_seg_map': query_label_seg_mask,
            'ref_seg_map': ref_label_seg_mask,
            'query_img': query_img,
            'ref_img': ref_img,
            'C_tensor_parse_map': C_tensor,
            'C_tensor_seg_map': query_ref_label_seg_mask,
            'path': query_label_path
        }

        return input_dict

    def parsing_embedding(self, parse_obj, parse_type):
        if parse_type == "seg":
            parse = Image.open(parse_obj)
            parse = np.array(parse)
            parse_channel = 20

        elif parse_type == "densemap":
            parse = np.array(parse_obj)
            parse_channel = 25

        parse_emb = []
        for i in range(parse_channel):
            parse_emb.append((parse == i).astype(np.float32).tolist())

        parse = np.array(parse_emb).astype(np.float32)
        return parse  # (channel, H, W)

    def __len__(self):
        # truncate to a multiple of the batch size so every batch is full
        return len(self.query_label_paths) // self.opt.batchSize * self.opt.batchSize

    def name(self):
        return 'TestDataset'

--------------------------------------------------------------------------------
/appearance_generation/data/ov_train_dataset.py:
--------------------------------------------------------------------------------
from torch.utils.data.dataset import Dataset

from data.image_folder import make_dataset

import os
from PIL import Image
from glob import glob as glob
import numpy as np
import random
import torch


class RegularDataset(Dataset):
    def __init__(self, opt, augment):
        self.opt = opt
        self.root = opt.dataroot
        self.transforms = augment

        # input A (label maps)
        dir_A = '_label'
        self.dir_A = os.path.join(opt.dataroot, opt.phase + dir_A)
        self.A_paths = sorted(make_dataset(self.dir_A))

        # input B (images)
        dir_B = '_img'
        self.dir_B = os.path.join(opt.dataroot, opt.phase + dir_B)
        self.B_paths = sorted(make_dataset(self.dir_B))

        self.dataset_size = len(self.A_paths)

    def __getitem__(self, index):

        # input A (label maps), one-hot encoded
        A_path = self.A_paths[index]
        A = self.parsing_embedding(A_path, 'seg')  # channel(20), H, W
        A_tensor = torch.from_numpy(A)

        # input B (images)
        B_path = self.B_paths[index]
        B = Image.open(B_path)
        B = np.array(B)
        B_tensor = self.transforms['1'](B)

        # original seg mask
        seg_mask = Image.open(A_path)
        seg_mask = np.array(seg_mask)
        seg_mask = torch.tensor(seg_mask, dtype=torch.long)

        input_dict = {'seg_map': A_tensor, 'target': B_tensor, 'seg_map_path': A_path,
                      'target_path': B_path, 'seg_mask': seg_mask}

        return input_dict

    def parsing_embedding(self, parse_path, parse_type="seg"):
        if parse_type == "seg":
            parse = Image.open(parse_path)
            parse = np.array(parse)
            parse_channel = 20

        parse_emb = []
        for i in range(parse_channel):
            parse_emb.append((parse == i).astype(np.float32).tolist())

        parse = np.array(parse_emb).astype(np.float32)
        return parse  # (channel, H, W)

    def __len__(self):
        return len(self.A_paths) // self.opt.batchSize * self.opt.batchSize

    def name(self):
        return 'RegularDataset'
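Both datasets rely on the same parsing_embedding trick: a single-channel label map becomes a stack of binary per-class planes. A self-contained sketch of that operation on a made-up 2x2 label map:

import numpy as np

# Toy 2x2 label map with class ids 0..2 (illustrative values only).
parse = np.array([[0, 1],
                  [2, 1]])

# Same one-hot construction parsing_embedding uses: one binary plane per class.
parse_channel = 3
parse_emb = np.stack([(parse == i).astype(np.float32)
                      for i in range(parse_channel)])

print(parse_emb.shape)  # (3, 2, 2)
print(parse_emb[1])     # [[0. 1.] [0. 1.]] -- the mask for class 1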
--------------------------------------------------------------------------------
/appearance_generation/models/base_model.py:
--------------------------------------------------------------------------------
import os
import torch
import sys


class BaseModel(torch.nn.Module):
    def name(self):
        return 'BaseModel'

    def initialize(self, opt):
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.isTrain = opt.isTrain
        self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)

    def set_input(self, input):
        self.input = input

    def forward(self):
        pass

    # used in test time, no backprop
    def test(self):
        pass

    def get_image_paths(self):
        pass

    def optimize_parameters(self):
        pass

    def get_current_visuals(self):
        return self.input

    def get_current_errors(self):
        return {}

    def save(self, label):
        pass

    # helper saving function that can be used by subclasses
    def save_network(self, network, network_label, epoch_label, gpu_ids):
        save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
        save_path = os.path.join(self.save_dir, save_filename)
        torch.save(network.cpu().state_dict(), save_path)
        if len(gpu_ids) and torch.cuda.is_available():
            network.cuda()

    # helper loading function that can be used by subclasses
    def load_network(self, network, network_label, epoch_label, save_dir=''):
        save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
        if not save_dir:
            save_dir = self.save_dir
        save_path = os.path.join(save_dir, save_filename)
        print(save_path)
        if not os.path.isfile(save_path):
            print('%s does not exist yet!' % save_path)
            if network_label == 'G':
                raise RuntimeError('Generator must exist!')
        else:
            try:
                network.load_state_dict(torch.load(save_path))
            except:
                # checkpoint and model do not match exactly; fall back to a
                # partial load of the layers whose names and sizes agree
                pretrained_dict = torch.load(save_path)
                model_dict = network.state_dict()
                try:
                    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
                    network.load_state_dict(pretrained_dict)
                    if self.opt.verbose:
                        print('Pretrained network %s has excessive layers; Only loading layers that are used' % network_label)
                except:
                    print('Pretrained network %s has fewer layers; The following are not initialized:' % network_label)
                    for k, v in pretrained_dict.items():
                        if v.size() == model_dict[k].size():
                            model_dict[k] = v

                    if sys.version_info >= (3, 0):
                        not_initialized = set()
                    else:
                        from sets import Set
                        not_initialized = Set()

                    for k, v in model_dict.items():
                        if k not in pretrained_dict or v.size() != pretrained_dict[k].size():
                            not_initialized.add(k.split('.')[0])

                    print(sorted(not_initialized))
                    network.load_state_dict(model_dict)

    def update_learning_rate(self):
        pass

--------------------------------------------------------------------------------
/appearance_generation/models/models.py:
--------------------------------------------------------------------------------
import torch


def create_model(opt):
    if opt.model == 'pix2pixHD':
        from .pix2pixHD_model import Pix2PixHDModel, InferenceModel
        if opt.isTrain:
            model = Pix2PixHDModel()
        else:
            model = InferenceModel()
    elif opt.model == 'ov_pix2pixHD':
        from .ov_pix2pixHD_model import Pix2PixHDModel, InferenceModel
        if opt.isTrain:
            model = Pix2PixHDModel()
        else:
            model = InferenceModel()
    else:
        from .ui_model import UIModel
        model = UIModel()

    print('--------- model used ---------', opt.model)
    model.initialize(opt)

    if opt.verbose:
        print("model [%s] was created" % (model.name()))

    if opt.isTrain and len(opt.gpu_ids) and not opt.fp16:
        model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)

    return model
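The fallback branch of BaseModel.load_network above is the usual partial-checkpoint-load pattern: keep only checkpoint entries whose keys and shapes match the live network. A standalone sketch of that pattern (the two toy models are invented for illustration):

import torch
import torch.nn as nn

# Hypothetical models: the "checkpoint" model has one extra layer.
old_model = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2), nn.Linear(2, 2))
new_model = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))

pretrained_dict = old_model.state_dict()
model_dict = new_model.state_dict()

# Keep only entries present in the target model with matching shapes,
# mirroring the filtering done in BaseModel.load_network.
filtered = {k: v for k, v in pretrained_dict.items()
            if k in model_dict and v.size() == model_dict[k].size()}
model_dict.update(filtered)
new_model.load_state_dict(model_dict)
print(sorted(filtered.keys()))  # the layers actually restored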
--------------------------------------------------------------------------------
/appearance_generation/options/test_options.py:
--------------------------------------------------------------------------------
from .base_options import BaseOptions


class TestOptions(BaseOptions):
    def initialize(self):
        BaseOptions.initialize(self)
        self.parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.')
        self.parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
        self.parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
        self.parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        self.parser.add_argument('--how_many', type=int, default=50, help='how many test images to run')
        self.parser.add_argument('--cluster_path', type=str, default='features_clustered_010.npy', help='the path for clustered results of encoded features')
        self.parser.add_argument('--use_encoded_image', action='store_true', help='if specified, encode the real image to get the feature map')
        self.parser.add_argument("--export_onnx", type=str, help="export ONNX model to a given file")
        self.parser.add_argument("--engine", type=str, help="run serialized TRT engine")
        self.parser.add_argument("--onnx", type=str, help="run ONNX model via TRT")

        # for discriminators
        self.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use')
        self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
        self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        self.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss')
        self.parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss')
        self.parser.add_argument('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss')
        self.parser.add_argument('--no_lsgan', action='store_true', help='if specified, do *not* use least-squares GAN; use vanilla GAN instead')
        self.parser.add_argument('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images')
        self.parser.add_argument('--no_ce_loss', action='store_true', help='if specified, do *not* use cross-entropy matching loss')

        self.isTrain = False
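A hedged sketch of driving these options programmatically instead of from a shell (argument values are illustrative; --name and --dataroot are defined in base_options.py, which is not included in this dump):

import sys
from options.test_options import TestOptions

# Illustrative CLI; the experiment name and dataroot are assumptions.
sys.argv = ['test.py', '--name', 'test_results',
            '--dataroot', './datasets/fashion_compatibility/']
opt = TestOptions().parse(save=False)      # same call test.py makes
print(opt.phase, opt.which_epoch, opt.how_many)  # test latest 50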
--------------------------------------------------------------------------------
/appearance_generation/options/train_options.py:
--------------------------------------------------------------------------------
from .base_options import BaseOptions


class TrainOptions(BaseOptions):
    def initialize(self):
        BaseOptions.initialize(self)
        # for displays
        self.parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen')
        self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        self.parser.add_argument('--save_latest_freq', type=int, default=1000, help='frequency of saving the latest results')
        self.parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
        self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
        self.parser.add_argument('--debug', action='store_true', help='only run one epoch and display results at each iteration')

        # for training
        self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
        self.parser.add_argument('--load_pretrain', type=str, default='', help='load the pretrained model from the specified location')
        self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        self.parser.add_argument('--niter', type=int, default=100, help='# of epochs at the starting learning rate')
        self.parser.add_argument('--niter_decay', type=int, default=100, help='# of epochs over which to linearly decay the learning rate to zero')
        self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
        self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')

        # for discriminators
        self.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use')
        self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers')
        self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        self.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss')
        self.parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss')
        self.parser.add_argument('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss')
        self.parser.add_argument('--no_lsgan', action='store_true', help='if specified, do *not* use least-squares GAN; use vanilla GAN instead')
        self.parser.add_argument('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images')
        self.parser.add_argument('--no_ce_loss', action='store_true', help='if specified, do *not* use cross-entropy matching loss')
        self.isTrain = True
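--niter and --niter_decay follow the pix2pixHD convention: the base learning rate is held for niter epochs, then decayed linearly to zero over the next niter_decay epochs. The scheduler itself lives in the model code (not shown in this dump); the sketch below only illustrates the implied schedule:

def lr_for_epoch(epoch, lr=0.0002, niter=100, niter_decay=100):
    """pix2pixHD-style schedule: constant for `niter` epochs, then a
    linear ramp down to zero over `niter_decay` epochs."""
    if epoch <= niter:
        return lr
    return lr * (1.0 - float(epoch - niter) / niter_decay)

print(lr_for_epoch(50))   # 0.0002  (constant phase)
print(lr_for_epoch(150))  # 0.0001  (halfway through the decay)
print(lr_for_epoch(200))  # 0.0     (fully decayed)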
--------------------------------------------------------------------------------
/appearance_generation/results/test_results/images/*.png:
--------------------------------------------------------------------------------
[binary assets: the 20 sample-result PNGs listed in the tree above
(*_generated_parse_map, *_query_img, *_ref_image and *_synthesized_image for
samples 0_N00098 through 4_N01497), each stored at
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/appearance_generation/results/test_results/images/<filename>]
--------------------------------------------------------------------------------
/appearance_generation/test.py:
--------------------------------------------------------------------------------
'''
run the code with python test.py --name name_of_exp --dataroot ./datasets/dataroot/
'''
import os
from collections import OrderedDict
from torch.autograd import Variable
from options.test_options import TestOptions
from data.ov_test_dataset import TestDataset
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
from util import html
import torch
from torchvision import transforms
from torch.utils.data import DataLoader

opt = TestOptions().parse(save=False)
opt.nThreads = 1   # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1

augment = {}
# augment['1'] = transforms.Compose(
#     [transforms.ToTensor()])  # change to [C, H, W]

augment['1'] = transforms.Compose(
    [
        transforms.ToTensor(),
        transforms.Normalize((0.5, ), (0.5, ))])

test_dataset = TestDataset(opt, augment)
test_dataloader = DataLoader(test_dataset,
                             shuffle=False,
                             num_workers=int(opt.nThreads),
                             batch_size=opt.batchSize,
                             pin_memory=True)

dataset_size = len(test_dataset)
print('#testing images = %d' % dataset_size)

# Create and Load Model
model = create_model(opt)

# Initialize visualizer
visualizer = Visualizer(opt)

# create website
web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))

for i, data in enumerate(test_dataloader):
    if i >= opt.how_many:
        break
    generated = model.inference_forward_appearance(data['query_img'], data['query_parse_map'],
                                                   data['query_seg_map'], data['ref_img'],
                                                   data['ref_parse_map'], data['ref_seg_map'],
                                                   data['C_tensor_parse_map'], data['C_tensor_seg_map'])
    visuals = OrderedDict([('query_img', util.tensor2im(data['query_img'][0])),
                           ('ref_image', util.tensor2im(data['ref_img'][0])),
                           ('generated_parse_map', util.tensor2label(data['C_tensor_parse_map'][0], opt.label_nc)),
                           ('synthesized_image', util.tensor2im(generated.data[0]))])
    img_path = data['path']
    print('process image... %s' % img_path)
    visualizer.save_images(webpage, visuals, img_path)

webpage.save()
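Normalize((0.5,), (0.5,)) above maps inputs from [0, 1] to [-1, 1]; util.tensor2im (see util/util.py below) inverts this with (x + 1) / 2 * 255. A quick round-trip sketch on a toy image:

import numpy as np
import torch
from torchvision import transforms

to_model = transforms.Compose([
    transforms.ToTensor(),                 # uint8 HWC [0, 255] -> float CHW [0, 1]
    transforms.Normalize((0.5,), (0.5,)),  # [0, 1] -> [-1, 1]
])

img = np.full((4, 4, 3), 255, dtype=np.uint8)  # toy all-white image
x = to_model(img)
print(x.min().item(), x.max().item())          # 1.0 1.0

# inverse mapping used by util.tensor2im
restored = (x.numpy().transpose(1, 2, 0) + 1) / 2.0 * 255.0
print(restored[0, 0])                          # [255. 255. 255.]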
--------------------------------------------------------------------------------
/appearance_generation/util/html.py:
--------------------------------------------------------------------------------
import dominate
from dominate.tags import *
import os


class HTML:
    def __init__(self, web_dir, title, refresh=0):
        self.title = title
        self.web_dir = web_dir
        self.img_dir = os.path.join(self.web_dir, 'images')
        if not os.path.exists(self.web_dir):
            os.makedirs(self.web_dir)
        if not os.path.exists(self.img_dir):
            os.makedirs(self.img_dir)

        self.doc = dominate.document(title=title)
        if refresh > 0:
            with self.doc.head:
                meta(http_equiv="refresh", content=str(refresh))

    def get_image_dir(self):
        return self.img_dir

    def add_header(self, text):
        with self.doc:
            h3(text)

    def add_table(self, border=1):
        self.t = table(border=border, style="table-layout: fixed;")
        self.doc.add(self.t)

    def add_images(self, ims, txts, links, width=512):
        self.add_table()
        with self.t:
            with tr():
                for im, txt, link in zip(ims, txts, links):
                    with td(style="word-wrap: break-word;", halign="center", valign="top"):
                        with p():
                            with a(href=os.path.join('images', link)):
                                img(style="width:%dpx" % (width), src=os.path.join('images', im))
                            br()
                            p(txt)

    def save(self):
        html_file = '%s/index.html' % self.web_dir
        with open(html_file, 'wt') as f:
            f.write(self.doc.render())


if __name__ == '__main__':
    html = HTML('web/', 'test_html')
    html.add_header('hello world')

    ims = []
    txts = []
    links = []
    for n in range(4):
        ims.append('image_%d.jpg' % n)
        txts.append('text_%d' % n)
        links.append('image_%d.jpg' % n)
    html.add_images(ims, txts, links)
    html.save()

--------------------------------------------------------------------------------
/appearance_generation/util/image_pool.py:
--------------------------------------------------------------------------------
import random
import torch
from torch.autograd import Variable


class ImagePool():
    def __init__(self, pool_size):
        self.pool_size = pool_size
        if self.pool_size > 0:
            self.num_imgs = 0
            self.images = []

    def query(self, images):
        if self.pool_size == 0:
            return images
        return_images = []
        for image in images.data:
            image = torch.unsqueeze(image, 0)
            if self.num_imgs < self.pool_size:
                self.num_imgs = self.num_imgs + 1
                self.images.append(image)
                return_images.append(image)
            else:
                p = random.uniform(0, 1)
                if p > 0.5:
                    random_id = random.randint(0, self.pool_size - 1)
                    tmp = self.images[random_id].clone()
                    self.images[random_id] = image
                    return_images.append(tmp)
                else:
                    return_images.append(image)
        return_images = Variable(torch.cat(return_images, 0))
        return return_images
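A small behavioral sketch of ImagePool (assuming the util package is importable): the first pool_size queried images are stored and returned unchanged; after that, each query has a 50% chance of returning an older stored image in place of the new one, the standard trick for stabilizing the GAN discriminator:

import torch
from util.image_pool import ImagePool

pool = ImagePool(pool_size=2)

# The first pool_size images are stored and returned unchanged.
out_a = pool.query(torch.zeros(1, 3, 4, 4))
out_b = pool.query(torch.ones(1, 3, 4, 4))

# Later queries may return an older image instead of the new one.
out_c = pool.query(torch.full((1, 3, 4, 4), 2.0))
print(out_c.flatten()[0].item())  # 2.0, 0.0 or 1.0, depending on the draw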
--------------------------------------------------------------------------------
/appearance_generation/util/util.py:
--------------------------------------------------------------------------------
from __future__ import print_function
import torch
import numpy as np
from PIL import Image
import os
import cv2

# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2im(image_tensor, imtype=np.uint8, normalize=True):
    if isinstance(image_tensor, list):
        image_numpy = []
        for i in range(len(image_tensor)):
            image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))
        return image_numpy
    image_numpy = image_tensor.cpu().float().numpy()
    if normalize:
        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    else:
        image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
    image_numpy = np.clip(image_numpy, 0, 255)
    if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3:
        image_numpy = image_numpy[:, :, 0]
    return image_numpy.astype(imtype)

# Converts a one-hot tensor into a colorful label map
def tensor2label(label_tensor, n_label, imtype=np.uint8):
    if n_label == 0:
        return tensor2im(label_tensor, imtype)
    label_tensor = label_tensor.float()
    if label_tensor.size()[0] > 1:
        label_tensor = label_tensor.max(0, keepdim=True)[1]
    label_tensor = Colorize(n_label)(label_tensor)
    label_numpy = np.transpose(label_tensor.numpy(), (1, 2, 0))
    return label_numpy.astype(imtype)

def tensor2edgemap(label_tensor, imtype=np.uint8):
    edgemap = torch.argmax(label_tensor, dim=0, keepdim=True)
    edgemap = edgemap.squeeze(0)
    edgemap = edgemap.cpu().float().numpy()
    return edgemap.astype(imtype)


def save_image(image_numpy, image_path):
    image_pil = Image.fromarray(image_numpy)
    image_pil.save(image_path)

def mkdirs(paths):
    if isinstance(paths, list) and not isinstance(paths, str):
        for path in paths:
            mkdir(path)
    else:
        mkdir(paths)

def mkdir(path):
    if not os.path.exists(path):
        os.makedirs(path)

###############################################################################
# Code from
# https://github.com/ycszen/pytorch-seg/blob/master/transform.py
# Modified so it complies with the human-parsing label map colors
###############################################################################
def uint82bin(n, count=8):
    """returns the binary of integer n, count refers to amount of bits"""
    return ''.join([str((n >> y) & 1) for y in range(count - 1, -1, -1)])

def labelcolormap(N):
    if N == 20:  # 20-class human parsing palette
        label_colours = [(0, 0, 0)
                         # 0=Background
                         , (128, 0, 0), (255, 0, 0), (0, 85, 0), (170, 0, 51), (255, 85, 0)
                         # 1=Hat, 2=Hair, 3=Glove, 4=Sunglasses, 5=UpperClothes
                         , (0, 0, 85), (0, 119, 221), (85, 85, 0), (0, 85, 85), (85, 51, 0)
                         # 6=Dress, 7=Coat, 8=Socks, 9=Pants, 10=Jumpsuits
                         , (52, 86, 128), (0, 128, 0), (0, 0, 255), (51, 170, 221), (0, 255, 255)
                         # 11=Scarf, 12=Skirt, 13=Face, 14=LeftArm, 15=RightArm
                         , (85, 255, 170), (170, 255, 85), (255, 255, 0), (255, 170, 0)]
                         # 16=LeftLeg, 17=RightLeg, 18=LeftShoe, 19=RightShoe
        cmap = np.array(label_colours, dtype=np.uint8)
    else:
        cmap = np.zeros((N, 3), dtype=np.uint8)
        for i in range(N):
            r, g, b = 0, 0, 0
            id = i
            for j in range(7):
                str_id = uint82bin(id)
                r = r ^ (np.uint8(str_id[-1]) << (7 - j))
                g = g ^ (np.uint8(str_id[-2]) << (7 - j))
                b = b ^ (np.uint8(str_id[-3]) << (7 - j))
                id = id >> 3
            cmap[i, 0] = r
            cmap[i, 1] = g
            cmap[i, 2] = b
    return cmap

class Colorize(object):
    def __init__(self, n=20):
        self.cmap = labelcolormap(n)
        self.cmap = torch.from_numpy(self.cmap[:n])

    def __call__(self, gray_image):
        size = gray_image.size()
        color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)

        for label in range(0, len(self.cmap)):
            mask = (label == gray_image[0]).cpu()
            color_image[0][mask] = self.cmap[label][0]
            color_image[1][mask] = self.cmap[label][1]
            color_image[2][mask] = self.cmap[label][2]

        return color_image
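A short sketch of the label-colorization path in this module (assuming it is importable as util.util): a 20-channel one-hot parse map goes in, an RGB visualization using the palette above comes out:

import torch
from util.util import tensor2label

# Toy 20-channel one-hot parse map: every pixel belongs to class 5 (UpperClothes).
parse = torch.zeros(20, 8, 8)
parse[5] = 1.0

rgb = tensor2label(parse, n_label=20)
print(rgb.shape, rgb[0, 0])  # (8, 8, 3) [255  85   0] -- the UpperClothes color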
--------------------------------------------------------------------------------
/appearance_generation/util/visualizer.py:
--------------------------------------------------------------------------------
import numpy as np
import os
import ntpath
import time
from . import util
from . import html
# import scipy.misc
from PIL import Image
try:
    from StringIO import StringIO  # Python 2.7
except ImportError:
    from io import BytesIO  # Python 3.x


class Visualizer():
    def __init__(self, opt):
        # self.opt = opt
        self.tf_log = opt.tf_log
        self.use_html = opt.isTrain and not opt.no_html
        self.win_size = opt.display_winsize
        self.name = opt.name
        if self.tf_log:
            import tensorflow as tf
            self.tf = tf
            self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, 'logs')
            # self.writer = tf.summary.FileWriter(self.log_dir)
            self.writer = tf.summary.create_file_writer(self.log_dir)

        if self.use_html:
            self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
            self.img_dir = os.path.join(self.web_dir, 'images')
            print('create web directory %s...' % self.web_dir)
            util.mkdirs([self.web_dir, self.img_dir])
        self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
        with open(self.log_name, "a") as log_file:
            now = time.strftime("%c")
            log_file.write('================ Training Loss (%s) ================\n' % now)

    # |visuals|: dictionary of images to display or save
    def display_current_results(self, visuals, epoch, step):
        # if self.tf_log:  # show images in tensorboard output
        #     img_summaries = []
        #     for label, image_numpy in visuals.items():
        #         # Write the image to a string
        #         try:
        #             s = StringIO()
        #         except:
        #             s = BytesIO()
        #         Image.fromarray(image_numpy).save(s, format="jpeg")
        #         # Create an Image object
        #         img_sum = self.tf.summary.image(encoded_image_string=s.getvalue(), height=image_numpy.shape[0], width=image_numpy.shape[1])
        #         # Create a Summary value
        #         img_summaries.append(self.tf.summary.scaler(tag=label, image=img_sum))
        #
        #     # Create and write Summary
        #     summary = self.tf.summary(value=img_summaries)
        #     self.writer.add_summary(summary, step)

        if self.use_html:  # save images to a html file
            for label, image_numpy in visuals.items():
                if isinstance(image_numpy, list):
                    for i in range(len(image_numpy)):
                        img_path = os.path.join(self.img_dir, 'epoch%.3d_%s_%d.jpg' % (epoch, label, i))
                        util.save_image(image_numpy[i], img_path)
                else:
                    img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.jpg' % (epoch, label))
                    util.save_image(image_numpy, img_path)

            # update website
            webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=30)
            for n in range(epoch, 0, -1):
                webpage.add_header('epoch [%d]' % n)
                ims = []
                txts = []
                links = []

                for label, image_numpy in visuals.items():
                    if isinstance(image_numpy, list):
                        for i in range(len(image_numpy)):
                            img_path = 'epoch%.3d_%s_%d.jpg' % (n, label, i)
                            ims.append(img_path)
                            txts.append(label + str(i))
                            links.append(img_path)
                    else:
                        img_path = 'epoch%.3d_%s.jpg' % (n, label)
                        ims.append(img_path)
                        txts.append(label)
                        links.append(img_path)
                if len(ims) < 10:
                    webpage.add_images(ims, txts, links, width=self.win_size)
                else:
                    num = int(round(len(ims) / 2.0))
                    webpage.add_images(ims[:num], txts[:num], links[:num], width=self.win_size)
                    webpage.add_images(ims[num:], txts[num:], links[num:], width=self.win_size)
            webpage.save()

    # errors: dictionary of error labels and values
    def plot_current_errors(self, errors, step):
        if self.tf_log:
            with self.writer.as_default():
                for tag, value in errors.items():
                    self.tf.summary.scalar(tag, value, step=step)
                self.writer.flush()
            # summary = self.tf.summary(value=[self.tf.summary.Value(tag=tag, simple_value=value)])
            # self.writer.add_summary(summary, step)

    # errors: same format as |errors| of plotCurrentErrors
    def print_current_errors(self, epoch, i, errors, t):
        message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)
        for k, v in errors.items():
            if v != 0:
                message += '%s: %.3f ' % (k, v)

        print(message)
        with open(self.log_name, "a") as log_file:
            log_file.write('%s\n' % message)

    # save image to the disk
    def save_images(self, webpage, visuals, image_path):
        image_dir = webpage.get_image_dir()
        short_path = ntpath.basename(image_path[0])
        name = os.path.splitext(short_path)[0]

        webpage.add_header(name)
        ims = []
        txts = []
        links = []

        for label, image_numpy in visuals.items():
            image_name = '%s_%s.png' % (name, label)
            save_path = os.path.join(image_dir, image_name)
            util.save_image(image_numpy, save_path)

            ims.append(image_name)
            txts.append(label)
            links.append(image_name)
        webpage.add_images(ims, txts, links, width=self.win_size)

--------------------------------------------------------------------------------
/inference/data/image_folder.py:
--------------------------------------------------------------------------------
###############################################################################
# Code from
# https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py
# Modified the original code so that it also loads images from the current
# directory as well as the subdirectories
###############################################################################
import torch.utils.data as data
from PIL import Image
import os

IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff'
]


def is_image_file(filename):
    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)


def make_dataset(dir):
    images = []
    assert os.path.isdir(dir), '%s is not a valid directory' % dir

    for root, _, fnames in sorted(os.walk(dir)):
        for fname in fnames:
            if is_image_file(fname):
                path = os.path.join(root, fname)
                images.append(path)

    return images


def default_loader(path):
    return Image.open(path).convert('RGB')
class ImageFolder(data.Dataset):

    def __init__(self, root, transform=None, return_paths=False,
                 loader=default_loader):
        imgs = make_dataset(root)
        if len(imgs) == 0:
            raise(RuntimeError("Found 0 images in: " + root + "\n"
                               "Supported image extensions are: " +
                               ",".join(IMG_EXTENSIONS)))

        self.root = root
        self.imgs = imgs
        self.transform = transform
        self.return_paths = return_paths
        self.loader = loader

    def __getitem__(self, index):
        path = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.return_paths:
            return img, path
        else:
            return img

    def __len__(self):
        return len(self.imgs)

--------------------------------------------------------------------------------
/inference/datasets/fashion_compatibility/...:
--------------------------------------------------------------------------------
[binary dataset assets: the test_densepose .npy files, test_query_img .jpg,
test_query_label .png, test_query_ref_label *_synthesized_image_edgemap.png
and test_ref_img .jpg samples listed in the tree above, each stored at
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/fashion_compatibility/<subdir>/<filename>;
the dump is truncated partway through the test_ref_img listing]
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/fashion_compatibility/test_ref_img/1_N00154.jpg -------------------------------------------------------------------------------- /inference/datasets/fashion_compatibility/test_ref_img/2_N00382.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/fashion_compatibility/test_ref_img/2_N00382.jpg -------------------------------------------------------------------------------- /inference/datasets/fashion_compatibility/test_ref_img/3_N00798.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/fashion_compatibility/test_ref_img/3_N00798.jpg -------------------------------------------------------------------------------- /inference/datasets/fashion_compatibility/test_ref_img/4_N01801.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/fashion_compatibility/test_ref_img/4_N01801.jpg -------------------------------------------------------------------------------- /inference/datasets/fashion_compatibility/test_ref_label/0_N00119.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/fashion_compatibility/test_ref_label/0_N00119.png -------------------------------------------------------------------------------- /inference/datasets/fashion_compatibility/test_ref_label/1_N00154.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/fashion_compatibility/test_ref_label/1_N00154.png -------------------------------------------------------------------------------- /inference/datasets/fashion_compatibility/test_ref_label/2_N00382.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/fashion_compatibility/test_ref_label/2_N00382.png -------------------------------------------------------------------------------- /inference/datasets/fashion_compatibility/test_ref_label/3_N00798.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/fashion_compatibility/test_ref_label/3_N00798.png -------------------------------------------------------------------------------- /inference/datasets/fashion_compatibility/test_ref_label/4_N01801.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/fashion_compatibility/test_ref_label/4_N01801.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_densepose/00_Y0121E0K6-J11@9.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_densepose/00_Y0121E0K6-J11@9.npy -------------------------------------------------------------------------------- /inference/datasets/test_data/test_densepose/0_4BE21E07Q-K11@10.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_densepose/0_4BE21E07Q-K11@10.npy -------------------------------------------------------------------------------- /inference/datasets/test_data/test_densepose/0_N00098.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_densepose/0_N00098.npy -------------------------------------------------------------------------------- /inference/datasets/test_data/test_densepose/1_B0N21E061-Q11@7.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_densepose/1_B0N21E061-Q11@7.npy -------------------------------------------------------------------------------- /inference/datasets/test_data/test_densepose/1_N00115.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_densepose/1_N00115.npy -------------------------------------------------------------------------------- /inference/datasets/test_data/test_densepose/2_AM421E00G-K11@8.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_densepose/2_AM421E00G-K11@8.npy -------------------------------------------------------------------------------- /inference/datasets/test_data/test_densepose/2_N00383.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_densepose/2_N00383.npy -------------------------------------------------------------------------------- /inference/datasets/test_data/test_densepose/3_31021E00B-A11@8.npy: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_densepose/3_31021E00B-A11@8.npy -------------------------------------------------------------------------------- /inference/datasets/test_data/test_densepose/3_N00807.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_densepose/3_N00807.npy -------------------------------------------------------------------------------- /inference/datasets/test_data/test_densepose/4_AM421E00N-A11@11.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_densepose/4_AM421E00N-A11@11.npy -------------------------------------------------------------------------------- /inference/datasets/test_data/test_densepose/4_N01517.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_densepose/4_N01517.npy -------------------------------------------------------------------------------- /inference/datasets/test_data/test_densepose/5_ARC21E00O-E11@14.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_densepose/5_ARC21E00O-E11@14.npy -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_img/00_Y0121E0K6-J11@9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_img/00_Y0121E0K6-J11@9.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_img/0_4BE21E07Q-K11@10.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_img/0_4BE21E07Q-K11@10.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_img/0_N00098.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_img/0_N00098.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_img/1_B0N21E061-Q11@7.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_img/1_B0N21E061-Q11@7.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_img/1_N00155.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_img/1_N00155.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_img/2_AM421E00G-K11@8.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_img/2_AM421E00G-K11@8.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_img/2_N00383.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_img/2_N00383.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_img/3_31021E00B-A11@8.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_img/3_31021E00B-A11@8.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_img/3_N00807.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_img/3_N00807.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_img/4_AM421E00N-A11@11.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_img/4_AM421E00N-A11@11.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_img/4_N01497.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_img/4_N01497.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_img/5_ARC21E00O-E11@14.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_img/5_ARC21E00O-E11@14.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_label/00_Y0121E0K6-J11@9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_label/00_Y0121E0K6-J11@9.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_label/0_4BE21E07Q-K11@10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_label/0_4BE21E07Q-K11@10.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_label/0_N00098.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_label/0_N00098.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_label/1_B0N21E061-Q11@7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_label/1_B0N21E061-Q11@7.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_label/1_N00155.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_label/1_N00155.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_label/2_AM421E00G-K11@8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_label/2_AM421E00G-K11@8.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_label/2_N00383.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_label/2_N00383.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_label/3_31021E00B-A11@8.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_label/3_31021E00B-A11@8.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_label/3_N00807.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_label/3_N00807.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_label/4_AM421E00N-A11@11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_label/4_AM421E00N-A11@11.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_label/4_N01497.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_label/4_N01497.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_query_label/5_ARC21E00O-E11@14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_query_label/5_ARC21E00O-E11@14.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_img/00_Y0121E0K7-A11@4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_img/00_Y0121E0K7-A11@4.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_img/0_31021E00B-Q11@12.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_img/0_31021E00B-Q11@12.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_img/0_N00119.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_img/0_N00119.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_img/1_4BE21E07T-I11@9.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_img/1_4BE21E07T-I11@9.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_img/1_N00154.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_img/1_N00154.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_img/2_AN621D0PD-A11@10.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_img/2_AN621D0PD-A11@10.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_img/2_N00382.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_img/2_N00382.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_img/3_B0N21E062-M11@7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_img/3_B0N21E062-M11@7.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_img/3_N00798.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_img/3_N00798.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_img/4_DP521E1CL-C11@10.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_img/4_DP521E1CL-C11@10.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_img/4_N01801.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_img/4_N01801.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_img/5_A0F21E034-M11@10.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_img/5_A0F21E034-M11@10.jpg -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_label/00_Y0121E0K7-A11@4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_label/00_Y0121E0K7-A11@4.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_label/0_31021E00B-Q11@12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_label/0_31021E00B-Q11@12.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_label/0_N00119.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_label/0_N00119.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_label/1_4BE21E07T-I11@9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_label/1_4BE21E07T-I11@9.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_label/1_N00154.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_label/1_N00154.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_label/2_AN621D0PD-A11@10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_label/2_AN621D0PD-A11@10.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_label/2_N00382.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_label/2_N00382.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_label/3_B0N21E062-M11@7.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_label/3_B0N21E062-M11@7.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_label/3_N00798.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_label/3_N00798.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_label/4_DP521E1CL-C11@10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_label/4_DP521E1CL-C11@10.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_label/4_N01801.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_label/4_N01801.png -------------------------------------------------------------------------------- /inference/datasets/test_data/test_ref_label/5_A0F21E034-M11@10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/test_data/test_ref_label/5_A0F21E034-M11@10.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_densepose/00_Y0121E0K6-J11@9.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_densepose/00_Y0121E0K6-J11@9.npy -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_densepose/0_4BE21E07Q-K11@10.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_densepose/0_4BE21E07Q-K11@10.npy -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_densepose/1_B0N21E061-Q11@7.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_densepose/1_B0N21E061-Q11@7.npy -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_densepose/2_AM421E00G-K11@8.npy: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_densepose/2_AM421E00G-K11@8.npy -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_densepose/3_31021E00B-A11@8.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_densepose/3_31021E00B-A11@8.npy -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_densepose/4_AM421E00N-A11@11.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_densepose/4_AM421E00N-A11@11.npy -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_densepose/5_ARC21E00O-E11@14.npy: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_densepose/5_ARC21E00O-E11@14.npy -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_img/00_Y0121E0K6-J11@9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_img/00_Y0121E0K6-J11@9.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_img/0_4BE21E07Q-K11@10.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_img/0_4BE21E07Q-K11@10.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_img/1_B0N21E061-Q11@7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_img/1_B0N21E061-Q11@7.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_img/2_AM421E00G-K11@8.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_img/2_AM421E00G-K11@8.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_img/3_31021E00B-A11@8.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_img/3_31021E00B-A11@8.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_img/4_AM421E00N-A11@11.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_img/4_AM421E00N-A11@11.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_img/5_ARC21E00O-E11@14.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_img/5_ARC21E00O-E11@14.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_label/00_Y0121E0K6-J11@9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_label/00_Y0121E0K6-J11@9.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_label/0_4BE21E07Q-K11@10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_label/0_4BE21E07Q-K11@10.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_label/1_B0N21E061-Q11@7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_label/1_B0N21E061-Q11@7.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_label/2_AM421E00G-K11@8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_label/2_AM421E00G-K11@8.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_label/3_31021E00B-A11@8.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_label/3_31021E00B-A11@8.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_label/4_AM421E00N-A11@11.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_label/4_AM421E00N-A11@11.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_label/5_ARC21E00O-E11@14.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_label/5_ARC21E00O-E11@14.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/00_Y0121E0K6-J11@9_synthesized_Simage.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/00_Y0121E0K6-J11@9_synthesized_Simage.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/00_Y0121E0K6-J11@9_synthesized_image_edgemap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/00_Y0121E0K6-J11@9_synthesized_image_edgemap.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/00_Y0121E0K6-J11@9_synthesized_image_edgemap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/00_Y0121E0K6-J11@9_synthesized_image_edgemap.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/0_4BE21E07Q-K11@10_synthesized_Simage.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/0_4BE21E07Q-K11@10_synthesized_Simage.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/0_4BE21E07Q-K11@10_synthesized_image_edgemap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/0_4BE21E07Q-K11@10_synthesized_image_edgemap.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/0_4BE21E07Q-K11@10_synthesized_image_edgemap.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/0_4BE21E07Q-K11@10_synthesized_image_edgemap.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/1_B0N21E061-Q11@7_synthesized_Simage.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/1_B0N21E061-Q11@7_synthesized_Simage.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/1_B0N21E061-Q11@7_synthesized_image_edgemap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/1_B0N21E061-Q11@7_synthesized_image_edgemap.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/1_B0N21E061-Q11@7_synthesized_image_edgemap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/1_B0N21E061-Q11@7_synthesized_image_edgemap.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/2_AM421E00G-K11@8_synthesized_Simage.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/2_AM421E00G-K11@8_synthesized_Simage.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/2_AM421E00G-K11@8_synthesized_image_edgemap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/2_AM421E00G-K11@8_synthesized_image_edgemap.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/2_AM421E00G-K11@8_synthesized_image_edgemap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/2_AM421E00G-K11@8_synthesized_image_edgemap.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/3_31021E00B-A11@8_synthesized_Simage.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/3_31021E00B-A11@8_synthesized_Simage.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/3_31021E00B-A11@8_synthesized_image_edgemap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/3_31021E00B-A11@8_synthesized_image_edgemap.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/3_31021E00B-A11@8_synthesized_image_edgemap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/3_31021E00B-A11@8_synthesized_image_edgemap.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/4_AM421E00N-A11@11_synthesized_Simage.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/4_AM421E00N-A11@11_synthesized_Simage.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/4_AM421E00N-A11@11_synthesized_image_edgemap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/4_AM421E00N-A11@11_synthesized_image_edgemap.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/4_AM421E00N-A11@11_synthesized_image_edgemap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/4_AM421E00N-A11@11_synthesized_image_edgemap.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/5_ARC21E00O-E11@14_synthesized_image_edgemap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/5_ARC21E00O-E11@14_synthesized_image_edgemap.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_query_ref_label/5_ARC21E00O-E11@14_synthesized_image_edgemap.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_query_ref_label/5_ARC21E00O-E11@14_synthesized_image_edgemap.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_ref_img/00_Y0121E0K7-A11@4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_ref_img/00_Y0121E0K7-A11@4.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_ref_img/0_31021E00B-Q11@12.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_ref_img/0_31021E00B-Q11@12.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_ref_img/1_4BE21E07T-I11@9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_ref_img/1_4BE21E07T-I11@9.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_ref_img/2_AN621D0PD-A11@10.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_ref_img/2_AN621D0PD-A11@10.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_ref_img/3_B0N21E062-M11@7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_ref_img/3_B0N21E062-M11@7.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_ref_img/4_DP521E1CL-C11@10.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_ref_img/4_DP521E1CL-C11@10.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_ref_img/5_A0F21E034-M11@10.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_ref_img/5_A0F21E034-M11@10.jpg -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_ref_label/00_Y0121E0K7-A11@4.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_ref_label/00_Y0121E0K7-A11@4.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_ref_label/0_31021E00B-Q11@12.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_ref_label/0_31021E00B-Q11@12.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_ref_label/1_4BE21E07T-I11@9.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_ref_label/1_4BE21E07T-I11@9.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_ref_label/2_AN621D0PD-A11@10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_ref_label/2_AN621D0PD-A11@10.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_ref_label/3_B0N21E062-M11@7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_ref_label/3_B0N21E062-M11@7.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_ref_label/4_DP521E1CL-C11@10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_ref_label/4_DP521E1CL-C11@10.png -------------------------------------------------------------------------------- /inference/datasets/zalando_data/test_ref_label/5_A0F21E034-M11@10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/datasets/zalando_data/test_ref_label/5_A0F21E034-M11@10.png -------------------------------------------------------------------------------- /inference/models/base_model.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import sys 4 | 5 | class BaseModel(torch.nn.Module): 6 | def name(self): 7 | return 'BaseModel' 8 | 9 | def initialize(self, opt): 10 | self.opt = opt 11 | self.gpu_ids = opt.gpu_ids 12 | self.isTrain = opt.isTrain 13 | self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor 14 | self.save_dir = os.path.join(opt.checkpoints_dir, opt.name) 15 | 16 | 
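# Note (descriptive only): the methods below are mostly placeholder stubs that
# concrete models (the ov_pix2pixHD variants imported by models.py) override;
# BaseModel itself mainly contributes the checkpoint save/load helpers further down.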
def set_input(self, input): 17 | self.input = input 18 | 19 | def forward(self): 20 | pass 21 | 22 | # used in test time, no backprop 23 | def test(self): 24 | pass 25 | 26 | def get_image_paths(self): 27 | pass 28 | 29 | def optimize_parameters(self): 30 | pass 31 | 32 | def get_current_visuals(self): 33 | return self.input 34 | 35 | def get_current_errors(self): 36 | return {} 37 | 38 | def save(self, label): 39 | pass 40 | 41 | # helper saving function that can be used by subclasses 42 | def save_network(self, network, network_label, epoch_label, gpu_ids): 43 | save_filename = '%s_net_%s.pth' % (epoch_label, network_label) 44 | save_path = os.path.join(self.save_dir, save_filename) 45 | torch.save(network.cpu().state_dict(), save_path) 46 | if len(gpu_ids) and torch.cuda.is_available(): 47 | network.cuda() 48 | 49 | # helper loading function that can be used by subclasses 50 | def load_network(self, network, network_label, epoch_label, save_dir=''): 51 | save_filename = '%s_net_%s.pth' % (epoch_label, network_label) 52 | if not save_dir: 53 | save_dir = self.save_dir 54 | save_path = os.path.join(save_dir, save_filename) 55 | print(save_path) 56 | if not os.path.isfile(save_path): 57 | print('%s does not exist yet!' % save_path) 58 | if network_label == 'G': 59 | raise RuntimeError('Generator must exist!') 60 | else: 61 | #network.load_state_dict(torch.load(save_path)) 62 | try: 63 | network.load_state_dict(torch.load(save_path)) 64 | except Exception: 65 | pretrained_dict = torch.load(save_path) 66 | model_dict = network.state_dict() 67 | try: 68 | pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict} 69 | network.load_state_dict(pretrained_dict) 70 | if self.opt.verbose: 71 | print('Pretrained network %s has excessive layers; only loading layers that are used' % network_label) 72 | except Exception: 73 | print('Pretrained network %s has fewer layers; the following are not initialized:' % network_label) 74 | for k, v in pretrained_dict.items(): 75 | if v.size() == model_dict[k].size(): 76 | model_dict[k] = v 77 | 78 | if sys.version_info >= (3,0): 79 | not_initialized = set() 80 | else: 81 | from sets import Set 82 | not_initialized = Set() 83 | 84 | for k, v in model_dict.items(): 85 | if k not in pretrained_dict or v.size() != pretrained_dict[k].size(): 86 | not_initialized.add(k.split('.')[0]) 87 | 88 | print(sorted(not_initialized)) 89 | network.load_state_dict(model_dict) 90 | 91 | def update_learning_rate(self): 92 | pass 93 | -------------------------------------------------------------------------------- /inference/models/models.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def create_model(opt): 5 | if opt.model == 'pix2pixHD': 6 | from .pix2pixHD_model import Pix2PixHDModel, InferenceModel 7 | if opt.isTrain: 8 | model = Pix2PixHDModel() 9 | else: 10 | model = InferenceModel() 11 | elif opt.model == 'ov_pix2pixHD': 12 | from .ov_pix2pixHD_model import Pix2PixHDModel, InferenceModel 13 | if opt.isTrain: 14 | model = Pix2PixHDModel() 15 | else: 16 | model = InferenceModel() 17 | elif opt.model == 'ov_pix2pixHD_online': 18 | from .ov_pix2pixHD_model_online import Pix2PixHDModel 19 | if opt.isTrain: 20 | model = Pix2PixHDModel() 21 | else: 22 | from .ui_model import UIModel 23 | model = UIModel() 24 | 25 | print('--------- model used ---------', opt.model) # TODO: also log whether the model is in inference or train mode 26 | model.initialize(opt) 27 | 28 | if opt.verbose: 29 | print("model [%s] was created" % (model.name())) 30 | 31 | if opt.isTrain and len(opt.gpu_ids) and not opt.fp16: 32 | model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids) 33 | 34 | return model 35 |
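For orientation, here is a minimal sketch of how this factory is typically driven at test time. It assumes the stock pix2pixHD-style BaseOptions.parse() helper (base_options.py is not reproduced at this point in the listing), and every option value shown is illustrative, not a verified default of this project:

# Hedged sketch: BaseOptions.parse() and these attributes are assumed to
# behave as in stock pix2pixHD; the values are illustrative only.
from options.test_options import TestOptions
from models.models import create_model

opt = TestOptions().parse(save=False)   # TestOptions sets isTrain = False
opt.model = 'ov_pix2pixHD'              # routes to InferenceModel above
model = create_model(opt)               # initialize(), then '%s_net_%s.pth' weights are loaded

Since isTrain is False here, the DataParallel wrap at the end of create_model() is skipped and the bare InferenceModel is returned.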
-------------------------------------------------------------------------------- /inference/options/test_options.py: -------------------------------------------------------------------------------- 1 | from .base_options import BaseOptions 2 | 3 | class TestOptions(BaseOptions): 4 | def initialize(self): 5 | BaseOptions.initialize(self) 6 | self.parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.') 7 | self.parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.') 8 | self.parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images') 9 | self.parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc') 10 | self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') 11 | self.parser.add_argument('--how_many', type=int, default=50, help='how many test images to run') 12 | self.parser.add_argument('--cluster_path', type=str, default='features_clustered_010.npy', help='the path for clustered results of encoded features') 13 | self.parser.add_argument('--use_encoded_image', action='store_true', help='if specified, encode the real image to get the feature map') 14 | self.parser.add_argument("--export_onnx", type=str, help="export ONNX model to a given file") 15 | self.parser.add_argument("--engine", type=str, help="run serialized TRT engine") 16 | self.parser.add_argument("--onnx", type=str, help="run ONNX model via TRT") 17 | self.isTrain = False 18 | -------------------------------------------------------------------------------- /inference/options/train_options.py: -------------------------------------------------------------------------------- 1 | from .base_options import BaseOptions 2 | 3 | class TrainOptions(BaseOptions): 4 | def initialize(self): 5 | BaseOptions.initialize(self) 6 | # for displays 7 | self.parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen') 8 | self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console') 9 | self.parser.add_argument('--save_latest_freq', type=int, default=1000, help='frequency of saving the latest results') 10 | self.parser.add_argument('--save_epoch_freq', type=int, default=10, help='frequency of saving checkpoints at the end of epochs') 11 | self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/') 12 | self.parser.add_argument('--debug', action='store_true', help='only do one epoch and displays at each iteration') 13 | 14 | # for training 15 | self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') 16 | self.parser.add_argument('--load_pretrain', type=str, default='', help='load the pretrained model from the specified location') 17 | self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load?
set to latest to use latest cached model') 18 | self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') 19 | self.parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate') 20 | self.parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero') 21 | self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') 22 | self.parser.add_argument('--lr', type=float, default=0.001, help='initial learning rate for adam') 23 | 24 | # for discriminators 25 | self.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use') 26 | self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers') 27 | self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer') 28 | self.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss') 29 | self.parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss') 30 | self.parser.add_argument('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss') 31 | self.parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN') 32 | self.parser.add_argument('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images') 33 | self.parser.add_argument('--no_ce_loss', action='store_true', help='if specified, do *not* use ce matching loss') 34 | self.isTrain = True 35 | -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/00_Y0121E0K6-J11@9_generated_parse_map.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/00_Y0121E0K6-J11@9_generated_parse_map.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/00_Y0121E0K6-J11@9_query_img.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/00_Y0121E0K6-J11@9_query_img.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/00_Y0121E0K6-J11@9_real_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/00_Y0121E0K6-J11@9_real_image.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/00_Y0121E0K6-J11@9_synthesized_image.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/00_Y0121E0K6-J11@9_synthesized_image.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/0_4BE21E07Q-K11@10_generated_parse_map.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/0_4BE21E07Q-K11@10_generated_parse_map.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/0_4BE21E07Q-K11@10_query_img.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/0_4BE21E07Q-K11@10_query_img.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/0_4BE21E07Q-K11@10_real_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/0_4BE21E07Q-K11@10_real_image.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/0_4BE21E07Q-K11@10_synthesized_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/0_4BE21E07Q-K11@10_synthesized_image.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/1_B0N21E061-Q11@7_generated_parse_map.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/1_B0N21E061-Q11@7_generated_parse_map.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/1_B0N21E061-Q11@7_query_img.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/1_B0N21E061-Q11@7_query_img.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/1_B0N21E061-Q11@7_real_image.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/1_B0N21E061-Q11@7_real_image.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/1_B0N21E061-Q11@7_synthesized_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/1_B0N21E061-Q11@7_synthesized_image.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/2_AM421E00G-K11@8_generated_parse_map.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/2_AM421E00G-K11@8_generated_parse_map.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/2_AM421E00G-K11@8_query_img.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/2_AM421E00G-K11@8_query_img.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/2_AM421E00G-K11@8_real_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/2_AM421E00G-K11@8_real_image.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/2_AM421E00G-K11@8_synthesized_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/2_AM421E00G-K11@8_synthesized_image.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/3_31021E00B-A11@8_generated_parse_map.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/3_31021E00B-A11@8_generated_parse_map.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/3_31021E00B-A11@8_query_img.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/3_31021E00B-A11@8_query_img.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/3_31021E00B-A11@8_real_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/3_31021E00B-A11@8_real_image.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/3_31021E00B-A11@8_synthesized_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/3_31021E00B-A11@8_synthesized_image.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/4_AM421E00N-A11@11_generated_parse_map.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/4_AM421E00N-A11@11_generated_parse_map.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/4_AM421E00N-A11@11_query_img.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/4_AM421E00N-A11@11_query_img.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/4_AM421E00N-A11@11_real_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/4_AM421E00N-A11@11_real_image.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/4_AM421E00N-A11@11_synthesized_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/4_AM421E00N-A11@11_synthesized_image.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/5_ARC21E00O-E11@14_generated_parse_map.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/5_ARC21E00O-E11@14_generated_parse_map.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/5_ARC21E00O-E11@14_query_img.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/5_ARC21E00O-E11@14_query_img.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/5_ARC21E00O-E11@14_real_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/5_ARC21E00O-E11@14_real_image.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_app/test_latest/images/5_ARC21E00O-E11@14_synthesized_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_app/test_latest/images/5_ARC21E00O-E11@14_synthesized_image.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/00_Y0121E0K6-J11@9_query.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/00_Y0121E0K6-J11@9_query.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/00_Y0121E0K6-J11@9_query_ref_mixed.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/00_Y0121E0K6-J11@9_query_ref_mixed.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/00_Y0121E0K6-J11@9_ref.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/00_Y0121E0K6-J11@9_ref.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/00_Y0121E0K6-J11@9_synthesized_Simage.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/00_Y0121E0K6-J11@9_synthesized_Simage.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/00_Y0121E0K6-J11@9_synthesized_image_edgemap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/00_Y0121E0K6-J11@9_synthesized_image_edgemap.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/0_4BE21E07Q-K11@10_query.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/0_4BE21E07Q-K11@10_query.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/0_4BE21E07Q-K11@10_query_ref_mixed.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/0_4BE21E07Q-K11@10_query_ref_mixed.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/0_4BE21E07Q-K11@10_ref.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/0_4BE21E07Q-K11@10_ref.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/0_4BE21E07Q-K11@10_synthesized_Simage.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/0_4BE21E07Q-K11@10_synthesized_Simage.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/0_4BE21E07Q-K11@10_synthesized_image_edgemap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/0_4BE21E07Q-K11@10_synthesized_image_edgemap.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/1_B0N21E061-Q11@7_query.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/1_B0N21E061-Q11@7_query.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/1_B0N21E061-Q11@7_query_ref_mixed.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/1_B0N21E061-Q11@7_query_ref_mixed.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/1_B0N21E061-Q11@7_ref.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/1_B0N21E061-Q11@7_ref.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/1_B0N21E061-Q11@7_synthesized_Simage.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/1_B0N21E061-Q11@7_synthesized_Simage.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/1_B0N21E061-Q11@7_synthesized_image_edgemap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/1_B0N21E061-Q11@7_synthesized_image_edgemap.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/2_AM421E00G-K11@8_query.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/2_AM421E00G-K11@8_query.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/2_AM421E00G-K11@8_query_ref_mixed.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/2_AM421E00G-K11@8_query_ref_mixed.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/2_AM421E00G-K11@8_ref.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/2_AM421E00G-K11@8_ref.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/2_AM421E00G-K11@8_synthesized_Simage.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/2_AM421E00G-K11@8_synthesized_Simage.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/2_AM421E00G-K11@8_synthesized_image_edgemap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/2_AM421E00G-K11@8_synthesized_image_edgemap.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/3_31021E00B-A11@8_query.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/3_31021E00B-A11@8_query.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/3_31021E00B-A11@8_query_ref_mixed.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/3_31021E00B-A11@8_query_ref_mixed.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/3_31021E00B-A11@8_ref.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/3_31021E00B-A11@8_ref.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/3_31021E00B-A11@8_synthesized_Simage.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/3_31021E00B-A11@8_synthesized_Simage.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/3_31021E00B-A11@8_synthesized_image_edgemap.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/3_31021E00B-A11@8_synthesized_image_edgemap.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/4_AM421E00N-A11@11_query.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/4_AM421E00N-A11@11_query.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/4_AM421E00N-A11@11_query_ref_mixed.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/4_AM421E00N-A11@11_query_ref_mixed.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/4_AM421E00N-A11@11_ref.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/4_AM421E00N-A11@11_ref.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/4_AM421E00N-A11@11_synthesized_Simage.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/4_AM421E00N-A11@11_synthesized_Simage.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/4_AM421E00N-A11@11_synthesized_image_edgemap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/4_AM421E00N-A11@11_synthesized_image_edgemap.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/5_ARC21E00O-E11@14_query.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/5_ARC21E00O-E11@14_query.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/5_ARC21E00O-E11@14_query_ref_mixed.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/5_ARC21E00O-E11@14_query_ref_mixed.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/5_ARC21E00O-E11@14_ref.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/5_ARC21E00O-E11@14_ref.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/5_ARC21E00O-E11@14_synthesized_Simage.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/5_ARC21E00O-E11@14_synthesized_Simage.jpg -------------------------------------------------------------------------------- /inference/results/zalando_final_shape/test_latest/images/5_ARC21E00O-E11@14_synthesized_image_edgemap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/inference/results/zalando_final_shape/test_latest/images/5_ARC21E00O-E11@14_synthesized_image_edgemap.jpg -------------------------------------------------------------------------------- /inference/util/html.py: -------------------------------------------------------------------------------- 1 | import dominate 2 | from dominate.tags import * 3 | import os 4 | 5 | 6 | class HTML: 7 | def __init__(self, web_dir, title, refresh=0): 8 | self.title = title 9 | self.web_dir = web_dir 10 | self.img_dir = os.path.join(self.web_dir, 'images') 11 | if not os.path.exists(self.web_dir): 12 | os.makedirs(self.web_dir) 13 | if not os.path.exists(self.img_dir): 14 | os.makedirs(self.img_dir) 15 | 16 | self.doc = dominate.document(title=title) 17 | if refresh > 0: 18 | with self.doc.head: 19 | meta(http_equiv="refresh", content=str(refresh)) 20 | 21 | def get_image_dir(self): 22 | return self.img_dir 23 | 24 | def add_header(self, str): 25 | with self.doc: 26 | h3(str) 27 | 28 | def add_table(self, border=1): 29 | self.t = table(border=border, style="table-layout: fixed;") 30 | self.doc.add(self.t) 31 | 32 | def add_images(self, ims, txts, links, width=512): 33 | self.add_table() 34 | with self.t: 35 | with tr(): 36 | for im, txt, link in zip(ims, txts, links): 37 | with td(style="word-wrap: break-word;", halign="center", valign="top"): 38 | with p(): 39 | with a(href=os.path.join('images', link)): 40 | img(style="width:%dpx" % (width), src=os.path.join('images', im)) 41 | br() 42 | p(txt) 43 | 44 | def save(self): 45 | html_file = '%s/index.html' % self.web_dir 46 | f = open(html_file, 'wt') 47 | f.write(self.doc.render()) 48 | f.close() 49 | 50 | 51 | if __name__ == '__main__': 52 | html = HTML('web/', 'test_html') 53 | html.add_header('hello world') 54 | 55 | ims = [] 56 | txts = [] 57 | links = [] 58 | for n in range(4): 59 | ims.append('image_%d.jpg' % n) 60 | txts.append('text_%d' % n) 61 | 
links.append('image_%d.jpg' % n) 62 | html.add_images(ims, txts, links) 63 | html.save() 64 | -------------------------------------------------------------------------------- /inference/util/image_pool.py: -------------------------------------------------------------------------------- 1 | import random 2 | import torch 3 | from torch.autograd import Variable 4 | class ImagePool(): 5 | def __init__(self, pool_size): 6 | self.pool_size = pool_size 7 | if self.pool_size > 0: 8 | self.num_imgs = 0 9 | self.images = [] 10 | 11 | def query(self, images): 12 | if self.pool_size == 0: 13 | return images 14 | return_images = [] 15 | for image in images.data: 16 | image = torch.unsqueeze(image, 0) 17 | if self.num_imgs < self.pool_size: 18 | self.num_imgs = self.num_imgs + 1 19 | self.images.append(image) 20 | return_images.append(image) 21 | else: 22 | p = random.uniform(0, 1) 23 | if p > 0.5: 24 | random_id = random.randint(0, self.pool_size-1) 25 | tmp = self.images[random_id].clone() 26 | self.images[random_id] = image 27 | return_images.append(tmp) 28 | else: 29 | return_images.append(image) 30 | return_images = Variable(torch.cat(return_images, 0)) 31 | return return_images 32 | -------------------------------------------------------------------------------- /inference/util/util.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import torch 3 | import numpy as np 4 | from PIL import Image 5 | import numpy as np 6 | import os 7 | import cv2 8 | 9 | # Converts a Tensor into a Numpy array 10 | # |imtype|: the desired type of the converted numpy array 11 | def tensor2im(image_tensor, imtype=np.uint8, normalize=True): 12 | if isinstance(image_tensor, list): 13 | image_numpy = [] 14 | for i in range(len(image_tensor)): 15 | image_numpy.append(tensor2im(image_tensor[i], imtype, normalize)) 16 | return image_numpy 17 | image_numpy = image_tensor.cpu().float().numpy() 18 | if normalize: 19 | image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 20 | else: 21 | image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0 22 | image_numpy = np.clip(image_numpy, 0, 255) 23 | if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3: 24 | image_numpy = image_numpy[:,:,0] 25 | return image_numpy.astype(imtype) 26 | 27 | # Converts a one-hot tensor into a colorful label map 28 | def tensor2label(label_tensor, n_label, imtype=np.uint8): 29 | if n_label == 0: 30 | return tensor2im(label_tensor, imtype) 31 | label_tensor = label_tensor.float() 32 | if label_tensor.size()[0] > 1: 33 | label_tensor = label_tensor.max(0, keepdim=True)[1] 34 | label_tensor = Colorize(n_label)(label_tensor) 35 | label_numpy = np.transpose(label_tensor.numpy(), (1, 2, 0)) 36 | return label_numpy.astype(imtype) 37 | 38 | def tensor2edgemap(label_tensor, imtype=np.uint8): 39 | edgemap = torch.argmax(label_tensor,dim=0,keepdim=True) 40 | edgemap = edgemap.squeeze(0) 41 | edgemap = edgemap.cpu().float().numpy() 42 | return edgemap.astype(imtype) 43 | 44 | 45 | def save_image(image_numpy, image_path): 46 | image_pil = Image.fromarray(image_numpy) 47 | image_pil.save(image_path) 48 | 49 | def mkdirs(paths): 50 | if isinstance(paths, list) and not isinstance(paths, str): 51 | for path in paths: 52 | mkdir(path) 53 | else: 54 | mkdir(paths) 55 | 56 | def mkdir(path): 57 | if not os.path.exists(path): 58 | os.makedirs(path) 59 | 60 | ############################################################################### 61 | # Code from 62 | # 
https://github.com/ycszen/pytorch-seg/blob/master/transform.py 63 | # Modified so it complies with the human-parsing (LIP-style) label map colors 64 | ############################################################################### 65 | def uint82bin(n, count=8): 66 | """returns the binary of integer n, count refers to the number of bits""" 67 | return ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)]) 68 | 69 | def labelcolormap(N): 70 | if N == 20: # 20-class human-parsing palette 71 | label_colours = [(0,0,0) 72 | # 0=Background 73 | ,(128,0,0),(255,0,0),(0,85,0),(170,0,51),(255,85,0) 74 | # 1=Hat, 2=Hair, 3=Glove, 4=Sunglasses, 5=UpperClothes 75 | ,(0,0,85),(0,119,221),(85,85,0),(0,85,85),(85,51,0) 76 | # 6=Dress, 7=Coat, 8=Socks, 9=Pants, 10=Jumpsuits 77 | ,(52,86,128),(0,128,0),(0,0,255),(51,170,221),(0,255,255) 78 | # 11=Scarf, 12=Skirt, 13=Face, 14=LeftArm, 15=RightArm 79 | ,(85,255,170),(170,255,85),(255,255,0),(255,170,0)] 80 | # 16=LeftLeg, 17=RightLeg, 18=LeftShoe, 19=RightShoe 81 | cmap = np.array(label_colours,dtype=np.uint8) 82 | else: 83 | cmap = np.zeros((N, 3), dtype=np.uint8) 84 | for i in range(N): 85 | r, g, b = 0, 0, 0 86 | id = i 87 | for j in range(7): 88 | str_id = uint82bin(id) 89 | r = r ^ (np.uint8(str_id[-1]) << (7-j)) 90 | g = g ^ (np.uint8(str_id[-2]) << (7-j)) 91 | b = b ^ (np.uint8(str_id[-3]) << (7-j)) 92 | id = id >> 3 93 | cmap[i, 0] = r 94 | cmap[i, 1] = g 95 | cmap[i, 2] = b 96 | return cmap 97 | 98 | class Colorize(object): 99 | def __init__(self, n=20): 100 | self.cmap = labelcolormap(n) 101 | self.cmap = torch.from_numpy(self.cmap[:n]) 102 | 103 | def __call__(self, gray_image): 104 | size = gray_image.size() 105 | color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0) 106 | 107 | for label in range(0, len(self.cmap)): 108 | mask = (label == gray_image[0]).cpu() 109 | color_image[0][mask] = self.cmap[label][0] 110 | color_image[1][mask] = self.cmap[label][1] 111 | color_image[2][mask] = self.cmap[label][2] 112 | 113 | return color_image 114 | -------------------------------------------------------------------------------- /inference/util/visualizer.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import ntpath 4 | import time 5 | from . import util 6 | from . import html 7 | # import scipy.misc 8 | from PIL import Image 9 | try: 10 | from StringIO import StringIO # Python 2.7 11 | except ImportError: 12 | from io import BytesIO # Python 3.x 13 | 14 | class Visualizer(): 15 | def __init__(self, opt): 16 | # self.opt = opt 17 | self.tf_log = opt.tf_log 18 | self.use_html = opt.isTrain and not opt.no_html 19 | self.win_size = opt.display_winsize 20 | self.name = opt.name 21 | if self.tf_log: 22 | import tensorflow as tf 23 | self.tf = tf 24 | self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, 'logs') 25 | #self.writer = tf.summary.FileWriter(self.log_dir) 26 | self.writer = tf.summary.create_file_writer(self.log_dir) 27 | 28 | if self.use_html: 29 | self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web') 30 | self.img_dir = os.path.join(self.web_dir, 'images') 31 | print('create web directory %s...'
% self.web_dir) 32 | util.mkdirs([self.web_dir, self.img_dir]) 33 | self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt') 34 | with open(self.log_name, "a") as log_file: 35 | now = time.strftime("%c") 36 | log_file.write('================ Training Loss (%s) ================\n' % now) 37 | 38 | # |visuals|: dictionary of images to display or save 39 | def display_current_results(self, visuals, epoch, step): 40 | # if self.tf_log: # show images in tensorboard output 41 | # img_summaries = [] 42 | # for label, image_numpy in visuals.items(): 43 | # # Write the image to a string 44 | # try: 45 | # s = StringIO() 46 | # except: 47 | # s = BytesIO() 48 | # Image.fromarray(image_numpy).save(s, format="jpeg") 49 | # # Create an Image object 50 | # img_sum = self.tf.summary.image(encoded_image_string=s.getvalue(), height=image_numpy.shape[0], width=image_numpy.shape[1]) 51 | # # Create a Summary value 52 | # img_summaries.append(self.tf.summary.scaler(tag=label, image=img_sum)) 53 | 54 | # # Create and write Summary 55 | # summary = self.tf.summary(value=img_summaries) 56 | # self.writer.add_summary(summary, step) 57 | 58 | if self.use_html: # save images to a html file 59 | for label, image_numpy in visuals.items(): 60 | if isinstance(image_numpy, list): 61 | for i in range(len(image_numpy)): 62 | img_path = os.path.join(self.img_dir, 'epoch%.3d_%s_%d.jpg' % (epoch, label, i)) 63 | util.save_image(image_numpy[i], img_path) 64 | else: 65 | img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.jpg' % (epoch, label)) 66 | util.save_image(image_numpy, img_path) 67 | 68 | # update website 69 | webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=30) 70 | for n in range(epoch, 0, -1): 71 | webpage.add_header('epoch [%d]' % n) 72 | ims = [] 73 | txts = [] 74 | links = [] 75 | 76 | for label, image_numpy in visuals.items(): 77 | if isinstance(image_numpy, list): 78 | for i in range(len(image_numpy)): 79 | img_path = 'epoch%.3d_%s_%d.jpg' % (n, label, i) 80 | ims.append(img_path) 81 | txts.append(label+str(i)) 82 | links.append(img_path) 83 | else: 84 | img_path = 'epoch%.3d_%s.jpg' % (n, label) 85 | ims.append(img_path) 86 | txts.append(label) 87 | links.append(img_path) 88 | if len(ims) < 10: 89 | webpage.add_images(ims, txts, links, width=self.win_size) 90 | else: 91 | num = int(round(len(ims)/2.0)) 92 | webpage.add_images(ims[:num], txts[:num], links[:num], width=self.win_size) 93 | webpage.add_images(ims[num:], txts[num:], links[num:], width=self.win_size) 94 | webpage.save() 95 | 96 | # errors: dictionary of error labels and values 97 | def plot_current_errors(self, errors, step): 98 | if self.tf_log: 99 | with self.writer.as_default(): 100 | for tag, value in errors.items(): 101 | self.tf.summary.scalar(tag, value, step=step) 102 | self.writer.flush() 103 | # summary = self.tf.summary(value=[self.tf.summary.Value(tag=tag, simple_value=value)]) 104 | # self.writer.add_summary(summary, step) 105 | 106 | # errors: same format as |errors| of plotCurrentErrors 107 | def print_current_errors(self, epoch, i, errors, t): 108 | message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t) 109 | for k, v in errors.items(): 110 | if v != 0: 111 | message += '%s: %.3f ' % (k, v) 112 | 113 | print(message) 114 | with open(self.log_name, "a") as log_file: 115 | log_file.write('%s\n' % message) 116 | 117 | # save image to the disk 118 | def save_images(self, webpage, visuals, image_path): 119 | image_dir = webpage.get_image_dir() 120 | short_path = 
ntpath.basename(image_path[0]) 121 | name = os.path.splitext(short_path)[0] 122 | 123 | webpage.add_header(name) 124 | ims = [] 125 | txts = [] 126 | links = [] 127 | 128 | for label, image_numpy in visuals.items(): 129 | image_name = '%s_%s.png' % (name, label) 130 | save_path = os.path.join(image_dir, image_name) 131 | util.save_image(image_numpy, save_path) 132 | 133 | ims.append(image_name) 134 | txts.append(label) 135 | links.append(image_name) 136 | webpage.add_images(ims, txts, links, width=self.win_size) 137 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # Image Based Virtual Try-on Network from Unpaired Data 2 | PyTorch implementation of the paper "Image Based Virtual Try-on Network from Unpaired Data" 3 | 4 | [Paper Link](https://assets.amazon.science/1a/2b/7a4dd8264ce19a959559da799aff/scipub-1281.pdf) 5 | 6 | This project has three sections: 7 | - Shape Generation section 8 | - Appearance Generation/Refinement section 9 | - Inference section 10 | 11 | Inference Flow 12 | ------------------------------------------------------------------ 13 | 14 | ![Inference section](https://i.imgur.com/eP7LsWn.png) 15 | 16 | Training Flow 17 | ------------------------------------------------------------------ 18 | 19 | ![Training section](https://i.imgur.com/ZrfMGze.png) 20 |
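To make the flows above concrete, here is a hedged, minimal sketch of the two-stage inference pipeline. shape_G and appearance_G are hypothetical handles for the two trained generators (in the code they come out of the create_model() factory shown earlier), the batch keys follow the TestDataset in shape_generation/data/ov_test_dataset.py further below, and query_img / ref_img are assumed extra inputs for the appearance stage, not keys that the shape dataset actually yields:

import torch

def run_tryon_inference(shape_G, appearance_G, batch, query_img, ref_img):
    # Stage 1 (shape generation): predict the try-on parse map from the
    # query person's 20-channel parse, the 27-channel DensePose embedding
    # and the reference garment's parse. (Hypothetical wiring: the real
    # models consume these tensors through their own forward signatures.)
    shape_input = torch.cat([batch['query'], batch['dense_map'], batch['ref']], dim=1)
    generated_parse_map = shape_G(shape_input)
    # Stage 2 (appearance generation): render the final try-on image
    # conditioned on the generated parse map.
    appearance_input = torch.cat([generated_parse_map, query_img, ref_img], dim=1)
    return appearance_G(appearance_input)

The output names mirror the files under results/ in this listing (generated_parse_map, synthesized_image).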
91 | python-dateutil==2.8.1 92 | pytoml==0.1.21 93 | pytorch-metric-learning==0.9.88 94 | pytz==2020.1 95 | PyWavelets==1.1.1 96 | PyYAML==5.3.1 97 | pyzmq==19.0.1 98 | requests==2.22.0 99 | requests-oauthlib==1.3.0 100 | retrying==1.3.3 101 | rsa==4.6 102 | scikit-image==0.17.2 103 | scikit-learn==0.23.1 104 | scipy==1.4.1 105 | Send2Trash==1.5.0 106 | sentry-sdk==0.16.3 107 | shortuuid==1.0.1 108 | six==1.14.0 109 | smmap==3.0.4 110 | subprocess32==3.5.4 111 | tensorboard==2.2.2 112 | tensorboard-plugin-wit==1.7.0 113 | tensorboardX==2.1 114 | tensorflow==2.2.0 115 | tensorflow-estimator==2.2.0 116 | termcolor==1.1.0 117 | terminado==0.8.3 118 | testpath==0.4.4 119 | threadpoolctl==2.1.0 120 | tifffile==2020.7.4 121 | torch==1.5.1 122 | torchsummaryX==1.3.0 123 | torchvision==0.6.1 124 | torchviz==0.0.1 125 | tornado==6.0.4 126 | tqdm==4.47.0 127 | traitlets==4.3.3 128 | urllib3==1.25.8 129 | wandb==0.9.4 130 | watchdog==0.10.3 131 | wcwidth==0.2.5 132 | webencodings==0.5.1 133 | Werkzeug==1.0.1 134 | widgetsnbextension==3.5.1 135 | wrapt==1.12.1 136 | yapf==0.30.0 137 | -------------------------------------------------------------------------------- /shape_generation/data/image_folder.py: -------------------------------------------------------------------------------- 1 | ############################################################################### 2 | # Code from 3 | # https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py 4 | # Modified the original code so that it also loads images from the current 5 | # directory as well as the subdirectories 6 | ############################################################################### 7 | import torch.utils.data as data 8 | from PIL import Image 9 | import os 10 | 11 | IMG_EXTENSIONS = [ 12 | '.jpg', '.JPG', '.jpeg', '.JPEG', 13 | '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff' 14 | ] 15 | 16 | 17 | def is_image_file(filename): 18 | return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) 19 | 20 | 21 | def make_dataset(dir): 22 | images = [] 23 | assert os.path.isdir(dir), '%s is not a valid directory' % dir 24 | 25 | for root, _, fnames in sorted(os.walk(dir)): 26 | for fname in fnames: 27 | if is_image_file(fname): 28 | path = os.path.join(root, fname) 29 | images.append(path) 30 | 31 | return images 32 | 33 | 34 | def default_loader(path): 35 | return Image.open(path).convert('RGB') 36 | 37 | 38 | class ImageFolder(data.Dataset): 39 | 40 | def __init__(self, root, transform=None, return_paths=False, 41 | loader=default_loader): 42 | imgs = make_dataset(root) 43 | if len(imgs) == 0: 44 | raise(RuntimeError("Found 0 images in: " + root + "\n" 45 | "Supported image extensions are: " + 46 | ",".join(IMG_EXTENSIONS))) 47 | 48 | self.root = root 49 | self.imgs = imgs 50 | self.transform = transform 51 | self.return_paths = return_paths 52 | self.loader = loader 53 | 54 | def __getitem__(self, index): 55 | path = self.imgs[index] 56 | img = self.loader(path) 57 | if self.transform is not None: 58 | img = self.transform(img) 59 | if self.return_paths: 60 | return img, path 61 | else: 62 | return img 63 | 64 | def __len__(self): 65 | return len(self.imgs) 66 | -------------------------------------------------------------------------------- /shape_generation/data/ov_test_dataset.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.utils.data.dataset import Dataset 3 | 4 | from data.image_folder import make_dataset 5 | 6 | import os 7 | 
from PIL import Image 8 | from glob import glob as glob 9 | import numpy as np 10 | 11 | 12 | class TestDataset(Dataset): 13 | 14 | def __init__(self, opt, augment): 15 | self.opt = opt 16 | self.root = opt.dataroot 17 | self.transforms = augment 18 | 19 | # input A (label maps) 20 | dir_A = '_query_label' 21 | self.dir_A = os.path.join(opt.dataroot, opt.phase + dir_A) 22 | self.A_paths = sorted(make_dataset(self.dir_A)) 23 | 24 | # input B (label images) 25 | dir_B = '_ref_label' 26 | self.dir_B = os.path.join(opt.dataroot, opt.phase + dir_B) 27 | self.B_paths = sorted(make_dataset(self.dir_B)) 28 | 29 | # densepose maps 30 | self.dir_densepose = os.path.join( 31 | opt.dataroot, opt.phase + '_densepose') 32 | self.densepose_paths = sorted(glob(self.dir_densepose + '/*')) 33 | 34 | self.dataset_size = len(self.A_paths) 35 | 36 | def __getitem__(self, index): 37 | 38 | # input A (label maps) 39 | A_path = self.A_paths[index] 40 | A = self.parsing_embedding(A_path, 'seg') 41 | A_tensor = torch.from_numpy(A) 42 | 43 | # input B (label maps) 44 | B_path = self.B_paths[index] 45 | B = self.parsing_embedding(B_path, 'seg') 46 | B_tensor = torch.from_numpy(B) 47 | 48 | # densepose maps 49 | dense_path = self.densepose_paths[index] 50 | dense_img = np.load(dense_path).astype('uint8') # channel last 51 | dense_img_parts_embeddings = self.parsing_embedding( 52 | dense_img[:, :, 0], 'densemap') 53 | dense_img_parts_embeddings = np.transpose(dense_img_parts_embeddings,axes= (1,2,0)) 54 | dense_img_final = np.concatenate((dense_img_parts_embeddings,dense_img[:, :, 1:]), axis=-1) # channel(27), H, W 55 | dense_img_final = torch.from_numpy(np.transpose(dense_img_final,axes= (2,0,1))) 56 | 57 | input_dict = {'query': A_tensor, 'dense_map': dense_img_final, 58 | 'ref': B_tensor, 'query_path': A_path,'ref_path': B_path} 59 | 60 | return input_dict 61 | 62 | def custom_transform(self, input_image, per_channel_transform=True, input_type="densepose"): 63 | 64 | if per_channel_transform: 65 | num_channel_img = input_image.shape[0] 66 | tform_input_image_np = np.zeros( 67 | shape=input_image.shape, dtype=input_image.dtype) 68 | if input_type == "densepose": 69 | for i in range(num_channel_img): 70 | if i > 24: 71 | tform_input_image_np[i] = self.transforms['1']( 72 | input_image[i].astype('uint8')) 73 | else: 74 | tform_input_image_np[i] = input_image[i] 75 | 76 | return torch.from_numpy(tform_input_image_np) 77 | 78 | def parsing_embedding(self, parse_obj, parse_type): 79 | if parse_type == "seg": 80 | parse = Image.open(parse_obj) 81 | parse = np.array(parse) 82 | parse_channel = 20 83 | 84 | elif parse_type == "densemap": 85 | parse = np.array(parse_obj) 86 | parse_channel = 25 87 | 88 | parse_emb = [] 89 | 90 | for i in range(parse_channel): 91 | parse_emb.append((parse == i).astype(np.float32).tolist()) 92 | 93 | parse = np.array(parse_emb).astype(np.float32) 94 | return parse 95 | 96 | def __len__(self): 97 | return len(self.A_paths) // self.opt.batchSize * self.opt.batchSize 98 | 99 | def name(self): 100 | return 'TestDataset' 101 | -------------------------------------------------------------------------------- /shape_generation/data/ov_train_dataset.py: -------------------------------------------------------------------------------- 1 | from torch.utils.data.dataset import Dataset 2 | 3 | from data.image_folder import make_dataset 4 | 5 | import os 6 | from PIL import Image 7 | from glob import glob as glob 8 | import numpy as np 9 | import random 10 | import torch 11 | 12 | 13 | class 
RegularDataset(Dataset): 14 | 15 | def __init__(self, opt, augment): 16 | 17 | self.opt = opt 18 | self.root = opt.dataroot 19 | self.transforms = augment 20 | 21 | # input A (label maps source) 22 | dir_A = '_label' 23 | self.dir_A = os.path.join(opt.dataroot, opt.phase + dir_A) 24 | self.A_paths = sorted(make_dataset(self.dir_A)) 25 | 26 | # input B (label images target) 27 | dir_B = '_label' 28 | self.dir_B = os.path.join(opt.dataroot, opt.phase + dir_B) 29 | self.B_paths = sorted(make_dataset(self.dir_B)) 30 | 31 | # densepose maps 32 | self.dir_densepose = os.path.join( 33 | opt.dataroot, opt.phase + '_densepose') 34 | self.densepose_paths = sorted(glob(self.dir_densepose + '/*')) 35 | 36 | self.dataset_size = len(self.A_paths) 37 | 38 | def custom_transform(self, input_image, per_channel_transform): 39 | 40 | manualSeed = random.randint(1, 10000) 41 | random.seed(manualSeed) 42 | torch.manual_seed(manualSeed) 43 | 44 | if per_channel_transform: 45 | num_channel_image = input_image.shape[0] 46 | tform_input_image_np = np.zeros( 47 | shape=input_image.shape, dtype=input_image.dtype) 48 | 49 | for i in range(num_channel_image): 50 | # TODO check why i!=5 makes a big difference in the output 51 | if i != 1 and i != 2 and i != 4 and i != 5 and i != 13: 52 | # if i != 0 and i != 1 and i != 2 and i != 4 and i != 13: 53 | tform_input_image_np[i] = self.transforms['1']( 54 | input_image[i]) 55 | else: 56 | tform_input_image_np[i] = self.transforms['2']( 57 | input_image[i]) 58 | 59 | return torch.from_numpy(tform_input_image_np) 60 | 61 | def __getitem__(self, index): 62 | 63 | # input A (label maps source) 64 | A_path = self.A_paths[index] 65 | A = self.parsing_embedding(A_path, 'seg') # channel(20), H, W 66 | 67 | # input B (label maps target) 68 | B_path = self.B_paths[index] 69 | B = self.parsing_embedding(B_path, 'seg') # channel(20), H, W 70 | 71 | # densepose maps 72 | dense_path = self.densepose_paths[index] 73 | dense_img = np.load(dense_path).astype('uint8') 74 | dense_img_parts_embeddings = self.parsing_embedding( 75 | dense_img[:, :, 0], 'densemap') 76 | 77 | dense_img_parts_embeddings = np.transpose( 78 | dense_img_parts_embeddings, axes=(1, 2, 0)) 79 | dense_img_final = np.concatenate( 80 | (dense_img_parts_embeddings, dense_img[:, :, 1:]), axis=-1) # channel(27), H, W 81 | 82 | # original seg mask 83 | seg_mask = Image.open(A_path) 84 | seg_mask = np.array(seg_mask) 85 | seg_mask = torch.tensor(seg_mask, dtype=torch.long) 86 | 87 | # final returns 88 | A_tensor = self.custom_transform(A, True) 89 | B_tensor = torch.from_numpy(B) 90 | dense_img_final = torch.from_numpy( 91 | np.transpose(dense_img_final, axes=(2, 0, 1))) 92 | 93 | input_dict = {'seg_map': A_tensor, 'dense_map': dense_img_final, 'target': B_tensor, 'seg_map_path': A_path, 94 | 'target_path': A_path, 'densepose_path': dense_path, 'seg_mask': seg_mask} 95 | 96 | return input_dict 97 | 98 | def parsing_embedding(self, parse_obj, parse_type): 99 | 100 | if parse_type == "seg": 101 | parse = Image.open(parse_obj) 102 | parse = np.array(parse) 103 | parse_channel = 20 104 | 105 | elif parse_type == "densemap": 106 | parse = np.array(parse_obj) 107 | parse_channel = 25 108 | 109 | parse_emb = [] 110 | 111 | for i in range(parse_channel): 112 | parse_emb.append((parse == i).astype(np.float32).tolist()) 113 | 114 | parse = np.array(parse_emb).astype(np.float32) 115 | return parse 116 | 117 | def __len__(self): 118 | return len(self.A_paths) // self.opt.batchSize * self.opt.batchSize 119 | 120 | def name(self): 121 | 
return 'RegularDataset'
122 |
123 |
-------------------------------------------------------------------------------- /shape_generation/models/base_model.py: --------------------------------------------------------------------------------
1 | import os
2 | import torch
3 | import sys
4 |
5 | class BaseModel(torch.nn.Module):
6 | def name(self):
7 | return 'BaseModel'
8 |
9 | def initialize(self, opt):
10 | self.opt = opt
11 | self.gpu_ids = opt.gpu_ids
12 | self.isTrain = opt.isTrain
13 | self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
14 | self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
15 |
16 | def set_input(self, input):
17 | self.input = input
18 |
19 | def forward(self):
20 | pass
21 |
22 | # used in test time, no backprop
23 | def test(self):
24 | pass
25 |
26 | def get_image_paths(self):
27 | pass
28 |
29 | def optimize_parameters(self):
30 | pass
31 |
32 | def get_current_visuals(self):
33 | return self.input
34 |
35 | def get_current_errors(self):
36 | return {}
37 |
38 | def save(self, label):
39 | pass
40 |
41 | # helper saving function that can be used by subclasses
42 | def save_network(self, network, network_label, epoch_label, gpu_ids):
43 | save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
44 | save_path = os.path.join(self.save_dir, save_filename)
45 | torch.save(network.cpu().state_dict(), save_path)
46 | if len(gpu_ids) and torch.cuda.is_available():
47 | network.cuda()
48 |
49 | # helper loading function that can be used by subclasses
50 | def load_network(self, network, network_label, epoch_label, save_dir=''):
51 | save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
52 | if not save_dir:
53 | save_dir = self.save_dir
54 | save_path = os.path.join(save_dir, save_filename)
55 | print(save_path)
56 | if not os.path.isfile(save_path):
57 | print('%s does not exist yet!' % save_path)
58 | if network_label == 'G':
59 | raise RuntimeError('Generator must exist!')
60 | else:
61 | #network.load_state_dict(torch.load(save_path))
62 | try:
63 | network.load_state_dict(torch.load(save_path))
64 | except:
65 | pretrained_dict = torch.load(save_path)
66 | model_dict = network.state_dict()
67 | try:
68 | pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
69 | network.load_state_dict(pretrained_dict)
70 | if self.opt.verbose:
71 | print('Pretrained network %s has excessive layers; Only loading layers that are used' % network_label)
72 | except:
73 | print('Pretrained network %s has fewer layers; The following are not initialized:' % network_label)
74 | for k, v in pretrained_dict.items():
75 | if v.size() == model_dict[k].size():
76 | model_dict[k] = v
77 |
78 | if sys.version_info >= (3,0):
79 | not_initialized = set()
80 | else:
81 | from sets import Set
82 | not_initialized = Set()
83 |
84 | for k, v in model_dict.items():
85 | if k not in pretrained_dict or v.size() != pretrained_dict[k].size():
86 | not_initialized.add(k.split('.')[0])
87 |
88 | print(sorted(not_initialized))
89 | network.load_state_dict(model_dict)
90 |
91 | def update_learning_rate(self):
92 | pass
93 |
-------------------------------------------------------------------------------- /shape_generation/models/models.py: --------------------------------------------------------------------------------
1 | import torch
2 |
3 |
4 | def create_model(opt):
5 | if opt.model == 'pix2pixHD':
6 | from .pix2pixHD_model import Pix2PixHDModel, InferenceModel
7 | if opt.isTrain:
8 | model = Pix2PixHDModel()
9 | else:
10 | model = InferenceModel()
11 | elif opt.model == 'ov_pix2pixHD':
12 | from .ov_pix2pixHD_model import Pix2PixHDModel, InferenceModel
13 | if opt.isTrain:
14 | model = Pix2PixHDModel()
15 | else:
16 | model = InferenceModel()
17 | else:
18 | from .ui_model import UIModel
19 | model = UIModel()
20 |
21 | print('--------- model used ---------', opt.model)
22 | model.initialize(opt)
23 |
24 | if opt.verbose:
25 | print("model [%s] was created" % (model.name()))
26 |
27 | if opt.isTrain and len(opt.gpu_ids) and not opt.fp16:
28 | model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
29 |
30 | return model
31 |
-------------------------------------------------------------------------------- /shape_generation/options/base_options.py: --------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | from util import util
4 | import torch
5 |
6 | class BaseOptions():
7 | def __init__(self):
8 | self.parser = argparse.ArgumentParser()
9 | self.initialized = False
10 |
11 | def initialize(self):
12 | # experiment specifics
13 | self.parser.add_argument('--name', type=str, default='zalando', help='name of the experiment. It decides where to store samples and models')
14 | self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
15 | self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
16 | self.parser.add_argument('--model', type=str, default='ov_pix2pixHD', help='which model to use')
17 | self.parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
18 | self.parser.add_argument('--use_dropout', action='store_true', help='use dropout for the generator')
19 | self.parser.add_argument('--data_type', default=32, type=int, choices=[8, 16, 32], help="Supported data type i.e. 8, 16, 32 bit")
20 | self.parser.add_argument('--verbose', action='store_true', default=False, help='toggles verbose')
21 | self.parser.add_argument('--fp16', action='store_true', default=False, help='train with AMP')
22 | self.parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
23 |
24 | # input/output sizes
25 | self.parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
26 | self.parser.add_argument('--loadSize', type=int, default=1024, help='scale images to this size')
27 | self.parser.add_argument('--fineSize', type=int, default=512, help='then crop to this size')
28 | self.parser.add_argument('--label_nc', type=int, default=20, help='# of input label channels')
29 | self.parser.add_argument('--input_nc', type=int, default=20, help='# of input image channels')
30 | self.parser.add_argument('--densepose_nc', type=int, default=27, help='# of densepose channels')
31 | self.parser.add_argument('--output_nc', type=int, default=20, help='# of output image channels')
32 |
33 | # for setting inputs
34 | self.parser.add_argument('--dataroot', type=str, default='./datasets/zalando_data/')
35 | self.parser.add_argument('--resize_or_crop', type=str, default='scale_width', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
36 | self.parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
37 | self.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
38 | self.parser.add_argument('--nThreads', default=2, type=int, help='# threads for loading data')
39 | self.parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
40 |
41 | # for displays
42 | self.parser.add_argument('--display_winsize', type=int, default=512, help='display window size')
43 | self.parser.add_argument('--tf_log', action='store_true', help='if specified, use tensorboard logging. 
Requires tensorflow installed') 44 | 45 | # for generator 46 | self.parser.add_argument('--netG', type=str, default='global', help='selects model to use for netG') 47 | self.parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer') 48 | self.parser.add_argument('--n_downsample_global', type=int, default=4, help='number of downsampling layers in netG') 49 | self.parser.add_argument('--n_blocks_global', type=int, default=9, help='number of residual blocks in the global generator network') 50 | self.parser.add_argument('--n_blocks_local', type=int, default=3, help='number of residual blocks in the local enhancer network') 51 | self.parser.add_argument('--n_local_enhancers', type=int, default=1, help='number of local enhancers to use') 52 | self.parser.add_argument('--niter_fix_global', type=int, default=0, help='number of epochs that we only train the outmost local enhancer') 53 | 54 | # for instance-wise features 55 | self.parser.add_argument('--no_instance', action='store_true', help='if specified, do *not* add instance map as input') 56 | self.parser.add_argument('--instance_feat', action='store_true', help='if specified, add encoded instance features as input') 57 | self.parser.add_argument('--label_feat', action='store_true', help='if specified, add encoded label features as input') 58 | self.parser.add_argument('--feat_num', type=int, default=10, help='vector length for encoded features') 59 | self.parser.add_argument('--load_features', action='store_true', help='if specified, load precomputed feature maps') 60 | self.parser.add_argument('--n_downsample_E', type=int, default=4, help='# of downsampling layers in encoder') 61 | self.parser.add_argument('--nef', type=int, default=16, help='# of encoder filters in the first conv layer') 62 | self.parser.add_argument('--n_clusters', type=int, default=10, help='number of clusters for features') 63 | 64 | self.initialized = True 65 | 66 | def parse(self, save=True): 67 | if not self.initialized: 68 | self.initialize() 69 | self.opt = self.parser.parse_args() 70 | self.opt.isTrain = self.isTrain # train or test 71 | 72 | str_ids = self.opt.gpu_ids.split(',') 73 | self.opt.gpu_ids = [] 74 | for str_id in str_ids: 75 | id = int(str_id) 76 | if id >= 0: 77 | self.opt.gpu_ids.append(id) 78 | 79 | # set gpu ids 80 | if len(self.opt.gpu_ids) > 0: 81 | torch.cuda.set_device(self.opt.gpu_ids[0]) 82 | 83 | args = vars(self.opt) 84 | 85 | print('------------ Options -------------') 86 | for k, v in sorted(args.items()): 87 | print('%s: %s' % (str(k), str(v))) 88 | print('-------------- End ----------------') 89 | 90 | # save to the disk 91 | expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name) 92 | util.mkdirs(expr_dir) 93 | if save and not self.opt.continue_train: 94 | file_name = os.path.join(expr_dir, 'opt.txt') 95 | with open(file_name, 'wt') as opt_file: 96 | opt_file.write('------------ Options -------------\n') 97 | for k, v in sorted(args.items()): 98 | opt_file.write('%s: %s\n' % (str(k), str(v))) 99 | opt_file.write('-------------- End ----------------\n') 100 | return self.opt 101 | -------------------------------------------------------------------------------- /shape_generation/options/test_options.py: -------------------------------------------------------------------------------- 1 | from .base_options import BaseOptions 2 | 3 | class TestOptions(BaseOptions): 4 | def initialize(self): 5 | BaseOptions.initialize(self) 6 | self.parser.add_argument('--ntest', type=int, default=float("inf"), 
help='# of test examples.') 7 | self.parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.') 8 | self.parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images') 9 | self.parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc') 10 | self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') 11 | self.parser.add_argument('--how_many', type=int, default=50, help='how many test images to run') 12 | self.parser.add_argument('--cluster_path', type=str, default='features_clustered_010.npy', help='the path for clustered results of encoded features') 13 | self.parser.add_argument('--use_encoded_image', action='store_true', help='if specified, encode the real image to get the feature map') 14 | self.parser.add_argument("--export_onnx", type=str, help="export ONNX model to a given file") 15 | self.parser.add_argument("--engine", type=str, help="run serialized TRT engine") 16 | self.parser.add_argument("--onnx", type=str, help="run ONNX model via TRT") 17 | 18 | # for discriminators 19 | self.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use') 20 | self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers') 21 | self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer') 22 | self.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss') 23 | self.parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss') 24 | self.parser.add_argument('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss') 25 | self.parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN') 26 | self.parser.add_argument('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images') 27 | self.parser.add_argument('--no_ce_loss', action='store_true', help='if specified, do *not* use ce matching loss') 28 | 29 | # cloth part 30 | self.parser.add_argument('--cloth_part', type=str, default='uppercloth', help='specify the cloth part to generate from ref image') 31 | 32 | self.isTrain = False 33 | -------------------------------------------------------------------------------- /shape_generation/options/train_options.py: -------------------------------------------------------------------------------- 1 | from .base_options import BaseOptions 2 | 3 | class TrainOptions(BaseOptions): 4 | def initialize(self): 5 | BaseOptions.initialize(self) 6 | # for displays 7 | self.parser.add_argument('--display_freq', type=int, default=100, help='frequency of showing training results on screen') 8 | self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console') 9 | self.parser.add_argument('--save_latest_freq', type=int, default=1000, help='frequency of saving the latest results') 10 | self.parser.add_argument('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs') 11 | self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/') 12 | self.parser.add_argument('--debug', 
action='store_true', help='only do one epoch and displays at each iteration') 13 | 14 | # for training 15 | self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model') 16 | self.parser.add_argument('--load_pretrain', type=str, default='', help='load the pretrained model from the specified location') 17 | self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model') 18 | self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc') 19 | self.parser.add_argument('--niter', type=int, default=100, help='# of iter at starting learning rate') 20 | self.parser.add_argument('--niter_decay', type=int, default=100, help='# of iter to linearly decay learning rate to zero') 21 | self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam') 22 | self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam') 23 | 24 | # for discriminators 25 | self.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use') 26 | self.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers') 27 | self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer') 28 | self.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss') 29 | self.parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss') 30 | self.parser.add_argument('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss') 31 | self.parser.add_argument('--no_lsgan', action='store_true', help='do *not* use least square GAN, if false, use vanilla GAN') 32 | self.parser.add_argument('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images') 33 | self.parser.add_argument('--no_ce_loss', action='store_true', help='if specified, do *not* use ce matching loss') 34 | self.isTrain = True 35 | -------------------------------------------------------------------------------- /shape_generation/results/test_results/images/31021E00B-A11@8_query.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/shape_generation/results/test_results/images/31021E00B-A11@8_query.jpg -------------------------------------------------------------------------------- /shape_generation/results/test_results/images/31021E00B-A11@8_ref.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/shape_generation/results/test_results/images/31021E00B-A11@8_ref.jpg -------------------------------------------------------------------------------- /shape_generation/results/test_results/images/31021E00B-A11@8_synthesized_image.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/shape_generation/results/test_results/images/31021E00B-A11@8_synthesized_image.jpg
-------------------------------------------------------------------------------- /shape_generation/results/test_results/images/4BE21E07Q-K11@10_query.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/shape_generation/results/test_results/images/4BE21E07Q-K11@10_query.jpg
-------------------------------------------------------------------------------- /shape_generation/results/test_results/images/4BE21E07Q-K11@10_ref.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/shape_generation/results/test_results/images/4BE21E07Q-K11@10_ref.jpg
-------------------------------------------------------------------------------- /shape_generation/results/test_results/images/4BE21E07Q-K11@10_synthesized_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/shape_generation/results/test_results/images/4BE21E07Q-K11@10_synthesized_image.jpg
-------------------------------------------------------------------------------- /shape_generation/results/test_results/images/B0N21E061-Q11@7_query.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/shape_generation/results/test_results/images/B0N21E061-Q11@7_query.jpg
-------------------------------------------------------------------------------- /shape_generation/results/test_results/images/B0N21E061-Q11@7_ref.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/shape_generation/results/test_results/images/B0N21E061-Q11@7_ref.jpg
-------------------------------------------------------------------------------- /shape_generation/results/test_results/images/B0N21E061-Q11@7_synthesized_image.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/shape_generation/results/test_results/images/B0N21E061-Q11@7_synthesized_image.jpg
-------------------------------------------------------------------------------- /shape_generation/results/test_results/index.html: --------------------------------------------------------------------------------
[index.html rendered as plain text; the HTML markup was lost in extraction. Recoverable content: page title "Experiment = zalando_encoder_shape, Phase = test, Epoch = latest", followed by one header and image table per test sample (31021E00B-A11@8, 4BE21E07Q-K11@10, B0N21E061-Q11@7), each table linking that sample's three result images, labeled query, synthesized_image, and ref.]
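For orientation, pages like this index.html are not written by hand; they are assembled by the HTML helper in shape_generation/util/html.py, which is reproduced later in this dump. Below is a minimal sketch of how such a results page could be built with that helper. The sample names and image labels mirror the results folder above; the output directory is illustrative.

import os
from util import html  # shape_generation/util/html.py

# Build a results page like shape_generation/results/test_results/index.html.
web_dir = './results/test_results'  # illustrative output directory
webpage = html.HTML(web_dir, 'Experiment = zalando_encoder_shape, Phase = test, Epoch = latest')

for name in ['31021E00B-A11@8', '4BE21E07Q-K11@10', 'B0N21E061-Q11@7']:
    webpage.add_header(name)
    ims, txts, links = [], [], []
    for label in ['query', 'synthesized_image', 'ref']:
        image_name = '%s_%s.jpg' % (name, label)  # must already exist in web_dir/images
        ims.append(image_name)
        txts.append(label)
        links.append(image_name)
    webpage.add_images(ims, txts, links, width=512)

webpage.save()  # writes web_dir/index.html

In test.py (next file), the same calls run once per test sample via Visualizer.save_images, which also writes each image into web_dir/images before registering it on the page.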
-------------------------------------------------------------------------------- /shape_generation/test.py: --------------------------------------------------------------------------------
1 | '''
2 | run the code with python test.py --name name_of_exp --dataroot ./datasets/dataroot/
3 | '''
4 |
5 | import os
6 | from collections import OrderedDict
7 | from torch.autograd import Variable
8 | from options.test_options import TestOptions
9 | from data.ov_test_dataset import TestDataset
10 | from models.models import create_model
11 | import util.util as util
12 | from util.visualizer import Visualizer
13 | from util import html
14 | import torch
15 | from torchvision import transforms
16 | from torch.utils.data import DataLoader
17 |
18 | opt = TestOptions().parse(save=False)
19 | opt.nThreads = 1 # test code only supports nThreads = 1
20 | opt.batchSize = 1 # test code only supports batchSize = 1
21 |
22 | augment = {}
23 | # augment['1'] = transforms.Compose(
24 | # [transforms.ToTensor(),
25 | # transforms.Normalize((0.5, ), (0.5, ))]) # change to [C, H, W]
26 | augment['1'] = transforms.Compose(
27 | [transforms.ToTensor()]) # change to [C, H, W]
28 |
29 | test_dataset = TestDataset(opt, augment)
30 | test_dataloader = DataLoader(test_dataset,
31 | shuffle=False,
32 | num_workers=int(opt.nThreads),
33 | batch_size=opt.batchSize,
34 | pin_memory=True)
35 | dataset_size = len(test_dataset)
36 | print('#testing images = %d' % dataset_size)
37 |
38 | for key in test_dataset[0].keys():
39 | try:
40 | x = test_dataset[0][key]
41 | print("name of the input and shape -- > ", key, x.shape)
42 | print("type,dtype,and min max -- >", type(x),
43 | x.dtype, torch.min(x), torch.max(x))
44 | except Exception as e:
45 | print("name of the input -- > ", key, test_dataset[0][key])
46 | print('----------------')
47 |
48 | # Create and Load Model
49 | model = create_model(opt)
50 |
51 | # Initialize visualizer
52 | visualizer = Visualizer(opt)
53 | # create website
54 | web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' %
55 | (opt.phase, opt.which_epoch))
56 | webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (
57 | opt.name, opt.phase, opt.which_epoch))
58 | for i, data in enumerate(test_dataloader):
59 | if i >= opt.how_many:
60 | break
61 | query_ref_mixed, generated = model.inference_enc(data['query'], data['dense_map'],
62 | data['ref'], cloth_part=opt.cloth_part)
63 | visuals = OrderedDict([('query', util.tensor2label(data['query'][0], opt.label_nc)),
64 | ('ref', util.tensor2label(
65 | data['ref'][0], opt.label_nc)),
66 | ('query_ref_mixed', util.tensor2label(
67 | query_ref_mixed.data[0], opt.label_nc)),
68 | ('synthesized_image', util.tensor2label(
69 | generated.data[0], opt.label_nc)),
70 | ('synthesized_image_edgemap', util.tensor2edgemap(torch.softmax(generated.data[0], dim=0)))])
71 | img_path = data['query_path']
72 | print('process image... 
%s' % img_path) 73 | visualizer.save_images(webpage, visuals, img_path) 74 | 75 | webpage.save() 76 | -------------------------------------------------------------------------------- /shape_generation/util/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/shape_generation/util/__init__.py -------------------------------------------------------------------------------- /shape_generation/util/__pycache__/__init__.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/shape_generation/util/__pycache__/__init__.cpython-38.pyc -------------------------------------------------------------------------------- /shape_generation/util/__pycache__/html.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/shape_generation/util/__pycache__/html.cpython-38.pyc -------------------------------------------------------------------------------- /shape_generation/util/__pycache__/image_pool.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/shape_generation/util/__pycache__/image_pool.cpython-38.pyc -------------------------------------------------------------------------------- /shape_generation/util/__pycache__/util.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/shape_generation/util/__pycache__/util.cpython-36.pyc -------------------------------------------------------------------------------- /shape_generation/util/__pycache__/util.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/shape_generation/util/__pycache__/util.cpython-38.pyc -------------------------------------------------------------------------------- /shape_generation/util/__pycache__/visualizer.cpython-38.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/trinanjan12/Image-Based-Virtual-Try-on-Network-from-Unpaired-Data/a31bdf2453b6c398374fa6763fc18634f0cc2724/shape_generation/util/__pycache__/visualizer.cpython-38.pyc -------------------------------------------------------------------------------- /shape_generation/util/html.py: -------------------------------------------------------------------------------- 1 | import dominate 2 | from dominate.tags import * 3 | import os 4 | 5 | 6 | class HTML: 7 | def __init__(self, web_dir, title, refresh=0): 8 | self.title = title 9 | self.web_dir = web_dir 10 | self.img_dir = os.path.join(self.web_dir, 'images') 11 | if not os.path.exists(self.web_dir): 12 | os.makedirs(self.web_dir) 13 | if not os.path.exists(self.img_dir): 14 | 
os.makedirs(self.img_dir) 15 | 16 | self.doc = dominate.document(title=title) 17 | if refresh > 0: 18 | with self.doc.head: 19 | meta(http_equiv="refresh", content=str(refresh)) 20 | 21 | def get_image_dir(self): 22 | return self.img_dir 23 | 24 | def add_header(self, str): 25 | with self.doc: 26 | h3(str) 27 | 28 | def add_table(self, border=1): 29 | self.t = table(border=border, style="table-layout: fixed;") 30 | self.doc.add(self.t) 31 | 32 | def add_images(self, ims, txts, links, width=512): 33 | self.add_table() 34 | with self.t: 35 | with tr(): 36 | for im, txt, link in zip(ims, txts, links): 37 | with td(style="word-wrap: break-word;", halign="center", valign="top"): 38 | with p(): 39 | with a(href=os.path.join('images', link)): 40 | img(style="width:%dpx" % (width), src=os.path.join('images', im)) 41 | br() 42 | p(txt) 43 | 44 | def save(self): 45 | html_file = '%s/index.html' % self.web_dir 46 | f = open(html_file, 'wt') 47 | f.write(self.doc.render()) 48 | f.close() 49 | 50 | 51 | if __name__ == '__main__': 52 | html = HTML('web/', 'test_html') 53 | html.add_header('hello world') 54 | 55 | ims = [] 56 | txts = [] 57 | links = [] 58 | for n in range(4): 59 | ims.append('image_%d.jpg' % n) 60 | txts.append('text_%d' % n) 61 | links.append('image_%d.jpg' % n) 62 | html.add_images(ims, txts, links) 63 | html.save() 64 | -------------------------------------------------------------------------------- /shape_generation/util/image_pool.py: -------------------------------------------------------------------------------- 1 | import random 2 | import torch 3 | from torch.autograd import Variable 4 | class ImagePool(): 5 | def __init__(self, pool_size): 6 | self.pool_size = pool_size 7 | if self.pool_size > 0: 8 | self.num_imgs = 0 9 | self.images = [] 10 | 11 | def query(self, images): 12 | if self.pool_size == 0: 13 | return images 14 | return_images = [] 15 | for image in images.data: 16 | image = torch.unsqueeze(image, 0) 17 | if self.num_imgs < self.pool_size: 18 | self.num_imgs = self.num_imgs + 1 19 | self.images.append(image) 20 | return_images.append(image) 21 | else: 22 | p = random.uniform(0, 1) 23 | if p > 0.5: 24 | random_id = random.randint(0, self.pool_size-1) 25 | tmp = self.images[random_id].clone() 26 | self.images[random_id] = image 27 | return_images.append(tmp) 28 | else: 29 | return_images.append(image) 30 | return_images = Variable(torch.cat(return_images, 0)) 31 | return return_images 32 | -------------------------------------------------------------------------------- /shape_generation/util/util.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import torch 3 | import numpy as np 4 | from PIL import Image 5 | import numpy as np 6 | import os 7 | import cv2 8 | 9 | # Converts a Tensor into a Numpy array 10 | # |imtype|: the desired type of the converted numpy array 11 | def tensor2im(image_tensor, imtype=np.uint8, normalize=True): 12 | if isinstance(image_tensor, list): 13 | image_numpy = [] 14 | for i in range(len(image_tensor)): 15 | image_numpy.append(tensor2im(image_tensor[i], imtype, normalize)) 16 | return image_numpy 17 | image_numpy = image_tensor.cpu().float().numpy() 18 | if normalize: 19 | image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0 20 | else: 21 | image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0 22 | image_numpy = np.clip(image_numpy, 0, 255) 23 | if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3: 24 | image_numpy = 
image_numpy[:,:,0]
25 | return image_numpy.astype(imtype)
26 |
27 | # Converts a one-hot tensor into a colorful label map
28 | def tensor2label(label_tensor, n_label, imtype=np.uint8):
29 | if n_label == 0:
30 | return tensor2im(label_tensor, imtype)
31 | label_tensor = label_tensor.float()
32 | if label_tensor.size()[0] > 1:
33 | label_tensor = label_tensor.max(0, keepdim=True)[1]
34 | label_tensor = Colorize(n_label)(label_tensor)
35 | label_numpy = np.transpose(label_tensor.numpy(), (1, 2, 0))
36 | return label_numpy.astype(imtype)
37 |
38 | def tensor2edgemap(label_tensor, imtype=np.uint8):
39 | edgemap = torch.argmax(label_tensor,dim=0,keepdim=True)
40 | edgemap = edgemap.squeeze(0)
41 | edgemap = edgemap.cpu().float().numpy()
42 | return edgemap.astype(imtype)
43 |
44 |
45 | def save_image(image_numpy, image_path):
46 | image_pil = Image.fromarray(image_numpy)
47 | image_pil.save(image_path)
48 |
49 | def mkdirs(paths):
50 | if isinstance(paths, list) and not isinstance(paths, str):
51 | for path in paths:
52 | mkdir(path)
53 | else:
54 | mkdir(paths)
55 |
56 | def mkdir(path):
57 | if not os.path.exists(path):
58 | os.makedirs(path)
59 |
60 | ###############################################################################
61 | # Code from
62 | # https://github.com/ycszen/pytorch-seg/blob/master/transform.py
63 | # Modified to use the 20-class human-parsing label colors instead of Cityscapes
64 | ###############################################################################
65 | def uint82bin(n, count=8):
66 | """returns the binary of integer n, count refers to amount of bits"""
67 | return ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)])
68 |
69 | def labelcolormap(N):
70 | if N == 20: # 20-class human parsing palette (not Cityscapes)
71 | label_colours = [(0,0,0)
72 | # 0=Background
73 | ,(128,0,0),(255,0,0),(0,85,0),(170,0,51),(255,85,0)
74 | # 1=Hat, 2=Hair, 3=Glove, 4=Sunglasses, 5=UpperClothes
75 | ,(0,0,85),(0,119,221),(85,85,0),(0,85,85),(85,51,0)
76 | # 6=Dress, 7=Coat, 8=Socks, 9=Pants, 10=Jumpsuits
77 | ,(52,86,128),(0,128,0),(0,0,255),(51,170,221),(0,255,255)
78 | # 11=Scarf, 12=Skirt, 13=Face, 14=LeftArm, 15=RightArm
79 | ,(85,255,170),(170,255,85),(255,255,0),(255,170,0)]
80 | # 16=LeftLeg, 17=RightLeg, 18=LeftShoe, 19=RightShoe
81 | cmap = np.array(label_colours,dtype=np.uint8)
82 | else:
83 | cmap = np.zeros((N, 3), dtype=np.uint8)
84 | for i in range(N):
85 | r, g, b = 0, 0, 0
86 | id = i
87 | for j in range(7):
88 | str_id = uint82bin(id)
89 | r = r ^ (np.uint8(str_id[-1]) << (7-j))
90 | g = g ^ (np.uint8(str_id[-2]) << (7-j))
91 | b = b ^ (np.uint8(str_id[-3]) << (7-j))
92 | id = id >> 3
93 | cmap[i, 0] = r
94 | cmap[i, 1] = g
95 | cmap[i, 2] = b
96 | return cmap
97 |
98 | class Colorize(object):
99 | def __init__(self, n=20):
100 | self.cmap = labelcolormap(n)
101 | self.cmap = torch.from_numpy(self.cmap[:n])
102 |
103 | def __call__(self, gray_image):
104 | size = gray_image.size()
105 | color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)
106 |
107 | for label in range(0, len(self.cmap)):
108 | mask = (label == gray_image[0]).cpu()
109 | color_image[0][mask] = self.cmap[label][0]
110 | color_image[1][mask] = self.cmap[label][1]
111 | color_image[2][mask] = self.cmap[label][2]
112 |
113 | return color_image
114 |
-------------------------------------------------------------------------------- /shape_generation/util/visualizer.py: --------------------------------------------------------------------------------
1 | import numpy as np
2 | import os
3 | import ntpath
4 | import time
5 | from . 
import util 6 | from . import html 7 | # import scipy.misc 8 | from PIL import Image 9 | try: 10 | from StringIO import StringIO # Python 2.7 11 | except ImportError: 12 | from io import BytesIO # Python 3.x 13 | 14 | class Visualizer(): 15 | def __init__(self, opt): 16 | # self.opt = opt 17 | self.tf_log = opt.tf_log 18 | self.use_html = opt.isTrain and not opt.no_html 19 | self.win_size = opt.display_winsize 20 | self.name = opt.name 21 | if self.tf_log: 22 | import tensorflow as tf 23 | self.tf = tf 24 | self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, 'logs') 25 | #self.writer = tf.summary.FileWriter(self.log_dir) 26 | self.writer = tf.summary.create_file_writer(self.log_dir) 27 | 28 | if self.use_html: 29 | self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web') 30 | self.img_dir = os.path.join(self.web_dir, 'images') 31 | print('create web directory %s...' % self.web_dir) 32 | util.mkdirs([self.web_dir, self.img_dir]) 33 | self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt') 34 | with open(self.log_name, "a") as log_file: 35 | now = time.strftime("%c") 36 | log_file.write('================ Training Loss (%s) ================\n' % now) 37 | 38 | # |visuals|: dictionary of images to display or save 39 | def display_current_results(self, visuals, epoch, step): 40 | # if self.tf_log: # show images in tensorboard output 41 | # img_summaries = [] 42 | # for label, image_numpy in visuals.items(): 43 | # # Write the image to a string 44 | # try: 45 | # s = StringIO() 46 | # except: 47 | # s = BytesIO() 48 | # Image.fromarray(image_numpy).save(s, format="jpeg") 49 | # # Create an Image object 50 | # img_sum = self.tf.summary.image(encoded_image_string=s.getvalue(), height=image_numpy.shape[0], width=image_numpy.shape[1]) 51 | # # Create a Summary value 52 | # img_summaries.append(self.tf.summary.scaler(tag=label, image=img_sum)) 53 | 54 | # # Create and write Summary 55 | # summary = self.tf.summary(value=img_summaries) 56 | # self.writer.add_summary(summary, step) 57 | 58 | if self.use_html: # save images to a html file 59 | for label, image_numpy in visuals.items(): 60 | if isinstance(image_numpy, list): 61 | for i in range(len(image_numpy)): 62 | img_path = os.path.join(self.img_dir, 'epoch%.3d_%s_%d.jpg' % (epoch, label, i)) 63 | util.save_image(image_numpy[i], img_path) 64 | else: 65 | img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.jpg' % (epoch, label)) 66 | util.save_image(image_numpy, img_path) 67 | 68 | # update website 69 | webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=30) 70 | for n in range(epoch, 0, -1): 71 | webpage.add_header('epoch [%d]' % n) 72 | ims = [] 73 | txts = [] 74 | links = [] 75 | 76 | for label, image_numpy in visuals.items(): 77 | if isinstance(image_numpy, list): 78 | for i in range(len(image_numpy)): 79 | img_path = 'epoch%.3d_%s_%d.jpg' % (n, label, i) 80 | ims.append(img_path) 81 | txts.append(label+str(i)) 82 | links.append(img_path) 83 | else: 84 | img_path = 'epoch%.3d_%s.jpg' % (n, label) 85 | ims.append(img_path) 86 | txts.append(label) 87 | links.append(img_path) 88 | if len(ims) < 10: 89 | webpage.add_images(ims, txts, links, width=self.win_size) 90 | else: 91 | num = int(round(len(ims)/2.0)) 92 | webpage.add_images(ims[:num], txts[:num], links[:num], width=self.win_size) 93 | webpage.add_images(ims[num:], txts[num:], links[num:], width=self.win_size) 94 | webpage.save() 95 | 96 | # errors: dictionary of error labels and values 97 | def plot_current_errors(self, errors, 
step): 98 | if self.tf_log: 99 | with self.writer.as_default(): 100 | for tag, value in errors.items(): 101 | self.tf.summary.scalar(tag, value, step=step) 102 | self.writer.flush() 103 | # summary = self.tf.summary(value=[self.tf.summary.Value(tag=tag, simple_value=value)]) 104 | # self.writer.add_summary(summary, step) 105 | 106 | # errors: same format as |errors| of plotCurrentErrors 107 | def print_current_errors(self, epoch, i, errors, t): 108 | message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t) 109 | for k, v in errors.items(): 110 | if v != 0: 111 | message += '%s: %.3f ' % (k, v) 112 | 113 | print(message) 114 | with open(self.log_name, "a") as log_file: 115 | log_file.write('%s\n' % message) 116 | 117 | # save image to the disk 118 | def save_images(self, webpage, visuals, image_path): 119 | image_dir = webpage.get_image_dir() 120 | short_path = ntpath.basename(image_path[0]) 121 | name = os.path.splitext(short_path)[0] 122 | 123 | webpage.add_header(name) 124 | ims = [] 125 | txts = [] 126 | links = [] 127 | 128 | for label, image_numpy in visuals.items(): 129 | image_name = '%s_%s.png' % (name, label) 130 | save_path = os.path.join(image_dir, image_name) 131 | util.save_image(image_numpy, save_path) 132 | 133 | ims.append(image_name) 134 | txts.append(label) 135 | links.append(image_name) 136 | webpage.add_images(ims, txts, links, width=self.win_size) 137 | --------------------------------------------------------------------------------
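A closing note on how the utilities above fit together: the dataset classes one-hot encode 20-class parse maps (parsing_embedding), the shape generator outputs a 20-channel score map, and util.util converts scores back into images, tensor2label for a color visualization and tensor2edgemap for the single-channel label map that the appearance stage consumes. Below is a minimal, self-contained sketch of that round trip; the random tensor stands in for real generator output, and the output file names are illustrative. Imports assume the script runs from the shape_generation directory.

import torch
from util.util import tensor2label, tensor2edgemap, save_image

# Stand-in for generator output: one score per parse class and pixel (C=20, H, W).
generated = torch.randn(20, 256, 192)

# tensor2label argmaxes over the 20 channels and colors each class with the
# human-parsing palette from labelcolormap; result is an H x W x 3 uint8 array.
parse_vis = tensor2label(generated, n_label=20)
save_image(parse_vis, 'parse_map_vis.png')

# tensor2edgemap keeps the raw argmax class indices instead (H x W, uint8),
# the same single-channel label format that parsing_embedding loads back in.
edgemap = tensor2edgemap(torch.softmax(generated, dim=0))
save_image(edgemap, 'parse_map_labels.png')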