├── src
│   ├── model
│   │   ├── init
│   │   ├── atten.py
│   │   ├── partialconv.py
│   │   ├── patchtransmit.py
│   │   ├── model.py
│   │   ├── loss.py
│   │   └── networks.py
│   ├── metrics.py
│   ├── config.py
│   ├── dataset.py
│   ├── PartPainting.py
│   └── utils.py
├── test.py
├── train.py
├── LICENSE
├── checkpoints
│   ├── cat
│   │   └── config.yml
│   ├── cub
│   │   └── config.yml
│   ├── paris
│   │   └── config.yml
│   ├── flowers
│   │   └── config.yml
│   ├── places2
│   │   └── config.yml
│   ├── celeba-hq
│   │   └── config.yml
│   ├── cityscapes
│   │   └── config.yml
│   └── places2_road
│       └── config.yml
├── main.py
├── README.md
├── datasets
│   ├── places2
│   │   └── val.flist
│   ├── places2_road
│   │   └── val.flist
│   ├── cat
│   │   └── test.flist
│   └── cub
│       └── test.flist
├── scores
│   ├── metrics.py
│   ├── inception.py
│   └── fid_score.py
└── requirements.txt
/src/model/init: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /test.py: -------------------------------------------------------------------------------- 1 | from main import main 2 | main(mode=2) -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | from main import main 2 | main(mode=1) -------------------------------------------------------------------------------- /src/metrics.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | class PSNR(nn.Module): 5 | def __init__(self, max_val): 6 | super(PSNR, self).__init__() 7 | 8 | base10 = torch.log(torch.tensor(10.0)) 9 | max_val = torch.tensor(max_val).float() 10 | 11 | self.register_buffer('base10', base10) 12 | self.register_buffer('max_val', 20 * torch.log(max_val) / base10) 13 | 14 | def __call__(self, a, b): 15 | mse = torch.mean((a.float() - b.float()) ** 2) 16 | 17 | if mse == 0: 18 | return torch.tensor(0) 19 | 20 | return self.max_val - 10 * torch.log(mse) / self.base10 -------------------------------------------------------------------------------- /src/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import yaml 3 | 4 | class Config(dict): 5 | 6 | def __init__(self, config_path): 7 | with open(config_path, 'r') as f: 8 | self._yaml = f.read() 9 | self._dict = yaml.load(self._yaml, Loader=yaml.FullLoader) 10 | self._dict['PATH'] = os.path.dirname(config_path) 11 | 12 | def __getattr__(self, name): 13 | if self._dict.get(name) is not None: 14 | return self._dict[name] 15 | 16 | return None 17 | 18 | def print(self): 19 | print('Model configurations:') 20 | print('---------------------------------') 21 | print(self._yaml) 22 | print('\n---------------------------------') 23 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 VISION @ OUC 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in
all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /checkpoints/cat/config.yml: -------------------------------------------------------------------------------- 1 | MODE: 1 # 1: train, 2: test, 3: eval 2 | SEED: 10 # random seed 3 | GPU: [0] # list of gpu ids 4 | MODEL: 1 # 1: PartPainting 5 | DATATYPE: 1 # 1: objects, 2:scenes 6 | 7 | TRAIN_FLIST: ./datasets/cat/train.flist 8 | VAL_FLIST: ./datasets/cat/test.flist 9 | TEST_MASK: ./datasets/cat/mask.flist 10 | 11 | LR: 0.0002 # learning rate 12 | G_LR: 0.0001 13 | D_LR: 0.0004 14 | BETA1: 0.0 # adam optimizer beta1 15 | BETA2: 0.9 # adam optimizer beta2 16 | BATCH_SIZE: 8 # input batch size for training 17 | INPUT_SIZE: 256 # input image size for training 0 for original size 18 | MAX_ITERS: 500000 # maximum number of iterations to train the model 19 | CENTER: 0 20 | CATMASK: True # True or False 21 | MEAN: [0.5,0.5,0.5] # mean value 22 | STD: [0.5,0.5,0.5] # standard deviation 23 | COARSE_ITE: 0 24 | 25 | 26 | G1_ADV_LOSS_WEIGHT: 0.1 27 | G1_CONTENT_LOSS_WEIGHT: 1 28 | G1_COLOR_LOSS_WEIGHT: 10 29 | 30 | G2_ADV_LOSS_WEIGHT: 0.1 31 | G2_L1_LOSS_WEIGHT: 8 32 | G2_STYLE_LOSS_WEIGHT: 250 33 | G2_COLOR_LOSS_WEIGHT: 10 34 | G2_MRF_LOSS_WEIGHT: 0.05 35 | 36 | KLD_LOSS_WEIGHT: 0.001 37 | 38 | INTERVAL: 200 39 | SAMPLE_SIZE: 8 # number of images to sample 40 | 41 | SAVE_INTERAL: 50000 42 | -------------------------------------------------------------------------------- /checkpoints/cub/config.yml: -------------------------------------------------------------------------------- 1 | MODE: 1 # 1: train, 2: test, 3: eval 2 | SEED: 10 # random seed 3 | GPU: [0] # list of gpu ids 4 | MODEL: 1 # 1: PartPainting 5 | DATATYPE: 1 # 1: objects, 2:scenes 6 | 7 | TRAIN_FLIST: ./datasets/cub/train.flist 8 | VAL_FLIST: ./datasets/cub/test.flist 9 | TEST_MASK: ./datasets/cub/mask.flist 10 | 11 | LR: 0.0002 # learning rate 12 | G_LR: 0.0001 13 | D_LR: 0.0004 14 | BETA1: 0.0 # adam optimizer beta1 15 | BETA2: 0.9 # adam optimizer beta2 16 | BATCH_SIZE: 8 # input batch size for training 17 | INPUT_SIZE: 256 # input image size for training 0 for original size 18 | MAX_ITERS: 500000 # maximum number of iterations to train the model 19 | CENTER: 0 20 | CATMASK: True # True or False 21 | MEAN: [0.5,0.5,0.5] # mean value 22 | STD: [0.5,0.5,0.5] # standard deviation 23 | COARSE_ITE: 0 24 | 25 | 26 | G1_ADV_LOSS_WEIGHT: 0.1 27 | G1_CONTENT_LOSS_WEIGHT: 1 28 | G1_COLOR_LOSS_WEIGHT: 10 29 | 30 | G2_ADV_LOSS_WEIGHT: 0.1 31 | G2_L1_LOSS_WEIGHT: 8 32 | G2_STYLE_LOSS_WEIGHT: 250 33 | G2_COLOR_LOSS_WEIGHT: 10 34 | G2_MRF_LOSS_WEIGHT: 0.05 35 | 36 | KLD_LOSS_WEIGHT: 0.001 37 | 38 | INTERVAL: 200 39 | SAMPLE_SIZE: 8 # number of images to sample 40 | 41 | SAVE_INTERAL: 50000 42 | -------------------------------------------------------------------------------- /checkpoints/paris/config.yml: -------------------------------------------------------------------------------- 1 | MODE: 1 # 1: train, 2: test, 
3: eval 2 | SEED: 10 # random seed 3 | GPU: [0] # list of gpu ids 4 | MODEL: 1 # 1: PartPainting 5 | DATATYPE: 2 # 1: objects, 2:scenes 6 | 7 | TRAIN_FLIST: ./datasets/paris/train.flist 8 | VAL_FLIST: ./datasets/paris/test.flist 9 | TEST_MASK: ./datasets/paris/mask.flist 10 | 11 | LR: 0.0002 # learning rate 12 | G_LR: 0.0001 13 | D_LR: 0.0004 14 | BETA1: 0.0 # adam optimizer beta1 15 | BETA2: 0.9 # adam optimizer beta2 16 | BATCH_SIZE: 8 # input batch size for training 17 | INPUT_SIZE: 256 # input image size for training 0 for original size 18 | MAX_ITERS: 500000 # maximum number of iterations to train the model 19 | CENTER: 0 20 | CATMASK: True # True or False 21 | MEAN: [0.5,0.5,0.5] # mean value 22 | STD: [0.5,0.5,0.5] # standard deviation 23 | COARSE_ITE: 0 24 | 25 | 26 | G1_ADV_LOSS_WEIGHT: 0.1 27 | G1_CONTENT_LOSS_WEIGHT: 1 28 | G1_COLOR_LOSS_WEIGHT: 10 29 | 30 | G2_ADV_LOSS_WEIGHT: 0.1 31 | G2_L1_LOSS_WEIGHT: 8 32 | G2_STYLE_LOSS_WEIGHT: 250 33 | G2_COLOR_LOSS_WEIGHT: 10 34 | G2_MRF_LOSS_WEIGHT: 0.05 35 | 36 | KLD_LOSS_WEIGHT: 0.001 37 | 38 | INTERVAL: 200 39 | SAMPLE_SIZE: 8 # number of images to sample 40 | 41 | SAVE_INTERAL: 50000 42 | -------------------------------------------------------------------------------- /checkpoints/flowers/config.yml: -------------------------------------------------------------------------------- 1 | MODE: 1 # 1: train, 2: test, 3: eval 2 | SEED: 10 # random seed 3 | GPU: [0] # list of gpu ids 4 | MODEL: 1 # 1: PartPainting 5 | DATATYPE: 1 # 1: objects, 2:scenes 6 | 7 | TRAIN_FLIST: ./datasets/flowers/train.flist 8 | VAL_FLIST: ./datasets/flowers/test.flist 9 | TEST_MASK: ./datasets/flowers/mask.flist 10 | 11 | LR: 0.0002 # learning rate 12 | G_LR: 0.0001 13 | D_LR: 0.0004 14 | BETA1: 0.0 # adam optimizer beta1 15 | BETA2: 0.9 # adam optimizer beta2 16 | BATCH_SIZE: 8 # input batch size for training 17 | INPUT_SIZE: 256 # input image size for training 0 for original size 18 | MAX_ITERS: 500000 # maximum number of iterations to train the model 19 | CENTER: 0 20 | CATMASK: True # True or False 21 | MEAN: [0.5,0.5,0.5] # mean value 22 | STD: [0.5,0.5,0.5] # standard deviation 23 | COARSE_ITE: 0 24 | 25 | 26 | G1_ADV_LOSS_WEIGHT: 0.1 27 | G1_CONTENT_LOSS_WEIGHT: 1 28 | G1_COLOR_LOSS_WEIGHT: 10 29 | 30 | G2_ADV_LOSS_WEIGHT: 0.1 31 | G2_L1_LOSS_WEIGHT: 8 32 | G2_STYLE_LOSS_WEIGHT: 250 33 | G2_COLOR_LOSS_WEIGHT: 10 34 | G2_MRF_LOSS_WEIGHT: 0.05 35 | 36 | KLD_LOSS_WEIGHT: 0.001 37 | 38 | INTERVAL: 200 39 | SAMPLE_SIZE: 8 # number of images to sample 40 | 41 | SAVE_INTERAL: 50000 42 | -------------------------------------------------------------------------------- /checkpoints/places2/config.yml: -------------------------------------------------------------------------------- 1 | MODE: 1 # 1: train, 2: test, 3: eval 2 | SEED: 10 # random seed 3 | GPU: [0] # list of gpu ids 4 | MODEL: 1 # 1: PartPainting 5 | DATATYPE: 2 # 1: objects, 2:scenes 6 | 7 | TRAIN_FLIST: ./datasets/places2/train.flist 8 | VAL_FLIST: ./datasets/places2/test.flist 9 | TEST_MASK: ./datasets/places2/mask.flist 10 | 11 | LR: 0.0002 # learning rate 12 | G_LR: 0.0001 13 | D_LR: 0.0004 14 | BETA1: 0.0 # adam optimizer beta1 15 | BETA2: 0.9 # adam optimizer beta2 16 | BATCH_SIZE: 8 # input batch size for training 17 | INPUT_SIZE: 256 # input image size for training 0 for original size 18 | MAX_ITERS: 500000 # maximum number of iterations to train the model 19 | CENTER: 0 20 | CATMASK: True # True or False 21 | MEAN: [0.5,0.5,0.5] # mean value 22 | STD: [0.5,0.5,0.5] # standard deviation 23 | 
COARSE_ITE: 0 24 | 25 | 26 | G1_ADV_LOSS_WEIGHT: 0.1 27 | G1_CONTENT_LOSS_WEIGHT: 1 28 | G1_COLOR_LOSS_WEIGHT: 10 29 | 30 | G2_ADV_LOSS_WEIGHT: 0.1 31 | G2_L1_LOSS_WEIGHT: 8 32 | G2_STYLE_LOSS_WEIGHT: 250 33 | G2_COLOR_LOSS_WEIGHT: 10 34 | G2_MRF_LOSS_WEIGHT: 0.05 35 | 36 | KLD_LOSS_WEIGHT: 0.001 37 | 38 | INTERVAL: 200 39 | SAMPLE_SIZE: 8 # number of images to sample 40 | 41 | SAVE_INTERAL: 50000 42 | -------------------------------------------------------------------------------- /checkpoints/celeba-hq/config.yml: -------------------------------------------------------------------------------- 1 | MODE: 1 # 1: train, 2: test, 3: eval 2 | SEED: 10 # random seed 3 | GPU: [0] # list of gpu ids 4 | MODEL: 1 # 1: PartPainting 5 | DATATYPE: 1 # 1: objects, 2:scenes 6 | 7 | TRAIN_FLIST: ./datasets/celeba-hq/train.flist 8 | VAL_FLIST: ./datasets/celeba-hq/test.flist 9 | TEST_MASK: ./datasets/celeba-hq/mask.flist 10 | 11 | LR: 0.0002 # learning rate 12 | G_LR: 0.0001 13 | D_LR: 0.0004 14 | BETA1: 0.0 # adam optimizer beta1 15 | BETA2: 0.9 # adam optimizer beta2 16 | BATCH_SIZE: 8 # input batch size for training 17 | INPUT_SIZE: 256 # input image size for training 0 for original size 18 | MAX_ITERS: 500000 # maximum number of iterations to train the model 19 | CENTER: 0 20 | CATMASK: True # True or False 21 | MEAN: [0.5,0.5,0.5] # mean value 22 | STD: [0.5,0.5,0.5] # standard deviation 23 | COARSE_ITE: 0 24 | 25 | 26 | G1_ADV_LOSS_WEIGHT: 0.1 27 | G1_CONTENT_LOSS_WEIGHT: 1 28 | G1_COLOR_LOSS_WEIGHT: 10 29 | 30 | G2_ADV_LOSS_WEIGHT: 0.1 31 | G2_L1_LOSS_WEIGHT: 8 32 | G2_STYLE_LOSS_WEIGHT: 250 33 | G2_COLOR_LOSS_WEIGHT: 10 34 | G2_MRF_LOSS_WEIGHT: 0.05 35 | 36 | KLD_LOSS_WEIGHT: 0.001 37 | 38 | INTERVAL: 200 39 | SAMPLE_SIZE: 8 # number of images to sample 40 | 41 | SAVE_INTERAL: 50000 42 | -------------------------------------------------------------------------------- /checkpoints/cityscapes/config.yml: -------------------------------------------------------------------------------- 1 | MODE: 1 # 1: train, 2: test, 3: eval 2 | SEED: 10 # random seed 3 | GPU: [0] # list of gpu ids 4 | MODEL: 1 # 1: PartPainting 5 | DATATYPE: 2 # 1: objects, 2:scenes 6 | 7 | TRAIN_FLIST: ./datasets/cityscapes/train.flist 8 | VAL_FLIST: ./datasets/cityscapes/test.flist 9 | TEST_MASK: ./datasets/cityscapes/mask.flist 10 | 11 | LR: 0.0002 # learning rate 12 | G_LR: 0.0001 13 | D_LR: 0.0004 14 | BETA1: 0.0 # adam optimizer beta1 15 | BETA2: 0.9 # adam optimizer beta2 16 | BATCH_SIZE: 8 # input batch size for training 17 | INPUT_SIZE: 256 # input image size for training 0 for original size 18 | MAX_ITERS: 500000 # maximum number of iterations to train the model 19 | CENTER: 0 20 | CATMASK: True # True or False 21 | MEAN: [0.5,0.5,0.5] # mean value 22 | STD: [0.5,0.5,0.5] # standard deviation 23 | COARSE_ITE: 0 24 | 25 | 26 | G1_ADV_LOSS_WEIGHT: 0.1 27 | G1_CONTENT_LOSS_WEIGHT: 1 28 | G1_COLOR_LOSS_WEIGHT: 10 29 | 30 | G2_ADV_LOSS_WEIGHT: 0.1 31 | G2_L1_LOSS_WEIGHT: 8 32 | G2_STYLE_LOSS_WEIGHT: 250 33 | G2_COLOR_LOSS_WEIGHT: 10 34 | G2_MRF_LOSS_WEIGHT: 0.05 35 | 36 | KLD_LOSS_WEIGHT: 0.001 37 | 38 | INTERVAL: 200 39 | SAMPLE_SIZE: 8 # number of images to sample 40 | 41 | SAVE_INTERAL: 50000 42 | -------------------------------------------------------------------------------- /checkpoints/places2_road/config.yml: -------------------------------------------------------------------------------- 1 | MODE: 1 # 1: train, 2: test, 3: eval 2 | SEED: 10 # random seed 3 | GPU: [0] # list of gpu ids 4 | MODEL: 1 # 1: PartPainting 5 
| DATATYPE: 2 # 1: objects, 2:scenes 6 | 7 | TRAIN_FLIST: ./datasets/places2_road/train.flist 8 | VAL_FLIST: ./datasets/places2_road/test.flist 9 | TEST_MASK: ./datasets/places2_road/mask.flist 10 | 11 | LR: 0.0002 # learning rate 12 | G_LR: 0.0001 13 | D_LR: 0.0004 14 | BETA1: 0.0 # adam optimizer beta1 15 | BETA2: 0.9 # adam optimizer beta2 16 | BATCH_SIZE: 8 # input batch size for training 17 | INPUT_SIZE: 256 # input image size for training 0 for original size 18 | MAX_ITERS: 500000 # maximum number of iterations to train the model 19 | CENTER: 0 20 | CATMASK: True # True or False 21 | MEAN: [0.5,0.5,0.5] # mean value 22 | STD: [0.5,0.5,0.5] # standard deviation 23 | COARSE_ITE: 0 24 | 25 | 26 | G1_ADV_LOSS_WEIGHT: 0.1 27 | G1_CONTENT_LOSS_WEIGHT: 1 28 | G1_COLOR_LOSS_WEIGHT: 10 29 | 30 | G2_ADV_LOSS_WEIGHT: 0.1 31 | G2_L1_LOSS_WEIGHT: 8 32 | G2_STYLE_LOSS_WEIGHT: 250 33 | G2_COLOR_LOSS_WEIGHT: 10 34 | G2_MRF_LOSS_WEIGHT: 0.05 35 | 36 | KLD_LOSS_WEIGHT: 0.001 37 | 38 | INTERVAL: 200 39 | SAMPLE_SIZE: 8 # number of images to sample 40 | 41 | SAVE_INTERAL: 50000 42 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | import random 4 | import numpy as np 5 | import torch 6 | import argparse 7 | from shutil import copyfile 8 | from src.config import Config 9 | from src.PartPainting import PartPainting 10 | 11 | def main(mode=None): 12 | 13 | config = load_config(mode) 14 | 15 | # CUDA e.g. 0,1,2,3 16 | os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(e) for e in config.GPU) 17 | 18 | # INIT GPU 19 | if torch.cuda.is_available(): 20 | config.DEVICE = torch.device("cuda") 21 | print('\nGPU IS AVAILABLE\n') 22 | torch.backends.cudnn.benchmark = True 23 | else: 24 | config.DEVICE = torch.device("cpu") 25 | 26 | cv2.setNumThreads(0) 27 | 28 | torch.manual_seed(config.SEED) 29 | torch.cuda.manual_seed(config.SEED) 30 | np.random.seed(config.SEED) 31 | random.seed(config.SEED) 32 | 33 | if config.MODEL == 1: 34 | model = PartPainting(config) 35 | model.load() 36 | 37 | if config.MODE == 1: 38 | print("Start Training...\n") 39 | model.train() 40 | if config.MODE == 2: 41 | print("Start Testing...\n") 42 | model.test() 43 | 44 | 45 | def load_config(mode=None): 46 | 47 | parser = argparse.ArgumentParser() 48 | parser.add_argument('--path', '--checkpoints', type=str, default='./checkpoints', help='model checkpoints path (default: ./checkpoints)') 49 | 50 | args = parser.parse_args() 51 | config_path = os.path.join(args.path, 'config.yml') 52 | 53 | # load config file 54 | config = Config(config_path) 55 | 56 | # train mode 57 | if mode == 1: 58 | config.MODE = 1 59 | 60 | # test mode 61 | elif mode == 2: 62 | config.MODE = 2 63 | # config.INPUT_SIZE = 256 64 | # config.VAL_FLIST = args.input 65 | # config.RESULTS = args.output 66 | 67 | return config 68 | 69 | 70 | if __name__ == "__main__": 71 | main() 72 | -------------------------------------------------------------------------------- /src/model/atten.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import functools 4 | from torch.autograd import Variable 5 | 6 | class Atten(nn.Module): 7 | def __init__(self, fin, fout): 8 | super(Atten, self).__init__() 9 | 10 | self.f_q = nn.Conv2d(fin, fout, kernel_size=1) 11 | self.f_k = nn.Conv2d(fin, fout, kernel_size=1) 12 | self.f_v = nn.Conv2d(fin, fin, kernel_size=1) 13 
| 14 | self.fout = fout 15 | 16 | # self.max_pool = nn.MaxPool2d(2) 17 | self.gamma = nn.Parameter(torch.zeros(1)) 18 | self.softmax = nn.Softmax(dim=-1) 19 | 20 | def forward(self, f, mask): 21 | (b, c, h, w) = f.shape 22 | c_shrink = self.fout 23 | 24 | # f_in 25 | f_in = f 26 | 27 | # mask crop 28 | f = f.view(b, c, -1) 29 | mask_one_channel = mask.view(b, 1, -1)[0][0] 30 | 31 | index_outside = torch.nonzero(1 - mask_one_channel) 32 | index_inside = torch.nonzero(mask_one_channel) 33 | 34 | f_outside = f[:, :, index_outside] 35 | f_inside = f[:, :, index_inside] 36 | 37 | # outside => query 38 | f_q = self.f_q(f_outside).view(b, c_shrink, -1) 39 | 40 | # inside => key 41 | f_k = self.f_k(f_inside).view(b, c_shrink, -1) 42 | 43 | # inside => value 44 | f_v = self.f_v(f_inside).view(b, c, -1) 45 | 46 | # attention => query x key 47 | energy = torch.bmm(f_q.permute(0, 2, 1), f_k) 48 | att = self.softmax(energy) 49 | 50 | # reshape value => value x attention 51 | r_v = torch.bmm(f_v, att.permute(0, 2, 1)) 52 | 53 | # paste r_v to f 54 | f[:, :, index_outside] = r_v.unsqueeze(-1) 55 | 56 | # f (b, c, -1) => f (b, c, h, w) 57 | f_reshape = f.view(b, c, h, w) 58 | 59 | # f_out 60 | f_out = f_reshape * (1 - mask) * self.gamma + f_in 61 | 62 | return f_reshape, f_out 63 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Painting from Part 2 | 3 | This repository provides the official PyTorch implementation of our paper "Painting from Part". 4 | 5 | Our paper can be found at https://openaccess.thecvf.com/content/ICCV2021/papers/Guo_Painting_From_Part_ICCV_2021_paper.pdf. 6 | 7 | 8 | ## Prerequisites 9 | 10 | - Linux 11 | - Python 3.7 12 | - NVIDIA GPU + CUDA CuDNN 13 | 14 | ## Getting Started 15 | 16 | 17 | ### Installation 18 | 19 | - Clone this repo: 20 | ```bash 21 | git clone https://github.com/zhenglab/partpainting.git 22 | cd partpainting 23 | ``` 24 | 25 | - Install [PyTorch](http://pytorch.org) 1.7 and other dependencies (e.g., torchvision). 26 | - For Conda users, you can create a new Conda environment using `conda create --name <env> --file requirements.txt`. 27 | 28 | ### Training 29 | 30 | Please change the paths in the `datasets` folder to your own dataset paths. 31 | 32 | The code defaults to the regular outpainting task; you may change the mask types for other purposes in `src/dataset.py` and `src/utils.py`. 33 | 34 | ``` 35 | python train.py --path=$configpath$ 36 | 37 | For example: python train.py --path=./checkpoints/celeba-hq/ 38 | ``` 39 | 40 | ### Testing 41 | 42 | The model is automatically saved every 50,000 iterations; please rename the saved file `g.pth_$iter_number$` to `g.pth` and then run the testing command. 43 | ``` 44 | python test.py --path=$configpath$ 45 | 46 | For example: python test.py --path=./checkpoints/celeba-hq/ 47 | ``` 48 | 49 | ### Irregular Outpainting Mask 50 | 51 | Irregular outpainting masks can be obtained from [https://pan.baidu.com/s/1cyL5Cp0OddyNggZPmMvl8A?pwd=jk2m](https://pan.baidu.com/s/1cyL5Cp0OddyNggZPmMvl8A?pwd=jk2m). Extraction code: jk2m.
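As a quick reference, the sketch below illustrates the mask convention that `src/dataset.py` and `src/PartPainting.py` follow: a mask value of 1 marks the unknown region to be painted, 0 marks the known part, and the model input is the image with the unknown region zeroed out. This is a minimal, illustrative example only; the tensor names are placeholders and the 256x256 shape simply mirrors `INPUT_SIZE` in the provided configs.

```python
import torch

# illustrative tensors only; shapes follow INPUT_SIZE: 256 in the configs
image = torch.rand(1, 3, 256, 256)   # ground-truth image scaled to [0, 1]
mask = torch.zeros(1, 1, 256, 256)   # 1 = region to paint, 0 = known part
mask[:, :, :, 128:] = 1.0            # e.g. outpaint the right half

partial_input = image * (1 - mask)   # known pixels only, as in src/dataset.py
# at test time the prediction is composited back onto the known pixels,
# mirroring src/PartPainting.py: output = prediction * mask + image * (1 - mask)
```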
52 | 53 | ## Citing 54 | ``` 55 | @inproceedings{guo@painting, 56 | author = {Guo, Dongsheng and Zhao, Haoru and Cheng, Yunhao and Zheng, Haiyong and Gu, Zhaorui and Zheng, Bing}, 57 | title = {Painting from Part}, 58 | booktitle = {ICCV}, 59 | year = {2021} 60 | } 61 | 62 | ``` 63 | -------------------------------------------------------------------------------- /src/model/partialconv.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn.functional as F 3 | from torch import nn, cuda 4 | from torch.autograd import Variable 5 | 6 | class PartialConv2d(nn.Conv2d): 7 | def __init__(self, *args, **kwargs): 8 | 9 | # whether the mask is multi-channel or not 10 | self.multi_channel = False 11 | self.return_mask = True 12 | 13 | super(PartialConv2d, self).__init__(*args, **kwargs) 14 | 15 | if self.multi_channel: 16 | self.weight_maskUpdater = torch.ones(self.out_channels, self.in_channels, self.kernel_size[0], self.kernel_size[1]) 17 | else: 18 | self.weight_maskUpdater = torch.ones(1, 1, self.kernel_size[0], self.kernel_size[1]) 19 | 20 | self.slide_winsize = self.weight_maskUpdater.shape[1] * self.weight_maskUpdater.shape[2] * self.weight_maskUpdater.shape[3] 21 | 22 | self.last_size = (None, None, None, None) 23 | self.update_mask = None 24 | self.mask_ratio = None 25 | 26 | def forward(self, input, mask_in=None): 27 | assert len(input.shape) == 4 28 | if mask_in is not None or self.last_size != tuple(input.shape): 29 | self.last_size = tuple(input.shape) 30 | 31 | with torch.no_grad(): 32 | if self.weight_maskUpdater.type() != input.type(): 33 | self.weight_maskUpdater = self.weight_maskUpdater.to(input) 34 | 35 | if mask_in is None: 36 | # if mask is not provided, create a mask 37 | if self.multi_channel: 38 | mask = torch.ones(input.data.shape[0], input.data.shape[1], input.data.shape[2], input.data.shape[3]).to(input) 39 | else: 40 | mask = torch.ones(1, 1, input.data.shape[2], input.data.shape[3]).to(input) 41 | else: 42 | mask = mask_in 43 | 44 | self.update_mask = F.conv2d(mask, self.weight_maskUpdater, bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=1) 45 | 46 | 47 | # for mixed precision training, change 1e-8 to 1e-6 48 | self.mask_ratio = self.slide_winsize/(self.update_mask + 1e-8) 49 | 50 | # self.mask_ratio = torch.max(self.update_mask)/(self.update_mask + 1e-8) 51 | self.update_mask = torch.clamp(self.update_mask, 0, 1) 52 | self.mask_ratio = torch.mul(self.mask_ratio, self.update_mask) 53 | 54 | raw_out = super(PartialConv2d, self).forward(torch.mul(input, mask) if mask_in is not None else input) 55 | 56 | if self.bias is not None: 57 | bias_view = self.bias.view(1, self.out_channels, 1, 1) 58 | output = torch.mul(raw_out - bias_view, self.mask_ratio) + bias_view 59 | output = torch.mul(output, self.update_mask) 60 | else: 61 | output = torch.mul(raw_out, self.mask_ratio) 62 | 63 | 64 | if self.return_mask: 65 | return output, self.update_mask 66 | else: 67 | return output -------------------------------------------------------------------------------- /datasets/places2/val.flist: -------------------------------------------------------------------------------- 1 | ./valley/Places365_val_00000475.jpg 2 | ./valley/Places365_val_00001591.jpg 3 | ./valley/Places365_val_00001810.jpg 4 | ./valley/Places365_val_00003442.jpg 5 | ./valley/Places365_val_00003465.jpg 6 | ./valley/Places365_val_00003488.jpg 7 | ./valley/Places365_val_00003524.jpg 8 | ./valley/Places365_val_00003666.jpg 
9 | ./valley/Places365_val_00003810.jpg 10 | ./valley/Places365_val_00004227.jpg 11 | ./valley/Places365_val_00004419.jpg 12 | ./valley/Places365_val_00004944.jpg 13 | ./valley/Places365_val_00005005.jpg 14 | ./valley/Places365_val_00005346.jpg 15 | ./valley/Places365_val_00005390.jpg 16 | ./valley/Places365_val_00005520.jpg 17 | ./valley/Places365_val_00005591.jpg 18 | ./valley/Places365_val_00005624.jpg 19 | ./valley/Places365_val_00005900.jpg 20 | ./valley/Places365_val_00006309.jpg 21 | ./valley/Places365_val_00006484.jpg 22 | ./valley/Places365_val_00006596.jpg 23 | ./valley/Places365_val_00007059.jpg 24 | ./valley/Places365_val_00007856.jpg 25 | ./valley/Places365_val_00008613.jpg 26 | ./valley/Places365_val_00008910.jpg 27 | ./valley/Places365_val_00009622.jpg 28 | ./valley/Places365_val_00009848.jpg 29 | ./valley/Places365_val_00010320.jpg 30 | ./valley/Places365_val_00010645.jpg 31 | ./valley/Places365_val_00010798.jpg 32 | ./valley/Places365_val_00011808.jpg 33 | ./valley/Places365_val_00011928.jpg 34 | ./valley/Places365_val_00012148.jpg 35 | ./valley/Places365_val_00013041.jpg 36 | ./valley/Places365_val_00013783.jpg 37 | ./valley/Places365_val_00014231.jpg 38 | ./valley/Places365_val_00014353.jpg 39 | ./valley/Places365_val_00014414.jpg 40 | ./valley/Places365_val_00014899.jpg 41 | ./valley/Places365_val_00015205.jpg 42 | ./valley/Places365_val_00015431.jpg 43 | ./valley/Places365_val_00016523.jpg 44 | ./valley/Places365_val_00016717.jpg 45 | ./valley/Places365_val_00016918.jpg 46 | ./valley/Places365_val_00016936.jpg 47 | ./valley/Places365_val_00019186.jpg 48 | ./valley/Places365_val_00019306.jpg 49 | ./valley/Places365_val_00019409.jpg 50 | ./valley/Places365_val_00019476.jpg 51 | ./valley/Places365_val_00019526.jpg 52 | ./valley/Places365_val_00019868.jpg 53 | ./valley/Places365_val_00019895.jpg 54 | ./valley/Places365_val_00020088.jpg 55 | ./valley/Places365_val_00020095.jpg 56 | ./valley/Places365_val_00020254.jpg 57 | ./valley/Places365_val_00020447.jpg 58 | ./valley/Places365_val_00020956.jpg 59 | ./valley/Places365_val_00021269.jpg 60 | ./valley/Places365_val_00022454.jpg 61 | ./valley/Places365_val_00022455.jpg 62 | ./valley/Places365_val_00023192.jpg 63 | ./valley/Places365_val_00023893.jpg 64 | ./valley/Places365_val_00025964.jpg 65 | ./valley/Places365_val_00026542.jpg 66 | ./valley/Places365_val_00026718.jpg 67 | ./valley/Places365_val_00026944.jpg 68 | ./valley/Places365_val_00027261.jpg 69 | ./valley/Places365_val_00027342.jpg 70 | ./valley/Places365_val_00027649.jpg 71 | ./valley/Places365_val_00027894.jpg 72 | ./valley/Places365_val_00027949.jpg 73 | ./valley/Places365_val_00028091.jpg 74 | ./valley/Places365_val_00028179.jpg 75 | ./valley/Places365_val_00028249.jpg 76 | ./valley/Places365_val_00028354.jpg 77 | ./valley/Places365_val_00028846.jpg 78 | ./valley/Places365_val_00029821.jpg 79 | ./valley/Places365_val_00029974.jpg 80 | ./valley/Places365_val_00030314.jpg 81 | ./valley/Places365_val_00030415.jpg 82 | ./valley/Places365_val_00030456.jpg 83 | ./valley/Places365_val_00030559.jpg 84 | ./valley/Places365_val_00030687.jpg 85 | ./valley/Places365_val_00030713.jpg 86 | ./valley/Places365_val_00030788.jpg 87 | ./valley/Places365_val_00030849.jpg 88 | ./valley/Places365_val_00031023.jpg 89 | ./valley/Places365_val_00031167.jpg 90 | ./valley/Places365_val_00031211.jpg 91 | ./valley/Places365_val_00031562.jpg 92 | ./valley/Places365_val_00031656.jpg 93 | ./valley/Places365_val_00031961.jpg 94 | ./valley/Places365_val_00032645.jpg 95 | 
./valley/Places365_val_00033676.jpg 96 | ./valley/Places365_val_00033706.jpg 97 | ./valley/Places365_val_00033939.jpg 98 | ./valley/Places365_val_00034757.jpg 99 | ./valley/Places365_val_00035422.jpg 100 | ./valley/Places365_val_00035762.jpg -------------------------------------------------------------------------------- /src/model/patchtransmit.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import functools 4 | from torch.autograd import Variable 5 | 6 | class PatchTransmit(nn.Module): 7 | def __init__(self, fin): 8 | super(PatchTransmit, self).__init__() 9 | 10 | nhidden = 128 11 | # self.max_pool = nn.MaxPool2d(2) 12 | # self.gamma = nn.Parameter(torch.zeros(1)) 13 | self.softmax = nn.Softmax(dim=-1) 14 | self.conv_w1 = nn.Conv2d(fin, fin, 1, padding=0) 15 | self.conv_w2 = nn.Conv2d(fin, fin, 1, padding=0) 16 | # self.norm = nn.InstanceNorm2d(fin) 17 | self.relu = nn.LeakyReLU(0.2, False) 18 | # self.relu = nn.ReLU() 19 | # self.mlp_shared = nn.Sequential( 20 | # nn.Conv2d(fin, nhidden, kernel_size=1, padding=0), 21 | # nn.ReLU() 22 | # ) 23 | # self.mlp_gamma = nn.Conv2d(nhidden, fin, kernel_size=1, padding=0) 24 | # self.mlp_beta = nn.Conv2d(nhidden, fin, kernel_size=1, padding=0) 25 | 26 | # self.down = nn.Conv2d(fin, fin//16, 1, padding=0) 27 | self.down = nn.Upsample(scale_factor=0.5) 28 | self.up = nn.Upsample(scale_factor=2) 29 | 30 | def forward(self, f, mask): 31 | # f_in 32 | f_in = f 33 | mask_in = mask 34 | 35 | # mask crop 36 | f = self.down(f) 37 | mask = self.down(mask) 38 | 39 | (b, c, h, w) = f.shape 40 | 41 | f = f.view(b, c, -1) 42 | mask_one_channel = mask.view(b, 1, -1)[0][0] 43 | 44 | index_outside = torch.nonzero(1 - mask_one_channel) 45 | index_inside = torch.nonzero(mask_one_channel) 46 | 47 | # f_outside = f[:, :, index_outside] # 4, 128, 12288, 1 48 | # f_inside = f[:, :, index_inside] # 4, 128, 4096, 1 49 | 50 | # f_o = f_outside.expand(f_ous) 51 | 52 | o_re = index_outside.shape[0] 53 | i_re = index_inside.shape[0] 54 | # print(o_re, i_re) 55 | f_outside = f[:, :, index_outside] # 4, 128, 12288, 1 56 | f_inside = f[:, :, index_inside] # 4, 128, 4096, 1 57 | # f_outside = torch.cat([f_outside]*i_re, dim = 3) 58 | # f_inside = torch.cat([f_inside]*o_re, dim = 3) 59 | 60 | # 28 for test 61 | # f_o = f_outside.expand(b, c, o_re, i_re) 62 | # f_i = f_inside.expand(b, c, i_re, o_re) 63 | 64 | # # b, 1, o, 1 65 | # io_abs = torch.abs(f_o - f_i.permute(0, 1, 3, 2)) 66 | 67 | # 29 for train and test 68 | # cosine 69 | f_o = f_outside.view(b, c, -1) 70 | f_i = f_inside.view(b, c, -1) 71 | matmul = torch.bmm(f_i.permute(0, 2, 1), f_o) 72 | f_i_abs = torch.sqrt(torch.sum(f_i.pow(2) + 1e-6, dim=1, keepdim=True)) 73 | f_o_abs = torch.sqrt(torch.sum(f_o.pow(2) + 1e-6, dim=1, keepdim=True)) 74 | abs_matmul = torch.bmm(f_i_abs.permute(0, 2, 1), f_o_abs) 75 | io_abs = matmul / abs_matmul 76 | # print(io_abs.shape) 77 | 78 | # print(torch.max(io_abs), torch.min(io_abs)) 79 | 80 | # 28 for train and test 81 | # _map = torch.argmin(torch.sum(io_abs, dim=1, keepdim=True), dim=3).view(b, o_re) 82 | _map = torch.argmax(io_abs, dim=1) 83 | # print(_map.shape) 84 | # .view(b, o_re) 85 | 86 | f_oo = f_outside 87 | for i in range(b): 88 | f_oo[i] = f_inside[i, :, _map[i], :] 89 | 90 | f[:, :, index_outside] = f_oo 91 | f_out = f.view(b, c, h, w) 92 | 93 | f_out = self.up(f_out) 94 | 95 | # f_final = self.conv(torch.cat((f_out, f_in), dim=1)) 96 | w1 = self.conv_w1(f_out) 97 | w2 = self.conv_w2(f_in) 98 
| f_final = w1 + w2 99 | # f_final = w1 100 | # f_final = f_final * (1 - mask_in) + f_in * mask_in 101 | # f_final = self.norm(f_final) 102 | f_final = self.relu(f_final) 103 | # f_mlp = self.mlp_shared(f_out) 104 | # gamma = self.mlp_gamma(f_mlp) 105 | # beta = self.mlp_beta(f_mlp) 106 | 107 | # f_final = f_in * (1 + gamma) + beta 108 | 109 | return f_final, f_out, w1, w2 110 | # return f_final -------------------------------------------------------------------------------- /scores/metrics.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import argparse 4 | import matplotlib.pyplot as plt 5 | import cv2 6 | 7 | from glob import glob 8 | from ntpath import basename 9 | from imageio import imread 10 | from skimage.measure import compare_ssim 11 | from skimage.measure import compare_psnr 12 | from skimage.color import rgb2gray 13 | 14 | 15 | def parse_args(): 16 | parser = argparse.ArgumentParser(description='script to compute all statistics') 17 | parser.add_argument('--data-path', '--gt', help='Path to ground truth data', type=str) 18 | parser.add_argument('--output-path', '--o', help='Path to output data', type=str) 19 | parser.add_argument('--valrst-path', '--v', default='./', help='Path to save val result', type=str) 20 | parser.add_argument('--debug', default=0, help='Debug', type=int) 21 | args = parser.parse_args() 22 | return args 23 | 24 | 25 | def compare_mae(img_true, img_test): 26 | img_true = img_true.astype(np.float32) 27 | img_test = img_test.astype(np.float32) 28 | if np.sum(img_true + img_test) == 0: 29 | return 1 30 | return np.sum(np.abs(img_true - img_test)) / np.sum(img_true + img_test) 31 | 32 | 33 | def load_flist(flist): 34 | if isinstance(flist, list): 35 | return flist 36 | # flist: image file path, image directory path, text file flist path 37 | if isinstance(flist, str): 38 | if os.path.isdir(flist): 39 | flist = list(glob(flist + '/*.jpg')) + list(glob(flist + '/*.png'))+list(glob(path_true + '/*.JPG')) 40 | flist.sort() 41 | return flist 42 | 43 | if os.path.isfile(flist): 44 | try: 45 | return np.genfromtxt(flist, dtype=np.str, encoding='utf-8') 46 | except: 47 | return [flist] 48 | return [] 49 | 50 | args = parse_args() 51 | for arg in vars(args): 52 | print('[%s] =' % arg, getattr(args, arg)) 53 | 54 | path_true = args.data_path 55 | path_pred = args.output_path 56 | 57 | psnr = [] 58 | ssim = [] 59 | mae = [] 60 | names = [] 61 | index = 1 62 | 63 | files = load_flist(path_true) 64 | # files = list(glob(path_true + '/*.jpg')) + list(glob(path_true + '/*.png'))+list(glob(path_true + '/*.JPG')) 65 | 66 | def img_resize(img, height, width, centerCrop=True): 67 | imgh, imgw = img.shape[0:2] 68 | 69 | if centerCrop and imgh != imgw: 70 | # center crop 71 | side = np.minimum(imgh, imgw) 72 | j = (imgh - side) // 2 73 | i = (imgw - side) // 2 74 | img = img[j:j + side, i:i + side, ...] 75 | 76 | img = cv2.resize(img, dsize=(height, width)) 77 | 78 | return img 79 | else: 80 | img = cv2.resize(img, dsize=(height, width)) 81 | 82 | return img 83 | 84 | for fn in sorted(files): 85 | name = basename(str(fn)) 86 | names.append(name) 87 | 88 | # img_gt = (imread(str(fn)) / 255.0).astype(np.float32) 89 | # pred_name = str(fn).split('.')[0] + '_f.' 
+ str(fn).split('.')[1] 90 | pred_name = str(fn) 91 | 92 | img_gt = (imread(str(fn)) / 255.0).astype(np.float32) 93 | # print(img_gt) 94 | img_pred = (imread(path_pred + '/' + basename(pred_name) ) / 255.0).astype(np.float32) 95 | 96 | img_gt = img_resize(img_gt, 256, 256) 97 | # print(img_gt.shape) 98 | img_gt = rgb2gray(img_gt) 99 | img_pred = rgb2gray(img_pred) 100 | 101 | if args.debug != 0: 102 | plt.subplot('121') 103 | plt.imshow(img_gt) 104 | plt.title('Groud truth') 105 | plt.subplot('122') 106 | plt.imshow(img_pred) 107 | plt.title('Output') 108 | plt.show() 109 | 110 | psnr.append(compare_psnr(img_gt, img_pred, data_range=1)) 111 | ssim.append(compare_ssim(img_gt, img_pred, data_range=1, win_size=51)) 112 | mae.append(compare_mae(img_gt, img_pred)) 113 | if np.mod(index, 100) == 0: 114 | print( 115 | str(index) + ' images processed', 116 | "PSNR: %.4f" % round(np.mean(psnr), 4), 117 | "SSIM: %.4f" % round(np.mean(ssim), 4), 118 | "MAE: %.4f" % round(np.mean(mae), 4), 119 | ) 120 | index += 1 121 | 122 | np.savez(args.output_path + '/metrics.npz', psnr=psnr, ssim=ssim, mae=mae, names=names) 123 | print( 124 | "PSNR: %.4f" % round(np.mean(psnr), 4), 125 | "PSNR Variance: %.4f" % round(np.var(psnr), 4), 126 | "SSIM: %.4f" % round(np.mean(ssim), 4), 127 | "SSIM Variance: %.4f" % round(np.var(ssim), 4), 128 | "MAE: %.4f" % round(np.mean(mae), 4), 129 | "MAE Variance: %.4f" % round(np.var(mae), 4) 130 | ) 131 | -------------------------------------------------------------------------------- /datasets/places2_road/val.flist: -------------------------------------------------------------------------------- 1 | ./desert_road/val/Places365_val_00011282.jpg 2 | ./desert_road/val/Places365_val_00026834.jpg 3 | ./desert_road/val/Places365_val_00005513.jpg 4 | ./desert_road/val/Places365_val_00010725.jpg 5 | ./desert_road/val/Places365_val_00028786.jpg 6 | ./desert_road/val/Places365_val_00012745.jpg 7 | ./desert_road/val/Places365_val_00034426.jpg 8 | ./desert_road/val/Places365_val_00004966.jpg 9 | ./desert_road/val/Places365_val_00000534.jpg 10 | ./desert_road/val/Places365_val_00017776.jpg 11 | ./desert_road/val/Places365_val_00019536.jpg 12 | ./desert_road/val/Places365_val_00027932.jpg 13 | ./desert_road/val/Places365_val_00028333.jpg 14 | ./desert_road/val/Places365_val_00027606.jpg 15 | ./desert_road/val/Places365_val_00004133.jpg 16 | ./desert_road/val/Places365_val_00010980.jpg 17 | ./desert_road/val/Places365_val_00026342.jpg 18 | ./desert_road/val/Places365_val_00033838.jpg 19 | ./desert_road/val/Places365_val_00004409.jpg 20 | ./desert_road/val/Places365_val_00031984.jpg 21 | ./desert_road/val/Places365_val_00024384.jpg 22 | ./desert_road/val/Places365_val_00007063.jpg 23 | ./desert_road/val/Places365_val_00032844.jpg 24 | ./desert_road/val/Places365_val_00022431.jpg 25 | ./desert_road/val/Places365_val_00012874.jpg 26 | ./desert_road/val/Places365_val_00029356.jpg 27 | ./desert_road/val/Places365_val_00010741.jpg 28 | ./desert_road/val/Places365_val_00007024.jpg 29 | ./desert_road/val/Places365_val_00026256.jpg 30 | ./desert_road/val/Places365_val_00036328.jpg 31 | ./desert_road/val/Places365_val_00012255.jpg 32 | ./desert_road/val/Places365_val_00025839.jpg 33 | ./desert_road/val/Places365_val_00026111.jpg 34 | ./desert_road/val/Places365_val_00012290.jpg 35 | ./desert_road/val/Places365_val_00001381.jpg 36 | ./desert_road/val/Places365_val_00008665.jpg 37 | ./desert_road/val/Places365_val_00010202.jpg 38 | ./desert_road/val/Places365_val_00000951.jpg 39 | 
./desert_road/val/Places365_val_00011740.jpg 40 | ./desert_road/val/Places365_val_00018286.jpg 41 | ./desert_road/val/Places365_val_00014423.jpg 42 | ./desert_road/val/Places365_val_00017060.jpg 43 | ./desert_road/val/Places365_val_00001806.jpg 44 | ./desert_road/val/Places365_val_00019115.jpg 45 | ./desert_road/val/Places365_val_00029505.jpg 46 | ./desert_road/val/Places365_val_00003193.jpg 47 | ./desert_road/val/Places365_val_00016083.jpg 48 | ./desert_road/val/Places365_val_00029347.jpg 49 | ./desert_road/val/Places365_val_00003656.jpg 50 | ./desert_road/val/Places365_val_00021784.jpg 51 | ./desert_road/val/Places365_val_00026971.jpg 52 | ./desert_road/val/Places365_val_00028419.jpg 53 | ./desert_road/val/Places365_val_00002270.jpg 54 | ./desert_road/val/Places365_val_00000287.jpg 55 | ./desert_road/val/Places365_val_00004427.jpg 56 | ./desert_road/val/Places365_val_00021638.jpg 57 | ./desert_road/val/Places365_val_00003204.jpg 58 | ./desert_road/val/Places365_val_00028268.jpg 59 | ./desert_road/val/Places365_val_00034930.jpg 60 | ./desert_road/val/Places365_val_00001692.jpg 61 | ./desert_road/val/Places365_val_00014914.jpg 62 | ./desert_road/val/Places365_val_00009365.jpg 63 | ./desert_road/val/Places365_val_00005553.jpg 64 | ./desert_road/val/Places365_val_00024935.jpg 65 | ./desert_road/val/Places365_val_00007579.jpg 66 | ./desert_road/val/Places365_val_00001225.jpg 67 | ./desert_road/val/Places365_val_00016221.jpg 68 | ./desert_road/val/Places365_val_00002577.jpg 69 | ./desert_road/val/Places365_val_00024908.jpg 70 | ./desert_road/val/Places365_val_00027044.jpg 71 | ./desert_road/val/Places365_val_00002087.jpg 72 | ./desert_road/val/Places365_val_00018760.jpg 73 | ./desert_road/val/Places365_val_00009587.jpg 74 | ./desert_road/val/Places365_val_00013843.jpg 75 | ./desert_road/val/Places365_val_00005956.jpg 76 | ./desert_road/val/Places365_val_00003636.jpg 77 | ./desert_road/val/Places365_val_00000786.jpg 78 | ./desert_road/val/Places365_val_00015716.jpg 79 | ./desert_road/val/Places365_val_00028600.jpg 80 | ./desert_road/val/Places365_val_00008802.jpg 81 | ./desert_road/val/Places365_val_00012242.jpg 82 | ./desert_road/val/Places365_val_00021418.jpg 83 | ./desert_road/val/Places365_val_00029253.jpg 84 | ./desert_road/val/Places365_val_00021474.jpg 85 | ./desert_road/val/Places365_val_00012614.jpg 86 | ./desert_road/val/Places365_val_00033398.jpg 87 | ./desert_road/val/Places365_val_00001601.jpg 88 | ./desert_road/val/Places365_val_00007417.jpg 89 | ./desert_road/val/Places365_val_00000517.jpg 90 | ./desert_road/val/Places365_val_00017199.jpg 91 | ./desert_road/val/Places365_val_00031803.jpg 92 | ./desert_road/val/Places365_val_00030567.jpg 93 | ./desert_road/val/Places365_val_00005663.jpg 94 | ./desert_road/val/Places365_val_00005323.jpg 95 | ./desert_road/val/Places365_val_00026261.jpg 96 | ./desert_road/val/Places365_val_00006938.jpg 97 | ./desert_road/val/Places365_val_00001983.jpg 98 | ./desert_road/val/Places365_val_00014919.jpg 99 | ./desert_road/val/Places365_val_00004549.jpg 100 | ./desert_road/val/Places365_val_00000881.jpg -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # This file may be used to create an environment using: 2 | # $ conda create --name --file 3 | # platform: linux-64 4 | _libgcc_mutex=0.1=main 5 | absl-py=0.11.0=py37h89c1867_0 6 | addict=2.4.0=pypi_0 7 | aiohttp=3.7.4=py37h27cfd23_1 8 | async-timeout=3.0.1=py_1000 9 | 
attrs=20.3.0=pyhd3deb0d_0 10 | blas=1.0=mkl 11 | blinker=1.4=py_1 12 | brotlipy=0.7.0=py37hb5d75c8_1001 13 | bzip2=1.0.8=h516909a_3 14 | c-ares=1.17.1=h36c2ea0_0 15 | ca-certificates=2021.5.30=ha878542_0 16 | cachetools=4.2.1=pyhd8ed1ab_0 17 | certifi=2021.5.30=py37h89c1867_0 18 | cffi=1.14.5=py37h261ae71_0 19 | cftime=1.2.1=py37h161383b_1 20 | chardet=3.0.4=py37he5f6b98_1008 21 | click=7.1.2=pyh9f0ad1d_0 22 | cloudpickle=1.6.0=py_0 23 | cryptography=2.9.2=py37hb09aad4_0 24 | cudatoolkit=11.0.221=h6bb024c_0 25 | curl=7.71.1=hbc83047_1 26 | cycler=0.10.0=py37_0 27 | cytoolz=0.11.0=py37h7b6447c_0 28 | dask-core=2.30.0=py_0 29 | dbus=1.13.18=hb2f20db_0 30 | decorator=4.4.2=py_0 31 | dominate=2.6.0=pypi_0 32 | einops=0.3.0=pypi_0 33 | expat=2.2.10=he6710b0_2 34 | ffmpeg=4.3.1=h3215721_1 35 | ffmpy=0.3.0=pypi_0 36 | fontconfig=2.13.0=h9420a91_0 37 | freetype=2.10.4=h5ab3b9f_0 38 | glib=2.66.1=h92f7085_0 39 | gmp=6.2.1=h58526e2_0 40 | gnutls=3.6.13=h85f3911_1 41 | google-auth=1.24.0=pyhd3deb0d_0 42 | google-auth-oauthlib=0.4.1=py_2 43 | grpcio=1.33.2=py37haffed2e_2 44 | gst-plugins-base=1.14.0=hbbd80ab_1 45 | gstreamer=1.14.0=hb31296c_0 46 | hdf4=4.2.13=0 47 | hdf5=1.10.6=nompi_h3c11f04_101 48 | icu=58.2=he6710b0_3 49 | idna=2.10=pyh9f0ad1d_0 50 | imageio=2.9.0=py_0 51 | importlib-metadata=3.7.2=py37h89c1867_0 52 | intel-openmp=2020.2=254 53 | joblib=1.0.1=pypi_0 54 | jpeg=9b=h024ee3a_2 55 | kiwisolver=1.3.0=py37h2531618_0 56 | krb5=1.18.2=h173b8e3_0 57 | lame=3.100=h14c3975_1001 58 | lcms2=2.11=h396b838_0 59 | ld_impl_linux-64=2.33.1=h53a641e_7 60 | libcurl=7.71.1=h20c2e04_1 61 | libedit=3.1.20191231=h14c3975_1 62 | libffi=3.3=he6710b0_2 63 | libgcc-ng=9.1.0=hdf63c60_0 64 | libgfortran-ng=7.3.0=hdf63c60_0 65 | libiconv=1.16=h516909a_0 66 | libnetcdf=4.6.1=h2053bdc_3 67 | libpng=1.6.37=hbc83047_0 68 | libprotobuf=3.14.0=h8c45485_0 69 | libssh2=1.9.0=hab1572f_5 70 | libstdcxx-ng=9.1.0=hdf63c60_0 71 | libtiff=4.1.0=h2733197_1 72 | libuuid=1.0.3=h1bed415_2 73 | libuv=1.40.0=h7b6447c_0 74 | libxcb=1.14=h7b6447c_0 75 | libxml2=2.9.10=hb55368b_3 76 | lz4-c=1.9.2=heb0550a_3 77 | markdown=3.3.4=pyhd8ed1ab_0 78 | matlab=0.1=pypi_0 79 | matplotlib=3.3.2=0 80 | matplotlib-base=3.3.2=py37h817c723_0 81 | mkl=2020.2=256 82 | mkl-service=2.3.0=py37he904b0f_0 83 | mkl_fft=1.2.0=py37h23d657b_0 84 | mkl_random=1.1.1=py37h0573a6f_0 85 | mmaction2=0.12.0=dev_0 86 | mmcv-full=1.3.0=pypi_0 87 | multidict=5.1.0=py37h27cfd23_2 88 | natsort=7.1.1=pyhd8ed1ab_0 89 | ncurses=6.2=he6710b0_1 90 | netcdf4=1.5.6=py37hd5c503a_0 91 | nettle=3.6=he412f7d_0 92 | networkx=2.5=py_0 93 | ninja=1.10.1=py37hfd86e86_0 94 | numpy=1.19.2=py37h54aff64_0 95 | numpy-base=1.19.2=py37hfa32c7d_0 96 | oauthlib=3.0.1=py_0 97 | olefile=0.46=py37_0 98 | opencv-contrib-python=4.5.1.48=pypi_0 99 | opencv-python=4.4.0.46=pypi_0 100 | openh264=2.1.1=h8b12597_0 101 | openssl=1.1.1k=h27cfd23_0 102 | packaging=20.9=pypi_0 103 | pandas=1.0.1=py37hb3f55d8_0 104 | patsy=0.5.1=py_0 105 | pcre=8.44=he6710b0_0 106 | pillow=8.0.1=py37he98fc37_0 107 | pip=20.2.4=py37h06a4308_0 108 | protobuf=3.14.0=py37h2531618_1 109 | psutil=5.4.8=py37h14c3975_1000 110 | pyasn1=0.4.8=py_0 111 | pyasn1-modules=0.2.7=py_0 112 | pycparser=2.20=pyh9f0ad1d_2 113 | pyjwt=2.0.1=pyhd8ed1ab_0 114 | pyopenssl=19.1.0=py37_0 115 | pyparsing=2.4.7=py_0 116 | pyqt=5.9.2=py37h05f1152_2 117 | pysocks=1.7.1=py37h89c1867_3 118 | python=3.7.9=h7579374_0 119 | python-dateutil=2.8.1=py_0 120 | python_abi=3.7=1_cp37m 121 | pytorch=1.7.0=py3.7_cuda11.0.221_cudnn8.0.3_0 122 | pytz=2021.1=pyhd8ed1ab_0 
123 | pywavelets=1.1.1=py37h7b6447c_2 124 | pyyaml=5.3.1=py37h7b6447c_1 125 | qt=5.9.7=h5867ecd_1 126 | readline=8.0=h7b6447c_0 127 | requests=2.25.1=pyhd3deb0d_0 128 | requests-oauthlib=1.3.0=pyh9f0ad1d_0 129 | rsa=4.7.2=pyh44b312d_0 130 | scikit-image=0.17.2=py37hdf5156a_0 131 | scikit-learn=0.24.2=pypi_0 132 | scikit-video=1.1.11=pypi_0 133 | scipy=1.1.0=pypi_0 134 | seaborn=0.11.1=hd8ed1ab_1 135 | seaborn-base=0.11.1=pyhd8ed1ab_1 136 | setuptools=50.3.1=py37h06a4308_1 137 | sip=4.19.8=py37hf484d3e_0 138 | six=1.15.0=py37h06a4308_0 139 | sqlite=3.33.0=h62c20be_0 140 | statsmodels=0.11.1=py37h8f50634_2 141 | tensorboard=2.4.1=pyhd8ed1ab_0 142 | tensorboard-plugin-wit=1.8.0=pyh44b312d_0 143 | tensorboardx=2.4=pypi_0 144 | threadpoolctl=2.2.0=pypi_0 145 | tifffile=2020.10.1=py37hdd07704_2 146 | tk=8.6.10=hbc83047_0 147 | toolz=0.11.1=py_0 148 | torchaudio=0.7.0=py37 149 | torchvision=0.8.1=py37_cu110 150 | tornado=6.0.4=py37h7b6447c_1 151 | tqdm=4.58.0=pypi_0 152 | typing-extensions=3.7.4.3=0 153 | typing_extensions=3.7.4.3=py_0 154 | urllib3=1.26.3=pyhd8ed1ab_0 155 | werkzeug=1.0.1=pyh9f0ad1d_0 156 | wheel=0.35.1=pyhd3eb1b0_0 157 | x264=1!152.20180806=h14c3975_0 158 | xz=5.2.5=h7b6447c_0 159 | yaml=0.2.5=h7b6447c_0 160 | yapf=0.31.0=pypi_0 161 | yarl=1.6.3=py37h4abf009_0 162 | zipp=3.4.1=pyhd8ed1ab_0 163 | zlib=1.2.11=h7b6447c_3 164 | zstd=1.4.5=h9ceee32_0 165 | -------------------------------------------------------------------------------- /scores/inception.py: -------------------------------------------------------------------------------- 1 | import torch.nn as nn 2 | import torch.nn.functional as F 3 | from torchvision import models 4 | 5 | 6 | class InceptionV3(nn.Module): 7 | """Pretrained InceptionV3 network returning feature maps""" 8 | 9 | # Index of default block of inception to return, 10 | # corresponds to output of final average pooling 11 | DEFAULT_BLOCK_INDEX = 3 12 | 13 | # Maps feature dimensionality to their output blocks indices 14 | BLOCK_INDEX_BY_DIM = { 15 | 64: 0, # First max pooling features 16 | 192: 1, # Second max pooling featurs 17 | 768: 2, # Pre-aux classifier features 18 | 2048: 3 # Final average pooling features 19 | } 20 | 21 | def __init__(self, 22 | output_blocks=[DEFAULT_BLOCK_INDEX], 23 | resize_input=True, 24 | normalize_input=True, 25 | requires_grad=False): 26 | """Build pretrained InceptionV3 27 | Parameters 28 | ---------- 29 | output_blocks : list of int 30 | Indices of blocks to return features of. Possible values are: 31 | - 0: corresponds to output of first max pooling 32 | - 1: corresponds to output of second max pooling 33 | - 2: corresponds to output which is fed to aux classifier 34 | - 3: corresponds to output of final average pooling 35 | resize_input : bool 36 | If true, bilinearly resizes input to width and height 299 before 37 | feeding input to model. As the network without fully connected 38 | layers is fully convolutional, it should be able to handle inputs 39 | of arbitrary size, so resizing might not be strictly needed 40 | normalize_input : bool 41 | If true, normalizes the input to the statistics the pretrained 42 | Inception network expects 43 | requires_grad : bool 44 | If true, parameters of the model require gradient. 
Possibly useful 45 | for finetuning the network 46 | """ 47 | super(InceptionV3, self).__init__() 48 | 49 | self.resize_input = resize_input 50 | self.normalize_input = normalize_input 51 | self.output_blocks = sorted(output_blocks) 52 | self.last_needed_block = max(output_blocks) 53 | 54 | assert self.last_needed_block <= 3, \ 55 | 'Last possible output block index is 3' 56 | 57 | self.blocks = nn.ModuleList() 58 | 59 | inception = models.inception_v3(pretrained=True) 60 | 61 | # Block 0: input to maxpool1 62 | block0 = [ 63 | inception.Conv2d_1a_3x3, 64 | inception.Conv2d_2a_3x3, 65 | inception.Conv2d_2b_3x3, 66 | nn.MaxPool2d(kernel_size=3, stride=2) 67 | ] 68 | self.blocks.append(nn.Sequential(*block0)) 69 | 70 | # Block 1: maxpool1 to maxpool2 71 | if self.last_needed_block >= 1: 72 | block1 = [ 73 | inception.Conv2d_3b_1x1, 74 | inception.Conv2d_4a_3x3, 75 | nn.MaxPool2d(kernel_size=3, stride=2) 76 | ] 77 | self.blocks.append(nn.Sequential(*block1)) 78 | 79 | # Block 2: maxpool2 to aux classifier 80 | if self.last_needed_block >= 2: 81 | block2 = [ 82 | inception.Mixed_5b, 83 | inception.Mixed_5c, 84 | inception.Mixed_5d, 85 | inception.Mixed_6a, 86 | inception.Mixed_6b, 87 | inception.Mixed_6c, 88 | inception.Mixed_6d, 89 | inception.Mixed_6e, 90 | ] 91 | self.blocks.append(nn.Sequential(*block2)) 92 | 93 | # Block 3: aux classifier to final avgpool 94 | if self.last_needed_block >= 3: 95 | block3 = [ 96 | inception.Mixed_7a, 97 | inception.Mixed_7b, 98 | inception.Mixed_7c, 99 | nn.AdaptiveAvgPool2d(output_size=(1, 1)) 100 | ] 101 | self.blocks.append(nn.Sequential(*block3)) 102 | 103 | for param in self.parameters(): 104 | param.requires_grad = requires_grad 105 | 106 | def forward(self, inp): 107 | """Get Inception feature maps 108 | Parameters 109 | ---------- 110 | inp : torch.autograd.Variable 111 | Input tensor of shape Bx3xHxW. 
Values are expected to be in 112 | range (0, 1) 113 | Returns 114 | ------- 115 | List of torch.autograd.Variable, corresponding to the selected output 116 | block, sorted ascending by index 117 | """ 118 | outp = [] 119 | x = inp 120 | 121 | if self.resize_input: 122 | x = F.upsample(x, size=(299, 299), mode='bilinear') 123 | 124 | if self.normalize_input: 125 | x = x.clone() 126 | x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5 127 | x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5 128 | x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5 129 | 130 | for idx, block in enumerate(self.blocks): 131 | x = block(x) 132 | if idx in self.output_blocks: 133 | outp.append(x) 134 | 135 | if idx == self.last_needed_block: 136 | break 137 | 138 | return outp 139 | -------------------------------------------------------------------------------- /src/dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | import scipy 4 | import torch 5 | import random 6 | import numpy as np 7 | import cv2 8 | import torchvision.transforms.functional as F 9 | 10 | from torch.utils.data import DataLoader 11 | from PIL import Image 12 | from imageio import imread 13 | from .utils import random_crop, center_crop, side_crop 14 | from skimage.feature import canny 15 | from skimage.color import rgb2gray, gray2rgb 16 | 17 | 18 | class Dataset(torch.utils.data.Dataset): 19 | def __init__(self, config, input_flist, test_mask, augment=True, training=True): 20 | super(Dataset, self).__init__() 21 | self.input_size = config.INPUT_SIZE 22 | self.center = config.CENTER 23 | self.model = config.MODEL 24 | self.augment = augment 25 | self.training = training 26 | self.data = self.load_flist(input_flist) 27 | self.side = config.SIDE 28 | self.mean = config.MEAN 29 | self.std = config.STD 30 | self.count = 0 31 | self.pos = None 32 | self.batchsize = config.BATCH_SIZE 33 | self.catmask = config.CATMASK 34 | self.datatype = config.DATATYPE 35 | if self.datatype == 2: 36 | self.scence_width = 512 37 | self.scence_height = 256 38 | self.known_mask = False 39 | # self.known_mask = not training 40 | if self.known_mask: 41 | self.test_mask = self.load_flist(test_mask) 42 | # if training == False: 43 | # self.test_mask = self.load_flist(test_mask) 44 | 45 | def __len__(self): 46 | return len(self.data) 47 | 48 | def __getitem__(self, index): 49 | item = self.load_item(index) 50 | 51 | return item 52 | 53 | def resize(self, img, width, height): 54 | img = cv2.resize(img, dsize=(width, height), interpolation=cv2.INTER_AREA) 55 | 56 | return img 57 | 58 | def load_name(self, index): 59 | name = self.data[index] 60 | return os.path.basename(name) 61 | 62 | def load_item(self, index): 63 | #size = self.input_size 64 | data = imread(self.data[index]) 65 | self.seq = index 66 | 67 | if len(data.shape) == 2: 68 | data = data[:, :, np.newaxis] 69 | data = data.repeat(3, axis=2) 70 | if self.datatype == 1: 71 | data = self.resize(data, self.input_size, self.input_size) 72 | if self.datatype == 2: 73 | data = self.resize(data, self.scence_width, self.scence_height) 74 | 75 | _, pos, mask = self.cpimage(data) 76 | 77 | fmask_data = mask 78 | 79 | z = torch.FloatTensor(np.random.normal(0, 1, (3, fmask_data.shape[1],fmask_data.shape[0]))) 80 | 81 | self.count += 1 82 | if self.count == self.batchsize: 83 | self.count = 0 84 | 85 | if self.augment and np.random.binomial(1, 0.5) > 0: 86 | data = data[:, ::-1, ...] 87 | fmask_data = fmask_data[:, ::-1, ...] 
88 | 89 | data_norm = self.to_tensor_norm(data) 90 | data = self.to_tensor(data) 91 | input_data = data * (1 - self.to_tensor(fmask_data)) 92 | # input_data = data_norm * (1 - self.to_tensor(fmask_data)) 93 | mask_data = data * (1 - self.to_tensor(fmask_data)) 94 | 95 | return data, input_data, torch.IntTensor(pos),\ 96 | self.to_tensor(fmask_data), self.to_tensor(fmask_data),\ 97 | mask_data, z 98 | 99 | 100 | def img_resize(self, img, width, height, centerCrop=False): 101 | imgh, imgw = img.shape[0:2] 102 | 103 | if centerCrop and imgh != imgw: 104 | # center crop 105 | side = np.minimum(imgh, imgw) 106 | j = (imgh - side) // 2 107 | i = (imgw - side) // 2 108 | img = img[j:j + side, i:i + side, ...] 109 | 110 | img = cv2.resize(img, dsize=(width, height), interpolation=cv2.INTER_AREA) 111 | 112 | return img 113 | 114 | def locate_mask(self, data, mask): 115 | height, width = data.shape[0:2] 116 | coord = 0 117 | for i in range(width): 118 | for j in range(height): 119 | if (mask[i][j] != 0): 120 | coord = (j,i) 121 | break 122 | if (mask[i][j] != 0): 123 | break 124 | inner_img = data[i:i+128, j:j+128] 125 | return inner_img, coord 126 | 127 | def dealimage(self, data, mask): 128 | rc, pos = self.locate_mask(data, mask) 129 | return rc, pos 130 | 131 | # def cpimage(self, data): 132 | # rc, pos, mask = random_crop(data, int(data.shape[1]/2), self.datatype) 133 | # return rc, pos, mask 134 | def cpimage(self, data): 135 | if self.known_mask: 136 | # print(" seq: ",self.seq," mask file: ",self.mask_file[self.seq]) 137 | mask = imread(self.test_mask[self.seq]) 138 | rc, pos = self.dealimage(data, mask) 139 | self.pos = pos 140 | # rc, pos, mask = random_crop(data, int(data.shape[1]/2), self.datatype, self.count, self.pos, self.known_mask) 141 | rc, pos, mask = center_crop(data, int(data.shape[1]/2)) 142 | self.pos = pos 143 | return rc, pos, mask 144 | 145 | def gray_fmap(self, fmap_data): 146 | fmap_data = cv2.cvtColor(fmap_data, cv2.COLOR_BGR2GRAY) 147 | fmap_data[fmap_data < fmap_data.mean()+15] = 0 148 | fmap_data = cv2.equalizeHist(fmap_data) 149 | 150 | return fmap_data 151 | 152 | 153 | def load_flist(self, flist): 154 | if isinstance(flist, list): 155 | return flist 156 | 157 | if isinstance(flist, str): 158 | # print(flist) 159 | if os.path.isdir(flist): 160 | flist = list(glob.glob(flist + '/*.jpg')) + list(glob.glob(flist + '/*.png')) 161 | flist.sort() 162 | return flist 163 | 164 | if os.path.isfile(flist): 165 | # try: 166 | return np.genfromtxt(flist, dtype=np.str, encoding='utf-8') 167 | # except: 168 | # print(11, flist) 169 | # return [flist] 170 | 171 | return [] 172 | 173 | def to_tensor(self, img): 174 | img = Image.fromarray(img) 175 | img_t = F.to_tensor(img).float() 176 | return img_t 177 | 178 | def to_tensor_norm(self, img): 179 | img = Image.fromarray(img) 180 | img_t = F.to_tensor(img).float() 181 | img_t = F.normalize(img_t, self.mean, self.std) # 输入mean 和 std 182 | return img_t 183 | 184 | 185 | def create_iterator(self, batch_size): 186 | while True: 187 | sample_loader = DataLoader( 188 | dataset=self, 189 | batch_size=batch_size, 190 | drop_last=True 191 | ) 192 | 193 | for item in sample_loader: 194 | yield item -------------------------------------------------------------------------------- /src/PartPainting.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import torch 4 | import torch.nn as nn 5 | from torch.utils.data import DataLoader 6 | from .dataset import Dataset 7 | from 
.model.model import PartPModel 8 | from .utils import Progbar, create_dir, stitch_images, imsave, template_match 9 | from PIL import Image 10 | # from tensorboardX import SummaryWriter 11 | import torch.nn.functional as F 12 | 13 | from .metrics import PSNR 14 | 15 | class PartPainting(): 16 | def __init__(self, config): 17 | self.config = config 18 | self.model_name = 'PartPModel' 19 | self.Model = PartPModel(config, '').to(config.DEVICE) 20 | 21 | self.psnr = PSNR(255.0).to(config.DEVICE) 22 | 23 | self.train_dataset = Dataset(config, config.TRAIN_FLIST, config.TEST_MASK, augment=False, training=True) 24 | self.val_dataset = Dataset(config, config.VAL_FLIST, config.TEST_MASK, augment=False, training=False) 25 | self.sample_iterator = self.train_dataset.create_iterator(config.SAMPLE_SIZE) 26 | 27 | self.samples_path = os.path.join(config.PATH, 'samples') 28 | self.results_path = os.path.join(config.PATH, 'results') 29 | 30 | self.log_file = os.path.join(config.PATH, 'log-' + self.model_name + '.txt') 31 | 32 | # self.writer = SummaryWriter(os.path.join(config.PATH, 'runs')) 33 | 34 | def load(self): 35 | self.Model.load() 36 | 37 | def save(self, ite): 38 | self.Model.save(ite) 39 | 40 | def train(self): 41 | train_loader = DataLoader( 42 | dataset=self.train_dataset, 43 | batch_size=self.config.BATCH_SIZE, 44 | num_workers=4, 45 | drop_last=False, 46 | shuffle=True, 47 | pin_memory=True 48 | ) 49 | 50 | epoch = 0 51 | keep_training = True 52 | 53 | max_iter = int(self.config.MAX_ITERS) 54 | total = len(self.train_dataset) 55 | 56 | while(keep_training): 57 | epoch += 1 58 | 59 | probar = Progbar(total, width=20, stateful_metrics=['epoch', 'iter', 'mean_gate', 'max_gate', 'min_gate']) 60 | 61 | ite = self.Model.iteration 62 | 63 | for it in train_loader: 64 | self.Model.train() 65 | data, pdata, pos, fmask_data, half_fmask, mask, z = self.cuda(*it) 66 | 67 | outputs, d_loss, d_p_loss, g_loss, logs = self.Model.process(data, pdata, half_fmask, ite) 68 | 69 | self.Model.backward(d_loss, d_p_loss, g_loss) 70 | 71 | psnr = self.psnr(self.postprocess(data), self.postprocess(outputs)) 72 | mae = (torch.sum(torch.abs(data - outputs)) / torch.sum(data)).float() 73 | 74 | ite = self.Model.iteration 75 | 76 | # ------------------------------------------------------------------------------------ 77 | # end training 78 | 79 | if ite > max_iter: 80 | keep_training = False 81 | break 82 | 83 | # ------------------------------------------------------------------------------------ 84 | # save log & sample & eval & save model 85 | logs.append(('psnr', psnr.item())) 86 | logs.append(('mae', mae.item())) 87 | 88 | logs = [("epoch", epoch), ("iter", ite)] + logs 89 | # self.writer.add_scalars('Discriminator', {'domaink': d_loss}, epoch) 90 | # self.writer.add_scalars('Generator', {'domaink': g_loss}, epoch) 91 | # self.writer.add_scalars('Detail', self.log2dict(logs), epoch) 92 | 93 | # progbar 94 | probar.add(len(data), values=[x for x in logs]) 95 | 96 | if self.config.INTERVAL and ite % self.config.INTERVAL == 0: 97 | self.log(logs) 98 | self.sample() 99 | 100 | if self.config.SAVE_INTERAL and ite % self.config.SAVE_INTERAL == 0: 101 | self.save(ite) 102 | 103 | print('\nEnd training...') 104 | # self.writer.close() 105 | 106 | def log2dict(self, logs): 107 | dict = {} 108 | for i in range(2, len(logs)): 109 | dict[logs[i][0]] = logs[i][1] 110 | return dict 111 | 112 | def test(self): 113 | test_loader = DataLoader( 114 | dataset=self.val_dataset, 115 | batch_size=1 116 | ) 117 | 118 | 
create_dir(self.results_path) 119 | 120 | input_data = os.path.join(self.results_path, 'input') 121 | gt_data = os.path.join(self.results_path, 'gt') 122 | masks = os.path.join(self.results_path, 'mask_gt') 123 | state_1_results = os.path.join(self.results_path, 'output') 124 | # state_rec_results = os.path.join(self.results_path, 'output_blackbox') 125 | # features_path = os.path.join(self.results_path, 'part-patch') 126 | 127 | create_dir(input_data) 128 | create_dir(gt_data) 129 | create_dir(masks) 130 | create_dir(state_1_results) 131 | # create_dir(features_path) 132 | 133 | index = 0 134 | # progbar = Progbar(total, width=20, stateful_metrics=['it']) 135 | 136 | # a = [] 137 | for it in test_loader: 138 | 139 | # file name 140 | # name = str(index) + '_' + self.val_dataset.load_name(index) 141 | name = self.val_dataset.load_name(index) 142 | index += 1 143 | 144 | # data, pdata, fmask_data, mask = self.cuda(*it) 145 | data, pdata, pos, fmask_data, half_fmask, mask, z = self.cuda(*it) 146 | o, ys = self.Model(pdata, fmask_data) 147 | 148 | up = nn.Upsample(size=256) 149 | # features = [] 150 | # for i in range(len(ys)): 151 | # yys = torch.cat([torch.mean(ys[i], 1, True)]*3, dim=1) 152 | # yys = up(yys) 153 | # features.append(self.postprocess(yys)[0]) 154 | 155 | o = fmask_data * o + data * (1 - fmask_data) 156 | 157 | data = self.postprocess(data)[0] 158 | pdata = self.postprocess(pdata)[0] 159 | mask = self.postprocess(mask)[0] 160 | o = self.postprocess(o)[0] 161 | 162 | # for i in range(len(features)): 163 | # imsave(features[i], os.path.join(features_path, 'layer_' + i + name)) 164 | imsave(pdata, os.path.join(input_data, name)) 165 | imsave(data, os.path.join(gt_data, name)) 166 | imsave(mask, os.path.join(masks, name)) 167 | imsave(o, os.path.join(state_1_results, name)) 168 | 169 | print(index, name) 170 | 171 | print('\nEnd test....') 172 | 173 | def log(self, logs): 174 | with open(self.log_file, 'a') as f: 175 | f.write('%s\r\n' % ' '.join([str(item[1]) for item in logs])) 176 | 177 | def cuda(self, *args): 178 | return (item.to(self.config.DEVICE) for item in args) 179 | 180 | def postprocess(self, img): 181 | img = img * 255.0 182 | img = img.permute(0, 2, 3, 1) 183 | return img.int() 184 | 185 | def sample(self): 186 | 187 | ite = self.Model.iteration 188 | its = next(self.sample_iterator) 189 | 190 | data, pdata, pos, fmask_data, half_fmask, mask, z = self.cuda(*its) 191 | 192 | o_1s, ys = self.Model(pdata, half_fmask, pos, z) 193 | 194 | up = nn.Upsample(size=(256,256), mode='bilinear', align_corners=False) 195 | 196 | # features = [] 197 | # for i in range(len(ys)): 198 | # yys = torch.cat([torch.mean(ys[i], 1, True)]*3, dim=1) 199 | # yys = up(yys) 200 | # features.append(self.postprocess(yys)) 201 | 202 | o = o_1s * fmask_data + data * (1 - fmask_data) 203 | 204 | # draw sample image 205 | image_per_row = 1 206 | images = stitch_images( 207 | self.postprocess(mask), 208 | self.postprocess(o_1s), 209 | # *features, 210 | self.postprocess(o), 211 | self.postprocess(data), 212 | img_per_row = image_per_row 213 | ) 214 | 215 | path = os.path.join(self.samples_path) 216 | name = os.path.join(path, str(ite).zfill(5) + '.png') 217 | create_dir(path) 218 | 219 | print('\nSaving sample images...' 
+ name) 220 | images.save(name) -------------------------------------------------------------------------------- /src/model/model.py: -------------------------------------------------------------------------------- 1 | import os 2 | import torch 3 | import torch.nn as nn 4 | import torch.optim as optim 5 | import torch.distributions as tdist 6 | import torchvision.models as models 7 | import imp 8 | 9 | import numpy as np 10 | 11 | from .networks import Generator, MultiscaleDiscriminator, DenseD, Dis_Inn 12 | # from .vae import VAE 13 | from .loss import ColorLoss, PerceptualLoss, AdversarialLoss, KLDLoss 14 | # StyleLoss, FeatureAvgLoss, MRFLoss 15 | from ..utils import template_match, Adam16 16 | 17 | import math 18 | 19 | 20 | class BaseModel(nn.Module): 21 | def __init__(self, name, config, f): 22 | super(BaseModel, self).__init__() 23 | 24 | self.name = name 25 | self.config = config 26 | self.iteration = 0 27 | self.device = config.DEVICE 28 | self.g_weights_path = os.path.join(config.PATH, 'g.pth') 29 | self.d_weights_path = os.path.join(config.PATH, 'd.pth') 30 | self.dp_weights_path = os.path.join(config.PATH, 'dp.pth') 31 | 32 | def load(self): 33 | if self.name == 'PartPModel': 34 | if os.path.exists(self.g_weights_path): 35 | print('Loading %s Model ...' % self.name) 36 | 37 | g_data = torch.load(self.g_weights_path) 38 | self.g.load_state_dict(g_data['params']) 39 | self.iteration = g_data['iteration'] 40 | 41 | if os.path.exists(self.d_weights_path): 42 | d_data = torch.load(self.d_weights_path) 43 | self.d.load_state_dict(d_data['params']) 44 | 45 | if os.path.exists(self.dp_weights_path): 46 | dp_data = torch.load(self.dp_weights_path) 47 | self.d_p.load_state_dict(dp_data['params']) 48 | 49 | 50 | def save(self, ite): 51 | print('\nSaving %s...\n' % self.name) 52 | if self.name == 'PartPModel': 53 | # print(self.name == 'PartPModel') 54 | torch.save({ 55 | 'iteration': self.iteration, 56 | 'params': self.g.state_dict()}, self.g_weights_path + '_' + str(ite)) 57 | torch.save({'params': self.d.state_dict()}, self.d_weights_path + '_' + str(ite)) 58 | torch.save({'params': self.d_p.state_dict()}, self.dp_weights_path + '_' + str(ite)) 59 | 60 | 61 | class PartPModel(BaseModel): 62 | def __init__(self, config, f): 63 | super(PartPModel, self).__init__('PartPModel', config, f) 64 | 65 | g = Generator() 66 | d_p = MultiscaleDiscriminator() 67 | d = DenseD() 68 | 69 | color_loss = ColorLoss() 70 | adversarial_loss = AdversarialLoss() 71 | l1_loss = nn.L1Loss() 72 | kld_loss = KLDLoss() 73 | 74 | content_loss = PerceptualLoss() 75 | 76 | self.add_module('g', g) 77 | self.add_module('d', d) 78 | self.add_module('d_p', d_p) 79 | 80 | self.add_module('content_loss', content_loss) 81 | self.add_module('adversarial_loss', adversarial_loss) 82 | self.add_module('color_loss', color_loss) 83 | self.add_module('l1_loss', l1_loss) 84 | self.add_module('kld_loss', kld_loss) 85 | 86 | self.g_optimizer = Adam16(params=g.parameters(), lr=float(config.G_LR), betas=(config.BETA1, config.BETA2), weight_decay=0.0, eps=1e-8) 87 | self.d_optimizer = Adam16(params=d.parameters(), lr=float(config.D_LR), betas=(config.BETA1, config.BETA2), weight_decay=0.0, eps=1e-8) 88 | self.d_p_optimizer = Adam16(params=d_p.parameters(), lr=float(config.D_LR), betas=(config.BETA1, config.BETA2), weight_decay=0.0, eps=1e-8) 89 | # self.g_optimizer = optim.Adam(params=g.parameters(), lr=float(config.LR / 2), betas=(config.BETA1, config.BETA2)) 90 | # self.d_optimizer = optim.Adam(params=d.parameters(), 
lr=float(config.LR * 2), betas=(config.BETA1, config.BETA2)) 91 | # self.d_p_optimizer = optim.Adam(params=d_p.parameters(), lr=float(config.LR * 2), betas=(config.BETA1, config.BETA2)) 92 | 93 | def process(self, data, pdata, half_fmask, ite): 94 | self.iteration += 1 95 | 96 | self.ite = ite 97 | 98 | mask = 1 - half_fmask 99 | # zero optimizers 100 | self.g_optimizer.zero_grad() 101 | self.d_optimizer.zero_grad() 102 | self.d_p_optimizer.zero_grad() 103 | 104 | o, (mu, logvar), ys = self.g(pdata, mask) 105 | 106 | kld_loss = self.kld_loss(mu, logvar) * self.config.KLD_LOSS_WEIGHT 107 | 108 | # coarse loss 109 | c_loss = 0 110 | f_loss = 0 111 | 112 | # total 113 | g_loss = 0 114 | d_p_loss = 0 115 | d_loss = 0 116 | # ---------------------------- G / D ---------------------------- 117 | # d loss 118 | d_real = data 119 | d_fake = o.detach() 120 | 121 | # d_real_arr = self.d(torch.cat((d_real, pdata), dim=1)) 122 | # d_fake_arr = self.d(torch.cat((d_fake, pdata), dim=1)) 123 | # g_fake_arr = self.d(torch.cat((o, pdata), dim=1)) 124 | 125 | # g_adv = 0 126 | # for i in range(len(d_real_arr)): 127 | # d_real_l = self.adversarial_loss(d_real_arr[i], True, True) 128 | # d_fake_l = self.adversarial_loss(d_fake_arr[i], False, True) 129 | # d_loss += (d_real_l + d_fake_l) / 2 130 | 131 | # g_adv += self.adversarial_loss(g_fake_arr[i], True, False) * self.config.G1_ADV_LOSS_WEIGHT 132 | 133 | # d_loss = d_loss / 2 134 | # f_loss += g_adv / 2 135 | 136 | g_adv = 0 137 | 138 | # cat 3 + 3 139 | # d_real_6 = self.d(torch.cat((d_real, pdata), dim=1)) 140 | # d_fake_6 = self.d(torch.cat((d_fake, pdata), dim=1)) 141 | # g_fake = self.d(torch.cat((o, pdata), dim=1)) 142 | d_real_6 = self.d(d_real) 143 | d_fake_6 = self.d(d_fake) 144 | g_fake = self.d(o) 145 | 146 | d_real_l = self.adversarial_loss(d_real_6, True, True) 147 | d_fake_l = self.adversarial_loss(d_fake_6, False, True) 148 | d_loss += (d_real_l + d_fake_l) / 2 149 | 150 | g_adv += self.adversarial_loss(g_fake, True, False) * self.config.G1_ADV_LOSS_WEIGHT 151 | 152 | # cat 3 153 | d_real_arr_p = self.d_p(d_real * half_fmask) 154 | d_fake_arr_p = self.d_p(d_fake * half_fmask) 155 | g_fake_arr_p = self.d_p(o * half_fmask) 156 | 157 | # d_real_arr_p = self.d_p(d_real) 158 | # d_fake_arr_p = self.d_p(d_fake) 159 | # g_fake_arr_p = self.d_p(o) 160 | 161 | g_p_adv = 0 162 | for i in range(len(d_real_arr_p)): 163 | d_p_real_l = self.adversarial_loss(d_real_arr_p[i], True, True) 164 | d_p_fake_l = self.adversarial_loss(d_fake_arr_p[i], False, True) 165 | d_p_loss += (d_p_real_l + d_p_fake_l) / 2 166 | 167 | g_p_adv += self.adversarial_loss(g_fake_arr_p[i], True, False) 168 | 169 | d_p_loss = d_p_loss / 2 170 | g_adv += g_p_adv / 2 * self.config.G1_ADV_LOSS_WEIGHT 171 | 172 | # g_adv += (self.adversarial_loss(g_fake, True, False) + self.adversarial_loss(g_fake_p, True, False)) * self.config.G1_ADV_LOSS_WEIGHT 173 | 174 | f_loss += g_adv / 2 175 | 176 | # g_1 content loss 177 | g_content_loss, g_sty_loss = self.content_loss(o, data) 178 | g_content_loss = g_content_loss * self.config.G1_CONTENT_LOSS_WEIGHT 179 | g_sty_loss = g_sty_loss * self.config.G2_STYLE_LOSS_WEIGHT 180 | c_loss += g_content_loss 181 | f_loss += g_sty_loss 182 | 183 | g_color_loss = self.color_loss(o, data) 184 | g_color_loss = g_color_loss * self.config.G1_COLOR_LOSS_WEIGHT 185 | c_loss += g_color_loss 186 | 187 | # g l1 loss 188 | g_l1_loss = self.l1_loss(o, data) * self.config.G2_L1_LOSS_WEIGHT 189 | c_loss += g_l1_loss 190 | 191 | g_loss = kld_loss + c_loss + f_loss 192 | # 
g_loss = c_loss + f_loss 193 | 194 | logs = [ 195 | ("l_d", d_loss.item()), 196 | ("l_dp", d_p_loss.item()), 197 | ("l_g_adv", g_adv.item()), 198 | ("l_g_con", g_content_loss.item()), 199 | ("l_color", g_color_loss.item()), 200 | ("l_l1", g_l1_loss.item()), 201 | ("l_kld", kld_loss.item()), 202 | ("l_sty", g_sty_loss.item()) 203 | ] 204 | # return o, d_loss, d_p_loss, c_loss + g_sty_loss, logs 205 | 206 | return o, d_loss, d_p_loss, g_loss, logs 207 | # return o, None, d_p_loss, g_loss, logs 208 | 209 | def forward(self, pdata, half_fmask, pos=None, z=None): 210 | o, (mu, logvar), ys = self.g(pdata, 1 - half_fmask) 211 | return o, ys 212 | 213 | def backward(self, d_loss, d_p_loss, g_loss): 214 | # if self.ite > self.config.COARSE_ITE: 215 | d_loss.backward() 216 | self.d_optimizer.step() 217 | 218 | d_p_loss.backward() 219 | self.d_p_optimizer.step() 220 | 221 | g_loss.backward() 222 | self.g_optimizer.step() 223 | -------------------------------------------------------------------------------- /scores/fid_score.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Calculates the Frechet Inception Distance (FID) to evaluate GANs 3 | The FID metric calculates the distance between two distributions of images. 4 | Typically, we have summary statistics (mean & covariance matrix) of one 5 | of these distributions, while the 2nd distribution is given by a GAN. 6 | When run as a stand-alone program, it compares the distribution of 7 | images that are stored as PNG/JPEG at a specified location with a 8 | distribution given by summary statistics (in pickle format). 9 | The FID is calculated by assuming that X_1 and X_2 are the activations of 10 | the pool_3 layer of the inception net for generated samples and real world 11 | samples respectively. 12 | See --help to see further details. 13 | Code adapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead 14 | of Tensorflow 15 | Copyright 2018 Institute of Bioinformatics, JKU Linz 16 | Licensed under the Apache License, Version 2.0 (the "License"); 17 | you may not use this file except in compliance with the License. 18 | You may obtain a copy of the License at 19 | http://www.apache.org/licenses/LICENSE-2.0 20 | Unless required by applicable law or agreed to in writing, software 21 | distributed under the License is distributed on an "AS IS" BASIS, 22 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 23 | See the License for the specific language governing permissions and 24 | limitations under the License. 25 | """ 26 | import os 27 | import pathlib 28 | from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter 29 | 30 | import torch 31 | import numpy as np 32 | from imageio import imread 33 | from scipy import linalg 34 | from torch.autograd import Variable 35 | from torch.nn.functional import adaptive_avg_pool2d 36 | 37 | from inception import InceptionV3 38 | 39 | 40 | parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) 41 | parser.add_argument('--path', type=str, nargs=2, help=('Path to the generated images or to .npz statistic files')) 42 | parser.add_argument('--batch-size', type=int, default=64, help='Batch size to use') 43 | parser.add_argument('--dims', type=int, default=2048, choices=list(InceptionV3.BLOCK_INDEX_BY_DIM), help=('Dimensionality of Inception features to use. 
By default, uses pool3 features')) 44 | parser.add_argument('-c', '--gpu', default='', type=str, help='GPU to use (leave blank for CPU only)') 45 | 46 | 47 | def get_activations(images, model, batch_size=64, dims=2048, 48 | cuda=False, verbose=False): 49 | """Calculates the activations of the pool_3 layer for all images. 50 | Params: 51 | -- images : Numpy array of dimension (n_images, 3, hi, wi). The values 52 | must lie between 0 and 1. 53 | -- model : Instance of inception model 54 | -- batch_size : the images numpy array is split into batches with 55 | batch size batch_size. A reasonable batch size depends 56 | on the hardware. 57 | -- dims : Dimensionality of features returned by Inception 58 | -- cuda : If set to True, use GPU 59 | -- verbose : If set to True and parameter out_step is given, the number 60 | of calculated batches is reported. 61 | Returns: 62 | -- A numpy array of dimension (num images, dims) that contains the 63 | activations of the given tensor when feeding inception with the 64 | query tensor. 65 | """ 66 | model.eval() 67 | 68 | d0 = images.shape[0] 69 | if batch_size > d0: 70 | print(('Warning: batch size is bigger than the data size. ' 71 | 'Setting batch size to data size')) 72 | batch_size = d0 73 | 74 | n_batches = d0 // batch_size 75 | n_used_imgs = n_batches * batch_size 76 | 77 | pred_arr = np.empty((n_used_imgs, dims)) 78 | for i in range(n_batches): 79 | if verbose: 80 | print('\rPropagating batch %d/%d' % (i + 1, n_batches), 81 | end='', flush=True) 82 | start = i * batch_size 83 | end = start + batch_size 84 | 85 | batch = torch.from_numpy(images[start:end]).type(torch.FloatTensor) 86 | # torch.autograd.Variable(volatile=True) is deprecated; inference runs under torch.no_grad() below 87 | if cuda: 88 | batch = batch.cuda() 89 | 90 | with torch.no_grad(): pred = model(batch)[0] 91 | 92 | # If model output is not scalar, apply global spatial average pooling. 93 | # This happens if you choose a dimensionality not equal 2048. 94 | if pred.shape[2] != 1 or pred.shape[3] != 1: 95 | pred = adaptive_avg_pool2d(pred, output_size=(1, 1)) 96 | 97 | pred_arr[start:end] = pred.cpu().data.numpy().reshape(batch_size, -1) 98 | 99 | if verbose: 100 | print(' done') 101 | 102 | return pred_arr 103 | 104 | 105 | def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6): 106 | """Numpy implementation of the Frechet Distance. 107 | The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) 108 | and X_2 ~ N(mu_2, C_2) is 109 | d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). 110 | Stable version by Dougal J. Sutherland. 111 | Params: 112 | -- mu1 : The sample mean over activations of a layer of the 113 | inception net (like returned by the function 'get_activations') 114 | for generated samples. 115 | -- mu2 : The sample mean over activations, precalculated on a 116 | representative data set. 117 | -- sigma1: The covariance matrix over activations for generated samples. 118 | -- sigma2: The covariance matrix over activations, precalculated on a 119 | representative data set. 120 | Returns: 121 | -- : The Frechet Distance. 
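Note: sqrt(C_1*C_2) is the matrix square root computed with scipy.linalg.sqrtm; if the product is nearly singular, eps times the identity is added to both covariance matrices before retrying, and a negligible imaginary component from numerical error is discarded.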
122 | """ 123 | 124 | mu1 = np.atleast_1d(mu1) 125 | mu2 = np.atleast_1d(mu2) 126 | 127 | sigma1 = np.atleast_2d(sigma1) 128 | sigma2 = np.atleast_2d(sigma2) 129 | 130 | assert mu1.shape == mu2.shape, \ 131 | 'Training and test mean vectors have different lengths' 132 | assert sigma1.shape == sigma2.shape, \ 133 | 'Training and test covariances have different dimensions' 134 | 135 | diff = mu1 - mu2 136 | 137 | # Product might be almost singular 138 | covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) 139 | if not np.isfinite(covmean).all(): 140 | msg = ('fid calculation produces singular product; ' 141 | 'adding %s to diagonal of cov estimates') % eps 142 | print(msg) 143 | offset = np.eye(sigma1.shape[0]) * eps 144 | covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) 145 | 146 | # Numerical error might give slight imaginary component 147 | if np.iscomplexobj(covmean): 148 | if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): 149 | m = np.max(np.abs(covmean.imag)) 150 | raise ValueError('Imaginary component {}'.format(m)) 151 | covmean = covmean.real 152 | 153 | tr_covmean = np.trace(covmean) 154 | 155 | return (diff.dot(diff) + np.trace(sigma1) + 156 | np.trace(sigma2) - 2 * tr_covmean) 157 | 158 | 159 | def calculate_activation_statistics(images, model, batch_size=64, 160 | dims=2048, cuda=False, verbose=False): 161 | """Calculation of the statistics used by the FID. 162 | Params: 163 | -- images : Numpy array of dimension (n_images, 3, hi, wi). The values 164 | must lie between 0 and 1. 165 | -- model : Instance of inception model 166 | -- batch_size : The images numpy array is split into batches with 167 | batch size batch_size. A reasonable batch size 168 | depends on the hardware. 169 | -- dims : Dimensionality of features returned by Inception 170 | -- cuda : If set to True, use GPU 171 | -- verbose : If set to True and parameter out_step is given, the 172 | number of calculated batches is reported. 173 | Returns: 174 | -- mu : The mean over samples of the activations of the pool_3 layer of 175 | the inception model. 176 | -- sigma : The covariance matrix of the activations of the pool_3 layer of 177 | the inception model. 
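(mu is the mean of the (n_images, dims) activation matrix over the image axis; sigma is its (dims, dims) covariance, computed with np.cov and rowvar=False.)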
178 | """ 179 | act = get_activations(images, model, batch_size, dims, cuda, verbose) 180 | mu = np.mean(act, axis=0) 181 | sigma = np.cov(act, rowvar=False) 182 | return mu, sigma 183 | 184 | 185 | def _compute_statistics_of_path(path, model, batch_size, dims, cuda): 186 | npz_file = os.path.join(path, 'statistics.npz') 187 | if os.path.exists(npz_file): 188 | f = np.load(npz_file) 189 | m, s = f['mu'][:], f['sigma'][:] 190 | f.close() 191 | else: 192 | path = pathlib.Path(path) 193 | # print(path) 194 | files = list(path.glob('*.jpg')) + list(path.glob('*.png')) 195 | 196 | # print(files) 197 | imgs = np.array([imread(str(fn)).astype(np.float32) for fn in files]) 198 | 199 | # Bring images to shape (B, 3, H, W) 200 | # print(imgs) 201 | imgs = imgs.transpose((0, 3, 1, 2)) 202 | 203 | # Rescale images to be between 0 and 1 204 | imgs /= 255 205 | 206 | m, s = calculate_activation_statistics(imgs, model, batch_size, dims, cuda) 207 | # np.savez(npz_file, mu=m, sigma=s) 208 | 209 | return m, s 210 | 211 | 212 | def calculate_fid_given_paths(paths, batch_size, cuda, dims): 213 | """Calculates the FID of two paths""" 214 | for p in paths: 215 | if not os.path.exists(p): 216 | raise RuntimeError('Invalid path: %s' % p) 217 | 218 | block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims] 219 | 220 | model = InceptionV3([block_idx]) 221 | if cuda: 222 | model.cuda() 223 | 224 | print('calculate path1 statistics...') 225 | m1, s1 = _compute_statistics_of_path(paths[0], model, batch_size, dims, cuda) 226 | print('calculate path2 statistics...') 227 | m2, s2 = _compute_statistics_of_path(paths[1], model, batch_size, dims, cuda) 228 | print('calculate frechet distance...') 229 | fid_value = calculate_frechet_distance(m1, s1, m2, s2) 230 | 231 | return fid_value 232 | 233 | 234 | if __name__ == '__main__': 235 | args = parser.parse_args() 236 | os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu 237 | 238 | fid_value = calculate_fid_given_paths(args.path, 239 | args.batch_size, 240 | args.gpu != '', 241 | args.dims) 242 | print('FID: ', round(fid_value, 4)) 243 | -------------------------------------------------------------------------------- /src/model/loss.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | import torchvision.models as models 5 | 6 | 7 | # KL Divergence loss used in VAE with an image encoder 8 | class KLDLoss(nn.Module): 9 | def forward(self, mu, logvar): 10 | return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) 11 | 12 | class AdversarialLoss(nn.Module): 13 | r""" 14 | Adversarial loss 15 | https://arxiv.org/abs/1711.10337 16 | """ 17 | 18 | # def __init__(self, type='nsgan', target_real_label=1.0, target_fake_label=0.0): 19 | def __init__(self, type='nsgan', target_real_label=1.0, target_fake_label=0.0): 20 | r""" 21 | type = nsgan | lsgan | hinge | wgangp 22 | """ 23 | super(AdversarialLoss, self).__init__() 24 | 25 | self.type = type 26 | self.register_buffer('real_label', torch.tensor(target_real_label)) 27 | self.register_buffer('fake_label', torch.tensor(target_fake_label)) 28 | 29 | if type == 'nsgan': 30 | self.criterion = nn.BCELoss() 31 | 32 | elif type == 'lsgan': 33 | self.criterion = nn.MSELoss() 34 | 35 | elif type == 'hinge': 36 | self.criterion = nn.ReLU() 37 | elif type == 'wgangp': 38 | self.criterion = None 39 | 40 | def __call__(self, outputs, is_real, is_disc=None): 41 | if self.type == 'hinge': 42 | if is_disc: 43 | if is_real: 44 | outputs = 
-outputs 45 | return self.criterion(1 + outputs).mean() 46 | else: 47 | return (-outputs).mean() 48 | elif self.type == 'wgangp': 49 | if is_real: 50 | loss = outputs.mean() 51 | else: 52 | loss = -outputs.mean() 53 | return loss 54 | else: 55 | labels = (self.real_label if is_real else self.fake_label).expand_as(outputs) 56 | loss = self.criterion(outputs, labels) 57 | return loss 58 | 59 | 60 | def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0): 61 | """Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028 62 | 63 | Arguments: 64 | netD (network) -- discriminator network 65 | real_data (tensor array) -- real images 66 | fake_data (tensor array) -- generated images from the generator 67 | device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu') 68 | type (str) -- if we mix real and fake data or not [real | fake | mixed]. 69 | constant (float) -- the constant used in formula ( | |gradient||_2 - constant)^2 70 | lambda_gp (float) -- weight for this loss 71 | 72 | Returns the gradient penalty loss 73 | """ 74 | if lambda_gp > 0.0: 75 | if type == 'real': # either use real images, fake images, or a linear interpolation of two. 76 | interpolatesv = real_data 77 | elif type == 'fake': 78 | interpolatesv = fake_data 79 | elif type == 'mixed': 80 | alpha = torch.rand(real_data.shape[0], 1) 81 | alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape) 82 | alpha = alpha.to(device) 83 | interpolatesv = alpha * real_data + ((1 - alpha) * fake_data) 84 | else: 85 | raise NotImplementedError('{} not implemented'.format(type)) 86 | interpolatesv.requires_grad_(True) 87 | disc_interpolates = netD(interpolatesv) 88 | gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv, 89 | grad_outputs=torch.ones(disc_interpolates.size()).to(device), 90 | create_graph=True, retain_graph=True, only_inputs=True) 91 | gradients = gradients[0].view(real_data.size(0), -1) # flat the data 92 | gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps 93 | return gradient_penalty, gradients 94 | else: 95 | return 0.0, None 96 | 97 | class FeatureAvgLoss(nn.Module): 98 | def __init__(self): 99 | super(FeatureAvgLoss, self).__init__() 100 | self.criterion = nn.L1Loss() 101 | 102 | def forward(self, feature, mask): 103 | f_g = feature * mask 104 | f_b = feature * (1 - mask) 105 | f_g_avg = torch.sum(f_g, (2, 3)) / torch.sum(mask) 106 | f_b_avg = torch.sum(f_b, (2, 3)) / torch.sum((1 - mask)) 107 | 108 | # print(f_g_avg.shape) 109 | loss = self.criterion(f_g_avg, f_b_avg) 110 | return loss 111 | 112 | class ColorLoss(nn.Module): 113 | def __init__(self): 114 | super(ColorLoss, self).__init__() 115 | self.cosinesimilarity = nn.CosineSimilarity(dim=3) 116 | 117 | def forward(self, x, y): 118 | o = x.permute(0,2,3,1) 119 | data = y.permute(0,2,3,1) 120 | # color_loss_l = (self.cosinesimilarity(o, data) + 0.0001)**(.4) 121 | color_loss_l = (self.cosinesimilarity(o, data) + 0.0001) 122 | # color_loss_r = (self.cosinesimilarity(1-o, 1-data) + 0.0001)**(.4) 123 | color_loss_r = (self.cosinesimilarity(1-o, 1-data) + 0.0001) 124 | color_loss = 1 - torch.mean(torch.min(color_loss_l, color_loss_r)) 125 | 126 | return color_loss 127 | 128 | # class ColorLoss(nn.Module): 129 | # def __init__(self): 130 | # super(ColorLoss, self).__init__() 131 | # 
self.cosinesimilarity = nn.CosineSimilarity(dim=3) 132 | 133 | # def forward(self, x, y): 134 | # o = x.permute(0,2,3,1) 135 | # data = y.permute(0,2,3,1) 136 | # color_loss_l = self.cosinesimilarity(o, data) 137 | # color_loss_r = self.cosinesimilarity(1-o, 1-data) 138 | # color_loss = torch.mean((1 - torch.min(color_loss_l, color_loss_r) + 0.0001) ** .4) 139 | 140 | # return color_loss 141 | 142 | class PerceptualLoss(nn.Module): 143 | def __init__(self): 144 | super(PerceptualLoss, self).__init__() 145 | self.vgg = VGG19().cuda() 146 | self.criterion = nn.L1Loss() 147 | self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0] 148 | 149 | def compute_gram(self, x): 150 | b, ch, h, w = x.size() 151 | f = x.view(b, ch, w * h) 152 | f_T = f.transpose(1, 2) 153 | G = f.bmm(f_T) / (h * w * ch) 154 | 155 | return G 156 | 157 | def forward(self, x, y): 158 | x_vgg, y_vgg = self.vgg(x), self.vgg(y) 159 | loss = 0 160 | style_loss = 0 161 | for i in range(len(x_vgg)): 162 | loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach()) 163 | 164 | style_loss += self.criterion(self.compute_gram(x_vgg[3]), self.compute_gram(y_vgg[3])) 165 | style_loss += self.criterion(self.compute_gram(x_vgg[4]), self.compute_gram(y_vgg[4])) 166 | return loss, style_loss 167 | 168 | class StyleLoss(nn.Module): 169 | 170 | def __init__(self): 171 | super(StyleLoss, self).__init__() 172 | self.vgg = VGG19().cuda() 173 | self.criterion = torch.nn.L1Loss() 174 | self.weights = [0, 1, 1, 1, 1] 175 | 176 | def compute_gram(self, x): 177 | b, ch, h, w = x.size() 178 | f = x.view(b, ch, w * h) 179 | f_T = f.transpose(1, 2) 180 | G = f.bmm(f_T) / (h * w * ch) 181 | 182 | return G 183 | 184 | def __call__(self, x, y): 185 | # Compute features 186 | x_vgg, y_vgg = self.vgg(x), self.vgg(y) 187 | 188 | # Compute loss 189 | style_loss = 0.0 190 | for i in range(len(x_vgg)): 191 | style_loss += self.weights[i] * self.criterion(self.compute_gram(x_vgg[i]), self.compute_gram(y_vgg[i])) 192 | return style_loss 193 | 194 | 195 | class MRFLoss(nn.Module): 196 | def __init__(self): 197 | super(MRFLoss, self).__init__() 198 | self.vgg = VGG19().cuda() 199 | 200 | self.weights = [0, 0, .5, 1, 0] 201 | 202 | self.bias = 1.0 203 | self.nn_stretch_sigma = 0.5 204 | self.lambda_style = 1.0 205 | self.lambda_content = 1.0 206 | 207 | def sum_normalize(self, featmaps): 208 | reduce_sum = torch.sum(featmaps, dim=1, keepdim=True) 209 | return featmaps / reduce_sum 210 | 211 | def patch_extraction(self, featmaps): 212 | patch_size = 1 213 | patch_stride = 1 214 | patches_as_depth_vectors = featmaps.unfold(2, patch_size, patch_stride).unfold(3, patch_size, patch_stride) 215 | self.patches_OIHW = patches_as_depth_vectors.permute(0, 2, 3, 1, 4, 5) 216 | dims = self.patches_OIHW.size() 217 | self.patches_OIHW = self.patches_OIHW.view(-1, dims[3], dims[4], dims[5]) 218 | return self.patches_OIHW 219 | 220 | def compute_relative_distances(self, cdist): 221 | epsilon = 1e-5 222 | div = torch.min(cdist, dim=1, keepdim=True)[0] 223 | relative_dist = cdist / (div + epsilon) 224 | return relative_dist 225 | 226 | def exp_norm_relative_dist(self, relative_dist): 227 | scaled_dist = relative_dist 228 | dist_before_norm = torch.exp((self.bias - scaled_dist)/self.nn_stretch_sigma) 229 | self.cs_NCHW = self.sum_normalize(dist_before_norm) 230 | return self.cs_NCHW 231 | 232 | def mrf_loss(self, gen, tar): 233 | meanT = torch.mean(tar, 1, keepdim=True) 234 | gen_feats, tar_feats = gen - meanT, tar - meanT 235 | 236 | gen_feats_norm = 
torch.norm(gen_feats, p=2, dim=1, keepdim=True) 237 | tar_feats_norm = torch.norm(tar_feats, p=2, dim=1, keepdim=True) 238 | 239 | gen_normalized = gen_feats / gen_feats_norm 240 | tar_normalized = tar_feats / tar_feats_norm 241 | 242 | cosine_dist_l = [] 243 | BatchSize = tar.size(0) 244 | 245 | for i in range(BatchSize): 246 | tar_feat_i = tar_normalized[i:i+1, :, :, :] 247 | gen_feat_i = gen_normalized[i:i+1, :, :, :] 248 | patches_OIHW = self.patch_extraction(tar_feat_i) 249 | 250 | cosine_dist_i = F.conv2d(gen_feat_i, patches_OIHW) 251 | cosine_dist_l.append(cosine_dist_i) 252 | cosine_dist = torch.cat(cosine_dist_l, dim=0) 253 | cosine_dist_zero_2_one = - (cosine_dist - 1) / 2 254 | relative_dist = self.compute_relative_distances(cosine_dist_zero_2_one) 255 | rela_dist = self.exp_norm_relative_dist(relative_dist) 256 | dims_div_mrf = rela_dist.size() 257 | k_max_nc = torch.max(rela_dist.view(dims_div_mrf[0], dims_div_mrf[1], -1), dim=2)[0] 258 | div_mrf = torch.mean(k_max_nc, dim=1) 259 | div_mrf_sum = -torch.log(div_mrf) 260 | div_mrf_sum = torch.sum(div_mrf_sum) 261 | return div_mrf_sum 262 | 263 | def __call__(self, x, y): 264 | x_vgg, y_vgg = self.vgg(x), self.vgg(y) 265 | mrf_loss = 0.0 266 | for i in range(len(x_vgg)): 267 | mrf_loss += self.weights[i] * self.mrf_loss(x_vgg[i], y_vgg[i]) 268 | return mrf_loss 269 | 270 | 271 | class TVLoss(nn.Module): 272 | def __init__(self): 273 | super(TVLoss, self).__init__() 274 | 275 | def forward(self, x): 276 | batch_size = x.size()[0] 277 | h_x = x.size()[2] 278 | w_x = x.size()[3] 279 | count_h = self._tensor_size(x[:, :, 1:, :]) 280 | count_w = self._tensor_size(x[:, :, :, 1:]) 281 | h_tv = torch.pow((x[:, :, 1:, :]-x[:, :, :h_x-1, :]), 2).sum() 282 | w_tv = torch.pow((x[:, :, : , 1:]-x[:, :, :, :w_x-1]),2).sum() 283 | return 2 * (h_tv / count_h + w_tv / count_w) / batch_size 284 | 285 | def _tensor_size(self,t): 286 | return t.size()[1] * t.size()[2] * t.size()[3] 287 | 288 | class VGG19(nn.Module): 289 | def __init__(self, requires_grad=False): 290 | super().__init__() 291 | vgg_pretrained_features = models.vgg19(pretrained=True).features 292 | self.slice1 = torch.nn.Sequential() 293 | self.slice2 = torch.nn.Sequential() 294 | self.slice3 = torch.nn.Sequential() 295 | self.slice4 = torch.nn.Sequential() 296 | self.slice5 = torch.nn.Sequential() 297 | for x in range(2): 298 | self.slice1.add_module(str(x), vgg_pretrained_features[x]) 299 | for x in range(2, 7): 300 | self.slice2.add_module(str(x), vgg_pretrained_features[x]) 301 | for x in range(7, 12): 302 | self.slice3.add_module(str(x), vgg_pretrained_features[x]) 303 | for x in range(12, 21): 304 | self.slice4.add_module(str(x), vgg_pretrained_features[x]) 305 | for x in range(21, 30): 306 | self.slice5.add_module(str(x), vgg_pretrained_features[x]) 307 | if not requires_grad: 308 | for param in self.parameters(): 309 | param.requires_grad = False 310 | 311 | def forward(self, X): 312 | h_relu1 = self.slice1(X) 313 | h_relu2 = self.slice2(h_relu1) 314 | h_relu3 = self.slice3(h_relu2) 315 | h_relu4 = self.slice4(h_relu3) 316 | h_relu5 = self.slice5(h_relu4) 317 | out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5] 318 | return out -------------------------------------------------------------------------------- /src/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import cv2 3 | import sys 4 | import time 5 | import random 6 | import numpy as np 7 | import scipy 8 | import torch 9 | import matplotlib.pyplot as plt 10 | 
from PIL import Image, ImageDraw 11 | from skimage.color import rgb2gray, gray2rgb 12 | 13 | def create_dir(dir): 14 | if not os.path.exists(dir): 15 | os.makedirs(dir) 16 | 17 | def same_padding(images, ksizes, strides, rates): 18 | assert len(images.size()) == 4 19 | batch_size, channel, rows, cols = images.size() 20 | out_rows = (rows + strides[0] - 1) // strides[0] 21 | out_cols = (cols + strides[1] - 1) // strides[1] 22 | effective_k_row = (ksizes[0] - 1) * rates[0] + 1 23 | effective_k_col = (ksizes[1] - 1) * rates[1] + 1 24 | padding_rows = max(0, (out_rows-1)*strides[0]+effective_k_row-rows) 25 | padding_cols = max(0, (out_cols-1)*strides[1]+effective_k_col-cols) 26 | # Pad the input 27 | padding_top = int(padding_rows / 2.) 28 | padding_left = int(padding_cols / 2.) 29 | padding_bottom = padding_rows - padding_top 30 | padding_right = padding_cols - padding_left 31 | paddings = (padding_left, padding_right, padding_top, padding_bottom) 32 | images = torch.nn.ZeroPad2d(paddings)(images) 33 | return images 34 | 35 | def reduce_mean(x, axis=None, keepdim=False): 36 | if not axis: 37 | axis = range(len(x.shape)) 38 | for i in sorted(axis, reverse=True): 39 | x = torch.mean(x, dim=i, keepdim=keepdim) 40 | return x 41 | 42 | def reduce_sum(x, axis=None, keepdim=False): 43 | if not axis: 44 | axis = range(len(x.shape)) 45 | for i in sorted(axis, reverse=True): 46 | x = torch.sum(x, dim=i, keepdim=keepdim) 47 | return x 48 | 49 | def extract_image_patches(images, ksizes, strides, padding='same'): 50 | 51 | assert len(images.size()) == 4 52 | assert padding in ['same', 'valid'] 53 | batch_size, channel, height, width = images.size() 54 | 55 | if padding == 'same': 56 | images = same_padding(images, ksizes, strides, [1, 1]) 57 | elif padding == 'valid': 58 | pass 59 | else: 60 | raise NotImplementedError('Unsupported padding type: {}.\ 61 | Only "same" or "valid" are supported.'.format(padding)) 62 | batch_size, channel, height, width = images.size() 63 | 64 | unfold = torch.nn.Unfold(kernel_size=ksizes, 65 | padding=0, 66 | stride=strides) 67 | patches = unfold(images) 68 | return patches 69 | 70 | 71 | def stitch_images(inputs, *outputs, img_per_row=2): 72 | gap = 5 73 | columns = len(outputs) + 1 74 | 75 | height, width = inputs[0][:, :, 0].shape 76 | img = Image.new('RGB', (width * img_per_row * columns + gap * (img_per_row - 1), height * int(len(inputs) / img_per_row))) 77 | images = [inputs, *outputs] 78 | 79 | for ix in range(len(inputs)): 80 | xoffset = int(ix % img_per_row) * width * columns + int(ix % img_per_row) * gap 81 | yoffset = int(ix / img_per_row) * height 82 | 83 | for cat in range(len(images)): 84 | im = np.array((images[cat][ix]).cpu()).astype(np.uint8).squeeze() 85 | im = Image.fromarray(im) 86 | img.paste(im, (xoffset + cat * width, yoffset)) 87 | 88 | return img 89 | 90 | # def random_crop(npdata, crop_size, datatype): 91 | 92 | # height, width = npdata.shape[0:2] 93 | # mask = np.ones((height, width)) 94 | 95 | # if datatype == 1: 96 | # h = random.randint(0, height - crop_size) 97 | # w = random.randint(0, width - crop_size) 98 | # mask[h: h+crop_size, w: w+crop_size] = 0 99 | # crop_image = npdata[h: h+crop_size, w: w+crop_size] 100 | 101 | # if datatype == 2: 102 | # h = 0 103 | # w = random.randint(0, width - crop_size) 104 | # mask[:, w: w+crop_size] = 0 105 | # crop_image = npdata[:, w: w+crop_size] 106 | # return crop_image, (w, h), mask 107 | 108 | def random_crop(npdata, crop_size, datatype, count, pos, known_mask=None): 109 | 110 | height, width = 
npdata.shape[0:2] 111 | mask = np.ones((height, width)) 112 | 113 | if datatype == 1: 114 | if count == 0 and not known_mask: 115 | h = random.randint(0, height - crop_size) 116 | w = random.randint(0, width - crop_size) 117 | else: 118 | w, h = pos[0], pos[1] 119 | mask[h: h+crop_size, w: w+crop_size] = 0 120 | crop_image = npdata[h: h+crop_size, w: w+crop_size] 121 | 122 | if datatype == 2: 123 | h = 0 124 | w = random.randint(0, width - crop_size) 125 | mask[:, w: w+crop_size] = 0 126 | crop_image = npdata[:, w: w+crop_size] 127 | return crop_image, (w, h), mask 128 | 129 | def center_crop(npdata, crop_size): 130 | height, width = npdata.shape[0:2] 131 | mask = np.ones((height, width)) 132 | w = 50 133 | h = 100 134 | mask[h: h+crop_size, w: w+crop_size] = 0. 135 | 136 | crop_image = npdata[h: h+crop_size, w: w+crop_size] 137 | return crop_image, (w, h), mask 138 | 139 | def side_crop(data, crop_size): 140 | height, width = data.shape[0:2] 141 | mask = np.ones((height, width)) 142 | 143 | w = (width - crop_size) // 2 144 | h = 0 145 | mask[:, 0: w] = 0. 146 | mask[:, w+crop_size:] = 0. 147 | 148 | return (w, h), mask 149 | 150 | def imshow(img, title=''): 151 | fig = plt.gcf() 152 | fig.canvas.set_window_title(title) 153 | plt.axis('off') 154 | plt.imshow(img, interpolation='none') 155 | plt.show() 156 | 157 | 158 | def imsave(img, path): 159 | im = Image.fromarray(img.cpu().numpy().astype(np.uint8).squeeze()) 160 | im.save(path) 161 | 162 | def savetxt(arr, path): 163 | np.savetxt(path, arr.cpu().numpy().squeeze(), fmt='%.2f') 164 | 165 | def template_match(target, source): 166 | locs = [] 167 | _src = [] 168 | for i in range(target.shape[0]): 169 | src = source[i].detach().cpu().permute(1, 2, 0).numpy() 170 | tar = target[i].detach().cpu().permute(1, 2, 0).numpy() 171 | 172 | src_gray = cv2.cvtColor(src, cv2.COLOR_RGB2GRAY) 173 | tar_gray = cv2.cvtColor(tar, cv2.COLOR_RGB2GRAY) 174 | w, h = tar_gray.shape[::-1] 175 | 176 | res = cv2.matchTemplate(src_gray, tar_gray, cv2.TM_CCOEFF) 177 | min_val, max_val, min_loc, loc = cv2.minMaxLoc(res) 178 | locs.append(loc) 179 | 180 | src = src * 255 181 | im = Image.fromarray(src.astype(np.uint8).squeeze()) 182 | draw = ImageDraw.Draw(im) 183 | draw.rectangle([loc, (loc[0] + w, loc[1] + h)], outline=0) 184 | im = np.array(im) 185 | _src.append(im) 186 | 187 | return torch.Tensor(_src), locs 188 | 189 | 190 | def make_mask(data, pdata, pos, device): 191 | 192 | crop_size = pdata.shape[3] 193 | mask_with_pdata = torch.zeros(data.shape).to(device) 194 | mask_with_ones = torch.ones(data.shape).to(device) 195 | 196 | for po in range(len(pos)): 197 | w, h = pos[po][0], pos[po][1] 198 | mask_with_pdata[po, :, h: h+crop_size, w: w+crop_size] = pdata[po] 199 | mask_with_ones[po, :, h: h+crop_size, w: w+crop_size] = 0 200 | 201 | return mask_with_pdata, mask_with_ones 202 | 203 | 204 | class Progbar(object): 205 | """Displays a progress bar. 206 | 207 | Arguments: 208 | target: Total number of steps expected, None if unknown. 209 | width: Progress bar width on screen. 210 | verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose) 211 | stateful_metrics: Iterable of string names of metrics that 212 | should *not* be averaged over time. Metrics in this list 213 | will be displayed as-is. All others will be averaged 214 | by the progbar before display. 215 | interval: Minimum visual progress update interval (in seconds). 
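Illustrative example (metric names are arbitrary): progbar = Progbar(target=1000, width=20, stateful_metrics=['epoch', 'iter']); inside the loop call progbar.add(batch_size, values=[('epoch', 1), ('l_g', 0.42)]). Metrics listed in stateful_metrics are displayed as-is; all others are averaged over the steps seen so far.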
216 | """ 217 | 218 | def __init__(self, target, width=25, verbose=1, interval=0.05, 219 | stateful_metrics=None): 220 | self.target = target 221 | self.width = width 222 | self.verbose = verbose 223 | self.interval = interval 224 | if stateful_metrics: 225 | self.stateful_metrics = set(stateful_metrics) 226 | else: 227 | self.stateful_metrics = set() 228 | 229 | self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and 230 | sys.stdout.isatty()) or 231 | 'ipykernel' in sys.modules or 232 | 'posix' in sys.modules) 233 | self._total_width = 0 234 | self._seen_so_far = 0 235 | # We use a dict + list to avoid garbage collection 236 | # issues found in OrderedDict 237 | self._values = {} 238 | self._values_order = [] 239 | self._start = time.time() 240 | self._last_update = 0 241 | 242 | def update(self, current, values=None): 243 | """Updates the progress bar. 244 | 245 | Arguments: 246 | current: Index of current step. 247 | values: List of tuples: 248 | `(name, value_for_last_step)`. 249 | If `name` is in `stateful_metrics`, 250 | `value_for_last_step` will be displayed as-is. 251 | Else, an average of the metric over time will be displayed. 252 | """ 253 | values = values or [] 254 | for k, v in values: 255 | if k not in self._values_order: 256 | self._values_order.append(k) 257 | if k not in self.stateful_metrics: 258 | if k not in self._values: 259 | self._values[k] = [v * (current - self._seen_so_far), 260 | current - self._seen_so_far] 261 | else: 262 | self._values[k][0] += v * (current - self._seen_so_far) 263 | self._values[k][1] += (current - self._seen_so_far) 264 | else: 265 | self._values[k] = v 266 | self._seen_so_far = current 267 | 268 | now = time.time() 269 | info = ' - %.0fs' % (now - self._start) 270 | if self.verbose == 1: 271 | if (now - self._last_update < self.interval and 272 | self.target is not None and current < self.target): 273 | return 274 | 275 | prev_total_width = self._total_width 276 | if self._dynamic_display: 277 | sys.stdout.write('\b' * prev_total_width) 278 | sys.stdout.write('\r') 279 | else: 280 | sys.stdout.write('\n') 281 | 282 | if self.target is not None: 283 | numdigits = int(np.floor(np.log10(self.target))) + 1 284 | barstr = '%%%dd/%d [' % (numdigits, self.target) 285 | bar = barstr % current 286 | prog = float(current) / self.target 287 | prog_width = int(self.width * prog) 288 | if prog_width > 0: 289 | bar += ('=' * (prog_width - 1)) 290 | if current < self.target: 291 | bar += '>' 292 | else: 293 | bar += '=' 294 | bar += ('.' 
* (self.width - prog_width)) 295 | bar += ']' 296 | else: 297 | bar = '%7d/Unknown' % current 298 | 299 | self._total_width = len(bar) 300 | sys.stdout.write(bar) 301 | 302 | if current: 303 | time_per_unit = (now - self._start) / current 304 | else: 305 | time_per_unit = 0 306 | if self.target is not None and current < self.target: 307 | eta = time_per_unit * (self.target - current) 308 | if eta > 3600: 309 | eta_format = '%d:%02d:%02d' % (eta // 3600, 310 | (eta % 3600) // 60, 311 | eta % 60) 312 | elif eta > 60: 313 | eta_format = '%d:%02d' % (eta // 60, eta % 60) 314 | else: 315 | eta_format = '%ds' % eta 316 | 317 | info = ' - ETA: %s' % eta_format 318 | else: 319 | if time_per_unit >= 1: 320 | info += ' %.0fs/step' % time_per_unit 321 | elif time_per_unit >= 1e-3: 322 | info += ' %.0fms/step' % (time_per_unit * 1e3) 323 | else: 324 | info += ' %.0fus/step' % (time_per_unit * 1e6) 325 | 326 | for k in self._values_order: 327 | info += ' - %s:' % k 328 | if isinstance(self._values[k], list): 329 | avg = np.mean(self._values[k][0] / max(1, self._values[k][1])) 330 | if abs(avg) > 1e-3: 331 | info += ' %.4f' % avg 332 | else: 333 | info += ' %.4e' % avg 334 | else: 335 | info += ' %s' % self._values[k] 336 | 337 | self._total_width += len(info) 338 | if prev_total_width > self._total_width: 339 | info += (' ' * (prev_total_width - self._total_width)) 340 | 341 | if self.target is not None and current >= self.target: 342 | info += '\n' 343 | 344 | sys.stdout.write(info) 345 | sys.stdout.flush() 346 | 347 | elif self.verbose == 2: 348 | if self.target is None or current >= self.target: 349 | for k in self._values_order: 350 | info += ' - %s:' % k 351 | avg = np.mean(self._values[k][0] / max(1, self._values[k][1])) 352 | if avg > 1e-3: 353 | info += ' %.4f' % avg 354 | else: 355 | info += ' %.4e' % avg 356 | info += '\n' 357 | 358 | sys.stdout.write(info) 359 | sys.stdout.flush() 360 | 361 | self._last_update = now 362 | 363 | def add(self, n, values=None): 364 | self.update(self._seen_so_far + n, values) 365 | 366 | import math 367 | from torch.optim.optimizer import Optimizer 368 | 369 | class Adam16(Optimizer): 370 | def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0): 371 | 372 | defaults = dict(lr=lr, betas=betas, eps=eps, 373 | weight_decay=weight_decay) 374 | params = list(params) 375 | super(Adam16, self).__init__(params, defaults) 376 | 377 | # Safety modification to make sure we floatify our state 378 | def load_state_dict(self, state_dict): 379 | super(Adam16, self).load_state_dict(state_dict) 380 | for group in self.param_groups: 381 | for p in group['params']: 382 | 383 | self.state[p]['exp_avg'] = self.state[p]['exp_avg'].float() 384 | self.state[p]['exp_avg_sq'] = self.state[p]['exp_avg_sq'].float() 385 | self.state[p]['fp32_p'] = self.state[p]['fp32_p'].float() 386 | 387 | def step(self, closure=None): 388 | """Performs a single optimization step. 389 | Arguments: 390 | closure (callable, optional): A closure that reevaluates the model 391 | and returns the loss. 
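Note: gradients are upcast to float32 and the Adam update is applied to a float32 master copy of each parameter (state['fp32_p']), which is then written back to p.data, so the moment estimates and the update itself stay in full precision even when the model is trained in half precision.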
392 | """ 393 | loss = None 394 | if closure is not None: 395 | loss = closure() 396 | 397 | for group in self.param_groups: 398 | for p in group['params']: 399 | if p.grad is None: 400 | continue 401 | 402 | grad = p.grad.data.float() 403 | state = self.state[p] 404 | 405 | # State initialization 406 | if len(state) == 0: 407 | state['step'] = 0 408 | # Exponential moving average of gradient values 409 | state['exp_avg'] = grad.new().resize_as_(grad).zero_() 410 | # Exponential moving average of squared gradient values 411 | state['exp_avg_sq'] = grad.new().resize_as_(grad).zero_() 412 | # Fp32 copy of the weights 413 | state['fp32_p'] = p.data.float() 414 | 415 | exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] 416 | beta1, beta2 = group['betas'] 417 | 418 | state['step'] += 1 419 | 420 | if group['weight_decay'] != 0: 421 | grad = grad.add(group['weight_decay'], state['fp32_p']) 422 | 423 | # Decay the first and second moment running average coefficient 424 | exp_avg.mul_(beta1).add_(1 - beta1, grad) 425 | exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) 426 | denom = exp_avg_sq.sqrt().add_(group['eps']) 427 | 428 | bias_correction1 = 1 - beta1 ** state['step'] 429 | bias_correction2 = 1 - beta2 ** state['step'] 430 | step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1 431 | 432 | state['fp32_p'].addcdiv_(-step_size, exp_avg, denom) 433 | p.data = state['fp32_p'].float() 434 | 435 | return loss 436 | -------------------------------------------------------------------------------- /datasets/cat/test.flist: -------------------------------------------------------------------------------- 1 | ./afhq/val/cat/flickr_cat_000008.jpg 2 | ./afhq/val/cat/flickr_cat_000011.jpg 3 | ./afhq/val/cat/flickr_cat_000016.jpg 4 | ./afhq/val/cat/flickr_cat_000056.jpg 5 | ./afhq/val/cat/flickr_cat_000076.jpg 6 | ./afhq/val/cat/flickr_cat_000080.jpg 7 | ./afhq/val/cat/flickr_cat_000096.jpg 8 | ./afhq/val/cat/flickr_cat_000108.jpg 9 | ./afhq/val/cat/flickr_cat_000123.jpg 10 | ./afhq/val/cat/flickr_cat_000136.jpg 11 | ./afhq/val/cat/flickr_cat_000152.jpg 12 | ./afhq/val/cat/flickr_cat_000162.jpg 13 | ./afhq/val/cat/flickr_cat_000165.jpg 14 | ./afhq/val/cat/flickr_cat_000174.jpg 15 | ./afhq/val/cat/flickr_cat_000175.jpg 16 | ./afhq/val/cat/flickr_cat_000176.jpg 17 | ./afhq/val/cat/flickr_cat_000177.jpg 18 | ./afhq/val/cat/flickr_cat_000180.jpg 19 | ./afhq/val/cat/flickr_cat_000182.jpg 20 | ./afhq/val/cat/flickr_cat_000183.jpg 21 | ./afhq/val/cat/flickr_cat_000184.jpg 22 | ./afhq/val/cat/flickr_cat_000191.jpg 23 | ./afhq/val/cat/flickr_cat_000208.jpg 24 | ./afhq/val/cat/flickr_cat_000216.jpg 25 | ./afhq/val/cat/flickr_cat_000226.jpg 26 | ./afhq/val/cat/flickr_cat_000230.jpg 27 | ./afhq/val/cat/flickr_cat_000232.jpg 28 | ./afhq/val/cat/flickr_cat_000233.jpg 29 | ./afhq/val/cat/flickr_cat_000236.jpg 30 | ./afhq/val/cat/flickr_cat_000238.jpg 31 | ./afhq/val/cat/flickr_cat_000253.jpg 32 | ./afhq/val/cat/flickr_cat_000265.jpg 33 | ./afhq/val/cat/flickr_cat_000283.jpg 34 | ./afhq/val/cat/flickr_cat_000287.jpg 35 | ./afhq/val/cat/flickr_cat_000292.jpg 36 | ./afhq/val/cat/flickr_cat_000311.jpg 37 | ./afhq/val/cat/flickr_cat_000314.jpg 38 | ./afhq/val/cat/flickr_cat_000320.jpg 39 | ./afhq/val/cat/flickr_cat_000341.jpg 40 | ./afhq/val/cat/flickr_cat_000350.jpg 41 | ./afhq/val/cat/flickr_cat_000351.jpg 42 | ./afhq/val/cat/flickr_cat_000368.jpg 43 | ./afhq/val/cat/flickr_cat_000372.jpg 44 | ./afhq/val/cat/flickr_cat_000377.jpg 45 | ./afhq/val/cat/flickr_cat_000387.jpg 46 | 
./afhq/val/cat/flickr_cat_000418.jpg 47 | ./afhq/val/cat/flickr_cat_000419.jpg 48 | ./afhq/val/cat/flickr_cat_000437.jpg 49 | ./afhq/val/cat/flickr_cat_000441.jpg 50 | ./afhq/val/cat/flickr_cat_000446.jpg 51 | ./afhq/val/cat/flickr_cat_000495.jpg 52 | ./afhq/val/cat/flickr_cat_000514.jpg 53 | ./afhq/val/cat/flickr_cat_000522.jpg 54 | ./afhq/val/cat/flickr_cat_000526.jpg 55 | ./afhq/val/cat/flickr_cat_000529.jpg 56 | ./afhq/val/cat/flickr_cat_000534.jpg 57 | ./afhq/val/cat/flickr_cat_000535.jpg 58 | ./afhq/val/cat/flickr_cat_000536.jpg 59 | ./afhq/val/cat/flickr_cat_000554.jpg 60 | ./afhq/val/cat/flickr_cat_000557.jpg 61 | ./afhq/val/cat/flickr_cat_000560.jpg 62 | ./afhq/val/cat/flickr_cat_000570.jpg 63 | ./afhq/val/cat/flickr_cat_000575.jpg 64 | ./afhq/val/cat/flickr_cat_000585.jpg 65 | ./afhq/val/cat/flickr_cat_000592.jpg 66 | ./afhq/val/cat/flickr_cat_000599.jpg 67 | ./afhq/val/cat/flickr_cat_000608.jpg 68 | ./afhq/val/cat/flickr_cat_000610.jpg 69 | ./afhq/val/cat/flickr_cat_000614.jpg 70 | ./afhq/val/cat/flickr_cat_000628.jpg 71 | ./afhq/val/cat/flickr_cat_000629.jpg 72 | ./afhq/val/cat/flickr_cat_000635.jpg 73 | ./afhq/val/cat/flickr_cat_000637.jpg 74 | ./afhq/val/cat/flickr_cat_000653.jpg 75 | ./afhq/val/cat/flickr_cat_000656.jpg 76 | ./afhq/val/cat/flickr_cat_000676.jpg 77 | ./afhq/val/cat/flickr_cat_000688.jpg 78 | ./afhq/val/cat/flickr_cat_000709.jpg 79 | ./afhq/val/cat/flickr_cat_000714.jpg 80 | ./afhq/val/cat/flickr_cat_000746.jpg 81 | ./afhq/val/cat/flickr_cat_000747.jpg 82 | ./afhq/val/cat/flickr_cat_000751.jpg 83 | ./afhq/val/cat/flickr_cat_000759.jpg 84 | ./afhq/val/cat/flickr_cat_000761.jpg 85 | ./afhq/val/cat/flickr_cat_000766.jpg 86 | ./afhq/val/cat/flickr_cat_000769.jpg 87 | ./afhq/val/cat/flickr_cat_000802.jpg 88 | ./afhq/val/cat/flickr_cat_000814.jpg 89 | ./afhq/val/cat/flickr_cat_000816.jpg 90 | ./afhq/val/cat/pixabay_cat_000007.jpg 91 | ./afhq/val/cat/pixabay_cat_000010.jpg 92 | ./afhq/val/cat/pixabay_cat_000037.jpg 93 | ./afhq/val/cat/pixabay_cat_000040.jpg 94 | ./afhq/val/cat/pixabay_cat_000051.jpg 95 | ./afhq/val/cat/pixabay_cat_000070.jpg 96 | ./afhq/val/cat/pixabay_cat_000076.jpg 97 | ./afhq/val/cat/pixabay_cat_000081.jpg 98 | ./afhq/val/cat/pixabay_cat_000096.jpg 99 | ./afhq/val/cat/pixabay_cat_000099.jpg 100 | ./afhq/val/cat/pixabay_cat_000108.jpg 101 | ./afhq/val/cat/pixabay_cat_000117.jpg 102 | ./afhq/val/cat/pixabay_cat_000126.jpg 103 | ./afhq/val/cat/pixabay_cat_000147.jpg 104 | ./afhq/val/cat/pixabay_cat_000151.jpg 105 | ./afhq/val/cat/pixabay_cat_000165.jpg 106 | ./afhq/val/cat/pixabay_cat_000169.jpg 107 | ./afhq/val/cat/pixabay_cat_000178.jpg 108 | ./afhq/val/cat/pixabay_cat_000181.jpg 109 | ./afhq/val/cat/pixabay_cat_000190.jpg 110 | ./afhq/val/cat/pixabay_cat_000202.jpg 111 | ./afhq/val/cat/pixabay_cat_000231.jpg 112 | ./afhq/val/cat/pixabay_cat_000241.jpg 113 | ./afhq/val/cat/pixabay_cat_000248.jpg 114 | ./afhq/val/cat/pixabay_cat_000276.jpg 115 | ./afhq/val/cat/pixabay_cat_000291.jpg 116 | ./afhq/val/cat/pixabay_cat_000296.jpg 117 | ./afhq/val/cat/pixabay_cat_000299.jpg 118 | ./afhq/val/cat/pixabay_cat_000302.jpg 119 | ./afhq/val/cat/pixabay_cat_000314.jpg 120 | ./afhq/val/cat/pixabay_cat_000315.jpg 121 | ./afhq/val/cat/pixabay_cat_000325.jpg 122 | ./afhq/val/cat/pixabay_cat_000339.jpg 123 | ./afhq/val/cat/pixabay_cat_000343.jpg 124 | ./afhq/val/cat/pixabay_cat_000353.jpg 125 | ./afhq/val/cat/pixabay_cat_000355.jpg 126 | ./afhq/val/cat/pixabay_cat_000380.jpg 127 | ./afhq/val/cat/pixabay_cat_000390.jpg 128 | ./afhq/val/cat/pixabay_cat_000392.jpg 129 | 
./afhq/val/cat/pixabay_cat_000403.jpg 130 | ./afhq/val/cat/pixabay_cat_000417.jpg 131 | ./afhq/val/cat/pixabay_cat_000440.jpg 132 | ./afhq/val/cat/pixabay_cat_000441.jpg 133 | ./afhq/val/cat/pixabay_cat_000459.jpg 134 | ./afhq/val/cat/pixabay_cat_000468.jpg 135 | ./afhq/val/cat/pixabay_cat_000491.jpg 136 | ./afhq/val/cat/pixabay_cat_000492.jpg 137 | ./afhq/val/cat/pixabay_cat_000504.jpg 138 | ./afhq/val/cat/pixabay_cat_000514.jpg 139 | ./afhq/val/cat/pixabay_cat_000518.jpg 140 | ./afhq/val/cat/pixabay_cat_000531.jpg 141 | ./afhq/val/cat/pixabay_cat_000535.jpg 142 | ./afhq/val/cat/pixabay_cat_000540.jpg 143 | ./afhq/val/cat/pixabay_cat_000542.jpg 144 | ./afhq/val/cat/pixabay_cat_000573.jpg 145 | ./afhq/val/cat/pixabay_cat_000582.jpg 146 | ./afhq/val/cat/pixabay_cat_000592.jpg 147 | ./afhq/val/cat/pixabay_cat_000615.jpg 148 | ./afhq/val/cat/pixabay_cat_000623.jpg 149 | ./afhq/val/cat/pixabay_cat_000629.jpg 150 | ./afhq/val/cat/pixabay_cat_000636.jpg 151 | ./afhq/val/cat/pixabay_cat_000637.jpg 152 | ./afhq/val/cat/pixabay_cat_000651.jpg 153 | ./afhq/val/cat/pixabay_cat_000663.jpg 154 | ./afhq/val/cat/pixabay_cat_000666.jpg 155 | ./afhq/val/cat/pixabay_cat_000668.jpg 156 | ./afhq/val/cat/pixabay_cat_000696.jpg 157 | ./afhq/val/cat/pixabay_cat_000700.jpg 158 | ./afhq/val/cat/pixabay_cat_000708.jpg 159 | ./afhq/val/cat/pixabay_cat_000730.jpg 160 | ./afhq/val/cat/pixabay_cat_000735.jpg 161 | ./afhq/val/cat/pixabay_cat_000743.jpg 162 | ./afhq/val/cat/pixabay_cat_000751.jpg 163 | ./afhq/val/cat/pixabay_cat_000755.jpg 164 | ./afhq/val/cat/pixabay_cat_000756.jpg 165 | ./afhq/val/cat/pixabay_cat_000760.jpg 166 | ./afhq/val/cat/pixabay_cat_000791.jpg 167 | ./afhq/val/cat/pixabay_cat_000801.jpg 168 | ./afhq/val/cat/pixabay_cat_000803.jpg 169 | ./afhq/val/cat/pixabay_cat_000812.jpg 170 | ./afhq/val/cat/pixabay_cat_000829.jpg 171 | ./afhq/val/cat/pixabay_cat_000831.jpg 172 | ./afhq/val/cat/pixabay_cat_000833.jpg 173 | ./afhq/val/cat/pixabay_cat_000840.jpg 174 | ./afhq/val/cat/pixabay_cat_000842.jpg 175 | ./afhq/val/cat/pixabay_cat_000844.jpg 176 | ./afhq/val/cat/pixabay_cat_000847.jpg 177 | ./afhq/val/cat/pixabay_cat_000861.jpg 178 | ./afhq/val/cat/pixabay_cat_000867.jpg 179 | ./afhq/val/cat/pixabay_cat_000876.jpg 180 | ./afhq/val/cat/pixabay_cat_000889.jpg 181 | ./afhq/val/cat/pixabay_cat_000890.jpg 182 | ./afhq/val/cat/pixabay_cat_000893.jpg 183 | ./afhq/val/cat/pixabay_cat_000901.jpg 184 | ./afhq/val/cat/pixabay_cat_000929.jpg 185 | ./afhq/val/cat/pixabay_cat_000935.jpg 186 | ./afhq/val/cat/pixabay_cat_000968.jpg 187 | ./afhq/val/cat/pixabay_cat_000980.jpg 188 | ./afhq/val/cat/pixabay_cat_000988.jpg 189 | ./afhq/val/cat/pixabay_cat_000989.jpg 190 | ./afhq/val/cat/pixabay_cat_000990.jpg 191 | ./afhq/val/cat/pixabay_cat_001003.jpg 192 | ./afhq/val/cat/pixabay_cat_001018.jpg 193 | ./afhq/val/cat/pixabay_cat_001029.jpg 194 | ./afhq/val/cat/pixabay_cat_001040.jpg 195 | ./afhq/val/cat/pixabay_cat_001055.jpg 196 | ./afhq/val/cat/pixabay_cat_001080.jpg 197 | ./afhq/val/cat/pixabay_cat_001081.jpg 198 | ./afhq/val/cat/pixabay_cat_001084.jpg 199 | ./afhq/val/cat/pixabay_cat_001093.jpg 200 | ./afhq/val/cat/pixabay_cat_001098.jpg 201 | ./afhq/val/cat/pixabay_cat_001099.jpg 202 | ./afhq/val/cat/pixabay_cat_001105.jpg 203 | ./afhq/val/cat/pixabay_cat_001143.jpg 204 | ./afhq/val/cat/pixabay_cat_001144.jpg 205 | ./afhq/val/cat/pixabay_cat_001179.jpg 206 | ./afhq/val/cat/pixabay_cat_001195.jpg 207 | ./afhq/val/cat/pixabay_cat_001196.jpg 208 | ./afhq/val/cat/pixabay_cat_001205.jpg 209 | 
./afhq/val/cat/pixabay_cat_001212.jpg 210 | ./afhq/val/cat/pixabay_cat_001269.jpg 211 | ./afhq/val/cat/pixabay_cat_001272.jpg 212 | ./afhq/val/cat/pixabay_cat_001286.jpg 213 | ./afhq/val/cat/pixabay_cat_001287.jpg 214 | ./afhq/val/cat/pixabay_cat_001309.jpg 215 | ./afhq/val/cat/pixabay_cat_001323.jpg 216 | ./afhq/val/cat/pixabay_cat_001342.jpg 217 | ./afhq/val/cat/pixabay_cat_001348.jpg 218 | ./afhq/val/cat/pixabay_cat_001359.jpg 219 | ./afhq/val/cat/pixabay_cat_001365.jpg 220 | ./afhq/val/cat/pixabay_cat_001367.jpg 221 | ./afhq/val/cat/pixabay_cat_001371.jpg 222 | ./afhq/val/cat/pixabay_cat_001385.jpg 223 | ./afhq/val/cat/pixabay_cat_001390.jpg 224 | ./afhq/val/cat/pixabay_cat_001405.jpg 225 | ./afhq/val/cat/pixabay_cat_001437.jpg 226 | ./afhq/val/cat/pixabay_cat_001455.jpg 227 | ./afhq/val/cat/pixabay_cat_001457.jpg 228 | ./afhq/val/cat/pixabay_cat_001460.jpg 229 | ./afhq/val/cat/pixabay_cat_001464.jpg 230 | ./afhq/val/cat/pixabay_cat_001465.jpg 231 | ./afhq/val/cat/pixabay_cat_001479.jpg 232 | ./afhq/val/cat/pixabay_cat_001503.jpg 233 | ./afhq/val/cat/pixabay_cat_001536.jpg 234 | ./afhq/val/cat/pixabay_cat_001562.jpg 235 | ./afhq/val/cat/pixabay_cat_001599.jpg 236 | ./afhq/val/cat/pixabay_cat_001607.jpg 237 | ./afhq/val/cat/pixabay_cat_001612.jpg 238 | ./afhq/val/cat/pixabay_cat_001632.jpg 239 | ./afhq/val/cat/pixabay_cat_001635.jpg 240 | ./afhq/val/cat/pixabay_cat_001643.jpg 241 | ./afhq/val/cat/pixabay_cat_001653.jpg 242 | ./afhq/val/cat/pixabay_cat_001665.jpg 243 | ./afhq/val/cat/pixabay_cat_001672.jpg 244 | ./afhq/val/cat/pixabay_cat_001687.jpg 245 | ./afhq/val/cat/pixabay_cat_001690.jpg 246 | ./afhq/val/cat/pixabay_cat_001699.jpg 247 | ./afhq/val/cat/pixabay_cat_001724.jpg 248 | ./afhq/val/cat/pixabay_cat_001727.jpg 249 | ./afhq/val/cat/pixabay_cat_001729.jpg 250 | ./afhq/val/cat/pixabay_cat_001786.jpg 251 | ./afhq/val/cat/pixabay_cat_001811.jpg 252 | ./afhq/val/cat/pixabay_cat_001860.jpg 253 | ./afhq/val/cat/pixabay_cat_001891.jpg 254 | ./afhq/val/cat/pixabay_cat_001917.jpg 255 | ./afhq/val/cat/pixabay_cat_001938.jpg 256 | ./afhq/val/cat/pixabay_cat_001951.jpg 257 | ./afhq/val/cat/pixabay_cat_001974.jpg 258 | ./afhq/val/cat/pixabay_cat_001985.jpg 259 | ./afhq/val/cat/pixabay_cat_001992.jpg 260 | ./afhq/val/cat/pixabay_cat_001996.jpg 261 | ./afhq/val/cat/pixabay_cat_001997.jpg 262 | ./afhq/val/cat/pixabay_cat_002009.jpg 263 | ./afhq/val/cat/pixabay_cat_002037.jpg 264 | ./afhq/val/cat/pixabay_cat_002064.jpg 265 | ./afhq/val/cat/pixabay_cat_002075.jpg 266 | ./afhq/val/cat/pixabay_cat_002081.jpg 267 | ./afhq/val/cat/pixabay_cat_002090.jpg 268 | ./afhq/val/cat/pixabay_cat_002117.jpg 269 | ./afhq/val/cat/pixabay_cat_002126.jpg 270 | ./afhq/val/cat/pixabay_cat_002129.jpg 271 | ./afhq/val/cat/pixabay_cat_002133.jpg 272 | ./afhq/val/cat/pixabay_cat_002138.jpg 273 | ./afhq/val/cat/pixabay_cat_002140.jpg 274 | ./afhq/val/cat/pixabay_cat_002149.jpg 275 | ./afhq/val/cat/pixabay_cat_002155.jpg 276 | ./afhq/val/cat/pixabay_cat_002156.jpg 277 | ./afhq/val/cat/pixabay_cat_002184.jpg 278 | ./afhq/val/cat/pixabay_cat_002198.jpg 279 | ./afhq/val/cat/pixabay_cat_002201.jpg 280 | ./afhq/val/cat/pixabay_cat_002205.jpg 281 | ./afhq/val/cat/pixabay_cat_002212.jpg 282 | ./afhq/val/cat/pixabay_cat_002232.jpg 283 | ./afhq/val/cat/pixabay_cat_002241.jpg 284 | ./afhq/val/cat/pixabay_cat_002256.jpg 285 | ./afhq/val/cat/pixabay_cat_002267.jpg 286 | ./afhq/val/cat/pixabay_cat_002269.jpg 287 | ./afhq/val/cat/pixabay_cat_002298.jpg 288 | ./afhq/val/cat/pixabay_cat_002307.jpg 289 | 
./afhq/val/cat/pixabay_cat_002310.jpg 290 | ./afhq/val/cat/pixabay_cat_002316.jpg 291 | ./afhq/val/cat/pixabay_cat_002338.jpg 292 | ./afhq/val/cat/pixabay_cat_002341.jpg 293 | ./afhq/val/cat/pixabay_cat_002347.jpg 294 | ./afhq/val/cat/pixabay_cat_002379.jpg 295 | ./afhq/val/cat/pixabay_cat_002398.jpg 296 | ./afhq/val/cat/pixabay_cat_002410.jpg 297 | ./afhq/val/cat/pixabay_cat_002435.jpg 298 | ./afhq/val/cat/pixabay_cat_002438.jpg 299 | ./afhq/val/cat/pixabay_cat_002441.jpg 300 | ./afhq/val/cat/pixabay_cat_002452.jpg 301 | ./afhq/val/cat/pixabay_cat_002462.jpg 302 | ./afhq/val/cat/pixabay_cat_002464.jpg 303 | ./afhq/val/cat/pixabay_cat_002467.jpg 304 | ./afhq/val/cat/pixabay_cat_002472.jpg 305 | ./afhq/val/cat/pixabay_cat_002488.jpg 306 | ./afhq/val/cat/pixabay_cat_002510.jpg 307 | ./afhq/val/cat/pixabay_cat_002543.jpg 308 | ./afhq/val/cat/pixabay_cat_002552.jpg 309 | ./afhq/val/cat/pixabay_cat_002559.jpg 310 | ./afhq/val/cat/pixabay_cat_002582.jpg 311 | ./afhq/val/cat/pixabay_cat_002640.jpg 312 | ./afhq/val/cat/pixabay_cat_002650.jpg 313 | ./afhq/val/cat/pixabay_cat_002661.jpg 314 | ./afhq/val/cat/pixabay_cat_002662.jpg 315 | ./afhq/val/cat/pixabay_cat_002686.jpg 316 | ./afhq/val/cat/pixabay_cat_002694.jpg 317 | ./afhq/val/cat/pixabay_cat_002696.jpg 318 | ./afhq/val/cat/pixabay_cat_002702.jpg 319 | ./afhq/val/cat/pixabay_cat_002712.jpg 320 | ./afhq/val/cat/pixabay_cat_002724.jpg 321 | ./afhq/val/cat/pixabay_cat_002749.jpg 322 | ./afhq/val/cat/pixabay_cat_002773.jpg 323 | ./afhq/val/cat/pixabay_cat_002776.jpg 324 | ./afhq/val/cat/pixabay_cat_002798.jpg 325 | ./afhq/val/cat/pixabay_cat_002807.jpg 326 | ./afhq/val/cat/pixabay_cat_002823.jpg 327 | ./afhq/val/cat/pixabay_cat_002831.jpg 328 | ./afhq/val/cat/pixabay_cat_002839.jpg 329 | ./afhq/val/cat/pixabay_cat_002860.jpg 330 | ./afhq/val/cat/pixabay_cat_002862.jpg 331 | ./afhq/val/cat/pixabay_cat_002865.jpg 332 | ./afhq/val/cat/pixabay_cat_002871.jpg 333 | ./afhq/val/cat/pixabay_cat_002873.jpg 334 | ./afhq/val/cat/pixabay_cat_002883.jpg 335 | ./afhq/val/cat/pixabay_cat_002890.jpg 336 | ./afhq/val/cat/pixabay_cat_002905.jpg 337 | ./afhq/val/cat/pixabay_cat_002922.jpg 338 | ./afhq/val/cat/pixabay_cat_002927.jpg 339 | ./afhq/val/cat/pixabay_cat_002928.jpg 340 | ./afhq/val/cat/pixabay_cat_002973.jpg 341 | ./afhq/val/cat/pixabay_cat_002986.jpg 342 | ./afhq/val/cat/pixabay_cat_002989.jpg 343 | ./afhq/val/cat/pixabay_cat_002990.jpg 344 | ./afhq/val/cat/pixabay_cat_002997.jpg 345 | ./afhq/val/cat/pixabay_cat_003000.jpg 346 | ./afhq/val/cat/pixabay_cat_003016.jpg 347 | ./afhq/val/cat/pixabay_cat_003033.jpg 348 | ./afhq/val/cat/pixabay_cat_003038.jpg 349 | ./afhq/val/cat/pixabay_cat_003046.jpg 350 | ./afhq/val/cat/pixabay_cat_003048.jpg 351 | ./afhq/val/cat/pixabay_cat_003079.jpg 352 | ./afhq/val/cat/pixabay_cat_003117.jpg 353 | ./afhq/val/cat/pixabay_cat_003130.jpg 354 | ./afhq/val/cat/pixabay_cat_003134.jpg 355 | ./afhq/val/cat/pixabay_cat_003150.jpg 356 | ./afhq/val/cat/pixabay_cat_003165.jpg 357 | ./afhq/val/cat/pixabay_cat_003187.jpg 358 | ./afhq/val/cat/pixabay_cat_003197.jpg 359 | ./afhq/val/cat/pixabay_cat_003201.jpg 360 | ./afhq/val/cat/pixabay_cat_003204.jpg 361 | ./afhq/val/cat/pixabay_cat_003210.jpg 362 | ./afhq/val/cat/pixabay_cat_003211.jpg 363 | ./afhq/val/cat/pixabay_cat_003212.jpg 364 | ./afhq/val/cat/pixabay_cat_003218.jpg 365 | ./afhq/val/cat/pixabay_cat_003253.jpg 366 | ./afhq/val/cat/pixabay_cat_003313.jpg 367 | ./afhq/val/cat/pixabay_cat_003341.jpg 368 | ./afhq/val/cat/pixabay_cat_003356.jpg 369 | 
./afhq/val/cat/pixabay_cat_003357.jpg 370 | ./afhq/val/cat/pixabay_cat_003358.jpg 371 | ./afhq/val/cat/pixabay_cat_003359.jpg 372 | ./afhq/val/cat/pixabay_cat_003361.jpg 373 | ./afhq/val/cat/pixabay_cat_003392.jpg 374 | ./afhq/val/cat/pixabay_cat_003413.jpg 375 | ./afhq/val/cat/pixabay_cat_003417.jpg 376 | ./afhq/val/cat/pixabay_cat_003432.jpg 377 | ./afhq/val/cat/pixabay_cat_003440.jpg 378 | ./afhq/val/cat/pixabay_cat_003455.jpg 379 | ./afhq/val/cat/pixabay_cat_003464.jpg 380 | ./afhq/val/cat/pixabay_cat_003472.jpg 381 | ./afhq/val/cat/pixabay_cat_003483.jpg 382 | ./afhq/val/cat/pixabay_cat_003486.jpg 383 | ./afhq/val/cat/pixabay_cat_003503.jpg 384 | ./afhq/val/cat/pixabay_cat_003512.jpg 385 | ./afhq/val/cat/pixabay_cat_003524.jpg 386 | ./afhq/val/cat/pixabay_cat_003529.jpg 387 | ./afhq/val/cat/pixabay_cat_003556.jpg 388 | ./afhq/val/cat/pixabay_cat_003562.jpg 389 | ./afhq/val/cat/pixabay_cat_003568.jpg 390 | ./afhq/val/cat/pixabay_cat_003577.jpg 391 | ./afhq/val/cat/pixabay_cat_003582.jpg 392 | ./afhq/val/cat/pixabay_cat_003595.jpg 393 | ./afhq/val/cat/pixabay_cat_003610.jpg 394 | ./afhq/val/cat/pixabay_cat_003613.jpg 395 | ./afhq/val/cat/pixabay_cat_003615.jpg 396 | ./afhq/val/cat/pixabay_cat_003686.jpg 397 | ./afhq/val/cat/pixabay_cat_003688.jpg 398 | ./afhq/val/cat/pixabay_cat_003701.jpg 399 | ./afhq/val/cat/pixabay_cat_003706.jpg 400 | ./afhq/val/cat/pixabay_cat_003712.jpg 401 | ./afhq/val/cat/pixabay_cat_003721.jpg 402 | ./afhq/val/cat/pixabay_cat_003723.jpg 403 | ./afhq/val/cat/pixabay_cat_003734.jpg 404 | ./afhq/val/cat/pixabay_cat_003742.jpg 405 | ./afhq/val/cat/pixabay_cat_003743.jpg 406 | ./afhq/val/cat/pixabay_cat_003748.jpg 407 | ./afhq/val/cat/pixabay_cat_003753.jpg 408 | ./afhq/val/cat/pixabay_cat_003755.jpg 409 | ./afhq/val/cat/pixabay_cat_003773.jpg 410 | ./afhq/val/cat/pixabay_cat_003794.jpg 411 | ./afhq/val/cat/pixabay_cat_003804.jpg 412 | ./afhq/val/cat/pixabay_cat_003811.jpg 413 | ./afhq/val/cat/pixabay_cat_003820.jpg 414 | ./afhq/val/cat/pixabay_cat_003842.jpg 415 | ./afhq/val/cat/pixabay_cat_003848.jpg 416 | ./afhq/val/cat/pixabay_cat_003874.jpg 417 | ./afhq/val/cat/pixabay_cat_003878.jpg 418 | ./afhq/val/cat/pixabay_cat_003890.jpg 419 | ./afhq/val/cat/pixabay_cat_003896.jpg 420 | ./afhq/val/cat/pixabay_cat_003901.jpg 421 | ./afhq/val/cat/pixabay_cat_003909.jpg 422 | ./afhq/val/cat/pixabay_cat_003911.jpg 423 | ./afhq/val/cat/pixabay_cat_003924.jpg 424 | ./afhq/val/cat/pixabay_cat_003931.jpg 425 | ./afhq/val/cat/pixabay_cat_003964.jpg 426 | ./afhq/val/cat/pixabay_cat_003977.jpg 427 | ./afhq/val/cat/pixabay_cat_003986.jpg 428 | ./afhq/val/cat/pixabay_cat_004011.jpg 429 | ./afhq/val/cat/pixabay_cat_004019.jpg 430 | ./afhq/val/cat/pixabay_cat_004029.jpg 431 | ./afhq/val/cat/pixabay_cat_004039.jpg 432 | ./afhq/val/cat/pixabay_cat_004044.jpg 433 | ./afhq/val/cat/pixabay_cat_004063.jpg 434 | ./afhq/val/cat/pixabay_cat_004092.jpg 435 | ./afhq/val/cat/pixabay_cat_004102.jpg 436 | ./afhq/val/cat/pixabay_cat_004104.jpg 437 | ./afhq/val/cat/pixabay_cat_004146.jpg 438 | ./afhq/val/cat/pixabay_cat_004166.jpg 439 | ./afhq/val/cat/pixabay_cat_004167.jpg 440 | ./afhq/val/cat/pixabay_cat_004171.jpg 441 | ./afhq/val/cat/pixabay_cat_004178.jpg 442 | ./afhq/val/cat/pixabay_cat_004179.jpg 443 | ./afhq/val/cat/pixabay_cat_004188.jpg 444 | ./afhq/val/cat/pixabay_cat_004189.jpg 445 | ./afhq/val/cat/pixabay_cat_004217.jpg 446 | ./afhq/val/cat/pixabay_cat_004231.jpg 447 | ./afhq/val/cat/pixabay_cat_004237.jpg 448 | ./afhq/val/cat/pixabay_cat_004256.jpg 449 | 
./afhq/val/cat/pixabay_cat_004297.jpg 450 | ./afhq/val/cat/pixabay_cat_004303.jpg 451 | ./afhq/val/cat/pixabay_cat_004311.jpg 452 | ./afhq/val/cat/pixabay_cat_004334.jpg 453 | ./afhq/val/cat/pixabay_cat_004337.jpg 454 | ./afhq/val/cat/pixabay_cat_004343.jpg 455 | ./afhq/val/cat/pixabay_cat_004358.jpg 456 | ./afhq/val/cat/pixabay_cat_004373.jpg 457 | ./afhq/val/cat/pixabay_cat_004379.jpg 458 | ./afhq/val/cat/pixabay_cat_004415.jpg 459 | ./afhq/val/cat/pixabay_cat_004421.jpg 460 | ./afhq/val/cat/pixabay_cat_004422.jpg 461 | ./afhq/val/cat/pixabay_cat_004434.jpg 462 | ./afhq/val/cat/pixabay_cat_004461.jpg 463 | ./afhq/val/cat/pixabay_cat_004480.jpg 464 | ./afhq/val/cat/pixabay_cat_004482.jpg 465 | ./afhq/val/cat/pixabay_cat_004484.jpg 466 | ./afhq/val/cat/pixabay_cat_004505.jpg 467 | ./afhq/val/cat/pixabay_cat_004510.jpg 468 | ./afhq/val/cat/pixabay_cat_004513.jpg 469 | ./afhq/val/cat/pixabay_cat_004520.jpg 470 | ./afhq/val/cat/pixabay_cat_004542.jpg 471 | ./afhq/val/cat/pixabay_cat_004543.jpg 472 | ./afhq/val/cat/pixabay_cat_004548.jpg 473 | ./afhq/val/cat/pixabay_cat_004562.jpg 474 | ./afhq/val/cat/pixabay_cat_004584.jpg 475 | ./afhq/val/cat/pixabay_cat_004586.jpg 476 | ./afhq/val/cat/pixabay_cat_004595.jpg 477 | ./afhq/val/cat/pixabay_cat_004596.jpg 478 | ./afhq/val/cat/pixabay_cat_004601.jpg 479 | ./afhq/val/cat/pixabay_cat_004602.jpg 480 | ./afhq/val/cat/pixabay_cat_004616.jpg 481 | ./afhq/val/cat/pixabay_cat_004625.jpg 482 | ./afhq/val/cat/pixabay_cat_004635.jpg 483 | ./afhq/val/cat/pixabay_cat_004636.jpg 484 | ./afhq/val/cat/pixabay_cat_004652.jpg 485 | ./afhq/val/cat/pixabay_cat_004655.jpg 486 | ./afhq/val/cat/pixabay_cat_004662.jpg 487 | ./afhq/val/cat/pixabay_cat_004663.jpg 488 | ./afhq/val/cat/pixabay_cat_004685.jpg 489 | ./afhq/val/cat/pixabay_cat_004693.jpg 490 | ./afhq/val/cat/pixabay_cat_004759.jpg 491 | ./afhq/val/cat/pixabay_cat_004765.jpg 492 | ./afhq/val/cat/pixabay_cat_004766.jpg 493 | ./afhq/val/cat/pixabay_cat_004782.jpg 494 | ./afhq/val/cat/pixabay_cat_004783.jpg 495 | ./afhq/val/cat/pixabay_cat_004784.jpg 496 | ./afhq/val/cat/pixabay_cat_004793.jpg 497 | ./afhq/val/cat/pixabay_cat_004808.jpg 498 | ./afhq/val/cat/pixabay_cat_004816.jpg 499 | ./afhq/val/cat/pixabay_cat_004826.jpg 500 | ./afhq/val/cat/pixabay_cat_004832.jpg -------------------------------------------------------------------------------- /datasets/cub/test.flist: -------------------------------------------------------------------------------- 1 | ./CUB-256/11201.jpg 2 | ./CUB-256/10146.jpg 3 | ./CUB-256/11661.jpg 4 | ./CUB-256/10762.jpg 5 | ./CUB-256/11437.jpg 6 | ./CUB-256/11606.jpg 7 | ./CUB-256/10850.jpg 8 | ./CUB-256/10142.jpg 9 | ./CUB-256/10175.jpg 10 | ./CUB-256/10521.jpg 11 | ./CUB-256/10277.jpg 12 | ./CUB-256/10474.jpg 13 | ./CUB-256/10053.jpg 14 | ./CUB-256/11713.jpg 15 | ./CUB-256/11625.jpg 16 | ./CUB-256/10363.jpg 17 | ./CUB-256/10536.jpg 18 | ./CUB-256/10854.jpg 19 | ./CUB-256/11587.jpg 20 | ./CUB-256/10245.jpg 21 | ./CUB-256/11607.jpg 22 | ./CUB-256/11658.jpg 23 | ./CUB-256/11413.jpg 24 | ./CUB-256/10671.jpg 25 | ./CUB-256/10088.jpg 26 | ./CUB-256/11283.jpg 27 | ./CUB-256/11453.jpg 28 | ./CUB-256/10869.jpg 29 | ./CUB-256/10132.jpg 30 | ./CUB-256/10904.jpg 31 | ./CUB-256/10870.jpg 32 | ./CUB-256/11610.jpg 33 | ./CUB-256/10185.jpg 34 | ./CUB-256/11580.jpg 35 | ./CUB-256/10161.jpg 36 | ./CUB-256/10358.jpg 37 | ./CUB-256/10320.jpg 38 | ./CUB-256/11222.jpg 39 | ./CUB-256/11670.jpg 40 | ./CUB-256/11614.jpg 41 | ./CUB-256/11396.jpg 42 | ./CUB-256/09990.jpg 43 | ./CUB-256/10458.jpg 44 | 
./CUB-256/10404.jpg 45 | ./CUB-256/11235.jpg 46 | ./CUB-256/11535.jpg 47 | ./CUB-256/11747.jpg 48 | ./CUB-256/11400.jpg 49 | ./CUB-256/11664.jpg 50 | ./CUB-256/10186.jpg 51 | ./CUB-256/10044.jpg 52 | ./CUB-256/10092.jpg 53 | ./CUB-256/10739.jpg 54 | ./CUB-256/10240.jpg 55 | ./CUB-256/10745.jpg 56 | ./CUB-256/11116.jpg 57 | ./CUB-256/11763.jpg 58 | ./CUB-256/10205.jpg 59 | ./CUB-256/11542.jpg 60 | ./CUB-256/10440.jpg 61 | ./CUB-256/10562.jpg 62 | ./CUB-256/11177.jpg 63 | ./CUB-256/11409.jpg 64 | ./CUB-256/10622.jpg 65 | ./CUB-256/10345.jpg 66 | ./CUB-256/11628.jpg 67 | ./CUB-256/10475.jpg 68 | ./CUB-256/10694.jpg 69 | ./CUB-256/11310.jpg 70 | ./CUB-256/11343.jpg 71 | ./CUB-256/10176.jpg 72 | ./CUB-256/10261.jpg 73 | ./CUB-256/10820.jpg 74 | ./CUB-256/10452.jpg 75 | ./CUB-256/10103.jpg 76 | ./CUB-256/10875.jpg 77 | ./CUB-256/10377.jpg 78 | ./CUB-256/10298.jpg 79 | ./CUB-256/10235.jpg 80 | ./CUB-256/11500.jpg 81 | ./CUB-256/11464.jpg 82 | ./CUB-256/11466.jpg 83 | ./CUB-256/10239.jpg 84 | ./CUB-256/11693.jpg 85 | ./CUB-256/10835.jpg 86 | ./CUB-256/10327.jpg 87 | ./CUB-256/11582.jpg 88 | ./CUB-256/11769.jpg 89 | ./CUB-256/11415.jpg 90 | ./CUB-256/10469.jpg 91 | ./CUB-256/10396.jpg 92 | ./CUB-256/10956.jpg 93 | ./CUB-256/10934.jpg 94 | ./CUB-256/11765.jpg 95 | ./CUB-256/10019.jpg 96 | ./CUB-256/11688.jpg 97 | ./CUB-256/10198.jpg 98 | ./CUB-256/11340.jpg 99 | ./CUB-256/11463.jpg 100 | ./CUB-256/10197.jpg 101 | ./CUB-256/11555.jpg 102 | ./CUB-256/10089.jpg 103 | ./CUB-256/11550.jpg 104 | ./CUB-256/11689.jpg 105 | ./CUB-256/11589.jpg 106 | ./CUB-256/11598.jpg 107 | ./CUB-256/10946.jpg 108 | ./CUB-256/10125.jpg 109 | ./CUB-256/10839.jpg 110 | ./CUB-256/11367.jpg 111 | ./CUB-256/11513.jpg 112 | ./CUB-256/11756.jpg 113 | ./CUB-256/11263.jpg 114 | ./CUB-256/11686.jpg 115 | ./CUB-256/11374.jpg 116 | ./CUB-256/11656.jpg 117 | ./CUB-256/11543.jpg 118 | ./CUB-256/11479.jpg 119 | ./CUB-256/10795.jpg 120 | ./CUB-256/10868.jpg 121 | ./CUB-256/11398.jpg 122 | ./CUB-256/10282.jpg 123 | ./CUB-256/10075.jpg 124 | ./CUB-256/11609.jpg 125 | ./CUB-256/10847.jpg 126 | ./CUB-256/10096.jpg 127 | ./CUB-256/10604.jpg 128 | ./CUB-256/11336.jpg 129 | ./CUB-256/11733.jpg 130 | ./CUB-256/10616.jpg 131 | ./CUB-256/10866.jpg 132 | ./CUB-256/10344.jpg 133 | ./CUB-256/10656.jpg 134 | ./CUB-256/10213.jpg 135 | ./CUB-256/10539.jpg 136 | ./CUB-256/10951.jpg 137 | ./CUB-256/10264.jpg 138 | ./CUB-256/09984.jpg 139 | ./CUB-256/11787.jpg 140 | ./CUB-256/11545.jpg 141 | ./CUB-256/10748.jpg 142 | ./CUB-256/10254.jpg 143 | ./CUB-256/11099.jpg 144 | ./CUB-256/11332.jpg 145 | ./CUB-256/10659.jpg 146 | ./CUB-256/11237.jpg 147 | ./CUB-256/11515.jpg 148 | ./CUB-256/11010.jpg 149 | ./CUB-256/11456.jpg 150 | ./CUB-256/10531.jpg 151 | ./CUB-256/11705.jpg 152 | ./CUB-256/10830.jpg 153 | ./CUB-256/10482.jpg 154 | ./CUB-256/10931.jpg 155 | ./CUB-256/11248.jpg 156 | ./CUB-256/10081.jpg 157 | ./CUB-256/11572.jpg 158 | ./CUB-256/11312.jpg 159 | ./CUB-256/10941.jpg 160 | ./CUB-256/10384.jpg 161 | ./CUB-256/10091.jpg 162 | ./CUB-256/10673.jpg 163 | ./CUB-256/10115.jpg 164 | ./CUB-256/11039.jpg 165 | ./CUB-256/11584.jpg 166 | ./CUB-256/10586.jpg 167 | ./CUB-256/11566.jpg 168 | ./CUB-256/10236.jpg 169 | ./CUB-256/11540.jpg 170 | ./CUB-256/10128.jpg 171 | ./CUB-256/10580.jpg 172 | ./CUB-256/11648.jpg 173 | ./CUB-256/10550.jpg 174 | ./CUB-256/10877.jpg 175 | ./CUB-256/11394.jpg 176 | ./CUB-256/11696.jpg 177 | ./CUB-256/10650.jpg 178 | ./CUB-256/11247.jpg 179 | ./CUB-256/10909.jpg 180 | ./CUB-256/11214.jpg 181 | ./CUB-256/11427.jpg 182 | ./CUB-256/10540.jpg 
183 | ./CUB-256/11446.jpg 184 | ./CUB-256/10756.jpg 185 | ./CUB-256/11739.jpg 186 | ./CUB-256/11372.jpg 187 | ./CUB-256/10798.jpg 188 | ./CUB-256/11179.jpg 189 | ./CUB-256/10294.jpg 190 | ./CUB-256/10949.jpg 191 | ./CUB-256/11424.jpg 192 | ./CUB-256/10126.jpg 193 | ./CUB-256/10884.jpg 194 | ./CUB-256/10840.jpg 195 | ./CUB-256/10578.jpg 196 | ./CUB-256/10493.jpg 197 | ./CUB-256/11660.jpg 198 | ./CUB-256/11649.jpg 199 | ./CUB-256/10037.jpg 200 | ./CUB-256/10816.jpg 201 | ./CUB-256/10781.jpg 202 | ./CUB-256/10391.jpg 203 | ./CUB-256/11014.jpg 204 | ./CUB-256/10571.jpg 205 | ./CUB-256/11480.jpg 206 | ./CUB-256/10880.jpg 207 | ./CUB-256/11646.jpg 208 | ./CUB-256/10179.jpg 209 | ./CUB-256/10472.jpg 210 | ./CUB-256/11373.jpg 211 | ./CUB-256/10289.jpg 212 | ./CUB-256/11190.jpg 213 | ./CUB-256/10974.jpg 214 | ./CUB-256/10575.jpg 215 | ./CUB-256/10323.jpg 216 | ./CUB-256/10710.jpg 217 | ./CUB-256/10766.jpg 218 | ./CUB-256/10180.jpg 219 | ./CUB-256/10942.jpg 220 | ./CUB-256/11583.jpg 221 | ./CUB-256/10050.jpg 222 | ./CUB-256/11501.jpg 223 | ./CUB-256/10600.jpg 224 | ./CUB-256/11271.jpg 225 | ./CUB-256/10585.jpg 226 | ./CUB-256/11532.jpg 227 | ./CUB-256/10473.jpg 228 | ./CUB-256/11767.jpg 229 | ./CUB-256/11083.jpg 230 | ./CUB-256/11742.jpg 231 | ./CUB-256/10283.jpg 232 | ./CUB-256/10975.jpg 233 | ./CUB-256/11784.jpg 234 | ./CUB-256/10211.jpg 235 | ./CUB-256/10770.jpg 236 | ./CUB-256/10072.jpg 237 | ./CUB-256/10729.jpg 238 | ./CUB-256/10248.jpg 239 | ./CUB-256/10903.jpg 240 | ./CUB-256/10708.jpg 241 | ./CUB-256/11281.jpg 242 | ./CUB-256/11334.jpg 243 | ./CUB-256/10772.jpg 244 | ./CUB-256/10355.jpg 245 | ./CUB-256/10340.jpg 246 | ./CUB-256/10351.jpg 247 | ./CUB-256/10012.jpg 248 | ./CUB-256/11251.jpg 249 | ./CUB-256/10070.jpg 250 | ./CUB-256/10068.jpg 251 | ./CUB-256/11381.jpg 252 | ./CUB-256/10049.jpg 253 | ./CUB-256/11066.jpg 254 | ./CUB-256/10806.jpg 255 | ./CUB-256/10466.jpg 256 | ./CUB-256/10199.jpg 257 | ./CUB-256/10699.jpg 258 | ./CUB-256/11482.jpg 259 | ./CUB-256/10915.jpg 260 | ./CUB-256/11470.jpg 261 | ./CUB-256/10740.jpg 262 | ./CUB-256/10787.jpg 263 | ./CUB-256/10958.jpg 264 | ./CUB-256/11129.jpg 265 | ./CUB-256/10655.jpg 266 | ./CUB-256/10805.jpg 267 | ./CUB-256/10813.jpg 268 | ./CUB-256/10465.jpg 269 | ./CUB-256/10204.jpg 270 | ./CUB-256/10154.jpg 271 | ./CUB-256/11299.jpg 272 | ./CUB-256/10821.jpg 273 | ./CUB-256/11483.jpg 274 | ./CUB-256/09985.jpg 275 | ./CUB-256/11175.jpg 276 | ./CUB-256/11727.jpg 277 | ./CUB-256/10853.jpg 278 | ./CUB-256/11383.jpg 279 | ./CUB-256/10811.jpg 280 | ./CUB-256/11698.jpg 281 | ./CUB-256/10768.jpg 282 | ./CUB-256/11744.jpg 283 | ./CUB-256/11342.jpg 284 | ./CUB-256/11103.jpg 285 | ./CUB-256/11694.jpg 286 | ./CUB-256/11258.jpg 287 | ./CUB-256/10786.jpg 288 | ./CUB-256/10151.jpg 289 | ./CUB-256/10226.jpg 290 | ./CUB-256/11721.jpg 291 | ./CUB-256/10872.jpg 292 | ./CUB-256/11411.jpg 293 | ./CUB-256/10774.jpg 294 | ./CUB-256/10684.jpg 295 | ./CUB-256/10695.jpg 296 | ./CUB-256/10271.jpg 297 | ./CUB-256/10741.jpg 298 | ./CUB-256/11626.jpg 299 | ./CUB-256/11354.jpg 300 | ./CUB-256/11777.jpg 301 | ./CUB-256/11029.jpg 302 | ./CUB-256/11639.jpg 303 | ./CUB-256/11564.jpg 304 | ./CUB-256/11115.jpg 305 | ./CUB-256/10051.jpg 306 | ./CUB-256/10672.jpg 307 | ./CUB-256/10457.jpg 308 | ./CUB-256/10826.jpg 309 | ./CUB-256/10367.jpg 310 | ./CUB-256/11701.jpg 311 | ./CUB-256/11375.jpg 312 | ./CUB-256/10663.jpg 313 | ./CUB-256/10035.jpg 314 | ./CUB-256/11338.jpg 315 | ./CUB-256/10076.jpg 316 | ./CUB-256/10306.jpg 317 | ./CUB-256/11454.jpg 318 | ./CUB-256/10237.jpg 319 | 
./CUB-256/10494.jpg 320 | ./CUB-256/10116.jpg 321 | ./CUB-256/11504.jpg 322 | ./CUB-256/10809.jpg 323 | ./CUB-256/10506.jpg 324 | ./CUB-256/11326.jpg 325 | ./CUB-256/11667.jpg 326 | ./CUB-256/10222.jpg 327 | ./CUB-256/11525.jpg 328 | ./CUB-256/10546.jpg 329 | ./CUB-256/11719.jpg 330 | ./CUB-256/10234.jpg 331 | ./CUB-256/11105.jpg 332 | ./CUB-256/10852.jpg 333 | ./CUB-256/10702.jpg 334 | ./CUB-256/11651.jpg 335 | ./CUB-256/11243.jpg 336 | ./CUB-256/10424.jpg 337 | ./CUB-256/10678.jpg 338 | ./CUB-256/11574.jpg 339 | ./CUB-256/10120.jpg 340 | ./CUB-256/10157.jpg 341 | ./CUB-256/11618.jpg 342 | ./CUB-256/10658.jpg 343 | ./CUB-256/11560.jpg 344 | ./CUB-256/11659.jpg 345 | ./CUB-256/10033.jpg 346 | ./CUB-256/10689.jpg 347 | ./CUB-256/10541.jpg 348 | ./CUB-256/10272.jpg 349 | ./CUB-256/10399.jpg 350 | ./CUB-256/11311.jpg 351 | ./CUB-256/10338.jpg 352 | ./CUB-256/10874.jpg 353 | ./CUB-256/11494.jpg 354 | ./CUB-256/10801.jpg 355 | ./CUB-256/10538.jpg 356 | ./CUB-256/10242.jpg 357 | ./CUB-256/10715.jpg 358 | ./CUB-256/10085.jpg 359 | ./CUB-256/11444.jpg 360 | ./CUB-256/10505.jpg 361 | ./CUB-256/10276.jpg 362 | ./CUB-256/11633.jpg 363 | ./CUB-256/10554.jpg 364 | ./CUB-256/11556.jpg 365 | ./CUB-256/10953.jpg 366 | ./CUB-256/10393.jpg 367 | ./CUB-256/11163.jpg 368 | ./CUB-256/10532.jpg 369 | ./CUB-256/10155.jpg 370 | ./CUB-256/10899.jpg 371 | ./CUB-256/11425.jpg 372 | ./CUB-256/10706.jpg 373 | ./CUB-256/11356.jpg 374 | ./CUB-256/10054.jpg 375 | ./CUB-256/10172.jpg 376 | ./CUB-256/10314.jpg 377 | ./CUB-256/10233.jpg 378 | ./CUB-256/11489.jpg 379 | ./CUB-256/10241.jpg 380 | ./CUB-256/11461.jpg 381 | ./CUB-256/11490.jpg 382 | ./CUB-256/11327.jpg 383 | ./CUB-256/10386.jpg 384 | ./CUB-256/10481.jpg 385 | ./CUB-256/10135.jpg 386 | ./CUB-256/11267.jpg 387 | ./CUB-256/10565.jpg 388 | ./CUB-256/11690.jpg 389 | ./CUB-256/11579.jpg 390 | ./CUB-256/11194.jpg 391 | ./CUB-256/11620.jpg 392 | ./CUB-256/10713.jpg 393 | ./CUB-256/10636.jpg 394 | ./CUB-256/10281.jpg 395 | ./CUB-256/10231.jpg 396 | ./CUB-256/11682.jpg 397 | ./CUB-256/10274.jpg 398 | ./CUB-256/11622.jpg 399 | ./CUB-256/11121.jpg 400 | ./CUB-256/11710.jpg 401 | ./CUB-256/11623.jpg 402 | ./CUB-256/10703.jpg 403 | ./CUB-256/10705.jpg 404 | ./CUB-256/10093.jpg 405 | ./CUB-256/11292.jpg 406 | ./CUB-256/10278.jpg 407 | ./CUB-256/10773.jpg 408 | ./CUB-256/11211.jpg 409 | ./CUB-256/11692.jpg 410 | ./CUB-256/10902.jpg 411 | ./CUB-256/10118.jpg 412 | ./CUB-256/10094.jpg 413 | ./CUB-256/10897.jpg 414 | ./CUB-256/11703.jpg 415 | ./CUB-256/11632.jpg 416 | ./CUB-256/11567.jpg 417 | ./CUB-256/11702.jpg 418 | ./CUB-256/10100.jpg 419 | ./CUB-256/11468.jpg 420 | ./CUB-256/10587.jpg 421 | ./CUB-256/11578.jpg 422 | ./CUB-256/11344.jpg 423 | ./CUB-256/10572.jpg 424 | ./CUB-256/11527.jpg 425 | ./CUB-256/11331.jpg 426 | ./CUB-256/11741.jpg 427 | ./CUB-256/10948.jpg 428 | ./CUB-256/11553.jpg 429 | ./CUB-256/10912.jpg 430 | ./CUB-256/10209.jpg 431 | ./CUB-256/10796.jpg 432 | ./CUB-256/11109.jpg 433 | ./CUB-256/11448.jpg 434 | ./CUB-256/10062.jpg 435 | ./CUB-256/10210.jpg 436 | ./CUB-256/11519.jpg 437 | ./CUB-256/11329.jpg 438 | ./CUB-256/11735.jpg 439 | ./CUB-256/11499.jpg 440 | ./CUB-256/11526.jpg 441 | ./CUB-256/10851.jpg 442 | ./CUB-256/10129.jpg 443 | ./CUB-256/10849.jpg 444 | ./CUB-256/11716.jpg 445 | ./CUB-256/10479.jpg 446 | ./CUB-256/11082.jpg 447 | ./CUB-256/10701.jpg 448 | ./CUB-256/10192.jpg 449 | ./CUB-256/10159.jpg 450 | ./CUB-256/10310.jpg 451 | ./CUB-256/10139.jpg 452 | ./CUB-256/11486.jpg 453 | ./CUB-256/10718.jpg 454 | ./CUB-256/10721.jpg 455 | 
./CUB-256/10755.jpg 456 | ./CUB-256/11617.jpg 457 | ./CUB-256/11621.jpg 458 | ./CUB-256/10704.jpg 459 | ./CUB-256/10221.jpg 460 | ./CUB-256/11718.jpg 461 | ./CUB-256/10191.jpg 462 | ./CUB-256/10566.jpg 463 | ./CUB-256/11736.jpg 464 | ./CUB-256/10726.jpg 465 | ./CUB-256/10014.jpg 466 | ./CUB-256/10894.jpg 467 | ./CUB-256/11155.jpg 468 | ./CUB-256/10000.jpg 469 | ./CUB-256/11487.jpg 470 | ./CUB-256/11369.jpg 471 | ./CUB-256/10746.jpg 472 | ./CUB-256/10212.jpg 473 | ./CUB-256/11677.jpg 474 | ./CUB-256/10759.jpg 475 | ./CUB-256/11071.jpg 476 | ./CUB-256/11423.jpg 477 | ./CUB-256/10380.jpg 478 | ./CUB-256/11294.jpg 479 | ./CUB-256/10195.jpg 480 | ./CUB-256/11455.jpg 481 | ./CUB-256/11687.jpg 482 | ./CUB-256/10188.jpg 483 | ./CUB-256/10690.jpg 484 | ./CUB-256/10668.jpg 485 | ./CUB-256/11395.jpg 486 | ./CUB-256/11539.jpg 487 | ./CUB-256/10270.jpg 488 | ./CUB-256/10207.jpg 489 | ./CUB-256/11206.jpg 490 | ./CUB-256/10420.jpg 491 | ./CUB-256/11036.jpg 492 | ./CUB-256/10794.jpg 493 | ./CUB-256/10041.jpg 494 | ./CUB-256/10832.jpg 495 | ./CUB-256/10582.jpg 496 | ./CUB-256/10545.jpg 497 | ./CUB-256/10612.jpg 498 | ./CUB-256/11114.jpg 499 | ./CUB-256/11663.jpg 500 | ./CUB-256/10488.jpg 501 | ./CUB-256/11350.jpg 502 | ./CUB-256/11737.jpg 503 | ./CUB-256/11593.jpg 504 | ./CUB-256/10307.jpg 505 | ./CUB-256/10792.jpg 506 | ./CUB-256/10698.jpg 507 | ./CUB-256/11216.jpg 508 | ./CUB-256/10038.jpg 509 | ./CUB-256/11062.jpg 510 | ./CUB-256/10258.jpg 511 | ./CUB-256/11773.jpg 512 | ./CUB-256/11341.jpg 513 | ./CUB-256/10190.jpg 514 | ./CUB-256/10144.jpg 515 | ./CUB-256/10009.jpg 516 | ./CUB-256/10780.jpg 517 | ./CUB-256/11630.jpg 518 | ./CUB-256/10060.jpg 519 | ./CUB-256/11187.jpg 520 | ./CUB-256/10206.jpg 521 | ./CUB-256/10441.jpg 522 | ./CUB-256/10483.jpg 523 | ./CUB-256/10168.jpg 524 | ./CUB-256/11643.jpg 525 | ./CUB-256/11731.jpg 526 | ./CUB-256/11591.jpg 527 | ./CUB-256/10489.jpg 528 | ./CUB-256/10388.jpg 529 | ./CUB-256/10621.jpg 530 | ./CUB-256/10383.jpg 531 | ./CUB-256/11017.jpg 532 | ./CUB-256/10030.jpg 533 | ./CUB-256/10348.jpg 534 | ./CUB-256/10269.jpg 535 | ./CUB-256/11097.jpg 536 | ./CUB-256/11559.jpg 537 | ./CUB-256/10522.jpg 538 | ./CUB-256/10510.jpg 539 | ./CUB-256/11680.jpg 540 | ./CUB-256/11402.jpg 541 | ./CUB-256/11668.jpg 542 | ./CUB-256/11743.jpg 543 | ./CUB-256/10223.jpg 544 | ./CUB-256/11666.jpg 545 | ./CUB-256/10822.jpg 546 | ./CUB-256/10238.jpg 547 | ./CUB-256/10461.jpg 548 | ./CUB-256/11236.jpg 549 | ./CUB-256/10736.jpg 550 | ./CUB-256/10160.jpg 551 | ./CUB-256/11291.jpg 552 | ./CUB-256/10262.jpg 553 | ./CUB-256/11337.jpg 554 | ./CUB-256/11596.jpg 555 | ./CUB-256/10717.jpg 556 | ./CUB-256/11094.jpg 557 | ./CUB-256/10551.jpg 558 | ./CUB-256/11357.jpg 559 | ./CUB-256/10553.jpg 560 | ./CUB-256/10828.jpg 561 | ./CUB-256/11474.jpg 562 | ./CUB-256/11512.jpg 563 | ./CUB-256/10463.jpg 564 | ./CUB-256/10937.jpg 565 | ./CUB-256/10687.jpg 566 | ./CUB-256/11558.jpg 567 | ./CUB-256/10570.jpg 568 | ./CUB-256/11627.jpg 569 | ./CUB-256/11196.jpg 570 | ./CUB-256/10286.jpg 571 | ./CUB-256/10943.jpg 572 | ./CUB-256/10765.jpg 573 | ./CUB-256/11044.jpg 574 | ./CUB-256/10249.jpg 575 | ./CUB-256/11215.jpg 576 | ./CUB-256/10895.jpg 577 | ./CUB-256/11497.jpg 578 | ./CUB-256/11601.jpg 579 | ./CUB-256/10087.jpg 580 | ./CUB-256/10341.jpg 581 | ./CUB-256/10927.jpg 582 | ./CUB-256/10402.jpg 583 | ./CUB-256/10257.jpg 584 | ./CUB-256/10964.jpg 585 | ./CUB-256/10423.jpg 586 | ./CUB-256/10961.jpg 587 | ./CUB-256/11084.jpg 588 | ./CUB-256/11531.jpg 589 | ./CUB-256/11753.jpg 590 | ./CUB-256/11746.jpg 591 | 
./CUB-256/11679.jpg 592 | ./CUB-256/10317.jpg 593 | ./CUB-256/10527.jpg 594 | ./CUB-256/11434.jpg 595 | ./CUB-256/10412.jpg 596 | ./CUB-256/11760.jpg 597 | ./CUB-256/11481.jpg 598 | ./CUB-256/10350.jpg 599 | ./CUB-256/10219.jpg 600 | ./CUB-256/11038.jpg 601 | ./CUB-256/11239.jpg 602 | ./CUB-256/11197.jpg 603 | ./CUB-256/10244.jpg 604 | ./CUB-256/09993.jpg 605 | ./CUB-256/11757.jpg 606 | ./CUB-256/09999.jpg 607 | ./CUB-256/11335.jpg 608 | ./CUB-256/10485.jpg 609 | ./CUB-256/11347.jpg 610 | ./CUB-256/11042.jpg 611 | ./CUB-256/11729.jpg 612 | ./CUB-256/10800.jpg 613 | ./CUB-256/11064.jpg 614 | ./CUB-256/10353.jpg 615 | ./CUB-256/11428.jpg 616 | ./CUB-256/10182.jpg 617 | ./CUB-256/11536.jpg 618 | ./CUB-256/10815.jpg 619 | ./CUB-256/10330.jpg 620 | ./CUB-256/10312.jpg 621 | ./CUB-256/10652.jpg 622 | ./CUB-256/10893.jpg 623 | ./CUB-256/10024.jpg 624 | ./CUB-256/09982.jpg 625 | ./CUB-256/11364.jpg 626 | ./CUB-256/10635.jpg 627 | ./CUB-256/11405.jpg 628 | ./CUB-256/11635.jpg 629 | ./CUB-256/10725.jpg 630 | ./CUB-256/10646.jpg 631 | ./CUB-256/11361.jpg 632 | ./CUB-256/11636.jpg 633 | ./CUB-256/10863.jpg 634 | ./CUB-256/11476.jpg 635 | ./CUB-256/11469.jpg 636 | ./CUB-256/10654.jpg 637 | ./CUB-256/10504.jpg 638 | ./CUB-256/11752.jpg 639 | ./CUB-256/11671.jpg 640 | ./CUB-256/10594.jpg 641 | ./CUB-256/11380.jpg 642 | ./CUB-256/11654.jpg 643 | ./CUB-256/11647.jpg 644 | ./CUB-256/10122.jpg 645 | ./CUB-256/11072.jpg 646 | ./CUB-256/11758.jpg 647 | ./CUB-256/11485.jpg 648 | ./CUB-256/11130.jpg 649 | ./CUB-256/10526.jpg 650 | ./CUB-256/10361.jpg 651 | ./CUB-256/11662.jpg 652 | ./CUB-256/11715.jpg 653 | ./CUB-256/11315.jpg 654 | ./CUB-256/10939.jpg 655 | ./CUB-256/11075.jpg 656 | ./CUB-256/09992.jpg 657 | ./CUB-256/10004.jpg 658 | ./CUB-256/11750.jpg 659 | ./CUB-256/10644.jpg 660 | ./CUB-256/10528.jpg 661 | ./CUB-256/10077.jpg 662 | ./CUB-256/11355.jpg 663 | ./CUB-256/11554.jpg 664 | ./CUB-256/11452.jpg 665 | ./CUB-256/10413.jpg 666 | ./CUB-256/10370.jpg 667 | ./CUB-256/10232.jpg 668 | ./CUB-256/11368.jpg 669 | ./CUB-256/11224.jpg 670 | ./CUB-256/10596.jpg 671 | ./CUB-256/10285.jpg 672 | ./CUB-256/10143.jpg 673 | ./CUB-256/10898.jpg 674 | ./CUB-256/10042.jpg 675 | ./CUB-256/11570.jpg 676 | ./CUB-256/11496.jpg 677 | ./CUB-256/10095.jpg 678 | ./CUB-256/11477.jpg 679 | ./CUB-256/10299.jpg 680 | ./CUB-256/11420.jpg 681 | ./CUB-256/10431.jpg 682 | ./CUB-256/10923.jpg 683 | ./CUB-256/10714.jpg 684 | ./CUB-256/11241.jpg 685 | ./CUB-256/10907.jpg 686 | ./CUB-256/10335.jpg 687 | ./CUB-256/11697.jpg 688 | ./CUB-256/11001.jpg 689 | ./CUB-256/11678.jpg 690 | ./CUB-256/10003.jpg 691 | ./CUB-256/10664.jpg 692 | ./CUB-256/11603.jpg 693 | ./CUB-256/10114.jpg 694 | ./CUB-256/11571.jpg 695 | ./CUB-256/11714.jpg 696 | ./CUB-256/11325.jpg 697 | ./CUB-256/10999.jpg 698 | ./CUB-256/10047.jpg 699 | ./CUB-256/10555.jpg 700 | ./CUB-256/10184.jpg 701 | ./CUB-256/10662.jpg 702 | ./CUB-256/10193.jpg 703 | ./CUB-256/10273.jpg 704 | ./CUB-256/11275.jpg 705 | ./CUB-256/11523.jpg 706 | ./CUB-256/10366.jpg 707 | ./CUB-256/10097.jpg 708 | ./CUB-256/10963.jpg 709 | ./CUB-256/10173.jpg 710 | ./CUB-256/10453.jpg 711 | ./CUB-256/11704.jpg 712 | ./CUB-256/10079.jpg 713 | ./CUB-256/10692.jpg 714 | ./CUB-256/11274.jpg 715 | ./CUB-256/11390.jpg 716 | ./CUB-256/10865.jpg 717 | ./CUB-256/11218.jpg 718 | ./CUB-256/11645.jpg 719 | ./CUB-256/10864.jpg 720 | ./CUB-256/11253.jpg 721 | ./CUB-256/10080.jpg 722 | ./CUB-256/11478.jpg 723 | ./CUB-256/10810.jpg 724 | ./CUB-256/10110.jpg 725 | ./CUB-256/11377.jpg 726 | ./CUB-256/10891.jpg 727 | 
./CUB-256/10753.jpg 728 | ./CUB-256/10523.jpg 729 | ./CUB-256/10630.jpg 730 | ./CUB-256/10930.jpg 731 | ./CUB-256/10789.jpg 732 | ./CUB-256/10751.jpg 733 | ./CUB-256/10171.jpg 734 | ./CUB-256/11779.jpg 735 | ./CUB-256/10098.jpg 736 | ./CUB-256/10574.jpg 737 | ./CUB-256/10425.jpg 738 | ./CUB-256/11101.jpg 739 | ./CUB-256/10025.jpg 740 | ./CUB-256/10359.jpg 741 | ./CUB-256/11538.jpg 742 | ./CUB-256/10156.jpg 743 | ./CUB-256/11442.jpg 744 | ./CUB-256/11782.jpg 745 | ./CUB-256/10395.jpg 746 | ./CUB-256/10015.jpg 747 | ./CUB-256/11202.jpg 748 | ./CUB-256/10216.jpg 749 | ./CUB-256/11085.jpg 750 | ./CUB-256/11684.jpg 751 | ./CUB-256/11613.jpg 752 | ./CUB-256/11563.jpg 753 | ./CUB-256/10309.jpg 754 | ./CUB-256/11770.jpg 755 | ./CUB-256/11547.jpg 756 | ./CUB-256/10106.jpg 757 | ./CUB-256/10010.jpg 758 | ./CUB-256/10162.jpg 759 | ./CUB-256/10497.jpg 760 | ./CUB-256/11745.jpg 761 | ./CUB-256/10029.jpg 762 | ./CUB-256/11691.jpg 763 | ./CUB-256/10311.jpg 764 | ./CUB-256/10295.jpg 765 | ./CUB-256/10560.jpg 766 | ./CUB-256/10819.jpg 767 | ./CUB-256/11319.jpg 768 | ./CUB-256/10040.jpg 769 | ./CUB-256/10034.jpg 770 | ./CUB-256/10825.jpg 771 | ./CUB-256/11751.jpg 772 | ./CUB-256/11346.jpg 773 | ./CUB-256/11388.jpg 774 | ./CUB-256/10373.jpg 775 | ./CUB-256/11475.jpg 776 | ./CUB-256/10530.jpg 777 | ./CUB-256/10670.jpg 778 | ./CUB-256/11592.jpg 779 | ./CUB-256/10006.jpg 780 | ./CUB-256/11657.jpg 781 | ./CUB-256/10696.jpg 782 | ./CUB-256/11242.jpg 783 | ./CUB-256/11514.jpg 784 | ./CUB-256/10724.jpg 785 | ./CUB-256/11392.jpg 786 | ./CUB-256/11073.jpg 787 | ./CUB-256/11676.jpg 788 | ./CUB-256/10225.jpg 789 | ./CUB-256/10016.jpg 790 | ./CUB-256/10266.jpg 791 | ./CUB-256/11522.jpg 792 | ./CUB-256/11629.jpg 793 | ./CUB-256/10230.jpg 794 | ./CUB-256/10134.jpg 795 | ./CUB-256/10908.jpg 796 | ./CUB-256/10844.jpg 797 | ./CUB-256/10524.jpg 798 | ./CUB-256/11634.jpg 799 | ./CUB-256/10955.jpg 800 | ./CUB-256/10607.jpg 801 | ./CUB-256/11637.jpg 802 | ./CUB-256/11615.jpg 803 | ./CUB-256/11619.jpg 804 | ./CUB-256/10883.jpg 805 | ./CUB-256/11256.jpg 806 | ./CUB-256/11524.jpg 807 | ./CUB-256/11576.jpg 808 | ./CUB-256/11534.jpg 809 | ./CUB-256/11709.jpg 810 | ./CUB-256/11785.jpg 811 | ./CUB-256/11717.jpg 812 | ./CUB-256/11309.jpg 813 | ./CUB-256/11438.jpg 814 | ./CUB-256/10152.jpg 815 | ./CUB-256/10879.jpg 816 | ./CUB-256/10365.jpg 817 | ./CUB-256/11551.jpg 818 | ./CUB-256/10011.jpg 819 | ./CUB-256/10123.jpg 820 | ./CUB-256/10228.jpg 821 | ./CUB-256/10229.jpg 822 | ./CUB-256/11546.jpg 823 | ./CUB-256/11548.jpg 824 | ./CUB-256/10287.jpg 825 | ./CUB-256/11577.jpg 826 | ./CUB-256/11300.jpg 827 | ./CUB-256/11652.jpg 828 | ./CUB-256/11706.jpg 829 | ./CUB-256/10476.jpg 830 | ./CUB-256/11366.jpg 831 | ./CUB-256/11685.jpg 832 | ./CUB-256/10716.jpg 833 | ./CUB-256/11403.jpg 834 | ./CUB-256/10917.jpg 835 | ./CUB-256/11683.jpg 836 | ./CUB-256/10911.jpg 837 | ./CUB-256/10260.jpg 838 | ./CUB-256/10542.jpg 839 | ./CUB-256/11665.jpg 840 | ./CUB-256/10547.jpg 841 | ./CUB-256/10101.jpg 842 | ./CUB-256/10108.jpg 843 | ./CUB-256/11594.jpg 844 | ./CUB-256/11254.jpg 845 | ./CUB-256/10807.jpg 846 | ./CUB-256/11562.jpg 847 | ./CUB-256/11565.jpg 848 | ./CUB-256/11026.jpg 849 | ./CUB-256/10846.jpg 850 | ./CUB-256/10742.jpg 851 | ./CUB-256/11675.jpg 852 | ./CUB-256/11351.jpg 853 | ./CUB-256/11611.jpg 854 | ./CUB-256/10138.jpg 855 | ./CUB-256/10215.jpg 856 | ./CUB-256/10334.jpg 857 | ./CUB-256/10243.jpg 858 | ./CUB-256/11491.jpg 859 | ./CUB-256/10856.jpg 860 | ./CUB-256/10876.jpg 861 | ./CUB-256/11537.jpg 862 | ./CUB-256/11557.jpg 863 | 
./CUB-256/10201.jpg 864 | ./CUB-256/11708.jpg 865 | ./CUB-256/11612.jpg 866 | ./CUB-256/11358.jpg 867 | ./CUB-256/11638.jpg 868 | ./CUB-256/10887.jpg 869 | ./CUB-256/10189.jpg 870 | ./CUB-256/10357.jpg 871 | ./CUB-256/11569.jpg 872 | ./CUB-256/10730.jpg 873 | ./CUB-256/10356.jpg 874 | ./CUB-256/11640.jpg 875 | ./CUB-256/10567.jpg 876 | ./CUB-256/11755.jpg 877 | ./CUB-256/10563.jpg 878 | ./CUB-256/10253.jpg 879 | ./CUB-256/10954.jpg 880 | ./CUB-256/11060.jpg 881 | ./CUB-256/10158.jpg 882 | ./CUB-256/10403.jpg 883 | ./CUB-256/10760.jpg 884 | ./CUB-256/10995.jpg 885 | ./CUB-256/10797.jpg 886 | ./CUB-256/09997.jpg 887 | ./CUB-256/10738.jpg 888 | ./CUB-256/10279.jpg 889 | ./CUB-256/11597.jpg 890 | ./CUB-256/11699.jpg 891 | ./CUB-256/10342.jpg 892 | ./CUB-256/11764.jpg 893 | ./CUB-256/11508.jpg 894 | ./CUB-256/11749.jpg 895 | ./CUB-256/11712.jpg 896 | ./CUB-256/10360.jpg 897 | ./CUB-256/10247.jpg 898 | ./CUB-256/10744.jpg 899 | ./CUB-256/10280.jpg 900 | ./CUB-256/10548.jpg 901 | ./CUB-256/11232.jpg 902 | ./CUB-256/10178.jpg 903 | ./CUB-256/11616.jpg 904 | ./CUB-256/10712.jpg 905 | ./CUB-256/10919.jpg 906 | ./CUB-256/11249.jpg 907 | ./CUB-256/10905.jpg 908 | ./CUB-256/10409.jpg 909 | ./CUB-256/10831.jpg 910 | ./CUB-256/10634.jpg 911 | ./CUB-256/11088.jpg 912 | ./CUB-256/10749.jpg 913 | ./CUB-256/11780.jpg 914 | ./CUB-256/11517.jpg 915 | ./CUB-256/10824.jpg -------------------------------------------------------------------------------- /src/model/networks.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import functools 4 | import torch.nn.functional as F 5 | from collections import OrderedDict 6 | import numpy as np 7 | from .partialconv import PartialConv2d 8 | from .patchtransmit import PatchTransmit 9 | import torch.nn.utils.spectral_norm as spectral_norm 10 | 11 | class BaseNetwork(nn.Module): 12 | def __init__(self): 13 | super(BaseNetwork, self).__init__() 14 | 15 | def init_weights(self, init_type='xavier', gain=0.02): 16 | def init_func(m): 17 | classname = m.__class__.__name__ 18 | if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): 19 | if init_type == 'normal': 20 | nn.init.normal_(m.weight.data, 0.0, gain) 21 | elif init_type == 'xavier': 22 | nn.init.xavier_normal_(m.weight.data, gain=gain) 23 | elif init_type == 'kaiming': 24 | nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') 25 | elif init_type == 'orthogonal': 26 | nn.init.orthogonal_(m.weight.data, gain=gain) 27 | 28 | if hasattr(m, 'bias') and m.bias is not None: 29 | nn.init.constant_(m.bias.data, 0.0) 30 | 31 | elif classname.find('BatchNorm2d') != -1: 32 | nn.init.normal_(m.weight.data, 1.0, gain) 33 | nn.init.constant_(m.bias.data, 0.0) 34 | 35 | self.apply(init_func) 36 | 37 | 38 | class Generator(BaseNetwork): 39 | 40 | def __init__(self): 41 | super().__init__() 42 | 43 | self.ngf = 64 44 | nf = self.ngf 45 | self.sw, self.sh = 8, 8 46 | 47 | self.z_dim = 256 48 | self.fc = nn.Linear(self.z_dim, 16 * nf * self.sw * self.sh) 49 | 50 | self.Encoder = Encoder() 51 | 52 | # without noise 53 | # self.head_0 = ResnetBlock(8 * nf, 16 * nf, d = 2) 54 | # with noise 55 | self.head_0 = ResnetBlock(16 * nf, 16 * nf, d = 2) 56 | 57 | self.G_middle_0 = ResnetBlock(16 * nf, 16 * nf, d = 2) 58 | self.G_middle_1 = ResnetBlock(16 * nf, 16 * nf, d = 2) 59 | 60 | self.up_0 = ResnetBlock(16 * nf, 8 * nf, d = 4) 61 | self.up_1 = ResnetBlock(8 * nf, 4 * nf, d = 4) 62 | self.up_2 = ResnetBlock(4 * nf, 2 * nf, d 
= 4) 63 | self.up_3 = ResnetBlock(2 * nf, 1 * nf) 64 | 65 | final_nc = nf 66 | 67 | self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1) 68 | 69 | self.up = nn.Upsample(scale_factor=2) 70 | 71 | self.tanh = nn.Tanh() 72 | # 28 for train & test 73 | # self.patchtransmit = PatchTransmit(nf*2) 74 | # 29 for train & test 75 | self.patchtransmit = PatchTransmit(nf) 76 | 77 | self.init_weights() 78 | 79 | def reparameterize(self, mu, logvar): 80 | std = torch.exp(0.5 * logvar) 81 | eps = torch.randn_like(std) 82 | # print(mu, logvar) 83 | 84 | return eps.mul(std) + mu 85 | 86 | def adain(self, f, z, eps=1e-6): 87 | b, c = f.size()[:2] 88 | 89 | f_view = f.view(b, c, -1) 90 | z_view = z.view(b, c, -1) 91 | 92 | f_mean = f_view.mean(dim=2).view(b, c, 1, 1) 93 | f_std = f_view.std(dim=2).view(b, c, 1, 1) + eps 94 | 95 | z_mean = z_view.mean(dim=2).view(b, c, 1, 1) 96 | z_std = z_view.std(dim=2).view(b, c, 1, 1) + eps 97 | 98 | z = f_std * (z - z_mean) / z_std + f_mean 99 | 100 | return z 101 | 102 | def forward(self, input, mask, z=None): 103 | mask_image = input 104 | 105 | [x_0, x_1, x_2, x_3, x_4, z_in], [mask_0, mask_1, mask_2, mask_3, mask_4], (mu, logvar) = self.Encoder(mask_image, mask) 106 | 107 | # part-noise 108 | z = self.reparameterize(mu, logvar) 109 | # normal noise 110 | # z = torch.randn(input.size(0), self.z_dim, dtype=torch.float32).cuda() 111 | # with/without noise 112 | z = self.fc(z) 113 | x = z.view(-1, 16 * self.ngf, self.sh, self.sw) 114 | y_1 = self.head_0(x, x_4, mask_4) 115 | # without noise 116 | # y_1 = self.head_0(x_4) 117 | # y_1 = self.head_0(x) 118 | # Local Adain 119 | # x = self.local_adain(x, mask_4) 120 | x = self.up(y_1) 121 | # 16, 16 122 | y_2_1 = self.G_middle_0(x, x_3, mask_3) 123 | # y_2_1 = self.G_middle_0(x) 124 | # x = self.local_adain(x, mask_3) 125 | 126 | y_2_2 = self.G_middle_1(y_2_1, x_3, mask_3) 127 | # y_2_2 = self.G_middle_1(y_2_1) 128 | # x = self.local_adain(x, mask_3) 129 | 130 | x = self.up(y_2_2) 131 | # 16, 32 132 | y_3 = self.up_0(x, x_2, mask_2) 133 | # y_3 = self.up_0(x) 134 | # x = self.local_adain(x, mask_2) 135 | x = self.up(y_3) 136 | # 8, 64 137 | y_4 = self.up_1(x, x_1, mask_1) 138 | # y_4 = self.up_1(x) 139 | # x = self.local_adain(x, mask_1) 140 | # Local Adain 141 | # f_a_2 = self.local_adain(f_a_2, masks[0], masks[2]) 142 | 143 | x = self.up(y_4) 144 | # 4, 128 145 | y_5 = self.up_2(x, x_0, mask_0, isprint=True) 146 | # y_5_a, _ = self.patchtransmit(y_5, mask_0) 147 | 148 | x = self.up(y_5) 149 | # 2, 256 150 | y_6 = self.up_3(x) 151 | # y_6 = self.up_3(x) 152 | y_6_a, _, w1, w2 = self.patchtransmit(y_6, mask) 153 | # y_6_a = self.patchtransmit(y_6, mask) 154 | 155 | # x = self.conv_img(F.leaky_relu(y_6_a, 2e-1)) 156 | x = self.conv_img(y_6_a) 157 | x = (self.tanh(x) + 1) / 2 158 | # x = self.tanh(x) 159 | 160 | # return x, (mu, logvar), (y_1, y_2_1, y_2_2, y_3, y_4, y_5, _, y_5_a, y_6) 161 | # y1 = torch.mean(y_6, dim=1, keepdim=True) 162 | # y2 = torch.mean(y_6_a, dim=1, keepdim=True) 163 | # y3 = torch.mean(_, dim=1, keepdim=True) 164 | # y4 = torch.mean(w1, dim=1, keepdim=True) 165 | # y5 = torch.mean(w2, dim=1, keepdim=True) 166 | # min = y3.min() 167 | # max = y3.max() 168 | # return x, (mu, logvar), (y1 - min / max, (y2 - min) / max, (y3 - min) / max, (y4 - min) / max, (y5 - min) / max ) 169 | return x, (mu, logvar), None 170 | 171 | class ResnetBlock(BaseNetwork): 172 | def __init__(self, fin, fout, kernel_size=3, d=2): 173 | super().__init__() 174 | 175 | fmiddle = min(fin, fout) 176 | self.d = d 177 | 178 | pw = 
(kernel_size - 1) // 2 179 | self.relu = nn.LeakyReLU(0.2, False) 180 | # self.relu = nn.ReLU() 181 | 182 | self.conv_block_1 = nn.Sequential( 183 | nn.ReflectionPad2d(pw), 184 | nn.Conv2d(fin, fmiddle, kernel_size=kernel_size), 185 | ) 186 | 187 | self.conv_block_2 = nn.Sequential( 188 | nn.ReflectionPad2d(pw), 189 | nn.Conv2d(fmiddle, fout, kernel_size=kernel_size) 190 | ) 191 | 192 | self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False) 193 | self.norm_1 = FeatureTransfer(fmiddle) 194 | self.norm_2 = FeatureRec(fmiddle) 195 | 196 | self.conv_1_1 = nn.Conv2d(fin+fin//2, fin, kernel_size=1, bias=False) 197 | self.conv_1_2 = nn.Conv2d(fin+fin//4, fin, kernel_size=1, bias=False) 198 | 199 | self.init_weights() 200 | self.inorm = nn.InstanceNorm2d(fout) 201 | 202 | self.tanh = nn.Tanh() 203 | 204 | def forward(self, x, t=None, mask=None, isprint=False): 205 | x_s = self.conv_s(x) 206 | # x_s = self.inorm(x_s) 207 | x_s = self.relu(x_s) 208 | 209 | d = self.d 210 | 211 | # 18, 19 212 | if t is not None: 213 | # print(x.shape, t.shape) 214 | x = torch.cat([x, t], 1) 215 | if d == 2: 216 | x = self.conv_1_1(x) 217 | x = self.relu(x) 218 | if d == 4: 219 | x = self.conv_1_2(x) 220 | x = self.relu(x) 221 | 222 | y = self.conv_block_1(x) 223 | if t is not None: 224 | y, g = self.norm_1(y, mask) 225 | # 32 226 | # y = yy + y 227 | y, gamma, beta = self.norm_2(y, mask, g) 228 | # y = self.inorm(y) 229 | y = self.relu(y) 230 | 231 | y = self.conv_block_2(y) 232 | # if t is not None: 233 | # y = self.norm_2(y, mask, f_h) 234 | y = self.relu(y) 235 | # print(gamma, beta) 236 | 237 | out = x_s + y 238 | return out 239 | 240 | def calc_mean_std(f, eps=1e-6): 241 | b, c = f.size()[:2] 242 | f_mean = f.mean(dim=2).view(b, c, 1, 1) 243 | f_std = f.std(dim=2).view(b, c, 1, 1) + eps 244 | 245 | return f_mean, f_std 246 | 247 | class FeatureTransfer(nn.Module): 248 | def __init__(self, out_nc): 249 | super(FeatureTransfer, self).__init__() 250 | self.norm = nn.InstanceNorm2d(out_nc, affine=False) 251 | 252 | def forward(self, f, mask): 253 | [b, c, h, w] = f.size() 254 | 255 | # f = self.norm(f) 256 | # _, f_std = calc_mean_std(f) 257 | 258 | f = f.view(b, c, -1) 259 | 260 | mask_one_channel = mask.view(b, 1, -1)[0][0] 261 | index_good = torch.nonzero(mask_one_channel) 262 | index_bad = torch.nonzero(1 - mask_one_channel) 263 | 264 | f_local = f[:, :, index_good] 265 | f_global = f[:, :, index_bad] 266 | 267 | # print(f_global.shape) 268 | # print(torch.mean(f_global[0][60]), torch.var(f_global[0][60])) 269 | # print(torch.mean(f_local[0][60]), torch.var(f_local[0][60])) 270 | 271 | # f_global = self.norm(f_global) 272 | f_global_mean, f_global_std = calc_mean_std(f_global) 273 | f_global = (f_global - f_global_mean) / f_global_std 274 | 275 | f_local_mean, f_local_std = calc_mean_std(f_local) 276 | 277 | # x = self.norm(f_global) 278 | f_global_adain = f_global * f_local_std + f_local_mean 279 | 280 | f[:, :, index_bad] = f_global_adain 281 | # print(torch.mean(f_local_mean), torch.mean(f_local_std)) 282 | 283 | f = f.view(b, c, h, w) 284 | 285 | return f, f_global 286 | 287 | class FeatureRec(BaseNetwork): 288 | def __init__(self, in_nc): 289 | super(FeatureRec, self).__init__() 290 | 291 | ks = 1 292 | pw = 0 293 | nhidden = 128 294 | 295 | self.mlp_shared = nn.Sequential( 296 | nn.Conv2d(in_nc, nhidden, kernel_size=ks, padding=pw), 297 | nn.ReLU() 298 | ) 299 | self.mlp_gamma = nn.Conv2d(nhidden, in_nc, kernel_size=ks, padding=pw) 300 | self.mlp_beta = nn.Conv2d(nhidden, in_nc, kernel_size=ks, 
padding=pw) 301 | self.init_weights() 302 | 303 | def forward(self, f, mask, f_1): 304 | [b, c, h, w] = f.size() 305 | 306 | f = f.view(b, c, -1) 307 | 308 | mask_one_channel = mask.view(b, 1, -1)[0][0] 309 | index_bad = torch.nonzero(1 - mask_one_channel) 310 | 311 | f_global = f[:, :, index_bad] 312 | # print(torch.mean(f_global[0][60]), torch.var(f_global[0][60])) 313 | # print(torch.mean(f_1), torch.var(f_1)) 314 | f_mlp = self.mlp_shared(f_1) 315 | gamma = self.mlp_gamma(f_mlp) 316 | beta = self.mlp_beta(f_mlp) 317 | 318 | f_global = f_global * (1 + gamma) + beta 319 | 320 | # print(torch.mean(f_global[0][60]), torch.var(f_global[0][60])) 321 | f[:, :, index_bad] = f_global 322 | 323 | f = f.view(b, c, h, w) 324 | 325 | return f, torch.mean(gamma), torch.mean(beta) 326 | 327 | class Encoder(BaseNetwork): 328 | def __init__(self, in_channels=3, nf=64, use_spectral_norm=True, init_weights=True): 329 | super(Encoder, self).__init__() 330 | 331 | self.nf = nf 332 | 333 | # 1, 128 334 | self.pc_0 = PartialConv2d(in_channels=in_channels, out_channels=nf, kernel_size=3, stride=2, padding=1) 335 | # 2, 64 336 | self.pc_1 = PartialConv2d(in_channels=nf, out_channels=nf*2, kernel_size=3, stride=2, padding=1) 337 | # 4, 32 338 | self.pc_2 = PartialConv2d(in_channels=nf*2, out_channels=nf*4, kernel_size=3, stride=2, padding=1) 339 | # 8, 16 340 | self.pc_3 = PartialConv2d(in_channels=nf*4, out_channels=nf*8, kernel_size=3, stride=2, padding=1) 341 | # 8, 8 342 | self.pc_4 = PartialConv2d(in_channels=nf*8, out_channels=nf*8, kernel_size=3, stride=2, padding=1) 343 | # 8, 4 344 | self.pc_5 = PartialConv2d(in_channels=nf*8, out_channels=nf*8, kernel_size=3, stride=2, padding=1) 345 | 346 | # Linear 347 | self.fc_mu = nn.Linear(nf * 8 * 4 * 4, 256) 348 | self.fc_var = nn.Linear(nf * 8 * 4 * 4, 256) 349 | 350 | self.relu = nn.LeakyReLU(0.2, False) 351 | self.down = nn.UpsamplingNearest2d(scale_factor=.5) 352 | 353 | if init_weights: 354 | self.init_weights() 355 | 356 | def norm(self, f, mask, nc): 357 | # [b, c, h, w] = f.size() 358 | 359 | # f = f.view(b, c, -1) 360 | 361 | # mask_one_channel = mask.view(b, 1, -1)[0][0] 362 | # index_good = torch.nonzero(mask_one_channel) 363 | 364 | # f_local = f[:, :, index_good] 365 | 366 | # norm = nn.InstanceNorm2d(nc, affine=False) 367 | # f_local = norm(f_local) 368 | 369 | # f[:, :, index_good] = f_local 370 | 371 | # f = f.view(b, c, h, w) 372 | norm = nn.InstanceNorm2d(nc) 373 | f = norm(f) 374 | return f 375 | 376 | 377 | def forward(self, x, mask): 378 | nf = self.nf 379 | 380 | x_0, mask_0 = self.pc_0(x, mask) 381 | x_0 = self.norm(x_0, mask_0, nf) 382 | x_0 = self.relu(x_0) 383 | 384 | mask_c_0 = self.down(mask) 385 | x_0 = x_0 * mask_c_0 386 | 387 | x_1, mask_1 = self.pc_1(x_0, mask_0) 388 | x_1 = self.norm(x_1, mask_1, nf*2) 389 | x_1 = self.relu(x_1) 390 | 391 | mask_c_1 = self.down(mask_c_0) 392 | x_1 = x_1 * mask_c_1 393 | 394 | x_2, mask_2 = self.pc_2(x_1, mask_1) 395 | x_2 = self.norm(x_2, mask_2, nf*2) 396 | x_2 = self.relu(x_2) 397 | 398 | mask_c_2 = self.down(mask_c_1) 399 | x_2 = x_2 * mask_c_2 400 | 401 | x_3, mask_3 = self.pc_3(x_2, mask_2) 402 | x_3 = self.norm(x_3, mask_3, nf*4) 403 | x_3 = self.relu(x_3) 404 | 405 | mask_c_3 = self.down(mask_c_2) 406 | x_3 = x_3 * mask_c_3 407 | 408 | x_4, mask_4 = self.pc_4(x_3, mask_3) 409 | x_4 = self.norm(x_4, mask_4, nf*4) 410 | x_4 = self.relu(x_4) 411 | 412 | mask_c_4 = self.down(mask_c_3) 413 | x_4 = x_4 * mask_c_4 414 | 415 | z, mask_5 = self.pc_5(x_4, mask_4) 416 | z = self.norm(z, mask_5, 
nf*4) 417 | z = self.relu(z) 418 | 419 | o = z.view(z.size(0), -1) 420 | 421 | mu = self.fc_mu(o) 422 | logvar = self.fc_var(o) 423 | 424 | return [x_0, x_1, x_2, x_3, x_4, z], [mask_c_0, mask_c_1, mask_c_2, mask_c_3, mask_c_4], (mu, logvar) 425 | 426 | class MultiscaleDiscriminator(BaseNetwork): 427 | 428 | def __init__(self): 429 | super().__init__() 430 | 431 | self.num_d = 2 432 | 433 | self.d_1 = NLayerDiscriminator() 434 | self.d_2 = NLayerDiscriminator() 435 | 436 | self.init_weights() 437 | 438 | def downsample(self, input): 439 | return F.avg_pool2d(input, kernel_size=3, 440 | stride=2, padding=[1, 1], 441 | count_include_pad=False) 442 | 443 | def forward(self, input): 444 | get_intermediate_features = False 445 | 446 | out_1 = self.d_1(input) 447 | input = self.downsample(input) 448 | out_2 = self.d_2(input) 449 | 450 | return [out_1, out_2] 451 | 452 | class NLayerDiscriminator(BaseNetwork): 453 | 454 | def __init__(self, in_channels=3): 455 | super().__init__() 456 | self.n_layers_D = 4 457 | 458 | kw = 4 459 | padw = int(np.ceil((kw - 1.0) / 2)) 460 | nf = 64 461 | input_nc = in_channels 462 | 463 | sequence = [[nn.Conv2d(input_nc, nf, kernel_size=kw, stride=2, padding=padw), 464 | nn.LeakyReLU(0.2, False)]] 465 | 466 | for n in range(1, self.n_layers_D): 467 | nf_prev = nf 468 | nf = min(nf * 2, 512) 469 | stride = 1 if n == self.n_layers_D - 1 else 2 470 | sequence += [[spectral_norm(nn.Conv2d(nf_prev, nf, kernel_size=kw, 471 | stride=stride, padding=padw)), 472 | nn.LeakyReLU(0.2, False) 473 | ]] 474 | 475 | sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]] 476 | 477 | # We divide the layers into groups to extract intermediate layer outputs 478 | for n in range(len(sequence)): 479 | self.add_module('model' + str(n), nn.Sequential(*sequence[n])) 480 | 481 | def forward(self, input): 482 | results = [input] 483 | for submodel in self.children(): 484 | intermediate_output = submodel(results[-1]) 485 | results.append(intermediate_output) 486 | 487 | get_intermediate_features = False 488 | if get_intermediate_features: 489 | return results[1:] 490 | else: 491 | return torch.sigmoid(results[-1]) 492 | 493 | class Dis_Inn(BaseNetwork): 494 | def __init__(self, in_channels=3, use_sigmoid=True, init_weights=True): 495 | super(Dis_Inn, self).__init__() 496 | self.use_sigmoid = use_sigmoid 497 | 498 | self.conv1 = nn.Sequential( 499 | spectral_norm(nn.Conv2d(in_channels=in_channels, out_channels=64, kernel_size=4, stride=2, padding=1)), 500 | nn.LeakyReLU(0.2, inplace=True), 501 | ) 502 | 503 | self.conv2 = nn.Sequential( 504 | spectral_norm(nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4, stride=2, padding=1)), 505 | nn.LeakyReLU(0.2, inplace=True), 506 | ) 507 | 508 | self.conv3 = nn.Sequential( 509 | spectral_norm(nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4, stride=2, padding=1)), 510 | nn.LeakyReLU(0.2, inplace=True), 511 | ) 512 | 513 | self.conv4 = nn.Sequential( 514 | spectral_norm(nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4, stride=1, padding=1)), 515 | nn.LeakyReLU(0.2, inplace=True), 516 | ) 517 | 518 | self.conv5 = nn.Sequential( 519 | spectral_norm(nn.Conv2d(in_channels=512, out_channels=1, kernel_size=4, stride=1, padding=1)), 520 | ) 521 | 522 | if init_weights: 523 | self.init_weights() 524 | 525 | def forward(self, x): 526 | conv1 = self.conv1(x) 527 | conv2 = self.conv2(conv1) 528 | conv3 = self.conv3(conv2) 529 | conv4 = self.conv4(conv3) 530 | conv5 = self.conv5(conv4) 531 | 532 | outputs = conv5 533 | 
if self.use_sigmoid: 534 | outputs = torch.sigmoid(conv5) 535 | 536 | return outputs 537 | 538 | class _DenseLayer(nn.Sequential): 539 | def __init__(self, num_input_features, growth_rate, bn_size, drop_rate): 540 | super(_DenseLayer, self).__init__() 541 | self.add_module('norm1', nn.BatchNorm2d(num_input_features)), 542 | self.add_module('relu1', nn.ReLU(inplace=True)), 543 | self.add_module('conv1', nn.Conv2d(num_input_features, bn_size*growth_rate, kernel_size=1, stride=1, bias=False)), 544 | self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)), 545 | self.add_module('relu2', nn.ReLU(inplace=True)), 546 | self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,kernel_size=3, stride=1, padding=1, bias=False)), 547 | self.drop_rate = drop_rate 548 | 549 | def forward(self, x): 550 | new_features = super(_DenseLayer, self).forward(x) 551 | if self.drop_rate > 0: 552 | new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) 553 | return torch.cat([x, new_features], 1) 554 | 555 | 556 | class _DenseBlock(nn.Sequential): 557 | def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate): 558 | super(_DenseBlock, self).__init__() 559 | for i in range(num_layers): 560 | layer = _DenseLayer(num_input_features + i * growth_rate, growth_rate, bn_size, drop_rate) 561 | self.add_module('denselayer%d' % (i + 1), layer) 562 | 563 | 564 | class _Transition(nn.Sequential): 565 | def __init__(self, num_input_features, num_output_features): 566 | super(_Transition, self).__init__() 567 | self.add_module('norm', nn.BatchNorm2d(num_input_features)) 568 | self.add_module('relu', nn.ReLU(inplace=True)) 569 | self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, 570 | kernel_size=1, stride=1, bias=False)) 571 | self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) 572 | 573 | class DenseD(nn.Module): 574 | def __init__(self, growth_rate=32, block_config=(3, 3, 3), 575 | num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000): 576 | 577 | super(DenseD, self).__init__() 578 | 579 | # First convolution 580 | self.features = nn.Sequential(OrderedDict([ 581 | ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), 582 | ('norm0', nn.BatchNorm2d(num_init_features)), 583 | ('relu0', nn.ReLU(inplace=True)), 584 | ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)), 585 | ])) 586 | 587 | # Each denseblock 588 | num_features = num_init_features 589 | for i, num_layers in enumerate(block_config): 590 | block = _DenseBlock(num_layers=num_layers, num_input_features=num_features, 591 | bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate) 592 | self.features.add_module('denseblock%d' % (i + 1), block) 593 | num_features = num_features + num_layers * growth_rate 594 | if i != len(block_config) - 1: 595 | trans = _Transition(num_input_features=num_features, num_output_features=num_features // 2) 596 | self.features.add_module('transition%d' % (i + 1), trans) 597 | num_features = num_features // 2 598 | 599 | # Final batch norm 600 | self.features.add_module('norm5', nn.BatchNorm2d(num_features)) 601 | 602 | # Linear layer 603 | self.classifier = nn.Linear(num_features, num_classes) 604 | 605 | # output layer 606 | self.conv3 = nn.Sequential( 607 | spectral_norm(nn.Conv2d(in_channels=128, out_channels=1, kernel_size=4, stride=1, padding=1, bias=False)), 608 | nn.LeakyReLU(0.2, inplace=True), 609 | ) 610 | 611 | # Official init from torch repo. 
612 | for m in self.modules(): 613 | if isinstance(m, nn.Conv2d): 614 | nn.init.kaiming_normal_(m.weight.data) 615 | elif isinstance(m, nn.BatchNorm2d): 616 | m.weight.data.fill_(1) 617 | m.bias.data.zero_() 618 | elif isinstance(m, nn.Linear): 619 | m.bias.data.zero_() 620 | 621 | def forward(self, x): 622 | features = self.features(x) 623 | out = F.relu(features, inplace=True) 624 | out = F.avg_pool2d(out, kernel_size=7, stride=1).view(features.size(0), -1) 625 | out = torch.sigmoid(out) 626 | return out 627 | 628 | 629 | class PatchDiscriminator(BaseNetwork): 630 | def __init__(self): 631 | super(PatchDiscriminator, self).__init__() 632 | # Down sampling 633 | in_channels = 3 634 | latent_channels = 64 635 | pad_type = 'zero' 636 | activation = 'lrelu' 637 | norm = 'none' 638 | 639 | self.block1 = Conv2dLayer(in_channels, latent_channels, 7, 1, 3, pad_type = pad_type, activation = activation, norm = norm, sn = True) 640 | self.block2 = Conv2dLayer(latent_channels, latent_channels * 2, 4, 2, 1, pad_type = pad_type, activation = activation, norm = norm, sn = True) 641 | self.block3 = Conv2dLayer(latent_channels * 2, latent_channels * 4, 4, 2, 1, pad_type = pad_type, activation = activation, norm = norm, sn = True) 642 | self.block4 = Conv2dLayer(latent_channels * 4, latent_channels * 4, 4, 2, 1, pad_type = pad_type, activation = activation, norm = norm, sn = True) 643 | self.block5 = Conv2dLayer(latent_channels * 4, latent_channels * 4, 4, 2, 1, pad_type = pad_type, activation = activation, norm = norm, sn = True) 644 | self.block6 = Conv2dLayer(latent_channels * 4, 1, 4, 2, 1, pad_type = pad_type, activation = 'none', norm = 'none', sn = True) 645 | 646 | self.init_weights() 647 | 648 | def forward(self, img): 649 | # the input x should contain 4 channels because it is a combination of recon image and mask 650 | x = img 651 | x = self.block1(x) # out: [B, 64, 256, 256] 652 | x = self.block2(x) # out: [B, 128, 128, 128] 653 | x = self.block3(x) # out: [B, 256, 64, 64] 654 | x = self.block4(x) # out: [B, 256, 32, 32] 655 | x = self.block5(x) # out: [B, 256, 16, 16] 656 | x = self.block6(x) # out: [B, 256, 8, 8] 657 | x = torch.sigmoid(x) 658 | return x 659 | 660 | 661 | class Conv2dLayer(nn.Module): 662 | def __init__(self, in_channels, out_channels, kernel_size, stride = 1, padding = 0, dilation = 1, pad_type = 'zero', activation = 'elu', norm = 'none', sn = False): 663 | super(Conv2dLayer, self).__init__() 664 | # Initialize the padding scheme 665 | if pad_type == 'reflect': 666 | self.pad = nn.ReflectionPad2d(padding) 667 | elif pad_type == 'replicate': 668 | self.pad = nn.ReplicationPad2d(padding) 669 | elif pad_type == 'zero': 670 | self.pad = nn.ZeroPad2d(padding) 671 | else: 672 | assert 0, "Unsupported padding type: {}".format(pad_type) 673 | 674 | # Initialize the normalization type 675 | if norm == 'bn': 676 | self.norm = nn.BatchNorm2d(out_channels) 677 | elif norm == 'in': 678 | self.norm = nn.InstanceNorm2d(out_channels) 679 | elif norm == 'ln': 680 | self.norm = LayerNorm(out_channels) 681 | elif norm == 'none': 682 | self.norm = None 683 | else: 684 | assert 0, "Unsupported normalization: {}".format(norm) 685 | 686 | # Initialize the activation funtion 687 | if activation == 'relu': 688 | self.activation = nn.ReLU(inplace = True) 689 | elif activation == 'lrelu': 690 | self.activation = nn.LeakyReLU(0.2, inplace = True) 691 | elif activation == 'elu': 692 | self.activation = nn.ELU(inplace=True) 693 | elif activation == 'selu': 694 | self.activation = nn.SELU(inplace = True) 
695 | elif activation == 'tanh': 696 | self.activation = nn.Tanh() 697 | elif activation == 'sigmoid': 698 | self.activation = nn.Sigmoid() 699 | elif activation == 'none': 700 | self.activation = None 701 | else: 702 | assert 0, "Unsupported activation: {}".format(activation) 703 | 704 | # Initialize the convolution layers 705 | if sn: 706 | self.conv2d = spectral_norm(nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding = 0, dilation = dilation)) 707 | else: 708 | self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding = 0, dilation = dilation) 709 | 710 | def forward(self, x): 711 | x = self.pad(x) 712 | x = self.conv2d(x) 713 | if self.norm: 714 | x = self.norm(x) 715 | if self.activation: 716 | x = self.activation(x) 717 | return x --------------------------------------------------------------------------------
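
A minimal usage sketch for the networks defined in /src/model/networks.py above; this is not part of the repository. It assumes a 256x256 input (the Generator's latent grid is fixed at 8x8 via `self.sw, self.sh = 8, 8` and the Encoder applies five stride-2 partial convolutions), a single-channel binary mask with 1 marking known pixels and 0 marking holes (inferred from `Encoder.forward`, which multiplies features by the nearest-downsampled mask), and that `PartialConv2d` and `PatchTransmit` accept and return what `networks.py` expects. The import path `src.model.networks` and the hole-merging step are likewise assumptions for illustration.

import torch
from src.model.networks import Generator, MultiscaleDiscriminator  # assumed package layout

# Build the inpainting generator and the two-scale patch discriminator.
gen = Generator()
dis = MultiscaleDiscriminator()

# Dummy 256x256 RGB image in [0, 1] and a binary mask (assumed: 1 = known, 0 = hole).
image = torch.rand(1, 3, 256, 256)
mask = (torch.rand(1, 1, 256, 256) > 0.4).float()
masked_image = image * mask  # the Generator is fed the already-masked image

# Generator.forward returns the prediction in [0, 1] (via (tanh + 1) / 2),
# the (mu, logvar) pair from the Encoder, and a placeholder None.
output, (mu, logvar), _ = gen(masked_image, mask)

# Keep known pixels and paste the prediction into the holes only (assumed convention).
merged = output * (1 - mask) + image * mask

# MultiscaleDiscriminator returns one patch-level score map per scale.
scores = dis(merged)
print(output.shape, [s.shape for s in scores])

If the repository's `PartialConv2d` or `PatchTransmit` differ from these assumptions (for example in their return signatures), the call into `gen(...)` is the only line that would need adjusting; the rest of the sketch only relies on the shapes produced by the code shown above.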