├── Adapt_CAAE
│   ├── ResLearn.py
│   ├── checkGPU.py
│   ├── data
│   │   └── UTKFace
│   │       ├── 100_1_2_20170112213615815.jpg.chip.jpg
│   │       ├── 103_0_2_20170112213001988.jpg.chip.jpg
│   │       ├── 10_0_0_20170110225557604.jpg.chip.jpg
│   │       ├── 110_1_0_20170120134701015.jpg.chip.jpg
│   │       ├── 116_1_3_20170120134744096.jpg.chip.jpg
│   │       ├── 11_0_1_20170103201136230.jpg.chip.jpg
│   │       ├── 12_0_4_20170103201824880.jpg.chip.jpg
│   │       ├── 13_0_0_20170110232526929.jpg.chip.jpg
│   │       ├── 14_1_3_20170104221818294.jpg.chip.jpg
│   │       ├── 15_0_0_20170110232443234.jpg.chip.jpg
│   │       ├── 16_0_0_20170110232605131.jpg.chip.jpg
│   │       ├── 17_1_0_20170109201558943.jpg.chip.jpg
│   │       ├── 18_1_0_20170109213451167.jpg.chip.jpg
│   │       ├── 19_1_0_20170116163837603.jpg.chip.jpg
│   │       ├── 1_0_0_20161219140623097.jpg.chip.jpg
│   │       ├── 20_0_0_20170105161706251.jpg.chip.jpg
│   │       ├── 21_0_3_20170119153824472.jpg.chip.jpg
│   │       ├── 2_1_2_20161219205100213.jpg.chip.jpg
│   │       ├── 3_0_2_20161219210918485.jpg.chip.jpg
│   │       ├── 43_0_0_20170104183504030.jpg.chip.jpg
│   │       ├── 4_0_0_20170110213121589.jpg.chip.jpg
│   │       ├── 58_0_0_20170120224028072.jpg.chip.jpg
│   │       ├── 5_1_0_20170109194005140.jpg.chip.jpg
│   │       ├── 61_0_0_20170117174637422.jpg.chip.jpg
│   │       ├── 6_1_3_20170104222204751.jpg.chip.jpg
│   │       ├── 78_0_0_20170111204704023.jpg.chip.jpg
│   │       ├── 7_1_1_20170116225209209.jpg.chip.jpg
│   │       ├── 8_0_1_20170110220106378.jpg.chip.jpg
│   │       └── 9_0_0_20170110224240532.jpg.chip.jpg
│   ├── main.py
│   └── ops.py
├── Adapt_Pix2Pix
│   ├── checkGPU.py
│   ├── datasets
│   │   └── facades
│   │       ├── test
│   │       │   ├── 1.jpg
│   │       │   ├── 2.jpg
│   │       │   ├── 3.jpg
│   │       │   ├── 4.jpg
│   │       │   └── 5.jpg
│   │       ├── train
│   │       │   ├── 1.jpg
│   │       │   ├── 2.jpg
│   │       │   ├── 3.jpg
│   │       │   ├── 4.jpg
│   │       │   └── 5.jpg
│   │       └── val
│   │           ├── 1.jpg
│   │           ├── 2.jpg
│   │           ├── 3.jpg
│   │           ├── 4.jpg
│   │           └── 5.jpg
│   ├── download_dataset.sh
│   ├── main.py
│   ├── model.py
│   ├── ops.py
│   └── utils.py
├── NRDS
│   ├── NRDS.m
│   ├── NRDS.py
│   ├── checkGPU.py
│   ├── main.py
│   ├── ops.py
│   ├── results
│   │   ├── adv1
│   │   │   ├── 50.png101.jpg
│   │   │   ├── 50.png1010.jpg
│   │   │   ├── 50.png102.jpg
│   │   │   ├── 50.png103.jpg
│   │   │   ├── 50.png104.jpg
│   │   │   ├── 50.png105.jpg
│   │   │   ├── 50.png106.jpg
│   │   │   ├── 50.png107.jpg
│   │   │   ├── 50.png108.jpg
│   │   │   ├── 50.png109.jpg
│   │   │   ├── 50.png11.jpg
│   │   │   ├── 50.png110.jpg
│   │   │   ├── 50.png12.jpg
│   │   │   ├── 50.png13.jpg
│   │   │   ├── 50.png14.jpg
│   │   │   ├── 50.png15.jpg
│   │   │   ├── 50.png16.jpg
│   │   │   ├── 50.png17.jpg
│   │   │   ├── 50.png18.jpg
│   │   │   ├── 50.png19.jpg
│   │   │   ├── 50.png21.jpg
│   │   │   ├── 50.png210.jpg
│   │   │   ├── 50.png22.jpg
│   │   │   ├── 50.png23.jpg
│   │   │   ├── 50.png24.jpg
│   │   │   ├── 50.png25.jpg
│   │   │   ├── 50.png26.jpg
│   │   │   ├── 50.png27.jpg
│   │   │   ├── 50.png28.jpg
│   │   │   ├── 50.png29.jpg
│   │   │   ├── 50.png31.jpg
│   │   │   ├── 50.png310.jpg
│   │   │   ├── 50.png32.jpg
│   │   │   ├── 50.png33.jpg
│   │   │   ├── 50.png34.jpg
│   │   │   ├── 50.png35.jpg
│   │   │   ├── 50.png36.jpg
│   │   │   ├── 50.png37.jpg
│   │   │   ├── 50.png38.jpg
│   │   │   ├── 50.png39.jpg
│   │   │   ├── 50.png41.jpg
│   │   │   ├── 50.png410.jpg
│   │   │   ├── 50.png42.jpg
│   │   │   ├── 50.png43.jpg
│   │   │   ├── 50.png44.jpg
│   │   │   ├── 50.png45.jpg
│   │   │   ├── 50.png46.jpg
│   │   │   ├── 50.png47.jpg
│   │   │   ├── 50.png48.jpg
│   │   │   ├── 50.png49.jpg
│   │   │   ├── 50.png51.jpg
│   │   │   ├── 50.png510.jpg
│   │   │   ├── 50.png52.jpg
│   │   │   ├── 50.png53.jpg
│   │   │   ├── 50.png54.jpg
│   │   │   ├── 50.png55.jpg
│   │   │   ├── 50.png56.jpg
│   │   │   ├── 50.png57.jpg
│   │   │   ├── 50.png58.jpg
│   │   │   ├── 50.png59.jpg
│   │   │   ├── 50.png61.jpg
│   │   │   ├── 50.png610.jpg
│   │   │   ├── 50.png62.jpg
│   │   │   ├── 50.png63.jpg
│   │   │   ├── 50.png64.jpg
│   │   │   ├── 50.png65.jpg
│   │   │   ├── 50.png66.jpg
│   │   │   ├── 50.png67.jpg
│   │   │   ├── 50.png68.jpg
│   │   │   ├── 50.png69.jpg
│   │   │   ├── 50.png71.jpg
│   │   │   ├── 50.png710.jpg
│   │   │   ├── 50.png72.jpg
│   │   │   ├── 50.png73.jpg
│   │   │   ├── 50.png74.jpg
│   │   │   ├── 50.png75.jpg
│   │   │   ├── 50.png76.jpg
│   │   │   ├── 50.png77.jpg
│   │   │   ├── 50.png78.jpg
│   │   │   ├── 50.png79.jpg
│   │   │   ├── 50.png81.jpg
│   │   │   ├── 50.png810.jpg
│   │   │   ├── 50.png82.jpg
│   │   │   ├── 50.png83.jpg
│   │   │   ├── 50.png84.jpg
│   │   │   ├── 50.png85.jpg
│   │   │   ├── 50.png86.jpg
│   │   │   ├── 50.png87.jpg
│   │   │   ├── 50.png88.jpg
│   │   │   ├── 50.png89.jpg
│   │   │   ├── 50.png91.jpg
│   │   │   ├── 50.png910.jpg
│   │   │   ├── 50.png92.jpg
│   │   │   ├── 50.png93.jpg
│   │   │   ├── 50.png94.jpg
│   │   │   ├── 50.png95.jpg
│   │   │   ├── 50.png96.jpg
│   │   │   ├── 50.png97.jpg
│   │   │   ├── 50.png98.jpg
│   │   │   └── 50.png99.jpg
│   │   ├── adv1e2
│   │   │   ├── 50.png11.jpg
│   │   │   ├── 50.png12.jpg
│   │   │   ├── 50.png13.jpg
│   │   │   ├── 50.png14.jpg
│   │   │   ├── 50.png15.jpg
│   │   │   ├── 50.png16.jpg
│   │   │   ├── 50.png17.jpg
│   │   │   ├── 50.png18.jpg
│   │   │   ├── 50.png19.jpg
│   │   │   └── 50.png21.jpg
│   │   ├── adv1e3
│   │   │   ├── 50.png11.jpg
│   │   │   ├── 50.png12.jpg
│   │   │   ├── 50.png13.jpg
│   │   │   ├── 50.png14.jpg
│   │   │   ├── 50.png15.jpg
│   │   │   ├── 50.png16.jpg
│   │   │   ├── 50.png17.jpg
│   │   │   ├── 50.png18.jpg
│   │   │   ├── 50.png19.jpg
│   │   │   └── 50.png21.jpg
│   │   ├── adv1e4
│   │   │   ├── 50.png11.jpg
│   │   │   ├── 50.png12.jpg
│   │   │   ├── 50.png13.jpg
│   │   │   ├── 50.png14.jpg
│   │   │   ├── 50.png15.jpg
│   │   │   ├── 50.png16.jpg
│   │   │   ├── 50.png17.jpg
│   │   │   ├── 50.png18.jpg
│   │   │   ├── 50.png19.jpg
│   │   │   └── 50.png21.jpg
│   │   ├── real
│   │   │   ├── input.png1.jpg
│   │   │   ├── input.png10.jpg
│   │   │   ├── input.png2.jpg
│   │   │   ├── input.png3.jpg
│   │   │   ├── input.png4.jpg
│   │   │   ├── input.png5.jpg
│   │   │   ├── input.png6.jpg
│   │   │   ├── input.png7.jpg
│   │   │   ├── input.png8.jpg
│   │   │   └── input.png9.jpg
│   │   └── t
│   └── save
│       └── mat
│           ├── 001_010.mat
│           ├── 002_010.mat
│           ├── 003_010.mat
│           ├── 004_010.mat
│           ├── 005_010.mat
│           ├── 006_010.mat
│           ├── 007_010.mat
│           ├── 008_010.mat
│           ├── 009_010.mat
│           ├── 010_010.mat
│           ├── 011_010.mat
│           ├── 012_010.mat
│           ├── 013_010.mat
│           ├── 014_010.mat
│           ├── 015_010.mat
│           ├── 016_010.mat
│           ├── 017_010.mat
│           ├── 018_010.mat
│           ├── 019_010.mat
│           └── 020_010.mat
└── README.md

--------------------------------------------------------------------------------
/Adapt_CAAE/ResLearn.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | import os
3 | import time
4 | from glob import glob
5 | import tensorflow as tf
6 | import numpy as np
7 | from scipy.io import savemat
8 | from ops import *
9 | 
10 | 
11 | class ResLearn(object):
12 |     def __init__(self,
13 |                  session,  # TensorFlow session
14 |                  size_image=128,  # size of the input images
15 |                  size_kernel=5,  # size of the kernels in convolution and deconvolution
16 |                  size_batch=100,  # mini-batch size for training and testing, must be the square of an integer
17 |                  num_input_channels=3,  # number of channels of input images
18 |                  num_encoder_channels=64,  # number of channels of the first conv layer of encoder
19 |                  num_z_channels=50,  # number of channels of the layer z (noise or code)
20 |                  num_categories=10,  # number of categories (age segments) in the training dataset
21 |                  num_gen_channels=1024,  # number of channels of the first deconv layer of generator
22 |                  enable_tile_label=True,  # enable to tile the label
23 |                  tile_ratio=1.0,  # ratio of the length between tiled label and z
24 |                  is_training=True,  # flag for training or testing mode
25 |                  enable_bn=True,  # enable batch normalization
26 |                  save_dir='./save',  # path to save checkpoints, samples, and summary
27 |                  dataset_name='UTKFace'  # name of the dataset in the folder ./data
28 |                  ):
29 | 
30 |         self.session = session
31 |         self.image_value_range = (-1, 1)
32 |         self.size_image = size_image
33 |         self.size_kernel = size_kernel
34 |         self.size_batch = size_batch
35 |         self.num_input_channels = num_input_channels
36 |         self.num_encoder_channels = num_encoder_channels
37 |         self.num_z_channels = num_z_channels
38 |         self.num_categories = num_categories
39 |         self.num_gen_channels = num_gen_channels
40 |         self.enable_tile_label = enable_tile_label
41 |         self.tile_ratio = tile_ratio
42 |         self.is_training = is_training
43 |         self.enable_bn = enable_bn
44 |         self.save_dir = save_dir
45 |         self.dataset_name = dataset_name
46 | 
47 |         # ************************************* input to graph ********************************************************
48 |         self.input_image = tf.placeholder(
49 |             tf.float32,
50 |             [self.size_batch, self.size_image, self.size_image, self.num_input_channels],
51 |             name='input_images'
52 |         )
53 |         self.age = tf.placeholder(
54 |             tf.float32,
55 |             [self.size_batch, self.num_categories],
56 |             name='age_labels'
57 |         )
58 |         self.gender = tf.placeholder(
59 |             tf.float32,
60 |             [self.size_batch, 2],
61 |             name='gender_labels'
62 |         )
63 |         self.z_prior = tf.placeholder(
64 |             tf.float32,
65 |             [self.size_batch, self.num_z_channels],
66 |             name='z_prior'
67 |         )
68 |         self.params = tf.placeholder(
69 |             tf.float32,
70 |             [2],
71 |             name='params'
72 |         )
73 |         # ************************************* build the graph *******************************************************
74 |         print '\n\tBuilding graph ...'
75 |         # encoder: input image --> z
76 |         self.z = self.encoder(
77 |             image=self.input_image,
78 |             name='lowBand_E',
79 |             is_training=self.is_training,
80 |             enable_bn=self.enable_bn
81 |         )
82 | 
83 |         # generator: z + label --> generated image
84 |         self.G = self.generator(
85 |             z=self.z,
86 |             y=self.age,
87 |             gender=self.gender,
88 |             enable_tile_label=self.enable_tile_label,
89 |             tile_ratio=self.tile_ratio,
90 |             name='lowBand_G',
91 |             is_training=self.is_training,
92 |             enable_bn=self.enable_bn
93 |         )
94 |         self.G_res = self.generator(
95 |             z=self.z,  # self.z_res
96 |             y=self.age,
97 |             gender=self.gender,
98 |             enable_tile_label=self.enable_tile_label,
99 |             tile_ratio=self.tile_ratio,
100 |             name='highBand_G',
101 |             enable_bn=True,
102 |             is_training=self.is_training
103 |         )
104 | 
105 |         # discriminator on z
106 |         self.D_z, self.D_z_logits = self.discriminator_z(
107 |             z=self.z,
108 |             is_training=self.is_training
109 |         )
110 | 
111 |         # discriminator on G
112 |         self.D_G, self.D_G_logits = self.discriminator_img(
113 |             image=self.G + self.G_res,
114 |             y=self.age,
115 |             gender=self.gender,
116 |             is_training=self.is_training,
117 |             enable_bn=True
118 |         )
119 | 
120 |         # discriminator on z_prior
121 |         self.D_z_prior, self.D_z_prior_logits = self.discriminator_z(
122 |             z=self.z_prior,
123 |             is_training=self.is_training,
124 |             reuse_variables=True
125 |         )
126 | 
127 |         # discriminator on input image
128 |         self.D_input, self.D_input_logits = self.discriminator_img(
129 |             image=self.input_image,
130 |             y=self.age,
131 |             gender=self.gender,
132 |             is_training=self.is_training,
133 |             reuse_variables=True,
134 |             enable_bn=True
135 |         )
136 | 
137 |         # ************************************* loss functions *******************************************************
138 |         # loss function of encoder + generator
139 |         #self.EG_loss = tf.nn.l2_loss(self.input_image - self.G) / self.size_batch  # L2 loss
140 |         self.EG_loss = tf.reduce_mean(tf.abs(self.input_image - self.G))  # L1 loss
141 |         self.res_loss = tf.reduce_mean(tf.abs(self.input_image - (self.G + self.G_res * self.params[0])))
142 | 
143 |         # loss function of discriminator on z
144 |         self.D_z_loss_prior = tf.reduce_mean(
145 |             tf.nn.sigmoid_cross_entropy_with_logits(self.D_z_prior_logits, tf.ones_like(self.D_z_prior_logits))
146 |         )
147 |         self.D_z_loss_z = tf.reduce_mean(
148 |             tf.nn.sigmoid_cross_entropy_with_logits(self.D_z_logits, tf.zeros_like(self.D_z_logits))
149 |         )
150 |         self.E_z_loss = tf.reduce_mean(
151 |             tf.nn.sigmoid_cross_entropy_with_logits(self.D_z_logits, tf.ones_like(self.D_z_logits))
152 |         )
153 |         # self.D_z_loss_prior = .5 * tf.reduce_mean((self.D_z_prior_logits - 1) ** 2)
154 |         # self.D_z_loss_z = .5 * tf.reduce_mean(self.D_z_logits ** 2)
155 |         # self.E_z_loss = .5 * tf.reduce_mean((self.D_z_logits - 1) ** 2)
156 | 
157 |         # loss function of discriminator on image
158 |         self.D_img_loss_input = tf.reduce_mean(
159 |             tf.nn.sigmoid_cross_entropy_with_logits(self.D_input_logits, tf.ones_like(self.D_input_logits))
160 |         )
161 |         self.D_img_loss_G = tf.reduce_mean(
162 |             tf.nn.sigmoid_cross_entropy_with_logits(self.D_G_logits, tf.zeros_like(self.D_G_logits))
163 |         )
164 |         self.G_img_loss = tf.reduce_mean(
165 |             tf.nn.sigmoid_cross_entropy_with_logits(self.D_G_logits, tf.ones_like(self.D_G_logits))
166 |         )
167 |         # self.D_img_loss_input = .5 * tf.reduce_mean((self.D_input_logits - 1) ** 2)
168 |         # self.D_img_loss_G = .5 * tf.reduce_mean(self.D_G_logits ** 2)
169 |         # self.G_img_loss = .5 * tf.reduce_mean((self.D_G_logits - 1) ** 2)
170 | 
171 |         # *********************************** trainable variables ****************************************************
172 |         trainable_variables = tf.trainable_variables()
173 |         # variables of encoder
174 |         self.E_variables = [var for var in trainable_variables if 'lowBand_E' in var.name]
175 |         # variables of generator
176 |         self.G_variables = [var for var in trainable_variables if 'lowBand_G' in var.name]
177 |         self.G_res_variables = [var for var in trainable_variables if 'highBand_G' in var.name]
178 |         # variables of discriminator on z
179 |         self.D_z_variables = [var for var in trainable_variables if 'D_z_' in var.name]
180 |         # variables of discriminator on image
181 |         self.D_img_variables = [var for var in trainable_variables if 'D_img_' in var.name]
182 | 
183 |         # ************************************* collect the summary ***************************************
184 |         self.z_summary = tf.summary.histogram('z', self.z)
185 |         self.z_prior_summary = tf.summary.histogram('z_prior', self.z_prior)
186 |         self.EG_loss_summary = tf.summary.scalar('EG_loss', self.EG_loss)
187 |         self.res_loss_summary = tf.summary.scalar('res_loss', self.res_loss)
188 |         self.D_z_loss_z_summary = tf.summary.scalar('D_z_loss_z', self.D_z_loss_z)
189 |         self.D_z_loss_prior_summary = tf.summary.scalar('D_z_loss_prior', self.D_z_loss_prior)
190 |         self.E_z_loss_summary = tf.summary.scalar('E_z_loss', self.E_z_loss)
191 |         self.D_z_logits_summary = tf.summary.histogram('D_z_logits', self.D_z_logits)
192 |         self.D_z_prior_logits_summary = tf.summary.histogram('D_z_prior_logits', self.D_z_prior_logits)
193 |         self.D_img_loss_input_summary = tf.summary.scalar('D_img_loss_input', self.D_img_loss_input)
194 |         self.D_img_loss_G_summary = tf.summary.scalar('D_img_loss_G', self.D_img_loss_G)
195 |         self.G_img_loss_summary = tf.summary.scalar('G_img_loss', self.G_img_loss)
196 |         self.D_G_logits_summary = tf.summary.histogram('D_G_logits', self.D_G_logits)
197 |         self.D_input_logits_summary = tf.summary.histogram('D_input_logits', self.D_input_logits)
198 |         # for saving the graph and variables
199 |         self.saver = tf.train.Saver(max_to_keep=2)
200 | 
201 |     def train(self,
202 |               num_epochs=200,  # number of epochs
203 |               learning_rate=0.0002,  # learning rate of optimizer
204 |               beta1=0.5,  # parameter for Adam optimizer
205 |               decay_rate=1.0,  # learning rate decay (0, 1], 1 means no decay
206 |               enable_shuffle=True,  # enable shuffle of the dataset
207 |               use_trained_model=True,  # use the saved checkpoint to initialize the model
208 |               params=(1.0, 1.0)  # expected to be positive floats; any other value triggers auto setting
209 |               ):
210 | 
211 |         # *************************** load file names of images ******************************************************
212 |         file_names = glob(os.path.join('./data', self.dataset_name, '*.jpg'))
213 |         size_data = len(file_names)
214 |         np.random.seed(seed=1234)
215 |         if enable_shuffle:
216 |             np.random.shuffle(file_names)
217 |         file_names = file_names[:5000]
218 |         # *********************************** optimizer **************************************************************
219 |         # overall, there are three loss functions; weights may differ from the paper because of different datasets
220 |         self.loss_EG = self.EG_loss
221 |         self.loss_res = self.G_img_loss * self.params[0]
222 |         self.loss_Dz = self.D_z_loss_prior + self.D_z_loss_z
223 |         self.loss_Di = self.D_img_loss_input + self.D_img_loss_G
224 | 
225 |         # set learning rate decay
226 |         self.EG_global_step = tf.Variable(0, trainable=False, name='global_step')
227 |         EG_learning_rate = tf.train.exponential_decay(
228 |             learning_rate=learning_rate,
229 |             global_step=self.EG_global_step,
230 |             decay_steps=size_data / self.size_batch * 2,
231 |             decay_rate=decay_rate,
232 |             staircase=True
233 |         )
234 | 
235 |         # optimizer for encoder + generator
236 |         self.EG_optimizer = tf.train.AdamOptimizer(
237 |             learning_rate=EG_learning_rate,
238 |             beta1=beta1
239 |         ).minimize(
240 |             loss=self.loss_EG,
241 |             global_step=self.EG_global_step,
242 |             var_list=self.E_variables + self.G_variables
243 |         )
244 |         self.res_optimizer = tf.train.AdamOptimizer(
245 |             learning_rate=EG_learning_rate,
246 |             beta1=beta1
247 |         ).minimize(
248 |             loss=self.loss_res,
249 |             global_step=self.EG_global_step,
250 |             var_list=self.G_res_variables
251 |         )
252 | 
253 |         # optimizer for discriminator on z
254 |         self.D_z_optimizer = tf.train.AdamOptimizer(
255 |             learning_rate=EG_learning_rate,
256 |             beta1=beta1
257 |         ).minimize(
258 |             loss=self.loss_Dz,
259 |             var_list=self.D_z_variables
260 |         )
261 |         self.E_z_optimizer = tf.train.AdamOptimizer(
262 |             learning_rate=EG_learning_rate,
263 |             beta1=beta1
264 |         ).minimize(
265 |             loss=self.E_z_loss,
266 |             var_list=self.E_variables
267 |         )
268 | 
269 |         # optimizer for discriminator on image
270 |         self.D_img_optimizer = tf.train.AdamOptimizer(
271 |             learning_rate=EG_learning_rate,
272 |             beta1=beta1
273 |         ).minimize(
274 |             loss=self.loss_Di,
275 |             var_list=self.D_img_variables
276 |         )
277 | 
278 |         # *********************************** tensorboard *************************************************************
279 |         # for visualization (TensorBoard): $ tensorboard --logdir path/to/log-directory
280 |         self.EG_learning_rate_summary = tf.summary.scalar('EG_learning_rate', EG_learning_rate)
281 |         self.param0_summary = tf.summary.scalar('param0', self.params[0])
282 |         self.param1_summary = tf.summary.scalar('param1', self.params[1])
283 |         self.summary = tf.summary.merge([
284 |             self.z_summary, self.z_prior_summary,
285 |             self.D_z_loss_z_summary, self.D_z_loss_prior_summary,
286 |             self.D_z_logits_summary, self.D_z_prior_logits_summary,
287 |             self.EG_loss_summary, self.E_z_loss_summary, self.res_loss_summary,
288 |             self.D_img_loss_input_summary, self.D_img_loss_G_summary,
289 |             self.G_img_loss_summary, self.EG_learning_rate_summary,
290 |             self.D_G_logits_summary, self.D_input_logits_summary,
291 |             self.param0_summary, self.param1_summary
292 |         ])
293 |         self.writer = tf.summary.FileWriter(os.path.join(self.save_dir, 'summary'), self.session.graph)
294 | 
295 |         # ************* get some random samples as testing data to visualize the learning process *********************
296 |         sample_files = file_names[0:self.size_batch]
297 |         # file_names[0:self.size_batch] = []
298 |         sample = [load_image(
299 |             image_path=sample_file,
300 |             image_size=self.size_image,
301 |             image_value_range=self.image_value_range,
302 |             is_gray=(self.num_input_channels == 1),
303 |         ) for sample_file in sample_files]
304 |         if self.num_input_channels == 1:
305 |             sample_images = np.array(sample).astype(np.float32)[:, :, :, None]
306 |         else:
307 |             sample_images = np.array(sample).astype(np.float32)
308 |         sample_label_age = np.ones(
309 |             shape=(len(sample_files), self.num_categories),
310 |             dtype=np.float32
311 |         ) * self.image_value_range[0]
312 |         sample_label_gender = np.ones(
313 |             shape=(len(sample_files), 2),
314 |             dtype=np.float32
315 |         ) * self.image_value_range[0]
316 |         for i, label in enumerate(sample_files):
317 |             label = int(str(sample_files[i]).split('/')[-1].split('_')[0])
318 |             if 0 <= label <= 5:
319 |                 label = 0
320 |             elif 6 <= label <= 10:
321 |                 label = 1
322 |             elif 11 <= label <= 15:
323 |                 label = 2
324 |             elif 16 <= label <= 20:
325 |                 label = 3
326 |             elif 21 <= label <= 30:
327 |                 label = 4
328 |             elif 31 <= label <= 40:
329 |                 label = 5
330 |             elif 41 <= label <= 50:
331 |                 label = 6
332 |             elif 51 <= label <= 60:
333 |                 label = 7
334 |             elif 61 <= label <= 70:
335 |                 label = 8
336 |             else:
337 |                 label = 9
338 |             sample_label_age[i, label] = self.image_value_range[-1]
339 |             gender = int(str(sample_files[i]).split('/')[-1].split('_')[1])
340 |             sample_label_gender[i, gender] = self.image_value_range[-1]
341 | 
342 |         # ******************************************* training *******************************************************
343 |         print '\n\tPreparing for training ...'
344 | 
345 |         # initialize the graph
346 |         tf.global_variables_initializer().run()
347 | 
348 |         # load checkpoint
349 |         if use_trained_model:
350 |             if self.load_checkpoint():
351 |                 print("\tSUCCESS ^_^")
352 |             else:
353 |                 print("\tFAILED >_<")
354 | 
355 |         # epoch iteration
356 |         num_batches = len(file_names) // self.size_batch
357 |         # params must be positive floats; any other value turns on auto setting
358 |         params = list(params)
359 |         flag_auto_setting = [False, False]
360 |         for i in range(len(params)):
361 |             try:
362 |                 params[i] = float(params[i])
363 |                 assert params[i] >= 0
364 |             except:
365 |                 params[i] = 0
366 |                 flag_auto_setting[i] = True
367 | 
368 |         for epoch in range(num_epochs):
369 |             if enable_shuffle:
370 |                 np.random.shuffle(file_names)
371 |             for ind_batch in range(num_batches):
372 |                 start_time = time.time()
373 |                 # read batch images and labels
374 |                 batch_files = file_names[ind_batch*self.size_batch:(ind_batch+1)*self.size_batch]
375 |                 batch = [load_image(
376 |                     image_path=batch_file,
377 |                     image_size=self.size_image,
378 |                     image_value_range=self.image_value_range,
379 |                     is_gray=(self.num_input_channels == 1),
380 |                 ) for batch_file in batch_files]
381 |                 if self.num_input_channels == 1:
382 |                     batch_images = np.array(batch).astype(np.float32)[:, :, :, None]
383 |                 else:
384 |                     batch_images = np.array(batch).astype(np.float32)
385 |                 batch_label_age = np.ones(
386 |                     shape=(len(batch_files), self.num_categories),
387 |                     dtype=np.float32
388 |                 ) * self.image_value_range[0]
389 |                 batch_label_gender = np.ones(
390 |                     shape=(len(batch_files), 2),
391 |                     dtype=np.float32
392 |                 ) * self.image_value_range[0]
393 |                 for i, label in enumerate(batch_files):
394 |                     label = int(str(batch_files[i]).split('/')[-1].split('_')[0])
395 |                     if 0 <= label <= 5:
396 |                         label = 0
397 |                     elif 6 <= label <= 10:
398 |                         label = 1
399 |                     elif 11 <= label <= 15:
400 |                         label = 2
401 |                     elif 16 <= label <= 20:
402 |                         label = 3
403 |                     elif 21 <= label <= 30:
404 |                         label = 4
405 |                     elif 31 <= label <= 40:
406 |                         label = 5
407 |                     elif 41 <= label <= 50:
408 |                         label = 6
409 |                     elif 51 <= label <= 60:
410 |                         label = 7
411 |                     elif 61 <= label <= 70:
412 |                         label = 8
413 |                     else:
414 |                         label = 9
415 |                     batch_label_age[i, label] = self.image_value_range[-1]
416 |                     gender = int(str(batch_files[i]).split('/')[-1].split('_')[1])
417 |                     batch_label_gender[i, gender] = self.image_value_range[-1]
418 | 
419 |                 # sample from the prior distribution of z
420 |                 batch_z_prior = np.random.uniform(
421 |                     self.image_value_range[0],
422 |                     self.image_value_range[-1],
423 |                     [self.size_batch, self.num_z_channels]
424 |                 ).astype(np.float32)
425 | 
426 |                 # update
427 |                 _, _, _, _, EG_err, res_err, Ez_err, Dz_err, Dzp_err, Gi_err, DiG_err, Di_err = self.session.run(
428 |                     fetches=[
429 |                         self.EG_optimizer,
430 |                         self.res_optimizer,
431 |                         self.D_z_optimizer,
432 |                         self.D_img_optimizer,
433 |                         # self.E_z_optimizer,
434 |                         self.EG_loss,
435 |                         self.res_loss,
436 |                         self.E_z_loss,
437 |                         self.D_z_loss_z,
438 |                         self.D_z_loss_prior,
439 |                         self.G_img_loss,
440 |                         self.D_img_loss_G,
441 |                         self.D_img_loss_input
442 |                     ],
443 |                     feed_dict={
444 |                         self.input_image: batch_images,
445 |                         self.age: batch_label_age,
446 |                         self.gender: batch_label_gender,
447 |                         self.z_prior: batch_z_prior,
448 |                         self.params: params
449 |                     }
450 |                 )
451 |                 # update residual twice
452 |                 _ = self.session.run(
453 |                     fetches=[
454 |                         self.res_optimizer
455 |                     ],
456 |                     feed_dict={
457 |                         self.input_image: batch_images,
458 |                         self.age: batch_label_age,
459 |                         self.gender: batch_label_gender,
460 |                         self.z_prior: batch_z_prior,
461 |                         self.params: params
462 |                     }
463 |                 )
464 | 
465 |                 print("\nEpoch: [%3d/%3d] Batch: [%3d/%3d]\n\tEG_err=%.4f\tres_err=%.4f" %
466 |                       (epoch+1, num_epochs, ind_batch+1, num_batches, EG_err, res_err))
467 |                 # print("\tEz=%.4f\tDz=%.4f\tDzp=%.4f" % (Ez_err, Dz_err, Dzp_err))
468 |                 print("\tGi=%.4f\tDi=%.4f\tDiG=%.4f" % (Gi_err, Di_err, DiG_err))
469 | 
470 |                 # estimate the time left
471 |                 elapse = time.time() - start_time
472 |                 time_left = ((num_epochs - epoch - 1) * num_batches + (num_batches - ind_batch - 1)) * elapse
473 |                 print("\tTime left: %02d:%02d:%02d" %
474 |                       (int(time_left / 3600), int(time_left % 3600 / 60), time_left % 60))
475 | 
476 |                 # add to summary
477 |                 summary = self.summary.eval(
478 |                     feed_dict={
479 |                         self.input_image: batch_images,
480 |                         self.age: batch_label_age,
481 |                         self.gender: batch_label_gender,
482 |                         self.z_prior: batch_z_prior,
483 |                         self.params: params
484 |                     }
485 |                 )
486 |                 self.writer.add_summary(summary, self.EG_global_step.eval())
487 | 
488 |                 # update the params
489 |                 if flag_auto_setting[0]:
490 |                     params[0] = 1e-4  #params[0] -= .1 * (res_err - EG_err)
491 |                     #params[0] = np.clip(params[0], 0, 1)
492 |                 if flag_auto_setting[1]:
493 |                     params[1] -= .1 * (res_err - EG_err)
494 |                     params[1] = np.clip(params[1], 0, 1)
495 | 
496 |             # save sample images for each epoch
497 |             name = '{:02d}'.format(epoch+1)
498 |             self.sample(sample_images, sample_label_age, sample_label_gender, name)
499 |             self.test(sample_images, sample_label_gender, name)
500 | 
501 |             # save a checkpoint every 10 epochs
502 |             if np.mod(epoch, 10) == 9:
503 |                 self.save_checkpoint()
504 | 
505 |         # save the trained model
506 |         self.save_checkpoint()
507 |         # close the summary writer
508 |         self.writer.close()
509 | 
510 |     def encoder(self, image, reuse_variables=False, name='encoder', enable_bn=False, is_training=True):
511 |         if reuse_variables:
512 |             tf.get_variable_scope().reuse_variables()
513 |         num_layers = int(np.log2(self.size_image)) - int(self.size_kernel / 2)
514 |         current = image
515 |         # conv layers with stride 2
516 |         for i in range(num_layers):
517 |             current = conv2d(
518 |                 input_map=current,
519 |                 num_output_channels=self.num_encoder_channels * (2 ** i),
520 |                 size_kernel=self.size_kernel,
521 |                 name=name + '_conv' + str(i)
522 |             )
523 |             if enable_bn:
524 |                 current = tf.contrib.layers.batch_norm(
525 |                     current,
526 |                     scale=True,
527 |                     is_training=is_training,
528 |                     scope=name + '_bn' + str(i),
529 |                     reuse=reuse_variables
530 |                 )
531 |             current = tf.nn.relu(current)
532 | 
533 |         # fully connected layer
534 |         current = fc(
535 |             input_vector=tf.reshape(current, [self.size_batch, -1]),
536 |             num_output_length=self.num_z_channels,
537 |             name=name + '_fc'
538 |         )
539 |         if enable_bn:
540 |             current = tf.contrib.layers.batch_norm(
541 |                 current,
542 |                 scale=False,
543 |                 is_training=is_training,
544 |                 scope=name + '_bn' + '_fc',
545 |                 reuse=reuse_variables
546 |             )
547 | 
548 |         # output
549 |         return tf.nn.tanh(current)
550 | 
551 |     def generator(self, z, y, gender, reuse_variables=False, enable_tile_label=True, tile_ratio=1.0, name='decoder',
552 |                   enable_bn=False, is_training=True):
553 |         if reuse_variables:
554 |             tf.get_variable_scope().reuse_variables()
555 |         num_layers = int(np.log2(self.size_image)) - int(self.size_kernel / 2)
556 |         if enable_tile_label:
557 |             duplicate = int(self.num_z_channels * tile_ratio / self.num_categories)
558 |         else:
559 |             duplicate = 1
560 |         z = concat_label(z, y, duplicate=duplicate)
561 |         if enable_tile_label:
562 |             duplicate = int(self.num_z_channels * tile_ratio / 2)
563 |         else:
564 |             duplicate = 1
565 |         z = concat_label(z, gender, duplicate=duplicate)
566 |         size_mini_map = int(self.size_image / 2 ** num_layers)
567 |         # fc layer
568 |         current = fc(
569 |             input_vector=z,
570 |             num_output_length=self.num_gen_channels * size_mini_map * size_mini_map,
571 |             name=name + '_fc'
572 |         )
573 |         if enable_bn:
574 |             current = tf.contrib.layers.batch_norm(
575 |                 current,
576 |                 scale=False,
577 |                 is_training=is_training,
578 |                 scope=name + '_bn' + '_fc',
579 |                 reuse=reuse_variables
580 |             )
581 |         # reshape to cube for deconv
582 |         current = tf.reshape(current, [-1, size_mini_map, size_mini_map, self.num_gen_channels])
583 |         current = tf.nn.relu(current)
584 |         # deconv layers with stride 2
585 |         for i in range(num_layers):
586 |             current = deconv2d(
587 |                 input_map=current,
588 |                 output_shape=[self.size_batch,
589 |                               size_mini_map * 2 ** (i + 1),
590 |                               size_mini_map * 2 ** (i + 1),
591 |                               int(self.num_gen_channels / 2 ** (i + 1))],
592 |                 size_kernel=self.size_kernel,
593 |                 name=name + '_deconv' + str(i)
594 |             )
595 |             if enable_bn:
596 |                 current = tf.contrib.layers.batch_norm(
597 |                     current,
598 |                     scale=False,
599 |                     is_training=is_training,
600 |                     scope=name + '_bn' + str(i),
601 |                     reuse=reuse_variables
602 |                 )
603 |             current = tf.nn.relu(current)
604 |         current = deconv2d(
605 |             input_map=current,
606 |             output_shape=[self.size_batch,
607 |                           self.size_image,
608 |                           self.size_image,
609 |                           int(self.num_gen_channels / 2 ** (i + 2))],
610 |             size_kernel=self.size_kernel,
611 |             stride=1,
612 |             name=name + '_deconv' + str(i+1)
613 |         )
614 |         if enable_bn:
615 |             current = tf.contrib.layers.batch_norm(
616 |                 current,
617 |                 scale=False,
618 |                 is_training=is_training,
619 |                 scope=name + '_bn' + str(i+1),
620 |                 reuse=reuse_variables
621 |             )
622 |         current = tf.nn.relu(current)
623 |         current = deconv2d(
624 |             input_map=current,
625 |             output_shape=[self.size_batch,
626 |                           self.size_image,
627 |                           self.size_image,
628 |                           self.num_input_channels],
629 |             size_kernel=self.size_kernel,
630 |             stride=1,
631 |             name=name + '_deconv' + str(i + 2)
632 |         )
633 | 
634 |         # output
635 |         return tf.nn.tanh(current)
636 | 
637 |     def discriminator_z(self, z, is_training=True, reuse_variables=False, num_hidden_layer_channels=(64, 32, 16), enable_bn=True):
638 |         if reuse_variables:
639 |             tf.get_variable_scope().reuse_variables()
640 |         current = z
641 |         # fully connected layers
642 |         for i in range(len(num_hidden_layer_channels)):
643 |             name = 'D_z_fc' + str(i)
644 |             current = fc(
645 |                 input_vector=current,
646 |                 num_output_length=num_hidden_layer_channels[i],
647 |                 name=name
648 |             )
649 |             if enable_bn:
650 |                 name = 'D_z_bn' + str(i)
651 |                 current = tf.contrib.layers.batch_norm(
652 |                     current,
653 |                     scale=False,
654 |                     is_training=is_training,
655 |                     scope=name,
656 |                     reuse=reuse_variables
657 |                 )
658 |             current = tf.nn.relu(current)
659 |         # output layer
660 |         name = 'D_z_fc' + str(i+1)
661 |         current = fc(
662 |             input_vector=current,
663 |             num_output_length=1,
664 |             name=name
665 |         )
666 |         return tf.nn.sigmoid(current), current
667 | 
668 |     def discriminator_img(self, image, y, gender, is_training=True, reuse_variables=False, enable_bn=True,
669 |                           num_hidden_layer_channels=(64, 128, 256, 512)):
670 |         if reuse_variables:
671 |             tf.get_variable_scope().reuse_variables()
672 |         num_layers = len(num_hidden_layer_channels)
673 |         current = image
674 |         # conv layers with stride 2
675 |         for i in range(num_layers):
676 |             name = 'D_img_conv' + str(i)
677 |             current = conv2d(
678 |                 input_map=current,
679 |                 num_output_channels=num_hidden_layer_channels[i],
680 |                 size_kernel=self.size_kernel,
681 |                 name=name
682 |             )
683 |             if enable_bn:
684 |                 name = 'D_img_bn' + str(i)
685 |                 current = tf.contrib.layers.batch_norm(
686 |                     current,
687 |                     scale=False,
688 |                     is_training=is_training,
689 |                     scope=name,
690 |                     reuse=reuse_variables
691 |                 )
692 |             current = tf.nn.relu(current)
693 |             if i == 0:
694 |                 current = concat_label(current, y)
695 |                 current = concat_label(current, gender, int(self.num_categories / 2))
696 |         # fully connected layer
697 |         name = 'D_img_fc1'
698 |         current = fc(
699 |             input_vector=tf.reshape(current, [self.size_batch, -1]),
700 |             num_output_length=1024,
701 |             name=name
702 |         )
703 |         current = lrelu(current)
704 |         name = 'D_img_fc2'
705 |         current = fc(
706 |             input_vector=current,
707 |             num_output_length=1,
708 |             name=name
709 |         )
710 |         # output
711 |         return tf.nn.sigmoid(current), current
712 | 
713 |     def unet(self, image, y=None, gender=None, name='unet'):
714 |         s = self.size_image
715 |         self.gf_dim = self.num_encoder_channels
716 |         s2, s4, s8, s16, s32, s64 = int(s/2), int(s/4), int(s/8), int(s/16), int(s/32), int(s/64)
717 | 
718 |         # image is (256 x 256 x input_c_dim)
719 |         e1 = conv2d(image, self.gf_dim, name='g_e1_conv')
720 |         # e1 is (128 x 128 x self.gf_dim)
721 |         e2 = self.g_bn_e2(conv2d(lrelu(e1), self.gf_dim*2, name=name + 'g_e2_conv'))
722 |         # e2 is (64 x 64 x self.gf_dim*2)
723 |         e3 = self.g_bn_e3(conv2d(lrelu(e2), self.gf_dim*4, name=name + 'g_e3_conv'))
724 |         # e3 is (32 x 32 x self.gf_dim*4)
725 |         e4 = self.g_bn_e4(conv2d(lrelu(e3), self.gf_dim*8, name=name + 'g_e4_conv'))
726 |         # e4 is (16 x 16 x self.gf_dim*8)
727 |         e5 = self.g_bn_e5(conv2d(lrelu(e4), self.gf_dim*8, name=name + 'g_e5_conv'))
728 |         # e5 is (8 x 8 x self.gf_dim*8)
729 |         e6 = self.g_bn_e6(conv2d(lrelu(e5), self.gf_dim*8, name=name + 'g_e6_conv'))
730 |         # e6 is (4 x 4 x self.gf_dim*8)
731 |         e7 = self.g_bn_e7(conv2d(lrelu(e6), self.gf_dim*8, name=name + 'g_e7_conv'))
732 |         # e7 is (2 x 2 x self.gf_dim*8)
733 |         # e8 = self.g_bn_e8(conv2d(lrelu(e7), self.gf_dim*8, name=name + 'g_e8_conv'))
734 |         # e8 is (1 x 1 x self.gf_dim*8)
735 | 
736 |         self.d1 = deconv2d(tf.nn.relu(e7), [self.size_batch, s64, s64, self.gf_dim*8], name=name + 'g_d1')
737 |         d1 = tf.nn.dropout(self.g_bn_d1(self.d1), 0.5)
738 |         d1 = tf.concat(3, [d1, e6])
739 |         # d1 is (2 x 2 x self.gf_dim*8*2)
740 | 
741 |         self.d2 = deconv2d(tf.nn.relu(d1), [self.size_batch, s32, s32, self.gf_dim*8], name=name + 'g_d2')
742 |         d2 = tf.nn.dropout(self.g_bn_d2(self.d2), 0.5)
743 |         d2 = tf.concat(3, [d2, e5])
744 |         # d2 is (4 x 4 x self.gf_dim*8*2)
745 | 
746 |         self.d3 = deconv2d(tf.nn.relu(d2), [self.size_batch, s16, s16, self.gf_dim*8], name=name + 'g_d3')
747 |         d3 = tf.nn.dropout(self.g_bn_d3(self.d3), 0.5)
748 |         d3 = tf.concat(3, [d3, e4])
749 |         # d3 is (8 x 8 x self.gf_dim*8*2)
750 | 
751 |         self.d4 = deconv2d(tf.nn.relu(d3), [self.size_batch, s8, s8, self.gf_dim*4], name=name + 'g_d4')
752 |         d4 = self.g_bn_d4(self.d4)
753 |         d4 = tf.concat(3, [d4, e3])
754 |         # d4 is (16 x 16 x self.gf_dim*8*2)
755 | 
756 |         self.d5 = deconv2d(tf.nn.relu(d4), [self.size_batch, s4, s4, self.gf_dim*2], name=name + 'g_d5')
757 |         d5 = self.g_bn_d5(self.d5)
758 |         d5 = tf.concat(3, [d5, e2])
759 |         # d5 is (32 x 32 x self.gf_dim*4*2)
760 | 
761 |         self.d6 = deconv2d(tf.nn.relu(d5), [self.size_batch, s2, s2, self.gf_dim], name=name + 'g_d6')
762 |         d6 = self.g_bn_d6(self.d6)
763 |         d6 = tf.concat(3, [d6, e1])
764 |         # d6 is (64 x 64 x self.gf_dim*2*2)
765 | 
766 |         # self.d7 = deconv2d(tf.nn.relu(d6), [self.size_batch, s2, s2, self.gf_dim], name=name + 'g_d7')
767 |         # d7 = self.g_bn_d7(self.d7)
768 |         # d7 = tf.concat(3, [d7, e1])
769 |         # d7 is (128 x 128 x self.gf_dim*1*2)
770 | 
771 |         self.d7 = deconv2d(tf.nn.relu(d6), [self.size_batch, s, s, self.num_input_channels], name=name + 'g_d7')
772 |         # d8 is (256 x 256 x output_c_dim)
773 | 
774 |         return tf.nn.tanh(self.d7)
775 | 
776 |     def save_checkpoint(self):
777 |         checkpoint_dir = os.path.join(self.save_dir, 'checkpoint')
778 |         if not os.path.exists(checkpoint_dir):
779 |             os.makedirs(checkpoint_dir)
780 |         self.saver.save(
781 |             sess=self.session,
782 |             save_path=os.path.join(checkpoint_dir, 'model'),
783 |             global_step=self.EG_global_step.eval()
784 |         )
785 | 
786 |     def load_checkpoint(self):
787 |         print("\n\tLoading pre-trained model ...")
788 |         checkpoint_dir = os.path.join(self.save_dir, 'checkpoint')
789 |         checkpoints = tf.train.get_checkpoint_state(checkpoint_dir)
790 |         if checkpoints and checkpoints.model_checkpoint_path:
791 |             checkpoints_name = os.path.basename(checkpoints.model_checkpoint_path)
792 |             self.saver.restore(self.session, os.path.join(checkpoint_dir, checkpoints_name))
793 |             return True
794 |         else:
795 |             return False
796 | 
797 |     def sample(self, images, labels, gender, name):
798 |         sample_dir = os.path.join(self.save_dir, 'samples')
799 |         if not os.path.exists(sample_dir):
800 |             os.makedirs(sample_dir)
801 |         G, G_res = self.session.run(
802 |             [self.G, self.G_res],
803 |             feed_dict={
804 |                 self.input_image: images,
805 |                 self.age: labels,
806 |                 self.gender: gender
807 |             }
808 |         )
809 |         size_frame = int(np.sqrt(self.size_batch))
810 |         save_batch_images(
811 |             batch_images=G,
812 |             save_path=os.path.join(sample_dir, name + '_G.png'),
813 |             image_value_range=self.image_value_range,
814 |             size_frame=[size_frame, size_frame]
815 |         )
816 |         save_batch_images(
817 |             batch_images=G_res,
818 |             save_path=os.path.join(sample_dir, name + '_res.png'),
819 |             image_value_range=self.image_value_range,
820 |             size_frame=[size_frame, size_frame]
821 |         )
822 |         save_batch_images(
823 |             batch_images=G + G_res,
824 |             save_path=os.path.join(sample_dir, name + '_Gres.png'),
825 |             image_value_range=self.image_value_range,
826 |             size_frame=[size_frame, size_frame]
827 |         )
828 | 
829 |     def test(self, images, gender, name):
830 |         test_dir = os.path.join(self.save_dir, 'test')
831 |         if not os.path.exists(test_dir):
832 |             os.makedirs(test_dir)
833 |         images = images[:int(np.sqrt(self.size_batch)), :, :, :]
834 |         gender = gender[:int(np.sqrt(self.size_batch)), :]
835 |         size_sample = images.shape[0]
836 |         labels = np.arange(size_sample)
837 |         labels = np.repeat(labels, size_sample)
838 |         query_labels = np.ones(
839 |             shape=(size_sample ** 2, self.num_categories),
840 |             dtype=np.float32
841 |         ) * self.image_value_range[0]
842 |         for i in range(query_labels.shape[0]):
843 |             query_labels[i, labels[i]] = self.image_value_range[-1]
844 |         query_images = np.tile(images, [size_sample, 1, 1, 1])
845 |         query_gender = np.tile(gender, [size_sample, 1])
846 |         G, G_res = self.session.run(
847 |             [self.G, self.G_res],
848 |             feed_dict={
849 |                 self.input_image: query_images,
850 |                 self.age: query_labels,
851 |                 self.gender: query_gender
852 |             }
853 |         )
854 |         save_batch_images(
855 |             batch_images=query_images,
856 |             save_path=os.path.join(test_dir, 'input.png'),
857 |             image_value_range=self.image_value_range,
858 |             size_frame=[size_sample, size_sample]
859 |         )
860 |         save_batch_images(
861 |             batch_images=G,
862 |             save_path=os.path.join(test_dir, name + '_G.png'),
863 |             image_value_range=self.image_value_range,
864 |             size_frame=[size_sample, size_sample]
865 |         )
866 |         save_batch_images(
867 |             batch_images=G_res,
868 |             save_path=os.path.join(test_dir, name + '_res.png'),
869 |             image_value_range=self.image_value_range,
870 |             size_frame=[size_sample, size_sample]
871 |         )
872 |         save_batch_images(
873 |             batch_images=G + G_res,
874 |             save_path=os.path.join(test_dir, name + '_Gres.png'),
875 |             image_value_range=self.image_value_range,
876 |             size_frame=[size_sample, size_sample]
877 |         )
878 | 
879 |     def custom_test(self, testing_samples_dir):
880 |         if not self.load_checkpoint():
881 |             print("\tFAILED >_<")
882 |             exit(0)
--------------------------------------------------------------------------------
/Adapt_CAAE/checkGPU.py:
--------------------------------------------------------------------------------
1 | # check GPU status
2 | from pynvml import *
3 | from time import sleep
4 | from datetime import datetime
5 | 
6 | 
7 | def gpu_memory_usage(is_print=True):
8 |     try:
9 |         nvmlInit()
10 |         # version = nvmlSystemGetDriverVersion()
11 |         deviceCount = nvmlDeviceGetCount()
12 |         GPU = {}
13 |         for i in range(deviceCount):
14 |             GPU[i] = {}
15 |             handle = nvmlDeviceGetHandleByIndex(i)
16 |             info = nvmlDeviceGetMemoryInfo(handle)
17 |             GPU[i]['total'] = info.total / 1024.0 / 1024.0 / 1024.0
18 |             GPU[i]['free'] = info.free / 1024.0 / 1024.0 / 1024.0
19 |             if is_print:
20 |                 print("\nGPU #%d Memory Usage:"
21 |                       "\n\tTotal:\t%4.2fGB\n\tFree:\t%4.2fGB" %
22 |                       (i, GPU[i]['total'], GPU[i]['free']))
23 |                 print datetime.now()
24 |         nvmlShutdown()
25 |         return GPU
26 |     except:
27 |         print "Failed to check GPU status!"
28 |         exit(0)
29 | 
30 | 
31 | def auto_queue(gpu_memory_require=3.2, interval=1, schedule=None):
32 |     # input arg: schedule = datetime(year, month, day, hour, minute, second)
33 |     if schedule is None:
34 |         schedule = datetime.today()
35 |     else:
36 |         print '\nScheduled time: ', schedule
37 | 
38 |     # wait until the scheduled time; compare the datetime objects directly,
39 |     # since comparing the fields one by one fails across month and day
40 |     # boundaries
41 |     while datetime.today() < schedule:
42 |         sleep(interval)
43 | 
44 | 
45 |     gpu_stat = gpu_memory_usage()
46 |     if gpu_stat[0]['total'] < gpu_memory_require:
47 |         print 'Memory requirement is larger than GPU total memory'
48 |         exit(1)
49 |     while gpu_stat[0]['free'] < gpu_memory_require:
50 |         sleep(interval)  # second
51 |         gpu_stat = gpu_memory_usage()
52 |     return gpu_stat
53 | 
54 | 
55 | def set_memory_usage(usage=12.0, allow_growth=True):
56 |     auto_queue(gpu_memory_require=usage)
57 |     try:
58 |         import tensorflow as tf
59 |         assert isinstance(usage, (int, float))
60 |         assert usage >= 0
61 | 
62 |         config = tf.ConfigProto()
63 |         gpu_stat = gpu_memory_usage()
64 |         total_memory = gpu_stat[0]['total']
65 |         if usage > total_memory:
66 |             usage_percentage = 1.0
67 |         else:
68 |             usage_percentage = usage / total_memory
69 |         config.gpu_options.allow_growth = allow_growth
70 |         config.gpu_options.per_process_gpu_memory_fraction = usage_percentage
71 |         return config
72 |     except:
73 |         print 'Failed to set memory usage!'
74 |         return None
75 | 
76 | 
77 | if __name__ == '__main__':
78 |     gpu_memory_usage()
79 | 
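The UTKFace crops listed below are named `age_gender_race_timestamp.jpg.chip.jpg`, and `ResLearn.train` above recovers the age and gender labels by splitting each file name on `_` (the race field and timestamp are ignored). A standalone sketch of that mapping — the helper name and the table-driven form are ours; the bin thresholds and the (-1, 1) on/off coding copy the loop in `train`:

    import os
    import numpy as np

    # age bins used in ResLearn.train; any age above 70 falls into the last category
    AGE_BINS = [(0, 5), (6, 10), (11, 15), (16, 20), (21, 30),
                (31, 40), (41, 50), (51, 60), (61, 70)]

    def labels_from_filename(path, num_categories=10, value_range=(-1, 1)):
        # e.g. '43_0_0_20170104183504030.jpg.chip.jpg' -> age 43, gender 0
        parts = os.path.basename(path).split('_')
        age, gender = int(parts[0]), int(parts[1])
        age_vec = np.full(num_categories, value_range[0], dtype=np.float32)
        bin_idx = next((i for i, (lo, hi) in enumerate(AGE_BINS) if lo <= age <= hi),
                       num_categories - 1)
        age_vec[bin_idx] = value_range[-1]
        gender_vec = np.full(2, value_range[0], dtype=np.float32)
        gender_vec[gender] = value_range[-1]
        return age_vec, gender_vec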
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/100_1_2_20170112213615815.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/100_1_2_20170112213615815.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/103_0_2_20170112213001988.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/103_0_2_20170112213001988.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/10_0_0_20170110225557604.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/10_0_0_20170110225557604.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/110_1_0_20170120134701015.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/110_1_0_20170120134701015.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/116_1_3_20170120134744096.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/116_1_3_20170120134744096.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/11_0_1_20170103201136230.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/11_0_1_20170103201136230.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/12_0_4_20170103201824880.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/12_0_4_20170103201824880.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/13_0_0_20170110232526929.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/13_0_0_20170110232526929.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/14_1_3_20170104221818294.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/14_1_3_20170104221818294.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/15_0_0_20170110232443234.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/15_0_0_20170110232443234.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/16_0_0_20170110232605131.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/16_0_0_20170110232605131.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/17_1_0_20170109201558943.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/17_1_0_20170109201558943.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/18_1_0_20170109213451167.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/18_1_0_20170109213451167.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/19_1_0_20170116163837603.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/19_1_0_20170116163837603.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/1_0_0_20161219140623097.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/1_0_0_20161219140623097.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/20_0_0_20170105161706251.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/20_0_0_20170105161706251.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/21_0_3_20170119153824472.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/21_0_3_20170119153824472.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/2_1_2_20161219205100213.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/2_1_2_20161219205100213.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/3_0_2_20161219210918485.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/3_0_2_20161219210918485.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/43_0_0_20170104183504030.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/43_0_0_20170104183504030.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/4_0_0_20170110213121589.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/4_0_0_20170110213121589.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/58_0_0_20170120224028072.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/58_0_0_20170120224028072.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/5_1_0_20170109194005140.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/5_1_0_20170109194005140.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/61_0_0_20170117174637422.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/61_0_0_20170117174637422.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/6_1_3_20170104222204751.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/6_1_3_20170104222204751.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/78_0_0_20170111204704023.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/78_0_0_20170111204704023.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/7_1_1_20170116225209209.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/7_1_1_20170116225209209.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/8_0_1_20170110220106378.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/8_0_1_20170110220106378.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/data/UTKFace/9_0_0_20170110224240532.jpg.chip.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_CAAE/data/UTKFace/9_0_0_20170110224240532.jpg.chip.jpg
--------------------------------------------------------------------------------
/Adapt_CAAE/main.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | from ResLearn import ResLearn
3 | import checkGPU
4 | import numpy as np
5 | 
6 | 
7 | flags = tf.app.flags
8 | flags.DEFINE_integer(flag_name='epoch', default_value=50, docstring='number of epochs')
9 | flags.DEFINE_integer(flag_name='batch_size', default_value=25, docstring='batch size')
10 | flags.DEFINE_integer(flag_name='is_train', default_value=1, docstring='training mode')
11 | flags.DEFINE_integer(flag_name='is_bn', default_value=0, docstring='enable batch normalization')
12 | flags.DEFINE_string(flag_name='dataset', default_value='UTKFace', docstring='dataset name')
13 | flags.DEFINE_string(flag_name='savedir', default_value='save', docstring='dir for saving training results')
14 | flags.DEFINE_string(flag_name='testdir', default_value='None', docstring='dir for testing images')
15 | flags.DEFINE_float(flag_name='param0', default_value=1.0, docstring='weight of discriminator loss on fake images')
16 | flags.DEFINE_float(flag_name='param1', default_value=1.0, docstring='weight of reconstruction loss on artifact modeling')
17 | flags.DEFINE_integer(flag_name='is_schedule', default_value=0, docstring='scheduled running')
18 | flags.DEFINE_integer(flag_name='day', default_value=1, docstring='day of month for the scheduled run')
19 | flags.DEFINE_integer(flag_name='hr', default_value=0, docstring='hour')
20 | flags.DEFINE_integer(flag_name='min', default_value=0, docstring='minute')
21 | 
22 | FLAGS = flags.FLAGS
23 | 
24 | 
25 | gpu_memory_require = 7.0
26 | 
27 | 
28 | def main(_):
29 |     from datetime import datetime
30 |     if FLAGS.is_schedule:
31 |         today = datetime.today()
32 |         checkGPU.auto_queue(
33 |             gpu_memory_require=gpu_memory_require,
34 |             interval=1,
35 |             schedule=datetime(year=today.year, month=today.month, day=FLAGS.day, hour=FLAGS.hr, minute=FLAGS.min)
36 |         )
37 |     config = checkGPU.set_memory_usage(
38 |         usage=gpu_memory_require,
39 |         allow_growth=True
40 |     )
41 | 
42 |     # print settings
43 |     import pprint
44 |     pprint.pprint(FLAGS.__flags)
45 | 
46 |     with tf.Session(config=config) as session:
47 |         model = ResLearn(
48 |             session,  # TensorFlow session
49 |             is_training=FLAGS.is_train,  # flag for training or testing mode
50 |             save_dir=FLAGS.savedir,  # path to save checkpoints, samples, and summary
51 |             dataset_name=FLAGS.dataset,  # name of the dataset in the folder ./data
52 |             size_batch=FLAGS.batch_size,
53 |             enable_bn=FLAGS.is_bn
54 |         )
55 |         if FLAGS.is_train:
56 |             print '\n\tTraining Mode'
57 |             model.train(
58 |                 num_epochs=FLAGS.epoch,  # number of epochs
59 |                 params=[FLAGS.param0, FLAGS.param1]
60 |             )
61 |         else:
62 |             print '\n\tTesting Mode'
63 |             model.custom_test(
64 |                 testing_samples_dir=FLAGS.testdir + '/*jpg'
65 |             )
66 | 
67 | 
68 | if __name__ == '__main__':
69 |     if 0:
70 |         print 'Run on CPU'
71 |         with tf.device("/cpu:0"):
72 |             gpu_memory_require = 0.0
73 |             tf.app.run()
74 | 
75 |     tf.app.run()
76 | 
77 | 
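The flags above are the whole interface of main.py; typical invocations (the paths are illustrative) look like:

    # train for 50 epochs on ./data/UTKFace; checkpoints, samples, and summaries go under ./save
    python main.py --is_train 1 --epoch 50 --batch_size 25 --dataset UTKFace --savedir save

    # test a trained model on custom images (globbed as <testdir>/*jpg)
    python main.py --is_train 0 --testdir ./images --savedir save

Since `param0` and `param1` are declared as float flags, the auto-setting branch in `ResLearn.train` is reached by passing a negative value (the `assert params[i] >= 0` fails and flips `flag_auto_setting`), e.g. `--param0=-1`.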
--------------------------------------------------------------------------------
/Adapt_CAAE/ops.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | import tensorflow as tf
3 | import numpy as np
4 | from scipy.misc import imread, imresize, imsave
5 | 
6 | 
7 | class batch_norm(object):
8 |     def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"):
9 |         with tf.variable_scope(name):
10 |             self.epsilon = epsilon
11 |             self.momentum = momentum
12 |             self.name = name
13 | 
14 |     def __call__(self, x, train=True):
15 |         return tf.contrib.layers.batch_norm(x, decay=self.momentum, updates_collections=None, epsilon=self.epsilon,
16 |                                             scale=True, scope=self.name)
17 | 
18 | 
19 | def conv2d(input_map, num_output_channels, size_kernel=5, stride=2, name='conv2d'):
20 |     with tf.variable_scope(name):
21 |         stddev = np.sqrt(2.0 / (np.sqrt(input_map.get_shape()[-1].value * num_output_channels) * size_kernel ** 2))
22 |         stddev = 0.02
23 |         kernel = tf.get_variable(
24 |             name='w',
25 |             shape=[size_kernel, size_kernel, input_map.get_shape()[-1], num_output_channels],
26 |             dtype=tf.float32,
27 |             initializer=tf.truncated_normal_initializer(stddev=stddev)
28 |         )
29 |         biases = tf.get_variable(
30 |             name='b',
31 |             shape=[num_output_channels],
32 |             dtype=tf.float32,
33 |             initializer=tf.constant_initializer(0.0)
34 |         )
35 |         conv = tf.nn.conv2d(input_map, kernel, strides=[1, stride, stride, 1], padding='SAME')
36 |         return tf.nn.bias_add(conv, biases)
37 | 
38 | 
39 | def fc(input_vector, num_output_length, name='fc'):
40 |     with tf.variable_scope(name):
41 |         stddev = np.sqrt(1.0 / (np.sqrt(input_vector.get_shape()[-1].value * num_output_length)))
42 |         stddev = 0.02
43 |         w = tf.get_variable(
44 |             name='w',
45 |             shape=[input_vector.get_shape()[1], num_output_length],
46 |             dtype=tf.float32,
47 |             initializer=tf.random_normal_initializer(stddev=stddev)
48 |         )
49 |         b = tf.get_variable(
50 |             name='b',
51 |             shape=[num_output_length],
52 |             dtype=tf.float32,
53 |             initializer=tf.constant_initializer(0.0)
54 |         )
55 |         return tf.matmul(input_vector, w) + b
56 | 
57 | 
58 | def deconv2d(input_map, output_shape, size_kernel=5, stride=2, stddev=0.02, name='deconv2d'):
59 |     with tf.variable_scope(name):
60 |         stddev = np.sqrt(1.0 / (np.sqrt(input_map.get_shape()[-1].value * output_shape[-1]) * size_kernel ** 2))
61 |         stddev = 0.02
62 |         # filter : [height, width, output_channels, in_channels]
63 |         kernel = tf.get_variable(
64 |             name='w',
65 |             shape=[size_kernel, size_kernel, output_shape[-1], input_map.get_shape()[-1]],
66 |             dtype=tf.float32,
67 |             initializer=tf.random_normal_initializer(stddev=stddev)
68 |         )
69 |         biases = tf.get_variable(
70 |             name='b',
71 |             shape=[output_shape[-1]],
72 |             dtype=tf.float32,
73 |             initializer=tf.constant_initializer(0.0)
74 |         )
75 |         deconv = tf.nn.conv2d_transpose(input_map, kernel, strides=[1, stride, stride, 1], output_shape=output_shape)
76 |         return tf.nn.bias_add(deconv, biases)
77 | 
78 | 
79 | def lrelu(logits, leak=0.2):
80 |     return tf.maximum(logits, leak*logits)
81 | 
82 | 
83 | def concat_label(x, label, duplicate=1):
84 |     x_shape = x.get_shape().as_list()
85 |     if duplicate < 1:
86 |         return x
87 |     # duplicate the label to enhance its effect, does it really affect the result?
88 |     label = tf.tile(label, [1, duplicate])
89 |     label_shape = label.get_shape().as_list()
90 |     if len(x_shape) == 2:
91 |         return tf.concat(1, [x, label])
92 |     elif len(x_shape) == 4:
93 |         label = tf.reshape(label, [x_shape[0], 1, 1, label_shape[-1]])
94 |         return tf.concat(3, [x, label*tf.ones([x_shape[0], x_shape[1], x_shape[2], label_shape[-1]])])
95 | 
96 | 
97 | def load_image(
98 |         image_path,  # path of an image
99 |         image_size=64,  # expected size of the image
100 |         image_value_range=(-1, 1),  # expected pixel value range of the image
101 |         is_gray=False,  # gray scale or color image
102 | ):
103 |     if is_gray:
104 |         image = imread(image_path, flatten=True).astype(np.float32)
105 |     else:
106 |         image = imread(image_path).astype(np.float32)
107 |     image = imresize(image, [image_size, image_size])
108 |     image = image.astype(np.float32) * (image_value_range[-1] - image_value_range[0]) / 255.0 + image_value_range[0]
109 |     return image
110 | 
111 | 
112 | def save_batch_images(
113 |         batch_images,  # a batch of images
114 |         save_path,  # path to save the images
115 |         image_value_range=(-1,1),  # value range of the input batch images
116 |         size_frame=None  # size of the image matrix, number of images in each row and column
117 | ):
118 |     # transform the pixel value to 0~1
119 |     images = (batch_images - image_value_range[0]) / (image_value_range[-1] - image_value_range[0])
120 |     if size_frame is None:
121 |         auto_size = int(np.ceil(np.sqrt(images.shape[0])))
122 |         size_frame = [auto_size, auto_size]
123 |     img_h, img_w = batch_images.shape[1], batch_images.shape[2]
124 |     frame = np.zeros([img_h * size_frame[0], img_w * size_frame[1], 3])
125 |     for ind, image in enumerate(images):
126 |         ind_col = ind % size_frame[1]
127 |         ind_row = ind // size_frame[1]
128 |         frame[(ind_row * img_h):(ind_row * img_h + img_h), (ind_col * img_w):(ind_col * img_w + img_w), :] = image
129 |     imsave(save_path, np.clip(frame, 0.0, 1.0))  # imsave(save_path, frame)
130 | 
131 | 
132 | 
133 | 
134 | 
135 | 
136 | 
137 | 
138 | 
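Of these helpers, `concat_label` is the easiest to misread: for 4-D activations it tiles the (optionally duplicated) label vector into constant feature maps and appends them on the channel axis, which is how `discriminator_img` injects age and gender after its first conv layer. A NumPy rendering of the same shape arithmetic, with illustrative sizes:

    import numpy as np

    batch, h, w, c = 25, 64, 64, 64                    # a conv feature map
    x = np.zeros((batch, h, w, c), dtype=np.float32)
    gender = np.tile([[1.0, -1.0]], (batch, 1))        # one-hot label in (-1, 1) coding

    duplicate = 5                                      # concat_label: tf.tile(label, [1, duplicate])
    label = np.tile(gender, (1, duplicate))            # (25, 10)
    label = label.reshape(batch, 1, 1, -1)             # (25, 1, 1, 10)
    maps = label * np.ones((batch, h, w, label.shape[-1]), dtype=np.float32)
    out = np.concatenate([x, maps], axis=3)            # (25, 64, 64, 74)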
datetime import datetime 5 | 6 | 7 | def gpu_memory_usage(is_print=True): 8 | try: 9 | nvmlInit() 10 | # version = nvmlSystemGetDriverVersion() 11 | deviceCount = nvmlDeviceGetCount() 12 | GPU = {} 13 | for i in range(deviceCount): 14 | GPU[i] = {} 15 | handle = nvmlDeviceGetHandleByIndex(i) 16 | info = nvmlDeviceGetMemoryInfo(handle) 17 | GPU[i]['total'] = info.total / 1024.0 / 1024.0 / 1024.0 18 | GPU[i]['free'] = info.free / 1024.0 / 1024.0 / 1024.0 19 | if is_print: 20 | print("\nGPU #%d Memory Usage:" 21 | "\n\tTotal:\t%4.2fGB\n\tFree:\t%4.2fGB" % 22 | (i, GPU[i]['total'], GPU[i]['free'])) 23 | print datetime.now() 24 | nvmlShutdown() 25 | return GPU 26 | except: 27 | print "Failed to check GPU status!" 28 | exit(0) 29 | 30 | 31 | def auto_queue(gpu_memory_require=3.2, interval=1, schedule=None): 32 | # input arg: schedule = datetime(year, month, day, hour, minute, second) 33 | if schedule is None: 34 | schedule = datetime.today() 35 | else: 36 | print '\nScheduled time: ', schedule 37 | 38 | # wait until the scheduled time; compare whole datetime objects -- the 39 | # original field-by-field `or` comparison could stall past the schedule 40 | # (e.g. scheduled 10:30, now 11:05 kept waiting because minute 5 < 30) 41 | while datetime.today() < schedule: 42 | sleep(interval) 43 | 44 | 45 | gpu_stat = gpu_memory_usage() 46 | if gpu_stat[0]['total'] < gpu_memory_require: 47 | print 'Memory requirement is larger than GPU total memory' 48 | exit(1) 49 | while gpu_stat[0]['free'] < gpu_memory_require: 50 | sleep(interval) # second 51 | gpu_stat = gpu_memory_usage() 52 | return gpu_stat 53 | 54 | 55 | def set_memory_usage(usage=12.0, allow_growth=True): 56 | auto_queue(gpu_memory_require=usage) 57 | try: 58 | import tensorflow as tf 59 | assert isinstance(usage, (int, float)) # `type(usage) is int or float` was always True 60 | assert usage >= 0 61 | 62 | config = tf.ConfigProto() 63 | gpu_stat = gpu_memory_usage() 64 | total_memory = gpu_stat[0]['total'] 65 | if usage > total_memory: 66 | usage_percentage = 1.0 67 | else: 68 | usage_percentage = usage / total_memory 69 | config.gpu_options.allow_growth = allow_growth 70 | config.gpu_options.per_process_gpu_memory_fraction = usage_percentage 71 | return config 72 | except: 73 | print 'Failed to set memory usage!'
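# [editor's note] Typical call pattern for this module (values illustrative):
#   config = set_memory_usage(usage=4.0, allow_growth=True)
#   with tf.Session(config=config) as sess:
#       ...
# The bare `except` above also swallows an ImportError from the tensorflow
# import; catching `Exception` and printing it would make failures easier
# to diagnose.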
74 | return None 75 | 76 | 77 | if __name__ == '__main__': 78 | gpu_memory_usage() 79 | -------------------------------------------------------------------------------- /Adapt_Pix2Pix/datasets/facades/test/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_Pix2Pix/datasets/facades/test/1.jpg -------------------------------------------------------------------------------- /Adapt_Pix2Pix/datasets/facades/test/2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_Pix2Pix/datasets/facades/test/2.jpg -------------------------------------------------------------------------------- /Adapt_Pix2Pix/datasets/facades/test/3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_Pix2Pix/datasets/facades/test/3.jpg -------------------------------------------------------------------------------- /Adapt_Pix2Pix/datasets/facades/test/4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_Pix2Pix/datasets/facades/test/4.jpg -------------------------------------------------------------------------------- /Adapt_Pix2Pix/datasets/facades/test/5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_Pix2Pix/datasets/facades/test/5.jpg -------------------------------------------------------------------------------- /Adapt_Pix2Pix/datasets/facades/train/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_Pix2Pix/datasets/facades/train/1.jpg -------------------------------------------------------------------------------- /Adapt_Pix2Pix/datasets/facades/train/2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_Pix2Pix/datasets/facades/train/2.jpg -------------------------------------------------------------------------------- /Adapt_Pix2Pix/datasets/facades/train/3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_Pix2Pix/datasets/facades/train/3.jpg -------------------------------------------------------------------------------- /Adapt_Pix2Pix/datasets/facades/train/4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_Pix2Pix/datasets/facades/train/4.jpg -------------------------------------------------------------------------------- /Adapt_Pix2Pix/datasets/facades/train/5.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_Pix2Pix/datasets/facades/train/5.jpg -------------------------------------------------------------------------------- /Adapt_Pix2Pix/datasets/facades/val/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_Pix2Pix/datasets/facades/val/1.jpg -------------------------------------------------------------------------------- /Adapt_Pix2Pix/datasets/facades/val/2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_Pix2Pix/datasets/facades/val/2.jpg -------------------------------------------------------------------------------- /Adapt_Pix2Pix/datasets/facades/val/3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_Pix2Pix/datasets/facades/val/3.jpg -------------------------------------------------------------------------------- /Adapt_Pix2Pix/datasets/facades/val/4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_Pix2Pix/datasets/facades/val/4.jpg -------------------------------------------------------------------------------- /Adapt_Pix2Pix/datasets/facades/val/5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/Adapt_Pix2Pix/datasets/facades/val/5.jpg -------------------------------------------------------------------------------- /Adapt_Pix2Pix/download_dataset.sh: -------------------------------------------------------------------------------- 1 | mkdir datasets 2 | FILE=$1 3 | URL=https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/$FILE.tar.gz 4 | TAR_FILE=./datasets/$FILE.tar.gz 5 | TARGET_DIR=./datasets/$FILE/ 6 | wget -N $URL -O $TAR_FILE 7 | mkdir $TARGET_DIR 8 | tar -zxvf $TAR_FILE -C ./datasets/ 9 | rm $TAR_FILE 10 | -------------------------------------------------------------------------------- /Adapt_Pix2Pix/main.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import scipy.misc 4 | import numpy as np 5 | import checkGPU 6 | from model import pix2pix 7 | import tensorflow as tf 8 | 9 | parser = argparse.ArgumentParser(description='') 10 | parser.add_argument('--dataset_name', dest='dataset_name', default='facades', help='name of the dataset') 11 | parser.add_argument('--epoch', dest='epoch', type=int, default=200, help='# of epoch') 12 | parser.add_argument('--batch_size', dest='batch_size', type=int, default=1, help='# images in batch') 13 | parser.add_argument('--train_size', dest='train_size', type=int, default=1e8, help='# images used to train') 14 | parser.add_argument('--load_size', dest='load_size', type=int, default=286, help='scale images to this size') 15 | parser.add_argument('--fine_size', dest='fine_size', 
type=int, default=256, help='then crop to this size') 16 | parser.add_argument('--ngf', dest='ngf', type=int, default=64, help='# of gen filters in first conv layer') 17 | parser.add_argument('--ndf', dest='ndf', type=int, default=64, help='# of discri filters in first conv layer') 18 | parser.add_argument('--input_nc', dest='input_nc', type=int, default=3, help='# of input image channels') 19 | parser.add_argument('--output_nc', dest='output_nc', type=int, default=3, help='# of output image channels') 20 | parser.add_argument('--niter', dest='niter', type=int, default=200, help='# of iter at starting learning rate') 21 | parser.add_argument('--lr', dest='lr', type=float, default=0.0002, help='initial learning rate for adam') 22 | parser.add_argument('--beta1', dest='beta1', type=float, default=0.5, help='momentum term of adam') 23 | parser.add_argument('--flip', dest='flip', type=bool, default=True, help='if flip the images for data argumentation') 24 | parser.add_argument('--which_direction', dest='which_direction', default='AtoB', help='AtoB or BtoA') 25 | parser.add_argument('--phase', dest='phase', default='train', help='train, test') 26 | parser.add_argument('--save_epoch_freq', dest='save_epoch_freq', type=int, default=50, help='save a model every save_epoch_freq epochs (does not overwrite previously saved models)') 27 | parser.add_argument('--save_latest_freq', dest='save_latest_freq', type=int, default=5000, help='save the latest model every latest_freq sgd iterations (overwrites the previous latest model)') 28 | parser.add_argument('--print_freq', dest='print_freq', type=int, default=50, help='print the debug information every print_freq iterations') 29 | parser.add_argument('--continue_train', dest='continue_train', type=bool, default=False, help='if continue training, load the latest model: 1: true, 0: false') 30 | parser.add_argument('--serial_batches', dest='serial_batches', type=bool, default=False, help='f 1, takes images in order to make batches, otherwise takes them randomly') 31 | parser.add_argument('--serial_batch_iter', dest='serial_batch_iter', type=bool, default=True, help='iter into serial image list') 32 | parser.add_argument('--checkpoint_dir', dest='checkpoint_dir', default='checkpoint', help='models are saved here') 33 | parser.add_argument('--sample_dir', dest='sample_dir', default='sample', help='sample are saved here') 34 | parser.add_argument('--test_dir', dest='test_dir', default='test', help='test sample are saved here') 35 | parser.add_argument('--log_dir', dest='log_dir', default='log', help='log are saved here') 36 | parser.add_argument('--save_dir', dest='save_dir', default='save', help='save all') 37 | 38 | args = parser.parse_args() 39 | 40 | gpu_memory_require = 4.0 41 | 42 | 43 | def main(_): 44 | args.checkpoint_dir = os.path.join(args.save_dir, args.checkpoint_dir) 45 | args.sample_dir = os.path.join(args.save_dir, args.sample_dir) 46 | args.test_dir = os.path.join(args.save_dir, args.test_dir) 47 | args.log_dir = os.path.join(args.save_dir, args.log_dir) 48 | if not os.path.exists(args.checkpoint_dir): 49 | os.makedirs(args.checkpoint_dir) 50 | if not os.path.exists(args.sample_dir): 51 | os.makedirs(args.sample_dir) 52 | if not os.path.exists(args.test_dir): 53 | os.makedirs(args.test_dir) 54 | 55 | checkGPU.auto_queue( 56 | gpu_memory_require=gpu_memory_require, 57 | interval=1, 58 | ) 59 | config = checkGPU.set_memory_usage( 60 | usage=gpu_memory_require, 61 | allow_growth=True 62 | ) 63 | 64 | with tf.Session(config=config) as sess: 65 | 
model = pix2pix(sess, image_size=args.fine_size, batch_size=args.batch_size, 66 | output_size=args.fine_size, dataset_name=args.dataset_name, 67 | checkpoint_dir=args.checkpoint_dir, sample_dir=args.sample_dir) 68 | 69 | if args.phase == 'train': 70 | model.train(args) 71 | else: 72 | model.test(args) 73 | 74 | if __name__ == '__main__': 75 | 76 | if 0: 77 | print 'Run on CPU' 78 | with tf.device("/cpu:0"): 79 | gpu_memory_require = 0.0 80 | tf.app.run() 81 | 82 | tf.app.run() 83 | -------------------------------------------------------------------------------- /Adapt_Pix2Pix/model.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import os 3 | import time 4 | from glob import glob 5 | import tensorflow as tf 6 | import numpy as np 7 | from six.moves import xrange 8 | 9 | from ops import * 10 | from utils import * 11 | 12 | class pix2pix(object): 13 | def __init__(self, sess, image_size=256, 14 | batch_size=1, sample_size=1, output_size=256, 15 | gf_dim=64, df_dim=64, 16 | input_c_dim=3, output_c_dim=3, dataset_name='facades', 17 | checkpoint_dir=None, sample_dir=None): 18 | """ 19 | 20 | Args: 21 | sess: TensorFlow session 22 | batch_size: The size of batch. Should be specified before training. 23 | output_size: (optional) The resolution in pixels of the images. [256] 24 | gf_dim: (optional) Dimension of gen filters in first conv layer. [64] 25 | df_dim: (optional) Dimension of discrim filters in first conv layer. [64] 26 | input_c_dim: (optional) Dimension of input image color. For grayscale input, set to 1. [3] 27 | output_c_dim: (optional) Dimension of output image color. For grayscale input, set to 1. [3] 28 | """ 29 | self.sess = sess 30 | self.is_grayscale = (input_c_dim == 1) 31 | self.batch_size = batch_size 32 | self.image_size = image_size 33 | self.sample_size = sample_size 34 | self.output_size = output_size 35 | 36 | self.gf_dim = gf_dim 37 | self.df_dim = df_dim 38 | 39 | self.input_c_dim = input_c_dim 40 | self.output_c_dim = output_c_dim 41 | 42 | # batch normalization : deals with poor initialization helps gradient flow 43 | self.d_bn1 = batch_norm(name='d_bn1') 44 | self.d_bn2 = batch_norm(name='d_bn2') 45 | self.d_bn3 = batch_norm(name='d_bn3') 46 | 47 | self.g_bn_e2 = batch_norm(name='g_bn_e2') 48 | self.g_bn_e3 = batch_norm(name='g_bn_e3') 49 | self.g_bn_e4 = batch_norm(name='g_bn_e4') 50 | self.g_bn_e5 = batch_norm(name='g_bn_e5') 51 | self.g_bn_e6 = batch_norm(name='g_bn_e6') 52 | self.g_bn_e7 = batch_norm(name='g_bn_e7') 53 | self.g_bn_e8 = batch_norm(name='g_bn_e8') 54 | 55 | self.g_bn_d1 = batch_norm(name='g_bn_d1') 56 | self.g_bn_d2 = batch_norm(name='g_bn_d2') 57 | self.g_bn_d3 = batch_norm(name='g_bn_d3') 58 | self.g_bn_d4 = batch_norm(name='g_bn_d4') 59 | self.g_bn_d5 = batch_norm(name='g_bn_d5') 60 | self.g_bn_d6 = batch_norm(name='g_bn_d6') 61 | self.g_bn_d7 = batch_norm(name='g_bn_d7') 62 | 63 | self.g_bn_e2p = batch_norm(name='pg_bn_e2') 64 | self.g_bn_e3p = batch_norm(name='pg_bn_e3') 65 | self.g_bn_e4p = batch_norm(name='pg_bn_e4') 66 | self.g_bn_e5p = batch_norm(name='pg_bn_e5') 67 | self.g_bn_e6p = batch_norm(name='pg_bn_e6') 68 | self.g_bn_e7p = batch_norm(name='pg_bn_e7') 69 | self.g_bn_e8p = batch_norm(name='pg_bn_e8') 70 | 71 | self.g_bn_d1p = batch_norm(name='pg_bn_d1') 72 | self.g_bn_d2p = batch_norm(name='pg_bn_d2') 73 | self.g_bn_d3p = batch_norm(name='pg_bn_d3') 74 | self.g_bn_d4p = batch_norm(name='pg_bn_d4') 75 | self.g_bn_d5p = batch_norm(name='pg_bn_d5') 76 | 
self.g_bn_d6p = batch_norm(name='pg_bn_d6') 77 | self.g_bn_d7p = batch_norm(name='pg_bn_d7') 78 | 79 | self.dataset_name = dataset_name 80 | self.checkpoint_dir = checkpoint_dir 81 | self.build_model() 82 | 83 | def build_model(self): 84 | self.real_data = tf.placeholder(tf.float32, 85 | [self.batch_size, self.image_size, self.image_size, 86 | self.input_c_dim + self.output_c_dim], 87 | name='real_A_and_B_images') 88 | 89 | self.real_B = self.real_data[:, :, :, :self.input_c_dim] 90 | self.real_A = self.real_data[:, :, :, self.input_c_dim:self.input_c_dim + self.output_c_dim] 91 | 92 | self.const_B = self.generator(self.real_A) 93 | self.res_B = self.generator_p(self.real_A) 94 | self.fake_B = self.const_B + self.res_B 95 | 96 | self.real_AB = tf.concat(3, [self.real_A, self.real_B]) 97 | self.fake_AB = tf.concat(3, [self.real_A, self.fake_B]) 98 | self.D, self.D_logits = self.discriminator(self.real_AB, reuse=False) 99 | self.D_, self.D_logits_ = self.discriminator(self.fake_AB, reuse=True) 100 | 101 | 102 | self.d_sum = tf.summary.histogram("d", self.D) 103 | self.d__sum = tf.summary.histogram("d_", self.D_) 104 | # self.fake_B_sum = tf.summary.image("fake_B", self.fake_B) 105 | 106 | self.d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(self.D_logits, tf.ones_like(self.D))) 107 | self.d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(self.D_logits_, tf.zeros_like(self.D_))) 108 | self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(self.D_logits_, tf.ones_like(self.D_))) 109 | self.const_loss = tf.reduce_mean(tf.abs(self.real_B - self.const_B)) 110 | 111 | self.d_loss_real_sum = tf.summary.scalar("d_loss_real", self.d_loss_real) 112 | self.d_loss_fake_sum = tf.summary.scalar("d_loss_fake", self.d_loss_fake) 113 | 114 | self.d_loss = self.d_loss_real + self.d_loss_fake 115 | 116 | self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss) 117 | self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss) 118 | self.const_loss_sum = tf.summary.scalar("const_loss", self.const_loss) 119 | 120 | t_vars = tf.trainable_variables() 121 | 122 | self.d_vars = [var for var in t_vars if 'd_' in var.name] 123 | self.g_vars = [var for var in t_vars if 'pg_' in var.name] 124 | self.const_vars = [var for var in t_vars if 'g_' in var.name] 125 | 126 | self.saver = tf.train.Saver(max_to_keep=2) 127 | 128 | 129 | def load_random_samples(self): 130 | data = np.random.choice(glob('./datasets/{}/val/*.jpg'.format(self.dataset_name)), self.batch_size) 131 | sample = [load_data(sample_file) for sample_file in data] 132 | 133 | if (self.is_grayscale): 134 | sample_images = np.array(sample).astype(np.float32)[:, :, :, None] 135 | else: 136 | sample_images = np.array(sample).astype(np.float32) 137 | return sample_images 138 | 139 | def sample_model(self, sample_dir, epoch, idx): 140 | sample_images = self.load_random_samples() 141 | const, res, fake, d_loss, g_loss, const_loss = self.sess.run( 142 | [self.const_B, self.res_B, self.fake_B, self.d_loss, self.g_loss, self.const_loss], 143 | feed_dict={self.real_data: sample_images} 144 | ) 145 | save_images(const, [self.batch_size, 1], 146 | './{}/train_{:02d}_c.png'.format(sample_dir, epoch)) 147 | save_images(res, [self.batch_size, 1], 148 | './{}/train_{:02d}_r.png'.format(sample_dir, epoch)) 149 | save_images(fake, [self.batch_size, 1], 150 | './{}/train_{:02d}.png'.format(sample_dir, epoch)) 151 | print("[Sample] d_loss: {:.8f}, g_loss: {:.8f}, L1: {:.4f}".format(d_loss, g_loss, const_loss)) 152 | 153 | def 
train(self, args): 154 | """Train pix2pix""" 155 | d_optim = tf.train.AdamOptimizer(args.lr, beta1=args.beta1) \ 156 | .minimize(self.d_loss, var_list=self.d_vars) 157 | g_optim = tf.train.AdamOptimizer(args.lr, beta1=args.beta1) \ 158 | .minimize(self.g_loss, var_list=self.g_vars) 159 | const_optim = tf.train.AdamOptimizer(args.lr, beta1=args.beta1) \ 160 | .minimize(self.const_loss, var_list=self.const_vars) 161 | tf.global_variables_initializer().run() 162 | 163 | self.g_sum = tf.summary.merge([self.d__sum, 164 | self.d_loss_fake_sum, self.g_loss_sum, self.const_loss_sum]) 165 | self.d_sum = tf.summary.merge([self.d_sum, self.d_loss_real_sum, self.d_loss_sum]) 166 | self.writer = tf.summary.FileWriter(args.log_dir, self.sess.graph) 167 | 168 | counter = 1 169 | start_time = time.time() 170 | 171 | if self.load(self.checkpoint_dir): 172 | print(" [*] Load SUCCESS") 173 | else: 174 | print(" [!] Load failed...") 175 | 176 | # np.random.seed(seed=2017) 177 | 178 | for epoch in xrange(args.epoch): 179 | data = glob('./datasets/{}/train/*.jpg'.format(self.dataset_name)) 180 | #np.random.shuffle(data) 181 | batch_idxs = min(len(data), args.train_size) // self.batch_size 182 | 183 | for idx in xrange(0, batch_idxs): 184 | batch_files = data[idx*self.batch_size:(idx+1)*self.batch_size] 185 | batch = [load_data(batch_file) for batch_file in batch_files] 186 | if (self.is_grayscale): 187 | batch_images = np.array(batch).astype(np.float32)[:, :, :, None] 188 | else: 189 | batch_images = np.array(batch).astype(np.float32) 190 | 191 | # Update G network 192 | _, summary_str = self.sess.run([const_optim, self.const_loss_sum], 193 | feed_dict={self.real_data: batch_images}) 194 | self.writer.add_summary(summary_str, counter) 195 | # _, summary_str = self.sess.run([const_optim, self.const_loss_sum], 196 | # feed_dict={self.real_data: batch_images}) 197 | # self.writer.add_summary(summary_str, counter) 198 | 199 | 200 | # Update D network 201 | _, summary_str = self.sess.run([d_optim, self.d_sum], 202 | feed_dict={ self.real_data: batch_images }) 203 | self.writer.add_summary(summary_str, counter) 204 | 205 | # Update G network 206 | _, summary_str = self.sess.run([g_optim, self.g_sum], 207 | feed_dict={ self.real_data: batch_images }) 208 | self.writer.add_summary(summary_str, counter) 209 | 210 | # Run g_optim twice to make sure that d_loss does not go to zero (different from paper) 211 | # _, summary_str = self.sess.run([g_optim, self.g_sum], 212 | # feed_dict={ self.real_data: batch_images }) 213 | # self.writer.add_summary(summary_str, counter) 214 | 215 | errD_fake = self.d_loss_fake.eval({self.real_data: batch_images}) 216 | errD_real = self.d_loss_real.eval({self.real_data: batch_images}) 217 | errG = self.g_loss.eval({self.real_data: batch_images}) 218 | errConst = self.const_loss.eval({self.real_data: batch_images}) 219 | 220 | counter += 1 221 | print("Epoch: [%2d] [%4d/%4d] time: %4.4f\n" 222 | "\tL1=%.8f\t adv_G=%.8f\n" 223 | "\tD_real=%.8f\tD_fake: %.8f" 224 | % (epoch, idx, batch_idxs, time.time() - start_time, 225 | errConst, errG, errD_real, errD_fake)) 226 | 227 | self.sample_model(args.sample_dir, epoch, idx) 228 | 229 | if np.mod(epoch, 10) == 9: 230 | self.save(args.checkpoint_dir, counter) 231 | 232 | def discriminator(self, image, y=None, reuse=False): 233 | # image is 256 x 256 x (input_c_dim + output_c_dim) 234 | if reuse: 235 | tf.get_variable_scope().reuse_variables() 236 | else: 237 | assert tf.get_variable_scope().reuse == False 238 | 239 | h0 = lrelu(conv2d(image, 
self.df_dim, name='d_h0_conv')) 240 | # h0 is (128 x 128 x self.df_dim) 241 | h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv'))) 242 | # h1 is (64 x 64 x self.df_dim*2) 243 | h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv'))) 244 | # h2 is (32x 32 x self.df_dim*4) 245 | h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, d_h=1, d_w=1, name='d_h3_conv'))) 246 | # h3 is (16 x 16 x self.df_dim*8) 247 | h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin') 248 | 249 | return tf.nn.sigmoid(h4), h4 250 | 251 | def generator(self, image, y=None): 252 | s = self.output_size 253 | s2, s4, s8, s16, s32, s64, s128 = int(s/2), int(s/4), int(s/8), int(s/16), int(s/32), int(s/64), int(s/128) 254 | 255 | # image is (256 x 256 x input_c_dim) 256 | e1 = conv2d(image, self.gf_dim, name='g_e1_conv') 257 | # e1 is (128 x 128 x self.gf_dim) 258 | e2 = self.g_bn_e2(conv2d(lrelu(e1), self.gf_dim*2, name='g_e2_conv')) 259 | # e2 is (64 x 64 x self.gf_dim*2) 260 | e3 = self.g_bn_e3(conv2d(lrelu(e2), self.gf_dim*4, name='g_e3_conv')) 261 | # e3 is (32 x 32 x self.gf_dim*4) 262 | e4 = self.g_bn_e4(conv2d(lrelu(e3), self.gf_dim*8, name='g_e4_conv')) 263 | # e4 is (16 x 16 x self.gf_dim*8) 264 | e5 = self.g_bn_e5(conv2d(lrelu(e4), self.gf_dim*8, name='g_e5_conv')) 265 | # e5 is (8 x 8 x self.gf_dim*8) 266 | e6 = self.g_bn_e6(conv2d(lrelu(e5), self.gf_dim*8, name='g_e6_conv')) 267 | # e6 is (4 x 4 x self.gf_dim*8) 268 | e7 = self.g_bn_e7(conv2d(lrelu(e6), self.gf_dim*8, name='g_e7_conv')) 269 | # e7 is (2 x 2 x self.gf_dim*8) 270 | e8 = self.g_bn_e8(conv2d(lrelu(e7), self.gf_dim*8, name='g_e8_conv')) 271 | # e8 is (1 x 1 x self.gf_dim*8) 272 | 273 | self.d1, self.d1_w, self.d1_b = deconv2d(tf.nn.relu(e8), 274 | [self.batch_size, s128, s128, self.gf_dim*8], name='g_d1', with_w=True) 275 | d1 = tf.nn.dropout(self.g_bn_d1(self.d1), 0.5) 276 | d1 = tf.concat(3, [d1, e7]) 277 | # d1 is (2 x 2 x self.gf_dim*8*2) 278 | 279 | self.d2, self.d2_w, self.d2_b = deconv2d(tf.nn.relu(d1), 280 | [self.batch_size, s64, s64, self.gf_dim*8], name='g_d2', with_w=True) 281 | d2 = tf.nn.dropout(self.g_bn_d2(self.d2), 0.5) 282 | d2 = tf.concat(3, [d2, e6]) 283 | # d2 is (4 x 4 x self.gf_dim*8*2) 284 | 285 | self.d3, self.d3_w, self.d3_b = deconv2d(tf.nn.relu(d2), 286 | [self.batch_size, s32, s32, self.gf_dim*8], name='g_d3', with_w=True) 287 | d3 = tf.nn.dropout(self.g_bn_d3(self.d3), 0.5) 288 | d3 = tf.concat(3, [d3, e5]) 289 | # d3 is (8 x 8 x self.gf_dim*8*2) 290 | 291 | self.d4, self.d4_w, self.d4_b = deconv2d(tf.nn.relu(d3), 292 | [self.batch_size, s16, s16, self.gf_dim*8], name='g_d4', with_w=True) 293 | d4 = self.g_bn_d4(self.d4) 294 | d4 = tf.concat(3, [d4, e4]) 295 | # d4 is (16 x 16 x self.gf_dim*8*2) 296 | 297 | self.d5, self.d5_w, self.d5_b = deconv2d(tf.nn.relu(d4), 298 | [self.batch_size, s8, s8, self.gf_dim*4], name='g_d5', with_w=True) 299 | d5 = self.g_bn_d5(self.d5) 300 | d5 = tf.concat(3, [d5, e3]) 301 | # d5 is (32 x 32 x self.gf_dim*4*2) 302 | 303 | self.d6, self.d6_w, self.d6_b = deconv2d(tf.nn.relu(d5), 304 | [self.batch_size, s4, s4, self.gf_dim*2], name='g_d6', with_w=True) 305 | d6 = self.g_bn_d6(self.d6) 306 | d6 = tf.concat(3, [d6, e2]) 307 | # d6 is (64 x 64 x self.gf_dim*2*2) 308 | 309 | self.d7, self.d7_w, self.d7_b = deconv2d(tf.nn.relu(d6), 310 | [self.batch_size, s2, s2, self.gf_dim], name='g_d7', with_w=True) 311 | d7 = self.g_bn_d7(self.d7) 312 | d7 = tf.concat(3, [d7, e1]) 313 | # d7 is (128 x 128 x self.gf_dim*1*2) 314 | 315 | self.d8, self.d8_w, self.d8_b = 
deconv2d(tf.nn.relu(d7), 316 | [self.batch_size, s, s, self.output_c_dim], name='g_d8', with_w=True) 317 | # d8 is (256 x 256 x output_c_dim) 318 | 319 | return tf.nn.tanh(self.d8) 320 | 321 | def generator_p(self, image, y=None): 322 | s = self.output_size 323 | s2, s4, s8, s16, s32, s64, s128 = int(s/2), int(s/4), int(s/8), int(s/16), int(s/32), int(s/64), int(s/128) 324 | 325 | # image is (256 x 256 x input_c_dim) 326 | e1 = conv2d(image, self.gf_dim, name='pg_e1_conv') 327 | # e1 is (128 x 128 x self.gf_dim) 328 | e2 = self.g_bn_e2p(conv2d(lrelu(e1), self.gf_dim*2, name='pg_e2_conv')) 329 | # e2 is (64 x 64 x self.gf_dim*2) 330 | e3 = self.g_bn_e3p(conv2d(lrelu(e2), self.gf_dim*4, name='pg_e3_conv')) 331 | # e3 is (32 x 32 x self.gf_dim*4) 332 | e4 = self.g_bn_e4p(conv2d(lrelu(e3), self.gf_dim*8, name='pg_e4_conv')) 333 | # e4 is (16 x 16 x self.gf_dim*8) 334 | e5 = self.g_bn_e5p(conv2d(lrelu(e4), self.gf_dim*8, name='pg_e5_conv')) 335 | # e5 is (8 x 8 x self.gf_dim*8) 336 | e6 = self.g_bn_e6p(conv2d(lrelu(e5), self.gf_dim*8, name='pg_e6_conv')) 337 | # e6 is (4 x 4 x self.gf_dim*8) 338 | e7 = self.g_bn_e7p(conv2d(lrelu(e6), self.gf_dim*8, name='pg_e7_conv')) 339 | # e7 is (2 x 2 x self.gf_dim*8) 340 | e8 = self.g_bn_e8p(conv2d(lrelu(e7), self.gf_dim*8, name='pg_e8_conv')) 341 | # e8 is (1 x 1 x self.gf_dim*8) 342 | 343 | self.d1, self.d1_w, self.d1_b = deconv2d(tf.nn.relu(e8), 344 | [self.batch_size, s128, s128, self.gf_dim*8], name='pg_d1', with_w=True) 345 | d1 = tf.nn.dropout(self.g_bn_d1p(self.d1), 0.5) 346 | d1 = tf.concat(3, [d1, e7]) 347 | # d1 is (2 x 2 x self.gf_dim*8*2) 348 | 349 | self.d2, self.d2_w, self.d2_b = deconv2d(tf.nn.relu(d1), 350 | [self.batch_size, s64, s64, self.gf_dim*8], name='pg_d2', with_w=True) 351 | d2 = tf.nn.dropout(self.g_bn_d2p(self.d2), 0.5) 352 | d2 = tf.concat(3, [d2, e6]) 353 | # d2 is (4 x 4 x self.gf_dim*8*2) 354 | 355 | self.d3, self.d3_w, self.d3_b = deconv2d(tf.nn.relu(d2), 356 | [self.batch_size, s32, s32, self.gf_dim*8], name='pg_d3', with_w=True) 357 | d3 = tf.nn.dropout(self.g_bn_d3p(self.d3), 0.5) 358 | d3 = tf.concat(3, [d3, e5]) 359 | # d3 is (8 x 8 x self.gf_dim*8*2) 360 | 361 | self.d4, self.d4_w, self.d4_b = deconv2d(tf.nn.relu(d3), 362 | [self.batch_size, s16, s16, self.gf_dim*8], name='pg_d4', with_w=True) 363 | d4 = self.g_bn_d4p(self.d4) 364 | d4 = tf.concat(3, [d4, e4]) 365 | # d4 is (16 x 16 x self.gf_dim*8*2) 366 | 367 | self.d5, self.d5_w, self.d5_b = deconv2d(tf.nn.relu(d4), 368 | [self.batch_size, s8, s8, self.gf_dim*4], name='pg_d5', with_w=True) 369 | d5 = self.g_bn_d5p(self.d5) 370 | d5 = tf.concat(3, [d5, e3]) 371 | # d5 is (32 x 32 x self.gf_dim*4*2) 372 | 373 | self.d6, self.d6_w, self.d6_b = deconv2d(tf.nn.relu(d5), 374 | [self.batch_size, s4, s4, self.gf_dim*2], name='pg_d6', with_w=True) 375 | d6 = self.g_bn_d6p(self.d6) 376 | d6 = tf.concat(3, [d6, e2]) 377 | # d6 is (64 x 64 x self.gf_dim*2*2) 378 | 379 | self.d7, self.d7_w, self.d7_b = deconv2d(tf.nn.relu(d6), 380 | [self.batch_size, s2, s2, self.gf_dim], name='pg_d7', with_w=True) 381 | d7 = self.g_bn_d7p(self.d7) 382 | d7 = tf.concat(3, [d7, e1]) 383 | # d7 is (128 x 128 x self.gf_dim*1*2) 384 | 385 | self.d8, self.d8_w, self.d8_b = deconv2d(tf.nn.relu(d7), 386 | [self.batch_size, s, s, self.output_c_dim], name='pg_d8', with_w=True) 387 | # d8 is (256 x 256 x output_c_dim) 388 | 389 | return tf.nn.tanh(self.d8) 390 | 391 | def save(self, checkpoint_dir, step): 392 | model_name = "pix2pix.model" 393 | model_dir = "%s_%s_%s" % (self.dataset_name, 
self.batch_size, self.output_size) 394 | checkpoint_dir = os.path.join(checkpoint_dir, model_dir) 395 | 396 | if not os.path.exists(checkpoint_dir): 397 | os.makedirs(checkpoint_dir) 398 | 399 | self.saver.save(self.sess, 400 | os.path.join(checkpoint_dir, model_name), 401 | global_step=step) 402 | 403 | def load(self, checkpoint_dir): 404 | print(" [*] Reading checkpoint...") 405 | 406 | model_dir = "%s_%s_%s" % (self.dataset_name, self.batch_size, self.output_size) 407 | checkpoint_dir = os.path.join(checkpoint_dir, model_dir) 408 | 409 | ckpt = tf.train.get_checkpoint_state(checkpoint_dir) 410 | if ckpt and ckpt.model_checkpoint_path: 411 | ckpt_name = os.path.basename(ckpt.model_checkpoint_path) 412 | self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name)) 413 | return True 414 | else: 415 | return False 416 | 417 | def test(self, args): 418 | """Test pix2pix""" 419 | tf.global_variables_initializer().run() 420 | 421 | sample_files = glob('./datasets/{}/val/*.jpg'.format(self.dataset_name)) 422 | 423 | # sort testing input 424 | n = [int(i) for i in map(lambda x: x.split('/')[-1].split('.jpg')[0], sample_files)] 425 | sample_files = [x for (y, x) in sorted(zip(n, sample_files))] 426 | 427 | # load testing input 428 | print("Loading testing images ...") 429 | sample = [load_data(sample_file, is_test=True) for sample_file in sample_files] 430 | 431 | if (self.is_grayscale): 432 | sample_images = np.array(sample).astype(np.float32)[:, :, :, None] 433 | else: 434 | sample_images = np.array(sample).astype(np.float32) 435 | 436 | sample_images = [sample_images[i:i+self.batch_size] 437 | for i in xrange(0, len(sample_images), self.batch_size)] 438 | sample_images = np.array(sample_images) 439 | print(sample_images.shape) 440 | 441 | start_time = time.time() 442 | if self.load(self.checkpoint_dir): 443 | print(" [*] Load SUCCESS") 444 | else: 445 | print(" [!] 
Load failed...") 446 | 447 | for i, sample_image in enumerate(sample_images): 448 | idx = i+1 449 | print("sampling image ", idx) 450 | # samples = self.sess.run( 451 | # self.fake_B, 452 | # feed_dict={self.real_data: sample_image} 453 | # ) 454 | const, res, fake, realA, realB, d_loss, g_loss, const_loss = self.sess.run( 455 | [self.const_B, self.res_B, self.fake_B, self.real_A, self.real_B,self.d_loss, self.g_loss, self.const_loss], 456 | feed_dict={self.real_data: sample_image} 457 | ) 458 | save_images(const, [self.batch_size, 1], 459 | './{}/test_{:04d}_c.png'.format(args.test_dir, idx)) 460 | save_images(res, [self.batch_size, 1], 461 | './{}/test_{:04d}_r.png'.format(args.test_dir, idx)) 462 | save_images(fake, [self.batch_size, 1], 463 | './{}/test_{:04d}.png'.format(args.test_dir, idx)) 464 | save_images(realA, [self.batch_size, 1], 465 | './{}/test_{:04d}_A.png'.format(args.test_dir, idx)) 466 | save_images(realB, [self.batch_size, 1], 467 | './{}/test_{:04d}_B.png'.format(args.test_dir, idx)) 468 | -------------------------------------------------------------------------------- /Adapt_Pix2Pix/ops.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | import tensorflow as tf 4 | 5 | from tensorflow.python.framework import ops 6 | 7 | from utils import * 8 | 9 | class batch_norm(object): 10 | # h1 = lrelu(tf.contrib.layers.batch_norm(conv2d(h0, self.df_dim*2, name='d_h1_conv'),decay=0.9,updates_collections=None,epsilon=0.00001,scale=True,scope="d_h1_conv")) 11 | def __init__(self, epsilon=1e-5, momentum = 0.9, name="batch_norm"): 12 | with tf.variable_scope(name): 13 | self.epsilon = epsilon 14 | self.momentum = momentum 15 | self.name = name 16 | 17 | def __call__(self, x, train=True): 18 | return tf.contrib.layers.batch_norm(x, decay=self.momentum, updates_collections=None, epsilon=self.epsilon, scale=True, scope=self.name) 19 | 20 | def binary_cross_entropy(preds, targets, name=None): 21 | """Computes binary cross entropy given `preds`. 22 | 23 | For brevity, let `x = `, `z = targets`. The logistic loss is 24 | 25 | loss(x, z) = - sum_i (x[i] * log(z[i]) + (1 - x[i]) * log(1 - z[i])) 26 | 27 | Args: 28 | preds: A `Tensor` of type `float32` or `float64`. 29 | targets: A `Tensor` of the same type and shape as `preds`. 30 | """ 31 | eps = 1e-12 32 | with ops.op_scope([preds, targets], name, "bce_loss") as name: 33 | preds = ops.convert_to_tensor(preds, name="preds") 34 | targets = ops.convert_to_tensor(targets, name="targets") 35 | return tf.reduce_mean(-(targets * tf.log(preds + eps) + 36 | (1. - targets) * tf.log(1. 
- preds + eps))) 37 | 38 | def conv_cond_concat(x, y): 39 | """Concatenate conditioning vector on feature map axis.""" 40 | x_shapes = x.get_shape() 41 | y_shapes = y.get_shape() 42 | return tf.concat(3, [x, y*tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])]) 43 | 44 | def conv2d(input_, output_dim, 45 | k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, 46 | name="conv2d"): 47 | with tf.variable_scope(name): 48 | w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim], 49 | initializer=tf.truncated_normal_initializer(stddev=stddev)) 50 | conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME') 51 | 52 | biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0)) 53 | conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape()) 54 | 55 | return conv 56 | 57 | def deconv2d(input_, output_shape, 58 | k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, 59 | name="deconv2d", with_w=False): 60 | with tf.variable_scope(name): 61 | # filter : [height, width, output_channels, in_channels] 62 | w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]], 63 | initializer=tf.random_normal_initializer(stddev=stddev)) 64 | 65 | try: 66 | deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, 67 | strides=[1, d_h, d_w, 1]) 68 | 69 | # Support for verisons of TensorFlow before 0.7.0 70 | except AttributeError: 71 | deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape, 72 | strides=[1, d_h, d_w, 1]) 73 | 74 | biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0)) 75 | deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape()) 76 | 77 | if with_w: 78 | return deconv, w, biases 79 | else: 80 | return deconv 81 | 82 | 83 | def lrelu(x, leak=0.2, name="lrelu"): 84 | return tf.maximum(x, leak*x) 85 | 86 | def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False): 87 | shape = input_.get_shape().as_list() 88 | 89 | with tf.variable_scope(scope or "Linear"): 90 | matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32, 91 | tf.random_normal_initializer(stddev=stddev)) 92 | bias = tf.get_variable("bias", [output_size], 93 | initializer=tf.constant_initializer(bias_start)) 94 | if with_w: 95 | return tf.matmul(input_, matrix) + bias, matrix, bias 96 | else: 97 | return tf.matmul(input_, matrix) + bias 98 | -------------------------------------------------------------------------------- /Adapt_Pix2Pix/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Some codes from https://github.com/Newmu/dcgan_code 3 | """ 4 | from __future__ import division 5 | import math 6 | import json 7 | import random 8 | import pprint 9 | import scipy.misc 10 | import numpy as np 11 | from time import gmtime, strftime 12 | 13 | pp = pprint.PrettyPrinter() 14 | 15 | get_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1]) 16 | 17 | # ----------------------------- 18 | # new added functions for pix2pix 19 | 20 | def load_data(image_path, flip=True, is_test=False): 21 | img_A, img_B = load_image(image_path) 22 | img_A, img_B = preprocess_A_and_B(img_A, img_B, flip=flip, is_test=is_test) 23 | 24 | img_A = img_A/127.5 - 1. 25 | img_B = img_B/127.5 - 1. 
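# [editor's note] The two divisions above rescale 8-bit pixels from [0, 255]
# to [-1, 1] (0 -> -1.0, 127.5 -> 0.0, 255 -> 1.0), matching the tanh output
# range of the generators in model.py; the two halves of the paired image are
# then stacked along the channel axis below, so a single array carries both
# sides of a training pair.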
26 | 27 | img_AB = np.concatenate((img_A, img_B), axis=2) 28 | # img_AB shape: (fine_size, fine_size, input_c_dim + output_c_dim) 29 | return img_AB 30 | 31 | def load_image(image_path): 32 | input_img = imread(image_path) 33 | w = int(input_img.shape[1]) 34 | w2 = int(w/2) 35 | img_A = input_img[:, 0:w2] 36 | img_B = input_img[:, w2:w] 37 | 38 | return img_A, img_B 39 | 40 | def preprocess_A_and_B(img_A, img_B, load_size=286, fine_size=256, flip=True, is_test=False): 41 | if is_test: 42 | img_A = scipy.misc.imresize(img_A, [fine_size, fine_size]) 43 | img_B = scipy.misc.imresize(img_B, [fine_size, fine_size]) 44 | else: 45 | img_A = scipy.misc.imresize(img_A, [load_size, load_size]) 46 | img_B = scipy.misc.imresize(img_B, [load_size, load_size]) 47 | 48 | h1 = int(np.ceil(np.random.uniform(1e-2, load_size-fine_size))) 49 | w1 = int(np.ceil(np.random.uniform(1e-2, load_size-fine_size))) 50 | img_A = img_A[h1:h1+fine_size, w1:w1+fine_size] 51 | img_B = img_B[h1:h1+fine_size, w1:w1+fine_size] 52 | 53 | if flip and np.random.random() > 0.5: 54 | img_A = np.fliplr(img_A) 55 | img_B = np.fliplr(img_B) 56 | 57 | return img_A, img_B 58 | 59 | # ----------------------------- 60 | 61 | def get_image(image_path, image_size, is_crop=True, resize_w=64, is_grayscale = False): 62 | return transform(imread(image_path, is_grayscale), image_size, is_crop, resize_w) 63 | 64 | def save_images(images, size, image_path): 65 | return imsave(inverse_transform(images), size, image_path) 66 | 67 | def imread(path, is_grayscale = False): 68 | if (is_grayscale): 69 | return scipy.misc.imread(path, flatten = True).astype(np.float) 70 | else: 71 | return scipy.misc.imread(path).astype(np.float) 72 | 73 | def merge_images(images, size): 74 | return inverse_transform(images) 75 | 76 | def merge(images, size): 77 | h, w = images.shape[1], images.shape[2] 78 | img = np.zeros((h * size[0], w * size[1], 3)) 79 | for idx, image in enumerate(images): 80 | i = idx % size[1] 81 | j = idx // size[1] 82 | img[j*h:j*h+h, i*w:i*w+w, :] = image 83 | 84 | return img 85 | 86 | def imsave(images, size, path): 87 | return scipy.misc.imsave(path, merge(images, size)) 88 | 89 | def transform(image, npx=64, is_crop=True, resize_w=64): 90 | # npx : # of pixels width/height of image 91 | if is_crop: 92 | cropped_image = center_crop(image, npx, resize_w=resize_w) 93 | else: 94 | cropped_image = image 95 | return np.array(cropped_image)/127.5 - 1. 96 | 97 | def inverse_transform(images): 98 | return (images+1.)/2. 
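# [editor's note] Minimal sanity check (pure numpy, self-contained) of the
# normalization round trip used throughout this file; the hypothetical 8x8
# random image stands in for a real photo. Note that transform() above calls
# center_crop(), which is not defined anywhere in this file, so the default
# is_crop=True path of get_image() would raise a NameError as written.
import numpy as np

pixels = np.random.randint(0, 256, size=(8, 8, 3)).astype(np.float64)
normalized = pixels / 127.5 - 1.0     # transform(): [0, 255] -> [-1, 1]
recovered = (normalized + 1.0) / 2.0  # inverse_transform(): [-1, 1] -> [0, 1]
assert np.allclose(recovered * 255.0, pixels)  # exact round trip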
99 | 100 | 101 | -------------------------------------------------------------------------------- /NRDS/NRDS.m: -------------------------------------------------------------------------------- 1 | %% compute Normalized Relative Discriminator Score (NRSD) 2 | clc; clear; close all 3 | 4 | %% load data 5 | data_path = './save/mat'; 6 | files = dir(fullfile(data_path, '*.mat')); 7 | curve_real = []; 8 | curve_adv1 = []; 9 | curve_adv1e2 = []; 10 | curve_adv1e3 = []; 11 | curve_adv1e4 = []; 12 | for i = 1:length(files) 13 | file = files(i).name; 14 | load(fullfile(data_path, file)) 15 | curve_real = [ curve_real ; [mean(real), std(real)] ]; 16 | curve_adv1 = [ curve_adv1 ; [mean(adv1), std(adv1)] ]; 17 | curve_adv1e2 = [ curve_adv1e2 ; [mean(adv1e2), std(adv1e2)] ]; 18 | curve_adv1e3 = [ curve_adv1e3 ; [mean(adv1e3), std(adv1e3)] ]; 19 | curve_adv1e4 = [ curve_adv1e4 ; [mean(adv1e4), std(adv1e4)] ]; 20 | end 21 | 22 | %% plot 23 | figure; hold on 24 | plot(1:epochs, curve_real(:,1), '-', 'linewidth', 2) 25 | plot(1:epochs, curve_adv1(:,1), '--', 'linewidth', 2) 26 | plot(1:epochs, curve_adv1e2(:,1), ':', 'linewidth', 2) 27 | plot(1:epochs, curve_adv1e3(:,1), '-.', 'linewidth', 2) 28 | plot(1:epochs, curve_adv1e4(:,1), '-', 'linewidth', 2) 29 | legend('real', 'adv1', 'adv1e-2', 'adv1e-3', 'adv1e-4', 'location', 'best') 30 | xlabel('Epoch') 31 | ylabel('Avg. output of discriminator') 32 | grid on 33 | set(gca, 'fontsize', 16) 34 | 35 | %% compute area under the curves 36 | a_real = trapz(curve_real(:,1)); 37 | a_adv1 = trapz(curve_adv1(:,1)); 38 | a_adv1e2 = trapz(curve_adv1e2(:,1)); 39 | a_adv1e3 = trapz(curve_adv1e3(:,1)); 40 | a_adv1e4 = trapz(curve_adv1e4(:,1)); 41 | 42 | din = a_adv1 + a_adv1e2 + a_adv1e3 + a_adv1e4; 43 | 44 | score = nan(4, 1); 45 | score(1) = a_adv1 / din; 46 | score(2) = a_adv1e2 / din; 47 | score(3) = a_adv1e3 / din; 48 | score(4) = a_adv1e4 / din; 49 | 50 | score 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /NRDS/NRDS.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import os 3 | import time 4 | from glob import glob 5 | import tensorflow as tf 6 | import numpy as np 7 | from scipy.io import savemat 8 | from ops import * 9 | 10 | 11 | class NRDS(object): 12 | def __init__(self, 13 | session, # TensorFlow session 14 | size_image=128, # size the input images 15 | size_kernel=5, # size of the kernels in convolution and deconvolution 16 | size_batch=1, # mini-batch size for training and testing, must be square of an integer 17 | num_input_channels=3, # number of channels of input images 18 | enable_bn=True, # enable batch normalization 19 | save_dir='./save', # path to save checkpoints, samples, and summary 20 | real_files_dir=None, # directory of the real data 21 | fake_files_dirs=None # directories of the fake data 22 | ): 23 | 24 | self.session = session 25 | self.image_value_range = (-1, 1) 26 | self.size_image = size_image 27 | self.size_kernel = size_kernel 28 | self.size_batch = size_batch 29 | self.num_input_channels = num_input_channels 30 | self.enable_bn = enable_bn 31 | self.save_dir = save_dir 32 | self.real_files_dir = real_files_dir 33 | self.fake_files_dirs = fake_files_dirs 34 | if self.real_files_dir is None or self.fake_files_dirs is None: 35 | print('Missing real or fake samples!') 36 | exit() 37 | 38 | # ************************************* input to graph 
******************************************************** 39 | self.real = tf.placeholder( 40 | tf.float32, 41 | [self.size_batch, self.size_image, self.size_image, self.num_input_channels], 42 | name='input_real' 43 | ) 44 | self.fake = tf.placeholder( 45 | tf.float32, 46 | [self.size_batch, self.size_image, self.size_image, self.num_input_channels], 47 | name='input_fake' 48 | ) 49 | 50 | # ************************************* build the graph ******************************************************* 51 | # discriminator on real images 52 | self.D_real, self.D_real_logits = self.discriminator( 53 | image=self.real, 54 | enable_bn=self.enable_bn, 55 | reuse_variables=False 56 | ) 57 | # discriminator on real image 58 | self.D_fake, self.D_fake_logits = self.discriminator( 59 | image=self.fake, 60 | enable_bn=self.enable_bn, 61 | reuse_variables=True 62 | ) 63 | 64 | # ************************************* loss functions ******************************************************* 65 | self.loss_real = tf.reduce_mean( 66 | tf.nn.sigmoid_cross_entropy_with_logits(self.D_real_logits, tf.ones_like(self.D_real_logits)) 67 | ) 68 | self.loss_fake = tf.reduce_mean( 69 | tf.nn.sigmoid_cross_entropy_with_logits(self.D_fake_logits, tf.zeros_like(self.D_fake_logits)) 70 | ) 71 | 72 | # ************************************* collect the summary *************************************** 73 | self.loss_real_summary = tf.summary.scalar('real', self.loss_real) 74 | self.loss_fake_summary = tf.summary.scalar('fake', self.loss_fake) 75 | self.summary = tf.summary.merge([ 76 | self.loss_real_summary, self.loss_fake_summary 77 | ]) 78 | self.writer = tf.summary.FileWriter(os.path.join(self.save_dir, 'summary'), self.session.graph) 79 | self.global_step = tf.Variable(0, trainable=False, name='global_step') 80 | 81 | def train(self, 82 | num_epochs=50, # number of epochs 83 | learning_rate=0.0002, # learning rate of optimizer 84 | beta1=0.5, # parameter for Adam optimizer 85 | ): 86 | # *************************** load file names of images ****************************************************** 87 | real_files = glob(os.path.join(self.real_files_dir, '*.jpg')) 88 | print("real samples:\t%d" % len(real_files)) 89 | fake_files_separate = [] 90 | fake_files = [] 91 | for i, fake_dir in enumerate(self.fake_files_dirs): 92 | fake_files_separate.append(glob(os.path.join(fake_dir, '*.jpg'))) 93 | fake_files.extend(glob(os.path.join(fake_dir, '*.jpg'))) 94 | print("%s samples:\t%d" % (self.fake_files_dirs[i].split('/')[-1], len(fake_files_separate[i]))) 95 | 96 | # *********************************** optimizer ************************************************************** 97 | optimizer = tf.train.AdamOptimizer( 98 | learning_rate=learning_rate, 99 | beta1=beta1 100 | ).minimize( 101 | loss=self.loss_real + self.loss_fake, 102 | ) 103 | 104 | # ******************************************* training ******************************************************* 105 | tf.global_variables_initializer().run() 106 | mat_save_path = os.path.join(self.save_dir, 'mat') 107 | np.random.seed(seed=1234) 108 | if not os.path.exists(mat_save_path): 109 | os.makedirs(mat_save_path) 110 | for epoch in xrange(num_epochs): 111 | np.random.shuffle(real_files) 112 | np.random.shuffle(fake_files) 113 | num_batches = min(len(real_files), len(fake_files)) // self.size_batch 114 | for ind_batch in range(0, num_batches): 115 | real_files_batch = real_files[ind_batch * self.size_batch: (ind_batch + 1) * self.size_batch] 116 | fake_files_batch = 
fake_files[ind_batch * self.size_batch: (ind_batch + 1) * self.size_batch] 117 | real_images_batch = [ 118 | load_image( 119 | image_path=batch_file, 120 | image_size=self.size_image, 121 | image_value_range=self.image_value_range, 122 | is_gray=(self.num_input_channels == 1), 123 | ) for batch_file in real_files_batch] 124 | fake_images_batch = [ 125 | load_image( 126 | image_path=batch_file, 127 | image_size=self.size_image, 128 | image_value_range=self.image_value_range, 129 | is_gray=self.num_input_channels == 1, 130 | ) for batch_file in fake_files_batch] 131 | if self.num_input_channels == 1: 132 | batch_images_real = np.array(real_images_batch).astype(np.float32)[:, :, :, None] 133 | batch_images_fake = np.array(fake_images_batch).astype(np.float32)[:, :, :, None] 134 | else: 135 | batch_images_real = np.array(real_images_batch).astype(np.float32) 136 | batch_images_fake = np.array(fake_images_batch).astype(np.float32) 137 | 138 | # Update the discriminator 139 | _, loss_real, loss_fake, summary = self.session.run( 140 | fetches=[optimizer, self.loss_real, self.loss_fake, self.summary], 141 | feed_dict={ 142 | self.real: batch_images_real, 143 | self.fake: batch_images_fake 144 | } 145 | ) 146 | self.writer.add_summary(summary, self.global_step.eval()) 147 | print("\nEpoch %03d/%03d\tBatch %03d/%03d\t loss_real=%.4f\t loss_fake=%.4f\n" 148 | % (epoch+1, num_epochs, ind_batch+1, num_batches, loss_real, loss_fake)) 149 | 150 | # testing on fake data 151 | mat = dict() 152 | mat['epochs'] = num_epochs 153 | mat['batches'] = num_batches 154 | num_competitors = len(fake_files_separate) 155 | for i in range(num_competitors): 156 | files = fake_files_separate[i] 157 | folder_name = self.fake_files_dirs[i].split('/')[-1] 158 | mat[folder_name] = [] 159 | 160 | for ind in range(0, len(files) // self.size_batch): 161 | files_batch = files[ind * self.size_batch: (ind + 1) * self.size_batch] 162 | images_batch = [ 163 | load_image( 164 | image_path=batch_file, 165 | image_size=self.size_image, 166 | image_value_range=self.image_value_range, 167 | is_gray=(self.num_input_channels == 1), 168 | ) for batch_file in files_batch] 169 | if self.num_input_channels == 1: 170 | images_batch = np.array(images_batch).astype(np.float32)[:, :, :, None] 171 | else: 172 | images_batch = np.array(images_batch).astype(np.float32) 173 | output = self.D_fake.eval({self.fake: images_batch}) 174 | mat[folder_name].append(output) 175 | 176 | # testing on real data 177 | mat['real'] = [] 178 | for ind in xrange(0, len(real_files) // self.size_batch): 179 | files_batch = real_files[ind * self.size_batch: (ind + 1) * self.size_batch] 180 | images_batch = [ 181 | load_image( 182 | image_path=batch_file, 183 | image_size=self.size_image, 184 | image_value_range=self.image_value_range, 185 | is_gray=(self.num_input_channels == 1), 186 | ) for batch_file in files_batch] 187 | if self.num_input_channels == 1: 188 | images_batch = np.array(images_batch).astype(np.float32)[:, :, :, None] 189 | else: 190 | images_batch = np.array(images_batch).astype(np.float32) 191 | output = self.D_real.eval({self.real: images_batch}) 192 | mat['real'].append(output) 193 | 194 | savemat(os.path.join(mat_save_path, '{:03d}_{:03d}.mat'.format(epoch + 1, ind_batch + 1)), mat) 195 | 196 | def discriminator(self, image, is_training=True, reuse_variables=False, enable_bn=True, 197 | num_hidden_layer_channels=(64, 128, 256, 512)): 198 | if reuse_variables: 199 | tf.get_variable_scope().reuse_variables() 200 | num_layers = 
len(num_hidden_layer_channels) 201 | current = image 202 | # conv layers with stride 2 203 | for i in range(num_layers): 204 | name = 'D_img_conv' + str(i) 205 | current = conv2d( 206 | input_map=current, 207 | num_output_channels=num_hidden_layer_channels[i], 208 | size_kernel=self.size_kernel, 209 | name=name 210 | ) 211 | if enable_bn: 212 | name = 'D_img_bn' + str(i) 213 | current = tf.contrib.layers.batch_norm( 214 | current, 215 | scale=False, 216 | is_training=is_training, 217 | scope=name, 218 | reuse=reuse_variables 219 | ) 220 | current = tf.nn.relu(current) 221 | # fully connected layer 222 | name = 'D_img_fc1' 223 | current = fc( 224 | input_vector=tf.reshape(current, [self.size_batch, -1]), 225 | num_output_length=1024, 226 | name=name 227 | ) 228 | current = lrelu(current) 229 | name = 'D_img_fc2' 230 | current = fc( 231 | input_vector=current, 232 | num_output_length=1, 233 | name=name 234 | ) 235 | # output 236 | return tf.nn.sigmoid(current), current 237 | 238 | 239 | 240 | -------------------------------------------------------------------------------- /NRDS/checkGPU.py: -------------------------------------------------------------------------------- 1 | # check GPU status 2 | from pynvml import * 3 | from time import sleep 4 | from datetime import datetime 5 | 6 | 7 | def gpu_memory_usage(is_print=True): 8 | try: 9 | nvmlInit() 10 | # version = nvmlSystemGetDriverVersion() 11 | deviceCount = nvmlDeviceGetCount() 12 | GPU = {} 13 | for i in range(deviceCount): 14 | GPU[i] = {} 15 | handle = nvmlDeviceGetHandleByIndex(i) 16 | info = nvmlDeviceGetMemoryInfo(handle) 17 | GPU[i]['total'] = info.total / 1024.0 / 1024.0 / 1024.0 18 | GPU[i]['free'] = info.free / 1024.0 / 1024.0 / 1024.0 19 | if is_print: 20 | print("\nGPU #%d Memory Usage:" 21 | "\n\tTotal:\t%4.2fGB\n\tFree:\t%4.2fGB" % 22 | (i, GPU[i]['total'], GPU[i]['free'])) 23 | print datetime.now() 24 | nvmlShutdown() 25 | return GPU 26 | except: 27 | print "Failed to check GPU status!" 28 | exit(0) 29 | 30 | 31 | def auto_queue(gpu_memory_require=3.2, interval=1, schedule=None): 32 | # input arg: schedule = datetime(year, month, day, hour, minute, second) 33 | if schedule is None: 34 | schedule = datetime.today() 35 | else: 36 | print '\nScheduled time: ', schedule 37 | 38 | # wait until the scheduled time; compare whole datetime objects -- the 39 | # original field-by-field `or` comparison could stall past the schedule 40 | # (e.g. scheduled 10:30, now 11:05 kept waiting because minute 5 < 30) 41 | while datetime.today() < schedule: 42 | sleep(interval) 43 | 44 | 45 | gpu_stat = gpu_memory_usage() 46 | if gpu_stat[0]['total'] < gpu_memory_require: 47 | print 'Memory requirement is larger than GPU total memory' 48 | exit(1) 49 | while gpu_stat[0]['free'] < gpu_memory_require: 50 | sleep(interval) # second 51 | gpu_stat = gpu_memory_usage() 52 | return gpu_stat 53 | 54 | 55 | def set_memory_usage(usage=12.0, allow_growth=True): 56 | auto_queue(gpu_memory_require=usage) 57 | try: 58 | import tensorflow as tf 59 | assert isinstance(usage, (int, float)) # `type(usage) is int or float` was always True 60 | assert usage >= 0 61 | 62 | config = tf.ConfigProto() 63 | gpu_stat = gpu_memory_usage() 64 | total_memory = gpu_stat[0]['total'] 65 | if usage > total_memory: 66 | usage_percentage = 1.0 67 | else: 68 | usage_percentage = usage / total_memory 69 | config.gpu_options.allow_growth = allow_growth 70 | config.gpu_options.per_process_gpu_memory_fraction = usage_percentage 71 | return config 72 | except: 73 | print 'Failed to set memory usage!'
74 | return None 75 | 76 | 77 | if __name__ == '__main__': 78 | gpu_memory_usage() 79 | -------------------------------------------------------------------------------- /NRDS/main.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from NRDS import NRDS 3 | import checkGPU 4 | import numpy as np 5 | 6 | 7 | flags = tf.app.flags 8 | flags.DEFINE_integer(flag_name='epoch', default_value=100, docstring='number of epochs') 9 | flags.DEFINE_integer(flag_name='batch_size', default_value=1, docstring='batch size') 10 | flags.DEFINE_integer(flag_name='is_bn', default_value=0, docstring='enable batch normalization') 11 | flags.DEFINE_string(flag_name='save_dir', default_value='save', docstring='dir for saving training results') 12 | flags.DEFINE_integer(flag_name='is_schedule', default_value=0, docstring='scheduled running') 13 | flags.DEFINE_integer(flag_name='day', default_value=1, docstring='date') 14 | flags.DEFINE_integer(flag_name='hr', default_value=0, docstring='hour') 15 | flags.DEFINE_integer(flag_name='min', default_value=0, docstring='minute') 16 | 17 | FLAGS = flags.FLAGS 18 | 19 | gpu_memory_require = 5.0 20 | 21 | 22 | def main(_): 23 | from datetime import datetime 24 | if FLAGS.is_schedule: 25 | today = datetime.today() 26 | checkGPU.auto_queue( 27 | gpu_memory_require=gpu_memory_require, 28 | interval=1, 29 | schedule=datetime(year=today.year, month=today.month, day=FLAGS.day, hour=FLAGS.hr, minute=FLAGS.min) 30 | ) 31 | config = checkGPU.set_memory_usage( 32 | usage=gpu_memory_require, 33 | allow_growth=True 34 | ) 35 | 36 | # print settings 37 | import pprint 38 | pprint.pprint(FLAGS.__flags) 39 | 40 | with tf.Session(config=config) as session: 41 | model = NRDS( 42 | session, # TensorFlow session 43 | save_dir=FLAGS.save_dir, # path to save checkpoints, samples, and summary 44 | size_batch=FLAGS.batch_size, 45 | enable_bn=FLAGS.is_bn, 46 | real_files_dir='./results/real', 47 | fake_files_dirs=[ 48 | './results/adv1', 49 | './results/adv1e2', 50 | './results/adv1e3', 51 | './results/adv1e4', 52 | ] 53 | ) 54 | model.train( 55 | num_epochs=FLAGS.epoch, # number of epochs 56 | ) 57 | 58 | 59 | if __name__ == '__main__': 60 | if 0: 61 | print 'Run on CPU' 62 | with tf.device("/cpu:0"): 63 | gpu_memory_require = 0.0 64 | tf.app.run() 65 | 66 | tf.app.run() 67 | 68 | -------------------------------------------------------------------------------- /NRDS/ops.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import tensorflow as tf 3 | import numpy as np 4 | from scipy.misc import imread, imresize, imsave 5 | 6 | 7 | class batch_norm(object): 8 | def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"): 9 | with tf.variable_scope(name): 10 | self.epsilon = epsilon 11 | self.momentum = momentum 12 | self.name = name 13 | 14 | def __call__(self, x, train=True): 15 | return tf.contrib.layers.batch_norm(x, decay=self.momentum, updates_collections=None, epsilon=self.epsilon, 16 | scale=True, scope=self.name) 17 | 18 | 19 | def conv2d(input_map, num_output_channels, size_kernel=5, stride=2, name='conv2d'): 20 | with tf.variable_scope(name): 21 | stddev = np.sqrt(2.0 / (np.sqrt(input_map.get_shape()[-1].value * num_output_channels) * size_kernel ** 2)) 22 | stddev = 0.02 23 | kernel = tf.get_variable( 24 | name='w', 25 | shape=[size_kernel, size_kernel, input_map.get_shape()[-1], num_output_channels], 26 | dtype=tf.float32, 27 | 
            initializer=tf.truncated_normal_initializer(stddev=stddev)
        )
        biases = tf.get_variable(
            name='b',
            shape=[num_output_channels],
            dtype=tf.float32,
            initializer=tf.constant_initializer(0.0)
        )
        conv = tf.nn.conv2d(input_map, kernel, strides=[1, stride, stride, 1], padding='SAME')
        return tf.nn.bias_add(conv, biases)


def fc(input_vector, num_output_length, name='fc'):
    with tf.variable_scope(name):
        # as in conv2d, a fixed stddev is used for initialization
        stddev = 0.02
        w = tf.get_variable(
            name='w',
            shape=[input_vector.get_shape()[1], num_output_length],
            dtype=tf.float32,
            initializer=tf.random_normal_initializer(stddev=stddev)
        )
        b = tf.get_variable(
            name='b',
            shape=[num_output_length],
            dtype=tf.float32,
            initializer=tf.constant_initializer(0.0)
        )
        return tf.matmul(input_vector, w) + b


def deconv2d(input_map, output_shape, size_kernel=5, stride=2, stddev=0.02, name='deconv2d'):
    with tf.variable_scope(name):
        # filter: [height, width, output_channels, in_channels]
        kernel = tf.get_variable(
            name='w',
            shape=[size_kernel, size_kernel, output_shape[-1], input_map.get_shape()[-1]],
            dtype=tf.float32,
            initializer=tf.random_normal_initializer(stddev=stddev)
        )
        biases = tf.get_variable(
            name='b',
            shape=[output_shape[-1]],
            dtype=tf.float32,
            initializer=tf.constant_initializer(0.0)
        )
        deconv = tf.nn.conv2d_transpose(input_map, kernel, strides=[1, stride, stride, 1], output_shape=output_shape)
        return tf.nn.bias_add(deconv, biases)


def lrelu(logits, leak=0.2):
    return tf.maximum(logits, leak * logits)


def concat_label(x, label, duplicate=1):
    x_shape = x.get_shape().as_list()
    if duplicate < 1:
        return x
    # duplicate the label to enhance its effect (whether this actually improves
    # the result is an open question)
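    # e.g. with duplicate=2, a length-10 one-hot label is tiled to length 20
    # before being concatenated with x along the feature/channel axis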
    label = tf.tile(label, [1, duplicate])
    label_shape = label.get_shape().as_list()
    if len(x_shape) == 2:
        return tf.concat([x, label], 1)
    elif len(x_shape) == 4:
        label = tf.reshape(label, [x_shape[0], 1, 1, label_shape[-1]])
        return tf.concat([x, label * tf.ones([x_shape[0], x_shape[1], x_shape[2], label_shape[-1]])], 3)


def load_image(
        image_path,                  # path of an image
        image_size=64,               # expected size of the image
        image_value_range=(-1, 1),   # expected pixel value range of the image
        is_gray=False,               # gray scale or color image
):
    if is_gray:
        image = imread(image_path, flatten=True).astype(np.float32)
    else:
        image = imread(image_path).astype(np.float32)
    image = imresize(image, [image_size, image_size])
    image = image.astype(np.float32) * (image_value_range[-1] - image_value_range[0]) / 255.0 + image_value_range[0]
    return image


def save_batch_images(
        batch_images,                # a batch of images
        save_path,                   # path to save the images
        image_value_range=(-1, 1),   # value range of the input batch images
        size_frame=None,             # size of the image matrix (images per row and column)
):
    # transform the pixel values to 0~1
    images = (batch_images - image_value_range[0]) / (image_value_range[-1] - image_value_range[0])
    if size_frame is None:
        auto_size = int(np.ceil(np.sqrt(images.shape[0])))
        size_frame = [auto_size, auto_size]
    img_h, img_w = batch_images.shape[1], batch_images.shape[2]
    frame = np.zeros([img_h * size_frame[0], img_w * size_frame[1], 3])
    for ind, image in enumerate(images):
        ind_col = ind % size_frame[1]
        ind_row = ind // size_frame[1]
        frame[(ind_row * img_h):(ind_row * img_h + img_h), (ind_col * img_w):(ind_col * img_w + img_w), :] = image
    imsave(save_path, np.clip(frame, 0.0, 1.0))


--------------------------------------------------------------------------------
/NRDS/results/adv1/50.png101.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png101.jpg
--------------------------------------------------------------------------------
/NRDS/results/adv1/50.png1010.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png1010.jpg
--------------------------------------------------------------------------------
/NRDS/results/adv1/50.png102.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png102.jpg
--------------------------------------------------------------------------------
/NRDS/results/adv1/50.png103.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png103.jpg
--------------------------------------------------------------------------------
/NRDS/results/adv1/50.png104.jpg:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png104.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png105.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png105.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png106.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png106.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png107.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png107.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png108.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png108.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png109.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png109.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png11.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png11.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png110.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png110.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png12.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png12.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png13.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png13.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png14.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png14.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png15.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png15.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png16.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png16.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png17.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png17.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png18.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png18.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png19.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png19.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png21.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png21.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png210.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png210.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png22.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png22.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png23.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png23.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png24.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png24.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png25.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png25.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png26.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png26.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png27.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png27.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png28.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png28.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png29.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png29.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png31.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png31.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png310.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png310.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png32.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png32.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png33.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png33.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png34.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png34.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png35.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png35.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png36.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png36.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png37.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png37.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png38.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png38.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png39.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png39.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png41.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png41.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png410.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png410.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png42.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png42.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png43.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png43.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png44.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png44.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png45.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png45.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png46.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png46.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png47.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png47.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png48.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png48.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png49.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png49.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png51.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png51.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png510.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png510.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png52.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png52.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png53.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png53.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png54.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png54.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png55.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png55.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png56.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png56.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png57.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png57.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png58.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png58.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png59.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png59.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png61.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png61.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png610.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png610.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png62.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png62.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png63.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png63.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png64.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png64.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png65.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png65.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png66.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png66.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png67.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png67.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png68.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png68.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png69.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png69.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png71.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png71.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png710.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png710.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png72.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png72.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png73.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png73.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png74.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png74.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png75.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png75.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png76.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png76.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png77.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png77.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png78.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png78.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png79.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png79.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png81.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png81.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png810.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png810.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png82.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png82.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png83.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png83.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png84.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png84.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png85.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png85.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png86.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png86.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png87.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png87.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png88.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png88.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png89.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png89.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png91.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png91.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png910.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png910.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png92.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png92.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png93.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png93.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png94.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png94.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png95.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png95.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png96.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png96.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png97.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png97.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png98.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png98.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1/50.png99.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1/50.png99.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e2/50.png11.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e2/50.png11.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e2/50.png12.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e2/50.png12.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e2/50.png13.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e2/50.png13.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e2/50.png14.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e2/50.png14.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e2/50.png15.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e2/50.png15.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e2/50.png16.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e2/50.png16.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e2/50.png17.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e2/50.png17.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e2/50.png18.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e2/50.png18.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e2/50.png19.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e2/50.png19.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e2/50.png21.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e2/50.png21.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e3/50.png11.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e3/50.png11.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e3/50.png12.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e3/50.png12.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e3/50.png13.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e3/50.png13.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e3/50.png14.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e3/50.png14.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e3/50.png15.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e3/50.png15.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e3/50.png16.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e3/50.png16.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e3/50.png17.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e3/50.png17.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e3/50.png18.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e3/50.png18.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e3/50.png19.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e3/50.png19.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e3/50.png21.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e3/50.png21.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e4/50.png11.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e4/50.png11.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e4/50.png12.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e4/50.png12.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e4/50.png13.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e4/50.png13.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e4/50.png14.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e4/50.png14.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e4/50.png15.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e4/50.png15.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e4/50.png16.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e4/50.png16.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e4/50.png17.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e4/50.png17.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e4/50.png18.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e4/50.png18.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e4/50.png19.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e4/50.png19.jpg -------------------------------------------------------------------------------- /NRDS/results/adv1e4/50.png21.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/adv1e4/50.png21.jpg -------------------------------------------------------------------------------- /NRDS/results/real/input.png1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/real/input.png1.jpg -------------------------------------------------------------------------------- /NRDS/results/real/input.png10.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/real/input.png10.jpg -------------------------------------------------------------------------------- /NRDS/results/real/input.png2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/real/input.png2.jpg -------------------------------------------------------------------------------- /NRDS/results/real/input.png3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/real/input.png3.jpg -------------------------------------------------------------------------------- /NRDS/results/real/input.png4.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/real/input.png4.jpg -------------------------------------------------------------------------------- /NRDS/results/real/input.png5.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/real/input.png5.jpg -------------------------------------------------------------------------------- /NRDS/results/real/input.png6.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/real/input.png6.jpg -------------------------------------------------------------------------------- /NRDS/results/real/input.png7.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/real/input.png7.jpg -------------------------------------------------------------------------------- /NRDS/results/real/input.png8.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/real/input.png8.jpg -------------------------------------------------------------------------------- /NRDS/results/real/input.png9.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/results/real/input.png9.jpg -------------------------------------------------------------------------------- /NRDS/results/t: -------------------------------------------------------------------------------- 1 | a 2 | -------------------------------------------------------------------------------- /NRDS/save/mat/001_010.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/001_010.mat -------------------------------------------------------------------------------- /NRDS/save/mat/002_010.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/002_010.mat -------------------------------------------------------------------------------- /NRDS/save/mat/003_010.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/003_010.mat -------------------------------------------------------------------------------- /NRDS/save/mat/004_010.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/004_010.mat -------------------------------------------------------------------------------- /NRDS/save/mat/005_010.mat: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/005_010.mat -------------------------------------------------------------------------------- /NRDS/save/mat/006_010.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/006_010.mat -------------------------------------------------------------------------------- /NRDS/save/mat/007_010.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/007_010.mat -------------------------------------------------------------------------------- /NRDS/save/mat/008_010.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/008_010.mat -------------------------------------------------------------------------------- /NRDS/save/mat/009_010.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/009_010.mat -------------------------------------------------------------------------------- /NRDS/save/mat/010_010.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/010_010.mat -------------------------------------------------------------------------------- /NRDS/save/mat/011_010.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/011_010.mat -------------------------------------------------------------------------------- /NRDS/save/mat/012_010.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/012_010.mat -------------------------------------------------------------------------------- /NRDS/save/mat/013_010.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/013_010.mat -------------------------------------------------------------------------------- /NRDS/save/mat/014_010.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/014_010.mat -------------------------------------------------------------------------------- /NRDS/save/mat/015_010.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/015_010.mat 
--------------------------------------------------------------------------------
/NRDS/save/mat/016_010.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/016_010.mat
--------------------------------------------------------------------------------
/NRDS/save/mat/017_010.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/017_010.mat
--------------------------------------------------------------------------------
/NRDS/save/mat/018_010.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/018_010.mat
--------------------------------------------------------------------------------
/NRDS/save/mat/019_010.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/019_010.mat
--------------------------------------------------------------------------------
/NRDS/save/mat/020_010.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ZZUTK/Decoupled-Learning-Conditional-GAN/46ff2560a2e23c273f68fdb1a529d7e883e07d35/NRDS/save/mat/020_010.mat
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Decoupled-Learning-for-Conditional-Adversarial-Networks
[Decoupled Learning for Conditional Adversarial Networks](https://arxiv.org/pdf/1801.06790.pdf)

## Prerequisites
* Python 2.7.x
* TensorFlow 1.x
* MATLAB
* pynvml

## Adaptation on [Pix2Pix](https://github.com/phillipi/pix2pix)
Training:
```
$ cd Adapt_Pix2Pix
$ python main.py
```

## Adaptation on [CAAE](https://github.com/ZZUTK/Face-Aging-CAAE)
Training:
```
$ cd Adapt_CAAE
$ python main.py
```

## NRDS demo
```
$ cd NRDS
$ python main.py
```

Plot the NRDS curves in MATLAB:
```
>> NRDS
```

## Citation
```
@inproceedings{zhang2018decoupled,
  title={Decoupled learning for conditional adversarial networks},
  author={Zhang, Zhifei and Song, Yang and Qi, Hairong},
  booktitle={2018 IEEE Winter Conference on Applications of Computer Vision (WACV)},
  pages={700--708},
  year={2018},
  organization={IEEE}
}
```
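## Scheduled runs (optional)
`NRDS/main.py` can queue a run until a given time and until enough GPU memory is free (see `auto_queue` and `set_memory_usage` in `checkGPU.py`). A minimal sketch using the flags defined in `main.py`; the day/time values below are placeholders:
```
$ cd NRDS
$ python main.py --is_schedule 1 --day 15 --hr 2 --min 30
```

This waits until 02:30 on day 15 of the current month, then keeps waiting until at least 5 GB of GPU memory (the `gpu_memory_require` constant in `main.py`) is free before training starts.
--------------------------------------------------------------------------------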