├── results
│   ├── CelebA.png
│   └── LSUN_bed.png
├── images
│   ├── DCGAN_Generator.PNG
│   └── DCGAN_Discriminator.PNG
├── db
│   ├── celebA
│   │   └── README.md
│   └── lsun
│       ├── README.md
│       └── lmdb2img.py
├── dbread.py
├── README.md
├── train.py
└── model.py

/results/CelebA.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HyeongminLEE/Tensorflow_DCGAN/HEAD/results/CelebA.png
--------------------------------------------------------------------------------
/results/LSUN_bed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HyeongminLEE/Tensorflow_DCGAN/HEAD/results/LSUN_bed.png
--------------------------------------------------------------------------------
/images/DCGAN_Generator.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HyeongminLEE/Tensorflow_DCGAN/HEAD/images/DCGAN_Generator.PNG
--------------------------------------------------------------------------------
/images/DCGAN_Discriminator.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HyeongminLEE/Tensorflow_DCGAN/HEAD/images/DCGAN_Discriminator.PNG
--------------------------------------------------------------------------------
/db/celebA/README.md:
--------------------------------------------------------------------------------
# CelebA DB (Cropped Face, 156253 Samples)

- Data Download: [link](https://www.dropbox.com/s/e0ig4nf1v94hyj8/CelebA_128crop_FD.zip?dl=0)

- After downloading, unzip the archive

- Create the filelist

```bash
cd <your_database_dir>
dir /b /s > filelist.txt
```

- You can use a filelist name other than filelist.txt if you want.
--------------------------------------------------------------------------------
/db/lsun/README.md:
--------------------------------------------------------------------------------
# LSUN Database (Room images)

- Data Download: [link](https://github.com/fyu/lsun)

- You will get the data as an lmdb database

- lmdb to png conversion

```bash
activate python2
python lmdb2img.py convert <lmdb_dir> --out_dir <output_dir>
```

- Create the filelist

```bash
cd <your_database_dir>
dir /b /s > filelist.txt
```

- You can use a filelist name other than filelist.txt if you want.
- In filelist.txt, delete the line: your_database_dir/filelist.txt
--------------------------------------------------------------------------------
/dbread.py:
--------------------------------------------------------------------------------
import numpy as np
from scipy import misc


class DBreader:
    def __init__(self, filename, batch_size, resize=0, labeled=True, color=True):
        self.color = color
        self.labeled = labeled

        self.batch_size = batch_size
        # filename: path to filelist.txt (the database list)
        with open(filename) as f:
            tmp_filelist = f.readlines()
            tmp_filelist = [x.strip() for x in tmp_filelist]
            tmp_filelist = np.array(tmp_filelist)

        self.file_len = len(tmp_filelist)

        self.filelist = []
        self.labellist = []
        if self.labeled:
            for i in range(self.file_len):
                # Each line of a labeled filelist is "image_path label"
                parts = (tmp_filelist[i]).split(" ")
                self.filelist.append(parts[0])
                self.labellist.append(parts[1])
        else:
            self.filelist = tmp_filelist

        self.batch_idx = 0
        self.total_batch = int(self.file_len / batch_size)
        self.idx_shuffled = np.arange(self.file_len)
        np.random.shuffle(self.idx_shuffled)
        self.resize = resize

        self.filelist = np.array(self.filelist)
        self.labellist = np.array(self.labellist)

    # Method to get the next batch
    def next_batch(self):
        # Reshuffle once every sample has been served
        if self.batch_idx == self.total_batch:
            np.random.shuffle(self.idx_shuffled)
            self.batch_idx = 0

        batch = []
        idx_set = self.idx_shuffled[self.batch_idx * self.batch_size:(self.batch_idx + 1) * self.batch_size]
        batch_filelist = self.filelist[idx_set]

        for i in range(self.batch_size):
            im = misc.imread(batch_filelist[i])
            if self.resize != 0:
                im = misc.imresize(im, self.resize)
            if self.color:
                # Drop the alpha channel if present
                if im.shape[2] > 3:
                    im = im[:, :, 0:3]
            batch.append(im)

        if self.labeled:
            label = self.labellist[idx_set]
            self.batch_idx += 1
            return np.array(batch).astype(np.float32), np.array(label).astype(np.int32)

        self.batch_idx += 1
        return np.array(batch).astype(np.float32)
--------------------------------------------------------------------------------
/db/lsun/lmdb2img.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python2.7
# This code is modified from 'https://github.com/fyu/lsun/blob/master/data.py'

from __future__ import print_function
import argparse
import cv2
import lmdb
import numpy
import os
from os.path import exists, join

__author__ = 'Fisher Yu'
__email__ = 'fy@cs.princeton.edu'
__license__ = 'MIT'


def convert(db_path, out_dir):
    print('Converting', db_path)
    if not exists(out_dir):
        os.makedirs(out_dir)
    env = lmdb.open(db_path, map_size=1099511627776,
                    max_readers=100, readonly=True)
    idx = 0
    with env.begin(write=False) as txn:
        cursor = txn.cursor()
        for key, val in cursor:
            print('[', str(idx).zfill(7), '] ', 'Current key:', key)
            if idx > 607315:
                break
            img = cv2.imdecode(numpy.fromstring(val, dtype=numpy.uint8), 1)
            filedir = join(out_dir, 'Img_' + str(idx).zfill(7) + '.png')
            cv2.imwrite(filedir, img)
            idx += 1


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('command', nargs='?', type=str,
                        choices=['convert'],
                        help='convert: export the images in the lmdb '
                             'databases to the output directory as png files.')
    parser.add_argument('lmdb_path', nargs='+', type=str,
                        help='The path to the lmdb database folder. '
                             'Supports multiple database paths.')
    parser.add_argument('--out_dir', type=str, default='./data')
    parser.add_argument('--flat', action='store_true',
                        help='If enabled, the images are exported into the '
                             'output directory directly instead of '
                             'hierarchical directories.')
    args = parser.parse_args()

    command = args.command
    lmdb_paths = args.lmdb_path

    for lmdb_path in lmdb_paths:
        if command == 'convert':
            convert(lmdb_path, args.out_dir)


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# DCGAN in Tensorflow

Basic Implementation (Study friendly) of DCGAN in Tensorflow

[[Paper](https://arxiv.org/pdf/1511.06434.pdf) | [Post(in Korean)](https://hyeongminlee.github.io/post/gan003_dcgan/) | [Pytorch Version](https://github.com/taeoh-kim/Pytorch_DCGAN)]

- GAN: [[Pytorch](https://github.com/taeoh-kim/GANin50lines)][[Tensorflow](https://github.com/HyeongminLEE/GANin50lines)]
- DCGAN: [[Pytorch](https://github.com/taeoh-kim/Pytorch_DCGAN)][[Tensorflow](https://github.com/HyeongminLEE/Tensorflow_DCGAN)]
- InfoGAN: [[Pytorch](https://github.com/taeoh-kim/Pytorch_Pix2Pix)][Tensorflow]
- Pix2Pix: [[Pytorch](https://github.com/taeoh-kim/Pytorch_Pix2Pix)][[Tensorflow](https://github.com/HyeongminLEE/Tensorflow_Pix2Pix)]
- DiscoGAN: [[Pytorch](https://github.com/taeoh-kim/Pytorch_DiscoGAN)][[Tensorflow](https://github.com/HyeongminLEE/Tensorflow_DiscoGAN)]

## 1. Environments

- Windows 10
- Python 3.5.3 (Anaconda)
- Tensorflow 1.4.0
- Numpy 1.13.1
- lmdb (pip install lmdb): for LSUN Dataset
- cv2 (conda install -c conda-forge opencv): for LSUN Dataset


## 2. Networks and Parameters

### 2.1 Hyper-Parameters

- Image Size = 64x64 (both in CelebA and LSUN-Bedroom)
- Batch Size = 128 (anything down to ~32 is OK)
- Learning Rate = 0.0002
- Adam_beta1 = 0.5
- z_dim = 100
- Epochs = 5 are enough for CelebA, 1 is enough for LSUN. Training can sometimes diverge.

### 2.2 Generator Networks (model.py)

![DCGAN Generator architecture](images/DCGAN_Generator.PNG)
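
As a quick sanity check of the architecture above (this snippet is illustrative only, not part of the repo), each transposed convolution in `generate()` of model.py multiplies the spatial size by its stride, taking the 100-dim z vector from 1x1 up to a 64x64x3 image:

```python
# Illustrative only: trace the generator's feature-map shapes from model.py.
# z (100-dim) is reshaped to 1x1x100, then five transposed convolutions
# (stride 4, then stride 2 four times) upsample it to a 64x64x3 image.
size, channels = 1, 100  # after reshaping z
for stride, out_channels in [(4, 1024), (2, 512), (2, 256), (2, 128), (2, 3)]:
    size, channels = size * stride, out_channels  # stride-s deconv scales H and W by s
    print('{0}x{0}x{1}'.format(size, channels))
# 4x4x1024 -> 8x8x512 -> 16x16x256 -> 32x32x128 -> 64x64x3
```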

### 2.3 Discriminator Networks (model.py)

![DCGAN Discriminator architecture](images/DCGAN_Discriminator.PNG)
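
The discriminator mirrors the generator, downsampling the 64x64x3 image to a single sigmoid output with leaky ReLU activations. The `lrelu()` in model.py is written in an algebraic form; a small NumPy sketch (illustrative only) showing it equals the usual `max(x, leak * x)`:

```python
# Illustrative only: model.py computes leaky ReLU as f1*x + f2*|x|.
# For x >= 0 this gives x; for x < 0 it gives leak*x, i.e. max(x, leak*x).
import numpy as np

def lrelu(x, leak=0.2):
    f1 = 0.5 * (1 + leak)
    f2 = 0.5 * (1 - leak)
    return f1 * x + f2 * np.abs(x)

x = np.array([-2.0, -0.5, 0.0, 1.0, 3.0])
assert np.allclose(lrelu(x), np.maximum(x, 0.2 * x))  # [-0.4, -0.1, 0., 1., 3.]
```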


## 3. Run (Train)

You can modify the hyper-parameters; see the argument-parsing part of train.py.

### 3.1 CelebA DB (Cropped Face, 156253 Samples)

- Database Setting: [link](https://github.com/HyeongminLEE/Tensorflow_DCGAN/tree/master/db/celebA)

- Train & Test

```bash
python train.py --filelist <filelist_dir> --out_dir <output_dir>
```

- Test results will be saved in the output directory

### 3.2 LSUN-Bedroom DB (3033042 Samples)

- Database Setting: [link](https://github.com/HyeongminLEE/Tensorflow_DCGAN/tree/master/db/lsun)

- Train & Test

```bash
python train.py --filelist <filelist_dir> --out_dir <output_dir>
```

- Test results will be saved in the output directory


## 4. Results

DCGAN with CelebA (6 Epochs)

![DCGAN results on CelebA](results/CelebA.png)

DCGAN with LSUN (1 Epoch)

![DCGAN results on LSUN-Bedroom](results/LSUN_bed.png)
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
import os
import argparse
import tensorflow as tf
import numpy as np
import dbread as db
from model import DCGAN
import scipy.misc

parser = argparse.ArgumentParser(description='Easy Implementation of DCGAN')

# parameters
parser.add_argument('--filelist', type=str, default='filelist.txt')
parser.add_argument('--out_dir', type=str, default='./output')
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=32)


# Function to save the generated results as a single nh x nw image grid
def save_visualization(X, nh_nw, save_path='./vis/sample.jpg'):
    nh, nw = nh_nw
    h, w = X.shape[1], X.shape[2]
    img = np.zeros((h * nh, w * nw, 3))

    for n, x in enumerate(X):
        j = int(n / nw)
        i = int(n % nw)
        img[j * h:j * h + h, i * w:i * w + w, :] = x

    scipy.misc.imsave(save_path, img)


def main():
    args = parser.parse_args()
    filelist_dir = args.filelist
    output_dir = args.out_dir

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    total_epoch = args.epochs
    batch_size = args.batch_size
    n_noise = 100

    database = db.DBreader(filelist_dir, batch_size, resize=[64, 64, 3], labeled=False)

    sess = tf.Session()
    model = DCGAN(sess, batch_size)
    sess.run(tf.global_variables_initializer())

    total_batch = database.total_batch

    visualization_num = 14 * 14
    noise_test = np.random.normal(size=(visualization_num, n_noise))

    loss_D = 0.0
    loss_G = 0.0
    for epoch in range(total_epoch):
        for step in range(total_batch):
            batch_xs = database.next_batch()  # Get the next batch
            batch_xs = batch_xs * (2.0 / 255.0) - 1  # normalize pixels from [0, 255] to [-1, 1]
            noise_g = np.random.normal(size=(batch_size, n_noise))
            noise_d = np.random.normal(size=(batch_size, n_noise))

            # Train the Generator twice per Discriminator update for the first 200 steps
            if epoch == 0 and step < 200:
                advantage = 2
            else:
                advantage = 1

            if step % advantage == 0:
                loss_D = model.train_discrim(batch_xs, noise_d)  # Train Discriminator and get the loss value
            loss_G = model.train_gen(noise_g)  # Train Generator and get the loss value

            print('Epoch: [', epoch, '/', total_epoch, '], ', 'Step: [', step, '/', total_batch, '], D_loss: ',
                  loss_D, ', G_loss: ', loss_G)

            if step == 0 or (step + 1) % 10 == 0:
                generated_samples = model.sample_generator(noise_test, batch_size=visualization_num)
                savepath = output_dir + '/output_' + 'EP' + str(epoch).zfill(3) + "_Batch" + str(step).zfill(6) + '.jpg'
                save_visualization(generated_samples, (14, 14), save_path=savepath)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/model.py:
--------------------------------------------------------------------------------
import tensorflow as tf
import numpy as np


# Class for a batch normalization node
class batch_norm(object):
    def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"):
        with tf.variable_scope(name):
            self.epsilon = epsilon
            self.momentum = momentum
            self.name = name

    def __call__(self, x, train=True):
        return tf.contrib.layers.batch_norm(x,
                                            decay=self.momentum,
                                            updates_collections=None,
                                            epsilon=self.epsilon,
                                            scale=True,
                                            is_training=train,
                                            scope=self.name,
                                            reuse=tf.AUTO_REUSE  # if tensorflow version < 1.4, delete this line
                                            )


# leaky relu function: f1 * X + f2 * |X| equals max(X, leak * X)
def lrelu(X, leak=0.2):
    f1 = 0.5 * (1 + leak)
    f2 = 0.5 * (1 - leak)
    return f1 * X + f2 * tf.abs(X)


class DCGAN:
    # Network Parameters
    def __init__(self, sess, batch_size):
        self.learning_rate = 0.0002

        self.sess = sess

        self.batch_size = batch_size

        self.image_shape = [64, 64, 3]
        self.dim_z = 100
        self.dim_W1 = 1024
        self.dim_W2 = 512
        self.dim_W3 = 256
        self.dim_W4 = 128
        self.dim_W5 = 3

        # Generator weights: 4x4 transposed-convolution kernels
        self.G_W1 = tf.Variable(tf.truncated_normal([4, 4, self.dim_W1, self.dim_z], stddev=0.02), name="G_W1")
        self.G_bn1 = batch_norm(name="G_bn1")

        self.G_W2 = tf.Variable(tf.truncated_normal([4, 4, self.dim_W2, self.dim_W1], stddev=0.02), name='G_W2')
        self.G_bn2 = batch_norm(name="G_bn2")

        self.G_W3 = tf.Variable(tf.truncated_normal([4, 4, self.dim_W3, self.dim_W2], stddev=0.02), name='G_W3')
        self.G_bn3 = batch_norm(name="G_bn3")

        self.G_W4 = tf.Variable(tf.truncated_normal([4, 4, self.dim_W4, self.dim_W3], stddev=0.02), name='G_W4')
        self.G_bn4 = batch_norm(name="G_bn4")

        self.G_W5 = tf.Variable(tf.truncated_normal([4, 4, self.dim_W5, self.dim_W4], stddev=0.02), name='G_W5')

        # Discriminator weights: 4x4 convolution kernels
        self.D_W1 = tf.Variable(tf.truncated_normal([4, 4, self.dim_W5, self.dim_W4], stddev=0.02), name='D_W1')

        self.D_W2 = tf.Variable(tf.truncated_normal([4, 4, self.dim_W4, self.dim_W3], stddev=0.02), name='D_W2')
        self.D_bn2 = batch_norm(name="D_bn2")

        self.D_W3 = tf.Variable(tf.truncated_normal([4, 4, self.dim_W3, self.dim_W2], stddev=0.02), name='D_W3')
        self.D_bn3 = batch_norm(name="D_bn3")

        self.D_W4 = tf.Variable(tf.truncated_normal([4, 4, self.dim_W2, self.dim_W1], stddev=0.02), name='D_W4')
        self.D_bn4 = batch_norm(name="D_bn4")

        self.D_W5 = tf.Variable(tf.truncated_normal([4, 4, self.dim_W1, 1], stddev=0.02), name='D_W5')

        self.gen_params = [
            self.G_W1,
            self.G_W2,
            self.G_W3,
            self.G_W4,
            self.G_W5
        ]

        self.discrim_params = [
            self.D_W1,
            self.D_W2,
            self.D_W3,
            self.D_W4,
            self.D_W5
        ]

        self._build_model()

    # Build the Network
    def _build_model(self):
        self.Z = tf.placeholder(tf.float32, [self.batch_size, self.dim_z])

        self.image_real = tf.placeholder(tf.float32, [self.batch_size] + self.image_shape)
        image_gen = self.generate(self.Z)

        d_real = self.discriminate(self.image_real)
        d_gen = self.discriminate(image_gen)

        # Standard GAN losses (non-saturating heuristic for the generator)
        self.discrim_cost = -tf.reduce_mean(tf.log(d_real) + tf.log(1 - d_gen))

        self.gen_cost = -tf.reduce_mean(tf.log(d_gen))

        self.train_op_discrim = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5).minimize(self.discrim_cost,
                                                                                               var_list=self.discrim_params)
        self.train_op_gen = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5).minimize(self.gen_cost,
                                                                                           var_list=self.gen_params)

    def generate(self, Z):
        # z -> 1x1x100 -> 4x4x1024 -> 8x8x512 -> 16x16x256 -> 32x32x128 -> 64x64x3
        h1 = tf.reshape(Z, [self.batch_size, 1, 1, self.dim_z])
        h1 = tf.nn.conv2d_transpose(h1, self.G_W1, output_shape=[self.batch_size, 4, 4, self.dim_W1],
                                    strides=[1, 4, 4, 1])
        h1 = tf.nn.relu(self.G_bn1(h1))

        h2 = tf.nn.conv2d_transpose(h1, self.G_W2, output_shape=[self.batch_size, 8, 8, self.dim_W2],
                                    strides=[1, 2, 2, 1])
        h2 = tf.nn.relu(self.G_bn2(h2))

        h3 = tf.nn.conv2d_transpose(h2, self.G_W3, output_shape=[self.batch_size, 16, 16, self.dim_W3],
                                    strides=[1, 2, 2, 1])
        h3 = tf.nn.relu(self.G_bn3(h3))

        h4 = tf.nn.conv2d_transpose(h3, self.G_W4, output_shape=[self.batch_size, 32, 32, self.dim_W4],
                                    strides=[1, 2, 2, 1])
        h4 = tf.nn.relu(self.G_bn4(h4))

        h5 = tf.nn.conv2d_transpose(h4, self.G_W5, output_shape=[self.batch_size, 64, 64, self.dim_W5],
                                    strides=[1, 2, 2, 1])

        x = tf.nn.tanh(h5)
        return x

    def discriminate(self, image):
        # 64x64x3 -> 32x32x128 -> 16x16x256 -> 8x8x512 -> 4x4x1024 -> 1
        h1 = lrelu(tf.nn.conv2d(image, self.D_W1, strides=[1, 2, 2, 1], padding='SAME'))
        h2 = lrelu(self.D_bn2(tf.nn.conv2d(h1, self.D_W2, strides=[1, 2, 2, 1], padding='SAME')))
        h3 = lrelu(self.D_bn3(tf.nn.conv2d(h2, self.D_W3, strides=[1, 2, 2, 1], padding='SAME')))
        h4 = lrelu(self.D_bn4(tf.nn.conv2d(h3, self.D_W4, strides=[1, 2, 2, 1], padding='SAME')))
        h5 = lrelu(tf.nn.conv2d(h4, self.D_W5, strides=[1, 4, 4, 1], padding='SAME'))
        h5 = tf.reshape(h5, [self.batch_size, 1])
        y = tf.nn.sigmoid(h5)
        return y

    # Method for generating fake images with an arbitrary batch size
    # (note: this rebuilds the generator graph on every call, reusing the trained weights)
    def sample_generator(self, noise_z, batch_size=1):
        noise_z = np.array(noise_z).reshape([batch_size, self.dim_z])

        Z = tf.placeholder(tf.float32, [batch_size, self.dim_z])
        h1 = tf.reshape(Z, [batch_size, 1, 1, self.dim_z])
        h1 = tf.nn.conv2d_transpose(h1, self.G_W1, output_shape=[batch_size, 4, 4, self.dim_W1],
                                    strides=[1, 4, 4, 1])
        h1 = tf.nn.relu(self.G_bn1(h1))

        output_shape_l2 = [batch_size, 8, 8, self.dim_W2]
        h2 = tf.nn.conv2d_transpose(h1, self.G_W2, output_shape=output_shape_l2, strides=[1, 2, 2, 1])
        h2 = tf.nn.relu(self.G_bn2(h2))

        output_shape_l3 = [batch_size, 16, 16, self.dim_W3]
        h3 = tf.nn.conv2d_transpose(h2, self.G_W3, output_shape=output_shape_l3, strides=[1, 2, 2, 1])
        h3 = tf.nn.relu(self.G_bn3(h3))

        output_shape_l4 = [batch_size, 32, 32, self.dim_W4]
        h4 = tf.nn.conv2d_transpose(h3, self.G_W4, output_shape=output_shape_l4, strides=[1, 2, 2, 1])
        h4 = tf.nn.relu(self.G_bn4(h4))

        output_shape_l5 = [batch_size, 64, 64, self.dim_W5]
        h5 = tf.nn.conv2d_transpose(h4, self.G_W5, output_shape=output_shape_l5, strides=[1, 2, 2, 1])

        x = tf.nn.tanh(h5)

        generated_samples = self.sess.run(x, feed_dict={Z: noise_z})
        generated_samples = (generated_samples + 1.) / 2.  # map tanh output from [-1, 1] back to [0, 1]
        return generated_samples

    # Train the Generator and return the loss
    def train_gen(self, noise_z):
        _, loss_val_G = self.sess.run([self.train_op_gen, self.gen_cost], feed_dict={self.Z: noise_z})
        return loss_val_G

    # Train the Discriminator and return the loss
    def train_discrim(self, batch_xs, noise_z):
        _, loss_val_D = self.sess.run([self.train_op_discrim, self.discrim_cost],
                                      feed_dict={self.image_real: batch_xs, self.Z: noise_z})
        return loss_val_D
--------------------------------------------------------------------------------
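
A note on the losses in model.py: the README mentions training can sometimes diverge, and one common cause with this `-tf.log(...)` formulation is `tf.log` saturating to -inf when the discriminator output hits exactly 0 or 1. The sketch below (an assumption-laden alternative, not what this repo uses) computes the same losses from pre-sigmoid logits via `tf.nn.sigmoid_cross_entropy_with_logits`; the names `gan_losses`, `d_real_logits`, and `d_gen_logits` are hypothetical, and it presumes `discriminate()` is changed to return `h5` before the sigmoid.

```python
# A hedged sketch of numerically stabler GAN losses (TF 1.x API).
# Assumes the discriminator returns pre-sigmoid logits instead of probabilities.
import tensorflow as tf

def gan_losses(d_real_logits, d_gen_logits):
    # Discriminator: push real images toward label 1, generated toward label 0
    d_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(d_real_logits), logits=d_real_logits) +
        tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(d_gen_logits), logits=d_gen_logits))
    # Generator: try to make the discriminator label generated images as 1
    g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(d_gen_logits), logits=d_gen_logits))
    return d_loss, g_loss
```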