├── checkpoints
│   └── checkpoint
├── readme
│   ├── eq_gamma.png
│   ├── eq_global.png
│   ├── eq_losses.png
│   ├── eq_objective.png
│   ├── conv_measure_vis.png
│   ├── eq_conv_measure.png
│   ├── generated_from_Z.png
│   └── eq_autoencoder_loss.png
├── config.py
├── datasets
│   ├── prepare_celeba.py
│   └── download_celebA.py
├── LICENSE
├── utils
│   ├── custom_ops.py
│   └── misc.py
├── generator.py
├── discriminator.py
├── README.md
└── main.py

--------------------------------------------------------------------------------
/checkpoints/checkpoint:
--------------------------------------------------------------------------------
model_checkpoint_path: "BEGAN_64_64_0321.tfmod"
--------------------------------------------------------------------------------
/readme/eq_gamma.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/artcg/BEGAN/HEAD/readme/eq_gamma.png
--------------------------------------------------------------------------------
/readme/eq_global.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/artcg/BEGAN/HEAD/readme/eq_global.png
--------------------------------------------------------------------------------
/readme/eq_losses.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/artcg/BEGAN/HEAD/readme/eq_losses.png
--------------------------------------------------------------------------------
/readme/eq_objective.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/artcg/BEGAN/HEAD/readme/eq_objective.png
--------------------------------------------------------------------------------
/readme/conv_measure_vis.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/artcg/BEGAN/HEAD/readme/conv_measure_vis.png
--------------------------------------------------------------------------------
/readme/eq_conv_measure.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/artcg/BEGAN/HEAD/readme/eq_conv_measure.png
--------------------------------------------------------------------------------
/readme/generated_from_Z.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/artcg/BEGAN/HEAD/readme/generated_from_Z.png
--------------------------------------------------------------------------------
/readme/eq_autoencoder_loss.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/artcg/BEGAN/HEAD/readme/eq_autoencoder_loss.png
--------------------------------------------------------------------------------
/config.py:
--------------------------------------------------------------------------------
checkpoint_path = 'checkpoints'
checkpoint_prefix = 'BEGAN_64_64'
data_path = 'datasets/CelebA_64_64.h5'
--------------------------------------------------------------------------------
/datasets/prepare_celeba.py:
--------------------------------------------------------------------------------
from glob import glob
import os
import numpy as np
import h5py
from tqdm import tqdm
# Note: imread/imresize are gone from newer SciPy releases; run with an
# older SciPy (plus Pillow) or port these two calls to imageio/PIL.
from scipy.misc import imread, imresize

filenames = glob(os.path.join("img_align_celeba", "*.jpg"))
filenames = np.sort(filenames)
w, h = 64, 64  # Change this if you wish to use larger images
data = np.zeros((len(filenames), w * h * 3),
                dtype=np.uint8)

# This preprocessing is appropriate for CelebA but should be adapted
# (or removed entirely) for other datasets.


def get_image(image_path, w=64, h=64):
    # Scale so the width becomes w, then centre-crop vertically to h.
    im = imread(image_path).astype(float)
    orig_h, orig_w = im.shape[:2]
    new_h = int(orig_h * w / orig_w)
    im = imresize(im, (new_h, w))
    margin = int(round((new_h - h) / 2))
    return im[margin:margin + h]


for n, fname in tqdm(enumerate(filenames), total=len(filenames)):
    image = get_image(fname, w, h)
    data[n] = image.flatten()

# Write to the path that config.py (data_path) expects to read.
with h5py.File('datasets/CelebA_64_64.h5', 'w') as f:
    f.create_dataset("images", data=data)
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2017 Arthur Goldberg

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/utils/custom_ops.py:
--------------------------------------------------------------------------------
import tensorflow as tf
import numpy as np


def leaky_rectify(x, leakiness=0.01):
    assert leakiness <= 1
    return tf.maximum(x, leakiness * x)


def custom_conv2d(input_layer, output_dim, k_h=5, k_w=5, d_h=2, d_w=2,
                  stddev=0.02, in_dim=None, padding='SAME', scope="conv2d"):
    with tf.variable_scope(scope):
        w = tf.get_variable('w', [k_h, k_w, in_dim or input_layer.shape[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev))
        conv = tf.nn.conv2d(input_layer, w,
                            strides=[1, d_h, d_w, 1], padding=padding)
        b = tf.get_variable("b", shape=[output_dim],
                            initializer=tf.constant_initializer(0.))
        return tf.nn.bias_add(conv, b)


def custom_fc(input_layer, output_size, scope='Linear',
              in_dim=None, stddev=0.02, bias_start=0.0):
    shape = input_layer.shape
    if len(shape) > 2:  # flatten everything beyond the batch dimension
        input_layer = tf.reshape(input_layer, [-1, int(np.prod(shape[1:]))])
        shape = input_layer.shape
    with tf.variable_scope(scope):
        matrix = tf.get_variable("weight",
                                 [in_dim or shape[1], output_size],
                                 dtype=tf.float32,
                                 initializer=tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", [output_size],
                               initializer=tf.constant_initializer(bias_start))
        return tf.nn.bias_add(tf.matmul(input_layer, matrix), bias)
--------------------------------------------------------------------------------
/datasets/download_celebA.py:
--------------------------------------------------------------------------------
"""
Author: Akash Rana (github.com/akash9182)

Modification of
- https://github.com/carpedm20/DCGAN-tensorflow/blob/master/download.py
- http://stackoverflow.com/a/39225039
"""

import os
import zipfile
import requests
from tqdm import tqdm

base_path = './'


def download_file_from_google_drive(id, destination):
    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()

    response = session.get(URL, params={'id': id}, stream=True)
    token = get_confirm_token(response)

    if token:
        params = {'id': id, 'confirm': token}
        response = session.get(URL, params=params, stream=True)

    save_response_content(response, destination)


def get_confirm_token(response):
    for key, value in response.cookies.items():
        if key.startswith('download_warning'):
            return value
    return None


def save_response_content(response, destination, chunk_size=32 * 1024):
    total_size = int(response.headers.get('content-length', 0))
    # Track progress in bytes (not chunks) so the bar actually reaches 100%.
    with open(destination, "wb") as f, \
            tqdm(total=total_size, unit='B', unit_scale=True, desc=destination) as bar:
        for chunk in response.iter_content(chunk_size):
            if chunk:  # filter out keep-alive chunks
                f.write(chunk)
                bar.update(len(chunk))


def unzip(filepath):
    print("Extracting: " + filepath)
    base_path = os.path.dirname(filepath)
    with zipfile.ZipFile(filepath) as zf:
        zf.extractall(base_path)
    os.remove(filepath)


def download_celeb_a(base_path):
    data_path = os.path.join(base_path, 'CelebA')
    images_path = os.path.join(data_path, 'images')
    if os.path.exists(data_path):
        print('[!] Found Celeb-A - skip')
        return

    filename, drive_id = "img_align_celeba.zip", "0B7EVK8r0v71pZjFTYXZWM3FlRnM"
    save_path = os.path.join(base_path, filename)

    if os.path.exists(save_path):
        print('[*] {} already exists'.format(save_path))
    else:
        download_file_from_google_drive(drive_id, save_path)

    with zipfile.ZipFile(save_path) as zf:
        zf.extractall(base_path)
    if not os.path.exists(data_path):
        os.mkdir(data_path)
    os.rename(os.path.join(base_path, "img_align_celeba"), images_path)
    os.remove(save_path)


if __name__ == '__main__':
    download_celeb_a(base_path)
--------------------------------------------------------------------------------
/generator.py:
--------------------------------------------------------------------------------
import tensorflow as tf
from utils.custom_ops import custom_fc, custom_conv2d


def decoder(Z, batch_size, num_filters, hidden_size, image_size):
    '''
    The Boundary Equilibrium GAN deliberately uses a simple generator
    architecture.

    Upsampling is 3x3 convolutions, with nearest neighbour resizing
    to the desired resolution.

    Args:
        Z: Batch of latent vectors
        batch_size: Batch size of generations (unused; the batch
            dimension is inferred from Z)
        num_filters: Number of filters in convolutional layers
        hidden_size: Dimensionality of encoding
        image_size: First dimension of generated image (must be 64 or 128)
    Returns:
        Flattened tensor of generated images, with dimensionality:
        [batch_size, image_size * image_size * 3]
    '''
    # Project the latent vector to an 8x8 feature map, then repeatedly
    # apply two 3x3 convolutions followed by a nearest-neighbour upsample
    # (8 -> 16 -> 32 -> 64 -> optionally 128).
    layer_1 = custom_fc(Z, 8 * 8 * num_filters, scope='l1')
    layer_1 = tf.reshape(layer_1, [-1, 8, 8, num_filters])  # '-1' is batch size

    conv_1 = custom_conv2d(layer_1, num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='c1')
    conv_1 = tf.nn.elu(conv_1)

    conv_2 = custom_conv2d(conv_1, num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='c2')
    conv_2 = tf.nn.elu(conv_2)

    layer_2 = tf.image.resize_nearest_neighbor(conv_2, [16, 16])

    conv_3 = custom_conv2d(layer_2, num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='c3')
    conv_3 = tf.nn.elu(conv_3)

    conv_4 = custom_conv2d(conv_3, num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='c4')
    conv_4 = tf.nn.elu(conv_4)

    layer_3 = tf.image.resize_nearest_neighbor(conv_4, [32, 32])

    conv_5 = custom_conv2d(layer_3, num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='c5')
    conv_5 = tf.nn.elu(conv_5)

    conv_6 = custom_conv2d(conv_5, num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='c6')
    conv_6 = tf.nn.elu(conv_6)

    layer_4 = tf.image.resize_nearest_neighbor(conv_6, [64, 64])

    conv_7 = custom_conv2d(layer_4, num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='c7')
    conv_7 = tf.nn.elu(conv_7)

    conv_8 = custom_conv2d(conv_7, num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='c8')
    conv_8 = tf.nn.elu(conv_8)

    if image_size == 64:
        im = conv_8
    else:
        layer_5 = tf.image.resize_nearest_neighbor(conv_8, [128, 128])

        conv_9 = custom_conv2d(layer_5, num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='c9')
        conv_9 = tf.nn.elu(conv_9)

        conv_10 = custom_conv2d(conv_9, num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='c10')
        im = tf.nn.elu(conv_10)

    im = custom_conv2d(im, 3, k_h=3, k_w=3, d_h=1, d_w=1, scope='im')
    im = tf.sigmoid(im)
    im = tf.reshape(im, [-1, image_size * image_size * 3])
    return im


def began_generator(Z, batch_size, num_filters, hidden_size, image_size,
                    scope_name="generator", reuse_scope=False):
    with tf.variable_scope(scope_name) as scope:
        if reuse_scope:
            scope.reuse_variables()
        return decoder(Z, batch_size, num_filters, hidden_size, image_size)
--------------------------------------------------------------------------------
/utils/misc.py:
--------------------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
import scipy.misc
from glob import glob
from config import data_path


def loadData(size):
    import h5py
    with h5py.File(data_path, 'r') as hf:
        faces = hf['images']
        # h5py fancy indexing requires the indices in increasing order.
        choice = np.random.choice(len(faces), size, replace=False)
        faces = faces[sorted(choice)]
        faces = np.array(faces, dtype=np.float16)
    return faces / 255


def loadJPGs(path='/home/arthur/devel/input/', width=64, height=64):
    filenames = glob(path + "*.jpg")
    filenames = np.sort(filenames)

    def imread(path):
        return scipy.misc.imread(path)

    def scaleHeight(x, height=64):
        h, w = x.shape[:2]
        return scipy.misc.imresize(x, [height, int((float(w) / h) * height)])

    def cropSides(x, width=64):
        w = x.shape[1]
        j = int(round((w - width) / 2.))
        return x[:, j:j + width, :]

    def get_image(image_path, width=64, height=64):
        return cropSides(scaleHeight(imread(image_path), height=height),
                         width=width)

    images = np.zeros((len(filenames), width * height * 3), dtype=np.uint8)

    for n, i in enumerate(filenames):
        # Pass the requested size through; previously the defaults were
        # always used regardless of the width/height arguments.
        im = get_image(i, width=width, height=height)
        images[n] = im.flatten()
    images = np.array(images, dtype=np.float16)
    return images / 255


def dataIterator(data, batch_size):
    '''
    From great jupyter notebook by Tim Sainburg:
    http://github.com/timsainb/Tensorflow-MultiGPU-VAE-GAN
    '''
    while True:
        length = len(data[0])
        assert all(len(i) == length for i in data)
        idxs = np.arange(0, length)
        np.random.shuffle(idxs)
        for batch_idx in range(0, length, batch_size):
            cur_idxs = idxs[batch_idx:batch_idx + batch_size]
            images_batch = data[0][cur_idxs]
            yield images_batch


def create_image(im):
    '''
    From great jupyter notebook by Tim Sainburg:
    http://github.com/timsainb/Tensorflow-MultiGPU-VAE-GAN
    '''
    d1 = int(np.sqrt(np.prod(im.shape) / 3))
    im = np.array(im, dtype=np.float32)
    return np.reshape(im, (d1, d1, 3))


def plot_gens(images, rowlabels, losses):
    '''
    From great jupyter notebook by Tim Sainburg:
    http://github.com/timsainb/Tensorflow-MultiGPU-VAE-GAN
    '''
    examples = 8
    fig, ax = plt.subplots(nrows=len(images), ncols=examples, figsize=(18, 8))
    for i in range(examples):
        for j in range(len(images)):
            ax[(j, i)].imshow(create_image(images[j][i]), cmap=plt.cm.gray,
                              interpolation='nearest')
            ax[(j, i)].axis('off')
    title = ''
    for i in rowlabels:
        title += ' {}, '.format(i)
    fig.suptitle('Top to Bottom: {}'.format(title))
    plt.show()
    # fig.savefig(''.join(['imgs/test_', str(epoch).zfill(4), '.png']), dpi=100)
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20, 10), linewidth=4)

    D_plt, = plt.semilogy(losses['discriminator'], linewidth=4, ls='-',
                          color='b', alpha=.5, label='D')
    G_plt, = plt.semilogy(losses['generator'], linewidth=4, ls='-',
                          color='k', alpha=.5, label='G')

    leg = plt.legend(handles=[D_plt, G_plt], fontsize=20)
    leg.get_frame().set_alpha(0.5)
    plt.show()
--------------------------------------------------------------------------------
/discriminator.py:
--------------------------------------------------------------------------------
import tensorflow as tf
from utils.custom_ops import custom_fc, custom_conv2d
from generator import decoder


def began_discriminator(D_I, batch_size, num_filters, hidden_size, image_size,
                        scope_name="discriminator", reuse_scope=False):
    '''
    Unlike most generative adversarial networks, the boundary
    equilibrium GAN uses an autoencoder as a discriminator.

    For simplicity, the decoder architecture is the same as the generator.

    Downsampling is 3x3 convolutions with a stride of 2.
    Upsampling is 3x3 convolutions, with nearest neighbour resizing
    to the desired resolution.

    Args:
        D_I: a batch of images [batch_size, 64 x 64 x 3]
        batch_size: Batch size of encodings
        num_filters: Number of filters in convolutional layers
        hidden_size: Dimensionality of encoding
        image_size: First dimension of generated image (must be 64 or 128)
        scope_name: Tensorflow scope name
        reuse_scope: Tensorflow scope handling
    Returns:
        Flattened tensor of re-created images, with dimensionality:
        [batch_size, image_size * image_size * 3]
    '''
    with tf.variable_scope(scope_name) as scope:
        if reuse_scope:
            scope.reuse_variables()

        layer_1 = tf.reshape(D_I, [-1, image_size, image_size, 3])  # '-1' is batch size

        conv_0 = custom_conv2d(layer_1, 3, k_h=3, k_w=3, d_h=1, d_w=1, scope='ec0')
        conv_0 = tf.nn.elu(conv_0)

        conv_1 = custom_conv2d(conv_0, num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='ec1')
        conv_1 = tf.nn.elu(conv_1)

        conv_2 = custom_conv2d(conv_1, num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='ec2')
        conv_2 = tf.nn.elu(conv_2)

        # Downsample with a stride-2 convolution.
        layer_2 = custom_conv2d(conv_2, 2 * num_filters, k_h=3, k_w=3, d_h=2, d_w=2, scope='el2')
        layer_2 = tf.nn.elu(layer_2)

        conv_3 = custom_conv2d(layer_2, 2 * num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='ec3')
        conv_3 = tf.nn.elu(conv_3)

        conv_4 = custom_conv2d(conv_3, 2 * num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='ec4')
        conv_4 = tf.nn.elu(conv_4)

        # Downsample again, from conv_4 (not conv_2 as previously written,
        # which left conv_3 and conv_4 disconnected from the encoding).
        layer_3 = custom_conv2d(conv_4, 3 * num_filters, k_h=3, k_w=3, d_h=2, d_w=2, scope='el3')
        layer_3 = tf.nn.elu(layer_3)

        conv_5 = custom_conv2d(layer_3, 3 * num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='ec5')
        conv_5 = tf.nn.elu(conv_5)

        conv_6 = custom_conv2d(conv_5, 3 * num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='ec6')
        conv_6 = tf.nn.elu(conv_6)

        layer_4 = custom_conv2d(conv_6, 4 * num_filters, k_h=3, k_w=3, d_h=2, d_w=2, scope='el4')
        layer_4 = tf.nn.elu(layer_4)

        conv_7 = custom_conv2d(layer_4, 4 * num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='ec7')
        conv_7 = tf.nn.elu(conv_7)

        conv_8 = custom_conv2d(conv_7, 4 * num_filters,
                               k_h=3, k_w=3, d_h=1, d_w=1, scope='ec8')
        conv_8 = tf.nn.elu(conv_8)

        if image_size == 64:
            enc = custom_fc(conv_8, hidden_size, scope='enc')
        else:
            # One extra downsampling stage for 128px inputs.
            layer_5 = custom_conv2d(conv_8, 5 * num_filters, k_h=3, k_w=3, d_h=2, d_w=2, scope='el5')
            layer_5 = tf.nn.elu(layer_5)

            conv_9 = custom_conv2d(layer_5, 5 * num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='ec9')
            conv_9 = tf.nn.elu(conv_9)

            conv_10 = custom_conv2d(conv_9, 5 * num_filters, k_h=3, k_w=3, d_h=1, d_w=1, scope='ec10')
            conv_10 = tf.nn.elu(conv_10)
            enc = custom_fc(conv_10, hidden_size, scope='enc')

        # add elu before decoding?
        return decoder(enc, batch_size=batch_size, num_filters=num_filters,
                       hidden_size=hidden_size, image_size=image_size)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# BEGAN: Boundary Equilibrium Generative Adversarial Networks

This is an implementation of the paper on Boundary Equilibrium Generative Adversarial Networks [(Berthelot, Schumm and Metz, 2017)](#references).

## Dependencies

* Python 3+
* numpy
* Tensorflow
* tqdm
* h5py
* scipy (optional)

## What are Boundary Equilibrium Generative Adversarial Networks?

Unlike standard generative adversarial networks [(Goodfellow et al. 2014)](#references), boundary equilibrium generative adversarial networks (BEGAN) use an auto-encoder as a discriminator. An auto-encoder loss is defined, and an approximation of the Wasserstein distance is then computed between the pixelwise auto-encoder loss distributions of real and generated samples.
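For readers viewing this without the rendered images, the key equations (as given in the paper) are:

```latex
% Pixelwise L1 autoencoder loss of the discriminator D:
\mathcal{L}(v) = \lvert v - D(v) \rvert

% BEGAN objective, with proportional control on k_t:
\begin{aligned}
\mathcal{L}_D &= \mathcal{L}(x) - k_t \, \mathcal{L}(G(z_D)) \\
\mathcal{L}_G &= \mathcal{L}(G(z_G)) \\
k_{t+1} &= k_t + \lambda_k \bigl( \gamma \, \mathcal{L}(x) - \mathcal{L}(G(z_G)) \bigr)
\end{aligned}

% Diversity ratio (a hyperparameter) and global convergence measure:
\gamma = \frac{\mathbb{E}[\mathcal{L}(G(z))]}{\mathbb{E}[\mathcal{L}(x)]}
\qquad
\mathcal{M}_{\mathrm{global}} = \mathcal{L}(x) + \bigl| \gamma \, \mathcal{L}(x) - \mathcal{L}(G(z_G)) \bigr|
```

Lower M_global indicates better convergence: real images are reconstructed well, and the generator/discriminator balance stays close to the target ratio gamma.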
*[The remainder of README.md did not survive extraction. It embedded the rendered equation images from `readme/` (autoencoder loss, BEGAN losses, objective, gamma, and the global convergence measure), the convergence-measure visualisation (`conv_measure_vis.png`), samples generated from Z (`generated_from_Z.png`), and the References section linked from the citations above.]*
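Since `main.py` is not captured in this section, the following is a minimal sketch of how the modules above fit together into the BEGAN training graph. Only `began_generator`, `began_discriminator`, and their signatures come from this repository; the tensor names, optimizer choice, learning rate, and hyperparameter values are illustrative assumptions, not code from `main.py`.

```python
import tensorflow as tf
from generator import began_generator
from discriminator import began_discriminator

# Illustrative hyperparameters (gamma and lambda_k follow the paper's ranges).
batch_size, num_filters, hidden_size, image_size = 16, 64, 64, 64
gamma, lambda_k = 0.5, 0.001

x = tf.placeholder(tf.float32, [batch_size, image_size * image_size * 3])
Z = tf.placeholder(tf.float32, [batch_size, hidden_size])
k_t = tf.Variable(0., trainable=False, name='k_t')  # k starts at 0, as in the paper

G = began_generator(Z, batch_size, num_filters, hidden_size, image_size)
D_real = began_discriminator(x, batch_size, num_filters, hidden_size, image_size)
D_fake = began_discriminator(G, batch_size, num_filters, hidden_size, image_size,
                             reuse_scope=True)

# Pixelwise L1 autoencoder losses, L(v) = |v - D(v)|.
mu_real = tf.reduce_mean(tf.abs(x - D_real))
mu_fake = tf.reduce_mean(tf.abs(G - D_fake))

D_loss = mu_real - k_t * mu_fake  # autoencode real samples well, generated ones poorly
G_loss = mu_fake                  # generator makes its samples easy to autoencode

d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
d_opt = tf.train.AdamOptimizer(1e-4).minimize(D_loss, var_list=d_vars)
g_opt = tf.train.AdamOptimizer(1e-4).minimize(G_loss, var_list=g_vars)

# Proportional control keeps E[L(G(z))] near gamma * E[L(x)].
with tf.control_dependencies([d_opt, g_opt]):
    update_k = tf.assign(
        k_t, tf.clip_by_value(k_t + lambda_k * (gamma * mu_real - mu_fake), 0., 1.))

# Convergence measure to monitor training (lower is better).
M_global = mu_real + tf.abs(gamma * mu_real - mu_fake)
```

The paper additionally decays the learning rate when the convergence measure stalls; that schedule is omitted from this sketch.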