├── .gitignore
├── LICENSE
├── README.markdown
├── iter_funcs.py
├── mnist
│   └── .gitignore
├── model.py
├── plot.py
├── theano_funcs.py
├── train.py
├── utils.py
└── weights
    └── .gitignore

/.gitignore:
--------------------------------------------------------------------------------
1 | *.swo
2 | *.swp
3 | *.pyc
4 | *.png
5 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 | 
3 | Copyright (c) 2016 Hendrik Weideman
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.markdown:
--------------------------------------------------------------------------------
1 | The relevant blog post is here: [http://hjweide.github.io/adversarial-autoencoders](http://hjweide.github.io/adversarial-autoencoders)
2 | 
3 | A Lasagne and Theano implementation of the paper [Adversarial
4 | Autoencoders](http://arxiv.org/abs/1511.05644) by Alireza Makhzani, Jonathon
5 | Shlens, Navdeep Jaitly, and Ian Goodfellow.
6 | 
7 | Several design choices were made based on the discussion on
8 | [/r/machinelearning](https://www.reddit.com/r/MachineLearning/comments/3ybj4d/151105644_adversarial_autoencod
9 | ers/?).
10 | 
11 | To use this code:
12 | 
13 | 1. Download the [MNIST data files](http://yann.lecun.com/exdb/mnist/).
14 | 2. Unzip and copy to the mnist directory.
15 | 3. Run ```python train.py``` to train a model; the weights will be saved to the ```weights``` directory.
16 | 4. Run ```python plot.py``` to generate the visualizations.
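
As an optional sanity check after step 2, the loader in ```utils.py``` should run without errors. This is a minimal sketch that assumes the four g-unzipped MNIST files are sitting in the ```mnist``` directory:

```python
# verify that the MNIST files were downloaded and unpacked correctly
import utils

X_train, y_train, X_test, y_test = utils.load_mnist()
print(X_train.shape)  # expected: (60000, 784)
print(y_train.shape)  # expected: (60000,)
print(X_test.shape)   # expected: (10000, 784)
print(y_test.shape)   # expected: (10000,)
```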
17 | 18 | Sample images generated by traversing the latent space of the adversarial autoencoder: 19 | ![data generated by traversing latent space](https://hjweide.github.io/images/2016-04-30-adversarial-autoencoders-data_space.png) 20 | -------------------------------------------------------------------------------- /iter_funcs.py: -------------------------------------------------------------------------------- 1 | def get_batch_idx(N, batch_size): 2 | num_batches = (N + batch_size - 1) / batch_size 3 | 4 | for i in range(num_batches): 5 | start, end = i * batch_size, (i + 1) * batch_size 6 | idx = slice(start, end) 7 | 8 | yield idx 9 | 10 | 11 | if __name__ == '__main__': 12 | import numpy as np 13 | X = np.random.random((14, 4)) 14 | print('Original data') 15 | print(X) 16 | print('As batches') 17 | for idx in get_batch_idx(X.shape[0], 4): 18 | print(X[idx]) 19 | -------------------------------------------------------------------------------- /mnist/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hjweide/adversarial-autoencoder/181c59a063a1e5db96b14a04cdded54e4e12ef26/mnist/.gitignore -------------------------------------------------------------------------------- /model.py: -------------------------------------------------------------------------------- 1 | import cPickle as pickle 2 | from lasagne.layers import InputLayer 3 | from lasagne.layers import ConcatLayer 4 | from lasagne.layers import DenseLayer 5 | from lasagne.layers import get_all_layers 6 | from lasagne.layers import get_all_params 7 | from lasagne.nonlinearities import linear, rectify, sigmoid 8 | 9 | 10 | def save_weights(weights, filename): 11 | with open(filename, 'wb') as f: 12 | pickle.dump(weights, f, protocol=pickle.HIGHEST_PROTOCOL) 13 | 14 | 15 | def load_weights(layer, filename): 16 | with open(filename, 'rb') as f: 17 | src_params_list = pickle.load(f) 18 | 19 | dst_params_list = get_all_params(layer) 20 | # assign the parameter values stored on disk to the model 21 | for src_params, dst_params in zip(src_params_list, dst_params_list): 22 | dst_params.set_value(src_params) 23 | 24 | 25 | def build_model(): 26 | num_input = 28 * 28 27 | # should really use more dimensions, but this is nice for visualization 28 | num_code = 2 29 | num_hidden = 1000 30 | 31 | l_encoder_in = InputLayer((None, num_input), name='l_encoder_in') 32 | 33 | # first layer of the encoder/generator 34 | l_dense1 = DenseLayer( 35 | l_encoder_in, num_units=num_hidden, nonlinearity=rectify, 36 | name='l_encoder_dense1', 37 | ) 38 | l_dense1.params[l_dense1.W].add('generator') 39 | l_dense1.params[l_dense1.b].add('generator') 40 | 41 | # second layer of the encoder/generator 42 | l_dense2 = DenseLayer( 43 | l_dense1, num_units=num_hidden, nonlinearity=rectify, 44 | name='l_encoder_dense2', 45 | ) 46 | l_dense2.params[l_dense2.W].add('generator') 47 | l_dense2.params[l_dense2.b].add('generator') 48 | 49 | # output of the encoder/generator: q(z|x) 50 | l_encoder_out = DenseLayer( 51 | l_dense2, num_units=num_code, nonlinearity=linear, 52 | name='l_encoder_out', 53 | ) 54 | l_encoder_out.params[l_encoder_out.W].add('generator') 55 | l_encoder_out.params[l_encoder_out.b].add('generator') 56 | 57 | # first layer of the decoder 58 | l_decoder_in = DenseLayer( 59 | l_encoder_out, num_units=num_hidden, nonlinearity=rectify, 60 | name='l_decoder_dense1', 61 | ) 62 | # second layer of the decoder 63 | l_dense5 = DenseLayer( 64 | l_decoder_in, num_units=num_hidden, 
nonlinearity=rectify, 65 | name='l_decoder_dense2', 66 | ) 67 | 68 | # output of the decoder: p(x|z) 69 | l_decoder_out = DenseLayer( 70 | l_dense5, num_units=num_input, nonlinearity=sigmoid, 71 | name='l_decoder_out', 72 | ) 73 | 74 | # input layer providing samples from p(z) 75 | l_prior = InputLayer((None, num_code), name='l_prior_in') 76 | 77 | # concatenate samples from q(z|x) to samples from p(z) 78 | l_concat = ConcatLayer( 79 | [l_encoder_out, l_prior], axis=0, name='l_prior_encoder_concat', 80 | ) 81 | 82 | # first layer of the discriminator 83 | l_dense6 = DenseLayer( 84 | l_concat, num_units=num_hidden, nonlinearity=rectify, 85 | name='l_discriminator_dense1', 86 | ) 87 | l_dense6.params[l_dense6.W].add('discriminator') 88 | l_dense6.params[l_dense6.b].add('discriminator') 89 | 90 | # second layer of the discriminator 91 | l_dense7 = DenseLayer( 92 | l_dense6, num_units=num_hidden, nonlinearity=rectify, 93 | name='l_discriminator_dense2', 94 | ) 95 | l_dense7.params[l_dense7.W].add('discriminator') 96 | l_dense7.params[l_dense7.b].add('discriminator') 97 | 98 | # output layer of the discriminator 99 | l_discriminator_out = DenseLayer( 100 | l_dense7, num_units=1, nonlinearity=sigmoid, 101 | name='l_discriminator_out', 102 | ) 103 | l_discriminator_out.params[l_discriminator_out.W].add('discriminator') 104 | l_discriminator_out.params[l_discriminator_out.b].add('discriminator') 105 | 106 | model_layers = get_all_layers([l_decoder_out, l_discriminator_out]) 107 | 108 | # put all layers in a dictionary for convenience 109 | return {layer.name: layer for layer in model_layers} 110 | 111 | 112 | if __name__ == '__main__': 113 | layer_dict = build_model() 114 | print('collected %d layers' % (len(layer_dict.keys()))) 115 | for name in layer_dict: 116 | print('%s: %r' % (name, layer_dict[name])) 117 | -------------------------------------------------------------------------------- /plot.py: -------------------------------------------------------------------------------- 1 | import model 2 | import theano_funcs 3 | import utils 4 | 5 | from iter_funcs import get_batch_idx 6 | 7 | # credit to @fulhack: https://twitter.com/fulhack/status/721842480140967936 8 | import seaborn # NOQA - never used, but improves matplotlib's style 9 | import matplotlib.pyplot as plt 10 | import numpy as np 11 | 12 | from mpl_toolkits.axes_grid1 import ImageGrid 13 | from sklearn.decomposition import PCA 14 | 15 | from os.path import join 16 | 17 | 18 | def plot(Z1, y1, Z2, y2, filename=None, title=None): 19 | digit_colors = [ 20 | 'red', 'green', 'blue', 'cyan', 'magenta', 21 | 'yellow', 'black', 'white', 'orange', 'gray', 22 | ] 23 | 24 | legend, labels = [], [] 25 | for i in range(0, 10): 26 | idx1 = y1 == i 27 | idx2 = y2 == i 28 | pc1 = plt.scatter( 29 | Z1[idx1, 0], Z1[idx1, 1], 30 | marker='o', color=digit_colors[i], 31 | ) 32 | legend.append(pc1) 33 | labels.append('%d' % i) 34 | pc2 = plt.scatter( 35 | Z2[idx2, 0], Z2[idx2, 1], 36 | marker='x', color=digit_colors[i], 37 | ) 38 | legend.append(pc2) 39 | labels.append('%d' % i) 40 | 41 | # only plot digit colors to avoid cluttering the legend 42 | plt.legend(legend[::2], labels[::2], loc='upper left', ncol=1) 43 | if title is not None: 44 | plt.title(title) 45 | if filename is None: 46 | filename = 'plot.png' 47 | plt.savefig(filename, bbox_inches='tight') 48 | 49 | 50 | # always a good sanity check 51 | def plot_pca(): 52 | print('loading data') 53 | X_train, y_train, X_test, y_test = utils.load_mnist() 54 | pca = PCA(n_components=2) 55 | 56 | 
print('transforming training data')
57 |     Z_train = pca.fit_transform(X_train)
58 | 
59 |     print('transforming test data')
60 |     Z_test = pca.transform(X_test)
61 | 
62 |     plot(Z_train, y_train, Z_test, y_test,
63 |          filename='pca.png', title='projected onto principal components')
64 | 
65 | 
66 | def plot_autoencoder(weightsfile):
67 |     print('building model')
68 |     layers = model.build_model()
69 | 
70 |     batch_size = 128
71 | 
72 |     print('compiling theano function')
73 |     encoder_func = theano_funcs.create_encoder_func(layers)
74 | 
75 |     print('loading weights from %s' % (weightsfile))
76 |     model.load_weights([
77 |         layers['l_decoder_out'],
78 |         layers['l_discriminator_out'],
79 |     ], weightsfile)
80 | 
81 |     print('loading data')
82 |     X_train, y_train, X_test, y_test = utils.load_mnist()
83 | 
84 |     train_datapoints = []
85 |     print('transforming training data')
86 |     for train_idx in get_batch_idx(X_train.shape[0], batch_size):
87 |         X_train_batch = X_train[train_idx]
88 |         train_batch_codes = encoder_func(X_train_batch)
89 |         train_datapoints.append(train_batch_codes)
90 | 
91 |     test_datapoints = []
92 |     print('transforming test data')
93 |     for test_idx in get_batch_idx(X_test.shape[0], batch_size):
94 |         X_test_batch = X_test[test_idx]
95 |         test_batch_codes = encoder_func(X_test_batch)
96 |         test_datapoints.append(test_batch_codes)
97 | 
98 |     Z_train = np.vstack(train_datapoints)
99 |     Z_test = np.vstack(test_datapoints)
100 | 
101 |     plot(Z_train, y_train, Z_test, y_test,
102 |          filename='adversarial_train_val.png',
103 |          title='projected onto latent space of autoencoder')
104 | 
105 | 
106 | def plot_latent_space(weightsfile):
107 |     print('building model')
108 |     layers = model.build_model()
109 |     batch_size = 128
110 |     decoder_func = theano_funcs.create_decoder_func(layers)
111 | 
112 |     print('loading weights from %s' % (weightsfile))
113 |     model.load_weights([
114 |         layers['l_decoder_out'],
115 |         layers['l_discriminator_out'],
116 |     ], weightsfile)
117 | 
118 |     # regularly-spaced grid of points sampled from p(z)
119 |     Z = np.mgrid[2:-2.2:-0.2, -2:2.2:0.2].reshape(2, -1).T[:, ::-1].astype(np.float32)
120 | 
121 |     reconstructions = []
122 |     print('generating samples')
123 |     for idx in get_batch_idx(Z.shape[0], batch_size):
124 |         Z_batch = Z[idx]
125 |         X_batch = decoder_func(Z_batch)
126 |         reconstructions.append(X_batch)
127 | 
128 |     X = np.vstack(reconstructions)
129 |     X = X.reshape(X.shape[0], 28, 28)
130 | 
131 |     fig = plt.figure(1, (12., 12.))
132 |     ax1 = plt.axes(frameon=False)
133 |     ax1.get_xaxis().set_visible(False)
134 |     ax1.get_yaxis().set_visible(False)
135 |     plt.title('samples generated from latent space of autoencoder')
136 |     grid = ImageGrid(
137 |         fig, 111, nrows_ncols=(21, 21),
138 |         share_all=True)
139 | 
140 |     print('plotting latent space')
141 |     for i, x in enumerate(X):
142 |         img = (x * 255).astype(np.uint8)
143 |         grid[i].imshow(img, cmap='Greys_r')
144 |         grid[i].get_xaxis().set_visible(False)
145 |         grid[i].get_yaxis().set_visible(False)
146 |         grid[i].set_frame_on(False)
147 | 
148 |     plt.savefig('latent_train_val.png', bbox_inches='tight')
149 | 
150 | 
151 | if __name__ == '__main__':
152 |     weightsfile = join('weights', 'weights_train_val.pickle')
153 |     #plot_autoencoder(weightsfile)
154 |     #plot_pca()
155 |     plot_latent_space(weightsfile)
156 | 
--------------------------------------------------------------------------------
/theano_funcs.py:
--------------------------------------------------------------------------------
1 | import theano
2 | import theano.tensor as T
3 | from lasagne.layers import
get_output 4 | from lasagne.layers import get_all_params 5 | from lasagne.updates import nesterov_momentum 6 | 7 | 8 | # forward pass for the encoder, q(z|x) 9 | def create_encoder_func(layers): 10 | X = T.fmatrix('X') 11 | X_batch = T.fmatrix('X_batch') 12 | 13 | Z = get_output(layers['l_encoder_out'], X, deterministic=True) 14 | 15 | encoder_func = theano.function( 16 | inputs=[theano.In(X_batch)], 17 | outputs=Z, 18 | givens={ 19 | X: X_batch, 20 | }, 21 | ) 22 | 23 | return encoder_func 24 | 25 | 26 | # forward pass for the decoder, p(x|z) 27 | def create_decoder_func(layers): 28 | Z = T.fmatrix('Z') 29 | Z_batch = T.fmatrix('Z_batch') 30 | 31 | X = get_output( 32 | layers['l_decoder_out'], 33 | inputs={ 34 | layers['l_encoder_out']: Z 35 | }, 36 | deterministic=True 37 | ) 38 | 39 | decoder_func = theano.function( 40 | inputs=[theano.In(Z_batch)], 41 | outputs=X, 42 | givens={ 43 | Z: Z_batch, 44 | }, 45 | ) 46 | 47 | return decoder_func 48 | 49 | 50 | # forward/backward (optional) pass for the encoder/decoder pair 51 | def create_encoder_decoder_func(layers, apply_updates=False): 52 | X = T.fmatrix('X') 53 | X_batch = T.fmatrix('X_batch') 54 | 55 | X_hat = get_output(layers['l_decoder_out'], X, deterministic=False) 56 | 57 | # reconstruction loss 58 | encoder_decoder_loss = T.mean( 59 | T.mean(T.sqr(X - X_hat), axis=1) 60 | ) 61 | 62 | if apply_updates: 63 | # all layers that participate in the forward pass should be updated 64 | encoder_decoder_params = get_all_params( 65 | layers['l_decoder_out'], trainable=True) 66 | 67 | encoder_decoder_updates = nesterov_momentum( 68 | encoder_decoder_loss, encoder_decoder_params, 0.01, 0.9) 69 | else: 70 | encoder_decoder_updates = None 71 | 72 | encoder_decoder_func = theano.function( 73 | inputs=[theano.In(X_batch)], 74 | outputs=encoder_decoder_loss, 75 | updates=encoder_decoder_updates, 76 | givens={ 77 | X: X_batch, 78 | }, 79 | ) 80 | 81 | return encoder_decoder_func 82 | 83 | 84 | # forward/backward (optional) pass for discriminator 85 | def create_discriminator_func(layers, apply_updates=False): 86 | X = T.fmatrix('X') 87 | pz = T.fmatrix('pz') 88 | 89 | X_batch = T.fmatrix('X_batch') 90 | pz_batch = T.fmatrix('pz_batch') 91 | 92 | # the discriminator receives samples from q(z|x) and p(z) 93 | # and should predict to which distribution each sample belongs 94 | discriminator_outputs = get_output( 95 | layers['l_discriminator_out'], 96 | inputs={ 97 | layers['l_prior_in']: pz, 98 | layers['l_encoder_in']: X, 99 | }, 100 | deterministic=False, 101 | ) 102 | 103 | # label samples from q(z|x) as 1 and samples from p(z) as 0 104 | discriminator_targets = T.vertical_stack( 105 | T.ones((X_batch.shape[0], 1)), 106 | T.zeros((pz_batch.shape[0], 1)) 107 | ) 108 | 109 | discriminator_loss = T.mean( 110 | T.nnet.binary_crossentropy( 111 | discriminator_outputs, 112 | discriminator_targets, 113 | ) 114 | ) 115 | 116 | if apply_updates: 117 | # only layers that are part of the discriminator should be updated 118 | discriminator_params = get_all_params( 119 | layers['l_discriminator_out'], trainable=True, discriminator=True) 120 | 121 | discriminator_updates = nesterov_momentum( 122 | discriminator_loss, discriminator_params, 0.1, 0.0) 123 | else: 124 | discriminator_updates = None 125 | 126 | discriminator_func = theano.function( 127 | inputs=[ 128 | theano.In(X_batch), 129 | theano.In(pz_batch), 130 | ], 131 | outputs=discriminator_loss, 132 | updates=discriminator_updates, 133 | givens={ 134 | X: X_batch, 135 | pz: pz_batch, 136 | }, 137 | ) 138 | 
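    # usage (see train.py): pass a batch of images together with an equally-sized
    # batch of codes drawn from the prior, e.g.
    #   pz_batch = np.random.uniform(-2, 2, (X_batch.shape[0], 2)).astype(np.float32)
    #   loss = discriminator_func(X_batch, pz_batch)
    # with apply_updates=True, each call also performs one nesterov_momentum step
    # on the parameters tagged 'discriminator' in model.py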
139 | return discriminator_func 140 | 141 | 142 | # forward/backward (optional) pass for the generator 143 | # note that the generator is the same network as the encoder, 144 | # but updated separately 145 | def create_generator_func(layers, apply_updates=False): 146 | X = T.fmatrix('X') 147 | X_batch = T.fmatrix('X_batch') 148 | 149 | # no need to pass an input to l_prior_in here 150 | generator_outputs = get_output( 151 | layers['l_encoder_out'], X, deterministic=False) 152 | 153 | # so pass the output of the generator as the output of the concat layer 154 | discriminator_outputs = get_output( 155 | layers['l_discriminator_out'], 156 | inputs={ 157 | layers['l_prior_encoder_concat']: generator_outputs, 158 | }, 159 | deterministic=False 160 | ) 161 | 162 | # the discriminator learns to predict 1 for q(z|x), 163 | # so the generator should fool it into predicting 0 164 | generator_targets = T.zeros_like(X_batch.shape[0]) 165 | 166 | # so the generator needs to push the discriminator's output to 0 167 | generator_loss = T.mean( 168 | T.nnet.binary_crossentropy( 169 | discriminator_outputs, 170 | generator_targets, 171 | ) 172 | ) 173 | 174 | if apply_updates: 175 | # only layers that are part of the generator (i.e., encoder) 176 | # should be updated 177 | generator_params = get_all_params( 178 | layers['l_discriminator_out'], trainable=True, generator=True) 179 | 180 | generator_updates = nesterov_momentum( 181 | generator_loss, generator_params, 0.1, 0.0) 182 | else: 183 | generator_updates = None 184 | 185 | generator_func = theano.function( 186 | inputs=[ 187 | theano.In(X_batch), 188 | ], 189 | outputs=generator_loss, 190 | updates=generator_updates, 191 | givens={ 192 | X: X_batch, 193 | }, 194 | ) 195 | 196 | return generator_func 197 | 198 | 199 | if __name__ == '__main__': 200 | import model 201 | print('building model') 202 | layers = model.build_model() 203 | 204 | print('compiling theano functions') 205 | encoder_decoder_func = create_encoder_decoder_func(layers) 206 | discriminator_func = create_discriminator_func(layers) 207 | generator_func = create_generator_func(layers) 208 | 209 | import numpy as np 210 | X = np.random.random((16, 28 * 28)).astype(np.float32) 211 | pz = np.random.uniform(-2, 2, size=(16, 2)).astype(np.float32) 212 | 213 | print('X.shape = %r' % (X.shape,)) 214 | print('pz.shape = %r' % (pz.shape,)) 215 | 216 | print('running the three forward passes') 217 | print encoder_decoder_func(X) 218 | print discriminator_func(X, pz) 219 | print generator_func(X) 220 | -------------------------------------------------------------------------------- /train.py: -------------------------------------------------------------------------------- 1 | # credit to https://github.com/dnouri/nolearn 2 | # for inspiration when I was first learning to use Lasagne 3 | 4 | import model 5 | import theano_funcs 6 | import utils 7 | 8 | from iter_funcs import get_batch_idx 9 | 10 | import numpy as np 11 | from lasagne.layers import get_all_param_values 12 | from os.path import join 13 | 14 | 15 | def train_autoencoder(): 16 | print('building model') 17 | layers = model.build_model() 18 | 19 | max_epochs = 5000 20 | batch_size = 128 21 | weightsfile = join('weights', 'weights_train_val.pickle') 22 | 23 | print('compiling theano functions for training') 24 | print(' encoder/decoder') 25 | encoder_decoder_update = theano_funcs.create_encoder_decoder_func( 26 | layers, apply_updates=True) 27 | print(' discriminator') 28 | discriminator_update = theano_funcs.create_discriminator_func( 29 
| layers, apply_updates=True)
30 |     print(' generator')
31 |     generator_update = theano_funcs.create_generator_func(
32 |         layers, apply_updates=True)
33 | 
34 |     print('compiling theano functions for validation')
35 |     print(' encoder/decoder')
36 |     encoder_decoder_func = theano_funcs.create_encoder_decoder_func(layers)
37 |     print(' discriminator')
38 |     discriminator_func = theano_funcs.create_discriminator_func(layers)
39 |     print(' generator')
40 |     generator_func = theano_funcs.create_generator_func(layers)
41 | 
42 |     print('loading data')
43 |     X_train, y_train, X_test, y_test = utils.load_mnist()
44 | 
45 |     try:
46 |         for epoch in range(1, max_epochs + 1):
47 |             print('epoch %d' % (epoch))
48 | 
49 |             # compute loss on training data and apply gradient updates
50 |             train_reconstruction_losses = []
51 |             train_discriminative_losses = []
52 |             train_generative_losses = []
53 |             for train_idx in get_batch_idx(X_train.shape[0], batch_size):
54 |                 X_train_batch = X_train[train_idx]
55 |                 # 1.) update the encoder/decoder to min. reconstruction loss
56 |                 train_batch_reconstruction_loss =\
57 |                     encoder_decoder_update(X_train_batch)
58 | 
59 |                 # sample from p(z)
60 |                 pz_train_batch = np.random.uniform(
61 |                     low=-2, high=2,
62 |                     size=(X_train_batch.shape[0], 2)).astype(
63 |                         np.float32)
64 | 
65 |                 # 2.) update discriminator to separate q(z|x) from p(z)
66 |                 train_batch_discriminative_loss =\
67 |                     discriminator_update(X_train_batch, pz_train_batch)
68 | 
69 |                 # 3.) update generator to output q(z|x) that mimic p(z)
70 |                 train_batch_generative_loss = generator_update(X_train_batch)
71 | 
72 |                 train_reconstruction_losses.append(
73 |                     train_batch_reconstruction_loss)
74 |                 train_discriminative_losses.append(
75 |                     train_batch_discriminative_loss)
76 |                 train_generative_losses.append(
77 |                     train_batch_generative_loss)
78 | 
79 |             # average over minibatches
80 |             train_reconstruction_losses_mean = np.mean(
81 |                 train_reconstruction_losses)
82 |             train_discriminative_losses_mean = np.mean(
83 |                 train_discriminative_losses)
84 |             train_generative_losses_mean = np.mean(
85 |                 train_generative_losses)
86 | 
87 |             print(' train: rec = %.6f, dis = %.6f, gen = %.6f' % (
88 |                 train_reconstruction_losses_mean,
89 |                 train_discriminative_losses_mean,
90 |                 train_generative_losses_mean,
91 |             ))
92 | 
93 |             # compute loss on test data
94 |             test_reconstruction_losses = []
95 |             test_discriminative_losses = []
96 |             test_generative_losses = []
97 |             for test_idx in get_batch_idx(X_test.shape[0], batch_size):
98 |                 X_test_batch = X_test[test_idx]
99 |                 test_batch_reconstruction_loss =\
100 |                     encoder_decoder_func(X_test_batch)
101 | 
102 |                 # sample from p(z)
103 |                 pz_test_batch = np.random.uniform(
104 |                     low=-2, high=2,
105 |                     size=(X_test_batch.shape[0], 2)).astype(
106 |                         np.float32)
107 | 
108 |                 test_batch_discriminative_loss =\
109 |                     discriminator_func(X_test_batch, pz_test_batch)
110 | 
111 |                 test_batch_generative_loss = generator_func(X_test_batch)
112 | 
113 |                 test_reconstruction_losses.append(
114 |                     test_batch_reconstruction_loss)
115 |                 test_discriminative_losses.append(
116 |                     test_batch_discriminative_loss)
117 |                 test_generative_losses.append(
118 |                     test_batch_generative_loss)
119 | 
120 |             test_reconstruction_losses_mean = np.mean(
121 |                 test_reconstruction_losses)
122 |             test_discriminative_losses_mean = np.mean(
123 |                 test_discriminative_losses)
124 |             test_generative_losses_mean = np.mean(
125 |                 test_generative_losses)
126 | 
127 |             print(' test: rec = %.6f, dis = %.6f, gen = %.6f' % (
128 |                 test_reconstruction_losses_mean,
129 |                 test_discriminative_losses_mean,
130 | test_generative_losses_mean, 131 | )) 132 | 133 | except KeyboardInterrupt: 134 | print('caught ctrl-c, stopped training') 135 | weights = get_all_param_values([ 136 | layers['l_decoder_out'], 137 | layers['l_discriminator_out'], 138 | ]) 139 | print('saving weights to %s' % (weightsfile)) 140 | model.save_weights(weights, weightsfile) 141 | 142 | 143 | if __name__ == '__main__': 144 | train_autoencoder() 145 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.utils import shuffle 3 | 4 | 5 | # download mnist here: http://yann.lecun.com/exdb/mnist/ 6 | # g-unzip and copy to mnist directory 7 | # inspiration: 8 | # https://github.com/Newmu/dcgan_code/blob/master/mnist/load.py#L14 9 | def load_mnist(): 10 | with open('mnist/train-images-idx3-ubyte', 'rb') as f: 11 | data = np.fromfile(file=f, dtype=np.uint8) 12 | X_train = data[16:].reshape(60000, 28 * 28).astype(np.float32) 13 | with open('mnist/train-labels-idx1-ubyte', 'rb') as f: 14 | data = np.fromfile(file=f, dtype=np.uint8) 15 | y_train = data[8:].reshape(60000).astype(np.uint8) 16 | 17 | with open('mnist/t10k-images-idx3-ubyte', 'rb') as f: 18 | data = np.fromfile(file=f, dtype=np.uint8) 19 | X_test = data[16:].reshape(10000, 28 * 28).astype(np.float32) 20 | with open('mnist/t10k-labels-idx1-ubyte', 'rb') as f: 21 | data = np.fromfile(file=f, dtype=np.uint8) 22 | y_test = data[8:].reshape(10000).astype(np.uint8) 23 | 24 | X_train, y_train = shuffle(X_train, y_train) 25 | X_test, y_test = shuffle(X_test, y_test) 26 | 27 | X_train /= 255. 28 | X_test /= 255. 29 | 30 | return X_train, y_train, X_test, y_test 31 | 32 | 33 | if __name__ == '__main__': 34 | X_train, _, X_test, _ = load_mnist() 35 | print X_train.shape, X_test.shape 36 | print X_train.min(), X_test.min() 37 | print X_train.mean(), X_test.mean() 38 | print X_train.max(), X_test.max() 39 | -------------------------------------------------------------------------------- /weights/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hjweide/adversarial-autoencoder/181c59a063a1e5db96b14a04cdded54e4e12ef26/weights/.gitignore --------------------------------------------------------------------------------
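
Once training has produced ```weights/weights_train_val.pickle```, the pieces above can be combined to decode a single point from the 2-D latent space. The snippet below is a minimal sketch built only from functions defined in this repo (```model.build_model```, ```model.load_weights```, ```theano_funcs.create_decoder_func```); the chosen latent point is arbitrary:

```python
import numpy as np
import model
import theano_funcs

# rebuild the network and restore the trained parameters
layers = model.build_model()
model.load_weights([
    layers['l_decoder_out'],
    layers['l_discriminator_out'],
], 'weights/weights_train_val.pickle')

# compile the decoder p(x|z) and push one 2-D code through it
decoder_func = theano_funcs.create_decoder_func(layers)
z = np.array([[0.5, -0.5]], dtype=np.float32)  # arbitrary point in the latent space
x = decoder_func(z).reshape(28, 28)            # pixel intensities in [0, 1]
print(x.shape)
```

This is the same path that ```plot_latent_space``` in ```plot.py``` follows, just for one code instead of the full 21x21 grid.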