├── .spyproject
│   ├── codestyle.ini
│   ├── encoding.ini
│   ├── vcs.ini
│   └── workspace.ini
├── AAE.py
├── GAE.py
├── GAE_CFAR.py
├── GAE_GAN.py
├── GAE_image_completion.py
├── README.rst
├── SAAE.py
├── SSAAE.py
├── autoencoder_torch.py
├── dcgan.py
└── helpers.py

/.spyproject/codestyle.ini:
--------------------------------------------------------------------------------
[codestyle]
indentation = True

[main]
version = '0.1.0'

--------------------------------------------------------------------------------
/.spyproject/encoding.ini:
--------------------------------------------------------------------------------
[encoding]
text_encoding = utf-8

[main]
version = '0.1.0'

--------------------------------------------------------------------------------
/.spyproject/vcs.ini:
--------------------------------------------------------------------------------
[vcs]
use_version_control = False
version_control_system =

[main]
version = '0.1.0'

--------------------------------------------------------------------------------
/.spyproject/workspace.ini:
--------------------------------------------------------------------------------
[workspace]
save_data_on_exit = True
restore_data_on_startup = True
save_history = True
save_non_project_files = False

[main]
version = '0.1.0'
recent_files = [u'/home/ali/.config/spyder/temp.py', u'/home/ali/Projects/Mygithub/adverserial-autoencoder-keas/SAAE.py', u'/home/ali/Projects/Mygithub/adverserial-autoencoder-keas/SSAAE.py', u'/home/ali/Projects/Mygithub/adverserial-autoencoder-keas/autoencoder_torch.py', u'/home/ali/Projects/Mygithub/adverserial-autoencoder-keas/dcgan.py', u'/home/ali/Projects/Mygithub/adverserial-autoencoder-keas/GAE_CFAR.py', u'/home/ali/Projects/Mygithub/adverserial-autoencoder-keas/GAE_GAN.py', u'/home/ali/Projects/Mygithub/adverserial-autoencoder-keas/GAE_image_completion.py', u'/home/ali/Projects/Mygithub/adverserial-autoencoder-keas/helpers.py', u'/home/ali/Projects/Mygithub/adverserial-autoencoder-keas/AAE.py']

--------------------------------------------------------------------------------
/AAE.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# Author : Ali Mirzaei
# Date : 19/09/2017


from keras.models import Sequential, Model
from keras.layers import Dense, Input, Flatten, Reshape
from keras.datasets import mnist
from keras.optimizers import Adam, SGD
from keras.initializers import RandomNormal
import numpy as np
import matplotlib
import helpers

matplotlib.use('Agg')

import matplotlib.pyplot as plt
plt.ioff()

initializer = RandomNormal(mean=0.0, stddev=0.01, seed=None)


class AAN():
    def __init__(self, img_shape=(28, 28), encoded_dim=2):
        self.encoded_dim = encoded_dim
        self.optimizer_reconst = Adam(0.01)
        self.optimizer_discriminator = Adam(0.01)
        self._initAndCompileFullModel(img_shape, encoded_dim)

    def _genEncoderModel(self, img_shape, encoded_dim):
        """ Build Encoder Model Based on Paper Configuration
        Args:
            img_shape (tuple) : shape of input image
            encoded_dim (int) : number of latent variables
        Return:
            A sequential keras model
        """
        encoder = Sequential()
        encoder.add(Flatten(input_shape=img_shape))
        encoder.add(Dense(1000, activation='relu', kernel_initializer=initializer,
                          bias_initializer=initializer))
        encoder.add(Dense(1000, activation='relu', kernel_initializer=initializer,
                          bias_initializer=initializer))
        encoder.add(Dense(encoded_dim, kernel_initializer=initializer,
                          bias_initializer=initializer))
        encoder.summary()
        return encoder

    def _getDecoderModel(self, encoded_dim, img_shape):
        """ Build Decoder Model Based on Paper Configuration
        Args:
            encoded_dim (int) : number of latent variables
            img_shape (tuple) : shape of target images
        Return:
            A sequential keras model
        """
        decoder = Sequential()
        decoder.add(Dense(1000, activation='relu', input_dim=encoded_dim,
                          kernel_initializer=initializer,
                          bias_initializer=initializer))
        decoder.add(Dense(1000, activation='relu', kernel_initializer=initializer,
                          bias_initializer=initializer))
        decoder.add(Dense(np.prod(img_shape), activation='sigmoid',
                          kernel_initializer=initializer,
                          bias_initializer=initializer))
        decoder.add(Reshape(img_shape))
        decoder.summary()
        return decoder

    def _getDescriminator(self, encoded_dim):
        """ Build Discriminator Model Based on Paper Configuration
        Args:
            encoded_dim (int) : number of latent variables
        Return:
            A sequential keras model
        """
        discriminator = Sequential()
        discriminator.add(Dense(1000, activation='relu',
                                input_dim=encoded_dim, kernel_initializer=initializer,
                                bias_initializer=initializer))
        discriminator.add(Dense(1000, activation='relu', kernel_initializer=initializer,
                                bias_initializer=initializer))
        discriminator.add(Dense(1, activation='sigmoid', kernel_initializer=initializer,
                                bias_initializer=initializer))
        discriminator.summary()
        return discriminator

    def _initAndCompileFullModel(self, img_shape, encoded_dim):
        self.encoder = self._genEncoderModel(img_shape, encoded_dim)
        self.decoder = self._getDecoderModel(encoded_dim, img_shape)
        self.discriminator = self._getDescriminator(encoded_dim)
        img = Input(shape=img_shape)
        encoded_repr = self.encoder(img)
        gen_img = self.decoder(encoded_repr)
        self.autoencoder = Model(img, gen_img)
        valid = self.discriminator(encoded_repr)
        self.encoder_discriminator = Model(img, valid)
        self.discriminator.compile(optimizer=self.optimizer_discriminator,
                                   loss='binary_crossentropy',
                                   metrics=['accuracy'])
        self.autoencoder.compile(optimizer=self.optimizer_reconst,
                                 loss='mse')
        # Freeze the discriminator so encoder_discriminator only updates the encoder
        for layer in self.discriminator.layers:
            layer.trainable = False
        self.encoder_discriminator.compile(optimizer=self.optimizer_discriminator,
                                           loss='binary_crossentropy',
                                           metrics=['accuracy'])

    def imagegrid(self, epochnumber):
        fig = plt.figure(figsize=[20, 20])
        images = self.generateImages(100)
        for index, img in enumerate(images):
            img = img.reshape((28, 28))
            ax = fig.add_subplot(10, 10, index + 1)
            ax.set_axis_off()
            ax.imshow(img, cmap="gray")
        fig.savefig("images/AAE/" + str(epochnumber) + ".png")
        plt.show()
        plt.close(fig)

    def generateImages(self, n=100):
        latents = 5 * np.random.normal(size=(n, self.encoded_dim))
        imgs = self.decoder.predict(latents)
        return imgs

    def train(self, x_train, batch_size=100, epochs=5000, save_interval=500):
        half_batch = int(batch_size / 2)
        for epoch in range(epochs):
            # --------------- Train Discriminator -------------
            # Select a random half batch of images
            idx = np.random.randint(0, x_train.shape[0], half_batch)
            imgs = x_train[idx]
            # Generate a half batch of new images
            latent_fake = self.encoder.predict(imgs)
            #gen_imgs = self.decoder.predict(latent_fake)
            latent_real = 5 * np.random.normal(size=(half_batch, self.encoded_dim))
            valid = np.ones((half_batch, 1))
            fake = np.zeros((half_batch, 1))
            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(latent_real, valid)
            d_loss_fake = self.discriminator.train_on_batch(latent_fake, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            idx = np.random.randint(0, x_train.shape[0], batch_size)
            imgs = x_train[idx]
            # Generator wants the discriminator to label the generated representations as valid
            valid_y = np.ones((batch_size, 1))

            # Train the autoencoder reconstruction
            g_loss_reconstruction = self.autoencoder.train_on_batch(imgs, imgs)

            # Train generator
            g_logg_similarity = self.encoder_discriminator.train_on_batch(imgs, valid_y)
            # Plot the progress
            print("%d [D loss: %f, acc: %.2f%%] [G acc: %f, mse: %f]"
                  % (epoch, d_loss[0], 100 * d_loss[1],
                     g_logg_similarity[1], g_loss_reconstruction))
            if (epoch % save_interval == 0):
                self.imagegrid(epoch)


if __name__ == '__main__':
    # Load MNIST dataset
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.astype(np.float32) / 255.
    x_test = x_test.astype(np.float32) / 255.
    ann = AAN(encoded_dim=8)
    ann.train(x_train)
    generated = ann.generateImages(10000)
    L = helpers.approximateLogLiklihood(generated, x_test)
    print("Log Likelihood")
    print(L)
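helpers.approximateLogLiklihood is called in the __main__ block above, but helpers.py itself is not reproduced in this listing. Given the KernelDensity and GridSearchCV imports in GAE.py below, it presumably implements a Parzen-window (KDE) estimate of the mean test-set log-likelihood under the generated samples. A minimal sketch under that assumption follows; the name parzen_log_likelihood and its bandwidth defaults are illustrative, not the repository's actual code.

# Illustrative sketch only -- assumes a Parzen-window (KDE) log-likelihood estimate,
# which is what the approximateLogLiklihood helper appears to compute.
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KernelDensity


def parzen_log_likelihood(generated, x_test, bandwidths=np.logspace(-1, 0, 10)):
    """Fit a Gaussian KDE on generated samples and return the mean
    log-density it assigns to held-out test images."""
    generated = generated.reshape(len(generated), -1)
    x_test = x_test.reshape(len(x_test), -1)
    # Choose the kernel bandwidth by cross-validation on the generated samples.
    grid = GridSearchCV(KernelDensity(kernel='gaussian'),
                        {'bandwidth': bandwidths}, cv=3)
    grid.fit(generated)
    kde = grid.best_estimator_
    # Mean log-density of the test set under the fitted KDE.
    return kde.score_samples(x_test).mean()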
--------------------------------------------------------------------------------
/GAE.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# Author : Ali Mirzaei
# Date : 19/09/2017

import glob
from keras.models import Sequential, Model
from keras.layers import Dense, Input, Flatten, Reshape
from keras.datasets import mnist, cifar10
from keras.optimizers import Adam
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity
from mpl_toolkits.mplot3d import Axes3D
import helpers
from sklearn.model_selection import GridSearchCV
import keras
from keras.initializers import RandomNormal

initializer = RandomNormal(mean=0.0, stddev=0.01, seed=None)


class GAE():
    def __init__(self, img_shape=(28, 28), encoded_dim=2):
        self.encoded_dim = encoded_dim
        self.optimizer = Adam(0.001)
        self.optimizer_discriminator = Adam(0.00001)
        self._initAndCompileFullModel(img_shape, encoded_dim)
        self.img_shape = img_shape

    def _genEncoderModel(self, img_shape, encoded_dim):
        """ Build Encoder Model Based on Paper Configuration
        Args:
            img_shape (tuple) : shape of input image
            encoded_dim (int) : number of latent variables
        Return:
            A sequential keras model
        """
        encoder = Sequential()
        encoder.add(Flatten(input_shape=img_shape))
        encoder.add(Dense(1000, activation='relu'))
        encoder.add(Dense(1000, activation='relu'))
        encoder.add(Dense(encoded_dim))
        encoder.summary()
        return encoder

    def _getDecoderModel(self, encoded_dim, img_shape):
        """ Build Decoder Model Based on Paper Configuration
        Args:
            encoded_dim (int) : number of latent variables
            img_shape (tuple) : shape of target images
        Return:
            A sequential keras model
        """
        decoder = Sequential()
        decoder.add(Dense(1000, activation='relu', input_dim=encoded_dim))
        decoder.add(Dense(1000, activation='relu'))
        decoder.add(Dense(np.prod(img_shape), activation='sigmoid'))
        decoder.add(Reshape(img_shape))
        decoder.summary()
        return decoder

    def _getDescriminator(self, img_shape):
        """ Build Discriminator Model Based on Paper Configuration
        Args:
            img_shape (tuple) : shape of input images
        Return:
            A sequential keras model
        """
        discriminator = Sequential()
        discriminator.add(Flatten(input_shape=img_shape))
        discriminator.add(Dense(1000, activation='relu',
                                kernel_initializer=initializer,
                                bias_initializer=initializer))
        discriminator.add(Dense(1000, activation='relu', kernel_initializer=initializer,
                                bias_initializer=initializer))
        discriminator.add(Dense(1, activation='sigmoid', kernel_initializer=initializer,
                                bias_initializer=initializer))
        discriminator.summary()
        return discriminator

    def _initAndCompileFullModel(self, img_shape, encoded_dim):
        self.encoder = self._genEncoderModel(img_shape, encoded_dim)
        self.decoder = self._getDecoderModel(encoded_dim, img_shape)
        self.discriminator = self._getDescriminator(img_shape)
        img = Input(shape=img_shape)
        encoded_repr = self.encoder(img)
        gen_img = self.decoder(encoded_repr)
        self.autoencoder = Model(img, gen_img)
        self.autoencoder.compile(optimizer=self.optimizer, loss='mse')
        self.discriminator.compile(optimizer=self.optimizer,
                                   loss='binary_crossentropy',
                                   metrics=['accuracy'])
        # Freeze the discriminator so decoder_discriminator only updates the decoder
        for layer in self.discriminator.layers:
            layer.trainable = False

        latent = Input(shape=(encoded_dim,))
        gen_image_from_latent = self.decoder(latent)
        is_real = self.discriminator(gen_image_from_latent)
        self.decoder_discriminator = Model(latent, is_real)
        self.decoder_discriminator.compile(optimizer=self.optimizer_discriminator,
                                           loss='binary_crossentropy',
                                           metrics=['accuracy'])

    def imagegrid(self, epochnumber):
        fig = plt.figure(figsize=[20, 20])
        for i in range(-5, 5):
            for j in range(-5, 5):
                topred = np.array((i * 0.5, j * 0.5))
                topred = topred.reshape((1, 2))
                img = self.decoder.predict(topred)
                img = img.reshape(self.img_shape)
                ax = fig.add_subplot(10, 10, (i + 5) * 10 + j + 5 + 1)
                ax.set_axis_off()
                ax.imshow(img, cmap="gray")
        fig.savefig(str(epochnumber) + ".png")
        plt.show()
        plt.close(fig)

    def train(self, x_train, batch_size=32, epochs=5):
        fileNames = glob.glob('models/GAE/weights_mnist_autoencoder.*')
        fileNames.sort()
        if (len(fileNames) != 0):
            savedEpoch = int(fileNames[-1].split('.')[1])
            self.autoencoder.load_weights(fileNames[-1])
        else:
            savedEpoch = -1
        if(savedEpoch