├── __init__.py ├── README.md ├── utilsnn.py ├── auto_encoder.py ├── train_test_MNIST.py └── RBM.py /__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Reducing the Dimensionality of Data with Neural Networks 2 | 3 | ## Overview 4 | 5 | This repository contains Python (TensorFlow 1.x) scripts for reducing the dimensionality of data with neural networks, following the approach of Hinton and Salakhutdinov's "Reducing the Dimensionality of Data with Neural Networks": a stack of Restricted Boltzmann Machines (RBMs) is pretrained layer by layer and then used to initialize a deep autoencoder, which is fine-tuned end to end. These techniques are commonly used for feature learning and dimensionality reduction in machine learning. 6 | 7 | ## Files 8 | 9 | - **RBM.py**: Implementation of a Restricted Boltzmann Machine (RBM) trained with single-step contrastive divergence (CD-1). 10 | - **auto_encoder.py**: Implementation of a deep autoencoder (optionally with tied weights) for dimensionality reduction and data reconstruction; it can be initialized from pretrained RBM weights. 11 | - **train_test_MNIST.py**: Training and testing script that pretrains a stack of four RBMs (784-1000-500-250-30) on the MNIST dataset, loads their weights into the autoencoder, fine-tunes it, and plots the first two dimensions of the learned codes. 12 | - **utilsnn.py**: Utility functions for neural network operations: data scaling, random mini-batch sampling, Xavier initialization, and saving tiled images of learned weights. 13 | 14 | ## Instructions 15 | 16 | To use the provided scripts: 17 | 18 | 1. Make sure you have Python installed on your system. 19 | 2. Install the required dependencies (TensorFlow 1.x, NumPy, scikit-learn, Pillow, and matplotlib). 20 | 3. Run `python train_test_MNIST.py` to pretrain the RBMs, fine-tune the autoencoder, and save the learned weights to the `weights/` directory. 21 | -------------------------------------------------------------------------------- /utilsnn.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | import sklearn.preprocessing as prep 4 | from PIL import Image 5 | from util import tile_raster_images 6 | 7 | 8 | def show_image(path, n_w, img_shape, tile_shape): 9 | image = Image.fromarray( 10 | tile_raster_images( 11 | X=n_w.T, img_shape=img_shape, tile_shape=tile_shape, tile_spacing=(1, 1) 12 | ) 13 | ) 14 | image.save(path) 15 | 16 | 17 | def get_random_block_from_data(data, batch_size): 18 | start_index = np.random.randint(0, len(data) - batch_size) 19 | return data[start_index : (start_index + batch_size)] 20 | 21 | 22 | def min_max_scale(X_train, X_test): 23 | preprocessor = prep.MinMaxScaler().fit(np.concatenate((X_train, X_test), axis=0)) 24 | X_train = preprocessor.transform(X_train) 25 | X_test = preprocessor.transform(X_test) 26 | return X_train, X_test 27 | 28 | 29 | def mean_normalization(X_train, X_test): 30 | data = np.concatenate((X_train, X_test), axis=0) 31 | mean = data.mean(axis=0) 32 | std = data.std(axis=0) 33 | return (X_train - mean) / std, (X_test - mean) / std 34 | 35 | 36 | def xavier_init(fan_in, fan_out, function): 37 | if function is tf.nn.sigmoid: 38 | low = -4.0 * np.sqrt(6.0 / (fan_in + fan_out)) 39 | high = 4.0 * np.sqrt(6.0 / (fan_in + fan_out)) 40 | return tf.random_uniform( 41 | (fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32 42 | ) 43 | elif function is tf.nn.tanh: 44 | low = -1 * np.sqrt(6.0 / (fan_in + fan_out)) 45 | high = 1 * np.sqrt(6.0 / (fan_in + fan_out)) 46 | return tf.random_uniform( 47 | (fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32 48 | ) 49 | # Fall back to the standard Glorot/Xavier bound for any other transfer function 50 | low = -np.sqrt(6.0 / (fan_in + fan_out)) 51 | high = np.sqrt(6.0 / (fan_in + fan_out)) 52 | return tf.random_uniform( 53 | (fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32 54 | ) 55 | -------------------------------------------------------------------------------- /auto_encoder.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from utilsnn import xavier_init 3 | 4 | 5 | class AutoEncoder(object): 6 | def __init__( 7 | self, 8 | input_size, 9 | layer_sizes, 10 |
layer_names, 11 | symmetric_weights=False, 12 | optimizer=tf.train.AdamOptimizer(), 13 | transfer_function=tf.nn.sigmoid, 14 | ): 15 | 16 | self.layer_names = layer_names 17 | self.tied_weights = symmetric_weights 18 | 19 | # Build the encoding layers 20 | self.x = tf.placeholder(tf.float32, [None, input_size]) 21 | next_layer_input = self.x 22 | 23 | assert len(layer_sizes) == len(layer_names) 24 | 25 | self.encoding_matrices = [] 26 | self.encoding_biases = [] 27 | 28 | for i in range(len(layer_sizes)): 29 | dim = layer_sizes[i] 30 | input_dim = int(next_layer_input.get_shape()[1]) 31 | 32 | # Initialize W using xavier initialization 33 | W = tf.get_variable( 34 | layer_names[i][0], 35 | [input_dim, dim], 36 | initializer=tf.contrib.layers.xavier_initializer(), 37 | ) 38 | 39 | # Initialize b to zero 40 | b = tf.Variable(tf.zeros([dim]), name=layer_names[i][1]) 41 | 42 | # Store W and b so they can be reused for tied weights and for saving/loading. 43 | self.encoding_matrices.append(W) 44 | self.encoding_biases.append(b) 45 | 46 | output = transfer_function(tf.matmul(next_layer_input, W) + b) 47 | 48 | # the input into the next layer is the output of this layer 49 | next_layer_input = output 50 | 51 | # The fully encoded x value is now stored in next_layer_input 52 | self.encoded_x = next_layer_input 53 | 54 | # build the reconstruction layers by reversing the reductions (use a reversed copy so the caller's list is not mutated) 55 | layer_sizes = layer_sizes[::-1] 56 | self.encoding_matrices.reverse() 57 | 58 | self.decoding_matrices = [] 59 | self.decoding_biases = [] 60 | 61 | for i, dim in enumerate(layer_sizes[1:] + [int(self.x.get_shape()[1])]): 62 | W = None 63 | # if we are using tied weights, just look up the encoding matrix for this step and transpose it 64 | if symmetric_weights: 65 | W = tf.identity(tf.transpose(self.encoding_matrices[i])) 66 | else: 67 | W = tf.Variable( 68 | xavier_init( 69 | self.encoding_matrices[i].get_shape()[1].value, 70 | self.encoding_matrices[i].get_shape()[0].value, 71 | transfer_function, 72 | ) 73 | ) 74 | b = tf.Variable(tf.zeros([dim])) 75 | self.decoding_matrices.append(W) 76 | self.decoding_biases.append(b) 77 | 78 | output = transfer_function(tf.matmul(next_layer_input, W) + b) 79 | next_layer_input = output 80 | 81 | # reverse the encoding matrices back into their original order for saving/loading weights 82 | self.encoding_matrices.reverse() 83 | self.decoding_matrices.reverse() 84 | 85 | # the fully encoded and reconstructed value of x is here: 86 | self.reconstructed_x = next_layer_input 87 | 88 | # cost: root-mean-square reconstruction error 89 | self.cost = tf.sqrt(tf.reduce_mean(tf.square(self.x - self.reconstructed_x))) 90 | self.optimizer = optimizer.minimize(self.cost) 91 | 92 | # initialize variables 93 | init = tf.global_variables_initializer() 94 | self.sess = tf.Session() 95 | self.sess.run(init) 96 | 97 | def transform(self, X): 98 | return self.sess.run(self.encoded_x, {self.x: X}) 99 | 100 | def reconstruct(self, X): 101 | return self.sess.run(self.reconstructed_x, feed_dict={self.x: X}) 102 | 103 | def load_rbm_weights(self, path, layer_names, layer): 104 | saver = tf.train.Saver({ 105 | layer_names[0]: self.encoding_matrices[layer], 106 | layer_names[1]: self.encoding_biases[layer], 107 | }) 108 | saver.restore(self.sess, path) 109 | 110 | if not self.tied_weights: 111 | self.sess.run( 112 | self.decoding_matrices[layer].assign( 113 | tf.transpose(self.encoding_matrices[layer]) 114 | ) 115 | ) 116 | 117 | def print_weights(self): 118 | print("Matrices") 119 | for i in range(len(self.encoding_matrices)): 120 | print("Matrix", i) 121 |
print(self.encoding_matrices[i].eval(self.sess).shape) 122 | print(self.encoding_matrices[i].eval(self.sess)) 123 | if not self.tied_weights: 124 | print(self.decoding_matrices[i].eval(self.sess).shape) 125 | print(self.decoding_matrices[i].eval(self.sess)) 126 | 127 | def load_weights(self, path): 128 | dict_w = self.get_dict_layer_names() 129 | saver = tf.train.Saver(dict_w) 130 | saver.restore(self.sess, path) 131 | 132 | def save_weights(self, path): 133 | dict_w = self.get_dict_layer_names() 134 | saver = tf.train.Saver(dict_w) 135 | save_path = saver.save(self.sess, path) 136 | 137 | def get_dict_layer_names(self): 138 | dict_w = {} 139 | for i in range(len(self.layer_names)): 140 | dict_w[self.layer_names[i][0]] = self.encoding_matrices[i] 141 | dict_w[self.layer_names[i][1]] = self.encoding_biases[i] 142 | if not self.tied_weights: 143 | dict_w[self.layer_names[i][0] + "d"] = self.decoding_matrices[i] 144 | dict_w[self.layer_names[i][1] + "d"] = self.decoding_biases[i] 145 | return dict_w 146 | 147 | def partial_fit(self, X): 148 | cost, _ = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X}) 149 | return cost 150 | -------------------------------------------------------------------------------- /train_test_MNIST.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tensorflow as tf 3 | from sklearn import preprocessing 4 | import matplotlib.pyplot as plt 5 | from RBM import RBM 6 | from auto_encoder import AutoEncoder 7 | from datetime import datetime 8 | 9 | 10 | # Import MNIST data 11 | from tensorflow.examples.tutorials.mnist import input_data 12 | 13 | mnist_data = input_data.read_data_sets("MNIST", one_hot=True) 14 | 15 | # Train/test splits 16 | X_train, y_train, X_test, y_test = ( 17 | mnist_data.train.images, 18 | mnist_data.train.labels, 19 | mnist_data.test.images, 20 | mnist_data.test.labels, 21 | ) 22 | 23 | # Scale the data matrix to the [0, 1] range (fit on the training set only) 24 | min_max_scaler = preprocessing.MinMaxScaler() 25 | X_train_minmax = min_max_scaler.fit_transform(X_train) 26 | X_test_minmax = min_max_scaler.transform(X_test) 27 | 28 | # Define Network params 29 | nb_epoch = 1 30 | batch_size = 30 31 | iters = len(X_train) // batch_size 32 | 33 | # RBMs 34 | rbm_1 = RBM(784, 1000, ["rbmW_1", "rbma_1", "rbmb_1"], 0.3) 35 | rbm_2 = RBM(1000, 500, ["rbmW_2", "rbma_2", "rbmb_2"], 0.3) 36 | rbm_3 = RBM(500, 250, ["rbmW_3", "rbma_3", "rbmb_3"], 0.3) 37 | rbm_4 = RBM(250, 30, ["rbmW_4", "rbma_4", "rbmb_4"], 0.3) 38 | 39 | # Uncomment below to load pre-trained weights 40 | # rbm_1.restore_weights('./weights/rbm_1.weights') 41 | # rbm_2.restore_weights('./weights/rbm_2.weights') 42 | # rbm_3.restore_weights('./weights/rbm_3.weights') 43 | # rbm_4.restore_weights('./weights/rbm_4.weights') 44 | 45 | # Autoencoder 46 | autoencoder = AutoEncoder( 47 | 784, 48 | [1000, 500, 250, 30], 49 | [ 50 | ["rbmW_1", "rbmb_1"], 51 | ["rbmW_2", "rbmb_2"], 52 | ["rbmW_3", "rbmb_3"], 53 | ["rbmW_4", "rbmb_4"], 54 | ], 55 | symmetric_weights=True, 56 | ) 57 | 58 | 59 | # Make a directory to store weights 60 | if not os.path.exists("weights"): 61 | os.makedirs("weights") 62 | 63 | # Train first RBM 64 | print("Training first RBM") 65 | start_time = datetime.now() 66 | 67 | for i in range(nb_epoch): 68 | for j in range(iters): 69 | batch_xs, batch_ys = mnist_data.train.next_batch(batch_size) 70 | rbm_1.partial_fit(batch_xs) 71 | print("Epoch:", i, "Cost:", rbm_1.compute_cost(X_train)) 72 | 73 | stop_time = datetime.now() 74 |
elapsed_time = stop_time - start_time 75 | print("RBM 1 -- Elapsed Time: ", elapsed_time) 76 | print("Saving Weights...") 77 | rbm_1.save_weights("./weights/rbm_1.weights") 78 | 79 | # Train second RBM 80 | start_time = datetime.now() 81 | print("Training second RBM") 82 | for i in range(nb_epoch): 83 | for j in range(iters): 84 | batch_xs, batch_ys = mnist_data.train.next_batch(batch_size) 85 | batch_xs = rbm_1.transform(batch_xs) 86 | rbm_2.partial_fit(batch_xs) 87 | print("Epoch:", i, "Cost:", rbm_2.compute_cost(rbm_1.transform(X_train))) 88 | 89 | stop_time = datetime.now() 90 | elapsed_time = stop_time - start_time 91 | print("RBM 2 -- Elapsed Time: ", elapsed_time) 92 | print("Saving Weights...") 93 | rbm_2.save_weights("./weights/rbm_2.weights") 94 | 95 | # Train third RBM 96 | start_time = datetime.now() 97 | print("Training third RBM") 98 | for i in range(nb_epoch): 99 | for j in range(iters): 100 | batch_xs, batch_ys = mnist_data.train.next_batch(batch_size) 101 | batch_xs = rbm_2.transform(rbm_1.transform(batch_xs)) 102 | rbm_3.partial_fit(batch_xs) 103 | print( 104 | "Epoch:", 105 | i, 106 | "Cost:", 107 | rbm_3.compute_cost(rbm_2.transform(rbm_1.transform(X_train))), 108 | ) 109 | 110 | stop_time = datetime.now() 111 | elapsed_time = stop_time - start_time 112 | print("RBM 3 -- Elapsed Time: ", elapsed_time) 113 | print("Saving Weights...") 114 | rbm_3.save_weights("./weights/rbm_3.weights") 115 | 116 | 117 | # Train fourth RBM 118 | start_time = datetime.now() 119 | print("Training fourth RBM") 120 | for i in range(nb_epoch): 121 | for j in range(iters): 122 | batch_xs, batch_ys = mnist_data.train.next_batch(batch_size) 123 | batch_xs = rbm_3.transform(rbm_2.transform(rbm_1.transform(batch_xs))) 124 | rbm_4.partial_fit(batch_xs) 125 | print( 126 | "Epoch:", 127 | i, 128 | "Cost:", 129 | rbm_4.compute_cost(rbm_3.transform(rbm_2.transform(rbm_1.transform(X_train)))), 130 | ) 131 | 132 | stop_time = datetime.now() 133 | elapsed_time = stop_time - start_time 134 | print("RBM 4 -- Elapsed Time: ", elapsed_time) 135 | print("Saving Weights...") 136 | rbm_4.save_weights("./weights/rbm_4.weights") 137 | 138 | 139 | # Load the pretrained RBM weights into the autoencoder 140 | autoencoder.load_rbm_weights("./weights/rbm_1.weights", ["rbmW_1", "rbmb_1"], 0) 141 | autoencoder.load_rbm_weights("./weights/rbm_2.weights", ["rbmW_2", "rbmb_2"], 1) 142 | autoencoder.load_rbm_weights("./weights/rbm_3.weights", ["rbmW_3", "rbmb_3"], 2) 143 | autoencoder.load_rbm_weights("./weights/rbm_4.weights", ["rbmW_4", "rbmb_4"], 3) 144 | 145 | # Uncomment below to load pre-trained weights 146 | # autoencoder.load_weights('./weights/ae.weights') 147 | 148 | # Train Autoencoder 149 | print("Training Autoencoder") 150 | start_time = datetime.now() 151 | for i in range(nb_epoch): 152 | cost = 0.0 153 | for j in range(iters): 154 | batch_xs, batch_ys = mnist_data.train.next_batch(batch_size) 155 | cost += autoencoder.partial_fit(batch_xs) 156 | print("Epoch:", i, "Cost:", cost) 157 | 158 | stop_time = datetime.now() 159 | elapsed_time = stop_time - start_time 160 | print("AutoEncoder -- Elapsed Time: ", elapsed_time) 161 | print("Saving Weights...") 162 | 163 | autoencoder.save_weights("./weights/ae.weights") 164 | autoencoder.load_weights("./weights/ae.weights") 165 | 166 | fig, ax = plt.subplots() 167 | codes = autoencoder.transform(X_test) 168 | print(codes[:, 0]) 169 | print(codes[:, 1]) 170 | # Scatter plot of the first two dimensions of the learned 30-dimensional codes 171 | plt.scatter( 172 | codes[:, 0], codes[:, 1], alpha=0.5 173 | ) 174 |
plt.savefig("ae_scatter_plot") 175 | plt.show() 176 | -------------------------------------------------------------------------------- /RBM.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import numpy as np 3 | import tensorflow as tf 4 | 5 | 6 | class RBM(object): 7 | def __init__(self, n_visible, n_hidden, layer_names, learning_rate=0.01): 8 | """ 9 | :param n_visible: number of visible units 10 | :param n_hidden: number of hidden units 11 | :param layer_names: layer names 12 | :param learning_rate: optional, default = 0.01 13 | """ 14 | # Initialize params 15 | self.n_visible = n_visible 16 | self.n_hidden = n_hidden 17 | self.learning_rate = learning_rate 18 | self.layer_names = layer_names 19 | 20 | self.weights_and_biases = self._initialize_weights_and_biases() 21 | self.visible_0, self.rbm_W, self.rbm_a, self.rbm_b = self._create_placeholders() 22 | 23 | # NumPy copies of the weights and biases (new and old) that are fed into the update ops 24 | 25 | self.n_w = np.zeros([self.n_visible, self.n_hidden], np.float32) 26 | self.n_vb = np.zeros([self.n_visible], np.float32) 27 | self.n_hb = np.zeros([self.n_hidden], np.float32) 28 | self.o_w = np.random.normal(0.0, 0.01, [self.n_visible, self.n_hidden]) 29 | self.o_vb = np.zeros([self.n_visible], np.float32) 30 | self.o_hb = np.zeros([self.n_hidden], np.float32) 31 | 32 | self.hprobs_0, self.hstates_0, self.visible_1, self.hprobs_1 = ( 33 | self._gibbs_sampling_step() 34 | ) 35 | self.positive_grad, self.negative_grad = self._compute_gradients() 36 | self.update_W, self.update_a, self.update_b = self._update_weights_and_biases() 37 | 38 | # sampling functions 39 | self.h_sample = tf.nn.sigmoid( 40 | tf.matmul(self.visible_0, self.rbm_W) + self.rbm_b 41 | ) 42 | self.v_sample = tf.nn.sigmoid( 43 | tf.matmul(self.h_sample, tf.transpose(self.rbm_W)) + self.rbm_a 44 | ) 45 | 46 | self.error = self._compute_cost() 47 | 48 | self.sess = tf.Session() 49 | init_op = tf.global_variables_initializer() 50 | self.sess.run(init_op) 51 | 52 | def _initialize_weights_and_biases(self): 53 | weights_and_biases = { 54 | # Weights are initialized to small random values chosen from a zero-mean Gaussian with a standard deviation of 0.01 55 | # Larger random values can speed up training but tend to result in a slightly worse final model. 56 | "W": tf.Variable( 57 | tf.random_normal( 58 | [self.n_visible, self.n_hidden], stddev=0.01, dtype=tf.float32 59 | ), 60 | name=self.layer_names[0], 61 | ), 62 | # a - bias of visible units; b - bias of hidden units 63 | # Hidden biases are initialized to zero.
It can be helpful to initialize the bias of visible unit i to log[pi/(1−pi)], where pi is the fraction of training vectors in which unit i is on 64 | "a": tf.Variable( 65 | tf.zeros([self.n_visible], dtype=tf.float32), name=self.layer_names[1] 66 | ), 67 | "b": tf.Variable( 68 | tf.zeros([self.n_hidden], dtype=tf.float32), name=self.layer_names[2] 69 | ), 70 | } 71 | 72 | return weights_and_biases 73 | 74 | def _create_placeholders(self): 75 | visible_0 = tf.placeholder(tf.float32, [None, self.n_visible]) 76 | rbm_W = tf.placeholder(tf.float32, [self.n_visible, self.n_hidden]) 77 | # Visible bias (a) and hidden bias (b) 78 | rbm_a = tf.placeholder(tf.float32, [self.n_visible]) 79 | rbm_b = tf.placeholder(tf.float32, [self.n_hidden]) 80 | return visible_0, rbm_W, rbm_a, rbm_b 81 | 82 | def _gibbs_sampling_step(self): 83 | hprobs_0 = tf.nn.sigmoid(tf.matmul(self.visible_0, self.rbm_W) + self.rbm_b) 84 | hstates_0 = self._sample_probs(hprobs_0) 85 | 86 | # It is common to use the probability, pi, for the reconstruction instead of sampling a binary value 87 | visible_1 = tf.nn.sigmoid( 88 | tf.matmul(hprobs_0, tf.transpose(self.rbm_W)) + self.rbm_a 89 | ) 90 | # When hidden units are being driven by reconstructions, always use probabilities without sampling. 91 | hprobs_1 = tf.nn.sigmoid(tf.matmul(visible_1, self.rbm_W) + self.rbm_b) 92 | # hstates_1 = self._sample_probs(hprobs_1) 93 | 94 | return hprobs_0, hstates_0, visible_1, hprobs_1 95 | 96 | def _sample_probs(self, probs): 97 | """ 98 | Sample binary states from a tensor of Bernoulli probabilities. 99 | :param probs: tensor of probabilities 100 | :return: binary states 101 | """ 102 | 103 | # A hidden unit turns on if its probability is greater than a random number uniformly distributed between 0 and 1 104 | return tf.nn.relu(tf.sign(probs - tf.random_uniform(tf.shape(probs)))) 105 | 106 | def _compute_gradients(self): 107 | positive_grad = tf.matmul(tf.transpose(self.visible_0), self.hstates_0) 108 | negative_grad = tf.matmul(tf.transpose(self.visible_1), self.hprobs_1) 109 | 110 | return positive_grad, negative_grad 111 | 112 | def _update_weights_and_biases(self): 113 | update_W = self.rbm_W + self.learning_rate * ( 114 | self.positive_grad - self.negative_grad 115 | ) / tf.to_float(tf.shape(self.visible_0)[0]) 116 | update_a = self.rbm_a + self.learning_rate * tf.reduce_mean( 117 | self.visible_0 - self.visible_1, 0 118 | ) 119 | update_b = self.rbm_b + self.learning_rate * tf.reduce_mean( 120 | self.hprobs_0 - self.hprobs_1, 0 121 | ) 122 | return update_W, update_a, update_b 123 | 124 | def restore_weights(self, path): 125 | saver = tf.train.Saver( 126 | { 127 | self.layer_names[0]: self.weights_and_biases["W"], 128 | self.layer_names[1]: self.weights_and_biases["a"], 129 | self.layer_names[2]: self.weights_and_biases["b"], 130 | } 131 | ) 132 | 133 | saver.restore(self.sess, path) 134 | 135 | self.o_w = self.weights_and_biases["W"].eval(self.sess) 136 | self.o_vb = self.weights_and_biases["a"].eval(self.sess) 137 | self.o_hb = self.weights_and_biases["b"].eval(self.sess) 138 | 139 | def save_weights(self, path): 140 | self.sess.run(self.weights_and_biases["W"].assign(self.o_w)) 141 | self.sess.run(self.weights_and_biases["a"].assign(self.o_vb)) 142 | self.sess.run(self.weights_and_biases["b"].assign(self.o_hb)) 143 | 144 | saver = tf.train.Saver( 145 | { 146 | self.layer_names[0]: self.weights_and_biases["W"], 147 | self.layer_names[1]: self.weights_and_biases["a"], 148 | self.layer_names[2]: self.weights_and_biases["b"], 149 | } 150 | ) 151 | saver.save(self.sess, path) 152 | 153 | def _compute_cost(self): 154 | return tf.reduce_mean(tf.square(self.visible_0 - self.v_sample)) 155 | 156 | def
compute_cost(self, batch): 157 | return self.sess.run( 158 | self.error, 159 | feed_dict={ 160 | self.visible_0: batch, 161 | self.rbm_W: self.o_w, 162 | self.rbm_a: self.o_vb, 163 | self.rbm_b: self.o_hb, 164 | }, 165 | ) 166 | 167 | def partial_fit(self, batch_x): 168 | # It is often more efficient to divide the training set into small “mini-batches” of 10 to 100 cases 169 | self.n_w, self.n_vb, self.n_hb = self.sess.run( 170 | [self.update_W, self.update_a, self.update_b], 171 | feed_dict={ 172 | self.visible_0: batch_x, 173 | self.rbm_W: self.o_w, 174 | self.rbm_a: self.o_vb, 175 | self.rbm_b: self.o_hb, 176 | }, 177 | ) 178 | 179 | self.o_w = self.n_w 180 | self.o_vb = self.n_vb 181 | self.o_hb = self.n_hb 182 | 183 | return self.sess.run( 184 | self.error, 185 | feed_dict={ 186 | self.visible_0: batch_x, 187 | self.rbm_W: self.n_w, 188 | self.rbm_a: self.n_vb, 189 | self.rbm_b: self.n_hb, 190 | }, 191 | ) 192 | 193 | def transform(self, batch_x): 194 | return self.sess.run( 195 | self.h_sample, 196 | { 197 | self.visible_0: batch_x, 198 | self.rbm_W: self.o_w, 199 | self.rbm_a: self.o_vb, 200 | self.rbm_b: self.o_hb, 201 | }, 202 | ) 203 | --------------------------------------------------------------------------------
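The scripts above only visualize the 30-dimensional codes as a scatter plot. As a rough illustration of the reconstruction path described in the README, the minimal sketch below is not part of the repository: it assumes `train_test_MNIST.py` has already been run so that `./weights/ae.weights` exists, rebuilds the same autoencoder, restores the saved weights, and compares a few MNIST test digits with their reconstructions. The output file name `ae_reconstructions.png` is illustrative.

```python
# Illustrative sketch (assumes ./weights/ae.weights was produced by train_test_MNIST.py).
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
from auto_encoder import AutoEncoder

mnist_data = input_data.read_data_sets("MNIST", one_hot=True)
X_test = mnist_data.test.images

# Same 784-1000-500-250-30 architecture and layer names as in train_test_MNIST.py.
autoencoder = AutoEncoder(
    784,
    [1000, 500, 250, 30],
    [["rbmW_1", "rbmb_1"], ["rbmW_2", "rbmb_2"], ["rbmW_3", "rbmb_3"], ["rbmW_4", "rbmb_4"]],
    symmetric_weights=True,
)
autoencoder.load_weights("./weights/ae.weights")

# Encode a few digits to 30-D codes and decode them back to 784-D images.
codes = autoencoder.transform(X_test[:5])
reconstructions = autoencoder.reconstruct(X_test[:5])
print("code shape:", codes.shape)  # (5, 30)

fig, axes = plt.subplots(2, 5, figsize=(10, 4))
for k in range(5):
    axes[0, k].imshow(X_test[k].reshape(28, 28), cmap="gray")           # original digit
    axes[1, k].imshow(reconstructions[k].reshape(28, 28), cmap="gray")  # reconstruction
    axes[0, k].axis("off")
    axes[1, k].axis("off")
plt.savefig("ae_reconstructions.png")  # illustrative output file name
plt.show()
```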