├── README.md
├── binary_2.py
├── .gitignore
└── binary_gnn_gru.py

/README.md:
--------------------------------------------------------------------------------
# Inference in PGM by GNN

A TensorFlow implementation of the paper [**Inference in Probabilistic Graphical Models by Graph Neural Networks**](https://arxiv.org/pdf/1803.07710.pdf)
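
The model denoises binarized MNIST digits corrupted by salt-and-pepper noise: a GNN whose node states are updated by a convolutional GRU is trained to recover the clean digits, with an iterative min-sum-style reparameterization of the same grid MRF providing an auxiliary target. The code targets TensorFlow 1.x (it relies on `tf.placeholder`, `tf.contrib.layers.xavier_initializer`, and the `tensorflow.examples.tutorials.mnist` reader).

A standalone sketch of the corruption applied in `binary_gnn_gru.py` (the `corrupt` helper is illustrative, not part of the code; the probabilities match the script):

```python
import numpy as np

def corrupt(images, p=0.14, q=0.5, seed=1000):
    """Flip each pixel with probability p; a flipped pixel
    becomes 1 (salt) with probability q, else 0 (pepper)."""
    rng = np.random.RandomState(seed)
    flipped = rng.choice([True, False], size=images.shape, p=[p, 1 - p])
    salted = rng.choice([True, False], size=images.shape, p=[q, 1 - q])
    noisy = images.astype(np.float64).copy()
    noisy[flipped & salted] = 1.0
    noisy[flipped & ~salted] = 0.0
    return noisy
```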
--------------------------------------------------------------------------------
/binary_2.py:
--------------------------------------------------------------------------------
# Scratch fragments duplicated from binary_gnn_gru.py; see that file for context.

self.CM_0 = Conv2D(filters=message_size * 2, kernel_size=1, activation=tf.nn.relu, kernel_initializer=conv_initializer, name='CM_0')  # * 2


def edge2node(self, edge, size=message_size, name=None):
    hor0 = tf.slice(edge, [0, 0, 0, 0], [-1, input_dimensions, input_dimensions_m1, size])
    hor1 = tf.slice(edge, [0, 0, 0, size], [-1, input_dimensions, input_dimensions_m1, size])
    ver0 = tf.slice(edge, [0, input_dimensions, 0, 0], [-1, input_dimensions, input_dimensions_m1, size])
    ver1 = tf.slice(edge, [0, input_dimensions, 0, size], [-1, input_dimensions, input_dimensions_m1, size])
    hor_ = tf.pad(hor0, self.pad0) + tf.pad(hor1, self.pad1)
    ver_ = tf.pad(ver0, self.pad0) + tf.pad(ver1, self.pad1)
    return tf.add(hor_, tf.transpose(ver_, [0, 2, 1, 3]), name=name)


repar_degree = main_layers.edge2node(tf.ones([1, input_dimensions_2, input_dimensions_m1, 2]), size=1, name='repar_degree')  # 2
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
--------------------------------------------------------------------------------
/binary_gnn_gru.py:
--------------------------------------------------------------------------------
#%% (0) Important libraries
import tensorflow as tf
import numpy as np
from numpy import random
import matplotlib.pyplot as plt
from IPython import display
# % matplotlib inline


#%% (1) Dataset creation.

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('./Data MNIST/', one_hot=False)


#%% (2) Model definition.

import tensorflow as tf
from tensorflow.layers import Flatten, Conv2D

import tensorflow.keras as keras
import tensorflow.keras.layers as layers

dtype = tf.float32
conv_initializer = tf.contrib.layers.xavier_initializer(dtype=dtype, uniform=False)  # tf.initializers.truncated_normal(dtype=dtype, mean=0, stddev=0.1)

time_step = 10
label_size = 2
hidden_size = 5
message_size = 5
bp_time_step = 30
input_dimensions = 28
message_hidden_size = 64

input_dimensions_2 = input_dimensions * 2
input_dimensions_m1 = input_dimensions - 1

input_layer = tf.placeholder(dtype=dtype, shape=(None, input_dimensions, input_dimensions, 1), name='input')  # [0, 1]
expected_output = tf.placeholder(dtype=tf.int32, shape=(None, input_dimensions, input_dimensions), name='expected_output')  # [0, 1]

# Learned MRF parameters: field strength, per-pixel observation strength, and
# per-edge coupling strength (each is squared wherever it is used, so the
# effective parameter stays non-negative).
hyper_h = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(1,), mean=0, stddev=1), name='hyper_h')
hyper_eta = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(1, input_dimensions, input_dimensions, 1), mean=0, stddev=1), name='hyper_eta')
hyper_beta = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(1, input_dimensions_2, input_dimensions_m1, 1), mean=0, stddev=1), name='hyper_beta')

batch = tf.shape(input_layer)[0]


def node2edge(node, name=None):
    hor0 = tf.slice(node, [0, 0, 0, 0], [-1, input_dimensions, input_dimensions_m1, -1])
    hor1 = tf.slice(node, [0, 0, 1, 0], [-1, input_dimensions, input_dimensions_m1, -1])
    hor = tf.concat([hor0, hor1], axis=-1)
    ver0 = tf.transpose(tf.slice(node, [0, 0, 0, 0], [-1, input_dimensions_m1, input_dimensions, -1]), [0, 2, 1, 3])
    ver1 = tf.transpose(tf.slice(node, [0, 1, 0, 0], [-1, input_dimensions_m1, input_dimensions, -1]), [0, 2, 1, 3])
    ver = tf.concat([ver0, ver1], axis=-1)
    return tf.concat([hor, ver], axis=1, name=name)
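
# node2edge packs the 28x28 grid's edges into a single tensor: a
# (batch, 28, 28, C) node tensor becomes (batch, 56, 27, 2*C), where rows
# 0..27 hold the horizontal edges (column pairs j, j+1), rows 28..55 hold the
# vertical edges transposed into the same layout, and the two endpoints'
# features are stacked along the channel axis. For example, with
# hidden_size = 5 a (1, 28, 28, 5) input yields (1, 56, 27, 10).
# GNN_GRU.edge2node below is the matching scatter-and-sum inverse.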
potential_size = 3
# Ising-style potentials: (input_layer * 2 - 1) maps {0, 1} pixels to {-1, +1},
# so the unary cost is a bias h^2 minus an observation term eta^2 signed by the
# noisy pixel; the pairwise cost is the coupling beta^2, tiled over the batch.
unary = tf.square(hyper_h) - (input_layer * 2 - 1) * tf.square(hyper_eta)
unary_edge = node2edge(unary, 'unary_edge')
binary_edge = tf.tile(tf.square(hyper_beta), [batch, 1, 1, 1], name='binary_edge')
potential = tf.concat([unary_edge, binary_edge], axis=-1, name='potential')
# potential_ = tf.reshape(potential, [1, -1, input_dimensions_2, input_dimensions_m1, potential_size])
# potential_dup = tf.tile(tf.expand_dims(potential, axis=0), [time_step, 1, 1, 1, 1], name='potential_dup')


# A commented-out sketch of one message-passing step:
# depth_size = hidden_size * 2 + potential_size
# H_0 = tf.tensordot(input_layer, tf.zeros(dtype=dtype, shape=(1, hidden_size)), axes=1, name='H_0')
# E_t = node2edge(H_0, 'E_t')  # H_tm1
# E_t_bar = tf.concat([E_t, potential], axis=-1)  # X_t
# M_t_2 = Conv2D(filters=message_hidden_size, kernel_size=1, activation=tf.nn.relu, kernel_initializer=conv_initializer)(E_t_bar)
# M_t_1 = Conv2D(filters=message_hidden_size, kernel_size=1, activation=tf.nn.relu, kernel_initializer=conv_initializer)(M_t_2)
# M_t_0 = Conv2D(filters=message_size, kernel_size=1, activation=tf.nn.relu, kernel_initializer=conv_initializer)(M_t_1)
# M_t_bar = edge2node(M_t_0, 'M_t_bar')
# H_t = GRU(H_tm1, M_t_bar)
# gru_cell = tf.nn.rnn_cell.GRUCell(hidden_size)
# O, H_t = tf.nn.dynamic_rnn(gru_cell, tf.expand_dims(tf.reshape(tf.slice(M_t_bar, [0, 0, 0, 0], [-1, 1, 1, -1]), [-1, hidden_size]), axis=0), \
#     initial_state=tf.reshape(tf.slice(H_0, [0, 0, 0, 0], [-1, 1, 1, -1]), [-1, hidden_size]), time_major=True)  # H_tm1


class GNN_GRU:
    def __init__(self):
        # Paddings used by edge2node to scatter edge messages back to nodes.
        self.pad0 = [[0, 0], [0, 0], [0, 1], [0, 0]]
        self.pad1 = [[0, 0], [0, 0], [1, 0], [0, 0]]

        # Message MLP, realized as 1x1 convolutions over the edge tensor.
        self.CM_2 = Conv2D(filters=message_hidden_size, kernel_size=1, activation=tf.nn.relu, kernel_initializer=conv_initializer, name='CM_2')
        self.CM_1 = Conv2D(filters=message_hidden_size, kernel_size=1, activation=tf.nn.relu, kernel_initializer=conv_initializer, name='CM_1')
        self.CM_0 = Conv2D(filters=message_size * 2, kernel_size=1, activation=tf.nn.relu, kernel_initializer=conv_initializer, name='CM_0')  # * 2

        # Weights for input vectors of shape (message_size, hidden_size)
        self.Wr = Conv2D(filters=hidden_size, kernel_size=1, activation=None, kernel_initializer=conv_initializer, use_bias=True, name='C_Wr')
        self.Wz = Conv2D(filters=hidden_size, kernel_size=1, activation=None, kernel_initializer=conv_initializer, use_bias=True, name='C_Wz')
        self.Wh = Conv2D(filters=hidden_size, kernel_size=1, activation=None, kernel_initializer=conv_initializer, use_bias=True, name='C_Wh')

        # Weights for hidden vectors of shape (hidden_size, hidden_size)
        self.Ur = Conv2D(filters=hidden_size, kernel_size=1, activation=None, kernel_initializer=conv_initializer, use_bias=True, name='C_Ur')
        self.Uz = Conv2D(filters=hidden_size, kernel_size=1, activation=None, kernel_initializer=conv_initializer, use_bias=True, name='C_Uz')
        self.Uh = Conv2D(filters=hidden_size, kernel_size=1, activation=None, kernel_initializer=conv_initializer, use_bias=True, name='C_Uh')

        # Biases for hidden vectors of shape (hidden_size,)
        # self.br = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(1, 1, hidden_size), mean=0, stddev=0.01), name='br')
        # self.bz = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(1, 1, hidden_size), mean=0, stddev=0.01), name='bz')
        # self.bh = tf.Variable(tf.truncated_normal(dtype=dtype, shape=(1, 1, hidden_size), mean=0, stddev=0.01), name='bh')
        # self.Br = tf.tile(self.br, [input_dimensions, input_dimensions, 1], name='C_Br')
        # self.Bz = tf.tile(self.bz, [input_dimensions, input_dimensions, 1], name='C_Bz')
        # self.Bh = tf.tile(self.bh, [input_dimensions, input_dimensions, 1], name='C_Bh')
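
        # The recurrence is unrolled with tf.scan: self.X below is a dummy
        # sequence whose only role is to fix the number of iterations
        # (time_step). forward_pass threads the node hidden state through
        # time_step rounds of message passing; H_cur is the final state.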
        self.X = tf.ones([time_step, 1])
        # Zero initial hidden state of shape (batch, 28, 28, hidden_size).
        self.H_0 = tf.tensordot(input_layer, tf.zeros(dtype=dtype, shape=(1, hidden_size)), axes=1, name='H_0')
        self.H_ts = tf.scan(self.forward_pass, self.X, initializer=self.H_0, name='H_ts')
        self.H_cur = tf.squeeze(tf.slice(self.H_ts, [time_step - 1, 0, 0, 0, 0], [1, -1, -1, -1, -1]), axis=0, name='H_cur')

    def get_current_state(self):
        return self.H_cur

    def forward_pass(self, H_tm1, X_t):
        # Gather the endpoint states of every edge and append the potentials.
        E_t = node2edge(H_tm1)
        E_t_bar = tf.concat([E_t, potential], axis=-1)

        # Compute edge messages with the 1x1-conv MLP, then aggregate them at
        # the nodes.
        M_t_2 = self.CM_2(E_t_bar)
        M_t_1 = self.CM_1(M_t_2)
        M_t_0 = self.CM_0(M_t_1)
        M_t_bar = self.edge2node(M_t_0)

        # Convolutional GRU update: update gate Z_t, reset gate R_t.
        Z_t = tf.sigmoid(self.Wz(M_t_bar) + self.Uz(H_tm1))
        R_t = tf.sigmoid(self.Wr(M_t_bar) + self.Ur(H_tm1))

        H_proposal = tf.tanh(self.Wh(M_t_bar) + self.Uh(tf.multiply(R_t, H_tm1)))

        H_t = tf.multiply(1 - Z_t, H_tm1) + tf.multiply(Z_t, H_proposal)

        return H_t

    def edge2node(self, edge, size=message_size, name=None):
        # Inverse of node2edge: split each edge's two directed messages (the
        # first and last `size` channels), pad them back onto their endpoint
        # nodes, and sum the horizontal and re-transposed vertical parts.
        hor0 = tf.slice(edge, [0, 0, 0, 0], [-1, input_dimensions, input_dimensions_m1, size])
        hor1 = tf.slice(edge, [0, 0, 0, size], [-1, input_dimensions, input_dimensions_m1, size])
        ver0 = tf.slice(edge, [0, input_dimensions, 0, 0], [-1, input_dimensions, input_dimensions_m1, size])
        ver1 = tf.slice(edge, [0, input_dimensions, 0, size], [-1, input_dimensions, input_dimensions_m1, size])
        hor_ = tf.pad(hor0, self.pad0) + tf.pad(hor1, self.pad1)
        ver_ = tf.pad(ver0, self.pad0) + tf.pad(ver1, self.pad1)
        return tf.add(hor_, tf.transpose(ver_, [0, 2, 1, 3]), name=name)


main_layers = GNN_GRU()
H_cur = main_layers.get_current_state()


# Readout: map the final hidden state to per-pixel label probabilities.
map_2 = Conv2D(filters=message_hidden_size, kernel_size=1, activation=tf.nn.relu, kernel_initializer=conv_initializer, name='CR_2')(H_cur)
map_1 = Conv2D(filters=message_hidden_size, kernel_size=1, activation=tf.nn.relu, kernel_initializer=conv_initializer, name='CR_1')(map_2)
map_0 = Conv2D(filters=label_size, kernel_size=1, activation=tf.nn.softmax, kernel_initializer=conv_initializer, name='CR_0')(map_1)
output_layer = map_0  # tf.subtract(1., map_0, name='soft_output')
label_layer = tf.cast(tf.argmax(output_layer, axis=-1), dtype=tf.int32, name='output')


# Rewrite the potentials in min-sum form: repar_unary holds signed per-label
# node costs, repar_binary holds 2x2 per-edge cost tables (cost -beta^2 when
# the two labels agree, +beta^2 when they differ).
ones_repar_unary = np.ones((1, 1, 1, label_size))
ones_repar_unary[:, :, :, :1] *= -1
ones_repar_binary = np.array([[1, -1]])
ones_repar_binary = - ones_repar_binary.T.dot(ones_repar_binary).reshape((1, 1, 1, label_size, -1))

repar_unary = tf.multiply(unary, tf.constant(ones_repar_unary, dtype=dtype), name='repar_unary')
repar_binary = tf.multiply(tf.expand_dims(binary_edge, axis=-1), tf.constant(ones_repar_binary, dtype=dtype), name='repar_binary')
# repar_degree counts the grid edges incident to each node (2 at corners, 3 on
# borders, 4 in the interior); it normalizes the node beliefs in loop() below.
repar_degree = main_layers.edge2node(tf.ones([1, input_dimensions_2, input_dimensions_m1, 2]), size=1, name='repar_degree')  # 2
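
# loop() runs an iterative min-sum-style reparameterization of the grid MRF
# (this characterization is a reading of the update rules, not a comment from
# the original source): each round, every edge takes the current beliefs v0,
# v1 of its two endpoints, computes min-marginal messages over its 2x2 cost
# table E, shifts part of the resulting mass between the edge and its
# endpoints, and the per-node contributions are averaged by node degree.
# After bp_time_step rounds the node beliefs approximate min-marginals, so an
# argmin over the label axis yields a MAP-style labeling.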
def loop(E_0, V_0, time=time_step):
    pad0 = [[0, 0], [0, 0], [0, 1], [0, 0]]
    pad1 = [[0, 0], [0, 0], [1, 0], [0, 0]]
    E_ts, V_ts = [E_0], [V_0]
    for i in range(time):
        E, V = E_ts[-1], V_ts[-1]
        # Endpoint beliefs of every edge, shaped for broadcasting against the
        # (batch, 56, 27, 2, 2) edge cost table E.
        hor0 = tf.slice(V, [0, 0, 0, 0], [-1, input_dimensions, input_dimensions_m1, -1])
        hor1 = tf.slice(V, [0, 0, 1, 0], [-1, input_dimensions, input_dimensions_m1, -1])
        ver0 = tf.transpose(tf.slice(V, [0, 0, 0, 0], [-1, input_dimensions_m1, input_dimensions, -1]), [0, 2, 1, 3])
        ver1 = tf.transpose(tf.slice(V, [0, 1, 0, 0], [-1, input_dimensions_m1, input_dimensions, -1]), [0, 2, 1, 3])
        v0 = tf.expand_dims(tf.concat([hor0, ver0], axis=1), axis=-1)  # (b, 56, 27, 2, 1)
        v1 = tf.transpose(tf.expand_dims(tf.concat([hor1, ver1], axis=1), axis=-1), [0, 1, 2, 4, 3])  # (b, 56, 27, 1, 2)
        # Min-marginal messages to each endpoint, and the belief mass shifted
        # between the nodes and the edge.
        mbu0 = tf.reduce_min(E + v1, axis=-1, keepdims=True)
        mbu1 = tf.reduce_min(E + v0, axis=-2, keepdims=True)
        nnu0 = tf.divide(v0 - mbu0, 2)
        nnu1 = tf.divide(v1 - mbu1, 2)
        E_ts.append(E + (nnu0 + nnu1) / 4)
        # Scatter the updated endpoint beliefs back to the node layout and
        # average by node degree.
        npu0 = tf.squeeze(v0 - nnu0, axis=-1)
        npu1 = tf.squeeze(tf.transpose(v1 - nnu1, [0, 1, 2, 4, 3]), axis=-1)
        npu2 = tf.pad(npu0, pad0) + tf.pad(npu1, pad1)
        hor_ = tf.slice(npu2, [0, 0, 0, 0], [-1, input_dimensions, input_dimensions, -1])
        ver_ = tf.slice(npu2, [0, input_dimensions, 0, 0], [-1, input_dimensions, input_dimensions, -1])
        V_t_bar = hor_ + tf.transpose(ver_, [0, 2, 1, 3])
        V_ts.append(tf.divide(V_t_bar, repar_degree))
    return V_ts[-1]

V_t = loop(repar_binary, repar_unary, bp_time_step)
V_softmin = tf.nn.softmax(- V_t, axis=-1, name='bp_output')
V_label = tf.cast(tf.argmin(V_t, axis=-1), dtype=tf.int32, name='bp_label')
one_hot_V_t = tf.one_hot(V_label, depth=label_size, dtype=dtype)


bp_lambda = 0.5
one_hot_output = tf.one_hot(expected_output, depth=label_size, dtype=dtype)
# Cross-entropy against a combined target: the clean image plus bp_lambda
# times the (gradient-stopped) one-hot BP labels, with a small penalty on the
# field strength hyper_h.
loss = tf.reduce_mean(- (one_hot_output + tf.stop_gradient(bp_lambda * one_hot_V_t)) * tf.log(output_layer), name='loss') + 0.001 * tf.square(hyper_h)
# NB: despite its name, this tensor is the pixel error rate (1 - accuracy).
accuracy = 1 - tf.reduce_mean(tf.cast(tf.equal(label_layer, expected_output), dtype=dtype), name='accuracy')
# loss = tf.reduce_mean(- one_hot_output * tf.log(V_softmin), name='loss') + 0.01 * tf.square(hyper_h)
# accuracy = 1 - tf.reduce_mean(tf.cast(tf.equal(V_label, expected_output), dtype=dtype), name='accuracy')

print(np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]), len(tf.get_default_graph().get_operations()))
print([v.name for v in tf.trainable_variables()])
hhh = [v for v in tf.trainable_variables() if v.name == 'hyper_h:0'][0]
# Quick smoke test, kept for reference:
# session = tf.Session()
# init_variables = tf.global_variables_initializer()
# session.run(init_variables)
# iii = mnist.test.images[:4]
# l, aaa, pu = session.run([loss, accuracy, V_t], feed_dict={input_layer: iii.reshape((-1, 28, 28, 1)), expected_output: iii.reshape((-1, 28, 28)).astype(np.int32)})
# print(l, aaa, pu.shape)
# exit()
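
# A minimal inference sketch (illustrative, not part of the original script):
# both labelings depend only on the input placeholder, so after training,
# denoised labels for a batch of noisy images X of shape (n, 28, 28, 1) can
# be fetched with
#   X_denoised = session.run(label_layer, feed_dict={input_layer: X})
# and the BP baseline's labeling with
#   X_bp = session.run(V_label, feed_dict={input_layer: X})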

#%% (3) Initialize and train the model.

# Initialize a session
session = tf.Session()

# Use the Adam optimizer for training
num_epoch = 2
batch_size = 16
train_step = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)

# Initialize all the variables
init_variables = tf.global_variables_initializer()
session.run(init_variables)

# Initialize the losses
train_losses = []
validation_losses = []

# Perform all the iterations

np.random.seed(1000)  # 2000, 8000, 100, .01, 2
display_step = 100  # 625
num_train = 8000  # mnist.train.num_examples
# Binarize the images, keep them as clean targets, and corrupt the inputs
# with salt-and-pepper noise: each pixel flips with probability p, and a
# flipped pixel becomes 1 (salt) with probability q, else 0 (pepper).
Y_train = mnist.train.images[:num_train]
Y_train = (np.round(Y_train.reshape((-1, input_dimensions, input_dimensions, 1)))).astype(np.int32)
X_train = Y_train * 1.0
Y_train = Y_train.squeeze()
p, q = .14, .5
flipped = np.random.choice([True, False], size=Y_train.shape, p=[p, 1 - p])
salted = np.random.choice([True, False], size=Y_train.shape, p=[q, 1 - q])
peppered = ~salted
X_train[flipped & salted] = 1
X_train[flipped & peppered] = 0
num_test = 2000  # mnist.test.num_examples
Y_test = mnist.test.images[:num_test]
Y_test = (np.round(Y_test.reshape((-1, input_dimensions, input_dimensions, 1)))).astype(np.int32)
X_test = Y_test * 1.0
Y_test = Y_test.squeeze()
flipped = np.random.choice([True, False], size=Y_test.shape, p=[p, 1 - p])
salted = np.random.choice([True, False], size=Y_test.shape, p=[q, 1 - q])
peppered = ~salted
X_test[flipped & salted] = 1
X_test[flipped & peppered] = 0

for epoch in range(num_epoch):
    avg_cost = 0
    total_batch = num_train // batch_size
    for i in range(0, total_batch * batch_size, batch_size):
        batch_x, batch_y = X_train[i : i + batch_size], Y_train[i : i + batch_size]
        # c is the batch's pixel error rate (see the `accuracy` tensor above).
        _, c = session.run([train_step, accuracy], feed_dict={input_layer: batch_x, expected_output: batch_y})
        avg_cost += c / total_batch
        iba1 = i // batch_size + 1
        if iba1 % display_step == 0:
            print('Iteration %04d, cost: %.9f' % (i + batch_size, avg_cost * total_batch / iba1))
    train_loss = avg_cost

    avg_test = 0
    total_batch = num_test // batch_size
    for i in range(0, total_batch * batch_size, batch_size):
        batch_x, batch_y = X_test[i : i + batch_size], Y_test[i : i + batch_size]
        c = session.run(accuracy, feed_dict={input_layer: batch_x, expected_output: batch_y})
        avg_test += c / total_batch
    validation_loss = avg_test
    X_nn, X_bp = session.run([label_layer, V_label], feed_dict={input_layer: X_test[:batch_size], expected_output: Y_test[:batch_size]})

    train_losses += [train_loss]
    validation_losses += [validation_loss]
    print('Epoch %03d, train loss: %.4f, test loss: %.4f' % (epoch + 1, train_loss, validation_loss))

    if (epoch + 1) % num_epoch == 0:
        plt.plot(train_losses, '-b', label='Train loss')
        plt.plot(validation_losses, '-r', label='Validation loss')
        plt.title('Loss')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend(); plt.show()

print(session.run(tf.square(hhh)))
# Show the clean test digit, the GNN's denoising, and the BP labeling.
plt.imshow(Y_test[0], cmap='Greys')
plt.show()
plt.imshow(X_nn[0], cmap='Greys')
plt.show()
plt.imshow(X_bp[0], cmap='Greys')
plt.show()
--------------------------------------------------------------------------------