├── .idea
│   ├── encodings.xml
│   ├── misc.xml
│   ├── modules.xml
│   ├── workspace.xml
│   └── 自动编码器.iml
├── AutoEncoder.py
├── README.md
├── logs
│   ├── events.out.tfevents.1554276502.LAPTOP-LV1HQP82
│   └── events.out.tfevents.1554276973.LAPTOP-LV1HQP82
├── mnist_data
│   ├── t10k-images-idx3-ubyte.gz
│   ├── t10k-labels-idx1-ubyte.gz
│   ├── train-images-idx3-ubyte.gz
│   └── train-labels-idx1-ubyte.gz
├── 实现降噪自动编码器
│   ├── DenoiseAutoEncoder.py
│   ├── logs
│   │   └── events.out.tfevents.1554273765.LAPTOP-LV1HQP82
│   ├── 知识点
│   │   ├── DA.png
│   │   ├── 参数初始化1.png
│   │   └── 参数初始化2.png
│   └── 计算图.png
├── 结果.png
├── 自动编码器原理
│   ├── 稀疏自动编码器.png
│   ├── 自动编码器1 (1).png
│   ├── 自动编码器1 (2).png
│   ├── 自动编码器1 (3).png
│   ├── 自动编码器1 (4).png
│   ├── 自动编码器1 (5).png
│   └── 降噪自动编码器.png
└── 计算图.png
/.idea/encodings.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/workspace.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/自动编码器.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/AutoEncoder.py:
--------------------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data  # requires TensorFlow 1.x

# Training parameters
learning_rate = 0.01
training_epochs = 20
batch_size = 256
display_step = 5
examples_to_show = 10
# Network model parameters
n_input_units = 784  # number of input neurons, MNIST data input (img shape: 28*28)
n_hidden1_units = 256  # neurons in the encoder's first hidden layer (encoder and decoder use hidden layers of the same size)
n_hidden2_units = 128  # neurons in the encoder's second hidden layer (encoder and decoder use hidden layers of the same size)
n_output_units = n_input_units  # the decoder's output layer must have as many units as the input


# Attach a full set of summaries to a tensor (mean, standard deviation, max, min, histogram)
def varible_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('mean', mean)
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)


# Return a weight variable for the given numbers of input and output nodes
def WeightsVarible(n_in, n_out, name_str='weights'):
    return tf.Variable(tf.random_normal([n_in, n_out]), dtype=tf.float32, name=name_str)


# Return a bias variable for the given number of output nodes
def BiasesVarible(n_out, name_str='biases'):
    return tf.Variable(tf.random_normal([n_out]), dtype=tf.float32, name=name_str)


# Build the encoder
def Encoder(x_origin, activate_func=tf.nn.sigmoid):
    # First hidden layer of the encoder
    with tf.name_scope('Layer1'):
        weights = WeightsVarible(n_input_units, n_hidden1_units)
        biases = BiasesVarible(n_hidden1_units)
        x_code1 = activate_func(tf.nn.xw_plus_b(x_origin, weights, biases))
        varible_summaries(weights)
    # Second hidden layer of the encoder
    with tf.name_scope('Layer2'):
        weights = WeightsVarible(n_hidden1_units, n_hidden2_units)
        biases = BiasesVarible(n_hidden2_units)
        x_code = activate_func(tf.nn.xw_plus_b(x_code1, weights, biases))
        varible_summaries(weights)
    return x_code


# Build the decoder
def Decoder(x_code, activate_func=tf.nn.sigmoid):
    # First hidden layer of the decoder
    with tf.name_scope('Layer1'):
        weights = WeightsVarible(n_hidden2_units, n_hidden1_units)
        biases = BiasesVarible(n_hidden1_units)
        x_decode1 = activate_func(tf.nn.xw_plus_b(x_code, weights, biases))
        varible_summaries(weights)
    # Second hidden layer (output layer) of the decoder
    with tf.name_scope('Layer2'):
        weights = WeightsVarible(n_hidden1_units, n_output_units)
        biases = BiasesVarible(n_output_units)
        x_decode = activate_func(tf.nn.xw_plus_b(x_decode1, weights, biases))
        varible_summaries(weights)
    return x_decode


# Build the computation graph from the functions above
with tf.Graph().as_default():
    # Graph input
    with tf.name_scope('X_origin'):
        X_origin = tf.placeholder(tf.float32, [None, n_input_units])
    # Build the encoder
    with tf.name_scope('Encoder'):
        X_code = Encoder(X_origin)
    # Build the decoder
    with tf.name_scope('Decoder'):
        X_decode = Decoder(X_code)
    # Loss node: mean squared reconstruction error
    with tf.name_scope('Loss'):
        Loss = tf.reduce_mean(tf.pow(X_origin - X_decode, 2))
    # Optimizer
    with tf.name_scope('Train'):
        Optimizer = tf.train.RMSPropOptimizer(learning_rate)
        Train = Optimizer.minimize(Loss)

    # Add scalar summaries for the loss node
    with tf.name_scope('LossSummary'):
        tf.summary.scalar('loss', Loss)
        tf.summary.scalar('learning_rate', learning_rate)

    # Add image summaries
    with tf.name_scope('ImageSummary'):
        image_origin = tf.reshape(X_origin, [-1, 28, 28, 1])
        image_reconstructed = tf.reshape(X_decode, [-1, 28, 28, 1])
        tf.summary.image('image_origin', image_origin, 10)
        tf.summary.image('image_reconstructed', image_reconstructed, 10)

    # Merge all summary nodes
    merged_summary = tf.summary.merge_all()

    init = tf.global_variables_initializer()

    print("Writing the graph to the event file; view it in TensorBoard")
    writer = tf.summary.FileWriter(logdir='logs', graph=tf.get_default_graph())
    writer.flush()

    # Load the dataset
    mnist = input_data.read_data_sets('mnist_data/', one_hot=True)

    with tf.Session() as sess:
        sess.run(init)
        total_batch = int(mnist.train.num_examples / batch_size)
        for epoch in range(training_epochs):
            for i in range(total_batch):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                _, loss = sess.run([Train, Loss], feed_dict={X_origin: batch_xs})

            if epoch % display_step == 0:
                print("epoch : %03d, loss = %.3f" % (epoch + 1, loss))
                # Run the merged summary node and update the event file
                summary_str = sess.run(merged_summary, feed_dict={X_origin: batch_xs})
                writer.add_summary(summary_str, epoch)
                writer.flush()

        writer.close()
        print("Training finished!")

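        # Illustrative addition, not part of the original script: the training loop only
        # prints the loss of the last training batch, so as a sketch we also report the
        # mean reconstruction loss over the full test set once training has finished.
        test_loss = sess.run(Loss, feed_dict={X_origin: mnist.test.images})
        print("test loss = %.3f" % test_loss)
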
        # Run the trained encoder-decoder model on the test set and output the reconstructed samples
        reconstructions = sess.run(X_decode, feed_dict={X_origin: mnist.test.images[:examples_to_show]})
        # Compare the original images with their reconstructions
        f, a = plt.subplots(2, 10, figsize=(10, 2))
        for i in range(examples_to_show):
            a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
            a[1][i].imshow(np.reshape(reconstructions[i], (28, 28)))
        plt.show()
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
Detailed write-ups are available on the blog:

[Autoencoder (with two hidden layers)](https://fanfuhan.github.io/2019/04/03/AutoEncoder/#more)

[Denoising autoencoder](https://fanfuhan.github.io/2019/04/03/DenoiseAutoEncoder/#more)
--------------------------------------------------------------------------------
/logs/events.out.tfevents.1554276502.LAPTOP-LV1HQP82:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/logs/events.out.tfevents.1554276502.LAPTOP-LV1HQP82
--------------------------------------------------------------------------------
/logs/events.out.tfevents.1554276973.LAPTOP-LV1HQP82:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/logs/events.out.tfevents.1554276973.LAPTOP-LV1HQP82
--------------------------------------------------------------------------------
/mnist_data/t10k-images-idx3-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/mnist_data/t10k-images-idx3-ubyte.gz
--------------------------------------------------------------------------------
/mnist_data/t10k-labels-idx1-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/mnist_data/t10k-labels-idx1-ubyte.gz
--------------------------------------------------------------------------------
/mnist_data/train-images-idx3-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/mnist_data/train-images-idx3-ubyte.gz
--------------------------------------------------------------------------------
/mnist_data/train-labels-idx1-ubyte.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/mnist_data/train-labels-idx1-ubyte.gz
--------------------------------------------------------------------------------
/实现降噪自动编码器/DenoiseAutoEncoder.py:
--------------------------------------------------------------------------------
import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data  # requires TensorFlow 1.x


# Xavier uniform initialization
def xavier_init(fan_in, fan_out, constant=1):
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32)


# Autoencoder with additive Gaussian noise
class AdditiveGaussianNoiseAutoencoder(object):

    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer(), scale=0.1):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.training_scale = scale
        self.weights = dict()

        # Build the computation graph
        with tf.name_scope('raw_input'):
            self.x = tf.placeholder(tf.float32, [None, self.n_input])
        with tf.name_scope('NoiseAdder'):
            self.scale = tf.placeholder(tf.float32)
            self.noise_x = self.x + self.scale * tf.random_normal((n_input,))
        with tf.name_scope('encoder'):
            self.weights['w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden), name='weight1')
            self.weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32), name='bias1')
            self.hidden = self.transfer(tf.add(tf.matmul(self.noise_x, self.weights['w1']), self.weights['b1']))
        with tf.name_scope('reconstruction'):
            self.weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32), name='weight2')
            self.weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32), name='bias2')
            self.reconstruction = tf.nn.xw_plus_b(self.hidden, self.weights['w2'], self.weights['b2'])  # hidden * w2 + b2
        with tf.name_scope('loss'):
            self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2))
        with tf.name_scope('train'):
            self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)
        print("begin to run session...")

    # Train the model on one mini-batch
    def partial_fit(self, X):
        cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X, self.scale: self.training_scale})
        return cost

    # Compute the cost on a given set of samples (used at test time)
    def calc_total_cost(self, X):
        return self.sess.run(self.cost, feed_dict={self.x: X, self.scale: self.training_scale})

    # Return the output of the hidden layer, i.e. the learned higher-level feature representation
    def transform(self, X):
        return self.sess.run(self.hidden, feed_dict={self.x: X, self.scale: self.training_scale})

    # Take hidden-layer features as input and rebuild the original data from them
    def generate(self, hidden=None):
        if hidden is None:
            hidden = np.random.normal(size=[1, self.n_hidden])
        return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})

    # Run the full restoration pass (extract the higher-level features, then rebuild the data):
    # takes original data as input and returns the reconstructed data
    def reconstruct(self, X):
        return self.sess.run(self.reconstruction, feed_dict={self.x: X, self.scale: self.training_scale})

    # Get the weights of the hidden layer
    def getWeights(self):
        return self.sess.run(self.weights['w1'])

    # Get the biases of the hidden layer
    def getBiases(self):
        return self.sess.run(self.weights['b1'])


AGN_AutoEncoder = AdditiveGaussianNoiseAutoencoder(n_input=784, n_hidden=200,
                                                   optimizer=tf.train.AdamOptimizer(learning_rate=0.01), scale=0.01)
print("Writing the graph to the event file; view it in TensorBoard")
writer = tf.summary.FileWriter(logdir='logs', graph=AGN_AutoEncoder.sess.graph)
writer.close()

# Load the dataset
mnist = input_data.read_data_sets('../mnist_data/', one_hot=True)


# Standardize the data (zero mean, unit standard deviation) with sklearn.preprocessing:
# fit the scaler on the training set, then apply it to both the training and test sets
def standard_scale(x_train, x_test):
    preprocesser = prep.StandardScaler().fit(x_train)
    x_train = preprocesser.transform(x_train)
    x_test = preprocesser.transform(x_test)
    return x_train, x_test


# Get a random block of data: draw a random integer between 0 and len(data) - batch_size
# and use it as the start index of a batch_size-sized slice
def get_random_block_from_data(data, batch_size):
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index: start_index + batch_size]


# Standardize the datasets
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)

# Training parameters
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1  # interval between printed training results

# Start training: reset avg_cost to 0 at the start of every epoch and compute the number of batches.
# Sampling is done with replacement, so not every sample is guaranteed to be drawn and trained on.
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(n_samples / batch_size)
    for i in range(total_batch):
        batch_xs = get_random_block_from_data(X_train, batch_size)
        cost = AGN_AutoEncoder.partial_fit(batch_xs)
        avg_cost += cost / batch_size
    avg_cost /= total_batch

    if epoch % display_step == 0:
        print("epoch : %03d, cost = %.3f" % (epoch + 1, avg_cost))


# Compute the cost on the test set
print("total cost :", str(AGN_AutoEncoder.calc_total_cost(X_test)))

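# Illustrative usage sketch, not part of the original script: the helper methods
# defined on the class above (transform, reconstruct, generate) are never called
# during training; this shows how they might be used once training has finished.
sample = X_test[:10]
codes = AGN_AutoEncoder.transform(sample)      # hidden-layer features, shape (10, 200)
rebuilt = AGN_AutoEncoder.reconstruct(sample)  # round-trip reconstruction, shape (10, 784)
decoded = AGN_AutoEncoder.generate()           # decode one random hidden code, shape (1, 784)
print(codes.shape, rebuilt.shape, decoded.shape)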
--------------------------------------------------------------------------------
/实现降噪自动编码器/logs/events.out.tfevents.1554273765.LAPTOP-LV1HQP82:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/实现降噪自动编码器/logs/events.out.tfevents.1554273765.LAPTOP-LV1HQP82
--------------------------------------------------------------------------------
/实现降噪自动编码器/知识点/DA.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/实现降噪自动编码器/知识点/DA.png
--------------------------------------------------------------------------------
/实现降噪自动编码器/知识点/参数初始化1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/实现降噪自动编码器/知识点/参数初始化1.png
--------------------------------------------------------------------------------
/实现降噪自动编码器/知识点/参数初始化2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/实现降噪自动编码器/知识点/参数初始化2.png
--------------------------------------------------------------------------------
/实现降噪自动编码器/计算图.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/实现降噪自动编码器/计算图.png
--------------------------------------------------------------------------------
/结果.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/结果.png
--------------------------------------------------------------------------------
/自动编码器原理/稀疏自动编码器.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/自动编码器原理/稀疏自动编码器.png
--------------------------------------------------------------------------------
/自动编码器原理/自动编码器1 (1).png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/自动编码器原理/自动编码器1 (1).png
--------------------------------------------------------------------------------
/自动编码器原理/自动编码器1 (2).png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/自动编码器原理/自动编码器1 (2).png
--------------------------------------------------------------------------------
/自动编码器原理/自动编码器1 (3).png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/自动编码器原理/自动编码器1 (3).png
--------------------------------------------------------------------------------
/自动编码器原理/自动编码器1 (4).png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/自动编码器原理/自动编码器1 (4).png
--------------------------------------------------------------------------------
/自动编码器原理/自动编码器1 (5).png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/自动编码器原理/自动编码器1 (5).png
--------------------------------------------------------------------------------
/自动编码器原理/降噪自动编码器.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/自动编码器原理/降噪自动编码器.png
--------------------------------------------------------------------------------
/计算图.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fanfuhan/AutoEncoder_sample/c452edaff80653b2d018c85d93a50290227aa56e/计算图.png
--------------------------------------------------------------------------------