├── 1.Basic_TensorFlow.ipynb
├── 2.FCN_MNIST.ipynb
├── 3.Context_Saving.ipynb
├── 4.tf_CNN.ipynb
├── 5.tf_TFRecord.ipynb
├── 6.tf_RNN.ipynb
├── 7.tf_TensorBoard.ipynb
├── README.md
└── tf_GAN.ipynb

/1.Basic_TensorFlow.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {"collapsed": true},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "A=np.array([[11,12,13],[21,22,23],[31,32,33]])\n",
    "B=np.ones(shape=(3,3))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {"collapsed": true},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### Basic operations: constants, variables and placeholders"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {"collapsed": false},
   "outputs": [{"name": "stdout", "output_type": "stream", "text": ["Tensor(\"add:0\", shape=(2,), dtype=int32)\n", "[3 6]\n"]}],
   "source": [
    "a=tf.constant([1,2],name=\"a\")\n",
    "b=tf.constant([2,4],name=\"b\")\n",
    "result = a+b\n",
    "print(result)\n",
    "\n",
    "# The lines above only define the computation graph; nothing has been executed yet,\n",
    "# so the numerical result is not printed\n",
    "\n",
    "sess=tf.Session()\n",
    "a=sess.run(result)\n",
    "print(a)\n",
    "sess.close()\n",
    "\n",
    "# Open a session, run the graph, then close the session"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {"collapsed": false},
   "outputs": [{"name": "stdout", "output_type": "stream", "text": ["[ 1  4  9 16]\n", "[ 1  4  9 16]\n"]}],
   "source": [
    "with tf.Session() as sess:\n",
    "    a=tf.constant([1,2,3,4])\n",
    "    b=tf.constant([1,2,3,4])\n",
    "    result=tf.multiply(a,b)\n",
    "    c=sess.run(result)\n",
    "    print(c)\n",
    "    \n",
    "print(c)\n",
    "\n",
    "# When the with block exits, the session is closed automatically"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {"collapsed": false},
   "outputs": [{"name": "stdout", "output_type": "stream", "text": ["[[ 3.95757794]]\n", "[[ 3.95757794]]\n"]}],
   "source": [
    "import tensorflow as tf\n",
    "# A 3-layer feed-forward network with (2, 3, 1) units and no activation function\n",
    "\n",
    "# Initialize the weight matrices with samples from a normal distribution with stddev 1\n",
    "w1=tf.Variable(tf.random_normal([2,3],stddev=1,seed=1))\n",
    "w2=tf.Variable(tf.random_normal([3,1],stddev=1,seed=1))\n",
    "\n",
    "x=tf.constant([[0.7,0.9]])\n",
    "# Matrix multiplication\n",
    "a=tf.matmul(x,w1)\n",
    "b=tf.matmul(a,w2)\n",
    "\n",
    "sess=tf.Session()\n",
    "# The initializers must be run explicitly; so far the variables are only defined\n",
    "sess.run(w1.initializer)\n",
    "sess.run(w2.initializer)\n",
    "y=sess.run(b)\n",
    "\n",
    "sess.close()\n",
    "print(y)\n",
    "\n",
    "with tf.Session() as sess:\n",
    "    w3=tf.Variable(tf.random_normal([2,3],stddev=1,seed=1))\n",
    "    w4=tf.Variable(tf.random_normal([3,1],stddev=1,seed=1))\n",
    "    x1=tf.constant([[0.7,0.9]])\n",
    "    a1=tf.matmul(x1,w3)\n",
    "    b1=tf.matmul(a1,w4)\n",
    "    # All variables can also be initialized with a single op\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    print(sess.run(b1))\n"
   ]
  },
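  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Editor's aside (not in the original notebook): besides `sess.run(t)`, a tensor can be evaluated with `t.eval()` once a default session exists. A minimal sketch, assuming TensorFlow 1.x:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {"collapsed": false},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "a=tf.constant([1,2])\n",
    "b=tf.constant([2,4])\n",
    "# InteractiveSession registers itself as the default session\n",
    "sess=tf.InteractiveSession()\n",
    "print((a+b).eval())  # equivalent to sess.run(a+b) -> [3 6]\n",
    "sess.close()"
   ]
  },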
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {"collapsed": false},
   "outputs": [{"name": "stdout", "output_type": "stream", "text": ["[[ 3.95757794]]\n", "[[ 3.95757794]\n", " [ 1.657197  ]\n", " [ 7.20209646]]\n"]}],
   "source": [
    "import tensorflow as tf\n",
    "w1=tf.Variable(tf.random_normal([2,3],stddev=1,seed=1))\n",
    "w2=tf.Variable(tf.random_normal([3,1],stddev=1,seed=1))\n",
    "\n",
    "# x has to be fed in repeatedly, and every new constant adds a node to the graph,\n",
    "# which makes the graph inefficient. Placeholders avoid this.\n",
    "x=tf.placeholder(tf.float32,shape=(1,2))\n",
    "x1=tf.placeholder(tf.float32,shape=(3,2))\n",
    "a=tf.matmul(x,w1)\n",
    "a1=tf.matmul(x1,w1)\n",
    "y=tf.matmul(a,w2)\n",
    "y1=tf.matmul(a1,w2)\n",
    "\n",
    "sess=tf.Session()\n",
    "sess.run(tf.global_variables_initializer())\n",
    "# The placeholder must be filled in when y is run; feed_dict is a dict keyed by the placeholder\n",
    "y_hat=sess.run(y,feed_dict={x:[[0.7,0.9]]})\n",
    "y_hat1=sess.run(y1,feed_dict={x1:[[0.7,0.9],[0.2,0.5],[1,2]]})  # batch = 3\n",
    "print(y_hat)\n",
    "print(y_hat1)\n",
    "sess.close()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### A simple classification model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {"collapsed": true},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "from numpy.random import RandomState\n",
    "\n",
    "batch_size=10\n",
    "w1=tf.Variable(tf.random_normal([2,3],stddev=1,seed=1))\n",
    "w2=tf.Variable(tf.random_normal([3,1],stddev=1,seed=1))\n",
    "\n",
    "# Using None on one dimension of shape lets the same placeholder take batches of different sizes\n",
    "x=tf.placeholder(tf.float32,shape=(None,2))\n",
    "y=tf.placeholder(tf.float32,shape=(None,1))\n",
    "\n",
    "a=tf.matmul(x,w1)\n",
    "yhat=tf.matmul(a,w2)\n",
    "\n",
    "# Use cross-entropy as the loss and minimize it with the Adam optimizer\n",
    "cross_entropy=-tf.reduce_mean(y*tf.log(tf.clip_by_value(yhat,1e-10,1.0)))\n",
    "train_step=tf.train.AdamOptimizer(0.001).minimize(cross_entropy)\n",
    "\n",
    "rdm=RandomState(1)\n",
    "data_size=516\n",
    "\n",
    "# Generate data_size samples with two features each\n",
    "X=rdm.rand(data_size,2)\n",
    "# Label rule: samples with x1+x2<1 are positive (label 1), the rest are negative\n",
    "Y = [[int(x1+x2 < 1)] for (x1, x2) in X]\n",
    "\n",
    "with tf.Session() as sess:\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    print(sess.run(w1))\n",
    "    print(sess.run(w2))\n",
    "    steps=11000\n",
    "    for i in range(steps):\n",
    "        \n",
    "        # Pick the start and end of each mini-batch so that sampling wraps around the data set\n",
    "        start = i * batch_size % data_size\n",
    "        end = min(start + batch_size,data_size)\n",
    "        sess.run(train_step,feed_dict={x:X[start:end],y:Y[start:end]})\n",
    "        if i % 1000 == 0:\n",
    "            training_loss= sess.run(cross_entropy,feed_dict={x:X,y:Y})\n",
    "            print(\"After %d iterations the training loss is %g\"%(i,training_loss))"
   ]
  },
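  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Editor's aside, not part of the original notebook: the loss above clips `yhat` into [1e-10, 1.0] only to avoid `log(0)`, and it drops the `1-y` term of the binary cross-entropy. A sketch of a better-posed variant, reusing the `y` and `yhat` tensors defined in the previous cell:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {"collapsed": true},
   "outputs": [],
   "source": [
    "# Squash the raw network output into (0,1) and use the full binary cross-entropy\n",
    "yhat_prob=tf.sigmoid(yhat)\n",
    "full_cross_entropy=-tf.reduce_mean(\n",
    "    y*tf.log(tf.clip_by_value(yhat_prob,1e-10,1.0))\n",
    "    +(1-y)*tf.log(tf.clip_by_value(1-yhat_prob,1e-10,1.0)))\n",
    "train_step_alt=tf.train.AdamOptimizer(0.001).minimize(full_cross_entropy)"
   ]
  },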
"yhat=tf.nn.relu(tf.matmul(a,w2)+biases2)" 272 | ] 273 | }, 274 | { 275 | "cell_type": "markdown", 276 | "metadata": { 277 | "collapsed": true 278 | }, 279 | "source": [ 280 | "#### 交叉熵函数\n", 281 | "cross_entropy=-tf.reduce_mean(y*tf.log(tf.clip_by_value(yhat,1e-10,1.0)))\n", 282 | "\n", 283 | "tf.reduce_mean(x)表示计算全局平均值。tf.clip_by_value()函数可以将张量中的数值限制在一个范围内。tf.log()对张量内的所有元素依次求对数。交叉熵函数一般会与softmax回归一起使用,TensorFlow将它们进行了统一封装:cross_entropy=tf.nn.softmax_cross_entropy_with_logits(yhat,y)\n", 284 | "\n" 285 | ] 286 | }, 287 | { 288 | "cell_type": "markdown", 289 | "metadata": {}, 290 | "source": [ 291 | "#### 均方误差损失函数\n", 292 | "mse=tf.reduce_mean(tf.square(y-yhat))\n", 293 | "\n", 294 | "tf.select(tf.greater(y,yhat),y-yhat,yhat-y)\n", 295 | "\n", 296 | "tf.greater()的输入是两个张量,比较两个张量中的每一个元素,并返回比较结果(true或false的向量)。tf.select()有三个参数,第一个参数条件为真时选择第二个参数中的值,否则选择第三个参数的值。" 297 | ] 298 | }, 299 | { 300 | "cell_type": "code", 301 | "execution_count": 10, 302 | "metadata": { 303 | "collapsed": true 304 | }, 305 | "outputs": [], 306 | "source": [ 307 | "#decayed_learning_rate=learning_rate*decay_rate^(global_steps/decay_steps),指数衰减函数的定义\n", 308 | "\n", 309 | "global_step=tf.Variable(0)\n", 310 | "#使用exponential_decay生成学习速率,因为staircase=tire,每100次迭代,学习率×0.96\n", 311 | "learning_rate=tf.train.exponential_decay(0.1,global_step,100,0.96,staircase=True)\n", 312 | "#在minimize中导入global_step将自动更新\n", 313 | "#learning_step=tf.train.GtadientDescentOptimizer(learning_rate).minimize(loss_function,global_step=global_step)\n" 314 | ] 315 | }, 316 | { 317 | "cell_type": "markdown", 318 | "metadata": { 319 | "collapsed": true 320 | }, 321 | "source": [ 322 | "#### 带L2正则化的损失函数\n", 323 | "w=tf.Variable(tf.random_normal([2,1],stddev=1,seed=1))\n", 324 | "\n", 325 | "yhat=tf.matmul(x,w)\n", 326 | "\n", 327 | "loss=tf.reduce_mean(tf.square(y-yhat))+tf.contrib.layers.l2_regularizer(lambda)(w)" 328 | ] 329 | }, 330 | { 331 | "cell_type": "code", 332 | "execution_count": 1, 333 | "metadata": { 334 | "collapsed": false 335 | }, 336 | "outputs": [ 337 | { 338 | "name": "stdout", 339 | "output_type": "stream", 340 | "text": [ 341 | "6.5\n", 342 | "12.75\n" 343 | ] 344 | } 345 | ], 346 | "source": [ 347 | "import tensorflow as tf\n", 348 | "w=tf.constant([[1.0,-5.0],[-3.0,4.0]])\n", 349 | "with tf.Session() as sess:\n", 350 | " #L1正则化:(1+5+3+4)×0.5\n", 351 | " print(sess.run(tf.contrib.layers.l1_regularizer(0.5)(w)))\n", 352 | " #L2正则化:(1+25+9+16)/2×0.5,L2正则化会处以2,无偏估计?\n", 353 | " print(sess.run(tf.contrib.layers.l2_regularizer(0.5)(w)))" 354 | ] 355 | }, 356 | { 357 | "cell_type": "markdown", 358 | "metadata": {}, 359 | "source": [ 360 | "神经网络结构复杂后,定义网络结构的部分和计算损失函数的部分可能不在同一个函数中。所以采用collection在一个计算图中保留一组实体(如张量)。" 361 | ] 362 | }, 363 | { 364 | "cell_type": "code", 365 | "execution_count": 2, 366 | "metadata": { 367 | "collapsed": true 368 | }, 369 | "outputs": [], 370 | "source": [ 371 | "import tensorflow as tf\n", 372 | "\n", 373 | "#通过集合(collection)计算一个5层神经网络带L2正则化的损失函数\n", 374 | "\n", 375 | "#随机正态初始化一层神经网络的权重,并将权重的L2正则化损失加入名为losses的集合中,返回初始化的权重\n", 376 | "def get_weight(shape,lambd):\n", 377 | " var=tf.Variable(tf.random_normal(shape),dtype=tf.float32)\n", 378 | " \n", 379 | " #tf.add_to_collection函数将新生成变量的L2正则化损失项加入集合,第一个参数为集合名,第二个参数为加入集合的内容。\n", 380 | " tf.add_to_collection('losses',tf.contrib.layers.l2_regularizer(lambd)(var))\n", 381 | " return var\n", 382 | "\n", 383 | "x=tf.placeholder(tf.float32,shape=(None,2))\n", 384 | "y=tf.placeholder(tf.float32,shape=(None,1))\n", 385 | "batch_size=8\n", 386 | "\n", 387 | 
"#定义每一层中结点个数和层数\n", 388 | "layer_dimension=[2,10,10,10,1]\n", 389 | "n_layers=len(layer_dimension)\n", 390 | "\n", 391 | "#该变量维护前向传播时最深层的结点,最开始为输入层\n", 392 | "cur_layer=x\n", 393 | "\n", 394 | "#输入层结点个数\n", 395 | "in_dimension=layer_dimension[0]\n", 396 | "\n", 397 | "#通过循环生成5层全连接神经网络\n", 398 | "for i in range(1,n_layers):\n", 399 | " \n", 400 | " #下一层节点数\n", 401 | " out_dimension=layer_dimension[i]\n", 402 | " \n", 403 | " #生成当前层中权重的变量,并将这个变量的L2正则化损失加入计算图上的集合\n", 404 | " #[in_dimension,out_dimension],例第一层到第二层之间的权重维度为2×10\n", 405 | " weight=get_weight([in_dimension,out_dimension],0.001)\n", 406 | " \n", 407 | " #偏置项和后一层维度相等,为什么是wx+0.1,而不是wx+b??\n", 408 | " bias=tf.Variable(tf.constant(0.1,shape=[out_dimension]))\n", 409 | " \n", 410 | " #使用ReLU激活函数,cur_layer储存传播一层后的激活情况,后一层激活函数的输出\n", 411 | " cur_layer=tf.nn.relu(tf.matmul(cur_layer,weight)+bias)\n", 412 | " in_dimension=layer_dimension[i]\n", 413 | " \n", 414 | "mse_loss=tf.reduce_mean(tf.square(y-cur_layer))\n", 415 | "\n", 416 | "#将均方误差函数加入损失集合\n", 417 | "tf.add_to_collection('losses',mse_loss)\n", 418 | "\n", 419 | "#get_collection返回一个列表,这个列表是集合中的所有元素,这些元素就是组成损失函数的误差和正则项,相加得最终损失函数\n", 420 | "loss=tf.add_n(tf.get_collection('losses'))\n", 421 | "\n" 422 | ] 423 | }, 424 | { 425 | "cell_type": "code", 426 | "execution_count": 4, 427 | "metadata": { 428 | "collapsed": false 429 | }, 430 | "outputs": [ 431 | { 432 | "name": "stdout", 433 | "output_type": "stream", 434 | "text": [ 435 | "0.0\n", 436 | "10.0\n" 437 | ] 438 | } 439 | ], 440 | "source": [ 441 | "import tensorflow as tf\n", 442 | "\n", 443 | "#tf.assign(A, new_number),这个函数的功能主要是把A的值变为new_number\n", 444 | "A=tf.Variable(tf.constant(0.0),dtype=tf.float32)\n", 445 | "with tf.Session() as sess: \n", 446 | " sess.run(tf.global_variables_initializer()) \n", 447 | " print (sess.run(A)) \n", 448 | " sess.run(tf.assign(A, 10)) \n", 449 | " print (sess.run(A)) " 450 | ] 451 | }, 452 | { 453 | "cell_type": "code", 454 | "execution_count": 2, 455 | "metadata": { 456 | "collapsed": false 457 | }, 458 | "outputs": [ 459 | { 460 | "name": "stdout", 461 | "output_type": "stream", 462 | "text": [ 463 | "[0.0, 0.0]\n", 464 | "[5.0, 4.5]\n", 465 | "[10.0, 4.5549998]\n", 466 | "[10.0, 4.6094499]\n" 467 | ] 468 | } 469 | ], 470 | "source": [ 471 | "import tensorflow as tf\n", 472 | "\n", 473 | "#滑动平均模型\n", 474 | "\n", 475 | "#定义一个变量计算滑动平均,初始值为0,所有需要计算滑动平均的变量必须是实数型\n", 476 | "v1=tf.Variable(0,dtype=tf.float32)\n", 477 | "\n", 478 | "#step变量模拟神经网络中的迭代次数,用于动态控制衰减率\n", 479 | "step=tf.Variable(0,trainable=False)\n", 480 | "\n", 481 | "#定义一个滑动平均的类,初始化时给定了衰减率和控制衰减率的变量step\n", 482 | "ema=tf.train.ExponentialMovingAverage(0.99,step)\n", 483 | "\n", 484 | "#定义一个更新变量的滑动平均操作,给定一个列表,每次执行操作时更新列表所有变量\n", 485 | "maintain_averages_op=ema.apply([v1])\n", 486 | "\n", 487 | "with tf.Session() as sess:\n", 488 | " \n", 489 | " #初始化所有变量\n", 490 | " sess.run(tf.global_variables_initializer())\n", 491 | " \n", 492 | " #通过ema.average(v1)获取滑动平均之后变量的取值。初始化后,v1的值和滑动平均都为0\n", 493 | " print(sess.run([v1,ema.average(v1)]))\n", 494 | " \n", 495 | " #更新变量v1的值为5,tf.assign将数值分配给变量\n", 496 | " sess.run(tf.assign(v1,5))\n", 497 | " \n", 498 | " #更新v1的滑动平均值。衰减率为min{0.99,(1+step)/(10+step)≈0.1}=0.1,所以v1的滑动平均值会更新为 0.1×0+0.9×5=4.5\n", 499 | " sess.run(maintain_averages_op)\n", 500 | " print(sess.run([v1,ema.average(v1)]))\n", 501 | " \n", 502 | " #将迭代设置为10000步\n", 503 | " sess.run(tf.assign(step,10000))\n", 504 | " \n", 505 | " #更新v1的值为10\n", 506 | " sess.run(tf.assign(v1,10))\n", 507 | " \n", 508 | " 
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {"collapsed": false},
   "outputs": [{"name": "stdout", "output_type": "stream", "text": ["[0.0, 0.0]\n", "[5.0, 4.5]\n", "[10.0, 4.5549998]\n", "[10.0, 4.6094499]\n"]}],
   "source": [
    "import tensorflow as tf\n",
    "\n",
    "# The moving average model\n",
    "\n",
    "# A variable whose moving average will be computed, initial value 0.\n",
    "# Every variable used with a moving average must have a real (floating point) type\n",
    "v1=tf.Variable(0,dtype=tf.float32)\n",
    "\n",
    "# step emulates the iteration counter and dynamically controls the decay rate\n",
    "step=tf.Variable(0,trainable=False)\n",
    "\n",
    "# Create the moving average class with decay rate 0.99, controlled by step\n",
    "ema=tf.train.ExponentialMovingAverage(0.99,step)\n",
    "\n",
    "# An op that updates the moving average of every variable in the given list\n",
    "maintain_averages_op=ema.apply([v1])\n",
    "\n",
    "with tf.Session() as sess:\n",
    "    \n",
    "    # Initialize all variables\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    \n",
    "    # ema.average(v1) returns the moving average of the variable; after\n",
    "    # initialization both v1 and its average are 0\n",
    "    print(sess.run([v1,ema.average(v1)]))\n",
    "    \n",
    "    # Set v1 to 5; tf.assign writes the value into the variable\n",
    "    sess.run(tf.assign(v1,5))\n",
    "    \n",
    "    # Update the moving average of v1. The decay is min{0.99,(1+step)/(10+step)=0.1}=0.1,\n",
    "    # so the average becomes 0.1*0+0.9*5=4.5\n",
    "    sess.run(maintain_averages_op)\n",
    "    print(sess.run([v1,ema.average(v1)]))\n",
    "    \n",
    "    # Set the iteration counter to 10000\n",
    "    sess.run(tf.assign(step,10000))\n",
    "    \n",
    "    # Set v1 to 10\n",
    "    sess.run(tf.assign(v1,10))\n",
    "    \n",
    "    # Update the moving average of v1. The decay is min{0.99,(1+step)/(10+step)≈0.999}=0.99,\n",
    "    # so the average becomes 0.99*4.5+0.01*10=4.555\n",
    "    sess.run(maintain_averages_op)\n",
    "    print(sess.run([v1,ema.average(v1)]))\n",
    "    \n",
    "    # Updating once more gives 0.99*4.555+0.01*10=4.60945\n",
    "    sess.run(maintain_averages_op)\n",
    "    print(sess.run([v1,ema.average(v1)]))\n",
    "\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {"display_name": "Python 3", "language": "python", "name": "python3"},
  "language_info": {
   "codemirror_mode": {"name": "ipython", "version": 3},
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
--------------------------------------------------------------------------------
/2.FCN_MNIST.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {"collapsed": true},
   "source": [
    "### MNIST handwritten digit recognition"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {"collapsed": false},
   "outputs": [{"name": "stdout", "output_type": "stream", "text": ["Extracting ./data/MNIST/train-images-idx3-ubyte.gz\n", "Extracting ./data/MNIST/train-labels-idx1-ubyte.gz\n", "Extracting ./data/MNIST/t10k-images-idx3-ubyte.gz\n", "Extracting ./data/MNIST/t10k-labels-idx1-ubyte.gz\n", "Training data size:  55000\n", "Validating data size:  5000\n", "Testing data size:  10000\n"]}],
   "source": [
    "# Load the data\n",
    "import tensorflow as tf\n",
    "\n",
    "# The original site provides 60k training images and 10k test images; this utility\n",
    "# splits 5000 of the training images off as a validation set\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "\n",
    "# Read from ./data/MNIST under the current directory; if the folder does not exist,\n",
    "# the data is downloaded into it automatically\n",
    "mnist = input_data.read_data_sets(\"./data/MNIST/\", one_hot=True)\n",
    "\n",
    "print(\"Training data size: \", mnist.train.num_examples) \n",
    "print (\"Validating data size: \", mnist.validation.num_examples) \n",
    "print (\"Testing data size: \", mnist.test.num_examples) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {"collapsed": false},
   "outputs": [{"name": "stdout", "output_type": "stream", "text": ["X shape: (100, 784)\n", "Y shape: (100, 10)\n"]}],
   "source": [
    "# For SGD, mnist.train.next_batch draws a small batch from the training data\n",
    "\n",
    "batch_size=100\n",
    "\n",
    "# Take batch_size training examples\n",
    "xs,ys=mnist.train.next_batch(batch_size)\n",
    "\n",
    "# Each image is flattened into a 1-D array of length 28*28=784 and used as a feature\n",
    "# vector, so a batch of 100 images is a 100x784 matrix\n",
    "print('X shape:',xs.shape)\n",
    "print('Y shape:',ys.shape)"
   ]
  },
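  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Editor's aside (not in the original): with one_hot=True each label row is a length-10 one-hot vector. A tiny sketch, reusing the ys from the cell above (the digit shown is illustrative):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {"collapsed": false},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "print(ys[0])             # e.g. [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]\n",
    "print(np.argmax(ys[0]))  # argmax recovers the digit class, e.g. 3\n"
   ]
  },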
"source": [ 96 | "import tensorflow as tf\n", 97 | "from tensorflow.examples.tutorials.mnist import input_data\n", 98 | "mnist = input_data.read_data_sets(\"./data/MNIST/\", one_hot=True)\n", 99 | "\n", 100 | "\n", 101 | "#输入结点数为像素点数,输出结点数为类别数\n", 102 | "INPUT_NODE=784\n", 103 | "OUTPUT_NODE=10\n", 104 | "\n", 105 | "#一个隐藏层\n", 106 | "LAYER1_NODE=500\n", 107 | "\n", 108 | "#一个批量中的样本量,数据量越小训练过程越接近SGD,数据量越大训练过程越接近梯度下降\n", 109 | "BATCH_SIZE=100\n", 110 | "\n", 111 | "#学习率和学习衰减率\n", 112 | "LEARNING_RATE_BASE=0.8\n", 113 | "LEARNING_RATE_DECAY=0.99\n", 114 | "\n", 115 | "#正则化系数、迭代次数和滑动平均衰减率\n", 116 | "REGULARIZATION_RATE=0.0001\n", 117 | "TRAINING_STEPS=3000\n", 118 | "MOVING_AVERAGE_DECAY=0.99\n", 119 | "\n", 120 | "#定义推断函数,给定所有参数下计算神经网络的前向传播结果。参数avg_class可确定推断中使不使用滑动平均模型\n", 121 | "def inference(input_tensor,avg_class,weights1,biases1,weights2,biases2):\n", 122 | " \n", 123 | " #没有提供滑动平均类时,直接使用参数当前的取值\n", 124 | " if avg_class == None:\n", 125 | " \n", 126 | " #计算隐藏层前向传播结果,使用ReLU激活函数\n", 127 | " layer1=tf.nn.relu(tf.matmul(input_tensor,weights1)+biases1)\n", 128 | " \n", 129 | " #计算输出层的前向传播结果\n", 130 | " return tf.matmul(layer1,weights2)+biases2\n", 131 | " else:\n", 132 | " \n", 133 | " #首先使用avg_class.averaage函数计算变量的滑动均值,然后计算相应的前向传播结果\n", 134 | " layer1=tf.nn.relu(tf.matmul(input_tensor,avg_class.average(weights1))+avg_class.average(biases1))\n", 135 | " return tf.matmul(layer1,avg_class.average(weights2))+avg_class.average(biases2)\n", 136 | " \n", 137 | "#模型训练函数\n", 138 | "\n", 139 | "def train(mnist):\n", 140 | " x=tf.placeholder(tf.float32,[None,INPUT_NODE],name='x-input')\n", 141 | " y=tf.placeholder(tf.float32,[None,OUTPUT_NODE],name='y-input')\n", 142 | " \n", 143 | " #生成隐藏层参数\n", 144 | " weights1=tf.Variable(tf.truncated_normal([INPUT_NODE,LAYER1_NODE],stddev=0.1))\n", 145 | " biases1=tf.Variable(tf.constant(0.1,shape=[LAYER1_NODE]))\n", 146 | " \n", 147 | " #生成输出层参数\n", 148 | " weights2=tf.Variable(tf.truncated_normal([LAYER1_NODE,OUTPUT_NODE],stddev=0.1))\n", 149 | " biases2=tf.Variable(tf.constant(0.1,shape=[OUTPUT_NODE]))\n", 150 | " \n", 151 | " #计算当前参数下前向传播的结果,这里设为‘None’不会计算滑动平均值\n", 152 | " y_hat=inference(x,None,weights1,biases1,weights2,biases2)\n", 153 | "\n", 154 | " #定义储存迭代数的变量,这个变量不需要计算滑动平均值,所以这里指定的这个变量为不饿训练变量(trainable=False)\n", 155 | " global_step=tf.Variable(0,trainable=False)\n", 156 | " \n", 157 | " #给定滑动平均衰减率和迭代数,初始化滑动平均类。\n", 158 | " variable_averages=tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,global_step)\n", 159 | " \n", 160 | " #在所有代表神经网络参数的变量上使用滑动平均,其他超参数不需要。tf.trainable_variables返回的就是图上的集合GraphKeys.TRAINABLE_VARIABLES中的元素。\n", 161 | " variables_averages_op=variable_averages.apply(tf.trainable_variables())\n", 162 | " \n", 163 | " #计算使用滑动平均后的前向传播结果,滑动平均不会改变变量本身,而是使用影子变量记录滑动平均值,需要使用滑动平均再明确调用average函数\n", 164 | " average_y_hat=inference(x,variable_averages,weights1,biases1,weights2,biases2)\n", 165 | " \n", 166 | " #~使用tf.argmax函数得到正确答案对应的类别编号\n", 167 | " cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y_hat, labels=tf.argmax(y, 1))\n", 168 | " \n", 169 | " #计算当前批量中所有样本的交叉熵均值\n", 170 | " cross_entropy_mean=tf.reduce_mean(cross_entropy)\n", 171 | " \n", 172 | " #计算L2正则化损失函数\n", 173 | " regularizer=tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)\n", 174 | " \n", 175 | " #计算模型的正则化损失,只计算神经网络权重的正则化损失,不使用偏置项\n", 176 | " regularization=regularizer(weights1)+regularizer(weights2)\n", 177 | " \n", 178 | " #总损失函数\n", 179 | " loss=cross_entropy_mean+regularization\n", 180 | " \n", 181 | " 
    "# The training function\n",
    "\n",
    "def train(mnist):\n",
    "    x=tf.placeholder(tf.float32,[None,INPUT_NODE],name='x-input')\n",
    "    y=tf.placeholder(tf.float32,[None,OUTPUT_NODE],name='y-input')\n",
    "    \n",
    "    # Hidden layer parameters\n",
    "    weights1=tf.Variable(tf.truncated_normal([INPUT_NODE,LAYER1_NODE],stddev=0.1))\n",
    "    biases1=tf.Variable(tf.constant(0.1,shape=[LAYER1_NODE]))\n",
    "    \n",
    "    # Output layer parameters\n",
    "    weights2=tf.Variable(tf.truncated_normal([LAYER1_NODE,OUTPUT_NODE],stddev=0.1))\n",
    "    biases2=tf.Variable(tf.constant(0.1,shape=[OUTPUT_NODE]))\n",
    "    \n",
    "    # Forward pass with the current parameter values; passing None means no moving average\n",
    "    y_hat=inference(x,None,weights1,biases1,weights2,biases2)\n",
    "\n",
    "    # Variable holding the iteration count. It needs no moving average, so it is\n",
    "    # declared untrainable (trainable=False)\n",
    "    global_step=tf.Variable(0,trainable=False)\n",
    "    \n",
    "    # Initialize the moving average class with the decay rate and the step counter\n",
    "    variable_averages=tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,global_step)\n",
    "    \n",
    "    # Apply the moving average to all trainable network parameters (hyperparameters need none).\n",
    "    # tf.trainable_variables returns the elements of the GraphKeys.TRAINABLE_VARIABLES collection\n",
    "    variables_averages_op=variable_averages.apply(tf.trainable_variables())\n",
    "    \n",
    "    # Forward pass under the moving averages. The averages do not change the variables\n",
    "    # themselves but are kept in shadow variables, accessed explicitly via average()\n",
    "    average_y_hat=inference(x,variable_averages,weights1,biases1,weights2,biases2)\n",
    "    \n",
    "    # tf.argmax extracts the class index of the correct answer\n",
    "    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y_hat, labels=tf.argmax(y, 1))\n",
    "    \n",
    "    # Mean cross-entropy over the current batch\n",
    "    cross_entropy_mean=tf.reduce_mean(cross_entropy)\n",
    "    \n",
    "    # The L2 regularization loss generator\n",
    "    regularizer=tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)\n",
    "    \n",
    "    # Regularize only the weights of the network, not the biases\n",
    "    regularization=regularizer(weights1)+regularizer(weights2)\n",
    "    \n",
    "    # Total loss\n",
    "    loss=cross_entropy_mean+regularization\n",
    "    \n",
    "    # Exponentially decayed learning rate: base rate, current step,\n",
    "    # iterations per epoch, decay rate\n",
    "    learning_rate = tf.train.exponential_decay(\n",
    "        LEARNING_RATE_BASE,\n",
    "        global_step,\n",
    "        mnist.train.num_examples / BATCH_SIZE,\n",
    "        LEARNING_RATE_DECAY,\n",
    "        staircase=True)\n",
    "    \n",
    "    # Minimize the total loss (cross-entropy + L2 regularization) with gradient descent\n",
    "    train_step=tf.train.GradientDescentOptimizer(learning_rate).minimize(loss,global_step=global_step)\n",
    "    \n",
    "    # Each pass over the data must both update the parameters via backprop and refresh\n",
    "    # their moving averages; the two ops are bundled into one\n",
    "    #train_op=tf.group(train_step,variables_averages_op)\n",
    "    with tf.control_dependencies([train_step,variables_averages_op]):\n",
    "        train_op=tf.no_op(name='train')\n",
    "    \n",
    "    \n",
    "    # Check the predictions of the moving-average model. average_y_hat is a batch_size x 10\n",
    "    # array, one row of forward-pass results per sample. The second argument '1' of\n",
    "    # tf.argmax takes the maximum over each row, giving a length-batch vector of predicted\n",
    "    # classes. tf.equal compares two tensors element-wise and returns True where they match\n",
    "    correct_prediction=tf.equal(tf.argmax(average_y_hat,1),tf.argmax(y,1))\n",
    "    \n",
    "    # Cast the booleans to floats and average them: the model's accuracy on this batch\n",
    "    accuracy=tf.reduce_mean(tf.cast(correct_prediction,tf.float32))\n",
    "\n",
    "    # Open a session and run the training process\n",
    "    with tf.Session() as sess:\n",
    "        \n",
    "        tf.global_variables_initializer().run()\n",
    "        \n",
    "        # The validation feed, used for a rough check of progress and stopping criteria\n",
    "        validate_feed = {x: mnist.validation.images, y: mnist.validation.labels}\n",
    "        \n",
    "        # The test feed, used as the final performance measure\n",
    "        test_feed={x:mnist.test.images,y:mnist.test.labels}\n",
    "        \n",
    "        # Train the network iteratively\n",
    "        for i in range(TRAINING_STEPS):\n",
    "            \n",
    "            # Report the validation accuracy every 500 iterations\n",
    "            if i % 500 == 0:\n",
    "                \n",
    "                # The validation set is small enough to evaluate in one run;\n",
    "                # a larger set would have to be split into batches\n",
    "                validate_acc = sess.run(accuracy, feed_dict=validate_feed)\n",
    "                print(\"After %d training step(s), validation accuracy using average model is %g \" % (i, validate_acc))\n",
    "            \n",
    "            # Draw the batch for this iteration and run one training step\n",
    "            xs,ys=mnist.train.next_batch(BATCH_SIZE)\n",
    "            sess.run(train_op,feed_dict={x:xs,y:ys})\n",
    "        \n",
    "        # After training, report the final accuracy on the test set\n",
    "        test_acc=sess.run(accuracy,feed_dict=test_feed)\n",
    "        print((\"After %d training step(s), test accuracy using average model is %g\" %(TRAINING_STEPS, test_acc)))\n",
    "\n",
    "    \n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {"collapsed": false},
   "outputs": [{"name": "stdout", "output_type": "stream", "text": ["After 0 training step(s), validation accuracy using average model is 0.1354 \n", "After 500 training step(s), validation accuracy using average model is 0.9682 \n", "After 1000 training step(s), validation accuracy using average model is 0.975 \n", "After 1500 training step(s), validation accuracy using average model is 0.9794 \n", "After 2000 training step(s), validation accuracy using average model is 0.9816 \n", "After 2500 training step(s), validation accuracy using average model is 0.983 \n", "After 3000 training step(s), test accuracy using average model is 0.9815\n"]}],
   "source": [
    "train(mnist)"
   ]
  },
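  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Editor's aside (not in the original): the argmax/equal/cast accuracy mechanics used above can be checked in isolation on a tiny constant batch:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {"collapsed": false},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "logits = tf.constant([[2.0, 1.0, 0.5], [0.2, 0.9, 0.1]])\n",
    "labels = tf.constant([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n",
    "correct = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))\n",
    "accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n",
    "with tf.Session() as sess:\n",
    "    print(sess.run(correct))   # [ True False]\n",
    "    print(sess.run(accuracy))  # 0.5\n"
   ]
  },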
  {
   "cell_type": "markdown",
   "metadata": {"collapsed": true},
   "source": [
    "### Refactoring the MNIST model\n",
    "With TensorFlow's variable management and model persistence in hand, the simple fully connected network above can be restructured.\n",
    "\n",
    "The code is split into functional modules, with inference abstracted into its own library. The model consists of three modules: mnist_inference.py defines the forward pass and the network parameters, mnist_train.py defines the training procedure, and mnist_eval.py defines the evaluation procedure."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {"collapsed": true},
   "outputs": [],
   "source": [
    "#mnist_inference.py\n",
    "# -*- coding: utf-8 -*-\n",
    "import tensorflow as tf\n",
    "\n",
    "# Structural parameters of the network\n",
    "INPUT_NODE=784\n",
    "OUTPUT_NODE=10\n",
    "LAYER1_NODE=500\n",
    "\n",
    "# Obtain variables with tf.get_variable: they are created during training and reloaded\n",
    "# from the saved model at test time. Conveniently, because the moving-average shadow\n",
    "# variables can be renamed when loading, the same name can refer to the variable itself\n",
    "# during training and to its moving average during testing. This function also adds the\n",
    "# variable's regularization loss to the loss collection.\n",
    "def get_weight_variable(shape, regularizer):\n",
    "    weights=tf.get_variable('weights',shape,initializer=tf.truncated_normal_initializer(stddev=0.1))\n",
    "    \n",
    "    # When a regularization generator is given, add the variable's loss to 'losses'\n",
    "    if regularizer != None:\n",
    "        tf.add_to_collection('losses',regularizer(weights))\n",
    "    return weights\n",
    "\n",
    "# The forward pass of the network\n",
    "def inference(input_tensor,regularizer):\n",
    "    \n",
    "    # Declare the first layer's variables and its forward pass\n",
    "    with tf.variable_scope('layer1'):\n",
    "        # tf.get_variable and tf.Variable make no real difference here, because the\n",
    "        # function is called only once per program (training or testing). If it were\n",
    "        # called several times in one program, reuse=True would be needed after the\n",
    "        # first call.\n",
    "        weights=get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)\n",
    "        biases=tf.get_variable('biases', [LAYER1_NODE], initializer = tf.constant_initializer(0.0))\n",
    "        layer1=tf.nn.relu(tf.matmul(input_tensor, weights)+biases)\n",
    "    \n",
    "    # Likewise declare the second layer's variables and forward pass\n",
    "    with tf.variable_scope('layer2'):\n",
    "        weights=get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)\n",
    "        biases=tf.get_variable('biases', [OUTPUT_NODE], initializer = tf.constant_initializer(0.0))\n",
    "        layer2 = tf.matmul(layer1, weights) + biases\n",
    "    \n",
    "    return layer2\n",
    "    "
   ]
  },
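  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Editor's aside (not in the original): if inference() ever has to be called twice in one graph, e.g. to build separate training and evaluation outputs over shared weights, the second call must reuse the layer1/layer2 scopes. A sketch, assuming the inference defined above:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {"collapsed": true},
   "outputs": [],
   "source": [
    "x_train = tf.placeholder(tf.float32, [None, INPUT_NODE])\n",
    "x_eval = tf.placeholder(tf.float32, [None, INPUT_NODE])\n",
    "y_train = inference(x_train, None)         # creates layer1/weights, layer2/weights, ...\n",
    "tf.get_variable_scope().reuse_variables()  # scopes created below inherit reuse=True\n",
    "y_eval = inference(x_eval, None)           # same weights, no new variables created\n"
   ]
  },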
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {"collapsed": false},
   "outputs": [{"name": "stdout", "output_type": "stream", "text": ["Extracting ./data/MNIST/train-images-idx3-ubyte.gz\n", "Extracting ./data/MNIST/train-labels-idx1-ubyte.gz\n", "Extracting ./data/MNIST/t10k-images-idx3-ubyte.gz\n", "Extracting ./data/MNIST/t10k-labels-idx1-ubyte.gz\n", "After 1 training steps, loss on training batch is 2.81242.\n"]}],
   "source": [
    "#mnist_train.py\n",
    "# -*- coding: utf-8 -*-\n",
    "import os\n",
    "import tensorflow as tf\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "import mnist_inference\n",
    "\n",
    "# The .py files live in the same directory\n",
    "\n",
    "# Network hyperparameters\n",
    "BATCH_SIZE = 100\n",
    "LEARNING_RATE_BASE = 0.8\n",
    "LEARNING_RATE_DECAY = 0.99\n",
    "REGULARIZATION_RATE = 0.0001\n",
    "TRAINING_STEPS = 10000\n",
    "MOVING_AVERAGE_DECAY = 0.99\n",
    "MODEL_SAVE_PATH = \"./model/fcn_mnist\"\n",
    "MODEL_NAME = \"fcn_mnist.ckpt\"\n",
    "\n",
    "\n",
    "def train(mnist):\n",
    "    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')\n",
    "    y = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')\n",
    "\n",
    "    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)\n",
    "    # Call the shared inference code\n",
    "    y_hat = mnist_inference.inference(x, regularizer)\n",
    "    global_step = tf.Variable(0, trainable=False)\n",
    "\n",
    "    # Loss, learning rate, moving averages and the training op\n",
    "    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)\n",
    "    variables_average_op = variable_averages.apply(tf.trainable_variables())\n",
    "    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y_hat, labels=tf.argmax(y, 1))\n",
    "    cross_entropy_mean = tf.reduce_mean(cross_entropy)\n",
    "    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))\n",
    "    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE,\n",
    "                                               LEARNING_RATE_DECAY)\n",
    "    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n",
    "\n",
    "    with tf.control_dependencies([train_step, variables_average_op]):\n",
    "        train_op = tf.no_op(name='train')\n",
    "\n",
    "    # The TF persistence class\n",
    "    saver = tf.train.Saver()\n",
    "    with tf.Session() as sess:\n",
    "        sess.run(tf.global_variables_initializer())\n",
    "\n",
    "        # Validation and testing are no longer done during training;\n",
    "        # they run as an independent process\n",
    "        for i in range(TRAINING_STEPS):\n",
    "            xs, ys = mnist.train.next_batch(BATCH_SIZE)\n",
    "            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y: ys})\n",
    "\n",
    "            # Save the model every 1000 iterations\n",
    "            if i % 1000 == 0:\n",
    "                # Report the loss on the current training batch\n",
    "                print('After %d training steps, loss on training batch is %g.' % (step, loss_value))\n",
    "\n",
    "                # Save the current model; global_step appends the step number to the filename\n",
    "                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)\n",
    "\n",
    "\n",
    "def main(argv=None):\n",
    "    mnist = input_data.read_data_sets('./data/MNIST/', one_hot=True)\n",
    "    train(mnist)\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    tf.app.run()\n"
   ]
  },
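  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Editor's aside (not in the original; file names are illustrative): because saver.save is called with global_step, each save produces files such as fcn_mnist.ckpt-1000.meta, fcn_mnist.ckpt-1000.index and fcn_mnist.ckpt-1000.data-00000-of-00001, plus a text file named checkpoint that points at the most recent save. That checkpoint file is what tf.train.get_checkpoint_state reads in the evaluation script below:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {"collapsed": true},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "ckpt = tf.train.get_checkpoint_state('./model/fcn_mnist')\n",
    "if ckpt:\n",
    "    print(ckpt.model_checkpoint_path)  # e.g. ./model/fcn_mnist/fcn_mnist.ckpt-9001\n"
   ]
  },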
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {"collapsed": true},
   "outputs": [],
   "source": [
    "#mnist_eval.py\n",
    "import time\n",
    "import tensorflow as tf\n",
    "from tensorflow.examples.tutorials.mnist import input_data\n",
    "import mnist_inference\n",
    "import mnist_train\n",
    "\n",
    "# Reload the newest model every 10 seconds and test its accuracy\n",
    "EVAL_INTERVAL_SECS=10\n",
    "\n",
    "def evaluate(mnist):\n",
    "    with tf.Graph().as_default() as g:\n",
    "        # Input and output formats\n",
    "        x=tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')\n",
    "        y=tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')\n",
    "        validate_feed={x:mnist.validation.images, y:mnist.validation.labels}\n",
    "        \n",
    "        # Compute the forward pass with the shared function. Evaluation does not care\n",
    "        # about the regularization loss, so the regularizer is set to None\n",
    "        y_hat=mnist_inference.inference(x, None)\n",
    "        \n",
    "        # Accuracy from the forward pass. To classify unknown samples,\n",
    "        # tf.argmax(y_hat, 1) gives the predicted class\n",
    "        correct_prediction=tf.equal(tf.argmax(y_hat, 1), tf.argmax(y, 1))\n",
    "        accuracy=tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
    "        \n",
    "        # Load the model through variable renaming, so the forward pass needs no explicit\n",
    "        # moving-average calls; the code in mnist_inference.py is fully shared\n",
    "        variable_averages=tf.train.ExponentialMovingAverage(mnist_train.MOVING_AVERAGE_DECAY)\n",
    "        variables_to_restore=variable_averages.variables_to_restore()\n",
    "        \n",
    "        # variables_to_restore generates the renaming dict needed by tf.train.Saver\n",
    "        saver=tf.train.Saver(variables_to_restore)\n",
    "        \n",
    "        # Run the accuracy computation every EVAL_INTERVAL_SECS seconds to track\n",
    "        # how accuracy evolves during training\n",
    "        while True:\n",
    "            with tf.Session() as sess:\n",
    "                # get_checkpoint_state finds the newest model file in the directory\n",
    "                # via the checkpoint file\n",
    "                ckpt=tf.train.get_checkpoint_state(mnist_train.MODEL_SAVE_PATH)\n",
    "                if ckpt and ckpt.model_checkpoint_path:\n",
    "                    # Load the model\n",
    "                    saver.restore(sess, ckpt.model_checkpoint_path)\n",
    "                    # Recover the iteration count from the file name\n",
    "                    global_step=ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n",
    "                    accuracy_score=sess.run(accuracy, feed_dict=validate_feed)\n",
    "                    print(\"After %s training step(s), validation accuracy = %g\" % (global_step, accuracy_score))\n",
    "                else:\n",
    "                    print('No checkpoint file found')\n",
    "                    return\n",
    "            time.sleep(EVAL_INTERVAL_SECS)\n",
    "    \n",
    "def main(argv=None):\n",
    "    mnist=input_data.read_data_sets('./data/MNIST/', one_hot=True)\n",
    "    evaluate(mnist)\n",
    "    \n",
    "if __name__=='__main__':\n",
    "    tf.app.run()\n",
    "    \n",
    "    "
   ]
  },
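  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Editor's aside (not in the original; the path is illustrative): the step number is recovered from the checkpoint path by plain string splitting:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {"collapsed": true},
   "outputs": [],
   "source": [
    "path = './model/fcn_mnist/fcn_mnist.ckpt-4000'\n",
    "print(path.split('/')[-1].split('-')[-1])  # '4000'"
   ]
  }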
502 | "version": 3 503 | }, 504 | "file_extension": ".py", 505 | "mimetype": "text/x-python", 506 | "name": "python", 507 | "nbconvert_exporter": "python", 508 | "pygments_lexer": "ipython3", 509 | "version": "3.5.3" 510 | } 511 | }, 512 | "nbformat": 4, 513 | "nbformat_minor": 2 514 | } 515 | -------------------------------------------------------------------------------- /3.Context_Saving.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "collapsed": true 7 | }, 8 | "source": [ 9 | "### 变量管理\n", 10 | "在定义inference函数时需要提供神经网络的所有参数,NN过大时馈送参数很麻烦。TensorFlow 提供了通过变量名称来创建或获取一个变量的机制。通过这个机制可以在不同函数中直接通过变量的名字来使用变量,而不需要将变量以参数的形式到处传递。TensorFlow主要通过tf.get_variable和tf.variable_scope函数实现通过变量名获取变量值。" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 2, 16 | "metadata": { 17 | "collapsed": true 18 | }, 19 | "outputs": [], 20 | "source": [ 21 | "#下面两个定义等价\n", 22 | "v1=tf.Variable(tf.constant(1.0,shape=[1]),name='v')\n", 23 | "\n", 24 | "#提供未读信息与初始化方法,指定变量名称为必须项\n", 25 | "v=tf.get_variable(\"v\",shape=[1],initializer=tf.constant_initializer(1.0))\n" 26 | ] 27 | }, 28 | { 29 | "cell_type": "markdown", 30 | "metadata": { 31 | "collapsed": true 32 | }, 33 | "source": [ 34 | "tf.get_variable首先会创建名为“v”的参数,有同名参数会创建失败。tf.get_variable获取一个已创建的变量需要使用tf.variable_scope来控制。" 35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": 3, 40 | "metadata": { 41 | "collapsed": false 42 | }, 43 | "outputs": [ 44 | { 45 | "name": "stdout", 46 | "output_type": "stream", 47 | "text": [ 48 | "True\n" 49 | ] 50 | } 51 | ], 52 | "source": [ 53 | "#在名为foo的命名空间内创建名为v的变量\n", 54 | "with tf.variable_scope(\"foo\"):\n", 55 | " v=tf.get_variable('v',[1],initializer=tf.constant_initializer(1.0))\n", 56 | " \n", 57 | "#因为在命名空间foo中已经存在名字为v的变量,下面的代码将报错:\n", 58 | "#with tf.variable_scope(\"foo\"):\n", 59 | "# v=tf.get_variable(\"v\",[1])\n", 60 | "\n", 61 | "#在生成上下文管理器时,将参数reuse设置为Ture。这样tf.get_variable函数将直接获取已经声明的变量\n", 62 | "with tf.variable_scope(\"foo\",reuse=True):\n", 63 | " v1=tf.get_variable('v',[1])\n", 64 | " print (v == v1)\n", 65 | " \n", 66 | "#将参数reuse设置为ture时,tf.variable_scope将只能获取已经创建的变量。若命名空间foo无该变量,则会报错。" 67 | ] 68 | }, 69 | { 70 | "cell_type": "code", 71 | "execution_count": 8, 72 | "metadata": { 73 | "collapsed": false 74 | }, 75 | "outputs": [ 76 | { 77 | "name": "stdout", 78 | "output_type": "stream", 79 | "text": [ 80 | "False\n", 81 | "True\n", 82 | "True\n" 83 | ] 84 | } 85 | ], 86 | "source": [ 87 | "#当tf.variable_scope函数嵌套时,reuse参数的取值如何确定\n", 88 | "\n", 89 | "with tf.variable_scope('root'):\n", 90 | " \n", 91 | " #可以通过tf.get_variable_scope().reuse函数获取当前上下文管理器中reuse参数的信息,以下输出False,即最外层reuse是False\n", 92 | " print(tf.get_variable_scope().reuse)\n", 93 | " \n", 94 | " #新建一个嵌套的上下文管理器,并指定reuse为Ture\n", 95 | " with tf.variable_scope('foo',reuse=True):\n", 96 | " #输出True\n", 97 | " print(tf.get_variable_scope().reuse)\n", 98 | " \n", 99 | " #新建一个嵌套的上下文管理器,但不指定reuse,这时reuse的取值会和外面一层保持一致,以下输出True\n", 100 | " with tf.variable_scope('bat'):\n", 101 | " print(tf.get_variable_scope().reuse)\n", 102 | " #退出reuse设置为True的上下文后,又会输出False\n", 103 | " " 104 | ] 105 | }, 106 | { 107 | "cell_type": "markdown", 108 | "metadata": {}, 109 | "source": [ 110 | "tf.variable_scope函数生成上下文管理器,也会创建一个TensorFlow中的命名空间,在命名空间中创建的变量名称会带上命名空间名作为前缀。以下展示了如何通过tf.variable_scope管理变量的名称。" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": 9, 116 | "metadata": { 117 | "collapsed": false 
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "tf.variable_scope not only creates a context manager, it also creates a TensorFlow namespace: variables created inside it carry the namespace name as a prefix. The following shows how variable names are managed through tf.variable_scope."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {"collapsed": false},
   "outputs": [{"name": "stdout", "output_type": "stream", "text": ["v:0\n", "foo/v:0\n", "foo/bar/v:0\n", "foo/bar/v:0\n", "True\n"]}],
   "source": [
    "v1=tf.get_variable('v',[1])\n",
    "# \":0\" means this variable is the first output of the operation that generates it\n",
    "print(v1.name)\n",
    "\n",
    "with tf.variable_scope('foo'):\n",
    "    v2=tf.get_variable('v',[1])\n",
    "    # Variables created inside tf.variable_scope get the namespace as a name prefix\n",
    "    print(v2.name)\n",
    "    \n",
    "with tf.variable_scope('foo'):\n",
    "    with tf.variable_scope('bar'):\n",
    "        v3=tf.get_variable('v',[1])\n",
    "        print(v3.name)\n",
    "    \n",
    "# Create a namespace with an empty name and set reuse=True\n",
    "with tf.variable_scope('',reuse=True):\n",
    "    # A variable in another namespace can be fetched directly through its full name\n",
    "    v4=tf.get_variable('foo/bar/v',[1])\n",
    "    print(v4.name)\n",
    "    print(v4 == v3)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "With variable management, the inference function defined earlier can be improved."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {"collapsed": true},
   "outputs": [],
   "source": [
    "INPUT_NODE=10\n",
    "LAYER1_NODE=32\n",
    "OUTPUT_NODE=2\n",
    "\n",
    "def inference (input_tensor,reuse=False):\n",
    "    \n",
    "    # First layer: variables and forward pass.\n",
    "    # The reuse argument decides whether new variables are created or existing ones\n",
    "    # are fetched; when not passed, it defaults to False\n",
    "    with tf.variable_scope('layer1',reuse=reuse):\n",
    "        weights=tf.get_variable('weights',[INPUT_NODE,LAYER1_NODE],initializer=tf.truncated_normal_initializer(stddev=0.1))\n",
    "        biases=tf.get_variable('biases',[LAYER1_NODE],initializer=tf.constant_initializer(0.0))\n",
    "        layer1=tf.nn.relu(tf.matmul(input_tensor,weights)+biases)\n",
    "    \n",
    "    # Second layer; note that tf.matmul distinguishes left and right multiplication\n",
    "    with tf.variable_scope('layer2',reuse=reuse):\n",
    "        weights=tf.get_variable('weights',[LAYER1_NODE,OUTPUT_NODE],initializer=tf.truncated_normal_initializer(stddev=0.1))\n",
    "        biases=tf.get_variable('biases',[OUTPUT_NODE],initializer=tf.constant_initializer(0.0))\n",
    "        layer2=tf.matmul(layer1,weights)+biases\n",
    "    return(layer2)\n",
    "\n",
    "x=tf.placeholder(tf.float32,[None,INPUT_NODE])\n",
    "y=inference(x)\n",
    "\n",
    "# To run inference with the trained network, simply call inference(new_x,True).\n",
    "# With this pattern there is no need to pass all variables between functions as arguments."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {"collapsed": true},
   "source": [
    "#### Model persistence in TensorFlow\n",
    "Saving a trained model, including the computation graph and the weights"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {"collapsed": true},
   "outputs": [],
   "source": [
    "# Save a TensorFlow computation graph\n",
    "import tensorflow as tf\n",
    "\n",
    "# Declare two variables and compute their sum\n",
    "v1=tf.Variable(tf.constant(1.0,shape=[1]),name='v1')\n",
    "v2=tf.Variable(tf.constant(2.0,shape=[1]),name='v2')\n",
    "result=v1+v2\n",
    "\n",
    "# Declare a tf.train.Saver to save the model\n",
    "saver=tf.train.Saver()\n",
    "\n",
    "with tf.Session() as sess:\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    saver.save(sess,'model/1/model1.ckpt')\n",
    "\n",
    "# *.ckpt.meta stores the structure of the computation graph; *.ckpt stores the value\n",
    "# of every variable; 'checkpoint' lists all model files in the directory"
   ]
  },
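  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Editor's aside (not in the original): instead of re-declaring v1 and v2 by hand as the next cell does, the graph structure itself can be restored from the .meta file with tf.train.import_meta_graph. A sketch (the tensor name 'add:0' assumes the sum above is the first add op in the saved graph):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {"collapsed": false},
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "saver=tf.train.import_meta_graph('model/1/model1.ckpt.meta')\n",
    "with tf.Session() as sess:\n",
    "    saver.restore(sess,'model/1/model1.ckpt')\n",
    "    print(sess.run(tf.get_default_graph().get_tensor_by_name('add:0')))  # [ 3.]\n"
   ]
  },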
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {"collapsed": false},
   "outputs": [{"name": "stdout", "output_type": "stream", "text": ["INFO:tensorflow:Restoring parameters from model/1/model1.ckpt\n", "[ 3.]\n"]}],
   "source": [
    "import tensorflow as tf\n",
    "\n",
    "v1=tf.Variable(tf.constant(22.0,shape=[1]),name='v1')\n",
    "v2=tf.Variable(tf.constant(2.0,shape=[1]),name='v2')\n",
    "result=v1+v2\n",
    "\n",
    "saver=tf.train.Saver()\n",
    "\n",
    "with tf.Session() as sess:\n",
    "    \n",
    "    # Load the saved model and compute the sum from the saved variable values\n",
    "    saver.restore(sess,'model/1/model1.ckpt')\n",
    "    print(sess.run(result))\n",
    "    \n",
    "# The graph structure must match the saved one, otherwise it cannot run. Compared with\n",
    "# the saving code, only the variable initialization is skipped: the values (here 1.0,\n",
    "# not 22.0) are taken from the checkpoint"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {"collapsed": true},
   "source": [
    "The above saves and loads all variables defined in the computation graph by default. To save or load only some of them, pass a list when declaring the tf.train.Saver: saver=tf.train.Saver([v1]) saves or loads only the variable v1. tf.train.Saver also supports renaming variables on save or load."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {"collapsed": false},
   "outputs": [{"name": "stdout", "output_type": "stream", "text": ["INFO:tensorflow:Restoring parameters from model/1/model1.ckpt\n", "[ 3.]\n"]}],
   "source": [
    "# The variable names declared here differ from the names in the saved model\n",
    "v_1=tf.Variable(tf.constant(3.0,shape=[1]),name='v_1')\n",
    "v_2=tf.Variable(tf.constant(2.0,shape=[1]),name='v_2')\n",
    "result=v_1+v_2\n",
    "\n",
    "# A plain tf.train.Saver() would fail because it cannot find the variables;\n",
    "# a renaming dict maps the saved names onto the new variables so the old model loads\n",
    "\n",
    "saver=tf.train.Saver({'v1':v_1,'v2':v_2})\n",
    "with tf.Session() as sess:\n",
    "    \n",
    "    saver.restore(sess,'model/1/model1.ckpt')\n",
    "    print(sess.run(result))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {"collapsed": false},
   "outputs": [{"name": "stdout", "output_type": "stream", "text": ["v:0\n", "WARNING:tensorflow:From :12: all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.\n", "Instructions for updating:\n", "Please use tf.global_variables instead.\n", "v:0\n", "v/ExponentialMovingAverage:0\n", "[10.0, 0.099999905]\n"]}],
   "source": [
    "# Saving a moving average model\n",
    "\n",
    "import tensorflow as tf\n",
    "\n",
    "v=tf.Variable(0,dtype=tf.float32,name='v')\n",
    "\n",
    "# Before the moving average model is declared there is only the variable v,\n",
    "# so the loop below prints \"v:0\"\n",
    "for variables in tf.global_variables():\n",
    "    print(variables.name)\n",
    "    \n",
    "ema=tf.train.ExponentialMovingAverage(0.99)\n",
    "maintain_averages_op=ema.apply(tf.all_variables())\n",
    "\n",
    "# After declaring the moving average, TensorFlow automatically creates a shadow variable\n",
    "for variables in tf.global_variables():\n",
    "    print(variables.name)\n",
    "    \n",
    "saver=tf.train.Saver()\n",
    "with tf.Session() as sess:\n",
    "    sess.run(tf.global_variables_initializer())\n",
    "    sess.run(tf.assign(v,10))\n",
    "    sess.run(maintain_averages_op)\n",
    "    \n",
    "    # Both variables, v and v/ExponentialMovingAverage, are saved\n",
    "    saver.save(sess,'model/test/movingaverage/ema.ckpt')\n",
    "    print(sess.run([v, ema.average(v)]))"
   ]
  },
"metadata": { 361 | "collapsed": false 362 | }, 363 | "outputs": [ 364 | { 365 | "name": "stdout", 366 | "output_type": "stream", 367 | "text": [ 368 | "INFO:tensorflow:Restoring parameters from model/test/movingaverage/ema.ckpt\n", 369 | "0.0999999\n" 370 | ] 371 | } 372 | ], 373 | "source": [ 374 | "#以下展示通过重命名直接读取变量的滑动平均值\n", 375 | "import tensorflow as tf\n", 376 | "v=tf.Variable(0, dtype=tf.float32,name='v')\n", 377 | "\n", 378 | "#通过变量重命名将原来变量v的滑动平均值直接赋值给v\n", 379 | "saver=tf.train.Saver({'v/ExponentialMovingAverage':v})\n", 380 | "with tf.Session() as sess:\n", 381 | " saver.restore(sess, 'model/test/movingaverage/ema.ckpt')\n", 382 | " print(sess.run(v))" 383 | ] 384 | }, 385 | { 386 | "cell_type": "code", 387 | "execution_count": 1, 388 | "metadata": { 389 | "collapsed": false 390 | }, 391 | "outputs": [ 392 | { 393 | "name": "stdout", 394 | "output_type": "stream", 395 | "text": [ 396 | "{'v/ExponentialMovingAverage': }\n", 397 | "INFO:tensorflow:Restoring parameters from model/test/movingaverage/ema.ckpt\n", 398 | "0.0999999\n" 399 | ] 400 | } 401 | ], 402 | "source": [ 403 | "#为了方便加载时重命名滑动平均值,tf.train.ExponentialMovingAverage提供了\n", 404 | "#variables_to_restore函数生成tf.train.Saver所需要的变量重命名字典。\n", 405 | "\n", 406 | "import tensorflow as tf\n", 407 | "\n", 408 | "v=tf.Variable(0, dtype=tf.float32,name='v')\n", 409 | "ema=tf.train.ExponentialMovingAverage(0.99)\n", 410 | "\n", 411 | "#通过variables_to_restore函数可以直接生成上面代码中提供的字典{'v/ExponentialMovingAverage':v}\n", 412 | "print(ema.variables_to_restore())\n", 413 | "\n", 414 | "saver=tf.train.Saver(ema.variables_to_restore())\n", 415 | "with tf.Session() as sess:\n", 416 | " saver.restore(sess,'model/test/movingaverage/ema.ckpt')\n", 417 | " print(sess.run(v))\n" 418 | ] 419 | }, 420 | { 421 | "cell_type": "markdown", 422 | "metadata": { 423 | "collapsed": true 424 | }, 425 | "source": [ 426 | "tf提供了convert_to_constants函数,该函数可以将计算图中的变量及其取值通过常量的方式保存,整个计算图和变量可以统一放在一个文件中。" 427 | ] 428 | }, 429 | { 430 | "cell_type": "code", 431 | "execution_count": 3, 432 | "metadata": { 433 | "collapsed": false 434 | }, 435 | "outputs": [ 436 | { 437 | "name": "stdout", 438 | "output_type": "stream", 439 | "text": [ 440 | "INFO:tensorflow:Froze 2 variables.\n", 441 | "Converted 2 variables to const ops.\n" 442 | ] 443 | } 444 | ], 445 | "source": [ 446 | "import tensorflow as tf\n", 447 | "from tensorflow.python.framework import graph_util\n", 448 | "\n", 449 | "v1=tf.Variable(tf.constant(2.0, shape=[1]),name='v1')\n", 450 | "v2=tf.Variable(tf.constant(8.0, shape=[1]),name='v2')\n", 451 | "result = v1 + v2\n", 452 | "\n", 453 | "with tf.Session() as sess:\n", 454 | " sess.run(tf.global_variables_initializer())\n", 455 | " \n", 456 | " #导出当前计算图的GraphDef部分,只需要这一部分就能完成从输入层到输出层的计算过程\n", 457 | " graph_def=tf.get_default_graph().as_graph_def()\n", 458 | " \n", 459 | " #将图中的变量及其取值转化为常量,同时将图中不必要的结点去掉。一些系统运算也会被转化为计算图中的结点(比如变量初始化操作)。\n", 460 | " #如果只关心程序中定义的某些计算,那么和这些计算无关的结点就没有必要导出并保存。\n", 461 | " #以下最后一个参数['add']给出了需要保存的结点名称。add结点是上面定义的求幂运算。\n", 462 | " output_graph_def=graph_util.convert_variables_to_constants(sess, graph_def,['add'] )\n", 463 | " \n", 464 | " #将导出的模型存入文件\n", 465 | " with tf.gfile.GFile('./model/test/1/combined_model.pb','wb') as f:\n", 466 | " f.write(output_graph_def.SerializeToString())" 467 | ] 468 | }, 469 | { 470 | "cell_type": "code", 471 | "execution_count": 7, 472 | "metadata": { 473 | "collapsed": false 474 | }, 475 | "outputs": [ 476 | { 477 | "name": "stdout", 478 | "output_type": "stream", 479 | "text": [ 480 | "[array([ 10.], 
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {"collapsed": false},
   "outputs": [{"name": "stdout", "output_type": "stream", "text": ["[array([ 10.], dtype=float32)]\n"]}],
   "source": [
    "# The saved addition can now be evaluated directly; this is convenient when only\n",
    "# the value of one graph node is needed\n",
    "\n",
    "import tensorflow as tf\n",
    "from tensorflow.python.platform import gfile\n",
    "\n",
    "with tf.Session() as sess:\n",
    "    model_filename='./model/test/1/combined_model.pb'\n",
    "    \n",
    "    # Read the saved model file and parse it into a GraphDef protocol buffer\n",
    "    with gfile.FastGFile(model_filename,'rb') as f:\n",
    "        graph_def=tf.GraphDef()\n",
    "        graph_def.ParseFromString(f.read())\n",
    "    \n",
    "    # Load the graph stored in graph_def into the current graph.\n",
    "    # return_elements=['add:0'] names the returned tensor: when saving we named the\n",
    "    # computation *node* 'add'; when loading we name the *tensor*, hence 'add:0'\n",
    "    result=tf.import_graph_def(graph_def, return_elements=['add:0'])\n",
    "    print(sess.run(result))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {"display_name": "Python 3", "language": "python", "name": "python3"},
  "language_info": {
   "codemirror_mode": {"name": "ipython", "version": 3},
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
--------------------------------------------------------------------------------
/4.tf_CNN.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {"collapsed": true},
   "outputs": [],
   "source": [
    "# The forward pass of a convolutional layer\n",
    "\n",
    "# Create the filter weights and biases with tf.get_variable. The number of parameters of\n",
    "# a convolutional layer depends only on the filter size, its depth, and the depth of the\n",
    "# current layer's node tensor. The weight variable is a 4-D tensor: the first two\n",
    "# dimensions are the filter size, the third is the depth of the current layer, and the\n",
    "# fourth is the filter depth (the number of filters).\n",
    "filter_weight=tf.get_variable('weights', [5, 5, 3, 16], initializer=tf.truncated_normal_initializer(stddev=0.1))\n",
    "\n",
    "# As with the weights, the bias is shared across positions of the layer, so there is one\n",
    "# bias per filter: here 16, the depth of the next layer's node tensor.\n",
    "biases=tf.get_variable('biases', [16], initializer=tf.constant_initializer(0.1))\n",
    "\n",
    "# tf.nn.conv2d implements the convolutional forward pass. Its first argument is the\n",
    "# current layer's node tensor: a 4-D tensor whose last three dimensions form one node\n",
    "# tensor and whose first dimension is the batch, e.g. input_tensor[0,:,:,:] is the first\n",
    "# image. The second argument is the filter weights; the third gives the stride in each\n",
    "# dimension. The stride list has length 4, but its first and last entries must be 1:\n",
    "# one image and one channel at a time, strides only apply to height and width. The last\n",
    "# argument selects the padding: TensorFlow offers SAME (zero padding) and VALID (none).\n",
    "conv=tf.nn.conv2d(input_tensor, filter_weight, strides=[1,1,1,1], padding='SAME')\n",
    "\n",
    "# tf.nn.bias_add conveniently adds the bias to every node. Plain addition cannot be used,\n",
    "# because every position of the output shares the same bias: if the next layer is 2x2 but\n",
    "# the bias is a single number (depth 1), all four values still need that bias added.\n",
    "bias=tf.nn.bias_add(conv, biases)\n",
    "\n",
    "# Pass the result through ReLU to de-linearize it\n",
    "actived_conv=tf.nn.relu(bias)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {"collapsed": true},
   "outputs": [],
   "source": [
    "# The forward pass of a max pooling layer\n",
    "\n",
    "# tf.nn.max_pool implements max pooling; its arguments mirror tf.nn.conv2d.\n",
    "# ksize gives the window size, strides the strides, padding the padding scheme.\n",
    "pool=tf.nn.max_pool(actived_conv, ksize=[1,3,3,1],strides=[1,2,2,1], padding='SAME')"
   ]
  },
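  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Editor's aside (not in the original): the standard output-size arithmetic for these layers. With SAME padding $out = \lceil in / stride \rceil$; with VALID padding $out = \lceil (in - filter + 1) / stride \rceil$. For example, a 28×28 input with stride 2 and SAME padding gives $\lceil 28/2 \rceil = 14$, i.e. a 14×14 output."
   ]
  },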
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Understanding convolution and pooling layers\n",
    "#### 1. Suppose the input matrix is\n",
    "\n",
    "$\n",
    "M=\left(\begin{array}{ccc}\n",
    "1&-1&0\\\n",
    "-1&2&1\\\n",
    "0&2&-2\n",
    "\end{array}\right)\n",
    "$"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {"collapsed": false},
   "outputs": [{"name": "stdout", "output_type": "stream", "text": ["Matrix shape is:  (3, 3, 1)\n"]}],
   "source": [
    "import tensorflow as tf\n",
    "import numpy as np\n",
    "\n",
    "M = np.array([\n",
    "        [[1],[-1],[0]],\n",
    "        [[-1],[2],[1]],\n",
    "        [[0],[2],[-2]]\n",
    "    ])\n",
    "\n",
    "print (\"Matrix shape is: \",M.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 2. Define the filter, with depth 1.\n",
    "$\n",
    "W=\left(\begin{array}{cc}\n",
    "1&-1\\\n",
    "0&2\n",
    "\end{array}\right)\n",
    "$"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {"collapsed": true},
   "outputs": [],
   "source": [
    "filter_weight = tf.get_variable('weights', [2, 2, 1, 1], initializer = tf.constant_initializer([\n",
    "                                                                        [1, -1],\n",
    "                                                                        [0, 2]]))\n",
    "biases = tf.get_variable('biases', [1], initializer = tf.constant_initializer(1))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {"collapsed": true},
   "outputs": [],
   "source": [
    "# Adjust the input format to what TensorFlow expects\n",
    "M = np.asarray(M, dtype='float32')\n",
    "M = M.reshape(1, 3, 3, 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {"collapsed": false},
   "outputs": [{"name": "stdout", "output_type": "stream", "text": ["convoluted_M: \n", " [[[[ 7.]\n", "   [ 1.]]\n", "\n", "  [[-1.]\n", "   [-1.]]]]\n", "pooled_M: \n", " [[[[ 0.25]\n", "   [ 0.5 ]]\n", "\n", "  [[ 1.  ]\n", "   [-2.  ]]]]\n"]}],
   "source": [
    "# Run the matrix through the convolution filter and the pooling filter\n",
    "x = tf.placeholder('float32', [1, None, None, 1])\n",
    "conv = tf.nn.conv2d(x, filter_weight, strides = [1, 2, 2, 1], padding = 'SAME')\n",
    "bias = tf.nn.bias_add(conv, biases)\n",
    "pool = tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n",
    "with tf.Session() as sess:\n",
    "    tf.global_variables_initializer().run()\n",
    "    convoluted_M = sess.run(bias,feed_dict={x:M})\n",
    "    pooled_M = sess.run(pool,feed_dict={x:M})\n",
    "    \n",
    "    print (\"convoluted_M: \\n\", convoluted_M)\n",
    "    print (\"pooled_M: \\n\", pooled_M)"
   ]
  },
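  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Editor's aside (not in the original): the top-left entry of convoluted_M can be checked by hand. With zero (SAME) padding and stride 2, the first 2×2 window of M is [[1,−1],[−1,2]]; its elementwise product with W sums to 1·1 + (−1)·(−1) + (−1)·0 + 2·2 = 6, and adding the bias 1 gives 7, matching the output above. Likewise the first entry of pooled_M is the window average (1 − 1 − 1 + 2)/4 = 0.25."
   ]
  },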
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### LeNet-5 in TensorFlow"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {"collapsed": true},
   "outputs": [],
   "source": [
    "# Training LeNet-5 is very similar to the fully connected network above;\n",
    "# only the format of the input placeholder changes, to a 4-D tensor\n",
    "\n",
    "# The first dimension is the number of samples in a batch, the second and third are\n",
    "# the image size, and the fourth is the image depth (number of channels)\n",
    "x=tf.placeholder(tf.float32,[BATCH_SIZE,\n",
    "                             mnist_inference.IMAGE_SIZE,\n",
    "                             mnist_inference.IMAGE_SIZE,\n",
    "                             mnist_inference.NUM_CHANNELS], name='x-input')\n"
   ]
  },
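  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Editor's aside (not in the original): the flat 784-pixel rows that mnist.train.next_batch returns have to be reshaped into this 4-D layout, as the training loop below does. A minimal sketch:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {"collapsed": false},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "flat_batch = np.zeros((100, 784), dtype=np.float32)   # stand-in for a batch of xs\n",
    "img_batch = np.reshape(flat_batch, (100, 28, 28, 1))  # [batch, height, width, channels]\n",
    "print(img_batch.shape)"
   ]
  },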
"\n",
"    # Third layer (convolution): input is the 14x14x32 tensor, output is 14x14x64.\n",
"    with tf.variable_scope('layer3-conv2'):\n",
"        conv2_weights = tf.get_variable('weight', [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],\n",
"                                        initializer=tf.truncated_normal_initializer(stddev=0.1))\n",
"        conv2_biases = tf.get_variable('bias', [CONV2_DEEP], initializer=tf.constant_initializer(0.0))\n",
"\n",
"        # 5x5 kernel of depth 64, moved with stride 1, using zero padding\n",
"        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')\n",
"        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))\n",
"\n",
"    # Fourth layer (max pooling): input 14x14x64, output 7x7x64.\n",
"    with tf.name_scope('layer4-pool2'):\n",
"        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n",
"\n",
"    # Convert the fourth layer's output into the input format of the fifth\n",
"    # (fully connected) layer: the 7x7x64 tensor is flattened into a vector.\n",
"    # pool2.get_shape() returns the dimensions of the fourth layer's output; every\n",
"    # layer consumes and produces a whole batch, so the shape includes the batch size.\n",
"    pool_shape = pool2.get_shape().as_list()\n",
"\n",
"    # Length of the flattened vector: the product of the spatial dimensions and\n",
"    # the depth. Note that pool_shape[0] is the number of samples in a batch.\n",
"    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]\n",
"\n",
"    # Reshape the fourth layer's output into a batch of vectors\n",
"    reshaped = tf.reshape(pool2, [pool_shape[0], nodes])\n",
"\n",
"    # Fifth layer (fully connected): input vector of length 3136 (= 7*7*64),\n",
"    # output vector of length 512. Dropout is introduced here: during training it\n",
"    # randomly sets part of the outputs to 0. Dropout is generally used only on\n",
"    # fully connected layers, not on convolutional or pooling layers.\n",
"    with tf.variable_scope('layer5-fc1'):\n",
"        fc1_weights = tf.get_variable('weight', [nodes, FC_SIZE],\n",
"                                      initializer=tf.truncated_normal_initializer(stddev=0.1))\n",
"\n",
"        # Only fully connected weights are regularized\n",
"        if regularizer != None:\n",
"            tf.add_to_collection('losses', regularizer(fc1_weights))\n",
"        fc1_biases = tf.get_variable('bias', [FC_SIZE], initializer=tf.constant_initializer(0.1))\n",
"\n",
"        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)\n",
"        if train: fc1 = tf.nn.dropout(fc1, 0.5)\n",
"\n",
"    # Sixth layer (fully connected): input vector of length 512, output of\n",
"    # length 10. Passing the output through softmax gives the final\n",
"    # classification result.\n",
"    with tf.variable_scope('layer6-fc2'):\n",
"        fc2_weights = tf.get_variable('weight', [FC_SIZE, NUM_LABELS],\n",
"                                      initializer=tf.truncated_normal_initializer(stddev=0.1))\n",
"        if regularizer != None:\n",
"            tf.add_to_collection('losses', regularizer(fc2_weights))\n",
"\n",
"        fc2_biases = tf.get_variable('bias', [NUM_LABELS], initializer=tf.constant_initializer(0.1))\n",
"        logit = tf.matmul(fc1, fc2_weights) + fc2_biases\n",
"    return logit\n",
"\n"
]
},
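{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check (added here, not part of the original notebook), the cell below runs one forward pass of `inference` on a dummy batch and confirms that the logits have shape `[batch, NUM_LABELS]`. It assumes the constants and `inference` defined in the cell above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": { "collapsed": false },
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"with tf.Graph().as_default():\n",
"    images = tf.placeholder(tf.float32, [4, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS])\n",
"    logits = inference(images, False, None)  # no dropout, no regularizer\n",
"    with tf.Session() as sess:\n",
"        sess.run(tf.global_variables_initializer())\n",
"        out = sess.run(logits, feed_dict={images: np.zeros((4, 28, 28, 1), np.float32)})\n",
"        print(out.shape)  # expected: (4, 10)"
]
},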
{
"cell_type": "code",
"execution_count": null,
"metadata": { "collapsed": false },
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Extracting ./data/MNIST/train-images-idx3-ubyte.gz\n",
"Extracting ./data/MNIST/train-labels-idx1-ubyte.gz\n",
"Extracting ./data/MNIST/t10k-images-idx3-ubyte.gz\n",
"Extracting ./data/MNIST/t10k-labels-idx1-ubyte.gz\n",
"After 1 training steps, loss on training batch is 14.1371.\n"
]
}
],
"source": [
"# -*- coding: utf-8 -*-\n",
"import os\n",
"from tensorflow.examples.tutorials.mnist import input_data\n",
"import numpy as np\n",
"\n",
"# Hyperparameters of the network\n",
"BATCH_SIZE = 16\n",
"LEARNING_RATE_BASE = 0.8\n",
"LEARNING_RATE_DECAY = 0.99\n",
"REGULARIZATION_RATE = 0.0001\n",
"TRAINING_STEPS = 10000\n",
"MOVING_AVERAGE_DECAY = 0.99\n",
"MODEL_SAVE_PATH = \"./model/fcn_mnist\"\n",
"MODEL_NAME = \"fcn_mnist.ckpt\"\n",
"\n",
"\n",
"def train(mnist):\n",
"    x = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS], name='x-input')\n",
"    y = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-output')\n",
"\n",
"    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)\n",
"    # Call the inference procedure\n",
"    y_hat = inference(x, True, regularizer)\n",
"    global_step = tf.Variable(0, trainable=False)\n",
"\n",
"    # Define the loss, learning rate, moving-average op, and training op\n",
"    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)\n",
"    variables_average_op = variable_averages.apply(tf.trainable_variables())\n",
"    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y_hat, labels=tf.argmax(y, 1))\n",
"    cross_entropy_mean = tf.reduce_mean(cross_entropy)\n",
"    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))\n",
"    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE,\n",
"                                               LEARNING_RATE_DECAY)\n",
"    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n",
"\n",
"    with tf.control_dependencies([train_step, variables_average_op]):\n",
"        train_op = tf.no_op(name='train')\n",
"\n",
"    # Initialize the TF persistence class\n",
"    saver = tf.train.Saver()\n",
"    with tf.Session() as sess:\n",
"        sess.run(tf.global_variables_initializer())\n",
"\n",
"        # The model is no longer evaluated on validation data during training;\n",
"        # validation and testing are carried out by a separate program.\n",
"        for i in range(TRAINING_STEPS):\n",
"            xs, ys = mnist.train.next_batch(BATCH_SIZE)\n",
"            reshaped_xs = np.reshape(xs, (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))\n",
"            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y: ys})\n",
"\n",
"            # Save the model every 1000 iterations\n",
"            if i % 1000 == 0:\n",
"                # Report the loss on the current training batch\n",
"                print('After %d training steps, loss on training batch is %g.' % (step, loss_value))\n",
"\n",
"                # Save the current model, naming it with the global_step parameter\n",
"                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)\n",
"\n",
"\n",
"def main(argv=None):\n",
"    mnist = input_data.read_data_sets('./data/MNIST/', one_hot=True)\n",
"    train(mnist)\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
"    tf.app.run()\n"
]
},
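{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal evaluation sketch (an addition, not from the original notebook): restore the latest checkpoint saved by `train` and measure accuracy on one test batch. It assumes `inference`, the constants above, and a checkpoint under `MODEL_SAVE_PATH`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": { "collapsed": true },
"outputs": [],
"source": [
"def evaluate(mnist):\n",
"    with tf.Graph().as_default():\n",
"        x = tf.placeholder(tf.float32, [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS], name='x-input')\n",
"        y = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')\n",
"        y_hat = inference(x, False, None)\n",
"        correct = tf.equal(tf.argmax(y_hat, 1), tf.argmax(y, 1))\n",
"        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n",
"        saver = tf.train.Saver()\n",
"        with tf.Session() as sess:\n",
"            ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)\n",
"            if ckpt and ckpt.model_checkpoint_path:\n",
"                saver.restore(sess, ckpt.model_checkpoint_path)\n",
"                xs, ys = mnist.test.next_batch(BATCH_SIZE)\n",
"                xs = np.reshape(xs, (BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))\n",
"                acc = sess.run(accuracy, feed_dict={x: xs, y: ys})\n",
"                print('Accuracy on one test batch: %g' % acc)"
]
},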
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The above implements LeNet-5: two convolution-and-pooling stages extract feature maps, which are flattened into a vector and passed to the fully connected layers for the final prediction."
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": { "collapsed": false, "scrolled": true },
"outputs": [
{
"data": {
"image/png": "(base64-encoded PNG omitted: the matplotlib rendering of the decoded image)",
"text/plain": [
""
]
},
"metadata": {},
"output_type": "display_data"
}
],
412 | "source": [ 413 | "import matplotlib.pyplot as plt\n", 414 | "import tensorflow as tf\n", 415 | "\n", 416 | "\n", 417 | "image_raw_data = tf.gfile.FastGFile(\"../../data/raw_image/image.png\", \"rb\").read()\n", 418 | "\n", 419 | "with tf.Session() as sess:\n", 420 | " img_data = tf.image.decode_png(image_raw_data)\n", 421 | " plt.imshow(img_data.eval())\n", 422 | " plt.show()" 423 | ] 424 | }, 425 | { 426 | "cell_type": "markdown", 427 | "metadata": { 428 | "collapsed": true 429 | }, 430 | "source": [ 431 | "如下描述的是一个Inception模块,即最后一个全连接层前的Inception模块" 432 | ] 433 | }, 434 | { 435 | "cell_type": "markdown", 436 | "metadata": { 437 | "collapsed": true 438 | }, 439 | "source": [ 440 | "### Inception-v3 module" 441 | ] 442 | }, 443 | { 444 | "cell_type": "code", 445 | "execution_count": null, 446 | "metadata": { 447 | "collapsed": true 448 | }, 449 | "outputs": [], 450 | "source": [ 451 | "#Inception-v3模型共96个卷积层,用原始API会很冗长。TensorFlow-Slim可以简洁地实现卷积层\n", 452 | "#通过TensorFlow-Slim可以在一行中实现一个卷积层的前向传播算法。slim.conv2d函数有三个参数是必填的:输入结点的矩阵、卷积核的深度和尺寸\n", 453 | "#可选参数还有卷积核的步幅、padding、激活函数等\n", 454 | "net=slim.conv2d(input, 32, [3, 3])" 455 | ] 456 | }, 457 | { 458 | "cell_type": "code", 459 | "execution_count": null, 460 | "metadata": { 461 | "collapsed": true 462 | }, 463 | "outputs": [], 464 | "source": [ 465 | "#以下实现Inception相对复杂的模块,即最后一层并联Inception module\n", 466 | "#slim.arg_scope函数可设置默认的参数取值。默认下stride=1, padding='SAME'。进一步减少冗余代码\n", 467 | "#以下列表中的函数,将会使用后面定义的默认值\n", 468 | "with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME')\n", 469 | "\n", 470 | "#此处省略了Inception-v3模型中其它的网络结构而直接实现最后的并联Inception模块。\n", 471 | "#若net为上一层的输出结点矩阵,以下首先为Inception模块声明一个统一的变量命名空间\n", 472 | "with tf.variable_scope('Mixed_7c'):\n", 473 | " \n", 474 | " #为Inception模块中每一条路径声明一个命名空间\n", 475 | " with tf.variable_scope('Branch_0'):\n", 476 | " #实现一个卷积核尺寸为1,深度为320的卷积层\n", 477 | " branch_0=slim.conv2d(net, 320, [1, 1], scope='Conv2d_0a_1×1')\n", 478 | " \n", 479 | " #Inception模块中第二条路径,这条计算路径上的结构本身就是一个Inception模块\n", 480 | " with tf.variable_scope('Branch_1'):\n", 481 | " branch_1=slim.con2d(net, 384, [1, 1], scope='Conv2d_0a_1×1')\n", 482 | " \n", 483 | " #tf.concat函数可以将多个矩阵拼接起来,tf.concat第一个参数指定了拼接的维度,‘3’表示矩阵在深度这个维度上进行拼接\n", 484 | " branch_1=tf.concat(3, [slim.conv2d(branch_1, 384, [1, 3], scope='Conv2d_0b_1×3'),\n", 485 | " slim.conv2d(branch_1, 384, [3, 1], scope='Conv2d_0c_3×1')])\n", 486 | " \n", 487 | " #Inception模块中第三条路径,该计算路径也是一个Inception结构\n", 488 | " with tf.variable_scope('Branch_2'):\n", 489 | " branch_2=slim.conv2d(net, 448, [1, 1], scope='Conv2d_0a_1×1')\n", 490 | " branch_2=slim.conv2d(branch_2, 384, [3, 3], scope='Conv2d_0b_3×3')\n", 491 | " branch_2=tf.concat(3, [slim.comv2d(branch_2, 384, [1, 3], scope='Conv2d_0c_1×3'), \n", 492 | " slim.conv2d(branch_2, 384, [3, 1], scope='Conv2d_0d_3×1')])\n", 493 | " \n", 494 | " #Inception 模块中第四条路径\n", 495 | " with tf.variable_scope('Branch_3'):\n", 496 | " branch_3=slim.avg_pool2d(net, [3, 3], scope='AcgPool_0a_3×3')\n", 497 | " branch_3=slim.conv2d(branch_3, 192, [1, 1], scope='Conv2d_0b_1×1')\n", 498 | " \n", 499 | " #当前Inception模块的最后输出是由上面四个计算结果拼接得到的\n", 500 | " net=tf.concat(3, [branch_0, branch_1, branch_2, branch_3])\n", 501 | " \n" 502 | ] 503 | } 504 | ], 505 | "metadata": { 506 | "kernelspec": { 507 | "display_name": "Python 3", 508 | "language": "python", 509 | "name": "python3" 510 | }, 511 | "language_info": { 512 | "codemirror_mode": { 513 | "name": "ipython", 514 | "version": 3 515 | }, 516 | "file_extension": ".py", 
517 | "mimetype": "text/x-python", 518 | "name": "python", 519 | "nbconvert_exporter": "python", 520 | "pygments_lexer": "ipython3", 521 | "version": "3.5.3" 522 | } 523 | }, 524 | "nbformat": 4, 525 | "nbformat_minor": 2 526 | } 527 | -------------------------------------------------------------------------------- /6.tf_RNN.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 4, 6 | "metadata": { 7 | "collapsed": false 8 | }, 9 | "outputs": [ 10 | { 11 | "name": "stdout", 12 | "output_type": "stream", 13 | "text": [ 14 | "[ 1.75907902]\n", 15 | "[ 2.75076384]\n", 16 | "[ 3.01494396]\n", 17 | "[ 3.07383406]\n", 18 | "[ 3.09134016]\n" 19 | ] 20 | } 21 | ], 22 | "source": [ 23 | "# 以下实现了简单的循环神经网络前向传播过程\n", 24 | "\n", 25 | "import numpy as np\n", 26 | "\n", 27 | "X = [1, 2, 3, 4, 5]\n", 28 | "state = [0.1, 0.2]\n", 29 | "\n", 30 | "# 分开定义不同输入部分的权重\n", 31 | "w_cell_state = np.asarray([[0.1, 0.2], [0.3, 0.4]])\n", 32 | "w_cell_input = np.asarray([0.5, 0.6])\n", 33 | "b_cell = np.asarray([0.1, -0.1])\n", 34 | "\n", 35 | "# 定义用于输出的全连接层参数\n", 36 | "w_output = np.asarray([[1.0], [2.0]])\n", 37 | "b_output = 0.1\n", 38 | "\n", 39 | "# 按照时间顺序执行循环神经网络的前向传播过程\n", 40 | "for i in range(len(X)):\n", 41 | " # 计算循环体中的全连接层神经网络\n", 42 | " activations = np.dot(state, w_cell_state) + X[i] * w_cell_input + b_cell\n", 43 | " state = np.tanh(activations)\n", 44 | " # 根据当前时刻状态计算最终输出\n", 45 | " final_output = np.dot(state, w_output) + b_output\n", 46 | " print(final_output)" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": null, 52 | "metadata": { 53 | "collapsed": true 54 | }, 55 | "outputs": [], 56 | "source": [ 57 | "# LSTM结构的前向传播过程\n", 58 | "\n", 59 | "# 定义一个LSTM结构,使用的变量将在函数中自动声明\n", 60 | "lstm = rnn_cell.BasicLSTMCell(hidden_size)\n", 61 | "\n", 62 | "# 将LSTM中的状态初始化为零数组\n", 63 | "state = lstm.zero_state(batch_size, tf.float32)\n", 64 | "# 损失函数\n", 65 | "loss = 0.0\n", 66 | "\n", 67 | "# 在训练时为避免梯度消失问题,规定一个最大序列长度,num_steps\n", 68 | "for i in range (num_steps):\n", 69 | " # 在第一个时刻声明LSTM结构中使用的变量,之后的时刻复用该变量\n", 70 | " if i>0: tf.get_variable_scope().reuse_variables()\n", 71 | " # 每一步处理一个时刻,将当前输入和前一时刻状态传入定义的LSTM结构可以得到当前LSTM的输出与更新后的状态\n", 72 | " lstm_output, state = lstm(current_input, state)\n", 73 | " \n", 74 | " # 将当前时刻LSTM的输出传入一个全连接层已得到最后的输出。\n", 75 | " final_output = fully_connected(lstm_output)\n", 76 | " # 计算当前时刻输出损失\n", 77 | " loss += calc_loss(final_output, expected_output)" 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": null, 83 | "metadata": { 84 | "collapsed": true 85 | }, 86 | "outputs": [], 87 | "source": [ 88 | "# 深层循环神经网络的前向传播过程\n", 89 | "\n", 90 | "# 定义一个基本的LSTM结构作为循环体的基础,深层循环也支持其他循环体结构\n", 91 | "lstm = rnn_cell.BasicLSTMCell(lstm_size)\n", 92 | "\n", 93 | "# 通过MultiRNNCell实现深度RNN中每一个时刻的前向传播过程,即x_t到h_t需要多少个LSTM结构\n", 94 | "stacked_lstm = rnn_cell.MultiRNNCell([lstm]*number_of_layers)\n", 95 | "state = stacked_lstm.zero_state(batch_size, tf.float32)\n", 96 | "\n", 97 | "# 计算每一时刻的前向传播结果\n", 98 | "for i in range(len(num_steps)):\n", 99 | " if i> 0: tf.get_variable_scope().reuse_variables()\n", 100 | " stacked_lstm_output, state = stacked_lstm(current_input, state)\n", 101 | " final_output = fully_connected(stacked_lstm_output)\n", 102 | " loss += calc_loss(final_output, expected_output)\n" 103 | ] 104 | }, 105 | { 106 | "cell_type": "code", 107 | "execution_count": null, 108 | "metadata": { 109 | "collapsed": true 110 | }, 111 | "outputs": [], 112 | 
"source": [ 113 | "# 多层LSTM网络层级间Dropout\n", 114 | "lstm = rnn_cell.BasicLSTMCell(lstm_size)\n", 115 | "# input_keep_prob控制输入的Dropout概率,output_kep_dropout控制输出的概率\n", 116 | "dropout_lstm = tf.nn.rnn_cell.DropoutWrapper(lstm, output_keep_prob=0.5)\n", 117 | "stacked_lstm = tf.nn.rnn_cell.MultiRNNCell([dropout_lstm]*number_of_layers)" 118 | ] 119 | }, 120 | { 121 | "cell_type": "code", 122 | "execution_count": 1, 123 | "metadata": { 124 | "collapsed": true 125 | }, 126 | "outputs": [], 127 | "source": [ 128 | "#数据预处理相关的函数\n", 129 | "from __future__ import absolute_import\n", 130 | "from __future__ import division\n", 131 | "from __future__ import print_function\n", 132 | "\n", 133 | "import collections\n", 134 | "import os\n", 135 | "import sys\n", 136 | "\n", 137 | "import tensorflow as tf\n", 138 | "\n", 139 | "Py3 = sys.version_info[0] == 3\n", 140 | "\n", 141 | "def _read_words(filename):\n", 142 | " with tf.gfile.GFile(filename, \"r\") as f:\n", 143 | " if Py3:\n", 144 | " return f.read().replace(\"\\n\", \"\").split()\n", 145 | " else:\n", 146 | " return f.read().decode(\"utf-8\").replace(\"\\n\", \"\").split()\n", 147 | "\n", 148 | "\n", 149 | "def _build_vocab(filename):\n", 150 | " data = _read_words(filename)\n", 151 | "\n", 152 | " counter = collections.Counter(data)\n", 153 | " count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n", 154 | "\n", 155 | " words, _ = list(zip(*count_pairs))\n", 156 | " word_to_id = dict(zip(words, range(len(words))))\n", 157 | "\n", 158 | " return word_to_id\n", 159 | "\n", 160 | "\n", 161 | "def _file_to_word_ids(filename, word_to_id):\n", 162 | " data = _read_words(filename)\n", 163 | " return [word_to_id[word] for word in data if word in word_to_id]\n", 164 | "\n", 165 | "\n", 166 | "def ptb_raw_data(data_path=None):\n", 167 | " \"\"\"Load PTB raw data from data directory \"data_path\".\n", 168 | " Reads PTB text files, converts strings to integer ids,\n", 169 | " and performs mini-batching of the inputs.\n", 170 | " The PTB dataset comes from Tomas Mikolov's webpage:\n", 171 | " http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz\n", 172 | " Args:\n", 173 | " data_path: string path to the directory where simple-examples.tgz has\n", 174 | " been extracted.\n", 175 | " Returns:\n", 176 | " tuple (train_data, valid_data, test_data, vocabulary)\n", 177 | " where each of the data objects can be passed to PTBIterator.\n", 178 | " \"\"\"\n", 179 | "\n", 180 | " train_path = os.path.join(data_path, \"ptb.train.txt\")\n", 181 | " valid_path = os.path.join(data_path, \"ptb.valid.txt\")\n", 182 | " test_path = os.path.join(data_path, \"ptb.test.txt\")\n", 183 | "\n", 184 | " word_to_id = _build_vocab(train_path)\n", 185 | " train_data = _file_to_word_ids(train_path, word_to_id)\n", 186 | " valid_data = _file_to_word_ids(valid_path, word_to_id)\n", 187 | " test_data = _file_to_word_ids(test_path, word_to_id)\n", 188 | " vocabulary = len(word_to_id)\n", 189 | " return train_data, valid_data, test_data, vocabulary\n", 190 | "\n", 191 | "\n", 192 | "def ptb_producer(raw_data, batch_size, num_steps, name=None):\n", 193 | " \"\"\"Iterate on the raw PTB data.\n", 194 | " This chunks up raw_data into batches of examples and returns Tensors that\n", 195 | " are drawn from these batches.\n", 196 | " Args:\n", 197 | " raw_data: one of the raw data outputs from ptb_raw_data.\n", 198 | " batch_size: int, the batch size.\n", 199 | " num_steps: int, the number of unrolls.\n", 200 | " name: the name of this operation (optional).\n", 201 | " 
"def ptb_raw_data(data_path=None):\n",
"    \"\"\"Load PTB raw data from data directory \"data_path\".\n",
"    Reads PTB text files, converts strings to integer ids,\n",
"    and performs mini-batching of the inputs.\n",
"    The PTB dataset comes from Tomas Mikolov's webpage:\n",
"    http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz\n",
"    Args:\n",
"      data_path: string path to the directory where simple-examples.tgz has\n",
"        been extracted.\n",
"    Returns:\n",
"      tuple (train_data, valid_data, test_data, vocabulary)\n",
"      where each of the data objects can be passed to PTBIterator.\n",
"    \"\"\"\n",
"\n",
"    train_path = os.path.join(data_path, \"ptb.train.txt\")\n",
"    valid_path = os.path.join(data_path, \"ptb.valid.txt\")\n",
"    test_path = os.path.join(data_path, \"ptb.test.txt\")\n",
"\n",
"    word_to_id = _build_vocab(train_path)\n",
"    train_data = _file_to_word_ids(train_path, word_to_id)\n",
"    valid_data = _file_to_word_ids(valid_path, word_to_id)\n",
"    test_data = _file_to_word_ids(test_path, word_to_id)\n",
"    vocabulary = len(word_to_id)\n",
"    return train_data, valid_data, test_data, vocabulary\n",
"\n",
"\n",
"def ptb_producer(raw_data, batch_size, num_steps, name=None):\n",
"    \"\"\"Iterate on the raw PTB data.\n",
"    This chunks up raw_data into batches of examples and returns Tensors that\n",
"    are drawn from these batches.\n",
"    Args:\n",
"      raw_data: one of the raw data outputs from ptb_raw_data.\n",
"      batch_size: int, the batch size.\n",
"      num_steps: int, the number of unrolls.\n",
"      name: the name of this operation (optional).\n",
"    Returns:\n",
"      A pair of Tensors, each shaped [batch_size, num_steps]. The second element\n",
"      of the tuple is the same data time-shifted to the right by one.\n",
"    Raises:\n",
"      tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.\n",
"    \"\"\"\n",
"    with tf.name_scope(name, \"PTBProducer\", [raw_data, batch_size, num_steps]):\n",
"        raw_data = tf.convert_to_tensor(raw_data, name=\"raw_data\", dtype=tf.int32)\n",
"\n",
"        data_len = tf.size(raw_data)\n",
"        batch_len = data_len // batch_size\n",
"        data = tf.reshape(raw_data[0 : batch_size * batch_len],\n",
"                          [batch_size, batch_len])\n",
"\n",
"        epoch_size = (batch_len - 1) // num_steps\n",
"        assertion = tf.assert_positive(\n",
"            epoch_size,\n",
"            message=\"epoch_size == 0, decrease batch_size or num_steps\")\n",
"        with tf.control_dependencies([assertion]):\n",
"            epoch_size = tf.identity(epoch_size, name=\"epoch_size\")\n",
"\n",
"        i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n",
"        x = tf.strided_slice(data, [0, i * num_steps],\n",
"                             [batch_size, (i + 1) * num_steps])\n",
"        x.set_shape([batch_size, num_steps])\n",
"        y = tf.strided_slice(data, [0, i * num_steps + 1],\n",
"                             [batch_size, (i + 1) * num_steps + 1])\n",
"        y.set_shape([batch_size, num_steps])\n",
"        return x, y"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": { "collapsed": false },
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"X0: [[9970 9971 9972 9974 9975]\n",
" [ 332 7147 328 1452 8595]\n",
" [1969 0 98 89 2254]\n",
" [ 3 3 2 14 24]]\n",
"Y0: [[9971 9972 9974 9975 9976]\n",
" [7147 328 1452 8595 59]\n",
" [ 0 98 89 2254 0]\n",
" [ 3 2 14 24 198]]\n",
"X1: [[9976 9980 9981 9982 9983]\n",
" [ 59 1569 105 2231 1]\n",
" [ 0 312 1641 4 1063]\n",
" [ 198 150 2262 10 0]]\n",
"Y1: [[9980 9981 9982 9983 9984]\n",
" [1569 105 2231 1 895]\n",
" [ 312 1641 4 1063 8]\n",
" [ 150 2262 10 0 507]]\n",
"X2: [[9984 9986 9987 9988 9989]\n",
" [ 895 1 5574 4 618]\n",
" [ 8 713 0 264 820]\n",
" [ 507 74 2619 0 1]]\n",
"Y2: [[9986 9987 9988 9989 9991]\n",
" [ 1 5574 4 618 2]\n",
" [ 713 0 264 820 2]\n",
" [ 74 2619 0 1 8]]\n"
]
}
],
"source": [
"import tensorflow as tf\n",
"# The PTB dataset has a vocabulary of 10000 distinct words and 929589 training words\n",
"# For training, the text is truncated to a fixed length and grouped into batches\n",
"#import reader\n",
"\n",
"# Read the raw data\n",
"data_path = './data/PTB-dataset-Tomas-Mikolov/data'\n",
"train_data, valid_data, test_data, _ = ptb_raw_data(data_path)\n",
"\n",
"# Arrange the data into batches of 4 with truncation length 5\n",
"result = ptb_producer(train_data, 4, 5)\n",
"\n",
"# Read the first batches, which contain the inputs at every time step and the\n",
"# corresponding correct outputs; batches are read one by one from a queue.\n",
"with tf.Session() as sess:\n",
"    coord = tf.train.Coordinator()\n",
"    threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n",
"    for i in range(3):\n",
"        x, y = sess.run(result)\n",
"        print(\"X%d: \" % i, x)\n",
"        print(\"Y%d: \" % i, y)\n",
"    coord.request_stop()\n",
"    coord.join(threads)"
]
},
{
"cell_type": "markdown",
"metadata": { "collapsed": true },
"source": [
"### A language model with a recurrent neural network"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": { "collapsed": true },
"outputs": [],
"source": [
"import tensorflow as tf\n",
"import numpy as np\n",
"# Run the reader code above first\n",
"\n",
"data_path = './data/PTB-dataset-Tomas-Mikolov/data'\n",
"# Number of hidden units and number of LSTM layers\n",
"hidden_size = 200\n",
"num_layers = 2\n",
"# Vocabulary size\n",
"vocab_size = 10000\n",
"\n",
"learning_rate = 1.0\n",
"train_batch_size = 5\n",
"# Truncation length of the training data\n",
"train_num_step = 35\n",
"\n",
"# No truncation is needed at test time; the test data is one long sequence\n",
"eval_batch_size = 1\n",
"eval_num_step = 1\n",
"num_epoch = 3\n",
"# Probability that a node is kept (not dropped out)\n",
"keep_prob = 0.5\n",
"\n",
"# Parameter used to control exploding gradients\n",
"max_grad_norm = 5"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": { "collapsed": false },
"outputs": [],
"source": [
"# The PTBModel class describes the model\n",
"class PTBModel(object):\n",
"    def __init__(self, is_training, batch_size, num_steps):\n",
"        # Record the batch size and truncation length\n",
"        self.batch_size = batch_size\n",
"        self.num_steps = num_steps\n",
"\n",
"        # Input layer, of shape batch_size x num_steps\n",
"        self.input_data = tf.placeholder(tf.int32, [batch_size, num_steps])\n",
"        # Expected outputs\n",
"        self.targets = tf.placeholder(tf.int32, [batch_size, num_steps])\n",
"\n",
"        # A deep RNN with LSTM cells as the recurrent unit, with dropout\n",
"        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_size)\n",
"        if is_training:\n",
"            lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell, output_keep_prob=keep_prob)\n",
"        cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * num_layers)\n",
"\n",
"        # Initialize the state to zeros\n",
"        self.initial_state = cell.zero_state(batch_size, tf.float32)\n",
"\n",
"        # Map word IDs to word vectors; the embedding matrix is vocab_size x hidden_size\n",
"        embedding = tf.get_variable('embedding', [vocab_size, hidden_size])\n",
"        # Convert a batch of word IDs into word vectors; the result has shape\n",
"        # batch_size x num_steps x hidden_size (the embedding width equals\n",
"        # hidden_size here)\n",
"        inputs = tf.nn.embedding_lookup(embedding, self.input_data)\n",
"\n",
"        # Apply dropout only during training\n",
"        if is_training: inputs = tf.nn.dropout(inputs, keep_prob)\n",
"\n",
"        # Collect the LSTM outputs at the different time steps here, then feed\n",
"        # them through a fully connected layer for the final output\n",
"        outputs = []\n",
"        # state holds the LSTM state across the batch; initially zero\n",
"        state = self.initial_state\n",
"        with tf.variable_scope('RNN'):\n",
"            for time_step in range(num_steps):\n",
"                if time_step > 0: tf.get_variable_scope().reuse_variables()\n",
"                # Take the input at the current time step and feed it into the LSTM\n",
"                cell_output, state = cell(inputs[:, time_step, :], state)\n",
"                # Append the current output to the output list\n",
"                outputs.append(cell_output)\n",
"\n",
"        # Expand the output list into shape [batch, hidden*num_steps], then\n",
"        # reshape it to [batch*num_steps, hidden]\n",
"        output = tf.reshape(tf.concat(outputs, 1), [-1, hidden_size])\n",
"\n",
"        # Feed the LSTM output through a fully connected layer to get the final\n",
"        # prediction. At every time step the result is a vector of length\n",
"        # vocab_size which, after softmax, gives the probabilities of the next word.\n",
"        weight = tf.get_variable('weight', [hidden_size, vocab_size])\n",
"        bias = tf.get_variable('bias', [vocab_size])\n",
"        logits = tf.matmul(output, weight) + bias\n",
"\n",
"        # Cross-entropy loss, summed over a sequence\n",
"        loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(\n",
"            [logits],  # the predictions\n",
"            [tf.reshape(self.targets, [-1])],  # the expected results, with [batch_size, num_steps] flattened to a 1-D tensor\n",
"            [tf.ones([batch_size * num_steps], dtype=tf.float32)])  # loss weights; all ones means every batch and time step counts equally\n",
"\n",
"        # Average loss per batch\n",
"        self.cost = tf.reduce_sum(loss) / batch_size\n",
"        self.final_state = state\n",
"\n",
"        # Define the backpropagation ops only when training\n",
"        if not is_training: return\n",
"        trainable_variable = tf.trainable_variables()\n",
"\n",
"        # Clip gradients to control gradient explosion\n",
"        grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, trainable_variable), max_grad_norm)\n",
"        optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n",
"        # The training step\n",
"        self.train_op = optimizer.apply_gradients(zip(grads, trainable_variable))"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": { "collapsed": true },
"outputs": [],
"source": [
"def run_epoch(session, model, data, train_op, output_log, epoch_size):\n",
"    total_costs = 0.0\n",
"    iters = 0\n",
"    state = session.run(model.initial_state)\n",
"\n",
"    # Train or evaluate the model on the given data\n",
"    for step in range(epoch_size):\n",
"        x, y = session.run(data)\n",
"        # Run train_op on the current batch and compute the loss; the\n",
"        # cross-entropy measures the probability the model assigns to the\n",
"        # correct next word\n",
"        cost, state, _ = session.run([model.cost, model.final_state, train_op],\n",
"                                     {model.input_data: x, model.targets: y, model.initial_state: state})\n",
"        # Summing the costs over the different time steps and batches gives the\n",
"        # log of the perplexity; exponentiating the average yields the perplexity\n",
"        total_costs += cost\n",
"        iters += model.num_steps\n",
"\n",
"        # Write logs only during training\n",
"        if output_log and step % 100 == 0:\n",
"            print(\"After %d steps, perplexity is %.3f\" % (step, np.exp(total_costs / iters)))\n",
"    return np.exp(total_costs / iters)\n"
]
},
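{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick aside (added here, not in the original notebook): perplexity is simply the exponential of the average per-word cross-entropy, which is exactly what `run_epoch` computes with `np.exp(total_costs / iters)`. For instance, if the model assigns the correct next word an average probability of 0.1, the perplexity is 10."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": { "collapsed": false },
"outputs": [],
"source": [
"import numpy as np\n",
"\n",
"# Illustrative numbers only: the average cross-entropy when the correct word\n",
"# gets probability 0.1 on average\n",
"avg_cross_entropy = -np.log(0.1)\n",
"print(np.exp(avg_cross_entropy))  # 10.0, up to floating-point rounding"
]
},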
{
"cell_type": "code",
"execution_count": 5,
"metadata": { "collapsed": false },
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"In iteration: 1\n",
"After 0 steps, perplexity is 9912.484\n",
"Epoch: 1 Validation Perplexity: 1529.922\n",
"In iteration: 2\n",
"After 0 steps, perplexity is 189996.796\n",
"Epoch: 2 Validation Perplexity: 9924.143\n",
"In iteration: 3\n",
"After 0 steps, perplexity is 5511.998\n",
"Epoch: 3 Validation Perplexity: 2795.859\n",
"Test Perplexity: 3203.213\n"
]
}
],
"source": [
"def main():\n",
"    train_data, valid_data, test_data, _ = ptb_raw_data(data_path)\n",
"\n",
"    # Number of training iterations per epoch\n",
"    train_data_len = len(train_data)\n",
"    train_batch_len = train_data_len // train_batch_size\n",
"    train_epoch_size = (train_batch_len - 1) // train_num_step\n",
"\n",
"    valid_data_len = len(valid_data)\n",
"    valid_batch_len = valid_data_len // eval_batch_size\n",
"    valid_epoch_size = (valid_batch_len - 1) // eval_num_step\n",
"\n",
"    test_data_len = len(test_data)\n",
"    test_batch_len = test_data_len // eval_batch_size\n",
"    test_epoch_size = (test_batch_len - 1) // eval_num_step\n",
"\n",
"    initializer = tf.random_uniform_initializer(-0.05, 0.05)\n",
"    with tf.variable_scope(\"language_model\", reuse=None, initializer=initializer):\n",
"        train_model = PTBModel(True, train_batch_size, train_num_step)\n",
"\n",
"    with tf.variable_scope(\"language_model\", reuse=True, initializer=initializer):\n",
"        eval_model = PTBModel(False, eval_batch_size, eval_num_step)\n",
"\n",
"    # Train the model\n",
"    with tf.Session() as session:\n",
"        tf.global_variables_initializer().run()\n",
"\n",
"        train_queue = ptb_producer(train_data, train_model.batch_size, train_model.num_steps)\n",
"        eval_queue = ptb_producer(valid_data, eval_model.batch_size, eval_model.num_steps)\n",
"        test_queue = ptb_producer(test_data, eval_model.batch_size, eval_model.num_steps)\n",
"\n",
"        coord = tf.train.Coordinator()\n",
"        threads = tf.train.start_queue_runners(sess=session, coord=coord)\n",
"\n",
"        for i in range(num_epoch):\n",
"            print(\"In iteration: %d\" % (i + 1))\n",
"            run_epoch(session, train_model, train_queue, train_model.train_op, True, train_epoch_size)\n",
"\n",
"            valid_perplexity = run_epoch(session, eval_model, eval_queue, tf.no_op(), False, valid_epoch_size)\n",
"            print(\"Epoch: %d Validation Perplexity: %.3f\" % (i + 1, valid_perplexity))\n",
"\n",
"        test_perplexity = run_epoch(session, eval_model, test_queue, tf.no_op(), False, test_epoch_size)\n",
"        print(\"Test Perplexity: %.3f\" % test_perplexity)\n",
"\n",
"        coord.request_stop()\n",
"        coord.join(threads)\n",
"\n",
"if __name__ == \"__main__\":\n",
"    main()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
--------------------------------------------------------------------------------
/7.tf_TensorBoard.ipynb:
--------------------------------------------------------------------------------
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": { "collapsed": true },
"outputs": [],
"source": [
"import tensorflow as tf\n",
"\n",
"# Define a simple computation graph\n",
"input1 = tf.constant([1.0, 2.0, 3.0], name='input_one')\n",
"input2 = tf.Variable(tf.random_uniform([3]), name='input_two')\n",
"output = tf.add_n([input1, input2], name='add')\n",
"\n",
"# Create a log writer and write the current graph to the log\n",
"writer = tf.summary.FileWriter('log/test', tf.get_default_graph())\n",
"writer.close()"
]
},
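{
"cell_type": "markdown",
"metadata": {},
"source": [
"To inspect the graph written by the cell above, TensorBoard can be launched from a shell with `tensorboard --logdir=log/test` (the log directory used above) and opened in a browser, by default at `http://localhost:6006`."
]
},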
23 | {
24 | "cell_type": "markdown",
25 | "metadata": {
26 | "collapsed": true
27 | },
28 | "source": [
29 | "### Visualizing the TensorFlow Computation Graph"
30 | ]
31 | },
32 | {
33 | "cell_type": "code",
34 | "execution_count": 1,
35 | "metadata": {
36 | "collapsed": true
37 | },
38 | "outputs": [],
39 | "source": [
40 | "# All nodes in the same namespace of a TF computation graph are collapsed into a single node, e.g. via tf.variable_scope()\n",
41 | "import tensorflow as tf\n",
42 | "\n",
43 | "# Inputs are defined in their own namespaces so that TensorBoard can organize the graph's nodes by namespace\n",
44 | "with tf.variable_scope('input_1'):\n",
45 | "    input1 = tf.constant([1.0, 2.0, 3.0], name='input_one')\n",
46 | "# The operations and nodes below all collapse into the input_2 node\n",
47 | "with tf.variable_scope('input_2'):\n",
48 | "    input2_1 = tf.Variable(tf.random_uniform([3]), name='input_two')\n",
49 | "    input2 = tf.multiply(input2_1, 3.0, name='multiply')\n",
50 | "output = tf.add_n([input1, input2], name='add')\n",
51 | "writer = tf.summary.FileWriter('log/test', tf.get_default_graph())\n",
52 | "writer.close()"
53 | ]
54 | },
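Since the distinction matters here, a small self-contained sketch (an editor's addition) of how the two scoping mechanisms prefix node names; these prefixes are what TensorBoard uses to collapse a scope into one expandable node:

```python
import tensorflow as tf

# tf.name_scope prefixes op names but does not affect tf.get_variable,
# while tf.variable_scope prefixes both ops and variables.
with tf.name_scope('foo'):
    a = tf.constant(1.0, name='a')       # op name becomes 'foo/a'
with tf.variable_scope('bar'):
    v = tf.get_variable('v', shape=[1])  # variable name becomes 'bar/v'

print(a.name)  # foo/a:0
print(v.name)  # bar/v:0
```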
55 | {
56 | "cell_type": "markdown",
57 | "metadata": {
58 | "collapsed": true
59 | },
60 | "source": [
61 | "### Visualizing Monitored Metrics"
62 | ]
63 | },
64 | {
65 | "cell_type": "code",
66 | "execution_count": null,
67 | "metadata": {
68 | "collapsed": true
69 | },
70 | "outputs": [],
71 | "source": [
72 | "# Write runtime information from a TensorFlow program into TensorBoard log files\n",
73 | "import tensorflow as tf\n",
74 | "from tensorflow.examples.tutorials.mnist import input_data\n",
75 | "\n",
76 | "summary_dir = 'log/test'\n",
77 | "batch_size = 100\n",
78 | "train_steps = 30000"
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": null,
84 | "metadata": {
85 | "collapsed": true
86 | },
87 | "outputs": [],
88 | "source": [
89 | "# Generate monitoring information for a variable and define the ops that log it; var is the tensor to record and name is the chart name shown in the visualization\n",
90 | "\n",
91 | "def variable_summaries(var, name):\n",
92 | "    # Put all the monitoring ops under the same namespace\n",
93 | "    with tf.name_scope('summaries'):\n",
94 | "        # Record the distribution of the tensor's values. Given a chart name and a tensor, tf.summary.histogram generates a Summary protocol buffer\n",
95 | "        # After the Summary is written to the TensorBoard log file, a chart with that name appears under the HISTOGRAMS tab.\n",
96 | "        tf.summary.histogram(name, var)\n",
97 | "\n",
98 | "        # Define the op that logs the mean; its tag is 'mean/'+name, where mean acts as a namespace.\n",
99 | "        # Metrics in the same namespace are grouped into the same panel, and name indicates which variable the metric belongs to\n",
100 | "        mean = tf.reduce_mean(var)\n",
101 | "        tf.summary.scalar('mean/'+name, mean)\n",
102 | "        # Compute the variable's standard deviation and define the op that logs it\n",
103 | "        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n",
104 | "        tf.summary.scalar('stddev/'+name, stddev)"
105 | ]
106 | },
107 | {
108 | "cell_type": "code",
109 | "execution_count": null,
110 | "metadata": {
111 | "collapsed": true
112 | },
113 | "outputs": [],
114 | "source": [
115 | "# Define one layer of a fully connected network\n",
116 | "def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):\n",
117 | "    # Put everything belonging to the same layer under a single namespace\n",
118 | "    with tf.name_scope(layer_name):\n",
119 | "        # Declare the layer's weights and call the function that logs their monitoring information\n",
120 | "        with tf.name_scope('weights'):\n",
121 | "            weights = tf.Variable(tf.truncated_normal([input_dim, output_dim], stddev=0.1))\n",
122 | "            variable_summaries(weights, layer_name+'/weights')\n",
123 | "\n",
124 | "        # Declare the layer's biases and call the function that logs their monitoring information\n",
125 | "        with tf.name_scope('biases'):\n",
126 | "            biases = tf.Variable(tf.constant(0.0, shape=[output_dim]))\n",
127 | "            variable_summaries(biases, layer_name+'/biases')\n",
128 | "        with tf.name_scope('Wx_plus_b'):\n",
129 | "            preactivate = tf.matmul(input_tensor, weights)+biases\n",
130 | "            # Record the distribution of the layer's outputs before they are fed into the activation function\n",
131 | "            tf.summary.histogram(layer_name+'/pre_activations', preactivate)\n",
132 | "        activations = act(preactivate, name='activations')\n",
133 | "        # Record the distribution of the outputs after the activation function\n",
134 | "        tf.summary.histogram(layer_name+'/activations', activations)\n",
135 | "        return activations"
136 | ]
137 | },
138 | {
139 | "cell_type": "code",
140 | "execution_count": null,
141 | "metadata": {
142 | "collapsed": true
143 | },
144 | "outputs": [],
145 | "source": [
146 | "def main():\n",
147 | "    mnist = input_data.read_data_sets('data/MNIST', one_hot=True)\n",
148 | "    # Define the inputs\n",
149 | "    with tf.name_scope('input'):\n",
150 | "        x = tf.placeholder(tf.float32, [None, 784], name='x_input')\n",
151 | "        y = tf.placeholder(tf.float32, [None, 10], name='y_output')\n",
152 | "\n",
153 | "    # Reshape the input vectors back into pixel matrices and write the images into the log\n",
154 | "    with tf.name_scope('input_reshape'):\n",
155 | "        image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])\n",
156 | "        tf.summary.image('input', image_shaped_input, 10)\n",
157 | "\n",
158 | "    hidden1 = nn_layer(x, 784, 500, 'layer1')\n",
159 | "    y_hat = nn_layer(hidden1, 500, 10, 'layer2', act=tf.identity)\n",
160 | "\n",
161 | "    # Compute the cross entropy and log it\n",
162 | "    with tf.name_scope('cross_entropy'):\n",
163 | "        cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_hat, labels=y))\n",
164 | "        tf.summary.scalar('cross_entropy', cross_entropy)\n",
165 | "\n",
166 | "    with tf.name_scope('train'):\n",
167 | "        train_step = tf.train.AdamOptimizer(0.001).minimize(cross_entropy)\n",
168 | "\n",
169 | "    # Compute the model's accuracy on the given data set and log it\n",
170 | "    with tf.name_scope('accuracy'):\n",
171 | "        correct_prediction = tf.equal(tf.argmax(y_hat, 1), tf.argmax(y, 1))\n",
172 | "        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n",
173 | "        tf.summary.scalar('accuracy', accuracy)\n",
174 | "\n",
175 | "    # merge_all gathers every summary op, so one run of it in a Session executes all of them and yields one record to write\n",
176 | "    merged = tf.summary.merge_all()\n",
177 | "\n",
178 | "    with tf.Session() as sess:\n",
179 | "        summary_writer = tf.summary.FileWriter(summary_dir, sess.graph)\n",
180 | "        tf.global_variables_initializer().run()\n",
181 | "\n",
182 | "        for i in range(train_steps):\n",
183 | "            xs, ys = mnist.train.next_batch(batch_size)\n",
184 | "            # Run the training step together with all summary ops to obtain this step's logs.\n",
185 | "            summary, _ = sess.run([merged, train_step], feed_dict={x: xs, y: ys})\n",
186 | "            # Write the collected logs to the log file so that TensorBoard can pick up the information for this run.\n",
187 | "            summary_writer.add_summary(summary, i)\n",
188 | "            if i % 1000 == 0:\n",
189 | "                validation_acc = sess.run(accuracy, feed_dict={x: mnist.validation.images, y: mnist.validation.labels})\n",
190 | "                print((\"After %d training steps, validation accuracy is %g\" %(i, validation_acc)))\n",
191 | "        summary_writer.close()\n",
192 | "\n",
193 | "if __name__ == '__main__':\n",
194 | "    main()"
195 | ]
196 | },
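The `main()` above scales up a very simple write-merged-summaries loop. Here is a minimal, self-contained sketch of that loop on its own (an editor's addition; `log/scalar_demo` is an arbitrary directory name):

```python
import tensorflow as tf

# Minimal end-to-end example: write one scalar curve that TensorBoard
# will display under the SCALARS tab.
x = tf.placeholder(tf.float32, shape=[], name='x')
tf.summary.scalar('x_value', x)
merged = tf.summary.merge_all()

with tf.Session() as sess:
    writer = tf.summary.FileWriter('log/scalar_demo', sess.graph)
    for step in range(100):
        summary = sess.run(merged, feed_dict={x: step * 0.1})
        writer.add_summary(summary, step)
    writer.close()
```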
"file_extension": ".py", 282 | "mimetype": "text/x-python", 283 | "name": "python", 284 | "nbconvert_exporter": "python", 285 | "pygments_lexer": "ipython3", 286 | "version": "3.5.3" 287 | } 288 | }, 289 | "nbformat": 4, 290 | "nbformat_minor": 2 291 | } 292 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TensorFlow_Notes 2 | 3 | TensorFlow 学习笔记,从最开始的 TensorFlow 基础操作、MNIST手写字体识别到模型的持久化和TensorBoard可视化等内容提供了TF的学习笔记。 4 | 5 | - [TensorFlow基础操作与ML基本概念](https://github.com/Horatio-JSY/TensorFlow_Notes/blob/master/1.Basic_TensorFlow.ipynb) 6 | - [完整的全连接网络实现MNIST手写字体识别](https://github.com/Horatio-JSY/TensorFlow_Notes/blob/master/2.FCN_MNIST.ipynb) 7 | - [TF的变量管理与保存计算图等模型持久化操作](https://github.com/Horatio-JSY/TensorFlow_Notes/blob/master/3.Context_Saving.ipynb) 8 | - [卷积操作与卷积层,LeNet-5和Inception模块](https://github.com/Horatio-JSY/TensorFlow_Notes/blob/master/4.tf_CNN.ipynb) 9 | - [TFRecord数据与基本的图像处理方法(队列与多线程未完善)](https://github.com/Horatio-JSY/TensorFlow_Notes/blob/master/5.tf_TFRecord.ipynb) 10 | - [简单的循环神经网络与PTB数据集的语言建模](https://github.com/Horatio-JSY/TensorFlow_Notes/blob/master/6.tf_RNN.ipynb) 11 | - [TensorBoard基础,计算图与监控指标的可视化](https://github.com/Horatio-JSY/TensorFlow_Notes/blob/master/7.tf_TensorBoard.ipynb) 12 | -------------------------------------------------------------------------------- /tf_GAN.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": false 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "import tensorflow as tf\n", 12 | "from tensorflow.examples.tutorials.mnist import input_data\n", 13 | "import numpy as np\n", 14 | "import matplotlib.pyplot as plt\n", 15 | "import matplotlib.gridspec as gridspec\n", 16 | "import os" 17 | ] 18 | }, 19 | { 20 | "cell_type": "code", 21 | "execution_count": 2, 22 | "metadata": { 23 | "collapsed": true 24 | }, 25 | "outputs": [], 26 | "source": [ 27 | "#该函数将给出权重初始化的方法\n", 28 | "def variable_init(size):\n", 29 | " in_dim = size[0]\n", 30 | " w_stddev = 1. 
34 | {
35 | "cell_type": "code",
36 | "execution_count": 3,
37 | "metadata": {
38 | "collapsed": false
39 | },
40 | "outputs": [],
41 | "source": [
42 | "# Placeholder for the input matrix; the input layer has 784 units, None stands in for the batch size, and X holds the real input images\n",
43 | "X = tf.placeholder(tf.float32, shape=[None, 784])\n",
44 | "\n",
45 | "# Weights and biases of the discriminator; as they show, the discriminator is a three-layer fully connected network\n",
46 | "D_W1 = tf.Variable(variable_init([784, 128]))\n",
47 | "D_b1 = tf.Variable(tf.zeros(shape=[128]))\n",
48 | "\n",
49 | "D_W2 = tf.Variable(variable_init([128, 1]))\n",
50 | "D_b2 = tf.Variable(tf.zeros(shape=[1]))\n",
51 | "\n",
52 | "theta_D = [D_W1, D_W2, D_b1, D_b2]\n",
53 | "\n",
54 | "# The generator's input noise is a set of 100-dimensional vectors; None is determined by the batch size\n",
55 | "Z = tf.placeholder(tf.float32, shape=[None, 100])\n",
56 | "\n",
57 | "# Weights and biases of the generator. The input layer has 100 neurons receiving random noise,\n",
58 | "# and the output layer has 784 neurons emitting a handwritten-digit image. The generator is a three-layer fully connected network\n",
59 | "G_W1 = tf.Variable(variable_init([100, 128]))\n",
60 | "G_b1 = tf.Variable(tf.zeros(shape=[128]))\n",
61 | "\n",
62 | "G_W2 = tf.Variable(variable_init([128, 784]))\n",
63 | "G_b2 = tf.Variable(tf.zeros(shape=[784]))\n",
64 | "\n",
65 | "theta_G = [G_W1, G_W2, G_b1, G_b2]"
66 | ]
67 | },
68 | {
69 | "cell_type": "code",
70 | "execution_count": 4,
71 | "metadata": {
72 | "collapsed": true
73 | },
74 | "outputs": [],
75 | "source": [
76 | "# Define a function that generates an m*n random matrix whose elements follow a uniform distribution\n",
77 | "def sample_Z(m, n):\n",
78 | "    return np.random.uniform(-1., 1., size=[m, n])"
79 | ]
80 | },
81 | {
82 | "cell_type": "code",
83 | "execution_count": 5,
84 | "metadata": {
85 | "collapsed": true
86 | },
87 | "outputs": [],
88 | "source": [
89 | "# Define the generator\n",
90 | "def generator(z):\n",
91 | "    \n",
92 | "    # The first layer computes y = z*G_W1 + G_b1 and feeds it through the activation G_h1 = ReLU(y); G_h1 is the activation output of the hidden layer\n",
93 | "    G_h1 = tf.nn.relu(tf.matmul(z, G_W1) + G_b1)\n",
94 | "    \n",
95 | "    # The next two statements propagate from the hidden layer to the output layer; the output is a 784-element vector that, reshaped to 28×28, represents an image\n",
96 | "    G_log_prob = tf.matmul(G_h1, G_W2) + G_b2\n",
97 | "    G_prob = tf.nn.sigmoid(G_log_prob)\n",
98 | "    return G_prob"
99 | ]
100 | },
101 | {
102 | "cell_type": "code",
103 | "execution_count": 6,
104 | "metadata": {
105 | "collapsed": true
106 | },
107 | "outputs": [],
108 | "source": [
109 | "# Define the discriminator\n",
110 | "def discriminator(x):\n",
111 | "    \n",
112 | "    # Compute D_h1 = ReLU(x*D_W1 + D_b1); this layer's input is a 784-element vector\n",
113 | "    D_h1 = tf.nn.relu(tf.matmul(x, D_W1) + D_b1)\n",
114 | "    \n",
115 | "    # Compute the output layer's result. Because it uses the sigmoid function, the output is a scalar in [0,1] (see the weight definitions above),\n",
116 | "    # i.e. a judgment of whether the input image is real (=1) or fake (=0)\n",
117 | "    D_logit = tf.matmul(D_h1, D_W2) + D_b2\n",
118 | "    D_prob = tf.nn.sigmoid(D_logit)\n",
119 | "    \n",
120 | "    # Return the probability of being real and the output layer's pre-activation; D_logit is returned so it can be fed into tf.nn.sigmoid_cross_entropy_with_logits() when building the loss\n",
121 | "    return D_prob, D_logit"
122 | ]
123 | },
124 | {
125 | "cell_type": "code",
126 | "execution_count": 7,
127 | "metadata": {
128 | "collapsed": true
129 | },
130 | "outputs": [],
131 | "source": [
132 | "# This function plots the generated images\n",
133 | "def plot(samples):\n",
134 | "    fig = plt.figure(figsize=(4, 4))\n",
135 | "    gs = gridspec.GridSpec(4, 4)\n",
136 | "    gs.update(wspace=0.05, hspace=0.05)\n",
137 | "\n",
138 | "    for i, sample in enumerate(samples):\n",
139 | "        ax = plt.subplot(gs[i])\n",
140 | "        plt.axis('off')\n",
141 | "        ax.set_xticklabels([])\n",
142 | "        ax.set_yticklabels([])\n",
143 | "        ax.set_aspect('equal')\n",
144 | "        plt.imshow(sample.reshape(28, 28), cmap='Greys_r')\n",
145 | "\n",
146 | "    return fig\n"
147 | ]
148 | },
149 | {
150 | "cell_type": "markdown",
151 | "metadata": {},
152 | "source": [
153 | "#### The cross-entropy loss function\n",
154 | "The inputs to sigmoid_cross_entropy_with_logits are logits and labels. logits is the W * X output of the network, and it must not already have been passed through the sigmoid activation; labels has the same shape as logits and holds the ground-truth values. Letting x = logits and z = labels, the function computes z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))"
155 | ]
156 | },
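For completeness (an editor's note; the identity follows from $\sigma(x) = 1/(1+e^{-x})$ and matches the formulation given in the TensorFlow documentation), the expression above can be rewritten in the numerically stable form the implementation actually uses:

$$z \cdot \left(-\log \sigma(x)\right) + (1-z)\cdot\left(-\log(1-\sigma(x))\right) = \max(x, 0) - x\,z + \log\!\left(1 + e^{-|x|}\right)$$

which avoids overflow for large $|x|$.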
154 | "sigmoid_cross_entropy_with_logits函数的输入是logits和targets,logits就是神经网络模型中的 W * X矩阵,且不需要经过Sigmoid激活函数。而targets的shape和logits相同,即正确的标注值。若令x = logits、 z = labels,那么该函数的表达式为z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))" 155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "execution_count": 8, 160 | "metadata": { 161 | "collapsed": true 162 | }, 163 | "outputs": [], 164 | "source": [ 165 | "#输入随机噪声z而输出生成样本\n", 166 | "G_sample = generator(Z)\n", 167 | "\n", 168 | "#分别输入真实图片和生成的图片,并投入判别器以判断真伪\n", 169 | "D_real, D_logit_real = discriminator(X)\n", 170 | "D_fake, D_logit_fake = discriminator(G_sample)\n", 171 | "\n", 172 | "#以下为原论文的判别器损失和生成器损失,但本实现并没有使用该损失函数\n", 173 | "# D_loss = -tf.reduce_mean(tf.log(D_real) + tf.log(1. - D_fake))\n", 174 | "# G_loss = -tf.reduce_mean(tf.log(D_fake))\n", 175 | "\n", 176 | "# 我们使用交叉熵作为判别器和生成器的损失函数,因为sigmoid_cross_entropy_with_logits内部会对预测输入执行Sigmoid函数,\n", 177 | "#所以我们取判别器最后一层未投入激活函数的值,即D_h1*D_W2+D_b2。\n", 178 | "#tf.ones_like(D_logit_real)创建维度和D_logit_real相等的全是1的标注,真实图片。\n", 179 | "D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_real, labels=tf.ones_like(D_logit_real)))\n", 180 | "D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)))\n", 181 | "\n", 182 | "#损失函数为两部分,即E[log(D(x))]+E[log(1-D(G(z)))],将真的判别为假和将假的判别为真\n", 183 | "D_loss = D_loss_real + D_loss_fake\n", 184 | "\n", 185 | "#同样使用交叉熵构建生成器损失函数\n", 186 | "G_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)))\n", 187 | "\n", 188 | "#定义判别器和生成器的优化方法为Adam算法,关键字var_list表明最小化损失函数所更新的权重矩阵\n", 189 | "D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)\n", 190 | "G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)" 191 | ] 192 | }, 193 | { 194 | "cell_type": "code", 195 | "execution_count": 9, 196 | "metadata": { 197 | "collapsed": false 198 | }, 199 | "outputs": [ 200 | { 201 | "name": "stdout", 202 | "output_type": "stream", 203 | "text": [ 204 | "Extracting ./data/MNIST/train-images-idx3-ubyte.gz\n", 205 | "Extracting ./data/MNIST/train-labels-idx1-ubyte.gz\n", 206 | "Extracting ./data/MNIST/t10k-images-idx3-ubyte.gz\n", 207 | "Extracting ./data/MNIST/t10k-labels-idx1-ubyte.gz\n" 208 | ] 209 | } 210 | ], 211 | "source": [ 212 | "#选择训练的批量大小和随机生成噪声的维度\n", 213 | "mb_size = 128\n", 214 | "Z_dim = 100\n", 215 | "\n", 216 | "#读取数据集MNIST,并放在当前目录data文件夹下MNIST文件夹中,如果该地址没有数据,则下载数据至该文件夹\n", 217 | "mnist = input_data.read_data_sets(\"./data/MNIST/\", one_hot=True)" 218 | ] 219 | }, 220 | { 221 | "cell_type": "code", 222 | "execution_count": 10, 223 | "metadata": { 224 | "collapsed": false 225 | }, 226 | "outputs": [ 227 | { 228 | "name": "stdout", 229 | "output_type": "stream", 230 | "text": [ 231 | "Iter: 0\n", 232 | "D loss: 1.671\n", 233 | "G_loss: 1.718\n", 234 | "\n", 235 | "Iter: 2000\n", 236 | "D loss: 0.05008\n", 237 | "G_loss: 4.74\n", 238 | "\n", 239 | "Iter: 4000\n", 240 | "D loss: 0.3667\n", 241 | "G_loss: 4.85\n", 242 | "\n", 243 | "Iter: 6000\n", 244 | "D loss: 0.3974\n", 245 | "G_loss: 4.059\n", 246 | "\n", 247 | "Iter: 8000\n", 248 | "D loss: 0.7007\n", 249 | "G_loss: 2.628\n", 250 | "\n", 251 | "Iter: 10000\n", 252 | "D loss: 0.4421\n", 253 | "G_loss: 3.05\n", 254 | "\n", 255 | "Iter: 12000\n", 256 | "D loss: 0.7872\n", 257 | "G_loss: 2.562\n", 258 | "\n", 259 | "Iter: 14000\n", 260 | "D loss: 0.7155\n", 261 | "G_loss: 2.877\n", 262 | "\n", 263 | "Iter: 16000\n", 264 | "D loss: 
0.9827\n", 265 | "G_loss: 2.042\n", 266 | "\n", 267 | "Iter: 18000\n", 268 | "D loss: 0.7171\n", 269 | "G_loss: 1.966\n", 270 | "\n" 271 | ] 272 | } 273 | ], 274 | "source": [ 275 | "#打开一个会话运行计算图\n", 276 | "sess = tf.Session()\n", 277 | "\n", 278 | "#初始化所有定义的变量\n", 279 | "sess.run(tf.global_variables_initializer())\n", 280 | "\n", 281 | "#如果当前目录下不存在out文件夹,则创建该文件夹\n", 282 | "if not os.path.exists('out/'):\n", 283 | " os.makedirs('out/')\n", 284 | "\n", 285 | "#初始化,并开始迭代训练,100W次\n", 286 | "i = 0\n", 287 | "for it in range(20000):\n", 288 | " \n", 289 | " #每2000次输出一张生成器生成的图片\n", 290 | " if it % 2000 == 0:\n", 291 | " samples = sess.run(G_sample, feed_dict={Z: sample_Z(16, Z_dim)})\n", 292 | "\n", 293 | " fig = plot(samples)\n", 294 | " plt.savefig('out/{}.png'.format(str(i).zfill(3)), bbox_inches='tight')\n", 295 | " i += 1\n", 296 | " plt.close(fig)\n", 297 | " \n", 298 | " #next_batch抽取下一个批量的图片,该方法返回一个矩阵,即shape=[mb_size,784],每一行是一张图片,共批量大小行\n", 299 | " X_mb, _ = mnist.train.next_batch(mb_size)\n", 300 | " \n", 301 | " #投入数据并根据优化方法迭代一次,计算损失后返回损失值\n", 302 | " _, D_loss_curr = sess.run([D_solver, D_loss], feed_dict={X: X_mb, Z: sample_Z(mb_size, Z_dim)})\n", 303 | " _, G_loss_curr = sess.run([G_solver, G_loss], feed_dict={Z: sample_Z(mb_size, Z_dim)})\n", 304 | "\n", 305 | "\n", 306 | " #每迭代2000次输出迭代数、生成器损失和判别器损失\n", 307 | " if it % 2000 == 0:\n", 308 | " print('Iter: {}'.format(it))\n", 309 | " print('D loss: {:.4}'. format(D_loss_curr))\n", 310 | " print('G_loss: {:.4}'.format(G_loss_curr))\n", 311 | " print()" 312 | ] 313 | }, 314 | { 315 | "cell_type": "code", 316 | "execution_count": null, 317 | "metadata": { 318 | "collapsed": true 319 | }, 320 | "outputs": [], 321 | "source": [] 322 | } 323 | ], 324 | "metadata": { 325 | "kernelspec": { 326 | "display_name": "Python 3", 327 | "language": "python", 328 | "name": "python3" 329 | }, 330 | "language_info": { 331 | "codemirror_mode": { 332 | "name": "ipython", 333 | "version": 3 334 | }, 335 | "file_extension": ".py", 336 | "mimetype": "text/x-python", 337 | "name": "python", 338 | "nbconvert_exporter": "python", 339 | "pygments_lexer": "ipython3", 340 | "version": "3.5.3" 341 | } 342 | }, 343 | "nbformat": 4, 344 | "nbformat_minor": 2 345 | } 346 | --------------------------------------------------------------------------------