├── 02_01_tf_gradient.ipynb
├── 02_02_tf_linear_regression.ipynb
├── 03_01_MNIST.ipynb
├── 04_01_More_Dense.ipynb
├── 04_02_More_Dense_Fashion_mnist.ipynb
├── 04_03_change_neural_count.ipynb
├── 04_04_change_epoch.ipynb
├── 04_05_activation_function.ipynb
├── 04_06_GridSearchCV.ipynb
├── 06_01_Model.ipynb
├── 07_01_Callback.ipynb
├── 08_01_TensorBoard.ipynb
├── 08_02_TensorBoard_Tuning.ipynb
├── 09_01_What_If_Tool_Notebook_Usage.ipynb
├── 0_video
│   ├── night.mp4
│   ├── pedestrians.mp4
│   └── 出處.txt
├── 10_01_Custom_Callback.ipynb
├── 10_02_Custom_Callback_loss.ipynb
├── 11_01_CNN_MNIST.ipynb
├── 11_02_Convolutions.ipynb
├── 11_03_CNN_Visualization.ipynb
├── 12_01_CatAndDog.ipynb
├── 13_01_Canvas.py
├── 13_02_CNN_model.py
├── 14_01_Keras_applications_1.ipynb
├── 14_02_Keras_applications2.ipynb
├── 15_01_Keras_applications3.ipynb
├── 15_02_Mask_Detection.ipynb
├── 17_01_Tensorflow_Object_Detection_API_Test.ipynb
├── 18_01_Tensorflow_Object_Detection_API_Video.ipynb
├── 19_01_Image_Autoencoder.ipynb
├── 20_01_Image_segmentation.ipynb
├── 22_01_tf.data_basics.ipynb
├── 23_01_Premade_Estimator.ipynb
├── 23_02_Text_Classification.ipynb
├── 24_01_debug.ipynb
├── 25_01_RNN.ipynb
├── 26_01_IMDB_Using_LSTM.ipynb
├── 26_02_IMDB_Using_LSTM_complete.ipynb
├── 26_03_Translation.ipynb
├── 27_01_CycleGAN.ipynb
├── 28_01_agent_env.py
├── 28_02_cartpole_random.py
├── 29_01_cartpole_deterministic.py
├── 29_02_Policy_Evaluation.ipynb
├── 29_03_Policy_Iteration.ipynb
├── 29_04_Value_Iteration.ipynb
├── 30_01_AutoKeras_MNIST.ipynb
├── 30_02_AutoKeras_Fashion_MNIST.ipynb
├── ImageDataset.ipynb
├── README.md
├── cmn-eng
│   └── cmn.txt
├── cnn_class.py
├── images
│   ├── Tiger.jpg
│   ├── Tiger2.jpg
│   ├── Tiger3.jpg
│   ├── bird01.jpg
│   ├── elephant.jpg
│   ├── elephant2.jpg
│   ├── input.jpg
│   ├── style.jpg
│   ├── with-mask.jpg
│   ├── without-mask.jpg
│   └── 太陽花.jpg
├── images_2
│   ├── detection1.png
│   ├── detection2.png
│   ├── image1.jpg
│   └── image2.jpg
├── lib
│   ├── atari
│   │   ├── __init__.py
│   │   ├── helpers.py
│   │   └── state_processor.py
│   └── envs
│       ├── blackjack.py
│       ├── cliff_walking.py
│       ├── gridworld.py
│       └── windy_gridworld.py
├── mnist_model.h5
├── myDigits
│   ├── 0.png
│   ├── 1.png
│   ├── 2.png
│   ├── 3.png
│   ├── 4.png
│   ├── 5.png
│   ├── 6.png
│   ├── 7.png
│   ├── 8.png
│   ├── 9.png
│   ├── bird.jpg
│   ├── conv_blur_matrix.png
│   └── conv_org.png
└── test_data
    ├── Sandal1.jpg
    ├── Sandal2.jpg
    ├── Trouser.jpg
    ├── bag1.jpg
    ├── pant1.jpg
    ├── shirt1.jpg
    └── t-shirt.jpg
/02_01_tf_gradient.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import numpy as np \n",
10 | "import tensorflow as tf "
11 | ]
12 | },
13 | {
14 | "cell_type": "markdown",
15 | "metadata": {},
16 | "source": [
17 | "## First derivative"
18 | ]
19 | },
20 | {
21 | "cell_type": "code",
22 | "execution_count": 3,
23 | "metadata": {},
24 | "outputs": [
25 | {
26 | "data": {
27 | "text/plain": [
28 | "6.0"
29 | ]
30 | },
31 | "execution_count": 3,
32 | "metadata": {},
33 | "output_type": "execute_result"
34 | }
35 | ],
36 | "source": [
37 | "# If x were declared as tf.constant, g.watch(x) would have to be added\n",
38 | "x = tf.Variable(3.0)\n",
39 | "\n",
40 | "# Automatic differentiation\n",
41 | "with tf.GradientTape() as g:\n",
42 | "    #g.watch(x)\n",
43 | "    y = x * x\n",
44 | "    \n",
45 | "# g.gradient(y, x) gets the gradient\n",
46 | "dy_dx = g.gradient(y, x) # Will compute to 6.0\n",
47 | "\n",
48 | "# Convert to NumPy array format for easy display\n",
49 | "dy_dx.numpy()"
50 | ]
51 | },
52 | {
53 | "cell_type": "markdown",
54 | "metadata": {},
55 | "source": [
56 | "## Second derivative"
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": 7,
62 | "metadata": {},
63 | "outputs": [
64 | {
65 | "data": {
66 | "text/plain": [
67 | "(6.0, 2.0)"
68 | ]
69 | },
70 | "execution_count": 7,
71 | "metadata": {},
72 | "output_type": "execute_result"
73 | }
74 | ],
75 | "source": [
76 | "x = tf.constant(3.0)\n",
77 | "with tf.GradientTape() as g:\n",
78 | "    g.watch(x)\n",
79 | "    with tf.GradientTape() as gg:\n",
80 | "        gg.watch(x)\n",
81 | "        y = x * x\n",
82 | "    dy_dx = gg.gradient(y, x) # Will compute to 6.0\n",
83 | "d2y_dx2 = g.gradient(dy_dx, x) # Will compute to 2.0\n",
84 | "\n",
85 | "dy_dx.numpy(), d2y_dx2.numpy()"
86 | ]
87 | },
88 | {
89 | "cell_type": "markdown",
90 | "metadata": {},
91 | "source": [
92 | "## Multiple variables"
93 | ]
94 | },
95 | {
96 | "cell_type": "code",
97 | "execution_count": 8,
98 | "metadata": {},
99 | "outputs": [
100 | {
101 | "data": {
102 | "text/plain": [
103 | "(6.0, 108.0)"
104 | ]
105 | },
106 | "execution_count": 8,
107 | "metadata": {},
108 | "output_type": "execute_result"
109 | }
110 | ],
111 | "source": [
112 | "x = tf.constant(3.0)\n",
113 | "with tf.GradientTape(persistent=True) as g:\n",
114 | "    g.watch(x)\n",
115 | "    y = x * x\n",
116 | "    z = y * y\n",
117 | "dz_dx = g.gradient(z, x) # 108.0 (4*x^3 at x = 3)\n",
118 | "dy_dx = g.gradient(y, x) # 6.0\n",
119 | "del g # Drop the reference to the tape\n",
120 | "\n",
121 | "dy_dx.numpy(), dz_dx.numpy()"
122 | ]
123 | },
124 | {
125 | "cell_type": "markdown",
126 | "metadata": {},
127 | "source": [
128 | "## PyTorch"
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": 1,
134 | "metadata": {},
135 | "outputs": [
136 | {
137 | "name": "stdout",
138 | "output_type": "stream",
139 | "text": [
140 | "tensor(6.)\n"
141 | ]
142 | }
143 | ],
144 | "source": [
145 | "import torch\n",
146 | "\n",
147 | "x = torch.tensor(3.0, requires_grad=True)\n",
148 | "y=x*x\n",
149 | "\n",
150 | "# Backpropagation\n",
151 | "y.backward()\n",
152 | "\n",
153 | "print(x.grad)"
154 | ]
155 | },
156 | {
157 | "cell_type": "code",
158 | "execution_count": null,
159 | "metadata": {},
160 | "outputs": [],
161 | "source": []
162 | }
163 | ],
164 | "metadata": {
165 | "kernelspec": {
166 | "display_name": "Python 3",
167 | "language": "python",
168 | "name": "python3"
169 | },
170 | "language_info": {
171 | "codemirror_mode": {
172 | "name": "ipython",
173 | "version": 3
174 | },
175 | "file_extension": ".py",
176 | "mimetype": "text/x-python",
177 | "name": "python",
178 | "nbconvert_exporter": "python",
179 | "pygments_lexer": "ipython3",
180 | "version": "3.6.8"
181 | }
182 | },
183 | "nbformat": 4,
184 | "nbformat_minor": 4
185 | }
186 | 
--------------------------------------------------------------------------------
/02_02_tf_linear_regression.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 2,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import numpy as np \n",
10 | "import tensorflow as tf "
11 | ]
12 | },
13 | {
14 | "cell_type": "code",
15 | "execution_count": 3,
16 | "metadata": {},
17 | "outputs": [],
18 | "source": [
19 | "# y_pred = W*X + b\n",
20 | "W = tf.Variable(0.0)\n",
21 | "b = tf.Variable(0.0)\n",
22 | "\n",
23 | "# Define the loss function\n",
24 | "def loss(y, y_pred):\n",
25 | "    return tf.reduce_mean(tf.square(y - y_pred))\n",
26 | "\n",
27 | "# Define the prediction function\n",
28 | "def predict(X):\n",
29 | "    return W * X + b\n",
30 | "    \n",
31 | "# Define the training function\n",
32 | "def train(X, y, epochs=40, lr=0.0001):\n",
33 | "    current_loss=0\n",
34 | "    # Run the training loop\n",
35 | "    for epoch in range(epochs):\n",
36 | "        with tf.GradientTape() as t:\n",
37 | "            t.watch(tf.constant(X))\n",
38 | "            current_loss = loss(y, predict(X))\n",
39 | "\n",
40 | "        # Get the gradients of W and b individually\n",
41 | "        dW, db = t.gradient(current_loss, [W, b])\n",
42 | "        \n",
43 | "        # Update the weights\n",
44 | "        # new weight = old weight - learning_rate * gradient\n",
45 | "        W.assign_sub(lr * dW) # W -= lr * dW\n",
46 | "        b.assign_sub(lr * db)\n",
47 | "\n",
48 | "        # Show the loss for each training epoch\n",
49 | "        print(f'Epoch {epoch}: Loss: {current_loss.numpy()}') "
50 | ]
51 | },
52 | {
53 | "cell_type": "code",
54 | "execution_count": 53,
55 | "metadata": {},
56 | "outputs": [],
57 | "source": [
58 | "# Generate random data\n",
59 | "# random linear data: 100 between 0-50\n",
60 | "n = 100\n",
61 | "X = np.linspace(0, 50, n) \n",
62 | "y = np.linspace(0, 50, n) \n",
63 | " \n",
64 | "# Adding noise to the random linear data \n",
65 | "X += np.random.uniform(-10, 10, n) \n",
66 | "y += np.random.uniform(-10, 10, n) "
67 | ]
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": 54,
72 | "metadata": {},
73 | "outputs": [
74 | {
75 | "data": {
76 | "text/plain": [
77 | "array([ 5.38294718, 1.94276417, -8.37501403, -6.40284991, 6.87768111,\n",
78 | " 0.12431388, 4.39094417, 12.78579777, -1.96043348, 6.35422106,\n",
79 | " -4.47947653, -1.12331781, 10.49331752, 13.40039593, 15.8816284 ,\n",
80 | " 10.92547935, -0.30156411, 3.66637014, 4.50511852, 1.41980001,\n",
81 | " 7.77366745, 15.32141817, 8.04980248, 3.26906279, 9.84402869,\n",
82 | " 21.34127217, 8.92269785, 14.56563513, 7.83764139, 12.45273703,\n",
83 | " 21.03533589, 22.3663821 , 16.57285645, 20.57792235, 19.98120153,\n",
84 | " 20.51642765, 22.06538254, 18.54817757, 21.61113336, 24.88900568,\n",
85 | " 17.39942682, 19.24425361, 22.12835657, 20.41988719, 21.5936741 ,\n",
86 | " 31.10920515, 19.81676031, 21.29832193, 34.22646822, 34.19005246,\n",
87 | " 17.3054497 , 24.82338141, 23.59069832, 35.75285691, 25.99587878,\n",
88 | " 34.16395673, 34.95407377, 38.3440525 , 32.34310143, 37.64683249,\n",
89 | " 23.23112083, 37.88796555, 23.99310685, 22.87758889, 31.23100008,\n",
90 | " 31.56768344, 24.06398027, 36.97790922, 32.55015751, 40.8206573 ,\n",
91 | " 34.33431791, 27.30742013, 29.84283604, 27.77896723, 38.63257119,\n",
92 | " 43.59655302, 37.76700934, 45.68674128, 41.50352776, 46.02302577,\n",
93 | " 40.61536233, 50.75921043, 42.75283561, 31.97195932, 38.13386709,\n",
94 | " 41.2131749 , 48.46682155, 41.21735819, 51.74794269, 39.32384974,\n",
95 | " 45.07161315, 39.03449967, 40.91495234, 40.03708033, 38.38996869,\n",
96 | " 55.74373029, 49.57960378, 45.2567044 , 43.55227553, 43.65622342])"
97 | ]
98 | },
99 | "execution_count": 54,
100 | "metadata": {},
101 | "output_type": "execute_result"
102 | }
103 | ],
104 | "source": [
105 | "X"
106 | ]
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": 55,
111 | "metadata": {},
112 | "outputs": [
113 | {
114 | "data": {
115 | "text/plain": [
116 | "array([ 3.98692946, 3.80554069, -5.01779136, 1.65616575, -5.79729205,\n",
117 | " 8.61274754, 6.2495736 , 1.29733753, -4.42140602, 0.59913235,\n",
118 | " 4.29121876, -3.86639114, 1.18279607, 7.77777927, 1.42172742,\n",
119 | " 15.01760263, 4.96066675, 8.35228537, 16.75597901, 5.9410508 ,\n",
120 | " 18.85238481, 7.29952268, 16.83631919, 12.05054103, 5.50667593,\n",
121 | " 3.94317849, 7.57582587, 11.27119617, 14.35786523, 12.25413196,\n",
122 | " 16.72603032, 17.59902786, 7.38957026, 18.41352868, 26.31809073,\n",
123 | " 27.00300486, 18.28667723, 20.79084282, 21.74185004, 26.14745228,\n",
124 | " 20.86659552, 23.76205408, 17.74653749, 21.07369246, 13.80165432,\n",
125 | " 31.97247331, 15.0478634 , 29.81188898, 16.658727 , 26.88285151,\n",
126 | " 34.65670357, 35.07948387, 22.2868203 , 28.01750151, 17.56725386,\n",
127 | " 37.33750636, 29.61389735, 25.67206426, 24.52502085, 24.27158806,\n",
128 | " 29.71007221, 30.42369212, 40.424231 , 24.28888218, 25.85017136,\n",
129 | " 23.60967625, 29.87577683, 43.42784449, 30.23469895, 29.65581381,\n",
130 | " 26.95485945, 44.41741978, 37.21564215, 44.64873703, 38.72754359,\n",
131 | " 40.38130649, 35.20879105, 39.34907675, 41.44490381, 37.89766773,\n",
132 | " 33.22047369, 39.11557906, 37.53721642, 32.60326037, 34.9513954 ,\n",
133 | " 37.41929671, 40.79252304, 35.58403649, 36.50133279, 43.68592609,\n",
134 | " 35.45733081, 45.23758925, 42.16872794, 46.88314349, 51.73691804,\n",
135 | " 52.58007295, 56.84315627, 40.44072212, 44.7703998 , 51.84605413])"
136 | ]
137 | },
138 | "execution_count": 55,
139 | "metadata": {},
140 | "output_type": "execute_result"
141 | }
142 | ],
143 | "source": [
144 | "y"
145 | ]
146 | },
147 | {
148 | "cell_type": "code",
149 | "execution_count": 56,
150 | "metadata": {},
151 | "outputs": [
152 | {
153 | "name": "stdout",
154 | "output_type": "stream",
155 | "text": [
156 | "Epoch 0: Loss: 811.0114135742188\n",
157 | "Epoch 1: Loss: 576.4406127929688\n",
158 | "Epoch 2: Loss: 415.0347900390625\n",
159 | "Epoch 3: Loss: 303.9730529785156\n",
160 | "Epoch 4: Loss: 227.55245971679688\n",
161 | "Epoch 5: Loss: 174.96817016601562\n",
162 | "Epoch 6: Loss: 138.7853546142578\n",
163 | "Epoch 7: Loss: 113.88824462890625\n",
164 | "Epoch 8: Loss: 96.75672912597656\n",
165 | "Epoch 9: Loss: 84.96862030029297\n",
166 | "Epoch 10: Loss: 76.8572769165039\n",
167 | "Epoch 11: Loss: 71.27587890625\n",
168 | "Epoch 12: Loss: 67.4352798461914\n",
169 | "Epoch 13: Loss: 64.79254150390625\n",
170 | "Epoch 14: Loss: 62.97401809692383\n",
171 | "Epoch 15: Loss: 61.72263717651367\n",
172 | "Epoch 16: Loss: 60.86150360107422\n",
173 | "Epoch 17: Loss: 60.268890380859375\n",
174 | "Epoch 18: Loss: 59.861045837402344\n",
175 | "Epoch 19: Loss: 59.58033752441406\n",
176 | "Epoch 20: Loss: 59.38710403442383\n",
177 | "Epoch 21: Loss: 59.25407409667969\n",
178 | "Epoch 22: Loss: 59.16246032714844\n",
179 | "Epoch 23: Loss: 59.09934616088867\n",
180 | "Epoch 24: Loss: 59.05583953857422\n",
181 | "Epoch 25: Loss: 59.02583312988281\n",
182 | "Epoch 26: Loss: 59.00510787963867\n",
183 | "Epoch 27: Loss: 58.99077606201172\n",
184 | "Epoch 28: Loss: 58.9808349609375\n",
185 | "Epoch 29: Loss: 58.97392272949219\n",
186 | "Epoch 30: Loss: 58.969085693359375\n",
187 | "Epoch 31: Loss: 58.965694427490234\n",
188 | "Epoch 32: Loss: 58.96327590942383\n",
189 | "Epoch 33: Loss: 58.961544036865234\n",
190 | "Epoch 34: Loss: 58.960269927978516\n",
191 | "Epoch 35: Loss: 58.959327697753906\n",
192 | "Epoch 36: Loss: 58.958595275878906\n",
193 | "Epoch 37: Loss: 58.95802307128906\n",
194 | "Epoch 38: Loss: 58.957550048828125\n",
195 | "Epoch 39: Loss: 58.957149505615234\n"
196 | ]
197 | },
198 | {
199 | "data": {
200 | "text/plain": [
201 | "(0.9381567, 0.033580042)"
202 | ]
203 | },
204 | "execution_count": 56,
205 | "metadata": {},
206 | "output_type": "execute_result"
207 | }
208 | ],
209 | "source": [
210 | "# reset W,b\n",
211 | "W = tf.Variable(0.0)\n",
212 | "b = tf.Variable(0.0)\n",
213 | "\n",
214 | "# Run the training\n",
215 | "train(X, y)\n",
216 | "\n",
217 | "# The optimal W and b\n",
218 | "W.numpy(), b.numpy()" 219 | ] 220 | }, 221 | { 222 | "cell_type": "code", 223 | "execution_count": 57, 224 | "metadata": {}, 225 | "outputs": [ 226 | { 227 | "data": { 228 | "text/plain": [ 229 | "" 230 | ] 231 | }, 232 | "execution_count": 57, 233 | "metadata": {}, 234 | "output_type": "execute_result" 235 | }, 236 | { 237 | "data": { 238 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXkAAAD8CAYAAACSCdTiAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8li6FKAAAgAElEQVR4nO3de3hU1bn48e9LiAIKJ6LAD8MlaaVClZuiYoMt6KmXSjH1SMVWxVql2suxfVpLrLZPW+kjPVqPl6NVWnrAakEUDIieWjRiK1UUJBbkIlqoBlAQikKJEML7+2MmYS57JjOz957Ze+b9PA9PMmtm9qwk+s6ad71rLVFVjDHGFKdOhe6AMcYY/1iQN8aYImZB3hhjipgFeWOMKWIW5I0xpohZkDfGmCLmSZAXkQoReVxE1ovIOhE5U0R6isgSEdkY/XqMF69ljDEmc16N5O8G/qiqg4HhwDqgDnhOVQcBz0VvG2OMySNxuxhKRHoArwOf0JiLicgGYKyqbhORvsBSVT3R1YsZY4zJSmcPrvEJYAfwvyIyHFgJ3AD0UdVtANFA37ujCx133HFaVVXlQZeMMaZ0rFy58gNV7eV0nxdBvjNwCvAdVV0uIneTRWpGRKYAUwAGDBjAihUrPOiSMcaUDhH5R6r7vMjJNwFNqro8evtxIkH//WiahujX7U5PVtUZqjpKVUf16uX4RmSMMSZHroO8qr4HvCsibfn2c4C1wCJgcrRtMrDQ7WsZY4zJjhfpGoDvAI+IyBHA34GvEXkDmSciXwfeASZ69FrGGGMy5EmQV9VGYJTDXee4vXZLSwtNTU18/PHHbi9V8rp06UK/fv0oLy8vdFeMMXni1UjeN01NTXTv3p2qqipEpNDdCS1VZefOnTQ1NVFdXV3o7hhj8iTwQf7jjz+2AO8BEeHYY49lx44dhe6KMYFVv2oLtz+zga27mzm+ois3nncitSMrC90tVwIf5AEL8B6x36MxqdWv2sJNC1bT3NIKwJbdzdy0YDVAqAO9bVBmjDHA7c9saA/wbZpbWrn9mQ0F6pE3LMgXwNFHHw3A1q1bueSSS9I+9q677mLfvn1ZXX/p0qWMHz8+5/4ZU4q27m7Oqj0sLMh7pLW1teMHJTj++ON5/PHH0z4mlyBvjMne8RVds2oPCwvyGdi8eTODBw9m8uTJDBs2jEsuuYR9+/ZRVVXFz3/+c8aMGcNjjz3G22+/zfnnn8+pp57KWWedxfr16wHYtGkTZ555Jqeddho//vGP46578sknA5E3iR/84AcMHTqUYcOGce+993LPPfewdetWxo0bx7hx4wD405/+xJlnnskpp5zCxIkT2bt3LwB//OMfGTx4MGPGjGHBggV5/g0ZE343nnciXcvL4tq6lpdx43nh3lcxFBOv7b77XWhs9PaaI0bAXXd1+LANGzYwc+ZMampquPrqq7n//vuBSO35iy++CMA555zDAw88wKBBg1i+fDnf/OY3aWho4IYbbuD666/nyiuv5L777nO8/owZM9i0aROrVq2ic+fO7Nq1i549e3LnnXfy/PPPc9xxx/HBBx8wbdo0nn32WY466ih++ctfcuedd/LDH/6Qa6+9loaGBk444QQuvfRS734/xpSItslVq64pUf3796empgaAyy+/nHvuuQegPaDu3buXv/71r0yceHhh7/79+wFYtmwZ8+fPB+CKK65g6tSpSdd/9tlnue666+jcOfIn6dmzZ9JjXn75ZdauXdvejwMHDnDmmWeyfv16qqurGTRoUHv/ZsyY4cnPbUwpqR1ZGfqgnihcQT6DEbdfEssP224fddRRABw6dIiKigoaU3zS6Kh8UVUzesznP/955syZE9fe2Nho5ZHGGEeWk8/QO++8w0svvQTAnDlzGDNmTNz9PXr0oLq6msceewyIBOTXX38dgJqaGubOnQvAI4884nj9c889lwceeICDBw8CsGvXLgC6d+/Onj17ABg9ejTLli3jrbfeAmDfvn28+eabDB48mE2bNvH222+3988YY8CCfMaGDBnC7NmzGTZsGLt27eL6669PeswjjzzCzJkzGT58OCeddBILF0Y23rz77ru57777OO200/jwww8dr3/NNdcwYMAAhg0bxvDhw/nDH/4AwJQpU7jgggsYN24cvXr1YtasWVx22WUMGzaM0aNHs379erp06cKMGTO48MILGTNmDAMHDvTvF2GMCRXXx/95adSoUZp4aMi6desYMmRIgXoUsXnzZsaPH8+aNWsK2g8vBOH3aYzxloisVFWnTSJtJG+MMcUsXBOvBVJVVVUUo3hjTPD4vSmaBXljjCmQfGyKZukaY4wpkHxsimYjeWNMTvKx93ox7u8ea+vuZvrtfo/7F07n5+dcy4p+J7W3e8WCvDEma/lIMxTr/u7tWluZ/9jNnPL3yHqaIds3tQd5LzdFs3RNDn76059yxx13pLy/vr6etWvX5rFHxuRXPtIMxbq/OwAPPwydO7cH+BsvuIHfnxLZHtzrTdEsyPvAgrwpdvnYe70o93dvagIRuOKKyO2aGupf/Qd//ewEBKis6MptFw+16pp0/Mrh/eIXv+Chhx6if//+9OrVi1NPPZXf/OY3zJgxgwMHDnDCCSfw+9//nsbGRhYtWsQLL7zAtGnTmD9/Pg0NDUmP69atmwc/rTGFcXxFV7Y4BFsv0wz5eI28OXQIJkyAp5463LZxI5xwArVA7agBvr20JyN5EdksIqtFpFFEVkTbeorIEhHZGP16jBevlU5bDm/L7maUwzm8+lVbXF135cqVzJ07l1WrVrFgwQJeffVVAC6++GJeffVVXn/9dYYMGcLMmTP5zGc+w4QJE7j99ttpbGzkk5/8pOPjjAmzfOy9XjT7uz/+OJSVHQ7w990HqnDCCXl5eS9H8uNU9YOY23XAc6o6XUTqoreT99j1ULocnpvR/F/+8he+9KUvtY++J0yYAMCaNWu45ZZb2L17N3v37uW8885zfH6mjzMmLPKx93ro93d/7z3o2/fw7REj4JVXoLw8r93wM11zETA2+v1sYCk+B3k/c3hOW/leddVV1NfXM3z4cGbNmsXSpUsdn5vp44wJk3zsvR7K/d1VYdIkmDfvcNvatVCgPaO8mnhV4E8islJEpkTb+qjqNoDo195OTxSRKSKyQkRW7Nixw1Un/D
qj8bOf/SxPPPEEzc3N7NmzhyeffBKAPXv20LdvX1paWuK2EI7dHjjd44wxRebJJ6FTp8MB/o47IkG/gJsCejWSr1HVrSLSG1giIuszfaKqzgBmQGQXSjeduPG8E+PqasGbHN4pp5zCpZdeyogRIxg4cCBnnXUWALfeeitnnHEGAwcOZOjQoe2BfdKkSVx77bXcc889PP744ykfZ4wpEh98AL16Hb49aBCsXg1HHlm4PkV5vtWwiPwU2AtcC4xV1W0i0hdYqqppo60XWw0X+wo5t2yrYWM8pApXXw2zZh1ua2yE4cPz2o10Ww27HsmLyFFAJ1XdE/3+XODnwCJgMjA9+nWh29fKRChzeMaY8Jk7Fy677PDtW2+FW24pXH9S8CJd0wd4Ijox2Rn4g6r+UUReBeaJyNeBd4CJaa5hjDHhsHkzVFcfvl1ZGal57xrM+n3XQV5V/w4kfTZR1Z3AOW6vH72WHVTtgSCdAmZMKCXGoYcfhq9+tTB9yVDgtzXo0qULO3futADlkqqyc+dOunTpUuiuGBM+//EfyQFeNfABHkKwrUG/fv1oamrCbXmlibxh9uvXr9DdMCWgaAogNmyAwYPj2957D/r0KUx/chD4IF9eXk51bP7LGBNoRbNFcOLI/aKLoL6+MH1xIfDpGmNMuIR+i+CvfMU5NRPCAA8hGMkbY8IltFsEv/MODBwY37Z6NZx8cmH64xEbyRtjPOXX9iK+EokP8DU1kdF7yAM82EjeGOOB2InWf+taTnmZ0NJ6uCIusFsEf+c78D//E99WZJV8FuSNMa4kTrTubm6hvJNwTLdydu9rybm6xtcKHafUzPLlcPrp3lw/QCzIG5MHbQFry+5mykRoVaUyzKWFMZwmWlsOKd2O6Myqn5yb0zV9rdBJnFT91KcipZJFynLyxvgs9sQygNZoOsCrk8sKzY+JVl8qdJwWNB06VNQBHizIG+M7p4DVJlSlhSmkmlCt6Jb7CUievnHs2BEJ7gsWHG6bOzeSey+B7VIsyBvjs44CU5BLC+tXbaFmegPVdU9RM73B8VPHjeedSHlZcrDc+/HBnD+leFahIwK9E84rUoVLL82pX2FkQd4Yn3UUmIJaWhibZlJSp5dqR1Zy1BHJ03sthzTnTymuD/G+7rrkUfrBg0VXOZMJC/LG+MwpYLUJbGkh2eXFP2xucbxGrp9SakdWctvFQ6ms6IoAlRVdue3ioR1Pun70USS4P/jg4bZf/zoS3Muc/wbFzqprjPFZW2AKW3VNNnnx4yu6tk8sJ7bnKusDgJzy6yU4ck9kQd6YPAjjiWXZBO6Ozlf2teb9rLPgxRfj2/bvhyOO8Ob6IWfpGmOMo2zy4unSK5nm9rO2d29k9B4b4L/xjcjo3QJ8O88P8nbD6SBvY0zheDECr5ne4PiJAMg9ZWWpmTi+HuRtjCleXqSZ0k2+Zr2S1Sk1s2sXHHOMmy4WNQvyxhhfpcrtt2mr2Ekb5Pfvh8SjK7t0gebcqneK5uSqDFiQNyYDpRQUvOY0KZsoballitRM/aot3D69Ieu/SdGcXJUhzyZeRaRMRFaJyOLo7Z4iskRENka/2ucpE0q+TRyWiNhJ2VScKna2nn1+coDfvLk9wOf6Nwn9yVVZ8rK65gZgXcztOuA5VR0EPBe9bUzolFpQ8EPtyEqW1Z3NXZeO6Lhip7UVRDj++WfiHjfklv+jflck+eDmbxLak6ty5EmQF5F+wIXAb2OaLwJmR7+fDdR68VrG5FupBQU/dbiSVQQ6x2eRq6Yupmrq4rgg7uZvEsqTq1zwKid/F/BDoHtMWx9V3QagqttEpLfjM40JOD9Wc5Yyx4qd//xPuPfeuKYvXHUPa/t8Iq6tLYin+pt0EqF+1Za0ufWOFm4lCvt8jOuRvIiMB7ar6socnz9FRFaIyIodO3a47Y4xnnO9WZZJrW2734QAX3Pbc0kBHg6/sabaD6hVtcPcfDb74hTDfIzrxVAichtwBXAQ6AL0ABYApwFjo6P4vsBSVU37f4UthjJBFfbRnF9c/V7SLGhKrICByBtrbDCuX7WF7897vf0QlliVFV1ZVnd29j9QglQLuby6vlfSLYZyPZJX1ZtUtZ+qVgGTgAZVvRxYBEyOPmwysNDtaxlTKG0Th5umX8iyurMtwONilPvAA8kBvqEhbsVqJqPt2pGVHEoxSPVqvqQY5mP8rJOfDswTka8D7wATfXwtY0yepatwSfkmmMV2BJmstnWTm89EMczHeLpBmaouVdXx0e93quo5qjoo+nWXl69ljCmsrEa5IskBXjUuwGdyClUiN7n5TBTDfIztQmlMAOUS8PJ5PciwFHH+/KTg/q1r76R66uK4fuSa+mlL65Q5fELwYi1DzoeXBIhta2BMwHi97N6vZfwdliI6BN4ht/yfYz9ySv1E1Y6s5HuPNjre50XuPIxnAcSykbwxAeP1Clu/VuymHOWe0s8xNVNz23Mp++F2gjPfC5z8+GTkFwvyxgSM1xUdflaIxFUdnX10JMDHmj27Pe+erh9ug3Q+c+dhq523IG9MwHg9Ks3LKFcEzjgjvk0Vrrwyo364DdL5zJ2HbS8jC/LGBIzXo1JfR7mDBnVYNRPbj/JO8Y8t7yTtC6jcBul8rWUIW+28TbwaEzBtwcmrFbZeXq9thWunzZv4y4PXxN/5xBNQ28E+hIlzsTG3s5ngLOQK5LDVztsZr8bkoBS3OWjLRa+bdkHynRnEEa+2CMhkywM/Ffr1ndgZryb0ghRUS+1koTZjx3yadfs+imur+uGTiAj/ncHqUq/SHG7KLb3g9Sctv1mQN4EXtKBa6CCTd01N0L8/FTFNt429igfPuAQAhYx+dq/SHEHIiYepdt4mXk3gBa2aIQhBJm9EoH//uKaqqYvbA3ybTH52ryaAS+3QD7csyJvAC1pQLYkg0717UtXMwlc2Uz11sePDM/nZvSpzLIb9ZPLJ0jUm8IJWzZDtyUKhsn079OkT3zZpEsyZw0XAq1s+4pGX3yF2mrWjn93r+ZSw5cQLzaprTOAFsZohSBPBnslwG+BsfvYg/u2KUbrqGgvyJhSKMqh6wJPfS/fusHdvfNu//gXdurnuX1hOVgo7K6E0oRemaoZ8cV119OGHUFER39avH7z7rmd9DNp8SimyIG9MiMSO3DuJJJ1vmnEpZxYnNLkRtPmUUmTVNcaEROLuh04HWEMHo+QRI5ID/PbtvgR4sEqYILCRvDEh4bRewInjKLm52TnH7vOcnFXCFJ4FeWNCIpM8tuMoOU+pmVRsPqWwLF1jTEikymOXiTgvLqqpSQ7wb76Z1wBvCs9G8saERKpFWEk1562t0Nnhf20PgruVsoaP65G8iHQRkVdE5HUReUNEfhZt7ykiS0RkY/TrMe67a0zp6mhbgPpVWyIj98QAn+IQj2yF7dg7E+F6MZSICHCUqu4VkXLgReAG4GJgl6pOF5E64BhVnZruWrYYyuRDpqPRxMeNG9yL59fvCOQodt31P2DIA7+Ka/vy1+7iK9+5xLM+ulnYZJ8A/OXrYiiNvEu0LZcrj/5T4CJgbLR9N
rAUSBvkjfFbpguInB738MvvtN9f6O2O26lCp04MSWiuim4ktiVFzXwuQTfXhU1B2yq61Hgy8SoiZSLSCGwHlqjqcqCPqm4DiH7tneK5U0RkhYis2LFjhxfdMSalTLctzqRcseCHN4tAp/j/haumLm4P8OAcgHNNu+S6+2bQtoouNZ4EeVVtVdURQD/gdBE5OYvnzlDVUao6qlevXl50x5iUMh2NZrrsviDL8x98MKlq5qrvPBgX3Ns4BeBcg26uC5tsa4PC8rSEUlV3E0nLnA+8LyJ9AaJft3v5WsbkItPRaKbL7vO+PF8Errsuvk2V2q9dmHEAzjXo5roffEnsvx9gXlTX9BKRiuj3XYF/B9YDi4DJ0YdNBha6fS1j3Mp0NOr0uER5XZ4vklzzHlM1k00AThVcO4lQXfcUNdMbUqZuakdWsqzubDZNv5BldWdnlFO3rQ0Ky4vqmmFEJlbLiLxpzFPVn4vIscA8YADwDjBRVXelu5ZV15h8CFV1zaxZ8LWvxbfNmwcTJ+Z8Sac93hN5vee7Vdf4y/aTNyaMfNyOoKPdLMH2fA8T20/elLxQjSTzsNdM7H4y1XVPOT7GJkaLg+1dY4peaFZqzp+fHOCnTfMswN9Sv5pP3vQ0VXVP8cmbnuaW+kituk2MFjcbyZuil65kMDCjeZ9H77fUr45bzNWq2n67qA8mNxbkTfFqS9E4LcUHd+kIz9I/edoGeM5y5yP95ix/l2m1QwHb871YWZA3RSmTCpJc0xGeLNP/85/hc5+Lb7vuOvj1rzN6/WwDcqpTpFpjSjAtqBcnC/KmKHW0LYGbdITr9I+L0XuubzBlKSpoypz6kmE/bOQfDjbxaopSulRMpis1s712h+kfpwVNra1ZpWdy3ZLgsjP6Z9WeTmgmsg1gQd4UqVSpmLbabzejzqyrUTZtSg7uV17ZvoNkNnJ9g5lWO5TLRw9oH7mXiXD56AHt+fhs2IZj4WLpGlOU/KwYyeraHk+sHl/R1XEiuaJbOTXTG9KmT6bVDs0pqCeyDcfCxYK8KUptAS6XvHFH+eaMrt2tGzQnBL0DB6C83NXP5fQGU14m7P34IP/c1wL4t1972+8l1VuU1dUHkwV5U1TcTghmOrGZshpl50447rj4tksvhblzs/9hHDi9wfxr/0F2N7fEPc7rdQAdVStZXX1wWZA3RcOL0kZXlTN5qnlPDPSpXsHL9Em6aqVKq64JNJt4NUXDiwnBnPLNX/xicoDft8+XAA/J1S2peJk+SfXzC7ieyDb+siBvioYXE4JZVc7s2RMJ7otjTmSaNCkS3Lv6l5/O5GhCr9Mntr9NeFmQN0XDi0CU8QEXItCjR3ybKsyZk/Fr5Srdm1Y2JzZlww7+CC8L8qZoeBGIEk9YOqZbOUd27sT3Hm2kZnoDuwefnJya2b3bt9SMk3RrALI5sSkbuR79ZwrPDg0xRcXL5faxE7lHHGzhzV99KflBBfj/x6nSxeuTnEy42KEhpmRks9FWR28Ibbnvzb8cn/zkAg6O3KwBMKXHgrwpSZmUW457fj7T/nR/3PNGXz+L93scx6b8djeJ7RppMmVB3oSG08gbOh7ROj0vbT388L5QVsa0mPuaevRmzPW/AyL5aGPCwnLyJhSc8tDlZQIKLYcO/zecmJtOlb9OVYLolJqpmnq4RNJy3yaI0uXkXVfXiEh/EXleRNaJyBsickO0vaeILBGRjdGvx7h9LVO6nEbeLa0aF+AhefFTqhF74j7ql7/2VHKAf+896l9rsooSE2pepGsOAt9X1ddEpDuwUkSWAFcBz6nqdBGpA+qAqR68nilB2Sxoin1sque1qkZG9AcOsvm/vhh/51e/Cg8/DEBtH283+TIm31wHeVXdBmyLfr9HRNYBlcBFwNjow2YDS7Egb3KUaovdVI/t6HmVFV1ZdtM5yU8OUPrSGC94uhhKRKqAkcByoE/0DaDtjaC3l69lSovTQqfyMqG8U3zaJXHxk9Pzvv3qguQA/9e/WoA3Rcmz6hoRORqYD3xXVT+SDM+OFJEpwBSAAQMGeNUdk0YYz+dMVRvu1JZu7/dNAat5N8ZvnlTXiEg5sBh4RlXvjLZtAMaq6jYR6QssVdW068utusZ/JbtaMk/bABtTCH5X1wgwE1jXFuCjFgGTo99PBha6fS3jXsmdz/nQQ0kB/seX/Zj615rab9ev2kLN9Aaq656iZnqDHUhtiooX6Zoa4ApgtYg0Rtt+BEwH5onI14F3gIkevJZxqaTO53QYvbfVvD8eXd0KuD5oxJgg86K65kUiO5w6cShfMIWUqtqkqPYFTxPc28R+enH6ZPOzJ9/IKciHcb7DFDfbarjEFPW+4K+8khzgn3iC6oQA32br7uaUZZn/3NeSddom8cSmtk8Flv4xhWRBvsQU7b7gInDGGfFtqlBbm/YwkcSVr7GynacoufkOEwoW5EPKzWRh7chKltWdzX9fOgKg/UCMUI44Bw1KHr2rxlXOOH16AfjX/oO0pqmwyXaeIujzHTbBXJosyIeQF2mB0KcWNm2KBPe33jrc9pvfOJZFtn16OaZbeVz77uaWlJNJkP08RZDPQQ3939vkzIJ8CHmRFgh1akEEPvGJ+DZVuOaalE+pHVlJtyOS6wxSjePLO0nW8xRBnu8I9d/buGJBPoS8SAsEPbXg6O67k1Mzhw5lvKgp3c8WO8qv6FrO7ROHZz1PEeT5jlD+vY0n7NCQEPKiDDJUpZR79kCPHvFtM2fC1VdndZm0m5XVne2mh+2CemJTqP7exlM2kg8hL9ICuV4j75N3//Zv8QH+9NMjI/cOArxTP4OcTvFbKf/spc6CfAh5kRbI5Rp5nbx76aVIauajjw63tbbC8uUdPjVVP4HAplP8FuRUkvGXHf9XonJZmVkzvcH3dAfNzdCtW3zbmjVw0kkZXyIv/TQmQHzdoMyET64jct8n7448Mj7A/+hHkdRMFgE+XX9sktGUIgvyJSjXcjrf6sDvvDOSmjlw4HBbSwv84hc5XS7TftriIFMKLMiXoFxHup5P3u3fHwnu3//+4baFCyOj9865F35l0k9bHGRKhZVQlqBcy+lSnc6U0+Sdj4d4ZNLPdJ9mbDLSFBML8iXoxvNOdDwdKpMRues68FtvhZ/8JL5t71446qjcr+mgo35a3t6UCgvyJcjTEXmmWlrgiCPi2668EmbP9u8107DFQaZUWJAPOL8OofBjZWbKvgbwfFU3n2aMCRObeA2wME0OOvX19ZtvSw7wO3YUPMCDLQ4ypcNG8gEWpsnB2L6KHmLTf02If8C558IzzxSgZ6kFdZ8ZY7xkQT7AwjQ52Nanzb8cn3xnwsjdzkE1Jn8syAdYmCYHv73uGb6/6N64ts9NmcHB6k+yLKatLa3TNuqP3VfGAr0x3rOcfICFYudAVRBJCvBVUxezvXf/pL7a4RXG5JcnI3kR+R0wHtiuqidH23oCjwJVwGbgy6r6Ty9er1QUpNQxGw5VMzW3PcfW3c1UpuhrmFJQxhQDr9I1s4D/AR6KaasDnlPV6SJSF7091aPXKxmBnBx89FGYNCm+
7bXXYOTIuNSMkzCloIwpBp6ka1T1z8CuhOaLgLaVLrOBWi9eyxSYSHKAV4WRIzN6eihSUMYUET8nXvuo6jYAVd0mIr2dHiQiU4ApAAMGDPCxO8YVjxY0BT4FZUyR8ezQEBGpAhbH5OR3q2pFzP3/VNVj0l3DDg0JoCVLIjXusZ5/HsaOLUh3jDHJ0h0a4udI/n0R6RsdxfcFtvv4WoHmRV14QWrLA7gdgTEmO36WUC4CJke/nwws9PG1AsuLrQnyvr2BSHKAV7UAb0wIeRLkRWQO8BJwoog0icjXgenA50VkI/D56O2S40VdeN5qy9esSQ7uCxa4Cu52+pIxheVJukZVL0tx1zleXD/MvKgLz0ttuUNqpv61JlcpIberW237A2Pcs20NfOZFXbivteWf+hRs3BjXVDV1MQBdXW43kOkGa07BHLDtD4zxgG1r4DMv6sJ9qS3fujUyeo8J8N+o/VF7gAf3KaFMPoGkmm/42ZNv2PYHxnjARvI+86Iu3PPacofUTPXUxThl3t2khDL5BJJqtJ/Y5kV/jClFFuTzwIutCWKv0Zbe+N6jjdkF/G99C+6/P77t0CEQ4fjpDZ6nhDI5fSnboG3bHxiTHUvXFFAulSc5lVO+/35k9B4b4Bctat9BEvxJCWVy+lKqoF3Rtdy2PzDGA56tePVCKa14Taw8gUgQ6+gIupoUI+7Kiq4sqzs7+QlZLGhKnAAdN7gXz6/f4Wt1S7rfA9j2B8ZkolArXk0auR7tl3E55amnRnaGjNXSAp1T/8kTU0L5qG7paL7Bgrox7liQ91G6Ou9ca987nMzctY+i9K8AAAogSURBVAuOPTb+zttug7q6rPqez/NlA7mdsjFFwoK8TzoaCeda+552MtPDvWbscA9jioNNvMbwcgl+R1sR5DrR6TSZ+Zenf0rtKf3iH7hvn6vtCFK92Vh1izHhYiP5KK9z0B2NhBNz0RXdylGF7z3ayO3PbEg7ydie3ti7F7p3j7vvhepT+NGU27lx/S5XKZBMyh/DzLZMMKXCgnyU1znoTNIxbcE6pzcYh9RM+2pVDyZJi/lwj3xNKhsTBBbko7zOQWczEs7qDWbsWHjhhbim83/8BOsPlGf2/CwU64RoPieVjSk0y8lHeZ2DzmQhUJuM3mD274+M3mMDfHk5qLIhIcB3dN1SZ5PKppTYSD7Kjxx0piPhDlM7HVTN+LpLZRGy35cpJTaSj8pm5O21VJU2s9bOSw7wmzYlVc34sktlEbPflyklNpKPUagcdOIkZ2WPI3nx5s8nPzBFSWQxT5L6wX5fppSUzN41oSmZs8OzjTFZKvm9a0JRMnfHHXDjjfFt69bB4MGF6U+CbN4k3byhhubN2JiQKIkgH+iSOVXo5DA1EqDRezZvkm7eUEPxZmxMyJTExGtgS+ZEkgO8qicBPp9bNOT6WC+fa4xxVhJBPnD7sCxcmJx7f+UVz0bvOR0skkY2b5Ju3lAD+2ZsTIj5HuRF5HwR2SAib4lIdvvdeiRQJXMiUFsb36YKp53m2Ut4PSLO5k3SzRtq4N6MjSkCvgZ5ESkD7gMuAD4NXCYin/bzNZ0Usga+nUjy6N2j1EwiP7ZoyPRN0s0baqDejI0pEn5PvJ4OvKWqfwcQkbnARcBan183ScH2YXnttcgpTbGWLYPPfMa3l/R6RWc2deVuatCtft0Y7/laJy8ilwDnq+o10dtXAGeo6rdjHjMFmAIwYMCAU//xj3/41p+8K1DNe67nxxpjwildnbzfOXmHKEdclFPVGao6SlVH9erVy+fu5Mnll+ctNeMkEOkpY0wg+J2uaQL6x9zuB2z1+TUL5913YcCA+La//Q2GDs17V4p1m2BjTHb8DvKvAoNEpBrYAkwCvuLzaxZG4sh99Gh46aXC9MUYY6J8Tdeo6kHg28AzwDpgnqq+4edr5t0PfuCcmrEAb4wJAN+3NVDVp4Gn/X6dvPvwQ6ioiG97+WU444zC9McYYxyUxIpXz/XuHR/gx4+PjN4twBtjAqYkNijzzGOPwZe/HN926JBzqaQxxgSABflM/OtfcPTR8W2rV8PJJxemP8YYkyEL8h0ZPBg2xOz5csUV8NBDjg+1vdCNMUFjQT6Vp5+GCy+Mb2ttdd77HdsL3RgTTBbkE+3fD126xLe98orjLpGxI/dOIrQmrGgNzMEkxpiSVRTVNZ4dkPHb38YH+C9+MeU2wIl7ticG+Da2F7oxppBCP5L3JE2yeTNUV8e3tbRA59S/Hqc9253YXujGmEIK/Uje1QEZhw7BOefEB/jNmyOj9zQBHjIbodte6MaYQgt9kM/5gIxHHoGyMmhoiNz+3e8iwX3gwIxeN9UIvUwkcDs/enneqzEmXEKfrsn6gIymJugfszFmTQ288EIk4GfhxvNODMWe7Vb1Y0xpC/1IPuMj41RhwoT4AL9xI7z4YtYBHsKzZ7vX570aY8Il9CP5jI6Mmz8fLrnk8O377oNvftOT1w5aUE/k9XmvxphwCX2QhzTB9r33oG/fw7dHjIjUvJeX569zBeb1ea/GmHAJfbrGkSpMmhQf4NeuhVWrSirAQxbpLGNMUSq+IL94cWTrgUcfjdz+1a8iQX/IkML2q0DCMndgjPFHUaRrAPjgA4g9CHzQoMhOkUceWbg+BUQY5g6MMf4ojpF8c3N8gG9shDfftABvjCl5xTGSLy+Hyy+HE0+EW24pdG+MMSYwiiPId+4Mv/99oXthjDGBUxzpGmOMMY5cBXkRmSgib4jIIREZlXDfTSLylohsEJHz3HXTGGNMLtyma9YAFwMPxjaKyKeBScBJwPHAsyLyKVXteG9eY4wxnnE1klfVdarqtAnKRcBcVd2vqpuAt4DT3byWMcaY7PmVk68E3o253RRtM8YYk0cdpmtE5Fng/zncdbOqLkz1NIc2x/PxRGQKMAVgwIABHXXHGGNMFjoM8qr67zlctwmI2dOXfsDWFNefAcwAGDVqlPNBqcYYY3LiV7pmETBJRI4UkWpgEPCKT69ljDEmBVHNffAsIl8C7gV6AbuBRlU9L3rfzcDVwEHgu6r6fxlcbwfwjzQPOQ74IOcOF1aY+w7h7n+Y+w7h7n+Y+w7h6f9AVe3ldIerIJ9vIrJCVUd1/MjgCXPfIdz9D3PfIdz9D3PfIfz9B1vxaowxRc2CvDHGFLGwBfkZhe6AC2HuO4S7/2HuO4S7/2HuO4S//+HKyRtjjMlO2EbyxhhjshD4IF8MO12KyPnRPr4lInWF7k9HROR3IrJdRNbEtPUUkSUisjH69ZhC9jEVEekvIs+LyLrofzc3RNsD338R6SIir4jI69G+/yzaHvi+txGRMhFZJSKLo7fD1PfNIrJaRBpFZEW0LTT9TyXwQZ7DO13+ObYxYafL84H7RaQs/91LL9qn+4ALgE8Dl0X7HmSziPxOY9UBz6nqIOC56O0gOgh8X1WHAKOBb0V/32Ho/37gbFUdDowAzheR0YSj721uANbF3A5T3wHGqeqImLLJsPU/SeCDfBHsdHk68Jaq/l1VDwBzifQ9sFT1z8CuhOaLgNnR72cDtXntVIZ
UdZuqvhb9fg+RgFNJCPqvEXujN8uj/5QQ9B1ARPoBFwK/jWkORd/TCHv/gx/k0wjLTpdh6WdH+qjqNogEUqB3gfvTIRGpAkYCywlJ/6PpjkZgO7BEVUPTd+Au4IfAoZi2sPQdIm+ofxKRldGNEyFc/XcUiDNe/d7pssDC0s+iIiJHA/OJbKnxkYjTnyF4ogfrjBCRCuAJETm50H3KhIiMB7ar6koRGVvo/uSoRlW3ikhvYImIrC90h7wQiCDv906XBRaWfnbkfRHpq6rbRKQvkZFmIIlIOZEA/4iqLog2h6b/AKq6W0SWEpkbCUPfa4AJIvIFoAvQQ0QeJhx9B0BVt0a/bheRJ4ikWkPT/1TCnK4Jy06XrwKDRKRaRI4gMlm8qMB9ysUiYHL0+8lAqk9YBSWRIftMYJ2q3hlzV+D7LyK9oiN4RKQr8O/AekLQd1W9SVX7qWoVkf/GG1T1ckLQdwAROUpEurd9D5xLpOgjFP1PS1UD/Q/4EpHR8H7gfeCZmPtuBt4GNgAXFLqvaX6GLwBvRvt6c6H7k0F/5wDbgJbo7/7rwLFEqgs2Rr/2LHQ/U/R9DJF02N+Axui/L4Sh/8AwYFW072uAn0TbA9/3hJ9jLLA4TH0HPgG8Hv33Rtv/p2Hpf7p/tuLVGGOKWJjTNcYYYzpgQd4YY4qYBXljjCliFuSNMaaIWZA3xpgiZkHeGGOKmAV5Y4wpYhbkjTGmiP1/gnW7qOFJdOkAAAAASUVORK5CYII=\n", 239 | "text/plain": [ 240 | "
" 241 | ] 242 | }, 243 | "metadata": { 244 | "needs_background": "light" 245 | }, 246 | "output_type": "display_data" 247 | } 248 | ], 249 | "source": [ 250 | "import matplotlib.pyplot as plt \n", 251 | "\n", 252 | "plt.scatter(X, y, label='data')\n", 253 | "plt.plot(X, predict(X), 'r-', label='predicted')\n", 254 | "plt.legend()" 255 | ] 256 | }, 257 | { 258 | "cell_type": "code", 259 | "execution_count": null, 260 | "metadata": {}, 261 | "outputs": [], 262 | "source": [] 263 | } 264 | ], 265 | "metadata": { 266 | "kernelspec": { 267 | "display_name": "Python 3", 268 | "language": "python", 269 | "name": "python3" 270 | }, 271 | "language_info": { 272 | "codemirror_mode": { 273 | "name": "ipython", 274 | "version": 3 275 | }, 276 | "file_extension": ".py", 277 | "mimetype": "text/x-python", 278 | "name": "python", 279 | "nbconvert_exporter": "python", 280 | "pygments_lexer": "ipython3", 281 | "version": "3.6.8" 282 | } 283 | }, 284 | "nbformat": 4, 285 | "nbformat_minor": 4 286 | } 287 | -------------------------------------------------------------------------------- /07_01_Callback.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import tensorflow as tf\n", 10 | "mnist = tf.keras.datasets.mnist\n", 11 | "\n", 12 | "\n", 13 | "# 匯入 MNIST 手寫阿拉伯數字 訓練資料\n", 14 | "(x_train, y_train),(x_test, y_test) = mnist.load_data()\n", 15 | "\n", 16 | "\n", 17 | "# 訓練/測試資料的 X/y 維度\n", 18 | "x_train.shape, y_train.shape,x_test.shape, y_test.shape\n", 19 | "\n", 20 | "# 特徵縮放,使用常態化(Normalization),公式 = (x - min) / (max - min)\n", 21 | "# 顏色範圍:0~255,所以,公式簡化為 x / 255\n", 22 | "# 注意,顏色0為白色,與RGB顏色不同,(0,0,0) 為黑色。\n", 23 | "x_train_norm, x_test_norm = x_train / 255.0, x_test / 255.0" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": 2, 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "# 建立模型\n", 33 | "model = tf.keras.models.Sequential([\n", 34 | " tf.keras.layers.Flatten(input_shape=(28, 28)),\n", 35 | " tf.keras.layers.Dense(128, activation='relu'),\n", 36 | " tf.keras.layers.Dropout(0.2),\n", 37 | " tf.keras.layers.Dense(10, activation='softmax')\n", 38 | "])\n" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": 3, 44 | "metadata": {}, 45 | "outputs": [], 46 | "source": [ 47 | "# 設定優化器(optimizer)、損失函數(loss)、效能衡量指標(metrics)的類別\n", 48 | "model.compile(optimizer='adam',\n", 49 | " loss='sparse_categorical_crossentropy',\n", 50 | " metrics=['accuracy'])\n" 51 | ] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "metadata": {}, 56 | "source": [ 57 | "## EarlyStopping callbacks" 58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": 8, 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "# validation loss 三個執行週期沒改善就停止訓練\n", 67 | "my_callbacks = [\n", 68 | " tf.keras.callbacks.EarlyStopping(patience=3, monitor = 'val_accuracy'),\n", 69 | "]" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": 9, 75 | "metadata": {}, 76 | "outputs": [ 77 | { 78 | "name": "stdout", 79 | "output_type": "stream", 80 | "text": [ 81 | "Train on 48000 samples, validate on 12000 samples\n", 82 | "Epoch 1/20\n", 83 | "48000/48000 [==============================] - 4s 74us/sample - loss: 0.0333 - accuracy: 0.9890 - val_loss: 0.0852 - val_accuracy: 0.9780\n", 84 | "Epoch 2/20\n", 85 | "48000/48000 [==============================] - 3s 71us/sample - loss: 0.0306 - 
accuracy: 0.9904 - val_loss: 0.0787 - val_accuracy: 0.9803\n", 86 | "Epoch 3/20\n", 87 | "48000/48000 [==============================] - 3s 71us/sample - loss: 0.0301 - accuracy: 0.9894 - val_loss: 0.0897 - val_accuracy: 0.9774\n", 88 | "Epoch 4/20\n", 89 | "48000/48000 [==============================] - 3s 71us/sample - loss: 0.0285 - accuracy: 0.9905 - val_loss: 0.0902 - val_accuracy: 0.9778\n", 90 | "Epoch 5/20\n", 91 | "48000/48000 [==============================] - 3s 71us/sample - loss: 0.0263 - accuracy: 0.9911 - val_loss: 0.0887 - val_accuracy: 0.9781\n" 92 | ] 93 | } 94 | ], 95 | "source": [ 96 | "# 訓練 20 次,但實際只訓練 13次就停止了\n", 97 | "history = model.fit(x_train_norm, y_train, epochs=20, validation_split=0.2, callbacks=my_callbacks)\n" 98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": 14, 103 | "metadata": {}, 104 | "outputs": [ 105 | { 106 | "data": { 107 | "text/plain": [ 108 | "([,\n", 109 | " ,\n", 110 | " ,\n", 111 | " ,\n", 112 | " ],\n", 113 | " )" 114 | ] 115 | }, 116 | "execution_count": 14, 117 | "metadata": {}, 118 | "output_type": "execute_result" 119 | }, 120 | { 121 | "data": { 122 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYAAAAD4CAYAAADlwTGnAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8li6FKAAAgAElEQVR4nO3de3RV5bnv8e9Dwh0hUSMCIYA3LkUEu4iKVVSwirXKZuyLVMWqyPZCsDr2qUjPsdixK2g72mrAumml1erWs1tlq6fVKrRUZVdCkIBcRFAwBFGDQiISIJfn/DFnzMoiJCsksBLm7zNGRta8rncu5f3N+c5nrpi7IyIi0dMh1Q0QEZHUUACIiESUAkBEJKIUACIiEaUAEBGJqPRUN6A5TjzxRB84cGCqmyEi0q6sXLlyp7tnJc5vVwEwcOBACgsLU90MEZF2xcw+bGi+hoBERCJKASAiElEKABGRiFIAiIhElAJARCSiFAAiIhGlABARiah29RyAiEgkfPEFbNsGxcXB723b4LvfhVNOadW3UQCIiBxNBw5AScnBHXzt6+JiKCurv02HDnDuuQoAEZE2q6YGPvnk0B37tm3B8sQ/xHXiidC/PwwaBGPHBq9zcoLf/ftD376Q3vrdtQJARCQZ7rB796E79m3bgjP7ysr623XvXteZjxhRv2PPyYHsbOjWLSWHpAAQEQGoqAg68IY69trXe/bU3yY9PejA+/eHMWMOPnPPyYGMDDBLzTE1QQEgIse+qirYsaPxcfedOw/ernfvoBMfOhQuu6x+x96/f7A8Le3oH08rUQCISPvmDp991vCwTO3vjz6C6ur62/XqVdeZjx5dv2PPyYF+/aBz59Qc01GiABCRtm3PnoPP2BNfV1TU36Zz52BoJicHLr64fsdeexbfs2dqjqcNUQCISOpUVsL27Y1XzezaVX8bs6Aqpn9/GDkSvv3tg8/es7La7Lh7W6IAEJEjr6oK1q2DFSugsBBWrw46+B07Di6JPP74oBMfMAAuuODgcfe+faFjx9QcxzFGASD1FRfDa6/Bli0wfDiMGgWnndaub3TJUVZTA5s2BZ197U9RUd0wTa9ewf9Xl19+8Jl7dnZQNilHhQIg6srK4K9/hcWLg47/vfeC+WZ1Z2bduwf1y6NGBZfco0YF4dClS+raLW2DO3z4YXBWX9vZr1wJ5eXB8q5d4eyz4V//NbjROno0nHpq8GSrpJwCIGoqK2H58qCzf+01KCgIqiO6dw+eQLztNrj00uCsf8MGWLUqOHtbtQp+9zt49NFgP2lpQWncqFF1wTByJGRmpvb45Mj6+OO6jr62068tn+zYEc46C669FmKxoLMfOvSIPMEqrcM8cfytDYvFYq4/Ct9M7rBxY12Hv3Rp8EVTHToE/0gvvTT4Oe886NSp8X3V1ARDQ/GhsGpVMI5ba+DAuquE2mDIztYNufbo88+Ds/n4Dr+kJFjWoQMMG1Z3Vh+LBVeJx3jZZHtlZivdPXbQ/GQCwMwuBx4G0oBfu/vchOWZwELgVGAfcJO7rw2X3QncAhjwK3f/RTj/eOD/AgOBrcA/u3vC7f76FABJ+vRTWLKkrtOv/Ud7yil1Hf4ll7Te2fonn9QFQu3vTZvqhpBOOKH+8NGoUXDGGbqv0Jbs2QNvv11/KOf99+uWn3563Vn96NHBf0ON1bcbhx0AZpYGvAdcCpQAK4DJ7r4+bp2fAHvc/X4zGwLMd/dxZjYceBbIBQ4ArwC3ufsmM3sI+Nzd55rZTCDT3e9prC0KgEOoqIA33gg6+8WLg04Ygg5+3DgYPz7o9Fv5mwQbtWcPrFlTd5VQVATvvBN8EyIEY8OJ9xXOPDOYL0fW/v1BFU78MM6GDcEVHgQ3ZGvP6kePhq9/XUN77VxLAuA8YLa7XxZO3wvg7nPi1vkjMMfd3wyn3wfGABcCl7n71HD+/wH2u/tDZrYRuMjdd5hZH2Cpuw9urC0KgFBNTdCh1p7hv/lm8I+6Y0c4//y6s/yzz25bZ9mVlUFHEz98VFRU99W3HTrAkCH1h49GjQrKAuXwVFXB+vX1K3LeeafuC8uysurO6ms7/d69U9tmaXWHCoBk7s70A7bFTZcA5ySssxqYBLxpZrnAACAbWAv82MxOACqAK4DaHry3u+8ACEPgpEM0fBowDSAnJyeJ5h6jasszX3stGN6pvfE2fDjcfnvQ4V94Ydu+LO/YMTjrHzECpkwJ5rnD1q31h4+WLoWnn67bLienLgxqf+fk6L5Cotryy/hhnFWr6pdfxmJw9911HX7//vocIyyZAGjo/47Ey4a5wMNmVgS8A6wCqtx9g5k9CLwG7CEIiqrmNNDdFwALILgCaM627VpZWdAR1nb6teWZffrAhAl
Bhz9+fDDdnpkF34E+aBBMmlQ3v7Q0CIT4q4WXXqq7r5CZWf+ewqhRMHhwdCpO3IOTgvgbtCtX1l1NJZZfxmJBZZfKLyVOMv9aSoD+cdPZwEfxK7h7OXAjgJkZsCX8wd0fBx4Plz0Q7g/gEzPrEzcE9GkLjqP9iy/PXLw4eF1dHXxP+EUXBeWZ48fD174WjTO2rKy6oaxaX34ZDF/EXy08+ijs2xcs79IluI8Qf6UwYkTKvmu9VX38cf0z+8LCICSh7spq8uS6M3uVX0oSkrkHkE5wE3gcsJ3gJvB33H1d3DoZwF53P2BmtwAXuPuUcNlJ7v6pmeUArwLnufuu8MbxZ3E3gY939+831pZj6h5Aa5ZnRllVVfA5xt9TWLWq7vtjOnQIKo4Sq5BOPDG17W7Mrl1BBx/f4TdUfll7k1bll9KElpaBXgH8gqAMdKG7/9jMbgVw98fCG8VPAtXAeuDm2pJOM3sDOAGoBO529yXh/BOA/wJygGLgn9z988ba0e4DoLS07onbxYuDL7qCI1eeGVW1wyOJN5uLi+vW6dev/vDRyJHBMwxH++rqyy+D8sv4ipzNm+uWn3Za/Ru0o0ZBjx5Ht43S7rUoANqKdhcAFRVBhU7tWX5teWZGRlCeWdvpH83yzCj77LP69xWKiuqXP2Zk1D3RXBsMQ4a03heP7d8flMbGV+Qkll/G19qr/FJaiQLgaKipCeqrazv8N96oK88cM6auw//619tWeWaUVVQE9xXirxbWrKmrnOncue5L8eLvKzR1Fl5bfhk/jLNmzcHll7UdfiwGJ598ZI9VIksBcKQ0Vp5Z2+FfcIEu29uT6uqg6irxKy8++yxYbhY8GRs/fNSvX/2Hq1atgr17g/V79qx/Zh+LqYxVjioFQGspLw++PTOxPPPkk+s6/GOhPFPqcw/+cEnizeatW+uv17VrEArxD1ep/FJSrCUPgkVbZWXwjZm1HX58eebYsXDrrUGnH5XyzKgyC77ULjs7+AtUtXbtCs78S0qCoaFhw1R+Ke2G/k9NFF+euXhxcLb/xRdBBxCLwT331JVnqvROMjOD5zRE2iEFABy6PHPQoODhmtryTH0njYgcQ6IZAE2VZ/7gByrPFJFjXjQCoKnyzH//d5VnikjkRCMAbrkFFi4MXn/ta3V/9vDCC1WeKSKRFY0AuP76oGJn/Hjo2zfVrRERaROiEQCq0hAROYieThERiSgFgIhIRCkAREQiSgEgIhJRCgARkYhSAIiIRJQCQEQkohQAIiIRpQAQEYmopALAzC43s41mttnMZjawPNPMFpnZGjMrMLPhccvuMrN1ZrbWzJ4xsy7h/JFm9paZFZlZoZnltt5hiYhIU5oMADNLA+YDE4BhwGQzG5aw2iygyN1HAFOAh8Nt+wEzgJi7DwfSgGvCbR4C7nf3kcB94bSIiBwlyVwB5AKb3f0Ddz8APAtcnbDOMGAJgLu/Cww0s97hsnSgq5mlA92Aj8L5DvQMX/eKmy8iIkdBMgHQD9gWN10Szou3GpgEEA7lDACy3X078FOgGNgBlLn7q+E23wN+YmbbwnXubejNzWxaOERUWFpamtxRiYhIk5IJgIb+0rknTM8FMs2sCMgDVgFVZpZJcLUwCOgLdDez68JtbgPucvf+wF3A4w29ubsvcPeYu8eysrKSaK6IiCQjmQAoAfrHTWeTMFzj7uXufmM4nj8FyAK2AOOBLe5e6u6VwPPAmHCzG8JpgN8TDDWJiMhRkkwArABON7NBZtaJ4Cbui/ErmFlGuAxgKvC6u5cTDP2ca2bdzMyAccCGcL2PgLHh60uATS07FBERaY4m/yCMu1eZ2XTgzwRVPAvdfZ2Z3RoufwwYCjxpZtXAeuDmcNlyM/sD8DZQRTA0tCDc9S3Aw+HN4X3AtFY9MhERaZS5Jw7nt12xWMwLCwtT3QwRkXbFzFa6eyxxvp4EFhGJKAWAiEhEKQBERCJKASAiElEKABGRiFIAiIhElAJARCSiFAAiIhGlABARiSgFgIhIRCkAREQiSgEgIhJRCgARkYhSAIiIRJQCQEQkohQAIiIRpQAQEYkoBYCISEQpAEREIkoBICISUUkFgJldbmYbzWyzmc1sYHmmmS0yszVmVmBmw+OW3WVm68xsrZk9Y2Zd4pblhftdZ2YPtc4hiYhIMpoMADNLA+YDE4BhwGQzG5aw2iygyN1HAFOAh8Nt+wEzgJi7DwfSgGvCZRcDVwMj3P1rwE9b5YhERCQpyVwB5AKb3f0Ddz8APEvQcccbBiwBcPd3gYFm1jtclg50NbN0oBvwUTj/NmCuu+8Pt/u0RUciIiLNkkwA9AO2xU2XhPPirQYmAZhZLjAAyHb37QRn9sXADqDM3V8NtzkDuMDMlpvZ38xsdENvbmbTzKzQzApLS0uTPS4REWlCMgFgDczzhOm5QKaZFQF5wCqgyswyCa4WBgF9ge5mdl24TTqQCZwL/C/gv8zsoPdy9wXuHnP3WFZWVjLHJCIiSUhPYp0SoH/cdDZ1wzgAuHs5cCNA2IlvCX8uA7a4e2m47HlgDPBUuN/n3d2BAjOrAU4EdJovInIUJHMFsAI43cwGmVkngpu4L8avYGYZ4TKAqcDrYSgUA+eaWbcwGMYBG8L1/hu4JNz+DKATsLOlByQiIslp8grA3avMbDrwZ4IqnoXuvs7Mbg2XPwYMBZ40s2pgPXBzuGy5mf0BeBuoIhgaWhDueiGw0MzWAgeAG8KrAREROQqsPfW5sVjMCwsLU90MEZF2xcxWunsscb6eBBYRiSgFgIhIRCkAREQiSgEgIhJRCgARkYhSAIiIRJQCQEQkohQAIiIRpQAQEYkoBYCISEQpAEREIkoBICISUQoAEZGIUgCIiESUAkBEJKIUACIiEaUAEBGJKAWAiEhEKQBERCJKASAiElFJBYCZXW5mG81ss5nNbGB5ppktMrM1ZlZgZsPjlt1lZuvMbK2ZPWNmXRK2/TczczM7seWHIyIiyWoyAMwsDZgPTACGAZPNbFjCarOAIncfAUwBHg637QfMAGLuPhxIA66J23d/4FKguOWHIiIizZHMFUAusNndP3D3A8CzwNUJ6wwDlgC4+7vAQDPrHS5LB7qaWTrQDfgobrufA98H/PAPQUREDkcyAdAP2BY3XRLOi7camARgZrnAACDb3bcDPyU4w98BlLn7q+F6VwHb3X11Y29uZtPMrNDMCktLS5NoroiIJCOZALAG5iWesc8FMs2sCMgDVgFVZpZJcLUwCOgLdDez68ysG/AD4L6m3tzdF7h7zN1jWVlZSTRXRESSkZ7EOiVA/7jpbOoP4+Du5cCNAGZmwJbw5zJgi7uXhsueB8YQXDEMAlYHq5MNvG1mue7+cUsOSEREkpNMAKwATjezQcB2gpu434lfwcwygL3hPYKpwOvuXm5mxcC54Rl/BTAOKHT3d4CT4rbfSnCjeGcrHJOIiCShyQBw9yozmw78maCKZ6G7rzOzW8PljwFDgSfNrBpYD9wcLltuZn8A3gaqCIaGFhyRIxERkWYx9/ZTgBOLxbywsDDVzRARaVfMbK
W7xxLn60lgEZGIUgCIiESUAkBEJKIUACIiEaUAEBGJKAWAiEhEKQBERCJKASAiElEKABGRiFIAiIhElAJARCSiFAAiIhGlABARiSgFgIhIRCkAREQiSgEgIhJRCgARkYhSAIiIRJQCQEQkopIKADO73Mw2mtlmM5vZwPJMM1tkZmvMrMDMhsctu8vM1pnZWjN7xsy6hPN/YmbvhtssMrOM1jssERFpSpMBYGZpwHxgAjAMmGxmwxJWmwUUufsIYArwcLhtP2AGEHP34UAacE24zWvA8HCb94B7W344IiKSrGSuAHKBze7+gbsfAJ4Frk5YZxiwBMDd3wUGmlnvcFk60NXM0oFuwEfheq+6e1W4zltAdouOREREmiWZAOgHbIubLgnnxVsNTAIws1xgAJDt7tuBnwLFwA6gzN1fbeA9bgJebl7TRUSkJZIJAGtgnidMzwUyzawIyANWAVVmlklwtTAI6At0N7Pr6u3c7AdAFfB0g29uNs3MCs2ssLS0NInmiohIMpIJgBKgf9x0NuEwTi13L3f3G919JME9gCxgCzAe2OLupe5eCTwPjKndzsxuAK4ErnX3xFCp3fcCd4+5eywrK6sZhyYiIo1JJgBWAKeb2SAz60RwE/fF+BXMLCNcBjAVeN3dywmGfs41s25mZsA4YEO4zeXAPcBV7r63dQ5HRESSld7UCu5eZWbTgT8TVPEsdPd1ZnZruPwxYCjwpJlVA+uBm8Nly83sD8DbBMM8q4AF4a7nAZ2B14Js4C13v7U1D05ERA7NDjHy0ibFYjEvLCxMdTNERNoVM1vp7rHE+XoSWEQkohQAIiIRpQAQEYkoBYCISEQpAEREIkoBICISUQoAEZGIUgCIiESUAkBEJKIUACIiEaUAEBGJKAWAiEhEKQBERCJKASAiElEKABGRiFIAiIhElAJARCSiFAAiIhGlABARiSgFgNSz+fPN3PXKXVz1zFUs3bo01c0RkSMoqQAws8vNbKOZbTazmQ0szzSzRWa2xswKzGx43LK7zGydma01s2fMrEs4/3gze83MNoW/M1vvsKQ5aryGV99/lSv/80rOyD+DeSvmsXz7ci5+4mKuePoKVn+8OtVNFJEjoMkAMLM0YD4wARgGTDazYQmrzQKK3H0EMAV4ONy2HzADiLn7cCANuCbcZiawxN1PB5aE03IUfbH/C+YVzGPY/GFc9tRlFH5UyH1j76P4e8VsvXMrP7n0J7xV8haj/mMUUxZNYevuralusoi0omSuAHKBze7+gbsfAJ4Frk5YZxhBJ467vwsMNLPe4bJ0oKuZpQPdgI/C+VcDT4SvnwAmHvZRSLNs+mwTd758J/1+1o+8l/Po1aUXT/3DU3z4vQ+ZfdFs+hzXh64du/JvY/6N92e8z/fP/z6/X/97Bs8bzF2v3MXOvTtTfQgi0gqSCYB+wLa46ZJwXrzVwCQAM8sFBgDZ7r4d+ClQDOwAytz91XCb3u6+AyD8fdLhHoQ0rcZreGXzK1zx9BWcMe8Mfln4S64afBVv3fwWy6cu59oR19I5vfNB22V2zWTu+LlsytvE9SOu55GCRzj1kVP58es/5ssDX6bgSESktSQTANbAPE+YngtkmlkRkAesAqrCcf2rgUFAX6C7mV3XnAaa2TQzKzSzwtLS0uZsKkD5/nLyl+czZN4QJjw9gVUfr2L22NkU31XMU5Oe4pzsc5LaT3bPbH591a9Ze9taLhl0Cf/7r/+b0/JP4z8K/4PK6sojfBQiciQkEwAlQP+46WzqhnEAcPdyd7/R3UcS3APIArYA44Et7l7q7pXA88CYcLNPzKwPQPj704be3N0XuHvM3WNZWVnNOLRoe++z95jx8gyyf5bNjFdmcEK3E3h60tN8+L0P+eFFP+TkHicf1n6HZg1l0b8sYtlNyzg181Ru/eOtDP/lcJ5b/xzuiecFItKWJRMAK4DTzWyQmXUiuIn7YvwKZpYRLgOYCrzu7uUEQz/nmlk3MzNgHLAhXO9F4Ibw9Q3ACy07FKnxGv606U9MeHoCg+cN5rHCx5g4ZCIFUwv4+81/5ztnfodOaZ2a3lESxvQfwxs3vsGL17xIeod0/vH3/8i5j5+r0lGRdsSSOWszsyuAXxBU8Sx09x+b2a0A7v6YmZ0HPAlUA+uBm919V7jt/cC/AFUEQ0NT3X2/mZ0A/BeQQxAU/+TunzfWjlgs5oWFhYd3pMewsn1l/Lbot8xbMY/Nn2+mT48+3Ba7jWlfn0bvHr2b3kELVddU8+TqJ7lv6X2UlJcw4bQJzB0/lxG9Rxzx9xaRppnZSnePHTS/PV22KwDqe3fnu8wrmMcTq59gz4E9nJd9HjPOmcGkoZNa7Uy/OSoqK5i/Yj4PvPEAu/ft5roR1/Gji3/EwIyBR70tIlJHAXCMqPEaXt70Mo8UPMKr779Kp7ROXDP8GvJy84j1Pei/b0rsqtjFg8se5OHlD1PjNdwx+g5mXTCLE7udmOqmiUSSAqCdK9tXxm+KfsO8gnm8v+t9+h7X96thnpO6t80K2pLyEmYvnc1vin5Dj049uOf8e7jznDvp3ql7qpsmEikKgHZqQ+mGr4Z5vqz8kvP7n09ebh6Thk6iY1rHVDcvKetL1zNrySxe2PgCfXr0YfZFs7lp1E2kd0hPddNEIkEB0I5U11Tzp01/Ir8gn9c+eI3OaZ2ZfOZk8nLzOLvP2alu3mFbVryMexbfw7JtyzjjhDN44JIHmDR0EkGBmIgcKQqAdmD3vt0sXLWQ+Svm88GuD+h3XD9uH307t5x9C1ndj41nINydl957iXuX3Mv60vXk9svlofEPMXbg2FQ3TeSYpQBow9aXrid/eT5PrnmSvZV7+UbON5iRO4OJQya2m2Ge5kosHb3i9CuYM26OSkdFjgAFQBtTXVPNHzf9kUeWP8KSLUvonNaZa8+8lum50xnVZ1Sqm3fUVFRWMK9gHg+8+QBl+8q4/qzr+dFFP2JAxoBUN03kmKEAaCN2Vez6aphny+4tZPfM5o7RdzD17KmRLpPcVbGLuW/O5ZGCR1Q6KtLKFAAptu7TdeQX5PO7Nb9jb+VeLhxwIXm5eUwcMlHVMHFUOirS+hQAKVBdU81L771EfkE+f9nyF7qkd+HaM68lLzePs04+K9XNa9NUOirSehQAR9HnFZ/z+NuP82jho2zdvZWcXjncHrudqWdP5YRuJ6S6ee1KfOno4BMG88C4B/iHIf+g0lGRZlAAHAXvfPIO+QX5PLXmKSqqKrho4EXk5eZx1eCrdObaAomlo+f0O4cHxz+o0lGRJCkAjpCqmipe3Pgi+QX5LN26lK7pXYNhnnPyVNLYyqpqqoLS0b/ex/Yvtqt0VCRJCoBW9tnez3h81ePMXzGf4rJicnrlMH30dG4++2aO73p8qpt3TKuorCC/IJ85b85R6ahIEhQArWT1x6vJL8jn6XeeZl/VPi4eeDF5uXl8e/C3NcxzlNWWjj68/GEcZ/ro6cy6YJbus4gkUAC0QFVNFS+8+wL5Bfn87cO/0TW9K9ePuJ7pudM5s/eZR709Ut+2sm3MXjqb367+L
T069WDm+TO589w76daxW6qbJtImKAAOw869O/n127/m0RWPsq18GwMzBnLH6Du4adRNGuZpg9Z9uo5Zf5nFixtfpE+PPtx/0f3cOOpGXZlJ5CkAmqHo4yLyl+fzn2v/k31V+xg3aBx5uXlcecaVpHVIO+LvLy3zZvGb3LP4Hv5n2/+odFQEBUCTqmqqWLRhEfkF+bxR/AbdOnb7aphn+EnDj8h7ypFTWzo6c/FMNuzcwDn9zuGhSx/iwgEXprppIkedAuAQdu7dya9W/opHCx+lpLyEQRmDmJ47nRtH3khm18xWfS85+hJLR791+reYM26O7t1IpBwqADokufHlZrbRzDab2cwGlmea2SIzW2NmBWY2PJw/2MyK4n7Kzex74bKRZvZWOL/QzHJbepDNsWrHKm564Sayf5bNrL/MYsiJQ3jhmhfYlLeJu8+7W53/MSK9Qzo3jbqJTXmbeHD8gyzbtoyzHjuL7/73dykuK05180RSqskrADNLA94DLgVKgBXAZHdfH7fOT4A97n6/mQ0B5rv7uAb2sx04x90/NLNXgZ+7+8tmdgXwfXe/qLG2tPQKoLK6kkXvLuKR5Y+wbNsyunXsxg1n3cD03OkMyxp22PuV9uPzis+Dbx1d/ggA03Onc+837lXpqKTcvqp97N63m10Vu9i9b3e9n137djF5+GQGZQ46rH0f6gogmfKIXGCzu38Q7uhZ4Gpgfdw6w4A5AO7+rpkNNLPe7v5J3DrjgPfd/cNw2oGe4etewEfNOaDmKP2ylAUrF/DLwl+y/YvtnJJ5Cj/75s+4cdSNZHTJOFJvK23Q8V2P56FLHyIvN4/ZS2fz87d+zq/e/pVKR6XFKqsrD+q0D+rIK3axe3/CdPh6f/X+Rvc/oveIww6AQ0nmCuAfgcvdfWo4fT3BWfz0uHUeALq4+93hUM7/hOusjFtnIfC2u88Lp4cCfwaMYChqTFw4xL//NGAaQE5Oztc//PCgVZo0ZdEUfrfmd3zz1G+Sl5vHhNMmqJpHgPqlo32P68vssbNVOhpR1TXVlO0va7jTjp+3/+B5u/btYm/l3kb3n94hncwumWR0ySCjSwaZXcPXnTMOnlc7Ha7fq0svuqR3OexjO+ybwGb2T8BlCQGQ6+55cev0BB4GRgHvAEOAqe6+OlzeieAM/2u1VwVm9gjwN3d/zsz+GZjm7uMba8vhDgFt/nwzldWVDM0a2uxtJRoSS0fnjJvDxCETVTrajrg7Xxz4otFhlMbmle8vb3T/HaxDvc45sZNubF5m10y6pndN2f9PLQmA84DZ7n5ZOH0vgLvPOcT6BmwBRrh7eTjvauAOd/9m3HplQIa7e7hNmbv3bGiftdrCV0HIscvdeXHji9y75F427NzAudnn8uD4B1U6epS4O3sr9zbZcScOo9TOK9tfRo3XNPoePTv3TLrTTjwj79GpBx0sqbqZNqcl9wBWAKeb2SCCm7jXAN9J2HkGsNfdDwBTgddrO//QZOCZhP1+BIwFlgKXAJuSOxSRI8PMuHrI1XzrjG/xRNET/HDpDxn727FcecaVzBk3R8+DJGF/1f7Gx78bGUbZvW83lTWVje6/e8fu9TroPj36MCxrWKPDKPz/rb8AAASKSURBVLWde8/OPTX0myCp5wDCKp1fAGnAQnf/sZndCuDuj4VXCU8C1QQ3h292913htt2AbcAp7l4Wt89vEAwbpQP7gNvj7xk0RFcAcjTtrdxL/vJ85i6bS9m+MqacNYUfXfwjcnrlpLppR0xldeVB4+ANjXcf6ix9X9W+RvffKa3TV2fa9Trqzoce/6796dWlF53SOh2lT+LYogfBRA5TeyodrfEayveXN7vjrv3Zc2BPo/tPs7R6HXWy49+1r1tyI1MOnwJApIW2lW3jh0t/yBOrn+C4Tscx8xszmXHOjFYtHXV39hzY0+yOu7azL99fjnPof9OG0atLr4M76SbGv2t/unfsrhvj7ZACQKSVrP10LbOWzOKl916i73F9uf+i+/nuyO+S3iEdd//qgZ7GOurGxsGrvbrR9z+u03ENd9KdGy8lzOiSwXGdj2u3NzLl8CkARFrZGx++wT2L7+HvJX/npO4nAbB7324OVB9odLuu6V2bHO8+1FBKz8499YyCNFtLqoBEpAEXDLiAZTct44WNL/Dchue+qlBpbAy8V+dedE7vnOqmiwAKAJEWMTMmDpnIxCETU90UkWbTYKCISEQpAEREIkoBICISUQoAEZGIUgCIiESUAkBEJKIUACIiEaUAEBGJqHb1VRBmVgo0/29CBk4EdrZic451+ryaR59X8+jzar6WfGYD3D0rcWa7CoCWMLPChr4LQxqmz6t59Hk1jz6v5jsSn5mGgEREIkoBICISUVEKgAWpbkA7o8+refR5NY8+r+Zr9c8sMvcARESkvihdAYiISBwFgIhIRB3zAWBmC83sUzNbm+q2tAdm1t/M/mpmG8xsnZndmeo2tWVm1sXMCsxsdfh53Z/qNrUHZpZmZqvM7P+lui1tnZltNbN3zKzIzFr1b+Ie8/cAzOxCYA/wpLsPT3V72joz6wP0cfe3zew4YCUw0d3Xp7hpbZKZGdDd3feYWUfgTeBOd38rxU1r08zsbiAG9HT3K1PdnrbMzLYCMXdv9QfnjvkrAHd/Hfg81e1oL9x9h7u/Hb7+AtgA9Ettq9ouD+wJJzuGP8f2WVULmVk28C3g16luS9Qd8wEgh8/MBgKjgOWpbUnbFg5nFAGfAq+5uz6vxv0C+D5Qk+qGtBMOvGpmK81sWmvuWAEgDTKzHsBzwPfcvTzV7WnL3L3a3UcC2UCumWmo8RDM7ErgU3dfmeq2tCPnu/vZwATgjnBYu1UoAOQg4Vj2c8DT7v58qtvTXrj7bmApcHmKm9KWnQ9cFY5rPwtcYmZPpbZJbZu7fxT+/hRYBOS21r4VAFJPeFPzcWCDu/8s1e1p68wsy8wywtddgfHAu6ltVdvl7ve6e7a7DwSuAf7i7teluFltlpl1D4sxMLPuwDeBVqtoPOYDwMyeAf4ODDazEjO7OdVtauPOB64nODMrCn+uSHWj2rA+wF/NbA2wguAegEobpbX0Bt40s9VAAfBHd3+ltXZ+zJeBiohIw475KwAREWmYAkBEJKIUACIiEaUAEBGJKAWAiEhEKQBERCJKASAiElH/H05b6vilRl9HAAAAAElFTkSuQmCC\n", 123 | "text/plain": [ 124 | "
" 125 | ] 126 | }, 127 | "metadata": { 128 | "needs_background": "light" 129 | }, 130 | "output_type": "display_data" 131 | } 132 | ], 133 | "source": [ 134 | "# 對訓練過程的準確度繪圖\n", 135 | "import matplotlib.pyplot as plt\n", 136 | "plt.plot([1,2,3,4,5], history.history['accuracy'], 'r')\n", 137 | "plt.plot([1,2,3,4,5], history.history['val_accuracy'], 'g')\n", 138 | "plt.xticks([1,2,3,4,5])" 139 | ] 140 | }, 141 | { 142 | "cell_type": "code", 143 | "execution_count": 7, 144 | "metadata": {}, 145 | "outputs": [ 146 | { 147 | "data": { 148 | "text/plain": [ 149 | "[0.08174725284399464, 0.9779]" 150 | ] 151 | }, 152 | "execution_count": 7, 153 | "metadata": {}, 154 | "output_type": "execute_result" 155 | } 156 | ], 157 | "source": [ 158 | "# 評估,打分數\n", 159 | "score=model.evaluate(x_test_norm, y_test, verbose=0)\n", 160 | "score" 161 | ] 162 | }, 163 | { 164 | "cell_type": "markdown", 165 | "metadata": {}, 166 | "source": [ 167 | "## TensorBoard callback" 168 | ] 169 | }, 170 | { 171 | "cell_type": "code", 172 | "execution_count": 17, 173 | "metadata": {}, 174 | "outputs": [ 175 | { 176 | "name": "stdout", 177 | "output_type": "stream", 178 | "text": [ 179 | "Train on 48000 samples, validate on 12000 samples\n", 180 | "Epoch 1/10\n", 181 | "48000/48000 [==============================] - 4s 86us/sample - loss: 0.0260 - accuracy: 0.9908 - val_loss: 0.0920 - val_accuracy: 0.9780\n", 182 | "Epoch 2/10\n", 183 | "48000/48000 [==============================] - 4s 84us/sample - loss: 0.0244 - accuracy: 0.9917 - val_loss: 0.0928 - val_accuracy: 0.9783\n", 184 | "Epoch 3/10\n", 185 | "48000/48000 [==============================] - 4s 77us/sample - loss: 0.0245 - accuracy: 0.9918 - val_loss: 0.0895 - val_accuracy: 0.9792\n", 186 | "Epoch 4/10\n", 187 | "48000/48000 [==============================] - 4s 81us/sample - loss: 0.0224 - accuracy: 0.9926 - val_loss: 0.0940 - val_accuracy: 0.9792\n", 188 | "Epoch 5/10\n", 189 | "48000/48000 [==============================] - 4s 87us/sample - loss: 0.0224 - accuracy: 0.9922 - val_loss: 0.0924 - val_accuracy: 0.9803\n", 190 | "Epoch 6/10\n", 191 | "48000/48000 [==============================] - 4s 74us/sample - loss: 0.0217 - accuracy: 0.9923 - val_loss: 0.0924 - val_accuracy: 0.9793\n", 192 | "Epoch 7/10\n", 193 | "48000/48000 [==============================] - 4s 74us/sample - loss: 0.0213 - accuracy: 0.9927 - val_loss: 0.0888 - val_accuracy: 0.9800\n", 194 | "Epoch 8/10\n", 195 | "48000/48000 [==============================] - 4s 90us/sample - loss: 0.0197 - accuracy: 0.9931 - val_loss: 0.0999 - val_accuracy: 0.9778\n", 196 | "Epoch 9/10\n", 197 | "48000/48000 [==============================] - 3s 73us/sample - loss: 0.0200 - accuracy: 0.9935 - val_loss: 0.1012 - val_accuracy: 0.9790\n", 198 | "Epoch 10/10\n", 199 | "48000/48000 [==============================] - 4s 73us/sample - loss: 0.0204 - accuracy: 0.9932 - val_loss: 0.0997 - val_accuracy: 0.9793\n" 200 | ] 201 | } 202 | ], 203 | "source": [ 204 | "# 定義 tensorboard callback\n", 205 | "tensorboard_callback = [tf.keras.callbacks.TensorBoard(log_dir='.\\\\logs')]\n", 206 | "\n", 207 | "# 訓練 10 次\n", 208 | "history = model.fit(x_train_norm, y_train, epochs=10, validation_split=0.2, callbacks=tensorboard_callback)" 209 | ] 210 | }, 211 | { 212 | "cell_type": "code", 213 | "execution_count": 4, 214 | "metadata": {}, 215 | "outputs": [ 216 | { 217 | "name": "stdout", 218 | "output_type": "stream", 219 | "text": [ 220 | "Train on 48000 samples, validate on 12000 samples\n", 221 | "Epoch 1/10\n", 222 | "48000/48000 
[==============================] - 5s 95us/sample - loss: 0.3235 - accuracy: 0.9047 - val_loss: 0.1569 - val_accuracy: 0.9548\n", 223 | "Epoch 2/10\n", 224 | "48000/48000 [==============================] - 5s 99us/sample - loss: 0.1549 - accuracy: 0.9536 - val_loss: 0.1147 - val_accuracy: 0.9659\n", 225 | "Epoch 3/10\n", 226 | "48000/48000 [==============================] - 6s 117us/sample - loss: 0.1181 - accuracy: 0.9641 - val_loss: 0.0988 - val_accuracy: 0.9707\n", 227 | "Epoch 4/10\n", 228 | "48000/48000 [==============================] - 4s 84us/sample - loss: 0.0942 - accuracy: 0.9714 - val_loss: 0.0920 - val_accuracy: 0.9717\n", 229 | "Epoch 5/10\n", 230 | "48000/48000 [==============================] - 4s 85us/sample - loss: 0.0815 - accuracy: 0.9742 - val_loss: 0.0848 - val_accuracy: 0.9737\n", 231 | "Epoch 6/10\n", 232 | "48000/48000 [==============================] - 4s 86us/sample - loss: 0.0695 - accuracy: 0.9778 - val_loss: 0.0807 - val_accuracy: 0.9770\n", 233 | "Epoch 7/10\n", 234 | "48000/48000 [==============================] - 4s 79us/sample - loss: 0.0609 - accuracy: 0.9809 - val_loss: 0.0789 - val_accuracy: 0.9767\n", 235 | "Epoch 8/10\n", 236 | "48000/48000 [==============================] - 4s 86us/sample - loss: 0.0545 - accuracy: 0.9826 - val_loss: 0.0768 - val_accuracy: 0.9775\n", 237 | "Epoch 9/10\n", 238 | "48000/48000 [==============================] - 5s 94us/sample - loss: 0.0492 - accuracy: 0.9836 - val_loss: 0.0810 - val_accuracy: 0.9778\n", 239 | "Epoch 10/10\n", 240 | "48000/48000 [==============================] - 5s 95us/sample - loss: 0.0463 - accuracy: 0.9844 - val_loss: 0.0781 - val_accuracy: 0.9776\n" 241 | ] 242 | }, 243 | { 244 | "data": { 245 | "text/plain": [ 246 | "" 247 | ] 248 | }, 249 | "execution_count": 4, 250 | "metadata": {}, 251 | "output_type": "execute_result" 252 | } 253 | ], 254 | "source": [ 255 | "# 定義 ModelCheckpoint callback\n", 256 | "checkpoint_filepath = '.\\\\tmp\\\\checkpoint'\n", 257 | "model_checkpoint_callback = [tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_filepath, save_weights_only=True)]\n", 258 | "\n", 259 | "# 訓練 10 次\n", 260 | "model.fit(x_train_norm, y_train, epochs=10, validation_split=0.2, callbacks=model_checkpoint_callback)" 261 | ] 262 | }, 263 | { 264 | "cell_type": "code", 265 | "execution_count": 5, 266 | "metadata": {}, 267 | "outputs": [ 268 | { 269 | "name": "stdout", 270 | "output_type": "stream", 271 | "text": [ 272 | "Train on 48000 samples, validate on 12000 samples\n", 273 | "Epoch 1/5\n", 274 | "48000/48000 [==============================] - 4s 82us/sample - loss: 0.0401 - accuracy: 0.9870 - val_loss: 0.0770 - val_accuracy: 0.9783\n", 275 | "Epoch 2/5\n", 276 | "48000/48000 [==============================] - 4s 87us/sample - loss: 0.0406 - accuracy: 0.9867 - val_loss: 0.0824 - val_accuracy: 0.9768\n", 277 | "Epoch 3/5\n", 278 | "48000/48000 [==============================] - 4s 81us/sample - loss: 0.0370 - accuracy: 0.9877 - val_loss: 0.0807 - val_accuracy: 0.9777\n", 279 | "Epoch 4/5\n", 280 | "48000/48000 [==============================] - 4s 79us/sample - loss: 0.0335 - accuracy: 0.9885 - val_loss: 0.0769 - val_accuracy: 0.9796\n", 281 | "Epoch 5/5\n", 282 | "48000/48000 [==============================] - 5s 96us/sample - loss: 0.0328 - accuracy: 0.9888 - val_loss: 0.0831 - val_accuracy: 0.9778\n" 283 | ] 284 | }, 285 | { 286 | "data": { 287 | "text/plain": [ 288 | "" 289 | ] 290 | }, 291 | "execution_count": 5, 292 | "metadata": {}, 293 | "output_type": "execute_result" 294 | } 295 
| ], 296 | "source": [ 297 | "# 載入最近的檢查點的權重\n", 298 | "model.load_weights(checkpoint_filepath)\n", 299 | "# 訓練 5 次\n", 300 | "model.fit(x_train_norm, y_train, epochs=5, validation_split=0.2, callbacks=model_checkpoint_callback)" 301 | ] 302 | }, 303 | { 304 | "cell_type": "code", 305 | "execution_count": null, 306 | "metadata": {}, 307 | "outputs": [], 308 | "source": [] 309 | } 310 | ], 311 | "metadata": { 312 | "kernelspec": { 313 | "display_name": "Python 3", 314 | "language": "python", 315 | "name": "python3" 316 | }, 317 | "language_info": { 318 | "codemirror_mode": { 319 | "name": "ipython", 320 | "version": 3 321 | }, 322 | "file_extension": ".py", 323 | "mimetype": "text/x-python", 324 | "name": "python", 325 | "nbconvert_exporter": "python", 326 | "pygments_lexer": "ipython3", 327 | "version": "3.6.8" 328 | } 329 | }, 330 | "nbformat": 4, 331 | "nbformat_minor": 4 332 | } 333 | -------------------------------------------------------------------------------- /08_01_TensorBoard.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# TensorBoard 測試" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 7, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "# 刪除 log 目錄\n", 17 | "!rd .\\logs /S /Q " 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": 8, 23 | "metadata": {}, 24 | "outputs": [], 25 | "source": [ 26 | "import tensorflow as tf\n", 27 | "mnist = tf.keras.datasets.mnist\n", 28 | "\n", 29 | "\n", 30 | "# 匯入 MNIST 手寫阿拉伯數字 訓練資料\n", 31 | "(x_train, y_train),(x_test, y_test) = mnist.load_data()\n", 32 | "\n", 33 | "\n", 34 | "# 訓練/測試資料的 X/y 維度\n", 35 | "x_train.shape, y_train.shape,x_test.shape, y_test.shape\n", 36 | "\n", 37 | "# 特徵縮放,使用常態化(Normalization),公式 = (x - min) / (max - min)\n", 38 | "# 顏色範圍:0~255,所以,公式簡化為 x / 255\n", 39 | "# 注意,顏色0為白色,與RGB顏色不同,(0,0,0) 為黑色。\n", 40 | "x_train_norm, x_test_norm = x_train / 255.0, x_test / 255.0" 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": 9, 46 | "metadata": {}, 47 | "outputs": [], 48 | "source": [ 49 | "# 建立模型\n", 50 | "model = tf.keras.models.Sequential([\n", 51 | " tf.keras.layers.Flatten(input_shape=(28, 28)),\n", 52 | " tf.keras.layers.Dense(128, activation='relu'),\n", 53 | " tf.keras.layers.Dropout(0.2),\n", 54 | " tf.keras.layers.Dense(10, activation='softmax')\n", 55 | "])\n" 56 | ] 57 | }, 58 | { 59 | "cell_type": "code", 60 | "execution_count": 10, 61 | "metadata": {}, 62 | "outputs": [], 63 | "source": [ 64 | "# 設定優化器(optimizer)、損失函數(loss)、效能衡量指標(metrics)的類別\n", 65 | "loss_object = tf.keras.losses.SparseCategoricalCrossentropy()\n", 66 | "optimizer = tf.keras.optimizers.Adam()\n", 67 | "\n", 68 | "# Define 訓練及測試的效能衡量指標(Metrics)\n", 69 | "train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)\n", 70 | "train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy')\n", 71 | "test_loss = tf.keras.metrics.Mean('test_loss', dtype=tf.float32)\n", 72 | "test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('test_accuracy')" 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": 11, 78 | "metadata": {}, 79 | "outputs": [], 80 | "source": [ 81 | "def train_step(model, optimizer, x_train, y_train):\n", 82 | " # 自動微分\n", 83 | " with tf.GradientTape() as tape:\n", 84 | " predictions = model(x_train, training=True)\n", 85 | " loss = loss_object(y_train, predictions)\n", 86 | " grads = 
tape.gradient(loss, model.trainable_variables)\n", 87 | " optimizer.apply_gradients(zip(grads, model.trainable_variables))\n", 88 | "\n", 89 | " # 計算訓練的效能衡量指標\n", 90 | " train_loss(loss)\n", 91 | " train_accuracy(y_train, predictions)\n", 92 | "\n", 93 | "def test_step(model, x_test, y_test):\n", 94 | " # 預測\n", 95 | " predictions = model(x_test)\n", 96 | " # 計算損失\n", 97 | " loss = loss_object(y_test, predictions)\n", 98 | "\n", 99 | " # 計算測試的效能衡量指標\n", 100 | " test_loss(loss)\n", 101 | " test_accuracy(y_test, predictions)" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": 12, 107 | "metadata": {}, 108 | "outputs": [], 109 | "source": [ 110 | "import datetime\n", 111 | "\n", 112 | "current_time = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n", 113 | "# 指定訓練的 log 檔名\n", 114 | "train_log_dir = '.\\\\logs\\\\gradient_tape\\\\' + current_time + '\\\\train'\n", 115 | "# 指定測試的 log 檔名\n", 116 | "test_log_dir = '.\\\\logs\\\\gradient_tape\\\\' + current_time + '\\\\test'\n", 117 | "\n", 118 | "# 建立訓練/測試 log 檔的 writer\n", 119 | "train_summary_writer = tf.summary.create_file_writer(train_log_dir)\n", 120 | "test_summary_writer = tf.summary.create_file_writer(test_log_dir)" 121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "execution_count": 14, 126 | "metadata": {}, 127 | "outputs": [], 128 | "source": [ 129 | "# 將訓練/測試資料轉成 Tensorflow Dataset(注意:這裡用的是未正規化的 x_train/x_test,故下方準確率偏低,可改用 x_train_norm/x_test_norm)\n", 130 | "train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))\n", 131 | "test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))\n", 132 | "\n", 133 | "# 每次從 60000 筆訓練資料隨機抽出 64 筆\n", 134 | "train_dataset = train_dataset.shuffle(60000).batch(64)\n", 135 | "# 測試資料不必打亂,依序每批取 64 筆\n", 136 | "test_dataset = test_dataset.batch(64)" 137 | ] 138 | }, 139 | { 140 | "cell_type": "code", 141 | "execution_count": 15, 142 | "metadata": {}, 143 | "outputs": [ 144 | { 145 | "name": "stdout", 146 | "output_type": "stream", 147 | "text": [ 148 | "Epoch 1, Loss: 10.923014640808105, Accuracy: 32.018333435058594, Test Loss: 10.517812728881836, Test Accuracy: 34.68000030517578\n", 149 | "Epoch 2, Loss: 9.937530517578125, Accuracy: 38.22833251953125, Test Loss: 9.53105640411377, Test Accuracy: 40.77000045776367\n", 150 | "Epoch 3, Loss: 9.210996627807617, Accuracy: 42.731666564941406, Test Loss: 8.645798683166504, Test Accuracy: 46.18000030517578\n", 151 | "Epoch 4, Loss: 8.332685470581055, Accuracy: 48.17500305175781, Test Loss: 7.5365800857543945, Test Accuracy: 53.1099967956543\n", 152 | "Epoch 5, Loss: 7.85085391998291, Accuracy: 51.19333267211914, Test Loss: 7.9227752685546875, Test Accuracy: 50.790000915527344\n" 153 | ] 154 | } 155 | ], 156 | "source": [ 157 | "EPOCHS = 5\n", 158 | "\n", 159 | "for epoch in range(EPOCHS):\n", 160 | " for (x_train, y_train) in train_dataset:\n", 161 | " train_step(model, optimizer, x_train, y_train)\n", 162 | " with train_summary_writer.as_default():\n", 163 | " tf.summary.scalar('loss', train_loss.result(), step=epoch)\n", 164 | " tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)\n", 165 | "\n", 166 | " for (x_test, y_test) in test_dataset:\n", 167 | " test_step(model, x_test, y_test)\n", 168 | " with test_summary_writer.as_default():\n", 169 | " tf.summary.scalar('loss', test_loss.result(), step=epoch)\n", 170 | " tf.summary.scalar('accuracy', test_accuracy.result(), step=epoch)\n", 171 | " \n", 172 | " template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'\n", 173 | " print (template.format(epoch+1,\n", 174 | " train_loss.result(), \n", 
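The custom loop above writes scalars by hand; when a model is trained with `model.fit` instead, the built-in TensorBoard callback produces equivalent per-epoch curves. A minimal sketch under that assumption (a compiled model, and a `fit` subfolder that is an assumption following the same naming scheme):

tb_callback = tf.keras.callbacks.TensorBoard(
    log_dir='.\\logs\\fit\\' + current_time,  # assumed subfolder, same naming convention
    histogram_freq=1)                          # also log weight histograms once per epoch
# usage: model.compile(...) first, then
# model.fit(x_train_norm, y_train, epochs=5, validation_split=0.2, callbacks=[tb_callback])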
175 | " train_accuracy.result()*100,\n", 176 | " test_loss.result(), \n", 177 | " test_accuracy.result()*100))\n", 178 | "\n", 179 | " # Reset metrics every epoch\n", 180 | " train_loss.reset_states()\n", 181 | " test_loss.reset_states()\n", 182 | " train_accuracy.reset_states()\n", 183 | " test_accuracy.reset_states()\n" 184 | ] 185 | }, 186 | { 187 | "cell_type": "code", 188 | "execution_count": 16, 189 | "metadata": {}, 190 | "outputs": [], 191 | "source": [ 192 | "# 載入 TensorBoard notebook extension,即可在 jupyter notebook 啟動 Tensorboard\n", 193 | "%load_ext tensorboard" 194 | ] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "execution_count": 17, 199 | "metadata": {}, 200 | "outputs": [ 201 | { 202 | "data": { 203 | "text/plain": [ 204 | "ERROR: Timed out waiting for TensorBoard to start. It may still be running as pid 15784." 205 | ] 206 | }, 207 | "metadata": {}, 208 | "output_type": "display_data" 209 | } 210 | ], 211 | "source": [ 212 | "# 啟動 Tensorboard\n", 213 | "%tensorboard --logdir logs/gradient_tape" 214 | ] 215 | }, 216 | { 217 | "cell_type": "markdown", 218 | "metadata": {}, 219 | "source": [ 220 | "## 使用瀏覽器輸入以下網址,即可觀看訓練資訊:\n", 221 | "## http://localhost:6006/" 222 | ] 223 | }, 224 | { 225 | "cell_type": "code", 226 | "execution_count": 29, 227 | "metadata": {}, 228 | "outputs": [ 229 | { 230 | "name": "stdout", 231 | "output_type": "stream", 232 | "text": [ 233 | "成功: 處理程序 \"tensorboard.exe\" (PID 17852) 已經終止了。\n" 234 | ] 235 | } 236 | ], 237 | "source": [ 238 | "!taskkill /IM \"tensorboard.exe\" /F\n", 239 | "# or \n", 240 | "# !taskkill /F /PID 15784" 241 | ] 242 | }, 243 | { 244 | "cell_type": "markdown", 245 | "metadata": {}, 246 | "source": [ 247 | "## 寫入圖片" 248 | ] 249 | }, 250 | { 251 | "cell_type": "code", 252 | "execution_count": 26, 253 | "metadata": {}, 254 | "outputs": [ 255 | { 256 | "data": { 257 | "text/plain": [ 258 | "(1, 28, 28, 1)" 259 | ] 260 | }, 261 | "execution_count": 26, 262 | "metadata": {}, 263 | "output_type": "execute_result" 264 | } 265 | ], 266 | "source": [ 267 | "# 任意找一張圖片\n", 268 | "img = x_train[0].numpy().reshape((-1, 28, 28, 1))\n", 269 | "img.shape" 270 | ] 271 | }, 272 | { 273 | "cell_type": "code", 274 | "execution_count": 27, 275 | "metadata": {}, 276 | "outputs": [], 277 | "source": [ 278 | "# 指定 log 檔名\n", 279 | "logdir = \".\\\\logs\\\\train_data\\\\\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n", 280 | "# Creates a file writer for the log directory.\n", 281 | "file_writer = tf.summary.create_file_writer(logdir)\n", 282 | "\n", 283 | "# Using the file writer, log the reshaped image.\n", 284 | "with file_writer.as_default():\n", 285 | " # 將圖片寫入 log 檔\n", 286 | " tf.summary.image(\"Training data\", img, step=0)" 287 | ] 288 | }, 289 | { 290 | "cell_type": "code", 291 | "execution_count": 28, 292 | "metadata": {}, 293 | "outputs": [ 294 | { 295 | "data": { 296 | "text/plain": [ 297 | "ERROR: Timed out waiting for TensorBoard to start. It may still be running as pid 17852." 
298 | ] 299 | }, 300 | "metadata": {}, 301 | "output_type": "display_data" 302 | } 303 | ], 304 | "source": [ 305 | "%tensorboard --logdir logs/train_data" 306 | ] 307 | }, 308 | { 309 | "cell_type": "code", 310 | "execution_count": null, 311 | "metadata": {}, 312 | "outputs": [], 313 | "source": [] 314 | } 315 | ], 316 | "metadata": { 317 | "kernelspec": { 318 | "display_name": "Python 3", 319 | "language": "python", 320 | "name": "python3" 321 | }, 322 | "language_info": { 323 | "codemirror_mode": { 324 | "name": "ipython", 325 | "version": 3 326 | }, 327 | "file_extension": ".py", 328 | "mimetype": "text/x-python", 329 | "name": "python", 330 | "nbconvert_exporter": "python", 331 | "pygments_lexer": "ipython3", 332 | "version": "3.6.8" 333 | } 334 | }, 335 | "nbformat": 4, 336 | "nbformat_minor": 4 337 | } 338 | -------------------------------------------------------------------------------- /08_02_TensorBoard_Tuning.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# TensorBoard 測試 2 -- 效能調校" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 2, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import tensorflow as tf\n", 17 | "mnist = tf.keras.datasets.mnist\n", 18 | "\n", 19 | "\n", 20 | "# 匯入 MNIST 手寫阿拉伯數字 訓練資料\n", 21 | "(x_train, y_train),(x_test, y_test) = mnist.load_data()\n", 22 | "\n", 23 | "\n", 24 | "# 訓練/測試資料的 X/y 維度\n", 25 | "x_train.shape, y_train.shape,x_test.shape, y_test.shape\n", 26 | "\n", 27 | "# 特徵縮放,使用常態化(Normalization),公式 = (x - min) / (max - min)\n", 28 | "# 顏色範圍:0~255,所以,公式簡化為 x / 255\n", 29 | "# 注意,顏色0為白色,與RGB顏色不同,(0,0,0) 為黑色。\n", 30 | "x_train_norm, x_test_norm = x_train / 255.0, x_test / 255.0\n" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 4, 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "# 建立、訓練、評估模型\n", 40 | "def train_test_model(hparams):\n", 41 | " # 建立模型,神經元數與 Dropout 比例由 hparams 決定\n", 42 | " model = tf.keras.models.Sequential([\n", 43 | " tf.keras.layers.Flatten(input_shape=(28, 28)),\n", 44 | " tf.keras.layers.Dense(hparams[HP_NUM_UNITS], activation='relu'),\n", 45 | " tf.keras.layers.Dropout(hparams[HP_DROPOUT]),\n", 46 | " tf.keras.layers.Dense(10, activation='softmax')\n", 47 | " ])\n", 48 | " # 設定優化器(optimizer)、損失函數(loss)、效能衡量指標(metrics)的類別\n", 49 | " model.compile(optimizer=hparams[HP_OPTIMIZER],\n", 50 | " loss='sparse_categorical_crossentropy',\n", 51 | " metrics=['accuracy'])\n", 52 | " # 訓練\n", 53 | " model.fit(x_train_norm, y_train, epochs=5, validation_split=0.2)\n", 54 | " # 評估,打分數\n", 55 | " _, accuracy = model.evaluate(x_test_norm, y_test, verbose=0)\n", 56 | " return accuracy" 57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": 5, 62 | "metadata": {}, 63 | "outputs": [], 64 | "source": [ 65 | "# 定義訓練函數,包括依參數訓練並寫入 log\n", 66 | "def run(run_dir, hparams):\n", 67 | " with tf.summary.create_file_writer(run_dir).as_default():\n", 68 | " hp.hparams(hparams) # record the values used in this trial\n", 69 | " accuracy = train_test_model(hparams)\n", 70 | " tf.summary.scalar('accuracy', accuracy, step=1)" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": 9, 76 | "metadata": {}, 77 | "outputs": [], 78 | "source": [ 79 | "# 參數組合\n", 80 | "from tensorboard.plugins.hparams import api as hp\n", 81 | "\n", 82 | "HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([16, 32]))\n", 83 | "HP_DROPOUT = hp.HParam('dropout', hp.RealInterval(0.1, 0.2))\n", 84 | "HP_OPTIMIZER = 
hp.HParam('optimizer', hp.Discrete(['adam', 'sgd']))" 85 | ] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "execution_count": 10, 90 | "metadata": {}, 91 | "outputs": [ 92 | { 93 | "name": "stdout", 94 | "output_type": "stream", 95 | "text": [ 96 | "--- Starting trial: run-0\n", 97 | "{'num_units': 16, 'dropout': 0.1, 'optimizer': 'adam'}\n", 98 | "Train on 48000 samples, validate on 12000 samples\n", 99 | "Epoch 1/5\n", 100 | "48000/48000 [==============================] - 7s 140us/sample - loss: 0.3312 - accuracy: 0.9025 - val_loss: 0.1531 - val_accuracy: 0.9563\n", 101 | "Epoch 2/5\n", 102 | "48000/48000 [==============================] - 4s 75us/sample - loss: 0.1603 - accuracy: 0.9522 - val_loss: 0.1254 - val_accuracy: 0.9642\n", 103 | "Epoch 3/5\n", 104 | "48000/48000 [==============================] - 4s 74us/sample - loss: 0.1193 - accuracy: 0.9644 - val_loss: 0.1074 - val_accuracy: 0.9690\n", 105 | "Epoch 4/5\n", 106 | "48000/48000 [==============================] - 4s 75us/sample - loss: 0.0958 - accuracy: 0.9709 - val_loss: 0.0912 - val_accuracy: 0.9732\n", 107 | "Epoch 5/5\n", 108 | "48000/48000 [==============================] - 4s 75us/sample - loss: 0.0820 - accuracy: 0.9751 - val_loss: 0.0901 - val_accuracy: 0.9730\n", 109 | "--- Starting trial: run-1\n", 110 | "{'num_units': 16, 'dropout': 0.1, 'optimizer': 'sgd'}\n", 111 | "Train on 48000 samples, validate on 12000 samples\n", 112 | "Epoch 1/5\n", 113 | "48000/48000 [==============================] - 4s 80us/sample - loss: 0.3332 - accuracy: 0.9029 - val_loss: 0.1622 - val_accuracy: 0.9541\n", 114 | "Epoch 2/5\n", 115 | "48000/48000 [==============================] - 4s 74us/sample - loss: 0.1615 - accuracy: 0.9528 - val_loss: 0.1294 - val_accuracy: 0.9600\n", 116 | "Epoch 3/5\n", 117 | "48000/48000 [==============================] - 4s 74us/sample - loss: 0.1195 - accuracy: 0.9641 - val_loss: 0.1001 - val_accuracy: 0.9711\n", 118 | "Epoch 4/5\n", 119 | "48000/48000 [==============================] - 4s 73us/sample - loss: 0.0967 - accuracy: 0.9712 - val_loss: 0.0958 - val_accuracy: 0.9711\n", 120 | "Epoch 5/5\n", 121 | "48000/48000 [==============================] - 4s 73us/sample - loss: 0.0813 - accuracy: 0.9746 - val_loss: 0.0805 - val_accuracy: 0.9749\n", 122 | "--- Starting trial: run-2\n", 123 | "{'num_units': 16, 'dropout': 0.2, 'optimizer': 'adam'}\n", 124 | "Train on 48000 samples, validate on 12000 samples\n", 125 | "Epoch 1/5\n", 126 | "48000/48000 [==============================] - 4s 82us/sample - loss: 0.3217 - accuracy: 0.9069 - val_loss: 0.1539 - val_accuracy: 0.9547\n", 127 | "Epoch 2/5\n", 128 | "48000/48000 [==============================] - 4s 75us/sample - loss: 0.1553 - accuracy: 0.9544 - val_loss: 0.1104 - val_accuracy: 0.9678\n", 129 | "Epoch 3/5\n", 130 | "48000/48000 [==============================] - 4s 74us/sample - loss: 0.1171 - accuracy: 0.9648 - val_loss: 0.0967 - val_accuracy: 0.9732\n", 131 | "Epoch 4/5\n", 132 | "48000/48000 [==============================] - 4s 75us/sample - loss: 0.0943 - accuracy: 0.9704 - val_loss: 0.0859 - val_accuracy: 0.9746\n", 133 | "Epoch 5/5\n", 134 | "48000/48000 [==============================] - 4s 76us/sample - loss: 0.0781 - accuracy: 0.9755 - val_loss: 0.0879 - val_accuracy: 0.9737\n", 135 | "--- Starting trial: run-3\n", 136 | "{'num_units': 16, 'dropout': 0.2, 'optimizer': 'sgd'}\n", 137 | "Train on 48000 samples, validate on 12000 samples\n", 138 | "Epoch 1/5\n", 139 | "48000/48000 [==============================] - 4s 81us/sample - loss: 0.3227 - 
accuracy: 0.9068 - val_loss: 0.1563 - val_accuracy: 0.9546\n", 140 | "Epoch 2/5\n", 141 | "48000/48000 [==============================] - 4s 77us/sample - loss: 0.1565 - accuracy: 0.9528 - val_loss: 0.1139 - val_accuracy: 0.9663\n", 142 | "Epoch 3/5\n", 143 | "48000/48000 [==============================] - 4s 76us/sample - loss: 0.1179 - accuracy: 0.9643 - val_loss: 0.1110 - val_accuracy: 0.9669\n", 144 | "Epoch 4/5\n", 145 | "48000/48000 [==============================] - 4s 74us/sample - loss: 0.0955 - accuracy: 0.9713 - val_loss: 0.0883 - val_accuracy: 0.9741\n", 146 | "Epoch 5/5\n", 147 | "48000/48000 [==============================] - 4s 77us/sample - loss: 0.0807 - accuracy: 0.9747 - val_loss: 0.0861 - val_accuracy: 0.9738\n", 148 | "--- Starting trial: run-4\n", 149 | "{'num_units': 32, 'dropout': 0.1, 'optimizer': 'adam'}\n", 150 | "Train on 48000 samples, validate on 12000 samples\n", 151 | "Epoch 1/5\n", 152 | "48000/48000 [==============================] - 4s 80us/sample - loss: 0.3229 - accuracy: 0.9066 - val_loss: 0.1551 - val_accuracy: 0.9565\n", 153 | "Epoch 2/5\n", 154 | "48000/48000 [==============================] - 3s 73us/sample - loss: 0.1566 - accuracy: 0.9537 - val_loss: 0.1193 - val_accuracy: 0.9641\n", 155 | "Epoch 3/5\n", 156 | "48000/48000 [==============================] - 4s 74us/sample - loss: 0.1134 - accuracy: 0.9657 - val_loss: 0.0960 - val_accuracy: 0.9720\n", 157 | "Epoch 4/5\n", 158 | "48000/48000 [==============================] - 4s 73us/sample - loss: 0.0910 - accuracy: 0.9708 - val_loss: 0.0922 - val_accuracy: 0.9724\n", 159 | "Epoch 5/5\n", 160 | "48000/48000 [==============================] - 4s 74us/sample - loss: 0.0777 - accuracy: 0.9757 - val_loss: 0.0883 - val_accuracy: 0.9740\n", 161 | "--- Starting trial: run-5\n", 162 | "{'num_units': 32, 'dropout': 0.1, 'optimizer': 'sgd'}\n", 163 | "Train on 48000 samples, validate on 12000 samples\n", 164 | "Epoch 1/5\n", 165 | "48000/48000 [==============================] - 4s 78us/sample - loss: 0.3277 - accuracy: 0.9057 - val_loss: 0.1659 - val_accuracy: 0.9541\n", 166 | "Epoch 2/5\n", 167 | "48000/48000 [==============================] - 4s 73us/sample - loss: 0.1598 - accuracy: 0.9529 - val_loss: 0.1133 - val_accuracy: 0.9672\n", 168 | "Epoch 3/5\n", 169 | "48000/48000 [==============================] - 4s 73us/sample - loss: 0.1168 - accuracy: 0.9650 - val_loss: 0.1052 - val_accuracy: 0.9672\n", 170 | "Epoch 4/5\n", 171 | "48000/48000 [==============================] - 4s 73us/sample - loss: 0.0945 - accuracy: 0.9712 - val_loss: 0.0925 - val_accuracy: 0.9731\n", 172 | "Epoch 5/5\n", 173 | "48000/48000 [==============================] - 4s 74us/sample - loss: 0.0799 - accuracy: 0.9766 - val_loss: 0.0862 - val_accuracy: 0.9746\n", 174 | "--- Starting trial: run-6\n", 175 | "{'num_units': 32, 'dropout': 0.2, 'optimizer': 'adam'}\n", 176 | "Train on 48000 samples, validate on 12000 samples\n", 177 | "Epoch 1/5\n", 178 | "48000/48000 [==============================] - 4s 79us/sample - loss: 0.3272 - accuracy: 0.9051 - val_loss: 0.1561 - val_accuracy: 0.9557\n", 179 | "Epoch 2/5\n", 180 | "48000/48000 [==============================] - 4s 73us/sample - loss: 0.1601 - accuracy: 0.9523 - val_loss: 0.1214 - val_accuracy: 0.9638\n", 181 | "Epoch 3/5\n", 182 | "48000/48000 [==============================] - 4s 74us/sample - loss: 0.1203 - accuracy: 0.9642 - val_loss: 0.1020 - val_accuracy: 0.9703\n", 183 | "Epoch 4/5\n", 184 | "48000/48000 [==============================] - 4s 75us/sample - loss: 0.0961 - 
accuracy: 0.9708 - val_loss: 0.0934 - val_accuracy: 0.9722\n", 185 | "Epoch 5/5\n", 186 | "48000/48000 [==============================] - 4s 81us/sample - loss: 0.0815 - accuracy: 0.9748 - val_loss: 0.0826 - val_accuracy: 0.9762\n", 187 | "--- Starting trial: run-7\n", 188 | "{'num_units': 32, 'dropout': 0.2, 'optimizer': 'sgd'}\n", 189 | "Train on 48000 samples, validate on 12000 samples\n", 190 | "Epoch 1/5\n", 191 | "48000/48000 [==============================] - 4s 79us/sample - loss: 0.3234 - accuracy: 0.9065 - val_loss: 0.1576 - val_accuracy: 0.9549\n", 192 | "Epoch 2/5\n", 193 | "48000/48000 [==============================] - 4s 76us/sample - loss: 0.1573 - accuracy: 0.9531 - val_loss: 0.1158 - val_accuracy: 0.9655\n", 194 | "Epoch 3/5\n", 195 | "48000/48000 [==============================] - 4s 76us/sample - loss: 0.1157 - accuracy: 0.9655 - val_loss: 0.1026 - val_accuracy: 0.9679\n", 196 | "Epoch 4/5\n", 197 | "48000/48000 [==============================] - 4s 76us/sample - loss: 0.0951 - accuracy: 0.9711 - val_loss: 0.0896 - val_accuracy: 0.9737\n", 198 | "Epoch 5/5\n", 199 | "48000/48000 [==============================] - 4s 76us/sample - loss: 0.0791 - accuracy: 0.9753 - val_loss: 0.0873 - val_accuracy: 0.9753\n" 200 | ] 201 | } 202 | ], 203 | "source": [ 204 | "# 依每一參數組合執行訓練\n", 205 | "session_num = 0\n", 206 | "\n", 207 | "for num_units in HP_NUM_UNITS.domain.values:\n", 208 | " for dropout_rate in (HP_DROPOUT.domain.min_value, HP_DROPOUT.domain.max_value):\n", 209 | " for optimizer in HP_OPTIMIZER.domain.values:\n", 210 | " hparams = {\n", 211 | " HP_NUM_UNITS: num_units,\n", 212 | " HP_DROPOUT: dropout_rate,\n", 213 | " HP_OPTIMIZER: optimizer,\n", 214 | " }\n", 215 | " run_name = \"run-%d\" % session_num\n", 216 | " print('--- Starting trial: %s' % run_name)\n", 217 | " print({h.name: hparams[h] for h in hparams})\n", 218 | " run('logs/hparam_tuning/' + run_name, hparams)\n", 219 | " session_num += 1" 220 | ] 221 | }, 222 | { 223 | "cell_type": "code", 224 | "execution_count": 11, 225 | "metadata": {}, 226 | "outputs": [], 227 | "source": [ 228 | "# 載入 TensorBoard notebook extension,即可在 jupyter notebook 啟動 Tensorboard\n", 229 | "%load_ext tensorboard" 230 | ] 231 | }, 232 | { 233 | "cell_type": "code", 234 | "execution_count": 12, 235 | "metadata": {}, 236 | "outputs": [ 237 | { 238 | "data": { 239 | "text/plain": [ 240 | "ERROR: Timed out waiting for TensorBoard to start. It may still be running as pid 5524." 
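The HParams dashboard can also be initialized once with the experiment's hyperparameters and metrics so its columns render cleanly; a minimal sketch using the definitions above, following the `tensorboard.plugins.hparams` API:

with tf.summary.create_file_writer('logs/hparam_tuning').as_default():
    hp.hparams_config(
        hparams=[HP_NUM_UNITS, HP_DROPOUT, HP_OPTIMIZER],
        metrics=[hp.Metric('accuracy', display_name='Accuracy')])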
241 | ] 242 | }, 243 | "metadata": {}, 244 | "output_type": "display_data" 245 | } 246 | ], 247 | "source": [ 248 | "# 啟動 Tensorboard\n", 249 | "%tensorboard --logdir logs/hparam_tuning" 250 | ] 251 | }, 252 | { 253 | "cell_type": "markdown", 254 | "metadata": {}, 255 | "source": [ 256 | "## 使用瀏覽器輸入以下網址,即可觀看訓練資訊:\n", 257 | "## http://localhost:6006/" 258 | ] 259 | }, 260 | { 261 | "cell_type": "code", 262 | "execution_count": 13, 263 | "metadata": {}, 264 | "outputs": [ 265 | { 266 | "name": "stdout", 267 | "output_type": "stream", 268 | "text": [ 269 | "成功: 處理程序 \"tensorboard.exe\" (PID 5524) 已經終止了。\n" 270 | ] 271 | } 272 | ], 273 | "source": [ 274 | "!taskkill /IM \"tensorboard.exe\" /F\n", 275 | "# or \n", 276 | "# !taskkill /F /PID 15784" 277 | ] 278 | }, 279 | { 280 | "cell_type": "code", 281 | "execution_count": null, 282 | "metadata": {}, 283 | "outputs": [], 284 | "source": [] 285 | } 286 | ], 287 | "metadata": { 288 | "kernelspec": { 289 | "display_name": "Python 3", 290 | "language": "python", 291 | "name": "python3" 292 | }, 293 | "language_info": { 294 | "codemirror_mode": { 295 | "name": "ipython", 296 | "version": 3 297 | }, 298 | "file_extension": ".py", 299 | "mimetype": "text/x-python", 300 | "name": "python", 301 | "nbconvert_exporter": "python", 302 | "pygments_lexer": "ipython3", 303 | "version": "3.6.8" 304 | } 305 | }, 306 | "nbformat": 4, 307 | "nbformat_minor": 4 308 | } 309 | -------------------------------------------------------------------------------- /0_video/night.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/0_video/night.mp4 -------------------------------------------------------------------------------- /0_video/pedestrians.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/0_video/pedestrians.mp4 -------------------------------------------------------------------------------- /0_video/出處.txt: -------------------------------------------------------------------------------- 1 | highway.mp4:Python Image Processing Cookbook 2 | -------------------------------------------------------------------------------- /13_01_Canvas.py: -------------------------------------------------------------------------------- 1 | from tkinter import * 2 | from tkinter import filedialog 3 | 4 | from PIL import ImageDraw, Image, ImageGrab 5 | import numpy as np 6 | from skimage import color 7 | from skimage import io 8 | import os 9 | import io 10 | 11 | class Paint(object): 12 | 13 | # 類別初始化函數 14 | def __init__(self): 15 | self.root = Tk() 16 | 17 | #defining Canvas 18 | self.c = Canvas(self.root, bg='white', width=280, height=280) 19 | 20 | self.image1 = Image.new('RGB', (280, 280), color = 'white') 21 | self.draw = ImageDraw.Draw(self.image1) 22 | 23 | self.c.grid(row=1, columnspan=6) 24 | 25 | # 建立【辨識】按鈕 26 | self.classify_button = Button(self.root, text='辨識', command=lambda:self.classify(self.c)) 27 | self.classify_button.grid(row=0, column=0, columnspan=2, sticky='EWNS') 28 | 29 | # 建立【清畫面】按鈕 30 | self.clear = Button(self.root, text='清畫面', command=self.clear) 31 | self.clear.grid(row=0, column=2, columnspan=2, sticky='EWNS') 32 | 33 | # 建立【存檔】按鈕 34 | self.savefile = Button(self.root, text='存檔', command=self.savefile) 35 | self.savefile.grid(row=0, column=4, columnspan=2, sticky='EWNS') 36 | 37 | # 建立【預測】文字框 38 | 
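# Note: in this standalone canvas demo, classify() below is a stub (pass);
# the CNN-backed version of the same UI is 13_02_CNN_model.py, which fills
# this prediction Text box with the recognized digit.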
self.prediction_text = Text(self.root, height=2, width=10) 39 | self.prediction_text.grid(row=2, column=4, columnspan=2) 40 | 41 | # self.model = self.loadModel() 42 | 43 | # 定義滑鼠事件處理函數 44 | self.setup() 45 | 46 | # 監聽事件 47 | self.root.mainloop() 48 | 49 | # 滑鼠事件處理函數 50 | def setup(self): 51 | self.old_x = None 52 | self.old_y = None 53 | self.line_width = 15 54 | self.color = 'black' 55 | 56 | # 定義滑鼠事件處理函數,包括移動滑鼠及鬆開滑鼠按鈕 57 | self.c.bind('<B1-Motion>', self.paint) 58 | self.c.bind('<ButtonRelease-1>', self.reset) 59 | 60 | # 移動滑鼠 處理函數 61 | def paint(self, event): 62 | paint_color = self.color 63 | if self.old_x and self.old_y: 64 | self.c.create_line(self.old_x, self.old_y, event.x, event.y, 65 | width=self.line_width, fill=paint_color, 66 | capstyle=ROUND, smooth=TRUE, splinesteps=36) 67 | # 顯示設定>100%,抓到的區域會變小 68 | # 畫圖同時寫到記憶體,避免螢幕字型放大,造成抓到的畫布區域不足 69 | self.draw.line((self.old_x, self.old_y, event.x, event.y), fill='black', width=5) 70 | 71 | self.old_x = event.x 72 | self.old_y = event.y 73 | 74 | # 鬆開滑鼠按鈕 處理函數 75 | def reset(self, event): 76 | self.old_x, self.old_y = None, None 77 | 78 | # 【清畫面】處理函數 79 | def clear(self): 80 | self.c.delete("all") 81 | self.image1 = Image.new('RGB', (280, 280), color = 'white') 82 | self.draw = ImageDraw.Draw(self.image1) 83 | self.prediction_text.delete("1.0", END) 84 | 85 | # 【存檔】處理函數 86 | def savefile(self): 87 | f = filedialog.asksaveasfilename( defaultextension=".png", filetypes = [("png file",".png")]) 88 | if not f: # asksaveasfilename returns '' if the dialog is closed with "cancel". 89 | return 90 | #print(f) 91 | self.image1.save(f) 92 | 93 | # 【辨識】處理函數 94 | def classify(self, widget): 95 | pass 96 | 97 | 98 | 99 | if __name__ == '__main__': 100 | Paint() 101 | -------------------------------------------------------------------------------- /13_02_CNN_model.py: -------------------------------------------------------------------------------- 1 | from tkinter import * 2 | from tkinter import filedialog 3 | 4 | from PIL import ImageDraw, Image, ImageGrab 5 | import numpy as np 6 | from skimage import color 7 | from skimage import io 8 | import os 9 | import io 10 | 11 | from cnn_class import getData, trainModel, loadModel 12 | 13 | 14 | class Paint(object): 15 | 16 | # 類別初始化函數 17 | def __init__(self): 18 | self.root = Tk() 19 | 20 | #defining Canvas 21 | self.c = Canvas(self.root, bg='white', width=280, height=280) 22 | 23 | self.image1 = Image.new('RGB', (280, 280), color = 'white') 24 | self.draw = ImageDraw.Draw(self.image1) 25 | 26 | self.c.grid(row=1, columnspan=6) 27 | 28 | # 建立【辨識】按鈕 29 | self.classify_button = Button(self.root, text='辨識', command=lambda:self.classify(self.c)) 30 | self.classify_button.grid(row=0, column=0, columnspan=2, sticky='EWNS') 31 | 32 | # 建立【清畫面】按鈕 33 | self.clear = Button(self.root, text='清畫面', command=self.clear) 34 | self.clear.grid(row=0, column=2, columnspan=2, sticky='EWNS') 35 | 36 | # 建立【存檔】按鈕 37 | self.savefile = Button(self.root, text='存檔', command=self.savefile) 38 | self.savefile.grid(row=0, column=4, columnspan=2, sticky='EWNS') 39 | 40 | # 建立【預測】文字框 41 | self.prediction_text = Text(self.root, height=2, width=10) 42 | self.prediction_text.grid(row=2, column=4, columnspan=2) 43 | 44 | # self.model = self.loadModel() 45 | 46 | # 定義滑鼠事件處理函數 47 | self.setup() 48 | 49 | # 監聽事件 50 | self.root.mainloop() 51 | 52 | # 滑鼠事件處理函數 53 | def setup(self): 54 | self.old_x = None 55 | self.old_y = None 56 | self.line_width = 15 57 | self.color = 'black' 58 | 59 | # 定義滑鼠事件處理函數,包括移動滑鼠及鬆開滑鼠按鈕 60 | self.c.bind('<B1-Motion>', self.paint) 61 | self.c.bind('<ButtonRelease-1>', 
self.reset) 62 | 63 | # 移動滑鼠 處理函數 64 | def paint(self, event): 65 | paint_color = self.color 66 | if self.old_x and self.old_y: 67 | self.c.create_line(self.old_x, self.old_y, event.x, event.y, 68 | width=self.line_width, fill=paint_color, 69 | capstyle=ROUND, smooth=TRUE, splinesteps=36) 70 | # 顯示設定>100%,抓到的區域會變小 71 | # 畫圖同時寫到記憶體,避免螢幕字型放大,造成抓到的畫布區域不足 72 | self.draw.line((self.old_x, self.old_y, event.x, event.y), fill='black', width=self.line_width) 73 | 74 | self.old_x = event.x 75 | self.old_y = event.y 76 | 77 | # 鬆開滑鼠按鈕 處理函數 78 | def reset(self, event): 79 | self.old_x, self.old_y = None, None 80 | 81 | # 【清畫面】處理函數 82 | def clear(self): 83 | self.c.delete("all") 84 | self.image1 = Image.new('RGB', (280, 280), color = 'white') 85 | self.draw = ImageDraw.Draw(self.image1) 86 | self.prediction_text.delete("1.0", END) 87 | 88 | # 【存檔】處理函數 89 | def savefile(self): 90 | f = filedialog.asksaveasfilename( defaultextension=".png", filetypes = [("png file",".png")]) 91 | if not f: # asksaveasfilename returns '' if the dialog is closed with "cancel". 92 | return 93 | #print(f) 94 | self.image1.save(f) 95 | 96 | # 【辨識】處理函數 97 | def classify(self, widget): 98 | # self.image1.save('原圖.png') 99 | img = self.image1.resize((28, 28), ImageGrab.Image.ANTIALIAS).convert('L') 100 | # img.save('縮小.png') 101 | 102 | img = np.array(img) 103 | # Change pixels to work with our classifier 104 | img = (255 - img) / 255 105 | 106 | img2=Image.fromarray(img) 107 | #img2.save('2.png') 108 | 109 | img = np.reshape(img, (1, 28, 28, 1)) 110 | 111 | # Predict digit 112 | pred = model.predict([img]) 113 | # Get index with highest probability 114 | pred = np.argmax(pred) 115 | #print(pred) 116 | self.prediction_text.delete("1.0", END) 117 | self.prediction_text.insert(END, pred) 118 | 119 | 120 | if __name__ == '__main__': 121 | # 訓練模型或載入既有的模型 122 | if(os.path.exists('mnist_model.h5')): 123 | print('load model ...') 124 | model = loadModel() 125 | else: 126 | print('train model ...') 127 | X_train, y_train, X_test, y_test = getData() 128 | model = trainModel(X_train, y_train, X_test, y_test) 129 | 130 | print(model.summary()) 131 | 132 | # 顯示視窗 133 | Paint() 134 | 135 | -------------------------------------------------------------------------------- /14_01_Keras_applications_1.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 模型完全採用" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [ 15 | { 16 | "name": "stdout", 17 | "output_type": "stream", 18 | "text": [ 19 | "A local file was found, but it seems to be incomplete or outdated because the auto file hash does not match the original value of 2cb95161c43110f7111970584f804107 so we will re-download the data.\n", 20 | "Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5\n", 21 | "102973440/102967424 [==============================] - 42s 0us/step\n", 22 | "Predicted: [('n02504013', 'Indian_elephant', 0.8198577), ('n02504458', 'African_elephant', 0.117787644), ('n01871265', 'tusker', 0.058297537)]\n" 23 | ] 24 | } 25 | ], 26 | "source": [ 27 | "from tensorflow.keras.applications.resnet50 import ResNet50\n", 28 | "from tensorflow.keras.preprocessing import image\n", 29 | "from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions\n", 30 | "import numpy as np\n", 
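One point worth noting before the cell below: `preprocess_input` is architecture-specific, so it should always be imported from the module that matches the loaded model, as done here.

# Each applications module ships its own preprocessing; mixing them skews results.
# For resnet50/vgg16 it converts RGB to BGR and subtracts the ImageNet channel means:
# from tensorflow.keras.applications.vgg16 import preprocess_input     # for VGG16
# from tensorflow.keras.applications.resnet50 import preprocess_input  # for ResNet50 (used here)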
32 | "# 預先訓練好的模型 -- ResNet50\n", 33 | "model = ResNet50(weights='imagenet')\n", 34 | "\n", 35 | "# 任意一張圖片,例如大象\n", 36 | "img_path = './images/elephant.jpg'\n", 37 | "# 載入圖檔,並縮放寬高為 (224, 224) \n", 38 | "img = image.load_img(img_path, target_size=(224, 224))\n", 39 | "# 加一維,變成 (1, 224, 224, 3),最後一維是色彩\n", 40 | "x = image.img_to_array(img)\n", 41 | "x = np.expand_dims(x, axis=0)\n", 42 | "x = preprocess_input(x)\n", 43 | "\n", 44 | "# 預測\n", 45 | "preds = model.predict(x)\n", 46 | "# decode the results into a list of tuples (class, description, probability)\n", 47 | "# (one such list for each sample in the batch)\n", 48 | "print('Predicted:', decode_predictions(preds, top=3)[0])" 49 | ] 50 | }, 51 | { 52 | "cell_type": "markdown", 53 | "metadata": {}, 54 | "source": [ 55 | "# 上一張是側面照,這次換正面照" 56 | ] 57 | }, 58 | { 59 | "cell_type": "code", 60 | "execution_count": 2, 61 | "metadata": {}, 62 | "outputs": [ 63 | { 64 | "name": "stdout", 65 | "output_type": "stream", 66 | "text": [ 67 | "Predicted: [('n01871265', 'tusker', 0.79292905), ('n02504458', 'African_elephant', 0.17253475), ('n02504013', 'Indian_elephant', 0.03453612)]\n" 68 | ] 69 | } 70 | ], 71 | "source": [ 72 | "img_path = './images/elephant2.jpg'\n", 73 | "# 載入圖檔,並縮放寬高為 (224, 224) \n", 74 | "img = image.load_img(img_path, target_size=(224, 224))\n", 75 | "# 加一維,變成 (1, 224, 224, 3),最後一維是色彩\n", 76 | "x = image.img_to_array(img)\n", 77 | "x = np.expand_dims(x, axis=0)\n", 78 | "x = preprocess_input(x)\n", 79 | "\n", 80 | "# 預測\n", 81 | "preds = model.predict(x)\n", 82 | "# decode the results into a list of tuples (class, description, probability)\n", 83 | "# (one such list for each sample in the batch)\n", 84 | "print('Predicted:', decode_predictions(preds, top=3)[0])" 85 | ] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "execution_count": null, 90 | "metadata": {}, 91 | "outputs": [], 92 | "source": [] 93 | } 94 | ], 95 | "metadata": { 96 | "kernelspec": { 97 | "display_name": "Python 3", 98 | "language": "python", 99 | "name": "python3" 100 | }, 101 | "language_info": { 102 | "codemirror_mode": { 103 | "name": "ipython", 104 | "version": 3 105 | }, 106 | "file_extension": ".py", 107 | "mimetype": "text/x-python", 108 | "name": "python", 109 | "nbconvert_exporter": "python", 110 | "pygments_lexer": "ipython3", 111 | "version": "3.8.3" 112 | } 113 | }, 114 | "nbformat": 4, 115 | "nbformat_minor": 4 116 | } 117 | -------------------------------------------------------------------------------- /14_02_Keras_applications2.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 部分採用,擷取特徵" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 45, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "from tensorflow.keras.applications.vgg16 import VGG16\n", 17 | "from tensorflow.keras.preprocessing import image\n", 18 | "from tensorflow.keras.applications.vgg16 import preprocess_input\n", 19 | "import numpy as np\n", 20 | "\n", 21 | "# 預先訓練好的模型 -- VGG16, 不含後三層(辨識層)\n", 22 | "model = VGG16(weights='imagenet', include_top=False)\n", 23 | "\n", 24 | "# 任意一張圖片,例如大象\n", 25 | "img_path = './images/elephant.jpg'\n", 26 | "# 載入圖檔,並縮放寬高為 (224, 224) \n", 27 | "img = image.load_img(img_path, target_size=(224, 224))\n", 28 | "# 加一維,變成 (1, 224, 224, 3),最後一維是色彩\n", 29 | "x = image.img_to_array(img)\n", 30 | "x = np.expand_dims(x, axis=0)\n", 31 | "x = preprocess_input(x)\n", 32 | "\n", 33 | "features = 
model.predict(x)" 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": 46, 39 | "metadata": {}, 40 | "outputs": [ 41 | { 42 | "name": "stdout", 43 | "output_type": "stream", 44 | "text": [ 45 | "Model: \"vgg16\"\n", 46 | "_________________________________________________________________\n", 47 | "Layer (type) Output Shape Param # \n", 48 | "=================================================================\n", 49 | "input_2 (InputLayer) [(None, None, None, 3)] 0 \n", 50 | "_________________________________________________________________\n", 51 | "block1_conv1 (Conv2D) (None, None, None, 64) 1792 \n", 52 | "_________________________________________________________________\n", 53 | "block1_conv2 (Conv2D) (None, None, None, 64) 36928 \n", 54 | "_________________________________________________________________\n", 55 | "block1_pool (MaxPooling2D) (None, None, None, 64) 0 \n", 56 | "_________________________________________________________________\n", 57 | "block2_conv1 (Conv2D) (None, None, None, 128) 73856 \n", 58 | "_________________________________________________________________\n", 59 | "block2_conv2 (Conv2D) (None, None, None, 128) 147584 \n", 60 | "_________________________________________________________________\n", 61 | "block2_pool (MaxPooling2D) (None, None, None, 128) 0 \n", 62 | "_________________________________________________________________\n", 63 | "block3_conv1 (Conv2D) (None, None, None, 256) 295168 \n", 64 | "_________________________________________________________________\n", 65 | "block3_conv2 (Conv2D) (None, None, None, 256) 590080 \n", 66 | "_________________________________________________________________\n", 67 | "block3_conv3 (Conv2D) (None, None, None, 256) 590080 \n", 68 | "_________________________________________________________________\n", 69 | "block3_pool (MaxPooling2D) (None, None, None, 256) 0 \n", 70 | "_________________________________________________________________\n", 71 | "block4_conv1 (Conv2D) (None, None, None, 512) 1180160 \n", 72 | "_________________________________________________________________\n", 73 | "block4_conv2 (Conv2D) (None, None, None, 512) 2359808 \n", 74 | "_________________________________________________________________\n", 75 | "block4_conv3 (Conv2D) (None, None, None, 512) 2359808 \n", 76 | "_________________________________________________________________\n", 77 | "block4_pool (MaxPooling2D) (None, None, None, 512) 0 \n", 78 | "_________________________________________________________________\n", 79 | "block5_conv1 (Conv2D) (None, None, None, 512) 2359808 \n", 80 | "_________________________________________________________________\n", 81 | "block5_conv2 (Conv2D) (None, None, None, 512) 2359808 \n", 82 | "_________________________________________________________________\n", 83 | "block5_conv3 (Conv2D) (None, None, None, 512) 2359808 \n", 84 | "_________________________________________________________________\n", 85 | "block5_pool (MaxPooling2D) (None, None, None, 512) 0 \n", 86 | "=================================================================\n", 87 | "Total params: 14,714,688\n", 88 | "Trainable params: 14,714,688\n", 89 | "Non-trainable params: 0\n", 90 | "_________________________________________________________________\n" 91 | ] 92 | } 93 | ], 94 | "source": [ 95 | "model.summary()" 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": 47, 101 | "metadata": {}, 102 | "outputs": [ 103 | { 104 | "name": "stdout", 105 | "output_type": "stream", 106 | "text": [ 107 | "Model: \"vgg16\"\n", 
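Once feature vectors are extracted this way, two images can be compared directly, which is what the `cosine_similarity` section later in this notebook does. A minimal sketch, where `features2` is an assumed second feature map produced by the same `model.predict` call on another image:

from sklearn.metrics.pairwise import cosine_similarity
f1 = features.reshape(1, -1)      # flatten (1, 7, 7, 512) -> (1, 25088)
f2 = features2.reshape(1, -1)     # `features2` is an assumption, extracted the same way
print(cosine_similarity(f1, f2))  # close to 1.0 = visually similar content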
108 | "_________________________________________________________________\n", 109 | "Layer (type) Output Shape Param # \n", 110 | "=================================================================\n", 111 | "input_3 (InputLayer) [(None, 224, 224, 3)] 0 \n", 112 | "_________________________________________________________________\n", 113 | "block1_conv1 (Conv2D) (None, 224, 224, 64) 1792 \n", 114 | "_________________________________________________________________\n", 115 | "block1_conv2 (Conv2D) (None, 224, 224, 64) 36928 \n", 116 | "_________________________________________________________________\n", 117 | "block1_pool (MaxPooling2D) (None, 112, 112, 64) 0 \n", 118 | "_________________________________________________________________\n", 119 | "block2_conv1 (Conv2D) (None, 112, 112, 128) 73856 \n", 120 | "_________________________________________________________________\n", 121 | "block2_conv2 (Conv2D) (None, 112, 112, 128) 147584 \n", 122 | "_________________________________________________________________\n", 123 | "block2_pool (MaxPooling2D) (None, 56, 56, 128) 0 \n", 124 | "_________________________________________________________________\n", 125 | "block3_conv1 (Conv2D) (None, 56, 56, 256) 295168 \n", 126 | "_________________________________________________________________\n", 127 | "block3_conv2 (Conv2D) (None, 56, 56, 256) 590080 \n", 128 | "_________________________________________________________________\n", 129 | "block3_conv3 (Conv2D) (None, 56, 56, 256) 590080 \n", 130 | "_________________________________________________________________\n", 131 | "block3_pool (MaxPooling2D) (None, 28, 28, 256) 0 \n", 132 | "_________________________________________________________________\n", 133 | "block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160 \n", 134 | "_________________________________________________________________\n", 135 | "block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808 \n", 136 | "_________________________________________________________________\n", 137 | "block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808 \n", 138 | "_________________________________________________________________\n", 139 | "block4_pool (MaxPooling2D) (None, 14, 14, 512) 0 \n", 140 | "_________________________________________________________________\n", 141 | "block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808 \n", 142 | "_________________________________________________________________\n", 143 | "block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808 \n", 144 | "_________________________________________________________________\n", 145 | "block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808 \n", 146 | "_________________________________________________________________\n", 147 | "block5_pool (MaxPooling2D) (None, 7, 7, 512) 0 \n", 148 | "_________________________________________________________________\n", 149 | "flatten (Flatten) (None, 25088) 0 \n", 150 | "_________________________________________________________________\n", 151 | "fc1 (Dense) (None, 4096) 102764544 \n", 152 | "_________________________________________________________________\n", 153 | "fc2 (Dense) (None, 4096) 16781312 \n", 154 | "_________________________________________________________________\n", 155 | "predictions (Dense) (None, 1000) 4097000 \n", 156 | "=================================================================\n", 157 | "Total params: 138,357,544\n", 158 | "Trainable params: 138,357,544\n", 159 | "Non-trainable params: 0\n", 160 | "_________________________________________________________________\n" 161 | ] 162 | } 163 | ], 164 | "source": 
[ 165 | "model2 = VGG16(weights='imagenet', include_top=True)\n", 166 | "model2.summary()" 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": 48, 172 | "metadata": {}, 173 | "outputs": [ 174 | { 175 | "name": "stdout", 176 | "output_type": "stream", 177 | "text": [ 178 | "[[[[ 0. 0. 0. ... 0. 0.\n", 179 | " 0. ]\n", 180 | " [ 0. 0. 42.547802 ... 0. 0.\n", 181 | " 0. ]\n", 182 | " [ 1.0753415 0. 23.495632 ... 0. 0.\n", 183 | " 0. ]\n", 184 | " ...\n", 185 | " [ 0. 0. 0. ... 0. 0.\n", 186 | " 0. ]\n", 187 | " [ 0. 0. 0. ... 0. 0.\n", 188 | " 0. ]\n", 189 | " [ 0. 0. 0. ... 0. 0.\n", 190 | " 0. ]]\n", 191 | "\n", 192 | " [[ 0. 0. 36.33888 ... 0. 0.\n", 193 | " 3.4029133 ]\n", 194 | " [ 0. 0. 80.23636 ... 7.871857 0.\n", 195 | " 0. ]\n", 196 | " [ 0. 0. 48.751343 ... 0. 0.\n", 197 | " 0. ]\n", 198 | " ...\n", 199 | " [ 0. 0. 0. ... 4.5813737 0.\n", 200 | " 0. ]\n", 201 | " [ 0. 0. 0. ... 0. 0.\n", 202 | " 0. ]\n", 203 | " [ 0. 0. 0. ... 0. 0.\n", 204 | " 0. ]]\n", 205 | "\n", 206 | " [[ 0. 0. 9.853498 ... 0. 0.\n", 207 | " 2.4919808 ]\n", 208 | " [ 0. 0. 25.249674 ... 42.27692 0.\n", 209 | " 15.798368 ]\n", 210 | " [ 0. 0. 0. ... 26.4542 2.2518532\n", 211 | " 0. ]\n", 212 | " ...\n", 213 | " [ 0. 0. 0. ... 4.5165315 0.\n", 214 | " 0. ]\n", 215 | " [ 0. 0. 0. ... 0. 0.\n", 216 | " 0. ]\n", 217 | " [ 0. 0. 0. ... 0. 0.\n", 218 | " 0. ]]\n", 219 | "\n", 220 | " ...\n", 221 | "\n", 222 | " [[ 0. 0. 0. ... 0. 0.\n", 223 | " 0. ]\n", 224 | " [ 0. 0. 0. ... 0. 0.\n", 225 | " 0. ]\n", 226 | " [ 0. 0. 12.312049 ... 0. 0.\n", 227 | " 0. ]\n", 228 | " ...\n", 229 | " [ 0. 0. 22.432764 ... 0. 0.\n", 230 | " 0. ]\n", 231 | " [ 0. 0. 0. ... 0. 0.\n", 232 | " 0. ]\n", 233 | " [ 0. 0. 0. ... 0. 0.\n", 234 | " 0. ]]\n", 235 | "\n", 236 | " [[ 0. 0. 0. ... 0. 0.\n", 237 | " 0. ]\n", 238 | " [ 0. 0. 0. ... 0. 0.\n", 239 | " 0. ]\n", 240 | " [ 0. 0. 0. ... 0. 0.\n", 241 | " 0. ]\n", 242 | " ...\n", 243 | " [ 0. 0. 10.4888525 ... 0. 0.\n", 244 | " 0. ]\n", 245 | " [ 0. 0. 0. ... 0. 0.\n", 246 | " 0. ]\n", 247 | " [ 0. 0. 0. ... 0. 0.\n", 248 | " 0. ]]\n", 249 | "\n", 250 | " [[ 0. 0. 0. ... 0. 0.\n", 251 | " 0. ]\n", 252 | " [ 0. 0. 0. ... 0. 0.\n", 253 | " 0. ]\n", 254 | " [ 0. 0. 0.50065726 ... 0. 0.\n", 255 | " 0. ]\n", 256 | " ...\n", 257 | " [ 0. 0. 0. ... 0. 0.\n", 258 | " 0. ]\n", 259 | " [ 0. 0. 0. ... 0. 0.\n", 260 | " 0. ]\n", 261 | " [ 0. 0. 0. ... 0. 0.\n", 262 | " 0. 
]]]]\n" 263 | ] 264 | } 265 | ], 266 | "source": [ 267 | "print(features)" 268 | ] 269 | }, 270 | { 271 | "cell_type": "code", 272 | "execution_count": 49, 273 | "metadata": {}, 274 | "outputs": [ 275 | { 276 | "name": "stdout", 277 | "output_type": "stream", 278 | "text": [ 279 | "(1, 7, 7, 512)\n" 280 | ] 281 | } 282 | ], 283 | "source": [ 284 | "print(features.shape)" 285 | ] 286 | }, 287 | { 288 | "cell_type": "markdown", 289 | "metadata": {}, 290 | "source": [ 291 | "# 使用 cosine_similarity 比較特徵向量" 292 | ] 293 | }, 294 | { 295 | "cell_type": "code", 296 | "execution_count": 50, 297 | "metadata": {}, 298 | "outputs": [ 299 | { 300 | "data": { 301 | "text/plain": [ 302 | "array(['bird01.jpg', 'elephant.jpg', 'elephant2.jpg', 'input.jpg',\n", 303 | " 'style.jpg', 'Tiger.jpg', 'Tiger2.jpg', 'Tiger3.jpg', '太陽花.jpg'],\n", 304 | " dtype=' test_function -> test_function\n", 126 | "output_type": "error", 127 | "traceback": [ 128 | "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", 129 | "\u001b[1;31mInternalError\u001b[0m Traceback (most recent call last)", 130 | "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcompile\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"adam\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m\"binary_crossentropy\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmetrics\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m\"accuracy\"\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx_train\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0my_train\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m32\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mepochs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m2\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvalidation_split\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m0.2\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 3\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msave\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'lstm_imdb.h5'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 131 | "\u001b[1;32mC:\\anaconda3\\lib\\site-packages\\tensorflow\\python\\keras\\engine\\training.py\u001b[0m in \u001b[0;36m_method_wrapper\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 106\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m_method_wrapper\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 107\u001b[0m \u001b[1;32mif\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_in_multi_worker_mode\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m \u001b[1;31m# pylint: disable=protected-access\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 108\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mmethod\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m 
\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 109\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 110\u001b[0m \u001b[1;31m# Running inside `run_distribute_coordinator` already.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 132 | "\u001b[1;32mC:\\anaconda3\\lib\\site-packages\\tensorflow\\python\\keras\\engine\\training.py\u001b[0m in \u001b[0;36mfit\u001b[1;34m(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)\u001b[0m\n\u001b[0;32m 1121\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1122\u001b[0m steps_per_execution=self._steps_per_execution)\n\u001b[1;32m-> 1123\u001b[1;33m val_logs = self.evaluate(\n\u001b[0m\u001b[0;32m 1124\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mval_x\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1125\u001b[0m \u001b[0my\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mval_y\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 133 | "\u001b[1;32mC:\\anaconda3\\lib\\site-packages\\tensorflow\\python\\keras\\engine\\training.py\u001b[0m in \u001b[0;36m_method_wrapper\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 106\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m_method_wrapper\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 107\u001b[0m \u001b[1;32mif\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_in_multi_worker_mode\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m \u001b[1;31m# pylint: disable=protected-access\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 108\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mmethod\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 109\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 110\u001b[0m \u001b[1;31m# Running inside `run_distribute_coordinator` already.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 134 | "\u001b[1;32mC:\\anaconda3\\lib\\site-packages\\tensorflow\\python\\keras\\engine\\training.py\u001b[0m in \u001b[0;36mevaluate\u001b[1;34m(self, x, y, batch_size, verbose, sample_weight, steps, callbacks, max_queue_size, workers, use_multiprocessing, return_dict)\u001b[0m\n\u001b[0;32m 1377\u001b[0m \u001b[1;32mwith\u001b[0m \u001b[0mtrace\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTrace\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'TraceContext'\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mgraph_type\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m'test'\u001b[0m\u001b[1;33m,\u001b[0m 
\u001b[0mstep_num\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mstep\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1378\u001b[0m \u001b[0mcallbacks\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mon_test_batch_begin\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mstep\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1379\u001b[1;33m \u001b[0mtmp_logs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtest_function\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0miterator\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1380\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mdata_handler\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshould_sync\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1381\u001b[0m \u001b[0mcontext\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0masync_wait\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 135 | "\u001b[1;32mC:\\anaconda3\\lib\\site-packages\\tensorflow\\python\\eager\\def_function.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *args, **kwds)\u001b[0m\n\u001b[0;32m 778\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 779\u001b[0m \u001b[0mcompiler\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;34m\"nonXla\"\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 780\u001b[1;33m \u001b[0mresult\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 781\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 782\u001b[0m \u001b[0mnew_tracing_count\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_get_tracing_count\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 136 | "\u001b[1;32mC:\\anaconda3\\lib\\site-packages\\tensorflow\\python\\eager\\def_function.py\u001b[0m in \u001b[0;36m_call\u001b[1;34m(self, *args, **kwds)\u001b[0m\n\u001b[0;32m 812\u001b[0m \u001b[1;31m# In this case we have not created variables on the first call. 
So we can\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 813\u001b[0m \u001b[1;31m# run the first trace but we should fail if variables are created.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 814\u001b[1;33m \u001b[0mresults\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_stateful_fn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwds\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 815\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_created_variables\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 816\u001b[0m raise ValueError(\"Creating variables on a non-first call to a function\"\n", 137 | "\u001b[1;32mC:\\anaconda3\\lib\\site-packages\\tensorflow\\python\\eager\\function.py\u001b[0m in \u001b[0;36m__call__\u001b[1;34m(self, *args, **kwargs)\u001b[0m\n\u001b[0;32m 2827\u001b[0m \u001b[1;32mwith\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_lock\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2828\u001b[0m \u001b[0mgraph_function\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_maybe_define_function\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 2829\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mgraph_function\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_filtered_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;31m# pylint: disable=protected-access\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 2830\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2831\u001b[0m \u001b[1;33m@\u001b[0m\u001b[0mproperty\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 138 | "\u001b[1;32mC:\\anaconda3\\lib\\site-packages\\tensorflow\\python\\eager\\function.py\u001b[0m in \u001b[0;36m_filtered_call\u001b[1;34m(self, args, kwargs, cancellation_manager)\u001b[0m\n\u001b[0;32m 1841\u001b[0m \u001b[0;31m`\u001b[0m\u001b[0margs\u001b[0m\u001b[0;31m`\u001b[0m \u001b[1;32mand\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m`\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;31m`\u001b[0m\u001b[1;33m.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1842\u001b[0m \"\"\"\n\u001b[1;32m-> 1843\u001b[1;33m return self._call_flat(\n\u001b[0m\u001b[0;32m 1844\u001b[0m [t for t in nest.flatten((args, kwargs), expand_composites=True)\n\u001b[0;32m 1845\u001b[0m if isinstance(t, (ops.Tensor,\n", 139 | "\u001b[1;32mC:\\anaconda3\\lib\\site-packages\\tensorflow\\python\\eager\\function.py\u001b[0m in \u001b[0;36m_call_flat\u001b[1;34m(self, args, captured_inputs, cancellation_manager)\u001b[0m\n\u001b[0;32m 1921\u001b[0m and executing_eagerly):\n\u001b[0;32m 1922\u001b[0m \u001b[1;31m# No tape is watching; skip to running the function.\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 1923\u001b[1;33m return 
self._build_call_outputs(self._inference_function.call(\n\u001b[0m\u001b[0;32m 1924\u001b[0m ctx, args, cancellation_manager=cancellation_manager))\n\u001b[0;32m 1925\u001b[0m forward_backward = self._select_forward_and_backward_functions(\n", 140 | "\u001b[1;32mC:\\anaconda3\\lib\\site-packages\\tensorflow\\python\\eager\\function.py\u001b[0m in \u001b[0;36mcall\u001b[1;34m(self, ctx, args, cancellation_manager)\u001b[0m\n\u001b[0;32m 543\u001b[0m \u001b[1;32mwith\u001b[0m \u001b[0m_InterpolateFunctionError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 544\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mcancellation_manager\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 545\u001b[1;33m outputs = execute.execute(\n\u001b[0m\u001b[0;32m 546\u001b[0m \u001b[0mstr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msignature\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mname\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 547\u001b[0m \u001b[0mnum_outputs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_num_outputs\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 141 | "\u001b[1;32mC:\\anaconda3\\lib\\site-packages\\tensorflow\\python\\eager\\execute.py\u001b[0m in \u001b[0;36mquick_execute\u001b[1;34m(op_name, num_outputs, inputs, attrs, ctx, name)\u001b[0m\n\u001b[0;32m 57\u001b[0m \u001b[1;32mtry\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 58\u001b[0m \u001b[0mctx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mensure_initialized\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 59\u001b[1;33m tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,\n\u001b[0m\u001b[0;32m 60\u001b[0m inputs, attrs, num_outputs)\n\u001b[0;32m 61\u001b[0m \u001b[1;32mexcept\u001b[0m \u001b[0mcore\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_NotOkStatusException\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", 142 | "\u001b[1;31mInternalError\u001b[0m: Failed to call ThenRnnForward with model config: [rnn_mode, rnn_input_mode, rnn_direction_mode]: 2, 0, 0 , [num_layers, input_size, num_units, dir_count, max_seq_length, batch_size, cell_num_units]: [1, 128, 64, 1, 200, 32, 64] \n\t [[{{node CudnnRNN}}]]\n\t [[functional_1/bidirectional/forward_lstm/PartitionedCall]] [Op:__inference_test_function_12202]\n\nFunction call stack:\ntest_function -> test_function -> test_function\n" 143 | ] 144 | } 145 | ], 146 | "source": [ 147 | "model.compile(\"adam\", \"binary_crossentropy\", metrics=[\"accuracy\"])\n", 148 | "model.fit(x_train, y_train, batch_size=32, epochs=2, validation_split=0.2)\n" 149 | ] 150 | }, 151 | { 152 | "cell_type": "markdown", 153 | "metadata": {}, 154 | "source": [ 155 | "## 模型評估" 156 | ] 157 | }, 158 | { 159 | "cell_type": "code", 160 | "execution_count": null, 161 | "metadata": {}, 162 | "outputs": [], 163 | "source": [ 164 | "model.evaluate(x_test, y_test)" 165 | ] 166 | } 167 | ], 168 | "metadata": { 169 | "colab": { 170 | "collapsed_sections": [], 171 | "name": "bidirectional_lstm_imdb", 172 | "provenance": [], 173 | 
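The `InternalError: Failed to call ThenRnnForward` above is a CuDNN-level failure that often stems from the GPU running out of memory for the LSTM kernels. A commonly used workaround (an assumption about the runtime, not part of the original notebook) is to enable memory growth before the model is built:

import tensorflow as tf
for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)  # allocate GPU memory on demand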
"toc_visible": true 174 | }, 175 | "kernelspec": { 176 | "display_name": "Python 3", 177 | "language": "python", 178 | "name": "python3" 179 | }, 180 | "language_info": { 181 | "codemirror_mode": { 182 | "name": "ipython", 183 | "version": 3 184 | }, 185 | "file_extension": ".py", 186 | "mimetype": "text/x-python", 187 | "name": "python", 188 | "nbconvert_exporter": "python", 189 | "pygments_lexer": "ipython3", 190 | "version": "3.8.3" 191 | } 192 | }, 193 | "nbformat": 4, 194 | "nbformat_minor": 1 195 | } 196 | -------------------------------------------------------------------------------- /28_01_agent_env.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | # 環境類別 4 | class Environment: 5 | # 初始化 6 | def __init__(self): 7 | # 最多走10步 8 | self.steps_left = 10 9 | 10 | def get_observation(self): 11 | # 狀態空間(State Space) 12 | return [0.0, 1.0, 2.0] 13 | 14 | def get_actions(self): 15 | # 行動空間(Action Space) 16 | return [0, 1] 17 | 18 | def is_done(self): 19 | # 回合(Episode)是否結束 20 | return self.steps_left == 0 21 | 22 | # 步驟 23 | def step(self, action): 24 | # 回合(Episode)結束 25 | if self.is_done(): 26 | raise Exception("Game is over") 27 | 28 | # 減少1步 29 | self.steps_left -= 1 30 | 31 | # 隨機策略,任意行動,並給予獎勵(亂數值) 32 | return random.choice(self.get_observation()), random.random() 33 | 34 | 35 | # 代理人類別 36 | class Agent: 37 | # 初始化 38 | def __init__(self): 39 | pass 40 | 41 | def action(self, env): 42 | # 觀察或是取得狀態 43 | current_obs = env.get_observation() 44 | # 採取行動 45 | actions = env.get_actions() 46 | return random.choice(actions) 47 | 48 | 49 | if __name__ == "__main__": 50 | # 實驗 51 | # 建立環境、代理人物件 52 | env = Environment() 53 | agent = Agent() 54 | 55 | # 累計報酬 56 | total_reward=0 57 | while not env.is_done(): 58 | # 採取行動 59 | action = agent.action(env) 60 | 61 | # 進到下一步 62 | state, reward = env.step(action) 63 | 64 | # 報酬累計 65 | #print(reward) 66 | total_reward += reward 67 | 68 | # 顯示累計報酬 69 | print(f"累計報酬: {total_reward:.4f}") 70 | -------------------------------------------------------------------------------- /28_02_cartpole_random.py: -------------------------------------------------------------------------------- 1 | import gym 2 | import pandas as pd 3 | 4 | # 載入遊戲 5 | env = gym.make("CartPole-v0") 6 | 7 | # 初始化 8 | total_rewards = 0.0 9 | total_steps = 0 10 | obs = env.reset() 11 | 12 | # 玩50回合 13 | no = 50 14 | all_steps=[] 15 | all_rewards=[] 16 | while True: 17 | # 隨機行動 18 | action = env.action_space.sample() 19 | # 進入下一步 20 | obs, reward, done, _ = env.step(action) 21 | # 渲染 22 | env.render() 23 | 24 | # 累計報酬 25 | total_rewards += reward 26 | # 累計步驟總數 27 | total_steps += 1 28 | if done: 29 | # 重置 30 | env.reset() 31 | 32 | all_rewards.append(total_rewards) 33 | all_steps.append(total_steps) 34 | total_rewards = 0 35 | total_steps=0 36 | no-=1 37 | if no == 0: 38 | break 39 | 40 | # 結束遊戲 41 | env.close() 42 | 43 | df = pd.DataFrame({'steps':all_steps, 'rewards':all_rewards}) 44 | print(df) 45 | -------------------------------------------------------------------------------- /29_01_cartpole_deterministic.py: -------------------------------------------------------------------------------- 1 | import gym 2 | import pandas as pd 3 | import math 4 | 5 | # 載入遊戲 6 | env = gym.make("CartPole-v0") 7 | 8 | # 初始化 9 | total_rewards = 0.0 10 | total_steps = 0 11 | observation = env.reset() 12 | 13 | # 台車行進方向 14 | left=0 15 | right=1 16 | 17 | max_angle = 8 18 | 19 | # 代理人類別 20 | class Agent: 21 | # 初始化 22 | def __init__(self): 23 | 
self.direction = left 24 | self.last_direction=right 25 | 26 | # 自訂策略 27 | def act(self, observation): 28 | # cart_position:台車位置(Cart Position) 29 | # cart_velocity:台車速度(Cart Velocity) 30 | # pole_angle:平衡桿角度(Pole Angle) 31 | # pole_velocity:平衡桿速度(Pole Velocity At Tip) 32 | cart_position, cart_velocity, pole_angle, pole_velocity = observation 33 | 34 | ''' 35 | 行動策略: 36 | 1. 設定每次行動採一左一右,盡量不離中心點。 37 | 2. 平衡桿角度偏右8度以上,就往右前進,直到角度偏右小於8度。 38 | 3. 反之,偏左也是同樣處理。 39 | ''' 40 | if pole_angle < math.radians(max_angle) and pole_angle > math.radians(-max_angle): 41 | self.direction = (self.last_direction + 1) % 2 42 | elif pole_angle >= math.radians(max_angle): 43 | self.direction = right 44 | else: 45 | self.direction = left 46 | 47 | self.last_direction = self.direction 48 | 49 | return self.direction 50 | 51 | # 玩50回合 52 | no = 50 53 | all_steps=[] 54 | all_rewards=[] 55 | agent = Agent() 56 | while True: 57 | # 依策略行動 58 | action = agent.act(observation) #env.action_space.sample() 59 | # 進入下一步 60 | observation, reward, done, _ = env.step(action) 61 | # 渲染 62 | env.render() 63 | 64 | # 累計報酬 65 | total_rewards += reward 66 | # 累計步驟總數 67 | total_steps += 1 68 | if done: 69 | # 重置 70 | env.reset() 71 | agent = Agent() 72 | 73 | all_rewards.append(total_rewards) 74 | all_steps.append(total_steps) 75 | total_rewards = 0 76 | total_steps=0 77 | no-=1 78 | if no == 0: 79 | break 80 | 81 | # 結束遊戲 82 | env.close() 83 | 84 | df = pd.DataFrame({'steps':all_steps, 'rewards':all_rewards}) 85 | print(df) 86 | -------------------------------------------------------------------------------- /29_02_Policy_Evaluation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 策略評估(Policy evaluation)" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "from IPython.core.debugger import set_trace\n", 17 | "import numpy as np\n", 18 | "import pprint\n", 19 | "import sys\n", 20 | "from lib.envs.gridworld import GridworldEnv" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 2, 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "# 載入遊戲\n", 30 | "pp = pprint.PrettyPrinter(indent=2)\n", 31 | "env = GridworldEnv()" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": 3, 37 | "metadata": {}, 38 | "outputs": [], 39 | "source": [ 40 | "# 策略評估函數\n", 41 | "def policy_eval(policy, env, discount_factor=1.0, theta=0.00001):\n", 42 | " \"\"\"\n", 43 | " policy: [S, A] shaped matrix representing the policy.\n", 44 | " env.P:狀態轉移機率矩陣\n", 45 | " env.P[s][a]:(prob, next_state, reward, done) 陣列\n", 46 | " env.nS:狀態總數\n", 47 | " env.nA:行動總數\n", 48 | " theta: 狀態值函數變動 <= theta,即停止進行。\n", 49 | " discount_factor: 獎勵折扣率(Gamma)\n", 50 | " \n", 51 | " 傳回【狀態值函數】\n", 52 | " \"\"\"\n", 53 | " # 一開始【狀態值函數】均為 0\n", 54 | " V = np.zeros(env.nS)\n", 55 | " while True:\n", 56 | " delta = 0\n", 57 | " # For each state, perform a \"full backup\"\n", 58 | " for s in range(env.nS):\n", 59 | " v = 0\n", 60 | " # 執行每一種可能的行動\n", 61 | " for a, action_prob in enumerate(policy[s]):\n", 62 | " # 行動可能到達的每一種狀態\n", 63 | " for prob, next_state, reward, done in env.P[s][a]:\n", 64 | " # 計算【狀態值函數】期望值,參考圖三. 
狀態值函數公式\n", 65 | " v += action_prob * prob * (reward + discount_factor * V[next_state])\n", 66 | " # 找出每一個位置的這一輪與上一輪狀態值函數差異最大者\n", 67 | " delta = max(delta, np.abs(v - V[s]))\n", 68 | " V[s] = v\n", 69 | " # 最大差異 < 事先設定值 theta,則停止評估\n", 70 | " if delta < theta:\n", 71 | " break\n", 72 | " return np.array(V)" 73 | ] 74 | }, 75 | { 76 | "cell_type": "markdown", 77 | "metadata": {}, 78 | "source": [ 79 | "## 測試" 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": 4, 85 | "metadata": {}, 86 | "outputs": [], 87 | "source": [ 88 | "# 隨機策略,每一個位置往上、下、左、右走的機率均為 0.25。\n", 89 | "random_policy = np.ones([env.nS, env.nA]) / env.nA\n", 90 | "# 策略評估\n", 91 | "v = policy_eval(random_policy, env)" 92 | ] 93 | }, 94 | { 95 | "cell_type": "markdown", 96 | "metadata": {}, 97 | "source": [ 98 | "## 顯示狀態值函數" 99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": 5, 104 | "metadata": {}, 105 | "outputs": [ 106 | { 107 | "name": "stdout", 108 | "output_type": "stream", 109 | "text": [ 110 | "Value Function:\n", 111 | "[ 0. -13.99993529 -19.99990698 -21.99989761 -13.99993529\n", 112 | " -17.9999206 -19.99991379 -19.99991477 -19.99990698 -19.99991379\n", 113 | " -17.99992725 -13.99994569 -21.99989761 -19.99991477 -13.99994569\n", 114 | " 0. ]\n", 115 | "\n", 116 | "Reshaped Grid Value Function:\n", 117 | "[[ 0. -13.99993529 -19.99990698 -21.99989761]\n", 118 | " [-13.99993529 -17.9999206 -19.99991379 -19.99991477]\n", 119 | " [-19.99990698 -19.99991379 -17.99992725 -13.99994569]\n", 120 | " [-21.99989761 -19.99991477 -13.99994569 0. ]]\n", 121 | "\n" 122 | ] 123 | } 124 | ], 125 | "source": [ 126 | "print(\"Value Function:\")\n", 127 | "print(v)\n", 128 | "print(\"\")\n", 129 | "\n", 130 | "print(\"Reshaped Grid Value Function:\")\n", 131 | "print(v.reshape(env.shape))\n", 132 | "print(\"\")" 133 | ] 134 | }, 135 | { 136 | "cell_type": "markdown", 137 | "metadata": {}, 138 | "source": [ 139 | "## 顯示最後結果" 140 | ] 141 | }, 142 | { 143 | "cell_type": "code", 144 | "execution_count": 6, 145 | "metadata": {}, 146 | "outputs": [], 147 | "source": [ 148 | "# 最後結果\n", 149 | "# Test: Make sure the evaluated policy is what we expected\n", 150 | "expected_v = np.array([0, -14, -20, -22, -14, -18, -20, -20, -20, -20, -18, -14, -22, -20, -14, 0])\n", 151 | "np.testing.assert_array_almost_equal(v, expected_v, decimal=2)" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": null, 157 | "metadata": {}, 158 | "outputs": [], 159 | "source": [] 160 | } 161 | ], 162 | "metadata": { 163 | "kernelspec": { 164 | "display_name": "Python 3", 165 | "language": "python", 166 | "name": "python3" 167 | }, 168 | "language_info": { 169 | "codemirror_mode": { 170 | "name": "ipython", 171 | "version": 3 172 | }, 173 | "file_extension": ".py", 174 | "mimetype": "text/x-python", 175 | "name": "python", 176 | "nbconvert_exporter": "python", 177 | "pygments_lexer": "ipython3", 178 | "version": "3.8.3" 179 | } 180 | }, 181 | "nbformat": 4, 182 | "nbformat_minor": 1 183 | } 184 | -------------------------------------------------------------------------------- /29_03_Policy_Iteration.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 策略迭代(Policy Iteration)" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 4, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import numpy as np\n", 17 | "import pprint\n", 18 | "import 
sys\n", 19 | "from lib.envs.gridworld import GridworldEnv" 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": 5, 25 | "metadata": {}, 26 | "outputs": [], 27 | "source": [ 28 | "# 載入遊戲\n", 29 | "pp = pprint.PrettyPrinter(indent=2)\n", 30 | "env = GridworldEnv()" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 6, 36 | "metadata": {}, 37 | "outputs": [], 38 | "source": [ 39 | "# 策略評估函數\n", 40 | "def policy_eval(policy, env, discount_factor=1.0, theta=0.00001):\n", 41 | " \"\"\"\n", 42 | " policy: [S, A] shaped matrix representing the policy.\n", 43 | " env.P:狀態轉移機率矩陣\n", 44 | " env.P[s][a]:(prob, next_state, reward, done) 陣列\n", 45 | " env.nS:狀態總數\n", 46 | " env.nA:行動總數\n", 47 | " theta: 狀態值函數變動 <= theta,即停止進行。\n", 48 | " discount_factor: 獎勵折扣率(Gamma)\n", 49 | " \n", 50 | " 傳回【狀態值函數】\n", 51 | " \"\"\"\n", 52 | " # 一開始【狀態值函數】均為 0\n", 53 | " V = np.zeros(env.nS)\n", 54 | " while True:\n", 55 | " delta = 0\n", 56 | " # For each state, perform a \"full backup\"\n", 57 | " for s in range(env.nS):\n", 58 | " v = 0\n", 59 | " # 執行每一種可能的行動\n", 60 | " for a, action_prob in enumerate(policy[s]):\n", 61 | " # 行動可能到達的每一種狀態\n", 62 | " for prob, next_state, reward, done in env.P[s][a]:\n", 63 | " # 計算【狀態值函數】期望值,參考圖三. 狀態值函數公式\n", 64 | " v += action_prob * prob * (reward + discount_factor * V[next_state])\n", 65 | " # 找出每一個位置的這一輪與上一輪狀態值函數差異最大者\n", 66 | " delta = max(delta, np.abs(v - V[s]))\n", 67 | " V[s] = v\n", 68 | " # 最大差異 < 事先設定值 theta,則停止評估\n", 69 | " if delta < theta:\n", 70 | " break\n", 71 | " return np.array(V)" 72 | ] 73 | }, 74 | { 75 | "cell_type": "code", 76 | "execution_count": 7, 77 | "metadata": {}, 78 | "outputs": [], 79 | "source": [ 80 | "# 策略改善函數\n", 81 | "def policy_improvement(env, policy_eval_fn=policy_eval, discount_factor=1.0):\n", 82 | " \"\"\"\n", 83 | " policy_eval_fn: 策略評估函數。\n", 84 | " discount_factor: 獎勵折扣率(Gamma)\n", 85 | " \n", 86 | " Returns:\n", 87 | " 傳回 (policy, V) 陣列. \n", 88 | " - policy:是最佳策略, [S, A] 二為矩陣,含每個狀態/行動組合的機率分配.\n", 89 | " - V:狀態值函數。\n", 90 | " \n", 91 | " \"\"\"\n", 92 | "\n", 93 | " # 走下一步的處理,計算行動值函數\n", 94 | " def one_step_lookahead(state, V):\n", 95 | " \"\"\"\n", 96 | " state: 狀態值\n", 97 | " V: 狀態值函數\n", 98 | " \n", 99 | " 傳回【行動值函數】\n", 100 | " \"\"\"\n", 101 | " A = np.zeros(env.nA)\n", 102 | " # 圖四. 
行動值函數公式\n", 103 | " for a in range(env.nA):\n", 104 | " for prob, next_state, reward, done in env.P[state][a]:\n", 105 | " A[a] += prob * (reward + discount_factor * V[next_state])\n", 106 | " return A\n", 107 | " \n", 108 | " # 隨機策略,每一個位置往上、下、左、右走的機率均為 0.25。\n", 109 | " policy = np.ones([env.nS, env.nA]) / env.nA\n", 110 | " \n", 111 | " while True:\n", 112 | " # 策略評估\n", 113 | " V = policy_eval_fn(policy, env, discount_factor)\n", 114 | " \n", 115 | " # 策略是否趨於穩定的旗標\n", 116 | " policy_stable = True\n", 117 | " \n", 118 | " # 每一個狀態均採貪婪戰術\n", 119 | " for s in range(env.nS):\n", 120 | " # 貪婪戰術\n", 121 | " chosen_a = np.argmax(policy[s])\n", 122 | " \n", 123 | " # 走下一步的處理,計算行動值函數\n", 124 | " action_values = one_step_lookahead(s, V)\n", 125 | " # 找出最大行動值函數\n", 126 | " best_a = np.argmax(action_values)\n", 127 | " \n", 128 | " # 貪婪戰術 不等於 最大行動值函數,繼續迭代\n", 129 | " if chosen_a != best_a:\n", 130 | " policy_stable = False\n", 131 | " # 設定目前狀態下最佳的行動\n", 132 | " policy[s] = np.eye(env.nA)[best_a]\n", 133 | " \n", 134 | " # 策略趨於穩定就結束訓練\n", 135 | " if policy_stable:\n", 136 | " return policy, V" 137 | ] 138 | }, 139 | { 140 | "cell_type": "markdown", 141 | "metadata": {}, 142 | "source": [ 143 | "## 測試:顯示策略,即在每個狀態/行動組合的機率分配" 144 | ] 145 | }, 146 | { 147 | "cell_type": "code", 148 | "execution_count": 8, 149 | "metadata": {}, 150 | "outputs": [ 151 | { 152 | "name": "stdout", 153 | "output_type": "stream", 154 | "text": [ 155 | "Policy Probability Distribution:\n", 156 | "[[1. 0. 0. 0.]\n", 157 | " [0. 0. 0. 1.]\n", 158 | " [0. 0. 0. 1.]\n", 159 | " [0. 0. 1. 0.]\n", 160 | " [1. 0. 0. 0.]\n", 161 | " [1. 0. 0. 0.]\n", 162 | " [1. 0. 0. 0.]\n", 163 | " [0. 0. 1. 0.]\n", 164 | " [1. 0. 0. 0.]\n", 165 | " [1. 0. 0. 0.]\n", 166 | " [0. 1. 0. 0.]\n", 167 | " [0. 0. 1. 0.]\n", 168 | " [1. 0. 0. 0.]\n", 169 | " [0. 1. 0. 0.]\n", 170 | " [0. 1. 0. 0.]\n", 171 | " [1. 0. 0. 0.]]\n", 172 | "\n" 173 | ] 174 | } 175 | ], 176 | "source": [ 177 | "policy, v = policy_improvement(env)\n", 178 | "print(\"Policy Probability Distribution:\")\n", 179 | "print(policy)\n", 180 | "print(\"\")" 181 | ] 182 | }, 183 | { 184 | "cell_type": "markdown", 185 | "metadata": {}, 186 | "source": [ 187 | "## 顯示狀態值函數" 188 | ] 189 | }, 190 | { 191 | "cell_type": "code", 192 | "execution_count": 9, 193 | "metadata": { 194 | "scrolled": true 195 | }, 196 | "outputs": [ 197 | { 198 | "name": "stdout", 199 | "output_type": "stream", 200 | "text": [ 201 | "Reshaped Grid Policy (0=up, 1=right, 2=down, 3=left):\n", 202 | "[[0 3 3 2]\n", 203 | " [0 0 0 2]\n", 204 | " [0 0 1 2]\n", 205 | " [0 1 1 0]]\n", 206 | "\n", 207 | "Value Function:\n", 208 | "[ 0. -1. -2. -3. -1. -2. -3. -2. -2. -3. -2. -1. -3. -2. -1. 0.]\n", 209 | "\n", 210 | "Reshaped Grid Value Function:\n", 211 | "[[ 0. -1. -2. -3.]\n", 212 | " [-1. -2. -3. -2.]\n", 213 | " [-2. -3. -2. -1.]\n", 214 | " [-3. -2. -1. 
0.]]\n", 215 | "\n" 216 | ] 217 | } 218 | ], 219 | "source": [ 220 | "print(\"Reshaped Grid Policy (0=up, 1=right, 2=down, 3=left):\")\n", 221 | "print(np.reshape(np.argmax(policy, axis=1), env.shape))\n", 222 | "print(\"\")\n", 223 | "\n", 224 | "print(\"Value Function:\")\n", 225 | "print(v)\n", 226 | "print(\"\")\n", 227 | "\n", 228 | "print(\"Reshaped Grid Value Function:\")\n", 229 | "print(v.reshape(env.shape))\n", 230 | "print(\"\")" 231 | ] 232 | }, 233 | { 234 | "cell_type": "markdown", 235 | "metadata": {}, 236 | "source": [ 237 | "## 顯示最後結果" 238 | ] 239 | }, 240 | { 241 | "cell_type": "code", 242 | "execution_count": 10, 243 | "metadata": {}, 244 | "outputs": [], 245 | "source": [ 246 | "# Test the value function\n", 247 | "expected_v = np.array([ 0, -1, -2, -3, -1, -2, -3, -2, -2, -3, -2, -1, -3, -2, -1, 0])\n", 248 | "np.testing.assert_array_almost_equal(v, expected_v, decimal=2)" 249 | ] 250 | }, 251 | { 252 | "cell_type": "code", 253 | "execution_count": null, 254 | "metadata": {}, 255 | "outputs": [], 256 | "source": [] 257 | } 258 | ], 259 | "metadata": { 260 | "kernelspec": { 261 | "display_name": "Python 3", 262 | "language": "python", 263 | "name": "python3" 264 | }, 265 | "language_info": { 266 | "codemirror_mode": { 267 | "name": "ipython", 268 | "version": 3 269 | }, 270 | "file_extension": ".py", 271 | "mimetype": "text/x-python", 272 | "name": "python", 273 | "nbconvert_exporter": "python", 274 | "pygments_lexer": "ipython3", 275 | "version": "3.8.3" 276 | } 277 | }, 278 | "nbformat": 4, 279 | "nbformat_minor": 1 280 | } 281 | -------------------------------------------------------------------------------- /29_04_Value_Iteration.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# 值迭代(Value Iteration)" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import numpy as np\n", 17 | "import pprint\n", 18 | "import sys\n", 19 | "from lib.envs.gridworld import GridworldEnv" 20 | ] 21 | }, 22 | { 23 | "cell_type": "markdown", 24 | "metadata": {}, 25 | "source": [ 26 | "## 載入遊戲" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": 2, 32 | "metadata": {}, 33 | "outputs": [], 34 | "source": [ 35 | "pp = pprint.PrettyPrinter(indent=2)\n", 36 | "env = GridworldEnv()" 37 | ] 38 | }, 39 | { 40 | "cell_type": "markdown", 41 | "metadata": {}, 42 | "source": [ 43 | "## 定義值迭代(Value Iteration)函數" 44 | ] 45 | }, 46 | { 47 | "cell_type": "code", 48 | "execution_count": 3, 49 | "metadata": {}, 50 | "outputs": [], 51 | "source": [ 52 | "def value_iteration(env, theta=0.0001, discount_factor=1.0):\n", 53 | " \"\"\"\n", 54 | " env.P:狀態轉移機率矩陣\n", 55 | " env.P[s][a]:(prob, next_state, reward, done) 陣列\n", 56 | " env.nS:狀態總數\n", 57 | " env.nA:行動總數\n", 58 | " theta: 狀態值函數變動 <= theta,即停止進行。\n", 59 | " discount_factor: 獎勵折扣率(Gamma)\n", 60 | " \n", 61 | " Returns:\n", 62 | " 傳回 (policy, V) 陣列. 
\n", 63 | " - policy:是最佳策略, [S, A] 二為矩陣,含每個狀態/行動組合的機率分配.\n", 64 | " - V:狀態值函數。 \n", 65 | " \"\"\"\n", 66 | " \n", 67 | " def one_step_lookahead(state, V):\n", 68 | " \"\"\"\n", 69 | " state: 狀態值\n", 70 | " V: 狀態值函數\n", 71 | " \n", 72 | " 傳回【行動值函數】\n", 73 | " \"\"\"\n", 74 | " A = np.zeros(env.nA)\n", 75 | " for a in range(env.nA):\n", 76 | " for prob, next_state, reward, done in env.P[state][a]:\n", 77 | " A[a] += prob * (reward + discount_factor * V[next_state])\n", 78 | " return A\n", 79 | " \n", 80 | " V = np.zeros(env.nS)\n", 81 | " while True:\n", 82 | " delta = 0\n", 83 | " # 每一個狀態均採貪婪戰術\n", 84 | " for s in range(env.nS):\n", 85 | " # 走下一步的處理,計算行動值函數\n", 86 | " A = one_step_lookahead(s, V)\n", 87 | " # 找出最大行動值函數\n", 88 | " best_action_value = np.max(A)\n", 89 | " # 找出每一個位置的這一輪與上一輪狀態值函數差異最大者\n", 90 | " delta = max(delta, np.abs(best_action_value - V[s]))\n", 91 | " # 更新值函數\n", 92 | " V[s] = best_action_value \n", 93 | " # 最大差異 < 事先設定值 theta,則停止評估\n", 94 | " if delta < theta:\n", 95 | " break\n", 96 | " \n", 97 | " # 再作一次,產生 policy\n", 98 | " policy = np.zeros([env.nS, env.nA])\n", 99 | " for s in range(env.nS):\n", 100 | " # 走下一步的處理,計算行動值函數\n", 101 | " A = one_step_lookahead(s, V)\n", 102 | " best_action = np.argmax(A)\n", 103 | " # 設定目前狀態下最佳的行動\n", 104 | " policy[s, best_action] = 1.0\n", 105 | " \n", 106 | " return policy, V" 107 | ] 108 | }, 109 | { 110 | "cell_type": "markdown", 111 | "metadata": {}, 112 | "source": [ 113 | "## 測試:顯示策略,即在每個狀態/行動組合的機率分配" 114 | ] 115 | }, 116 | { 117 | "cell_type": "code", 118 | "execution_count": null, 119 | "metadata": {}, 120 | "outputs": [], 121 | "source": [ 122 | "policy, v = value_iteration(env)\n", 123 | "\n", 124 | "print(\"Policy Probability Distribution:\")\n", 125 | "print(policy)\n", 126 | "print(\"\")" 127 | ] 128 | }, 129 | { 130 | "cell_type": "markdown", 131 | "metadata": {}, 132 | "source": [ 133 | "## 顯示狀態值函數" 134 | ] 135 | }, 136 | { 137 | "cell_type": "code", 138 | "execution_count": 4, 139 | "metadata": {}, 140 | "outputs": [ 141 | { 142 | "name": "stdout", 143 | "output_type": "stream", 144 | "text": [ 145 | "Policy Probability Distribution:\n", 146 | "[[1. 0. 0. 0.]\n", 147 | " [0. 0. 0. 1.]\n", 148 | " [0. 0. 0. 1.]\n", 149 | " [0. 0. 1. 0.]\n", 150 | " [1. 0. 0. 0.]\n", 151 | " [1. 0. 0. 0.]\n", 152 | " [1. 0. 0. 0.]\n", 153 | " [0. 0. 1. 0.]\n", 154 | " [1. 0. 0. 0.]\n", 155 | " [1. 0. 0. 0.]\n", 156 | " [0. 1. 0. 0.]\n", 157 | " [0. 0. 1. 0.]\n", 158 | " [1. 0. 0. 0.]\n", 159 | " [0. 1. 0. 0.]\n", 160 | " [0. 1. 0. 0.]\n", 161 | " [1. 0. 0. 0.]]\n", 162 | "\n", 163 | "Reshaped Grid Policy (0=up, 1=right, 2=down, 3=left):\n", 164 | "[[0 3 3 2]\n", 165 | " [0 0 0 2]\n", 166 | " [0 0 1 2]\n", 167 | " [0 1 1 0]]\n", 168 | "\n", 169 | "Value Function:\n", 170 | "[ 0. -1. -2. -3. -1. -2. -3. -2. -2. -3. -2. -1. -3. -2. -1. 0.]\n", 171 | "\n", 172 | "Reshaped Grid Value Function:\n", 173 | "[[ 0. -1. -2. -3.]\n", 174 | " [-1. -2. -3. -2.]\n", 175 | " [-2. -3. -2. -1.]\n", 176 | " [-3. -2. -1. 
0.]]\n", 177 | "\n" 178 | ] 179 | } 180 | ], 181 | "source": [ 182 | "print(\"Reshaped Grid Policy (0=up, 1=right, 2=down, 3=left):\")\n", 183 | "print(np.reshape(np.argmax(policy, axis=1), env.shape))\n", 184 | "print(\"\")\n", 185 | "\n", 186 | "print(\"Value Function:\")\n", 187 | "print(v)\n", 188 | "print(\"\")\n", 189 | "\n", 190 | "print(\"Reshaped Grid Value Function:\")\n", 191 | "print(v.reshape(env.shape))\n", 192 | "print(\"\")" 193 | ] 194 | }, 195 | { 196 | "cell_type": "markdown", 197 | "metadata": {}, 198 | "source": [ 199 | "## 顯示最後結果" 200 | ] 201 | }, 202 | { 203 | "cell_type": "code", 204 | "execution_count": 5, 205 | "metadata": {}, 206 | "outputs": [], 207 | "source": [ 208 | "# Test the value function\n", 209 | "expected_v = np.array([ 0, -1, -2, -3, -1, -2, -3, -2, -2, -3, -2, -1, -3, -2, -1, 0])\n", 210 | "np.testing.assert_array_almost_equal(v, expected_v, decimal=2)" 211 | ] 212 | }, 213 | { 214 | "cell_type": "code", 215 | "execution_count": null, 216 | "metadata": {}, 217 | "outputs": [], 218 | "source": [] 219 | } 220 | ], 221 | "metadata": { 222 | "anaconda-cloud": {}, 223 | "kernelspec": { 224 | "display_name": "Python 3", 225 | "language": "python", 226 | "name": "python3" 227 | }, 228 | "language_info": { 229 | "codemirror_mode": { 230 | "name": "ipython", 231 | "version": 3 232 | }, 233 | "file_extension": ".py", 234 | "mimetype": "text/x-python", 235 | "name": "python", 236 | "nbconvert_exporter": "python", 237 | "pygments_lexer": "ipython3", 238 | "version": "3.8.3" 239 | } 240 | }, 241 | "nbformat": 4, 242 | "nbformat_minor": 1 243 | } 244 | -------------------------------------------------------------------------------- /30_01_AutoKeras_MNIST.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# AutoKeras MNIST 辨識" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 51, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import tensorflow as tf\n", 17 | "mnist = tf.keras.datasets.mnist\n" 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": 52, 23 | "metadata": {}, 24 | "outputs": [], 25 | "source": [ 26 | "# 匯入 MNIST 手寫阿拉伯數字 訓練資料\n", 27 | "(x_train, y_train),(x_test, y_test) = mnist.load_data()\n" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": 53, 33 | "metadata": {}, 34 | "outputs": [ 35 | { 36 | "name": "stdout", 37 | "output_type": "stream", 38 | "text": [ 39 | "Trial 1 Complete [00h 01m 52s]\n", 40 | "val_loss: 0.038410965353250504\n", 41 | "\n", 42 | "Best val_loss So Far: 0.038410965353250504\n", 43 | "Total elapsed time: 00h 01m 52s\n", 44 | "INFO:tensorflow:Oracle triggered exit\n", 45 | "Epoch 1/10\n", 46 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.1594 - accuracy: 0.9516\n", 47 | "Epoch 2/10\n", 48 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.0745 - accuracy: 0.9768\n", 49 | "Epoch 3/10\n", 50 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.0605 - accuracy: 0.9811\n", 51 | "Epoch 4/10\n", 52 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.0517 - accuracy: 0.9841\n", 53 | "Epoch 5/10\n", 54 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.0452 - accuracy: 0.9859\n", 55 | "Epoch 6/10\n", 56 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.0409 - accuracy: 0.9872\n", 57 | "Epoch 7/10\n", 58 | 
"1875/1875 [==============================] - 11s 6ms/step - loss: 0.0359 - accuracy: 0.9884\n", 59 | "Epoch 8/10\n", 60 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.0340 - accuracy: 0.9892\n", 61 | "Epoch 9/10\n", 62 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.0305 - accuracy: 0.9901\n", 63 | "Epoch 10/10\n", 64 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.0311 - accuracy: 0.9899\n", 65 | "INFO:tensorflow:Assets written to: .\\image_classifier\\best_model\\assets\n" 66 | ] 67 | } 68 | ], 69 | "source": [ 70 | "import autokeras as ak\n", 71 | "\n", 72 | "# 初始化影像分類器(image classifier)\n", 73 | "model = ak.ImageClassifier(\n", 74 | " overwrite=True,\n", 75 | " max_trials=1)\n", 76 | "# 訓練模型\n", 77 | "model.fit(x_train, y_train, epochs=10)\n" 78 | ] 79 | }, 80 | { 81 | "cell_type": "code", 82 | "execution_count": 54, 83 | "metadata": {}, 84 | "outputs": [ 85 | { 86 | "name": "stdout", 87 | "output_type": "stream", 88 | "text": [ 89 | "WARNING:tensorflow:11 out of the last 11 calls to .predict_function at 0x000002367826EB80> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n", 90 | "313/313 [==============================] - 1s 3ms/step - loss: 0.0347 - accuracy: 0.9881\n", 91 | "[0.03469261899590492, 0.988099992275238]\n" 92 | ] 93 | } 94 | ], 95 | "source": [ 96 | "# 預測\n", 97 | "predicted_y = model.predict(x_test)\n", 98 | "\n", 99 | "# 評估,打分數\n", 100 | "print(model.evaluate(x_test, y_test))" 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": 55, 106 | "metadata": {}, 107 | "outputs": [ 108 | { 109 | "name": "stdout", 110 | "output_type": "stream", 111 | "text": [ 112 | "prediction: 7 2 1 0 4 1 4 9 5 9 0 6 9 0 1 5 9 7 3 4\n", 113 | "actual : 7 2 1 0 4 1 4 9 5 9 0 6 9 0 1 5 9 7 3 4\n" 114 | ] 115 | } 116 | ], 117 | "source": [ 118 | "# 比較 20 筆\n", 119 | "print('prediction:', ' '.join(predicted_y[0:20].ravel()))\n", 120 | "print('actual :', ' '.join(y_test[0:20].astype(str)))\n" 121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "execution_count": 57, 126 | "metadata": {}, 127 | "outputs": [ 128 | { 129 | "data": { 130 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAPsAAAD4CAYAAAAq5pAIAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjIsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+WH4yJAAAOVUlEQVR4nO3df7BcdXnH8c8n4RIkBkiMhJggIE0L1CmglyCm09pxYICmBToFYUaEGWZiWxHpUCvFzsi0/2TaqtXWYqNkTCvEcfhR45gWaaqDDgVzoRQSQgJigJA0ASOQoObn0z/uiXMNd7972XP2B3ner5md3T3Pnj3P7NzPPbv7PWe/jggBOPRN6ncDAHqDsANJEHYgCcIOJEHYgSQO6+XGDveUOEJTe7lJIJWf61Xtjl0er1Yr7LbPl/Q5SZMlfTkiFpcef4Sm6my/v84mARQ8GKta1jp+G297sqQvSLpA0mmSrrB9WqfPB6C76nxmny/pqYh4OiJ2S/qapIuaaQtA0+qEfY6k58bc31Qt+yW2F9kesT2yR7tqbA5AHXXCPt6XAK859jYilkTEcEQMD2lKjc0BqKNO2DdJOn7M/bmSNtdrB0C31An7aknzbJ9k+3BJl0ta0UxbAJrW8dBbROy1fa2kezQ69LY0ItY21hmARtUaZ4+IlZJWNtQLgC7icFkgCcIOJEHYgSQIO5AEYQeSIOxAEoQdSIKwA0kQdiAJwg4kQdiBJAg7kARhB5Ig7EAShB1IgrADSRB2IAnCDiRB2IEkCDuQBGEHkujplM3AWIcdN6tY3z3vbV3b9tCG54v19X/xjmL9mMfHnRX5F2as+3mxPul7/1OsdwN7diAJwg4kQdiBJAg7kARhB5Ig7EAShB1IgnF21PLyB99TrP/4wtbjzTee+R/FdT90VPcmCL715bcX638w7e5iffqlR9Ta/sI57661fidqhd32Rkk7JO2TtDcihptoCkDzmtiz/05EvNjA8wDoIj6zA0nUDXtI+rbth2wvGu8BthfZHrE9ske7am4OQKfqvo1fEBGbbR8r6V7bT0TEfWMfEBFLJC2RpKM8I2puD0CHau3ZI2Jzdb1N0t2S5jfRFIDmdRx221NtTztwW9J5ktY01RiAZtV5Gz9L0t22DzzP7RFRHjhFz006/dRi/YmPTi3Wv3fe3xfrb528urz9Af0O+Jqjn23ziHrj6IOo47BHxNOSTm+wFwBdNJj/dgE0jrADSRB2IAnCDiRB2IEkOMX1EPfqSdOK9Q0X3NLmGd7UXDM99sWXWv8c9G3PnNXDTl7raD3V822yZweSIOxAEoQdSIKwA0kQdiAJwg4kQdiBJBhn74HD5s4p1td9Ym6xPuv+8vTARy1/oGVt0q7yjwNt2LO7WH9u7zHF+vGHvVSsX73mqpa1n6x7S3HdWavLvR9z/3PFeuzc2bJ29Eu9H+fuN/bsQBKEHUiCsANJEHYgCcIOJEHYgSQIO5AE4+wNmHzM0cX6/G/9qFj/t5krivUFI9e+7p4OmPLv5Z96/vjvXl2s71u7vliffOq8Yn3G+h+2ru3fUFy3nb211s6HPTuQBGEHkiDsQBKEHUiCsANJEHYgCcIOJME4+wRNOqL1FL677iiPs98087+K9V+760+K9VPuXlus7ytWy9qNo7ddf92TtdZH77Tds9teanub7TVjls2wfa/tJ6vr6d1tE0BdE3kb/xVJ5x+07EZJqyJinqRV1X0AA6xt2CPiPknbD1p8kaRl1e1lki5uuC8ADev0C7pZEbFFkqrrY1s90PYi2yO2R/ZoV4ebA1BX17+Nj4glETEcEcNDmtLtzQFoodOwb7U9W5Kq623NtQSgGzoN+wpJB34j+CpJ32imHQDd0nac3fZySe+TNNP2JkmfkrRY0tdtXyPpWUmXdrPJXpg8vTx6+MRf/2rL2vpT/6m47kNtvqo45a+eLtb3vfJK+QmACWgb9oi4okXp/Q33AqCLOFwWSIKwA0kQdiAJwg4kQdiBJDjFtbL5g6cW6+sv+YeWtRWvloftbl14brG+74XWP7cMNIU9O5AEYQeSIOxAEoQdSIKwA0kQdiAJwg4kwTh7ZcfZP+t43c/9qHwC4Js2MI6O/mPPDiRB2IEkCDuQBGEHkiDsQBKEHUiCsANJMM5eWb5gSZtHtP6/eMdpXy2uec5nbijWT1qxu1if/N2Hi3VgItizA0kQdiAJwg4kQdiBJAg7kARhB5Ig7EASjLNX5k8ZKtb3xL6WtemTjiiu+8QHvlB+7staP7ckvXPVHxXrR69uvf2dc6O47lHl2aI189FXyw9o48XfmNqyNuu724rr7uN3ABrVds9ue6ntbbbXjFl2s+3nbT9SXS7sbpsA6prI2/ivSDp/nOWfjYgzqsvKZtsC0LS2YY+I+yRt70EvALqozhd019p+tHqb33KyM9uLbI/YHtmjXTU2B6COTsN+i6STJZ0haYukT7d6YEQsiYjhiBge0pQONwegro7CHhFbI2JfROyX9CVJ85ttC0DTOgq77dlj7l4iaU2rxwIYDI4oj8PaXi7pfZJmStoq6VPV/TMkhaSNkj4cEVvabewoz4izXf6N9X7Z8M9nlesLv9ijTvL4wS4X69c/fnmxPmPhhibbOSQ8GKv0Smwf94Vte1BNRFwxzuJba3cFoKc4XBZIgrADSRB2IAnCDiRB2IEk2g69NWmQh958WHlgYvf7Tm9Z+9A/frO47pGTyocJLzzyhWJ9yJOL9UPVfu0v1n/99uuK9ZM//t9NtvOGUBp6Y88OJEHYgSQIO5AEYQeSIOxAEoQdSIKwA0nwU9KV2Lu3WB/6z4da1paf8rZa2/78H5ZP5dw3VD4V9L1/9oOWtcXHre6op0Ewqc2+aO7pbc+qxhjs2YEkCDuQBGEHkiDsQBKEHUiCsANJEHYgCcbZB8DUOx6stf43Tz+nZW3xleVx9p/G7mL93ff9cbF+wpfL59q/eN1PW9ZGzvpqcV00iz07kARhB5Ig7EAShB1IgrADSRB2IAnCDiTBOPsh4O33FH6X/sryukf68GJ93W+XJ+y98oRzi/WVJ95TqNbb1zz7fzOK9XnaWOv5DzVtX23bx9v+ju11ttfa/li1fIbte20/WV1P7367ADo1kX+teyXdEBGnSnqPpI/YPk3SjZJWRcQ8Sauq+wAGVNuwR8SWiHi4ur1D0jpJcyRdJGlZ9bBlki7uVpMA6ntdH5psnyjpTEkPSpoVEVuk0X8Iko5tsc4i2yO2R/aoPOcZgO6ZcNhtv1nSnZKuj4hXJrpeRCyJiOGIGB7SlE56BNCACYXd9pBGg35bRNxVLd5qe3ZVny1pW3daBNCEtlM227ZGP5Nvj4jrxyz/W0k/jojFtm+UNCMi/rz0XIM8ZfMb2aRp01rWtt0+u7juA+9a3nQ7E7Yr9hTrCx8v/8T2kZf9pFjf99LLr7unN7rSlM0TGWdfoNHR2sdsP1Itu0nSYklft32NpGclXdpEswC6o23YI+L7klrNUsBuGniD4HBZIAnCDiRB2IEkCDuQBGEHkuAU10
PA/h07WtaO+2j5ZMTfW/r7xfpNJ36rWD9nyr5i/c6dM1vWPrnyA8V1f+VPHyjWy1vGwdizA0kQdiAJwg4kQdiBJAg7kARhB5Ig7EASbc9nbxLns7/xbL3uvcX6jrN+Vqyf8pcvtqztfea5jnpCa6Xz2dmzA0kQdiAJwg4kQdiBJAg7kARhB5Ig7EASnM+Oolmfv79cb7P+3uZaQU3s2YEkCDuQBGEHkiDsQBKEHUiCsANJEHYgibZht3287e/YXmd7re2PVctvtv287Ueqy4XdbxdApyZyUM1eSTdExMO2p0l6yPa9Ve2zEfF33WsPQFMmMj/7Fklbqts7bK+TNKfbjQFo1uv6zG77RElnSnqwWnSt7UdtL7U97jxDthfZHrE9ske7ajULoHMTDrvtN0u6U9L1EfGKpFsknSzpDI3u+T893noRsSQihiNieEhTGmgZQCcmFHbbQxoN+m0RcZckRcTWiNgXEfslfUnS/O61CaCuiXwbb0m3SloXEZ8Zs3z2mIddImlN8+0BaMpEvo1fIOlKSY/ZfqRadpOkK2yfISkkbZT04a50CKARE/k2/vuSxvsd6pXNtwOgWziCDkiCsANJEHYgCcIOJEHYgSQIO5AEYQeSIOxAEoQdSIKwA0kQdiAJwg4kQdiBJAg7kIQjoncbs1+Q9MyYRTMlvdizBl6fQe1tUPuS6K1TTfZ2QkS8dbxCT8P+mo3bIxEx3LcGCga1t0HtS6K3TvWqN97GA0kQdiCJfod9SZ+3XzKovQ1qXxK9daonvfX1MzuA3un3nh1AjxB2IIm+hN32+bbX237K9o396KEV2xttP1ZNQz3S516W2t5me82YZTNs32v7yep63Dn2+tTbQEzjXZhmvK+vXb+nP+/5Z3bbkyVtkHSupE2SVku6IiIe72kjLdjeKGk4Ivp+AIbt35K0U9K/RMQ7q2V/I2l7RCyu/lFOj4hPDEhvN0va2e9pvKvZimaPnWZc0sWSrlYfX7tCX5epB69bP/bs8yU9FRFPR8RuSV+TdFEf+hh4EXGfpO0HLb5I0rLq9jKN/rH0XIveBkJEbImIh6vbOyQdmGa8r69doa+e6EfY50h6bsz9TRqs+d5D0rdtP2R7Ub+bGcesiNgijf7xSDq2z/0crO003r100DTjA/PadTL9eV39CPt4U0kN0vjfgoh4l6QLJH2keruKiZnQNN69Ms404wOh0+nP6+pH2DdJOn7M/bmSNvehj3FFxObqepukuzV4U1FvPTCDbnW9rc/9/MIgTeM93jTjGoDXrp/Tn/cj7KslzbN9ku3DJV0uaUUf+ngN21OrL05ke6qk8zR4U1GvkHRVdfsqSd/oYy+/ZFCm8W41zbj6/Nr1ffrziOj5RdKFGv1G/oeSPtmPHlr09Q5J/1td1va7N0nLNfq2bo9G3xFdI+ktklZJerK6njFAvf2rpMckParRYM3uU2+/qdGPho9KeqS6XNjv167QV09eNw6XBZLgCDogCcIOJEHYgSQIO5AEYQeSIOxAEoQdSOL/AalATNTnPIw4AAAAAElFTkSuQmCC\n", 131 | "text/plain": [ 132 | "
" 133 | ] 134 | }, 135 | "metadata": { 136 | "needs_background": "light" 137 | }, 138 | "output_type": "display_data" 139 | } 140 | ], 141 | "source": [ 142 | "# 顯示錯誤的資料圖像\n", 143 | "import matplotlib.pyplot as plt\n", 144 | "\n", 145 | "X2 = x_test[8,:,:]\n", 146 | "plt.imshow(X2.reshape(28,28))\n", 147 | "plt.show() " 148 | ] 149 | }, 150 | { 151 | "cell_type": "code", 152 | "execution_count": 59, 153 | "metadata": {}, 154 | "outputs": [ 155 | { 156 | "name": "stdout", 157 | "output_type": "stream", 158 | "text": [ 159 | "[['0']\n", 160 | " ['1']\n", 161 | " ['2']\n", 162 | " ['3']\n", 163 | " ['4']\n", 164 | " ['5']\n", 165 | " ['6']\n", 166 | " ['7']\n", 167 | " ['8']\n", 168 | " ['9']]\n" 169 | ] 170 | } 171 | ], 172 | "source": [ 173 | "# 使用小畫家,寫0~9,實際測試看看\n", 174 | "from skimage import io\n", 175 | "from skimage.transform import resize\n", 176 | "import numpy as np\n", 177 | "\n", 178 | "X_ALL = np.empty((0, 28, 28))\n", 179 | "for i in range(10): \n", 180 | " image1 = io.imread(f'./myDigits/{i}.png', as_gray=True)\n", 181 | " #image1 = Image.open(uploaded_file).convert('LA')\n", 182 | " image_resized = resize(image1, (28, 28), anti_aliasing=True) \n", 183 | " X1 = image_resized.reshape(1, 28, 28) #/ 255\n", 184 | " # 反轉顏色\n", 185 | " # 顏色0為白色,與RGB顏色不同,(0,0,0) 為黑色。\n", 186 | " # 還原特徵縮放\n", 187 | " X1 = (np.abs(1-X1) * 255).astype(int)\n", 188 | " X_ALL = np.concatenate([X_ALL, X1])\n", 189 | "predictions = model.predict(X_ALL)\n", 190 | "print(predictions)" 191 | ] 192 | }, 193 | { 194 | "cell_type": "code", 195 | "execution_count": null, 196 | "metadata": {}, 197 | "outputs": [], 198 | "source": [] 199 | } 200 | ], 201 | "metadata": { 202 | "kernelspec": { 203 | "display_name": "Python 3", 204 | "language": "python", 205 | "name": "python3" 206 | }, 207 | "language_info": { 208 | "codemirror_mode": { 209 | "name": "ipython", 210 | "version": 3 211 | }, 212 | "file_extension": ".py", 213 | "mimetype": "text/x-python", 214 | "name": "python", 215 | "nbconvert_exporter": "python", 216 | "pygments_lexer": "ipython3", 217 | "version": "3.8.3" 218 | } 219 | }, 220 | "nbformat": 4, 221 | "nbformat_minor": 4 222 | } 223 | -------------------------------------------------------------------------------- /30_02_AutoKeras_Fashion_MNIST.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# AutoKeras Fashion MNIST 辨識" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import tensorflow as tf\n", 17 | "mnist = tf.keras.datasets.fashion_mnist\n" 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": 2, 23 | "metadata": {}, 24 | "outputs": [], 25 | "source": [ 26 | "# 匯入 MNIST 手寫阿拉伯數字 訓練資料\n", 27 | "(x_train, y_train),(x_test, y_test) = mnist.load_data()\n" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": 3, 33 | "metadata": {}, 34 | "outputs": [ 35 | { 36 | "name": "stdout", 37 | "output_type": "stream", 38 | "text": [ 39 | "Trial 1 Complete [00h 01m 50s]\n", 40 | "val_loss: 0.22313417494297028\n", 41 | "\n", 42 | "Best val_loss So Far: 0.22313417494297028\n", 43 | "Total elapsed time: 00h 01m 50s\n", 44 | "INFO:tensorflow:Oracle triggered exit\n", 45 | "Epoch 1/10\n", 46 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.4258 - accuracy: 0.8496\n", 47 | "Epoch 2/10\n", 48 | "1875/1875 [==============================] - 11s 
6ms/step - loss: 0.3130 - accuracy: 0.8883\n", 49 | "Epoch 3/10\n", 50 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.2785 - accuracy: 0.8999\n", 51 | "Epoch 4/10\n", 52 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.2595 - accuracy: 0.9064\n", 53 | "Epoch 5/10\n", 54 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.2426 - accuracy: 0.9110\n", 55 | "Epoch 6/10\n", 56 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.2354 - accuracy: 0.9134\n", 57 | "Epoch 7/10\n", 58 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.2262 - accuracy: 0.9166\n", 59 | "Epoch 8/10\n", 60 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.2160 - accuracy: 0.9218\n", 61 | "Epoch 9/10\n", 62 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.2078 - accuracy: 0.9247\n", 63 | "Epoch 10/10\n", 64 | "1875/1875 [==============================] - 11s 6ms/step - loss: 0.2072 - accuracy: 0.9235\n", 65 | "WARNING:tensorflow:From C:\\anaconda3\\lib\\site-packages\\tensorflow\\python\\training\\tracking\\tracking.py:111: Model.state_updates (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version.\n", 66 | "Instructions for updating:\n", 67 | "This property should not be used in TensorFlow 2.0, as updates are applied automatically.\n", 68 | "WARNING:tensorflow:From C:\\anaconda3\\lib\\site-packages\\tensorflow\\python\\training\\tracking\\tracking.py:111: Layer.updates (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.\n", 69 | "Instructions for updating:\n", 70 | "This property should not be used in TensorFlow 2.0, as updates are applied automatically.\n", 71 | "INFO:tensorflow:Assets written to: .\\image_classifier\\best_model\\assets\n" 72 | ] 73 | } 74 | ], 75 | "source": [ 76 | "import autokeras as ak\n", 77 | "\n", 78 | "# 初始化影像分類器(image classifier)\n", 79 | "model = ak.ImageClassifier(\n", 80 | " overwrite=True,\n", 81 | " max_trials=1)\n", 82 | "# 訓練模型\n", 83 | "model.fit(x_train, y_train, epochs=10)\n" 84 | ] 85 | }, 86 | { 87 | "cell_type": "code", 88 | "execution_count": 4, 89 | "metadata": {}, 90 | "outputs": [ 91 | { 92 | "name": "stdout", 93 | "output_type": "stream", 94 | "text": [ 95 | "313/313 [==============================] - 1s 3ms/step - loss: 0.2274 - accuracy: 0.9194\n", 96 | "[0.2273847460746765, 0.9193999767303467]\n" 97 | ] 98 | } 99 | ], 100 | "source": [ 101 | "# 預測\n", 102 | "predicted_y = model.predict(x_test)\n", 103 | "\n", 104 | "# 評估,打分數\n", 105 | "print(model.evaluate(x_test, y_test))" 106 | ] 107 | }, 108 | { 109 | "cell_type": "code", 110 | "execution_count": 5, 111 | "metadata": {}, 112 | "outputs": [ 113 | { 114 | "name": "stdout", 115 | "output_type": "stream", 116 | "text": [ 117 | "prediction: 9 2 1 1 6 1 4 6 5 7 4 5 5 3 4 1 2 4 8 0\n", 118 | "actual : 9 2 1 1 6 1 4 6 5 7 4 5 7 3 4 1 2 4 8 0\n" 119 | ] 120 | } 121 | ], 122 | "source": [ 123 | "# 比較 20 筆\n", 124 | "print('prediction:', ' '.join(predicted_y[0:20].ravel()))\n", 125 | "print('actual :', ' '.join(y_test[0:20].astype(str)))\n" 126 | ] 127 | }, 128 | { 129 | "cell_type": "code", 130 | "execution_count": 9, 131 | "metadata": {}, 132 | "outputs": [], 133 | "source": [ 134 | "label_dict = {0: 'T-shirt/top', \n", 135 | "1:'Trouser',\n", 136 | "2:'Pullover', \n", 137 | "3:'Dress', \n", 138 | "4:'Coat',\n", 139 | "5:'Sandal', \n", 140 | "6:'Shirt', \n", 141 | "7:'Sneaker', \n", 142 | 
"8:'Bag', \n", 143 | "9:'Ankle boot'}" 144 | ] 145 | }, 146 | { 147 | "cell_type": "code", 148 | "execution_count": 13, 149 | "metadata": {}, 150 | "outputs": [ 151 | { 152 | "name": "stdout", 153 | "output_type": "stream", 154 | "text": [ 155 | "WARNING:tensorflow:7 out of the last 319 calls to .predict_function at 0x000001D73F54A9D0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/tutorials/customization/performance#python_or_tensor_args and https://www.tensorflow.org/api_docs/python/tf/function for more details.\n" 156 | ] 157 | }, 158 | { 159 | "data": { 160 | "text/html": [ 161 | "
\n", 162 | "\n", 175 | "\n", 176 | " \n", 177 | " \n", 178 | " \n", 179 | " \n", 180 | " \n", 181 | " \n", 182 | " \n", 183 | " \n", 184 | " \n", 185 | " \n", 186 | " \n", 187 | " \n", 188 | " \n", 189 | " \n", 190 | " \n", 191 | " \n", 192 | " \n", 193 | " \n", 194 | " \n", 195 | " \n", 196 | " \n", 197 | " \n", 198 | " \n", 199 | " \n", 200 | " \n", 201 | " \n", 202 | " \n", 203 | " \n", 204 | " \n", 205 | " \n", 206 | " \n", 207 | " \n", 208 | " \n", 209 | " \n", 210 | " \n", 211 | " \n", 212 | " \n", 213 | " \n", 214 | " \n", 215 | " \n", 216 | " \n", 217 | " \n", 218 | " \n", 219 | " \n", 220 | "
actualpredict
0bag1Bag
1pant1Trouser
2Sandal1Bag
3Sandal2Bag
4shirt1T-shirt/top
5t-shirtT-shirt/top
6TrouserTrouser
\n", 221 | "
" 222 | ], 223 | "text/plain": [ 224 | " actual predict\n", 225 | "0 bag1 Bag\n", 226 | "1 pant1 Trouser\n", 227 | "2 Sandal1 Bag\n", 228 | "3 Sandal2 Bag\n", 229 | "4 shirt1 T-shirt/top\n", 230 | "5 t-shirt T-shirt/top\n", 231 | "6 Trouser Trouser" 232 | ] 233 | }, 234 | "execution_count": 13, 235 | "metadata": {}, 236 | "output_type": "execute_result" 237 | } 238 | ], 239 | "source": [ 240 | "# 使用小畫家,寫0~9,實際測試看看\n", 241 | "from skimage import io\n", 242 | "from skimage.transform import resize\n", 243 | "import numpy as np\n", 244 | "import pandas as pd\n", 245 | "import os\n", 246 | "\n", 247 | "my_path='./test_data'\n", 248 | "X_ALL = np.empty((0, 28, 28))\n", 249 | "actual_labels=[]\n", 250 | "for i in os.listdir(my_path): \n", 251 | " actual_labels.append(i[:-4])\n", 252 | " image1 = io.imread(f'{my_path}/{i}', as_gray=True)\n", 253 | " #image1 = Image.open(uploaded_file).convert('LA')\n", 254 | " image_resized = resize(image1, (28, 28), anti_aliasing=True) \n", 255 | " X1 = image_resized.reshape(1, 28, 28) #/ 255\n", 256 | " # 反轉顏色\n", 257 | " # 顏色0為白色,與RGB顏色不同,(0,0,0) 為黑色。\n", 258 | " # 還原特徵縮放\n", 259 | " X1 = (np.abs(1-X1) * 255).astype(int)\n", 260 | " X_ALL = np.concatenate([X_ALL, X1])\n", 261 | "predictions = model.predict(X_ALL)\n", 262 | "df = pd.DataFrame({'actual': actual_labels, 'predict':[label_dict[k] for k in predictions.ravel().astype(int)]})\n", 263 | "df " 264 | ] 265 | }, 266 | { 267 | "cell_type": "code", 268 | "execution_count": null, 269 | "metadata": {}, 270 | "outputs": [], 271 | "source": [] 272 | } 273 | ], 274 | "metadata": { 275 | "kernelspec": { 276 | "display_name": "Python 3", 277 | "language": "python", 278 | "name": "python3" 279 | }, 280 | "language_info": { 281 | "codemirror_mode": { 282 | "name": "ipython", 283 | "version": 3 284 | }, 285 | "file_extension": ".py", 286 | "mimetype": "text/x-python", 287 | "name": "python", 288 | "nbconvert_exporter": "python", 289 | "pygments_lexer": "ipython3", 290 | "version": "3.8.3" 291 | } 292 | }, 293 | "nbformat": 4, 294 | "nbformat_minor": 4 295 | } 296 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## IT 邦幫忙 鐵人賽發文 2 | ## [【輕鬆掌握 Keras 及相關應用】](https://ithelp.ithome.com.tw/articles/10233272)系列發文的範例程式 3 | 4 | ### [【Day 01:輕鬆掌握 Keras】](https://ithelp.ithome.com.tw/articles/10233272) 5 | ### [【Day 02:梯度下降與自動微分】](https://ithelp.ithome.com.tw/articles/10233555) 6 | ### [【Day 03:撰寫第一支完整的 Keras 程式】](https://ithelp.ithome.com.tw/articles/10233758) 7 | ### [【Day 04:神經網路的效能調校(Performance Tuning)】](https://ithelp.ithome.com.tw/articles/10234059) 8 | ### [【Day 5:神經網路的效能調校(續)】](https://ithelp.ithome.com.tw/articles/10234203) 9 | ### [【Day 06:Keras 模型結構】](https://ithelp.ithome.com.tw/articles/10234389) 10 | ### [【Day 07:Keras Callback 的使用】](https://ithelp.ithome.com.tw/articles/10234641) 11 | ### [【Day 08:TensorBoard 的初體驗】](https://ithelp.ithome.com.tw/articles/10234818) 12 | ### [【Day 09:再探TensorBoard】](https://ithelp.ithome.com.tw/articles/10235066) 13 | ### [【Day 10:運用自訂Callback 追蹤訓練過程】](https://ithelp.ithome.com.tw/articles/10235293) 14 | ### [【Day 11:卷積神經網路(CNN) 剖析】](https://ithelp.ithome.com.tw/articles/10235547) 15 | ### [【Day 12:影像資料增補(Data Augmentation)】](https://ithelp.ithome.com.tw/articles/10235805) 16 | ### [【Day 13:測試 CNN 的桌面程式】](https://ithelp.ithome.com.tw/articles/10236118) 17 | ### [【Day 14:預先訓練好的模型(Keras Applications)】](https://ithelp.ithome.com.tw/articles/10236654) 18 | 
### [【Day 15:戴口罩偵測實作】](https://ithelp.ithome.com.tw/articles/10237020) 19 | ### [【Day 16:TensorFlow 2 Object Detection API 安裝】](https://ithelp.ithome.com.tw/articles/10237443) 20 | ### [【Day 17:TensorFlow 2 Object Detection API 實作】](https://ithelp.ithome.com.tw/articles/10238231) 21 | ### [【Day 18:自駕車(Self-driving) 動態物件偵測實作】](https://ithelp.ithome.com.tw/articles/10238971) 22 | ### [【Day 19:Autoencoder 與去除雜訊】](https://ithelp.ithome.com.tw/articles/10239870) 23 | ### [【Day 20:使用 U-Net 作影像分割(Image Segmentation)】](https://ithelp.ithome.com.tw/articles/10240314) 24 | ### [【Day 21:Batch Normalization 筆記整理】](https://ithelp.ithome.com.tw/articles/10241052) 25 | ### [【Day 22:Tensorflow Dataset 相關用法整理】](https://ithelp.ithome.com.tw/articles/10241789) 26 | ### [【Day 23:Tensorflow 架構與其他模組介紹】](https://ithelp.ithome.com.tw/articles/10242414) 27 | ### [【Day 24:機器學習永遠不會跟你講錯 -- Keras 除錯技巧】](https://ithelp.ithome.com.tw/articles/10242956) 28 | ### [【Day 25:Keras 自然語言處理(NLP)實作】](https://ithelp.ithome.com.tw/articles/10243582) 29 | ### [【Day 26:Keras 自然語言處理(NLP)應用】](https://ithelp.ithome.com.tw/articles/10244283) 30 | ### [【Day 27:使用Keras撰寫 生成式對抗網路(GAN)】](https://ithelp.ithome.com.tw/articles/10244859) 31 | ### [【Day 28:從直覺的角度初探強化學習】](https://ithelp.ithome.com.tw/articles/10245605) 32 | ### [【Day 29:深究強化學習】](https://ithelp.ithome.com.tw/articles/10246035) 33 | ### [【Day 30:取代資料科學家 -- AutoKeras 入門】](https://ithelp.ithome.com.tw/articles/10246684) 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /cnn_class.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import tensorflow as tf 4 | from tensorflow import keras 5 | from tensorflow.keras.layers import * 6 | from tensorflow.keras.utils import to_categorical 7 | import os 8 | 9 | # 取得 MNIST 資料 10 | def getData(): 11 | (X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data() 12 | img_rows, img_cols = 28, 28 13 | 14 | y_train = to_categorical(y_train, num_classes=10) 15 | y_test = to_categorical(y_test, num_classes=10) 16 | 17 | # CNN 需加一維 18 | X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1) 19 | X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1) 20 | 21 | return X_train/255, y_train, X_test/255, y_test 22 | 23 | # 訓練模型 24 | def trainModel(X_train, y_train, X_test, y_test): 25 | batch_size = 64 26 | epochs = 15 27 | 28 | model = tf.keras.models.Sequential() 29 | 30 | model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu', input_shape=(28,28,1))) 31 | model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu')) 32 | model.add(MaxPool2D(pool_size=(2, 2))) 33 | model.add(Dropout(rate=0.25)) 34 | 35 | model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu')) 36 | model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu')) 37 | model.add(MaxPool2D(pool_size=(2, 2))) 38 | model.add(Dropout(rate=0.25)) 39 | 40 | model.add(Flatten()) 41 | model.add(Dense(256, activation='relu')) 42 | model.add(Dropout(rate=0.5)) 43 | model.add(Dense(10, activation='softmax')) 44 | 45 | datagen = tf.keras.preprocessing.image.ImageDataGenerator( 46 | rotation_range=10, 47 | zoom_range=0.1, 48 | width_shift_range=0.1, 49 | height_shift_range=0.1) 50 | 51 | model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy']) 52 | datagen.fit(X_train) 53 | history = model.fit(datagen.flow(X_train, y_train, batch_size=batch_size), 
epochs=epochs, 54 | validation_data=datagen.flow(X_test, y_test, batch_size=batch_size), verbose=2, 55 | steps_per_epoch=X_train.shape[0]//batch_size) 56 | 57 | model.save('mnist_model.h5') 58 | return model 59 | 60 | # 載入模型 61 | def loadModel(): 62 | return tf.keras.models.load_model('mnist_model.h5') 63 | 64 | -------------------------------------------------------------------------------- /images/Tiger.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/images/Tiger.jpg -------------------------------------------------------------------------------- /images/Tiger2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/images/Tiger2.jpg -------------------------------------------------------------------------------- /images/Tiger3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/images/Tiger3.jpg -------------------------------------------------------------------------------- /images/bird01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/images/bird01.jpg -------------------------------------------------------------------------------- /images/elephant.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/images/elephant.jpg -------------------------------------------------------------------------------- /images/elephant2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/images/elephant2.jpg -------------------------------------------------------------------------------- /images/input.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/images/input.jpg -------------------------------------------------------------------------------- /images/style.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/images/style.jpg -------------------------------------------------------------------------------- /images/with-mask.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/images/with-mask.jpg -------------------------------------------------------------------------------- /images/without-mask.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/images/without-mask.jpg -------------------------------------------------------------------------------- /images/太陽花.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/images/太陽花.jpg
--------------------------------------------------------------------------------
/images_2/detection1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/images_2/detection1.png
--------------------------------------------------------------------------------
/images_2/detection2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/images_2/detection2.png
--------------------------------------------------------------------------------
/images_2/image1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/images_2/image1.jpg
--------------------------------------------------------------------------------
/images_2/image2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/images_2/image2.jpg
--------------------------------------------------------------------------------
/lib/atari/__init__.py:
--------------------------------------------------------------------------------
1 | 
2 | 
--------------------------------------------------------------------------------
/lib/atari/helpers.py:
--------------------------------------------------------------------------------
 1 | import numpy as np
 2 | 
 3 | class AtariEnvWrapper(object):
 4 |     """
 5 |     Wraps an Atari environment to end an episode when a life is lost.
 6 |     """
 7 |     def __init__(self, env):
 8 |         self.env = env
 9 | 
10 |     def __getattr__(self, name):
11 |         return getattr(self.env, name)
12 | 
13 |     def step(self, *args, **kwargs):
14 |         lives_before = self.env.ale.lives()
15 |         next_state, reward, done, info = self.env.step(*args, **kwargs)
16 |         lives_after = self.env.ale.lives()
17 | 
18 |         # End the episode when a life is lost
19 |         if lives_before > lives_after:
20 |             done = True
21 | 
22 |         # Clip rewards to [-1,1]
23 |         reward = max(min(reward, 1), -1)
24 | 
25 |         return next_state, reward, done, info
26 | 
27 | def atari_make_initial_state(state):
28 |     return np.stack([state] * 4, axis=2)
29 | 
30 | def atari_make_next_state(state, next_state):
31 |     return np.append(state[:,:,1:], np.expand_dims(next_state, 2), axis=2)
--------------------------------------------------------------------------------
/lib/atari/state_processor.py:
--------------------------------------------------------------------------------
 1 | import numpy as np
 2 | import tensorflow as tf
 3 | 
 4 | class StateProcessor():
 5 |     """
 6 |     Processes a raw Atari image. Resizes it and converts it to grayscale. 
7 | """ 8 | def __init__(self): 9 | # Build the Tensorflow graph 10 | with tf.variable_scope("state_processor"): 11 | self.input_state = tf.placeholder(shape=[210, 160, 3], dtype=tf.uint8) 12 | self.output = tf.image.rgb_to_grayscale(self.input_state) 13 | self.output = tf.image.crop_to_bounding_box(self.output, 34, 0, 160, 160) 14 | self.output = tf.image.resize_images( 15 | self.output, [84, 84], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) 16 | self.output = tf.squeeze(self.output) 17 | 18 | def process(self, state, sess=None): 19 | """ 20 | Args: 21 | sess: A Tensorflow session object 22 | state: A [210, 160, 3] Atari RGB State 23 | 24 | Returns: 25 | A processed [84, 84, 1] state representing grayscale values. 26 | """ 27 | sess = sess or tf.get_default_session() 28 | return sess.run(self.output, { self.input_state: state }) -------------------------------------------------------------------------------- /lib/envs/blackjack.py: -------------------------------------------------------------------------------- 1 | import gym 2 | from gym import spaces 3 | from gym.utils import seeding 4 | 5 | def cmp(a, b): 6 | return int((a > b)) - int((a < b)) 7 | 8 | # 1 = Ace, 2-10 = Number cards, Jack/Queen/King = 10 9 | deck = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10] 10 | 11 | 12 | def draw_card(np_random): 13 | return np_random.choice(deck) 14 | 15 | 16 | def draw_hand(np_random): 17 | return [draw_card(np_random), draw_card(np_random)] 18 | 19 | 20 | def usable_ace(hand): # Does this hand have a usable ace? 21 | return 1 in hand and sum(hand) + 10 <= 21 22 | 23 | 24 | def sum_hand(hand): # Return current hand total 25 | if usable_ace(hand): 26 | return sum(hand) + 10 27 | return sum(hand) 28 | 29 | 30 | def is_bust(hand): # Is this hand a bust? 31 | return sum_hand(hand) > 21 32 | 33 | 34 | def score(hand): # What is the score of this hand (0 if bust) 35 | return 0 if is_bust(hand) else sum_hand(hand) 36 | 37 | 38 | def is_natural(hand): # Is this hand a natural blackjack? 39 | return sorted(hand) == [1, 10] 40 | 41 | 42 | class BlackjackEnv(gym.Env): 43 | """Simple blackjack environment 44 | Blackjack is a card game where the goal is to obtain cards that sum to as 45 | near as possible to 21 without going over. They're playing against a fixed 46 | dealer. 47 | Face cards (Jack, Queen, King) have point value 10. 48 | Aces can either count as 11 or 1, and it's called 'usable' at 11. 49 | This game is placed with an infinite deck (or with replacement). 50 | The game starts with each (player and dealer) having one face up and one 51 | face down card. 52 | The player can request additional cards (hit=1) until they decide to stop 53 | (stick=0) or exceed 21 (bust). 54 | After the player sticks, the dealer reveals their facedown card, and draws 55 | until their sum is 17 or greater. If the dealer goes bust the player wins. 56 | If neither player nor dealer busts, the outcome (win, lose, draw) is 57 | decided by whose sum is closer to 21. The reward for winning is +1, 58 | drawing is 0, and losing is -1. 59 | The observation of a 3-tuple of: the players current sum, 60 | the dealer's one showing card (1-10 where 1 is ace), 61 | and whether or not the player holds a usable ace (0 or 1). 62 | This environment corresponds to the version of the blackjack problem 63 | described in Example 5.1 in Reinforcement Learning: An Introduction 64 | by Sutton and Barto (1998). 
--------------------------------------------------------------------------------
/lib/envs/blackjack.py:
--------------------------------------------------------------------------------
import gym
from gym import spaces
from gym.utils import seeding

def cmp(a, b):
    return int(a > b) - int(a < b)

# 1 = Ace, 2-10 = Number cards, Jack/Queen/King = 10
deck = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]


def draw_card(np_random):
    return np_random.choice(deck)


def draw_hand(np_random):
    return [draw_card(np_random), draw_card(np_random)]


def usable_ace(hand):  # Does this hand have a usable ace?
    return 1 in hand and sum(hand) + 10 <= 21


def sum_hand(hand):  # Return current hand total
    if usable_ace(hand):
        return sum(hand) + 10
    return sum(hand)


def is_bust(hand):  # Is this hand a bust?
    return sum_hand(hand) > 21


def score(hand):  # What is the score of this hand (0 if bust)
    return 0 if is_bust(hand) else sum_hand(hand)


def is_natural(hand):  # Is this hand a natural blackjack?
    return sorted(hand) == [1, 10]


class BlackjackEnv(gym.Env):
    """Simple blackjack environment
    Blackjack is a card game where the goal is to obtain cards that sum to as
    near as possible to 21 without going over. The player plays against a fixed
    dealer.
    Face cards (Jack, Queen, King) have point value 10.
    Aces can count as either 11 or 1; an ace counted as 11 is called 'usable'.
    This game is played with an infinite deck (i.e. cards are drawn with
    replacement).
    The game starts with the player and the dealer each having one face-up and
    one face-down card.
    The player can request additional cards (hit=1) until they decide to stop
    (stick=0) or exceed 21 (bust).
    After the player sticks, the dealer reveals their face-down card and draws
    until their sum is 17 or greater. If the dealer goes bust the player wins.
    If neither player nor dealer busts, the outcome (win, lose, draw) is
    decided by whose sum is closer to 21. The reward for winning is +1,
    drawing is 0, and losing is -1.
    The observation is a 3-tuple of: the player's current sum,
    the dealer's one showing card (1-10 where 1 is ace),
    and whether or not the player holds a usable ace (0 or 1).
    This environment corresponds to the version of the blackjack problem
    described in Example 5.1 of Reinforcement Learning: An Introduction
    by Sutton and Barto (1998).
    https://webdocs.cs.ualberta.ca/~sutton/book/the-book.html
    """
    def __init__(self, natural=False):
        self.action_space = spaces.Discrete(2)
        self.observation_space = spaces.Tuple((
            spaces.Discrete(32),
            spaces.Discrete(11),
            spaces.Discrete(2)))
        self._seed()

        # Flag to payout 1.5 on a "natural" blackjack win, like casino rules
        # Ref: http://www.bicyclecards.com/how-to-play/blackjack/
        self.natural = natural
        # Start the first game
        self._reset()
        self.nA = 2  # Number of actions: stick (0) or hit (1)

    def reset(self):
        return self._reset()

    def step(self, action):
        return self._step(action)

    def _seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def _step(self, action):
        assert self.action_space.contains(action)
        if action:  # hit: add a card to the player's hand and return
            self.player.append(draw_card(self.np_random))
            if is_bust(self.player):
                done = True
                reward = -1
            else:
                done = False
                reward = 0
        else:  # stick: play out the dealer's hand, and score
            done = True
            while sum_hand(self.dealer) < 17:
                self.dealer.append(draw_card(self.np_random))
            reward = cmp(score(self.player), score(self.dealer))
            if self.natural and is_natural(self.player) and reward == 1:
                reward = 1.5
        return self._get_obs(), reward, done, {}

    def _get_obs(self):
        return (sum_hand(self.player), self.dealer[0], usable_ace(self.player))

    def _reset(self):
        self.dealer = draw_hand(self.np_random)
        self.player = draw_hand(self.np_random)

        # Auto-draw another card if the score is less than 12
        while sum_hand(self.player) < 12:
            self.player.append(draw_card(self.np_random))

        return self._get_obs()
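A short episode with a random policy, assuming the legacy gym version these environments were written against (four-value step API):

from lib.envs.blackjack import BlackjackEnv

env = BlackjackEnv()
obs = env.reset()       # (player sum, dealer's showing card, usable ace)
done = False
while not done:
    action = env.action_space.sample()  # 0 = stick, 1 = hit
    obs, reward, done, _ = env.step(action)
print(obs, reward)      # reward: +1 win, 0 draw, -1 loss (1.5 for a natural if enabled)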
--------------------------------------------------------------------------------
/lib/envs/cliff_walking.py:
--------------------------------------------------------------------------------
import numpy as np
import sys
from io import StringIO  # needed by _render in 'ansi' mode
from gym.envs.toy_text import discrete


UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3

class CliffWalkingEnv(discrete.DiscreteEnv):

    metadata = {'render.modes': ['human', 'ansi']}

    def _limit_coordinates(self, coord):
        coord[0] = min(coord[0], self.shape[0] - 1)
        coord[0] = max(coord[0], 0)
        coord[1] = min(coord[1], self.shape[1] - 1)
        coord[1] = max(coord[1], 0)
        return coord

    def _calculate_transition_prob(self, current, delta):
        new_position = np.array(current) + np.array(delta)
        new_position = self._limit_coordinates(new_position).astype(int)
        new_state = np.ravel_multi_index(tuple(new_position), self.shape)
        reward = -100.0 if self._cliff[tuple(new_position)] else -1.0
        is_done = self._cliff[tuple(new_position)] or (tuple(new_position) == (3, 11))
        return [(1.0, new_state, reward, is_done)]

    def __init__(self):
        self.shape = (4, 12)

        nS = np.prod(self.shape)
        nA = 4

        # Cliff location
        self._cliff = np.zeros(self.shape, dtype=bool)  # np.bool was removed from NumPy
        self._cliff[3, 1:-1] = True

        # Calculate transition probabilities
        P = {}
        for s in range(nS):
            position = np.unravel_index(s, self.shape)
            P[s] = {a: [] for a in range(nA)}
            P[s][UP] = self._calculate_transition_prob(position, [-1, 0])
            P[s][RIGHT] = self._calculate_transition_prob(position, [0, 1])
            P[s][DOWN] = self._calculate_transition_prob(position, [1, 0])
            P[s][LEFT] = self._calculate_transition_prob(position, [0, -1])

        # We always start in state (3, 0)
        isd = np.zeros(nS)
        isd[np.ravel_multi_index((3, 0), self.shape)] = 1.0

        super(CliffWalkingEnv, self).__init__(nS, nA, P, isd)

    def render(self, mode='human', close=False):
        self._render(mode, close)

    def _render(self, mode='human', close=False):
        if close:
            return

        outfile = StringIO() if mode == 'ansi' else sys.stdout

        for s in range(self.nS):
            position = np.unravel_index(s, self.shape)
            if self.s == s:
                output = " x "
            elif position == (3, 11):
                output = " T "
            elif self._cliff[position]:
                output = " C "
            else:
                output = " o "

            if position[1] == 0:
                output = output.lstrip()
            if position[1] == self.shape[1] - 1:
                output = output.rstrip()
                output += "\n"

            outfile.write(output)
        outfile.write("\n")
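Because the environment subclasses `discrete.DiscreteEnv`, its full transition model is exposed as `env.P[state][action]`. A quick probe from the start state (3, 0), which ravels to state 36 on the 4x12 grid:

from lib.envs.cliff_walking import CliffWalkingEnv, UP, RIGHT

env = CliffWalkingEnv()
env.reset()
env.render()             # 'x' marks the agent, 'C' the cliff, 'T' the goal
print(env.P[36][UP])     # [(1.0, 24, -1.0, False)] -- deterministic step up
print(env.P[36][RIGHT])  # [(1.0, 37, -100.0, True)] -- stepping onto the cliff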
29 | """ 30 | 31 | metadata = {'render.modes': ['human', 'ansi']} 32 | 33 | def __init__(self, shape=[4,4]): 34 | if not isinstance(shape, (list, tuple)) or not len(shape) == 2: 35 | raise ValueError('shape argument must be a list/tuple of length 2') 36 | 37 | self.shape = shape 38 | 39 | nS = np.prod(shape) 40 | nA = 4 41 | 42 | MAX_Y = shape[0] 43 | MAX_X = shape[1] 44 | 45 | P = {} 46 | grid = np.arange(nS).reshape(shape) 47 | it = np.nditer(grid, flags=['multi_index']) 48 | 49 | while not it.finished: 50 | s = it.iterindex 51 | y, x = it.multi_index 52 | 53 | # P[s][a] = (prob, next_state, reward, is_done) 54 | P[s] = {a : [] for a in range(nA)} 55 | 56 | is_done = lambda s: s == 0 or s == (nS - 1) 57 | reward = 0.0 if is_done(s) else -1.0 58 | 59 | # We're stuck in a terminal state 60 | if is_done(s): 61 | P[s][UP] = [(1.0, s, reward, True)] 62 | P[s][RIGHT] = [(1.0, s, reward, True)] 63 | P[s][DOWN] = [(1.0, s, reward, True)] 64 | P[s][LEFT] = [(1.0, s, reward, True)] 65 | # Not a terminal state 66 | else: 67 | ns_up = s if y == 0 else s - MAX_X 68 | ns_right = s if x == (MAX_X - 1) else s + 1 69 | ns_down = s if y == (MAX_Y - 1) else s + MAX_X 70 | ns_left = s if x == 0 else s - 1 71 | P[s][UP] = [(1.0, ns_up, reward, is_done(ns_up))] 72 | P[s][RIGHT] = [(1.0, ns_right, reward, is_done(ns_right))] 73 | P[s][DOWN] = [(1.0, ns_down, reward, is_done(ns_down))] 74 | P[s][LEFT] = [(1.0, ns_left, reward, is_done(ns_left))] 75 | 76 | it.iternext() 77 | 78 | # Initial state distribution is uniform 79 | isd = np.ones(nS) / nS 80 | 81 | # We expose the model of the environment for educational purposes 82 | # This should not be used in any model-free learning algorithm 83 | self.P = P 84 | 85 | super(GridworldEnv, self).__init__(nS, nA, P, isd) 86 | 87 | def _render(self, mode='human', close=False): 88 | """ Renders the current gridworld layout 89 | 90 | For example, a 4x4 grid with the mode="human" looks like: 91 | T o o o 92 | o x o o 93 | o o o o 94 | o o o T 95 | where x is your position and T are the two terminal states. 
96 | """ 97 | if close: 98 | return 99 | 100 | outfile = io.StringIO() if mode == 'ansi' else sys.stdout 101 | 102 | grid = np.arange(self.nS).reshape(self.shape) 103 | it = np.nditer(grid, flags=['multi_index']) 104 | while not it.finished: 105 | s = it.iterindex 106 | y, x = it.multi_index 107 | 108 | if self.s == s: 109 | output = " x " 110 | elif s == 0 or s == self.nS - 1: 111 | output = " T " 112 | else: 113 | output = " o " 114 | 115 | if x == 0: 116 | output = output.lstrip() 117 | if x == self.shape[1] - 1: 118 | output = output.rstrip() 119 | 120 | outfile.write(output) 121 | 122 | if x == self.shape[1] - 1: 123 | outfile.write("\n") 124 | 125 | it.iternext() 126 | -------------------------------------------------------------------------------- /lib/envs/windy_gridworld.py: -------------------------------------------------------------------------------- 1 | import gym 2 | import numpy as np 3 | import sys 4 | from gym.envs.toy_text import discrete 5 | 6 | UP = 0 7 | RIGHT = 1 8 | DOWN = 2 9 | LEFT = 3 10 | 11 | class WindyGridworldEnv(discrete.DiscreteEnv): 12 | 13 | metadata = {'render.modes': ['human', 'ansi']} 14 | 15 | def _limit_coordinates(self, coord): 16 | coord[0] = min(coord[0], self.shape[0] - 1) 17 | coord[0] = max(coord[0], 0) 18 | coord[1] = min(coord[1], self.shape[1] - 1) 19 | coord[1] = max(coord[1], 0) 20 | return coord 21 | 22 | def _calculate_transition_prob(self, current, delta, winds): 23 | new_position = np.array(current) + np.array(delta) + np.array([-1, 0]) * winds[tuple(current)] 24 | new_position = self._limit_coordinates(new_position).astype(int) 25 | new_state = np.ravel_multi_index(tuple(new_position), self.shape) 26 | is_done = tuple(new_position) == (3, 7) 27 | return [(1.0, new_state, -1.0, is_done)] 28 | 29 | def __init__(self): 30 | self.shape = (7, 10) 31 | 32 | nS = np.prod(self.shape) 33 | nA = 4 34 | 35 | # Wind strength 36 | winds = np.zeros(self.shape) 37 | winds[:,[3,4,5,8]] = 1 38 | winds[:,[6,7]] = 2 39 | 40 | # Calculate transition probabilities 41 | P = {} 42 | for s in range(nS): 43 | position = np.unravel_index(s, self.shape) 44 | P[s] = { a : [] for a in range(nA) } 45 | P[s][UP] = self._calculate_transition_prob(position, [-1, 0], winds) 46 | P[s][RIGHT] = self._calculate_transition_prob(position, [0, 1], winds) 47 | P[s][DOWN] = self._calculate_transition_prob(position, [1, 0], winds) 48 | P[s][LEFT] = self._calculate_transition_prob(position, [0, -1], winds) 49 | 50 | # We always start in state (3, 0) 51 | isd = np.zeros(nS) 52 | isd[np.ravel_multi_index((3,0), self.shape)] = 1.0 53 | 54 | super(WindyGridworldEnv, self).__init__(nS, nA, P, isd) 55 | 56 | def render(self, mode='human', close=False): 57 | self._render(mode, close) 58 | 59 | def _render(self, mode='human', close=False): 60 | if close: 61 | return 62 | 63 | outfile = StringIO() if mode == 'ansi' else sys.stdout 64 | 65 | for s in range(self.nS): 66 | position = np.unravel_index(s, self.shape) 67 | # print(self.s) 68 | if self.s == s: 69 | output = " x " 70 | elif position == (3,7): 71 | output = " T " 72 | else: 73 | output = " o " 74 | 75 | if position[1] == 0: 76 | output = output.lstrip() 77 | if position[1] == self.shape[1] - 1: 78 | output = output.rstrip() 79 | output += "\n" 80 | 81 | outfile.write(output) 82 | outfile.write("\n") 83 | -------------------------------------------------------------------------------- /mnist_model.h5: -------------------------------------------------------------------------------- 
--------------------------------------------------------------------------------
/mnist_model.h5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/mnist_model.h5
--------------------------------------------------------------------------------
/myDigits/0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/myDigits/0.png
--------------------------------------------------------------------------------
/myDigits/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/myDigits/1.png
--------------------------------------------------------------------------------
/myDigits/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/myDigits/2.png
--------------------------------------------------------------------------------
/myDigits/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/myDigits/3.png
--------------------------------------------------------------------------------
/myDigits/4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/myDigits/4.png
--------------------------------------------------------------------------------
/myDigits/5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/myDigits/5.png
--------------------------------------------------------------------------------
/myDigits/6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/myDigits/6.png
--------------------------------------------------------------------------------
/myDigits/7.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/myDigits/7.png
--------------------------------------------------------------------------------
/myDigits/8.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/myDigits/8.png
--------------------------------------------------------------------------------
/myDigits/9.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/myDigits/9.png
--------------------------------------------------------------------------------
/myDigits/bird.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/myDigits/bird.jpg
--------------------------------------------------------------------------------
/myDigits/conv_blur_matrix.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/myDigits/conv_blur_matrix.png
--------------------------------------------------------------------------------
/myDigits/conv_org.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/myDigits/conv_org.png
--------------------------------------------------------------------------------
/test_data/Sandal1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/test_data/Sandal1.jpg
--------------------------------------------------------------------------------
/test_data/Sandal2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/test_data/Sandal2.jpg
--------------------------------------------------------------------------------
/test_data/Trouser.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/test_data/Trouser.jpg
--------------------------------------------------------------------------------
/test_data/bag1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/test_data/bag1.jpg
--------------------------------------------------------------------------------
/test_data/pant1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/test_data/pant1.jpg
--------------------------------------------------------------------------------
/test_data/shirt1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/test_data/shirt1.jpg
--------------------------------------------------------------------------------
/test_data/t-shirt.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mc6666/Keras_tutorial/a20ffc616cfc93415358eec9df479b0cc2d6d953/test_data/t-shirt.jpg
--------------------------------------------------------------------------------