├── Model.ipynb
├── README.md
└── datasets
    ├── test_catvnoncat.h5
    └── train_catvnoncat.h5

/Model.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Anaconda\\lib\\site-packages\\h5py\\__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
      "  from ._conv import register_converters as _register_converters\n"
     ]
    }
   ],
   "source": [
    "# 0 Imports\n",
    "import numpy as np\n",
    "import h5py\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "plt.rcParams['figure.figsize'] = (5.0, 4.0) # set the default figure size\n",
    "plt.rcParams['image.interpolation'] = 'nearest'\n",
    "plt.rcParams['image.cmap'] = 'gray'\n",
    "%load_ext autoreload\n",
    "%autoreload 2\n",
    "np.random.seed(1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Define the helper functions\n",
    "def sigmoid(Z):\n",
    "    \"\"\"\n",
    "    Implement the sigmoid function with numpy.\n",
    "    Arguments:\n",
    "    Z -- numpy array of any shape\n",
    "    Returns:\n",
    "    A -- sigmoid(Z), same shape as Z\n",
    "    cache -- Z itself, needed during backpropagation\n",
    "    \"\"\"\n",
    "    A = 1/(1+np.exp(-Z))\n",
    "    cache = Z\n",
    "    return A, cache\n",
    "\n",
    "\n",
    "def relu(Z):\n",
    "    \"\"\"\n",
    "    Implement the ReLU function with numpy.\n",
    "    Arguments:\n",
    "    Z -- numpy array of any shape\n",
    "    Returns:\n",
    "    A -- relu(Z), same shape as Z\n",
    "    cache -- Z itself, needed during backpropagation\n",
    "    \"\"\"\n",
    "    A = np.maximum(0,Z)\n",
    "    assert(A.shape == Z.shape)\n",
    "    cache = Z \n",
    "    return A, cache\n",
    "\n",
    "\n",
    "def relu_backward(dA, cache):\n",
    "    \"\"\"\n",
    "    Implement the backward pass for a ReLU unit.\n",
    "    Arguments:\n",
    "    dA -- gradient of the activation\n",
    "    cache -- the value returned by relu() above, i.e. the Z from the forward pass\n",
    "    Returns:\n",
    "    dZ -- gradient of the cost with respect to Z\n",
    "    \"\"\"\n",
    "    Z = cache\n",
    "    dZ = np.array(dA, copy=True) # the ReLU derivative is 1 where the input is positive, so just copy dA\n",
    "    dZ[Z <= 0] = 0 # where Z <= 0 the ReLU derivative is 0, so zero out those entries of dZ\n",
    "    assert (dZ.shape == Z.shape)\n",
    "    return dZ\n",
    "\n",
    "\n",
    "def sigmoid_backward(dA, cache):\n",
    "    \"\"\"\n",
    "    Implement the backward pass for a sigmoid unit.\n",
    "    Arguments:\n",
    "    dA -- gradient of the activation\n",
    "    cache -- the value returned by sigmoid() above, i.e. the Z from the forward pass\n",
    "    Returns:\n",
    "    dZ -- gradient of the cost with respect to Z\n",
    "    \"\"\"\n",
    "    Z = cache\n",
    "    s = 1/(1+np.exp(-Z))\n",
    "    dZ = dA * s * (1-s) # dA times the sigmoid derivative\n",
    "    assert (dZ.shape == Z.shape)\n",
    "    return dZ\n",
    "\n",
    "\n",
    "def load_data():\n",
    "    \"\"\"\n",
    "    Load the dataset.\n",
    "    \"\"\"\n",
    "    train_dataset = h5py.File('datasets/train_catvnoncat.h5', \"r\")\n",
    "    train_set_x_orig = np.array(train_dataset[\"train_set_x\"]) # training examples, shape: (209, 64, 64, 3)\n",
    "    train_set_y_orig = np.array(train_dataset[\"train_set_y\"]) # training labels, shape: (1, 209)\n",
    "    \n",
    "    test_dataset = h5py.File('datasets/test_catvnoncat.h5', \"r\")\n",
    "    test_set_x_orig = np.array(test_dataset[\"test_set_x\"]) # test examples, shape: (50, 64, 64, 3)\n",
    "    test_set_y_orig = np.array(test_dataset[\"test_set_y\"]) # test labels, shape: (1, 50)\n",
    "    \n",
    "    classes = np.array(test_dataset[\"list_classes\"][:]) # the label classes (two in total: cat, non-cat)\n",
    "    \n",
    "    train_set_y_orig = train_set_y_orig.reshape((1, -1)) # make sure the labels are a single row; likewise below\n",
    "    test_set_y_orig = test_set_y_orig.reshape((1, -1))\n",
    "    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes"
   ]
  },
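  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick sanity check of the four helpers above (illustrative only, not part of the original notebook): the forward functions return the activation together with a cache of `Z`, and the backward functions scale or block the incoming gradient accordingly.\n",
    "\n",
    "```python\n",
    "Z = np.array([[-1.0, 0.0, 2.0]])\n",
    "A, cache = relu(Z)                             # [[0., 0., 2.]]\n",
    "dZ = relu_backward(np.ones_like(Z), cache)     # [[0., 0., 1.]] -- gradient blocked where Z <= 0\n",
    "A, cache = sigmoid(Z)                          # values strictly between 0 and 1\n",
    "dZ = sigmoid_backward(np.ones_like(Z), cache)  # equals A * (1 - A)\n",
    "```"
   ]
  },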
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Number of training examples: 209\n",
      "Number of test examples: 50\n",
      "Size of each image (the trailing 3 is the RGB channels): (64, 64, 3)\n",
      "train_x_orig shape: (209, 64, 64, 3)\n",
      "train_y shape: (1, 209)\n",
      "test_x_orig shape: (50, 64, 64, 3)\n",
      "test_y shape: (1, 50)\n"
     ]
    }
   ],
   "source": [
    "# Load the data\n",
    "train_x_orig, train_y, test_x_orig, test_y, classes = load_data()\n",
    "\n",
    "# Take a look at the data\n",
    "m_train = train_x_orig.shape[0]\n",
    "num_px = train_x_orig.shape[1]\n",
    "m_test = test_x_orig.shape[0]\n",
    "print (\"Number of training examples: \" + str(m_train))\n",
    "print (\"Number of test examples: \" + str(m_test))\n",
    "print (\"Size of each image (the trailing 3 is the RGB channels): (\" + str(num_px) + \", \" + str(num_px) + \", 3)\")\n",
    "print (\"train_x_orig shape: \" + str(train_x_orig.shape))\n",
    "print (\"train_y shape: \" + str(train_y.shape))\n",
    "print (\"test_x_orig shape: \" + str(test_x_orig.shape))\n",
    "print (\"test_y shape: \" + str(test_y.shape))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train_x shape: (12288, 209)\n",
      "test_x shape: (12288, 50)\n"
     ]
    }
   ],
   "source": [
    "# Reshape the training and test sets\n",
    "train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T\n",
    "test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T\n",
    "\n",
    "# Standardize the data so the values lie between 0 and 1\n",
    "train_x = train_x_flatten/255.\n",
    "test_x = test_x_flatten/255.\n",
    "print (\"train_x shape: \" + str(train_x.shape))\n",
    "print (\"test_x shape: \" + str(test_x.shape))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Specify the number of layers and the number of units in each layer\n",
    "layers_dims = [12288, 20, 7, 5, 1] # 5-layer model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# 1 Initialize the parameters\n",
    "def initialize_parameters_deep(layer_dims):\n",
    "    \"\"\"\n",
    "    Arguments:\n",
    "    layer_dims -- list containing the number of units in each layer.\n",
    "    Returns:\n",
    "    parameters -- dict containing \"W1\", \"b1\", ..., \"WL\", \"bL\":\n",
    "                  Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])\n",
    "                  bl -- bias vector of shape (layer_dims[l], 1)\n",
    "    \"\"\"\n",
    "    np.random.seed(3)\n",
    "    parameters = {}\n",
    "    L = len(layer_dims) # number of layers in the network\n",
    "    for l in range(1, L):\n",
    "        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) / np.sqrt(layer_dims[l-1]) # *0.01\n",
    "        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))\n",
    "        assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))\n",
    "        assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))\n",
    "    return parameters"
   ]
  },
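  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A note on the initialization: dividing by `np.sqrt(layer_dims[l-1])` (rather than the commented-out `*0.01`) scales each weight by the square root of its fan-in, which keeps the pre-activations at roughly unit scale no matter how wide the previous layer is; a fixed factor like 0.01 gives a scale that depends on the fan-in and becomes far too small for narrow layers. A small check of that claim (illustrative only, not part of the original notebook):\n",
    "\n",
    "```python\n",
    "for n in (12288, 20):\n",
    "    A_prev = np.random.randn(n, 100)\n",
    "    Z_scaled = np.dot(np.random.randn(20, n) / np.sqrt(n), A_prev)\n",
    "    Z_fixed = np.dot(np.random.randn(20, n) * 0.01, A_prev)\n",
    "    print(n, np.std(Z_scaled), np.std(Z_fixed))  # ~1 either way vs. ~1.1 then ~0.04\n",
    "```"
   ]
  },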
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# 2.1 Linear part of forward propagation\n",
    "def linear_forward(A, W, b):\n",
    "    \"\"\"\n",
    "    Implement the linear part of one layer's forward propagation.\n",
    "    Arguments:\n",
    "    A -- activations from the previous layer (or the input data), of shape (units in previous layer, number of examples)\n",
    "    W -- weight matrix, of shape (units in current layer, units in previous layer)\n",
    "    b -- bias vector, of shape (units in current layer, 1), broadcast across the examples\n",
    "    Returns:\n",
    "    Z -- the input of the activation function\n",
    "    cache -- tuple containing \"A\", \"W\", \"b\"; stored for use in the backward pass.\n",
    "    \"\"\"\n",
    "    Z = np.dot(W, A) + b\n",
    "    assert(Z.shape == (W.shape[0], A.shape[1]))\n",
    "    cache = (A, W, b)\n",
    "    return Z, cache"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# 2.2 Linear-activation part of forward propagation\n",
    "def linear_activation_forward(A_prev, W, b, activation):\n",
    "    \"\"\"\n",
    "    Implement the linear-activation part of forward propagation.\n",
    "    Arguments:\n",
    "    A_prev -- activations from the previous layer (or the input data), of shape (units in previous layer, number of examples)\n",
    "    W -- weight matrix, of shape (units in current layer, units in previous layer)\n",
    "    b -- bias vector, of shape (units in current layer, 1)\n",
    "    activation -- the activation to use in this layer, as a string: \"sigmoid\" or \"relu\"\n",
    "    Returns:\n",
    "    A -- the output of the activation function\n",
    "    cache -- tuple containing \"linear_cache\" and \"activation_cache\"; stored for use in the backward pass.\n",
    "             \"linear_cache\" is the cache returned by linear_forward, containing \"A\", \"W\", \"b\"\n",
    "             \"activation_cache\" is the cache returned by the activation function (\"sigmoid\" or \"relu\"), i.e. Z\n",
    "    \"\"\"\n",
    "    if activation == \"sigmoid\":\n",
    "        # Inputs: \"A_prev, W, b\". Outputs: \"A, activation_cache\".\n",
    "        Z, linear_cache = linear_forward(A_prev, W, b)\n",
    "        A, activation_cache = sigmoid(Z)\n",
    "    elif activation == \"relu\":\n",
    "        # Inputs: \"A_prev, W, b\". Outputs: \"A, activation_cache\".\n",
    "        Z, linear_cache = linear_forward(A_prev, W, b)\n",
    "        A, activation_cache = relu(Z)\n",
    "    assert (A.shape == (W.shape[0], A_prev.shape[1]))\n",
    "    cache = (linear_cache, activation_cache)\n",
    "    return A, cache"
   ]
  },
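  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To see how the shapes flow through one layer (illustrative only, not part of the original notebook): with 3 input units, 2 output units and 4 examples,\n",
    "\n",
    "```python\n",
    "A_prev = np.random.randn(3, 4)  # (units in previous layer, number of examples)\n",
    "W = np.random.randn(2, 3)       # (units in current layer, units in previous layer)\n",
    "b = np.zeros((2, 1))            # broadcast across all 4 examples\n",
    "A, cache = linear_activation_forward(A_prev, W, b, activation=\"relu\")\n",
    "print(A.shape)                  # (2, 4)\n",
    "```"
   ]
  },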
输出: \"A, activation_cache\".\n", 278 | " Z, linear_cache = linear_forward(A_prev, W, b)\n", 279 | " A, activation_cache = relu(Z)\n", 280 | " assert (A.shape == (W.shape[0], A_prev.shape[1]))\n", 281 | " cache = (linear_cache, activation_cache)\n", 282 | " return A, cache" 283 | ] 284 | }, 285 | { 286 | "cell_type": "code", 287 | "execution_count": 9, 288 | "metadata": { 289 | "collapsed": true 290 | }, 291 | "outputs": [], 292 | "source": [ 293 | "# 2.3 前向传播\n", 294 | "def L_model_forward(X, parameters):\n", 295 | " \"\"\"\n", 296 | " 实现前向传播\n", 297 | " Arguments:\n", 298 | " X -- 数据,其shape为(输入数据size,样本数)\n", 299 | " parameters -- initialize_parameters_deep()方法输出的值,\n", 300 | " 字典,包含\"W1\", \"b1\", ..., \"WL\", \"bL\":\n", 301 | " Returns:\n", 302 | " AL -- 最后的激活函数输出值\n", 303 | " caches -- 包含: 每一个linear_relu_forward()的cache (有L-1个, 索引从 0 到 L-2)\n", 304 | " linear_sigmoid_forward()的cache (只有1个, 索引为 L-1)\n", 305 | " \"\"\"\n", 306 | " caches = []\n", 307 | " A = X\n", 308 | " L = len(parameters) // 2 # 神经网络的层数\n", 309 | " # 实现(L-1)次线性-rulu,并将cache添加到caches中\n", 310 | " for l in range(1, L):\n", 311 | " A_prev = A \n", 312 | " A, cache = linear_activation_forward(A_prev, parameters[\"W\"+str(l)], parameters[\"b\"+str(l)], activation = \"relu\")\n", 313 | " caches.append(cache)\n", 314 | " # 实现最后的这一次 线性-sigmoid,并将cache添加到caches中 (这里的sigmoid作用是将结果转换到0~1之间,输出作为概率)\n", 315 | " AL, cache = linear_activation_forward(A, parameters[\"W\"+str(L)], parameters[\"b\"+str(L)], activation = \"sigmoid\")\n", 316 | " caches.append(cache)\n", 317 | " assert(AL.shape == (1,X.shape[1]))\n", 318 | " return AL, caches" 319 | ] 320 | }, 321 | { 322 | "cell_type": "code", 323 | "execution_count": 10, 324 | "metadata": { 325 | "collapsed": true 326 | }, 327 | "outputs": [], 328 | "source": [ 329 | "# 3 代价函数的计算\n", 330 | "def compute_cost(AL, Y):\n", 331 | " \"\"\"\n", 332 | " 计算上面的代价函数\n", 333 | " Arguments:\n", 334 | " AL -- 是一个向量,每个值代表该样本的几率,shape为(1,样本数)\n", 335 | " Y -- 真实值向量,shape为(1,样本数)\n", 336 | " Returns:\n", 337 | " cost -- 交叉熵\n", 338 | " \"\"\"\n", 339 | " m = Y.shape[1]\n", 340 | " # 用AL和y计算cost\n", 341 | " cost = -1/m * np.sum(Y*np.log(AL) + (1-Y)*np.log(1-AL))\n", 342 | " cost = np.squeeze(cost) # 确保cost的shape是你想要的 (会把比如 [[17]] 变成 17).\n", 343 | " assert(cost.shape == ())\n", 344 | " return cost" 345 | ] 346 | }, 347 | { 348 | "cell_type": "code", 349 | "execution_count": 11, 350 | "metadata": { 351 | "collapsed": true 352 | }, 353 | "outputs": [], 354 | "source": [ 355 | "# 4.1 后向传播的线性部分\n", 356 | "def linear_backward(dZ, cache):\n", 357 | " \"\"\"\n", 358 | " 实现一层的后向传播的线性部分\n", 359 | " Arguments:\n", 360 | " dZ --当前层的线性导数(导数全是cost的导数)\n", 361 | " cache -- 元组(A_prev, W, b),由前向传播过程的当前层得来\n", 362 | " Returns:\n", 363 | " dA_prev -- 前一层的激活函数的导数,其shape和A_prev一样\n", 364 | " dW -- 当前层 W 的导数,其shape和 W 一样\n", 365 | " db -- 当前层 b 的导数,其shape和 b 一样\n", 366 | " \"\"\"\n", 367 | " A_prev, W, b = cache\n", 368 | " m = A_prev.shape[1]\n", 369 | " dW = 1/m * np.dot(dZ, A_prev.T)\n", 370 | " db = 1/m * np.sum(dZ, axis = 1, keepdims = True)\n", 371 | " dA_prev = np.dot(W.T, dZ)\n", 372 | " assert (dA_prev.shape == A_prev.shape)\n", 373 | " assert (dW.shape == W.shape)\n", 374 | " assert (db.shape == b.shape)\n", 375 | " return dA_prev, dW, db" 376 | ] 377 | }, 378 | { 379 | "cell_type": "code", 380 | "execution_count": 12, 381 | "metadata": { 382 | "collapsed": true 383 | }, 384 | "outputs": [], 385 | "source": [ 386 | "# 4.2 后向传播的线性激活部分 \n", 387 | "def linear_activation_backward(dA, cache, activation):\n", 388 | " 
\"\"\"\n", 389 | " 实现一层的后向传播的线性和激活函数部分\n", 390 | " Arguments:\n", 391 | " dA -- 当前层激活函数的导数\n", 392 | " cache -- 元组:(linear_cache, activation_cache),前向传播时存的\n", 393 | " activation -- 这层要用的激活函数,string值: \"sigmoid\" 或者 \"relu\"\n", 394 | " Returns:\n", 395 | " dA_prev -- 前一层的激活函数的导数,其shape和A_prev一样\n", 396 | " dW -- 当前层 W 的导数,其shape和 W 一样\n", 397 | " db -- 当前层 b 的导数,其shape和 b 一样\n", 398 | " \"\"\"\n", 399 | " linear_cache, activation_cache = cache\n", 400 | " if activation == \"relu\":\n", 401 | " dZ = relu_backward(dA, activation_cache)\n", 402 | " dA_prev, dW, db = linear_backward(dZ, linear_cache)\n", 403 | " elif activation == \"sigmoid\":\n", 404 | " dZ = sigmoid_backward(dA, activation_cache)\n", 405 | " dA_prev, dW, db = linear_backward(dZ, linear_cache)\n", 406 | " return dA_prev, dW, db" 407 | ] 408 | }, 409 | { 410 | "cell_type": "code", 411 | "execution_count": 13, 412 | "metadata": { 413 | "collapsed": true 414 | }, 415 | "outputs": [], 416 | "source": [ 417 | "# 4.3 后向传播\n", 418 | "def L_model_backward(AL, Y, caches):\n", 419 | " \"\"\"\n", 420 | " 实现后向传播: (L-1) 次 [LINEAR->RELU] 和 1 次 LINEAR->SIGMOID\n", 421 | " Arguments:\n", 422 | " AL -- 是一个向量,每个值代表该样本的几率,(来自L_model_forward())\n", 423 | " Y -- 真实值向量(不是猫 0 , 是猫 1 )\n", 424 | " caches -- 包含:\n", 425 | " 前向传播时存下来的数据:caches[0 ~ L-2]是relu部分 caches[L-1]是sigmoid部分\n", 426 | " caches[l]具体包括:[线性cache[A, W, b],激活cache[Z]]\n", 427 | " Returns:\n", 428 | " grads -- 一个用来存储导数的字典:\n", 429 | " grads[\"dA\" + str(l)] = ...\n", 430 | " grads[\"dW\" + str(l)] = ...\n", 431 | " grads[\"db\" + str(l)] = ...\n", 432 | " \"\"\"\n", 433 | " grads = {}\n", 434 | " L = len(caches) # 神经网络的层数\n", 435 | " m = AL.shape[1]\n", 436 | " Y = Y.reshape(AL.shape) # 这行之后,Y 的shape就和 AL 一样了\n", 437 | " \n", 438 | " # 初始化后向传播,该值为损失函数对预测值AL求导的结果(Y是真实值)\n", 439 | " dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL)) \n", 440 | " \n", 441 | " # 第L层 (SIGMOID -> LINEAR) 的导数. \n", 442 | " # 输入: \"AL, Y, caches\". \n", 443 | " # 输出: \"grads[\"dAL\"], grads[\"dWL\"], grads[\"dbL\"]\n", 444 | " current_cache = caches[L-1]\n", 445 | " grads[\"dA\" + str(L)], grads[\"dW\" + str(L)], grads[\"db\" + str(L)] = linear_activation_backward(dAL, current_cache, activation = \"sigmoid\")\n", 446 | " for l in reversed(range(L - 1)):\n", 447 | " # 第l层: (RELU -> LINEAR) 的导数.\n", 448 | " # 输入: \"grads[\"dA\" + str(l + 2)], caches\". \n", 449 | " # 输出: \"grads[\"dA\" + str(l + 1)] , grads[\"dW\" + str(l + 1)] , grads[\"db\" + str(l + 1)] \n", 450 | " current_cache = caches[l]\n", 451 | " dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads[\"dA\" + str(l + 2)], current_cache, activation = \"relu\")\n", 452 | " grads[\"dA\" + str(l + 1)] = dA_prev_temp\n", 453 | " grads[\"dW\" + str(l + 1)] = dW_temp\n", 454 | " grads[\"db\" + str(l + 1)] = db_temp\n", 455 | " return grads" 456 | ] 457 | }, 458 | { 459 | "cell_type": "code", 460 | "execution_count": 14, 461 | "metadata": { 462 | "collapsed": true 463 | }, 464 | "outputs": [], 465 | "source": [ 466 | "# 5 更新参数\n", 467 | "def update_parameters(parameters, grads, learning_rate):\n", 468 | " \"\"\"\n", 469 | " 用梯度下降法更新参数\n", 470 | " Arguments:\n", 471 | " parameters -- 字典,包含参数\n", 472 | " grads -- 字典,包含导数,由L_model_backward方法得到\n", 473 | " Returns:\n", 474 | " parameters --字典,包含更新了的参数\n", 475 | " parameters[\"W\" + str(l)] = ... 
\n", 476 | " parameters[\"b\" + str(l)] = ...\n", 477 | " \"\"\"\n", 478 | " L = len(parameters) // 2 # 神经网络的层数\n", 479 | " # 更新每一个参数\n", 480 | " for l in range(L):\n", 481 | " parameters[\"W\" + str(l+1)] -= learning_rate * grads[\"dW\" + str(l+1)]\n", 482 | " parameters[\"b\" + str(l+1)] -= learning_rate * grads[\"db\" + str(l+1)]\n", 483 | " return parameters" 484 | ] 485 | }, 486 | { 487 | "cell_type": "code", 488 | "execution_count": 15, 489 | "metadata": { 490 | "collapsed": true 491 | }, 492 | "outputs": [], 493 | "source": [ 494 | "# 6 预测\n", 495 | "def predict(X, y, parameters):\n", 496 | " \"\"\"\n", 497 | " Arguments:\n", 498 | " X -- 输入数据 \n", 499 | " parameters -- 训练好的模型的参数\n", 500 | " Returns:\n", 501 | " p -- 对给定X的预测\n", 502 | " \"\"\"\n", 503 | " m = X.shape[1] # 样本数量\n", 504 | " n = len(parameters) // 2 # 神经网络的层数\n", 505 | " p = np.zeros((1,m))\n", 506 | " # 前向传播\n", 507 | " probas, caches = L_model_forward(X, parameters)\n", 508 | " # 把估计值(0~1之间)转化为0或者1\n", 509 | " for i in range(0, probas.shape[1]):\n", 510 | " if probas[0,i] > 0.5: # 因为结果shape是(1,样本数),是个矩阵,所以不能直接probas[i]\n", 511 | " p[0,i] = 1\n", 512 | " else:\n", 513 | " p[0,i] = 0\n", 514 | " print(\"Accuracy: \" + str(np.sum((p == y)/m)))\n", 515 | " return p" 516 | ] 517 | }, 518 | { 519 | "cell_type": "code", 520 | "execution_count": 16, 521 | "metadata": { 522 | "collapsed": true 523 | }, 524 | "outputs": [], 525 | "source": [ 526 | "# final:神经网络!\n", 527 | "def L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):#lr was 0.009\n", 528 | " \"\"\"\n", 529 | " 实现一个L层的神经网络: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.\n", 530 | " Arguments:\n", 531 | " X -- 数据,shape为 (样本数, 像素 * 像素 * 3)\n", 532 | " Y -- 真实值向量 (是猫:1,不是猫:0),shape为 (1, 样本数)\n", 533 | " layers_dims -- 包含每一层的节点数\n", 534 | " learning_rate -- 梯度下降更新的速率\n", 535 | " num_iterations -- 迭代次数\n", 536 | " print_cost -- 如果置为True,每迭代100次就输出一次cost\n", 537 | " Returns:\n", 538 | " parameters -- 由此模型学习到的参数,可以用它们去做预测\n", 539 | " \"\"\"\n", 540 | " np.random.seed(1)\n", 541 | " costs = [] # keep track of cost\n", 542 | " # 1 参数初始化\n", 543 | " parameters = initialize_parameters_deep(layers_dims)\n", 544 | " # 梯度下降循环\n", 545 | " for i in range(0, num_iterations):\n", 546 | " # 2 前向传播: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.\n", 547 | " AL, caches = L_model_forward(X, parameters)\n", 548 | " # 3 计算cost.\n", 549 | " cost = compute_cost(AL, Y)\n", 550 | " # 4 后向传播\n", 551 | " grads = L_model_backward(AL, Y, caches)\n", 552 | " # 5 更新参数\n", 553 | " parameters = update_parameters(parameters, grads, learning_rate)\n", 554 | " # 每迭代100次就输出一次cost\n", 555 | " if print_cost and i % 100 == 0:\n", 556 | " print (\"Cost after iteration %i: %f\" %(i, cost))\n", 557 | " if print_cost and i % 100 == 0:\n", 558 | " costs.append(cost)\n", 559 | " # 画出cost曲线\n", 560 | " plt.plot(np.squeeze(costs))\n", 561 | " plt.ylabel('cost')\n", 562 | " plt.xlabel('iterations (per tens)')\n", 563 | " plt.title(\"Learning rate =\" + str(learning_rate))\n", 564 | " plt.show()\n", 565 | " return parameters" 566 | ] 567 | }, 568 | { 569 | "cell_type": "code", 570 | "execution_count": 17, 571 | "metadata": {}, 572 | "outputs": [ 573 | { 574 | "name": "stdout", 575 | "output_type": "stream", 576 | "text": [ 577 | "Cost after iteration 0: 0.715732\n", 578 | "Cost after iteration 100: 0.674738\n", 579 | "Cost after iteration 200: 0.660337\n", 580 | "Cost after iteration 300: 0.646289\n", 581 | "Cost after iteration 400: 0.629813\n", 582 | "Cost after iteration 500: 
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Cost after iteration 0: 0.715732\n",
      "Cost after iteration 100: 0.674738\n",
      "Cost after iteration 200: 0.660337\n",
      "Cost after iteration 300: 0.646289\n",
      "Cost after iteration 400: 0.629813\n",
      "Cost after iteration 500: 0.606006\n",
      "Cost after iteration 600: 0.569004\n",
      "Cost after iteration 700: 0.519797\n",
      "Cost after iteration 800: 0.464157\n",
      "Cost after iteration 900: 0.408420\n",
      "Cost after iteration 1000: 0.373155\n",
      "Cost after iteration 1100: 0.305724\n",
      "Cost after iteration 1200: 0.268102\n",
      "Cost after iteration 1300: 0.238725\n",
      "Cost after iteration 1400: 0.206323\n",
      "Cost after iteration 1500: 0.179439\n",
      "Cost after iteration 1600: 0.157987\n",
      "Cost after iteration 1700: 0.142404\n",
      "Cost after iteration 1800: 0.128652\n",
      "Cost after iteration 1900: 0.112443\n",
      "Cost after iteration 2000: 0.085056\n",
      "Cost after iteration 2100: 0.057584\n",
      "Cost after iteration 2200: 0.044568\n",
      "Cost after iteration 2300: 0.038083\n",
      "Cost after iteration 2400: 0.034411\n"
     ]
    },
    {
     "data": {
      "image/png": "<base64 PNG data omitted: the cost curve plotted by L_layer_model, cost on the y-axis falling from ~0.72 to ~0.03, title 'Learning rate =0.0075'>",
      "text/plain": [
       ""
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Train the parameters\n",
    "parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)"
   ]
  },
\nfeDzEGtyzrmjFmZwTgc6SWovKRsYArxars2rwFXB88HAO5Zs49w559JOaLvqwTHLEcBEIBN4wsyK\nJN0JFJrZq8DjwN8lLSHS0xwSUjlHvJtfjaXaNqXa9oBvU7Ko9DYl3UDGzjmXaH7lkHPOVZIHp3PO\nVVJKB+fhLvlMRpKWS/pE0mxJSXnzJUlPSNogaV7UtEaS3pL0afCzYSJrrKwKtunXklYHn9VsSd9K\nZI2VJSlf0ruSFkgqkvTDYHpSflaH2J5Kf04pe4wzuORzMXAOkdOepgNDzWx+Qgs7SpKWAwVmlrQn\nIUv6CrADGGNmJwXT7gE+N7O7gz9yDc3s1kTWWRkVbNOvgR1m9sdE1nakJLUAWpjZTEl1gRnABcDV\nJOFndYjtuZRKfk6p3OOM55JPlwBm9gFfPl83+vLbp4j8g04aFWxTUjOztWY2M3i+HVhA5Gq/pPys\nDrE9lZbKwRnrks8j+iVVMwb8S9KM4FLUVNHMzNZC5B840DTB9VSVEZLmBrvySbFLG0swclkPYCop\n8FmV2x6o5OeUysEZ1+WcSeh0MzuVyKhT3w92EV319DfgOOAUYC1wX2LLOTKS6gAvAD8ys22Jrudo\nxdieSn9OqRyc8VzymXTMbE3wcwPwEpFDEqlgfXAM6uCxqA0Jrueomdl6MztgZmXAKJLws5KURSRk\nnjGzF4PJSftZxdqeI/mcUjk447nkM6lIyg0OaiMpF/gGMO/Q70oa0ZffXgW8ksBaqsTBcAlcSJJ9\nVsEQj48DC8zsT1GzkvKzqmh7juRzStlv1QGC0wr+wn8v+fxdgks6KpI6EOllQuRy2WeTcZskPQf0\nJzKc13rgDuBlYBzQBlgJXGJmSfNlSwXb1J/I7p8By4EbDh4bTAaSzgA+BD4ByoLJvyByXDDpPqtD\nbM9QKvk5pXRwOudcGFJ5V90550Lhwemcc5Xkwemcc5Xkwemcc5Xkwemcc5XkwekqJGlS8LOdpMuq\neNm/iLWusEi6QNKvQlr2Lw7fqtLL7CbpyaperqsafjqSOyxJ/YGfmtl5lXhPppkdOMT8HWZWpyrq\ni7OeScCgox1VKtZ2hbUtkt4GvmdmK6t62e7oeI/TVUjSjuDp3cCZwViFP5aUKeleSdODgRFuCNr3\nD8Y7fJbIScZIejkYkKTo4KAkku4GagXLeyZ6XYq4V9I8RcYd/U7Ust+TNEHSQknPBFeCIOluSfOD\nWr40NJik44G9B0NT0pOSHpH0oaTFks4Lpse9XVHLjrUtl0uaFkx7NBjiEEk7JP1O0hxJUyQ1C6Zf\nEmzvHEkfRC3+NcK7D5c7GmbmD3/EfBAZoxAiV8C8HjX9euCXwfMcoBBoH7TbCbSPatso+FmLyKVs\njaOXHWNdFwNvEbnaqxmRK1NaBMveSmTMgQxgMnAG0AhYxH/3nhrE2I5rgPuiXj8JvBkspxORcQ1q\nVma7YtUePD+BSOBlBa8fBq4Mnhvw7eD5PVHr+gRoVb5+4HTgtUT/O/DHlx+h3eXSpbRvAN0lDQ5e\n1ycSQPuAaWa2LKrtzZIuDJ7nB+02HWLZZwDPWWR3eL2k94FewLZg2cUAkmYD7YApwB7gMUn/AF6P\nscwWQEm5aeMsMqjDp5KWAl0quV0VORvoCUwPOsS1+O8gGPui6ptBZJBtgI+BJyWNA17876LYALSM\nY53uGPPgdEdCwA/MbOIXJkaOhe4s9/rrQD8z2yXpPSI9u8MtuyJ7o54fAGpY5DbUvYkE1hBgBPC1\ncu/bTSQEo5U/uG/EuV2HIeApM/ufGPP2W9CVPFg/gJkNl9QHOBeYLekUM9tE5He1O871umPIj3G6\neGwH6ka9ngjcGAzRhaTjg9GayqsPbA5CswvQN2re/oPvL+cD4DvB8cY84CvAtIoKU2Rsxfpm9gbw\nIyKDNZS3AOhYbtolkjIkHQd0ILK7H+92lRe9Lf8GBktqGiyjkaS2h3qzpOPMbKqZ/QrYyH+HQzye\nJBtRKV14j9PFYy5QKmkOkeOD9xPZTZ4ZfEFTQuzbJ7wJDJc0l0gwTYmaNxKYK2mmmX03avpLQD9g\nDpFe4M/NbF0QvLHUBV6RVJNIb+/HMdp8ANwnSVE9vkXA+0SOow43sz2SHotzu8r7wrZI+iWRUfoz\ngP3A94EVh3j/vZI6BfX/O9h2gK8C/4hj/e4Y89ORXFqQdD+RL1reDs6PfN3MJiS4rApJyiES7GeY\nWWmi63Ff5LvqLl38H1A70UVUQhvgNg/N6sl7nM45V0ne43TOuUry4HTOuUry4HTOuUry4HTOuUry\n4HTOuUr6f/ydTE7hBbBmAAAAAElFTkSuQmCC\n", 607 | "text/plain": [ 608 | "" 609 | ] 610 | }, 611 | "metadata": {}, 612 | "output_type": "display_data" 613 | } 614 | ], 615 | "source": [ 616 | "# 训练参数\n", 617 | "parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)" 618 | ] 619 | }, 620 | { 621 | "cell_type": "code", 622 | "execution_count": 18, 623 | "metadata": {}, 624 | "outputs": [ 625 | { 626 | "name": "stdout", 627 | "output_type": "stream", 628 | "text": [ 629 | "Accuracy: 0.9952153110047844\n" 630 | ] 631 | } 632 | ], 633 | "source": [ 634 | "# 去预测训练集\n", 635 | "pred_train = predict(train_x, train_y, parameters)" 636 | ] 637 | }, 638 | { 639 | "cell_type": "code", 640 | "execution_count": 19, 641 | "metadata": {}, 642 | "outputs": [ 643 | { 644 | "name": "stdout", 645 | "output_type": "stream", 646 | "text": [ 647 | "Accuracy: 0.78\n" 648 | ] 649 | } 650 | ], 651 | "source": [ 652 | "# 去预测测试集\n", 653 | "pred_test = predict(test_x, test_y, parameters)" 654 | ] 655 | }, 656 | { 657 | "cell_type": "code", 658 | "execution_count": null, 659 | "metadata": { 660 | "collapsed": true 661 | }, 662 | "outputs": [], 663 | "source": [] 664 | } 665 | ], 666 
| "metadata": { 667 | "kernelspec": { 668 | "display_name": "Python 3", 669 | "language": "python", 670 | "name": "python3" 671 | }, 672 | "language_info": { 673 | "codemirror_mode": { 674 | "name": "ipython", 675 | "version": 3 676 | }, 677 | "file_extension": ".py", 678 | "mimetype": "text/x-python", 679 | "name": "python", 680 | "nbconvert_exporter": "python", 681 | "pygments_lexer": "ipython3", 682 | "version": "3.6.3" 683 | } 684 | }, 685 | "nbformat": 4, 686 | "nbformat_minor": 2 687 | } 688 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Build-a-deep-neural-network-DNN-using-Numpy 2 | 不用框架使用numpy从零搭建深度神经网络(DNN) 3 | 4 | python3以上版本即可 5 | 6 | 博客地址:https://blog.csdn.net/huwenxing0801/article/details/84308832 7 | 8 | -------------------------------------------------------------------------------- /datasets/test_catvnoncat.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/649453932/Build-a-deep-neural-network-DNN-using-Numpy/8ddf35f4e155eab9e28809db009e6e4777960843/datasets/test_catvnoncat.h5 -------------------------------------------------------------------------------- /datasets/train_catvnoncat.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/649453932/Build-a-deep-neural-network-DNN-using-Numpy/8ddf35f4e155eab9e28809db009e6e4777960843/datasets/train_catvnoncat.h5 --------------------------------------------------------------------------------