├── AlexNet └── deploy.prototxt ├── BN-GoogLeNet ├── deploy.prototxt ├── solver.prototxt └── train_val.prototxt ├── DenseNet-121 ├── solver.prototxt └── train_val.prototxt ├── DenseNet-161 ├── solver.prototxt └── train_val.prototxt ├── GoogLeNet ├── deploy.prototxt ├── solver.prototxt └── train_val.prototxt ├── Inception-ResNet-v2 ├── deploy.prototxt ├── solver.prototxt └── train_val.prototxt ├── Inception-v3 ├── deploy.prototxt ├── solver.prototxt └── train_val.prototxt ├── Inception-v4 ├── deploy.prototxt ├── solver.prototxt └── train_val.prototxt ├── README.md ├── ResNet-101 ├── deploy.prototxt ├── solver.prototxt └── train_val.prototxt ├── ResNet-152 ├── deploy.prototxt ├── solver.prototxt └── train_val.prototxt ├── ResNet-50 ├── deploy.prototxt ├── solver.prototxt └── train_val.prototxt ├── SE-BN-Inception ├── deploy.prototxt ├── solver.prototxt └── train_val.prototxt ├── SE-ResNet-50 ├── solver.prototxt └── train_val.prototxt ├── SqueezeNet ├── deploy.prototxt ├── solver.prototxt ├── squeezenet_v1.1.caffemodel └── train_val.prototxt ├── VGG16 ├── deploy.prototxt ├── solver.prototxt └── train_val.prototxt └── VGG19 ├── deploy.prototxt ├── solver.prototxt └── train_val.prototxt /AlexNet/deploy.prototxt: -------------------------------------------------------------------------------- 1 | name: "AlexNet" 2 | layer { 3 | name: "data" 4 | type: "Input" 5 | top: "data" 6 | input_param { shape: { dim: 1 dim: 3 dim: 227 dim: 227 } } 7 | } 8 | layer { 9 | name: "conv1" 10 | type: "Convolution" 11 | bottom: "data" 12 | top: "conv1" 13 | param { 14 | lr_mult: 1 15 | decay_mult: 1 16 | } 17 | param { 18 | lr_mult: 2 19 | decay_mult: 0 20 | } 21 | convolution_param { 22 | num_output: 96 23 | kernel_size: 11 24 | stride: 4 25 | } 26 | } 27 | layer { 28 | name: "relu1" 29 | type: "ReLU" 30 | bottom: "conv1" 31 | top: "conv1" 32 | } 33 | layer { 34 | name: "norm1" 35 | type: "LRN" 36 | bottom: "conv1" 37 | top: "norm1" 38 | lrn_param { 39 | local_size: 5 40 | alpha: 0.0001 41 | beta: 0.75 42 | } 43 | } 44 | layer { 45 | name: "pool1" 46 | type: "Pooling" 47 | bottom: "norm1" 48 | top: "pool1" 49 | pooling_param { 50 | pool: MAX 51 | kernel_size: 3 52 | stride: 2 53 | } 54 | } 55 | layer { 56 | name: "conv2" 57 | type: "Convolution" 58 | bottom: "pool1" 59 | top: "conv2" 60 | param { 61 | lr_mult: 1 62 | decay_mult: 1 63 | } 64 | param { 65 | lr_mult: 2 66 | decay_mult: 0 67 | } 68 | convolution_param { 69 | num_output: 256 70 | pad: 2 71 | kernel_size: 5 72 | group: 2 73 | } 74 | } 75 | layer { 76 | name: "relu2" 77 | type: "ReLU" 78 | bottom: "conv2" 79 | top: "conv2" 80 | } 81 | layer { 82 | name: "norm2" 83 | type: "LRN" 84 | bottom: "conv2" 85 | top: "norm2" 86 | lrn_param { 87 | local_size: 5 88 | alpha: 0.0001 89 | beta: 0.75 90 | } 91 | } 92 | layer { 93 | name: "pool2" 94 | type: "Pooling" 95 | bottom: "norm2" 96 | top: "pool2" 97 | pooling_param { 98 | pool: MAX 99 | kernel_size: 3 100 | stride: 2 101 | } 102 | } 103 | layer { 104 | name: "conv3" 105 | type: "Convolution" 106 | bottom: "pool2" 107 | top: "conv3" 108 | param { 109 | lr_mult: 1 110 | decay_mult: 1 111 | } 112 | param { 113 | lr_mult: 2 114 | decay_mult: 0 115 | } 116 | convolution_param { 117 | num_output: 384 118 | pad: 1 119 | kernel_size: 3 120 | } 121 | } 122 | layer { 123 | name: "relu3" 124 | type: "ReLU" 125 | bottom: "conv3" 126 | top: "conv3" 127 | } 128 | layer { 129 | name: "conv4" 130 | type: "Convolution" 131 | bottom: "conv3" 132 | top: "conv4" 133 | param { 134 | lr_mult: 1 135 | decay_mult: 1 136 | } 137 | 
param { 138 | lr_mult: 2 139 | decay_mult: 0 140 | } 141 | convolution_param { 142 | num_output: 384 143 | pad: 1 144 | kernel_size: 3 145 | group: 2 146 | } 147 | } 148 | layer { 149 | name: "relu4" 150 | type: "ReLU" 151 | bottom: "conv4" 152 | top: "conv4" 153 | } 154 | layer { 155 | name: "conv5" 156 | type: "Convolution" 157 | bottom: "conv4" 158 | top: "conv5" 159 | param { 160 | lr_mult: 1 161 | decay_mult: 1 162 | } 163 | param { 164 | lr_mult: 2 165 | decay_mult: 0 166 | } 167 | convolution_param { 168 | num_output: 256 169 | pad: 1 170 | kernel_size: 3 171 | group: 2 172 | } 173 | } 174 | layer { 175 | name: "relu5" 176 | type: "ReLU" 177 | bottom: "conv5" 178 | top: "conv5" 179 | } 180 | layer { 181 | name: "pool5" 182 | type: "Pooling" 183 | bottom: "conv5" 184 | top: "pool5" 185 | pooling_param { 186 | pool: MAX 187 | kernel_size: 3 188 | stride: 2 189 | } 190 | } 191 | layer { 192 | name: "fc6" 193 | type: "InnerProduct" 194 | bottom: "pool5" 195 | top: "fc6" 196 | param { 197 | lr_mult: 1 198 | decay_mult: 1 199 | } 200 | param { 201 | lr_mult: 2 202 | decay_mult: 0 203 | } 204 | inner_product_param { 205 | num_output: 4096 206 | } 207 | } 208 | layer { 209 | name: "relu6" 210 | type: "ReLU" 211 | bottom: "fc6" 212 | top: "fc6" 213 | } 214 | layer { 215 | name: "drop6" 216 | type: "Dropout" 217 | bottom: "fc6" 218 | top: "fc6" 219 | dropout_param { 220 | dropout_ratio: 0.5 221 | } 222 | } 223 | layer { 224 | name: "fc7" 225 | type: "InnerProduct" 226 | bottom: "fc6" 227 | top: "fc7" 228 | param { 229 | lr_mult: 1 230 | decay_mult: 1 231 | } 232 | param { 233 | lr_mult: 2 234 | decay_mult: 0 235 | } 236 | inner_product_param { 237 | num_output: 4096 238 | } 239 | } 240 | layer { 241 | name: "relu7" 242 | type: "ReLU" 243 | bottom: "fc7" 244 | top: "fc7" 245 | } 246 | layer { 247 | name: "drop7" 248 | type: "Dropout" 249 | bottom: "fc7" 250 | top: "fc7" 251 | dropout_param { 252 | dropout_ratio: 0.5 253 | } 254 | } 255 | layer { 256 | name: "fc8" 257 | type: "InnerProduct" 258 | bottom: "fc7" 259 | top: "fc8" 260 | param { 261 | lr_mult: 1 262 | decay_mult: 1 263 | } 264 | param { 265 | lr_mult: 2 266 | decay_mult: 0 267 | } 268 | inner_product_param { 269 | num_output: 1000 270 | } 271 | } 272 | layer { 273 | name: "prob" 274 | type: "Softmax" 275 | bottom: "fc8" 276 | top: "prob" 277 | } 278 | -------------------------------------------------------------------------------- /BN-GoogLeNet/solver.prototxt: -------------------------------------------------------------------------------- 1 | # the definition of neural network model 2 | net: "train_val.prototxt" 3 | # test_iter is related to batch_size in test layer, test_iter * batch_size = the number of test data 4 | test_iter: 512 5 | # carry out test once every 5 training iterations 6 | test_interval: 10 7 | # exclude test phase when test_initialization = false 8 | # test_initialization: false 9 | # display information once every 10 training iterations 10 | display: 1 11 | # 12 | average_loss: 40 13 | # the initial learning rate 14 | base_lr: 0.001 15 | lr_policy: "poly" 16 | stepsize: 320000 17 | gamma: 0.96 18 | # The max number of iterations 19 | max_iter: 10001 20 | power: 1.0 21 | momentum: 0.9 22 | # weight decay item, in case of overfitting 23 | weight_decay: 0.0002 24 | # save once every 50 training iterations 25 | snapshot: 10 26 | # save path 27 | snapshot_prefix: "inception-v2-cervix" 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- 
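The AlexNet deploy.prototxt above (like the other deploy files in this repo) declares a single `data` input — 1×3×227×227 here, 224×224 for the GoogLeNet-style nets below — and ends in a Softmax layer whose top blob is `prob`, so it can be driven directly from pycaffe. A minimal inference sketch, assuming pycaffe is installed; the weights path `alexnet.caffemodel` is hypothetical (only SqueezeNet ships a `.caffemodel` in this tree, the rest come from the Baidu link in the README):

```python
import numpy as np
import caffe

caffe.set_mode_cpu()  # or caffe.set_mode_gpu()

# Load the network in TEST phase: deploy.prototxt defines the graph,
# the .caffemodel holds the trained weights (hypothetical filename).
net = caffe.Net('AlexNet/deploy.prototxt', 'alexnet.caffemodel', caffe.TEST)

# The input blob is 1 x 3 x 227 x 227 (N x C x H x W), as declared in the Input layer.
img = np.random.rand(3, 227, 227).astype(np.float32)  # stand-in for a preprocessed image
net.blobs['data'].data[...] = img[np.newaxis, :]

# Forward pass; the top blob of the final Softmax layer is named "prob".
out = net.forward()
probs = out['prob'][0]  # 1000-way class probabilities
print('top-1 class:', probs.argmax(), 'p =', probs.max())
```

A real input would still need the usual Caffe preprocessing (BGR channel order, mean subtraction, scaling) before being copied into the `data` blob.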
/DenseNet-121/solver.prototxt: -------------------------------------------------------------------------------- 1 | train_net: "train_val.prototxt" 2 | #test_net: "test_densenet.prototxt" 3 | #test_iter: 200 4 | #test_interval: 800 5 | base_lr: 0.001 6 | display: 1 7 | max_iter: 2001 8 | lr_policy: "multistep" 9 | gamma: 0.1 10 | momentum: 0.9 11 | weight_decay: 0.0001 12 | solver_mode: GPU 13 | random_seed: 831486 14 | stepvalue: 115000 15 | stepvalue: 172500 16 | type: "Nesterov" 17 | snapshot_prefix: "densenet-121-product" 18 | snapshot: 50 19 | -------------------------------------------------------------------------------- /DenseNet-161/solver.prototxt: -------------------------------------------------------------------------------- 1 | train_net: "train_val.prototxt" 2 | #test_net: "test_densenet.prototxt" 3 | #test_iter: 200 4 | #test_interval: 800 5 | base_lr: 0.001 6 | display: 1 7 | max_iter: 2001 8 | lr_policy: "multistep" 9 | gamma: 0.1 10 | momentum: 0.9 11 | weight_decay: 0.0001 12 | solver_mode: GPU 13 | random_seed: 831486 14 | stepvalue: 115000 15 | stepvalue: 172500 16 | type: "Nesterov" 17 | snapshot_prefix: "densenet-161-product" 18 | snapshot: 50 19 | -------------------------------------------------------------------------------- /GoogLeNet/deploy.prototxt: -------------------------------------------------------------------------------- 1 | name: "GoogleNet" 2 | input: "data" 3 | input_shape { 4 | dim: 1 5 | dim: 3 6 | dim: 224 7 | dim: 224 8 | } 9 | layer { 10 | name: "conv1/7x7_s2" 11 | type: "Convolution" 12 | bottom: "data" 13 | top: "conv1/7x7_s2" 14 | param { 15 | lr_mult: 1 16 | decay_mult: 1 17 | } 18 | param { 19 | lr_mult: 2 20 | decay_mult: 0 21 | } 22 | convolution_param { 23 | num_output: 64 24 | pad: 3 25 | kernel_size: 7 26 | stride: 2 27 | weight_filler { 28 | type: "xavier" 29 | } 30 | bias_filler { 31 | type: "constant" 32 | value: 0.2 33 | } 34 | } 35 | } 36 | layer { 37 | name: "conv1/relu_7x7" 38 | type: "ReLU" 39 | bottom: "conv1/7x7_s2" 40 | top: "conv1/7x7_s2" 41 | } 42 | layer { 43 | name: "pool1/3x3_s2" 44 | type: "Pooling" 45 | bottom: "conv1/7x7_s2" 46 | top: "pool1/3x3_s2" 47 | pooling_param { 48 | pool: MAX 49 | kernel_size: 3 50 | stride: 2 51 | } 52 | } 53 | layer { 54 | name: "pool1/norm1" 55 | type: "LRN" 56 | bottom: "pool1/3x3_s2" 57 | top: "pool1/norm1" 58 | lrn_param { 59 | local_size: 5 60 | alpha: 0.0001 61 | beta: 0.75 62 | } 63 | } 64 | layer { 65 | name: "conv2/3x3_reduce" 66 | type: "Convolution" 67 | bottom: "pool1/norm1" 68 | top: "conv2/3x3_reduce" 69 | param { 70 | lr_mult: 1 71 | decay_mult: 1 72 | } 73 | param { 74 | lr_mult: 2 75 | decay_mult: 0 76 | } 77 | convolution_param { 78 | num_output: 64 79 | kernel_size: 1 80 | weight_filler { 81 | type: "xavier" 82 | } 83 | bias_filler { 84 | type: "constant" 85 | value: 0.2 86 | } 87 | } 88 | } 89 | layer { 90 | name: "conv2/relu_3x3_reduce" 91 | type: "ReLU" 92 | bottom: "conv2/3x3_reduce" 93 | top: "conv2/3x3_reduce" 94 | } 95 | layer { 96 | name: "conv2/3x3" 97 | type: "Convolution" 98 | bottom: "conv2/3x3_reduce" 99 | top: "conv2/3x3" 100 | param { 101 | lr_mult: 1 102 | decay_mult: 1 103 | } 104 | param { 105 | lr_mult: 2 106 | decay_mult: 0 107 | } 108 | convolution_param { 109 | num_output: 192 110 | pad: 1 111 | kernel_size: 3 112 | weight_filler { 113 | type: "xavier" 114 | } 115 | bias_filler { 116 | type: "constant" 117 | value: 0.2 118 | } 119 | } 120 | } 121 | layer { 122 | name: "conv2/relu_3x3" 123 | type: "ReLU" 124 | bottom: "conv2/3x3" 125 | top: 
"conv2/3x3" 126 | } 127 | layer { 128 | name: "conv2/norm2" 129 | type: "LRN" 130 | bottom: "conv2/3x3" 131 | top: "conv2/norm2" 132 | lrn_param { 133 | local_size: 5 134 | alpha: 0.0001 135 | beta: 0.75 136 | } 137 | } 138 | layer { 139 | name: "pool2/3x3_s2" 140 | type: "Pooling" 141 | bottom: "conv2/norm2" 142 | top: "pool2/3x3_s2" 143 | pooling_param { 144 | pool: MAX 145 | kernel_size: 3 146 | stride: 2 147 | } 148 | } 149 | layer { 150 | name: "inception_3a/1x1" 151 | type: "Convolution" 152 | bottom: "pool2/3x3_s2" 153 | top: "inception_3a/1x1" 154 | param { 155 | lr_mult: 1 156 | decay_mult: 1 157 | } 158 | param { 159 | lr_mult: 2 160 | decay_mult: 0 161 | } 162 | convolution_param { 163 | num_output: 64 164 | kernel_size: 1 165 | weight_filler { 166 | type: "xavier" 167 | } 168 | bias_filler { 169 | type: "constant" 170 | value: 0.2 171 | } 172 | } 173 | } 174 | layer { 175 | name: "inception_3a/relu_1x1" 176 | type: "ReLU" 177 | bottom: "inception_3a/1x1" 178 | top: "inception_3a/1x1" 179 | } 180 | layer { 181 | name: "inception_3a/3x3_reduce" 182 | type: "Convolution" 183 | bottom: "pool2/3x3_s2" 184 | top: "inception_3a/3x3_reduce" 185 | param { 186 | lr_mult: 1 187 | decay_mult: 1 188 | } 189 | param { 190 | lr_mult: 2 191 | decay_mult: 0 192 | } 193 | convolution_param { 194 | num_output: 96 195 | kernel_size: 1 196 | weight_filler { 197 | type: "xavier" 198 | } 199 | bias_filler { 200 | type: "constant" 201 | value: 0.2 202 | } 203 | } 204 | } 205 | layer { 206 | name: "inception_3a/relu_3x3_reduce" 207 | type: "ReLU" 208 | bottom: "inception_3a/3x3_reduce" 209 | top: "inception_3a/3x3_reduce" 210 | } 211 | layer { 212 | name: "inception_3a/3x3" 213 | type: "Convolution" 214 | bottom: "inception_3a/3x3_reduce" 215 | top: "inception_3a/3x3" 216 | param { 217 | lr_mult: 1 218 | decay_mult: 1 219 | } 220 | param { 221 | lr_mult: 2 222 | decay_mult: 0 223 | } 224 | convolution_param { 225 | num_output: 128 226 | pad: 1 227 | kernel_size: 3 228 | weight_filler { 229 | type: "xavier" 230 | } 231 | bias_filler { 232 | type: "constant" 233 | value: 0.2 234 | } 235 | } 236 | } 237 | layer { 238 | name: "inception_3a/relu_3x3" 239 | type: "ReLU" 240 | bottom: "inception_3a/3x3" 241 | top: "inception_3a/3x3" 242 | } 243 | layer { 244 | name: "inception_3a/5x5_reduce" 245 | type: "Convolution" 246 | bottom: "pool2/3x3_s2" 247 | top: "inception_3a/5x5_reduce" 248 | param { 249 | lr_mult: 1 250 | decay_mult: 1 251 | } 252 | param { 253 | lr_mult: 2 254 | decay_mult: 0 255 | } 256 | convolution_param { 257 | num_output: 16 258 | kernel_size: 1 259 | weight_filler { 260 | type: "xavier" 261 | } 262 | bias_filler { 263 | type: "constant" 264 | value: 0.2 265 | } 266 | } 267 | } 268 | layer { 269 | name: "inception_3a/relu_5x5_reduce" 270 | type: "ReLU" 271 | bottom: "inception_3a/5x5_reduce" 272 | top: "inception_3a/5x5_reduce" 273 | } 274 | layer { 275 | name: "inception_3a/5x5" 276 | type: "Convolution" 277 | bottom: "inception_3a/5x5_reduce" 278 | top: "inception_3a/5x5" 279 | param { 280 | lr_mult: 1 281 | decay_mult: 1 282 | } 283 | param { 284 | lr_mult: 2 285 | decay_mult: 0 286 | } 287 | convolution_param { 288 | num_output: 32 289 | pad: 2 290 | kernel_size: 5 291 | weight_filler { 292 | type: "xavier" 293 | } 294 | bias_filler { 295 | type: "constant" 296 | value: 0.2 297 | } 298 | } 299 | } 300 | layer { 301 | name: "inception_3a/relu_5x5" 302 | type: "ReLU" 303 | bottom: "inception_3a/5x5" 304 | top: "inception_3a/5x5" 305 | } 306 | layer { 307 | name: "inception_3a/pool" 308 | 
type: "Pooling" 309 | bottom: "pool2/3x3_s2" 310 | top: "inception_3a/pool" 311 | pooling_param { 312 | pool: MAX 313 | kernel_size: 3 314 | stride: 1 315 | pad: 1 316 | } 317 | } 318 | layer { 319 | name: "inception_3a/pool_proj" 320 | type: "Convolution" 321 | bottom: "inception_3a/pool" 322 | top: "inception_3a/pool_proj" 323 | param { 324 | lr_mult: 1 325 | decay_mult: 1 326 | } 327 | param { 328 | lr_mult: 2 329 | decay_mult: 0 330 | } 331 | convolution_param { 332 | num_output: 32 333 | kernel_size: 1 334 | weight_filler { 335 | type: "xavier" 336 | } 337 | bias_filler { 338 | type: "constant" 339 | value: 0.2 340 | } 341 | } 342 | } 343 | layer { 344 | name: "inception_3a/relu_pool_proj" 345 | type: "ReLU" 346 | bottom: "inception_3a/pool_proj" 347 | top: "inception_3a/pool_proj" 348 | } 349 | layer { 350 | name: "inception_3a/output" 351 | type: "Concat" 352 | bottom: "inception_3a/1x1" 353 | bottom: "inception_3a/3x3" 354 | bottom: "inception_3a/5x5" 355 | bottom: "inception_3a/pool_proj" 356 | top: "inception_3a/output" 357 | } 358 | layer { 359 | name: "inception_3b/1x1" 360 | type: "Convolution" 361 | bottom: "inception_3a/output" 362 | top: "inception_3b/1x1" 363 | param { 364 | lr_mult: 1 365 | decay_mult: 1 366 | } 367 | param { 368 | lr_mult: 2 369 | decay_mult: 0 370 | } 371 | convolution_param { 372 | num_output: 128 373 | kernel_size: 1 374 | weight_filler { 375 | type: "xavier" 376 | } 377 | bias_filler { 378 | type: "constant" 379 | value: 0.2 380 | } 381 | } 382 | } 383 | layer { 384 | name: "inception_3b/relu_1x1" 385 | type: "ReLU" 386 | bottom: "inception_3b/1x1" 387 | top: "inception_3b/1x1" 388 | } 389 | layer { 390 | name: "inception_3b/3x3_reduce" 391 | type: "Convolution" 392 | bottom: "inception_3a/output" 393 | top: "inception_3b/3x3_reduce" 394 | param { 395 | lr_mult: 1 396 | decay_mult: 1 397 | } 398 | param { 399 | lr_mult: 2 400 | decay_mult: 0 401 | } 402 | convolution_param { 403 | num_output: 128 404 | kernel_size: 1 405 | weight_filler { 406 | type: "xavier" 407 | } 408 | bias_filler { 409 | type: "constant" 410 | value: 0.2 411 | } 412 | } 413 | } 414 | layer { 415 | name: "inception_3b/relu_3x3_reduce" 416 | type: "ReLU" 417 | bottom: "inception_3b/3x3_reduce" 418 | top: "inception_3b/3x3_reduce" 419 | } 420 | layer { 421 | name: "inception_3b/3x3" 422 | type: "Convolution" 423 | bottom: "inception_3b/3x3_reduce" 424 | top: "inception_3b/3x3" 425 | param { 426 | lr_mult: 1 427 | decay_mult: 1 428 | } 429 | param { 430 | lr_mult: 2 431 | decay_mult: 0 432 | } 433 | convolution_param { 434 | num_output: 192 435 | pad: 1 436 | kernel_size: 3 437 | weight_filler { 438 | type: "xavier" 439 | } 440 | bias_filler { 441 | type: "constant" 442 | value: 0.2 443 | } 444 | } 445 | } 446 | layer { 447 | name: "inception_3b/relu_3x3" 448 | type: "ReLU" 449 | bottom: "inception_3b/3x3" 450 | top: "inception_3b/3x3" 451 | } 452 | layer { 453 | name: "inception_3b/5x5_reduce" 454 | type: "Convolution" 455 | bottom: "inception_3a/output" 456 | top: "inception_3b/5x5_reduce" 457 | param { 458 | lr_mult: 1 459 | decay_mult: 1 460 | } 461 | param { 462 | lr_mult: 2 463 | decay_mult: 0 464 | } 465 | convolution_param { 466 | num_output: 32 467 | kernel_size: 1 468 | weight_filler { 469 | type: "xavier" 470 | } 471 | bias_filler { 472 | type: "constant" 473 | value: 0.2 474 | } 475 | } 476 | } 477 | layer { 478 | name: "inception_3b/relu_5x5_reduce" 479 | type: "ReLU" 480 | bottom: "inception_3b/5x5_reduce" 481 | top: "inception_3b/5x5_reduce" 482 | } 483 | layer { 484 
| name: "inception_3b/5x5" 485 | type: "Convolution" 486 | bottom: "inception_3b/5x5_reduce" 487 | top: "inception_3b/5x5" 488 | param { 489 | lr_mult: 1 490 | decay_mult: 1 491 | } 492 | param { 493 | lr_mult: 2 494 | decay_mult: 0 495 | } 496 | convolution_param { 497 | num_output: 96 498 | pad: 2 499 | kernel_size: 5 500 | weight_filler { 501 | type: "xavier" 502 | } 503 | bias_filler { 504 | type: "constant" 505 | value: 0.2 506 | } 507 | } 508 | } 509 | layer { 510 | name: "inception_3b/relu_5x5" 511 | type: "ReLU" 512 | bottom: "inception_3b/5x5" 513 | top: "inception_3b/5x5" 514 | } 515 | layer { 516 | name: "inception_3b/pool" 517 | type: "Pooling" 518 | bottom: "inception_3a/output" 519 | top: "inception_3b/pool" 520 | pooling_param { 521 | pool: MAX 522 | kernel_size: 3 523 | stride: 1 524 | pad: 1 525 | } 526 | } 527 | layer { 528 | name: "inception_3b/pool_proj" 529 | type: "Convolution" 530 | bottom: "inception_3b/pool" 531 | top: "inception_3b/pool_proj" 532 | param { 533 | lr_mult: 1 534 | decay_mult: 1 535 | } 536 | param { 537 | lr_mult: 2 538 | decay_mult: 0 539 | } 540 | convolution_param { 541 | num_output: 64 542 | kernel_size: 1 543 | weight_filler { 544 | type: "xavier" 545 | } 546 | bias_filler { 547 | type: "constant" 548 | value: 0.2 549 | } 550 | } 551 | } 552 | layer { 553 | name: "inception_3b/relu_pool_proj" 554 | type: "ReLU" 555 | bottom: "inception_3b/pool_proj" 556 | top: "inception_3b/pool_proj" 557 | } 558 | layer { 559 | name: "inception_3b/output" 560 | type: "Concat" 561 | bottom: "inception_3b/1x1" 562 | bottom: "inception_3b/3x3" 563 | bottom: "inception_3b/5x5" 564 | bottom: "inception_3b/pool_proj" 565 | top: "inception_3b/output" 566 | } 567 | layer { 568 | name: "pool3/3x3_s2" 569 | type: "Pooling" 570 | bottom: "inception_3b/output" 571 | top: "pool3/3x3_s2" 572 | pooling_param { 573 | pool: MAX 574 | kernel_size: 3 575 | stride: 2 576 | } 577 | } 578 | layer { 579 | name: "inception_4a/1x1" 580 | type: "Convolution" 581 | bottom: "pool3/3x3_s2" 582 | top: "inception_4a/1x1" 583 | param { 584 | lr_mult: 1 585 | decay_mult: 1 586 | } 587 | param { 588 | lr_mult: 2 589 | decay_mult: 0 590 | } 591 | convolution_param { 592 | num_output: 192 593 | kernel_size: 1 594 | weight_filler { 595 | type: "xavier" 596 | } 597 | bias_filler { 598 | type: "constant" 599 | value: 0.2 600 | } 601 | } 602 | } 603 | layer { 604 | name: "inception_4a/relu_1x1" 605 | type: "ReLU" 606 | bottom: "inception_4a/1x1" 607 | top: "inception_4a/1x1" 608 | } 609 | layer { 610 | name: "inception_4a/3x3_reduce" 611 | type: "Convolution" 612 | bottom: "pool3/3x3_s2" 613 | top: "inception_4a/3x3_reduce" 614 | param { 615 | lr_mult: 1 616 | decay_mult: 1 617 | } 618 | param { 619 | lr_mult: 2 620 | decay_mult: 0 621 | } 622 | convolution_param { 623 | num_output: 96 624 | kernel_size: 1 625 | weight_filler { 626 | type: "xavier" 627 | } 628 | bias_filler { 629 | type: "constant" 630 | value: 0.2 631 | } 632 | } 633 | } 634 | layer { 635 | name: "inception_4a/relu_3x3_reduce" 636 | type: "ReLU" 637 | bottom: "inception_4a/3x3_reduce" 638 | top: "inception_4a/3x3_reduce" 639 | } 640 | layer { 641 | name: "inception_4a/3x3" 642 | type: "Convolution" 643 | bottom: "inception_4a/3x3_reduce" 644 | top: "inception_4a/3x3" 645 | param { 646 | lr_mult: 1 647 | decay_mult: 1 648 | } 649 | param { 650 | lr_mult: 2 651 | decay_mult: 0 652 | } 653 | convolution_param { 654 | num_output: 208 655 | pad: 1 656 | kernel_size: 3 657 | weight_filler { 658 | type: "xavier" 659 | } 660 | bias_filler 
{ 661 | type: "constant" 662 | value: 0.2 663 | } 664 | } 665 | } 666 | layer { 667 | name: "inception_4a/relu_3x3" 668 | type: "ReLU" 669 | bottom: "inception_4a/3x3" 670 | top: "inception_4a/3x3" 671 | } 672 | layer { 673 | name: "inception_4a/5x5_reduce" 674 | type: "Convolution" 675 | bottom: "pool3/3x3_s2" 676 | top: "inception_4a/5x5_reduce" 677 | param { 678 | lr_mult: 1 679 | decay_mult: 1 680 | } 681 | param { 682 | lr_mult: 2 683 | decay_mult: 0 684 | } 685 | convolution_param { 686 | num_output: 16 687 | kernel_size: 1 688 | weight_filler { 689 | type: "xavier" 690 | } 691 | bias_filler { 692 | type: "constant" 693 | value: 0.2 694 | } 695 | } 696 | } 697 | layer { 698 | name: "inception_4a/relu_5x5_reduce" 699 | type: "ReLU" 700 | bottom: "inception_4a/5x5_reduce" 701 | top: "inception_4a/5x5_reduce" 702 | } 703 | layer { 704 | name: "inception_4a/5x5" 705 | type: "Convolution" 706 | bottom: "inception_4a/5x5_reduce" 707 | top: "inception_4a/5x5" 708 | param { 709 | lr_mult: 1 710 | decay_mult: 1 711 | } 712 | param { 713 | lr_mult: 2 714 | decay_mult: 0 715 | } 716 | convolution_param { 717 | num_output: 48 718 | pad: 2 719 | kernel_size: 5 720 | weight_filler { 721 | type: "xavier" 722 | } 723 | bias_filler { 724 | type: "constant" 725 | value: 0.2 726 | } 727 | } 728 | } 729 | layer { 730 | name: "inception_4a/relu_5x5" 731 | type: "ReLU" 732 | bottom: "inception_4a/5x5" 733 | top: "inception_4a/5x5" 734 | } 735 | layer { 736 | name: "inception_4a/pool" 737 | type: "Pooling" 738 | bottom: "pool3/3x3_s2" 739 | top: "inception_4a/pool" 740 | pooling_param { 741 | pool: MAX 742 | kernel_size: 3 743 | stride: 1 744 | pad: 1 745 | } 746 | } 747 | layer { 748 | name: "inception_4a/pool_proj" 749 | type: "Convolution" 750 | bottom: "inception_4a/pool" 751 | top: "inception_4a/pool_proj" 752 | param { 753 | lr_mult: 1 754 | decay_mult: 1 755 | } 756 | param { 757 | lr_mult: 2 758 | decay_mult: 0 759 | } 760 | convolution_param { 761 | num_output: 64 762 | kernel_size: 1 763 | weight_filler { 764 | type: "xavier" 765 | } 766 | bias_filler { 767 | type: "constant" 768 | value: 0.2 769 | } 770 | } 771 | } 772 | layer { 773 | name: "inception_4a/relu_pool_proj" 774 | type: "ReLU" 775 | bottom: "inception_4a/pool_proj" 776 | top: "inception_4a/pool_proj" 777 | } 778 | layer { 779 | name: "inception_4a/output" 780 | type: "Concat" 781 | bottom: "inception_4a/1x1" 782 | bottom: "inception_4a/3x3" 783 | bottom: "inception_4a/5x5" 784 | bottom: "inception_4a/pool_proj" 785 | top: "inception_4a/output" 786 | } 787 | layer { 788 | name: "loss1/ave_pool" 789 | type: "Pooling" 790 | bottom: "inception_4a/output" 791 | top: "loss1/ave_pool" 792 | pooling_param { 793 | pool: AVE 794 | kernel_size: 5 795 | stride: 3 796 | } 797 | } 798 | layer { 799 | name: "loss1/conv" 800 | type: "Convolution" 801 | bottom: "loss1/ave_pool" 802 | top: "loss1/conv" 803 | param { 804 | lr_mult: 1 805 | decay_mult: 1 806 | } 807 | param { 808 | lr_mult: 2 809 | decay_mult: 0 810 | } 811 | convolution_param { 812 | num_output: 128 813 | kernel_size: 1 814 | weight_filler { 815 | type: "xavier" 816 | } 817 | bias_filler { 818 | type: "constant" 819 | value: 0.2 820 | } 821 | } 822 | } 823 | layer { 824 | name: "loss1/relu_conv" 825 | type: "ReLU" 826 | bottom: "loss1/conv" 827 | top: "loss1/conv" 828 | } 829 | layer { 830 | name: "loss1/fc" 831 | type: "InnerProduct" 832 | bottom: "loss1/conv" 833 | top: "loss1/fc" 834 | param { 835 | lr_mult: 1 836 | decay_mult: 1 837 | } 838 | param { 839 | lr_mult: 2 840 | 
decay_mult: 0 841 | } 842 | inner_product_param { 843 | num_output: 1024 844 | weight_filler { 845 | type: "xavier" 846 | } 847 | bias_filler { 848 | type: "constant" 849 | value: 0.2 850 | } 851 | } 852 | } 853 | layer { 854 | name: "loss1/relu_fc" 855 | type: "ReLU" 856 | bottom: "loss1/fc" 857 | top: "loss1/fc" 858 | } 859 | layer { 860 | name: "loss1/drop_fc" 861 | type: "Dropout" 862 | bottom: "loss1/fc" 863 | top: "loss1/fc" 864 | dropout_param { 865 | dropout_ratio: 0.7 866 | } 867 | } 868 | layer { 869 | name: "inception_4b/1x1" 870 | type: "Convolution" 871 | bottom: "inception_4a/output" 872 | top: "inception_4b/1x1" 873 | param { 874 | lr_mult: 1 875 | decay_mult: 1 876 | } 877 | param { 878 | lr_mult: 2 879 | decay_mult: 0 880 | } 881 | convolution_param { 882 | num_output: 160 883 | kernel_size: 1 884 | weight_filler { 885 | type: "xavier" 886 | } 887 | bias_filler { 888 | type: "constant" 889 | value: 0.2 890 | } 891 | } 892 | } 893 | layer { 894 | name: "inception_4b/relu_1x1" 895 | type: "ReLU" 896 | bottom: "inception_4b/1x1" 897 | top: "inception_4b/1x1" 898 | } 899 | layer { 900 | name: "inception_4b/3x3_reduce" 901 | type: "Convolution" 902 | bottom: "inception_4a/output" 903 | top: "inception_4b/3x3_reduce" 904 | param { 905 | lr_mult: 1 906 | decay_mult: 1 907 | } 908 | param { 909 | lr_mult: 2 910 | decay_mult: 0 911 | } 912 | convolution_param { 913 | num_output: 112 914 | kernel_size: 1 915 | weight_filler { 916 | type: "xavier" 917 | } 918 | bias_filler { 919 | type: "constant" 920 | value: 0.2 921 | } 922 | } 923 | } 924 | layer { 925 | name: "inception_4b/relu_3x3_reduce" 926 | type: "ReLU" 927 | bottom: "inception_4b/3x3_reduce" 928 | top: "inception_4b/3x3_reduce" 929 | } 930 | layer { 931 | name: "inception_4b/3x3" 932 | type: "Convolution" 933 | bottom: "inception_4b/3x3_reduce" 934 | top: "inception_4b/3x3" 935 | param { 936 | lr_mult: 1 937 | decay_mult: 1 938 | } 939 | param { 940 | lr_mult: 2 941 | decay_mult: 0 942 | } 943 | convolution_param { 944 | num_output: 224 945 | pad: 1 946 | kernel_size: 3 947 | weight_filler { 948 | type: "xavier" 949 | } 950 | bias_filler { 951 | type: "constant" 952 | value: 0.2 953 | } 954 | } 955 | } 956 | layer { 957 | name: "inception_4b/relu_3x3" 958 | type: "ReLU" 959 | bottom: "inception_4b/3x3" 960 | top: "inception_4b/3x3" 961 | } 962 | layer { 963 | name: "inception_4b/5x5_reduce" 964 | type: "Convolution" 965 | bottom: "inception_4a/output" 966 | top: "inception_4b/5x5_reduce" 967 | param { 968 | lr_mult: 1 969 | decay_mult: 1 970 | } 971 | param { 972 | lr_mult: 2 973 | decay_mult: 0 974 | } 975 | convolution_param { 976 | num_output: 24 977 | kernel_size: 1 978 | weight_filler { 979 | type: "xavier" 980 | } 981 | bias_filler { 982 | type: "constant" 983 | value: 0.2 984 | } 985 | } 986 | } 987 | layer { 988 | name: "inception_4b/relu_5x5_reduce" 989 | type: "ReLU" 990 | bottom: "inception_4b/5x5_reduce" 991 | top: "inception_4b/5x5_reduce" 992 | } 993 | layer { 994 | name: "inception_4b/5x5" 995 | type: "Convolution" 996 | bottom: "inception_4b/5x5_reduce" 997 | top: "inception_4b/5x5" 998 | param { 999 | lr_mult: 1 1000 | decay_mult: 1 1001 | } 1002 | param { 1003 | lr_mult: 2 1004 | decay_mult: 0 1005 | } 1006 | convolution_param { 1007 | num_output: 64 1008 | pad: 2 1009 | kernel_size: 5 1010 | weight_filler { 1011 | type: "xavier" 1012 | } 1013 | bias_filler { 1014 | type: "constant" 1015 | value: 0.2 1016 | } 1017 | } 1018 | } 1019 | layer { 1020 | name: "inception_4b/relu_5x5" 1021 | type: "ReLU" 1022 | 
bottom: "inception_4b/5x5" 1023 | top: "inception_4b/5x5" 1024 | } 1025 | layer { 1026 | name: "inception_4b/pool" 1027 | type: "Pooling" 1028 | bottom: "inception_4a/output" 1029 | top: "inception_4b/pool" 1030 | pooling_param { 1031 | pool: MAX 1032 | kernel_size: 3 1033 | stride: 1 1034 | pad: 1 1035 | } 1036 | } 1037 | layer { 1038 | name: "inception_4b/pool_proj" 1039 | type: "Convolution" 1040 | bottom: "inception_4b/pool" 1041 | top: "inception_4b/pool_proj" 1042 | param { 1043 | lr_mult: 1 1044 | decay_mult: 1 1045 | } 1046 | param { 1047 | lr_mult: 2 1048 | decay_mult: 0 1049 | } 1050 | convolution_param { 1051 | num_output: 64 1052 | kernel_size: 1 1053 | weight_filler { 1054 | type: "xavier" 1055 | } 1056 | bias_filler { 1057 | type: "constant" 1058 | value: 0.2 1059 | } 1060 | } 1061 | } 1062 | layer { 1063 | name: "inception_4b/relu_pool_proj" 1064 | type: "ReLU" 1065 | bottom: "inception_4b/pool_proj" 1066 | top: "inception_4b/pool_proj" 1067 | } 1068 | layer { 1069 | name: "inception_4b/output" 1070 | type: "Concat" 1071 | bottom: "inception_4b/1x1" 1072 | bottom: "inception_4b/3x3" 1073 | bottom: "inception_4b/5x5" 1074 | bottom: "inception_4b/pool_proj" 1075 | top: "inception_4b/output" 1076 | } 1077 | layer { 1078 | name: "inception_4c/1x1" 1079 | type: "Convolution" 1080 | bottom: "inception_4b/output" 1081 | top: "inception_4c/1x1" 1082 | param { 1083 | lr_mult: 1 1084 | decay_mult: 1 1085 | } 1086 | param { 1087 | lr_mult: 2 1088 | decay_mult: 0 1089 | } 1090 | convolution_param { 1091 | num_output: 128 1092 | kernel_size: 1 1093 | weight_filler { 1094 | type: "xavier" 1095 | } 1096 | bias_filler { 1097 | type: "constant" 1098 | value: 0.2 1099 | } 1100 | } 1101 | } 1102 | layer { 1103 | name: "inception_4c/relu_1x1" 1104 | type: "ReLU" 1105 | bottom: "inception_4c/1x1" 1106 | top: "inception_4c/1x1" 1107 | } 1108 | layer { 1109 | name: "inception_4c/3x3_reduce" 1110 | type: "Convolution" 1111 | bottom: "inception_4b/output" 1112 | top: "inception_4c/3x3_reduce" 1113 | param { 1114 | lr_mult: 1 1115 | decay_mult: 1 1116 | } 1117 | param { 1118 | lr_mult: 2 1119 | decay_mult: 0 1120 | } 1121 | convolution_param { 1122 | num_output: 128 1123 | kernel_size: 1 1124 | weight_filler { 1125 | type: "xavier" 1126 | } 1127 | bias_filler { 1128 | type: "constant" 1129 | value: 0.2 1130 | } 1131 | } 1132 | } 1133 | layer { 1134 | name: "inception_4c/relu_3x3_reduce" 1135 | type: "ReLU" 1136 | bottom: "inception_4c/3x3_reduce" 1137 | top: "inception_4c/3x3_reduce" 1138 | } 1139 | layer { 1140 | name: "inception_4c/3x3" 1141 | type: "Convolution" 1142 | bottom: "inception_4c/3x3_reduce" 1143 | top: "inception_4c/3x3" 1144 | param { 1145 | lr_mult: 1 1146 | decay_mult: 1 1147 | } 1148 | param { 1149 | lr_mult: 2 1150 | decay_mult: 0 1151 | } 1152 | convolution_param { 1153 | num_output: 256 1154 | pad: 1 1155 | kernel_size: 3 1156 | weight_filler { 1157 | type: "xavier" 1158 | } 1159 | bias_filler { 1160 | type: "constant" 1161 | value: 0.2 1162 | } 1163 | } 1164 | } 1165 | layer { 1166 | name: "inception_4c/relu_3x3" 1167 | type: "ReLU" 1168 | bottom: "inception_4c/3x3" 1169 | top: "inception_4c/3x3" 1170 | } 1171 | layer { 1172 | name: "inception_4c/5x5_reduce" 1173 | type: "Convolution" 1174 | bottom: "inception_4b/output" 1175 | top: "inception_4c/5x5_reduce" 1176 | param { 1177 | lr_mult: 1 1178 | decay_mult: 1 1179 | } 1180 | param { 1181 | lr_mult: 2 1182 | decay_mult: 0 1183 | } 1184 | convolution_param { 1185 | num_output: 24 1186 | kernel_size: 1 1187 | weight_filler { 
1188 | type: "xavier" 1189 | } 1190 | bias_filler { 1191 | type: "constant" 1192 | value: 0.2 1193 | } 1194 | } 1195 | } 1196 | layer { 1197 | name: "inception_4c/relu_5x5_reduce" 1198 | type: "ReLU" 1199 | bottom: "inception_4c/5x5_reduce" 1200 | top: "inception_4c/5x5_reduce" 1201 | } 1202 | layer { 1203 | name: "inception_4c/5x5" 1204 | type: "Convolution" 1205 | bottom: "inception_4c/5x5_reduce" 1206 | top: "inception_4c/5x5" 1207 | param { 1208 | lr_mult: 1 1209 | decay_mult: 1 1210 | } 1211 | param { 1212 | lr_mult: 2 1213 | decay_mult: 0 1214 | } 1215 | convolution_param { 1216 | num_output: 64 1217 | pad: 2 1218 | kernel_size: 5 1219 | weight_filler { 1220 | type: "xavier" 1221 | } 1222 | bias_filler { 1223 | type: "constant" 1224 | value: 0.2 1225 | } 1226 | } 1227 | } 1228 | layer { 1229 | name: "inception_4c/relu_5x5" 1230 | type: "ReLU" 1231 | bottom: "inception_4c/5x5" 1232 | top: "inception_4c/5x5" 1233 | } 1234 | layer { 1235 | name: "inception_4c/pool" 1236 | type: "Pooling" 1237 | bottom: "inception_4b/output" 1238 | top: "inception_4c/pool" 1239 | pooling_param { 1240 | pool: MAX 1241 | kernel_size: 3 1242 | stride: 1 1243 | pad: 1 1244 | } 1245 | } 1246 | layer { 1247 | name: "inception_4c/pool_proj" 1248 | type: "Convolution" 1249 | bottom: "inception_4c/pool" 1250 | top: "inception_4c/pool_proj" 1251 | param { 1252 | lr_mult: 1 1253 | decay_mult: 1 1254 | } 1255 | param { 1256 | lr_mult: 2 1257 | decay_mult: 0 1258 | } 1259 | convolution_param { 1260 | num_output: 64 1261 | kernel_size: 1 1262 | weight_filler { 1263 | type: "xavier" 1264 | } 1265 | bias_filler { 1266 | type: "constant" 1267 | value: 0.2 1268 | } 1269 | } 1270 | } 1271 | layer { 1272 | name: "inception_4c/relu_pool_proj" 1273 | type: "ReLU" 1274 | bottom: "inception_4c/pool_proj" 1275 | top: "inception_4c/pool_proj" 1276 | } 1277 | layer { 1278 | name: "inception_4c/output" 1279 | type: "Concat" 1280 | bottom: "inception_4c/1x1" 1281 | bottom: "inception_4c/3x3" 1282 | bottom: "inception_4c/5x5" 1283 | bottom: "inception_4c/pool_proj" 1284 | top: "inception_4c/output" 1285 | } 1286 | layer { 1287 | name: "inception_4d/1x1" 1288 | type: "Convolution" 1289 | bottom: "inception_4c/output" 1290 | top: "inception_4d/1x1" 1291 | param { 1292 | lr_mult: 1 1293 | decay_mult: 1 1294 | } 1295 | param { 1296 | lr_mult: 2 1297 | decay_mult: 0 1298 | } 1299 | convolution_param { 1300 | num_output: 112 1301 | kernel_size: 1 1302 | weight_filler { 1303 | type: "xavier" 1304 | } 1305 | bias_filler { 1306 | type: "constant" 1307 | value: 0.2 1308 | } 1309 | } 1310 | } 1311 | layer { 1312 | name: "inception_4d/relu_1x1" 1313 | type: "ReLU" 1314 | bottom: "inception_4d/1x1" 1315 | top: "inception_4d/1x1" 1316 | } 1317 | layer { 1318 | name: "inception_4d/3x3_reduce" 1319 | type: "Convolution" 1320 | bottom: "inception_4c/output" 1321 | top: "inception_4d/3x3_reduce" 1322 | param { 1323 | lr_mult: 1 1324 | decay_mult: 1 1325 | } 1326 | param { 1327 | lr_mult: 2 1328 | decay_mult: 0 1329 | } 1330 | convolution_param { 1331 | num_output: 144 1332 | kernel_size: 1 1333 | weight_filler { 1334 | type: "xavier" 1335 | } 1336 | bias_filler { 1337 | type: "constant" 1338 | value: 0.2 1339 | } 1340 | } 1341 | } 1342 | layer { 1343 | name: "inception_4d/relu_3x3_reduce" 1344 | type: "ReLU" 1345 | bottom: "inception_4d/3x3_reduce" 1346 | top: "inception_4d/3x3_reduce" 1347 | } 1348 | layer { 1349 | name: "inception_4d/3x3" 1350 | type: "Convolution" 1351 | bottom: "inception_4d/3x3_reduce" 1352 | top: "inception_4d/3x3" 1353 | param 
{ 1354 | lr_mult: 1 1355 | decay_mult: 1 1356 | } 1357 | param { 1358 | lr_mult: 2 1359 | decay_mult: 0 1360 | } 1361 | convolution_param { 1362 | num_output: 288 1363 | pad: 1 1364 | kernel_size: 3 1365 | weight_filler { 1366 | type: "xavier" 1367 | } 1368 | bias_filler { 1369 | type: "constant" 1370 | value: 0.2 1371 | } 1372 | } 1373 | } 1374 | layer { 1375 | name: "inception_4d/relu_3x3" 1376 | type: "ReLU" 1377 | bottom: "inception_4d/3x3" 1378 | top: "inception_4d/3x3" 1379 | } 1380 | layer { 1381 | name: "inception_4d/5x5_reduce" 1382 | type: "Convolution" 1383 | bottom: "inception_4c/output" 1384 | top: "inception_4d/5x5_reduce" 1385 | param { 1386 | lr_mult: 1 1387 | decay_mult: 1 1388 | } 1389 | param { 1390 | lr_mult: 2 1391 | decay_mult: 0 1392 | } 1393 | convolution_param { 1394 | num_output: 32 1395 | kernel_size: 1 1396 | weight_filler { 1397 | type: "xavier" 1398 | } 1399 | bias_filler { 1400 | type: "constant" 1401 | value: 0.2 1402 | } 1403 | } 1404 | } 1405 | layer { 1406 | name: "inception_4d/relu_5x5_reduce" 1407 | type: "ReLU" 1408 | bottom: "inception_4d/5x5_reduce" 1409 | top: "inception_4d/5x5_reduce" 1410 | } 1411 | layer { 1412 | name: "inception_4d/5x5" 1413 | type: "Convolution" 1414 | bottom: "inception_4d/5x5_reduce" 1415 | top: "inception_4d/5x5" 1416 | param { 1417 | lr_mult: 1 1418 | decay_mult: 1 1419 | } 1420 | param { 1421 | lr_mult: 2 1422 | decay_mult: 0 1423 | } 1424 | convolution_param { 1425 | num_output: 64 1426 | pad: 2 1427 | kernel_size: 5 1428 | weight_filler { 1429 | type: "xavier" 1430 | } 1431 | bias_filler { 1432 | type: "constant" 1433 | value: 0.2 1434 | } 1435 | } 1436 | } 1437 | layer { 1438 | name: "inception_4d/relu_5x5" 1439 | type: "ReLU" 1440 | bottom: "inception_4d/5x5" 1441 | top: "inception_4d/5x5" 1442 | } 1443 | layer { 1444 | name: "inception_4d/pool" 1445 | type: "Pooling" 1446 | bottom: "inception_4c/output" 1447 | top: "inception_4d/pool" 1448 | pooling_param { 1449 | pool: MAX 1450 | kernel_size: 3 1451 | stride: 1 1452 | pad: 1 1453 | } 1454 | } 1455 | layer { 1456 | name: "inception_4d/pool_proj" 1457 | type: "Convolution" 1458 | bottom: "inception_4d/pool" 1459 | top: "inception_4d/pool_proj" 1460 | param { 1461 | lr_mult: 1 1462 | decay_mult: 1 1463 | } 1464 | param { 1465 | lr_mult: 2 1466 | decay_mult: 0 1467 | } 1468 | convolution_param { 1469 | num_output: 64 1470 | kernel_size: 1 1471 | weight_filler { 1472 | type: "xavier" 1473 | } 1474 | bias_filler { 1475 | type: "constant" 1476 | value: 0.2 1477 | } 1478 | } 1479 | } 1480 | layer { 1481 | name: "inception_4d/relu_pool_proj" 1482 | type: "ReLU" 1483 | bottom: "inception_4d/pool_proj" 1484 | top: "inception_4d/pool_proj" 1485 | } 1486 | layer { 1487 | name: "inception_4d/output" 1488 | type: "Concat" 1489 | bottom: "inception_4d/1x1" 1490 | bottom: "inception_4d/3x3" 1491 | bottom: "inception_4d/5x5" 1492 | bottom: "inception_4d/pool_proj" 1493 | top: "inception_4d/output" 1494 | } 1495 | layer { 1496 | name: "loss2/ave_pool" 1497 | type: "Pooling" 1498 | bottom: "inception_4d/output" 1499 | top: "loss2/ave_pool" 1500 | pooling_param { 1501 | pool: AVE 1502 | kernel_size: 5 1503 | stride: 3 1504 | } 1505 | } 1506 | layer { 1507 | name: "loss2/conv" 1508 | type: "Convolution" 1509 | bottom: "loss2/ave_pool" 1510 | top: "loss2/conv" 1511 | param { 1512 | lr_mult: 1 1513 | decay_mult: 1 1514 | } 1515 | param { 1516 | lr_mult: 2 1517 | decay_mult: 0 1518 | } 1519 | convolution_param { 1520 | num_output: 128 1521 | kernel_size: 1 1522 | weight_filler { 1523 | type: 
"xavier" 1524 | } 1525 | bias_filler { 1526 | type: "constant" 1527 | value: 0.2 1528 | } 1529 | } 1530 | } 1531 | layer { 1532 | name: "loss2/relu_conv" 1533 | type: "ReLU" 1534 | bottom: "loss2/conv" 1535 | top: "loss2/conv" 1536 | } 1537 | layer { 1538 | name: "loss2/fc" 1539 | type: "InnerProduct" 1540 | bottom: "loss2/conv" 1541 | top: "loss2/fc" 1542 | param { 1543 | lr_mult: 1 1544 | decay_mult: 1 1545 | } 1546 | param { 1547 | lr_mult: 2 1548 | decay_mult: 0 1549 | } 1550 | inner_product_param { 1551 | num_output: 1024 1552 | weight_filler { 1553 | type: "xavier" 1554 | } 1555 | bias_filler { 1556 | type: "constant" 1557 | value: 0.2 1558 | } 1559 | } 1560 | } 1561 | layer { 1562 | name: "loss2/relu_fc" 1563 | type: "ReLU" 1564 | bottom: "loss2/fc" 1565 | top: "loss2/fc" 1566 | } 1567 | layer { 1568 | name: "loss2/drop_fc" 1569 | type: "Dropout" 1570 | bottom: "loss2/fc" 1571 | top: "loss2/fc" 1572 | dropout_param { 1573 | dropout_ratio: 0.7 1574 | } 1575 | } 1576 | layer { 1577 | name: "inception_4e/1x1" 1578 | type: "Convolution" 1579 | bottom: "inception_4d/output" 1580 | top: "inception_4e/1x1" 1581 | param { 1582 | lr_mult: 1 1583 | decay_mult: 1 1584 | } 1585 | param { 1586 | lr_mult: 2 1587 | decay_mult: 0 1588 | } 1589 | convolution_param { 1590 | num_output: 256 1591 | kernel_size: 1 1592 | weight_filler { 1593 | type: "xavier" 1594 | } 1595 | bias_filler { 1596 | type: "constant" 1597 | value: 0.2 1598 | } 1599 | } 1600 | } 1601 | layer { 1602 | name: "inception_4e/relu_1x1" 1603 | type: "ReLU" 1604 | bottom: "inception_4e/1x1" 1605 | top: "inception_4e/1x1" 1606 | } 1607 | layer { 1608 | name: "inception_4e/3x3_reduce" 1609 | type: "Convolution" 1610 | bottom: "inception_4d/output" 1611 | top: "inception_4e/3x3_reduce" 1612 | param { 1613 | lr_mult: 1 1614 | decay_mult: 1 1615 | } 1616 | param { 1617 | lr_mult: 2 1618 | decay_mult: 0 1619 | } 1620 | convolution_param { 1621 | num_output: 160 1622 | kernel_size: 1 1623 | weight_filler { 1624 | type: "xavier" 1625 | } 1626 | bias_filler { 1627 | type: "constant" 1628 | value: 0.2 1629 | } 1630 | } 1631 | } 1632 | layer { 1633 | name: "inception_4e/relu_3x3_reduce" 1634 | type: "ReLU" 1635 | bottom: "inception_4e/3x3_reduce" 1636 | top: "inception_4e/3x3_reduce" 1637 | } 1638 | layer { 1639 | name: "inception_4e/3x3" 1640 | type: "Convolution" 1641 | bottom: "inception_4e/3x3_reduce" 1642 | top: "inception_4e/3x3" 1643 | param { 1644 | lr_mult: 1 1645 | decay_mult: 1 1646 | } 1647 | param { 1648 | lr_mult: 2 1649 | decay_mult: 0 1650 | } 1651 | convolution_param { 1652 | num_output: 320 1653 | pad: 1 1654 | kernel_size: 3 1655 | weight_filler { 1656 | type: "xavier" 1657 | } 1658 | bias_filler { 1659 | type: "constant" 1660 | value: 0.2 1661 | } 1662 | } 1663 | } 1664 | layer { 1665 | name: "inception_4e/relu_3x3" 1666 | type: "ReLU" 1667 | bottom: "inception_4e/3x3" 1668 | top: "inception_4e/3x3" 1669 | } 1670 | layer { 1671 | name: "inception_4e/5x5_reduce" 1672 | type: "Convolution" 1673 | bottom: "inception_4d/output" 1674 | top: "inception_4e/5x5_reduce" 1675 | param { 1676 | lr_mult: 1 1677 | decay_mult: 1 1678 | } 1679 | param { 1680 | lr_mult: 2 1681 | decay_mult: 0 1682 | } 1683 | convolution_param { 1684 | num_output: 32 1685 | kernel_size: 1 1686 | weight_filler { 1687 | type: "xavier" 1688 | } 1689 | bias_filler { 1690 | type: "constant" 1691 | value: 0.2 1692 | } 1693 | } 1694 | } 1695 | layer { 1696 | name: "inception_4e/relu_5x5_reduce" 1697 | type: "ReLU" 1698 | bottom: "inception_4e/5x5_reduce" 1699 | top: 
"inception_4e/5x5_reduce" 1700 | } 1701 | layer { 1702 | name: "inception_4e/5x5" 1703 | type: "Convolution" 1704 | bottom: "inception_4e/5x5_reduce" 1705 | top: "inception_4e/5x5" 1706 | param { 1707 | lr_mult: 1 1708 | decay_mult: 1 1709 | } 1710 | param { 1711 | lr_mult: 2 1712 | decay_mult: 0 1713 | } 1714 | convolution_param { 1715 | num_output: 128 1716 | pad: 2 1717 | kernel_size: 5 1718 | weight_filler { 1719 | type: "xavier" 1720 | } 1721 | bias_filler { 1722 | type: "constant" 1723 | value: 0.2 1724 | } 1725 | } 1726 | } 1727 | layer { 1728 | name: "inception_4e/relu_5x5" 1729 | type: "ReLU" 1730 | bottom: "inception_4e/5x5" 1731 | top: "inception_4e/5x5" 1732 | } 1733 | layer { 1734 | name: "inception_4e/pool" 1735 | type: "Pooling" 1736 | bottom: "inception_4d/output" 1737 | top: "inception_4e/pool" 1738 | pooling_param { 1739 | pool: MAX 1740 | kernel_size: 3 1741 | stride: 1 1742 | pad: 1 1743 | } 1744 | } 1745 | layer { 1746 | name: "inception_4e/pool_proj" 1747 | type: "Convolution" 1748 | bottom: "inception_4e/pool" 1749 | top: "inception_4e/pool_proj" 1750 | param { 1751 | lr_mult: 1 1752 | decay_mult: 1 1753 | } 1754 | param { 1755 | lr_mult: 2 1756 | decay_mult: 0 1757 | } 1758 | convolution_param { 1759 | num_output: 128 1760 | kernel_size: 1 1761 | weight_filler { 1762 | type: "xavier" 1763 | } 1764 | bias_filler { 1765 | type: "constant" 1766 | value: 0.2 1767 | } 1768 | } 1769 | } 1770 | layer { 1771 | name: "inception_4e/relu_pool_proj" 1772 | type: "ReLU" 1773 | bottom: "inception_4e/pool_proj" 1774 | top: "inception_4e/pool_proj" 1775 | } 1776 | layer { 1777 | name: "inception_4e/output" 1778 | type: "Concat" 1779 | bottom: "inception_4e/1x1" 1780 | bottom: "inception_4e/3x3" 1781 | bottom: "inception_4e/5x5" 1782 | bottom: "inception_4e/pool_proj" 1783 | top: "inception_4e/output" 1784 | } 1785 | layer { 1786 | name: "pool4/3x3_s2" 1787 | type: "Pooling" 1788 | bottom: "inception_4e/output" 1789 | top: "pool4/3x3_s2" 1790 | pooling_param { 1791 | pool: MAX 1792 | kernel_size: 3 1793 | stride: 2 1794 | } 1795 | } 1796 | layer { 1797 | name: "inception_5a/1x1" 1798 | type: "Convolution" 1799 | bottom: "pool4/3x3_s2" 1800 | top: "inception_5a/1x1" 1801 | param { 1802 | lr_mult: 1 1803 | decay_mult: 1 1804 | } 1805 | param { 1806 | lr_mult: 2 1807 | decay_mult: 0 1808 | } 1809 | convolution_param { 1810 | num_output: 256 1811 | kernel_size: 1 1812 | weight_filler { 1813 | type: "xavier" 1814 | } 1815 | bias_filler { 1816 | type: "constant" 1817 | value: 0.2 1818 | } 1819 | } 1820 | } 1821 | layer { 1822 | name: "inception_5a/relu_1x1" 1823 | type: "ReLU" 1824 | bottom: "inception_5a/1x1" 1825 | top: "inception_5a/1x1" 1826 | } 1827 | layer { 1828 | name: "inception_5a/3x3_reduce" 1829 | type: "Convolution" 1830 | bottom: "pool4/3x3_s2" 1831 | top: "inception_5a/3x3_reduce" 1832 | param { 1833 | lr_mult: 1 1834 | decay_mult: 1 1835 | } 1836 | param { 1837 | lr_mult: 2 1838 | decay_mult: 0 1839 | } 1840 | convolution_param { 1841 | num_output: 160 1842 | kernel_size: 1 1843 | weight_filler { 1844 | type: "xavier" 1845 | } 1846 | bias_filler { 1847 | type: "constant" 1848 | value: 0.2 1849 | } 1850 | } 1851 | } 1852 | layer { 1853 | name: "inception_5a/relu_3x3_reduce" 1854 | type: "ReLU" 1855 | bottom: "inception_5a/3x3_reduce" 1856 | top: "inception_5a/3x3_reduce" 1857 | } 1858 | layer { 1859 | name: "inception_5a/3x3" 1860 | type: "Convolution" 1861 | bottom: "inception_5a/3x3_reduce" 1862 | top: "inception_5a/3x3" 1863 | param { 1864 | lr_mult: 1 1865 | decay_mult: 
1 1866 | } 1867 | param { 1868 | lr_mult: 2 1869 | decay_mult: 0 1870 | } 1871 | convolution_param { 1872 | num_output: 320 1873 | pad: 1 1874 | kernel_size: 3 1875 | weight_filler { 1876 | type: "xavier" 1877 | } 1878 | bias_filler { 1879 | type: "constant" 1880 | value: 0.2 1881 | } 1882 | } 1883 | } 1884 | layer { 1885 | name: "inception_5a/relu_3x3" 1886 | type: "ReLU" 1887 | bottom: "inception_5a/3x3" 1888 | top: "inception_5a/3x3" 1889 | } 1890 | layer { 1891 | name: "inception_5a/5x5_reduce" 1892 | type: "Convolution" 1893 | bottom: "pool4/3x3_s2" 1894 | top: "inception_5a/5x5_reduce" 1895 | param { 1896 | lr_mult: 1 1897 | decay_mult: 1 1898 | } 1899 | param { 1900 | lr_mult: 2 1901 | decay_mult: 0 1902 | } 1903 | convolution_param { 1904 | num_output: 32 1905 | kernel_size: 1 1906 | weight_filler { 1907 | type: "xavier" 1908 | } 1909 | bias_filler { 1910 | type: "constant" 1911 | value: 0.2 1912 | } 1913 | } 1914 | } 1915 | layer { 1916 | name: "inception_5a/relu_5x5_reduce" 1917 | type: "ReLU" 1918 | bottom: "inception_5a/5x5_reduce" 1919 | top: "inception_5a/5x5_reduce" 1920 | } 1921 | layer { 1922 | name: "inception_5a/5x5" 1923 | type: "Convolution" 1924 | bottom: "inception_5a/5x5_reduce" 1925 | top: "inception_5a/5x5" 1926 | param { 1927 | lr_mult: 1 1928 | decay_mult: 1 1929 | } 1930 | param { 1931 | lr_mult: 2 1932 | decay_mult: 0 1933 | } 1934 | convolution_param { 1935 | num_output: 128 1936 | pad: 2 1937 | kernel_size: 5 1938 | weight_filler { 1939 | type: "xavier" 1940 | } 1941 | bias_filler { 1942 | type: "constant" 1943 | value: 0.2 1944 | } 1945 | } 1946 | } 1947 | layer { 1948 | name: "inception_5a/relu_5x5" 1949 | type: "ReLU" 1950 | bottom: "inception_5a/5x5" 1951 | top: "inception_5a/5x5" 1952 | } 1953 | layer { 1954 | name: "inception_5a/pool" 1955 | type: "Pooling" 1956 | bottom: "pool4/3x3_s2" 1957 | top: "inception_5a/pool" 1958 | pooling_param { 1959 | pool: MAX 1960 | kernel_size: 3 1961 | stride: 1 1962 | pad: 1 1963 | } 1964 | } 1965 | layer { 1966 | name: "inception_5a/pool_proj" 1967 | type: "Convolution" 1968 | bottom: "inception_5a/pool" 1969 | top: "inception_5a/pool_proj" 1970 | param { 1971 | lr_mult: 1 1972 | decay_mult: 1 1973 | } 1974 | param { 1975 | lr_mult: 2 1976 | decay_mult: 0 1977 | } 1978 | convolution_param { 1979 | num_output: 128 1980 | kernel_size: 1 1981 | weight_filler { 1982 | type: "xavier" 1983 | } 1984 | bias_filler { 1985 | type: "constant" 1986 | value: 0.2 1987 | } 1988 | } 1989 | } 1990 | layer { 1991 | name: "inception_5a/relu_pool_proj" 1992 | type: "ReLU" 1993 | bottom: "inception_5a/pool_proj" 1994 | top: "inception_5a/pool_proj" 1995 | } 1996 | layer { 1997 | name: "inception_5a/output" 1998 | type: "Concat" 1999 | bottom: "inception_5a/1x1" 2000 | bottom: "inception_5a/3x3" 2001 | bottom: "inception_5a/5x5" 2002 | bottom: "inception_5a/pool_proj" 2003 | top: "inception_5a/output" 2004 | } 2005 | layer { 2006 | name: "inception_5b/1x1" 2007 | type: "Convolution" 2008 | bottom: "inception_5a/output" 2009 | top: "inception_5b/1x1" 2010 | param { 2011 | lr_mult: 1 2012 | decay_mult: 1 2013 | } 2014 | param { 2015 | lr_mult: 2 2016 | decay_mult: 0 2017 | } 2018 | convolution_param { 2019 | num_output: 384 2020 | kernel_size: 1 2021 | weight_filler { 2022 | type: "xavier" 2023 | } 2024 | bias_filler { 2025 | type: "constant" 2026 | value: 0.2 2027 | } 2028 | } 2029 | } 2030 | layer { 2031 | name: "inception_5b/relu_1x1" 2032 | type: "ReLU" 2033 | bottom: "inception_5b/1x1" 2034 | top: "inception_5b/1x1" 2035 | } 2036 | 
layer { 2037 | name: "inception_5b/3x3_reduce" 2038 | type: "Convolution" 2039 | bottom: "inception_5a/output" 2040 | top: "inception_5b/3x3_reduce" 2041 | param { 2042 | lr_mult: 1 2043 | decay_mult: 1 2044 | } 2045 | param { 2046 | lr_mult: 2 2047 | decay_mult: 0 2048 | } 2049 | convolution_param { 2050 | num_output: 192 2051 | kernel_size: 1 2052 | weight_filler { 2053 | type: "xavier" 2054 | } 2055 | bias_filler { 2056 | type: "constant" 2057 | value: 0.2 2058 | } 2059 | } 2060 | } 2061 | layer { 2062 | name: "inception_5b/relu_3x3_reduce" 2063 | type: "ReLU" 2064 | bottom: "inception_5b/3x3_reduce" 2065 | top: "inception_5b/3x3_reduce" 2066 | } 2067 | layer { 2068 | name: "inception_5b/3x3" 2069 | type: "Convolution" 2070 | bottom: "inception_5b/3x3_reduce" 2071 | top: "inception_5b/3x3" 2072 | param { 2073 | lr_mult: 1 2074 | decay_mult: 1 2075 | } 2076 | param { 2077 | lr_mult: 2 2078 | decay_mult: 0 2079 | } 2080 | convolution_param { 2081 | num_output: 384 2082 | pad: 1 2083 | kernel_size: 3 2084 | weight_filler { 2085 | type: "xavier" 2086 | } 2087 | bias_filler { 2088 | type: "constant" 2089 | value: 0.2 2090 | } 2091 | } 2092 | } 2093 | layer { 2094 | name: "inception_5b/relu_3x3" 2095 | type: "ReLU" 2096 | bottom: "inception_5b/3x3" 2097 | top: "inception_5b/3x3" 2098 | } 2099 | layer { 2100 | name: "inception_5b/5x5_reduce" 2101 | type: "Convolution" 2102 | bottom: "inception_5a/output" 2103 | top: "inception_5b/5x5_reduce" 2104 | param { 2105 | lr_mult: 1 2106 | decay_mult: 1 2107 | } 2108 | param { 2109 | lr_mult: 2 2110 | decay_mult: 0 2111 | } 2112 | convolution_param { 2113 | num_output: 48 2114 | kernel_size: 1 2115 | weight_filler { 2116 | type: "xavier" 2117 | } 2118 | bias_filler { 2119 | type: "constant" 2120 | value: 0.2 2121 | } 2122 | } 2123 | } 2124 | layer { 2125 | name: "inception_5b/relu_5x5_reduce" 2126 | type: "ReLU" 2127 | bottom: "inception_5b/5x5_reduce" 2128 | top: "inception_5b/5x5_reduce" 2129 | } 2130 | layer { 2131 | name: "inception_5b/5x5" 2132 | type: "Convolution" 2133 | bottom: "inception_5b/5x5_reduce" 2134 | top: "inception_5b/5x5" 2135 | param { 2136 | lr_mult: 1 2137 | decay_mult: 1 2138 | } 2139 | param { 2140 | lr_mult: 2 2141 | decay_mult: 0 2142 | } 2143 | convolution_param { 2144 | num_output: 128 2145 | pad: 2 2146 | kernel_size: 5 2147 | weight_filler { 2148 | type: "xavier" 2149 | } 2150 | bias_filler { 2151 | type: "constant" 2152 | value: 0.2 2153 | } 2154 | } 2155 | } 2156 | layer { 2157 | name: "inception_5b/relu_5x5" 2158 | type: "ReLU" 2159 | bottom: "inception_5b/5x5" 2160 | top: "inception_5b/5x5" 2161 | } 2162 | layer { 2163 | name: "inception_5b/pool" 2164 | type: "Pooling" 2165 | bottom: "inception_5a/output" 2166 | top: "inception_5b/pool" 2167 | pooling_param { 2168 | pool: MAX 2169 | kernel_size: 3 2170 | stride: 1 2171 | pad: 1 2172 | } 2173 | } 2174 | layer { 2175 | name: "inception_5b/pool_proj" 2176 | type: "Convolution" 2177 | bottom: "inception_5b/pool" 2178 | top: "inception_5b/pool_proj" 2179 | param { 2180 | lr_mult: 1 2181 | decay_mult: 1 2182 | } 2183 | param { 2184 | lr_mult: 2 2185 | decay_mult: 0 2186 | } 2187 | convolution_param { 2188 | num_output: 128 2189 | kernel_size: 1 2190 | weight_filler { 2191 | type: "xavier" 2192 | } 2193 | bias_filler { 2194 | type: "constant" 2195 | value: 0.2 2196 | } 2197 | } 2198 | } 2199 | layer { 2200 | name: "inception_5b/relu_pool_proj" 2201 | type: "ReLU" 2202 | bottom: "inception_5b/pool_proj" 2203 | top: "inception_5b/pool_proj" 2204 | } 2205 | layer { 2206 | name: 
"inception_5b/output" 2207 | type: "Concat" 2208 | bottom: "inception_5b/1x1" 2209 | bottom: "inception_5b/3x3" 2210 | bottom: "inception_5b/5x5" 2211 | bottom: "inception_5b/pool_proj" 2212 | top: "inception_5b/output" 2213 | } 2214 | layer { 2215 | name: "pool5/7x7_s1" 2216 | type: "Pooling" 2217 | bottom: "inception_5b/output" 2218 | top: "pool5/7x7_s1" 2219 | pooling_param { 2220 | pool: AVE 2221 | kernel_size: 7 2222 | stride: 1 2223 | } 2224 | } 2225 | layer { 2226 | name: "pool5/drop_7x7_s1" 2227 | type: "Dropout" 2228 | bottom: "pool5/7x7_s1" 2229 | top: "pool5/7x7_s1" 2230 | dropout_param { 2231 | dropout_ratio: 0.4 2232 | } 2233 | } 2234 | layer { 2235 | name: "my-classifier" 2236 | type: "InnerProduct" 2237 | bottom: "pool5/7x7_s1" 2238 | top: "my-classifier" 2239 | param { 2240 | lr_mult: 1 2241 | decay_mult: 1 2242 | } 2243 | param { 2244 | lr_mult: 2 2245 | decay_mult: 0 2246 | } 2247 | inner_product_param { 2248 | num_output: 3 2249 | weight_filler { 2250 | type: "xavier" 2251 | } 2252 | bias_filler { 2253 | type: "constant" 2254 | value: 0 2255 | } 2256 | } 2257 | } 2258 | layer { 2259 | name: "prob" 2260 | type: "Softmax" 2261 | bottom: "my-classifier" 2262 | top: "prob" 2263 | } 2264 | -------------------------------------------------------------------------------- /GoogLeNet/solver.prototxt: -------------------------------------------------------------------------------- 1 | # the definition of neural network model 2 | net: "train_val.prototxt" 3 | # test_iter is related to batch_size in test layer, test_iter * batch_size = the number of test data 4 | test_iter: 761 5 | # carry out test once every 5 training iterations 6 | test_interval: 10000 7 | # exclude test phase when test_initialization = false 8 | # test_initialization: false 9 | # display information once every 10 training iterations 10 | display: 1 11 | # 12 | average_loss: 40 13 | # the initial learning rate 14 | base_lr: 0.001 15 | lr_policy: "poly" 16 | stepsize: 320000 17 | gamma: 0.96 18 | # The max number of iterations 19 | max_iter: 10001 20 | power: 1.0 21 | momentum: 0.9 22 | # weight decay item, in case of overfitting 23 | weight_decay: 0.0002 24 | # save once every 50 training iterations 25 | snapshot: 10 26 | # save path 27 | snapshot_prefix: "inception-v1-cervix" 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- /Inception-ResNet-v2/solver.prototxt: -------------------------------------------------------------------------------- 1 | # the definition of neural network model 2 | net: "train_val.prototxt" 3 | # test_iter is related to batch_size in test layer, test_iter * batch_size = the number of test data 4 | test_iter: 761 5 | # carry out test once every 5 training iterations 6 | test_interval: 10 7 | # exclude test phase when test_initialization = false 8 | # test_initialization: false 9 | # display information once every 10 training iterations 10 | display: 1 11 | clip_gradients: 40 12 | average_loss: 40 13 | # the initial learning rate 14 | base_lr: 0.001 15 | lr_policy: "poly" 16 | stepsize: 320000 17 | gamma: 0.96 18 | # The max number of iterations 19 | max_iter: 50001 20 | power: 1.0 21 | momentum: 0.9 22 | # weight decay item, in case of overfitting 23 | weight_decay: 0.0002 24 | # save once every 50 training iterations 25 | snapshot: 10 26 | # save path 27 | snapshot_prefix: "inception-resnet-v2-cervix" 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- 
/Inception-v3/solver.prototxt: -------------------------------------------------------------------------------- 1 | # the definition of the neural network model 2 | net: "train_val.prototxt" 3 | # test_iter is tied to the batch_size of the test-phase data layer: test_iter * batch_size = the number of test images 4 | test_iter: 761 5 | # carry out a test pass once every 100 training iterations 6 | test_interval: 100 7 | # skip the initial test pass before training when test_initialization = false 8 | # test_initialization: false 9 | # display training information once every training iteration 10 | display: 1 11 | # display the loss averaged over the last 40 iterations 12 | average_loss: 40 13 | # the initial learning rate 14 | base_lr: 0.001 15 | lr_policy: "poly" 16 | stepsize: 320000 17 | gamma: 0.96 18 | # The max number of iterations 19 | max_iter: 50001 20 | power: 1.0 21 | momentum: 0.9 22 | # weight decay term, to guard against overfitting 23 | weight_decay: 0.0002 24 | # save a snapshot once every 100 training iterations 25 | snapshot: 100 26 | # snapshot filename prefix 27 | snapshot_prefix: "inception-v3-cervix-all" 28 | solver_mode: GPU 29 | --------------------------------------------------------------------------------
/Inception-v4/solver.prototxt: -------------------------------------------------------------------------------- 1 | # the definition of the neural network model 2 | net: "train_val.prototxt" 3 | # test_iter is tied to the batch_size of the test-phase data layer: test_iter * batch_size = the number of test images 4 | test_iter: 761 5 | # carry out a test pass once every 10 training iterations 6 | test_interval: 10 7 | # skip the initial test pass before training when test_initialization = false 8 | # test_initialization: false 9 | # display training information once every training iteration 10 | display: 1 11 | # display the loss averaged over the last 40 iterations 12 | average_loss: 40 13 | # the initial learning rate 14 | base_lr: 0.001 15 | lr_policy: "poly" 16 | stepsize: 320000 17 | gamma: 0.96 18 | # The max number of iterations 19 | max_iter: 10001 20 | power: 1.0 21 | momentum: 0.9 22 | # weight decay term, to guard against overfitting 23 | weight_decay: 0.0002 24 | # save a snapshot once every 10 training iterations 25 | snapshot: 10 26 | # snapshot filename prefix 27 | snapshot_prefix: "inception-v4-cervix" 28 | solver_mode: GPU 29 | --------------------------------------------------------------------------------
/README.md: -------------------------------------------------------------------------------- 1 | # caffe-model-zoo 2 | ![screenshot](https://user-images.githubusercontent.com/21311442/33640664-cbcbeff2-da6c-11e7-97c8-1ad8d7fdf4c0.png) 3 | 4 | The pretrained Caffe models can be downloaded from 5 | Link: [http://pan.baidu.com/s/1pL0ZIYJ](http://pan.baidu.com/s/1pL0ZIYJ) 6 | Password: xp68 7 | --------------------------------------------------------------------------------
/ResNet-101/solver.prototxt: -------------------------------------------------------------------------------- 1 | # the definition of the neural network model 2 | net: "train_val.prototxt" 3 | # test_iter is tied to the batch_size of the test-phase data layer: test_iter * batch_size = the number of test images 4 | test_iter: 761 5 | # carry out a test pass once every 10000 training iterations 6 | test_interval: 10000 7 | # skip the initial test pass before training when test_initialization = false 8 | # test_initialization: false 9 | # display training information once every training iteration 10 | display: 1 11 | # display the loss averaged over the last 40 iterations 12 | average_loss: 40 13 | # the initial learning rate 14 | base_lr: 0.001 15 | lr_policy: "poly" 16 | stepsize: 320000 17 | gamma: 0.96 18 | # The max number of iterations 19 | max_iter: 10001 20 | power: 1.0 21 | momentum: 0.9 22 | # weight decay term, to guard against overfitting 23 | weight_decay: 0.0002 24 | # save once
/ResNet-152/solver.prototxt: --------------------------------------------------------------------------------
1 | # the definition of the neural network model
2 | net: "train_val.prototxt"
3 | # test_iter is tied to the batch_size of the TEST data layer: test_iter * batch_size = the number of test samples
4 | test_iter: 761
5 | # carry out a test pass once every 10000 training iterations
6 | test_interval: 10000
7 | # skip the test pass at iteration 0 when test_initialization = false
8 | # test_initialization: false
9 | # display training information after every iteration
10 | display: 1
11 | # average the displayed loss over the last 40 iterations
12 | average_loss: 40
13 | # the initial learning rate
14 | base_lr: 0.001
15 | lr_policy: "poly"
16 | stepsize: 320000
17 | gamma: 0.96
18 | # the maximum number of training iterations
19 | max_iter: 10001
20 | power: 1.0
21 | momentum: 0.9
22 | # weight decay term, to guard against overfitting
23 | weight_decay: 0.0002
24 | # save a snapshot once every 20 training iterations
25 | snapshot: 20
26 | # save path
27 | snapshot_prefix: "resnet-152-cervix"
28 | solver_mode: CPU
29 | --------------------------------------------------------------------------------
/ResNet-152/train_val.prototxt: --------------------------------------------------------------------------------
1 | # the definition of the neural network model
2 | net: "train_val.prototxt"
3 | # test_iter is tied to the batch_size of the TEST data layer: test_iter * batch_size = the number of test samples
4 | test_iter: 761
5 | # carry out a test pass once every 10000 training iterations
6 | test_interval: 10000
7 | # skip the test pass at iteration 0 when test_initialization = false
8 | # test_initialization: false
9 | # display training information after every iteration
10 | display: 1
11 | # average the displayed loss over the last 40 iterations
12 | average_loss: 40
13 | # the initial learning rate
14 | base_lr: 0.001
15 | lr_policy: "poly"
16 | stepsize: 320000
17 | gamma: 0.96
18 | # the maximum number of training iterations
19 | max_iter: 10001
20 | power: 1.0
21 | momentum: 0.9
22 | # weight decay term, to guard against overfitting
23 | weight_decay: 0.0002
24 | # save a snapshot once every 20 training iterations
25 | snapshot: 20
26 | # save path
27 | snapshot_prefix: "resnet-152-cervix"
28 | solver_mode: CPU
29 | --------------------------------------------------------------------------------
/ResNet-50/deploy.prototxt: --------------------------------------------------------------------------------
1 | name: "ResNet-50"
2 | input: "data"
3 | input_dim: 1
4 | input_dim: 3
5 | input_dim: 224
6 | input_dim: 224
7 | layer {
8 | bottom: "data"
9 | top: "conv1"
10 | name: "conv1"
11 | type: "Convolution"
12 | convolution_param {
13 | num_output: 64
14 | kernel_size: 7
15 | pad: 3
16 | stride: 2
17 | }
18 | }
19 |
20 | layer {
21 | bottom: "conv1"
22 | top: "conv1"
23 | name: "bn_conv1"
24 | type: "BatchNorm"
25 | batch_norm_param {
26 | use_global_stats: true
27 | }
28 | }
29 |
30 | layer {
31 | bottom: "conv1"
32 | top: "conv1"
33 | name: "scale_conv1"
34 | type: "Scale"
35 | scale_param {
36 | bias_term: true
37 | }
38 | }
39 |
40 | layer {
41 | bottom: "conv1"
42 | top: "conv1"
43 | name: "conv1_relu"
44 | type: "ReLU"
45 | }
46 |
47 | layer {
48 | bottom: "conv1"
49 | top: "pool1"
50 | name: "pool1"
51 | type: "Pooling"
52 | pooling_param {
53 | kernel_size: 3
54 | stride: 2
55 | pool: MAX
56 | }
57 | }
58 |
59 | layer {
60 | bottom: "pool1"
61 | top: "res2a_branch1"
62 | name: "res2a_branch1"
63 | type: 
"Convolution" 64 | convolution_param { 65 | num_output: 256 66 | kernel_size: 1 67 | pad: 0 68 | stride: 1 69 | bias_term: false 70 | } 71 | } 72 | 73 | layer { 74 | bottom: "res2a_branch1" 75 | top: "res2a_branch1" 76 | name: "bn2a_branch1" 77 | type: "BatchNorm" 78 | batch_norm_param { 79 | use_global_stats: true 80 | } 81 | } 82 | 83 | layer { 84 | bottom: "res2a_branch1" 85 | top: "res2a_branch1" 86 | name: "scale2a_branch1" 87 | type: "Scale" 88 | scale_param { 89 | bias_term: true 90 | } 91 | } 92 | 93 | layer { 94 | bottom: "pool1" 95 | top: "res2a_branch2a" 96 | name: "res2a_branch2a" 97 | type: "Convolution" 98 | convolution_param { 99 | num_output: 64 100 | kernel_size: 1 101 | pad: 0 102 | stride: 1 103 | bias_term: false 104 | } 105 | } 106 | 107 | layer { 108 | bottom: "res2a_branch2a" 109 | top: "res2a_branch2a" 110 | name: "bn2a_branch2a" 111 | type: "BatchNorm" 112 | batch_norm_param { 113 | use_global_stats: true 114 | } 115 | } 116 | 117 | layer { 118 | bottom: "res2a_branch2a" 119 | top: "res2a_branch2a" 120 | name: "scale2a_branch2a" 121 | type: "Scale" 122 | scale_param { 123 | bias_term: true 124 | } 125 | } 126 | 127 | layer { 128 | bottom: "res2a_branch2a" 129 | top: "res2a_branch2a" 130 | name: "res2a_branch2a_relu" 131 | type: "ReLU" 132 | } 133 | 134 | layer { 135 | bottom: "res2a_branch2a" 136 | top: "res2a_branch2b" 137 | name: "res2a_branch2b" 138 | type: "Convolution" 139 | convolution_param { 140 | num_output: 64 141 | kernel_size: 3 142 | pad: 1 143 | stride: 1 144 | bias_term: false 145 | } 146 | } 147 | 148 | layer { 149 | bottom: "res2a_branch2b" 150 | top: "res2a_branch2b" 151 | name: "bn2a_branch2b" 152 | type: "BatchNorm" 153 | batch_norm_param { 154 | use_global_stats: true 155 | } 156 | } 157 | 158 | layer { 159 | bottom: "res2a_branch2b" 160 | top: "res2a_branch2b" 161 | name: "scale2a_branch2b" 162 | type: "Scale" 163 | scale_param { 164 | bias_term: true 165 | } 166 | } 167 | 168 | layer { 169 | bottom: "res2a_branch2b" 170 | top: "res2a_branch2b" 171 | name: "res2a_branch2b_relu" 172 | type: "ReLU" 173 | } 174 | 175 | layer { 176 | bottom: "res2a_branch2b" 177 | top: "res2a_branch2c" 178 | name: "res2a_branch2c" 179 | type: "Convolution" 180 | convolution_param { 181 | num_output: 256 182 | kernel_size: 1 183 | pad: 0 184 | stride: 1 185 | bias_term: false 186 | } 187 | } 188 | 189 | layer { 190 | bottom: "res2a_branch2c" 191 | top: "res2a_branch2c" 192 | name: "bn2a_branch2c" 193 | type: "BatchNorm" 194 | batch_norm_param { 195 | use_global_stats: true 196 | } 197 | } 198 | 199 | layer { 200 | bottom: "res2a_branch2c" 201 | top: "res2a_branch2c" 202 | name: "scale2a_branch2c" 203 | type: "Scale" 204 | scale_param { 205 | bias_term: true 206 | } 207 | } 208 | 209 | layer { 210 | bottom: "res2a_branch1" 211 | bottom: "res2a_branch2c" 212 | top: "res2a" 213 | name: "res2a" 214 | type: "Eltwise" 215 | } 216 | 217 | layer { 218 | bottom: "res2a" 219 | top: "res2a" 220 | name: "res2a_relu" 221 | type: "ReLU" 222 | } 223 | 224 | layer { 225 | bottom: "res2a" 226 | top: "res2b_branch2a" 227 | name: "res2b_branch2a" 228 | type: "Convolution" 229 | convolution_param { 230 | num_output: 64 231 | kernel_size: 1 232 | pad: 0 233 | stride: 1 234 | bias_term: false 235 | } 236 | } 237 | 238 | layer { 239 | bottom: "res2b_branch2a" 240 | top: "res2b_branch2a" 241 | name: "bn2b_branch2a" 242 | type: "BatchNorm" 243 | batch_norm_param { 244 | use_global_stats: true 245 | } 246 | } 247 | 248 | layer { 249 | bottom: "res2b_branch2a" 250 | top: "res2b_branch2a" 251 
| name: "scale2b_branch2a" 252 | type: "Scale" 253 | scale_param { 254 | bias_term: true 255 | } 256 | } 257 | 258 | layer { 259 | bottom: "res2b_branch2a" 260 | top: "res2b_branch2a" 261 | name: "res2b_branch2a_relu" 262 | type: "ReLU" 263 | } 264 | 265 | layer { 266 | bottom: "res2b_branch2a" 267 | top: "res2b_branch2b" 268 | name: "res2b_branch2b" 269 | type: "Convolution" 270 | convolution_param { 271 | num_output: 64 272 | kernel_size: 3 273 | pad: 1 274 | stride: 1 275 | bias_term: false 276 | } 277 | } 278 | 279 | layer { 280 | bottom: "res2b_branch2b" 281 | top: "res2b_branch2b" 282 | name: "bn2b_branch2b" 283 | type: "BatchNorm" 284 | batch_norm_param { 285 | use_global_stats: true 286 | } 287 | } 288 | 289 | layer { 290 | bottom: "res2b_branch2b" 291 | top: "res2b_branch2b" 292 | name: "scale2b_branch2b" 293 | type: "Scale" 294 | scale_param { 295 | bias_term: true 296 | } 297 | } 298 | 299 | layer { 300 | bottom: "res2b_branch2b" 301 | top: "res2b_branch2b" 302 | name: "res2b_branch2b_relu" 303 | type: "ReLU" 304 | } 305 | 306 | layer { 307 | bottom: "res2b_branch2b" 308 | top: "res2b_branch2c" 309 | name: "res2b_branch2c" 310 | type: "Convolution" 311 | convolution_param { 312 | num_output: 256 313 | kernel_size: 1 314 | pad: 0 315 | stride: 1 316 | bias_term: false 317 | } 318 | } 319 | 320 | layer { 321 | bottom: "res2b_branch2c" 322 | top: "res2b_branch2c" 323 | name: "bn2b_branch2c" 324 | type: "BatchNorm" 325 | batch_norm_param { 326 | use_global_stats: true 327 | } 328 | } 329 | 330 | layer { 331 | bottom: "res2b_branch2c" 332 | top: "res2b_branch2c" 333 | name: "scale2b_branch2c" 334 | type: "Scale" 335 | scale_param { 336 | bias_term: true 337 | } 338 | } 339 | 340 | layer { 341 | bottom: "res2a" 342 | bottom: "res2b_branch2c" 343 | top: "res2b" 344 | name: "res2b" 345 | type: "Eltwise" 346 | } 347 | 348 | layer { 349 | bottom: "res2b" 350 | top: "res2b" 351 | name: "res2b_relu" 352 | type: "ReLU" 353 | } 354 | 355 | layer { 356 | bottom: "res2b" 357 | top: "res2c_branch2a" 358 | name: "res2c_branch2a" 359 | type: "Convolution" 360 | convolution_param { 361 | num_output: 64 362 | kernel_size: 1 363 | pad: 0 364 | stride: 1 365 | bias_term: false 366 | } 367 | } 368 | 369 | layer { 370 | bottom: "res2c_branch2a" 371 | top: "res2c_branch2a" 372 | name: "bn2c_branch2a" 373 | type: "BatchNorm" 374 | batch_norm_param { 375 | use_global_stats: true 376 | } 377 | } 378 | 379 | layer { 380 | bottom: "res2c_branch2a" 381 | top: "res2c_branch2a" 382 | name: "scale2c_branch2a" 383 | type: "Scale" 384 | scale_param { 385 | bias_term: true 386 | } 387 | } 388 | 389 | layer { 390 | bottom: "res2c_branch2a" 391 | top: "res2c_branch2a" 392 | name: "res2c_branch2a_relu" 393 | type: "ReLU" 394 | } 395 | 396 | layer { 397 | bottom: "res2c_branch2a" 398 | top: "res2c_branch2b" 399 | name: "res2c_branch2b" 400 | type: "Convolution" 401 | convolution_param { 402 | num_output: 64 403 | kernel_size: 3 404 | pad: 1 405 | stride: 1 406 | bias_term: false 407 | } 408 | } 409 | 410 | layer { 411 | bottom: "res2c_branch2b" 412 | top: "res2c_branch2b" 413 | name: "bn2c_branch2b" 414 | type: "BatchNorm" 415 | batch_norm_param { 416 | use_global_stats: true 417 | } 418 | } 419 | 420 | layer { 421 | bottom: "res2c_branch2b" 422 | top: "res2c_branch2b" 423 | name: "scale2c_branch2b" 424 | type: "Scale" 425 | scale_param { 426 | bias_term: true 427 | } 428 | } 429 | 430 | layer { 431 | bottom: "res2c_branch2b" 432 | top: "res2c_branch2b" 433 | name: "res2c_branch2b_relu" 434 | type: "ReLU" 435 | } 436 | 
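# The layers above show the repeating ResNet-50 bottleneck pattern: a 1x1 convolution that narrows the width,
# a 3x3 convolution, and a 1x1 convolution that restores it four times wider, each followed by BatchNorm
# (use_global_stats: true, i.e. stored inference statistics) and a Scale layer with bias_term: true, which holds
# the learned scale/shift that Caffe's BatchNorm layer does not contain itself.
# The first unit of each stage (res2a here, and later res3a, res4a, res5a) adds a 1x1 projection shortcut
# (branch1) to match the new width and, from res3a onwards, the stride-2 downsampling; the remaining units of a
# stage reuse identity shortcuts. Every unit ends with an Eltwise sum of the two branches followed by a ReLU.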
437 | layer { 438 | bottom: "res2c_branch2b" 439 | top: "res2c_branch2c" 440 | name: "res2c_branch2c" 441 | type: "Convolution" 442 | convolution_param { 443 | num_output: 256 444 | kernel_size: 1 445 | pad: 0 446 | stride: 1 447 | bias_term: false 448 | } 449 | } 450 | 451 | layer { 452 | bottom: "res2c_branch2c" 453 | top: "res2c_branch2c" 454 | name: "bn2c_branch2c" 455 | type: "BatchNorm" 456 | batch_norm_param { 457 | use_global_stats: true 458 | } 459 | } 460 | 461 | layer { 462 | bottom: "res2c_branch2c" 463 | top: "res2c_branch2c" 464 | name: "scale2c_branch2c" 465 | type: "Scale" 466 | scale_param { 467 | bias_term: true 468 | } 469 | } 470 | 471 | layer { 472 | bottom: "res2b" 473 | bottom: "res2c_branch2c" 474 | top: "res2c" 475 | name: "res2c" 476 | type: "Eltwise" 477 | } 478 | 479 | layer { 480 | bottom: "res2c" 481 | top: "res2c" 482 | name: "res2c_relu" 483 | type: "ReLU" 484 | } 485 | 486 | layer { 487 | bottom: "res2c" 488 | top: "res3a_branch1" 489 | name: "res3a_branch1" 490 | type: "Convolution" 491 | convolution_param { 492 | num_output: 512 493 | kernel_size: 1 494 | pad: 0 495 | stride: 2 496 | bias_term: false 497 | } 498 | } 499 | 500 | layer { 501 | bottom: "res3a_branch1" 502 | top: "res3a_branch1" 503 | name: "bn3a_branch1" 504 | type: "BatchNorm" 505 | batch_norm_param { 506 | use_global_stats: true 507 | } 508 | } 509 | 510 | layer { 511 | bottom: "res3a_branch1" 512 | top: "res3a_branch1" 513 | name: "scale3a_branch1" 514 | type: "Scale" 515 | scale_param { 516 | bias_term: true 517 | } 518 | } 519 | 520 | layer { 521 | bottom: "res2c" 522 | top: "res3a_branch2a" 523 | name: "res3a_branch2a" 524 | type: "Convolution" 525 | convolution_param { 526 | num_output: 128 527 | kernel_size: 1 528 | pad: 0 529 | stride: 2 530 | bias_term: false 531 | } 532 | } 533 | 534 | layer { 535 | bottom: "res3a_branch2a" 536 | top: "res3a_branch2a" 537 | name: "bn3a_branch2a" 538 | type: "BatchNorm" 539 | batch_norm_param { 540 | use_global_stats: true 541 | } 542 | } 543 | 544 | layer { 545 | bottom: "res3a_branch2a" 546 | top: "res3a_branch2a" 547 | name: "scale3a_branch2a" 548 | type: "Scale" 549 | scale_param { 550 | bias_term: true 551 | } 552 | } 553 | 554 | layer { 555 | bottom: "res3a_branch2a" 556 | top: "res3a_branch2a" 557 | name: "res3a_branch2a_relu" 558 | type: "ReLU" 559 | } 560 | 561 | layer { 562 | bottom: "res3a_branch2a" 563 | top: "res3a_branch2b" 564 | name: "res3a_branch2b" 565 | type: "Convolution" 566 | convolution_param { 567 | num_output: 128 568 | kernel_size: 3 569 | pad: 1 570 | stride: 1 571 | bias_term: false 572 | } 573 | } 574 | 575 | layer { 576 | bottom: "res3a_branch2b" 577 | top: "res3a_branch2b" 578 | name: "bn3a_branch2b" 579 | type: "BatchNorm" 580 | batch_norm_param { 581 | use_global_stats: true 582 | } 583 | } 584 | 585 | layer { 586 | bottom: "res3a_branch2b" 587 | top: "res3a_branch2b" 588 | name: "scale3a_branch2b" 589 | type: "Scale" 590 | scale_param { 591 | bias_term: true 592 | } 593 | } 594 | 595 | layer { 596 | bottom: "res3a_branch2b" 597 | top: "res3a_branch2b" 598 | name: "res3a_branch2b_relu" 599 | type: "ReLU" 600 | } 601 | 602 | layer { 603 | bottom: "res3a_branch2b" 604 | top: "res3a_branch2c" 605 | name: "res3a_branch2c" 606 | type: "Convolution" 607 | convolution_param { 608 | num_output: 512 609 | kernel_size: 1 610 | pad: 0 611 | stride: 1 612 | bias_term: false 613 | } 614 | } 615 | 616 | layer { 617 | bottom: "res3a_branch2c" 618 | top: "res3a_branch2c" 619 | name: "bn3a_branch2c" 620 | type: "BatchNorm" 621 | 
batch_norm_param { 622 | use_global_stats: true 623 | } 624 | } 625 | 626 | layer { 627 | bottom: "res3a_branch2c" 628 | top: "res3a_branch2c" 629 | name: "scale3a_branch2c" 630 | type: "Scale" 631 | scale_param { 632 | bias_term: true 633 | } 634 | } 635 | 636 | layer { 637 | bottom: "res3a_branch1" 638 | bottom: "res3a_branch2c" 639 | top: "res3a" 640 | name: "res3a" 641 | type: "Eltwise" 642 | } 643 | 644 | layer { 645 | bottom: "res3a" 646 | top: "res3a" 647 | name: "res3a_relu" 648 | type: "ReLU" 649 | } 650 | 651 | layer { 652 | bottom: "res3a" 653 | top: "res3b_branch2a" 654 | name: "res3b_branch2a" 655 | type: "Convolution" 656 | convolution_param { 657 | num_output: 128 658 | kernel_size: 1 659 | pad: 0 660 | stride: 1 661 | bias_term: false 662 | } 663 | } 664 | 665 | layer { 666 | bottom: "res3b_branch2a" 667 | top: "res3b_branch2a" 668 | name: "bn3b_branch2a" 669 | type: "BatchNorm" 670 | batch_norm_param { 671 | use_global_stats: true 672 | } 673 | } 674 | 675 | layer { 676 | bottom: "res3b_branch2a" 677 | top: "res3b_branch2a" 678 | name: "scale3b_branch2a" 679 | type: "Scale" 680 | scale_param { 681 | bias_term: true 682 | } 683 | } 684 | 685 | layer { 686 | bottom: "res3b_branch2a" 687 | top: "res3b_branch2a" 688 | name: "res3b_branch2a_relu" 689 | type: "ReLU" 690 | } 691 | 692 | layer { 693 | bottom: "res3b_branch2a" 694 | top: "res3b_branch2b" 695 | name: "res3b_branch2b" 696 | type: "Convolution" 697 | convolution_param { 698 | num_output: 128 699 | kernel_size: 3 700 | pad: 1 701 | stride: 1 702 | bias_term: false 703 | } 704 | } 705 | 706 | layer { 707 | bottom: "res3b_branch2b" 708 | top: "res3b_branch2b" 709 | name: "bn3b_branch2b" 710 | type: "BatchNorm" 711 | batch_norm_param { 712 | use_global_stats: true 713 | } 714 | } 715 | 716 | layer { 717 | bottom: "res3b_branch2b" 718 | top: "res3b_branch2b" 719 | name: "scale3b_branch2b" 720 | type: "Scale" 721 | scale_param { 722 | bias_term: true 723 | } 724 | } 725 | 726 | layer { 727 | bottom: "res3b_branch2b" 728 | top: "res3b_branch2b" 729 | name: "res3b_branch2b_relu" 730 | type: "ReLU" 731 | } 732 | 733 | layer { 734 | bottom: "res3b_branch2b" 735 | top: "res3b_branch2c" 736 | name: "res3b_branch2c" 737 | type: "Convolution" 738 | convolution_param { 739 | num_output: 512 740 | kernel_size: 1 741 | pad: 0 742 | stride: 1 743 | bias_term: false 744 | } 745 | } 746 | 747 | layer { 748 | bottom: "res3b_branch2c" 749 | top: "res3b_branch2c" 750 | name: "bn3b_branch2c" 751 | type: "BatchNorm" 752 | batch_norm_param { 753 | use_global_stats: true 754 | } 755 | } 756 | 757 | layer { 758 | bottom: "res3b_branch2c" 759 | top: "res3b_branch2c" 760 | name: "scale3b_branch2c" 761 | type: "Scale" 762 | scale_param { 763 | bias_term: true 764 | } 765 | } 766 | 767 | layer { 768 | bottom: "res3a" 769 | bottom: "res3b_branch2c" 770 | top: "res3b" 771 | name: "res3b" 772 | type: "Eltwise" 773 | } 774 | 775 | layer { 776 | bottom: "res3b" 777 | top: "res3b" 778 | name: "res3b_relu" 779 | type: "ReLU" 780 | } 781 | 782 | layer { 783 | bottom: "res3b" 784 | top: "res3c_branch2a" 785 | name: "res3c_branch2a" 786 | type: "Convolution" 787 | convolution_param { 788 | num_output: 128 789 | kernel_size: 1 790 | pad: 0 791 | stride: 1 792 | bias_term: false 793 | } 794 | } 795 | 796 | layer { 797 | bottom: "res3c_branch2a" 798 | top: "res3c_branch2a" 799 | name: "bn3c_branch2a" 800 | type: "BatchNorm" 801 | batch_norm_param { 802 | use_global_stats: true 803 | } 804 | } 805 | 806 | layer { 807 | bottom: "res3c_branch2a" 808 | top: 
"res3c_branch2a" 809 | name: "scale3c_branch2a" 810 | type: "Scale" 811 | scale_param { 812 | bias_term: true 813 | } 814 | } 815 | 816 | layer { 817 | bottom: "res3c_branch2a" 818 | top: "res3c_branch2a" 819 | name: "res3c_branch2a_relu" 820 | type: "ReLU" 821 | } 822 | 823 | layer { 824 | bottom: "res3c_branch2a" 825 | top: "res3c_branch2b" 826 | name: "res3c_branch2b" 827 | type: "Convolution" 828 | convolution_param { 829 | num_output: 128 830 | kernel_size: 3 831 | pad: 1 832 | stride: 1 833 | bias_term: false 834 | } 835 | } 836 | 837 | layer { 838 | bottom: "res3c_branch2b" 839 | top: "res3c_branch2b" 840 | name: "bn3c_branch2b" 841 | type: "BatchNorm" 842 | batch_norm_param { 843 | use_global_stats: true 844 | } 845 | } 846 | 847 | layer { 848 | bottom: "res3c_branch2b" 849 | top: "res3c_branch2b" 850 | name: "scale3c_branch2b" 851 | type: "Scale" 852 | scale_param { 853 | bias_term: true 854 | } 855 | } 856 | 857 | layer { 858 | bottom: "res3c_branch2b" 859 | top: "res3c_branch2b" 860 | name: "res3c_branch2b_relu" 861 | type: "ReLU" 862 | } 863 | 864 | layer { 865 | bottom: "res3c_branch2b" 866 | top: "res3c_branch2c" 867 | name: "res3c_branch2c" 868 | type: "Convolution" 869 | convolution_param { 870 | num_output: 512 871 | kernel_size: 1 872 | pad: 0 873 | stride: 1 874 | bias_term: false 875 | } 876 | } 877 | 878 | layer { 879 | bottom: "res3c_branch2c" 880 | top: "res3c_branch2c" 881 | name: "bn3c_branch2c" 882 | type: "BatchNorm" 883 | batch_norm_param { 884 | use_global_stats: true 885 | } 886 | } 887 | 888 | layer { 889 | bottom: "res3c_branch2c" 890 | top: "res3c_branch2c" 891 | name: "scale3c_branch2c" 892 | type: "Scale" 893 | scale_param { 894 | bias_term: true 895 | } 896 | } 897 | 898 | layer { 899 | bottom: "res3b" 900 | bottom: "res3c_branch2c" 901 | top: "res3c" 902 | name: "res3c" 903 | type: "Eltwise" 904 | } 905 | 906 | layer { 907 | bottom: "res3c" 908 | top: "res3c" 909 | name: "res3c_relu" 910 | type: "ReLU" 911 | } 912 | 913 | layer { 914 | bottom: "res3c" 915 | top: "res3d_branch2a" 916 | name: "res3d_branch2a" 917 | type: "Convolution" 918 | convolution_param { 919 | num_output: 128 920 | kernel_size: 1 921 | pad: 0 922 | stride: 1 923 | bias_term: false 924 | } 925 | } 926 | 927 | layer { 928 | bottom: "res3d_branch2a" 929 | top: "res3d_branch2a" 930 | name: "bn3d_branch2a" 931 | type: "BatchNorm" 932 | batch_norm_param { 933 | use_global_stats: true 934 | } 935 | } 936 | 937 | layer { 938 | bottom: "res3d_branch2a" 939 | top: "res3d_branch2a" 940 | name: "scale3d_branch2a" 941 | type: "Scale" 942 | scale_param { 943 | bias_term: true 944 | } 945 | } 946 | 947 | layer { 948 | bottom: "res3d_branch2a" 949 | top: "res3d_branch2a" 950 | name: "res3d_branch2a_relu" 951 | type: "ReLU" 952 | } 953 | 954 | layer { 955 | bottom: "res3d_branch2a" 956 | top: "res3d_branch2b" 957 | name: "res3d_branch2b" 958 | type: "Convolution" 959 | convolution_param { 960 | num_output: 128 961 | kernel_size: 3 962 | pad: 1 963 | stride: 1 964 | bias_term: false 965 | } 966 | } 967 | 968 | layer { 969 | bottom: "res3d_branch2b" 970 | top: "res3d_branch2b" 971 | name: "bn3d_branch2b" 972 | type: "BatchNorm" 973 | batch_norm_param { 974 | use_global_stats: true 975 | } 976 | } 977 | 978 | layer { 979 | bottom: "res3d_branch2b" 980 | top: "res3d_branch2b" 981 | name: "scale3d_branch2b" 982 | type: "Scale" 983 | scale_param { 984 | bias_term: true 985 | } 986 | } 987 | 988 | layer { 989 | bottom: "res3d_branch2b" 990 | top: "res3d_branch2b" 991 | name: "res3d_branch2b_relu" 992 | 
type: "ReLU" 993 | } 994 | 995 | layer { 996 | bottom: "res3d_branch2b" 997 | top: "res3d_branch2c" 998 | name: "res3d_branch2c" 999 | type: "Convolution" 1000 | convolution_param { 1001 | num_output: 512 1002 | kernel_size: 1 1003 | pad: 0 1004 | stride: 1 1005 | bias_term: false 1006 | } 1007 | } 1008 | 1009 | layer { 1010 | bottom: "res3d_branch2c" 1011 | top: "res3d_branch2c" 1012 | name: "bn3d_branch2c" 1013 | type: "BatchNorm" 1014 | batch_norm_param { 1015 | use_global_stats: true 1016 | } 1017 | } 1018 | 1019 | layer { 1020 | bottom: "res3d_branch2c" 1021 | top: "res3d_branch2c" 1022 | name: "scale3d_branch2c" 1023 | type: "Scale" 1024 | scale_param { 1025 | bias_term: true 1026 | } 1027 | } 1028 | 1029 | layer { 1030 | bottom: "res3c" 1031 | bottom: "res3d_branch2c" 1032 | top: "res3d" 1033 | name: "res3d" 1034 | type: "Eltwise" 1035 | } 1036 | 1037 | layer { 1038 | bottom: "res3d" 1039 | top: "res3d" 1040 | name: "res3d_relu" 1041 | type: "ReLU" 1042 | } 1043 | 1044 | layer { 1045 | bottom: "res3d" 1046 | top: "res4a_branch1" 1047 | name: "res4a_branch1" 1048 | type: "Convolution" 1049 | convolution_param { 1050 | num_output: 1024 1051 | kernel_size: 1 1052 | pad: 0 1053 | stride: 2 1054 | bias_term: false 1055 | } 1056 | } 1057 | 1058 | layer { 1059 | bottom: "res4a_branch1" 1060 | top: "res4a_branch1" 1061 | name: "bn4a_branch1" 1062 | type: "BatchNorm" 1063 | batch_norm_param { 1064 | use_global_stats: true 1065 | } 1066 | } 1067 | 1068 | layer { 1069 | bottom: "res4a_branch1" 1070 | top: "res4a_branch1" 1071 | name: "scale4a_branch1" 1072 | type: "Scale" 1073 | scale_param { 1074 | bias_term: true 1075 | } 1076 | } 1077 | 1078 | layer { 1079 | bottom: "res3d" 1080 | top: "res4a_branch2a" 1081 | name: "res4a_branch2a" 1082 | type: "Convolution" 1083 | convolution_param { 1084 | num_output: 256 1085 | kernel_size: 1 1086 | pad: 0 1087 | stride: 2 1088 | bias_term: false 1089 | } 1090 | } 1091 | 1092 | layer { 1093 | bottom: "res4a_branch2a" 1094 | top: "res4a_branch2a" 1095 | name: "bn4a_branch2a" 1096 | type: "BatchNorm" 1097 | batch_norm_param { 1098 | use_global_stats: true 1099 | } 1100 | } 1101 | 1102 | layer { 1103 | bottom: "res4a_branch2a" 1104 | top: "res4a_branch2a" 1105 | name: "scale4a_branch2a" 1106 | type: "Scale" 1107 | scale_param { 1108 | bias_term: true 1109 | } 1110 | } 1111 | 1112 | layer { 1113 | bottom: "res4a_branch2a" 1114 | top: "res4a_branch2a" 1115 | name: "res4a_branch2a_relu" 1116 | type: "ReLU" 1117 | } 1118 | 1119 | layer { 1120 | bottom: "res4a_branch2a" 1121 | top: "res4a_branch2b" 1122 | name: "res4a_branch2b" 1123 | type: "Convolution" 1124 | convolution_param { 1125 | num_output: 256 1126 | kernel_size: 3 1127 | pad: 1 1128 | stride: 1 1129 | bias_term: false 1130 | } 1131 | } 1132 | 1133 | layer { 1134 | bottom: "res4a_branch2b" 1135 | top: "res4a_branch2b" 1136 | name: "bn4a_branch2b" 1137 | type: "BatchNorm" 1138 | batch_norm_param { 1139 | use_global_stats: true 1140 | } 1141 | } 1142 | 1143 | layer { 1144 | bottom: "res4a_branch2b" 1145 | top: "res4a_branch2b" 1146 | name: "scale4a_branch2b" 1147 | type: "Scale" 1148 | scale_param { 1149 | bias_term: true 1150 | } 1151 | } 1152 | 1153 | layer { 1154 | bottom: "res4a_branch2b" 1155 | top: "res4a_branch2b" 1156 | name: "res4a_branch2b_relu" 1157 | type: "ReLU" 1158 | } 1159 | 1160 | layer { 1161 | bottom: "res4a_branch2b" 1162 | top: "res4a_branch2c" 1163 | name: "res4a_branch2c" 1164 | type: "Convolution" 1165 | convolution_param { 1166 | num_output: 1024 1167 | kernel_size: 1 1168 | pad: 
0 1169 | stride: 1 1170 | bias_term: false 1171 | } 1172 | } 1173 | 1174 | layer { 1175 | bottom: "res4a_branch2c" 1176 | top: "res4a_branch2c" 1177 | name: "bn4a_branch2c" 1178 | type: "BatchNorm" 1179 | batch_norm_param { 1180 | use_global_stats: true 1181 | } 1182 | } 1183 | 1184 | layer { 1185 | bottom: "res4a_branch2c" 1186 | top: "res4a_branch2c" 1187 | name: "scale4a_branch2c" 1188 | type: "Scale" 1189 | scale_param { 1190 | bias_term: true 1191 | } 1192 | } 1193 | 1194 | layer { 1195 | bottom: "res4a_branch1" 1196 | bottom: "res4a_branch2c" 1197 | top: "res4a" 1198 | name: "res4a" 1199 | type: "Eltwise" 1200 | } 1201 | 1202 | layer { 1203 | bottom: "res4a" 1204 | top: "res4a" 1205 | name: "res4a_relu" 1206 | type: "ReLU" 1207 | } 1208 | 1209 | layer { 1210 | bottom: "res4a" 1211 | top: "res4b_branch2a" 1212 | name: "res4b_branch2a" 1213 | type: "Convolution" 1214 | convolution_param { 1215 | num_output: 256 1216 | kernel_size: 1 1217 | pad: 0 1218 | stride: 1 1219 | bias_term: false 1220 | } 1221 | } 1222 | 1223 | layer { 1224 | bottom: "res4b_branch2a" 1225 | top: "res4b_branch2a" 1226 | name: "bn4b_branch2a" 1227 | type: "BatchNorm" 1228 | batch_norm_param { 1229 | use_global_stats: true 1230 | } 1231 | } 1232 | 1233 | layer { 1234 | bottom: "res4b_branch2a" 1235 | top: "res4b_branch2a" 1236 | name: "scale4b_branch2a" 1237 | type: "Scale" 1238 | scale_param { 1239 | bias_term: true 1240 | } 1241 | } 1242 | 1243 | layer { 1244 | bottom: "res4b_branch2a" 1245 | top: "res4b_branch2a" 1246 | name: "res4b_branch2a_relu" 1247 | type: "ReLU" 1248 | } 1249 | 1250 | layer { 1251 | bottom: "res4b_branch2a" 1252 | top: "res4b_branch2b" 1253 | name: "res4b_branch2b" 1254 | type: "Convolution" 1255 | convolution_param { 1256 | num_output: 256 1257 | kernel_size: 3 1258 | pad: 1 1259 | stride: 1 1260 | bias_term: false 1261 | } 1262 | } 1263 | 1264 | layer { 1265 | bottom: "res4b_branch2b" 1266 | top: "res4b_branch2b" 1267 | name: "bn4b_branch2b" 1268 | type: "BatchNorm" 1269 | batch_norm_param { 1270 | use_global_stats: true 1271 | } 1272 | } 1273 | 1274 | layer { 1275 | bottom: "res4b_branch2b" 1276 | top: "res4b_branch2b" 1277 | name: "scale4b_branch2b" 1278 | type: "Scale" 1279 | scale_param { 1280 | bias_term: true 1281 | } 1282 | } 1283 | 1284 | layer { 1285 | bottom: "res4b_branch2b" 1286 | top: "res4b_branch2b" 1287 | name: "res4b_branch2b_relu" 1288 | type: "ReLU" 1289 | } 1290 | 1291 | layer { 1292 | bottom: "res4b_branch2b" 1293 | top: "res4b_branch2c" 1294 | name: "res4b_branch2c" 1295 | type: "Convolution" 1296 | convolution_param { 1297 | num_output: 1024 1298 | kernel_size: 1 1299 | pad: 0 1300 | stride: 1 1301 | bias_term: false 1302 | } 1303 | } 1304 | 1305 | layer { 1306 | bottom: "res4b_branch2c" 1307 | top: "res4b_branch2c" 1308 | name: "bn4b_branch2c" 1309 | type: "BatchNorm" 1310 | batch_norm_param { 1311 | use_global_stats: true 1312 | } 1313 | } 1314 | 1315 | layer { 1316 | bottom: "res4b_branch2c" 1317 | top: "res4b_branch2c" 1318 | name: "scale4b_branch2c" 1319 | type: "Scale" 1320 | scale_param { 1321 | bias_term: true 1322 | } 1323 | } 1324 | 1325 | layer { 1326 | bottom: "res4a" 1327 | bottom: "res4b_branch2c" 1328 | top: "res4b" 1329 | name: "res4b" 1330 | type: "Eltwise" 1331 | } 1332 | 1333 | layer { 1334 | bottom: "res4b" 1335 | top: "res4b" 1336 | name: "res4b_relu" 1337 | type: "ReLU" 1338 | } 1339 | 1340 | layer { 1341 | bottom: "res4b" 1342 | top: "res4c_branch2a" 1343 | name: "res4c_branch2a" 1344 | type: "Convolution" 1345 | convolution_param { 1346 | 
num_output: 256 1347 | kernel_size: 1 1348 | pad: 0 1349 | stride: 1 1350 | bias_term: false 1351 | } 1352 | } 1353 | 1354 | layer { 1355 | bottom: "res4c_branch2a" 1356 | top: "res4c_branch2a" 1357 | name: "bn4c_branch2a" 1358 | type: "BatchNorm" 1359 | batch_norm_param { 1360 | use_global_stats: true 1361 | } 1362 | } 1363 | 1364 | layer { 1365 | bottom: "res4c_branch2a" 1366 | top: "res4c_branch2a" 1367 | name: "scale4c_branch2a" 1368 | type: "Scale" 1369 | scale_param { 1370 | bias_term: true 1371 | } 1372 | } 1373 | 1374 | layer { 1375 | bottom: "res4c_branch2a" 1376 | top: "res4c_branch2a" 1377 | name: "res4c_branch2a_relu" 1378 | type: "ReLU" 1379 | } 1380 | 1381 | layer { 1382 | bottom: "res4c_branch2a" 1383 | top: "res4c_branch2b" 1384 | name: "res4c_branch2b" 1385 | type: "Convolution" 1386 | convolution_param { 1387 | num_output: 256 1388 | kernel_size: 3 1389 | pad: 1 1390 | stride: 1 1391 | bias_term: false 1392 | } 1393 | } 1394 | 1395 | layer { 1396 | bottom: "res4c_branch2b" 1397 | top: "res4c_branch2b" 1398 | name: "bn4c_branch2b" 1399 | type: "BatchNorm" 1400 | batch_norm_param { 1401 | use_global_stats: true 1402 | } 1403 | } 1404 | 1405 | layer { 1406 | bottom: "res4c_branch2b" 1407 | top: "res4c_branch2b" 1408 | name: "scale4c_branch2b" 1409 | type: "Scale" 1410 | scale_param { 1411 | bias_term: true 1412 | } 1413 | } 1414 | 1415 | layer { 1416 | bottom: "res4c_branch2b" 1417 | top: "res4c_branch2b" 1418 | name: "res4c_branch2b_relu" 1419 | type: "ReLU" 1420 | } 1421 | 1422 | layer { 1423 | bottom: "res4c_branch2b" 1424 | top: "res4c_branch2c" 1425 | name: "res4c_branch2c" 1426 | type: "Convolution" 1427 | convolution_param { 1428 | num_output: 1024 1429 | kernel_size: 1 1430 | pad: 0 1431 | stride: 1 1432 | bias_term: false 1433 | } 1434 | } 1435 | 1436 | layer { 1437 | bottom: "res4c_branch2c" 1438 | top: "res4c_branch2c" 1439 | name: "bn4c_branch2c" 1440 | type: "BatchNorm" 1441 | batch_norm_param { 1442 | use_global_stats: true 1443 | } 1444 | } 1445 | 1446 | layer { 1447 | bottom: "res4c_branch2c" 1448 | top: "res4c_branch2c" 1449 | name: "scale4c_branch2c" 1450 | type: "Scale" 1451 | scale_param { 1452 | bias_term: true 1453 | } 1454 | } 1455 | 1456 | layer { 1457 | bottom: "res4b" 1458 | bottom: "res4c_branch2c" 1459 | top: "res4c" 1460 | name: "res4c" 1461 | type: "Eltwise" 1462 | } 1463 | 1464 | layer { 1465 | bottom: "res4c" 1466 | top: "res4c" 1467 | name: "res4c_relu" 1468 | type: "ReLU" 1469 | } 1470 | 1471 | layer { 1472 | bottom: "res4c" 1473 | top: "res4d_branch2a" 1474 | name: "res4d_branch2a" 1475 | type: "Convolution" 1476 | convolution_param { 1477 | num_output: 256 1478 | kernel_size: 1 1479 | pad: 0 1480 | stride: 1 1481 | bias_term: false 1482 | } 1483 | } 1484 | 1485 | layer { 1486 | bottom: "res4d_branch2a" 1487 | top: "res4d_branch2a" 1488 | name: "bn4d_branch2a" 1489 | type: "BatchNorm" 1490 | batch_norm_param { 1491 | use_global_stats: true 1492 | } 1493 | } 1494 | 1495 | layer { 1496 | bottom: "res4d_branch2a" 1497 | top: "res4d_branch2a" 1498 | name: "scale4d_branch2a" 1499 | type: "Scale" 1500 | scale_param { 1501 | bias_term: true 1502 | } 1503 | } 1504 | 1505 | layer { 1506 | bottom: "res4d_branch2a" 1507 | top: "res4d_branch2a" 1508 | name: "res4d_branch2a_relu" 1509 | type: "ReLU" 1510 | } 1511 | 1512 | layer { 1513 | bottom: "res4d_branch2a" 1514 | top: "res4d_branch2b" 1515 | name: "res4d_branch2b" 1516 | type: "Convolution" 1517 | convolution_param { 1518 | num_output: 256 1519 | kernel_size: 3 1520 | pad: 1 1521 | stride: 1 1522 | 
bias_term: false 1523 | } 1524 | } 1525 | 1526 | layer { 1527 | bottom: "res4d_branch2b" 1528 | top: "res4d_branch2b" 1529 | name: "bn4d_branch2b" 1530 | type: "BatchNorm" 1531 | batch_norm_param { 1532 | use_global_stats: true 1533 | } 1534 | } 1535 | 1536 | layer { 1537 | bottom: "res4d_branch2b" 1538 | top: "res4d_branch2b" 1539 | name: "scale4d_branch2b" 1540 | type: "Scale" 1541 | scale_param { 1542 | bias_term: true 1543 | } 1544 | } 1545 | 1546 | layer { 1547 | bottom: "res4d_branch2b" 1548 | top: "res4d_branch2b" 1549 | name: "res4d_branch2b_relu" 1550 | type: "ReLU" 1551 | } 1552 | 1553 | layer { 1554 | bottom: "res4d_branch2b" 1555 | top: "res4d_branch2c" 1556 | name: "res4d_branch2c" 1557 | type: "Convolution" 1558 | convolution_param { 1559 | num_output: 1024 1560 | kernel_size: 1 1561 | pad: 0 1562 | stride: 1 1563 | bias_term: false 1564 | } 1565 | } 1566 | 1567 | layer { 1568 | bottom: "res4d_branch2c" 1569 | top: "res4d_branch2c" 1570 | name: "bn4d_branch2c" 1571 | type: "BatchNorm" 1572 | batch_norm_param { 1573 | use_global_stats: true 1574 | } 1575 | } 1576 | 1577 | layer { 1578 | bottom: "res4d_branch2c" 1579 | top: "res4d_branch2c" 1580 | name: "scale4d_branch2c" 1581 | type: "Scale" 1582 | scale_param { 1583 | bias_term: true 1584 | } 1585 | } 1586 | 1587 | layer { 1588 | bottom: "res4c" 1589 | bottom: "res4d_branch2c" 1590 | top: "res4d" 1591 | name: "res4d" 1592 | type: "Eltwise" 1593 | } 1594 | 1595 | layer { 1596 | bottom: "res4d" 1597 | top: "res4d" 1598 | name: "res4d_relu" 1599 | type: "ReLU" 1600 | } 1601 | 1602 | layer { 1603 | bottom: "res4d" 1604 | top: "res4e_branch2a" 1605 | name: "res4e_branch2a" 1606 | type: "Convolution" 1607 | convolution_param { 1608 | num_output: 256 1609 | kernel_size: 1 1610 | pad: 0 1611 | stride: 1 1612 | bias_term: false 1613 | } 1614 | } 1615 | 1616 | layer { 1617 | bottom: "res4e_branch2a" 1618 | top: "res4e_branch2a" 1619 | name: "bn4e_branch2a" 1620 | type: "BatchNorm" 1621 | batch_norm_param { 1622 | use_global_stats: true 1623 | } 1624 | } 1625 | 1626 | layer { 1627 | bottom: "res4e_branch2a" 1628 | top: "res4e_branch2a" 1629 | name: "scale4e_branch2a" 1630 | type: "Scale" 1631 | scale_param { 1632 | bias_term: true 1633 | } 1634 | } 1635 | 1636 | layer { 1637 | bottom: "res4e_branch2a" 1638 | top: "res4e_branch2a" 1639 | name: "res4e_branch2a_relu" 1640 | type: "ReLU" 1641 | } 1642 | 1643 | layer { 1644 | bottom: "res4e_branch2a" 1645 | top: "res4e_branch2b" 1646 | name: "res4e_branch2b" 1647 | type: "Convolution" 1648 | convolution_param { 1649 | num_output: 256 1650 | kernel_size: 3 1651 | pad: 1 1652 | stride: 1 1653 | bias_term: false 1654 | } 1655 | } 1656 | 1657 | layer { 1658 | bottom: "res4e_branch2b" 1659 | top: "res4e_branch2b" 1660 | name: "bn4e_branch2b" 1661 | type: "BatchNorm" 1662 | batch_norm_param { 1663 | use_global_stats: true 1664 | } 1665 | } 1666 | 1667 | layer { 1668 | bottom: "res4e_branch2b" 1669 | top: "res4e_branch2b" 1670 | name: "scale4e_branch2b" 1671 | type: "Scale" 1672 | scale_param { 1673 | bias_term: true 1674 | } 1675 | } 1676 | 1677 | layer { 1678 | bottom: "res4e_branch2b" 1679 | top: "res4e_branch2b" 1680 | name: "res4e_branch2b_relu" 1681 | type: "ReLU" 1682 | } 1683 | 1684 | layer { 1685 | bottom: "res4e_branch2b" 1686 | top: "res4e_branch2c" 1687 | name: "res4e_branch2c" 1688 | type: "Convolution" 1689 | convolution_param { 1690 | num_output: 1024 1691 | kernel_size: 1 1692 | pad: 0 1693 | stride: 1 1694 | bias_term: false 1695 | } 1696 | } 1697 | 1698 | layer { 1699 | bottom: 
"res4e_branch2c" 1700 | top: "res4e_branch2c" 1701 | name: "bn4e_branch2c" 1702 | type: "BatchNorm" 1703 | batch_norm_param { 1704 | use_global_stats: true 1705 | } 1706 | } 1707 | 1708 | layer { 1709 | bottom: "res4e_branch2c" 1710 | top: "res4e_branch2c" 1711 | name: "scale4e_branch2c" 1712 | type: "Scale" 1713 | scale_param { 1714 | bias_term: true 1715 | } 1716 | } 1717 | 1718 | layer { 1719 | bottom: "res4d" 1720 | bottom: "res4e_branch2c" 1721 | top: "res4e" 1722 | name: "res4e" 1723 | type: "Eltwise" 1724 | } 1725 | 1726 | layer { 1727 | bottom: "res4e" 1728 | top: "res4e" 1729 | name: "res4e_relu" 1730 | type: "ReLU" 1731 | } 1732 | 1733 | layer { 1734 | bottom: "res4e" 1735 | top: "res4f_branch2a" 1736 | name: "res4f_branch2a" 1737 | type: "Convolution" 1738 | convolution_param { 1739 | num_output: 256 1740 | kernel_size: 1 1741 | pad: 0 1742 | stride: 1 1743 | bias_term: false 1744 | } 1745 | } 1746 | 1747 | layer { 1748 | bottom: "res4f_branch2a" 1749 | top: "res4f_branch2a" 1750 | name: "bn4f_branch2a" 1751 | type: "BatchNorm" 1752 | batch_norm_param { 1753 | use_global_stats: true 1754 | } 1755 | } 1756 | 1757 | layer { 1758 | bottom: "res4f_branch2a" 1759 | top: "res4f_branch2a" 1760 | name: "scale4f_branch2a" 1761 | type: "Scale" 1762 | scale_param { 1763 | bias_term: true 1764 | } 1765 | } 1766 | 1767 | layer { 1768 | bottom: "res4f_branch2a" 1769 | top: "res4f_branch2a" 1770 | name: "res4f_branch2a_relu" 1771 | type: "ReLU" 1772 | } 1773 | 1774 | layer { 1775 | bottom: "res4f_branch2a" 1776 | top: "res4f_branch2b" 1777 | name: "res4f_branch2b" 1778 | type: "Convolution" 1779 | convolution_param { 1780 | num_output: 256 1781 | kernel_size: 3 1782 | pad: 1 1783 | stride: 1 1784 | bias_term: false 1785 | } 1786 | } 1787 | 1788 | layer { 1789 | bottom: "res4f_branch2b" 1790 | top: "res4f_branch2b" 1791 | name: "bn4f_branch2b" 1792 | type: "BatchNorm" 1793 | batch_norm_param { 1794 | use_global_stats: true 1795 | } 1796 | } 1797 | 1798 | layer { 1799 | bottom: "res4f_branch2b" 1800 | top: "res4f_branch2b" 1801 | name: "scale4f_branch2b" 1802 | type: "Scale" 1803 | scale_param { 1804 | bias_term: true 1805 | } 1806 | } 1807 | 1808 | layer { 1809 | bottom: "res4f_branch2b" 1810 | top: "res4f_branch2b" 1811 | name: "res4f_branch2b_relu" 1812 | type: "ReLU" 1813 | } 1814 | 1815 | layer { 1816 | bottom: "res4f_branch2b" 1817 | top: "res4f_branch2c" 1818 | name: "res4f_branch2c" 1819 | type: "Convolution" 1820 | convolution_param { 1821 | num_output: 1024 1822 | kernel_size: 1 1823 | pad: 0 1824 | stride: 1 1825 | bias_term: false 1826 | } 1827 | } 1828 | 1829 | layer { 1830 | bottom: "res4f_branch2c" 1831 | top: "res4f_branch2c" 1832 | name: "bn4f_branch2c" 1833 | type: "BatchNorm" 1834 | batch_norm_param { 1835 | use_global_stats: true 1836 | } 1837 | } 1838 | 1839 | layer { 1840 | bottom: "res4f_branch2c" 1841 | top: "res4f_branch2c" 1842 | name: "scale4f_branch2c" 1843 | type: "Scale" 1844 | scale_param { 1845 | bias_term: true 1846 | } 1847 | } 1848 | 1849 | layer { 1850 | bottom: "res4e" 1851 | bottom: "res4f_branch2c" 1852 | top: "res4f" 1853 | name: "res4f" 1854 | type: "Eltwise" 1855 | } 1856 | 1857 | layer { 1858 | bottom: "res4f" 1859 | top: "res4f" 1860 | name: "res4f_relu" 1861 | type: "ReLU" 1862 | } 1863 | 1864 | layer { 1865 | bottom: "res4f" 1866 | top: "res5a_branch1" 1867 | name: "res5a_branch1" 1868 | type: "Convolution" 1869 | convolution_param { 1870 | num_output: 2048 1871 | kernel_size: 1 1872 | pad: 0 1873 | stride: 2 1874 | bias_term: false 1875 | } 1876 | } 
1877 | 1878 | layer { 1879 | bottom: "res5a_branch1" 1880 | top: "res5a_branch1" 1881 | name: "bn5a_branch1" 1882 | type: "BatchNorm" 1883 | batch_norm_param { 1884 | use_global_stats: true 1885 | } 1886 | } 1887 | 1888 | layer { 1889 | bottom: "res5a_branch1" 1890 | top: "res5a_branch1" 1891 | name: "scale5a_branch1" 1892 | type: "Scale" 1893 | scale_param { 1894 | bias_term: true 1895 | } 1896 | } 1897 | 1898 | layer { 1899 | bottom: "res4f" 1900 | top: "res5a_branch2a" 1901 | name: "res5a_branch2a" 1902 | type: "Convolution" 1903 | convolution_param { 1904 | num_output: 512 1905 | kernel_size: 1 1906 | pad: 0 1907 | stride: 2 1908 | bias_term: false 1909 | } 1910 | } 1911 | 1912 | layer { 1913 | bottom: "res5a_branch2a" 1914 | top: "res5a_branch2a" 1915 | name: "bn5a_branch2a" 1916 | type: "BatchNorm" 1917 | batch_norm_param { 1918 | use_global_stats: true 1919 | } 1920 | } 1921 | 1922 | layer { 1923 | bottom: "res5a_branch2a" 1924 | top: "res5a_branch2a" 1925 | name: "scale5a_branch2a" 1926 | type: "Scale" 1927 | scale_param { 1928 | bias_term: true 1929 | } 1930 | } 1931 | 1932 | layer { 1933 | bottom: "res5a_branch2a" 1934 | top: "res5a_branch2a" 1935 | name: "res5a_branch2a_relu" 1936 | type: "ReLU" 1937 | } 1938 | 1939 | layer { 1940 | bottom: "res5a_branch2a" 1941 | top: "res5a_branch2b" 1942 | name: "res5a_branch2b" 1943 | type: "Convolution" 1944 | convolution_param { 1945 | num_output: 512 1946 | kernel_size: 3 1947 | pad: 1 1948 | stride: 1 1949 | bias_term: false 1950 | } 1951 | } 1952 | 1953 | layer { 1954 | bottom: "res5a_branch2b" 1955 | top: "res5a_branch2b" 1956 | name: "bn5a_branch2b" 1957 | type: "BatchNorm" 1958 | batch_norm_param { 1959 | use_global_stats: true 1960 | } 1961 | } 1962 | 1963 | layer { 1964 | bottom: "res5a_branch2b" 1965 | top: "res5a_branch2b" 1966 | name: "scale5a_branch2b" 1967 | type: "Scale" 1968 | scale_param { 1969 | bias_term: true 1970 | } 1971 | } 1972 | 1973 | layer { 1974 | bottom: "res5a_branch2b" 1975 | top: "res5a_branch2b" 1976 | name: "res5a_branch2b_relu" 1977 | type: "ReLU" 1978 | } 1979 | 1980 | layer { 1981 | bottom: "res5a_branch2b" 1982 | top: "res5a_branch2c" 1983 | name: "res5a_branch2c" 1984 | type: "Convolution" 1985 | convolution_param { 1986 | num_output: 2048 1987 | kernel_size: 1 1988 | pad: 0 1989 | stride: 1 1990 | bias_term: false 1991 | } 1992 | } 1993 | 1994 | layer { 1995 | bottom: "res5a_branch2c" 1996 | top: "res5a_branch2c" 1997 | name: "bn5a_branch2c" 1998 | type: "BatchNorm" 1999 | batch_norm_param { 2000 | use_global_stats: true 2001 | } 2002 | } 2003 | 2004 | layer { 2005 | bottom: "res5a_branch2c" 2006 | top: "res5a_branch2c" 2007 | name: "scale5a_branch2c" 2008 | type: "Scale" 2009 | scale_param { 2010 | bias_term: true 2011 | } 2012 | } 2013 | 2014 | layer { 2015 | bottom: "res5a_branch1" 2016 | bottom: "res5a_branch2c" 2017 | top: "res5a" 2018 | name: "res5a" 2019 | type: "Eltwise" 2020 | } 2021 | 2022 | layer { 2023 | bottom: "res5a" 2024 | top: "res5a" 2025 | name: "res5a_relu" 2026 | type: "ReLU" 2027 | } 2028 | 2029 | layer { 2030 | bottom: "res5a" 2031 | top: "res5b_branch2a" 2032 | name: "res5b_branch2a" 2033 | type: "Convolution" 2034 | convolution_param { 2035 | num_output: 512 2036 | kernel_size: 1 2037 | pad: 0 2038 | stride: 1 2039 | bias_term: false 2040 | } 2041 | } 2042 | 2043 | layer { 2044 | bottom: "res5b_branch2a" 2045 | top: "res5b_branch2a" 2046 | name: "bn5b_branch2a" 2047 | type: "BatchNorm" 2048 | batch_norm_param { 2049 | use_global_stats: true 2050 | } 2051 | } 2052 | 2053 | 
layer { 2054 | bottom: "res5b_branch2a" 2055 | top: "res5b_branch2a" 2056 | name: "scale5b_branch2a" 2057 | type: "Scale" 2058 | scale_param { 2059 | bias_term: true 2060 | } 2061 | } 2062 | 2063 | layer { 2064 | bottom: "res5b_branch2a" 2065 | top: "res5b_branch2a" 2066 | name: "res5b_branch2a_relu" 2067 | type: "ReLU" 2068 | } 2069 | 2070 | layer { 2071 | bottom: "res5b_branch2a" 2072 | top: "res5b_branch2b" 2073 | name: "res5b_branch2b" 2074 | type: "Convolution" 2075 | convolution_param { 2076 | num_output: 512 2077 | kernel_size: 3 2078 | pad: 1 2079 | stride: 1 2080 | bias_term: false 2081 | } 2082 | } 2083 | 2084 | layer { 2085 | bottom: "res5b_branch2b" 2086 | top: "res5b_branch2b" 2087 | name: "bn5b_branch2b" 2088 | type: "BatchNorm" 2089 | batch_norm_param { 2090 | use_global_stats: true 2091 | } 2092 | } 2093 | 2094 | layer { 2095 | bottom: "res5b_branch2b" 2096 | top: "res5b_branch2b" 2097 | name: "scale5b_branch2b" 2098 | type: "Scale" 2099 | scale_param { 2100 | bias_term: true 2101 | } 2102 | } 2103 | 2104 | layer { 2105 | bottom: "res5b_branch2b" 2106 | top: "res5b_branch2b" 2107 | name: "res5b_branch2b_relu" 2108 | type: "ReLU" 2109 | } 2110 | 2111 | layer { 2112 | bottom: "res5b_branch2b" 2113 | top: "res5b_branch2c" 2114 | name: "res5b_branch2c" 2115 | type: "Convolution" 2116 | convolution_param { 2117 | num_output: 2048 2118 | kernel_size: 1 2119 | pad: 0 2120 | stride: 1 2121 | bias_term: false 2122 | } 2123 | } 2124 | 2125 | layer { 2126 | bottom: "res5b_branch2c" 2127 | top: "res5b_branch2c" 2128 | name: "bn5b_branch2c" 2129 | type: "BatchNorm" 2130 | batch_norm_param { 2131 | use_global_stats: true 2132 | } 2133 | } 2134 | 2135 | layer { 2136 | bottom: "res5b_branch2c" 2137 | top: "res5b_branch2c" 2138 | name: "scale5b_branch2c" 2139 | type: "Scale" 2140 | scale_param { 2141 | bias_term: true 2142 | } 2143 | } 2144 | 2145 | layer { 2146 | bottom: "res5a" 2147 | bottom: "res5b_branch2c" 2148 | top: "res5b" 2149 | name: "res5b" 2150 | type: "Eltwise" 2151 | } 2152 | 2153 | layer { 2154 | bottom: "res5b" 2155 | top: "res5b" 2156 | name: "res5b_relu" 2157 | type: "ReLU" 2158 | } 2159 | 2160 | layer { 2161 | bottom: "res5b" 2162 | top: "res5c_branch2a" 2163 | name: "res5c_branch2a" 2164 | type: "Convolution" 2165 | convolution_param { 2166 | num_output: 512 2167 | kernel_size: 1 2168 | pad: 0 2169 | stride: 1 2170 | bias_term: false 2171 | } 2172 | } 2173 | 2174 | layer { 2175 | bottom: "res5c_branch2a" 2176 | top: "res5c_branch2a" 2177 | name: "bn5c_branch2a" 2178 | type: "BatchNorm" 2179 | batch_norm_param { 2180 | use_global_stats: true 2181 | } 2182 | } 2183 | 2184 | layer { 2185 | bottom: "res5c_branch2a" 2186 | top: "res5c_branch2a" 2187 | name: "scale5c_branch2a" 2188 | type: "Scale" 2189 | scale_param { 2190 | bias_term: true 2191 | } 2192 | } 2193 | 2194 | layer { 2195 | bottom: "res5c_branch2a" 2196 | top: "res5c_branch2a" 2197 | name: "res5c_branch2a_relu" 2198 | type: "ReLU" 2199 | } 2200 | 2201 | layer { 2202 | bottom: "res5c_branch2a" 2203 | top: "res5c_branch2b" 2204 | name: "res5c_branch2b" 2205 | type: "Convolution" 2206 | convolution_param { 2207 | num_output: 512 2208 | kernel_size: 3 2209 | pad: 1 2210 | stride: 1 2211 | bias_term: false 2212 | } 2213 | } 2214 | 2215 | layer { 2216 | bottom: "res5c_branch2b" 2217 | top: "res5c_branch2b" 2218 | name: "bn5c_branch2b" 2219 | type: "BatchNorm" 2220 | batch_norm_param { 2221 | use_global_stats: true 2222 | } 2223 | } 2224 | 2225 | layer { 2226 | bottom: "res5c_branch2b" 2227 | top: "res5c_branch2b" 2228 | 
name: "scale5c_branch2b"
2229 | type: "Scale"
2230 | scale_param {
2231 | bias_term: true
2232 | }
2233 | }
2234 |
2235 | layer {
2236 | bottom: "res5c_branch2b"
2237 | top: "res5c_branch2b"
2238 | name: "res5c_branch2b_relu"
2239 | type: "ReLU"
2240 | }
2241 |
2242 | layer {
2243 | bottom: "res5c_branch2b"
2244 | top: "res5c_branch2c"
2245 | name: "res5c_branch2c"
2246 | type: "Convolution"
2247 | convolution_param {
2248 | num_output: 2048
2249 | kernel_size: 1
2250 | pad: 0
2251 | stride: 1
2252 | bias_term: false
2253 | }
2254 | }
2255 |
2256 | layer {
2257 | bottom: "res5c_branch2c"
2258 | top: "res5c_branch2c"
2259 | name: "bn5c_branch2c"
2260 | type: "BatchNorm"
2261 | batch_norm_param {
2262 | use_global_stats: true
2263 | }
2264 | }
2265 |
2266 | layer {
2267 | bottom: "res5c_branch2c"
2268 | top: "res5c_branch2c"
2269 | name: "scale5c_branch2c"
2270 | type: "Scale"
2271 | scale_param {
2272 | bias_term: true
2273 | }
2274 | }
2275 |
2276 | layer {
2277 | bottom: "res5b"
2278 | bottom: "res5c_branch2c"
2279 | top: "res5c"
2280 | name: "res5c"
2281 | type: "Eltwise"
2282 | }
2283 |
2284 | layer {
2285 | bottom: "res5c"
2286 | top: "res5c"
2287 | name: "res5c_relu"
2288 | type: "ReLU"
2289 | }
2290 |
2291 | layer {
2292 | bottom: "res5c"
2293 | top: "pool5"
2294 | name: "pool5"
2295 | type: "Pooling"
2296 | pooling_param {
2297 | kernel_size: 7
2298 | stride: 1
2299 | pool: AVE
2300 | }
2301 | }
2302 | layer {
2303 | bottom: "pool5"
2304 | top: "my-classifier"
2305 | name: "my-classifier"
2306 | type: "InnerProduct"
2307 | inner_product_param {
2308 | num_output: 3
2309 | }
2310 | }
2311 | layer {
2312 | bottom: "my-classifier"
2313 | top: "prob"
2314 | name: "prob"
2315 | type: "Softmax"
2316 | }
2317 |
2318 | --------------------------------------------------------------------------------
/ResNet-50/solver.prototxt: --------------------------------------------------------------------------------
1 | # the definition of the neural network model
2 | net: "train_val.prototxt"
3 | # test_iter is tied to the batch_size of the TEST data layer: test_iter * batch_size = the number of test samples
4 | test_iter: 512
5 | # carry out a test pass once every 10 training iterations
6 | test_interval: 10
7 | # skip the test pass at iteration 0 when test_initialization = false
8 | # test_initialization: false
9 | # display training information after every iteration
10 | display: 1
11 | # average the displayed loss over the last 40 iterations
12 | average_loss: 40
13 | # the initial learning rate
14 | base_lr: 0.001
15 | lr_policy: "poly"
16 | stepsize: 320000
17 | gamma: 0.96
18 | # the maximum number of training iterations
19 | max_iter: 501
20 | power: 1.0
21 | momentum: 0.9
22 | # weight decay term, to guard against overfitting
23 | weight_decay: 0.0002
24 | # save a snapshot once every 10 training iterations
25 | snapshot: 10
26 | # save path
27 | snapshot_prefix: "resnet-50-cervix"
28 | solver_mode: GPU
29 | --------------------------------------------------------------------------------
/ResNet-50/train_val.prototxt: --------------------------------------------------------------------------------
1 | name: "ResNet-50"
2 | layer {
3 | name: "data"
4 | type: "Data"
5 | top: "data"
6 | top: "label"
7 | include {
8 | phase: TRAIN
9 | }
10 | transform_param {
11 | mirror: true
12 | crop_size: 224
13 | mean_file: "mean.binaryproto"
14 | }
15 | data_param {
16 | source: "train_lmdb"
17 | batch_size: 24
18 | backend: LMDB
19 | }
20 | }
21 | layer {
22 | name: "data"
23 | type: "Data"
24 | top: "data"
25 | top: "label"
26 | include {
27 | phase: TEST
28 | }
29 | transform_param {
30 | mirror: false
31 | crop_size: 224
32 | mean_file: "mean.binaryproto" 33 | } 34 | data_param { 35 | source: "val_lmdb" 36 | batch_size: 1 37 | backend: LMDB 38 | } 39 | } 40 | layer { 41 | bottom: "data" 42 | top: "conv1" 43 | name: "conv1" 44 | type: "Convolution" 45 | convolution_param { 46 | num_output: 64 47 | kernel_size: 7 48 | pad: 3 49 | stride: 2 50 | } 51 | } 52 | 53 | layer { 54 | bottom: "conv1" 55 | top: "conv1" 56 | name: "bn_conv1" 57 | type: "BatchNorm" 58 | batch_norm_param { 59 | use_global_stats: true 60 | } 61 | } 62 | 63 | layer { 64 | bottom: "conv1" 65 | top: "conv1" 66 | name: "scale_conv1" 67 | type: "Scale" 68 | scale_param { 69 | bias_term: true 70 | } 71 | } 72 | 73 | layer { 74 | bottom: "conv1" 75 | top: "conv1" 76 | name: "conv1_relu" 77 | type: "ReLU" 78 | } 79 | 80 | layer { 81 | bottom: "conv1" 82 | top: "pool1" 83 | name: "pool1" 84 | type: "Pooling" 85 | pooling_param { 86 | kernel_size: 3 87 | stride: 2 88 | pool: MAX 89 | } 90 | } 91 | 92 | layer { 93 | bottom: "pool1" 94 | top: "res2a_branch1" 95 | name: "res2a_branch1" 96 | type: "Convolution" 97 | convolution_param { 98 | num_output: 256 99 | kernel_size: 1 100 | pad: 0 101 | stride: 1 102 | bias_term: false 103 | } 104 | } 105 | 106 | layer { 107 | bottom: "res2a_branch1" 108 | top: "res2a_branch1" 109 | name: "bn2a_branch1" 110 | type: "BatchNorm" 111 | batch_norm_param { 112 | use_global_stats: true 113 | } 114 | } 115 | 116 | layer { 117 | bottom: "res2a_branch1" 118 | top: "res2a_branch1" 119 | name: "scale2a_branch1" 120 | type: "Scale" 121 | scale_param { 122 | bias_term: true 123 | } 124 | } 125 | 126 | layer { 127 | bottom: "pool1" 128 | top: "res2a_branch2a" 129 | name: "res2a_branch2a" 130 | type: "Convolution" 131 | convolution_param { 132 | num_output: 64 133 | kernel_size: 1 134 | pad: 0 135 | stride: 1 136 | bias_term: false 137 | } 138 | } 139 | 140 | layer { 141 | bottom: "res2a_branch2a" 142 | top: "res2a_branch2a" 143 | name: "bn2a_branch2a" 144 | type: "BatchNorm" 145 | batch_norm_param { 146 | use_global_stats: true 147 | } 148 | } 149 | 150 | layer { 151 | bottom: "res2a_branch2a" 152 | top: "res2a_branch2a" 153 | name: "scale2a_branch2a" 154 | type: "Scale" 155 | scale_param { 156 | bias_term: true 157 | } 158 | } 159 | 160 | layer { 161 | bottom: "res2a_branch2a" 162 | top: "res2a_branch2a" 163 | name: "res2a_branch2a_relu" 164 | type: "ReLU" 165 | } 166 | 167 | layer { 168 | bottom: "res2a_branch2a" 169 | top: "res2a_branch2b" 170 | name: "res2a_branch2b" 171 | type: "Convolution" 172 | convolution_param { 173 | num_output: 64 174 | kernel_size: 3 175 | pad: 1 176 | stride: 1 177 | bias_term: false 178 | } 179 | } 180 | 181 | layer { 182 | bottom: "res2a_branch2b" 183 | top: "res2a_branch2b" 184 | name: "bn2a_branch2b" 185 | type: "BatchNorm" 186 | batch_norm_param { 187 | use_global_stats: true 188 | } 189 | } 190 | 191 | layer { 192 | bottom: "res2a_branch2b" 193 | top: "res2a_branch2b" 194 | name: "scale2a_branch2b" 195 | type: "Scale" 196 | scale_param { 197 | bias_term: true 198 | } 199 | } 200 | 201 | layer { 202 | bottom: "res2a_branch2b" 203 | top: "res2a_branch2b" 204 | name: "res2a_branch2b_relu" 205 | type: "ReLU" 206 | } 207 | 208 | layer { 209 | bottom: "res2a_branch2b" 210 | top: "res2a_branch2c" 211 | name: "res2a_branch2c" 212 | type: "Convolution" 213 | convolution_param { 214 | num_output: 256 215 | kernel_size: 1 216 | pad: 0 217 | stride: 1 218 | bias_term: false 219 | } 220 | } 221 | 222 | layer { 223 | bottom: "res2a_branch2c" 224 | top: "res2a_branch2c" 225 | name: "bn2a_branch2c" 226 | 
type: "BatchNorm" 227 | batch_norm_param { 228 | use_global_stats: true 229 | } 230 | } 231 | 232 | layer { 233 | bottom: "res2a_branch2c" 234 | top: "res2a_branch2c" 235 | name: "scale2a_branch2c" 236 | type: "Scale" 237 | scale_param { 238 | bias_term: true 239 | } 240 | } 241 | 242 | layer { 243 | bottom: "res2a_branch1" 244 | bottom: "res2a_branch2c" 245 | top: "res2a" 246 | name: "res2a" 247 | type: "Eltwise" 248 | } 249 | 250 | layer { 251 | bottom: "res2a" 252 | top: "res2a" 253 | name: "res2a_relu" 254 | type: "ReLU" 255 | } 256 | 257 | layer { 258 | bottom: "res2a" 259 | top: "res2b_branch2a" 260 | name: "res2b_branch2a" 261 | type: "Convolution" 262 | convolution_param { 263 | num_output: 64 264 | kernel_size: 1 265 | pad: 0 266 | stride: 1 267 | bias_term: false 268 | } 269 | } 270 | 271 | layer { 272 | bottom: "res2b_branch2a" 273 | top: "res2b_branch2a" 274 | name: "bn2b_branch2a" 275 | type: "BatchNorm" 276 | batch_norm_param { 277 | use_global_stats: true 278 | } 279 | } 280 | 281 | layer { 282 | bottom: "res2b_branch2a" 283 | top: "res2b_branch2a" 284 | name: "scale2b_branch2a" 285 | type: "Scale" 286 | scale_param { 287 | bias_term: true 288 | } 289 | } 290 | 291 | layer { 292 | bottom: "res2b_branch2a" 293 | top: "res2b_branch2a" 294 | name: "res2b_branch2a_relu" 295 | type: "ReLU" 296 | } 297 | 298 | layer { 299 | bottom: "res2b_branch2a" 300 | top: "res2b_branch2b" 301 | name: "res2b_branch2b" 302 | type: "Convolution" 303 | convolution_param { 304 | num_output: 64 305 | kernel_size: 3 306 | pad: 1 307 | stride: 1 308 | bias_term: false 309 | } 310 | } 311 | 312 | layer { 313 | bottom: "res2b_branch2b" 314 | top: "res2b_branch2b" 315 | name: "bn2b_branch2b" 316 | type: "BatchNorm" 317 | batch_norm_param { 318 | use_global_stats: true 319 | } 320 | } 321 | 322 | layer { 323 | bottom: "res2b_branch2b" 324 | top: "res2b_branch2b" 325 | name: "scale2b_branch2b" 326 | type: "Scale" 327 | scale_param { 328 | bias_term: true 329 | } 330 | } 331 | 332 | layer { 333 | bottom: "res2b_branch2b" 334 | top: "res2b_branch2b" 335 | name: "res2b_branch2b_relu" 336 | type: "ReLU" 337 | } 338 | 339 | layer { 340 | bottom: "res2b_branch2b" 341 | top: "res2b_branch2c" 342 | name: "res2b_branch2c" 343 | type: "Convolution" 344 | convolution_param { 345 | num_output: 256 346 | kernel_size: 1 347 | pad: 0 348 | stride: 1 349 | bias_term: false 350 | } 351 | } 352 | 353 | layer { 354 | bottom: "res2b_branch2c" 355 | top: "res2b_branch2c" 356 | name: "bn2b_branch2c" 357 | type: "BatchNorm" 358 | batch_norm_param { 359 | use_global_stats: true 360 | } 361 | } 362 | 363 | layer { 364 | bottom: "res2b_branch2c" 365 | top: "res2b_branch2c" 366 | name: "scale2b_branch2c" 367 | type: "Scale" 368 | scale_param { 369 | bias_term: true 370 | } 371 | } 372 | 373 | layer { 374 | bottom: "res2a" 375 | bottom: "res2b_branch2c" 376 | top: "res2b" 377 | name: "res2b" 378 | type: "Eltwise" 379 | } 380 | 381 | layer { 382 | bottom: "res2b" 383 | top: "res2b" 384 | name: "res2b_relu" 385 | type: "ReLU" 386 | } 387 | 388 | layer { 389 | bottom: "res2b" 390 | top: "res2c_branch2a" 391 | name: "res2c_branch2a" 392 | type: "Convolution" 393 | convolution_param { 394 | num_output: 64 395 | kernel_size: 1 396 | pad: 0 397 | stride: 1 398 | bias_term: false 399 | } 400 | } 401 | 402 | layer { 403 | bottom: "res2c_branch2a" 404 | top: "res2c_branch2a" 405 | name: "bn2c_branch2a" 406 | type: "BatchNorm" 407 | batch_norm_param { 408 | use_global_stats: true 409 | } 410 | } 411 | 412 | layer { 413 | bottom: 
"res2c_branch2a" 414 | top: "res2c_branch2a" 415 | name: "scale2c_branch2a" 416 | type: "Scale" 417 | scale_param { 418 | bias_term: true 419 | } 420 | } 421 | 422 | layer { 423 | bottom: "res2c_branch2a" 424 | top: "res2c_branch2a" 425 | name: "res2c_branch2a_relu" 426 | type: "ReLU" 427 | } 428 | 429 | layer { 430 | bottom: "res2c_branch2a" 431 | top: "res2c_branch2b" 432 | name: "res2c_branch2b" 433 | type: "Convolution" 434 | convolution_param { 435 | num_output: 64 436 | kernel_size: 3 437 | pad: 1 438 | stride: 1 439 | bias_term: false 440 | } 441 | } 442 | 443 | layer { 444 | bottom: "res2c_branch2b" 445 | top: "res2c_branch2b" 446 | name: "bn2c_branch2b" 447 | type: "BatchNorm" 448 | batch_norm_param { 449 | use_global_stats: true 450 | } 451 | } 452 | 453 | layer { 454 | bottom: "res2c_branch2b" 455 | top: "res2c_branch2b" 456 | name: "scale2c_branch2b" 457 | type: "Scale" 458 | scale_param { 459 | bias_term: true 460 | } 461 | } 462 | 463 | layer { 464 | bottom: "res2c_branch2b" 465 | top: "res2c_branch2b" 466 | name: "res2c_branch2b_relu" 467 | type: "ReLU" 468 | } 469 | 470 | layer { 471 | bottom: "res2c_branch2b" 472 | top: "res2c_branch2c" 473 | name: "res2c_branch2c" 474 | type: "Convolution" 475 | convolution_param { 476 | num_output: 256 477 | kernel_size: 1 478 | pad: 0 479 | stride: 1 480 | bias_term: false 481 | } 482 | } 483 | 484 | layer { 485 | bottom: "res2c_branch2c" 486 | top: "res2c_branch2c" 487 | name: "bn2c_branch2c" 488 | type: "BatchNorm" 489 | batch_norm_param { 490 | use_global_stats: true 491 | } 492 | } 493 | 494 | layer { 495 | bottom: "res2c_branch2c" 496 | top: "res2c_branch2c" 497 | name: "scale2c_branch2c" 498 | type: "Scale" 499 | scale_param { 500 | bias_term: true 501 | } 502 | } 503 | 504 | layer { 505 | bottom: "res2b" 506 | bottom: "res2c_branch2c" 507 | top: "res2c" 508 | name: "res2c" 509 | type: "Eltwise" 510 | } 511 | 512 | layer { 513 | bottom: "res2c" 514 | top: "res2c" 515 | name: "res2c_relu" 516 | type: "ReLU" 517 | } 518 | 519 | layer { 520 | bottom: "res2c" 521 | top: "res3a_branch1" 522 | name: "res3a_branch1" 523 | type: "Convolution" 524 | convolution_param { 525 | num_output: 512 526 | kernel_size: 1 527 | pad: 0 528 | stride: 2 529 | bias_term: false 530 | } 531 | } 532 | 533 | layer { 534 | bottom: "res3a_branch1" 535 | top: "res3a_branch1" 536 | name: "bn3a_branch1" 537 | type: "BatchNorm" 538 | batch_norm_param { 539 | use_global_stats: true 540 | } 541 | } 542 | 543 | layer { 544 | bottom: "res3a_branch1" 545 | top: "res3a_branch1" 546 | name: "scale3a_branch1" 547 | type: "Scale" 548 | scale_param { 549 | bias_term: true 550 | } 551 | } 552 | 553 | layer { 554 | bottom: "res2c" 555 | top: "res3a_branch2a" 556 | name: "res3a_branch2a" 557 | type: "Convolution" 558 | convolution_param { 559 | num_output: 128 560 | kernel_size: 1 561 | pad: 0 562 | stride: 2 563 | bias_term: false 564 | } 565 | } 566 | 567 | layer { 568 | bottom: "res3a_branch2a" 569 | top: "res3a_branch2a" 570 | name: "bn3a_branch2a" 571 | type: "BatchNorm" 572 | batch_norm_param { 573 | use_global_stats: true 574 | } 575 | } 576 | 577 | layer { 578 | bottom: "res3a_branch2a" 579 | top: "res3a_branch2a" 580 | name: "scale3a_branch2a" 581 | type: "Scale" 582 | scale_param { 583 | bias_term: true 584 | } 585 | } 586 | 587 | layer { 588 | bottom: "res3a_branch2a" 589 | top: "res3a_branch2a" 590 | name: "res3a_branch2a_relu" 591 | type: "ReLU" 592 | } 593 | 594 | layer { 595 | bottom: "res3a_branch2a" 596 | top: "res3a_branch2b" 597 | name: "res3a_branch2b" 598 
| type: "Convolution" 599 | convolution_param { 600 | num_output: 128 601 | kernel_size: 3 602 | pad: 1 603 | stride: 1 604 | bias_term: false 605 | } 606 | } 607 | 608 | layer { 609 | bottom: "res3a_branch2b" 610 | top: "res3a_branch2b" 611 | name: "bn3a_branch2b" 612 | type: "BatchNorm" 613 | batch_norm_param { 614 | use_global_stats: true 615 | } 616 | } 617 | 618 | layer { 619 | bottom: "res3a_branch2b" 620 | top: "res3a_branch2b" 621 | name: "scale3a_branch2b" 622 | type: "Scale" 623 | scale_param { 624 | bias_term: true 625 | } 626 | } 627 | 628 | layer { 629 | bottom: "res3a_branch2b" 630 | top: "res3a_branch2b" 631 | name: "res3a_branch2b_relu" 632 | type: "ReLU" 633 | } 634 | 635 | layer { 636 | bottom: "res3a_branch2b" 637 | top: "res3a_branch2c" 638 | name: "res3a_branch2c" 639 | type: "Convolution" 640 | convolution_param { 641 | num_output: 512 642 | kernel_size: 1 643 | pad: 0 644 | stride: 1 645 | bias_term: false 646 | } 647 | } 648 | 649 | layer { 650 | bottom: "res3a_branch2c" 651 | top: "res3a_branch2c" 652 | name: "bn3a_branch2c" 653 | type: "BatchNorm" 654 | batch_norm_param { 655 | use_global_stats: true 656 | } 657 | } 658 | 659 | layer { 660 | bottom: "res3a_branch2c" 661 | top: "res3a_branch2c" 662 | name: "scale3a_branch2c" 663 | type: "Scale" 664 | scale_param { 665 | bias_term: true 666 | } 667 | } 668 | 669 | layer { 670 | bottom: "res3a_branch1" 671 | bottom: "res3a_branch2c" 672 | top: "res3a" 673 | name: "res3a" 674 | type: "Eltwise" 675 | } 676 | 677 | layer { 678 | bottom: "res3a" 679 | top: "res3a" 680 | name: "res3a_relu" 681 | type: "ReLU" 682 | } 683 | 684 | layer { 685 | bottom: "res3a" 686 | top: "res3b_branch2a" 687 | name: "res3b_branch2a" 688 | type: "Convolution" 689 | convolution_param { 690 | num_output: 128 691 | kernel_size: 1 692 | pad: 0 693 | stride: 1 694 | bias_term: false 695 | } 696 | } 697 | 698 | layer { 699 | bottom: "res3b_branch2a" 700 | top: "res3b_branch2a" 701 | name: "bn3b_branch2a" 702 | type: "BatchNorm" 703 | batch_norm_param { 704 | use_global_stats: true 705 | } 706 | } 707 | 708 | layer { 709 | bottom: "res3b_branch2a" 710 | top: "res3b_branch2a" 711 | name: "scale3b_branch2a" 712 | type: "Scale" 713 | scale_param { 714 | bias_term: true 715 | } 716 | } 717 | 718 | layer { 719 | bottom: "res3b_branch2a" 720 | top: "res3b_branch2a" 721 | name: "res3b_branch2a_relu" 722 | type: "ReLU" 723 | } 724 | 725 | layer { 726 | bottom: "res3b_branch2a" 727 | top: "res3b_branch2b" 728 | name: "res3b_branch2b" 729 | type: "Convolution" 730 | convolution_param { 731 | num_output: 128 732 | kernel_size: 3 733 | pad: 1 734 | stride: 1 735 | bias_term: false 736 | } 737 | } 738 | 739 | layer { 740 | bottom: "res3b_branch2b" 741 | top: "res3b_branch2b" 742 | name: "bn3b_branch2b" 743 | type: "BatchNorm" 744 | batch_norm_param { 745 | use_global_stats: true 746 | } 747 | } 748 | 749 | layer { 750 | bottom: "res3b_branch2b" 751 | top: "res3b_branch2b" 752 | name: "scale3b_branch2b" 753 | type: "Scale" 754 | scale_param { 755 | bias_term: true 756 | } 757 | } 758 | 759 | layer { 760 | bottom: "res3b_branch2b" 761 | top: "res3b_branch2b" 762 | name: "res3b_branch2b_relu" 763 | type: "ReLU" 764 | } 765 | 766 | layer { 767 | bottom: "res3b_branch2b" 768 | top: "res3b_branch2c" 769 | name: "res3b_branch2c" 770 | type: "Convolution" 771 | convolution_param { 772 | num_output: 512 773 | kernel_size: 1 774 | pad: 0 775 | stride: 1 776 | bias_term: false 777 | } 778 | } 779 | 780 | layer { 781 | bottom: "res3b_branch2c" 782 | top: "res3b_branch2c" 
783 | name: "bn3b_branch2c" 784 | type: "BatchNorm" 785 | batch_norm_param { 786 | use_global_stats: true 787 | } 788 | } 789 | 790 | layer { 791 | bottom: "res3b_branch2c" 792 | top: "res3b_branch2c" 793 | name: "scale3b_branch2c" 794 | type: "Scale" 795 | scale_param { 796 | bias_term: true 797 | } 798 | } 799 | 800 | layer { 801 | bottom: "res3a" 802 | bottom: "res3b_branch2c" 803 | top: "res3b" 804 | name: "res3b" 805 | type: "Eltwise" 806 | } 807 | 808 | layer { 809 | bottom: "res3b" 810 | top: "res3b" 811 | name: "res3b_relu" 812 | type: "ReLU" 813 | } 814 | 815 | layer { 816 | bottom: "res3b" 817 | top: "res3c_branch2a" 818 | name: "res3c_branch2a" 819 | type: "Convolution" 820 | convolution_param { 821 | num_output: 128 822 | kernel_size: 1 823 | pad: 0 824 | stride: 1 825 | bias_term: false 826 | } 827 | } 828 | 829 | layer { 830 | bottom: "res3c_branch2a" 831 | top: "res3c_branch2a" 832 | name: "bn3c_branch2a" 833 | type: "BatchNorm" 834 | batch_norm_param { 835 | use_global_stats: true 836 | } 837 | } 838 | 839 | layer { 840 | bottom: "res3c_branch2a" 841 | top: "res3c_branch2a" 842 | name: "scale3c_branch2a" 843 | type: "Scale" 844 | scale_param { 845 | bias_term: true 846 | } 847 | } 848 | 849 | layer { 850 | bottom: "res3c_branch2a" 851 | top: "res3c_branch2a" 852 | name: "res3c_branch2a_relu" 853 | type: "ReLU" 854 | } 855 | 856 | layer { 857 | bottom: "res3c_branch2a" 858 | top: "res3c_branch2b" 859 | name: "res3c_branch2b" 860 | type: "Convolution" 861 | convolution_param { 862 | num_output: 128 863 | kernel_size: 3 864 | pad: 1 865 | stride: 1 866 | bias_term: false 867 | } 868 | } 869 | 870 | layer { 871 | bottom: "res3c_branch2b" 872 | top: "res3c_branch2b" 873 | name: "bn3c_branch2b" 874 | type: "BatchNorm" 875 | batch_norm_param { 876 | use_global_stats: true 877 | } 878 | } 879 | 880 | layer { 881 | bottom: "res3c_branch2b" 882 | top: "res3c_branch2b" 883 | name: "scale3c_branch2b" 884 | type: "Scale" 885 | scale_param { 886 | bias_term: true 887 | } 888 | } 889 | 890 | layer { 891 | bottom: "res3c_branch2b" 892 | top: "res3c_branch2b" 893 | name: "res3c_branch2b_relu" 894 | type: "ReLU" 895 | } 896 | 897 | layer { 898 | bottom: "res3c_branch2b" 899 | top: "res3c_branch2c" 900 | name: "res3c_branch2c" 901 | type: "Convolution" 902 | convolution_param { 903 | num_output: 512 904 | kernel_size: 1 905 | pad: 0 906 | stride: 1 907 | bias_term: false 908 | } 909 | } 910 | 911 | layer { 912 | bottom: "res3c_branch2c" 913 | top: "res3c_branch2c" 914 | name: "bn3c_branch2c" 915 | type: "BatchNorm" 916 | batch_norm_param { 917 | use_global_stats: true 918 | } 919 | } 920 | 921 | layer { 922 | bottom: "res3c_branch2c" 923 | top: "res3c_branch2c" 924 | name: "scale3c_branch2c" 925 | type: "Scale" 926 | scale_param { 927 | bias_term: true 928 | } 929 | } 930 | 931 | layer { 932 | bottom: "res3b" 933 | bottom: "res3c_branch2c" 934 | top: "res3c" 935 | name: "res3c" 936 | type: "Eltwise" 937 | } 938 | 939 | layer { 940 | bottom: "res3c" 941 | top: "res3c" 942 | name: "res3c_relu" 943 | type: "ReLU" 944 | } 945 | 946 | layer { 947 | bottom: "res3c" 948 | top: "res3d_branch2a" 949 | name: "res3d_branch2a" 950 | type: "Convolution" 951 | convolution_param { 952 | num_output: 128 953 | kernel_size: 1 954 | pad: 0 955 | stride: 1 956 | bias_term: false 957 | } 958 | } 959 | 960 | layer { 961 | bottom: "res3d_branch2a" 962 | top: "res3d_branch2a" 963 | name: "bn3d_branch2a" 964 | type: "BatchNorm" 965 | batch_norm_param { 966 | use_global_stats: true 967 | } 968 | } 969 | 970 | layer { 
971 | bottom: "res3d_branch2a" 972 | top: "res3d_branch2a" 973 | name: "scale3d_branch2a" 974 | type: "Scale" 975 | scale_param { 976 | bias_term: true 977 | } 978 | } 979 | 980 | layer { 981 | bottom: "res3d_branch2a" 982 | top: "res3d_branch2a" 983 | name: "res3d_branch2a_relu" 984 | type: "ReLU" 985 | } 986 | 987 | layer { 988 | bottom: "res3d_branch2a" 989 | top: "res3d_branch2b" 990 | name: "res3d_branch2b" 991 | type: "Convolution" 992 | convolution_param { 993 | num_output: 128 994 | kernel_size: 3 995 | pad: 1 996 | stride: 1 997 | bias_term: false 998 | } 999 | } 1000 | 1001 | layer { 1002 | bottom: "res3d_branch2b" 1003 | top: "res3d_branch2b" 1004 | name: "bn3d_branch2b" 1005 | type: "BatchNorm" 1006 | batch_norm_param { 1007 | use_global_stats: true 1008 | } 1009 | } 1010 | 1011 | layer { 1012 | bottom: "res3d_branch2b" 1013 | top: "res3d_branch2b" 1014 | name: "scale3d_branch2b" 1015 | type: "Scale" 1016 | scale_param { 1017 | bias_term: true 1018 | } 1019 | } 1020 | 1021 | layer { 1022 | bottom: "res3d_branch2b" 1023 | top: "res3d_branch2b" 1024 | name: "res3d_branch2b_relu" 1025 | type: "ReLU" 1026 | } 1027 | 1028 | layer { 1029 | bottom: "res3d_branch2b" 1030 | top: "res3d_branch2c" 1031 | name: "res3d_branch2c" 1032 | type: "Convolution" 1033 | convolution_param { 1034 | num_output: 512 1035 | kernel_size: 1 1036 | pad: 0 1037 | stride: 1 1038 | bias_term: false 1039 | } 1040 | } 1041 | 1042 | layer { 1043 | bottom: "res3d_branch2c" 1044 | top: "res3d_branch2c" 1045 | name: "bn3d_branch2c" 1046 | type: "BatchNorm" 1047 | batch_norm_param { 1048 | use_global_stats: true 1049 | } 1050 | } 1051 | 1052 | layer { 1053 | bottom: "res3d_branch2c" 1054 | top: "res3d_branch2c" 1055 | name: "scale3d_branch2c" 1056 | type: "Scale" 1057 | scale_param { 1058 | bias_term: true 1059 | } 1060 | } 1061 | 1062 | layer { 1063 | bottom: "res3c" 1064 | bottom: "res3d_branch2c" 1065 | top: "res3d" 1066 | name: "res3d" 1067 | type: "Eltwise" 1068 | } 1069 | 1070 | layer { 1071 | bottom: "res3d" 1072 | top: "res3d" 1073 | name: "res3d_relu" 1074 | type: "ReLU" 1075 | } 1076 | 1077 | layer { 1078 | bottom: "res3d" 1079 | top: "res4a_branch1" 1080 | name: "res4a_branch1" 1081 | type: "Convolution" 1082 | convolution_param { 1083 | num_output: 1024 1084 | kernel_size: 1 1085 | pad: 0 1086 | stride: 2 1087 | bias_term: false 1088 | } 1089 | } 1090 | 1091 | layer { 1092 | bottom: "res4a_branch1" 1093 | top: "res4a_branch1" 1094 | name: "bn4a_branch1" 1095 | type: "BatchNorm" 1096 | batch_norm_param { 1097 | use_global_stats: true 1098 | } 1099 | } 1100 | 1101 | layer { 1102 | bottom: "res4a_branch1" 1103 | top: "res4a_branch1" 1104 | name: "scale4a_branch1" 1105 | type: "Scale" 1106 | scale_param { 1107 | bias_term: true 1108 | } 1109 | } 1110 | 1111 | layer { 1112 | bottom: "res3d" 1113 | top: "res4a_branch2a" 1114 | name: "res4a_branch2a" 1115 | type: "Convolution" 1116 | convolution_param { 1117 | num_output: 256 1118 | kernel_size: 1 1119 | pad: 0 1120 | stride: 2 1121 | bias_term: false 1122 | } 1123 | } 1124 | 1125 | layer { 1126 | bottom: "res4a_branch2a" 1127 | top: "res4a_branch2a" 1128 | name: "bn4a_branch2a" 1129 | type: "BatchNorm" 1130 | batch_norm_param { 1131 | use_global_stats: true 1132 | } 1133 | } 1134 | 1135 | layer { 1136 | bottom: "res4a_branch2a" 1137 | top: "res4a_branch2a" 1138 | name: "scale4a_branch2a" 1139 | type: "Scale" 1140 | scale_param { 1141 | bias_term: true 1142 | } 1143 | } 1144 | 1145 | layer { 1146 | bottom: "res4a_branch2a" 1147 | top: "res4a_branch2a" 1148 | 
name: "res4a_branch2a_relu" 1149 | type: "ReLU" 1150 | } 1151 | 1152 | layer { 1153 | bottom: "res4a_branch2a" 1154 | top: "res4a_branch2b" 1155 | name: "res4a_branch2b" 1156 | type: "Convolution" 1157 | convolution_param { 1158 | num_output: 256 1159 | kernel_size: 3 1160 | pad: 1 1161 | stride: 1 1162 | bias_term: false 1163 | } 1164 | } 1165 | 1166 | layer { 1167 | bottom: "res4a_branch2b" 1168 | top: "res4a_branch2b" 1169 | name: "bn4a_branch2b" 1170 | type: "BatchNorm" 1171 | batch_norm_param { 1172 | use_global_stats: true 1173 | } 1174 | } 1175 | 1176 | layer { 1177 | bottom: "res4a_branch2b" 1178 | top: "res4a_branch2b" 1179 | name: "scale4a_branch2b" 1180 | type: "Scale" 1181 | scale_param { 1182 | bias_term: true 1183 | } 1184 | } 1185 | 1186 | layer { 1187 | bottom: "res4a_branch2b" 1188 | top: "res4a_branch2b" 1189 | name: "res4a_branch2b_relu" 1190 | type: "ReLU" 1191 | } 1192 | 1193 | layer { 1194 | bottom: "res4a_branch2b" 1195 | top: "res4a_branch2c" 1196 | name: "res4a_branch2c" 1197 | type: "Convolution" 1198 | convolution_param { 1199 | num_output: 1024 1200 | kernel_size: 1 1201 | pad: 0 1202 | stride: 1 1203 | bias_term: false 1204 | } 1205 | } 1206 | 1207 | layer { 1208 | bottom: "res4a_branch2c" 1209 | top: "res4a_branch2c" 1210 | name: "bn4a_branch2c" 1211 | type: "BatchNorm" 1212 | batch_norm_param { 1213 | use_global_stats: true 1214 | } 1215 | } 1216 | 1217 | layer { 1218 | bottom: "res4a_branch2c" 1219 | top: "res4a_branch2c" 1220 | name: "scale4a_branch2c" 1221 | type: "Scale" 1222 | scale_param { 1223 | bias_term: true 1224 | } 1225 | } 1226 | 1227 | layer { 1228 | bottom: "res4a_branch1" 1229 | bottom: "res4a_branch2c" 1230 | top: "res4a" 1231 | name: "res4a" 1232 | type: "Eltwise" 1233 | } 1234 | 1235 | layer { 1236 | bottom: "res4a" 1237 | top: "res4a" 1238 | name: "res4a_relu" 1239 | type: "ReLU" 1240 | } 1241 | 1242 | layer { 1243 | bottom: "res4a" 1244 | top: "res4b_branch2a" 1245 | name: "res4b_branch2a" 1246 | type: "Convolution" 1247 | convolution_param { 1248 | num_output: 256 1249 | kernel_size: 1 1250 | pad: 0 1251 | stride: 1 1252 | bias_term: false 1253 | } 1254 | } 1255 | 1256 | layer { 1257 | bottom: "res4b_branch2a" 1258 | top: "res4b_branch2a" 1259 | name: "bn4b_branch2a" 1260 | type: "BatchNorm" 1261 | batch_norm_param { 1262 | use_global_stats: true 1263 | } 1264 | } 1265 | 1266 | layer { 1267 | bottom: "res4b_branch2a" 1268 | top: "res4b_branch2a" 1269 | name: "scale4b_branch2a" 1270 | type: "Scale" 1271 | scale_param { 1272 | bias_term: true 1273 | } 1274 | } 1275 | 1276 | layer { 1277 | bottom: "res4b_branch2a" 1278 | top: "res4b_branch2a" 1279 | name: "res4b_branch2a_relu" 1280 | type: "ReLU" 1281 | } 1282 | 1283 | layer { 1284 | bottom: "res4b_branch2a" 1285 | top: "res4b_branch2b" 1286 | name: "res4b_branch2b" 1287 | type: "Convolution" 1288 | convolution_param { 1289 | num_output: 256 1290 | kernel_size: 3 1291 | pad: 1 1292 | stride: 1 1293 | bias_term: false 1294 | } 1295 | } 1296 | 1297 | layer { 1298 | bottom: "res4b_branch2b" 1299 | top: "res4b_branch2b" 1300 | name: "bn4b_branch2b" 1301 | type: "BatchNorm" 1302 | batch_norm_param { 1303 | use_global_stats: true 1304 | } 1305 | } 1306 | 1307 | layer { 1308 | bottom: "res4b_branch2b" 1309 | top: "res4b_branch2b" 1310 | name: "scale4b_branch2b" 1311 | type: "Scale" 1312 | scale_param { 1313 | bias_term: true 1314 | } 1315 | } 1316 | 1317 | layer { 1318 | bottom: "res4b_branch2b" 1319 | top: "res4b_branch2b" 1320 | name: "res4b_branch2b_relu" 1321 | type: "ReLU" 1322 | } 1323 | 
1324 | layer { 1325 | bottom: "res4b_branch2b" 1326 | top: "res4b_branch2c" 1327 | name: "res4b_branch2c" 1328 | type: "Convolution" 1329 | convolution_param { 1330 | num_output: 1024 1331 | kernel_size: 1 1332 | pad: 0 1333 | stride: 1 1334 | bias_term: false 1335 | } 1336 | } 1337 | 1338 | layer { 1339 | bottom: "res4b_branch2c" 1340 | top: "res4b_branch2c" 1341 | name: "bn4b_branch2c" 1342 | type: "BatchNorm" 1343 | batch_norm_param { 1344 | use_global_stats: true 1345 | } 1346 | } 1347 | 1348 | layer { 1349 | bottom: "res4b_branch2c" 1350 | top: "res4b_branch2c" 1351 | name: "scale4b_branch2c" 1352 | type: "Scale" 1353 | scale_param { 1354 | bias_term: true 1355 | } 1356 | } 1357 | 1358 | layer { 1359 | bottom: "res4a" 1360 | bottom: "res4b_branch2c" 1361 | top: "res4b" 1362 | name: "res4b" 1363 | type: "Eltwise" 1364 | } 1365 | 1366 | layer { 1367 | bottom: "res4b" 1368 | top: "res4b" 1369 | name: "res4b_relu" 1370 | type: "ReLU" 1371 | } 1372 | 1373 | layer { 1374 | bottom: "res4b" 1375 | top: "res4c_branch2a" 1376 | name: "res4c_branch2a" 1377 | type: "Convolution" 1378 | convolution_param { 1379 | num_output: 256 1380 | kernel_size: 1 1381 | pad: 0 1382 | stride: 1 1383 | bias_term: false 1384 | } 1385 | } 1386 | 1387 | layer { 1388 | bottom: "res4c_branch2a" 1389 | top: "res4c_branch2a" 1390 | name: "bn4c_branch2a" 1391 | type: "BatchNorm" 1392 | batch_norm_param { 1393 | use_global_stats: true 1394 | } 1395 | } 1396 | 1397 | layer { 1398 | bottom: "res4c_branch2a" 1399 | top: "res4c_branch2a" 1400 | name: "scale4c_branch2a" 1401 | type: "Scale" 1402 | scale_param { 1403 | bias_term: true 1404 | } 1405 | } 1406 | 1407 | layer { 1408 | bottom: "res4c_branch2a" 1409 | top: "res4c_branch2a" 1410 | name: "res4c_branch2a_relu" 1411 | type: "ReLU" 1412 | } 1413 | 1414 | layer { 1415 | bottom: "res4c_branch2a" 1416 | top: "res4c_branch2b" 1417 | name: "res4c_branch2b" 1418 | type: "Convolution" 1419 | convolution_param { 1420 | num_output: 256 1421 | kernel_size: 3 1422 | pad: 1 1423 | stride: 1 1424 | bias_term: false 1425 | } 1426 | } 1427 | 1428 | layer { 1429 | bottom: "res4c_branch2b" 1430 | top: "res4c_branch2b" 1431 | name: "bn4c_branch2b" 1432 | type: "BatchNorm" 1433 | batch_norm_param { 1434 | use_global_stats: true 1435 | } 1436 | } 1437 | 1438 | layer { 1439 | bottom: "res4c_branch2b" 1440 | top: "res4c_branch2b" 1441 | name: "scale4c_branch2b" 1442 | type: "Scale" 1443 | scale_param { 1444 | bias_term: true 1445 | } 1446 | } 1447 | 1448 | layer { 1449 | bottom: "res4c_branch2b" 1450 | top: "res4c_branch2b" 1451 | name: "res4c_branch2b_relu" 1452 | type: "ReLU" 1453 | } 1454 | 1455 | layer { 1456 | bottom: "res4c_branch2b" 1457 | top: "res4c_branch2c" 1458 | name: "res4c_branch2c" 1459 | type: "Convolution" 1460 | convolution_param { 1461 | num_output: 1024 1462 | kernel_size: 1 1463 | pad: 0 1464 | stride: 1 1465 | bias_term: false 1466 | } 1467 | } 1468 | 1469 | layer { 1470 | bottom: "res4c_branch2c" 1471 | top: "res4c_branch2c" 1472 | name: "bn4c_branch2c" 1473 | type: "BatchNorm" 1474 | batch_norm_param { 1475 | use_global_stats: true 1476 | } 1477 | } 1478 | 1479 | layer { 1480 | bottom: "res4c_branch2c" 1481 | top: "res4c_branch2c" 1482 | name: "scale4c_branch2c" 1483 | type: "Scale" 1484 | scale_param { 1485 | bias_term: true 1486 | } 1487 | } 1488 | 1489 | layer { 1490 | bottom: "res4b" 1491 | bottom: "res4c_branch2c" 1492 | top: "res4c" 1493 | name: "res4c" 1494 | type: "Eltwise" 1495 | } 1496 | 1497 | layer { 1498 | bottom: "res4c" 1499 | top: "res4c" 1500 | name: 
"res4c_relu" 1501 | type: "ReLU" 1502 | } 1503 | 1504 | layer { 1505 | bottom: "res4c" 1506 | top: "res4d_branch2a" 1507 | name: "res4d_branch2a" 1508 | type: "Convolution" 1509 | convolution_param { 1510 | num_output: 256 1511 | kernel_size: 1 1512 | pad: 0 1513 | stride: 1 1514 | bias_term: false 1515 | } 1516 | } 1517 | 1518 | layer { 1519 | bottom: "res4d_branch2a" 1520 | top: "res4d_branch2a" 1521 | name: "bn4d_branch2a" 1522 | type: "BatchNorm" 1523 | batch_norm_param { 1524 | use_global_stats: true 1525 | } 1526 | } 1527 | 1528 | layer { 1529 | bottom: "res4d_branch2a" 1530 | top: "res4d_branch2a" 1531 | name: "scale4d_branch2a" 1532 | type: "Scale" 1533 | scale_param { 1534 | bias_term: true 1535 | } 1536 | } 1537 | 1538 | layer { 1539 | bottom: "res4d_branch2a" 1540 | top: "res4d_branch2a" 1541 | name: "res4d_branch2a_relu" 1542 | type: "ReLU" 1543 | } 1544 | 1545 | layer { 1546 | bottom: "res4d_branch2a" 1547 | top: "res4d_branch2b" 1548 | name: "res4d_branch2b" 1549 | type: "Convolution" 1550 | convolution_param { 1551 | num_output: 256 1552 | kernel_size: 3 1553 | pad: 1 1554 | stride: 1 1555 | bias_term: false 1556 | } 1557 | } 1558 | 1559 | layer { 1560 | bottom: "res4d_branch2b" 1561 | top: "res4d_branch2b" 1562 | name: "bn4d_branch2b" 1563 | type: "BatchNorm" 1564 | batch_norm_param { 1565 | use_global_stats: true 1566 | } 1567 | } 1568 | 1569 | layer { 1570 | bottom: "res4d_branch2b" 1571 | top: "res4d_branch2b" 1572 | name: "scale4d_branch2b" 1573 | type: "Scale" 1574 | scale_param { 1575 | bias_term: true 1576 | } 1577 | } 1578 | 1579 | layer { 1580 | bottom: "res4d_branch2b" 1581 | top: "res4d_branch2b" 1582 | name: "res4d_branch2b_relu" 1583 | type: "ReLU" 1584 | } 1585 | 1586 | layer { 1587 | bottom: "res4d_branch2b" 1588 | top: "res4d_branch2c" 1589 | name: "res4d_branch2c" 1590 | type: "Convolution" 1591 | convolution_param { 1592 | num_output: 1024 1593 | kernel_size: 1 1594 | pad: 0 1595 | stride: 1 1596 | bias_term: false 1597 | } 1598 | } 1599 | 1600 | layer { 1601 | bottom: "res4d_branch2c" 1602 | top: "res4d_branch2c" 1603 | name: "bn4d_branch2c" 1604 | type: "BatchNorm" 1605 | batch_norm_param { 1606 | use_global_stats: true 1607 | } 1608 | } 1609 | 1610 | layer { 1611 | bottom: "res4d_branch2c" 1612 | top: "res4d_branch2c" 1613 | name: "scale4d_branch2c" 1614 | type: "Scale" 1615 | scale_param { 1616 | bias_term: true 1617 | } 1618 | } 1619 | 1620 | layer { 1621 | bottom: "res4c" 1622 | bottom: "res4d_branch2c" 1623 | top: "res4d" 1624 | name: "res4d" 1625 | type: "Eltwise" 1626 | } 1627 | 1628 | layer { 1629 | bottom: "res4d" 1630 | top: "res4d" 1631 | name: "res4d_relu" 1632 | type: "ReLU" 1633 | } 1634 | 1635 | layer { 1636 | bottom: "res4d" 1637 | top: "res4e_branch2a" 1638 | name: "res4e_branch2a" 1639 | type: "Convolution" 1640 | convolution_param { 1641 | num_output: 256 1642 | kernel_size: 1 1643 | pad: 0 1644 | stride: 1 1645 | bias_term: false 1646 | } 1647 | } 1648 | 1649 | layer { 1650 | bottom: "res4e_branch2a" 1651 | top: "res4e_branch2a" 1652 | name: "bn4e_branch2a" 1653 | type: "BatchNorm" 1654 | batch_norm_param { 1655 | use_global_stats: true 1656 | } 1657 | } 1658 | 1659 | layer { 1660 | bottom: "res4e_branch2a" 1661 | top: "res4e_branch2a" 1662 | name: "scale4e_branch2a" 1663 | type: "Scale" 1664 | scale_param { 1665 | bias_term: true 1666 | } 1667 | } 1668 | 1669 | layer { 1670 | bottom: "res4e_branch2a" 1671 | top: "res4e_branch2a" 1672 | name: "res4e_branch2a_relu" 1673 | type: "ReLU" 1674 | } 1675 | 1676 | layer { 1677 | bottom: 
"res4e_branch2a" 1678 | top: "res4e_branch2b" 1679 | name: "res4e_branch2b" 1680 | type: "Convolution" 1681 | convolution_param { 1682 | num_output: 256 1683 | kernel_size: 3 1684 | pad: 1 1685 | stride: 1 1686 | bias_term: false 1687 | } 1688 | } 1689 | 1690 | layer { 1691 | bottom: "res4e_branch2b" 1692 | top: "res4e_branch2b" 1693 | name: "bn4e_branch2b" 1694 | type: "BatchNorm" 1695 | batch_norm_param { 1696 | use_global_stats: true 1697 | } 1698 | } 1699 | 1700 | layer { 1701 | bottom: "res4e_branch2b" 1702 | top: "res4e_branch2b" 1703 | name: "scale4e_branch2b" 1704 | type: "Scale" 1705 | scale_param { 1706 | bias_term: true 1707 | } 1708 | } 1709 | 1710 | layer { 1711 | bottom: "res4e_branch2b" 1712 | top: "res4e_branch2b" 1713 | name: "res4e_branch2b_relu" 1714 | type: "ReLU" 1715 | } 1716 | 1717 | layer { 1718 | bottom: "res4e_branch2b" 1719 | top: "res4e_branch2c" 1720 | name: "res4e_branch2c" 1721 | type: "Convolution" 1722 | convolution_param { 1723 | num_output: 1024 1724 | kernel_size: 1 1725 | pad: 0 1726 | stride: 1 1727 | bias_term: false 1728 | } 1729 | } 1730 | 1731 | layer { 1732 | bottom: "res4e_branch2c" 1733 | top: "res4e_branch2c" 1734 | name: "bn4e_branch2c" 1735 | type: "BatchNorm" 1736 | batch_norm_param { 1737 | use_global_stats: true 1738 | } 1739 | } 1740 | 1741 | layer { 1742 | bottom: "res4e_branch2c" 1743 | top: "res4e_branch2c" 1744 | name: "scale4e_branch2c" 1745 | type: "Scale" 1746 | scale_param { 1747 | bias_term: true 1748 | } 1749 | } 1750 | 1751 | layer { 1752 | bottom: "res4d" 1753 | bottom: "res4e_branch2c" 1754 | top: "res4e" 1755 | name: "res4e" 1756 | type: "Eltwise" 1757 | } 1758 | 1759 | layer { 1760 | bottom: "res4e" 1761 | top: "res4e" 1762 | name: "res4e_relu" 1763 | type: "ReLU" 1764 | } 1765 | 1766 | layer { 1767 | bottom: "res4e" 1768 | top: "res4f_branch2a" 1769 | name: "res4f_branch2a" 1770 | type: "Convolution" 1771 | convolution_param { 1772 | num_output: 256 1773 | kernel_size: 1 1774 | pad: 0 1775 | stride: 1 1776 | bias_term: false 1777 | } 1778 | } 1779 | 1780 | layer { 1781 | bottom: "res4f_branch2a" 1782 | top: "res4f_branch2a" 1783 | name: "bn4f_branch2a" 1784 | type: "BatchNorm" 1785 | batch_norm_param { 1786 | use_global_stats: true 1787 | } 1788 | } 1789 | 1790 | layer { 1791 | bottom: "res4f_branch2a" 1792 | top: "res4f_branch2a" 1793 | name: "scale4f_branch2a" 1794 | type: "Scale" 1795 | scale_param { 1796 | bias_term: true 1797 | } 1798 | } 1799 | 1800 | layer { 1801 | bottom: "res4f_branch2a" 1802 | top: "res4f_branch2a" 1803 | name: "res4f_branch2a_relu" 1804 | type: "ReLU" 1805 | } 1806 | 1807 | layer { 1808 | bottom: "res4f_branch2a" 1809 | top: "res4f_branch2b" 1810 | name: "res4f_branch2b" 1811 | type: "Convolution" 1812 | convolution_param { 1813 | num_output: 256 1814 | kernel_size: 3 1815 | pad: 1 1816 | stride: 1 1817 | bias_term: false 1818 | } 1819 | } 1820 | 1821 | layer { 1822 | bottom: "res4f_branch2b" 1823 | top: "res4f_branch2b" 1824 | name: "bn4f_branch2b" 1825 | type: "BatchNorm" 1826 | batch_norm_param { 1827 | use_global_stats: true 1828 | } 1829 | } 1830 | 1831 | layer { 1832 | bottom: "res4f_branch2b" 1833 | top: "res4f_branch2b" 1834 | name: "scale4f_branch2b" 1835 | type: "Scale" 1836 | scale_param { 1837 | bias_term: true 1838 | } 1839 | } 1840 | 1841 | layer { 1842 | bottom: "res4f_branch2b" 1843 | top: "res4f_branch2b" 1844 | name: "res4f_branch2b_relu" 1845 | type: "ReLU" 1846 | } 1847 | 1848 | layer { 1849 | bottom: "res4f_branch2b" 1850 | top: "res4f_branch2c" 1851 | name: "res4f_branch2c" 
1852 | type: "Convolution" 1853 | convolution_param { 1854 | num_output: 1024 1855 | kernel_size: 1 1856 | pad: 0 1857 | stride: 1 1858 | bias_term: false 1859 | } 1860 | } 1861 | 1862 | layer { 1863 | bottom: "res4f_branch2c" 1864 | top: "res4f_branch2c" 1865 | name: "bn4f_branch2c" 1866 | type: "BatchNorm" 1867 | batch_norm_param { 1868 | use_global_stats: true 1869 | } 1870 | } 1871 | 1872 | layer { 1873 | bottom: "res4f_branch2c" 1874 | top: "res4f_branch2c" 1875 | name: "scale4f_branch2c" 1876 | type: "Scale" 1877 | scale_param { 1878 | bias_term: true 1879 | } 1880 | } 1881 | 1882 | layer { 1883 | bottom: "res4e" 1884 | bottom: "res4f_branch2c" 1885 | top: "res4f" 1886 | name: "res4f" 1887 | type: "Eltwise" 1888 | } 1889 | 1890 | layer { 1891 | bottom: "res4f" 1892 | top: "res4f" 1893 | name: "res4f_relu" 1894 | type: "ReLU" 1895 | } 1896 | 1897 | layer { 1898 | bottom: "res4f" 1899 | top: "res5a_branch1" 1900 | name: "res5a_branch1" 1901 | type: "Convolution" 1902 | convolution_param { 1903 | num_output: 2048 1904 | kernel_size: 1 1905 | pad: 0 1906 | stride: 2 1907 | bias_term: false 1908 | } 1909 | } 1910 | 1911 | layer { 1912 | bottom: "res5a_branch1" 1913 | top: "res5a_branch1" 1914 | name: "bn5a_branch1" 1915 | type: "BatchNorm" 1916 | batch_norm_param { 1917 | use_global_stats: true 1918 | } 1919 | } 1920 | 1921 | layer { 1922 | bottom: "res5a_branch1" 1923 | top: "res5a_branch1" 1924 | name: "scale5a_branch1" 1925 | type: "Scale" 1926 | scale_param { 1927 | bias_term: true 1928 | } 1929 | } 1930 | 1931 | layer { 1932 | bottom: "res4f" 1933 | top: "res5a_branch2a" 1934 | name: "res5a_branch2a" 1935 | type: "Convolution" 1936 | convolution_param { 1937 | num_output: 512 1938 | kernel_size: 1 1939 | pad: 0 1940 | stride: 2 1941 | bias_term: false 1942 | } 1943 | } 1944 | 1945 | layer { 1946 | bottom: "res5a_branch2a" 1947 | top: "res5a_branch2a" 1948 | name: "bn5a_branch2a" 1949 | type: "BatchNorm" 1950 | batch_norm_param { 1951 | use_global_stats: true 1952 | } 1953 | } 1954 | 1955 | layer { 1956 | bottom: "res5a_branch2a" 1957 | top: "res5a_branch2a" 1958 | name: "scale5a_branch2a" 1959 | type: "Scale" 1960 | scale_param { 1961 | bias_term: true 1962 | } 1963 | } 1964 | 1965 | layer { 1966 | bottom: "res5a_branch2a" 1967 | top: "res5a_branch2a" 1968 | name: "res5a_branch2a_relu" 1969 | type: "ReLU" 1970 | } 1971 | 1972 | layer { 1973 | bottom: "res5a_branch2a" 1974 | top: "res5a_branch2b" 1975 | name: "res5a_branch2b" 1976 | type: "Convolution" 1977 | convolution_param { 1978 | num_output: 512 1979 | kernel_size: 3 1980 | pad: 1 1981 | stride: 1 1982 | bias_term: false 1983 | } 1984 | } 1985 | 1986 | layer { 1987 | bottom: "res5a_branch2b" 1988 | top: "res5a_branch2b" 1989 | name: "bn5a_branch2b" 1990 | type: "BatchNorm" 1991 | batch_norm_param { 1992 | use_global_stats: true 1993 | } 1994 | } 1995 | 1996 | layer { 1997 | bottom: "res5a_branch2b" 1998 | top: "res5a_branch2b" 1999 | name: "scale5a_branch2b" 2000 | type: "Scale" 2001 | scale_param { 2002 | bias_term: true 2003 | } 2004 | } 2005 | 2006 | layer { 2007 | bottom: "res5a_branch2b" 2008 | top: "res5a_branch2b" 2009 | name: "res5a_branch2b_relu" 2010 | type: "ReLU" 2011 | } 2012 | 2013 | layer { 2014 | bottom: "res5a_branch2b" 2015 | top: "res5a_branch2c" 2016 | name: "res5a_branch2c" 2017 | type: "Convolution" 2018 | convolution_param { 2019 | num_output: 2048 2020 | kernel_size: 1 2021 | pad: 0 2022 | stride: 1 2023 | bias_term: false 2024 | } 2025 | } 2026 | 2027 | layer { 2028 | bottom: "res5a_branch2c" 2029 | top: 
"res5a_branch2c" 2030 | name: "bn5a_branch2c" 2031 | type: "BatchNorm" 2032 | batch_norm_param { 2033 | use_global_stats: true 2034 | } 2035 | } 2036 | 2037 | layer { 2038 | bottom: "res5a_branch2c" 2039 | top: "res5a_branch2c" 2040 | name: "scale5a_branch2c" 2041 | type: "Scale" 2042 | scale_param { 2043 | bias_term: true 2044 | } 2045 | } 2046 | 2047 | layer { 2048 | bottom: "res5a_branch1" 2049 | bottom: "res5a_branch2c" 2050 | top: "res5a" 2051 | name: "res5a" 2052 | type: "Eltwise" 2053 | } 2054 | 2055 | layer { 2056 | bottom: "res5a" 2057 | top: "res5a" 2058 | name: "res5a_relu" 2059 | type: "ReLU" 2060 | } 2061 | 2062 | layer { 2063 | bottom: "res5a" 2064 | top: "res5b_branch2a" 2065 | name: "res5b_branch2a" 2066 | type: "Convolution" 2067 | convolution_param { 2068 | num_output: 512 2069 | kernel_size: 1 2070 | pad: 0 2071 | stride: 1 2072 | bias_term: false 2073 | } 2074 | } 2075 | 2076 | layer { 2077 | bottom: "res5b_branch2a" 2078 | top: "res5b_branch2a" 2079 | name: "bn5b_branch2a" 2080 | type: "BatchNorm" 2081 | batch_norm_param { 2082 | use_global_stats: true 2083 | } 2084 | } 2085 | 2086 | layer { 2087 | bottom: "res5b_branch2a" 2088 | top: "res5b_branch2a" 2089 | name: "scale5b_branch2a" 2090 | type: "Scale" 2091 | scale_param { 2092 | bias_term: true 2093 | } 2094 | } 2095 | 2096 | layer { 2097 | bottom: "res5b_branch2a" 2098 | top: "res5b_branch2a" 2099 | name: "res5b_branch2a_relu" 2100 | type: "ReLU" 2101 | } 2102 | 2103 | layer { 2104 | bottom: "res5b_branch2a" 2105 | top: "res5b_branch2b" 2106 | name: "res5b_branch2b" 2107 | type: "Convolution" 2108 | convolution_param { 2109 | num_output: 512 2110 | kernel_size: 3 2111 | pad: 1 2112 | stride: 1 2113 | bias_term: false 2114 | } 2115 | } 2116 | 2117 | layer { 2118 | bottom: "res5b_branch2b" 2119 | top: "res5b_branch2b" 2120 | name: "bn5b_branch2b" 2121 | type: "BatchNorm" 2122 | batch_norm_param { 2123 | use_global_stats: true 2124 | } 2125 | } 2126 | 2127 | layer { 2128 | bottom: "res5b_branch2b" 2129 | top: "res5b_branch2b" 2130 | name: "scale5b_branch2b" 2131 | type: "Scale" 2132 | scale_param { 2133 | bias_term: true 2134 | } 2135 | } 2136 | 2137 | layer { 2138 | bottom: "res5b_branch2b" 2139 | top: "res5b_branch2b" 2140 | name: "res5b_branch2b_relu" 2141 | type: "ReLU" 2142 | } 2143 | 2144 | layer { 2145 | bottom: "res5b_branch2b" 2146 | top: "res5b_branch2c" 2147 | name: "res5b_branch2c" 2148 | type: "Convolution" 2149 | convolution_param { 2150 | num_output: 2048 2151 | kernel_size: 1 2152 | pad: 0 2153 | stride: 1 2154 | bias_term: false 2155 | } 2156 | } 2157 | 2158 | layer { 2159 | bottom: "res5b_branch2c" 2160 | top: "res5b_branch2c" 2161 | name: "bn5b_branch2c" 2162 | type: "BatchNorm" 2163 | batch_norm_param { 2164 | use_global_stats: true 2165 | } 2166 | } 2167 | 2168 | layer { 2169 | bottom: "res5b_branch2c" 2170 | top: "res5b_branch2c" 2171 | name: "scale5b_branch2c" 2172 | type: "Scale" 2173 | scale_param { 2174 | bias_term: true 2175 | } 2176 | } 2177 | 2178 | layer { 2179 | bottom: "res5a" 2180 | bottom: "res5b_branch2c" 2181 | top: "res5b" 2182 | name: "res5b" 2183 | type: "Eltwise" 2184 | } 2185 | 2186 | layer { 2187 | bottom: "res5b" 2188 | top: "res5b" 2189 | name: "res5b_relu" 2190 | type: "ReLU" 2191 | } 2192 | 2193 | layer { 2194 | bottom: "res5b" 2195 | top: "res5c_branch2a" 2196 | name: "res5c_branch2a" 2197 | type: "Convolution" 2198 | convolution_param { 2199 | num_output: 512 2200 | kernel_size: 1 2201 | pad: 0 2202 | stride: 1 2203 | bias_term: false 2204 | } 2205 | } 2206 | 2207 | layer 
{ 2208 | bottom: "res5c_branch2a" 2209 | top: "res5c_branch2a" 2210 | name: "bn5c_branch2a" 2211 | type: "BatchNorm" 2212 | batch_norm_param { 2213 | use_global_stats: true 2214 | } 2215 | } 2216 | 2217 | layer { 2218 | bottom: "res5c_branch2a" 2219 | top: "res5c_branch2a" 2220 | name: "scale5c_branch2a" 2221 | type: "Scale" 2222 | scale_param { 2223 | bias_term: true 2224 | } 2225 | } 2226 | 2227 | layer { 2228 | bottom: "res5c_branch2a" 2229 | top: "res5c_branch2a" 2230 | name: "res5c_branch2a_relu" 2231 | type: "ReLU" 2232 | } 2233 | 2234 | layer { 2235 | bottom: "res5c_branch2a" 2236 | top: "res5c_branch2b" 2237 | name: "res5c_branch2b" 2238 | type: "Convolution" 2239 | convolution_param { 2240 | num_output: 512 2241 | kernel_size: 3 2242 | pad: 1 2243 | stride: 1 2244 | bias_term: false 2245 | } 2246 | } 2247 | 2248 | layer { 2249 | bottom: "res5c_branch2b" 2250 | top: "res5c_branch2b" 2251 | name: "bn5c_branch2b" 2252 | type: "BatchNorm" 2253 | batch_norm_param { 2254 | use_global_stats: true 2255 | } 2256 | } 2257 | 2258 | layer { 2259 | bottom: "res5c_branch2b" 2260 | top: "res5c_branch2b" 2261 | name: "scale5c_branch2b" 2262 | type: "Scale" 2263 | scale_param { 2264 | bias_term: true 2265 | } 2266 | } 2267 | 2268 | layer { 2269 | bottom: "res5c_branch2b" 2270 | top: "res5c_branch2b" 2271 | name: "res5c_branch2b_relu" 2272 | type: "ReLU" 2273 | } 2274 | 2275 | layer { 2276 | bottom: "res5c_branch2b" 2277 | top: "res5c_branch2c" 2278 | name: "res5c_branch2c" 2279 | type: "Convolution" 2280 | convolution_param { 2281 | num_output: 2048 2282 | kernel_size: 1 2283 | pad: 0 2284 | stride: 1 2285 | bias_term: false 2286 | } 2287 | } 2288 | 2289 | layer { 2290 | bottom: "res5c_branch2c" 2291 | top: "res5c_branch2c" 2292 | name: "bn5c_branch2c" 2293 | type: "BatchNorm" 2294 | batch_norm_param { 2295 | use_global_stats: true 2296 | } 2297 | } 2298 | 2299 | layer { 2300 | bottom: "res5c_branch2c" 2301 | top: "res5c_branch2c" 2302 | name: "scale5c_branch2c" 2303 | type: "Scale" 2304 | scale_param { 2305 | bias_term: true 2306 | } 2307 | } 2308 | 2309 | layer { 2310 | bottom: "res5b" 2311 | bottom: "res5c_branch2c" 2312 | top: "res5c" 2313 | name: "res5c" 2314 | type: "Eltwise" 2315 | } 2316 | 2317 | layer { 2318 | bottom: "res5c" 2319 | top: "res5c" 2320 | name: "res5c_relu" 2321 | type: "ReLU" 2322 | } 2323 | 2324 | layer { 2325 | bottom: "res5c" 2326 | top: "pool5" 2327 | name: "pool5" 2328 | type: "Pooling" 2329 | pooling_param { 2330 | kernel_size: 7 2331 | stride: 1 2332 | pool: AVE 2333 | } 2334 | } 2335 | 2336 | layer { 2337 | bottom: "pool5" 2338 | top: "my-classifier" 2339 | name: "my-classifier" 2340 | type: "InnerProduct" 2341 | param { 2342 | lr_mult: 10 2343 | decay_mult: 1 2344 | } 2345 | param { 2346 | lr_mult: 20 2347 | decay_mult: 0 2348 | } 2349 | inner_product_param { 2350 | num_output: 3 2351 | } 2352 | } 2353 | layer { 2354 | name: "loss1/loss1" 2355 | type: "SoftmaxWithLoss" 2356 | bottom: "my-classifier" 2357 | bottom: "label" 2358 | top: "loss1/loss1" 2359 | loss_weight: 0.3 2360 | } 2361 | layer { 2362 | name: "test/loss" 2363 | type: "SoftmaxWithLoss" 2364 | bottom: "my-classifier" 2365 | bottom: "label" 2366 | top: "test/loss" 2367 | include { 2368 | phase: TEST 2369 | } 2370 | } 2371 | layer { 2372 | name: "accuracy" 2373 | type: "Accuracy" 2374 | bottom: "my-classifier" 2375 | bottom: "label" 2376 | top: "accuracy" 2377 | include { 2378 | phase: TEST 2379 | } 2380 | } 2381 | -------------------------------------------------------------------------------- 
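Note on the train_val definition above: it swaps the original 1000-way ImageNet classifier for a 3-class InnerProduct layer ("my-classifier") whose lr_mult is raised to 10 (weights) and 20 (bias), and every BatchNorm layer runs with use_global_stats: true, so the file is set up for fine-tuning from pretrained weights rather than training from scratch. Below is a minimal pycaffe sketch of how such a solver/train_val pair is typically driven; the solver path, the pretrained weights file name and the GPU id are illustrative assumptions, not files shipped in this listing.

# Minimal fine-tuning sketch with pycaffe (assumed paths; adjust to your setup).
import caffe

caffe.set_device(0)        # assumed GPU id; the solver files in this collection use solver_mode: GPU
caffe.set_mode_gpu()

# The solver prototxt points at train_val.prototxt through its "net:" field.
solver = caffe.SGDSolver('ResNet-50/solver.prototxt')

# Copy ImageNet-pretrained weights by layer name; the renamed 3-class
# "my-classifier" layer does not exist in the pretrained model, so it is
# left untouched and learned during fine-tuning.
solver.net.copy_from('ResNet-50-model.caffemodel')   # assumed weights file, not part of this listing

solver.solve()             # train until max_iter from the solver settings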
/SE-BN-Inception/solver.prototxt: -------------------------------------------------------------------------------- 1 | # the definition of the neural network model 2 | net: "train_val.prototxt" 3 | # test_iter is tied to the batch_size of the TEST data layer: test_iter * batch_size = the number of test samples 4 | #test_iter: 761 5 | # carry out a test pass once every test_interval training iterations (disabled here) 6 | #test_interval: 10000 7 | # set test_initialization: false to skip the test pass before training starts 8 | # test_initialization: false 9 | # display training information once every iteration 10 | display: 1 11 | # 12 | #average_loss: 40 13 | # the initial learning rate 14 | base_lr: 0.001 15 | lr_policy: "poly" 16 | stepsize: 320000 17 | gamma: 0.96 18 | # The max number of iterations 19 | max_iter: 5000 20 | power: 1.0 21 | momentum: 0.9 22 | # weight decay term, to guard against overfitting 23 | weight_decay: 0.0002 24 | # save a snapshot once every 25 training iterations 25 | snapshot: 25 26 | # snapshot file name prefix 27 | snapshot_prefix: "se-bn-inception-dog" 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- /SE-ResNet-50/solver.prototxt: -------------------------------------------------------------------------------- 1 | # the definition of the neural network model 2 | net: "train_val.prototxt" 3 | # test_iter is tied to the batch_size of the TEST data layer: test_iter * batch_size = the number of test samples 4 | #test_iter: 761 5 | # carry out a test pass once every test_interval training iterations (disabled here) 6 | #test_interval: 10000 7 | # set test_initialization: false to skip the test pass before training starts 8 | # test_initialization: false 9 | # display training information once every iteration 10 | display: 1 11 | # 12 | #average_loss: 40 13 | # the initial learning rate 14 | base_lr: 0.001 15 | lr_policy: "poly" 16 | stepsize: 320000 17 | gamma: 0.96 18 | # The max number of iterations 19 | max_iter: 5000 20 | power: 1.0 21 | momentum: 0.9 22 | # weight decay term, to guard against overfitting 23 | weight_decay: 0.0002 24 | # save a snapshot once every 50 training iterations 25 | snapshot: 50 26 | # snapshot file name prefix 27 | snapshot_prefix: "se-resnet-50-dog" 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- /SqueezeNet/deploy.prototxt: -------------------------------------------------------------------------------- 1 | input: "data" 2 | input_shape { 3 | dim: 1 4 | dim: 3 5 | dim: 227 6 | dim: 227 7 | } 8 | layer { 9 | name: "conv1" 10 | type: "Convolution" 11 | bottom: "data" 12 | top: "conv1" 13 | convolution_param { 14 | num_output: 64 15 | kernel_size: 3 16 | stride: 2 17 | weight_filler { 18 | type: "xavier" 19 | } 20 | } 21 | } 22 | layer { 23 | name: "relu_conv1" 24 | type: "ReLU" 25 | bottom: "conv1" 26 | top: "conv1" 27 | } 28 | layer { 29 | name: "pool1" 30 | type: "Pooling" 31 | bottom: "conv1" 32 | top: "pool1" 33 | pooling_param { 34 | pool: MAX 35 | kernel_size: 3 36 | stride: 2 37 | } 38 | } 39 | layer { 40 | name: "fire2/squeeze1x1" 41 | type: "Convolution" 42 | bottom: "pool1" 43 | top: "fire2/squeeze1x1" 44 | convolution_param { 45 | num_output: 16 46 | kernel_size: 1 47 | weight_filler { 48 | type: "xavier" 49 | } 50 | } 51 | } 52 | layer { 53 | name: "fire2/relu_squeeze1x1" 54 | type: "ReLU" 55 | bottom: "fire2/squeeze1x1" 56 | top: "fire2/squeeze1x1" 57 | } 58 | layer { 59 | name: "fire2/expand1x1" 60 | type: "Convolution" 61 | bottom: "fire2/squeeze1x1" 62 | top: "fire2/expand1x1" 63 | convolution_param { 64 | num_output: 64 65 | kernel_size: 1 66 | weight_filler { 67 | type: "xavier" 68 | } 69 | } 70 | } 71
| layer { 72 | name: "fire2/relu_expand1x1" 73 | type: "ReLU" 74 | bottom: "fire2/expand1x1" 75 | top: "fire2/expand1x1" 76 | } 77 | layer { 78 | name: "fire2/expand3x3" 79 | type: "Convolution" 80 | bottom: "fire2/squeeze1x1" 81 | top: "fire2/expand3x3" 82 | convolution_param { 83 | num_output: 64 84 | pad: 1 85 | kernel_size: 3 86 | weight_filler { 87 | type: "xavier" 88 | } 89 | } 90 | } 91 | layer { 92 | name: "fire2/relu_expand3x3" 93 | type: "ReLU" 94 | bottom: "fire2/expand3x3" 95 | top: "fire2/expand3x3" 96 | } 97 | layer { 98 | name: "fire2/concat" 99 | type: "Concat" 100 | bottom: "fire2/expand1x1" 101 | bottom: "fire2/expand3x3" 102 | top: "fire2/concat" 103 | } 104 | layer { 105 | name: "fire3/squeeze1x1" 106 | type: "Convolution" 107 | bottom: "fire2/concat" 108 | top: "fire3/squeeze1x1" 109 | convolution_param { 110 | num_output: 16 111 | kernel_size: 1 112 | weight_filler { 113 | type: "xavier" 114 | } 115 | } 116 | } 117 | layer { 118 | name: "fire3/relu_squeeze1x1" 119 | type: "ReLU" 120 | bottom: "fire3/squeeze1x1" 121 | top: "fire3/squeeze1x1" 122 | } 123 | layer { 124 | name: "fire3/expand1x1" 125 | type: "Convolution" 126 | bottom: "fire3/squeeze1x1" 127 | top: "fire3/expand1x1" 128 | convolution_param { 129 | num_output: 64 130 | kernel_size: 1 131 | weight_filler { 132 | type: "xavier" 133 | } 134 | } 135 | } 136 | layer { 137 | name: "fire3/relu_expand1x1" 138 | type: "ReLU" 139 | bottom: "fire3/expand1x1" 140 | top: "fire3/expand1x1" 141 | } 142 | layer { 143 | name: "fire3/expand3x3" 144 | type: "Convolution" 145 | bottom: "fire3/squeeze1x1" 146 | top: "fire3/expand3x3" 147 | convolution_param { 148 | num_output: 64 149 | pad: 1 150 | kernel_size: 3 151 | weight_filler { 152 | type: "xavier" 153 | } 154 | } 155 | } 156 | layer { 157 | name: "fire3/relu_expand3x3" 158 | type: "ReLU" 159 | bottom: "fire3/expand3x3" 160 | top: "fire3/expand3x3" 161 | } 162 | layer { 163 | name: "fire3/concat" 164 | type: "Concat" 165 | bottom: "fire3/expand1x1" 166 | bottom: "fire3/expand3x3" 167 | top: "fire3/concat" 168 | } 169 | layer { 170 | name: "pool3" 171 | type: "Pooling" 172 | bottom: "fire3/concat" 173 | top: "pool3" 174 | pooling_param { 175 | pool: MAX 176 | kernel_size: 3 177 | stride: 2 178 | } 179 | } 180 | layer { 181 | name: "fire4/squeeze1x1" 182 | type: "Convolution" 183 | bottom: "pool3" 184 | top: "fire4/squeeze1x1" 185 | convolution_param { 186 | num_output: 32 187 | kernel_size: 1 188 | weight_filler { 189 | type: "xavier" 190 | } 191 | } 192 | } 193 | layer { 194 | name: "fire4/relu_squeeze1x1" 195 | type: "ReLU" 196 | bottom: "fire4/squeeze1x1" 197 | top: "fire4/squeeze1x1" 198 | } 199 | layer { 200 | name: "fire4/expand1x1" 201 | type: "Convolution" 202 | bottom: "fire4/squeeze1x1" 203 | top: "fire4/expand1x1" 204 | convolution_param { 205 | num_output: 128 206 | kernel_size: 1 207 | weight_filler { 208 | type: "xavier" 209 | } 210 | } 211 | } 212 | layer { 213 | name: "fire4/relu_expand1x1" 214 | type: "ReLU" 215 | bottom: "fire4/expand1x1" 216 | top: "fire4/expand1x1" 217 | } 218 | layer { 219 | name: "fire4/expand3x3" 220 | type: "Convolution" 221 | bottom: "fire4/squeeze1x1" 222 | top: "fire4/expand3x3" 223 | convolution_param { 224 | num_output: 128 225 | pad: 1 226 | kernel_size: 3 227 | weight_filler { 228 | type: "xavier" 229 | } 230 | } 231 | } 232 | layer { 233 | name: "fire4/relu_expand3x3" 234 | type: "ReLU" 235 | bottom: "fire4/expand3x3" 236 | top: "fire4/expand3x3" 237 | } 238 | layer { 239 | name: "fire4/concat" 240 | type: "Concat" 241 | 
bottom: "fire4/expand1x1" 242 | bottom: "fire4/expand3x3" 243 | top: "fire4/concat" 244 | } 245 | layer { 246 | name: "fire5/squeeze1x1" 247 | type: "Convolution" 248 | bottom: "fire4/concat" 249 | top: "fire5/squeeze1x1" 250 | convolution_param { 251 | num_output: 32 252 | kernel_size: 1 253 | weight_filler { 254 | type: "xavier" 255 | } 256 | } 257 | } 258 | layer { 259 | name: "fire5/relu_squeeze1x1" 260 | type: "ReLU" 261 | bottom: "fire5/squeeze1x1" 262 | top: "fire5/squeeze1x1" 263 | } 264 | layer { 265 | name: "fire5/expand1x1" 266 | type: "Convolution" 267 | bottom: "fire5/squeeze1x1" 268 | top: "fire5/expand1x1" 269 | convolution_param { 270 | num_output: 128 271 | kernel_size: 1 272 | weight_filler { 273 | type: "xavier" 274 | } 275 | } 276 | } 277 | layer { 278 | name: "fire5/relu_expand1x1" 279 | type: "ReLU" 280 | bottom: "fire5/expand1x1" 281 | top: "fire5/expand1x1" 282 | } 283 | layer { 284 | name: "fire5/expand3x3" 285 | type: "Convolution" 286 | bottom: "fire5/squeeze1x1" 287 | top: "fire5/expand3x3" 288 | convolution_param { 289 | num_output: 128 290 | pad: 1 291 | kernel_size: 3 292 | weight_filler { 293 | type: "xavier" 294 | } 295 | } 296 | } 297 | layer { 298 | name: "fire5/relu_expand3x3" 299 | type: "ReLU" 300 | bottom: "fire5/expand3x3" 301 | top: "fire5/expand3x3" 302 | } 303 | layer { 304 | name: "fire5/concat" 305 | type: "Concat" 306 | bottom: "fire5/expand1x1" 307 | bottom: "fire5/expand3x3" 308 | top: "fire5/concat" 309 | } 310 | layer { 311 | name: "pool5" 312 | type: "Pooling" 313 | bottom: "fire5/concat" 314 | top: "pool5" 315 | pooling_param { 316 | pool: MAX 317 | kernel_size: 3 318 | stride: 2 319 | } 320 | } 321 | layer { 322 | name: "fire6/squeeze1x1" 323 | type: "Convolution" 324 | bottom: "pool5" 325 | top: "fire6/squeeze1x1" 326 | convolution_param { 327 | num_output: 48 328 | kernel_size: 1 329 | weight_filler { 330 | type: "xavier" 331 | } 332 | } 333 | } 334 | layer { 335 | name: "fire6/relu_squeeze1x1" 336 | type: "ReLU" 337 | bottom: "fire6/squeeze1x1" 338 | top: "fire6/squeeze1x1" 339 | } 340 | layer { 341 | name: "fire6/expand1x1" 342 | type: "Convolution" 343 | bottom: "fire6/squeeze1x1" 344 | top: "fire6/expand1x1" 345 | convolution_param { 346 | num_output: 192 347 | kernel_size: 1 348 | weight_filler { 349 | type: "xavier" 350 | } 351 | } 352 | } 353 | layer { 354 | name: "fire6/relu_expand1x1" 355 | type: "ReLU" 356 | bottom: "fire6/expand1x1" 357 | top: "fire6/expand1x1" 358 | } 359 | layer { 360 | name: "fire6/expand3x3" 361 | type: "Convolution" 362 | bottom: "fire6/squeeze1x1" 363 | top: "fire6/expand3x3" 364 | convolution_param { 365 | num_output: 192 366 | pad: 1 367 | kernel_size: 3 368 | weight_filler { 369 | type: "xavier" 370 | } 371 | } 372 | } 373 | layer { 374 | name: "fire6/relu_expand3x3" 375 | type: "ReLU" 376 | bottom: "fire6/expand3x3" 377 | top: "fire6/expand3x3" 378 | } 379 | layer { 380 | name: "fire6/concat" 381 | type: "Concat" 382 | bottom: "fire6/expand1x1" 383 | bottom: "fire6/expand3x3" 384 | top: "fire6/concat" 385 | } 386 | layer { 387 | name: "fire7/squeeze1x1" 388 | type: "Convolution" 389 | bottom: "fire6/concat" 390 | top: "fire7/squeeze1x1" 391 | convolution_param { 392 | num_output: 48 393 | kernel_size: 1 394 | weight_filler { 395 | type: "xavier" 396 | } 397 | } 398 | } 399 | layer { 400 | name: "fire7/relu_squeeze1x1" 401 | type: "ReLU" 402 | bottom: "fire7/squeeze1x1" 403 | top: "fire7/squeeze1x1" 404 | } 405 | layer { 406 | name: "fire7/expand1x1" 407 | type: "Convolution" 408 | bottom: 
"fire7/squeeze1x1" 409 | top: "fire7/expand1x1" 410 | convolution_param { 411 | num_output: 192 412 | kernel_size: 1 413 | weight_filler { 414 | type: "xavier" 415 | } 416 | } 417 | } 418 | layer { 419 | name: "fire7/relu_expand1x1" 420 | type: "ReLU" 421 | bottom: "fire7/expand1x1" 422 | top: "fire7/expand1x1" 423 | } 424 | layer { 425 | name: "fire7/expand3x3" 426 | type: "Convolution" 427 | bottom: "fire7/squeeze1x1" 428 | top: "fire7/expand3x3" 429 | convolution_param { 430 | num_output: 192 431 | pad: 1 432 | kernel_size: 3 433 | weight_filler { 434 | type: "xavier" 435 | } 436 | } 437 | } 438 | layer { 439 | name: "fire7/relu_expand3x3" 440 | type: "ReLU" 441 | bottom: "fire7/expand3x3" 442 | top: "fire7/expand3x3" 443 | } 444 | layer { 445 | name: "fire7/concat" 446 | type: "Concat" 447 | bottom: "fire7/expand1x1" 448 | bottom: "fire7/expand3x3" 449 | top: "fire7/concat" 450 | } 451 | layer { 452 | name: "fire8/squeeze1x1" 453 | type: "Convolution" 454 | bottom: "fire7/concat" 455 | top: "fire8/squeeze1x1" 456 | convolution_param { 457 | num_output: 64 458 | kernel_size: 1 459 | weight_filler { 460 | type: "xavier" 461 | } 462 | } 463 | } 464 | layer { 465 | name: "fire8/relu_squeeze1x1" 466 | type: "ReLU" 467 | bottom: "fire8/squeeze1x1" 468 | top: "fire8/squeeze1x1" 469 | } 470 | layer { 471 | name: "fire8/expand1x1" 472 | type: "Convolution" 473 | bottom: "fire8/squeeze1x1" 474 | top: "fire8/expand1x1" 475 | convolution_param { 476 | num_output: 256 477 | kernel_size: 1 478 | weight_filler { 479 | type: "xavier" 480 | } 481 | } 482 | } 483 | layer { 484 | name: "fire8/relu_expand1x1" 485 | type: "ReLU" 486 | bottom: "fire8/expand1x1" 487 | top: "fire8/expand1x1" 488 | } 489 | layer { 490 | name: "fire8/expand3x3" 491 | type: "Convolution" 492 | bottom: "fire8/squeeze1x1" 493 | top: "fire8/expand3x3" 494 | convolution_param { 495 | num_output: 256 496 | pad: 1 497 | kernel_size: 3 498 | weight_filler { 499 | type: "xavier" 500 | } 501 | } 502 | } 503 | layer { 504 | name: "fire8/relu_expand3x3" 505 | type: "ReLU" 506 | bottom: "fire8/expand3x3" 507 | top: "fire8/expand3x3" 508 | } 509 | layer { 510 | name: "fire8/concat" 511 | type: "Concat" 512 | bottom: "fire8/expand1x1" 513 | bottom: "fire8/expand3x3" 514 | top: "fire8/concat" 515 | } 516 | layer { 517 | name: "fire9/squeeze1x1" 518 | type: "Convolution" 519 | bottom: "fire8/concat" 520 | top: "fire9/squeeze1x1" 521 | convolution_param { 522 | num_output: 64 523 | kernel_size: 1 524 | weight_filler { 525 | type: "xavier" 526 | } 527 | } 528 | } 529 | layer { 530 | name: "fire9/relu_squeeze1x1" 531 | type: "ReLU" 532 | bottom: "fire9/squeeze1x1" 533 | top: "fire9/squeeze1x1" 534 | } 535 | layer { 536 | name: "fire9/expand1x1" 537 | type: "Convolution" 538 | bottom: "fire9/squeeze1x1" 539 | top: "fire9/expand1x1" 540 | convolution_param { 541 | num_output: 256 542 | kernel_size: 1 543 | weight_filler { 544 | type: "xavier" 545 | } 546 | } 547 | } 548 | layer { 549 | name: "fire9/relu_expand1x1" 550 | type: "ReLU" 551 | bottom: "fire9/expand1x1" 552 | top: "fire9/expand1x1" 553 | } 554 | layer { 555 | name: "fire9/expand3x3" 556 | type: "Convolution" 557 | bottom: "fire9/squeeze1x1" 558 | top: "fire9/expand3x3" 559 | convolution_param { 560 | num_output: 256 561 | pad: 1 562 | kernel_size: 3 563 | weight_filler { 564 | type: "xavier" 565 | } 566 | } 567 | } 568 | layer { 569 | name: "fire9/relu_expand3x3" 570 | type: "ReLU" 571 | bottom: "fire9/expand3x3" 572 | top: "fire9/expand3x3" 573 | } 574 | layer { 575 | name: 
"fire9/concat" 576 | type: "Concat" 577 | bottom: "fire9/expand1x1" 578 | bottom: "fire9/expand3x3" 579 | top: "fire9/concat" 580 | } 581 | layer { 582 | name: "drop9" 583 | type: "Dropout" 584 | bottom: "fire9/concat" 585 | top: "fire9/concat" 586 | dropout_param { 587 | dropout_ratio: 0.5 588 | } 589 | } 590 | layer { 591 | name: "my-conv10" 592 | type: "Convolution" 593 | bottom: "fire9/concat" 594 | top: "my-conv10" 595 | convolution_param { 596 | num_output: 3 597 | kernel_size: 1 598 | weight_filler { 599 | type: "gaussian" 600 | mean: 0.0 601 | std: 0.01 602 | } 603 | } 604 | } 605 | layer { 606 | name: "relu_conv10" 607 | type: "ReLU" 608 | bottom: "my-conv10" 609 | top: "conv10" 610 | } 611 | layer { 612 | name: "pool10" 613 | type: "Pooling" 614 | bottom: "conv10" 615 | top: "pool10" 616 | pooling_param { 617 | pool: AVE 618 | global_pooling: true 619 | } 620 | } 621 | layer { 622 | name: "prob" 623 | type: "Softmax" 624 | bottom: "pool10" 625 | top: "prob" 626 | } 627 | -------------------------------------------------------------------------------- /SqueezeNet/solver.prototxt: -------------------------------------------------------------------------------- 1 | # the definition of neural network model 2 | net: "train_val.prototxt" 3 | # test_iter is related to batch_size in test layer, test_iter * batch_size = the number of test data 4 | test_iter: 761 5 | # carry out test once every 5 training iterations 6 | test_interval: 10000 7 | # exclude test phase when test_initialization = false 8 | # test_initialization: false 9 | # display information once every 10 training iterations 10 | display: 1 11 | # 12 | average_loss: 40 13 | # the initial learning rate 14 | base_lr: 0.001 15 | lr_policy: "poly" 16 | stepsize: 320000 17 | gamma: 0.96 18 | # The max number of iterations 19 | max_iter: 10001 20 | power: 1.0 21 | momentum: 0.9 22 | # weight decay item, in case of overfitting 23 | weight_decay: 0.0002 24 | # save once every 50 training iterations 25 | snapshot: 20 26 | # save path 27 | snapshot_prefix: "squeezenet" 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- /SqueezeNet/squeezenet_v1.1.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SnailTyan/caffe-model-zoo/83fb66248583f20fc7ce2f9d40fc6fe09ef4d4b9/SqueezeNet/squeezenet_v1.1.caffemodel -------------------------------------------------------------------------------- /SqueezeNet/train_val.prototxt: -------------------------------------------------------------------------------- 1 | name: "SqueezeNet" 2 | layer { 3 | name: "data" 4 | type: "Data" 5 | top: "data" 6 | top: "label" 7 | include { 8 | phase: TRAIN 9 | } 10 | transform_param { 11 | mirror: true 12 | crop_size: 227 13 | mean_file: "mean.binaryproto" 14 | } 15 | data_param { 16 | source: "train_lmdb" 17 | batch_size: 64 18 | backend: LMDB 19 | } 20 | } 21 | layer { 22 | name: "data" 23 | type: "Data" 24 | top: "data" 25 | top: "label" 26 | include { 27 | phase: TEST 28 | } 29 | transform_param { 30 | mirror: false 31 | crop_size: 227 32 | mean_file: "mean.binaryproto" 33 | } 34 | data_param { 35 | source: "val_lmdb" 36 | batch_size: 1 37 | backend: LMDB 38 | } 39 | } 40 | layer { 41 | name: "conv1" 42 | type: "Convolution" 43 | bottom: "data" 44 | top: "conv1" 45 | convolution_param { 46 | num_output: 64 47 | kernel_size: 3 48 | stride: 2 49 | weight_filler { 50 | type: "xavier" 51 | } 52 | } 53 | } 54 | layer { 55 | name: "relu_conv1" 
56 | type: "ReLU" 57 | bottom: "conv1" 58 | top: "conv1" 59 | } 60 | layer { 61 | name: "pool1" 62 | type: "Pooling" 63 | bottom: "conv1" 64 | top: "pool1" 65 | pooling_param { 66 | pool: MAX 67 | kernel_size: 3 68 | stride: 2 69 | } 70 | } 71 | layer { 72 | name: "fire2/squeeze1x1" 73 | type: "Convolution" 74 | bottom: "pool1" 75 | top: "fire2/squeeze1x1" 76 | convolution_param { 77 | num_output: 16 78 | kernel_size: 1 79 | weight_filler { 80 | type: "xavier" 81 | } 82 | } 83 | } 84 | layer { 85 | name: "fire2/relu_squeeze1x1" 86 | type: "ReLU" 87 | bottom: "fire2/squeeze1x1" 88 | top: "fire2/squeeze1x1" 89 | } 90 | layer { 91 | name: "fire2/expand1x1" 92 | type: "Convolution" 93 | bottom: "fire2/squeeze1x1" 94 | top: "fire2/expand1x1" 95 | convolution_param { 96 | num_output: 64 97 | kernel_size: 1 98 | weight_filler { 99 | type: "xavier" 100 | } 101 | } 102 | } 103 | layer { 104 | name: "fire2/relu_expand1x1" 105 | type: "ReLU" 106 | bottom: "fire2/expand1x1" 107 | top: "fire2/expand1x1" 108 | } 109 | layer { 110 | name: "fire2/expand3x3" 111 | type: "Convolution" 112 | bottom: "fire2/squeeze1x1" 113 | top: "fire2/expand3x3" 114 | convolution_param { 115 | num_output: 64 116 | pad: 1 117 | kernel_size: 3 118 | weight_filler { 119 | type: "xavier" 120 | } 121 | } 122 | } 123 | layer { 124 | name: "fire2/relu_expand3x3" 125 | type: "ReLU" 126 | bottom: "fire2/expand3x3" 127 | top: "fire2/expand3x3" 128 | } 129 | layer { 130 | name: "fire2/concat" 131 | type: "Concat" 132 | bottom: "fire2/expand1x1" 133 | bottom: "fire2/expand3x3" 134 | top: "fire2/concat" 135 | } 136 | layer { 137 | name: "fire3/squeeze1x1" 138 | type: "Convolution" 139 | bottom: "fire2/concat" 140 | top: "fire3/squeeze1x1" 141 | convolution_param { 142 | num_output: 16 143 | kernel_size: 1 144 | weight_filler { 145 | type: "xavier" 146 | } 147 | } 148 | } 149 | layer { 150 | name: "fire3/relu_squeeze1x1" 151 | type: "ReLU" 152 | bottom: "fire3/squeeze1x1" 153 | top: "fire3/squeeze1x1" 154 | } 155 | layer { 156 | name: "fire3/expand1x1" 157 | type: "Convolution" 158 | bottom: "fire3/squeeze1x1" 159 | top: "fire3/expand1x1" 160 | convolution_param { 161 | num_output: 64 162 | kernel_size: 1 163 | weight_filler { 164 | type: "xavier" 165 | } 166 | } 167 | } 168 | layer { 169 | name: "fire3/relu_expand1x1" 170 | type: "ReLU" 171 | bottom: "fire3/expand1x1" 172 | top: "fire3/expand1x1" 173 | } 174 | layer { 175 | name: "fire3/expand3x3" 176 | type: "Convolution" 177 | bottom: "fire3/squeeze1x1" 178 | top: "fire3/expand3x3" 179 | convolution_param { 180 | num_output: 64 181 | pad: 1 182 | kernel_size: 3 183 | weight_filler { 184 | type: "xavier" 185 | } 186 | } 187 | } 188 | layer { 189 | name: "fire3/relu_expand3x3" 190 | type: "ReLU" 191 | bottom: "fire3/expand3x3" 192 | top: "fire3/expand3x3" 193 | } 194 | layer { 195 | name: "fire3/concat" 196 | type: "Concat" 197 | bottom: "fire3/expand1x1" 198 | bottom: "fire3/expand3x3" 199 | top: "fire3/concat" 200 | } 201 | layer { 202 | name: "pool3" 203 | type: "Pooling" 204 | bottom: "fire3/concat" 205 | top: "pool3" 206 | pooling_param { 207 | pool: MAX 208 | kernel_size: 3 209 | stride: 2 210 | } 211 | } 212 | layer { 213 | name: "fire4/squeeze1x1" 214 | type: "Convolution" 215 | bottom: "pool3" 216 | top: "fire4/squeeze1x1" 217 | convolution_param { 218 | num_output: 32 219 | kernel_size: 1 220 | weight_filler { 221 | type: "xavier" 222 | } 223 | } 224 | } 225 | layer { 226 | name: "fire4/relu_squeeze1x1" 227 | type: "ReLU" 228 | bottom: "fire4/squeeze1x1" 229 | top: 
"fire4/squeeze1x1" 230 | } 231 | layer { 232 | name: "fire4/expand1x1" 233 | type: "Convolution" 234 | bottom: "fire4/squeeze1x1" 235 | top: "fire4/expand1x1" 236 | convolution_param { 237 | num_output: 128 238 | kernel_size: 1 239 | weight_filler { 240 | type: "xavier" 241 | } 242 | } 243 | } 244 | layer { 245 | name: "fire4/relu_expand1x1" 246 | type: "ReLU" 247 | bottom: "fire4/expand1x1" 248 | top: "fire4/expand1x1" 249 | } 250 | layer { 251 | name: "fire4/expand3x3" 252 | type: "Convolution" 253 | bottom: "fire4/squeeze1x1" 254 | top: "fire4/expand3x3" 255 | convolution_param { 256 | num_output: 128 257 | pad: 1 258 | kernel_size: 3 259 | weight_filler { 260 | type: "xavier" 261 | } 262 | } 263 | } 264 | layer { 265 | name: "fire4/relu_expand3x3" 266 | type: "ReLU" 267 | bottom: "fire4/expand3x3" 268 | top: "fire4/expand3x3" 269 | } 270 | layer { 271 | name: "fire4/concat" 272 | type: "Concat" 273 | bottom: "fire4/expand1x1" 274 | bottom: "fire4/expand3x3" 275 | top: "fire4/concat" 276 | } 277 | layer { 278 | name: "fire5/squeeze1x1" 279 | type: "Convolution" 280 | bottom: "fire4/concat" 281 | top: "fire5/squeeze1x1" 282 | convolution_param { 283 | num_output: 32 284 | kernel_size: 1 285 | weight_filler { 286 | type: "xavier" 287 | } 288 | } 289 | } 290 | layer { 291 | name: "fire5/relu_squeeze1x1" 292 | type: "ReLU" 293 | bottom: "fire5/squeeze1x1" 294 | top: "fire5/squeeze1x1" 295 | } 296 | layer { 297 | name: "fire5/expand1x1" 298 | type: "Convolution" 299 | bottom: "fire5/squeeze1x1" 300 | top: "fire5/expand1x1" 301 | convolution_param { 302 | num_output: 128 303 | kernel_size: 1 304 | weight_filler { 305 | type: "xavier" 306 | } 307 | } 308 | } 309 | layer { 310 | name: "fire5/relu_expand1x1" 311 | type: "ReLU" 312 | bottom: "fire5/expand1x1" 313 | top: "fire5/expand1x1" 314 | } 315 | layer { 316 | name: "fire5/expand3x3" 317 | type: "Convolution" 318 | bottom: "fire5/squeeze1x1" 319 | top: "fire5/expand3x3" 320 | convolution_param { 321 | num_output: 128 322 | pad: 1 323 | kernel_size: 3 324 | weight_filler { 325 | type: "xavier" 326 | } 327 | } 328 | } 329 | layer { 330 | name: "fire5/relu_expand3x3" 331 | type: "ReLU" 332 | bottom: "fire5/expand3x3" 333 | top: "fire5/expand3x3" 334 | } 335 | layer { 336 | name: "fire5/concat" 337 | type: "Concat" 338 | bottom: "fire5/expand1x1" 339 | bottom: "fire5/expand3x3" 340 | top: "fire5/concat" 341 | } 342 | layer { 343 | name: "pool5" 344 | type: "Pooling" 345 | bottom: "fire5/concat" 346 | top: "pool5" 347 | pooling_param { 348 | pool: MAX 349 | kernel_size: 3 350 | stride: 2 351 | } 352 | } 353 | layer { 354 | name: "fire6/squeeze1x1" 355 | type: "Convolution" 356 | bottom: "pool5" 357 | top: "fire6/squeeze1x1" 358 | convolution_param { 359 | num_output: 48 360 | kernel_size: 1 361 | weight_filler { 362 | type: "xavier" 363 | } 364 | } 365 | } 366 | layer { 367 | name: "fire6/relu_squeeze1x1" 368 | type: "ReLU" 369 | bottom: "fire6/squeeze1x1" 370 | top: "fire6/squeeze1x1" 371 | } 372 | layer { 373 | name: "fire6/expand1x1" 374 | type: "Convolution" 375 | bottom: "fire6/squeeze1x1" 376 | top: "fire6/expand1x1" 377 | convolution_param { 378 | num_output: 192 379 | kernel_size: 1 380 | weight_filler { 381 | type: "xavier" 382 | } 383 | } 384 | } 385 | layer { 386 | name: "fire6/relu_expand1x1" 387 | type: "ReLU" 388 | bottom: "fire6/expand1x1" 389 | top: "fire6/expand1x1" 390 | } 391 | layer { 392 | name: "fire6/expand3x3" 393 | type: "Convolution" 394 | bottom: "fire6/squeeze1x1" 395 | top: "fire6/expand3x3" 396 | convolution_param { 
397 | num_output: 192 398 | pad: 1 399 | kernel_size: 3 400 | weight_filler { 401 | type: "xavier" 402 | } 403 | } 404 | } 405 | layer { 406 | name: "fire6/relu_expand3x3" 407 | type: "ReLU" 408 | bottom: "fire6/expand3x3" 409 | top: "fire6/expand3x3" 410 | } 411 | layer { 412 | name: "fire6/concat" 413 | type: "Concat" 414 | bottom: "fire6/expand1x1" 415 | bottom: "fire6/expand3x3" 416 | top: "fire6/concat" 417 | } 418 | layer { 419 | name: "fire7/squeeze1x1" 420 | type: "Convolution" 421 | bottom: "fire6/concat" 422 | top: "fire7/squeeze1x1" 423 | convolution_param { 424 | num_output: 48 425 | kernel_size: 1 426 | weight_filler { 427 | type: "xavier" 428 | } 429 | } 430 | } 431 | layer { 432 | name: "fire7/relu_squeeze1x1" 433 | type: "ReLU" 434 | bottom: "fire7/squeeze1x1" 435 | top: "fire7/squeeze1x1" 436 | } 437 | layer { 438 | name: "fire7/expand1x1" 439 | type: "Convolution" 440 | bottom: "fire7/squeeze1x1" 441 | top: "fire7/expand1x1" 442 | convolution_param { 443 | num_output: 192 444 | kernel_size: 1 445 | weight_filler { 446 | type: "xavier" 447 | } 448 | } 449 | } 450 | layer { 451 | name: "fire7/relu_expand1x1" 452 | type: "ReLU" 453 | bottom: "fire7/expand1x1" 454 | top: "fire7/expand1x1" 455 | } 456 | layer { 457 | name: "fire7/expand3x3" 458 | type: "Convolution" 459 | bottom: "fire7/squeeze1x1" 460 | top: "fire7/expand3x3" 461 | convolution_param { 462 | num_output: 192 463 | pad: 1 464 | kernel_size: 3 465 | weight_filler { 466 | type: "xavier" 467 | } 468 | } 469 | } 470 | layer { 471 | name: "fire7/relu_expand3x3" 472 | type: "ReLU" 473 | bottom: "fire7/expand3x3" 474 | top: "fire7/expand3x3" 475 | } 476 | layer { 477 | name: "fire7/concat" 478 | type: "Concat" 479 | bottom: "fire7/expand1x1" 480 | bottom: "fire7/expand3x3" 481 | top: "fire7/concat" 482 | } 483 | layer { 484 | name: "fire8/squeeze1x1" 485 | type: "Convolution" 486 | bottom: "fire7/concat" 487 | top: "fire8/squeeze1x1" 488 | convolution_param { 489 | num_output: 64 490 | kernel_size: 1 491 | weight_filler { 492 | type: "xavier" 493 | } 494 | } 495 | } 496 | layer { 497 | name: "fire8/relu_squeeze1x1" 498 | type: "ReLU" 499 | bottom: "fire8/squeeze1x1" 500 | top: "fire8/squeeze1x1" 501 | } 502 | layer { 503 | name: "fire8/expand1x1" 504 | type: "Convolution" 505 | bottom: "fire8/squeeze1x1" 506 | top: "fire8/expand1x1" 507 | convolution_param { 508 | num_output: 256 509 | kernel_size: 1 510 | weight_filler { 511 | type: "xavier" 512 | } 513 | } 514 | } 515 | layer { 516 | name: "fire8/relu_expand1x1" 517 | type: "ReLU" 518 | bottom: "fire8/expand1x1" 519 | top: "fire8/expand1x1" 520 | } 521 | layer { 522 | name: "fire8/expand3x3" 523 | type: "Convolution" 524 | bottom: "fire8/squeeze1x1" 525 | top: "fire8/expand3x3" 526 | convolution_param { 527 | num_output: 256 528 | pad: 1 529 | kernel_size: 3 530 | weight_filler { 531 | type: "xavier" 532 | } 533 | } 534 | } 535 | layer { 536 | name: "fire8/relu_expand3x3" 537 | type: "ReLU" 538 | bottom: "fire8/expand3x3" 539 | top: "fire8/expand3x3" 540 | } 541 | layer { 542 | name: "fire8/concat" 543 | type: "Concat" 544 | bottom: "fire8/expand1x1" 545 | bottom: "fire8/expand3x3" 546 | top: "fire8/concat" 547 | } 548 | layer { 549 | name: "fire9/squeeze1x1" 550 | type: "Convolution" 551 | bottom: "fire8/concat" 552 | top: "fire9/squeeze1x1" 553 | convolution_param { 554 | num_output: 64 555 | kernel_size: 1 556 | weight_filler { 557 | type: "xavier" 558 | } 559 | } 560 | } 561 | layer { 562 | name: "fire9/relu_squeeze1x1" 563 | type: "ReLU" 564 | bottom: 
"fire9/squeeze1x1" 565 | top: "fire9/squeeze1x1" 566 | } 567 | layer { 568 | name: "fire9/expand1x1" 569 | type: "Convolution" 570 | bottom: "fire9/squeeze1x1" 571 | top: "fire9/expand1x1" 572 | convolution_param { 573 | num_output: 256 574 | kernel_size: 1 575 | weight_filler { 576 | type: "xavier" 577 | } 578 | } 579 | } 580 | layer { 581 | name: "fire9/relu_expand1x1" 582 | type: "ReLU" 583 | bottom: "fire9/expand1x1" 584 | top: "fire9/expand1x1" 585 | } 586 | layer { 587 | name: "fire9/expand3x3" 588 | type: "Convolution" 589 | bottom: "fire9/squeeze1x1" 590 | top: "fire9/expand3x3" 591 | convolution_param { 592 | num_output: 256 593 | pad: 1 594 | kernel_size: 3 595 | weight_filler { 596 | type: "xavier" 597 | } 598 | } 599 | } 600 | layer { 601 | name: "fire9/relu_expand3x3" 602 | type: "ReLU" 603 | bottom: "fire9/expand3x3" 604 | top: "fire9/expand3x3" 605 | } 606 | layer { 607 | name: "fire9/concat" 608 | type: "Concat" 609 | bottom: "fire9/expand1x1" 610 | bottom: "fire9/expand3x3" 611 | top: "fire9/concat" 612 | } 613 | layer { 614 | name: "drop9" 615 | type: "Dropout" 616 | bottom: "fire9/concat" 617 | top: "fire9/concat" 618 | dropout_param { 619 | dropout_ratio: 0.5 620 | } 621 | } 622 | layer { 623 | name: "my-conv10" 624 | type: "Convolution" 625 | bottom: "fire9/concat" 626 | top: "my-conv10" 627 | convolution_param { 628 | num_output: 3 629 | kernel_size: 1 630 | weight_filler { 631 | type: "gaussian" 632 | mean: 0.0 633 | std: 0.01 634 | } 635 | } 636 | } 637 | layer { 638 | name: "relu_conv10" 639 | type: "ReLU" 640 | bottom: "my-conv10" 641 | top: "conv10" 642 | } 643 | layer { 644 | name: "pool10" 645 | type: "Pooling" 646 | bottom: "conv10" 647 | top: "pool10" 648 | pooling_param { 649 | pool: AVE 650 | global_pooling: true 651 | } 652 | } 653 | layer { 654 | name: "loss" 655 | type: "SoftmaxWithLoss" 656 | bottom: "pool10" 657 | bottom: "label" 658 | top: "loss" 659 | #include { 660 | # phase: TRAIN 661 | #} 662 | } 663 | -------------------------------------------------------------------------------- /VGG16/deploy.prototxt: -------------------------------------------------------------------------------- 1 | name: "VGG_ILSVRC_16_layer" 2 | input: "data" 3 | input_dim: 10 4 | input_dim: 3 5 | input_dim: 224 6 | input_dim: 224 7 | layer { 8 | bottom: "data" 9 | top: "conv1_1" 10 | name: "conv1_1" 11 | type: "Convolution" 12 | convolution_param { 13 | num_output: 64 14 | pad: 1 15 | kernel_size: 3 16 | } 17 | } 18 | layer { 19 | bottom: "conv1_1" 20 | top: "conv1_1" 21 | name: "relu1_1" 22 | type: "ReLU" 23 | } 24 | layer { 25 | bottom: "conv1_1" 26 | top: "conv1_2" 27 | name: "conv1_2" 28 | type: "Convolution" 29 | convolution_param { 30 | num_output: 64 31 | pad: 1 32 | kernel_size: 3 33 | } 34 | } 35 | layer { 36 | bottom: "conv1_2" 37 | top: "conv1_2" 38 | name: "relu1_2" 39 | type: "ReLU" 40 | } 41 | layer { 42 | bottom: "conv1_2" 43 | top: "pool1" 44 | name: "pool1" 45 | type: "Pooling" 46 | pooling_param { 47 | pool: MAX 48 | kernel_size: 2 49 | stride: 2 50 | } 51 | } 52 | layer { 53 | bottom: "pool1" 54 | top: "conv2_1" 55 | name: "conv2_1" 56 | type: "Convolution" 57 | convolution_param { 58 | num_output: 128 59 | pad: 1 60 | kernel_size: 3 61 | } 62 | } 63 | layer { 64 | bottom: "conv2_1" 65 | top: "conv2_1" 66 | name: "relu2_1" 67 | type: "ReLU" 68 | } 69 | layer { 70 | bottom: "conv2_1" 71 | top: "conv2_2" 72 | name: "conv2_2" 73 | type: "Convolution" 74 | convolution_param { 75 | num_output: 128 76 | pad: 1 77 | kernel_size: 3 78 | } 79 | } 80 | layer { 81 
| bottom: "conv2_2" 82 | top: "conv2_2" 83 | name: "relu2_2" 84 | type: "ReLU" 85 | } 86 | layer { 87 | bottom: "conv2_2" 88 | top: "pool2" 89 | name: "pool2" 90 | type: "Pooling" 91 | pooling_param { 92 | pool: MAX 93 | kernel_size: 2 94 | stride: 2 95 | } 96 | } 97 | layer { 98 | bottom: "pool2" 99 | top: "conv3_1" 100 | name: "conv3_1" 101 | type: "Convolution" 102 | convolution_param { 103 | num_output: 256 104 | pad: 1 105 | kernel_size: 3 106 | } 107 | } 108 | layer { 109 | bottom: "conv3_1" 110 | top: "conv3_1" 111 | name: "relu3_1" 112 | type: "ReLU" 113 | } 114 | layer { 115 | bottom: "conv3_1" 116 | top: "conv3_2" 117 | name: "conv3_2" 118 | type: "Convolution" 119 | convolution_param { 120 | num_output: 256 121 | pad: 1 122 | kernel_size: 3 123 | } 124 | } 125 | layer { 126 | bottom: "conv3_2" 127 | top: "conv3_2" 128 | name: "relu3_2" 129 | type: "ReLU" 130 | } 131 | layer { 132 | bottom: "conv3_2" 133 | top: "conv3_3" 134 | name: "conv3_3" 135 | type: "Convolution" 136 | convolution_param { 137 | num_output: 256 138 | pad: 1 139 | kernel_size: 3 140 | } 141 | } 142 | layer { 143 | bottom: "conv3_3" 144 | top: "conv3_3" 145 | name: "relu3_3" 146 | type: "ReLU" 147 | } 148 | layer { 149 | bottom: "conv3_3" 150 | top: "pool3" 151 | name: "pool3" 152 | type: "Pooling" 153 | pooling_param { 154 | pool: MAX 155 | kernel_size: 2 156 | stride: 2 157 | } 158 | } 159 | layer { 160 | bottom: "pool3" 161 | top: "conv4_1" 162 | name: "conv4_1" 163 | type: "Convolution" 164 | convolution_param { 165 | num_output: 512 166 | pad: 1 167 | kernel_size: 3 168 | } 169 | } 170 | layer { 171 | bottom: "conv4_1" 172 | top: "conv4_1" 173 | name: "relu4_1" 174 | type: "ReLU" 175 | } 176 | layer { 177 | bottom: "conv4_1" 178 | top: "conv4_2" 179 | name: "conv4_2" 180 | type: "Convolution" 181 | convolution_param { 182 | num_output: 512 183 | pad: 1 184 | kernel_size: 3 185 | } 186 | } 187 | layer { 188 | bottom: "conv4_2" 189 | top: "conv4_2" 190 | name: "relu4_2" 191 | type: "ReLU" 192 | } 193 | layer { 194 | bottom: "conv4_2" 195 | top: "conv4_3" 196 | name: "conv4_3" 197 | type: "Convolution" 198 | convolution_param { 199 | num_output: 512 200 | pad: 1 201 | kernel_size: 3 202 | } 203 | } 204 | layer { 205 | bottom: "conv4_3" 206 | top: "conv4_3" 207 | name: "relu4_3" 208 | type: "ReLU" 209 | } 210 | layer { 211 | bottom: "conv4_3" 212 | top: "pool4" 213 | name: "pool4" 214 | type: "Pooling" 215 | pooling_param { 216 | pool: MAX 217 | kernel_size: 2 218 | stride: 2 219 | } 220 | } 221 | layer { 222 | bottom: "pool4" 223 | top: "conv5_1" 224 | name: "conv5_1" 225 | type: "Convolution" 226 | convolution_param { 227 | num_output: 512 228 | pad: 1 229 | kernel_size: 3 230 | } 231 | } 232 | layer { 233 | bottom: "conv5_1" 234 | top: "conv5_1" 235 | name: "relu5_1" 236 | type: "ReLU" 237 | } 238 | layer { 239 | bottom: "conv5_1" 240 | top: "conv5_2" 241 | name: "conv5_2" 242 | type: "Convolution" 243 | convolution_param { 244 | num_output: 512 245 | pad: 1 246 | kernel_size: 3 247 | } 248 | } 249 | layer { 250 | bottom: "conv5_2" 251 | top: "conv5_2" 252 | name: "relu5_2" 253 | type: "ReLU" 254 | } 255 | layer { 256 | bottom: "conv5_2" 257 | top: "conv5_3" 258 | name: "conv5_3" 259 | type: "Convolution" 260 | convolution_param { 261 | num_output: 512 262 | pad: 1 263 | kernel_size: 3 264 | } 265 | } 266 | layer { 267 | bottom: "conv5_3" 268 | top: "conv5_3" 269 | name: "relu5_3" 270 | type: "ReLU" 271 | } 272 | layer { 273 | bottom: "conv5_3" 274 | top: "pool5" 275 | name: "pool5" 276 | type: "Pooling" 
277 | pooling_param { 278 | pool: MAX 279 | kernel_size: 2 280 | stride: 2 281 | } 282 | } 283 | layer { 284 | bottom: "pool5" 285 | top: "fc6" 286 | name: "fc6" 287 | type: "InnerProduct" 288 | inner_product_param { 289 | num_output: 4096 290 | } 291 | } 292 | layer { 293 | bottom: "fc6" 294 | top: "fc6" 295 | name: "relu6" 296 | type: "ReLU" 297 | } 298 | layer { 299 | bottom: "fc6" 300 | top: "fc6" 301 | name: "drop6" 302 | type: "Dropout" 303 | dropout_param { 304 | dropout_ratio: 0.5 305 | } 306 | } 307 | layer { 308 | bottom: "fc6" 309 | top: "fc7" 310 | name: "fc7" 311 | type: "InnerProduct" 312 | inner_product_param { 313 | num_output: 4096 314 | } 315 | } 316 | layer { 317 | bottom: "fc7" 318 | top: "fc7" 319 | name: "relu7" 320 | type: "ReLU" 321 | } 322 | layer { 323 | bottom: "fc7" 324 | top: "fc7" 325 | name: "drop7" 326 | type: "Dropout" 327 | dropout_param { 328 | dropout_ratio: 0.5 329 | } 330 | } 331 | layer { 332 | bottom: "fc7" 333 | top: "my-fc8" 334 | name: "my-fc8" 335 | type: "InnerProduct" 336 | inner_product_param { 337 | num_output: 3 338 | } 339 | } 340 | layer { 341 | bottom: "my-fc8" 342 | top: "prob" 343 | name: "prob" 344 | type: "Softmax" 345 | } 346 | -------------------------------------------------------------------------------- /VGG16/solver.prototxt: -------------------------------------------------------------------------------- 1 | # the definition of the neural network model 2 | net: "train_val.prototxt" 3 | # test_iter is related to batch_size in the test layer, test_iter * batch_size = the number of test samples 4 | test_iter: 761 5 | # carry out a test pass once every 1000 training iterations 6 | test_interval: 1000 7 | # skip the initial test pass before training when test_initialization = false 8 | # test_initialization: false 9 | # display information once every training iteration 10 | display: 1 11 | # average the displayed loss over the last 40 iterations 12 | average_loss: 40 13 | # the initial learning rate 14 | base_lr: 0.001 15 | lr_policy: "poly" 16 | stepsize: 320000 17 | gamma: 0.96 18 | # The max number of iterations 19 | max_iter: 5000001 20 | power: 1.0 21 | momentum: 0.9 22 | # weight decay term, to mitigate overfitting 23 | weight_decay: 0.0002 24 | # save a snapshot once every 20 training iterations 25 | snapshot: 20 26 | # save path 27 | snapshot_prefix: "vgg16" 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- /VGG16/train_val.prototxt: -------------------------------------------------------------------------------- 1 | name: "VGG_ILSVRC_16_layer" 2 | layer { 3 | name: "data" 4 | type: "Data" 5 | top: "data" 6 | top: "label" 7 | include { 8 | phase: TRAIN 9 | } 10 | transform_param { 11 | mirror: true 12 | crop_size: 224 13 | mean_file: "mean.binaryproto" 14 | } 15 | data_param { 16 | source: "train_lmdb" 17 | batch_size: 32 18 | backend: LMDB 19 | } 20 | } 21 | layer { 22 | name: "data" 23 | type: "Data" 24 | top: "data" 25 | top: "label" 26 | include { 27 | phase: TEST 28 | } 29 | transform_param { 30 | mirror: false 31 | crop_size: 224 32 | mean_file: "mean.binaryproto" 33 | } 34 | data_param { 35 | source: "val_lmdb" 36 | batch_size: 1 37 | backend: LMDB 38 | } 39 | } 40 | layer { 41 | bottom: "data" 42 | top: "conv1_1" 43 | name: "conv1_1" 44 | type: "Convolution" 45 | convolution_param { 46 | num_output: 64 47 | pad: 1 48 | kernel_size: 3 49 | } 50 | } 51 | layer { 52 | bottom: "conv1_1" 53 | top: "conv1_1" 54 | name: "relu1_1" 55 | type: "ReLU" 56 | } 57 | layer { 58 | bottom: "conv1_1" 59 | top: "conv1_2" 60 | name: "conv1_2" 61 | type: "Convolution" 62 |
convolution_param { 63 | num_output: 64 64 | pad: 1 65 | kernel_size: 3 66 | } 67 | } 68 | layer { 69 | bottom: "conv1_2" 70 | top: "conv1_2" 71 | name: "relu1_2" 72 | type: "ReLU" 73 | } 74 | layer { 75 | bottom: "conv1_2" 76 | top: "pool1" 77 | name: "pool1" 78 | type: "Pooling" 79 | pooling_param { 80 | pool: MAX 81 | kernel_size: 2 82 | stride: 2 83 | } 84 | } 85 | layer { 86 | bottom: "pool1" 87 | top: "conv2_1" 88 | name: "conv2_1" 89 | type: "Convolution" 90 | convolution_param { 91 | num_output: 128 92 | pad: 1 93 | kernel_size: 3 94 | } 95 | } 96 | layer { 97 | bottom: "conv2_1" 98 | top: "conv2_1" 99 | name: "relu2_1" 100 | type: "ReLU" 101 | } 102 | layer { 103 | bottom: "conv2_1" 104 | top: "conv2_2" 105 | name: "conv2_2" 106 | type: "Convolution" 107 | convolution_param { 108 | num_output: 128 109 | pad: 1 110 | kernel_size: 3 111 | } 112 | } 113 | layer { 114 | bottom: "conv2_2" 115 | top: "conv2_2" 116 | name: "relu2_2" 117 | type: "ReLU" 118 | } 119 | layer { 120 | bottom: "conv2_2" 121 | top: "pool2" 122 | name: "pool2" 123 | type: "Pooling" 124 | pooling_param { 125 | pool: MAX 126 | kernel_size: 2 127 | stride: 2 128 | } 129 | } 130 | layer { 131 | bottom: "pool2" 132 | top: "conv3_1" 133 | name: "conv3_1" 134 | type: "Convolution" 135 | convolution_param { 136 | num_output: 256 137 | pad: 1 138 | kernel_size: 3 139 | } 140 | } 141 | layer { 142 | bottom: "conv3_1" 143 | top: "conv3_1" 144 | name: "relu3_1" 145 | type: "ReLU" 146 | } 147 | layer { 148 | bottom: "conv3_1" 149 | top: "conv3_2" 150 | name: "conv3_2" 151 | type: "Convolution" 152 | convolution_param { 153 | num_output: 256 154 | pad: 1 155 | kernel_size: 3 156 | } 157 | } 158 | layer { 159 | bottom: "conv3_2" 160 | top: "conv3_2" 161 | name: "relu3_2" 162 | type: "ReLU" 163 | } 164 | layer { 165 | bottom: "conv3_2" 166 | top: "conv3_3" 167 | name: "conv3_3" 168 | type: "Convolution" 169 | convolution_param { 170 | num_output: 256 171 | pad: 1 172 | kernel_size: 3 173 | } 174 | } 175 | layer { 176 | bottom: "conv3_3" 177 | top: "conv3_3" 178 | name: "relu3_3" 179 | type: "ReLU" 180 | } 181 | layer { 182 | bottom: "conv3_3" 183 | top: "pool3" 184 | name: "pool3" 185 | type: "Pooling" 186 | pooling_param { 187 | pool: MAX 188 | kernel_size: 2 189 | stride: 2 190 | } 191 | } 192 | layer { 193 | bottom: "pool3" 194 | top: "conv4_1" 195 | name: "conv4_1" 196 | type: "Convolution" 197 | convolution_param { 198 | num_output: 512 199 | pad: 1 200 | kernel_size: 3 201 | } 202 | } 203 | layer { 204 | bottom: "conv4_1" 205 | top: "conv4_1" 206 | name: "relu4_1" 207 | type: "ReLU" 208 | } 209 | layer { 210 | bottom: "conv4_1" 211 | top: "conv4_2" 212 | name: "conv4_2" 213 | type: "Convolution" 214 | convolution_param { 215 | num_output: 512 216 | pad: 1 217 | kernel_size: 3 218 | } 219 | } 220 | layer { 221 | bottom: "conv4_2" 222 | top: "conv4_2" 223 | name: "relu4_2" 224 | type: "ReLU" 225 | } 226 | layer { 227 | bottom: "conv4_2" 228 | top: "conv4_3" 229 | name: "conv4_3" 230 | type: "Convolution" 231 | convolution_param { 232 | num_output: 512 233 | pad: 1 234 | kernel_size: 3 235 | } 236 | } 237 | layer { 238 | bottom: "conv4_3" 239 | top: "conv4_3" 240 | name: "relu4_3" 241 | type: "ReLU" 242 | } 243 | layer { 244 | bottom: "conv4_3" 245 | top: "pool4" 246 | name: "pool4" 247 | type: "Pooling" 248 | pooling_param { 249 | pool: MAX 250 | kernel_size: 2 251 | stride: 2 252 | } 253 | } 254 | layer { 255 | bottom: "pool4" 256 | top: "conv5_1" 257 | name: "conv5_1" 258 | type: "Convolution" 259 | convolution_param { 260 
| num_output: 512 261 | pad: 1 262 | kernel_size: 3 263 | } 264 | } 265 | layer { 266 | bottom: "conv5_1" 267 | top: "conv5_1" 268 | name: "relu5_1" 269 | type: "ReLU" 270 | } 271 | layer { 272 | bottom: "conv5_1" 273 | top: "conv5_2" 274 | name: "conv5_2" 275 | type: "Convolution" 276 | convolution_param { 277 | num_output: 512 278 | pad: 1 279 | kernel_size: 3 280 | } 281 | } 282 | layer { 283 | bottom: "conv5_2" 284 | top: "conv5_2" 285 | name: "relu5_2" 286 | type: "ReLU" 287 | } 288 | layer { 289 | bottom: "conv5_2" 290 | top: "conv5_3" 291 | name: "conv5_3" 292 | type: "Convolution" 293 | convolution_param { 294 | num_output: 512 295 | pad: 1 296 | kernel_size: 3 297 | } 298 | } 299 | layer { 300 | bottom: "conv5_3" 301 | top: "conv5_3" 302 | name: "relu5_3" 303 | type: "ReLU" 304 | } 305 | layer { 306 | bottom: "conv5_3" 307 | top: "pool5" 308 | name: "pool5" 309 | type: "Pooling" 310 | pooling_param { 311 | pool: MAX 312 | kernel_size: 2 313 | stride: 2 314 | } 315 | } 316 | layer { 317 | bottom: "pool5" 318 | top: "fc6" 319 | name: "fc6" 320 | type: "InnerProduct" 321 | inner_product_param { 322 | num_output: 4096 323 | } 324 | } 325 | layer { 326 | bottom: "fc6" 327 | top: "fc6" 328 | name: "relu6" 329 | type: "ReLU" 330 | } 331 | layer { 332 | bottom: "fc6" 333 | top: "fc6" 334 | name: "drop6" 335 | type: "Dropout" 336 | dropout_param { 337 | dropout_ratio: 0.5 338 | } 339 | } 340 | layer { 341 | bottom: "fc6" 342 | top: "fc7" 343 | name: "fc7" 344 | type: "InnerProduct" 345 | inner_product_param { 346 | num_output: 4096 347 | } 348 | } 349 | layer { 350 | bottom: "fc7" 351 | top: "fc7" 352 | name: "relu7" 353 | type: "ReLU" 354 | } 355 | layer { 356 | bottom: "fc7" 357 | top: "fc7" 358 | name: "drop7" 359 | type: "Dropout" 360 | dropout_param { 361 | dropout_ratio: 0.5 362 | } 363 | } 364 | layer { 365 | bottom: "fc7" 366 | top: "my-fc8" 367 | name: "my-fc8" 368 | type: "InnerProduct" 369 | inner_product_param { 370 | num_output: 3 371 | } 372 | } 373 | layer { 374 | name: "loss1/loss1" 375 | type: "SoftmaxWithLoss" 376 | bottom: "my-fc8" 377 | bottom: "label" 378 | top: "loss1/loss1" 379 | loss_weight: 0.3 380 | } 381 | layer { 382 | name: "accuracy" 383 | type: "Accuracy" 384 | bottom: "my-fc8" 385 | bottom: "label" 386 | top: "accuracy" 387 | include { 388 | phase: TEST 389 | } 390 | } 391 | -------------------------------------------------------------------------------- /VGG19/deploy.prototxt: -------------------------------------------------------------------------------- 1 | name: "VGG_19" 2 | input: "data" 3 | input_shape { 4 | dim: 1 5 | dim: 3 6 | dim: 224 7 | dim: 224 8 | } 9 | layer { 10 | bottom: "data" 11 | top: "conv1_1" 12 | name: "conv1_1" 13 | type: "Convolution" 14 | convolution_param { 15 | num_output: 64 16 | pad: 1 17 | kernel_size: 3 18 | } 19 | } 20 | layer { 21 | bottom: "conv1_1" 22 | top: "conv1_1" 23 | name: "relu1_1" 24 | type: "ReLU" 25 | } 26 | layer { 27 | bottom: "conv1_1" 28 | top: "conv1_2" 29 | name: "conv1_2" 30 | type: "Convolution" 31 | convolution_param { 32 | num_output: 64 33 | pad: 1 34 | kernel_size: 3 35 | } 36 | } 37 | layer { 38 | bottom: "conv1_2" 39 | top: "conv1_2" 40 | name: "relu1_2" 41 | type: "ReLU" 42 | } 43 | layer { 44 | bottom: "conv1_2" 45 | top: "pool1" 46 | name: "pool1" 47 | type: "Pooling" 48 | pooling_param { 49 | pool: MAX 50 | kernel_size: 2 51 | stride: 2 52 | } 53 | } 54 | layer { 55 | bottom: "pool1" 56 | top: "conv2_1" 57 | name: "conv2_1" 58 | type: "Convolution" 59 | convolution_param { 60 | num_output: 128 
61 | pad: 1 62 | kernel_size: 3 63 | } 64 | } 65 | layer { 66 | bottom: "conv2_1" 67 | top: "conv2_1" 68 | name: "relu2_1" 69 | type: "ReLU" 70 | } 71 | layer { 72 | bottom: "conv2_1" 73 | top: "conv2_2" 74 | name: "conv2_2" 75 | type: "Convolution" 76 | convolution_param { 77 | num_output: 128 78 | pad: 1 79 | kernel_size: 3 80 | } 81 | } 82 | layer { 83 | bottom: "conv2_2" 84 | top: "conv2_2" 85 | name: "relu2_2" 86 | type: "ReLU" 87 | } 88 | layer { 89 | bottom: "conv2_2" 90 | top: "pool2" 91 | name: "pool2" 92 | type: "Pooling" 93 | pooling_param { 94 | pool: MAX 95 | kernel_size: 2 96 | stride: 2 97 | } 98 | } 99 | layer { 100 | bottom: "pool2" 101 | top: "conv3_1" 102 | name: "conv3_1" 103 | type: "Convolution" 104 | convolution_param { 105 | num_output: 256 106 | pad: 1 107 | kernel_size: 3 108 | } 109 | } 110 | layer { 111 | bottom: "conv3_1" 112 | top: "conv3_1" 113 | name: "relu3_1" 114 | type: "ReLU" 115 | } 116 | layer { 117 | bottom: "conv3_1" 118 | top: "conv3_2" 119 | name: "conv3_2" 120 | type: "Convolution" 121 | convolution_param { 122 | num_output: 256 123 | pad: 1 124 | kernel_size: 3 125 | } 126 | } 127 | layer { 128 | bottom: "conv3_2" 129 | top: "conv3_2" 130 | name: "relu3_2" 131 | type: "ReLU" 132 | } 133 | layer { 134 | bottom: "conv3_2" 135 | top: "conv3_3" 136 | name: "conv3_3" 137 | type: "Convolution" 138 | convolution_param { 139 | num_output: 256 140 | pad: 1 141 | kernel_size: 3 142 | } 143 | } 144 | layer { 145 | bottom: "conv3_3" 146 | top: "conv3_3" 147 | name: "relu3_3" 148 | type: "ReLU" 149 | } 150 | layer { 151 | bottom: "conv3_3" 152 | top: "conv3_4" 153 | name: "conv3_4" 154 | type: "Convolution" 155 | convolution_param { 156 | num_output: 256 157 | pad: 1 158 | kernel_size: 3 159 | } 160 | } 161 | layer { 162 | bottom: "conv3_4" 163 | top: "conv3_4" 164 | name: "relu3_4" 165 | type: "ReLU" 166 | } 167 | layer { 168 | bottom: "conv3_4" 169 | top: "pool3" 170 | name: "pool3" 171 | type: "Pooling" 172 | pooling_param { 173 | pool: MAX 174 | kernel_size: 2 175 | stride: 2 176 | } 177 | } 178 | layer { 179 | bottom: "pool3" 180 | top: "conv4_1" 181 | name: "conv4_1" 182 | type: "Convolution" 183 | convolution_param { 184 | num_output: 512 185 | pad: 1 186 | kernel_size: 3 187 | } 188 | } 189 | layer { 190 | bottom: "conv4_1" 191 | top: "conv4_1" 192 | name: "relu4_1" 193 | type: "ReLU" 194 | } 195 | layer { 196 | bottom: "conv4_1" 197 | top: "conv4_2" 198 | name: "conv4_2" 199 | type: "Convolution" 200 | convolution_param { 201 | num_output: 512 202 | pad: 1 203 | kernel_size: 3 204 | } 205 | } 206 | layer { 207 | bottom: "conv4_2" 208 | top: "conv4_2" 209 | name: "relu4_2" 210 | type: "ReLU" 211 | } 212 | layer { 213 | bottom: "conv4_2" 214 | top: "conv4_3" 215 | name: "conv4_3" 216 | type: "Convolution" 217 | convolution_param { 218 | num_output: 512 219 | pad: 1 220 | kernel_size: 3 221 | } 222 | } 223 | layer { 224 | bottom: "conv4_3" 225 | top: "conv4_3" 226 | name: "relu4_3" 227 | type: "ReLU" 228 | } 229 | layer { 230 | bottom: "conv4_3" 231 | top: "conv4_4" 232 | name: "conv4_4" 233 | type: "Convolution" 234 | convolution_param { 235 | num_output: 512 236 | pad: 1 237 | kernel_size: 3 238 | } 239 | } 240 | layer { 241 | bottom: "conv4_4" 242 | top: "conv4_4" 243 | name: "relu4_4" 244 | type: "ReLU" 245 | } 246 | layer { 247 | bottom: "conv4_4" 248 | top: "pool4" 249 | name: "pool4" 250 | type: "Pooling" 251 | pooling_param { 252 | pool: MAX 253 | kernel_size: 2 254 | stride: 2 255 | } 256 | } 257 | layer { 258 | bottom: "pool4" 259 | top: 
"conv5_1" 260 | name: "conv5_1" 261 | type: "Convolution" 262 | convolution_param { 263 | num_output: 512 264 | pad: 1 265 | kernel_size: 3 266 | } 267 | } 268 | layer { 269 | bottom: "conv5_1" 270 | top: "conv5_1" 271 | name: "relu5_1" 272 | type: "ReLU" 273 | } 274 | layer { 275 | bottom: "conv5_1" 276 | top: "conv5_2" 277 | name: "conv5_2" 278 | type: "Convolution" 279 | convolution_param { 280 | num_output: 512 281 | pad: 1 282 | kernel_size: 3 283 | } 284 | } 285 | layer { 286 | bottom: "conv5_2" 287 | top: "conv5_2" 288 | name: "relu5_2" 289 | type: "ReLU" 290 | } 291 | layer { 292 | bottom: "conv5_2" 293 | top: "conv5_3" 294 | name: "conv5_3" 295 | type: "Convolution" 296 | convolution_param { 297 | num_output: 512 298 | pad: 1 299 | kernel_size: 3 300 | } 301 | } 302 | layer { 303 | bottom: "conv5_3" 304 | top: "conv5_3" 305 | name: "relu5_3" 306 | type: "ReLU" 307 | } 308 | layer { 309 | bottom: "conv5_3" 310 | top: "conv5_4" 311 | name: "conv5_4" 312 | type: "Convolution" 313 | convolution_param { 314 | num_output: 512 315 | pad: 1 316 | kernel_size: 3 317 | } 318 | } 319 | layer { 320 | bottom: "conv5_4" 321 | top: "conv5_4" 322 | name: "relu5_4" 323 | type: "ReLU" 324 | } 325 | layer { 326 | bottom: "conv5_4" 327 | top: "pool5" 328 | name: "pool5" 329 | type: "Pooling" 330 | pooling_param { 331 | pool: MAX 332 | kernel_size: 2 333 | stride: 2 334 | } 335 | } 336 | layer { 337 | bottom: "pool5" 338 | top: "fc6" 339 | name: "fc6" 340 | type: "InnerProduct" 341 | inner_product_param { 342 | num_output: 4096 343 | } 344 | } 345 | layer { 346 | bottom: "fc6" 347 | top: "fc6" 348 | name: "relu6" 349 | type: "ReLU" 350 | } 351 | layer { 352 | bottom: "fc6" 353 | top: "fc6" 354 | name: "drop6" 355 | type: "Dropout" 356 | dropout_param { 357 | dropout_ratio: 0.5 358 | } 359 | } 360 | layer { 361 | bottom: "fc6" 362 | top: "fc7" 363 | name: "fc7" 364 | type: "InnerProduct" 365 | inner_product_param { 366 | num_output: 4096 367 | } 368 | } 369 | layer { 370 | bottom: "fc7" 371 | top: "fc7" 372 | name: "relu7" 373 | type: "ReLU" 374 | } 375 | layer { 376 | bottom: "fc7" 377 | top: "fc7" 378 | name: "drop7" 379 | type: "Dropout" 380 | dropout_param { 381 | dropout_ratio: 0.5 382 | } 383 | } 384 | layer { 385 | bottom: "fc7" 386 | top: "my-fc8" 387 | name: "my-fc8" 388 | type: "InnerProduct" 389 | inner_product_param { 390 | num_output: 3 391 | } 392 | } 393 | layer { 394 | name: "prob" 395 | type: "Softmax" 396 | bottom: "my-fc8" 397 | top: "prob" 398 | } 399 | -------------------------------------------------------------------------------- /VGG19/solver.prototxt: -------------------------------------------------------------------------------- 1 | # the definition of neural network model 2 | net: "train_val.prototxt" 3 | # test_iter is related to batch_size in test layer, test_iter * batch_size = the number of test data 4 | test_iter: 761 5 | # carry out test once every 5 training iterations 6 | test_interval: 1000 7 | # exclude test phase when test_initialization = false 8 | # test_initialization: false 9 | # display information once every 10 training iterations 10 | display: 1 11 | # 12 | average_loss: 40 13 | # the initial learning rate 14 | base_lr: 0.001 15 | lr_policy: "poly" 16 | stepsize: 320000 17 | gamma: 0.96 18 | # The max number of iterations 19 | max_iter: 5000001 20 | power: 1.0 21 | momentum: 0.9 22 | # weight decay item, in case of overfitting 23 | weight_decay: 0.0002 24 | # save once every 50 training iterations 25 | snapshot: 1000 26 | # save path 27 | snapshot_prefix: 
"vgg19" 28 | solver_mode: GPU 29 | -------------------------------------------------------------------------------- /VGG19/train_val.prototxt: -------------------------------------------------------------------------------- 1 | name: "VGG_19" 2 | layer { 3 | name: "data" 4 | type: "Data" 5 | top: "data" 6 | top: "label" 7 | include { 8 | phase: TRAIN 9 | } 10 | transform_param { 11 | mirror: true 12 | crop_size: 224 13 | mean_file: "mean.binaryproto" 14 | } 15 | data_param { 16 | source: "train_lmdb" 17 | batch_size: 36 18 | backend: LMDB 19 | } 20 | } 21 | layer { 22 | name: "data" 23 | type: "Data" 24 | top: "data" 25 | top: "label" 26 | include { 27 | phase: TEST 28 | } 29 | transform_param { 30 | mirror: false 31 | crop_size: 224 32 | mean_file: "mean.binaryproto" 33 | } 34 | data_param { 35 | source: "val_lmdb" 36 | batch_size: 1 37 | backend: LMDB 38 | } 39 | } 40 | layer { 41 | bottom: "data" 42 | top: "conv1_1" 43 | name: "conv1_1" 44 | type: "Convolution" 45 | convolution_param { 46 | num_output: 64 47 | pad: 1 48 | kernel_size: 3 49 | } 50 | } 51 | layer { 52 | bottom: "conv1_1" 53 | top: "conv1_1" 54 | name: "relu1_1" 55 | type: "ReLU" 56 | } 57 | layer { 58 | bottom: "conv1_1" 59 | top: "conv1_2" 60 | name: "conv1_2" 61 | type: "Convolution" 62 | convolution_param { 63 | num_output: 64 64 | pad: 1 65 | kernel_size: 3 66 | } 67 | } 68 | layer { 69 | bottom: "conv1_2" 70 | top: "conv1_2" 71 | name: "relu1_2" 72 | type: "ReLU" 73 | } 74 | layer { 75 | bottom: "conv1_2" 76 | top: "pool1" 77 | name: "pool1" 78 | type: "Pooling" 79 | pooling_param { 80 | pool: MAX 81 | kernel_size: 2 82 | stride: 2 83 | } 84 | } 85 | layer { 86 | bottom: "pool1" 87 | top: "conv2_1" 88 | name: "conv2_1" 89 | type: "Convolution" 90 | convolution_param { 91 | num_output: 128 92 | pad: 1 93 | kernel_size: 3 94 | } 95 | } 96 | layer { 97 | bottom: "conv2_1" 98 | top: "conv2_1" 99 | name: "relu2_1" 100 | type: "ReLU" 101 | } 102 | layer { 103 | bottom: "conv2_1" 104 | top: "conv2_2" 105 | name: "conv2_2" 106 | type: "Convolution" 107 | convolution_param { 108 | num_output: 128 109 | pad: 1 110 | kernel_size: 3 111 | } 112 | } 113 | layer { 114 | bottom: "conv2_2" 115 | top: "conv2_2" 116 | name: "relu2_2" 117 | type: "ReLU" 118 | } 119 | layer { 120 | bottom: "conv2_2" 121 | top: "pool2" 122 | name: "pool2" 123 | type: "Pooling" 124 | pooling_param { 125 | pool: MAX 126 | kernel_size: 2 127 | stride: 2 128 | } 129 | } 130 | layer { 131 | bottom: "pool2" 132 | top: "conv3_1" 133 | name: "conv3_1" 134 | type: "Convolution" 135 | convolution_param { 136 | num_output: 256 137 | pad: 1 138 | kernel_size: 3 139 | } 140 | } 141 | layer { 142 | bottom: "conv3_1" 143 | top: "conv3_1" 144 | name: "relu3_1" 145 | type: "ReLU" 146 | } 147 | layer { 148 | bottom: "conv3_1" 149 | top: "conv3_2" 150 | name: "conv3_2" 151 | type: "Convolution" 152 | convolution_param { 153 | num_output: 256 154 | pad: 1 155 | kernel_size: 3 156 | } 157 | } 158 | layer { 159 | bottom: "conv3_2" 160 | top: "conv3_2" 161 | name: "relu3_2" 162 | type: "ReLU" 163 | } 164 | layer { 165 | bottom: "conv3_2" 166 | top: "conv3_3" 167 | name: "conv3_3" 168 | type: "Convolution" 169 | convolution_param { 170 | num_output: 256 171 | pad: 1 172 | kernel_size: 3 173 | } 174 | } 175 | layer { 176 | bottom: "conv3_3" 177 | top: "conv3_3" 178 | name: "relu3_3" 179 | type: "ReLU" 180 | } 181 | layer { 182 | bottom: "conv3_3" 183 | top: "conv3_4" 184 | name: "conv3_4" 185 | type: "Convolution" 186 | convolution_param { 187 | num_output: 256 188 | pad: 1 189 
| kernel_size: 3 190 | } 191 | } 192 | layer { 193 | bottom: "conv3_4" 194 | top: "conv3_4" 195 | name: "relu3_4" 196 | type: "ReLU" 197 | } 198 | layer { 199 | bottom: "conv3_4" 200 | top: "pool3" 201 | name: "pool3" 202 | type: "Pooling" 203 | pooling_param { 204 | pool: MAX 205 | kernel_size: 2 206 | stride: 2 207 | } 208 | } 209 | layer { 210 | bottom: "pool3" 211 | top: "conv4_1" 212 | name: "conv4_1" 213 | type: "Convolution" 214 | convolution_param { 215 | num_output: 512 216 | pad: 1 217 | kernel_size: 3 218 | } 219 | } 220 | layer { 221 | bottom: "conv4_1" 222 | top: "conv4_1" 223 | name: "relu4_1" 224 | type: "ReLU" 225 | } 226 | layer { 227 | bottom: "conv4_1" 228 | top: "conv4_2" 229 | name: "conv4_2" 230 | type: "Convolution" 231 | convolution_param { 232 | num_output: 512 233 | pad: 1 234 | kernel_size: 3 235 | } 236 | } 237 | layer { 238 | bottom: "conv4_2" 239 | top: "conv4_2" 240 | name: "relu4_2" 241 | type: "ReLU" 242 | } 243 | layer { 244 | bottom: "conv4_2" 245 | top: "conv4_3" 246 | name: "conv4_3" 247 | type: "Convolution" 248 | convolution_param { 249 | num_output: 512 250 | pad: 1 251 | kernel_size: 3 252 | } 253 | } 254 | layer { 255 | bottom: "conv4_3" 256 | top: "conv4_3" 257 | name: "relu4_3" 258 | type: "ReLU" 259 | } 260 | layer { 261 | bottom: "conv4_3" 262 | top: "conv4_4" 263 | name: "conv4_4" 264 | type: "Convolution" 265 | convolution_param { 266 | num_output: 512 267 | pad: 1 268 | kernel_size: 3 269 | } 270 | } 271 | layer { 272 | bottom: "conv4_4" 273 | top: "conv4_4" 274 | name: "relu4_4" 275 | type: "ReLU" 276 | } 277 | layer { 278 | bottom: "conv4_4" 279 | top: "pool4" 280 | name: "pool4" 281 | type: "Pooling" 282 | pooling_param { 283 | pool: MAX 284 | kernel_size: 2 285 | stride: 2 286 | } 287 | } 288 | layer { 289 | bottom: "pool4" 290 | top: "conv5_1" 291 | name: "conv5_1" 292 | type: "Convolution" 293 | convolution_param { 294 | num_output: 512 295 | pad: 1 296 | kernel_size: 3 297 | } 298 | } 299 | layer { 300 | bottom: "conv5_1" 301 | top: "conv5_1" 302 | name: "relu5_1" 303 | type: "ReLU" 304 | } 305 | layer { 306 | bottom: "conv5_1" 307 | top: "conv5_2" 308 | name: "conv5_2" 309 | type: "Convolution" 310 | convolution_param { 311 | num_output: 512 312 | pad: 1 313 | kernel_size: 3 314 | } 315 | } 316 | layer { 317 | bottom: "conv5_2" 318 | top: "conv5_2" 319 | name: "relu5_2" 320 | type: "ReLU" 321 | } 322 | layer { 323 | bottom: "conv5_2" 324 | top: "conv5_3" 325 | name: "conv5_3" 326 | type: "Convolution" 327 | convolution_param { 328 | num_output: 512 329 | pad: 1 330 | kernel_size: 3 331 | } 332 | } 333 | layer { 334 | bottom: "conv5_3" 335 | top: "conv5_3" 336 | name: "relu5_3" 337 | type: "ReLU" 338 | } 339 | layer { 340 | bottom: "conv5_3" 341 | top: "conv5_4" 342 | name: "conv5_4" 343 | type: "Convolution" 344 | convolution_param { 345 | num_output: 512 346 | pad: 1 347 | kernel_size: 3 348 | } 349 | } 350 | layer { 351 | bottom: "conv5_4" 352 | top: "conv5_4" 353 | name: "relu5_4" 354 | type: "ReLU" 355 | } 356 | layer { 357 | bottom: "conv5_4" 358 | top: "pool5" 359 | name: "pool5" 360 | type: "Pooling" 361 | pooling_param { 362 | pool: MAX 363 | kernel_size: 2 364 | stride: 2 365 | } 366 | } 367 | layer { 368 | bottom: "pool5" 369 | top: "fc6" 370 | name: "fc6" 371 | type: "InnerProduct" 372 | inner_product_param { 373 | num_output: 4096 374 | } 375 | } 376 | layer { 377 | bottom: "fc6" 378 | top: "fc6" 379 | name: "relu6" 380 | type: "ReLU" 381 | } 382 | layer { 383 | bottom: "fc6" 384 | top: "fc6" 385 | name: "drop6" 386 | 
type: "Dropout" 387 | dropout_param { 388 | dropout_ratio: 0.5 389 | } 390 | } 391 | layer { 392 | bottom: "fc6" 393 | top: "fc7" 394 | name: "fc7" 395 | type: "InnerProduct" 396 | inner_product_param { 397 | num_output: 4096 398 | } 399 | } 400 | layer { 401 | bottom: "fc7" 402 | top: "fc7" 403 | name: "relu7" 404 | type: "ReLU" 405 | } 406 | layer { 407 | bottom: "fc7" 408 | top: "fc7" 409 | name: "drop7" 410 | type: "Dropout" 411 | dropout_param { 412 | dropout_ratio: 0.5 413 | } 414 | } 415 | layer { 416 | bottom: "fc7" 417 | top: "my-fc8" 418 | name: "my-fc8" 419 | type: "InnerProduct" 420 | inner_product_param { 421 | num_output: 3 422 | } 423 | } 424 | layer { 425 | name: "loss1/loss1" 426 | type: "SoftmaxWithLoss" 427 | bottom: "my-fc8" 428 | bottom: "label" 429 | top: "loss1/loss1" 430 | loss_weight: 0.3 431 | } 432 | layer { 433 | name: "accuracy" 434 | type: "Accuracy" 435 | bottom: "my-fc8" 436 | bottom: "label" 437 | top: "accuracy" 438 | include { 439 | phase: TEST 440 | } 441 | } 442 | --------------------------------------------------------------------------------