├── weights
│   └── download_weights.sh
├── caffe_layers
│   ├── upsample_layer
│   │   ├── upsample_layer.hpp
│   │   ├── upsample_layer.cpp
│   │   └── upsample_layer.cu
│   ├── mish_layer
│   │   ├── mish_layer.cpp
│   │   ├── mish_layer.hpp
│   │   └── mish_layer.cu
│   └── pooling_layer
│       └── pooling_layer.cpp
├── README.md
├── cfg
│   ├── yolov3-tiny.cfg
│   ├── mobilenet_v1_yolov3.cfg
│   ├── mobilenet_v2_yolov3.cfg
│   ├── yolov3.cfg
│   ├── yolov3-spp.cfg
│   └── yolov4.cfg
├── prototxt.py
├── cfg.py
├── prototxt
│   ├── yolov3-tiny.prototxt
│   └── mobilenet_v1_yolov3.prototxt
└── darknet2caffe.py
/weights/download_weights.sh: -------------------------------------------------------------------------------- 1 | #yolov3 2 | wget -c https://pjreddie.com/media/files/yolov3.weights 3 | 4 | #yolov3-spp 5 | wget -c https://pjreddie.com/media/files/yolov3-spp.weights 6 | 7 | #yolov3-tiny 8 | wget -c https://pjreddie.com/media/files/yolov3-tiny.weights 9 | 10 | #yolov4 11 | wget -c https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_optimal/yolov4.weights 12 | -------------------------------------------------------------------------------- /caffe_layers/upsample_layer/upsample_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_UPSAMPLE_LAYER_HPP_ 2 | #define CAFFE_UPSAMPLE_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | namespace caffe { 11 | 12 | template <typename Dtype> 13 | class UpsampleLayer : public Layer<Dtype> { 14 | public: 15 | explicit UpsampleLayer(const LayerParameter& param) 16 | : Layer<Dtype>(param) {} 17 | virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom, 18 | const vector<Blob<Dtype>*>& top); 19 | virtual void Reshape(const vector<Blob<Dtype>*>& bottom, 20 | const vector<Blob<Dtype>*>& top); 21 | 22 | virtual inline const char* type() const { return "Upsample"; } 23 | virtual inline int MinBottomBlobs() const { return 1; } 24 | virtual inline int MaxBottomBlobs() const { return 1; } 25 | virtual inline int ExactNumTopBlobs() const { return 1; } 26 | 27 | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom, 28 | const vector<Blob<Dtype>*>& top); 29 | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom, 30 | const vector<Blob<Dtype>*>& top); 31 | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top, 32 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); 33 | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, 34 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); 35 | 36 | private: 37 | int scale_; 38 | }; 39 | 40 | 41 | 42 | } // namespace caffe 43 | 44 | #endif // CAFFE_UPSAMPLE_LAYER_HPP_ 45 | --------------------------------------------------------------------------------
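For orientation, this is the kind of prototxt stanza darknet2caffe.py emits for a darknet `[upsample] stride=2` block once the layer above is installed (a minimal sketch; the layer and blob names here are made-up placeholders, not taken from a real converted model):

```
layer {
    bottom: "layer85-conv"
    top: "layer86-upsample"
    name: "layer86-upsample"
    type: "Upsample"
    upsample_param {
        scale: 2
    }
}
```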
/caffe_layers/mish_layer/mish_layer.cpp: -------------------------------------------------------------------------------- 1 | #include <cmath> 2 | #include <vector> 3 | 4 | #include "caffe/layers/mish_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | 9 | template <typename Dtype> 10 | inline Dtype tanh_activate(Dtype x) { return (2 / (1 + expf(-2 * x)) - 1); } 11 | 12 | 13 | template <typename Dtype> 14 | inline Dtype softplus_activate(Dtype x, float threshold) { 15 | if (x > threshold) return x; // too large 16 | else if (x < -threshold) return expf(x); // too small 17 | return logf(expf(x) + 1); 18 | } 19 | 20 | template <typename Dtype> 21 | void MishLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom, 22 | const vector<Blob<Dtype>*>& top) { 23 | const Dtype* bottom_data = bottom[0]->cpu_data(); 24 | Dtype* top_data = top[0]->mutable_cpu_data(); 25 | const int count = bottom[0]->count(); 26 | 27 | const float MISH_THRESHOLD = 20; 28 | for (int i = 0; i < count; ++i) { 29 | float x_val = bottom_data[i]; 30 | top_data[i] = x_val * tanh_activate(softplus_activate(x_val, MISH_THRESHOLD)); 31 | } 32 | } 33 | 34 | template <typename Dtype> 35 | void MishLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top, 36 | const vector<bool>& propagate_down, 37 | const vector<Blob<Dtype>*>& bottom) { 38 | if (propagate_down[0]) { 39 | const Dtype* bottom_data = bottom[0]->cpu_data(); 40 | const Dtype* top_diff = top[0]->cpu_diff(); 41 | Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); 42 | const int count = bottom[0]->count(); 43 | for (int i = 0; i < count; ++i) { 44 | // const Dtype sigmoid_x = top_data[i]; 45 | // bottom_diff[i] = top_diff[i] * sigmoid_x * (1. - sigmoid_x); 46 | 47 | const float MISH_THRESHOLD = 20.0f; 48 | // implementation from TensorFlow: https://github.com/tensorflow/addons/commit/093cdfa85d334cbe19a37624c33198f3140109ed 49 | // implementation from Pytorch: https://github.com/thomasbrandon/mish-cuda/blob/master/csrc/mish.h#L26-L31 50 | Dtype inp = bottom_data[i]; // the gradient is a function of the layer input x, not the output y 51 | const Dtype sp = softplus_activate(inp, MISH_THRESHOLD); 52 | const Dtype grad_sp = 1 - exp(-sp); 53 | const Dtype tsp = tanh(sp); 54 | const Dtype grad_tsp = (1 - tsp*tsp) * grad_sp; 55 | const Dtype grad = inp * grad_tsp + tsp; 56 | bottom_diff[i] = top_diff[i] * grad; 57 | 58 | } 59 | } 60 | } 61 | 62 | #ifdef CPU_ONLY 63 | STUB_GPU(MishLayer); 64 | #endif 65 | 66 | INSTANTIATE_CLASS(MishLayer); 67 | REGISTER_LAYER_CLASS(Mish); 68 | 69 | } // namespace caffe 70 | --------------------------------------------------------------------------------
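For reference, the function implemented above and the gradient computed in `Backward_cpu`, written out with the softplus s(x):

```latex
y = \operatorname{mish}(x) = x \tanh(s(x)), \qquad s(x) = \ln(1 + e^{x})
\frac{dy}{dx} = \tanh(s(x)) + x \left(1 - \tanh^2(s(x))\right) \sigma(x)
```

The code's `grad_sp = 1 - exp(-sp)` is exactly the sigmoid sigma(x), since e^(-s(x)) = 1/(1 + e^x); `tsp + inp * grad_tsp` is then the full derivative.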
/caffe_layers/mish_layer/mish_layer.hpp: -------------------------------------------------------------------------------- 1 | #ifndef CAFFE_MISH_LAYER_HPP_ 2 | #define CAFFE_MISH_LAYER_HPP_ 3 | 4 | #include <vector> 5 | 6 | #include "caffe/blob.hpp" 7 | #include "caffe/layer.hpp" 8 | #include "caffe/proto/caffe.pb.h" 9 | 10 | #include "caffe/layers/neuron_layer.hpp" 11 | 12 | namespace caffe { 13 | 14 | /** 15 | * @brief Mish non-linearity @f$ 16 | * y = x \tanh(\ln(1 + \exp(x))) 17 | * @f$, the smooth self-gated activation used by YOLOv4. 18 | * 19 | * Unlike ReLU it is smooth and non-monotonic, and it lets small 20 | * negative values through instead of zeroing them. 21 | */ 22 | template <typename Dtype> 23 | class MishLayer : public NeuronLayer<Dtype> { 24 | public: 25 | explicit MishLayer(const LayerParameter& param) 26 | : NeuronLayer<Dtype>(param) {} 27 | 28 | virtual inline const char* type() const { return "Mish"; } 29 | 30 | protected: 31 | /** 32 | * @param bottom input Blob vector (length 1) 33 | * -# @f$ (N \times C \times H \times W) @f$ 34 | * the inputs @f$ x @f$ 35 | * @param top output Blob vector (length 1) 36 | * -# @f$ (N \times C \times H \times W) @f$ 37 | * the computed outputs @f$ 38 | * y = x \tanh(\ln(1 + \exp(x))) 39 | * @f$ 40 | */ 41 | virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom, 42 | const vector<Blob<Dtype>*>& top); 43 | virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom, 44 | const vector<Blob<Dtype>*>& top); 45 | 46 | /** 47 | * @brief Computes the error gradient w.r.t. the mish inputs. 48 | * 49 | * @param top output Blob vector (length 1), providing the error gradient with 50 | * respect to the outputs 51 | * -# @f$ (N \times C \times H \times W) @f$ 52 | * containing error gradients @f$ \frac{\partial E}{\partial y} @f$ 53 | * with respect to computed outputs @f$ y @f$ 54 | * @param propagate_down see Layer::Backward. 55 | * @param bottom input Blob vector (length 1) 56 | * -# @f$ (N \times C \times H \times W) @f$ 57 | * the inputs @f$ x @f$; Backward fills their diff with 58 | * gradients @f$ 59 | * \frac{\partial E}{\partial x} = \frac{\partial E}{\partial y} 60 | * \left( \tanh(s) + x \, \sigma(x) (1 - \tanh^2(s)) \right), \quad s = \ln(1 + \exp(x)) 61 | * @f$ if propagate_down[0] 62 | */ 63 | virtual void Backward_cpu(const vector<Blob<Dtype>*>& top, 64 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); 65 | virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, 66 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); 67 | }; 68 | 69 | } // namespace caffe 70 | 71 | #endif // CAFFE_MISH_LAYER_HPP_ 72 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Requirements 2 | 3 | Python 2.7 4 | 5 | Caffe 6 | 7 | PyTorch >= 0.4.0 8 | # Add Caffe Layers 9 | 1. Copy `caffe_layers/mish_layer/mish_layer.hpp` and `caffe_layers/upsample_layer/upsample_layer.hpp` into `include/caffe/layers/`. 10 | 2. Copy `caffe_layers/mish_layer/mish_layer.cpp`, `caffe_layers/mish_layer/mish_layer.cu`, `caffe_layers/upsample_layer/upsample_layer.cpp` and `caffe_layers/upsample_layer/upsample_layer.cu` into `src/caffe/layers/`. 11 | 3. Copy `caffe_layers/pooling_layer/pooling_layer.cpp` into `src/caffe/layers/`. Note: this modified pooling layer is only needed for yolov3-tiny; use with caution. 12 | 4. Add the code below to `src/caffe/proto/caffe.proto`. 13 | 14 | ``` 15 | // LayerParameter next available layer-specific ID: 147 (last added: recurrent_param) 16 | message LayerParameter { 17 | optional TileParameter tile_param = 138; 18 | optional VideoDataParameter video_data_param = 207; 19 | optional WindowDataParameter window_data_param = 129; 20 | ++optional UpsampleParameter upsample_param = 149; // added by chen for YOLOv3; make sure ID 149 does not clash with an existing field. 21 | ++optional MishParameter mish_param = 150; // added by chen for YOLOv4; make sure ID 150 does not clash with an existing field. 22 | } 23 | 24 | // added by chen for YOLOv3 25 | ++message UpsampleParameter{ 26 | ++ optional int32 scale = 1 [default = 1]; 27 | ++} 28 | 29 | // Message that stores parameters used by MishLayer 30 | ++message MishParameter { 31 | ++ enum Engine { 32 | ++ DEFAULT = 0; 33 | ++ CAFFE = 1; 34 | ++ CUDNN = 2; 35 | ++ } 36 | ++ optional Engine engine = 2 [default = DEFAULT]; 37 | ++} 38 | ``` 39 | 5. Rebuild Caffe. 40 | 41 | # Demo 42 | $ python darknet2caffe.py cfg[in] weights[in] prototxt[out] caffemodel[out] 43 | 44 | Example 45 | ``` 46 | python darknet2caffe.py cfg/yolov4.cfg weights/yolov4.weights prototxt/yolov4.prototxt caffemodel/yolov4.caffemodel 47 | ``` 48 | A partial log is shown below. 49 | ``` 50 | I0522 10:19:19.015708 25251 net.cpp:228] layer1-act does not need backward computation. 51 | I0522 10:19:19.015712 25251 net.cpp:228] layer1-scale does not need backward computation. 52 | I0522 10:19:19.015714 25251 net.cpp:228] layer1-bn does not need backward computation. 53 | I0522 10:19:19.015718 25251 net.cpp:228] layer1-conv does not need backward computation. 54 | I0522 10:19:19.015722 25251 net.cpp:228] input does not need backward computation. 55 | I0522 10:19:19.015725 25251 net.cpp:270] This network produces output layer139-conv 56 | I0522 10:19:19.015731 25251 net.cpp:270] This network produces output layer150-conv 57 | I0522 10:19:19.015736 25251 net.cpp:270] This network produces output layer161-conv 58 | I0522 10:19:19.015911 25251 net.cpp:283] Network initialization done. 59 | unknow layer type yolo 60 | unknow layer type yolo 61 | save prototxt to prototxt/yolov4.prototxt 62 | save caffemodel to caffemodel/yolov4.caffemodel 63 | 64 | ``` 65 |
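A quick way to confirm a conversion end to end is to load the outputs with pycaffe and run a dummy forward pass (a minimal sketch, assuming a Caffe build that includes the layers above and that the generated prototxt names its input blob `data`; the paths match the example command):

```python
import numpy as np
import caffe

net = caffe.Net('prototxt/yolov4.prototxt', 'caffemodel/yolov4.caffemodel', caffe.TEST)
# Feed one dummy 608x608 image and run the net.
net.blobs['data'].data[...] = np.random.rand(1, 3, 608, 608).astype(np.float32)
out = net.forward()
for name, blob in out.items():
    print name, blob.shape  # expect the three YOLO head outputs, e.g. (1, 255, 19, 19)
```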
-------------------------------------------------------------------------------- /cfg/yolov3-tiny.cfg: -------------------------------------------------------------------------------- 1 | [net] 2 | # Testing 3 | batch=1 4 | subdivisions=1 5 | # Training 6 | # batch=64 7 | # subdivisions=2 8 | width=416 9 | height=416 10 | channels=3 11 | momentum=0.9 12 | decay=0.0005 13 | angle=0 14 | saturation = 1.5 15 | exposure = 1.5 16 | hue=.1 17 | 18 | learning_rate=0.001 19 | burn_in=1000 20 | max_batches = 500200 21 | policy=steps 22 | steps=400000,450000 23 | scales=.1,.1 24 | 25 | [convolutional] 26 | batch_normalize=1 27 | filters=16 28 | size=3 29 | stride=1 30 | pad=1 31 | activation=leaky 32 | 33 | [maxpool] 34 | size=2 35 | stride=2 36 | 37 | [convolutional] 38 | batch_normalize=1 39 | filters=32 40 | size=3 41 | stride=1 42 | pad=1 43 | activation=leaky 44 | 45 | [maxpool] 46 | size=2 47 | stride=2 48 | 49 | [convolutional] 50 | batch_normalize=1 51 | filters=64 52 | size=3 53 | stride=1 54 | pad=1 55 | activation=leaky 56 | 57 | [maxpool] 58 | size=2 59 | stride=2 60 | 61 | [convolutional] 62 | batch_normalize=1 63 | filters=128 64 | size=3 65 | stride=1 66 | pad=1 67 | activation=leaky 68 | 69 | [maxpool] 70 | size=2 71 | stride=2 72 | 73 | [convolutional] 74 | batch_normalize=1 75 | filters=256 76 | size=3 77 | stride=1 78 | pad=1 79 | activation=leaky 80 | 81 | [maxpool] 82 | size=2 83 | stride=2 84 | 85 | [convolutional] 86 | batch_normalize=1 87 | filters=512 88 | size=3 89 | stride=1 90 | pad=1 91 | activation=leaky 92 | 93 | [maxpool] 94 | size=2 95 | stride=1 96 | 97 | [convolutional] 98 | batch_normalize=1 99 | filters=1024 100 | size=3 101 | stride=1 102 | pad=1 103 | activation=leaky 104 | 105 | ########### 106 | 107 | [convolutional] 108 | batch_normalize=1 109 | filters=256 110 | size=1 111 | stride=1 112 | pad=1 113 | activation=leaky 114 | 115 | [convolutional] 116 | batch_normalize=1 117 | filters=512 118 | size=3 119 | stride=1 120 | pad=1 121 | activation=leaky 122 | 123 | [convolutional] 124 | size=1 125 | stride=1 126 | pad=1 127 | filters=255 128 | activation=linear 129 | 130 | 131 | 132 | [yolo] 133 | mask = 3,4,5 134 | anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319 135 | classes=80 136 | num=6 137 | jitter=.3 138 | ignore_thresh = .7 139 | truth_thresh = 1 140 | random=1 141 | 142 | [route] 143 | layers = -4 144 | 145 | [convolutional] 146 | batch_normalize=1 147 | filters=128 148 | size=1 149 | stride=1 150 | pad=1 151 | activation=leaky 152 | 153 | [upsample] 154 | stride=2 155 | 156 | [route] 157 | layers = -1, 8 158 | 159 | [convolutional] 160 | batch_normalize=1 161 | filters=256 162 | size=3 163 | stride=1 164 | pad=1 165 | activation=leaky 166 | 167 | [convolutional] 168 | size=1 169 | stride=1 170 | pad=1 171 | filters=255 172 | activation=linear 173 | 174 | [yolo] 175 | mask = 0,1,2 176 | anchors = 10,14, 23,27, 37,58, 81,82, 135,169, 344,319 177 | classes=80 178 | num=6 179 | jitter=.3 180 | ignore_thresh = .7 181 | truth_thresh = 1 182 | random=1 183 | --------------------------------------------------------------------------------
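The repository's cfg.py can pretty-print any of these configs as a layer table, which is handy for checking the indices used by `route` and `shortcut` blocks (usage taken from cfg.py's own `__main__` block; PyTorch must be importable, since cfg.py imports torch):

```python
from cfg import parse_cfg, print_cfg_nicely

# One row per layer: index, type, kernel/stride, input and output shapes.
blocks = parse_cfg('cfg/yolov3-tiny.cfg')
print_cfg_nicely(blocks)
```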
/caffe_layers/upsample_layer/upsample_layer.cpp: -------------------------------------------------------------------------------- 1 | #include <vector> 2 | #include "caffe/layers/upsample_layer.hpp" 3 | 4 | namespace caffe { 5 | 6 | template <typename Dtype> 7 | void UpsampleLayer<Dtype>::LayerSetUp( 8 | const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { 9 | UpsampleParameter upsample_param = this->layer_param_.upsample_param(); 10 | scale_ = upsample_param.scale(); 11 | } 12 | 13 | template <typename Dtype> 14 | void UpsampleLayer<Dtype>::Reshape( 15 | const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { 16 | vector<int> out_shape; 17 | for (int i = 0; i < bottom[0]->num_axes(); i++) { 18 | out_shape.push_back(bottom[0]->shape(i)); 19 | } 20 | 21 | out_shape[bottom[0]->num_axes() - 1] *= scale_; 22 | out_shape[bottom[0]->num_axes() - 2] *= scale_; 23 | top[0]->Reshape(out_shape); 24 | } 25 | 26 | template <typename Dtype> 27 | void UpsampleLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom, 28 | const vector<Blob<Dtype>*>& top) { 29 | 30 | int N = top[0]->shape(0); 31 | int C = top[0]->shape(1); 32 | int H = top[0]->shape(2); 33 | int W = top[0]->shape(3); 34 | 35 | const Dtype *input = bottom[0]->cpu_data(); 36 | Dtype *output = top[0]->mutable_cpu_data(); 37 | for (int n = 0; n < N; n++) { 38 | for (int c = 0; c < C; c++) { 39 | for (int h = 0; h < H; h++) { 40 | for (int w = 0; w < W; w++) { 41 | int nw = w/scale_; 42 | int nh = h/scale_; 43 | int out_idx = (((n * C + c) * H) + h) * W + w; 44 | int in_idx = (((n * C + c) * (H / scale_)) + nh) * (W / scale_) + nw; 45 | output[out_idx] = input[in_idx]; 46 | } 47 | } 48 | } 49 | } 50 | } 51 | 52 | template <typename Dtype> 53 | void UpsampleLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top, 54 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { 55 | int N = bottom[0]->shape(0); 56 | int C = bottom[0]->shape(1); 57 | int H = bottom[0]->shape(2); 58 | int W = bottom[0]->shape(3); 59 | const Dtype *output_grad = top[0]->cpu_diff(); 60 | Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); 61 | caffe_set(bottom[0]->count(), Dtype(0), bottom_diff); 62 | for (int n = 0; n < N; n++) { 63 | for (int c = 0; c < C; c++) { 64 | for (int h = 0; h < H; h++) { 65 | for (int w = 0; w < W; w++) { 66 | for (int i = 0; i < scale_; i++) { 67 | for (int j = 0; j < scale_; j++) { 68 | int nw = w * scale_ + i; 69 | int nh = h * scale_ + j; 70 | int out_idx = (((n * C + c) * H) + h) * W + w; 71 | int in_idx = (((n * C + c) * (H * scale_)) 72 | + nh) * (W * scale_) + nw; 73 | bottom_diff[out_idx] += output_grad[in_idx]; 74 | } 75 | } 76 | } 77 | } 78 | } 79 | } 80 | } 81 | 82 | #ifdef CPU_ONLY 83 | STUB_GPU(UpsampleLayer); 84 | #endif 85 | 86 | INSTANTIATE_CLASS(UpsampleLayer); 87 | REGISTER_LAYER_CLASS(Upsample); 88 | 89 | } // namespace caffe 90 | 91 | -------------------------------------------------------------------------------- /caffe_layers/upsample_layer/upsample_layer.cu: -------------------------------------------------------------------------------- 1 | #include <vector> 2 | 3 | #include "caffe/filler.hpp" 4 | #include "caffe/layers/upsample_layer.hpp" 5 | #include "caffe/util/math_functions.hpp" 6 | 7 | namespace caffe { 8 | 9 | __device__ int translate_idx(int ii, int d1, int d2, int d3, int scale_factor) { 10 | int x, y, z, w; 11 | w = ii % d3; 12 | ii = ii/d3; 13 | z = ii % d2; 14 | ii = ii/d2; 15 | y = ii % d1; 16 | ii = ii/d1; 17 | x = ii; 18 | w = w/scale_factor; 19 | z = z/scale_factor; 20 | d2 /= scale_factor; 21 | d3 /= scale_factor; 22 | return (((x*d1+y)*d2)+z)*d3+w; 23 | } 24 | 25 | __device__ int translate_idx_inv( 26 | int ii, int d1, int d2, int d3, int scale_factor, int off_x, int off_y) { 27 | int x, y, z, w; 28 | w = ii % d3; 29 | ii = ii/d3; 30 | z = ii % d2; 31 | ii = ii/d2; 32 | y = ii % d1; 33 | ii = ii/d1; 34 | x = ii; 35 | w = w*scale_factor+off_x; 36 | z = z*scale_factor+off_y; 37 | d2 *= scale_factor;
38 | d3 *= scale_factor; 39 | return (((x*d1+y)*d2)+z)*d3+w; 40 | } 41 | 42 | template <typename Dtype> 43 | __global__ void upscale(const Dtype *input, Dtype *output, 44 | int no_elements, int scale_factor, int d1, int d2, int d3) { 45 | int ii = threadIdx.x + blockDim.x * blockIdx.x; 46 | if (ii >= no_elements) return; 47 | int ipidx = translate_idx(ii, d1, d2, d3, scale_factor); 48 | output[ii]=input[ipidx]; 49 | } 50 | 51 | template <typename Dtype> 52 | __global__ void downscale(Dtype *gradInput_data, const Dtype *gradOutput_data, 53 | int no_elements, int scale_factor, int d1, int d2, 54 | int d3) { 55 | int ii = threadIdx.x + blockDim.x * blockIdx.x; 56 | if (ii >= no_elements) return; 57 | for (int i = 0; i < scale_factor; i++) { 58 | for (int j = 0; j < scale_factor; j++) { 59 | int ipidx = translate_idx_inv(ii, d1, d2, d3, scale_factor, i, j); 60 | gradInput_data[ii] += gradOutput_data[ipidx]; 61 | } 62 | } 63 | } 64 | 65 | 66 | 67 | template <typename Dtype> 68 | void UpsampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, 69 | const vector<Blob<Dtype>*>& top) { 70 | int d1, d2, d3; 71 | 72 | d1 = top[0]->shape(1); 73 | d2 = top[0]->shape(2); 74 | d3 = top[0]->shape(3); 75 | 76 | 77 | int no_elements = top[0]->count(); 78 | 79 | upscale<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) 80 | <<<CAFFE_GET_BLOCKS(no_elements), CAFFE_CUDA_NUM_THREADS>>>( 81 | bottom[0]->gpu_data(), 82 | top[0]->mutable_gpu_data(), no_elements, scale_, d1, d2, d3); 83 | } 84 | 85 | template <typename Dtype> 86 | void UpsampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, 87 | const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { 88 | int d1, d2, d3; 89 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 90 | d1 = bottom[0]->shape(1); 91 | d2 = bottom[0]->shape(2); 92 | d3 = bottom[0]->shape(3); 93 | int no_elements = bottom[0]->count(); 94 | caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom_diff); 95 | downscale<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) 96 | <<<CAFFE_GET_BLOCKS(no_elements), CAFFE_CUDA_NUM_THREADS>>>( 97 | bottom_diff, top[0]->gpu_diff(), no_elements, scale_, d1, d2, d3); 98 | } 99 | 100 | INSTANTIATE_LAYER_GPU_FUNCS(UpsampleLayer); 101 | 102 | } // namespace caffe 103 | 104 | --------------------------------------------------------------------------------
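Both .cu files size their launches with Caffe's standard helpers so that fixed-size blocks cover all elements. A sketch of what `CAFFE_GET_BLOCKS(n)` and `CAFFE_CUDA_NUM_THREADS` work out to, assuming the stock definitions in `caffe/util/device_alternate.hpp` (512 is Caffe's usual default thread count and may differ in a custom build):

```python
CAFFE_CUDA_NUM_THREADS = 512

def caffe_get_blocks(n):
    # Number of blocks needed to cover n elements, rounded up.
    return (n + CAFFE_CUDA_NUM_THREADS - 1) // CAFFE_CUDA_NUM_THREADS

# e.g. a 1 x 255 x 19 x 19 blob: 92055 elements -> 180 blocks of 512 threads
```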
/caffe_layers/mish_layer/mish_layer.cu: -------------------------------------------------------------------------------- 1 | #include <cmath> 2 | #include <vector> 3 | 4 | #include "caffe/layers/mish_layer.hpp" 5 | 6 | namespace caffe { 7 | 8 | template <typename Dtype> 9 | __device__ Dtype tanh_activate_kernel(Dtype x){return (2/(1 + expf(-2*x)) - 1);} 10 | 11 | 12 | template <typename Dtype> 13 | __device__ Dtype softplus_kernel(Dtype x, float threshold = 20) { 14 | if (x > threshold) return x; // too large 15 | else if (x < -threshold) return expf(x); // too small 16 | return logf(expf(x) + 1); 17 | } 18 | 19 | /*__device__ float tanh_activate_kernel(float x){return (2/(1 + expf(-2*x)) - 1);} 20 | 21 | __device__ float softplus_kernel(float x, float threshold = 20) { 22 | if (x > threshold) return x; // too large 23 | else if (x < -threshold) return expf(x); // too small 24 | return logf(expf(x) + 1); 25 | }*/ 26 | 27 | template <typename Dtype> 28 | __global__ void MishForward(const int n, const Dtype* in, Dtype* out) { 29 | CUDA_KERNEL_LOOP(index, n) { 30 | out[index] = in[index] * tanh_activate_kernel(softplus_kernel(in[index])); 31 | } 32 | } 33 | 34 | template <typename Dtype> 35 | void MishLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, 36 | const vector<Blob<Dtype>*>& top) { 37 | const Dtype* bottom_data = bottom[0]->gpu_data(); 38 | Dtype* top_data = top[0]->mutable_gpu_data(); 39 | const int count = bottom[0]->count(); 40 | // NOLINT_NEXT_LINE(whitespace/operators) 41 | MishForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( 42 | count, bottom_data, top_data); 43 | CUDA_POST_KERNEL_CHECK; 44 | // << " count: " << count << " bottom_data: " 45 | // << (unsigned long)bottom_data 46 | // << " top_data: " << (unsigned long)top_data 47 | // << " blocks: " << CAFFE_GET_BLOCKS(count) 48 | // << " threads: " << CAFFE_CUDA_NUM_THREADS; 49 | } 50 | 51 | template <typename Dtype> 52 | __global__ void MishBackward(const int n, const Dtype* in_diff, 53 | const Dtype* in_data, Dtype* out_diff) { 54 | CUDA_KERNEL_LOOP(index, n) { 55 | //const Dtype sigmoid_x = in_data[index]; 56 | //out_diff[index] = in_diff[index] * sigmoid_x * (1 - sigmoid_x); 57 | 58 | const float MISH_THRESHOLD = 20.0f; 59 | // implementation from TensorFlow: https://github.com/tensorflow/addons/blob/093cdfa85d334cbe19a37624c33198f3140109ed/tensorflow_addons/custom_ops/activations/cc/kernels/mish_op.h#L66-L80 60 | // implementation from Pytorch: https://github.com/thomasbrandon/mish-cuda/blob/master/csrc/mish.h#L26-L31 61 | const float inp = in_data[index]; // gradient is a function of the layer input x, not the output y 62 | const float sp = softplus_kernel(inp, MISH_THRESHOLD); 63 | const float grad_sp = 1 - expf(-sp); 64 | const float tsp = tanh(sp); 65 | const float grad_tsp = (1 - tsp*tsp) * grad_sp; 66 | const float grad = inp * grad_tsp + tsp; 67 | 68 | out_diff[index] = in_diff[index] * grad; 69 | 70 | } 71 | } 72 | 73 | template <typename Dtype> 74 | void MishLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, 75 | const vector<bool>& propagate_down, 76 | const vector<Blob<Dtype>*>& bottom) { 77 | if (propagate_down[0]) { 78 | const Dtype* bottom_data = bottom[0]->gpu_data(); 79 | const Dtype* top_diff = top[0]->gpu_diff(); 80 | Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); 81 | const int count = bottom[0]->count(); 82 | // NOLINT_NEXT_LINE(whitespace/operators) 83 | MishBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( 84 | count, top_diff, bottom_data, bottom_diff); 85 | CUDA_POST_KERNEL_CHECK; 86 | } 87 | } 88 | 89 | INSTANTIATE_LAYER_GPU_FUNCS(MishLayer); 90 | 91 | 92 | } // namespace caffe 93 | -------------------------------------------------------------------------------- /prototxt.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | try: 3 | import caffe.proto.caffe_pb2 as caffe_pb2 4 | except: 5 | try: 6 | import caffe_pb2 7 | except: 8 | print 'caffe_pb2.py not found. Try:' 9 | print ' protoc caffe.proto --python_out=.'
10 | exit() 11 | 12 | def parse_caffemodel(caffemodel): 13 | model = caffe_pb2.NetParameter() 14 | print 'Loading caffemodel: ', caffemodel 15 | with open(caffemodel, 'rb') as fp: 16 | model.ParseFromString(fp.read()) 17 | 18 | return model 19 | 20 | 21 | def parse_prototxt(protofile): 22 | def line_type(line): 23 | if line.find(':') >= 0: 24 | return 0 25 | elif line.find('{') >= 0: 26 | return 1 27 | return -1 28 | 29 | def parse_block(fp): 30 | block = OrderedDict() 31 | line = fp.readline().strip() 32 | while line != '}': 33 | ltype = line_type(line) 34 | if ltype == 0: # key: value 35 | #print line 36 | line = line.split('#')[0] 37 | key, value = line.split(':') 38 | key = key.strip() 39 | value = value.strip().strip('"') 40 | if block.has_key(key): 41 | if type(block[key]) == list: 42 | block[key].append(value) 43 | else: 44 | block[key] = [block[key], value] 45 | else: 46 | block[key] = value 47 | elif ltype == 1: # blockname { 48 | key = line.split('{')[0].strip() 49 | sub_block = parse_block(fp) 50 | block[key] = sub_block 51 | line = fp.readline().strip() 52 | line = line.split('#')[0] 53 | return block 54 | 55 | fp = open(protofile, 'r') 56 | props = OrderedDict() 57 | layers = [] 58 | line = fp.readline() 59 | while line != '': 60 | line = line.strip().split('#')[0] 61 | if line == '': 62 | line = fp.readline() 63 | continue 64 | ltype = line_type(line) 65 | if ltype == 0: # key: value 66 | key, value = line.split(':') 67 | key = key.strip() 68 | value = value.strip().strip('"') 69 | if props.has_key(key): 70 | if type(props[key]) == list: 71 | props[key].append(value) 72 | else: 73 | props[key] = [props[key], value] 74 | else: 75 | props[key] = value 76 | elif ltype == 1: # blockname { 77 | key = line.split('{')[0].strip() 78 | if key == 'layer': 79 | layer = parse_block(fp) 80 | layers.append(layer) 81 | else: 82 | props[key] = parse_block(fp) 83 | line = fp.readline() 84 | 85 | if len(layers) > 0: 86 | net_info = OrderedDict() 87 | net_info['props'] = props 88 | net_info['layers'] = layers 89 | return net_info 90 | else: 91 | return props 92 | 93 | def is_number(s): 94 | try: 95 | float(s) 96 | return True 97 | except ValueError: 98 | return False 99 | 100 | def print_prototxt(net_info): 101 | # whether add double quote 102 | def format_value(value): 103 | #str = u'%s' % value 104 | #if str.isnumeric(): 105 | if is_number(value): 106 | return value 107 | elif value == 'true' or value == 'false' or value == 'MAX' or value == 'SUM' or value == 'AVE': 108 | return value 109 | else: 110 | return '\"%s\"' % value 111 | 112 | def print_block(block_info, prefix, indent): 113 | blanks = ''.join([' ']*indent) 114 | print('%s%s {' % (blanks, prefix)) 115 | for key,value in block_info.items(): 116 | if type(value) == OrderedDict: 117 | print_block(value, key, indent+4) 118 | elif type(value) == list: 119 | for v in value: 120 | print('%s %s: %s' % (blanks, key, format_value(v))) 121 | else: 122 | print('%s %s: %s' % (blanks, key, format_value(value))) 123 | print('%s}' % blanks) 124 | 125 | props = net_info['props'] 126 | layers = net_info['layers'] 127 | print('name: \"%s\"' % props['name']) 128 | print('input: \"%s\"' % props['input']) 129 | print('input_dim: %s' % props['input_dim'][0]) 130 | print('input_dim: %s' % props['input_dim'][1]) 131 | print('input_dim: %s' % props['input_dim'][2]) 132 | print('input_dim: %s' % props['input_dim'][3]) 133 | print('') 134 | for layer in layers: 135 | print_block(layer, 'layer', 0) 136 | 137 | def save_prototxt(net_info, protofile, 
region=True): 138 | fp = open(protofile, 'w') 139 | # whether add double quote 140 | def format_value(value): 141 | #str = u'%s' % value 142 | #if str.isnumeric(): 143 | if is_number(value): 144 | return value 145 | elif value == 'true' or value == 'false' or value == 'MAX' or value == 'SUM' or value == 'AVE': 146 | return value 147 | else: 148 | return '\"%s\"' % value 149 | 150 | def print_block(block_info, prefix, indent): 151 | blanks = ''.join([' ']*indent) 152 | print >>fp, '%s%s {' % (blanks, prefix) 153 | for key,value in block_info.items(): 154 | if type(value) == OrderedDict: 155 | print_block(value, key, indent+4) 156 | elif type(value) == list: 157 | for v in value: 158 | print >> fp, '%s %s: %s' % (blanks, key, format_value(v)) 159 | else: 160 | print >> fp, '%s %s: %s' % (blanks, key, format_value(value)) 161 | print >> fp, '%s}' % blanks 162 | 163 | props = net_info['props'] 164 | layers = net_info['layers'] 165 | print >> fp, 'name: \"%s\"' % props['name'] 166 | print >> fp, 'input: \"%s\"' % props['input'] 167 | print >> fp, 'input_dim: %s' % props['input_dim'][0] 168 | print >> fp, 'input_dim: %s' % props['input_dim'][1] 169 | print >> fp, 'input_dim: %s' % props['input_dim'][2] 170 | print >> fp, 'input_dim: %s' % props['input_dim'][3] 171 | print >> fp, '' 172 | for layer in layers: 173 | if layer['type'] != 'Region' or region == True: 174 | print_block(layer, 'layer', 0) 175 | fp.close() 176 | 177 | 178 | if __name__ == '__main__': 179 | import sys 180 | if len(sys.argv) != 2: 181 | print('Usage: python prototxt.py model.prototxt') 182 | exit() 183 | 184 | net_info = parse_prototxt(sys.argv[1]) 185 | print_prototxt(net_info) 186 | save_prototxt(net_info, 'tmp.prototxt') 187 | -------------------------------------------------------------------------------- /cfg/mobilenet_v1_yolov3.cfg: -------------------------------------------------------------------------------- 1 | [net] 2 | # Training 3 | #batch=64 4 | #subdivisions=8 5 | 6 | # Testing 7 | batch=1 8 | subdivisions=1 9 | 10 | width=416 11 | height=416 12 | channels=3 13 | momentum=0.9 14 | decay=0.0005 15 | angle=0 16 | saturation = 1.5 17 | exposure = 1.5 18 | hue=.1 19 | 20 | learning_rate=0.00075 21 | burn_in=1000 22 | max_batches = 600200 23 | policy=steps 24 | steps=240000,400000,520000 25 | scales=.1,.1,.1 26 | 27 | #0th 3x3 input 416x416 output 208x208 28 | [convolutional] 29 | batch_normalize=1 30 | filters=32 31 | size=3 32 | stride=2 33 | pad=1 34 | activation=leaky 35 | 36 | #1th 3x3 input 208x208 output 208x208 37 | [depthwise_convolutional] 38 | batch_normalize=1 39 | #filters=32 40 | size=3 41 | stride=1 42 | pad=1 43 | #group=32 44 | activation=leaky 45 | 46 | #2th 1x1 input 208x208 output 208x208 47 | [convolutional] 48 | batch_normalize=1 49 | filters=64 50 | size=1 51 | stride=1 52 | pad=1 53 | activation=leaky 54 | 55 | #3th 3x3 input 208x208 output 104x104 56 | [depthwise_convolutional] 57 | batch_normalize=1 58 | #filters=64 59 | size=3 60 | stride=2 61 | pad=1 62 | #group=64 63 | activation=leaky 64 | 65 | #4th 1x1 input 104x104 output 104x104 66 | [convolutional] 67 | batch_normalize=1 68 | filters=128 69 | size=1 70 | stride=1 71 | pad=1 72 | activation=leaky 73 | 74 | #5th 3x3 input 104x104 output 104x104 75 | [depthwise_convolutional] 76 | batch_normalize=1 77 | #filters=128 78 | size=3 79 | stride=1 80 | pad=1 81 | #group=128 82 | activation=leaky 83 | 84 | #6th 1x1 input 104x104 output 104x104 85 | [convolutional] 86 | batch_normalize=1 87 | filters=128 88 | size=1 89 | stride=1 90 | 
pad=1 91 | activation=leaky 92 | 93 | #7th 3x3 input 104x104 output 52x52 94 | [depthwise_convolutional] 95 | batch_normalize=1 96 | #filters=128 97 | size=3 98 | stride=2 99 | pad=1 100 | #group=128 101 | activation=leaky 102 | 103 | #8th 1x1 input 52x52 output 52x52 104 | [convolutional] 105 | batch_normalize=1 106 | filters=256 107 | size=1 108 | stride=1 109 | pad=1 110 | activation=leaky 111 | 112 | #9th 3x3 input 52x52 output 52x52 113 | [depthwise_convolutional] 114 | batch_normalize=1 115 | #filters=256 116 | size=3 117 | stride=1 118 | pad=1 119 | #group=256 120 | activation=leaky 121 | 122 | #10th 1x1 input 52x52 output 52x52 123 | [convolutional] 124 | batch_normalize=1 125 | filters=256 126 | size=1 127 | stride=1 128 | pad=1 129 | activation=leaky 130 | 131 | #11th 3x3 input 52x52 output 26x26 132 | [depthwise_convolutional] 133 | batch_normalize=1 134 | #filters=256 135 | size=3 136 | stride=2 137 | pad=1 138 | #group=256 139 | activation=leaky 140 | 141 | #12th 1x1 input 26x26 output 26x26 142 | [convolutional] 143 | batch_normalize=1 144 | filters=512 145 | size=1 146 | stride=1 147 | pad=1 148 | activation=leaky 149 | 150 | #13th 3x3 input 26x26 output 26x26 151 | [depthwise_convolutional] 152 | batch_normalize=1 153 | #filters=512 154 | size=3 155 | stride=1 156 | pad=1 157 | #group=512 158 | activation=leaky 159 | 160 | #14th 1x1 input 26x26 output 26x26 161 | [convolutional] 162 | batch_normalize=1 163 | filters=512 164 | size=1 165 | stride=1 166 | pad=1 167 | activation=leaky 168 | 169 | #15th 3x3 input 26x26 output 26x26 170 | [depthwise_convolutional] 171 | batch_normalize=1 172 | #filters=512 173 | size=3 174 | stride=1 175 | pad=1 176 | #group=512 177 | activation=leaky 178 | 179 | #16th 1x1 input 26x26 output 26x26 180 | [convolutional] 181 | batch_normalize=1 182 | filters=512 183 | size=1 184 | stride=1 185 | pad=1 186 | activation=leaky 187 | 188 | 189 | #17th 3x3 input 26x26 output 26x26 190 | [depthwise_convolutional] 191 | batch_normalize=1 192 | #filters=512 193 | size=3 194 | stride=1 195 | pad=1 196 | #group=512 197 | activation=leaky 198 | 199 | #18th 1x1 input 26x26 output 26x26 200 | [convolutional] 201 | batch_normalize=1 202 | filters=512 203 | size=1 204 | stride=1 205 | pad=1 206 | activation=leaky 207 | 208 | #19th 3x3 input 26x26 output 26x26 209 | [depthwise_convolutional] 210 | batch_normalize=1 211 | #filters=512 212 | size=3 213 | stride=1 214 | pad=1 215 | #group=512 216 | activation=leaky 217 | 218 | #20th 1x1 input 26x26 output 26x26 219 | [convolutional] 220 | batch_normalize=1 221 | filters=512 222 | size=1 223 | stride=1 224 | pad=1 225 | activation=leaky 226 | 227 | #21th 3x3 input 26x26 output 26x26 228 | [depthwise_convolutional] 229 | batch_normalize=1 230 | #filters=512 231 | size=3 232 | stride=1 233 | pad=1 234 | #group=512 235 | activation=leaky 236 | 237 | #22th 1x1 input 26x26 output 26x26 238 | [convolutional] 239 | batch_normalize=1 240 | filters=512 241 | size=1 242 | stride=1 243 | pad=1 244 | activation=leaky 245 | 246 | #23th 3x3 input 26x26 output 13x13 247 | [depthwise_convolutional] 248 | batch_normalize=1 249 | #filters=512 250 | size=3 251 | stride=2 252 | pad=1 253 | #group=512 254 | activation=leaky 255 | 256 | #24th 1x1 input 13x13 output 13x13 257 | [convolutional] 258 | batch_normalize=1 259 | filters=1024 260 | size=1 261 | stride=1 262 | pad=1 263 | activation=leaky 264 | 265 | 266 | #25th 3x3 input 13x13 output 13x13 267 | [depthwise_convolutional] 268 | batch_normalize=1 269 | #filters=1024 270 | size=3 
271 | stride=1 272 | pad=1 273 | #group=1024 274 | activation=leaky 275 | 276 | #26th 1x1 input 13x13 output 13x13 277 | [convolutional] 278 | batch_normalize=1 279 | filters=1024 280 | size=1 281 | stride=1 282 | pad=1 283 | activation=leaky 284 | 285 | ###################### 286 | #27th 1x1 input 13x13 output 13x13 287 | [convolutional] 288 | batch_normalize=1 289 | filters=512 290 | size=1 291 | stride=1 292 | pad=1 293 | activation=leaky 294 | 295 | #28th 3x3 input 13x13 output 13x13 296 | [convolutional] 297 | batch_normalize=1 298 | size=3 299 | stride=1 300 | pad=1 301 | filters=1024 302 | activation=leaky 303 | 304 | #29th 1x1 input 13x13 output 13x13 305 | [convolutional] 306 | batch_normalize=1 307 | filters=512 308 | size=1 309 | stride=1 310 | pad=1 311 | activation=leaky 312 | 313 | #30th 314 | [convolutional] 315 | batch_normalize=1 316 | size=3 317 | stride=1 318 | pad=1 319 | filters=1024 320 | activation=leaky 321 | 322 | #31th 323 | [convolutional] 324 | batch_normalize=1 325 | filters=512 326 | size=1 327 | stride=1 328 | pad=1 329 | activation=leaky 330 | 331 | #32th 332 | [convolutional] 333 | batch_normalize=1 334 | size=3 335 | stride=1 336 | pad=1 337 | filters=1024 338 | activation=leaky 339 | 340 | #33th 341 | [convolutional] 342 | size=1 343 | stride=1 344 | pad=1 345 | filters=255 346 | activation=linear 347 | 348 | #34th 349 | [yolo] 350 | mask = 6,7,8 351 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 352 | classes=80 353 | num=9 354 | jitter=.3 355 | ignore_thresh = .7 356 | truth_thresh = 1 357 | random=1 358 | 359 | #35th 360 | [route] 361 | layers = -4 362 | 363 | #36th 364 | [convolutional] 365 | batch_normalize=1 366 | filters=256 367 | size=1 368 | stride=1 369 | pad=1 370 | activation=leaky 371 | 372 | #37th 373 | [upsample] 374 | stride=2 375 | 376 | #38th 377 | [route] 378 | layers = -1, 22 379 | 380 | #39th 381 | [convolutional] 382 | batch_normalize=1 383 | filters=256 384 | size=1 385 | stride=1 386 | pad=1 387 | activation=leaky 388 | 389 | #40th 390 | [convolutional] 391 | batch_normalize=1 392 | size=3 393 | stride=1 394 | pad=1 395 | filters=512 396 | activation=leaky 397 | 398 | #41th 399 | [convolutional] 400 | batch_normalize=1 401 | filters=256 402 | size=1 403 | stride=1 404 | pad=1 405 | activation=leaky 406 | 407 | #42th 408 | [convolutional] 409 | batch_normalize=1 410 | size=3 411 | stride=1 412 | pad=1 413 | filters=512 414 | activation=leaky 415 | 416 | #43th 417 | [convolutional] 418 | batch_normalize=1 419 | filters=256 420 | size=1 421 | stride=1 422 | pad=1 423 | activation=leaky 424 | 425 | 426 | #44th 427 | [convolutional] 428 | batch_normalize=1 429 | size=3 430 | stride=1 431 | pad=1 432 | filters=512 433 | activation=leaky 434 | 435 | #45th 436 | [convolutional] 437 | size=1 438 | stride=1 439 | pad=1 440 | filters=255 441 | activation=linear 442 | 443 | #46th 444 | [yolo] 445 | mask = 3,4,5 446 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 447 | classes=80 448 | num=9 449 | jitter=.3 450 | ignore_thresh = .7 451 | truth_thresh = 1 452 | random=1 453 | 454 | #47th 455 | [route] 456 | layers = -4 457 | 458 | #48th 459 | [convolutional] 460 | batch_normalize=1 461 | filters=128 462 | size=1 463 | stride=1 464 | pad=1 465 | activation=leaky 466 | 467 | #49th 468 | [upsample] 469 | stride=2 470 | 471 | 472 | #50th 473 | [route] 474 | layers = -1, 10 475 | 476 | #51th 1x1 input 52x52 output 52x52 477 | [convolutional] 478 | batch_normalize=1 479 | filters=128 480 
| size=1 481 | stride=1 482 | pad=1 483 | activation=leaky 484 | 485 | #52th 486 | [convolutional] 487 | batch_normalize=1 488 | size=3 489 | stride=1 490 | pad=1 491 | filters=256 492 | activation=leaky 493 | 494 | #53th 495 | [convolutional] 496 | batch_normalize=1 497 | filters=128 498 | size=1 499 | stride=1 500 | pad=1 501 | activation=leaky 502 | 503 | #54th 504 | [convolutional] 505 | batch_normalize=1 506 | size=3 507 | stride=1 508 | pad=1 509 | filters=256 510 | activation=leaky 511 | 512 | #55th 513 | [convolutional] 514 | batch_normalize=1 515 | filters=128 516 | size=1 517 | stride=1 518 | pad=1 519 | activation=leaky 520 | 521 | #56th 522 | [convolutional] 523 | batch_normalize=1 524 | size=3 525 | stride=1 526 | pad=1 527 | filters=256 528 | activation=leaky 529 | 530 | #57th 531 | [convolutional] 532 | size=1 533 | stride=1 534 | pad=1 535 | filters=255 536 | activation=linear 537 | 538 | #58th 539 | [yolo] 540 | mask = 0,1,2 541 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 542 | classes=80 543 | num=9 544 | jitter=.3 545 | ignore_thresh = .7 546 | truth_thresh = 1 547 | random=1 548 | 549 | 550 | -------------------------------------------------------------------------------- /cfg.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from collections import OrderedDict 3 | 4 | def parse_cfg(cfgfile): 5 | def erase_comment(line): 6 | line = line.split('#')[0] 7 | return line 8 | blocks = [] 9 | fp = open(cfgfile, 'r') 10 | block = None 11 | line = fp.readline() 12 | while line != '': 13 | line = line.rstrip() 14 | if line == '' or line[0] == '#': 15 | line = fp.readline() 16 | continue 17 | elif line[0] == '[': 18 | if block: 19 | blocks.append(block) 20 | block = OrderedDict() 21 | block['type'] = line.lstrip('[').rstrip(']') 22 | # set default value 23 | if block['type'] == 'convolutional': 24 | block['batch_normalize'] = 0 25 | else: 26 | line = erase_comment(line) 27 | key,value = line.split('=') 28 | key = key.strip() 29 | if key == 'type': 30 | key = '_type' 31 | value = value.strip() 32 | block[key] = value 33 | line = fp.readline() 34 | 35 | if block: 36 | blocks.append(block) 37 | fp.close() 38 | return blocks 39 | 40 | def print_cfg(blocks): 41 | for block in blocks: 42 | print('[%s]' % (block['type'])) 43 | for key,value in block.items(): 44 | if key != 'type': 45 | print('%s=%s' % (key, value)) 46 | print('') 47 | def save_cfg(blocks, cfgfile): 48 | with open(cfgfile, 'w') as fp: 49 | for block in blocks: 50 | fp.write('[%s]\n' % (block['type'])) 51 | for key,value in block.items(): 52 | if key != 'type': 53 | fp.write('%s=%s\n' % (key, value)) 54 | fp.write('\n') 55 | 56 | def print_cfg_nicely(blocks): 57 | print('layer filters size input output'); 58 | prev_width = 416 59 | prev_height = 416 60 | prev_filters = 3 61 | out_filters =[] 62 | out_widths =[] 63 | out_heights =[] 64 | ind = -2 65 | for block in blocks: 66 | ind = ind + 1 67 | if block['type'] == 'net': 68 | prev_width = int(block['width']) 69 | prev_height = int(block['height']) 70 | continue 71 | elif block['type'] == 'convolutional': 72 | filters = int(block['filters']) 73 | kernel_size = int(block['size']) 74 | stride = int(block['stride']) 75 | is_pad = int(block['pad']) 76 | pad = (kernel_size-1)/2 if is_pad else 0 77 | width = (prev_width + 2*pad - kernel_size)/stride + 1 78 | height = (prev_height + 2*pad - kernel_size)/stride + 1 79 | print('%5d %-6s %4d %d x %d / %d %3d x %3d x%4d -> %3d x %3d x%4d' % 
(ind, 'conv', filters, kernel_size, kernel_size, stride, prev_width, prev_height, prev_filters, width, height, filters)) 80 | prev_width = width 81 | prev_height = height 82 | prev_filters = filters 83 | out_widths.append(prev_width) 84 | out_heights.append(prev_height) 85 | out_filters.append(prev_filters) 86 | elif block['type'] == 'maxpool': 87 | pool_size = int(block['size']) 88 | stride = int(block['stride']) 89 | width = prev_width/stride 90 | height = prev_height/stride 91 | print('%5d %-6s %d x %d / %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'max', pool_size, pool_size, stride, prev_width, prev_height, prev_filters, width, height, prev_filters)) 92 | prev_width = width 93 | prev_height = height 94 | # pooling does not change the channel count, so prev_filters is left as-is 95 | out_widths.append(prev_width) 96 | out_heights.append(prev_height) 97 | out_filters.append(prev_filters) 98 | elif block['type'] == 'avgpool': 99 | width = 1 100 | height = 1 101 | print('%5d %-6s %3d x %3d x%4d -> %3d' % (ind, 'avg', prev_width, prev_height, prev_filters, prev_filters)) 102 | prev_width = 1 103 | prev_height = 1 104 | out_widths.append(prev_width) 105 | out_heights.append(prev_height) 106 | out_filters.append(prev_filters) 107 | elif block['type'] == 'softmax': 108 | print('%5d %-6s -> %3d' % (ind, 'softmax', prev_filters)) 109 | out_widths.append(prev_width) 110 | out_heights.append(prev_height) 111 | out_filters.append(prev_filters) 112 | elif block['type'] == 'cost': 113 | print('%5d %-6s -> %3d' % (ind, 'cost', prev_filters)) 114 | out_widths.append(prev_width) 115 | out_heights.append(prev_height) 116 | out_filters.append(prev_filters) 117 | elif block['type'] == 'reorg': 118 | stride = int(block['stride']) 119 | filters = stride * stride * prev_filters 120 | width = prev_width/stride 121 | height = prev_height/stride 122 | print('%5d %-6s / %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'reorg', stride, prev_width, prev_height, prev_filters, width, height, filters)) 123 | prev_width = width 124 | prev_height = height 125 | prev_filters = filters 126 | out_widths.append(prev_width) 127 | out_heights.append(prev_height) 128 | out_filters.append(prev_filters) 129 | elif block['type'] == 'route': 130 | layers = block['layers'].split(',') 131 | layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers] 132 | if len(layers) == 1: 133 | print('%5d %-6s %d' % (ind, 'route', layers[0])) 134 | prev_width = out_widths[layers[0]] 135 | prev_height = out_heights[layers[0]] 136 | prev_filters = out_filters[layers[0]] 137 | elif len(layers) == 2: 138 | print('%5d %-6s %d %d' % (ind, 'route', layers[0], layers[1])) 139 | prev_width = out_widths[layers[0]] 140 | prev_height = out_heights[layers[0]] 141 | assert(prev_width == out_widths[layers[1]]) 142 | assert(prev_height == out_heights[layers[1]]) 143 | prev_filters = out_filters[layers[0]] + out_filters[layers[1]] 144 | out_widths.append(prev_width) 145 | out_heights.append(prev_height) 146 | out_filters.append(prev_filters) 147 | elif block['type'] == 'region': 148 | print('%5d %-6s' % (ind, 'detection')) 149 | out_widths.append(prev_width) 150 | out_heights.append(prev_height) 151 | out_filters.append(prev_filters) 152 | elif block['type'] == 'shortcut': 153 | from_id = int(block['from']) 154 | from_id = from_id if from_id > 0 else from_id+ind 155 | print('%5d %-6s %d' % (ind, 'shortcut', from_id)) 156 | prev_width = out_widths[from_id] 157 | prev_height = out_heights[from_id] 158 | prev_filters = out_filters[from_id] 159 | out_widths.append(prev_width) 160 | out_heights.append(prev_height) 161 |
out_filters.append(prev_filters) 162 | elif block['type'] == 'softmax': 163 | print('%5d %-6s' % (ind, 'softmax')) 164 | out_widths.append(prev_width) 165 | out_heights.append(prev_height) 166 | out_filters.append(prev_filters) 167 | elif block['type'] == 'connected': 168 | filters = int(block['output']) 169 | print('%5d %-6s %d -> %3d' % (ind, 'connected', prev_filters, filters)) 170 | prev_filters = filters 171 | out_widths.append(1) 172 | out_heights.append(1) 173 | out_filters.append(prev_filters) 174 | else: 175 | print('unknown type %s' % (block['type'])) 176 | 177 | def load_conv(buf, start, conv_model): 178 | num_w = conv_model.weight.numel() 179 | num_b = conv_model.bias.numel() 180 | conv_model.bias.data.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b 181 | conv_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_w])); start = start + num_w 182 | return start 183 | 184 | def save_conv(fp, conv_model): 185 | if conv_model.bias.is_cuda: 186 | conv_model.bias.data.cpu().numpy().tofile(fp) 187 | conv_model.weight.data.cpu().numpy().tofile(fp) 188 | else: 189 | conv_model.bias.data.numpy().tofile(fp) 190 | conv_model.weight.data.numpy().tofile(fp) 191 | 192 | def load_conv_bn(buf, start, conv_model, bn_model): 193 | num_w = conv_model.weight.numel() 194 | num_b = bn_model.bias.numel() 195 | bn_model.bias.data.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b 196 | bn_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b 197 | bn_model.running_mean.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b 198 | bn_model.running_var.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b 199 | conv_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_w])); start = start + num_w 200 | return start 201 | 202 | def save_conv_bn(fp, conv_model, bn_model): 203 | if bn_model.bias.is_cuda: 204 | bn_model.bias.data.cpu().numpy().tofile(fp) 205 | bn_model.weight.data.cpu().numpy().tofile(fp) 206 | bn_model.running_mean.cpu().numpy().tofile(fp) 207 | bn_model.running_var.cpu().numpy().tofile(fp) 208 | conv_model.weight.data.cpu().numpy().tofile(fp) 209 | else: 210 | bn_model.bias.data.numpy().tofile(fp) 211 | bn_model.weight.data.numpy().tofile(fp) 212 | bn_model.running_mean.numpy().tofile(fp) 213 | bn_model.running_var.numpy().tofile(fp) 214 | conv_model.weight.data.numpy().tofile(fp) 215 | 216 | def save_conv_shrink_bn(fp, conv_model, bn_model, eps=1e-5): 217 | if bn_model.bias.is_cuda: 218 | bias = bn_model.bias.data - bn_model.running_mean * bn_model.weight.data / torch.sqrt(bn_model.running_var + eps) 219 | bias.cpu().numpy().tofile(fp) 220 | s = conv_model.weight.data.size() 221 | weight = conv_model.weight.data * (bn_model.weight.data / torch.sqrt(bn_model.running_var + eps)).view(-1,1,1,1).repeat(1, s[1], s[2], s[3]) 222 | weight.cpu().numpy().tofile(fp) 223 | else: 224 | bias = bn_model.bias.data - bn_model.running_mean * bn_model.weight.data / torch.sqrt(bn_model.running_var + eps) 225 | bias.numpy().tofile(fp) 226 | s = conv_model.weight.data.size() 227 | weight = conv_model.weight.data * (bn_model.weight.data / torch.sqrt(bn_model.running_var + eps)).view(-1,1,1,1).repeat(1, s[1], s[2], s[3]) 228 | weight.numpy().tofile(fp) 229 | 230 | def load_fc(buf, start, fc_model): 231 | num_w = fc_model.weight.numel() 232 | num_b = fc_model.bias.numel() 233 |
fc_model.bias.data.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b 234 | fc_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_w])); start = start + num_w 235 | return start 236 | 237 | def save_fc(fp, fc_model): 238 | fc_model.bias.data.numpy().tofile(fp) 239 | fc_model.weight.data.numpy().tofile(fp) 240 | 241 | 242 | if __name__ == '__main__': 243 | import sys 244 | if len(sys.argv) != 2: 245 | print('Usage: python cfg.py model.cfg') 246 | exit() 247 | 248 | blocks = parse_cfg(sys.argv[1]) 249 | print_cfg_nicely(blocks) 250 | -------------------------------------------------------------------------------- /cfg/mobilenet_v2_yolov3.cfg: -------------------------------------------------------------------------------- 1 | [net] 2 | # Testing 3 | batch=1 4 | subdivisions=1 5 | # Training 6 | #batch=64 7 | #subdivisions=16 8 | width=416 9 | height=416 10 | channels=3 11 | 12 | momentum=0.9 13 | decay=0.0005 14 | angle=0 15 | saturation = 1.5 16 | exposure = 1.5 17 | hue=.1 18 | 19 | learning_rate=0.001 20 | burn_in=1000 21 | max_batches = 500200 22 | policy=steps 23 | steps=400000,450000 24 | scales=.1,.1 25 | 26 | [convolutional] 27 | filters=32 28 | size=3 29 | stride=2 30 | pad=1 31 | batch_normalize=1 32 | activation=leaky 33 | 34 | [convolutional] 35 | filters=32 36 | size=1 37 | stride=1 38 | pad=1 39 | batch_normalize=1 40 | activation=leaky 41 | 42 | [depthwise_convolutional] 43 | #filters=32 44 | size=3 45 | stride=1 46 | #groups=32 47 | pad=1 48 | batch_normalize=1 49 | activation=leaky 50 | 51 | [convolutional] 52 | filters=16 53 | size=1 54 | stride=1 55 | pad=1 56 | batch_normalize=1 57 | activation=linear 58 | 59 | [convolutional] 60 | filters=96 61 | size=1 62 | stride=1 63 | pad=1 64 | batch_normalize=1 65 | activation=leaky 66 | 67 | [depthwise_convolutional] 68 | #filters=96 69 | size=3 70 | stride=2 71 | #groups=96 72 | pad=1 73 | batch_normalize=1 74 | activation=leaky 75 | 76 | [convolutional] 77 | filters=24 78 | size=1 79 | stride=1 80 | pad=1 81 | batch_normalize=1 82 | activation=linear 83 | 84 | [convolutional] 85 | filters=144 86 | size=1 87 | stride=1 88 | pad=1 89 | batch_normalize=1 90 | activation=leaky 91 | 92 | [depthwise_convolutional] 93 | #filters=144 94 | size=3 95 | stride=1 96 | #groups=144 97 | pad=1 98 | batch_normalize=1 99 | activation=leaky 100 | 101 | [convolutional] 102 | filters=24 103 | size=1 104 | stride=1 105 | pad=1 106 | batch_normalize=1 107 | activation=linear 108 | 109 | [shortcut] 110 | from=-4 111 | activation=linear 112 | 113 | [convolutional] 114 | filters=144 115 | size=1 116 | stride=1 117 | pad=1 118 | batch_normalize=1 119 | activation=leaky 120 | 121 | [depthwise_convolutional] 122 | #filters=144 123 | size=3 124 | stride=2 125 | #groups=144 126 | pad=1 127 | batch_normalize=1 128 | activation=leaky 129 | 130 | [convolutional] 131 | filters=32 132 | size=1 133 | stride=1 134 | pad=1 135 | batch_normalize=1 136 | activation=linear 137 | 138 | [convolutional] 139 | filters=192 140 | size=1 141 | stride=1 142 | pad=1 143 | batch_normalize=1 144 | activation=leaky 145 | 146 | [depthwise_convolutional] 147 | #filters=192 148 | size=3 149 | stride=1 150 | #groups=192 151 | pad=1 152 | batch_normalize=1 153 | activation=leaky 154 | 155 | [convolutional] 156 | filters=32 157 | size=1 158 | stride=1 159 | pad=1 160 | batch_normalize=1 161 | activation=linear 162 | 163 | [shortcut] 164 | from=-4 165 | activation=linear 166 | 167 | [convolutional] 168 | filters=192 169 | size=1 170 | stride=1 171 | pad=1 172 
| batch_normalize=1 173 | activation=leaky 174 | 175 | [depthwise_convolutional] 176 | #filters=192 177 | size=3 178 | stride=1 179 | #groups=192 180 | pad=1 181 | batch_normalize=1 182 | activation=leaky 183 | 184 | [convolutional] 185 | filters=32 186 | size=1 187 | stride=1 188 | pad=1 189 | batch_normalize=1 190 | activation=linear 191 | 192 | [shortcut] 193 | from=-4 194 | activation=linear 195 | 196 | [convolutional] 197 | filters=192 198 | size=1 199 | stride=1 200 | pad=1 201 | batch_normalize=1 202 | activation=leaky 203 | 204 | [depthwise_convolutional] 205 | #filters=192 206 | size=3 207 | stride=1 208 | #groups=192 209 | pad=1 210 | batch_normalize=1 211 | activation=leaky 212 | 213 | [convolutional] 214 | filters=64 215 | size=1 216 | stride=1 217 | pad=1 218 | batch_normalize=1 219 | activation=linear 220 | 221 | [convolutional] 222 | filters=384 223 | size=1 224 | stride=1 225 | pad=1 226 | batch_normalize=1 227 | activation=leaky 228 | 229 | [depthwise_convolutional] 230 | #filters=384 231 | size=3 232 | stride=1 233 | #groups=384 234 | pad=1 235 | batch_normalize=1 236 | activation=leaky 237 | 238 | [convolutional] 239 | filters=64 240 | size=1 241 | stride=1 242 | pad=1 243 | batch_normalize=1 244 | activation=linear 245 | 246 | [shortcut] 247 | from=-4 248 | activation=linear 249 | 250 | [convolutional] 251 | filters=384 252 | size=1 253 | stride=1 254 | pad=1 255 | batch_normalize=1 256 | activation=leaky 257 | 258 | [depthwise_convolutional] 259 | #filters=384 260 | size=3 261 | stride=1 262 | #groups=384 263 | pad=1 264 | batch_normalize=1 265 | activation=leaky 266 | 267 | [convolutional] 268 | filters=64 269 | size=1 270 | stride=1 271 | pad=1 272 | batch_normalize=1 273 | activation=linear 274 | 275 | [shortcut] 276 | from=-4 277 | activation=linear 278 | 279 | [convolutional] 280 | filters=384 281 | size=1 282 | stride=1 283 | pad=1 284 | batch_normalize=1 285 | activation=leaky 286 | 287 | [depthwise_convolutional] 288 | #filters=384 289 | size=3 290 | stride=1 291 | #groups=384 292 | pad=1 293 | batch_normalize=1 294 | activation=leaky 295 | 296 | [convolutional] 297 | filters=64 298 | size=1 299 | stride=1 300 | pad=1 301 | batch_normalize=1 302 | activation=linear 303 | 304 | [shortcut] 305 | from=-4 306 | activation=linear 307 | 308 | [convolutional] 309 | filters=384 310 | size=1 311 | stride=1 312 | pad=1 313 | batch_normalize=1 314 | activation=leaky 315 | 316 | [depthwise_convolutional] 317 | #filters=384 318 | size=3 319 | stride=2 320 | #groups=384 321 | pad=1 322 | batch_normalize=1 323 | activation=leaky 324 | 325 | [convolutional] 326 | filters=96 327 | size=1 328 | stride=1 329 | pad=1 330 | batch_normalize=1 331 | activation=linear 332 | 333 | [convolutional] 334 | filters=576 335 | size=1 336 | stride=1 337 | pad=1 338 | batch_normalize=1 339 | activation=leaky 340 | 341 | [depthwise_convolutional] 342 | #filters=576 343 | size=3 344 | stride=1 345 | #groups=576 346 | pad=1 347 | batch_normalize=1 348 | activation=leaky 349 | 350 | [convolutional] 351 | filters=96 352 | size=1 353 | stride=1 354 | pad=1 355 | batch_normalize=1 356 | activation=linear 357 | 358 | [shortcut] 359 | from=-4 360 | activation=linear 361 | 362 | [convolutional] 363 | filters=576 364 | size=1 365 | stride=1 366 | pad=1 367 | batch_normalize=1 368 | activation=leaky 369 | 370 | [depthwise_convolutional] 371 | #filters=576 372 | size=3 373 | stride=1 374 | #groups=576 375 | pad=1 376 | batch_normalize=1 377 | activation=leaky 378 | 379 | [convolutional] 380 | filters=96 381 
| size=1 382 | stride=1 383 | pad=1 384 | batch_normalize=1 385 | activation=linear 386 | 387 | [shortcut] 388 | from=-4 389 | activation=linear 390 | 391 | [convolutional] 392 | filters=576 393 | size=1 394 | stride=1 395 | pad=1 396 | batch_normalize=1 397 | activation=leaky 398 | 399 | [depthwise_convolutional] 400 | #filters=576 401 | size=3 402 | stride=2 403 | #groups=576 404 | pad=1 405 | batch_normalize=1 406 | activation=leaky 407 | 408 | [convolutional] 409 | filters=160 410 | size=1 411 | stride=1 412 | pad=1 413 | batch_normalize=1 414 | activation=linear 415 | 416 | [convolutional] 417 | filters=960 418 | size=1 419 | stride=1 420 | pad=1 421 | batch_normalize=1 422 | activation=leaky 423 | 424 | [depthwise_convolutional] 425 | #filters=960 426 | size=3 427 | stride=1 428 | #groups=960 429 | pad=1 430 | batch_normalize=1 431 | activation=leaky 432 | 433 | [convolutional] 434 | filters=160 435 | size=1 436 | stride=1 437 | pad=1 438 | batch_normalize=1 439 | activation=linear 440 | 441 | [shortcut] 442 | from=-4 443 | activation=linear 444 | 445 | [convolutional] 446 | filters=960 447 | size=1 448 | stride=1 449 | pad=1 450 | batch_normalize=1 451 | activation=leaky 452 | 453 | [depthwise_convolutional] 454 | #filters=960 455 | size=3 456 | stride=1 457 | #groups=960 458 | pad=1 459 | batch_normalize=1 460 | activation=leaky 461 | 462 | [convolutional] 463 | filters=160 464 | size=1 465 | stride=1 466 | pad=1 467 | batch_normalize=1 468 | activation=linear 469 | 470 | [shortcut] 471 | from=-4 472 | activation=linear 473 | 474 | [convolutional] 475 | filters=960 476 | size=1 477 | stride=1 478 | pad=1 479 | batch_normalize=1 480 | activation=leaky 481 | 482 | [depthwise_convolutional] 483 | #filters=960 484 | size=3 485 | stride=1 486 | #groups=960 487 | pad=1 488 | batch_normalize=1 489 | activation=leaky 490 | 491 | [convolutional] 492 | filters=320 493 | size=1 494 | stride=1 495 | pad=1 496 | batch_normalize=1 497 | activation=linear 498 | 499 | #62 500 | [convolutional] 501 | filters=1280 502 | size=1 503 | stride=1 504 | pad=1 505 | batch_normalize=1 506 | activation=leaky 507 | 508 | ###################### 509 | #63 510 | [convolutional] 511 | batch_normalize=1 512 | filters=512 513 | size=1 514 | stride=1 515 | pad=1 516 | activation=leaky 517 | 518 | #64 519 | [convolutional] 520 | batch_normalize=1 521 | size=3 522 | stride=1 523 | pad=1 524 | filters=1024 525 | activation=leaky 526 | 527 | #65 528 | [convolutional] 529 | batch_normalize=1 530 | filters=512 531 | size=1 532 | stride=1 533 | pad=1 534 | activation=leaky 535 | 536 | #66 537 | [convolutional] 538 | batch_normalize=1 539 | size=3 540 | stride=1 541 | pad=1 542 | filters=1024 543 | activation=leaky 544 | 545 | #67 546 | [convolutional] 547 | batch_normalize=1 548 | filters=512 549 | size=1 550 | stride=1 551 | pad=1 552 | activation=leaky 553 | 554 | #68 555 | [convolutional] 556 | batch_normalize=1 557 | size=3 558 | stride=1 559 | pad=1 560 | filters=1024 561 | activation=leaky 562 | 563 | #69 564 | [convolutional] 565 | size=1 566 | stride=1 567 | pad=1 568 | filters=255 569 | activation=linear 570 | 571 | 572 | #70 573 | [yolo] 574 | mask = 6,7,8 575 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 576 | classes=80 577 | num=9 578 | jitter=.3 579 | ignore_thresh = .7 580 | truth_thresh = 1 581 | random=1 582 | 583 | #71 584 | [route] 585 | layers = -4 586 | 587 | #72 588 | [convolutional] 589 | batch_normalize=1 590 | filters=256 591 | size=1 592 | stride=1 593 | pad=1 594 
| activation=leaky 595 | 596 | #73 597 | [upsample] 598 | stride=2 599 | 600 | #74 601 | [route] 602 | layers = -1, 48 603 | 604 | 605 | 606 | [convolutional] 607 | batch_normalize=1 608 | filters=256 609 | size=1 610 | stride=1 611 | pad=1 612 | activation=leaky 613 | 614 | [convolutional] 615 | batch_normalize=1 616 | size=3 617 | stride=1 618 | pad=1 619 | filters=512 620 | activation=leaky 621 | 622 | [convolutional] 623 | batch_normalize=1 624 | filters=256 625 | size=1 626 | stride=1 627 | pad=1 628 | activation=leaky 629 | 630 | [convolutional] 631 | batch_normalize=1 632 | size=3 633 | stride=1 634 | pad=1 635 | filters=512 636 | activation=leaky 637 | 638 | [convolutional] 639 | batch_normalize=1 640 | filters=256 641 | size=1 642 | stride=1 643 | pad=1 644 | activation=leaky 645 | 646 | [convolutional] 647 | batch_normalize=1 648 | size=3 649 | stride=1 650 | pad=1 651 | filters=512 652 | activation=leaky 653 | 654 | [convolutional] 655 | size=1 656 | stride=1 657 | pad=1 658 | filters=255 659 | activation=linear 660 | 661 | 662 | [yolo] 663 | mask = 3,4,5 664 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 665 | classes=80 666 | num=9 667 | jitter=.3 668 | ignore_thresh = .7 669 | truth_thresh = 1 670 | random=1 671 | 672 | 673 | 674 | [route] 675 | layers = -4 676 | 677 | [convolutional] 678 | batch_normalize=1 679 | filters=128 680 | size=1 681 | stride=1 682 | pad=1 683 | activation=leaky 684 | 685 | [upsample] 686 | stride=2 687 | 688 | [route] 689 | layers = -1, 37 690 | 691 | 692 | 693 | [convolutional] 694 | batch_normalize=1 695 | filters=128 696 | size=1 697 | stride=1 698 | pad=1 699 | activation=leaky 700 | 701 | [convolutional] 702 | batch_normalize=1 703 | size=3 704 | stride=1 705 | pad=1 706 | filters=256 707 | activation=leaky 708 | 709 | [convolutional] 710 | batch_normalize=1 711 | filters=128 712 | size=1 713 | stride=1 714 | pad=1 715 | activation=leaky 716 | 717 | [convolutional] 718 | batch_normalize=1 719 | size=3 720 | stride=1 721 | pad=1 722 | filters=256 723 | activation=leaky 724 | 725 | [convolutional] 726 | batch_normalize=1 727 | filters=128 728 | size=1 729 | stride=1 730 | pad=1 731 | activation=leaky 732 | 733 | [convolutional] 734 | batch_normalize=1 735 | size=3 736 | stride=1 737 | pad=1 738 | filters=256 739 | activation=leaky 740 | 741 | [convolutional] 742 | size=1 743 | stride=1 744 | pad=1 745 | filters=255 746 | activation=linear 747 | 748 | 749 | [yolo] 750 | mask = 0,1,2 751 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 752 | classes=80 753 | num=9 754 | jitter=.3 755 | ignore_thresh = .7 756 | truth_thresh = 1 757 | random=1 758 | 759 | -------------------------------------------------------------------------------- /cfg/yolov3.cfg: -------------------------------------------------------------------------------- 1 | [net] 2 | # Testing 3 | batch=1 4 | subdivisions=1 5 | # Training 6 | #batch=64 7 | #subdivisions=16 8 | width=608 9 | height=608 10 | channels=3 11 | momentum=0.9 12 | decay=0.0005 13 | angle=0 14 | saturation = 1.5 15 | exposure = 1.5 16 | hue=.1 17 | 18 | learning_rate=0.001 19 | burn_in=1000 20 | max_batches = 500200 21 | policy=steps 22 | steps=400000,450000 23 | scales=.1,.1 24 | 25 | [convolutional] 26 | batch_normalize=1 27 | filters=32 28 | size=3 29 | stride=1 30 | pad=1 31 | activation=leaky 32 | 33 | # Downsample 34 | 35 | [convolutional] 36 | batch_normalize=1 37 | filters=64 38 | size=3 39 | stride=2 40 | pad=1 41 | activation=leaky 42 | 43 | 
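# Note: the pair of layers below, plus the [shortcut], is the residual unit
# repeated throughout the Darknet-53 backbone: a 1x1 convolution halves the
# channel count, a 3x3 convolution restores it, and [shortcut] from=-3 adds
# the unit's input back element-wise (the converter typically emits this as a
# Caffe Eltwise SUM layer).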
[convolutional] 44 | batch_normalize=1 45 | filters=32 46 | size=1 47 | stride=1 48 | pad=1 49 | activation=leaky 50 | 51 | [convolutional] 52 | batch_normalize=1 53 | filters=64 54 | size=3 55 | stride=1 56 | pad=1 57 | activation=leaky 58 | 59 | [shortcut] 60 | from=-3 61 | activation=linear 62 | 63 | # Downsample 64 | 65 | [convolutional] 66 | batch_normalize=1 67 | filters=128 68 | size=3 69 | stride=2 70 | pad=1 71 | activation=leaky 72 | 73 | [convolutional] 74 | batch_normalize=1 75 | filters=64 76 | size=1 77 | stride=1 78 | pad=1 79 | activation=leaky 80 | 81 | [convolutional] 82 | batch_normalize=1 83 | filters=128 84 | size=3 85 | stride=1 86 | pad=1 87 | activation=leaky 88 | 89 | [shortcut] 90 | from=-3 91 | activation=linear 92 | 93 | [convolutional] 94 | batch_normalize=1 95 | filters=64 96 | size=1 97 | stride=1 98 | pad=1 99 | activation=leaky 100 | 101 | [convolutional] 102 | batch_normalize=1 103 | filters=128 104 | size=3 105 | stride=1 106 | pad=1 107 | activation=leaky 108 | 109 | [shortcut] 110 | from=-3 111 | activation=linear 112 | 113 | # Downsample 114 | 115 | [convolutional] 116 | batch_normalize=1 117 | filters=256 118 | size=3 119 | stride=2 120 | pad=1 121 | activation=leaky 122 | 123 | [convolutional] 124 | batch_normalize=1 125 | filters=128 126 | size=1 127 | stride=1 128 | pad=1 129 | activation=leaky 130 | 131 | [convolutional] 132 | batch_normalize=1 133 | filters=256 134 | size=3 135 | stride=1 136 | pad=1 137 | activation=leaky 138 | 139 | [shortcut] 140 | from=-3 141 | activation=linear 142 | 143 | [convolutional] 144 | batch_normalize=1 145 | filters=128 146 | size=1 147 | stride=1 148 | pad=1 149 | activation=leaky 150 | 151 | [convolutional] 152 | batch_normalize=1 153 | filters=256 154 | size=3 155 | stride=1 156 | pad=1 157 | activation=leaky 158 | 159 | [shortcut] 160 | from=-3 161 | activation=linear 162 | 163 | [convolutional] 164 | batch_normalize=1 165 | filters=128 166 | size=1 167 | stride=1 168 | pad=1 169 | activation=leaky 170 | 171 | [convolutional] 172 | batch_normalize=1 173 | filters=256 174 | size=3 175 | stride=1 176 | pad=1 177 | activation=leaky 178 | 179 | [shortcut] 180 | from=-3 181 | activation=linear 182 | 183 | [convolutional] 184 | batch_normalize=1 185 | filters=128 186 | size=1 187 | stride=1 188 | pad=1 189 | activation=leaky 190 | 191 | [convolutional] 192 | batch_normalize=1 193 | filters=256 194 | size=3 195 | stride=1 196 | pad=1 197 | activation=leaky 198 | 199 | [shortcut] 200 | from=-3 201 | activation=linear 202 | 203 | 204 | [convolutional] 205 | batch_normalize=1 206 | filters=128 207 | size=1 208 | stride=1 209 | pad=1 210 | activation=leaky 211 | 212 | [convolutional] 213 | batch_normalize=1 214 | filters=256 215 | size=3 216 | stride=1 217 | pad=1 218 | activation=leaky 219 | 220 | [shortcut] 221 | from=-3 222 | activation=linear 223 | 224 | [convolutional] 225 | batch_normalize=1 226 | filters=128 227 | size=1 228 | stride=1 229 | pad=1 230 | activation=leaky 231 | 232 | [convolutional] 233 | batch_normalize=1 234 | filters=256 235 | size=3 236 | stride=1 237 | pad=1 238 | activation=leaky 239 | 240 | [shortcut] 241 | from=-3 242 | activation=linear 243 | 244 | [convolutional] 245 | batch_normalize=1 246 | filters=128 247 | size=1 248 | stride=1 249 | pad=1 250 | activation=leaky 251 | 252 | [convolutional] 253 | batch_normalize=1 254 | filters=256 255 | size=3 256 | stride=1 257 | pad=1 258 | activation=leaky 259 | 260 | [shortcut] 261 | from=-3 262 | activation=linear 263 | 264 | [convolutional] 265 | 
batch_normalize=1 266 | filters=128 267 | size=1 268 | stride=1 269 | pad=1 270 | activation=leaky 271 | 272 | [convolutional] 273 | batch_normalize=1 274 | filters=256 275 | size=3 276 | stride=1 277 | pad=1 278 | activation=leaky 279 | 280 | [shortcut] 281 | from=-3 282 | activation=linear 283 | 284 | # Downsample 285 | 286 | [convolutional] 287 | batch_normalize=1 288 | filters=512 289 | size=3 290 | stride=2 291 | pad=1 292 | activation=leaky 293 | 294 | [convolutional] 295 | batch_normalize=1 296 | filters=256 297 | size=1 298 | stride=1 299 | pad=1 300 | activation=leaky 301 | 302 | [convolutional] 303 | batch_normalize=1 304 | filters=512 305 | size=3 306 | stride=1 307 | pad=1 308 | activation=leaky 309 | 310 | [shortcut] 311 | from=-3 312 | activation=linear 313 | 314 | 315 | [convolutional] 316 | batch_normalize=1 317 | filters=256 318 | size=1 319 | stride=1 320 | pad=1 321 | activation=leaky 322 | 323 | [convolutional] 324 | batch_normalize=1 325 | filters=512 326 | size=3 327 | stride=1 328 | pad=1 329 | activation=leaky 330 | 331 | [shortcut] 332 | from=-3 333 | activation=linear 334 | 335 | 336 | [convolutional] 337 | batch_normalize=1 338 | filters=256 339 | size=1 340 | stride=1 341 | pad=1 342 | activation=leaky 343 | 344 | [convolutional] 345 | batch_normalize=1 346 | filters=512 347 | size=3 348 | stride=1 349 | pad=1 350 | activation=leaky 351 | 352 | [shortcut] 353 | from=-3 354 | activation=linear 355 | 356 | 357 | [convolutional] 358 | batch_normalize=1 359 | filters=256 360 | size=1 361 | stride=1 362 | pad=1 363 | activation=leaky 364 | 365 | [convolutional] 366 | batch_normalize=1 367 | filters=512 368 | size=3 369 | stride=1 370 | pad=1 371 | activation=leaky 372 | 373 | [shortcut] 374 | from=-3 375 | activation=linear 376 | 377 | [convolutional] 378 | batch_normalize=1 379 | filters=256 380 | size=1 381 | stride=1 382 | pad=1 383 | activation=leaky 384 | 385 | [convolutional] 386 | batch_normalize=1 387 | filters=512 388 | size=3 389 | stride=1 390 | pad=1 391 | activation=leaky 392 | 393 | [shortcut] 394 | from=-3 395 | activation=linear 396 | 397 | 398 | [convolutional] 399 | batch_normalize=1 400 | filters=256 401 | size=1 402 | stride=1 403 | pad=1 404 | activation=leaky 405 | 406 | [convolutional] 407 | batch_normalize=1 408 | filters=512 409 | size=3 410 | stride=1 411 | pad=1 412 | activation=leaky 413 | 414 | [shortcut] 415 | from=-3 416 | activation=linear 417 | 418 | 419 | [convolutional] 420 | batch_normalize=1 421 | filters=256 422 | size=1 423 | stride=1 424 | pad=1 425 | activation=leaky 426 | 427 | [convolutional] 428 | batch_normalize=1 429 | filters=512 430 | size=3 431 | stride=1 432 | pad=1 433 | activation=leaky 434 | 435 | [shortcut] 436 | from=-3 437 | activation=linear 438 | 439 | [convolutional] 440 | batch_normalize=1 441 | filters=256 442 | size=1 443 | stride=1 444 | pad=1 445 | activation=leaky 446 | 447 | [convolutional] 448 | batch_normalize=1 449 | filters=512 450 | size=3 451 | stride=1 452 | pad=1 453 | activation=leaky 454 | 455 | [shortcut] 456 | from=-3 457 | activation=linear 458 | 459 | # Downsample 460 | 461 | [convolutional] 462 | batch_normalize=1 463 | filters=1024 464 | size=3 465 | stride=2 466 | pad=1 467 | activation=leaky 468 | 469 | [convolutional] 470 | batch_normalize=1 471 | filters=512 472 | size=1 473 | stride=1 474 | pad=1 475 | activation=leaky 476 | 477 | [convolutional] 478 | batch_normalize=1 479 | filters=1024 480 | size=3 481 | stride=1 482 | pad=1 483 | activation=leaky 484 | 485 | [shortcut] 486 | 
from=-3 487 | activation=linear 488 | 489 | [convolutional] 490 | batch_normalize=1 491 | filters=512 492 | size=1 493 | stride=1 494 | pad=1 495 | activation=leaky 496 | 497 | [convolutional] 498 | batch_normalize=1 499 | filters=1024 500 | size=3 501 | stride=1 502 | pad=1 503 | activation=leaky 504 | 505 | [shortcut] 506 | from=-3 507 | activation=linear 508 | 509 | [convolutional] 510 | batch_normalize=1 511 | filters=512 512 | size=1 513 | stride=1 514 | pad=1 515 | activation=leaky 516 | 517 | [convolutional] 518 | batch_normalize=1 519 | filters=1024 520 | size=3 521 | stride=1 522 | pad=1 523 | activation=leaky 524 | 525 | [shortcut] 526 | from=-3 527 | activation=linear 528 | 529 | [convolutional] 530 | batch_normalize=1 531 | filters=512 532 | size=1 533 | stride=1 534 | pad=1 535 | activation=leaky 536 | 537 | [convolutional] 538 | batch_normalize=1 539 | filters=1024 540 | size=3 541 | stride=1 542 | pad=1 543 | activation=leaky 544 | 545 | [shortcut] 546 | from=-3 547 | activation=linear 548 | 549 | ###################### 550 | 551 | [convolutional] 552 | batch_normalize=1 553 | filters=512 554 | size=1 555 | stride=1 556 | pad=1 557 | activation=leaky 558 | 559 | [convolutional] 560 | batch_normalize=1 561 | size=3 562 | stride=1 563 | pad=1 564 | filters=1024 565 | activation=leaky 566 | 567 | [convolutional] 568 | batch_normalize=1 569 | filters=512 570 | size=1 571 | stride=1 572 | pad=1 573 | activation=leaky 574 | 575 | [convolutional] 576 | batch_normalize=1 577 | size=3 578 | stride=1 579 | pad=1 580 | filters=1024 581 | activation=leaky 582 | 583 | [convolutional] 584 | batch_normalize=1 585 | filters=512 586 | size=1 587 | stride=1 588 | pad=1 589 | activation=leaky 590 | 591 | [convolutional] 592 | batch_normalize=1 593 | size=3 594 | stride=1 595 | pad=1 596 | filters=1024 597 | activation=leaky 598 | 599 | [convolutional] 600 | size=1 601 | stride=1 602 | pad=1 603 | filters=255 604 | activation=linear 605 | 606 | 607 | [yolo] 608 | mask = 6,7,8 609 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 610 | classes=80 611 | num=9 612 | jitter=.3 613 | ignore_thresh = .7 614 | truth_thresh = 1 615 | random=1 616 | 617 | 618 | [route] 619 | layers = -4 620 | 621 | [convolutional] 622 | batch_normalize=1 623 | filters=256 624 | size=1 625 | stride=1 626 | pad=1 627 | activation=leaky 628 | 629 | [upsample] 630 | stride=2 631 | 632 | [route] 633 | layers = -1, 61 634 | 635 | 636 | 637 | [convolutional] 638 | batch_normalize=1 639 | filters=256 640 | size=1 641 | stride=1 642 | pad=1 643 | activation=leaky 644 | 645 | [convolutional] 646 | batch_normalize=1 647 | size=3 648 | stride=1 649 | pad=1 650 | filters=512 651 | activation=leaky 652 | 653 | [convolutional] 654 | batch_normalize=1 655 | filters=256 656 | size=1 657 | stride=1 658 | pad=1 659 | activation=leaky 660 | 661 | [convolutional] 662 | batch_normalize=1 663 | size=3 664 | stride=1 665 | pad=1 666 | filters=512 667 | activation=leaky 668 | 669 | [convolutional] 670 | batch_normalize=1 671 | filters=256 672 | size=1 673 | stride=1 674 | pad=1 675 | activation=leaky 676 | 677 | [convolutional] 678 | batch_normalize=1 679 | size=3 680 | stride=1 681 | pad=1 682 | filters=512 683 | activation=leaky 684 | 685 | [convolutional] 686 | size=1 687 | stride=1 688 | pad=1 689 | filters=255 690 | activation=linear 691 | 692 | 693 | [yolo] 694 | mask = 3,4,5 695 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 696 | classes=80 697 | num=9 698 | jitter=.3 699 | 
ignore_thresh = .7 700 | truth_thresh = 1 701 | random=1 702 | 703 | 704 | 705 | [route] 706 | layers = -4 707 | 708 | [convolutional] 709 | batch_normalize=1 710 | filters=128 711 | size=1 712 | stride=1 713 | pad=1 714 | activation=leaky 715 | 716 | [upsample] 717 | stride=2 718 | 719 | [route] 720 | layers = -1, 36 721 | 722 | 723 | 724 | [convolutional] 725 | batch_normalize=1 726 | filters=128 727 | size=1 728 | stride=1 729 | pad=1 730 | activation=leaky 731 | 732 | [convolutional] 733 | batch_normalize=1 734 | size=3 735 | stride=1 736 | pad=1 737 | filters=256 738 | activation=leaky 739 | 740 | [convolutional] 741 | batch_normalize=1 742 | filters=128 743 | size=1 744 | stride=1 745 | pad=1 746 | activation=leaky 747 | 748 | [convolutional] 749 | batch_normalize=1 750 | size=3 751 | stride=1 752 | pad=1 753 | filters=256 754 | activation=leaky 755 | 756 | [convolutional] 757 | batch_normalize=1 758 | filters=128 759 | size=1 760 | stride=1 761 | pad=1 762 | activation=leaky 763 | 764 | [convolutional] 765 | batch_normalize=1 766 | size=3 767 | stride=1 768 | pad=1 769 | filters=256 770 | activation=leaky 771 | 772 | [convolutional] 773 | size=1 774 | stride=1 775 | pad=1 776 | filters=255 777 | activation=linear 778 | 779 | 780 | [yolo] 781 | mask = 0,1,2 782 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 783 | classes=80 784 | num=9 785 | jitter=.3 786 | ignore_thresh = .7 787 | truth_thresh = 1 788 | random=1 789 | 790 | -------------------------------------------------------------------------------- /prototxt/yolov3-tiny.prototxt: -------------------------------------------------------------------------------- 1 | name: "Darkent2Caffe" 2 | input: "data" 3 | input_dim: 1 4 | input_dim: 3 5 | input_dim: 416 6 | input_dim: 416 7 | 8 | layer { 9 | bottom: "data" 10 | top: "layer1-conv" 11 | name: "layer1-conv" 12 | type: "Convolution" 13 | convolution_param { 14 | num_output: 16 15 | kernel_size: 3 16 | pad: 1 17 | stride: 1 18 | bias_term: false 19 | } 20 | } 21 | layer { 22 | bottom: "layer1-conv" 23 | top: "layer1-conv" 24 | name: "layer1-bn" 25 | type: "BatchNorm" 26 | batch_norm_param { 27 | use_global_stats: true 28 | } 29 | } 30 | layer { 31 | bottom: "layer1-conv" 32 | top: "layer1-conv" 33 | name: "layer1-scale" 34 | type: "Scale" 35 | scale_param { 36 | bias_term: true 37 | } 38 | } 39 | layer { 40 | bottom: "layer1-conv" 41 | top: "layer1-conv" 42 | name: "layer1-act" 43 | type: "ReLU" 44 | relu_param { 45 | negative_slope: 0.1 46 | } 47 | } 48 | layer { 49 | bottom: "layer1-conv" 50 | top: "layer2-maxpool" 51 | name: "layer2-maxpool" 52 | type: "Pooling" 53 | pooling_param { 54 | stride: 2 55 | pool: MAX 56 | kernel_size: 2 57 | pad: 0 58 | } 59 | } 60 | layer { 61 | bottom: "layer2-maxpool" 62 | top: "layer3-conv" 63 | name: "layer3-conv" 64 | type: "Convolution" 65 | convolution_param { 66 | num_output: 32 67 | kernel_size: 3 68 | pad: 1 69 | stride: 1 70 | bias_term: false 71 | } 72 | } 73 | layer { 74 | bottom: "layer3-conv" 75 | top: "layer3-conv" 76 | name: "layer3-bn" 77 | type: "BatchNorm" 78 | batch_norm_param { 79 | use_global_stats: true 80 | } 81 | } 82 | layer { 83 | bottom: "layer3-conv" 84 | top: "layer3-conv" 85 | name: "layer3-scale" 86 | type: "Scale" 87 | scale_param { 88 | bias_term: true 89 | } 90 | } 91 | layer { 92 | bottom: "layer3-conv" 93 | top: "layer3-conv" 94 | name: "layer3-act" 95 | type: "ReLU" 96 | relu_param { 97 | negative_slope: 0.1 98 | } 99 | } 100 | layer { 101 | bottom: "layer3-conv" 102 | top: 
"layer4-maxpool" 103 | name: "layer4-maxpool" 104 | type: "Pooling" 105 | pooling_param { 106 | stride: 2 107 | pool: MAX 108 | kernel_size: 2 109 | pad: 0 110 | } 111 | } 112 | layer { 113 | bottom: "layer4-maxpool" 114 | top: "layer5-conv" 115 | name: "layer5-conv" 116 | type: "Convolution" 117 | convolution_param { 118 | num_output: 64 119 | kernel_size: 3 120 | pad: 1 121 | stride: 1 122 | bias_term: false 123 | } 124 | } 125 | layer { 126 | bottom: "layer5-conv" 127 | top: "layer5-conv" 128 | name: "layer5-bn" 129 | type: "BatchNorm" 130 | batch_norm_param { 131 | use_global_stats: true 132 | } 133 | } 134 | layer { 135 | bottom: "layer5-conv" 136 | top: "layer5-conv" 137 | name: "layer5-scale" 138 | type: "Scale" 139 | scale_param { 140 | bias_term: true 141 | } 142 | } 143 | layer { 144 | bottom: "layer5-conv" 145 | top: "layer5-conv" 146 | name: "layer5-act" 147 | type: "ReLU" 148 | relu_param { 149 | negative_slope: 0.1 150 | } 151 | } 152 | layer { 153 | bottom: "layer5-conv" 154 | top: "layer6-maxpool" 155 | name: "layer6-maxpool" 156 | type: "Pooling" 157 | pooling_param { 158 | stride: 2 159 | pool: MAX 160 | kernel_size: 2 161 | pad: 0 162 | } 163 | } 164 | layer { 165 | bottom: "layer6-maxpool" 166 | top: "layer7-conv" 167 | name: "layer7-conv" 168 | type: "Convolution" 169 | convolution_param { 170 | num_output: 128 171 | kernel_size: 3 172 | pad: 1 173 | stride: 1 174 | bias_term: false 175 | } 176 | } 177 | layer { 178 | bottom: "layer7-conv" 179 | top: "layer7-conv" 180 | name: "layer7-bn" 181 | type: "BatchNorm" 182 | batch_norm_param { 183 | use_global_stats: true 184 | } 185 | } 186 | layer { 187 | bottom: "layer7-conv" 188 | top: "layer7-conv" 189 | name: "layer7-scale" 190 | type: "Scale" 191 | scale_param { 192 | bias_term: true 193 | } 194 | } 195 | layer { 196 | bottom: "layer7-conv" 197 | top: "layer7-conv" 198 | name: "layer7-act" 199 | type: "ReLU" 200 | relu_param { 201 | negative_slope: 0.1 202 | } 203 | } 204 | layer { 205 | bottom: "layer7-conv" 206 | top: "layer8-maxpool" 207 | name: "layer8-maxpool" 208 | type: "Pooling" 209 | pooling_param { 210 | stride: 2 211 | pool: MAX 212 | kernel_size: 2 213 | pad: 0 214 | } 215 | } 216 | layer { 217 | bottom: "layer8-maxpool" 218 | top: "layer9-conv" 219 | name: "layer9-conv" 220 | type: "Convolution" 221 | convolution_param { 222 | num_output: 256 223 | kernel_size: 3 224 | pad: 1 225 | stride: 1 226 | bias_term: false 227 | } 228 | } 229 | layer { 230 | bottom: "layer9-conv" 231 | top: "layer9-conv" 232 | name: "layer9-bn" 233 | type: "BatchNorm" 234 | batch_norm_param { 235 | use_global_stats: true 236 | } 237 | } 238 | layer { 239 | bottom: "layer9-conv" 240 | top: "layer9-conv" 241 | name: "layer9-scale" 242 | type: "Scale" 243 | scale_param { 244 | bias_term: true 245 | } 246 | } 247 | layer { 248 | bottom: "layer9-conv" 249 | top: "layer9-conv" 250 | name: "layer9-act" 251 | type: "ReLU" 252 | relu_param { 253 | negative_slope: 0.1 254 | } 255 | } 256 | layer { 257 | bottom: "layer9-conv" 258 | top: "layer10-maxpool" 259 | name: "layer10-maxpool" 260 | type: "Pooling" 261 | pooling_param { 262 | stride: 2 263 | pool: MAX 264 | kernel_size: 2 265 | pad: 0 266 | } 267 | } 268 | layer { 269 | bottom: "layer10-maxpool" 270 | top: "layer11-conv" 271 | name: "layer11-conv" 272 | type: "Convolution" 273 | convolution_param { 274 | num_output: 512 275 | kernel_size: 3 276 | pad: 1 277 | stride: 1 278 | bias_term: false 279 | } 280 | } 281 | layer { 282 | bottom: "layer11-conv" 283 | top: "layer11-conv" 284 | name: 
"layer11-bn" 285 | type: "BatchNorm" 286 | batch_norm_param { 287 | use_global_stats: true 288 | } 289 | } 290 | layer { 291 | bottom: "layer11-conv" 292 | top: "layer11-conv" 293 | name: "layer11-scale" 294 | type: "Scale" 295 | scale_param { 296 | bias_term: true 297 | } 298 | } 299 | layer { 300 | bottom: "layer11-conv" 301 | top: "layer11-conv" 302 | name: "layer11-act" 303 | type: "ReLU" 304 | relu_param { 305 | negative_slope: 0.1 306 | } 307 | } 308 | layer { 309 | bottom: "layer11-conv" 310 | top: "layer12-maxpool" 311 | name: "layer12-maxpool" 312 | type: "Pooling" 313 | pooling_param { 314 | stride: 1 315 | pool: MAX 316 | kernel_size: 2 317 | pad: 0 318 | } 319 | } 320 | layer { 321 | bottom: "layer12-maxpool" 322 | top: "layer13-conv" 323 | name: "layer13-conv" 324 | type: "Convolution" 325 | convolution_param { 326 | num_output: 1024 327 | kernel_size: 3 328 | pad: 1 329 | stride: 1 330 | bias_term: false 331 | } 332 | } 333 | layer { 334 | bottom: "layer13-conv" 335 | top: "layer13-conv" 336 | name: "layer13-bn" 337 | type: "BatchNorm" 338 | batch_norm_param { 339 | use_global_stats: true 340 | } 341 | } 342 | layer { 343 | bottom: "layer13-conv" 344 | top: "layer13-conv" 345 | name: "layer13-scale" 346 | type: "Scale" 347 | scale_param { 348 | bias_term: true 349 | } 350 | } 351 | layer { 352 | bottom: "layer13-conv" 353 | top: "layer13-conv" 354 | name: "layer13-act" 355 | type: "ReLU" 356 | relu_param { 357 | negative_slope: 0.1 358 | } 359 | } 360 | layer { 361 | bottom: "layer13-conv" 362 | top: "layer14-conv" 363 | name: "layer14-conv" 364 | type: "Convolution" 365 | convolution_param { 366 | num_output: 256 367 | kernel_size: 1 368 | pad: 0 369 | stride: 1 370 | bias_term: false 371 | } 372 | } 373 | layer { 374 | bottom: "layer14-conv" 375 | top: "layer14-conv" 376 | name: "layer14-bn" 377 | type: "BatchNorm" 378 | batch_norm_param { 379 | use_global_stats: true 380 | } 381 | } 382 | layer { 383 | bottom: "layer14-conv" 384 | top: "layer14-conv" 385 | name: "layer14-scale" 386 | type: "Scale" 387 | scale_param { 388 | bias_term: true 389 | } 390 | } 391 | layer { 392 | bottom: "layer14-conv" 393 | top: "layer14-conv" 394 | name: "layer14-act" 395 | type: "ReLU" 396 | relu_param { 397 | negative_slope: 0.1 398 | } 399 | } 400 | layer { 401 | bottom: "layer14-conv" 402 | top: "layer15-conv" 403 | name: "layer15-conv" 404 | type: "Convolution" 405 | convolution_param { 406 | num_output: 512 407 | kernel_size: 3 408 | pad: 1 409 | stride: 1 410 | bias_term: false 411 | } 412 | } 413 | layer { 414 | bottom: "layer15-conv" 415 | top: "layer15-conv" 416 | name: "layer15-bn" 417 | type: "BatchNorm" 418 | batch_norm_param { 419 | use_global_stats: true 420 | } 421 | } 422 | layer { 423 | bottom: "layer15-conv" 424 | top: "layer15-conv" 425 | name: "layer15-scale" 426 | type: "Scale" 427 | scale_param { 428 | bias_term: true 429 | } 430 | } 431 | layer { 432 | bottom: "layer15-conv" 433 | top: "layer15-conv" 434 | name: "layer15-act" 435 | type: "ReLU" 436 | relu_param { 437 | negative_slope: 0.1 438 | } 439 | } 440 | layer { 441 | bottom: "layer15-conv" 442 | top: "layer16-conv" 443 | name: "layer16-conv" 444 | type: "Convolution" 445 | convolution_param { 446 | num_output: 255 447 | kernel_size: 1 448 | pad: 0 449 | stride: 1 450 | bias_term: true 451 | } 452 | } 453 | layer { 454 | bottom: "layer14-conv" 455 | top: "layer18-route" 456 | name: "layer18-route" 457 | type: "Concat" 458 | } 459 | layer { 460 | bottom: "layer18-route" 461 | top: "layer19-conv" 462 | name: 
"layer19-conv" 463 | type: "Convolution" 464 | convolution_param { 465 | num_output: 128 466 | kernel_size: 1 467 | pad: 0 468 | stride: 1 469 | bias_term: false 470 | } 471 | } 472 | layer { 473 | bottom: "layer19-conv" 474 | top: "layer19-conv" 475 | name: "layer19-bn" 476 | type: "BatchNorm" 477 | batch_norm_param { 478 | use_global_stats: true 479 | } 480 | } 481 | layer { 482 | bottom: "layer19-conv" 483 | top: "layer19-conv" 484 | name: "layer19-scale" 485 | type: "Scale" 486 | scale_param { 487 | bias_term: true 488 | } 489 | } 490 | layer { 491 | bottom: "layer19-conv" 492 | top: "layer19-conv" 493 | name: "layer19-act" 494 | type: "ReLU" 495 | relu_param { 496 | negative_slope: 0.1 497 | } 498 | } 499 | layer { 500 | bottom: "layer19-conv" 501 | top: "layer20-upsample" 502 | name: "layer20-upsample" 503 | type: "Upsample" 504 | upsample_param { 505 | scale: 2 506 | } 507 | } 508 | layer { 509 | bottom: "layer20-upsample" 510 | bottom: "layer9-conv" 511 | top: "layer21-route" 512 | name: "layer21-route" 513 | type: "Concat" 514 | } 515 | layer { 516 | bottom: "layer21-route" 517 | top: "layer22-conv" 518 | name: "layer22-conv" 519 | type: "Convolution" 520 | convolution_param { 521 | num_output: 256 522 | kernel_size: 3 523 | pad: 1 524 | stride: 1 525 | bias_term: false 526 | } 527 | } 528 | layer { 529 | bottom: "layer22-conv" 530 | top: "layer22-conv" 531 | name: "layer22-bn" 532 | type: "BatchNorm" 533 | batch_norm_param { 534 | use_global_stats: true 535 | } 536 | } 537 | layer { 538 | bottom: "layer22-conv" 539 | top: "layer22-conv" 540 | name: "layer22-scale" 541 | type: "Scale" 542 | scale_param { 543 | bias_term: true 544 | } 545 | } 546 | layer { 547 | bottom: "layer22-conv" 548 | top: "layer22-conv" 549 | name: "layer22-act" 550 | type: "ReLU" 551 | relu_param { 552 | negative_slope: 0.1 553 | } 554 | } 555 | layer { 556 | bottom: "layer22-conv" 557 | top: "layer23-conv" 558 | name: "layer23-conv" 559 | type: "Convolution" 560 | convolution_param { 561 | num_output: 255 562 | kernel_size: 1 563 | pad: 0 564 | stride: 1 565 | bias_term: true 566 | } 567 | } 568 | -------------------------------------------------------------------------------- /cfg/yolov3-spp.cfg: -------------------------------------------------------------------------------- 1 | [net] 2 | # Testing 3 | batch=1 4 | subdivisions=1 5 | # Training 6 | # batch=64 7 | # subdivisions=16 8 | width=608 9 | height=608 10 | channels=3 11 | momentum=0.9 12 | decay=0.0005 13 | angle=0 14 | saturation = 1.5 15 | exposure = 1.5 16 | hue=.1 17 | 18 | learning_rate=0.001 19 | burn_in=1000 20 | max_batches = 500200 21 | policy=steps 22 | steps=400000,450000 23 | scales=.1,.1 24 | 25 | [convolutional] 26 | batch_normalize=1 27 | filters=32 28 | size=3 29 | stride=1 30 | pad=1 31 | activation=leaky 32 | 33 | # Downsample 34 | 35 | [convolutional] 36 | batch_normalize=1 37 | filters=64 38 | size=3 39 | stride=2 40 | pad=1 41 | activation=leaky 42 | 43 | [convolutional] 44 | batch_normalize=1 45 | filters=32 46 | size=1 47 | stride=1 48 | pad=1 49 | activation=leaky 50 | 51 | [convolutional] 52 | batch_normalize=1 53 | filters=64 54 | size=3 55 | stride=1 56 | pad=1 57 | activation=leaky 58 | 59 | [shortcut] 60 | from=-3 61 | activation=linear 62 | 63 | # Downsample 64 | 65 | [convolutional] 66 | batch_normalize=1 67 | filters=128 68 | size=3 69 | stride=2 70 | pad=1 71 | activation=leaky 72 | 73 | [convolutional] 74 | batch_normalize=1 75 | filters=64 76 | size=1 77 | stride=1 78 | pad=1 79 | activation=leaky 80 | 81 | 
[convolutional] 82 | batch_normalize=1 83 | filters=128 84 | size=3 85 | stride=1 86 | pad=1 87 | activation=leaky 88 | 89 | [shortcut] 90 | from=-3 91 | activation=linear 92 | 93 | [convolutional] 94 | batch_normalize=1 95 | filters=64 96 | size=1 97 | stride=1 98 | pad=1 99 | activation=leaky 100 | 101 | [convolutional] 102 | batch_normalize=1 103 | filters=128 104 | size=3 105 | stride=1 106 | pad=1 107 | activation=leaky 108 | 109 | [shortcut] 110 | from=-3 111 | activation=linear 112 | 113 | # Downsample 114 | 115 | [convolutional] 116 | batch_normalize=1 117 | filters=256 118 | size=3 119 | stride=2 120 | pad=1 121 | activation=leaky 122 | 123 | [convolutional] 124 | batch_normalize=1 125 | filters=128 126 | size=1 127 | stride=1 128 | pad=1 129 | activation=leaky 130 | 131 | [convolutional] 132 | batch_normalize=1 133 | filters=256 134 | size=3 135 | stride=1 136 | pad=1 137 | activation=leaky 138 | 139 | [shortcut] 140 | from=-3 141 | activation=linear 142 | 143 | [convolutional] 144 | batch_normalize=1 145 | filters=128 146 | size=1 147 | stride=1 148 | pad=1 149 | activation=leaky 150 | 151 | [convolutional] 152 | batch_normalize=1 153 | filters=256 154 | size=3 155 | stride=1 156 | pad=1 157 | activation=leaky 158 | 159 | [shortcut] 160 | from=-3 161 | activation=linear 162 | 163 | [convolutional] 164 | batch_normalize=1 165 | filters=128 166 | size=1 167 | stride=1 168 | pad=1 169 | activation=leaky 170 | 171 | [convolutional] 172 | batch_normalize=1 173 | filters=256 174 | size=3 175 | stride=1 176 | pad=1 177 | activation=leaky 178 | 179 | [shortcut] 180 | from=-3 181 | activation=linear 182 | 183 | [convolutional] 184 | batch_normalize=1 185 | filters=128 186 | size=1 187 | stride=1 188 | pad=1 189 | activation=leaky 190 | 191 | [convolutional] 192 | batch_normalize=1 193 | filters=256 194 | size=3 195 | stride=1 196 | pad=1 197 | activation=leaky 198 | 199 | [shortcut] 200 | from=-3 201 | activation=linear 202 | 203 | 204 | [convolutional] 205 | batch_normalize=1 206 | filters=128 207 | size=1 208 | stride=1 209 | pad=1 210 | activation=leaky 211 | 212 | [convolutional] 213 | batch_normalize=1 214 | filters=256 215 | size=3 216 | stride=1 217 | pad=1 218 | activation=leaky 219 | 220 | [shortcut] 221 | from=-3 222 | activation=linear 223 | 224 | [convolutional] 225 | batch_normalize=1 226 | filters=128 227 | size=1 228 | stride=1 229 | pad=1 230 | activation=leaky 231 | 232 | [convolutional] 233 | batch_normalize=1 234 | filters=256 235 | size=3 236 | stride=1 237 | pad=1 238 | activation=leaky 239 | 240 | [shortcut] 241 | from=-3 242 | activation=linear 243 | 244 | [convolutional] 245 | batch_normalize=1 246 | filters=128 247 | size=1 248 | stride=1 249 | pad=1 250 | activation=leaky 251 | 252 | [convolutional] 253 | batch_normalize=1 254 | filters=256 255 | size=3 256 | stride=1 257 | pad=1 258 | activation=leaky 259 | 260 | [shortcut] 261 | from=-3 262 | activation=linear 263 | 264 | [convolutional] 265 | batch_normalize=1 266 | filters=128 267 | size=1 268 | stride=1 269 | pad=1 270 | activation=leaky 271 | 272 | [convolutional] 273 | batch_normalize=1 274 | filters=256 275 | size=3 276 | stride=1 277 | pad=1 278 | activation=leaky 279 | 280 | [shortcut] 281 | from=-3 282 | activation=linear 283 | 284 | # Downsample 285 | 286 | [convolutional] 287 | batch_normalize=1 288 | filters=512 289 | size=3 290 | stride=2 291 | pad=1 292 | activation=leaky 293 | 294 | [convolutional] 295 | batch_normalize=1 296 | filters=256 297 | size=1 298 | stride=1 299 | pad=1 300 | 
activation=leaky 301 | 302 | [convolutional] 303 | batch_normalize=1 304 | filters=512 305 | size=3 306 | stride=1 307 | pad=1 308 | activation=leaky 309 | 310 | [shortcut] 311 | from=-3 312 | activation=linear 313 | 314 | 315 | [convolutional] 316 | batch_normalize=1 317 | filters=256 318 | size=1 319 | stride=1 320 | pad=1 321 | activation=leaky 322 | 323 | [convolutional] 324 | batch_normalize=1 325 | filters=512 326 | size=3 327 | stride=1 328 | pad=1 329 | activation=leaky 330 | 331 | [shortcut] 332 | from=-3 333 | activation=linear 334 | 335 | 336 | [convolutional] 337 | batch_normalize=1 338 | filters=256 339 | size=1 340 | stride=1 341 | pad=1 342 | activation=leaky 343 | 344 | [convolutional] 345 | batch_normalize=1 346 | filters=512 347 | size=3 348 | stride=1 349 | pad=1 350 | activation=leaky 351 | 352 | [shortcut] 353 | from=-3 354 | activation=linear 355 | 356 | 357 | [convolutional] 358 | batch_normalize=1 359 | filters=256 360 | size=1 361 | stride=1 362 | pad=1 363 | activation=leaky 364 | 365 | [convolutional] 366 | batch_normalize=1 367 | filters=512 368 | size=3 369 | stride=1 370 | pad=1 371 | activation=leaky 372 | 373 | [shortcut] 374 | from=-3 375 | activation=linear 376 | 377 | [convolutional] 378 | batch_normalize=1 379 | filters=256 380 | size=1 381 | stride=1 382 | pad=1 383 | activation=leaky 384 | 385 | [convolutional] 386 | batch_normalize=1 387 | filters=512 388 | size=3 389 | stride=1 390 | pad=1 391 | activation=leaky 392 | 393 | [shortcut] 394 | from=-3 395 | activation=linear 396 | 397 | 398 | [convolutional] 399 | batch_normalize=1 400 | filters=256 401 | size=1 402 | stride=1 403 | pad=1 404 | activation=leaky 405 | 406 | [convolutional] 407 | batch_normalize=1 408 | filters=512 409 | size=3 410 | stride=1 411 | pad=1 412 | activation=leaky 413 | 414 | [shortcut] 415 | from=-3 416 | activation=linear 417 | 418 | 419 | [convolutional] 420 | batch_normalize=1 421 | filters=256 422 | size=1 423 | stride=1 424 | pad=1 425 | activation=leaky 426 | 427 | [convolutional] 428 | batch_normalize=1 429 | filters=512 430 | size=3 431 | stride=1 432 | pad=1 433 | activation=leaky 434 | 435 | [shortcut] 436 | from=-3 437 | activation=linear 438 | 439 | [convolutional] 440 | batch_normalize=1 441 | filters=256 442 | size=1 443 | stride=1 444 | pad=1 445 | activation=leaky 446 | 447 | [convolutional] 448 | batch_normalize=1 449 | filters=512 450 | size=3 451 | stride=1 452 | pad=1 453 | activation=leaky 454 | 455 | [shortcut] 456 | from=-3 457 | activation=linear 458 | 459 | # Downsample 460 | 461 | [convolutional] 462 | batch_normalize=1 463 | filters=1024 464 | size=3 465 | stride=2 466 | pad=1 467 | activation=leaky 468 | 469 | [convolutional] 470 | batch_normalize=1 471 | filters=512 472 | size=1 473 | stride=1 474 | pad=1 475 | activation=leaky 476 | 477 | [convolutional] 478 | batch_normalize=1 479 | filters=1024 480 | size=3 481 | stride=1 482 | pad=1 483 | activation=leaky 484 | 485 | [shortcut] 486 | from=-3 487 | activation=linear 488 | 489 | [convolutional] 490 | batch_normalize=1 491 | filters=512 492 | size=1 493 | stride=1 494 | pad=1 495 | activation=leaky 496 | 497 | [convolutional] 498 | batch_normalize=1 499 | filters=1024 500 | size=3 501 | stride=1 502 | pad=1 503 | activation=leaky 504 | 505 | [shortcut] 506 | from=-3 507 | activation=linear 508 | 509 | [convolutional] 510 | batch_normalize=1 511 | filters=512 512 | size=1 513 | stride=1 514 | pad=1 515 | activation=leaky 516 | 517 | [convolutional] 518 | batch_normalize=1 519 | filters=1024 520 | 
size=3 521 | stride=1 522 | pad=1 523 | activation=leaky 524 | 525 | [shortcut] 526 | from=-3 527 | activation=linear 528 | 529 | [convolutional] 530 | batch_normalize=1 531 | filters=512 532 | size=1 533 | stride=1 534 | pad=1 535 | activation=leaky 536 | 537 | [convolutional] 538 | batch_normalize=1 539 | filters=1024 540 | size=3 541 | stride=1 542 | pad=1 543 | activation=leaky 544 | 545 | [shortcut] 546 | from=-3 547 | activation=linear 548 | 549 | ###################### 550 | 551 | [convolutional] 552 | batch_normalize=1 553 | filters=512 554 | size=1 555 | stride=1 556 | pad=1 557 | activation=leaky 558 | 559 | [convolutional] 560 | batch_normalize=1 561 | size=3 562 | stride=1 563 | pad=1 564 | filters=1024 565 | activation=leaky 566 | 567 | [convolutional] 568 | batch_normalize=1 569 | filters=512 570 | size=1 571 | stride=1 572 | pad=1 573 | activation=leaky 574 | 575 | ### SPP ### 576 | [maxpool] 577 | stride=1 578 | size=5 579 | 580 | [route] 581 | layers=-2 582 | 583 | [maxpool] 584 | stride=1 585 | size=9 586 | 587 | [route] 588 | layers=-4 589 | 590 | [maxpool] 591 | stride=1 592 | size=13 593 | 594 | [route] 595 | layers=-1,-3,-5,-6 596 | 597 | ### End SPP ### 598 | 599 | [convolutional] 600 | batch_normalize=1 601 | filters=512 602 | size=1 603 | stride=1 604 | pad=1 605 | activation=leaky 606 | 607 | 608 | [convolutional] 609 | batch_normalize=1 610 | size=3 611 | stride=1 612 | pad=1 613 | filters=1024 614 | activation=leaky 615 | 616 | [convolutional] 617 | batch_normalize=1 618 | filters=512 619 | size=1 620 | stride=1 621 | pad=1 622 | activation=leaky 623 | 624 | [convolutional] 625 | batch_normalize=1 626 | size=3 627 | stride=1 628 | pad=1 629 | filters=1024 630 | activation=leaky 631 | 632 | [convolutional] 633 | size=1 634 | stride=1 635 | pad=1 636 | filters=255 637 | activation=linear 638 | 639 | 640 | [yolo] 641 | mask = 6,7,8 642 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 643 | classes=80 644 | num=9 645 | jitter=.3 646 | ignore_thresh = .7 647 | truth_thresh = 1 648 | random=1 649 | 650 | 651 | [route] 652 | layers = -4 653 | 654 | [convolutional] 655 | batch_normalize=1 656 | filters=256 657 | size=1 658 | stride=1 659 | pad=1 660 | activation=leaky 661 | 662 | [upsample] 663 | stride=2 664 | 665 | [route] 666 | layers = -1, 61 667 | 668 | 669 | 670 | [convolutional] 671 | batch_normalize=1 672 | filters=256 673 | size=1 674 | stride=1 675 | pad=1 676 | activation=leaky 677 | 678 | [convolutional] 679 | batch_normalize=1 680 | size=3 681 | stride=1 682 | pad=1 683 | filters=512 684 | activation=leaky 685 | 686 | [convolutional] 687 | batch_normalize=1 688 | filters=256 689 | size=1 690 | stride=1 691 | pad=1 692 | activation=leaky 693 | 694 | [convolutional] 695 | batch_normalize=1 696 | size=3 697 | stride=1 698 | pad=1 699 | filters=512 700 | activation=leaky 701 | 702 | [convolutional] 703 | batch_normalize=1 704 | filters=256 705 | size=1 706 | stride=1 707 | pad=1 708 | activation=leaky 709 | 710 | [convolutional] 711 | batch_normalize=1 712 | size=3 713 | stride=1 714 | pad=1 715 | filters=512 716 | activation=leaky 717 | 718 | [convolutional] 719 | size=1 720 | stride=1 721 | pad=1 722 | filters=255 723 | activation=linear 724 | 725 | 726 | [yolo] 727 | mask = 3,4,5 728 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326 729 | classes=80 730 | num=9 731 | jitter=.3 732 | ignore_thresh = .7 733 | truth_thresh = 1 734 | random=1 735 | 736 | 737 | 738 | [route] 739 | layers = -4 740 | 741 
| [convolutional]
742 | batch_normalize=1
743 | filters=128
744 | size=1
745 | stride=1
746 | pad=1
747 | activation=leaky
748 |
749 | [upsample]
750 | stride=2
751 |
752 | [route]
753 | layers = -1, 36
754 |
755 |
756 |
757 | [convolutional]
758 | batch_normalize=1
759 | filters=128
760 | size=1
761 | stride=1
762 | pad=1
763 | activation=leaky
764 |
765 | [convolutional]
766 | batch_normalize=1
767 | size=3
768 | stride=1
769 | pad=1
770 | filters=256
771 | activation=leaky
772 |
773 | [convolutional]
774 | batch_normalize=1
775 | filters=128
776 | size=1
777 | stride=1
778 | pad=1
779 | activation=leaky
780 |
781 | [convolutional]
782 | batch_normalize=1
783 | size=3
784 | stride=1
785 | pad=1
786 | filters=256
787 | activation=leaky
788 |
789 | [convolutional]
790 | batch_normalize=1
791 | filters=128
792 | size=1
793 | stride=1
794 | pad=1
795 | activation=leaky
796 |
797 | [convolutional]
798 | batch_normalize=1
799 | size=3
800 | stride=1
801 | pad=1
802 | filters=256
803 | activation=leaky
804 |
805 | [convolutional]
806 | size=1
807 | stride=1
808 | pad=1
809 | filters=255
810 | activation=linear
811 |
812 |
813 | [yolo]
814 | mask = 0,1,2
815 | anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
816 | classes=80
817 | num=9
818 | jitter=.3
819 | ignore_thresh = .7
820 | truth_thresh = 1
821 | random=1
822 |
823 |
--------------------------------------------------------------------------------
/caffe_layers/pooling_layer/pooling_layer.cpp:
--------------------------------------------------------------------------------
1 | #include <algorithm>
2 | #include <cfloat>
3 | #include <vector>
4 |
5 | #include "caffe/layers/pooling_layer.hpp"
6 | #include "caffe/util/math_functions.hpp"
7 |
8 | namespace caffe {
9 |
10 | using std::min;
11 | using std::max;
12 |
13 | template <typename Dtype>
14 | void PoolingLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
15 |       const vector<Blob<Dtype>*>& top) {
16 |   PoolingParameter pool_param = this->layer_param_.pooling_param();
17 |   if (pool_param.global_pooling()) {
18 |     CHECK(!(pool_param.has_kernel_size() ||
19 |       pool_param.has_kernel_h() || pool_param.has_kernel_w()))
20 |       << "With Global_pooling: true Filter size cannot be specified";
21 |   } else {
22 |     CHECK(!pool_param.has_kernel_size() !=
23 |       !(pool_param.has_kernel_h() && pool_param.has_kernel_w()))
24 |       << "Filter size is kernel_size OR kernel_h and kernel_w; not both";
25 |     CHECK(pool_param.has_kernel_size() ||
26 |       (pool_param.has_kernel_h() && pool_param.has_kernel_w()))
27 |       << "For non-square filters both kernel_h and kernel_w are required.";
28 |   }
29 |   CHECK((!pool_param.has_pad() && pool_param.has_pad_h()
30 |       && pool_param.has_pad_w())
31 |       || (!pool_param.has_pad_h() && !pool_param.has_pad_w()))
32 |       << "pad is pad OR pad_h and pad_w are required.";
33 |   CHECK((!pool_param.has_stride() && pool_param.has_stride_h()
34 |       && pool_param.has_stride_w())
35 |       || (!pool_param.has_stride_h() && !pool_param.has_stride_w()))
36 |       << "Stride is stride OR stride_h and stride_w are required.";
37 |   global_pooling_ = pool_param.global_pooling();
38 |   if (global_pooling_) {
39 |     kernel_h_ = bottom[0]->height();
40 |     kernel_w_ = bottom[0]->width();
41 |   } else {
42 |     if (pool_param.has_kernel_size()) {
43 |       kernel_h_ = kernel_w_ = pool_param.kernel_size();
44 |     } else {
45 |       kernel_h_ = pool_param.kernel_h();
46 |       kernel_w_ = pool_param.kernel_w();
47 |     }
48 |   }
49 |   CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero.";
50 |   CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero.";
51 |   if (!pool_param.has_pad_h()) {
52 |     pad_h_ = pad_w_ = pool_param.pad();
53 |   } else {
54 |     pad_h_ = pool_param.pad_h();
55 |     pad_w_ = pool_param.pad_w();
56 |   }
57 |   if (!pool_param.has_stride_h()) {
58 |     stride_h_ = stride_w_ = pool_param.stride();
59 |   } else {
60 |     stride_h_ = pool_param.stride_h();
61 |     stride_w_ = pool_param.stride_w();
62 |   }
63 |   if (global_pooling_) {
64 |     CHECK(pad_h_ == 0 && pad_w_ == 0 && stride_h_ == 1 && stride_w_ == 1)
65 |       << "With Global_pooling: true; only pad = 0 and stride = 1";
66 |   }
67 |   if (pad_h_ != 0 || pad_w_ != 0) {
68 |     CHECK(this->layer_param_.pooling_param().pool()
69 |         == PoolingParameter_PoolMethod_AVE
70 |         || this->layer_param_.pooling_param().pool()
71 |         == PoolingParameter_PoolMethod_MAX)
72 |         << "Padding implemented only for average and max pooling.";
73 |     CHECK_LT(pad_h_, kernel_h_);
74 |     CHECK_LT(pad_w_, kernel_w_);
75 |   }
76 | }
77 |
78 | template <typename Dtype>
79 | void PoolingLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
80 |       const vector<Blob<Dtype>*>& top) {
81 |   CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, "
82 |       << "corresponding to (num, channels, height, width)";
83 |   channels_ = bottom[0]->channels();
84 |   height_ = bottom[0]->height();
85 |   width_ = bottom[0]->width();
86 |   if (global_pooling_) {
87 |     kernel_h_ = bottom[0]->height();
88 |     kernel_w_ = bottom[0]->width();
89 |   }
90 |   pooled_height_ = static_cast<int>(ceil(static_cast<float>(
91 |       height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1;
92 |   pooled_width_ = static_cast<int>(ceil(static_cast<float>(
93 |       width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1;
94 |
95 |   /*added by chen for darknet yolov3 tiny maxpool layer stride=1,size=2*/
96 |   if ((kernel_h_ - stride_h_) % 2 == 1) {
97 |     pooled_height_ += 1;
98 |     pooled_width_ += 1;
99 |   }
100 |
101 |   if (pad_h_ || pad_w_) {
102 |     // If we have padding, ensure that the last pooling starts strictly
103 |     // inside the image (instead of at the padding); otherwise clip the last.
104 |     if ((pooled_height_ - 1) * stride_h_ >= height_ + pad_h_) {
105 |       --pooled_height_;
106 |     }
107 |     if ((pooled_width_ - 1) * stride_w_ >= width_ + pad_w_) {
108 |       --pooled_width_;
109 |     }
110 |     CHECK_LT((pooled_height_ - 1) * stride_h_, height_ + pad_h_);
111 |     CHECK_LT((pooled_width_ - 1) * stride_w_, width_ + pad_w_);
112 |   }
113 |   top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_,
114 |       pooled_width_);
115 |   if (top.size() > 1) {
116 |     top[1]->ReshapeLike(*top[0]);
117 |   }
118 |   // If max pooling, we will initialize the vector index part.
119 |   if (this->layer_param_.pooling_param().pool() ==
120 |       PoolingParameter_PoolMethod_MAX && top.size() == 1) {
121 |     max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_,
122 |         pooled_width_);
123 |   }
124 |   // If stochastic pooling, we will initialize the random index part.
125 |   if (this->layer_param_.pooling_param().pool() ==
126 |       PoolingParameter_PoolMethod_STOCHASTIC) {
127 |     rand_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_,
128 |         pooled_width_);
129 |   }
130 | }
131 |
132 | // TODO(Yangqing): Is there a faster way to do pooling in the channel-first
133 | // case?
134 | template <typename Dtype>
135 | void PoolingLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
136 |       const vector<Blob<Dtype>*>& top) {
137 |   const Dtype* bottom_data = bottom[0]->cpu_data();
138 |   Dtype* top_data = top[0]->mutable_cpu_data();
139 |   const int top_count = top[0]->count();
140 |   // We'll output the mask to top[1] if it's of size >1.
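  // Note on shapes: with the Reshape() adjustment above, darknet's
  // size=2/stride=1 maxpool (layer 12 of yolov3-tiny) keeps its spatial size:
  // on a 13x13 input, ceil((13 - 2) / 1) + 1 = 12, and the +1 correction
  // restores 13x13, matching darknet's output.
  // For MAX pooling, the loop below also records each window's argmax
  // (in top[1] if a second top blob exists, otherwise in max_idx_) so that
  // Backward_cpu can route the gradient to the winning input element.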
141 |   const bool use_top_mask = top.size() > 1;
142 |   int* mask = NULL;  // suppress warnings about uninitialized variables
143 |   Dtype* top_mask = NULL;
144 |   // Different pooling methods. We explicitly do the switch outside the for
145 |   // loop to save time, although this results in more code.
146 |   switch (this->layer_param_.pooling_param().pool()) {
147 |   case PoolingParameter_PoolMethod_MAX:
148 |     // Initialize
149 |     if (use_top_mask) {
150 |       top_mask = top[1]->mutable_cpu_data();
151 |       caffe_set(top_count, Dtype(-1), top_mask);
152 |     } else {
153 |       mask = max_idx_.mutable_cpu_data();
154 |       caffe_set(top_count, -1, mask);
155 |     }
156 |     caffe_set(top_count, Dtype(-FLT_MAX), top_data);
157 |     // The main loop
158 |     for (int n = 0; n < bottom[0]->num(); ++n) {
159 |       for (int c = 0; c < channels_; ++c) {
160 |         for (int ph = 0; ph < pooled_height_; ++ph) {
161 |           for (int pw = 0; pw < pooled_width_; ++pw) {
162 |             int hstart = ph * stride_h_ - pad_h_;
163 |             int wstart = pw * stride_w_ - pad_w_;
164 |             int hend = min(hstart + kernel_h_, height_);
165 |             int wend = min(wstart + kernel_w_, width_);
166 |             hstart = max(hstart, 0);
167 |             wstart = max(wstart, 0);
168 |             const int pool_index = ph * pooled_width_ + pw;
169 |             for (int h = hstart; h < hend; ++h) {
170 |               for (int w = wstart; w < wend; ++w) {
171 |                 const int index = h * width_ + w;
172 |                 if (bottom_data[index] > top_data[pool_index]) {
173 |                   top_data[pool_index] = bottom_data[index];
174 |                   if (use_top_mask) {
175 |                     top_mask[pool_index] = static_cast<Dtype>(index);
176 |                   } else {
177 |                     mask[pool_index] = index;
178 |                   }
179 |                 }
180 |               }
181 |             }
182 |           }
183 |         }
184 |         // compute offset
185 |         bottom_data += bottom[0]->offset(0, 1);
186 |         top_data += top[0]->offset(0, 1);
187 |         if (use_top_mask) {
188 |           top_mask += top[0]->offset(0, 1);
189 |         } else {
190 |           mask += top[0]->offset(0, 1);
191 |         }
192 |       }
193 |     }
194 |     break;
195 |   case PoolingParameter_PoolMethod_AVE:
196 |     for (int i = 0; i < top_count; ++i) {
197 |       top_data[i] = 0;
198 |     }
199 |     // The main loop
200 |     for (int n = 0; n < bottom[0]->num(); ++n) {
201 |       for (int c = 0; c < channels_; ++c) {
202 |         for (int ph = 0; ph < pooled_height_; ++ph) {
203 |           for (int pw = 0; pw < pooled_width_; ++pw) {
204 |             int hstart = ph * stride_h_ - pad_h_;
205 |             int wstart = pw * stride_w_ - pad_w_;
206 |             int hend = min(hstart + kernel_h_, height_ + pad_h_);
207 |             int wend = min(wstart + kernel_w_, width_ + pad_w_);
208 |             int pool_size = (hend - hstart) * (wend - wstart);
209 |             hstart = max(hstart, 0);
210 |             wstart = max(wstart, 0);
211 |             hend = min(hend, height_);
212 |             wend = min(wend, width_);
213 |             for (int h = hstart; h < hend; ++h) {
214 |               for (int w = wstart; w < wend; ++w) {
215 |                 top_data[ph * pooled_width_ + pw] +=
216 |                     bottom_data[h * width_ + w];
217 |               }
218 |             }
219 |             top_data[ph * pooled_width_ + pw] /= pool_size;
220 |           }
221 |         }
222 |         // compute offset
223 |         bottom_data += bottom[0]->offset(0, 1);
224 |         top_data += top[0]->offset(0, 1);
225 |       }
226 |     }
227 |     break;
228 |   case PoolingParameter_PoolMethod_STOCHASTIC:
229 |     NOT_IMPLEMENTED;
230 |     break;
231 |   default:
232 |     LOG(FATAL) << "Unknown pooling method.";
233 |   }
234 | }
235 |
236 | template <typename Dtype>
237 | void PoolingLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
238 |       const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
239 |   if (!propagate_down[0]) {
240 |     return;
241 |   }
242 |   const Dtype* top_diff = top[0]->cpu_diff();
243 |   Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
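  // Note: the backward pass mirrors the forward bookkeeping. For MAX pooling,
  // each top gradient is scattered to the single bottom element whose index
  // was recorded in the mask; for AVE pooling, it is spread uniformly over the
  // window, divided by pool_size. bottom_diff is zeroed first because pooling
  // windows may overlap (stride < kernel), so gradients accumulate with +=.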
244 |   // Different pooling methods. We explicitly do the switch outside the for
245 |   // loop to save time, although this results in more code.
246 |   caffe_set(bottom[0]->count(), Dtype(0), bottom_diff);
247 |   // We'll output the mask to top[1] if it's of size >1.
248 |   const bool use_top_mask = top.size() > 1;
249 |   const int* mask = NULL;  // suppress warnings about uninitialized variables
250 |   const Dtype* top_mask = NULL;
251 |   switch (this->layer_param_.pooling_param().pool()) {
252 |   case PoolingParameter_PoolMethod_MAX:
253 |     // The main loop
254 |     if (use_top_mask) {
255 |       top_mask = top[1]->cpu_data();
256 |     } else {
257 |       mask = max_idx_.cpu_data();
258 |     }
259 |     for (int n = 0; n < top[0]->num(); ++n) {
260 |       for (int c = 0; c < channels_; ++c) {
261 |         for (int ph = 0; ph < pooled_height_; ++ph) {
262 |           for (int pw = 0; pw < pooled_width_; ++pw) {
263 |             const int index = ph * pooled_width_ + pw;
264 |             const int bottom_index =
265 |                 use_top_mask ? top_mask[index] : mask[index];
266 |             bottom_diff[bottom_index] += top_diff[index];
267 |           }
268 |         }
269 |         bottom_diff += bottom[0]->offset(0, 1);
270 |         top_diff += top[0]->offset(0, 1);
271 |         if (use_top_mask) {
272 |           top_mask += top[0]->offset(0, 1);
273 |         } else {
274 |           mask += top[0]->offset(0, 1);
275 |         }
276 |       }
277 |     }
278 |     break;
279 |   case PoolingParameter_PoolMethod_AVE:
280 |     // The main loop
281 |     for (int n = 0; n < top[0]->num(); ++n) {
282 |       for (int c = 0; c < channels_; ++c) {
283 |         for (int ph = 0; ph < pooled_height_; ++ph) {
284 |           for (int pw = 0; pw < pooled_width_; ++pw) {
285 |             int hstart = ph * stride_h_ - pad_h_;
286 |             int wstart = pw * stride_w_ - pad_w_;
287 |             int hend = min(hstart + kernel_h_, height_ + pad_h_);
288 |             int wend = min(wstart + kernel_w_, width_ + pad_w_);
289 |             int pool_size = (hend - hstart) * (wend - wstart);
290 |             hstart = max(hstart, 0);
291 |             wstart = max(wstart, 0);
292 |             hend = min(hend, height_);
293 |             wend = min(wend, width_);
294 |             for (int h = hstart; h < hend; ++h) {
295 |               for (int w = wstart; w < wend; ++w) {
296 |                 bottom_diff[h * width_ + w] +=
297 |                     top_diff[ph * pooled_width_ + pw] / pool_size;
298 |               }
299 |             }
300 |           }
301 |         }
302 |         // offset
303 |         bottom_diff += bottom[0]->offset(0, 1);
304 |         top_diff += top[0]->offset(0, 1);
305 |       }
306 |     }
307 |     break;
308 |   case PoolingParameter_PoolMethod_STOCHASTIC:
309 |     NOT_IMPLEMENTED;
310 |     break;
311 |   default:
312 |     LOG(FATAL) << "Unknown pooling method.";
313 |   }
314 | }
315 |
316 |
317 | #ifdef CPU_ONLY
318 | STUB_GPU(PoolingLayer);
319 | #endif
320 |
321 | INSTANTIATE_CLASS(PoolingLayer);
322 |
323 | }  // namespace caffe
324 |
--------------------------------------------------------------------------------
/cfg/yolov4.cfg:
--------------------------------------------------------------------------------
1 | [net]
2 | # Testing
3 | batch=1
4 | subdivisions=1
5 | # Training
6 | #batch=64
7 | #subdivisions=8
8 | width=608
9 | height=608
10 | channels=3
11 | momentum=0.949
12 | decay=0.0005
13 | angle=0
14 | saturation = 1.5
15 | exposure = 1.5
16 | hue=.1
17 |
18 | learning_rate=0.00261
19 | burn_in=1000
20 | max_batches = 500500
21 | policy=steps
22 | steps=400000,450000
23 | scales=.1,.1
24 |
25 | #cutmix=1
26 | mosaic=1
27 |
28 | #:104x104 54:52x52 85:26x26 104:13x13 for 416
29 |
30 | [convolutional]
31 | batch_normalize=1
32 | filters=32
33 | size=3
34 | stride=1
35 | pad=1
36 | activation=mish
37 |
38 | # Downsample
39 |
40 | [convolutional]
41 | batch_normalize=1
42 | filters=64
43 | size=3
44 | stride=2
45 | pad=1
46 | activation=mish
47 |
48 | [convolutional]
49 
| batch_normalize=1 50 | filters=64 51 | size=1 52 | stride=1 53 | pad=1 54 | activation=mish 55 | 56 | [route] 57 | layers = -2 58 | 59 | [convolutional] 60 | batch_normalize=1 61 | filters=64 62 | size=1 63 | stride=1 64 | pad=1 65 | activation=mish 66 | 67 | [convolutional] 68 | batch_normalize=1 69 | filters=32 70 | size=1 71 | stride=1 72 | pad=1 73 | activation=mish 74 | 75 | [convolutional] 76 | batch_normalize=1 77 | filters=64 78 | size=3 79 | stride=1 80 | pad=1 81 | activation=mish 82 | 83 | [shortcut] 84 | from=-3 85 | activation=linear 86 | 87 | [convolutional] 88 | batch_normalize=1 89 | filters=64 90 | size=1 91 | stride=1 92 | pad=1 93 | activation=mish 94 | 95 | [route] 96 | layers = -1,-7 97 | 98 | [convolutional] 99 | batch_normalize=1 100 | filters=64 101 | size=1 102 | stride=1 103 | pad=1 104 | activation=mish 105 | 106 | # Downsample 107 | 108 | [convolutional] 109 | batch_normalize=1 110 | filters=128 111 | size=3 112 | stride=2 113 | pad=1 114 | activation=mish 115 | 116 | [convolutional] 117 | batch_normalize=1 118 | filters=64 119 | size=1 120 | stride=1 121 | pad=1 122 | activation=mish 123 | 124 | [route] 125 | layers = -2 126 | 127 | [convolutional] 128 | batch_normalize=1 129 | filters=64 130 | size=1 131 | stride=1 132 | pad=1 133 | activation=mish 134 | 135 | [convolutional] 136 | batch_normalize=1 137 | filters=64 138 | size=1 139 | stride=1 140 | pad=1 141 | activation=mish 142 | 143 | [convolutional] 144 | batch_normalize=1 145 | filters=64 146 | size=3 147 | stride=1 148 | pad=1 149 | activation=mish 150 | 151 | [shortcut] 152 | from=-3 153 | activation=linear 154 | 155 | [convolutional] 156 | batch_normalize=1 157 | filters=64 158 | size=1 159 | stride=1 160 | pad=1 161 | activation=mish 162 | 163 | [convolutional] 164 | batch_normalize=1 165 | filters=64 166 | size=3 167 | stride=1 168 | pad=1 169 | activation=mish 170 | 171 | [shortcut] 172 | from=-3 173 | activation=linear 174 | 175 | [convolutional] 176 | batch_normalize=1 177 | filters=64 178 | size=1 179 | stride=1 180 | pad=1 181 | activation=mish 182 | 183 | [route] 184 | layers = -1,-10 185 | 186 | [convolutional] 187 | batch_normalize=1 188 | filters=128 189 | size=1 190 | stride=1 191 | pad=1 192 | activation=mish 193 | 194 | # Downsample 195 | 196 | [convolutional] 197 | batch_normalize=1 198 | filters=256 199 | size=3 200 | stride=2 201 | pad=1 202 | activation=mish 203 | 204 | [convolutional] 205 | batch_normalize=1 206 | filters=128 207 | size=1 208 | stride=1 209 | pad=1 210 | activation=mish 211 | 212 | [route] 213 | layers = -2 214 | 215 | [convolutional] 216 | batch_normalize=1 217 | filters=128 218 | size=1 219 | stride=1 220 | pad=1 221 | activation=mish 222 | 223 | [convolutional] 224 | batch_normalize=1 225 | filters=128 226 | size=1 227 | stride=1 228 | pad=1 229 | activation=mish 230 | 231 | [convolutional] 232 | batch_normalize=1 233 | filters=128 234 | size=3 235 | stride=1 236 | pad=1 237 | activation=mish 238 | 239 | [shortcut] 240 | from=-3 241 | activation=linear 242 | 243 | [convolutional] 244 | batch_normalize=1 245 | filters=128 246 | size=1 247 | stride=1 248 | pad=1 249 | activation=mish 250 | 251 | [convolutional] 252 | batch_normalize=1 253 | filters=128 254 | size=3 255 | stride=1 256 | pad=1 257 | activation=mish 258 | 259 | [shortcut] 260 | from=-3 261 | activation=linear 262 | 263 | [convolutional] 264 | batch_normalize=1 265 | filters=128 266 | size=1 267 | stride=1 268 | pad=1 269 | activation=mish 270 | 271 | [convolutional] 272 | batch_normalize=1 273 | 
filters=128 274 | size=3 275 | stride=1 276 | pad=1 277 | activation=mish 278 | 279 | [shortcut] 280 | from=-3 281 | activation=linear 282 | 283 | [convolutional] 284 | batch_normalize=1 285 | filters=128 286 | size=1 287 | stride=1 288 | pad=1 289 | activation=mish 290 | 291 | [convolutional] 292 | batch_normalize=1 293 | filters=128 294 | size=3 295 | stride=1 296 | pad=1 297 | activation=mish 298 | 299 | [shortcut] 300 | from=-3 301 | activation=linear 302 | 303 | 304 | [convolutional] 305 | batch_normalize=1 306 | filters=128 307 | size=1 308 | stride=1 309 | pad=1 310 | activation=mish 311 | 312 | [convolutional] 313 | batch_normalize=1 314 | filters=128 315 | size=3 316 | stride=1 317 | pad=1 318 | activation=mish 319 | 320 | [shortcut] 321 | from=-3 322 | activation=linear 323 | 324 | [convolutional] 325 | batch_normalize=1 326 | filters=128 327 | size=1 328 | stride=1 329 | pad=1 330 | activation=mish 331 | 332 | [convolutional] 333 | batch_normalize=1 334 | filters=128 335 | size=3 336 | stride=1 337 | pad=1 338 | activation=mish 339 | 340 | [shortcut] 341 | from=-3 342 | activation=linear 343 | 344 | [convolutional] 345 | batch_normalize=1 346 | filters=128 347 | size=1 348 | stride=1 349 | pad=1 350 | activation=mish 351 | 352 | [convolutional] 353 | batch_normalize=1 354 | filters=128 355 | size=3 356 | stride=1 357 | pad=1 358 | activation=mish 359 | 360 | [shortcut] 361 | from=-3 362 | activation=linear 363 | 364 | [convolutional] 365 | batch_normalize=1 366 | filters=128 367 | size=1 368 | stride=1 369 | pad=1 370 | activation=mish 371 | 372 | [convolutional] 373 | batch_normalize=1 374 | filters=128 375 | size=3 376 | stride=1 377 | pad=1 378 | activation=mish 379 | 380 | [shortcut] 381 | from=-3 382 | activation=linear 383 | 384 | [convolutional] 385 | batch_normalize=1 386 | filters=128 387 | size=1 388 | stride=1 389 | pad=1 390 | activation=mish 391 | 392 | [route] 393 | layers = -1,-28 394 | 395 | [convolutional] 396 | batch_normalize=1 397 | filters=256 398 | size=1 399 | stride=1 400 | pad=1 401 | activation=mish 402 | 403 | # Downsample 404 | 405 | [convolutional] 406 | batch_normalize=1 407 | filters=512 408 | size=3 409 | stride=2 410 | pad=1 411 | activation=mish 412 | 413 | [convolutional] 414 | batch_normalize=1 415 | filters=256 416 | size=1 417 | stride=1 418 | pad=1 419 | activation=mish 420 | 421 | [route] 422 | layers = -2 423 | 424 | [convolutional] 425 | batch_normalize=1 426 | filters=256 427 | size=1 428 | stride=1 429 | pad=1 430 | activation=mish 431 | 432 | [convolutional] 433 | batch_normalize=1 434 | filters=256 435 | size=1 436 | stride=1 437 | pad=1 438 | activation=mish 439 | 440 | [convolutional] 441 | batch_normalize=1 442 | filters=256 443 | size=3 444 | stride=1 445 | pad=1 446 | activation=mish 447 | 448 | [shortcut] 449 | from=-3 450 | activation=linear 451 | 452 | 453 | [convolutional] 454 | batch_normalize=1 455 | filters=256 456 | size=1 457 | stride=1 458 | pad=1 459 | activation=mish 460 | 461 | [convolutional] 462 | batch_normalize=1 463 | filters=256 464 | size=3 465 | stride=1 466 | pad=1 467 | activation=mish 468 | 469 | [shortcut] 470 | from=-3 471 | activation=linear 472 | 473 | 474 | [convolutional] 475 | batch_normalize=1 476 | filters=256 477 | size=1 478 | stride=1 479 | pad=1 480 | activation=mish 481 | 482 | [convolutional] 483 | batch_normalize=1 484 | filters=256 485 | size=3 486 | stride=1 487 | pad=1 488 | activation=mish 489 | 490 | [shortcut] 491 | from=-3 492 | activation=linear 493 | 494 | 495 | [convolutional] 
496 | batch_normalize=1 497 | filters=256 498 | size=1 499 | stride=1 500 | pad=1 501 | activation=mish 502 | 503 | [convolutional] 504 | batch_normalize=1 505 | filters=256 506 | size=3 507 | stride=1 508 | pad=1 509 | activation=mish 510 | 511 | [shortcut] 512 | from=-3 513 | activation=linear 514 | 515 | 516 | [convolutional] 517 | batch_normalize=1 518 | filters=256 519 | size=1 520 | stride=1 521 | pad=1 522 | activation=mish 523 | 524 | [convolutional] 525 | batch_normalize=1 526 | filters=256 527 | size=3 528 | stride=1 529 | pad=1 530 | activation=mish 531 | 532 | [shortcut] 533 | from=-3 534 | activation=linear 535 | 536 | 537 | [convolutional] 538 | batch_normalize=1 539 | filters=256 540 | size=1 541 | stride=1 542 | pad=1 543 | activation=mish 544 | 545 | [convolutional] 546 | batch_normalize=1 547 | filters=256 548 | size=3 549 | stride=1 550 | pad=1 551 | activation=mish 552 | 553 | [shortcut] 554 | from=-3 555 | activation=linear 556 | 557 | 558 | [convolutional] 559 | batch_normalize=1 560 | filters=256 561 | size=1 562 | stride=1 563 | pad=1 564 | activation=mish 565 | 566 | [convolutional] 567 | batch_normalize=1 568 | filters=256 569 | size=3 570 | stride=1 571 | pad=1 572 | activation=mish 573 | 574 | [shortcut] 575 | from=-3 576 | activation=linear 577 | 578 | [convolutional] 579 | batch_normalize=1 580 | filters=256 581 | size=1 582 | stride=1 583 | pad=1 584 | activation=mish 585 | 586 | [convolutional] 587 | batch_normalize=1 588 | filters=256 589 | size=3 590 | stride=1 591 | pad=1 592 | activation=mish 593 | 594 | [shortcut] 595 | from=-3 596 | activation=linear 597 | 598 | [convolutional] 599 | batch_normalize=1 600 | filters=256 601 | size=1 602 | stride=1 603 | pad=1 604 | activation=mish 605 | 606 | [route] 607 | layers = -1,-28 608 | 609 | [convolutional] 610 | batch_normalize=1 611 | filters=512 612 | size=1 613 | stride=1 614 | pad=1 615 | activation=mish 616 | 617 | # Downsample 618 | 619 | [convolutional] 620 | batch_normalize=1 621 | filters=1024 622 | size=3 623 | stride=2 624 | pad=1 625 | activation=mish 626 | 627 | [convolutional] 628 | batch_normalize=1 629 | filters=512 630 | size=1 631 | stride=1 632 | pad=1 633 | activation=mish 634 | 635 | [route] 636 | layers = -2 637 | 638 | [convolutional] 639 | batch_normalize=1 640 | filters=512 641 | size=1 642 | stride=1 643 | pad=1 644 | activation=mish 645 | 646 | [convolutional] 647 | batch_normalize=1 648 | filters=512 649 | size=1 650 | stride=1 651 | pad=1 652 | activation=mish 653 | 654 | [convolutional] 655 | batch_normalize=1 656 | filters=512 657 | size=3 658 | stride=1 659 | pad=1 660 | activation=mish 661 | 662 | [shortcut] 663 | from=-3 664 | activation=linear 665 | 666 | [convolutional] 667 | batch_normalize=1 668 | filters=512 669 | size=1 670 | stride=1 671 | pad=1 672 | activation=mish 673 | 674 | [convolutional] 675 | batch_normalize=1 676 | filters=512 677 | size=3 678 | stride=1 679 | pad=1 680 | activation=mish 681 | 682 | [shortcut] 683 | from=-3 684 | activation=linear 685 | 686 | [convolutional] 687 | batch_normalize=1 688 | filters=512 689 | size=1 690 | stride=1 691 | pad=1 692 | activation=mish 693 | 694 | [convolutional] 695 | batch_normalize=1 696 | filters=512 697 | size=3 698 | stride=1 699 | pad=1 700 | activation=mish 701 | 702 | [shortcut] 703 | from=-3 704 | activation=linear 705 | 706 | [convolutional] 707 | batch_normalize=1 708 | filters=512 709 | size=1 710 | stride=1 711 | pad=1 712 | activation=mish 713 | 714 | [convolutional] 715 | batch_normalize=1 716 | 
filters=512 717 | size=3 718 | stride=1 719 | pad=1 720 | activation=mish 721 | 722 | [shortcut] 723 | from=-3 724 | activation=linear 725 | 726 | [convolutional] 727 | batch_normalize=1 728 | filters=512 729 | size=1 730 | stride=1 731 | pad=1 732 | activation=mish 733 | 734 | [route] 735 | layers = -1,-16 736 | 737 | [convolutional] 738 | batch_normalize=1 739 | filters=1024 740 | size=1 741 | stride=1 742 | pad=1 743 | activation=mish 744 | 745 | ########################## 746 | 747 | [convolutional] 748 | batch_normalize=1 749 | filters=512 750 | size=1 751 | stride=1 752 | pad=1 753 | activation=leaky 754 | 755 | [convolutional] 756 | batch_normalize=1 757 | size=3 758 | stride=1 759 | pad=1 760 | filters=1024 761 | activation=leaky 762 | 763 | [convolutional] 764 | batch_normalize=1 765 | filters=512 766 | size=1 767 | stride=1 768 | pad=1 769 | activation=leaky 770 | 771 | ### SPP ### 772 | [maxpool] 773 | stride=1 774 | size=5 775 | 776 | [route] 777 | layers=-2 778 | 779 | [maxpool] 780 | stride=1 781 | size=9 782 | 783 | [route] 784 | layers=-4 785 | 786 | [maxpool] 787 | stride=1 788 | size=13 789 | 790 | [route] 791 | layers=-1,-3,-5,-6 792 | ### End SPP ### 793 | 794 | [convolutional] 795 | batch_normalize=1 796 | filters=512 797 | size=1 798 | stride=1 799 | pad=1 800 | activation=leaky 801 | 802 | [convolutional] 803 | batch_normalize=1 804 | size=3 805 | stride=1 806 | pad=1 807 | filters=1024 808 | activation=leaky 809 | 810 | [convolutional] 811 | batch_normalize=1 812 | filters=512 813 | size=1 814 | stride=1 815 | pad=1 816 | activation=leaky 817 | 818 | [convolutional] 819 | batch_normalize=1 820 | filters=256 821 | size=1 822 | stride=1 823 | pad=1 824 | activation=leaky 825 | 826 | [upsample] 827 | stride=2 828 | 829 | [route] 830 | layers = 85 831 | 832 | [convolutional] 833 | batch_normalize=1 834 | filters=256 835 | size=1 836 | stride=1 837 | pad=1 838 | activation=leaky 839 | 840 | [route] 841 | layers = -1, -3 842 | 843 | [convolutional] 844 | batch_normalize=1 845 | filters=256 846 | size=1 847 | stride=1 848 | pad=1 849 | activation=leaky 850 | 851 | [convolutional] 852 | batch_normalize=1 853 | size=3 854 | stride=1 855 | pad=1 856 | filters=512 857 | activation=leaky 858 | 859 | [convolutional] 860 | batch_normalize=1 861 | filters=256 862 | size=1 863 | stride=1 864 | pad=1 865 | activation=leaky 866 | 867 | [convolutional] 868 | batch_normalize=1 869 | size=3 870 | stride=1 871 | pad=1 872 | filters=512 873 | activation=leaky 874 | 875 | [convolutional] 876 | batch_normalize=1 877 | filters=256 878 | size=1 879 | stride=1 880 | pad=1 881 | activation=leaky 882 | 883 | [convolutional] 884 | batch_normalize=1 885 | filters=128 886 | size=1 887 | stride=1 888 | pad=1 889 | activation=leaky 890 | 891 | [upsample] 892 | stride=2 893 | 894 | [route] 895 | layers = 54 896 | 897 | [convolutional] 898 | batch_normalize=1 899 | filters=128 900 | size=1 901 | stride=1 902 | pad=1 903 | activation=leaky 904 | 905 | [route] 906 | layers = -1, -3 907 | 908 | [convolutional] 909 | batch_normalize=1 910 | filters=128 911 | size=1 912 | stride=1 913 | pad=1 914 | activation=leaky 915 | 916 | [convolutional] 917 | batch_normalize=1 918 | size=3 919 | stride=1 920 | pad=1 921 | filters=256 922 | activation=leaky 923 | 924 | [convolutional] 925 | batch_normalize=1 926 | filters=128 927 | size=1 928 | stride=1 929 | pad=1 930 | activation=leaky 931 | 932 | [convolutional] 933 | batch_normalize=1 934 | size=3 935 | stride=1 936 | pad=1 937 | filters=256 938 | activation=leaky 
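# the 1x1 reduction below closes this neck block; the first [yolo] detection head (mask 0,1,2), operating on the finest of the three scales, follows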
939 | 940 | [convolutional] 941 | batch_normalize=1 942 | filters=128 943 | size=1 944 | stride=1 945 | pad=1 946 | activation=leaky 947 | 948 | ########################## 949 | 950 | [convolutional] 951 | batch_normalize=1 952 | size=3 953 | stride=1 954 | pad=1 955 | filters=256 956 | activation=leaky 957 | 958 | [convolutional] 959 | size=1 960 | stride=1 961 | pad=1 962 | filters=255 963 | activation=linear 964 | 965 | 966 | [yolo] 967 | mask = 0,1,2 968 | anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401 969 | classes=80 970 | num=9 971 | jitter=.3 972 | ignore_thresh = .7 973 | truth_thresh = 1 974 | scale_x_y = 1.2 975 | iou_thresh=0.213 976 | cls_normalizer=1.0 977 | iou_normalizer=0.07 978 | iou_loss=ciou 979 | nms_kind=greedynms 980 | beta_nms=0.6 981 | 982 | 983 | [route] 984 | layers = -4 985 | 986 | [convolutional] 987 | batch_normalize=1 988 | size=3 989 | stride=2 990 | pad=1 991 | filters=256 992 | activation=leaky 993 | 994 | [route] 995 | layers = -1, -16 996 | 997 | [convolutional] 998 | batch_normalize=1 999 | filters=256 1000 | size=1 1001 | stride=1 1002 | pad=1 1003 | activation=leaky 1004 | 1005 | [convolutional] 1006 | batch_normalize=1 1007 | size=3 1008 | stride=1 1009 | pad=1 1010 | filters=512 1011 | activation=leaky 1012 | 1013 | [convolutional] 1014 | batch_normalize=1 1015 | filters=256 1016 | size=1 1017 | stride=1 1018 | pad=1 1019 | activation=leaky 1020 | 1021 | [convolutional] 1022 | batch_normalize=1 1023 | size=3 1024 | stride=1 1025 | pad=1 1026 | filters=512 1027 | activation=leaky 1028 | 1029 | [convolutional] 1030 | batch_normalize=1 1031 | filters=256 1032 | size=1 1033 | stride=1 1034 | pad=1 1035 | activation=leaky 1036 | 1037 | [convolutional] 1038 | batch_normalize=1 1039 | size=3 1040 | stride=1 1041 | pad=1 1042 | filters=512 1043 | activation=leaky 1044 | 1045 | [convolutional] 1046 | size=1 1047 | stride=1 1048 | pad=1 1049 | filters=255 1050 | activation=linear 1051 | 1052 | 1053 | [yolo] 1054 | mask = 3,4,5 1055 | anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401 1056 | classes=80 1057 | num=9 1058 | jitter=.3 1059 | ignore_thresh = .7 1060 | truth_thresh = 1 1061 | scale_x_y = 1.1 1062 | iou_thresh=0.213 1063 | cls_normalizer=1.0 1064 | iou_normalizer=0.07 1065 | iou_loss=ciou 1066 | nms_kind=greedynms 1067 | beta_nms=0.6 1068 | 1069 | 1070 | [route] 1071 | layers = -4 1072 | 1073 | [convolutional] 1074 | batch_normalize=1 1075 | size=3 1076 | stride=2 1077 | pad=1 1078 | filters=512 1079 | activation=leaky 1080 | 1081 | [route] 1082 | layers = -1, -37 1083 | 1084 | [convolutional] 1085 | batch_normalize=1 1086 | filters=512 1087 | size=1 1088 | stride=1 1089 | pad=1 1090 | activation=leaky 1091 | 1092 | [convolutional] 1093 | batch_normalize=1 1094 | size=3 1095 | stride=1 1096 | pad=1 1097 | filters=1024 1098 | activation=leaky 1099 | 1100 | [convolutional] 1101 | batch_normalize=1 1102 | filters=512 1103 | size=1 1104 | stride=1 1105 | pad=1 1106 | activation=leaky 1107 | 1108 | [convolutional] 1109 | batch_normalize=1 1110 | size=3 1111 | stride=1 1112 | pad=1 1113 | filters=1024 1114 | activation=leaky 1115 | 1116 | [convolutional] 1117 | batch_normalize=1 1118 | filters=512 1119 | size=1 1120 | stride=1 1121 | pad=1 1122 | activation=leaky 1123 | 1124 | [convolutional] 1125 | batch_normalize=1 1126 | size=3 1127 | stride=1 1128 | pad=1 1129 | filters=1024 1130 | activation=leaky 1131 | 1132 | [convolutional] 1133 | size=1 1134 | stride=1 1135 | pad=1 1136 
| filters=255 1137 | activation=linear 1138 | 1139 | 1140 | [yolo] 1141 | mask = 6,7,8 1142 | anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401 1143 | classes=80 1144 | num=9 1145 | jitter=.3 1146 | ignore_thresh = .7 1147 | truth_thresh = 1 1148 | random=1 1149 | scale_x_y = 1.05 1150 | iou_thresh=0.213 1151 | cls_normalizer=1.0 1152 | iou_normalizer=0.07 1153 | iou_loss=ciou 1154 | nms_kind=greedynms 1155 | beta_nms=0.6 1156 | 1157 | -------------------------------------------------------------------------------- /darknet2caffe.py: -------------------------------------------------------------------------------- 1 | # The caffe module needs to be on the Python path; 2 | # we'll add it here explicitly. 3 | caffe_root='/home/chen/caffe/' 4 | #os.chdir(caffe_root) 5 | import sys 6 | sys.path.insert(0,caffe_root+'python') 7 | import caffe 8 | import numpy as np 9 | from collections import OrderedDict 10 | from cfg import * 11 | from prototxt import * 12 | 13 | def darknet2caffe(cfgfile, weightfile, protofile, caffemodel): 14 | net_info = cfg2prototxt(cfgfile) 15 | save_prototxt(net_info , protofile, region=False) 16 | 17 | net = caffe.Net(protofile, caffe.TEST) 18 | params = net.params 19 | 20 | blocks = parse_cfg(cfgfile) 21 | 22 | #Open the weights file 23 | fp = open(weightfile, "rb") 24 | 25 | #The first 5 int32 values are header information: 26 | # 1. Major version number 27 | # 2. Minor version number 28 | # 3. Subversion number 29 | # 4. Images seen during training (an int64, read here as two int32 values) 30 | header = np.fromfile(fp, dtype = np.int32, count = 5) 31 | 32 | #fp = open(weightfile, 'rb') 33 | #header = np.fromfile(fp, count=5, dtype=np.int32) 34 | #header = np.ndarray(shape=(5,),dtype='int32',buffer=fp.read(20)) 35 | #print(header) 36 | buf = np.fromfile(fp, dtype = np.float32) 37 | #print(buf) 38 | fp.close() 39 | 40 | layers = [] 41 | layer_id = 1 42 | start = 0 43 | for block in blocks: 44 | if start >= buf.size: # stop once every weight in the file has been consumed 45 | break 46 | 47 | if block['type'] == 'net': 48 | continue 49 | elif block['type'] == 'convolutional': 50 | batch_normalize = int(block['batch_normalize']) 51 | if block.has_key('name'): 52 | conv_layer_name = block['name'] 53 | bn_layer_name = '%s-bn' % block['name'] 54 | scale_layer_name = '%s-scale' % block['name'] 55 | else: 56 | conv_layer_name = 'layer%d-conv' % layer_id 57 | bn_layer_name = 'layer%d-bn' % layer_id 58 | scale_layer_name = 'layer%d-scale' % layer_id 59 | 60 | if batch_normalize: 61 | start = load_conv_bn2caffe(buf, start, params[conv_layer_name], params[bn_layer_name], params[scale_layer_name]) 62 | else: 63 | start = load_conv2caffe(buf, start, params[conv_layer_name]) 64 | layer_id = layer_id+1 65 | elif block['type'] == 'depthwise_convolutional': 66 | batch_normalize = int(block['batch_normalize']) 67 | if block.has_key('name'): 68 | conv_layer_name = block['name'] 69 | bn_layer_name = '%s-bn' % block['name'] 70 | scale_layer_name = '%s-scale' % block['name'] 71 | else: 72 | conv_layer_name = 'layer%d-dwconv' % layer_id 73 | bn_layer_name = 'layer%d-bn' % layer_id 74 | scale_layer_name = 'layer%d-scale' % layer_id 75 | 76 | if batch_normalize: 77 | start = load_conv_bn2caffe(buf, start, params[conv_layer_name], params[bn_layer_name], params[scale_layer_name]) 78 | else: 79 | start = load_conv2caffe(buf, start, params[conv_layer_name]) 80 | layer_id = layer_id+1 81 | elif block['type'] == 'connected': 82 | if block.has_key('name'): 83 | fc_layer_name = block['name'] 84 | else: 85 | fc_layer_name = 'layer%d-fc' % layer_id 86 | start = load_fc2caffe(buf, start,
params[fc_layer_name]) 87 | layer_id = layer_id+1 88 | elif block['type'] == 'maxpool': # the layer types below carry no weights; only the layer counter advances 89 | layer_id = layer_id+1 90 | elif block['type'] == 'avgpool': 91 | layer_id = layer_id+1 92 | elif block['type'] == 'region': 93 | layer_id = layer_id + 1 94 | elif block['type'] == 'route': 95 | layer_id = layer_id + 1 96 | elif block['type'] == 'shortcut': 97 | layer_id = layer_id + 1 98 | elif block['type'] == 'softmax': 99 | layer_id = layer_id + 1 100 | elif block['type'] == 'cost': 101 | layer_id = layer_id + 1 102 | elif block['type'] == 'upsample': 103 | layer_id = layer_id + 1 104 | else: 105 | print('unknown layer type %s' % block['type']) 106 | layer_id = layer_id + 1 107 | print('save prototxt to %s' % protofile) 108 | save_prototxt(net_info , protofile, region=True) 109 | print('save caffemodel to %s' % caffemodel) 110 | net.save(caffemodel) 111 | 112 | def load_conv2caffe(buf, start, conv_param): # darknet stores conv biases first, then the weights 113 | weight = conv_param[0].data 114 | bias = conv_param[1].data 115 | conv_param[1].data[...] = np.reshape(buf[start:start+bias.size], bias.shape); start = start + bias.size 116 | conv_param[0].data[...] = np.reshape(buf[start:start+weight.size], weight.shape); start = start + weight.size 117 | return start 118 | 119 | def load_fc2caffe(buf, start, fc_param): # same layout for fully-connected layers: biases, then weights 120 | weight = fc_param[0].data 121 | bias = fc_param[1].data 122 | fc_param[1].data[...] = np.reshape(buf[start:start+bias.size], bias.shape); start = start + bias.size 123 | fc_param[0].data[...] = np.reshape(buf[start:start+weight.size], weight.shape); start = start + weight.size 124 | return start 125 | 126 | 127 | def load_conv_bn2caffe(buf, start, conv_param, bn_param, scale_param): 128 | conv_weight = conv_param[0].data 129 | running_mean = bn_param[0].data 130 | running_var = bn_param[1].data 131 | scale_weight = scale_param[0].data 132 | scale_bias = scale_param[1].data 133 | 134 | # darknet stores a batch-normalized conv in this order: BN biases, BN scales, 135 | # rolling means, rolling variances, and finally the conv weights 136 | scale_param[1].data[...] = np.reshape(buf[start:start+scale_bias.size], scale_bias.shape); start = start + scale_bias.size 137 | #print scale_bias.size 138 | #print scale_bias 139 | 140 | scale_param[0].data[...] = np.reshape(buf[start:start+scale_weight.size], scale_weight.shape); start = start + scale_weight.size 141 | #print scale_weight.size 142 | 143 | bn_param[0].data[...] = np.reshape(buf[start:start+running_mean.size], running_mean.shape); start = start + running_mean.size 144 | #print running_mean.size 145 | 146 | bn_param[1].data[...] = np.reshape(buf[start:start+running_var.size], running_var.shape); start = start + running_var.size 147 | #print running_var.size 148 | 149 | bn_param[2].data[...] = np.array([1.0]) # Caffe's BN scale factor; 1.0 keeps the stored mean/var unscaled 150 | conv_param[0].data[...]
= np.reshape(buf[start:start+conv_weight.size], conv_weight.shape); start = start + conv_weight.size 151 | #print conv_weight.size 152 | 153 | return start 154 | 155 | def cfg2prototxt(cfgfile): 156 | blocks = parse_cfg(cfgfile) 157 | 158 | prev_filters = 3 159 | layers = [] 160 | props = OrderedDict() 161 | bottom = 'data' 162 | layer_id = 1 163 | topnames = dict() 164 | for block in blocks: 165 | if block['type'] == 'net': 166 | props['name'] = 'Darkent2Caffe' 167 | props['input'] = 'data' 168 | props['input_dim'] = ['1'] 169 | props['input_dim'].append(block['channels']) 170 | props['input_dim'].append(block['height']) 171 | props['input_dim'].append(block['width']) 172 | continue 173 | elif block['type'] == 'convolutional': 174 | conv_layer = OrderedDict() 175 | conv_layer['bottom'] = bottom 176 | if block.has_key('name'): 177 | conv_layer['top'] = block['name'] 178 | conv_layer['name'] = block['name'] 179 | else: 180 | conv_layer['top'] = 'layer%d-conv' % layer_id 181 | conv_layer['name'] = 'layer%d-conv' % layer_id 182 | conv_layer['type'] = 'Convolution' 183 | convolution_param = OrderedDict() 184 | convolution_param['num_output'] = block['filters'] 185 | prev_filters = block['filters'] 186 | convolution_param['kernel_size'] = block['size'] 187 | if block['pad'] == '1': 188 | convolution_param['pad'] = str(int(convolution_param['kernel_size']) // 2) 189 | convolution_param['stride'] = block['stride'] 190 | if block['batch_normalize'] == '1': 191 | convolution_param['bias_term'] = 'false' 192 | else: 193 | convolution_param['bias_term'] = 'true' 194 | conv_layer['convolution_param'] = convolution_param 195 | layers.append(conv_layer) 196 | bottom = conv_layer['top'] 197 | 198 | if block['batch_normalize'] == '1': 199 | bn_layer = OrderedDict() 200 | bn_layer['bottom'] = bottom 201 | bn_layer['top'] = bottom 202 | if block.has_key('name'): 203 | bn_layer['name'] = '%s-bn' % block['name'] 204 | else: 205 | bn_layer['name'] = 'layer%d-bn' % layer_id 206 | bn_layer['type'] = 'BatchNorm' 207 | batch_norm_param = OrderedDict() 208 | batch_norm_param['use_global_stats'] = 'true' 209 | bn_layer['batch_norm_param'] = batch_norm_param 210 | layers.append(bn_layer) 211 | 212 | scale_layer = OrderedDict() 213 | scale_layer['bottom'] = bottom 214 | scale_layer['top'] = bottom 215 | if block.has_key('name'): 216 | scale_layer['name'] = '%s-scale' % block['name'] 217 | else: 218 | scale_layer['name'] = 'layer%d-scale' % layer_id 219 | scale_layer['type'] = 'Scale' 220 | scale_param = OrderedDict() 221 | scale_param['bias_term'] = 'true' 222 | scale_layer['scale_param'] = scale_param 223 | layers.append(scale_layer) 224 | 225 | if block['activation'] != 'linear': 226 | activate_layer = OrderedDict() 227 | activate_layer['bottom'] = bottom 228 | activate_layer['top'] = bottom 229 | if block.has_key('name'): 230 | activate_layer['name'] = '%s-act' % block['name'] 231 | else: 232 | activate_layer['name'] = 'layer%d-act' % layer_id 233 | if block['activation'] == 'leaky': 234 | activate_layer['type'] = 'ReLU' 235 | relu_param = OrderedDict() 236 | relu_param['negative_slope'] = '0.1' 237 | activate_layer['relu_param'] = relu_param 238 | elif block['activation'] == 'mish': 239 | activate_layer['type'] = 'Mish' 240 | layers.append(activate_layer) 241 | topnames[layer_id] = bottom 242 | layer_id = layer_id+1 243 | elif block['type'] == 'depthwise_convolutional': 244 | conv_layer = OrderedDict() 245 | conv_layer['bottom'] = bottom 246 | if block.has_key('name'): 247 | conv_layer['top'] = block['name'] 248 | 
conv_layer['name'] = block['name'] 249 | else: 250 | conv_layer['top'] = 'layer%d-dwconv' % layer_id 251 | conv_layer['name'] = 'layer%d-dwconv' % layer_id 252 | conv_layer['type'] = 'ConvolutionDepthwise' 253 | convolution_param = OrderedDict() 254 | convolution_param['num_output'] = prev_filters 255 | convolution_param['kernel_size'] = block['size'] 256 | if block['pad'] == '1': 257 | convolution_param['pad'] = str(int(convolution_param['kernel_size']) // 2) 258 | convolution_param['stride'] = block['stride'] 259 | if block['batch_normalize'] == '1': 260 | convolution_param['bias_term'] = 'false' 261 | else: 262 | convolution_param['bias_term'] = 'true' 263 | conv_layer['convolution_param'] = convolution_param 264 | layers.append(conv_layer) 265 | bottom = conv_layer['top'] 266 | 267 | if block['batch_normalize'] == '1': 268 | bn_layer = OrderedDict() 269 | bn_layer['bottom'] = bottom 270 | bn_layer['top'] = bottom 271 | if block.has_key('name'): 272 | bn_layer['name'] = '%s-bn' % block['name'] 273 | else: 274 | bn_layer['name'] = 'layer%d-bn' % layer_id 275 | bn_layer['type'] = 'BatchNorm' 276 | batch_norm_param = OrderedDict() 277 | batch_norm_param['use_global_stats'] = 'true' 278 | bn_layer['batch_norm_param'] = batch_norm_param 279 | layers.append(bn_layer) 280 | 281 | scale_layer = OrderedDict() 282 | scale_layer['bottom'] = bottom 283 | scale_layer['top'] = bottom 284 | if block.has_key('name'): 285 | scale_layer['name'] = '%s-scale' % block['name'] 286 | else: 287 | scale_layer['name'] = 'layer%d-scale' % layer_id 288 | scale_layer['type'] = 'Scale' 289 | scale_param = OrderedDict() 290 | scale_param['bias_term'] = 'true' 291 | scale_layer['scale_param'] = scale_param 292 | layers.append(scale_layer) 293 | 294 | if block['activation'] != 'linear': 295 | relu_layer = OrderedDict() 296 | relu_layer['bottom'] = bottom 297 | relu_layer['top'] = bottom 298 | if block.has_key('name'): 299 | relu_layer['name'] = '%s-act' % block['name'] 300 | else: 301 | relu_layer['name'] = 'layer%d-act' % layer_id 302 | relu_layer['type'] = 'ReLU' 303 | if block['activation'] == 'leaky': 304 | relu_param = OrderedDict() 305 | relu_param['negative_slope'] = '0.1' 306 | relu_layer['relu_param'] = relu_param 307 | layers.append(relu_layer) 308 | topnames[layer_id] = bottom 309 | layer_id = layer_id+1 310 | elif block['type'] == 'maxpool': 311 | max_layer = OrderedDict() 312 | max_layer['bottom'] = bottom 313 | if block.has_key('name'): 314 | max_layer['top'] = block['name'] 315 | max_layer['name'] = block['name'] 316 | else: 317 | max_layer['top'] = 'layer%d-maxpool' % layer_id 318 | max_layer['name'] = 'layer%d-maxpool' % layer_id 319 | max_layer['type'] = 'Pooling' 320 | pooling_param = OrderedDict() 321 | pooling_param['stride'] = block['stride'] 322 | pooling_param['pool'] = 'MAX' 323 | # pooling_param['kernel_size'] = block['size'] 324 | # pooling_param['pad'] = str((int(block['size'])-1) // 2) 325 | if (int(block['size']) - int(block['stride'])) % 2 == 0: 326 | pooling_param['kernel_size'] = block['size'] 327 | pooling_param['pad'] = str((int(block['size'])-1) // 2) 328 | 329 | if (int(block['size']) - int(block['stride'])) % 2 == 1: 330 | pooling_param['kernel_size'] = str(int(block['size']) + 1) 331 | pooling_param['pad'] = str((int(block['size']) + 1) // 2) 332 | 333 | max_layer['pooling_param'] = pooling_param 334 | layers.append(max_layer) 335 | bottom = max_layer['top'] 336 | topnames[layer_id] = bottom 337 | layer_id = layer_id+1 338 | elif block['type'] == 'avgpool': 339 | avg_layer = 
OrderedDict() 340 | avg_layer['bottom'] = bottom 341 | if block.has_key('name'): 342 | avg_layer['top'] = block['name'] 343 | avg_layer['name'] = block['name'] 344 | else: 345 | avg_layer['top'] = 'layer%d-avgpool' % layer_id 346 | avg_layer['name'] = 'layer%d-avgpool' % layer_id 347 | avg_layer['type'] = 'Pooling' 348 | pooling_param = OrderedDict() 349 | pooling_param['kernel_size'] = 7 350 | pooling_param['stride'] = 1 351 | pooling_param['pool'] = 'AVE' 352 | avg_layer['pooling_param'] = pooling_param 353 | layers.append(avg_layer) 354 | bottom = avg_layer['top'] 355 | topnames[layer_id] = bottom 356 | layer_id = layer_id+1 357 | 358 | elif block['type'] == 'region': 359 | if True: 360 | region_layer = OrderedDict() 361 | region_layer['bottom'] = bottom 362 | if block.has_key('name'): 363 | region_layer['top'] = block['name'] 364 | region_layer['name'] = block['name'] 365 | else: 366 | region_layer['top'] = 'layer%d-region' % layer_id 367 | region_layer['name'] = 'layer%d-region' % layer_id 368 | region_layer['type'] = 'Region' 369 | region_param = OrderedDict() 370 | region_param['anchors'] = block['anchors'].strip() 371 | region_param['classes'] = block['classes'] 372 | region_param['num'] = block['num'] 373 | region_layer['region_param'] = region_param 374 | layers.append(region_layer) 375 | bottom = region_layer['top'] 376 | topnames[layer_id] = bottom 377 | layer_id = layer_id + 1 378 | 379 | elif block['type'] == 'route': 380 | route_layer = OrderedDict() 381 | layer_name = str(block['layers']).split(',') 382 | bottom_layer_size = len(str(block['layers']).split(',')) 383 | bottoms = [] 384 | for i in range(bottom_layer_size): 385 | if int(layer_name[i]) < 0: 386 | prev_layer_id = layer_id + int(layer_name[i]) 387 | else: 388 | prev_layer_id = int(layer_name[i]) + 1 389 | bottom = topnames[prev_layer_id] 390 | bottoms.append(bottom) 391 | route_layer['bottom'] = bottoms 392 | 393 | if block.has_key('name'): 394 | route_layer['top'] = block['name'] 395 | route_layer['name'] = block['name'] 396 | else: 397 | route_layer['top'] = 'layer%d-route' % layer_id 398 | route_layer['name'] = 'layer%d-route' % layer_id 399 | route_layer['type'] = 'Concat' 400 | layers.append(route_layer) 401 | bottom = route_layer['top'] 402 | topnames[layer_id] = bottom 403 | layer_id = layer_id + 1 404 | 405 | elif block['type'] == 'upsample': 406 | upsample_layer = OrderedDict() 407 | upsample_layer['bottom'] = bottom 408 | if block.has_key('name'): 409 | upsample_layer['top'] = block['name'] 410 | upsample_layer['name'] = block['name'] 411 | else: 412 | upsample_layer['top'] = 'layer%d-upsample' % layer_id 413 | upsample_layer['name'] = 'layer%d-upsample' % layer_id 414 | upsample_layer['type'] = 'Upsample' 415 | upsample_param = OrderedDict() 416 | upsample_param['scale'] = block['stride'] 417 | upsample_layer['upsample_param'] = upsample_param 418 | layers.append(upsample_layer) 419 | bottom = upsample_layer['top'] 420 | print('upsample:',layer_id) 421 | topnames[layer_id] = bottom 422 | layer_id = layer_id + 1 423 | 424 | elif block['type'] == 'shortcut': 425 | prev_layer_id1 = layer_id + int(block['from']) 426 | prev_layer_id2 = layer_id - 1 427 | bottom1 = topnames[prev_layer_id1] 428 | bottom2= topnames[prev_layer_id2] 429 | shortcut_layer = OrderedDict() 430 | shortcut_layer['bottom'] = [bottom1, bottom2] 431 | if block.has_key('name'): 432 | shortcut_layer['top'] = block['name'] 433 | shortcut_layer['name'] = block['name'] 434 | else: 435 | shortcut_layer['top'] = 'layer%d-shortcut' % layer_id 436 
| shortcut_layer['name'] = 'layer%d-shortcut' % layer_id 437 | shortcut_layer['type'] = 'Eltwise' 438 | eltwise_param = OrderedDict() 439 | eltwise_param['operation'] = 'SUM' 440 | shortcut_layer['eltwise_param'] = eltwise_param 441 | layers.append(shortcut_layer) 442 | bottom = shortcut_layer['top'] 443 | 444 | if block['activation'] != 'linear': 445 | relu_layer = OrderedDict() 446 | relu_layer['bottom'] = bottom 447 | relu_layer['top'] = bottom 448 | if block.has_key('name'): 449 | relu_layer['name'] = '%s-act' % block['name'] 450 | else: 451 | relu_layer['name'] = 'layer%d-act' % layer_id 452 | relu_layer['type'] = 'ReLU' 453 | if block['activation'] == 'leaky': 454 | relu_param = OrderedDict() 455 | relu_param['negative_slope'] = '0.1' 456 | relu_layer['relu_param'] = relu_param 457 | layers.append(relu_layer) 458 | topnames[layer_id] = bottom 459 | layer_id = layer_id + 1 460 | 461 | elif block['type'] == 'connected': 462 | fc_layer = OrderedDict() 463 | fc_layer['bottom'] = bottom 464 | if block.has_key('name'): 465 | fc_layer['top'] = block['name'] 466 | fc_layer['name'] = block['name'] 467 | else: 468 | fc_layer['top'] = 'layer%d-fc' % layer_id 469 | fc_layer['name'] = 'layer%d-fc' % layer_id 470 | fc_layer['type'] = 'InnerProduct' 471 | fc_param = OrderedDict() 472 | fc_param['num_output'] = int(block['output']) 473 | fc_layer['inner_product_param'] = fc_param 474 | layers.append(fc_layer) 475 | bottom = fc_layer['top'] 476 | 477 | if block['activation'] != 'linear': 478 | relu_layer = OrderedDict() 479 | relu_layer['bottom'] = bottom 480 | relu_layer['top'] = bottom 481 | if block.has_key('name'): 482 | relu_layer['name'] = '%s-act' % block['name'] 483 | else: 484 | relu_layer['name'] = 'layer%d-act' % layer_id 485 | relu_layer['type'] = 'ReLU' 486 | if block['activation'] == 'leaky': 487 | relu_param = OrderedDict() 488 | relu_param['negative_slope'] = '0.1' 489 | relu_layer['relu_param'] = relu_param 490 | layers.append(relu_layer) 491 | topnames[layer_id] = bottom 492 | layer_id = layer_id+1 493 | else: 494 | print('unknown layer type %s' % block['type']) 495 | topnames[layer_id] = bottom 496 | layer_id = layer_id + 1 497 | 498 | net_info = OrderedDict() 499 | net_info['props'] = props 500 | net_info['layers'] = layers 501 | return net_info 502 | 503 | if __name__ == '__main__': 504 | import sys 505 | if len(sys.argv) != 5: 506 | print('usage:') 507 | print('python darknet2caffe.py tiny-yolo-voc.cfg tiny-yolo-voc.weights tiny-yolo-voc.prototxt tiny-yolo-voc.caffemodel') 508 | print('') 509 | print('optionally add a name field to each cfg block to override the auto-generated layer names') 510 | exit() 511 | 512 | cfgfile = sys.argv[1] 513 | #net_info = cfg2prototxt(cfgfile) 514 | #print_prototxt(net_info) 515 | #save_prototxt(net_info, 'tmp.prototxt') 516 | weightfile = sys.argv[2] 517 | protofile = sys.argv[3] 518 | caffemodel = sys.argv[4] 519 | darknet2caffe(cfgfile, weightfile, protofile, caffemodel) 520 | -------------------------------------------------------------------------------- /prototxt/mobilenet_v1_yolov3.prototxt: -------------------------------------------------------------------------------- 1 | name: "Darkent2Caffe" 2 | input: "data" 3 | input_dim: 1 4 | input_dim: 3 5 | input_dim: 416 6 | input_dim: 416 7 | 8 | layer { 9 | bottom: "data" 10 | top: "layer1-conv" 11 | name: "layer1-conv" 12 | type: "Convolution" 13 | convolution_param { 14 | num_output: 32 15 | kernel_size: 3 16 | pad: 1 17 | stride: 2 18 | bias_term: false 19 | } 20 | } 21 | layer { 22 | bottom: "layer1-conv" 23 | top:
"layer1-conv" 24 | name: "layer1-bn" 25 | type: "BatchNorm" 26 | batch_norm_param { 27 | use_global_stats: true 28 | } 29 | } 30 | layer { 31 | bottom: "layer1-conv" 32 | top: "layer1-conv" 33 | name: "layer1-scale" 34 | type: "Scale" 35 | scale_param { 36 | bias_term: true 37 | } 38 | } 39 | layer { 40 | bottom: "layer1-conv" 41 | top: "layer1-conv" 42 | name: "layer1-act" 43 | type: "ReLU" 44 | relu_param { 45 | negative_slope: 0.1 46 | } 47 | } 48 | layer { 49 | bottom: "layer1-conv" 50 | top: "layer2-dwconv" 51 | name: "layer2-dwconv" 52 | type: "ConvolutionDepthwise" 53 | convolution_param { 54 | num_output: 32 55 | kernel_size: 3 56 | pad: 1 57 | stride: 1 58 | bias_term: false 59 | } 60 | } 61 | layer { 62 | bottom: "layer2-dwconv" 63 | top: "layer2-dwconv" 64 | name: "layer2-bn" 65 | type: "BatchNorm" 66 | batch_norm_param { 67 | use_global_stats: true 68 | } 69 | } 70 | layer { 71 | bottom: "layer2-dwconv" 72 | top: "layer2-dwconv" 73 | name: "layer2-scale" 74 | type: "Scale" 75 | scale_param { 76 | bias_term: true 77 | } 78 | } 79 | layer { 80 | bottom: "layer2-dwconv" 81 | top: "layer2-dwconv" 82 | name: "layer2-act" 83 | type: "ReLU" 84 | relu_param { 85 | negative_slope: 0.1 86 | } 87 | } 88 | layer { 89 | bottom: "layer2-dwconv" 90 | top: "layer3-conv" 91 | name: "layer3-conv" 92 | type: "Convolution" 93 | convolution_param { 94 | num_output: 64 95 | kernel_size: 1 96 | pad: 0 97 | stride: 1 98 | bias_term: false 99 | } 100 | } 101 | layer { 102 | bottom: "layer3-conv" 103 | top: "layer3-conv" 104 | name: "layer3-bn" 105 | type: "BatchNorm" 106 | batch_norm_param { 107 | use_global_stats: true 108 | } 109 | } 110 | layer { 111 | bottom: "layer3-conv" 112 | top: "layer3-conv" 113 | name: "layer3-scale" 114 | type: "Scale" 115 | scale_param { 116 | bias_term: true 117 | } 118 | } 119 | layer { 120 | bottom: "layer3-conv" 121 | top: "layer3-conv" 122 | name: "layer3-act" 123 | type: "ReLU" 124 | relu_param { 125 | negative_slope: 0.1 126 | } 127 | } 128 | layer { 129 | bottom: "layer3-conv" 130 | top: "layer4-dwconv" 131 | name: "layer4-dwconv" 132 | type: "ConvolutionDepthwise" 133 | convolution_param { 134 | num_output: 64 135 | kernel_size: 3 136 | pad: 1 137 | stride: 2 138 | bias_term: false 139 | } 140 | } 141 | layer { 142 | bottom: "layer4-dwconv" 143 | top: "layer4-dwconv" 144 | name: "layer4-bn" 145 | type: "BatchNorm" 146 | batch_norm_param { 147 | use_global_stats: true 148 | } 149 | } 150 | layer { 151 | bottom: "layer4-dwconv" 152 | top: "layer4-dwconv" 153 | name: "layer4-scale" 154 | type: "Scale" 155 | scale_param { 156 | bias_term: true 157 | } 158 | } 159 | layer { 160 | bottom: "layer4-dwconv" 161 | top: "layer4-dwconv" 162 | name: "layer4-act" 163 | type: "ReLU" 164 | relu_param { 165 | negative_slope: 0.1 166 | } 167 | } 168 | layer { 169 | bottom: "layer4-dwconv" 170 | top: "layer5-conv" 171 | name: "layer5-conv" 172 | type: "Convolution" 173 | convolution_param { 174 | num_output: 128 175 | kernel_size: 1 176 | pad: 0 177 | stride: 1 178 | bias_term: false 179 | } 180 | } 181 | layer { 182 | bottom: "layer5-conv" 183 | top: "layer5-conv" 184 | name: "layer5-bn" 185 | type: "BatchNorm" 186 | batch_norm_param { 187 | use_global_stats: true 188 | } 189 | } 190 | layer { 191 | bottom: "layer5-conv" 192 | top: "layer5-conv" 193 | name: "layer5-scale" 194 | type: "Scale" 195 | scale_param { 196 | bias_term: true 197 | } 198 | } 199 | layer { 200 | bottom: "layer5-conv" 201 | top: "layer5-conv" 202 | name: "layer5-act" 203 | type: "ReLU" 204 | relu_param { 205 | 
negative_slope: 0.1 206 | } 207 | } 208 | layer { 209 | bottom: "layer5-conv" 210 | top: "layer6-dwconv" 211 | name: "layer6-dwconv" 212 | type: "ConvolutionDepthwise" 213 | convolution_param { 214 | num_output: 128 215 | kernel_size: 3 216 | pad: 1 217 | stride: 1 218 | bias_term: false 219 | } 220 | } 221 | layer { 222 | bottom: "layer6-dwconv" 223 | top: "layer6-dwconv" 224 | name: "layer6-bn" 225 | type: "BatchNorm" 226 | batch_norm_param { 227 | use_global_stats: true 228 | } 229 | } 230 | layer { 231 | bottom: "layer6-dwconv" 232 | top: "layer6-dwconv" 233 | name: "layer6-scale" 234 | type: "Scale" 235 | scale_param { 236 | bias_term: true 237 | } 238 | } 239 | layer { 240 | bottom: "layer6-dwconv" 241 | top: "layer6-dwconv" 242 | name: "layer6-act" 243 | type: "ReLU" 244 | relu_param { 245 | negative_slope: 0.1 246 | } 247 | } 248 | layer { 249 | bottom: "layer6-dwconv" 250 | top: "layer7-conv" 251 | name: "layer7-conv" 252 | type: "Convolution" 253 | convolution_param { 254 | num_output: 128 255 | kernel_size: 1 256 | pad: 0 257 | stride: 1 258 | bias_term: false 259 | } 260 | } 261 | layer { 262 | bottom: "layer7-conv" 263 | top: "layer7-conv" 264 | name: "layer7-bn" 265 | type: "BatchNorm" 266 | batch_norm_param { 267 | use_global_stats: true 268 | } 269 | } 270 | layer { 271 | bottom: "layer7-conv" 272 | top: "layer7-conv" 273 | name: "layer7-scale" 274 | type: "Scale" 275 | scale_param { 276 | bias_term: true 277 | } 278 | } 279 | layer { 280 | bottom: "layer7-conv" 281 | top: "layer7-conv" 282 | name: "layer7-act" 283 | type: "ReLU" 284 | relu_param { 285 | negative_slope: 0.1 286 | } 287 | } 288 | layer { 289 | bottom: "layer7-conv" 290 | top: "layer8-dwconv" 291 | name: "layer8-dwconv" 292 | type: "ConvolutionDepthwise" 293 | convolution_param { 294 | num_output: 128 295 | kernel_size: 3 296 | pad: 1 297 | stride: 2 298 | bias_term: false 299 | } 300 | } 301 | layer { 302 | bottom: "layer8-dwconv" 303 | top: "layer8-dwconv" 304 | name: "layer8-bn" 305 | type: "BatchNorm" 306 | batch_norm_param { 307 | use_global_stats: true 308 | } 309 | } 310 | layer { 311 | bottom: "layer8-dwconv" 312 | top: "layer8-dwconv" 313 | name: "layer8-scale" 314 | type: "Scale" 315 | scale_param { 316 | bias_term: true 317 | } 318 | } 319 | layer { 320 | bottom: "layer8-dwconv" 321 | top: "layer8-dwconv" 322 | name: "layer8-act" 323 | type: "ReLU" 324 | relu_param { 325 | negative_slope: 0.1 326 | } 327 | } 328 | layer { 329 | bottom: "layer8-dwconv" 330 | top: "layer9-conv" 331 | name: "layer9-conv" 332 | type: "Convolution" 333 | convolution_param { 334 | num_output: 256 335 | kernel_size: 1 336 | pad: 0 337 | stride: 1 338 | bias_term: false 339 | } 340 | } 341 | layer { 342 | bottom: "layer9-conv" 343 | top: "layer9-conv" 344 | name: "layer9-bn" 345 | type: "BatchNorm" 346 | batch_norm_param { 347 | use_global_stats: true 348 | } 349 | } 350 | layer { 351 | bottom: "layer9-conv" 352 | top: "layer9-conv" 353 | name: "layer9-scale" 354 | type: "Scale" 355 | scale_param { 356 | bias_term: true 357 | } 358 | } 359 | layer { 360 | bottom: "layer9-conv" 361 | top: "layer9-conv" 362 | name: "layer9-act" 363 | type: "ReLU" 364 | relu_param { 365 | negative_slope: 0.1 366 | } 367 | } 368 | layer { 369 | bottom: "layer9-conv" 370 | top: "layer10-dwconv" 371 | name: "layer10-dwconv" 372 | type: "ConvolutionDepthwise" 373 | convolution_param { 374 | num_output: 256 375 | kernel_size: 3 376 | pad: 1 377 | stride: 1 378 | bias_term: false 379 | } 380 | } 381 | layer { 382 | bottom: "layer10-dwconv" 383 | 
top: "layer10-dwconv" 384 | name: "layer10-bn" 385 | type: "BatchNorm" 386 | batch_norm_param { 387 | use_global_stats: true 388 | } 389 | } 390 | layer { 391 | bottom: "layer10-dwconv" 392 | top: "layer10-dwconv" 393 | name: "layer10-scale" 394 | type: "Scale" 395 | scale_param { 396 | bias_term: true 397 | } 398 | } 399 | layer { 400 | bottom: "layer10-dwconv" 401 | top: "layer10-dwconv" 402 | name: "layer10-act" 403 | type: "ReLU" 404 | relu_param { 405 | negative_slope: 0.1 406 | } 407 | } 408 | layer { 409 | bottom: "layer10-dwconv" 410 | top: "layer11-conv" 411 | name: "layer11-conv" 412 | type: "Convolution" 413 | convolution_param { 414 | num_output: 256 415 | kernel_size: 1 416 | pad: 0 417 | stride: 1 418 | bias_term: false 419 | } 420 | } 421 | layer { 422 | bottom: "layer11-conv" 423 | top: "layer11-conv" 424 | name: "layer11-bn" 425 | type: "BatchNorm" 426 | batch_norm_param { 427 | use_global_stats: true 428 | } 429 | } 430 | layer { 431 | bottom: "layer11-conv" 432 | top: "layer11-conv" 433 | name: "layer11-scale" 434 | type: "Scale" 435 | scale_param { 436 | bias_term: true 437 | } 438 | } 439 | layer { 440 | bottom: "layer11-conv" 441 | top: "layer11-conv" 442 | name: "layer11-act" 443 | type: "ReLU" 444 | relu_param { 445 | negative_slope: 0.1 446 | } 447 | } 448 | layer { 449 | bottom: "layer11-conv" 450 | top: "layer12-dwconv" 451 | name: "layer12-dwconv" 452 | type: "ConvolutionDepthwise" 453 | convolution_param { 454 | num_output: 256 455 | kernel_size: 3 456 | pad: 1 457 | stride: 2 458 | bias_term: false 459 | } 460 | } 461 | layer { 462 | bottom: "layer12-dwconv" 463 | top: "layer12-dwconv" 464 | name: "layer12-bn" 465 | type: "BatchNorm" 466 | batch_norm_param { 467 | use_global_stats: true 468 | } 469 | } 470 | layer { 471 | bottom: "layer12-dwconv" 472 | top: "layer12-dwconv" 473 | name: "layer12-scale" 474 | type: "Scale" 475 | scale_param { 476 | bias_term: true 477 | } 478 | } 479 | layer { 480 | bottom: "layer12-dwconv" 481 | top: "layer12-dwconv" 482 | name: "layer12-act" 483 | type: "ReLU" 484 | relu_param { 485 | negative_slope: 0.1 486 | } 487 | } 488 | layer { 489 | bottom: "layer12-dwconv" 490 | top: "layer13-conv" 491 | name: "layer13-conv" 492 | type: "Convolution" 493 | convolution_param { 494 | num_output: 512 495 | kernel_size: 1 496 | pad: 0 497 | stride: 1 498 | bias_term: false 499 | } 500 | } 501 | layer { 502 | bottom: "layer13-conv" 503 | top: "layer13-conv" 504 | name: "layer13-bn" 505 | type: "BatchNorm" 506 | batch_norm_param { 507 | use_global_stats: true 508 | } 509 | } 510 | layer { 511 | bottom: "layer13-conv" 512 | top: "layer13-conv" 513 | name: "layer13-scale" 514 | type: "Scale" 515 | scale_param { 516 | bias_term: true 517 | } 518 | } 519 | layer { 520 | bottom: "layer13-conv" 521 | top: "layer13-conv" 522 | name: "layer13-act" 523 | type: "ReLU" 524 | relu_param { 525 | negative_slope: 0.1 526 | } 527 | } 528 | layer { 529 | bottom: "layer13-conv" 530 | top: "layer14-dwconv" 531 | name: "layer14-dwconv" 532 | type: "ConvolutionDepthwise" 533 | convolution_param { 534 | num_output: 512 535 | kernel_size: 3 536 | pad: 1 537 | stride: 1 538 | bias_term: false 539 | } 540 | } 541 | layer { 542 | bottom: "layer14-dwconv" 543 | top: "layer14-dwconv" 544 | name: "layer14-bn" 545 | type: "BatchNorm" 546 | batch_norm_param { 547 | use_global_stats: true 548 | } 549 | } 550 | layer { 551 | bottom: "layer14-dwconv" 552 | top: "layer14-dwconv" 553 | name: "layer14-scale" 554 | type: "Scale" 555 | scale_param { 556 | bias_term: true 557 | } 
558 | } 559 | layer { 560 | bottom: "layer14-dwconv" 561 | top: "layer14-dwconv" 562 | name: "layer14-act" 563 | type: "ReLU" 564 | relu_param { 565 | negative_slope: 0.1 566 | } 567 | } 568 | layer { 569 | bottom: "layer14-dwconv" 570 | top: "layer15-conv" 571 | name: "layer15-conv" 572 | type: "Convolution" 573 | convolution_param { 574 | num_output: 512 575 | kernel_size: 1 576 | pad: 0 577 | stride: 1 578 | bias_term: false 579 | } 580 | } 581 | layer { 582 | bottom: "layer15-conv" 583 | top: "layer15-conv" 584 | name: "layer15-bn" 585 | type: "BatchNorm" 586 | batch_norm_param { 587 | use_global_stats: true 588 | } 589 | } 590 | layer { 591 | bottom: "layer15-conv" 592 | top: "layer15-conv" 593 | name: "layer15-scale" 594 | type: "Scale" 595 | scale_param { 596 | bias_term: true 597 | } 598 | } 599 | layer { 600 | bottom: "layer15-conv" 601 | top: "layer15-conv" 602 | name: "layer15-act" 603 | type: "ReLU" 604 | relu_param { 605 | negative_slope: 0.1 606 | } 607 | } 608 | layer { 609 | bottom: "layer15-conv" 610 | top: "layer16-dwconv" 611 | name: "layer16-dwconv" 612 | type: "ConvolutionDepthwise" 613 | convolution_param { 614 | num_output: 512 615 | kernel_size: 3 616 | pad: 1 617 | stride: 1 618 | bias_term: false 619 | } 620 | } 621 | layer { 622 | bottom: "layer16-dwconv" 623 | top: "layer16-dwconv" 624 | name: "layer16-bn" 625 | type: "BatchNorm" 626 | batch_norm_param { 627 | use_global_stats: true 628 | } 629 | } 630 | layer { 631 | bottom: "layer16-dwconv" 632 | top: "layer16-dwconv" 633 | name: "layer16-scale" 634 | type: "Scale" 635 | scale_param { 636 | bias_term: true 637 | } 638 | } 639 | layer { 640 | bottom: "layer16-dwconv" 641 | top: "layer16-dwconv" 642 | name: "layer16-act" 643 | type: "ReLU" 644 | relu_param { 645 | negative_slope: 0.1 646 | } 647 | } 648 | layer { 649 | bottom: "layer16-dwconv" 650 | top: "layer17-conv" 651 | name: "layer17-conv" 652 | type: "Convolution" 653 | convolution_param { 654 | num_output: 512 655 | kernel_size: 1 656 | pad: 0 657 | stride: 1 658 | bias_term: false 659 | } 660 | } 661 | layer { 662 | bottom: "layer17-conv" 663 | top: "layer17-conv" 664 | name: "layer17-bn" 665 | type: "BatchNorm" 666 | batch_norm_param { 667 | use_global_stats: true 668 | } 669 | } 670 | layer { 671 | bottom: "layer17-conv" 672 | top: "layer17-conv" 673 | name: "layer17-scale" 674 | type: "Scale" 675 | scale_param { 676 | bias_term: true 677 | } 678 | } 679 | layer { 680 | bottom: "layer17-conv" 681 | top: "layer17-conv" 682 | name: "layer17-act" 683 | type: "ReLU" 684 | relu_param { 685 | negative_slope: 0.1 686 | } 687 | } 688 | layer { 689 | bottom: "layer17-conv" 690 | top: "layer18-dwconv" 691 | name: "layer18-dwconv" 692 | type: "ConvolutionDepthwise" 693 | convolution_param { 694 | num_output: 512 695 | kernel_size: 3 696 | pad: 1 697 | stride: 1 698 | bias_term: false 699 | } 700 | } 701 | layer { 702 | bottom: "layer18-dwconv" 703 | top: "layer18-dwconv" 704 | name: "layer18-bn" 705 | type: "BatchNorm" 706 | batch_norm_param { 707 | use_global_stats: true 708 | } 709 | } 710 | layer { 711 | bottom: "layer18-dwconv" 712 | top: "layer18-dwconv" 713 | name: "layer18-scale" 714 | type: "Scale" 715 | scale_param { 716 | bias_term: true 717 | } 718 | } 719 | layer { 720 | bottom: "layer18-dwconv" 721 | top: "layer18-dwconv" 722 | name: "layer18-act" 723 | type: "ReLU" 724 | relu_param { 725 | negative_slope: 0.1 726 | } 727 | } 728 | layer { 729 | bottom: "layer18-dwconv" 730 | top: "layer19-conv" 731 | name: "layer19-conv" 732 | type: "Convolution" 
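# layer19-conv: the pointwise 1x1 half of a MobileNet depthwise-separable pair (paired with the 3x3 depthwise layer18-dwconv above)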
733 | convolution_param { 734 | num_output: 512 735 | kernel_size: 1 736 | pad: 0 737 | stride: 1 738 | bias_term: false 739 | } 740 | } 741 | layer { 742 | bottom: "layer19-conv" 743 | top: "layer19-conv" 744 | name: "layer19-bn" 745 | type: "BatchNorm" 746 | batch_norm_param { 747 | use_global_stats: true 748 | } 749 | } 750 | layer { 751 | bottom: "layer19-conv" 752 | top: "layer19-conv" 753 | name: "layer19-scale" 754 | type: "Scale" 755 | scale_param { 756 | bias_term: true 757 | } 758 | } 759 | layer { 760 | bottom: "layer19-conv" 761 | top: "layer19-conv" 762 | name: "layer19-act" 763 | type: "ReLU" 764 | relu_param { 765 | negative_slope: 0.1 766 | } 767 | } 768 | layer { 769 | bottom: "layer19-conv" 770 | top: "layer20-dwconv" 771 | name: "layer20-dwconv" 772 | type: "ConvolutionDepthwise" 773 | convolution_param { 774 | num_output: 512 775 | kernel_size: 3 776 | pad: 1 777 | stride: 1 778 | bias_term: false 779 | } 780 | } 781 | layer { 782 | bottom: "layer20-dwconv" 783 | top: "layer20-dwconv" 784 | name: "layer20-bn" 785 | type: "BatchNorm" 786 | batch_norm_param { 787 | use_global_stats: true 788 | } 789 | } 790 | layer { 791 | bottom: "layer20-dwconv" 792 | top: "layer20-dwconv" 793 | name: "layer20-scale" 794 | type: "Scale" 795 | scale_param { 796 | bias_term: true 797 | } 798 | } 799 | layer { 800 | bottom: "layer20-dwconv" 801 | top: "layer20-dwconv" 802 | name: "layer20-act" 803 | type: "ReLU" 804 | relu_param { 805 | negative_slope: 0.1 806 | } 807 | } 808 | layer { 809 | bottom: "layer20-dwconv" 810 | top: "layer21-conv" 811 | name: "layer21-conv" 812 | type: "Convolution" 813 | convolution_param { 814 | num_output: 512 815 | kernel_size: 1 816 | pad: 0 817 | stride: 1 818 | bias_term: false 819 | } 820 | } 821 | layer { 822 | bottom: "layer21-conv" 823 | top: "layer21-conv" 824 | name: "layer21-bn" 825 | type: "BatchNorm" 826 | batch_norm_param { 827 | use_global_stats: true 828 | } 829 | } 830 | layer { 831 | bottom: "layer21-conv" 832 | top: "layer21-conv" 833 | name: "layer21-scale" 834 | type: "Scale" 835 | scale_param { 836 | bias_term: true 837 | } 838 | } 839 | layer { 840 | bottom: "layer21-conv" 841 | top: "layer21-conv" 842 | name: "layer21-act" 843 | type: "ReLU" 844 | relu_param { 845 | negative_slope: 0.1 846 | } 847 | } 848 | layer { 849 | bottom: "layer21-conv" 850 | top: "layer22-dwconv" 851 | name: "layer22-dwconv" 852 | type: "ConvolutionDepthwise" 853 | convolution_param { 854 | num_output: 512 855 | kernel_size: 3 856 | pad: 1 857 | stride: 1 858 | bias_term: false 859 | } 860 | } 861 | layer { 862 | bottom: "layer22-dwconv" 863 | top: "layer22-dwconv" 864 | name: "layer22-bn" 865 | type: "BatchNorm" 866 | batch_norm_param { 867 | use_global_stats: true 868 | } 869 | } 870 | layer { 871 | bottom: "layer22-dwconv" 872 | top: "layer22-dwconv" 873 | name: "layer22-scale" 874 | type: "Scale" 875 | scale_param { 876 | bias_term: true 877 | } 878 | } 879 | layer { 880 | bottom: "layer22-dwconv" 881 | top: "layer22-dwconv" 882 | name: "layer22-act" 883 | type: "ReLU" 884 | relu_param { 885 | negative_slope: 0.1 886 | } 887 | } 888 | layer { 889 | bottom: "layer22-dwconv" 890 | top: "layer23-conv" 891 | name: "layer23-conv" 892 | type: "Convolution" 893 | convolution_param { 894 | num_output: 512 895 | kernel_size: 1 896 | pad: 0 897 | stride: 1 898 | bias_term: false 899 | } 900 | } 901 | layer { 902 | bottom: "layer23-conv" 903 | top: "layer23-conv" 904 | name: "layer23-bn" 905 | type: "BatchNorm" 906 | batch_norm_param { 907 | use_global_stats: true 908 
| } 909 | } 910 | layer { 911 | bottom: "layer23-conv" 912 | top: "layer23-conv" 913 | name: "layer23-scale" 914 | type: "Scale" 915 | scale_param { 916 | bias_term: true 917 | } 918 | } 919 | layer { 920 | bottom: "layer23-conv" 921 | top: "layer23-conv" 922 | name: "layer23-act" 923 | type: "ReLU" 924 | relu_param { 925 | negative_slope: 0.1 926 | } 927 | } 928 | layer { 929 | bottom: "layer23-conv" 930 | top: "layer24-dwconv" 931 | name: "layer24-dwconv" 932 | type: "ConvolutionDepthwise" 933 | convolution_param { 934 | num_output: 512 935 | kernel_size: 3 936 | pad: 1 937 | stride: 2 938 | bias_term: false 939 | } 940 | } 941 | layer { 942 | bottom: "layer24-dwconv" 943 | top: "layer24-dwconv" 944 | name: "layer24-bn" 945 | type: "BatchNorm" 946 | batch_norm_param { 947 | use_global_stats: true 948 | } 949 | } 950 | layer { 951 | bottom: "layer24-dwconv" 952 | top: "layer24-dwconv" 953 | name: "layer24-scale" 954 | type: "Scale" 955 | scale_param { 956 | bias_term: true 957 | } 958 | } 959 | layer { 960 | bottom: "layer24-dwconv" 961 | top: "layer24-dwconv" 962 | name: "layer24-act" 963 | type: "ReLU" 964 | relu_param { 965 | negative_slope: 0.1 966 | } 967 | } 968 | layer { 969 | bottom: "layer24-dwconv" 970 | top: "layer25-conv" 971 | name: "layer25-conv" 972 | type: "Convolution" 973 | convolution_param { 974 | num_output: 1024 975 | kernel_size: 1 976 | pad: 0 977 | stride: 1 978 | bias_term: false 979 | } 980 | } 981 | layer { 982 | bottom: "layer25-conv" 983 | top: "layer25-conv" 984 | name: "layer25-bn" 985 | type: "BatchNorm" 986 | batch_norm_param { 987 | use_global_stats: true 988 | } 989 | } 990 | layer { 991 | bottom: "layer25-conv" 992 | top: "layer25-conv" 993 | name: "layer25-scale" 994 | type: "Scale" 995 | scale_param { 996 | bias_term: true 997 | } 998 | } 999 | layer { 1000 | bottom: "layer25-conv" 1001 | top: "layer25-conv" 1002 | name: "layer25-act" 1003 | type: "ReLU" 1004 | relu_param { 1005 | negative_slope: 0.1 1006 | } 1007 | } 1008 | layer { 1009 | bottom: "layer25-conv" 1010 | top: "layer26-dwconv" 1011 | name: "layer26-dwconv" 1012 | type: "ConvolutionDepthwise" 1013 | convolution_param { 1014 | num_output: 1024 1015 | kernel_size: 3 1016 | pad: 1 1017 | stride: 1 1018 | bias_term: false 1019 | } 1020 | } 1021 | layer { 1022 | bottom: "layer26-dwconv" 1023 | top: "layer26-dwconv" 1024 | name: "layer26-bn" 1025 | type: "BatchNorm" 1026 | batch_norm_param { 1027 | use_global_stats: true 1028 | } 1029 | } 1030 | layer { 1031 | bottom: "layer26-dwconv" 1032 | top: "layer26-dwconv" 1033 | name: "layer26-scale" 1034 | type: "Scale" 1035 | scale_param { 1036 | bias_term: true 1037 | } 1038 | } 1039 | layer { 1040 | bottom: "layer26-dwconv" 1041 | top: "layer26-dwconv" 1042 | name: "layer26-act" 1043 | type: "ReLU" 1044 | relu_param { 1045 | negative_slope: 0.1 1046 | } 1047 | } 1048 | layer { 1049 | bottom: "layer26-dwconv" 1050 | top: "layer27-conv" 1051 | name: "layer27-conv" 1052 | type: "Convolution" 1053 | convolution_param { 1054 | num_output: 1024 1055 | kernel_size: 1 1056 | pad: 0 1057 | stride: 1 1058 | bias_term: false 1059 | } 1060 | } 1061 | layer { 1062 | bottom: "layer27-conv" 1063 | top: "layer27-conv" 1064 | name: "layer27-bn" 1065 | type: "BatchNorm" 1066 | batch_norm_param { 1067 | use_global_stats: true 1068 | } 1069 | } 1070 | layer { 1071 | bottom: "layer27-conv" 1072 | top: "layer27-conv" 1073 | name: "layer27-scale" 1074 | type: "Scale" 1075 | scale_param { 1076 | bias_term: true 1077 | } 1078 | } 1079 | layer { 1080 | bottom: 
"layer27-conv" 1081 | top: "layer27-conv" 1082 | name: "layer27-act" 1083 | type: "ReLU" 1084 | relu_param { 1085 | negative_slope: 0.1 1086 | } 1087 | } 1088 | layer { 1089 | bottom: "layer27-conv" 1090 | top: "layer28-conv" 1091 | name: "layer28-conv" 1092 | type: "Convolution" 1093 | convolution_param { 1094 | num_output: 512 1095 | kernel_size: 1 1096 | pad: 0 1097 | stride: 1 1098 | bias_term: false 1099 | } 1100 | } 1101 | layer { 1102 | bottom: "layer28-conv" 1103 | top: "layer28-conv" 1104 | name: "layer28-bn" 1105 | type: "BatchNorm" 1106 | batch_norm_param { 1107 | use_global_stats: true 1108 | } 1109 | } 1110 | layer { 1111 | bottom: "layer28-conv" 1112 | top: "layer28-conv" 1113 | name: "layer28-scale" 1114 | type: "Scale" 1115 | scale_param { 1116 | bias_term: true 1117 | } 1118 | } 1119 | layer { 1120 | bottom: "layer28-conv" 1121 | top: "layer28-conv" 1122 | name: "layer28-act" 1123 | type: "ReLU" 1124 | relu_param { 1125 | negative_slope: 0.1 1126 | } 1127 | } 1128 | layer { 1129 | bottom: "layer28-conv" 1130 | top: "layer29-conv" 1131 | name: "layer29-conv" 1132 | type: "Convolution" 1133 | convolution_param { 1134 | num_output: 1024 1135 | kernel_size: 3 1136 | pad: 1 1137 | stride: 1 1138 | bias_term: false 1139 | } 1140 | } 1141 | layer { 1142 | bottom: "layer29-conv" 1143 | top: "layer29-conv" 1144 | name: "layer29-bn" 1145 | type: "BatchNorm" 1146 | batch_norm_param { 1147 | use_global_stats: true 1148 | } 1149 | } 1150 | layer { 1151 | bottom: "layer29-conv" 1152 | top: "layer29-conv" 1153 | name: "layer29-scale" 1154 | type: "Scale" 1155 | scale_param { 1156 | bias_term: true 1157 | } 1158 | } 1159 | layer { 1160 | bottom: "layer29-conv" 1161 | top: "layer29-conv" 1162 | name: "layer29-act" 1163 | type: "ReLU" 1164 | relu_param { 1165 | negative_slope: 0.1 1166 | } 1167 | } 1168 | layer { 1169 | bottom: "layer29-conv" 1170 | top: "layer30-conv" 1171 | name: "layer30-conv" 1172 | type: "Convolution" 1173 | convolution_param { 1174 | num_output: 512 1175 | kernel_size: 1 1176 | pad: 0 1177 | stride: 1 1178 | bias_term: false 1179 | } 1180 | } 1181 | layer { 1182 | bottom: "layer30-conv" 1183 | top: "layer30-conv" 1184 | name: "layer30-bn" 1185 | type: "BatchNorm" 1186 | batch_norm_param { 1187 | use_global_stats: true 1188 | } 1189 | } 1190 | layer { 1191 | bottom: "layer30-conv" 1192 | top: "layer30-conv" 1193 | name: "layer30-scale" 1194 | type: "Scale" 1195 | scale_param { 1196 | bias_term: true 1197 | } 1198 | } 1199 | layer { 1200 | bottom: "layer30-conv" 1201 | top: "layer30-conv" 1202 | name: "layer30-act" 1203 | type: "ReLU" 1204 | relu_param { 1205 | negative_slope: 0.1 1206 | } 1207 | } 1208 | layer { 1209 | bottom: "layer30-conv" 1210 | top: "layer31-conv" 1211 | name: "layer31-conv" 1212 | type: "Convolution" 1213 | convolution_param { 1214 | num_output: 1024 1215 | kernel_size: 3 1216 | pad: 1 1217 | stride: 1 1218 | bias_term: false 1219 | } 1220 | } 1221 | layer { 1222 | bottom: "layer31-conv" 1223 | top: "layer31-conv" 1224 | name: "layer31-bn" 1225 | type: "BatchNorm" 1226 | batch_norm_param { 1227 | use_global_stats: true 1228 | } 1229 | } 1230 | layer { 1231 | bottom: "layer31-conv" 1232 | top: "layer31-conv" 1233 | name: "layer31-scale" 1234 | type: "Scale" 1235 | scale_param { 1236 | bias_term: true 1237 | } 1238 | } 1239 | layer { 1240 | bottom: "layer31-conv" 1241 | top: "layer31-conv" 1242 | name: "layer31-act" 1243 | type: "ReLU" 1244 | relu_param { 1245 | negative_slope: 0.1 1246 | } 1247 | } 1248 | layer { 1249 | bottom: "layer31-conv" 1250 
| top: "layer32-conv" 1251 | name: "layer32-conv" 1252 | type: "Convolution" 1253 | convolution_param { 1254 | num_output: 512 1255 | kernel_size: 1 1256 | pad: 0 1257 | stride: 1 1258 | bias_term: false 1259 | } 1260 | } 1261 | layer { 1262 | bottom: "layer32-conv" 1263 | top: "layer32-conv" 1264 | name: "layer32-bn" 1265 | type: "BatchNorm" 1266 | batch_norm_param { 1267 | use_global_stats: true 1268 | } 1269 | } 1270 | layer { 1271 | bottom: "layer32-conv" 1272 | top: "layer32-conv" 1273 | name: "layer32-scale" 1274 | type: "Scale" 1275 | scale_param { 1276 | bias_term: true 1277 | } 1278 | } 1279 | layer { 1280 | bottom: "layer32-conv" 1281 | top: "layer32-conv" 1282 | name: "layer32-act" 1283 | type: "ReLU" 1284 | relu_param { 1285 | negative_slope: 0.1 1286 | } 1287 | } 1288 | layer { 1289 | bottom: "layer32-conv" 1290 | top: "layer33-conv" 1291 | name: "layer33-conv" 1292 | type: "Convolution" 1293 | convolution_param { 1294 | num_output: 1024 1295 | kernel_size: 3 1296 | pad: 1 1297 | stride: 1 1298 | bias_term: false 1299 | } 1300 | } 1301 | layer { 1302 | bottom: "layer33-conv" 1303 | top: "layer33-conv" 1304 | name: "layer33-bn" 1305 | type: "BatchNorm" 1306 | batch_norm_param { 1307 | use_global_stats: true 1308 | } 1309 | } 1310 | layer { 1311 | bottom: "layer33-conv" 1312 | top: "layer33-conv" 1313 | name: "layer33-scale" 1314 | type: "Scale" 1315 | scale_param { 1316 | bias_term: true 1317 | } 1318 | } 1319 | layer { 1320 | bottom: "layer33-conv" 1321 | top: "layer33-conv" 1322 | name: "layer33-act" 1323 | type: "ReLU" 1324 | relu_param { 1325 | negative_slope: 0.1 1326 | } 1327 | } 1328 | layer { 1329 | bottom: "layer33-conv" 1330 | top: "layer34-conv" 1331 | name: "layer34-conv" 1332 | type: "Convolution" 1333 | convolution_param { 1334 | num_output: 255 1335 | kernel_size: 1 1336 | pad: 0 1337 | stride: 1 1338 | bias_term: true 1339 | } 1340 | } 1341 | layer { 1342 | bottom: "layer32-conv" 1343 | top: "layer36-route" 1344 | name: "layer36-route" 1345 | type: "Concat" 1346 | } 1347 | layer { 1348 | bottom: "layer36-route" 1349 | top: "layer37-conv" 1350 | name: "layer37-conv" 1351 | type: "Convolution" 1352 | convolution_param { 1353 | num_output: 256 1354 | kernel_size: 1 1355 | pad: 0 1356 | stride: 1 1357 | bias_term: false 1358 | } 1359 | } 1360 | layer { 1361 | bottom: "layer37-conv" 1362 | top: "layer37-conv" 1363 | name: "layer37-bn" 1364 | type: "BatchNorm" 1365 | batch_norm_param { 1366 | use_global_stats: true 1367 | } 1368 | } 1369 | layer { 1370 | bottom: "layer37-conv" 1371 | top: "layer37-conv" 1372 | name: "layer37-scale" 1373 | type: "Scale" 1374 | scale_param { 1375 | bias_term: true 1376 | } 1377 | } 1378 | layer { 1379 | bottom: "layer37-conv" 1380 | top: "layer37-conv" 1381 | name: "layer37-act" 1382 | type: "ReLU" 1383 | relu_param { 1384 | negative_slope: 0.1 1385 | } 1386 | } 1387 | layer { 1388 | bottom: "layer37-conv" 1389 | top: "layer38-upsample" 1390 | name: "layer38-upsample" 1391 | type: "Upsample" 1392 | upsample_param { 1393 | scale: 2 1394 | } 1395 | } 1396 | layer { 1397 | bottom: "layer38-upsample" 1398 | bottom: "layer23-conv" 1399 | top: "layer39-route" 1400 | name: "layer39-route" 1401 | type: "Concat" 1402 | } 1403 | layer { 1404 | bottom: "layer39-route" 1405 | top: "layer40-conv" 1406 | name: "layer40-conv" 1407 | type: "Convolution" 1408 | convolution_param { 1409 | num_output: 256 1410 | kernel_size: 1 1411 | pad: 0 1412 | stride: 1 1413 | bias_term: false 1414 | } 1415 | } 1416 | layer { 1417 | bottom: "layer40-conv" 1418 | top: 
"layer40-conv" 1419 | name: "layer40-bn" 1420 | type: "BatchNorm" 1421 | batch_norm_param { 1422 | use_global_stats: true 1423 | } 1424 | } 1425 | layer { 1426 | bottom: "layer40-conv" 1427 | top: "layer40-conv" 1428 | name: "layer40-scale" 1429 | type: "Scale" 1430 | scale_param { 1431 | bias_term: true 1432 | } 1433 | } 1434 | layer { 1435 | bottom: "layer40-conv" 1436 | top: "layer40-conv" 1437 | name: "layer40-act" 1438 | type: "ReLU" 1439 | relu_param { 1440 | negative_slope: 0.1 1441 | } 1442 | } 1443 | layer { 1444 | bottom: "layer40-conv" 1445 | top: "layer41-conv" 1446 | name: "layer41-conv" 1447 | type: "Convolution" 1448 | convolution_param { 1449 | num_output: 512 1450 | kernel_size: 3 1451 | pad: 1 1452 | stride: 1 1453 | bias_term: false 1454 | } 1455 | } 1456 | layer { 1457 | bottom: "layer41-conv" 1458 | top: "layer41-conv" 1459 | name: "layer41-bn" 1460 | type: "BatchNorm" 1461 | batch_norm_param { 1462 | use_global_stats: true 1463 | } 1464 | } 1465 | layer { 1466 | bottom: "layer41-conv" 1467 | top: "layer41-conv" 1468 | name: "layer41-scale" 1469 | type: "Scale" 1470 | scale_param { 1471 | bias_term: true 1472 | } 1473 | } 1474 | layer { 1475 | bottom: "layer41-conv" 1476 | top: "layer41-conv" 1477 | name: "layer41-act" 1478 | type: "ReLU" 1479 | relu_param { 1480 | negative_slope: 0.1 1481 | } 1482 | } 1483 | layer { 1484 | bottom: "layer41-conv" 1485 | top: "layer42-conv" 1486 | name: "layer42-conv" 1487 | type: "Convolution" 1488 | convolution_param { 1489 | num_output: 256 1490 | kernel_size: 1 1491 | pad: 0 1492 | stride: 1 1493 | bias_term: false 1494 | } 1495 | } 1496 | layer { 1497 | bottom: "layer42-conv" 1498 | top: "layer42-conv" 1499 | name: "layer42-bn" 1500 | type: "BatchNorm" 1501 | batch_norm_param { 1502 | use_global_stats: true 1503 | } 1504 | } 1505 | layer { 1506 | bottom: "layer42-conv" 1507 | top: "layer42-conv" 1508 | name: "layer42-scale" 1509 | type: "Scale" 1510 | scale_param { 1511 | bias_term: true 1512 | } 1513 | } 1514 | layer { 1515 | bottom: "layer42-conv" 1516 | top: "layer42-conv" 1517 | name: "layer42-act" 1518 | type: "ReLU" 1519 | relu_param { 1520 | negative_slope: 0.1 1521 | } 1522 | } 1523 | layer { 1524 | bottom: "layer42-conv" 1525 | top: "layer43-conv" 1526 | name: "layer43-conv" 1527 | type: "Convolution" 1528 | convolution_param { 1529 | num_output: 512 1530 | kernel_size: 3 1531 | pad: 1 1532 | stride: 1 1533 | bias_term: false 1534 | } 1535 | } 1536 | layer { 1537 | bottom: "layer43-conv" 1538 | top: "layer43-conv" 1539 | name: "layer43-bn" 1540 | type: "BatchNorm" 1541 | batch_norm_param { 1542 | use_global_stats: true 1543 | } 1544 | } 1545 | layer { 1546 | bottom: "layer43-conv" 1547 | top: "layer43-conv" 1548 | name: "layer43-scale" 1549 | type: "Scale" 1550 | scale_param { 1551 | bias_term: true 1552 | } 1553 | } 1554 | layer { 1555 | bottom: "layer43-conv" 1556 | top: "layer43-conv" 1557 | name: "layer43-act" 1558 | type: "ReLU" 1559 | relu_param { 1560 | negative_slope: 0.1 1561 | } 1562 | } 1563 | layer { 1564 | bottom: "layer43-conv" 1565 | top: "layer44-conv" 1566 | name: "layer44-conv" 1567 | type: "Convolution" 1568 | convolution_param { 1569 | num_output: 256 1570 | kernel_size: 1 1571 | pad: 0 1572 | stride: 1 1573 | bias_term: false 1574 | } 1575 | } 1576 | layer { 1577 | bottom: "layer44-conv" 1578 | top: "layer44-conv" 1579 | name: "layer44-bn" 1580 | type: "BatchNorm" 1581 | batch_norm_param { 1582 | use_global_stats: true 1583 | } 1584 | } 1585 | layer { 1586 | bottom: "layer44-conv" 1587 | top: 
"layer44-conv" 1588 | name: "layer44-scale" 1589 | type: "Scale" 1590 | scale_param { 1591 | bias_term: true 1592 | } 1593 | } 1594 | layer { 1595 | bottom: "layer44-conv" 1596 | top: "layer44-conv" 1597 | name: "layer44-act" 1598 | type: "ReLU" 1599 | relu_param { 1600 | negative_slope: 0.1 1601 | } 1602 | } 1603 | layer { 1604 | bottom: "layer44-conv" 1605 | top: "layer45-conv" 1606 | name: "layer45-conv" 1607 | type: "Convolution" 1608 | convolution_param { 1609 | num_output: 512 1610 | kernel_size: 3 1611 | pad: 1 1612 | stride: 1 1613 | bias_term: false 1614 | } 1615 | } 1616 | layer { 1617 | bottom: "layer45-conv" 1618 | top: "layer45-conv" 1619 | name: "layer45-bn" 1620 | type: "BatchNorm" 1621 | batch_norm_param { 1622 | use_global_stats: true 1623 | } 1624 | } 1625 | layer { 1626 | bottom: "layer45-conv" 1627 | top: "layer45-conv" 1628 | name: "layer45-scale" 1629 | type: "Scale" 1630 | scale_param { 1631 | bias_term: true 1632 | } 1633 | } 1634 | layer { 1635 | bottom: "layer45-conv" 1636 | top: "layer45-conv" 1637 | name: "layer45-act" 1638 | type: "ReLU" 1639 | relu_param { 1640 | negative_slope: 0.1 1641 | } 1642 | } 1643 | layer { 1644 | bottom: "layer45-conv" 1645 | top: "layer46-conv" 1646 | name: "layer46-conv" 1647 | type: "Convolution" 1648 | convolution_param { 1649 | num_output: 255 1650 | kernel_size: 1 1651 | pad: 0 1652 | stride: 1 1653 | bias_term: true 1654 | } 1655 | } 1656 | layer { 1657 | bottom: "layer44-conv" 1658 | top: "layer48-route" 1659 | name: "layer48-route" 1660 | type: "Concat" 1661 | } 1662 | layer { 1663 | bottom: "layer48-route" 1664 | top: "layer49-conv" 1665 | name: "layer49-conv" 1666 | type: "Convolution" 1667 | convolution_param { 1668 | num_output: 128 1669 | kernel_size: 1 1670 | pad: 0 1671 | stride: 1 1672 | bias_term: false 1673 | } 1674 | } 1675 | layer { 1676 | bottom: "layer49-conv" 1677 | top: "layer49-conv" 1678 | name: "layer49-bn" 1679 | type: "BatchNorm" 1680 | batch_norm_param { 1681 | use_global_stats: true 1682 | } 1683 | } 1684 | layer { 1685 | bottom: "layer49-conv" 1686 | top: "layer49-conv" 1687 | name: "layer49-scale" 1688 | type: "Scale" 1689 | scale_param { 1690 | bias_term: true 1691 | } 1692 | } 1693 | layer { 1694 | bottom: "layer49-conv" 1695 | top: "layer49-conv" 1696 | name: "layer49-act" 1697 | type: "ReLU" 1698 | relu_param { 1699 | negative_slope: 0.1 1700 | } 1701 | } 1702 | layer { 1703 | bottom: "layer49-conv" 1704 | top: "layer50-upsample" 1705 | name: "layer50-upsample" 1706 | type: "Upsample" 1707 | upsample_param { 1708 | scale: 2 1709 | } 1710 | } 1711 | layer { 1712 | bottom: "layer50-upsample" 1713 | bottom: "layer11-conv" 1714 | top: "layer51-route" 1715 | name: "layer51-route" 1716 | type: "Concat" 1717 | } 1718 | layer { 1719 | bottom: "layer51-route" 1720 | top: "layer52-conv" 1721 | name: "layer52-conv" 1722 | type: "Convolution" 1723 | convolution_param { 1724 | num_output: 128 1725 | kernel_size: 1 1726 | pad: 0 1727 | stride: 1 1728 | bias_term: false 1729 | } 1730 | } 1731 | layer { 1732 | bottom: "layer52-conv" 1733 | top: "layer52-conv" 1734 | name: "layer52-bn" 1735 | type: "BatchNorm" 1736 | batch_norm_param { 1737 | use_global_stats: true 1738 | } 1739 | } 1740 | layer { 1741 | bottom: "layer52-conv" 1742 | top: "layer52-conv" 1743 | name: "layer52-scale" 1744 | type: "Scale" 1745 | scale_param { 1746 | bias_term: true 1747 | } 1748 | } 1749 | layer { 1750 | bottom: "layer52-conv" 1751 | top: "layer52-conv" 1752 | name: "layer52-act" 1753 | type: "ReLU" 1754 | relu_param { 1755 | 
negative_slope: 0.1 1756 | } 1757 | } 1758 | layer { 1759 | bottom: "layer52-conv" 1760 | top: "layer53-conv" 1761 | name: "layer53-conv" 1762 | type: "Convolution" 1763 | convolution_param { 1764 | num_output: 256 1765 | kernel_size: 3 1766 | pad: 1 1767 | stride: 1 1768 | bias_term: false 1769 | } 1770 | } 1771 | layer { 1772 | bottom: "layer53-conv" 1773 | top: "layer53-conv" 1774 | name: "layer53-bn" 1775 | type: "BatchNorm" 1776 | batch_norm_param { 1777 | use_global_stats: true 1778 | } 1779 | } 1780 | layer { 1781 | bottom: "layer53-conv" 1782 | top: "layer53-conv" 1783 | name: "layer53-scale" 1784 | type: "Scale" 1785 | scale_param { 1786 | bias_term: true 1787 | } 1788 | } 1789 | layer { 1790 | bottom: "layer53-conv" 1791 | top: "layer53-conv" 1792 | name: "layer53-act" 1793 | type: "ReLU" 1794 | relu_param { 1795 | negative_slope: 0.1 1796 | } 1797 | } 1798 | layer { 1799 | bottom: "layer53-conv" 1800 | top: "layer54-conv" 1801 | name: "layer54-conv" 1802 | type: "Convolution" 1803 | convolution_param { 1804 | num_output: 128 1805 | kernel_size: 1 1806 | pad: 0 1807 | stride: 1 1808 | bias_term: false 1809 | } 1810 | } 1811 | layer { 1812 | bottom: "layer54-conv" 1813 | top: "layer54-conv" 1814 | name: "layer54-bn" 1815 | type: "BatchNorm" 1816 | batch_norm_param { 1817 | use_global_stats: true 1818 | } 1819 | } 1820 | layer { 1821 | bottom: "layer54-conv" 1822 | top: "layer54-conv" 1823 | name: "layer54-scale" 1824 | type: "Scale" 1825 | scale_param { 1826 | bias_term: true 1827 | } 1828 | } 1829 | layer { 1830 | bottom: "layer54-conv" 1831 | top: "layer54-conv" 1832 | name: "layer54-act" 1833 | type: "ReLU" 1834 | relu_param { 1835 | negative_slope: 0.1 1836 | } 1837 | } 1838 | layer { 1839 | bottom: "layer54-conv" 1840 | top: "layer55-conv" 1841 | name: "layer55-conv" 1842 | type: "Convolution" 1843 | convolution_param { 1844 | num_output: 256 1845 | kernel_size: 3 1846 | pad: 1 1847 | stride: 1 1848 | bias_term: false 1849 | } 1850 | } 1851 | layer { 1852 | bottom: "layer55-conv" 1853 | top: "layer55-conv" 1854 | name: "layer55-bn" 1855 | type: "BatchNorm" 1856 | batch_norm_param { 1857 | use_global_stats: true 1858 | } 1859 | } 1860 | layer { 1861 | bottom: "layer55-conv" 1862 | top: "layer55-conv" 1863 | name: "layer55-scale" 1864 | type: "Scale" 1865 | scale_param { 1866 | bias_term: true 1867 | } 1868 | } 1869 | layer { 1870 | bottom: "layer55-conv" 1871 | top: "layer55-conv" 1872 | name: "layer55-act" 1873 | type: "ReLU" 1874 | relu_param { 1875 | negative_slope: 0.1 1876 | } 1877 | } 1878 | layer { 1879 | bottom: "layer55-conv" 1880 | top: "layer56-conv" 1881 | name: "layer56-conv" 1882 | type: "Convolution" 1883 | convolution_param { 1884 | num_output: 128 1885 | kernel_size: 1 1886 | pad: 0 1887 | stride: 1 1888 | bias_term: false 1889 | } 1890 | } 1891 | layer { 1892 | bottom: "layer56-conv" 1893 | top: "layer56-conv" 1894 | name: "layer56-bn" 1895 | type: "BatchNorm" 1896 | batch_norm_param { 1897 | use_global_stats: true 1898 | } 1899 | } 1900 | layer { 1901 | bottom: "layer56-conv" 1902 | top: "layer56-conv" 1903 | name: "layer56-scale" 1904 | type: "Scale" 1905 | scale_param { 1906 | bias_term: true 1907 | } 1908 | } 1909 | layer { 1910 | bottom: "layer56-conv" 1911 | top: "layer56-conv" 1912 | name: "layer56-act" 1913 | type: "ReLU" 1914 | relu_param { 1915 | negative_slope: 0.1 1916 | } 1917 | } 1918 | layer { 1919 | bottom: "layer56-conv" 1920 | top: "layer57-conv" 1921 | name: "layer57-conv" 1922 | type: "Convolution" 1923 | convolution_param { 1924 | 
num_output: 256 1925 | kernel_size: 3 1926 | pad: 1 1927 | stride: 1 1928 | bias_term: false 1929 | } 1930 | } 1931 | layer { 1932 | bottom: "layer57-conv" 1933 | top: "layer57-conv" 1934 | name: "layer57-bn" 1935 | type: "BatchNorm" 1936 | batch_norm_param { 1937 | use_global_stats: true 1938 | } 1939 | } 1940 | layer { 1941 | bottom: "layer57-conv" 1942 | top: "layer57-conv" 1943 | name: "layer57-scale" 1944 | type: "Scale" 1945 | scale_param { 1946 | bias_term: true 1947 | } 1948 | } 1949 | layer { 1950 | bottom: "layer57-conv" 1951 | top: "layer57-conv" 1952 | name: "layer57-act" 1953 | type: "ReLU" 1954 | relu_param { 1955 | negative_slope: 0.1 1956 | } 1957 | } 1958 | layer { 1959 | bottom: "layer57-conv" 1960 | top: "layer58-conv" 1961 | name: "layer58-conv" 1962 | type: "Convolution" 1963 | convolution_param { 1964 | num_output: 255 1965 | kernel_size: 1 1966 | pad: 0 1967 | stride: 1 1968 | bias_term: true 1969 | } 1970 | } 1971 | --------------------------------------------------------------------------------
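
Note on the structure above: each darknet `[convolutional]` block with `batch_normalize=1` and `activation=leaky` is emitted as a Convolution (with `bias_term: false`) followed by in-place BatchNorm, Scale, and ReLU layers, with `negative_slope: 0.1` reproducing darknet's leaky ReLU. A single-input darknet `[route]` becomes a one-bottom Concat (a pass-through), and the three `num_output: 255` convolutions (layer34, layer46, layer58) are the YOLO detection heads: 255 = 3 anchors × (80 COCO classes + 5). Below is a minimal sanity-check sketch for a converted model using pycaffe; the file names, the `data` input blob name, and the 416×416 input size are illustrative assumptions, and pycaffe must be built with the custom Upsample layer from `caffe_layers/`:

```python
# A sketch, not part of the repo: verify the detection-head shapes of a
# model produced by darknet2caffe.py. File names here are placeholders.
import numpy as np
import caffe

caffe.set_mode_cpu()
net = caffe.Net('model.prototxt', 'model.caffemodel', caffe.TEST)

# Feed a dummy 416x416 RGB image (assumes the input blob is named "data").
net.blobs['data'].reshape(1, 3, 416, 416)
net.blobs['data'].data[...] = np.random.rand(1, 3, 416, 416).astype(np.float32)
net.forward()

# For a 416x416 input, the three 255-channel heads should sit at strides
# 32, 16, and 8: (1, 255, 13, 13), (1, 255, 26, 26), (1, 255, 52, 52).
for name in ('layer34-conv', 'layer46-conv', 'layer58-conv'):
    print(name, net.blobs[name].data.shape)
```

If the shapes match, the Upsample and Concat wiring survived the conversion; decoding the raw head outputs into boxes still has to happen outside Caffe, since no YOLO layer is emitted in the prototxt.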