├── .gitignore
├── 3rd_party
└── ncnn
│ ├── .travis.yml
│ ├── CMakeLists.txt
│ ├── Info.plist
│ ├── LICENSE.txt
│ ├── README.md
│ ├── android.toolchain.cmake
│ ├── autotest
│ ├── CMakeLists.txt
│ ├── autotest.cpp
│ ├── cmake
│ │ ├── DownloadProject.CMakeLists.cmake.in
│ │ └── DownloadProject.cmake
│ ├── test_convolution.h
│ └── test_innerproduct.h
│ ├── benchmark
│ ├── CMakeLists.txt
│ ├── README.md
│ ├── alexnet.param
│ ├── benchncnn.cpp
│ ├── googlenet.param
│ ├── mobilenet.param
│ ├── mobilenet_ssd.param
│ ├── mobilenet_v2.param
│ ├── resnet18.param
│ ├── shufflenet.param
│ ├── squeezenet.param
│ ├── squeezenet_ssd.param
│ └── vgg16.param
│ ├── build.sh
│ ├── images
│ ├── 128-ncnn.png
│ ├── 16-ncnn.png
│ ├── 256-ncnn.png
│ ├── 32-ncnn.png
│ └── 64-ncnn.png
│ ├── ios.toolchain.cmake
│ ├── iossimxc.toolchain.cmake
│ ├── iosxc.toolchain.cmake
│ ├── package.sh
│ ├── src
│ ├── CMakeLists.txt
│ ├── benchmark.cpp
│ ├── benchmark.h
│ ├── blob.cpp
│ ├── blob.h
│ ├── cpu.cpp
│ ├── cpu.h
│ ├── layer.cpp
│ ├── layer.h
│ ├── layer
│ │ ├── absval.cpp
│ │ ├── absval.h
│ │ ├── argmax.cpp
│ │ ├── argmax.h
│ │ ├── arm
│ │ │ ├── absval_arm.cpp
│ │ │ ├── absval_arm.h
│ │ │ ├── batchnorm_arm.cpp
│ │ │ ├── batchnorm_arm.h
│ │ │ ├── bias_arm.cpp
│ │ │ ├── bias_arm.h
│ │ │ ├── convolution_1x1.h
│ │ │ ├── convolution_2x2.h
│ │ │ ├── convolution_3x3.h
│ │ │ ├── convolution_4x4.h
│ │ │ ├── convolution_5x5.h
│ │ │ ├── convolution_7x7.h
│ │ │ ├── convolution_arm.cpp
│ │ │ ├── convolution_arm.h
│ │ │ ├── convolutiondepthwise_3x3.h
│ │ │ ├── convolutiondepthwise_arm.cpp
│ │ │ ├── convolutiondepthwise_arm.h
│ │ │ ├── deconvolution_3x3.h
│ │ │ ├── deconvolution_4x4.h
│ │ │ ├── deconvolution_arm.cpp
│ │ │ ├── deconvolution_arm.h
│ │ │ ├── deconvolutiondepthwise_arm.cpp
│ │ │ ├── deconvolutiondepthwise_arm.h
│ │ │ ├── eltwise_arm.cpp
│ │ │ ├── eltwise_arm.h
│ │ │ ├── innerproduct_arm.cpp
│ │ │ ├── innerproduct_arm.h
│ │ │ ├── lrn_arm.cpp
│ │ │ ├── lrn_arm.h
│ │ │ ├── neon_mathfun.h
│ │ │ ├── pooling_2x2.h
│ │ │ ├── pooling_3x3.h
│ │ │ ├── pooling_arm.cpp
│ │ │ ├── pooling_arm.h
│ │ │ ├── prelu_arm.cpp
│ │ │ ├── prelu_arm.h
│ │ │ ├── relu_arm.cpp
│ │ │ ├── relu_arm.h
│ │ │ ├── scale_arm.cpp
│ │ │ ├── scale_arm.h
│ │ │ ├── sigmoid_arm.cpp
│ │ │ ├── sigmoid_arm.h
│ │ │ ├── softmax_arm.cpp
│ │ │ └── softmax_arm.h
│ │ ├── batchnorm.cpp
│ │ ├── batchnorm.h
│ │ ├── bias.cpp
│ │ ├── bias.h
│ │ ├── binaryop.cpp
│ │ ├── binaryop.h
│ │ ├── bnll.cpp
│ │ ├── bnll.h
│ │ ├── clip.cpp
│ │ ├── clip.h
│ │ ├── concat.cpp
│ │ ├── concat.h
│ │ ├── convolution.cpp
│ │ ├── convolution.h
│ │ ├── convolutiondepthwise.cpp
│ │ ├── convolutiondepthwise.h
│ │ ├── crop.cpp
│ │ ├── crop.h
│ │ ├── deconvolution.cpp
│ │ ├── deconvolution.h
│ │ ├── deconvolutiondepthwise.cpp
│ │ ├── deconvolutiondepthwise.h
│ │ ├── detectionoutput.cpp
│ │ ├── detectionoutput.h
│ │ ├── dropout.cpp
│ │ ├── dropout.h
│ │ ├── eltwise.cpp
│ │ ├── eltwise.h
│ │ ├── elu.cpp
│ │ ├── elu.h
│ │ ├── embed.cpp
│ │ ├── embed.h
│ │ ├── exp.cpp
│ │ ├── exp.h
│ │ ├── expanddims.cpp
│ │ ├── expanddims.h
│ │ ├── flatten.cpp
│ │ ├── flatten.h
│ │ ├── innerproduct.cpp
│ │ ├── innerproduct.h
│ │ ├── input.cpp
│ │ ├── input.h
│ │ ├── instancenorm.cpp
│ │ ├── instancenorm.h
│ │ ├── interp.cpp
│ │ ├── interp.h
│ │ ├── log.cpp
│ │ ├── log.h
│ │ ├── lrn.cpp
│ │ ├── lrn.h
│ │ ├── lstm.cpp
│ │ ├── lstm.h
│ │ ├── memorydata.cpp
│ │ ├── memorydata.h
│ │ ├── mvn.cpp
│ │ ├── mvn.h
│ │ ├── normalize.cpp
│ │ ├── normalize.h
│ │ ├── padding.cpp
│ │ ├── padding.h
│ │ ├── permute.cpp
│ │ ├── permute.h
│ │ ├── pooling.cpp
│ │ ├── pooling.h
│ │ ├── power.cpp
│ │ ├── power.h
│ │ ├── prelu.cpp
│ │ ├── prelu.h
│ │ ├── priorbox.cpp
│ │ ├── priorbox.h
│ │ ├── proposal.cpp
│ │ ├── proposal.h
│ │ ├── reduction.cpp
│ │ ├── reduction.h
│ │ ├── relu.cpp
│ │ ├── relu.h
│ │ ├── reshape.cpp
│ │ ├── reshape.h
│ │ ├── rnn.cpp
│ │ ├── rnn.h
│ │ ├── roipooling.cpp
│ │ ├── roipooling.h
│ │ ├── scale.cpp
│ │ ├── scale.h
│ │ ├── shufflechannel.cpp
│ │ ├── shufflechannel.h
│ │ ├── sigmoid.cpp
│ │ ├── sigmoid.h
│ │ ├── slice.cpp
│ │ ├── slice.h
│ │ ├── softmax.cpp
│ │ ├── softmax.h
│ │ ├── split.cpp
│ │ ├── split.h
│ │ ├── spp.cpp
│ │ ├── spp.h
│ │ ├── squeeze.cpp
│ │ ├── squeeze.h
│ │ ├── tanh.cpp
│ │ ├── tanh.h
│ │ ├── threshold.cpp
│ │ ├── threshold.h
│ │ ├── tile.cpp
│ │ ├── tile.h
│ │ ├── unaryop.cpp
│ │ ├── unaryop.h
│ │ └── x86
│ │ │ ├── avx_mathfun.h
│ │ │ ├── convolution_1x1.h
│ │ │ ├── convolution_3x3.h
│ │ │ ├── convolution_5x5.h
│ │ │ ├── convolution_x86.cpp
│ │ │ ├── convolution_x86.h
│ │ │ ├── convolutiondepthwise_3x3.h
│ │ │ ├── convolutiondepthwise_x86.cpp
│ │ │ ├── convolutiondepthwise_x86.h
│ │ │ └── sse_mathfun.h
│ ├── layer_type.h
│ ├── mat.cpp
│ ├── mat.h
│ ├── mat_pixel.cpp
│ ├── modelbin.cpp
│ ├── modelbin.h
│ ├── net.cpp
│ ├── net.h
│ ├── opencv.cpp
│ ├── opencv.h
│ ├── paramdict.cpp
│ ├── paramdict.h
│ └── platform.h.in
│ └── tools
│ ├── CMakeLists.txt
│ ├── caffe
│ ├── CMakeLists.txt
│ ├── caffe.proto
│ └── caffe2ncnn.cpp
│ ├── mxnet
│ ├── CMakeLists.txt
│ └── mxnet2ncnn.cpp
│ ├── ncnn2mem.cpp
│ ├── onnx
│ ├── CMakeLists.txt
│ ├── onnx.proto
│ └── onnx2ncnn.cpp
│ ├── pytorch
│ └── readme.txt
│ └── tensorflow
│ ├── CMakeLists.txt
│ ├── attr_value.proto
│ ├── function.proto
│ ├── graph.proto
│ ├── node_def.proto
│ ├── op_def.proto
│ ├── resource_handle.proto
│ ├── tensor.proto
│ ├── tensor_shape.proto
│ ├── tensorflow2ncnn.cpp
│ ├── types.proto
│ └── versions.proto
├── CMakeLists.txt
├── MOT17-11.mp4
├── README.md
├── detect_and_track
├── CMakeLists.txt
├── README.md
├── det_and_track.cpp
├── det_and_track.h
├── lk_tracker.cpp
├── lk_tracker.h
├── munkres.cpp
└── munkres.h
├── main.cpp
└── pedestrian_detection
├── CMakeLists.txt
├── object_detection.cpp
├── object_detection.h
├── ssdperson10695.bin
├── ssdperson10695.param
└── test.cpp
/.gitignore:
--------------------------------------------------------------------------------
1 | build/
2 | 3rd_party/ncnn/build/
3 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/.travis.yml:
--------------------------------------------------------------------------------
1 | sudo: false
2 | dist: trusty
3 |
4 | language: cpp
5 |
6 | compiler:
7 | - g++
8 |
9 | addons:
10 | apt:
11 | sources:
12 | - ubuntu-toolchain-r-test
13 | packages:
14 | - cmake
15 |
16 | install:
17 | - wget https://github.com/google/protobuf/archive/v3.5.1.tar.gz
18 | - tar -xzvf v3.5.1.tar.gz
19 | - pushd protobuf-3.5.1 && ./autogen.sh && ./configure --prefix=/usr && make -j2 && sudo make install && sudo ldconfig && popd
20 |
21 | script:
22 | - mkdir build
23 | - cd build
24 | - cmake ..
25 | - make -j2
26 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | if(CMAKE_TOOLCHAIN_FILE)
3 | set(LIBRARY_OUTPUT_PATH_ROOT ${CMAKE_BINARY_DIR} CACHE PATH "root for library output, set this to change where android libs are compiled to")
4 | # get absolute path; get_filename_component ABSOLUTE only resolves relative to the source dir, so use find_file here :(
5 | get_filename_component(CMAKE_TOOLCHAIN_FILE_NAME ${CMAKE_TOOLCHAIN_FILE} NAME)
6 | find_file(CMAKE_TOOLCHAIN_FILE ${CMAKE_TOOLCHAIN_FILE_NAME} PATHS ${CMAKE_SOURCE_DIR} NO_DEFAULT_PATH)
7 | message(STATUS "CMAKE_TOOLCHAIN_FILE = ${CMAKE_TOOLCHAIN_FILE}")
8 | endif()
9 |
10 | if(NOT DEFINED CMAKE_INSTALL_PREFIX)
11 | set(CMAKE_INSTALL_PREFIX "${CMAKE_BINARY_DIR}/install" CACHE PATH "Installation Directory")
12 | endif()
13 | message(STATUS "CMAKE_INSTALL_PREFIX = ${CMAKE_INSTALL_PREFIX}")
14 |
15 | cmake_minimum_required(VERSION 2.8.10)
16 |
17 | if(NOT CMAKE_BUILD_TYPE)
18 | set(CMAKE_BUILD_TYPE release CACHE STRING "Choose the type of build" FORCE)
19 | endif()
20 |
21 | project(ncnn)
22 |
23 | option(NCNN_OPENMP "openmp support" ON)
24 | option(NCNN_STDIO "load model from external file" ON)
25 | option(NCNN_STRING "plain and verbose string" ON)
26 | option(NCNN_OPENCV "minimal opencv structure emulation" OFF)
27 | option(NCNN_BENCHMARK "print benchmark information for every layer" OFF)
28 |
29 | if(NCNN_OPENMP)
30 | find_package(OpenMP)
31 | if(OpenMP_CXX_FOUND OR OPENMP_FOUND)
32 | set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
33 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
34 | endif()
35 | endif()
36 |
37 | if(WIN32)
38 | add_definitions(-D_SCL_SECURE_NO_WARNINGS -D_CRT_SECURE_NO_DEPRECATE)
39 | else()
40 | add_definitions(-Wall -Wextra -Wno-unused-function)
41 |
42 | if(CMAKE_BUILD_TYPE MATCHES "(Release|RELEASE|release)")
43 | add_definitions(-fPIC)
44 | add_definitions(-Ofast)
45 |
46 | add_definitions(-ffast-math)
47 | endif()
48 | # add_definitions(-march=native)
49 |
50 | # add_definitions(-flto)
51 |
52 | add_definitions(-fvisibility=hidden -fvisibility-inlines-hidden)
53 | endif()
54 |
55 | if(ANDROID)
56 | # disable shared library on android
57 | set_property(GLOBAL PROPERTY TARGET_SUPPORTS_SHARED_LIBS FALSE)
58 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti -fno-exceptions")
59 | elseif(IOS)
60 | # disable shared library on xcode ios
61 | set_property(GLOBAL PROPERTY TARGET_SUPPORTS_SHARED_LIBS FALSE)
62 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-rtti -fno-exceptions")
63 | endif()
64 |
65 | ##############################################
66 |
67 | # add_subdirectory(examples)
68 | # add_subdirectory(benchmark)
69 | add_subdirectory(src)
70 | #add_subdirectory(tools)
71 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/Info.plist:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3 | <plist version="1.0">
4 | <dict>
5 | <key>CFBundleName</key>
6 | <string>ncnn</string>
7 | <key>CFBundleIdentifier</key>
8 | <string>com.tencent.ncnn</string>
9 | <key>CFBundleVersion</key>
10 | <string>1.0</string>
11 | <key>CFBundleShortVersionString</key>
12 | <string>1.0</string>
13 | <key>CFBundleSignature</key>
14 | <string>????</string>
15 | <key>CFBundlePackageType</key>
16 | <string>FMWK</string>
17 | </dict>
18 | </plist>
19 | 
--------------------------------------------------------------------------------
/3rd_party/ncnn/autotest/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | include(CTest)
2 | include(cmake/DownloadProject.cmake)
3 |
4 | enable_testing()
5 |
6 | # download gtest at first
7 | download_project(
8 | PROJ googletest
9 | GIT_REPOSITORY https://github.com/google/googletest.git
10 | GIT_TAG master
11 | UPDATE_DISCONNECTED 1
12 | )
13 | add_subdirectory(${googletest_SOURCE_DIR} ${googletest_BINARY_DIR})
14 |
15 | # include directory
16 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src)
17 | include_directories(${CMAKE_CURRENT_BINARY_DIR}/../src)
18 |
19 | # add cpp and lib files
20 | add_executable(autotest autotest.cpp)
21 |
22 | target_link_libraries(autotest ${project_library_target_name} ${REQUIRED_LIBRARIES} gtest gmock)
23 | target_link_libraries(autotest ncnn)
24 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/autotest/autotest.cpp:
--------------------------------------------------------------------------------
1 | #include
2 |
3 | #include "blob.h"
4 | #include "net.h"
5 | #include "layer.h"
6 | #include "mat.h"
7 | #include "opencv.h"
8 | #include "platform.h"
9 |
10 | #include "test_convolution.h"
11 | #include "test_innerproduct.h"
12 | #include "gtest/gtest.h"
13 |
14 | int main(int argc, char **argv){
15 | ::testing::InitGoogleTest(&argc, argv);
16 | return RUN_ALL_TESTS();
17 | }
18 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/autotest/cmake/DownloadProject.CMakeLists.cmake.in:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 2.8.2)
2 |
3 | project(${DL_ARGS_PROJ}-download NONE)
4 |
5 | include(ExternalProject)
6 | ExternalProject_Add(${DL_ARGS_PROJ}-download
7 | ${DL_ARGS_UNPARSED_ARGUMENTS}
8 | SOURCE_DIR "${DL_ARGS_SOURCE_DIR}"
9 | BINARY_DIR "${DL_ARGS_BINARY_DIR}"
10 | CONFIGURE_COMMAND ""
11 | BUILD_COMMAND ""
12 | INSTALL_COMMAND ""
13 | TEST_COMMAND ""
14 | )
15 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/autotest/test_convolution.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include "gtest/gtest.h"
3 | #include "layer/convolution.h"
4 | using namespace ncnn;
5 |
6 | /*
7 | forward - pass:
8 | [0,1,2,3,4,
9 | 1,2,3,4,5, [1,1,1, [ 9.5, 18.5,
10 | 2,3,4,5,6, * 0.5* 1,1,1, + 0.5 =
11 | 3,4,5,6,7, 1,1,1] 18.5, 27.5]
12 | 4,5,6,7,8]
13 | */
14 |
15 | TEST(convolution, forward)
16 | {
17 | // layer params
18 | Convolution convolution_layer;
19 | convolution_layer.num_output = 1;
20 | convolution_layer.kernel_size = 3;
21 | convolution_layer.dilation = 1;
22 | convolution_layer.stride = 2;
23 | convolution_layer.pad = 0;
24 | convolution_layer.bias_term = 1;
25 | convolution_layer.weight_data_size = 9;
26 |
27 | // input & output
28 | float_t in[] = {
29 | 0.0f, 1.0f, 2.0f, 3.0f, 4.0f,
30 | 1.0f, 2.0f, 3.0f, 4.0f, 5.0f,
31 | 2.0f, 3.0f, 4.0f, 5.0f, 6.0f,
32 | 3.0f, 4.0f, 5.0f, 6.0f, 7.0f,
33 | 4.0f, 5.0f, 6.0f, 7.0f, 8.0f
34 | };
35 |
36 | float_t expected_out[] = {
37 | 9.5f, 18.5f,
38 | 18.5f, 27.5f
39 | };
40 |
41 |
42 | // weights & bias
43 | float_t w[] = {
44 | 0.5f, 0.5f, 0.5f,
45 | 0.5f, 0.5f, 0.5f,
46 | 0.5f, 0.5f, 0.5f
47 | };
48 |
49 | float_t b[] = {
50 | 0.5f
51 | };
52 |
53 | // forward
54 | Mat mat_in(5, 5, 1, in);
55 | Mat mat_out;
56 |
57 | convolution_layer.bias_data.data = b;
58 | convolution_layer.weight_data.data = w;
59 | convolution_layer.forward(mat_in, mat_out);
60 |
61 | // check expect
62 | EXPECT_EQ(mat_out.w, 2);
63 | EXPECT_EQ(mat_out.h, 2);
64 | EXPECT_EQ(mat_out.c, 1);
65 | for (int i = 0; i < _countof(expected_out); ++i)
66 | {
67 | EXPECT_NEAR(mat_out[i], expected_out[i], 1E-5);
68 | }
69 |
70 | }
71 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/autotest/test_innerproduct.h:
--------------------------------------------------------------------------------
1 | #pragma once
2 | #include "gtest/gtest.h"
3 | #include "layer/innerproduct.h"
4 |
5 | /*
6 | forward - pass:
7 | [0,1,2,3] * [1,1,1,1 + [0.5, = [6.5,
8 | 1,1,1,1] 0.5] 6.5]
9 | */
10 |
11 | TEST(innerproduct, forward)
12 | {
13 | // layer params
14 | InnerProduct inner_product_layer;
15 | inner_product_layer.num_output = 2; // W
16 | inner_product_layer.bias_term = 1; // bias
17 | inner_product_layer.weight_data_size = 3; // W + bias
18 |
19 |
20 | // input & output
21 | float_t in[] = {
22 | 0.0f, 1.0f, 2.0f, 3.0f
23 | };
24 |
25 | float_t expected_out[] = {
26 | 6.5, 6.5 /// 0+1+2+3+0.5
27 | };
28 |
29 |
30 | // weights & bias
31 | float_t w[] = {
32 | 1.0f, 1.0f, 1.0f, 1.0f,
33 | 1.0f, 1.0f, 1.0f, 1.0f
34 | };
35 |
36 | float_t b[] = {
37 | 0.5f, 0.5f
38 | };
39 |
40 | // forward
41 | Mat mat_in(4, in);
42 | Mat mat_out;
43 |
44 | inner_product_layer.bias_data.data = b;
45 | inner_product_layer.weight_data.data = w;
46 | inner_product_layer.forward(mat_in, mat_out);
47 |
48 | // check expect
49 | EXPECT_EQ(mat_out.c, 2);
50 | for (int i = 0; i < _countof(expected_out); ++i)
51 | {
52 | float output_value = *(mat_out.data + mat_out.cstep * i);
53 | EXPECT_NEAR(output_value, expected_out[i], 1E-5);
54 | }
55 | }
56 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/benchmark/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src)
3 | include_directories(${CMAKE_CURRENT_BINARY_DIR}/../src)
4 |
5 | add_executable(benchncnn benchncnn.cpp)
6 | set_property(TARGET benchncnn PROPERTY COMPILE_FLAGS "-fpie")
7 | set_property(TARGET benchncnn PROPERTY LINK_FLAGS "-pie")
8 | target_link_libraries(benchncnn ncnn)
9 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/benchmark/alexnet.param:
--------------------------------------------------------------------------------
1 | 7767517
2 | 24 24
3 | Input data 0 1 data 0=227 1=227 2=3
4 | Convolution conv1 1 1 data conv1 0=96 1=11 2=1 3=4 4=0 5=1 6=34848
5 | ReLU relu1 1 1 conv1 conv1_relu1
6 | LRN norm1 1 1 conv1_relu1 norm1 0=0 1=5 2=0.000100 3=0.750000
7 | Pooling pool1 1 1 norm1 pool1 0=0 1=3 2=2 3=0 4=0
8 | ConvolutionDepthWise conv2 1 1 pool1 conv2 0=256 1=5 2=1 3=1 4=2 5=1 6=307200 7=2
9 | ReLU relu2 1 1 conv2 conv2_relu2
10 | LRN norm2 1 1 conv2_relu2 norm2 0=0 1=5 2=0.000100 3=0.750000
11 | Pooling pool2 1 1 norm2 pool2 0=0 1=3 2=2 3=0 4=0
12 | Convolution conv3 1 1 pool2 conv3 0=384 1=3 2=1 3=1 4=1 5=1 6=884736
13 | ReLU relu3 1 1 conv3 conv3_relu3
14 | ConvolutionDepthWise conv4 1 1 conv3_relu3 conv4 0=384 1=3 2=1 3=1 4=1 5=1 6=663552 7=2
15 | ReLU relu4 1 1 conv4 conv4_relu4
16 | ConvolutionDepthWise conv5 1 1 conv4_relu4 conv5 0=256 1=3 2=1 3=1 4=1 5=1 6=442368 7=2
17 | ReLU relu5 1 1 conv5 conv5_relu5
18 | Pooling pool5 1 1 conv5_relu5 pool5 0=0 1=3 2=2 3=0 4=0
19 | InnerProduct fc6 1 1 pool5 fc6 0=4096 1=1 2=37748736
20 | ReLU relu6 1 1 fc6 fc6_relu6
21 | Dropout drop6 1 1 fc6_relu6 fc6_drop6
22 | InnerProduct fc7 1 1 fc6_drop6 fc7 0=4096 1=1 2=16777216
23 | ReLU relu7 1 1 fc7 fc7_relu7
24 | Dropout drop7 1 1 fc7_relu7 fc7_drop7
25 | InnerProduct fc8 1 1 fc7_drop7 fc8 0=1000 1=1 2=4096000
26 | Softmax prob 1 1 fc8 prob 0=0
27 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ##### android armv7
4 | mkdir -p build-android-armv7
5 | pushd build-android-armv7
6 | cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI="armeabi-v7a" -DANDROID_ARM_NEON=ON -DANDROID_PLATFORM=android-14 ..
7 | make
8 | make install
9 | popd
10 |
11 | ##### android aarch64
12 | mkdir -p build-android-aarch64
13 | pushd build-android-aarch64
14 | cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI="arm64-v8a" -DANDROID_PLATFORM=android-21 ..
15 | make
16 | make install
17 | popd
18 |
19 | ##### android armv7 without neon
20 | mkdir -p build-android-armv7-without-neon
21 | pushd build-android-armv7-without-neon
22 | cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI="armeabi-v7a" -DANDROID_PLATFORM=android-14 ..
23 | make
24 | make install
25 | popd
26 |
27 | ##### ios armv7 arm64
28 | mkdir -p build-ios
29 | pushd build-ios
30 | cmake -DCMAKE_TOOLCHAIN_FILE=../iosxc.toolchain.cmake ..
31 | make
32 | make install
33 | popd
34 |
35 | ##### ios simulator i386 x86_64
36 | mkdir -p build-ios-sim
37 | pushd build-ios-sim
38 | cmake -DCMAKE_TOOLCHAIN_FILE=../iossimxc.toolchain.cmake ..
39 | make
40 | make install
41 | popd
42 |
43 | ##### MacOS
44 | mkdir -p build-mac
45 | pushd build-mac
46 | cmake -DNCNN_OPENMP=OFF \
47 | -DNCNN_OPENCV=ON \
48 | -DNCNN_BENCHMARK=ON \
49 | ..
50 | make -j 8
51 | make install
52 | popd
53 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/images/128-ncnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zlingkang/pedestrian_detection_and_tracking/b8c5c2508c745bf0f4bb148ae291d0a8b8b10eac/3rd_party/ncnn/images/128-ncnn.png
--------------------------------------------------------------------------------
/3rd_party/ncnn/images/16-ncnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zlingkang/pedestrian_detection_and_tracking/b8c5c2508c745bf0f4bb148ae291d0a8b8b10eac/3rd_party/ncnn/images/16-ncnn.png
--------------------------------------------------------------------------------
/3rd_party/ncnn/images/256-ncnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zlingkang/pedestrian_detection_and_tracking/b8c5c2508c745bf0f4bb148ae291d0a8b8b10eac/3rd_party/ncnn/images/256-ncnn.png
--------------------------------------------------------------------------------
/3rd_party/ncnn/images/32-ncnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zlingkang/pedestrian_detection_and_tracking/b8c5c2508c745bf0f4bb148ae291d0a8b8b10eac/3rd_party/ncnn/images/32-ncnn.png
--------------------------------------------------------------------------------
/3rd_party/ncnn/images/64-ncnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zlingkang/pedestrian_detection_and_tracking/b8c5c2508c745bf0f4bb148ae291d0a8b8b10eac/3rd_party/ncnn/images/64-ncnn.png
--------------------------------------------------------------------------------
/3rd_party/ncnn/iossimxc.toolchain.cmake:
--------------------------------------------------------------------------------
1 | # Standard settings
2 | # set(UNIX True)
3 | # set(Darwin True)
4 | # set(IOS True)
5 | set (CMAKE_SYSTEM_NAME Darwin)
6 | set (CMAKE_SYSTEM_VERSION 1)
7 | set (UNIX True)
8 | set (APPLE True)
9 | set (IOS True)
10 |
11 | # suppress -rdynamic
12 | # set(CMAKE_SYSTEM_NAME Generic)
13 |
14 | set(CMAKE_C_COMPILER i386-apple-darwin11-clang)
15 | set(CMAKE_CXX_COMPILER i386-apple-darwin11-clang++)
16 |
17 | set(_CMAKE_TOOLCHAIN_PREFIX i386-apple-darwin11-)
18 |
19 | set(CMAKE_IOS_SDK_ROOT "/home/nihui/osd/cctools-port/usage_examples/ios_toolchain/target-sim/SDK/")
20 |
21 | # Set the sysroot default to the most recent SDK
22 | set(CMAKE_OSX_SYSROOT ${CMAKE_IOS_SDK_ROOT} CACHE PATH "Sysroot used for iOS Simulator support")
23 |
24 | # set the architecture for iOS
25 | # set(IOS_ARCH i386)
26 | # set(IOS_ARCH x86_64)
27 | set(IOS_ARCH i386;x86_64)
28 |
29 | set(CMAKE_OSX_ARCHITECTURES ${IOS_ARCH} CACHE string "Build architecture for iOS Simulator")
30 |
31 | # Set the find root to the iOS developer roots and to user defined paths
32 | set(CMAKE_FIND_ROOT_PATH ${CMAKE_IOS_DEVELOPER_ROOT} ${CMAKE_IOS_SDK_ROOT} ${CMAKE_PREFIX_PATH} CACHE string "iOS Simulator find search path root")
33 |
34 | # searching for frameworks only
35 | set(CMAKE_FIND_FRAMEWORK FIRST)
36 |
37 | # set up the default search directories for frameworks
38 | set(CMAKE_SYSTEM_FRAMEWORK_PATH
39 | ${CMAKE_IOS_SDK_ROOT}/System/Library/Frameworks
40 | )
41 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/iosxc.toolchain.cmake:
--------------------------------------------------------------------------------
1 | # Standard settings
2 | # set(UNIX True)
3 | # set(Darwin True)
4 | # set(IOS True)
5 | set (CMAKE_SYSTEM_NAME Darwin)
6 | set (CMAKE_SYSTEM_VERSION 1)
7 | set (UNIX True)
8 | set (APPLE True)
9 | set (IOS True)
10 |
11 | # suppress -rdynamic
12 | # set(CMAKE_SYSTEM_NAME Generic)
13 |
14 | set(CMAKE_C_COMPILER arm-apple-darwin11-clang)
15 | set(CMAKE_CXX_COMPILER arm-apple-darwin11-clang++)
16 |
17 | set(_CMAKE_TOOLCHAIN_PREFIX arm-apple-darwin11-)
18 |
19 | set(CMAKE_IOS_SDK_ROOT "/home/nihui/osd/cctools-port/usage_examples/ios_toolchain/target/SDK/")
20 |
21 | # Set the sysroot default to the most recent SDK
22 | set(CMAKE_OSX_SYSROOT ${CMAKE_IOS_SDK_ROOT} CACHE PATH "Sysroot used for iOS support")
23 |
24 | # set the architecture for iOS
25 | # set(IOS_ARCH arm64)
26 | set(IOS_ARCH armv7;arm64)
27 |
28 | set(CMAKE_OSX_ARCHITECTURES ${IOS_ARCH} CACHE string "Build architecture for iOS")
29 |
30 | # Set the find root to the iOS developer roots and to user defined paths
31 | set(CMAKE_FIND_ROOT_PATH ${CMAKE_IOS_DEVELOPER_ROOT} ${CMAKE_IOS_SDK_ROOT} ${CMAKE_PREFIX_PATH} CACHE string "iOS find search path root")
32 |
33 | # searching for frameworks only
34 | set(CMAKE_FIND_FRAMEWORK FIRST)
35 |
36 | # set up the default search directories for frameworks
37 | set(CMAKE_SYSTEM_FRAMEWORK_PATH
38 | ${CMAKE_IOS_SDK_ROOT}/System/Library/Frameworks
39 | )
40 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/package.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 |
3 | NAME=ncnn
4 |
5 | ##### package android lib
6 | ANDROIDPKGNAME=${NAME}-android-lib
7 | rm -rf $ANDROIDPKGNAME
8 | mkdir -p $ANDROIDPKGNAME
9 | mkdir -p $ANDROIDPKGNAME/armeabi-v7a
10 | mkdir -p $ANDROIDPKGNAME/arm64-v8a
11 | mkdir -p $ANDROIDPKGNAME/include
12 | cp build-android-armv7/install/lib/lib${NAME}.a $ANDROIDPKGNAME/armeabi-v7a/
13 | cp build-android-aarch64/install/lib/lib${NAME}.a $ANDROIDPKGNAME/arm64-v8a/
14 | cp build-android-aarch64/install/include/* $ANDROIDPKGNAME/include/
15 | rm -f $ANDROIDPKGNAME.zip
16 | zip -9 -r $ANDROIDPKGNAME.zip $ANDROIDPKGNAME
17 |
18 | ##### package ios framework
19 | IOSPKGNAME=${NAME}.framework
20 | rm -rf $IOSPKGNAME
21 | mkdir -p $IOSPKGNAME/Versions/A/Headers
22 | mkdir -p $IOSPKGNAME/Versions/A/Resources
23 | ln -s A $IOSPKGNAME/Versions/Current
24 | ln -s Versions/Current/Headers $IOSPKGNAME/Headers
25 | ln -s Versions/Current/Resources $IOSPKGNAME/Resources
26 | ln -s Versions/Current/${NAME} $IOSPKGNAME/${NAME}
27 | lipo -create \
28 | build-ios/install/lib/lib${NAME}.a \
29 | build-ios-sim/install/lib/lib${NAME}.a \
30 | -o $IOSPKGNAME/Versions/A/${NAME}
31 | cp -r build-ios/install/include/* $IOSPKGNAME/Versions/A/Headers/
32 | cp Info.plist ${IOSPKGNAME}/Versions/A/Resources/
33 | rm -f $IOSPKGNAME.zip
34 | zip -9 -y -r $IOSPKGNAME.zip $IOSPKGNAME
35 |
36 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/benchmark.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef NCNN_BENCHMARK_H
16 | #define NCNN_BENCHMARK_H
17 |
18 | #include "platform.h"
19 | #include "mat.h"
20 | #include "layer.h"
21 |
22 | namespace ncnn {
23 |
24 | // get now timestamp in ms
25 | double get_current_time();
26 |
27 | #if NCNN_BENCHMARK
28 |
29 | void benchmark(const Layer* layer, double start, double end);
30 | void benchmark(const Layer* layer, const Mat& bottom_blob, Mat& top_blob, double start, double end);
31 |
32 | #endif // NCNN_BENCHMARK
33 |
34 | } // namespace ncnn
35 |
36 | #endif // NCNN_BENCHMARK_H
37 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/blob.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "blob.h"
16 |
17 | namespace ncnn {
18 |
19 | Blob::Blob()
20 | {
21 | producer = -1;
22 | }
23 |
24 | } // namespace ncnn
25 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/blob.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef NCNN_BLOB_H
16 | #define NCNN_BLOB_H
17 |
18 | #include <string>
19 | #include <vector>
20 | #include "platform.h"
21 |
22 | namespace ncnn {
23 |
24 | class Blob
25 | {
26 | public:
27 | // empty
28 | Blob();
29 |
30 | public:
31 | #if NCNN_STRING
32 | // blob name
33 | std::string name;
34 | #endif // NCNN_STRING
35 | // layer index which produce this blob as output
36 | int producer;
37 | // layer index which need this blob as input
38 | std::vector<int> consumers;
39 | };
40 |
41 | } // namespace ncnn
42 |
43 | #endif // NCNN_BLOB_H
44 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/cpu.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef NCNN_CPU_H
16 | #define NCNN_CPU_H
17 |
18 | namespace ncnn {
19 |
20 | // test optional cpu features
21 | // neon = armv7 neon or aarch64 asimd
22 | int cpu_support_arm_neon();
23 | // vfpv4 = armv7 fp16 + fma
24 | int cpu_support_arm_vfpv4();
25 | // asimdhp = aarch64 asimd half precision
26 | int cpu_support_arm_asimdhp();
27 |
28 | // cpu info
29 | int get_cpu_count();
30 |
31 | // bind all threads on little clusters if powersave enabled
32 | // affects HMP arch cpu like ARM big.LITTLE
33 | // only implemented on android at the moment
34 | // switching powersave is expensive and not thread-safe
35 | // 0 = all cores enabled(default)
36 | // 1 = only little clusters enabled
37 | // 2 = only big clusters enabled
38 | // return 0 if success for setter function
39 | int get_cpu_powersave();
40 | int set_cpu_powersave(int powersave);
41 |
42 | // misc function wrapper for openmp routines
43 | int get_omp_num_threads();
44 | void set_omp_num_threads(int num_threads);
45 |
46 | int get_omp_dynamic();
47 | void set_omp_dynamic(int dynamic);
48 |
49 | } // namespace ncnn
50 |
51 | #endif // NCNN_CPU_H
52 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/absval.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "absval.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(AbsVal)
20 |
// AbsVal layer: element-wise absolute value.
AbsVal::AbsVal()
{
    // single input blob, single output blob
    one_blob_only = true;
    // the result may overwrite the input buffer in place
    support_inplace = true;
}
26 |
27 | int AbsVal::forward_inplace(Mat& bottom_top_blob) const
28 | {
29 | int w = bottom_top_blob.w;
30 | int h = bottom_top_blob.h;
31 | int channels = bottom_top_blob.c;
32 | int size = w * h;
33 |
34 | #pragma omp parallel for
35 | for (int q=0; q
17 | #include
18 |
19 | namespace ncnn {
20 |
21 | DEFINE_LAYER_CREATOR(ArgMax)
22 |
ArgMax::ArgMax()
{
    // single input blob, single output blob; not an in-place op
    one_blob_only = true;
}
27 |
// Read layer parameters from the param dictionary.
// id 0: out_max_val - also emit the max values alongside the indices (default 0 = off)
// id 1: topk        - number of top entries to report (default 1)
int ArgMax::load_param(const ParamDict& pd)
{
    out_max_val = pd.get(0, 0);
    topk = pd.get(1, 1);

    return 0;
}
35 |
36 | int ArgMax::forward(const Mat& bottom_blob, Mat& top_blob) const
37 | {
38 | int size = bottom_blob.total();
39 |
40 | if (out_max_val)
41 | top_blob.create(topk, 2);
42 | else
43 | top_blob.create(topk, 1);
44 | if (top_blob.empty())
45 | return -100;
46 |
47 | const float* ptr = bottom_blob;
48 |
49 | // partial sort topk with index
50 | // optional value
51 | std::vector< std::pair > vec;
52 | vec.resize(size);
53 | for (int i=0; i >());
60 |
61 | float* outptr = top_blob;
62 | if (out_max_val)
63 | {
64 | float* valptr = outptr + topk;
65 | for (int i=0; i
19 | #endif // __ARM_NEON
20 |
21 | namespace ncnn {
22 |
23 | DEFINE_LAYER_CREATOR(Bias_arm)
24 |
25 | int Bias_arm::forward_inplace(Mat& bottom_top_blob) const
26 | {
27 | int w = bottom_top_blob.w;
28 | int h = bottom_top_blob.h;
29 | int channels = bottom_top_blob.c;
30 | int size = w * h;
31 |
32 | const float* bias_ptr = bias_data;
33 | #pragma omp parallel for
34 | for (int q=0; q> 2;
42 | int remain = size - (nn << 2);
43 | #else
44 | int remain = size;
45 | #endif // __ARM_NEON
46 |
47 | #if __ARM_NEON
48 | float32x4_t _bias = vdupq_n_f32(bias);
49 | for (; nn>0; nn--)
50 | {
51 | float32x4_t _p = vld1q_f32(ptr);
52 | float32x4_t _outp = vaddq_f32(_p, _bias);
53 | vst1q_f32(ptr, _outp);
54 |
55 | ptr += 4;
56 | }
57 | #endif // __ARM_NEON
58 |
59 | for (; remain>0; remain--)
60 | {
61 | *ptr = *ptr + bias;
62 |
63 | ptr++;
64 | }
65 | }
66 |
67 | return 0;
68 | }
69 |
70 | } // namespace ncnn
71 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/arm/bias_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_BIAS_ARM_H
16 | #define LAYER_BIAS_ARM_H
17 |
18 | #include "bias.h"
19 |
20 | namespace ncnn {
21 |
// ARM-optimized Bias layer; overrides only the in-place forward pass
// (implementation in bias_arm.cpp, NEON-accelerated when __ARM_NEON is set).
class Bias_arm : public Bias
{
public:
    virtual int forward_inplace(Mat& bottom_top_blob) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_BIAS_ARM_H
31 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/arm/convolution_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CONVOLUTION_ARM_H
16 | #define LAYER_CONVOLUTION_ARM_H
17 |
18 | #include "convolution.h"
19 |
20 | namespace ncnn {
21 |
// signature of a specialized convolution kernel:
// (bottom_blob, top_blob, weight_data, bias_data)
typedef void (*conv_func)(const Mat&, Mat&, const Mat&, const Mat&);

// ARM-optimized Convolution layer.
class Convolution_arm : public Convolution
{
public:
    virtual int load_param(const ParamDict& pd);

    virtual int load_model(const ModelBin& mb);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;
    // run a dilated convolution by delegating the dense part to `conv`
    virtual int forwardDilation(const Mat& bottom_blob, Mat& top_blob, conv_func conv) const;

public:
    // whether the 3x3 winograd fast path is selected (decided at model load)
    bool use_winograd3x3;
    // weights transformed for the winograd64 kernel when use_winograd3x3 is set
    Mat weight_3x3_winograd64_data;
};
38 |
39 | } // namespace ncnn
40 |
41 | #endif // LAYER_CONVOLUTION_ARM_H
42 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/arm/convolutiondepthwise_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CONVOLUTIONDEPTHWISE_ARM_H
16 | #define LAYER_CONVOLUTIONDEPTHWISE_ARM_H
17 |
18 | #include "convolutiondepthwise.h"
19 |
20 | namespace ncnn {
21 |
// ARM-optimized depthwise convolution.
class ConvolutionDepthWise_arm : public ConvolutionDepthWise
{
public:
    ConvolutionDepthWise_arm();
    virtual ~ConvolutionDepthWise_arm();

    virtual int load_model(const ModelBin& mb);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;

public:
    // per-group fallback convolution ops, owned by this layer
    // NOTE(review): element type lost in extraction here - presumably a
    // vector of Layer pointers; confirm against upstream source
    std::vector group_ops;
};
35 |
36 | } // namespace ncnn
37 |
38 | #endif // LAYER_CONVOLUTIONDEPTHWISE_ARM_H
39 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/arm/deconvolution_arm.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "deconvolution_arm.h"
16 |
17 | namespace ncnn {
18 |
19 | #include "deconvolution_4x4.h"
20 | #include "deconvolution_3x3.h"
21 |
22 | DEFINE_LAYER_CREATOR(Deconvolution_arm)
23 |
24 | int Deconvolution_arm::forward(const Mat& bottom_blob, Mat& top_blob) const
25 | {
26 | // deconvolv with NxN kernel
27 | // value = value + bias
28 |
29 | if (kernel_w != kernel_h || stride_w != stride_h)
30 | {
31 | return Deconvolution::forward(bottom_blob, top_blob);
32 | }
33 |
34 | const int kernel_size = kernel_w;
35 | const int stride = stride_w;
36 |
37 | if ((kernel_size != 3 && kernel_size != 4) || stride > 2 || dilation_w != 1 || dilation_h != 1)
38 | {
39 | return Deconvolution::forward(bottom_blob, top_blob);
40 | }
41 |
42 | typedef void (*deconv_func)(const Mat&, Mat&, const Mat&, const Mat&);
43 |
44 | // kernel_size x stride
45 | deconv_func deconv_func_table[2][2] =
46 | {
47 | {
48 | deconv3x3s1_neon,
49 | deconv3x3s2_neon
50 | }, // kernel_size = 3
51 | {
52 | deconv4x4s1_neon,
53 | deconv4x4s2_neon
54 | } // kernel_size = 4
55 | };
56 |
57 | deconv_func deconv = deconv_func_table[kernel_size-3][stride-1];
58 | if (!deconv)
59 | {
60 | return Deconvolution::forward(bottom_blob, top_blob);
61 | }
62 |
63 | int w = bottom_blob.w;
64 | int h = bottom_blob.h;
65 |
66 | int outw = (w - 1) * stride + kernel_size;
67 | int outh = (h - 1) * stride + kernel_size;
68 |
69 | Mat top_blob_bordered = top_blob;
70 | top_blob_bordered.create(outw, outh, num_output);
71 | if (top_blob_bordered.empty())
72 | return -100;
73 |
74 | deconv(bottom_blob, top_blob_bordered, weight_data, bias_data);
75 |
76 | top_blob = top_blob_bordered;
77 |
78 | if (pad_w > 0 || pad_h > 0)
79 | {
80 | copy_cut_border(top_blob_bordered, top_blob, pad_h, pad_h, pad_w, pad_w);
81 | if (top_blob.empty())
82 | return -100;
83 |
84 | outw = top_blob.w;
85 | outh = top_blob.h;
86 | }
87 |
88 | return 0;
89 | }
90 |
91 | } // namespace ncnn
92 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/arm/deconvolution_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_DECONVOLUTION_ARM_H
16 | #define LAYER_DECONVOLUTION_ARM_H
17 |
18 | #include "deconvolution.h"
19 |
20 | namespace ncnn {
21 |
// ARM-optimized Deconvolution layer; overrides forward with NEON kernels
// for supported 3x3/4x4 configurations (see deconvolution_arm.cpp).
class Deconvolution_arm : public Deconvolution
{
public:
    virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_DECONVOLUTION_ARM_H
31 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/arm/deconvolutiondepthwise_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_DECONVOLUTIONDEPTHWISE_ARM_H
16 | #define LAYER_DECONVOLUTIONDEPTHWISE_ARM_H
17 |
18 | #include "deconvolutiondepthwise.h"
19 |
20 | namespace ncnn {
21 |
// ARM-optimized depthwise deconvolution; overrides the forward pass only.
class DeconvolutionDepthWise_arm : public DeconvolutionDepthWise
{
public:
    virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_DECONVOLUTIONDEPTHWISE_ARM_H
31 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/arm/eltwise_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_ELTWISE_ARM_H
16 | #define LAYER_ELTWISE_ARM_H
17 |
18 | #include "eltwise.h"
19 |
20 | namespace ncnn {
21 |
// ARM-optimized element-wise operation layer (multi-input, multi-output).
// NOTE(review): the vector element type was lost in extraction below -
// presumably std::vector of Mat; confirm against upstream source.
class Eltwise_arm : public Eltwise
{
public:
    virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_ELTWISE_ARM_H
31 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/arm/innerproduct_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_INNERPRODUCT_ARM_H
16 | #define LAYER_INNERPRODUCT_ARM_H
17 |
18 | #include "innerproduct.h"
19 |
20 | namespace ncnn {
21 |
// ARM-optimized fully-connected (inner product) layer.
class InnerProduct_arm : public InnerProduct
{
public:
    virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_INNERPRODUCT_ARM_H
31 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/arm/lrn_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_LRN_ARM_H
16 | #define LAYER_LRN_ARM_H
17 |
18 | #include "lrn.h"
19 |
20 | namespace ncnn {
21 |
// ARM-optimized local response normalization; in-place forward only.
class LRN_arm : public LRN
{
public:
    virtual int forward_inplace(Mat& bottom_top_blob) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_LRN_ARM_H
31 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/arm/pooling_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_POOLING_ARM_H
16 | #define LAYER_POOLING_ARM_H
17 |
18 | #include "pooling.h"
19 |
20 | namespace ncnn {
21 |
// ARM-optimized pooling layer; overrides the forward pass only.
class Pooling_arm : public Pooling
{
public:
    virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_POOLING_ARM_H
31 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/arm/prelu_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_PRELU_ARM_H
16 | #define LAYER_PRELU_ARM_H
17 |
18 | #include "prelu.h"
19 |
20 | namespace ncnn {
21 |
// ARM-optimized PReLU activation; in-place forward only.
class PReLU_arm : public PReLU
{
public:
    virtual int forward_inplace(Mat& bottom_top_blob) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_PRELU_ARM_H
31 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/arm/relu_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_RELU_ARM_H
16 | #define LAYER_RELU_ARM_H
17 |
18 | #include "relu.h"
19 |
20 | namespace ncnn {
21 |
// ARM-optimized ReLU activation; in-place forward only.
class ReLU_arm : public ReLU
{
public:
    virtual int forward_inplace(Mat& bottom_top_blob) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_RELU_ARM_H
31 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/arm/scale_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_SCALE_ARM_H
16 | #define LAYER_SCALE_ARM_H
17 |
18 | #include "scale.h"
19 |
20 | namespace ncnn {
21 |
// ARM-optimized Scale layer; in-place forward only.
class Scale_arm : public Scale
{
public:
    virtual int forward_inplace(Mat& bottom_top_blob) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_SCALE_ARM_H
31 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/arm/sigmoid_arm.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "sigmoid_arm.h"
16 |
17 | #if __ARM_NEON
18 | #include
19 | #include "neon_mathfun.h"
20 | #endif // __ARM_NEON
21 |
22 | #include
23 |
24 | namespace ncnn {
25 |
26 | DEFINE_LAYER_CREATOR(Sigmoid_arm)
27 |
28 | int Sigmoid_arm::forward_inplace(Mat& bottom_top_blob) const
29 | {
30 | int w = bottom_top_blob.w;
31 | int h = bottom_top_blob.h;
32 | int channels = bottom_top_blob.c;
33 | int size = w * h;
34 |
35 | #pragma omp parallel for
36 | for (int q=0; q> 2;
42 | int remain = size - (nn << 2);
43 | #else
44 | int remain = size;
45 | #endif // __ARM_NEON
46 |
47 | #if __ARM_NEON
48 | float32x4_t _one = vdupq_n_f32(1.f);
49 | for (; nn>0; nn--)
50 | {
51 | float32x4_t _p = vld1q_f32(ptr);
52 | _p = vnegq_f32(_p);
53 | _p = exp_ps(_p);
54 | _p = vaddq_f32(_p, _one);
55 | float32x4_t _outp = vrecpeq_f32(_p);
56 | _outp = vmulq_f32(vrecpsq_f32(_p, _outp), _outp);
57 | // _outp = vmulq_f32(vrecpsq_f32(_p, _outp), _outp);
58 | vst1q_f32(ptr, _outp);
59 |
60 | ptr += 4;
61 | }
62 | #endif // __ARM_NEON
63 | for (; remain>0; remain--)
64 | {
65 | *ptr = 1.f / (1.f + exp(-*ptr));
66 |
67 | ptr++;
68 | }
69 | }
70 |
71 | return 0;
72 | }
73 |
74 | } // namespace ncnn
75 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/arm/sigmoid_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_SIGMOID_ARM_H
16 | #define LAYER_SIGMOID_ARM_H
17 |
18 | #include "sigmoid.h"
19 |
20 | namespace ncnn {
21 |
// ARM-optimized sigmoid activation; in-place forward only
// (NEON exp approximation used in sigmoid_arm.cpp).
class Sigmoid_arm : public Sigmoid
{
public:
    virtual int forward_inplace(Mat& bottom_top_blob) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_SIGMOID_ARM_H
31 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/arm/softmax_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_SOFTMAX_ARM_H
16 | #define LAYER_SOFTMAX_ARM_H
17 |
18 | #include "softmax.h"
19 |
20 | namespace ncnn {
21 |
// ARM-optimized softmax; in-place forward only.
class Softmax_arm : public Softmax
{
public:
    virtual int forward_inplace(Mat& bottom_top_blob) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_SOFTMAX_ARM_H
31 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/batchnorm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_BATCHNORM_H
16 | #define LAYER_BATCHNORM_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Batch normalization layer (inference-time affine transform per channel).
class BatchNorm : public Layer
{
public:
    BatchNorm();

    virtual int load_param(const ParamDict& pd);

    virtual int load_model(const ModelBin& mb);

    virtual int forward_inplace(Mat& bottom_top_blob) const;

public:
    // param
    int channels;
    float eps;  // presumably added to variance for numerical stability - confirm in .cpp

    // model - per-channel blobs loaded from the model file
    Mat slope_data;
    Mat mean_data;
    Mat var_data;
    Mat bias_data;

    // NOTE(review): a_data/b_data look like precomputed per-channel affine
    // coefficients derived from the blobs above - confirm in batchnorm.cpp
    Mat a_data;
    Mat b_data;
};
47 |
48 | } // namespace ncnn
49 |
50 | #endif // LAYER_BATCHNORM_H
51 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/bias.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "bias.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(Bias)
20 |
// Bias layer: adds a learned per-channel constant to the input.
Bias::Bias()
{
    // single input blob, single output blob
    one_blob_only = true;
    // the result may overwrite the input buffer in place
    support_inplace = true;
}
26 |
// Read layer parameters.
// id 0: bias_data_size - number of bias values to load (default 0)
int Bias::load_param(const ParamDict& pd)
{
    bias_data_size = pd.get(0, 0);

    return 0;
}
33 |
// Load the bias blob (bias_data_size floats) from the model binary.
// Returns 0 on success, -100 if the blob could not be loaded.
int Bias::load_model(const ModelBin& mb)
{
    bias_data = mb.load(bias_data_size, 1);
    if (bias_data.empty())
        return -100;

    return 0;
}
42 |
43 | int Bias::forward_inplace(Mat& bottom_top_blob) const
44 | {
45 | int w = bottom_top_blob.w;
46 | int h = bottom_top_blob.h;
47 | int channels = bottom_top_blob.c;
48 | int size = w * h;
49 |
50 | #pragma omp parallel for
51 | for (int q=0; q& bottom_blobs, std::vector& top_blobs) const;
30 |
31 | virtual int forward_inplace(Mat& bottom_top_blob) const;
32 |
33 | enum {
34 | Operation_ADD = 0,
35 | Operation_SUB = 1,
36 | Operation_MUL = 2,
37 | Operation_DIV = 3,
38 | Operation_MAX = 4,
39 | Operation_MIN = 5,
40 | Operation_POW = 6,
41 | Operation_RSUB = 7,
42 | Operation_RDIV = 8
43 | };
44 |
45 | public:
46 | // param
47 | int op_type;
48 | int with_scalar;
49 | float b;
50 | };
51 |
52 | } // namespace ncnn
53 |
54 | #endif // LAYER_BINARYOP_H
55 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/bnll.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "bnll.h"
16 | #include
17 |
18 | namespace ncnn {
19 |
20 | DEFINE_LAYER_CREATOR(BNLL)
21 |
// BNLL layer (binomial normal log likelihood activation).
BNLL::BNLL()
{
    one_blob_only = true;   // single input, single output
    support_inplace = true; // computed in place over the input blob
}
27 |
28 | int BNLL::forward_inplace(Mat& bottom_top_blob) const
29 | {
30 | int w = bottom_top_blob.w;
31 | int h = bottom_top_blob.h;
32 | int channels = bottom_top_blob.c;
33 | int size = w * h;
34 |
35 | #pragma omp parallel for
36 | for (int q=0; q 0)
43 | ptr[i] = ptr[i] + log(1.f + exp(-ptr[i]));
44 | else
45 | ptr[i] = log(1.f + exp(ptr[i]));
46 | }
47 | }
48 |
49 | return 0;
50 | }
51 |
52 | } // namespace ncnn
53 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/bnll.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_BNLL_H
16 | #define LAYER_BNLL_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// BNLL activation layer: element-wise, in-place (see bnll.cpp).
// No parameters and no model weights.
class BNLL : public Layer
{
public:
    BNLL();

    virtual int forward_inplace(Mat& bottom_top_blob) const;

public:
};
31 |
32 | } // namespace ncnn
33 |
34 | #endif // LAYER_BNLL_H
35 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/clip.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "clip.h"
16 |
17 | #include
18 |
19 | namespace ncnn {
20 |
21 | DEFINE_LAYER_CREATOR(Clip)
22 |
// Clip layer: clamps every element of the input into [min, max].
Clip::Clip()
{
    one_blob_only = true;   // single input, single output
    support_inplace = true; // clamping is done in place
}
28 |
// Read layer parameters.
// param id 0: lower bound (default -FLT_MAX, i.e. effectively unbounded)
// param id 1: upper bound (default  FLT_MAX)
// NOTE(review): FLT_MAX needs <float.h>/<cfloat>; the include line at the top
// of this file was damaged during extraction -- verify it in the repository.
int Clip::load_param(const ParamDict& pd)
{
    min = pd.get(0, -FLT_MAX);
    max = pd.get(1, FLT_MAX);

    return 0;
}
36 |
37 | int Clip::forward_inplace(Mat& bottom_top_blob) const
38 | {
39 | int w = bottom_top_blob.w;
40 | int h = bottom_top_blob.h;
41 | int channels = bottom_top_blob.c;
42 | int size = w * h;
43 |
44 | #pragma omp parallel for
45 | for (int q=0; q max)
54 | ptr[i] = max;
55 | }
56 | }
57 |
58 | return 0;
59 | }
60 |
61 | } // namespace ncnn
62 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/clip.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CLIP_H
16 | #define LAYER_CLIP_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Clip layer: element-wise clamp to [min, max], in place (see clip.cpp).
class Clip : public Layer
{
public:
    Clip();

    virtual int load_param(const ParamDict& pd);

    virtual int forward_inplace(Mat& bottom_top_blob) const;

public:
    float min; // param id 0, lower bound
    float max; // param id 1, upper bound
};
35 |
36 | } // namespace ncnn
37 |
38 | #endif // LAYER_CLIP_H
39 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/concat.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CONCAT_H
16 | #define LAYER_CONCAT_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class Concat : public Layer
23 | {
24 | public:
25 | Concat();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs) const;
30 |
31 | public:
32 | int axis;
33 | };
34 |
35 | } // namespace ncnn
36 |
37 | #endif // LAYER_CONCAT_H
38 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/convolution.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CONVOLUTION_H
16 | #define LAYER_CONVOLUTION_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Convolution layer declaration (implementation in convolution.cpp, not shown).
// Holds the usual 2-D convolution hyper-parameters plus the loaded weights.
class Convolution : public Layer
{
public:
    Convolution();

    virtual int load_param(const ParamDict& pd);

    virtual int load_model(const ModelBin& mb);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;

public:
    // param
    int num_output;  // number of output channels
    int kernel_w;
    int kernel_h;
    int dilation_w;
    int dilation_h;
    int stride_w;
    int stride_h;
    int pad_w;
    int pad_h;
    int bias_term;   // non-zero when a bias vector follows the weights

    int weight_data_size; // element count of weight_data loaded in load_model

    // model
    Mat weight_data;
    Mat bias_data;
};
52 |
53 | } // namespace ncnn
54 |
55 | #endif // LAYER_CONVOLUTION_H
56 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/convolutiondepthwise.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CONVOLUTIONDEPTHWISE_H
16 | #define LAYER_CONVOLUTIONDEPTHWISE_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Depthwise/grouped convolution layer declaration.
// Same parameters as Convolution plus `group` (channel groups).
class ConvolutionDepthWise : public Layer
{
public:
    ConvolutionDepthWise();

    virtual int load_param(const ParamDict& pd);

    virtual int load_model(const ModelBin& mb);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;

public:
    // param
    int num_output;  // number of output channels
    int kernel_w;
    int kernel_h;
    int dilation_w;
    int dilation_h;
    int stride_w;
    int stride_h;
    int pad_w;
    int pad_h;
    int bias_term;   // non-zero when a bias vector follows the weights

    int weight_data_size; // element count of weight_data loaded in load_model
    int group;            // number of convolution groups

    // model
    Mat weight_data;
    Mat bias_data;
};
53 |
54 | } // namespace ncnn
55 |
56 | #endif // LAYER_CONVOLUTIONDEPTHWISE_H
57 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/crop.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "crop.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(Crop)
20 |
// Crop layer: no flags set, so it uses the Layer defaults
// (multi-blob forward, not in-place).
Crop::Crop()
{
}
24 |
// Read layer parameters.
// param id 0: horizontal crop offset (default 0)
// param id 1: vertical crop offset (default 0)
int Crop::load_param(const ParamDict& pd)
{
    woffset = pd.get(0, 0);
    hoffset = pd.get(1, 0);

    return 0;
}
32 |
33 | int Crop::forward(const std::vector& bottom_blobs, std::vector& top_blobs) const
34 | {
35 | const Mat& bottom_blob = bottom_blobs[0];
36 | const Mat& reference_blob = bottom_blobs[1];
37 |
38 | int w = bottom_blob.w;
39 | int h = bottom_blob.h;
40 |
41 | int outw = reference_blob.w;
42 | int outh = reference_blob.h;
43 |
44 | int top = hoffset;
45 | int bottom = h - outh - hoffset;
46 | int left = woffset;
47 | int right = w - outw - woffset;
48 |
49 | Mat& top_blob = top_blobs[0];
50 |
51 | copy_cut_border(bottom_blob, top_blob, top, bottom, left, right);
52 | if (top_blob.empty())
53 | return -100;
54 |
55 | return 0;
56 | }
57 |
58 | } // namespace ncnn
59 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/crop.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CROP_H
16 | #define LAYER_CROP_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class Crop : public Layer
23 | {
24 | public:
25 | Crop();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs) const;
30 |
31 | public:
32 | int woffset;
33 | int hoffset;
34 | };
35 |
36 | } // namespace ncnn
37 |
38 | #endif // LAYER_CROP_H
39 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/deconvolution.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_DECONVOLUTION_H
16 | #define LAYER_DECONVOLUTION_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Deconvolution (transposed convolution) layer declaration.
// Parameter set mirrors Convolution.
class Deconvolution : public Layer
{
public:
    Deconvolution();

    virtual int load_param(const ParamDict& pd);

    virtual int load_model(const ModelBin& mb);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;

public:
    // param
    int num_output;  // number of output channels
    int kernel_w;
    int kernel_h;
    int dilation_w;
    int dilation_h;
    int stride_w;
    int stride_h;
    int pad_w;
    int pad_h;
    int bias_term;   // non-zero when a bias vector follows the weights

    int weight_data_size; // element count of weight_data loaded in load_model

    // model
    Mat weight_data;
    Mat bias_data;
};
52 |
53 | } // namespace ncnn
54 |
55 | #endif // LAYER_DECONVOLUTION_H
56 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/deconvolutiondepthwise.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_DECONVOLUTIONDEPTHWISE_H
16 | #define LAYER_DECONVOLUTIONDEPTHWISE_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Depthwise/grouped transposed convolution layer declaration.
// Same parameters as Deconvolution plus `group` (channel groups).
class DeconvolutionDepthWise : public Layer
{
public:
    DeconvolutionDepthWise();

    virtual int load_param(const ParamDict& pd);

    virtual int load_model(const ModelBin& mb);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;

public:
    // param
    int num_output;  // number of output channels
    int kernel_w;
    int kernel_h;
    int dilation_w;
    int dilation_h;
    int stride_w;
    int stride_h;
    int pad_w;
    int pad_h;
    int bias_term;   // non-zero when a bias vector follows the weights

    int weight_data_size; // element count of weight_data loaded in load_model
    int group;            // number of groups

    // model
    Mat weight_data;
    Mat bias_data;
};
53 |
54 | } // namespace ncnn
55 |
56 | #endif // LAYER_DECONVOLUTIONDEPTHWISE_H
57 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/detectionoutput.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_DETECTIONOUTPUT_H
16 | #define LAYER_DETECTIONOUTPUT_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class DetectionOutput : public Layer
23 | {
24 | public:
25 | DetectionOutput();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs) const;
30 |
31 | public:
32 | int num_class;
33 | float nms_threshold;
34 | int nms_top_k;
35 | int keep_top_k;
36 | float confidence_threshold;
37 | };
38 |
39 | } // namespace ncnn
40 |
41 | #endif // LAYER_DETECTIONOUTPUT_H
42 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/dropout.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "dropout.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(Dropout)
20 |
// Dropout layer: at inference time it only rescales activations by `scale`.
Dropout::Dropout()
{
    one_blob_only = true;   // single input, single output
    support_inplace = true; // scaling is done in place
}
26 |
// Read layer parameters.
// param id 0: scale factor (default 1.f; forward_inplace is a no-op when 1.f).
int Dropout::load_param(const ParamDict& pd)
{
    scale = pd.get(0, 1.f);

    return 0;
}
33 |
34 | int Dropout::forward_inplace(Mat& bottom_top_blob) const
35 | {
36 | if (scale == 1.f)
37 | {
38 | return 0;
39 | }
40 |
41 | int w = bottom_top_blob.w;
42 | int h = bottom_top_blob.h;
43 | int channels = bottom_top_blob.c;
44 | int size = w * h;
45 |
46 | #pragma omp parallel for
47 | for (int q=0; q& bottom_blobs, std::vector& top_blobs) const;
30 |
31 | enum { Operation_PROD = 0, Operation_SUM = 1, Operation_MAX = 2 };
32 |
33 | public:
34 | // param
35 | int op_type;
36 | Mat coeffs;
37 | };
38 |
39 | } // namespace ncnn
40 |
41 | #endif // LAYER_ELTWISE_H
42 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/elu.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "elu.h"
16 | #include
17 |
18 | namespace ncnn {
19 |
20 | DEFINE_LAYER_CREATOR(ELU)
21 |
// ELU activation layer (exponential linear unit).
ELU::ELU()
{
    one_blob_only = true;   // single input, single output
    support_inplace = true; // activation applied in place
}
27 |
// Read layer parameters.
// param id 0: alpha coefficient for the negative branch (default 0.1f).
int ELU::load_param(const ParamDict& pd)
{
    alpha = pd.get(0, 0.1f);

    return 0;
}
34 |
35 | int ELU::forward_inplace(Mat& bottom_top_blob) const
36 | {
37 | int w = bottom_top_blob.w;
38 | int h = bottom_top_blob.h;
39 | int channels = bottom_top_blob.c;
40 | int size = w * h;
41 |
42 | #pragma omp parallel for
43 | for (int q=0; q
17 |
18 | namespace ncnn {
19 |
20 | DEFINE_LAYER_CREATOR(Embed)
21 |
// Embed layer: looks up embedding vectors for integer word indices.
Embed::Embed()
{
    one_blob_only = true;    // single input, single output
    support_inplace = false; // output shape differs from input; cannot be in place
}
27 |
// Read layer parameters.
// param id 0: embedding vector length per word (default 0)
// param id 1: vocabulary size; indices are clamped against it in forward (default 0)
// param id 2: non-zero when a bias vector is present (default 0)
// param id 3: element count of the embedding table (default 0)
int Embed::load_param(const ParamDict& pd)
{
    num_output = pd.get(0, 0);
    input_dim = pd.get(1, 0);
    bias_term = pd.get(2, 0);
    weight_data_size = pd.get(3, 0);

    return 0;
}
37 |
// Read the embedding table and optional bias from the model binary.
// The two loads must stay in this order -- ModelBin is read sequentially.
// Returns 0 on success, -100 when any load failed.
int Embed::load_model(const ModelBin& mb)
{
    // trailing argument (0 vs 1) appears to select the storage/data type --
    // NOTE(review): confirm against ModelBin::load semantics
    weight_data = mb.load(weight_data_size, 0);
    if (weight_data.empty())
        return -100;

    if (bias_term)
    {
        bias_data = mb.load(num_output, 1);
        if (bias_data.empty())
            return -100;
    }

    return 0;
}
53 |
54 | int Embed::forward(const Mat& bottom_blob, Mat& top_blob) const
55 | {
56 | int words = bottom_blob.total();
57 |
58 | top_blob.create(num_output, words);
59 | if (top_blob.empty())
60 | return -100;
61 |
62 | // num_output
63 | #pragma omp parallel for
64 | for (int q=0; q= input_dim)
73 | word_index = input_dim - 1;
74 |
75 | const float* em = (const float*)weight_data + num_output * word_index;
76 |
77 | memcpy(outptr, em, num_output * sizeof(float));
78 |
79 | if (bias_term)
80 | {
81 | for (int p=0; p
17 |
18 | namespace ncnn {
19 |
20 | DEFINE_LAYER_CREATOR(Exp)
21 |
// Exp layer: element-wise exponential with configurable base, scale and shift.
Exp::Exp()
{
    one_blob_only = true;   // single input, single output
    support_inplace = true; // computed in place
}
27 |
// Read layer parameters.
// param id 0: base; -1.f is a sentinel selecting a special-cased path in
//             forward_inplace (presumably the natural base e -- TODO confirm)
// param id 1: scale applied to the input (default 1.f)
// param id 2: shift added to the scaled input (default 0.f)
int Exp::load_param(const ParamDict& pd)
{
    base = pd.get(0, -1.f);
    scale = pd.get(1, 1.f);
    shift = pd.get(2, 0.f);

    return 0;
}
36 |
37 | int Exp::forward_inplace(Mat& bottom_top_blob) const
38 | {
39 | int w = bottom_top_blob.w;
40 | int h = bottom_top_blob.h;
41 | int channels = bottom_top_blob.c;
42 | int size = w * h;
43 |
44 | if (base == -1.f)
45 | {
46 | #pragma omp parallel for
47 | for (int q=0; q
17 |
18 | namespace ncnn {
19 |
20 | DEFINE_LAYER_CREATOR(InstanceNorm)
21 |
// InstanceNorm layer: per-channel normalization with learned gamma/beta.
InstanceNorm::InstanceNorm()
{
    one_blob_only = true;   // single input, single output
    support_inplace = true; // normalization applied in place
}
27 |
// Read layer parameters.
// param id 0: channel count, used as the length of gamma/beta (default 0)
// param id 1: epsilon added for numerical stability (default 0.001f)
int InstanceNorm::load_param(const ParamDict& pd)
{
    channels = pd.get(0, 0);
    eps = pd.get(1, 0.001f);

    return 0;
}
35 |
// Read gamma then beta (each `channels` long) from the model binary.
// The two loads must stay in this order -- ModelBin is read sequentially.
// Returns 0 on success, -100 when either load failed.
int InstanceNorm::load_model(const ModelBin& mb)
{
    gamma_data = mb.load(channels, 1);
    if (gamma_data.empty())
        return -100;

    beta_data = mb.load(channels, 1);
    if (beta_data.empty())
        return -100;

    return 0;
}
48 |
49 | int InstanceNorm::forward_inplace(Mat& bottom_top_blob) const
50 | {
51 | // x = (x - mean) / (sqrt(var) + eps) * gamma + beta
52 |
53 | int w = bottom_top_blob.w;
54 | int h = bottom_top_blob.h;
55 | int size = w * h;
56 |
57 | #pragma omp parallel for
58 | for (int q=0; q
17 |
18 | namespace ncnn {
19 |
20 | DEFINE_LAYER_CREATOR(Log)
21 |
// Log layer: element-wise logarithm with configurable base, scale and shift.
Log::Log()
{
    one_blob_only = true;   // single input, single output
    support_inplace = true; // computed in place
}
27 |
// Read layer parameters.
// param id 0: base; -1.f is a sentinel selecting a special-cased path in
//             forward_inplace (presumably the natural logarithm -- TODO confirm)
// param id 1: scale applied to the input (default 1.f)
// param id 2: shift added to the scaled input (default 0.f)
int Log::load_param(const ParamDict& pd)
{
    base = pd.get(0, -1.f);
    scale = pd.get(1, 1.f);
    shift = pd.get(2, 0.f);

    return 0;
}
36 |
37 | int Log::forward_inplace(Mat& bottom_top_blob) const
38 | {
39 | int w = bottom_top_blob.w;
40 | int h = bottom_top_blob.h;
41 | int channels = bottom_top_blob.c;
42 | int size = w * h;
43 |
44 | if (base == -1.f)
45 | {
46 | #pragma omp parallel for
47 | for (int q=0; q& bottom_blobs, std::vector& top_blobs) const;
32 |
33 | public:
34 | // param
35 | int num_output;
36 | int weight_data_size;
37 |
38 | // model
39 | Mat weight_hc_data;
40 | Mat weight_xc_data;
41 | Mat bias_c_data;
42 | };
43 |
44 | } // namespace ncnn
45 |
46 | #endif // LAYER_LSTM_H
47 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/memorydata.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "memorydata.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(MemoryData)
20 |
// MemoryData layer: emits a constant blob baked into the model.
MemoryData::MemoryData()
{
    one_blob_only = false;   // takes the multi-blob forward path (no inputs used)
    support_inplace = false; // always produces a fresh clone of the stored data
}
26 |
// Read layer parameters: the stored blob's dimensions.
// param id 0: width, id 1: height, id 2: channels (all default 0;
// zeros select lower-dimensional loads in load_model).
int MemoryData::load_param(const ParamDict& pd)
{
    w = pd.get(0, 0);
    h = pd.get(1, 0);
    c = pd.get(2, 0);

    return 0;
}
35 |
// Load the constant blob, choosing dimensionality from which of c/h/w are set:
// c != 0 -> 3-D, else h != 0 -> 2-D, else w != 0 -> 1-D, else a 1-element blob.
// Returns 0 on success, -100 when the load/allocation failed.
int MemoryData::load_model(const ModelBin& mb)
{
    if (c != 0)
    {
        data = mb.load(w, h, c, 1);
    }
    else if (h != 0)
    {
        data = mb.load(w, h, 1);
    }
    else if (w != 0)
    {
        data = mb.load(w, 1);
    }
    else // 0 0 0
    {
        // no dimensions given: fall back to a single-element blob
        data.create(1);
    }
    if (data.empty())
        return -100;

    return 0;
}
59 |
60 | int MemoryData::forward(const std::vector& /*bottom_blobs*/, std::vector& top_blobs) const
61 | {
62 | Mat& top_blob = top_blobs[0];
63 |
64 | top_blob = data.clone();
65 | if (top_blob.empty())
66 | return -100;
67 |
68 | return 0;
69 | }
70 |
71 | } // namespace ncnn
72 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/memorydata.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_MEMORYDATA_H
16 | #define LAYER_MEMORYDATA_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class MemoryData : public Layer
23 | {
24 | public:
25 | MemoryData();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int load_model(const ModelBin& mb);
30 |
31 | virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs) const;
32 |
33 | public:
34 | int w;
35 | int h;
36 | int c;
37 |
38 | Mat data;
39 | };
40 |
41 | } // namespace ncnn
42 |
43 | #endif // LAYER_MEMORYDATA_H
44 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/mvn.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_MVN_H
16 | #define LAYER_MVN_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// MVN (mean-variance normalization) layer declaration
// (implementation not shown in this chunk).
class MVN : public Layer
{
public:
    MVN();

    virtual int load_param(const ParamDict& pd);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;

public:
    int normalize_variance; // non-zero: divide by standard deviation as well
    int across_channels;    // non-zero: compute statistics over all channels
    float eps;              // numerical stability term
};
36 |
37 | } // namespace ncnn
38 |
39 | #endif // LAYER_MVN_H
40 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/normalize.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_NORMALIZE_H
16 | #define LAYER_NORMALIZE_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Normalize layer declaration (implementation not shown in this chunk).
// Scales normalized activations by a learned per-channel (or shared) factor.
class Normalize : public Layer
{
public:
    Normalize();

    virtual int load_param(const ParamDict& pd);

    virtual int load_model(const ModelBin& mb);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;

public:
    // param
    int across_spatial;  // non-zero: normalize over the whole spatial extent
    int across_channel;  // non-zero: normalize across channels
    int channel_shared;  // non-zero: a single scale shared by all channels
    float eps;           // numerical stability term
    int scale_data_size; // length of scale_data loaded in load_model

    Mat scale_data; // learned scale factors
};
43 |
44 | } // namespace ncnn
45 |
46 | #endif // LAYER_NORMALIZE_H
47 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/padding.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "padding.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(Padding)
20 |
21 | Padding::Padding()
22 | {
23 | one_blob_only = true;
24 | support_inplace = false;
25 | }
26 |
27 | int Padding::load_param(const ParamDict& pd)
28 | {
29 | top = pd.get(0, 0);
30 | bottom = pd.get(1, 0);
31 | left = pd.get(2, 0);
32 | right = pd.get(3, 0);
33 | type = pd.get(4, 0);
34 | value = pd.get(5, 0.f);
35 |
36 | return 0;
37 | }
38 |
39 | int Padding::forward(const Mat& bottom_blob, Mat& top_blob) const
40 | {
41 | copy_make_border(bottom_blob, top_blob, top, bottom, left, right, type, value);
42 |
43 | if (top_blob.empty())
44 | return -100;
45 |
46 | return 0;
47 | }
48 |
49 | } // namespace ncnn
50 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/padding.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_PADDING_H
16 | #define LAYER_PADDING_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class Padding : public Layer
23 | {
24 | public:
25 | Padding();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;
30 |
31 | public:
32 | int top;
33 | int bottom;
34 | int left;
35 | int right;
36 | int type;
37 | float value;
38 | };
39 |
40 | } // namespace ncnn
41 |
42 | #endif // LAYER_PADDING_H
43 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/permute.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_PERMUTE_H
16 | #define LAYER_PERMUTE_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class Permute : public Layer
23 | {
24 | public:
25 | Permute();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;
30 |
31 | public:
32 | int order_type;
33 | };
34 |
35 | } // namespace ncnn
36 |
37 | #endif // LAYER_PERMUTE_H
38 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/pooling.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_POOLING_H
16 | #define LAYER_POOLING_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class Pooling : public Layer
23 | {
24 | public:
25 | Pooling();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;
30 |
31 | enum { PoolMethod_MAX = 0, PoolMethod_AVE = 1 };
32 |
33 | public:
34 | // param
35 | int pooling_type;
36 | int kernel_w;
37 | int kernel_h;
38 | int stride_w;
39 | int stride_h;
40 | int pad_left;
41 | int pad_right;
42 | int pad_top;
43 | int pad_bottom;
44 | int global_pooling;
45 | int pad_mode;// 0=full 1=valid 2=SAME
46 | };
47 |
48 | } // namespace ncnn
49 |
50 | #endif // LAYER_POOLING_H
51 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/power.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "power.h"
16 | #include
17 |
18 | namespace ncnn {
19 |
20 | DEFINE_LAYER_CREATOR(Power)
21 |
22 | Power::Power()
23 | {
24 | one_blob_only = true;
25 | support_inplace = true;
26 | }
27 |
28 | int Power::load_param(const ParamDict& pd)
29 | {
30 | power = pd.get(0, 1.f);
31 | scale = pd.get(1, 1.f);
32 | shift = pd.get(2, 0.f);
33 |
34 | return 0;
35 | }
36 |
37 | int Power::forward_inplace(Mat& bottom_top_blob) const
38 | {
39 | int w = bottom_top_blob.w;
40 | int h = bottom_top_blob.h;
41 | int channels = bottom_top_blob.c;
42 | int size = w * h;
43 |
44 | #pragma omp parallel for
45 | for (int q=0; q& bottom_blobs, std::vector& top_blobs) const;
30 |
31 | public:
32 | Mat min_sizes;
33 | Mat max_sizes;
34 | Mat aspect_ratios;
35 | float variances[4];
36 | int flip;
37 | int clip;
38 | int image_width;
39 | int image_height;
40 | float step_width;
41 | float step_height;
42 | float offset;
43 | };
44 |
45 | } // namespace ncnn
46 |
47 | #endif // LAYER_PRIORBOX_H
48 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/proposal.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_PROPOSAL_H
16 | #define LAYER_PROPOSAL_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class Proposal : public Layer
23 | {
24 | public:
25 | Proposal();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs) const;
30 |
31 | public:
32 | // param
33 | int feat_stride;
34 | int base_size;
35 | int pre_nms_topN;
36 | int after_nms_topN;
37 | float nms_thresh;
38 | int min_size;
39 |
40 | Mat ratios;
41 | Mat scales;
42 |
43 | Mat anchors;
44 | };
45 |
46 | } // namespace ncnn
47 |
48 | #endif // LAYER_PROPOSAL_H
49 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/reduction.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_REDUCTION_H
16 | #define LAYER_REDUCTION_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class Reduction : public Layer
23 | {
24 | public:
25 | Reduction();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;
30 |
31 | enum {
32 | ReductionOp_SUM = 0,
33 | ReductionOp_ASUM = 1,
34 | ReductionOp_SUMSQ = 2,
35 | ReductionOp_MEAN = 3,
36 | ReductionOp_MAX = 4,
37 | ReductionOp_MIN = 5,
38 | ReductionOp_PROD = 6
39 | };
40 |
41 | public:
42 | // param
43 | int operation;
44 | int dim;
45 | float coeff;
46 | };
47 |
48 | } // namespace ncnn
49 |
50 | #endif // LAYER_REDUCTION_H
51 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/relu.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "relu.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(ReLU)
20 |
21 | ReLU::ReLU()
22 | {
23 | one_blob_only = true;
24 | support_inplace = true;
25 | }
26 |
27 | int ReLU::load_param(const ParamDict& pd)
28 | {
29 | slope = pd.get(0, 0.f);
30 |
31 | return 0;
32 | }
33 |
34 | int ReLU::forward_inplace(Mat& bottom_top_blob) const
35 | {
36 | int w = bottom_top_blob.w;
37 | int h = bottom_top_blob.h;
38 | int channels = bottom_top_blob.c;
39 | int size = w * h;
40 |
41 | if (slope == 0.f)
42 | {
43 | #pragma omp parallel for
44 | for (int q=0; q& bottom_blobs, std::vector& top_blobs) const;
32 |
33 | public:
34 | // param
35 | int num_output;
36 | int weight_data_size;
37 |
38 | // model
39 | Mat weight_hh_data;
40 | Mat weight_xh_data;
41 | Mat weight_ho_data;
42 | Mat bias_h_data;
43 | Mat bias_o_data;
44 | };
45 |
46 | } // namespace ncnn
47 |
48 | #endif // LAYER_RNN_H
49 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/roipooling.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_ROIPOOLING_H
16 | #define LAYER_ROIPOOLING_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class ROIPooling : public Layer
23 | {
24 | public:
25 | ROIPooling();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs) const;
30 |
31 | public:
32 | int pooled_width;
33 | int pooled_height;
34 | float spatial_scale;
35 | };
36 |
37 | } // namespace ncnn
38 |
39 | #endif // LAYER_ROIPOOLING_H
40 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/scale.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_SCALE_H
16 | #define LAYER_SCALE_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class Scale : public Layer
23 | {
24 | public:
25 | Scale();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int load_model(const ModelBin& mb);
30 |
31 | virtual int forward_inplace(std::vector& bottom_top_blobs) const;
32 | virtual int forward_inplace(Mat& bottom_top_blob) const;
33 |
34 | public:
35 | // param
36 | int scale_data_size;
37 | int bias_term;
38 |
39 | // model
40 | Mat scale_data;
41 | Mat bias_data;
42 | };
43 |
44 | } // namespace ncnn
45 |
46 | #endif // LAYER_SCALE_H
47 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/shufflechannel.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "shufflechannel.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(ShuffleChannel)
20 |
21 | ShuffleChannel::ShuffleChannel()
22 | {
23 | one_blob_only = true;
24 | support_inplace = false;
25 | }
26 |
27 | int ShuffleChannel::load_param(const ParamDict& pd)
28 | {
29 | group = pd.get(0, 1);
30 |
31 | return 0;
32 | }
33 |
34 | int ShuffleChannel::forward(const Mat& bottom_blob, Mat& top_blob) const
35 | {
36 | int w = bottom_blob.w;
37 | int h = bottom_blob.h;
38 | int c = bottom_blob.c;
39 | size_t elemsize = bottom_blob.elemsize;
40 | int chs_per_group = c / group;
41 |
42 | if (c != chs_per_group * group)
43 | {
44 | // reject invalid group
45 | return -100;
46 | }
47 |
48 | top_blob.create(w, h, c, elemsize);
49 | if (top_blob.empty())
50 | return -100;
51 |
52 | const size_t feature_sz = w * h * elemsize;
53 | for (int i = 0; i != group; i++)
54 | {
55 | for (int j = 0; j != chs_per_group; j++)
56 | {
57 | int src_q = chs_per_group * i + j;
58 | int dst_q = group * j + i;
59 | memcpy(top_blob.channel(dst_q), bottom_blob.channel(src_q), feature_sz);
60 | }
61 | }
62 | return 0;
63 | }
64 |
65 | } // namespace ncnn
66 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/shufflechannel.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_SHUFFLECHANNEL_H
16 | #define LAYER_SHUFFLECHANNEL_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class ShuffleChannel : public Layer
23 | {
24 | public:
25 | ShuffleChannel();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;
30 |
31 | public:
32 | int group;
33 | };
34 |
35 | } // namespace ncnn
36 |
37 | #endif // LAYER_SHUFFLECHANNEL_H
38 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/sigmoid.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "sigmoid.h"
16 | #include
17 |
18 | namespace ncnn {
19 |
20 | DEFINE_LAYER_CREATOR(Sigmoid)
21 |
22 | Sigmoid::Sigmoid()
23 | {
24 | one_blob_only = true;
25 | support_inplace = true;
26 | }
27 |
28 | int Sigmoid::forward_inplace(Mat& bottom_top_blob) const
29 | {
30 | int w = bottom_top_blob.w;
31 | int h = bottom_top_blob.h;
32 | int channels = bottom_top_blob.c;
33 | int size = w * h;
34 |
35 | #pragma omp parallel for
36 | for (int q=0; q& bottom_blobs, std::vector& top_blobs) const;
30 |
31 | public:
32 | Mat slices;
33 | int axis;
34 | };
35 |
36 | } // namespace ncnn
37 |
38 | #endif // LAYER_SLICE_H
39 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/softmax.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_SOFTMAX_H
16 | #define LAYER_SOFTMAX_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class Softmax : public Layer
23 | {
24 | public:
25 | Softmax();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward_inplace(Mat& bottom_top_blob) const;
30 |
31 | public:
32 | int axis;
33 | };
34 |
35 | } // namespace ncnn
36 |
37 | #endif // LAYER_SOFTMAX_H
38 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/split.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "split.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(Split)
20 |
21 | Split::Split()
22 | {
23 | }
24 |
25 | int Split::forward(const std::vector& bottom_blobs, std::vector& top_blobs) const
26 | {
27 | const Mat& bottom_blob = bottom_blobs[0];
28 | for (size_t i=0; i& bottom_blobs, std::vector& top_blobs) const;
28 |
29 | public:
30 | };
31 |
32 | } // namespace ncnn
33 |
34 | #endif // LAYER_SPLIT_H
35 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/spp.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_SPP_H
16 | #define LAYER_SPP_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class SPP : public Layer
23 | {
24 | public:
25 | SPP();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;
30 |
31 | enum { PoolMethod_MAX = 0, PoolMethod_AVE = 1 };
32 |
33 | public:
34 | // param
35 | int pooling_type;
36 | int pyramid_height;
37 | };
38 |
39 | } // namespace ncnn
40 |
41 | #endif // LAYER_SPP_H
42 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/squeeze.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "squeeze.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(Squeeze)
20 |
21 | Squeeze::Squeeze()
22 | {
23 | one_blob_only = true;
24 | support_inplace = false;
25 | }
26 |
27 | int Squeeze::load_param(const ParamDict& pd)
28 | {
29 | squeeze_w = pd.get(0, 0);
30 | squeeze_h = pd.get(1, 0);
31 | squeeze_c = pd.get(2, 0);
32 |
33 | return 0;
34 | }
35 |
36 | int Squeeze::forward(const Mat& bottom_blob, Mat& top_blob) const
37 | {
38 | int w = bottom_blob.w;
39 | int h = bottom_blob.h;
40 | int channels = bottom_blob.c;
41 | int dims = bottom_blob.dims;
42 |
43 | top_blob = bottom_blob;
44 |
45 | if (squeeze_c && dims == 3 && channels == 1)
46 | {
47 | if (squeeze_h && h == 1)
48 | top_blob = bottom_blob.reshape(w);
49 | else
50 | top_blob = bottom_blob.reshape(w, h);
51 | }
52 | else if (squeeze_h && dims >= 2 && h == 1)
53 | {
54 | if (squeeze_w && w == 1)
55 | top_blob = bottom_blob.reshape(channels);
56 | else
57 | top_blob = bottom_blob.reshape(w, channels);
58 | }
59 | else if (squeeze_w && dims >= 1 && w == 1)
60 | {
61 | if (squeeze_h && h == 1)
62 | top_blob = bottom_blob.reshape(channels);
63 | else
64 | top_blob = bottom_blob.reshape(h, channels);
65 | }
66 |
67 | if (top_blob.empty())
68 | return -100;
69 |
70 | return 0;
71 | }
72 |
73 | } // namespace ncnn
74 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/squeeze.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_SQUEEZE_H
16 | #define LAYER_SQUEEZE_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class Squeeze : public Layer
23 | {
24 | public:
25 | Squeeze();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;
30 |
31 | public:
32 | int squeeze_w;
33 | int squeeze_h;
34 | int squeeze_c;
35 | };
36 |
37 | } // namespace ncnn
38 |
39 | #endif // LAYER_SQUEEZE_H
40 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/tanh.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "tanh.h"
16 | #include
17 |
18 | namespace ncnn {
19 |
20 | DEFINE_LAYER_CREATOR(TanH)
21 |
22 | TanH::TanH()
23 | {
24 | one_blob_only = true;
25 | support_inplace = true;
26 | }
27 |
28 | int TanH::forward_inplace(Mat& bottom_top_blob) const
29 | {
30 | int w = bottom_top_blob.w;
31 | int h = bottom_top_blob.h;
32 | int channels = bottom_top_blob.c;
33 | int size = w * h;
34 |
35 | #pragma omp parallel for
36 | for (int q=0; q threshold ? 1.f : 0.f;
49 | }
50 | }
51 |
52 | return 0;
53 | }
54 |
55 | } // namespace ncnn
56 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/threshold.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_THRESHOLD_H
16 | #define LAYER_THRESHOLD_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class Threshold : public Layer
23 | {
24 | public:
25 | Threshold();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward_inplace(Mat& bottom_top_blob) const;
30 |
31 | public:
32 | float threshold;
33 | };
34 |
35 | } // namespace ncnn
36 |
37 | #endif // LAYER_THRESHOLD_H
38 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/tile.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_TILE_H
16 | #define LAYER_TILE_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class Tile : public Layer
23 | {
24 | public:
25 | Tile();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;
30 |
31 | public:
32 | int dim;
33 | int tiles;
34 | };
35 |
36 | } // namespace ncnn
37 |
38 | #endif // LAYER_TILE_H
39 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/unaryop.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_UNARYOP_H
16 | #define LAYER_UNARYOP_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class UnaryOp : public Layer
23 | {
24 | public:
25 | UnaryOp();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward_inplace(Mat& bottom_top_blob) const;
30 |
31 | enum {
32 | Operation_ABS = 0,
33 | Operation_NEG = 1,
34 | Operation_FLOOR = 2,
35 | Operation_CEIL = 3,
36 | Operation_SQUARE= 4,
37 | Operation_SQRT = 5,
38 | Operation_RSQRT = 6,
39 | Operation_EXP = 7,
40 | Operation_LOG = 8,
41 | Operation_SIN = 9,
42 | Operation_COS = 10,
43 | Operation_TAN = 11,
44 | Operation_ASIN = 12,
45 | Operation_ACOS = 13,
46 | Operation_ATAN = 14,
47 | Operation_RECIPROCAL = 15
48 | };
49 |
50 | public:
51 | // param
52 | int op_type;
53 | };
54 |
55 | } // namespace ncnn
56 |
57 | #endif // LAYER_UNARYOP_H
58 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/x86/convolution_x86.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CONVOLUTION_X86_H
16 | #define LAYER_CONVOLUTION_X86_H
17 |
18 | #include "convolution.h"
19 |
20 | namespace ncnn {
21 |
22 | typedef void (*conv_func)(const Mat&, Mat&, const Mat&, const Mat&);
23 |
24 | class Convolution_x86 : public Convolution
25 | {
26 | public:
27 | virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;
28 | virtual int forwardDilation(const Mat& bottom_blob, Mat &top_blob, conv_func conv) const;
29 | };
30 |
31 | } // namespace ncnn
32 |
33 | #endif // LAYER_CONVOLUTION_X86_H
34 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer/x86/convolutiondepthwise_x86.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CONVOLUTIONDEPTHWISE_X86_H
16 | #define LAYER_CONVOLUTIONDEPTHWISE_X86_H
17 |
18 | #include "convolutiondepthwise.h"
19 |
20 | namespace ncnn {
21 |
22 | class ConvolutionDepthWise_x86 : public ConvolutionDepthWise
23 | {
24 | public:
25 | virtual int forward(const Mat& bottom_blob, Mat& top_blob) const;
26 | };
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_CONVOLUTIONDEPTHWISE_X86_H
31 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/layer_type.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef NCNN_LAYER_TYPE_H
16 | #define NCNN_LAYER_TYPE_H
17 |
18 | namespace ncnn {
19 |
20 | namespace LayerType {
21 | enum
22 | {
23 | #include "layer_type_enum.h"
24 | CustomBit = (1<<8),
25 | };
26 | } // namespace LayerType
27 |
28 | } // namespace ncnn
29 |
30 | #endif // NCNN_LAYER_TYPE_H
31 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/modelbin.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef NCNN_MODELBIN_H
16 | #define NCNN_MODELBIN_H
17 |
18 | #include <stdio.h>
19 | #include "mat.h"
20 | #include "platform.h"
21 |
22 | namespace ncnn {
23 |
24 | class Net;
25 | class ModelBin
26 | {
27 | public:
28 | // element type
29 | // 0 = auto
30 | // 1 = float32
31 | // 2 = float16
32 | // 3 = uint8
33 | // load vec
34 | virtual Mat load(int w, int type) const = 0;
35 | // load image
36 | virtual Mat load(int w, int h, int type) const;
37 | // load dim
38 | virtual Mat load(int w, int h, int c, int type) const;
39 | };
40 |
41 | #if NCNN_STDIO
42 | class ModelBinFromStdio : public ModelBin
43 | {
44 | public:
45 | // construct from file
46 | ModelBinFromStdio(FILE* binfp);
47 |
48 | virtual Mat load(int w, int type) const;
49 |
50 | protected:
51 | FILE* binfp;
52 | };
53 | #endif // NCNN_STDIO
54 |
55 | class ModelBinFromMemory : public ModelBin
56 | {
57 | public:
58 | // construct from external memory
59 | ModelBinFromMemory(const unsigned char*& mem);
60 |
61 | virtual Mat load(int w, int type) const;
62 |
63 | protected:
64 | const unsigned char*& mem;
65 | };
66 |
67 | class ModelBinFromMatArray : public ModelBin
68 | {
69 | public:
70 | // construct from weight blob array
71 | ModelBinFromMatArray(const Mat* weights);
72 |
73 | virtual Mat load(int w, int type) const;
74 |
75 | protected:
76 | mutable const Mat* weights;
77 | };
78 |
79 | } // namespace ncnn
80 |
81 | #endif // NCNN_MODELBIN_H
82 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/paramdict.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef NCNN_PARAMDICT_H
16 | #define NCNN_PARAMDICT_H
17 |
18 | #include <stdio.h>
19 | #include "mat.h"
20 | #include "platform.h"
21 |
22 | // at most 20 parameters
23 | #define NCNN_MAX_PARAM_COUNT 20
24 |
25 | namespace ncnn {
26 |
27 | class Net;
28 | class ParamDict
29 | {
30 | public:
31 | // empty
32 | ParamDict();
33 |
34 | // get int
35 | int get(int id, int def) const;
36 | // get float
37 | float get(int id, float def) const;
38 | // get array
39 | Mat get(int id, const Mat& def) const;
40 |
41 | // set int
42 | void set(int id, int i);
43 | // set float
44 | void set(int id, float f);
45 | // set array
46 | void set(int id, const Mat& v);
47 |
48 | protected:
49 | friend class Net;
50 |
51 | void clear();
52 |
53 | #if NCNN_STDIO
54 | #if NCNN_STRING
55 | int load_param(FILE* fp);
56 | #endif // NCNN_STRING
57 | int load_param_bin(FILE* fp);
58 | #endif // NCNN_STDIO
59 | int load_param(const unsigned char*& mem);
60 |
61 | protected:
62 | struct
63 | {
64 | int loaded;
65 | union { int i; float f; };
66 | Mat v;
67 | } params[NCNN_MAX_PARAM_COUNT];
68 | };
69 |
70 | } // namespace ncnn
71 |
72 | #endif // NCNN_PARAMDICT_H
73 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/src/platform.h.in:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef NCNN_PLATFORM_H
16 | #define NCNN_PLATFORM_H
17 |
18 | #cmakedefine01 NCNN_STDIO
19 | #cmakedefine01 NCNN_STRING
20 | #cmakedefine01 NCNN_OPENCV
21 | #cmakedefine01 NCNN_BENCHMARK
22 |
23 | #endif // NCNN_PLATFORM_H
24 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/tools/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | add_subdirectory(caffe)
3 | add_subdirectory(mxnet)
4 | add_subdirectory(onnx)
5 |
6 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src)
7 | include_directories(${CMAKE_CURRENT_BINARY_DIR}/../src)
8 |
9 | include_directories(${CMAKE_SOURCE_DIR}/src)
10 |
11 | add_executable(ncnn2mem ncnn2mem.cpp)
12 |
13 | target_link_libraries(ncnn2mem ncnn)
14 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/tools/caffe/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | find_package(Protobuf)
3 |
4 | if(PROTOBUF_FOUND)
5 | include_directories(${PROTOBUF_INCLUDE_DIR})
6 | include_directories(${CMAKE_CURRENT_BINARY_DIR})
7 | protobuf_generate_cpp(CAFFE_PROTO_SRCS CAFFE_PROTO_HDRS caffe.proto)
8 | add_executable(caffe2ncnn caffe2ncnn.cpp ${CAFFE_PROTO_SRCS} ${CAFFE_PROTO_HDRS})
9 | target_link_libraries(caffe2ncnn ${PROTOBUF_LIBRARIES})
10 | else()
11 | message(WARNING "Protobuf not found, caffe model convert tool won't be built")
12 | endif()
13 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/tools/mxnet/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | add_executable(mxnet2ncnn mxnet2ncnn.cpp)
3 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/tools/onnx/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | find_package(Protobuf)
3 |
4 | if(PROTOBUF_FOUND)
5 | include_directories(${PROTOBUF_INCLUDE_DIR})
6 | include_directories(${CMAKE_CURRENT_BINARY_DIR})
7 | protobuf_generate_cpp(ONNX_PROTO_SRCS ONNX_PROTO_HDRS onnx.proto)
8 | add_executable(onnx2ncnn onnx2ncnn.cpp ${ONNX_PROTO_SRCS} ${ONNX_PROTO_HDRS})
9 | target_link_libraries(onnx2ncnn ${PROTOBUF_LIBRARIES})
10 | else()
11 | message(WARNING "Protobuf not found, onnx model convert tool won't be built")
12 | endif()
13 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/tools/pytorch/readme.txt:
--------------------------------------------------------------------------------
1 | You can find pytorch2ncnn tool here
2 |
3 | https://github.com/starimeL/PytorchConverter
4 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/tools/tensorflow/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | find_package(Protobuf REQUIRED)
3 |
4 | include_directories(${PROTOBUF_INCLUDE_DIR})
5 |
6 | include_directories(${CMAKE_CURRENT_BINARY_DIR})
7 | protobuf_generate_cpp(TENSORFLOW_PROTO_SRCS TENSORFLOW_PROTO_HDRS
8 | attr_value.proto
9 | function.proto
10 | graph.proto
11 | node_def.proto
12 | op_def.proto
13 | resource_handle.proto
14 | tensor.proto
15 | tensor_shape.proto
16 | types.proto
17 | versions.proto
18 | )
19 |
20 | add_executable(tensorflow2ncnn tensorflow2ncnn.cpp ${TENSORFLOW_PROTO_SRCS} ${TENSORFLOW_PROTO_HDRS})
21 |
22 | target_link_libraries(tensorflow2ncnn ${PROTOBUF_LIBRARIES})
23 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/tools/tensorflow/attr_value.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package tensorflow;
4 | option cc_enable_arenas = true;
5 | option java_outer_classname = "AttrValueProtos";
6 | option java_multiple_files = true;
7 | option java_package = "org.tensorflow.framework";
8 |
9 | import "tensor.proto";
10 | import "tensor_shape.proto";
11 | import "types.proto";
12 |
13 | // Protocol buffer representing the value for an attr used to configure an Op.
14 | // Comment indicates the corresponding attr type. Only the field matching the
15 | // attr type may be filled.
16 | message AttrValue {
17 | // LINT.IfChange
18 | message ListValue {
19 | repeated bytes s = 2; // "list(string)"
20 | repeated int64 i = 3 [packed = true]; // "list(int)"
21 | repeated float f = 4 [packed = true]; // "list(float)"
22 | repeated bool b = 5 [packed = true]; // "list(bool)"
23 | repeated DataType type = 6 [packed = true]; // "list(type)"
24 | repeated TensorShapeProto shape = 7; // "list(shape)"
25 | repeated TensorProto tensor = 8; // "list(tensor)"
26 | repeated NameAttrList func = 9; // "list(attr)"
27 | }
28 | // LINT.ThenChange(https://www.tensorflow.org/code/tensorflow/c/c_api.cc)
29 |
30 | oneof value {
31 | bytes s = 2; // "string"
32 | int64 i = 3; // "int"
33 | float f = 4; // "float"
34 | bool b = 5; // "bool"
35 | DataType type = 6; // "type"
36 | TensorShapeProto shape = 7; // "shape"
37 | TensorProto tensor = 8; // "tensor"
38 | ListValue list = 1; // any "list(...)"
39 |
40 | // "func" represents a function. func.name is a function's name or
41 | // a primitive op's name. func.attr.first is the name of an attr
42 | // defined for that function. func.attr.second is the value for
43 | // that attr in the instantiation.
44 | NameAttrList func = 10;
45 |
46 | // This is a placeholder only used in nodes defined inside a
47 | // function. It indicates the attr value will be supplied when
48 | // the function is instantiated. For example, let us suppose a
49 | // node "N" in function "FN". "N" has an attr "A" with value
50 | // placeholder = "foo". When FN is instantiated with attr "foo"
51 | // set to "bar", the instantiated node N's attr A will have been
52 | // given the value "bar".
53 | string placeholder = 9;
54 | }
55 | }
56 |
57 | // A list of attr names and their values. The whole list is attached
58 | // with a string name. E.g., MatMul[T=float].
59 | message NameAttrList {
60 | string name = 1;
61 | map<string, AttrValue> attr = 2;
62 | }
63 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/tools/tensorflow/graph.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package tensorflow;
4 | option cc_enable_arenas = true;
5 | option java_outer_classname = "GraphProtos";
6 | option java_multiple_files = true;
7 | option java_package = "org.tensorflow.framework";
8 |
9 | import "node_def.proto";
10 | import "function.proto";
11 | import "versions.proto";
12 |
13 | // Represents the graph of operations
14 | message GraphDef {
15 | repeated NodeDef node = 1;
16 |
17 | // Compatibility versions of the graph. See core/public/version.h for version
18 | // history. The GraphDef version is distinct from the TensorFlow version, and
19 | // each release of TensorFlow will support a range of GraphDef versions.
20 | VersionDef versions = 4;
21 |
22 | // Deprecated single version field; use versions above instead. Since all
23 | // GraphDef changes before "versions" was introduced were forward
24 | // compatible, this field is entirely ignored.
25 | int32 version = 3 [deprecated = true];
26 |
27 | // EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
28 | //
29 | // "library" provides user-defined functions.
30 | //
31 | // Naming:
32 | // * library.function.name are in a flat namespace.
33 | // NOTE: We may need to change it to be hierarchical to support
34 | // different orgs. E.g.,
35 | // { "/google/nn", { ... }},
36 | // { "/google/vision", { ... }}
37 | // { "/org_foo/module_bar", { ... }}
38 | // map<string, FunctionDefLib> named_lib;
39 | // * If node[i].op is the name of one function in "library",
40 | // node[i] is deemed as a function call. Otherwise, node[i].op
41 | // must be a primitive operation supported by the runtime.
42 | //
43 | //
44 | // Function call semantics:
45 | //
46 | // * The callee may start execution as soon as some of its inputs
47 | // are ready. The caller may want to use Tuple() mechanism to
48 | // ensure all inputs are ready in the same time.
49 | //
50 | // * The consumer of return values may start executing as soon as
51 | // the return values the consumer depends on are ready. The
52 | // consumer may want to use Tuple() mechanism to ensure the
53 | // consumer does not start until all return values of the callee
54 | // function are ready.
55 | FunctionDefLibrary library = 2;
56 | };
57 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/tools/tensorflow/node_def.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package tensorflow;
4 | option cc_enable_arenas = true;
5 | option java_outer_classname = "NodeProto";
6 | option java_multiple_files = true;
7 | option java_package = "org.tensorflow.framework";
8 |
9 | import "attr_value.proto";
10 |
11 | message NodeDef {
12 | // The name given to this operator. Used for naming inputs,
13 | // logging, visualization, etc. Unique within a single GraphDef.
14 | // Must match the regexp "[A-Za-z0-9.][A-Za-z0-9_./]*".
15 | string name = 1;
16 |
17 | // The operation name. There may be custom parameters in attrs.
18 | // Op names starting with an underscore are reserved for internal use.
19 | string op = 2;
20 |
21 | // Each input is "node:src_output" with "node" being a string name and
22 | // "src_output" indicating which output tensor to use from "node". If
23 | // "src_output" is 0 the ":0" suffix can be omitted. Regular inputs
24 | // may optionally be followed by control inputs that have the format
25 | // "^node".
26 | repeated string input = 3;
27 |
28 | // A (possibly partial) specification for the device on which this
29 | // node should be placed.
30 | // The expected syntax for this string is as follows:
31 | //
32 | // DEVICE_SPEC ::= PARTIAL_SPEC
33 | //
34 | // PARTIAL_SPEC ::= ("/" CONSTRAINT) *
35 | // CONSTRAINT ::= ("job:" JOB_NAME)
36 | // | ("replica:" [1-9][0-9]*)
37 | // | ("task:" [1-9][0-9]*)
38 | // | ( ("gpu" | "cpu") ":" ([1-9][0-9]* | "*") )
39 | //
40 | // Valid values for this string include:
41 | // * "/job:worker/replica:0/task:1/gpu:3" (full specification)
42 | // * "/job:worker/gpu:3" (partial specification)
43 | // * "" (no specification)
44 | //
45 | // If the constraints do not resolve to a single device (or if this
46 | // field is empty or not present), the runtime will attempt to
47 | // choose a device automatically.
48 | string device = 4;
49 |
50 | // Operation-specific graph-construction-time configuration.
51 | // Note that this should include all attrs defined in the
52 | // corresponding OpDef, including those with a value matching
53 | // the default -- this allows the default to change and makes
54 | // NodeDefs easier to interpret on their own. However, if
55 | // an attr with a default is not specified in this list, the
56 | // default will be used.
57 | // The "names" (keys) must match the regexp "[a-z][a-z0-9_]+" (and
58 | // one of the names from the corresponding OpDef's attr field).
59 | // The values must have a type matching the corresponding OpDef
60 | // attr's type field.
61 | // TODO(josh11b): Add some examples here showing best practices.
62 | map<string, AttrValue> attr = 5;
63 | };
64 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/tools/tensorflow/resource_handle.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package tensorflow;
4 | option cc_enable_arenas = true;
5 | option java_outer_classname = "ResourceHandle";
6 | option java_multiple_files = true;
7 | option java_package = "org.tensorflow.framework";
8 |
9 | // Protocol buffer representing a handle to a tensorflow resource. Handles are
10 | // not valid across executions, but can be serialized back and forth from within
11 | // a single run.
12 | message ResourceHandleProto {
13 | // Unique name for the device containing the resource.
14 | string device = 1;
15 |
16 | // Container in which this resource is placed.
17 | string container = 2;
18 |
19 | // Unique name of this resource.
20 | string name = 3;
21 |
22 | // Hash code for the type of the resource. Is only valid in the same device
23 | // and in the same execution.
24 | uint64 hash_code = 4;
25 |
26 | // For debug-only, the name of the type pointed to by this handle, if
27 | // available.
28 | string maybe_type_name = 5;
29 | };
30 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/tools/tensorflow/tensor_shape.proto:
--------------------------------------------------------------------------------
1 | // Protocol buffer representing the shape of tensors.
2 |
3 | syntax = "proto3";
4 | option cc_enable_arenas = true;
5 | option java_outer_classname = "TensorShapeProtos";
6 | option java_multiple_files = true;
7 | option java_package = "org.tensorflow.framework";
8 |
9 | package tensorflow;
10 |
11 | // Dimensions of a tensor.
12 | message TensorShapeProto {
13 | // One dimension of the tensor.
14 | message Dim {
15 | // Size of the tensor in that dimension.
16 | // This value must be >= -1, but values of -1 are reserved for "unknown"
17 | // shapes (values of -1 mean "unknown" dimension). Certain wrappers
18 | // that work with TensorShapeProto may fail at runtime when deserializing
19 | // a TensorShapeProto containing a dim value of -1.
20 | int64 size = 1;
21 |
22 | // Optional name of the tensor dimension.
23 | string name = 2;
24 | };
25 |
26 | // Dimensions of the tensor, such as {"input", 30}, {"output", 40}
27 | // for a 30 x 40 2D tensor. If an entry has size -1, this
28 | // corresponds to a dimension of unknown size. The names are
29 | // optional.
30 | //
31 | // The order of entries in "dim" matters: It indicates the layout of the
32 | // values in the tensor in-memory representation.
33 | //
34 | // The first entry in "dim" is the outermost dimension used to layout the
35 | // values, the last entry is the innermost dimension. This matches the
36 | // in-memory layout of RowMajor Eigen tensors.
37 | //
38 | // If "dim.size()" > 0, "unknown_rank" must be false.
39 | repeated Dim dim = 2;
40 |
41 | // If true, the number of dimensions in the shape is unknown.
42 | //
43 | // If true, "dim.size()" must be 0.
44 | bool unknown_rank = 3;
45 | };
46 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/tools/tensorflow/types.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package tensorflow;
4 | option cc_enable_arenas = true;
5 | option java_outer_classname = "TypesProtos";
6 | option java_multiple_files = true;
7 | option java_package = "org.tensorflow.framework";
8 |
9 | // LINT.IfChange
10 | enum DataType {
11 | // Not a legal value for DataType. Used to indicate a DataType field
12 | // has not been set.
13 | DT_INVALID = 0;
14 |
15 | // Data types that all computation devices are expected to be
16 | // capable to support.
17 | DT_FLOAT = 1;
18 | DT_DOUBLE = 2;
19 | DT_INT32 = 3;
20 | DT_UINT8 = 4;
21 | DT_INT16 = 5;
22 | DT_INT8 = 6;
23 | DT_STRING = 7;
24 | DT_COMPLEX64 = 8; // Single-precision complex
25 | DT_INT64 = 9;
26 | DT_BOOL = 10;
27 | DT_QINT8 = 11; // Quantized int8
28 | DT_QUINT8 = 12; // Quantized uint8
29 | DT_QINT32 = 13; // Quantized int32
30 | DT_BFLOAT16 = 14; // Float32 truncated to 16 bits. Only for cast ops.
31 | DT_QINT16 = 15; // Quantized int16
32 | DT_QUINT16 = 16; // Quantized uint16
33 | DT_UINT16 = 17;
34 | DT_COMPLEX128 = 18; // Double-precision complex
35 | DT_HALF = 19;
36 | DT_RESOURCE = 20;
37 | DT_VARIANT = 21; // Arbitrary C++ data types
38 |
39 | // TODO(josh11b): DT_GENERIC_PROTO = ??;
40 | // TODO(jeff,josh11b): DT_UINT64? DT_UINT32?
41 |
42 | // Do not use! These are only for parameters. Every enum above
43 | // should have a corresponding value below (verified by types_test).
44 | DT_FLOAT_REF = 101;
45 | DT_DOUBLE_REF = 102;
46 | DT_INT32_REF = 103;
47 | DT_UINT8_REF = 104;
48 | DT_INT16_REF = 105;
49 | DT_INT8_REF = 106;
50 | DT_STRING_REF = 107;
51 | DT_COMPLEX64_REF = 108;
52 | DT_INT64_REF = 109;
53 | DT_BOOL_REF = 110;
54 | DT_QINT8_REF = 111;
55 | DT_QUINT8_REF = 112;
56 | DT_QINT32_REF = 113;
57 | DT_BFLOAT16_REF = 114;
58 | DT_QINT16_REF = 115;
59 | DT_QUINT16_REF = 116;
60 | DT_UINT16_REF = 117;
61 | DT_COMPLEX128_REF = 118;
62 | DT_HALF_REF = 119;
63 | DT_RESOURCE_REF = 120;
64 | DT_VARIANT_REF = 121;
65 | }
66 | // LINT.ThenChange(https://www.tensorflow.org/code/tensorflow/c/c_api.h,https://www.tensorflow.org/code/tensorflow/go/tensor.go)
67 |
--------------------------------------------------------------------------------
/3rd_party/ncnn/tools/tensorflow/versions.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package tensorflow;
4 | option cc_enable_arenas = true;
5 | option java_outer_classname = "VersionsProtos";
6 | option java_multiple_files = true;
7 | option java_package = "org.tensorflow.framework";
8 |
9 | // Version information for a piece of serialized data
10 | //
11 | // There are different types of versions for each type of data
12 | // (GraphDef, etc.), but they all have the same common shape
13 | // described here.
14 | //
15 | // Each consumer has "consumer" and "min_producer" versions (specified
16 | // elsewhere). A consumer is allowed to consume this data if
17 | //
18 | // producer >= min_producer
19 | // consumer >= min_consumer
20 | // consumer not in bad_consumers
21 | //
22 | message VersionDef {
23 |   // The version of the code that produced this data.
24 |   int32 producer = 1;
25 | 
26 |   // Any consumer below this version is not allowed to consume this data.
27 |   int32 min_consumer = 2;
28 | 
29 |   // Specific consumer versions which are disallowed (e.g. due to bugs).
30 |   repeated int32 bad_consumers = 3;
31 | }
32 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.1)
2 | # Path to a local OpenCV build; override with -DOpenCV_DIR=<path> at configure time.
3 | set(OpenCV_DIR "/home/lingkang/Documents/opencv-3.1.0/build")
4 | project(pedestrian_detection_and_tracking)
5 | 
6 | # Request C++11 the standard CMake way instead of injecting raw -std flags.
7 | set(CMAKE_CXX_STANDARD 11)
8 | set(CMAKE_CXX_STANDARD_REQUIRED ON)
9 | 
10 | # Only default the build type; never stomp a user-provided value.
11 | if(NOT CMAKE_BUILD_TYPE)
12 |   set(CMAKE_BUILD_TYPE Release)
13 | endif()
14 | 
15 | # Append to existing flags rather than discarding anything the user supplied.
16 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -pthread")
17 | set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -g")
18 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3")
19 | 
20 | find_package(OpenCV REQUIRED)
21 | include_directories(${OpenCV_INCLUDE_DIRS})
22 | 
23 | find_package(OpenMP)
24 | if(OpenMP_CXX_FOUND OR OPENMP_FOUND)
25 |   set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
26 |   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
27 | endif()
28 | 
29 | link_directories(${CMAKE_CURRENT_SOURCE_DIR}/3rd_party/ncnn/build/src)
30 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}/detect_and_track)
31 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}/pedestrian_detection)
32 | 
33 | # The subdirectories must come AFTER the compiler/flag setup above: directory
34 | # settings only flow downward, so placing them first (as before) meant the
35 | # subprojects never saw these flags.
36 | add_subdirectory(pedestrian_detection)
37 | add_subdirectory(detect_and_track)
38 | 
39 | add_executable(main main.cpp)
40 | target_link_libraries(main PRIVATE ${OpenCV_LIBS} detect_and_track)
41 | 
--------------------------------------------------------------------------------
/MOT17-11.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/zlingkang/pedestrian_detection_and_tracking/b8c5c2508c745bf0f4bb148ae291d0a8b8b10eac/MOT17-11.mp4
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # pedestrian_detection_and_tracking
2 | Pedestrian detection using Mobilenet SSD and tracking
3 |
4 | [](http://www.youtube.com/watch?v=RvVWxufc6S8)
5 |
6 | ## Pedestrian detection
7 | Trained using Caffe MobileNet SSD; details: https://github.com/zlingkang/mobilenet_ssd_pedestrian_detection
8 | Then the model was transferred to [ncnn](https://github.com/Tencent/ncnn) (a deep learning framework optimized for mobile platforms)
9 |
10 | ## Tracking
11 | An optical-flow and Kalman Filter based multiple object tracker, more details: [https://github.com/zlingkang/multi_object_tracker](https://github.com/zlingkang/multi_object_tracker)
12 |
13 | ## Usage
14 | * Compile ncnn
15 | go to 3rd_party/ncnn
16 | (When building on ARM platforms such as the Raspberry Pi, modify ncnn/CMakeLists.txt to add `add_definitions(-mfpu=neon)`,
17 | and modify ncnn/src/CMakeLists.txt by changing lines 40 to 46 to `if(TRUE)`)
18 | `mkdir build`
19 | `cd build`
20 | `cmake ..`
21 | `make`
22 | * Compile all the rest
23 | in root directory
24 | `mkdir build`
25 | `cd build`
26 | `cmake ..`
27 | `make`
28 | * Run
29 | go to build/
30 | `./main`
31 |
--------------------------------------------------------------------------------
/detect_and_track/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | # Built as a subdirectory of the top-level project; see ../CMakeLists.txt.
2 | 
3 | # Only default the build type; never stomp a user-provided value.
4 | if(NOT CMAKE_BUILD_TYPE)
5 |   set(CMAKE_BUILD_TYPE Release)
6 | endif()
7 | 
8 | # Append rather than overwrite so flags set by the parent project survive.
9 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra")
10 | set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -g")
11 | set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3")
12 | 
13 | # Request C++11 the standard CMake way instead of injecting raw -std flags.
14 | set(CMAKE_CXX_STANDARD 11)
15 | set(CMAKE_CXX_STANDARD_REQUIRED ON)
16 | 
17 | find_package(OpenCV REQUIRED)
18 | include_directories(${OpenCV_INCLUDE_DIRS})
19 | find_package(Threads REQUIRED)
20 | 
21 | link_directories(${CMAKE_CURRENT_SOURCE_DIR}/../3rd_party/ncnn/build/src)
22 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../pedestrian_detection)
23 | 
24 | add_library(detect_and_track SHARED lk_tracker.cpp munkres.cpp det_and_track.cpp)
25 | # Link the imported Threads target instead of relying on a raw -pthread flag.
26 | target_link_libraries(detect_and_track ${OpenCV_LIBS} pedestrian_detection Threads::Threads)
27 | 
--------------------------------------------------------------------------------
/detect_and_track/det_and_track.h:
--------------------------------------------------------------------------------
1 | #ifndef DET_AND_TRACK_H
2 | #define DET_AND_TRACK_H
3 | 
4 | class TrackerManager;
5 | 
6 | // NOTE(review): the original include targets and the template arguments on
7 | // std::vector were stripped when this file was exported (angle-bracket text
8 | // lost); the ones below are reconstructed from usage in main.cpp — please
9 | // verify against the original sources.
10 | #include <iostream>
11 | #include <string>
12 | #include <vector>
13 | #include <thread>
14 | #include <mutex>
15 | #include <chrono>
16 | #include <opencv2/core/core.hpp>
17 | #include <opencv2/opencv.hpp>
18 | 
19 | #include "lk_tracker.h"
20 | #include "object_detection.h"
21 | 
22 | // Couples a pedestrian detector with a multi-object tracker. detectionTask()
23 | // and trackTask() are intended to run on separate threads (see main.cpp);
24 | // callers serialize access to shared state through the public mutex_.
25 | class DetAndTrack
26 | {
27 |     bool get_new_detection_;    // a fresh detection result is available
28 |     bool first_time_detection_; // true until the first detection initializes the tracker
29 |     std::vector<cv::Rect> det_boxes_; // latest detector output
30 | 
31 |     cv::Mat current_frame_; // most recent frame handed in via setFrame()
32 | 
33 |     ObjectDetection* object_detection_ptr_;
34 | 
35 |     TrackerManager* track_manager_ptr_; // valid only after the first detection
36 | 
37 |     int detection_sleep_time_; // milliseconds between detector iterations
38 |     int track_sleep_time_;     // milliseconds between tracker iterations
39 | 
40 | public:
41 |     std::mutex mutex_; // guards frame/box state shared between threads
42 |     DetAndTrack();
43 |     DetAndTrack(int _detection_sleep_time, int _track_sleep_time);
44 |     void detectionTask();
45 |     void trackTask();
46 | 
47 |     // Current bounding boxes from the tracker; empty until the first detection.
48 |     std::vector<cv::Rect> getBox()
49 |     {
50 |         if(!first_time_detection_) // means the track_manager_ptr_ is initialized
51 |         {
52 |             return track_manager_ptr_->getAllBox();
53 |         }
54 |         else
55 |         {
56 |             std::vector<cv::Rect> res;
57 |             return res;
58 |         }
59 |     }
60 | 
61 |     // Per-track drawing colors, parallel to getBox(); empty until the first detection.
62 |     std::vector<cv::Scalar> getColor()
63 |     {
64 |         if(!first_time_detection_)
65 |         {
66 |             return track_manager_ptr_->getAllColor();
67 |         }
68 |         else
69 |         {
70 |             std::vector<cv::Scalar> res;
71 |             return res;
72 |         }
73 |     }
74 | 
75 |     void setFrame(const cv::Mat& _new_frame);
76 | };
77 | 
78 | #endif
79 | 
--------------------------------------------------------------------------------
/detect_and_track/munkres.h:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (c) 2007 John Weaver
3 |  *
4 |  * This program is free software; you can redistribute it and/or modify
5 |  * it under the terms of the GNU General Public License as published by
6 |  * the Free Software Foundation; either version 2 of the License, or
7 |  * (at your option) any later version.
8 |  *
9 |  * This program is distributed in the hope that it will be useful,
10 |  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 |  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12 |  * GNU General Public License for more details.
13 |  *
14 |  * You should have received a copy of the GNU General Public License
15 |  * along with this program; if not, write to the Free Software
16 |  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 |  */
18 | 
19 | #if !defined(_MUNKRES_H_)
20 | #define _MUNKRES_H_
21 | 
22 | // NOTE(review): include targets and template arguments were stripped when
23 | // this file was exported; reconstructed from John Weaver's munkres
24 | // implementation adapted to OpenCV matrices — please verify.
25 | #include <list>
26 | #include <utility>
27 | #include <opencv2/core/core.hpp>
28 | 
29 | // Kuhn-Munkres (Hungarian) algorithm: minimum-cost assignment over a matrix.
30 | class Munkres {
31 | public:
32 |     Munkres();
33 |     ~Munkres(){};
34 |     // Solves the assignment problem in-place on cost matrix m.
35 |     void solve(cv::Mat_<double> &m);
36 |     void diag(bool); // toggle diagnostic output
37 | private:
38 |     // Cell markings used by the step functions below.
39 |     static const int NORMAL = 0;
40 |     static const int STAR = 1;
41 |     static const int PRIME = 2;
42 |     inline bool find_uncovered_in_matrix(double, unsigned int&, unsigned int&) const;
43 |     inline bool pair_in_list(const std::pair<int,int> &, const std::list<std::pair<int,int> > &);
44 |     int step1(void);
45 |     int step2(void);
46 |     int step3(void);
47 |     int step4(void);
48 |     int step5(void);
49 |     int step6(void);
50 | 
51 |     cv::Mat_<int> mask_matrix;   // NORMAL/STAR/PRIME mark per cell
52 |     cv::Mat_<double> matrix;     // working copy of the cost matrix
53 |     bool *row_mask;
54 |     bool *col_mask;
55 |     unsigned int saverow, savecol;
56 |     bool isDiag;
57 | };
58 | 
59 | #endif /* !defined(_MUNKRES_H_) */
60 | 
--------------------------------------------------------------------------------
/main.cpp:
--------------------------------------------------------------------------------
1 | #include "det_and_track.h"
2 | // NOTE(review): include targets and std::vector template arguments were
3 | // stripped when this file was exported; reconstructed from usage below.
4 | #include <iostream>
5 | #include <vector>
6 | #include <thread>
7 | #include <chrono>
8 | #include <opencv2/opencv.hpp>
9 | 
10 | // Reads frames from a video, hands them to DetAndTrack (whose detection and
11 | // tracking tasks run on their own threads), and draws the tracked boxes.
12 | int main( int argc, char** argv )
13 | {
14 |     (void)argc; // unused
15 |     (void)argv; // unused
16 | 
17 |     DetAndTrack dtObject;
18 | 
19 |     cv::VideoCapture cap("../MOT17-11.mp4");
20 |     //cv::VideoCapture cap(0);
21 |     if(!cap.isOpened())
22 |     {
23 |         std::cerr << "failed to open video source" << std::endl;
24 |         return 1;
25 |     }
26 | 
27 |     cv::Mat frame;
28 |     cap >> frame;
29 |     if(frame.empty())
30 |     {
31 |         std::cerr << "no frames in video source" << std::endl;
32 |         return 1;
33 |     }
34 |     dtObject.setFrame(frame);
35 | 
36 |     std::thread detThread(&DetAndTrack::detectionTask, &dtObject);
37 |     std::thread trackThread(&DetAndTrack::trackTask, &dtObject);
38 | 
39 |     while(true)
40 |     {
41 |         cap >> frame;
42 |         if(frame.empty())
43 |         {
44 |             break;
45 |         }
46 |         cv::Mat result = frame.clone();
47 |         // Pace playback (~25 fps) when reading from a file; remove for a live camera.
48 |         std::this_thread::sleep_for(std::chrono::milliseconds(40));
49 |         std::vector<cv::Rect> boxes;
50 |         std::vector<cv::Scalar> colors;
51 | 
52 |         dtObject.mutex_.lock();
53 |         boxes = dtObject.getBox();
54 |         colors = dtObject.getColor();
55 |         dtObject.setFrame(frame);
56 |         dtObject.mutex_.unlock();
57 | 
58 |         // Bound by both sizes: guards against a box/color count mismatch that
59 |         // would have indexed colors[] out of range in the original loop.
60 |         for(std::size_t i = 0; i < boxes.size() && i < colors.size(); ++i)
61 |         {
62 |             cv::rectangle(result, boxes[i], colors[i], 2, 1);
63 |             std::cout << boxes[i] << std::endl;
64 |         }
65 |         cv::imshow("detandtrack", result);
66 |         if(cv::waitKey(1) == 27) // ESC quits
67 |         {
68 |             break;
69 |         }
70 |     }
71 | 
72 |     detThread.join();
73 |     trackThread.join();
74 |     return 0;
75 | }
76 | 
--------------------------------------------------------------------------------
/pedestrian_detection/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | # Built as a subdirectory of the top-level project; see ../CMakeLists.txt.
2 | 
3 | # Only default the build type; never stomp a user-provided value.
4 | if(NOT CMAKE_BUILD_TYPE)
5 |   set(CMAKE_BUILD_TYPE Release)
6 | endif()
7 | 
8 | # REQUIRED already aborts configuration if OpenCV is missing, so no
9 | # separate OpenCV_FOUND check is needed.
10 | find_package(OpenCV REQUIRED)
11 | message(STATUS "OpenCV_LIBS: ${OpenCV_LIBS}")
12 | message(STATUS "OpenCV_INCLUDE_DIRS: ${OpenCV_INCLUDE_DIRS}")
13 | 
14 | find_package(OpenMP)
15 | if(OpenMP_CXX_FOUND OR OPENMP_FOUND)
16 |   set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
17 |   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
18 | endif()
19 | 
20 | # Request C++11 the standard CMake way instead of injecting raw -std flags.
21 | set(CMAKE_CXX_STANDARD 11)
22 | set(CMAKE_CXX_STANDARD_REQUIRED ON)
23 | 
24 | include_directories(${OpenCV_INCLUDE_DIRS})
25 | include_directories(${CMAKE_CURRENT_SOURCE_DIR})
26 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../3rd_party/ncnn/src)
27 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../3rd_party/ncnn/build/src)
28 | # link_directories only affects targets created after this call, so it must
29 | # precede add_library/add_executable (the original file also called it after
30 | # add_executable, which had no effect and was removed).
31 | link_directories(${CMAKE_CURRENT_SOURCE_DIR}/../3rd_party/ncnn/build/src)
32 | 
33 | # Pull the whole ncnn static archive in so no layer registrations are dropped.
34 | set(NCNNLIB -Wl,--whole-archive ncnn -Wl,--no-whole-archive)
35 | 
36 | add_library(pedestrian_detection STATIC object_detection.cpp)
37 | target_link_libraries(pedestrian_detection ${NCNNLIB} ${OpenCV_LIBS})
38 | # Required because this static library is linked into the detect_and_track
39 | # shared library.
40 | set_property(TARGET pedestrian_detection PROPERTY POSITION_INDEPENDENT_CODE ON)
41 | 
42 | add_executable(detection_test test.cpp)
43 | target_link_libraries(detection_test pedestrian_detection ${OpenCV_LIBS})
44 | 
--------------------------------------------------------------------------------
/pedestrian_detection/object_detection.cpp:
--------------------------------------------------------------------------------
1 | #include "object_detection.h"
2 | #include "net.h"
3 | 
4 | static ncnn::Net mobilenet; // file-scope detector network, loaded once by the constructor below
5 | 
6 | ObjectDetection::ObjectDetection(const std::string _model_path) // NOTE(review): assumes _model_path ends with a path separator — confirm against callers
7 | {
8 | mobilenet.load_param((_model_path+"ssdperson10695.param").c_str()); // network structure
9 | mobilenet.load_model((_model_path+"ssdperson10695.bin").c_str()); // trained weights
10 | }
11 |
12 | std::vector ObjectDetection::detectObject(const cv::Mat& _frame)
13 | {
14 | std::vector boxes;
15 |
16 | int img_h = _frame.size().height;
17 | int img_w = _frame.size().width;
18 |
19 | int input_size = 300;
20 | ncnn::Mat in = ncnn::Mat::from_pixels_resize(_frame.data, ncnn::Mat::PIXEL_BGR, _frame.cols, _frame.rows, input_size, input_size);
21 |
22 | const float mean_vals[3] = {127.5f, 127.5f, 127.5f};
23 | const float norm_vals[3] = {1.0/127.5,1.0/127.5,1.0/127.5};
24 | in.substract_mean_normalize(mean_vals, norm_vals);
25 |
26 | ncnn::Mat out;
27 |
28 | ncnn::Extractor ex = mobilenet.create_extractor();
29 | ex.set_light_mode(true);
30 | //ex.set_num_threads(8);
31 | ex.input("data", in);
32 | ex.extract("detection_out",out);
33 |
34 | std::vector