├── .gitignore
├── .travis.yml
├── CMakeLists.txt
├── CONTRIBUTING.md
├── Info.plist
├── LICENSE.txt
├── README.md
├── benchmark
├── CMakeLists.txt
├── README.md
├── alexnet.param
├── benchncnn.cpp
├── googlenet.param
├── mnasnet.param
├── mobilenet.param
├── mobilenet_ssd.param
├── mobilenet_v2.param
├── mobilenet_yolo.param
├── mobilenet_yolov3.param
├── proxylessnasnet.param
├── resnet18.param
├── shufflenet.param
├── squeezenet.param
├── squeezenet_ssd.param
└── vgg16.param
├── build.sh
├── examples
├── CMakeLists.txt
├── fasterrcnn.cpp
├── mobilenetssd.cpp
├── mobilenetv2ssdlite.cpp
├── rfcn.cpp
├── shufflenetv2.cpp
├── squeezencnn
│ ├── AndroidManifest.xml
│ ├── ant.properties
│ ├── assets
│ │ ├── squeezenet_v1.1.bin
│ │ ├── squeezenet_v1.1.param.bin
│ │ └── synset_words.txt
│ ├── build.xml
│ ├── jni
│ │ ├── Android.mk
│ │ ├── Application.mk
│ │ ├── squeezencnn_jni.cpp
│ │ └── squeezenet_v1.1.id.h
│ ├── local.properties
│ ├── proguard-project.txt
│ ├── project.properties
│ ├── res
│ │ ├── layout
│ │ │ └── main.xml
│ │ └── values
│ │ │ └── strings.xml
│ └── src
│ │ └── com
│ │ └── tencent
│ │ └── squeezencnn
│ │ ├── MainActivity.java
│ │ └── SqueezeNcnn.java
├── squeezenet.cpp
├── squeezenet_v1.1.bin
├── squeezenet_v1.1.caffemodel
├── squeezenet_v1.1.param
├── squeezenet_v1.1.prototxt
├── squeezenetssd.cpp
├── synset_words.txt
├── yolov2.cpp
└── yolov3.cpp
├── images
├── 128-ncnn.png
├── 16-ncnn.png
├── 256-ncnn.png
├── 32-ncnn.png
└── 64-ncnn.png
├── package.sh
├── src
├── CMakeLists.txt
├── allocator.cpp
├── allocator.h
├── benchmark.cpp
├── benchmark.h
├── blob.cpp
├── blob.h
├── cpu.cpp
├── cpu.h
├── layer.cpp
├── layer.h
├── layer
│ ├── absval.cpp
│ ├── absval.h
│ ├── argmax.cpp
│ ├── argmax.h
│ ├── arm
│ │ ├── absval_arm.cpp
│ │ ├── absval_arm.h
│ │ ├── batchnorm_arm.cpp
│ │ ├── batchnorm_arm.h
│ │ ├── bias_arm.cpp
│ │ ├── bias_arm.h
│ │ ├── clip_arm.cpp
│ │ ├── clip_arm.h
│ │ ├── convolution_1x1.h
│ │ ├── convolution_1x1_int8.h
│ │ ├── convolution_2x2.h
│ │ ├── convolution_3x3.h
│ │ ├── convolution_3x3_int8.h
│ │ ├── convolution_4x4.h
│ │ ├── convolution_5x5.h
│ │ ├── convolution_7x7.h
│ │ ├── convolution_arm.cpp
│ │ ├── convolution_arm.h
│ │ ├── convolutiondepthwise_3x3.h
│ │ ├── convolutiondepthwise_3x3_int8.h
│ │ ├── convolutiondepthwise_5x5.h
│ │ ├── convolutiondepthwise_arm.cpp
│ │ ├── convolutiondepthwise_arm.h
│ │ ├── deconvolution_3x3.h
│ │ ├── deconvolution_4x4.h
│ │ ├── deconvolution_arm.cpp
│ │ ├── deconvolution_arm.h
│ │ ├── deconvolutiondepthwise_arm.cpp
│ │ ├── deconvolutiondepthwise_arm.h
│ │ ├── dequantize_arm.cpp
│ │ ├── dequantize_arm.h
│ │ ├── eltwise_arm.cpp
│ │ ├── eltwise_arm.h
│ │ ├── innerproduct_arm.cpp
│ │ ├── innerproduct_arm.h
│ │ ├── lrn_arm.cpp
│ │ ├── lrn_arm.h
│ │ ├── neon_mathfun.h
│ │ ├── pooling_2x2.h
│ │ ├── pooling_3x3.h
│ │ ├── pooling_arm.cpp
│ │ ├── pooling_arm.h
│ │ ├── prelu_arm.cpp
│ │ ├── prelu_arm.h
│ │ ├── quantize_arm.cpp
│ │ ├── quantize_arm.h
│ │ ├── relu_arm.cpp
│ │ ├── relu_arm.h
│ │ ├── scale_arm.cpp
│ │ ├── scale_arm.h
│ │ ├── sigmoid_arm.cpp
│ │ ├── sigmoid_arm.h
│ │ ├── softmax_arm.cpp
│ │ └── softmax_arm.h
│ ├── batchnorm.cpp
│ ├── batchnorm.h
│ ├── bias.cpp
│ ├── bias.h
│ ├── binaryop.cpp
│ ├── binaryop.h
│ ├── bnll.cpp
│ ├── bnll.h
│ ├── clip.cpp
│ ├── clip.h
│ ├── concat.cpp
│ ├── concat.h
│ ├── convolution.cpp
│ ├── convolution.h
│ ├── convolutiondepthwise.cpp
│ ├── convolutiondepthwise.h
│ ├── crop.cpp
│ ├── crop.h
│ ├── deconvolution.cpp
│ ├── deconvolution.h
│ ├── deconvolutiondepthwise.cpp
│ ├── deconvolutiondepthwise.h
│ ├── dequantize.cpp
│ ├── dequantize.h
│ ├── detectionoutput.cpp
│ ├── detectionoutput.h
│ ├── dropout.cpp
│ ├── dropout.h
│ ├── eltwise.cpp
│ ├── eltwise.h
│ ├── elu.cpp
│ ├── elu.h
│ ├── embed.cpp
│ ├── embed.h
│ ├── exp.cpp
│ ├── exp.h
│ ├── expanddims.cpp
│ ├── expanddims.h
│ ├── flatten.cpp
│ ├── flatten.h
│ ├── innerproduct.cpp
│ ├── innerproduct.h
│ ├── input.cpp
│ ├── input.h
│ ├── instancenorm.cpp
│ ├── instancenorm.h
│ ├── interp.cpp
│ ├── interp.h
│ ├── log.cpp
│ ├── log.h
│ ├── lrn.cpp
│ ├── lrn.h
│ ├── lstm.cpp
│ ├── lstm.h
│ ├── memorydata.cpp
│ ├── memorydata.h
│ ├── mvn.cpp
│ ├── mvn.h
│ ├── normalize.cpp
│ ├── normalize.h
│ ├── padding.cpp
│ ├── padding.h
│ ├── permute.cpp
│ ├── permute.h
│ ├── pooling.cpp
│ ├── pooling.h
│ ├── power.cpp
│ ├── power.h
│ ├── prelu.cpp
│ ├── prelu.h
│ ├── priorbox.cpp
│ ├── priorbox.h
│ ├── proposal.cpp
│ ├── proposal.h
│ ├── psroipooling.cpp
│ ├── psroipooling.h
│ ├── quantize.cpp
│ ├── quantize.h
│ ├── reduction.cpp
│ ├── reduction.h
│ ├── relu.cpp
│ ├── relu.h
│ ├── reorg.cpp
│ ├── reorg.h
│ ├── reshape.cpp
│ ├── reshape.h
│ ├── rnn.cpp
│ ├── rnn.h
│ ├── roialign.cpp
│ ├── roialign.h
│ ├── roipooling.cpp
│ ├── roipooling.h
│ ├── scale.cpp
│ ├── scale.h
│ ├── shufflechannel.cpp
│ ├── shufflechannel.h
│ ├── sigmoid.cpp
│ ├── sigmoid.h
│ ├── slice.cpp
│ ├── slice.h
│ ├── softmax.cpp
│ ├── softmax.h
│ ├── split.cpp
│ ├── split.h
│ ├── spp.cpp
│ ├── spp.h
│ ├── squeeze.cpp
│ ├── squeeze.h
│ ├── tanh.cpp
│ ├── tanh.h
│ ├── threshold.cpp
│ ├── threshold.h
│ ├── tile.cpp
│ ├── tile.h
│ ├── unaryop.cpp
│ ├── unaryop.h
│ ├── x86
│ │ ├── avx_mathfun.h
│ │ ├── convolution_1x1.h
│ │ ├── convolution_1x1_int8.h
│ │ ├── convolution_3x3.h
│ │ ├── convolution_3x3_int8.h
│ │ ├── convolution_5x5.h
│ │ ├── convolution_x86.cpp
│ │ ├── convolution_x86.h
│ │ ├── convolutiondepthwise_3x3.h
│ │ ├── convolutiondepthwise_3x3_int8.h
│ │ ├── convolutiondepthwise_x86.cpp
│ │ ├── convolutiondepthwise_x86.h
│ │ └── sse_mathfun.h
│ ├── yolodetectionoutput.cpp
│ ├── yolodetectionoutput.h
│ ├── yolov3detectionoutput.cpp
│ └── yolov3detectionoutput.h
├── layer_declaration.h.in
├── layer_registry.h.in
├── layer_type.h
├── layer_type_enum.h.in
├── mat.cpp
├── mat.h
├── mat_pixel.cpp
├── mat_pixel_resize.cpp
├── modelbin.cpp
├── modelbin.h
├── net.cpp
├── net.h
├── opencv.cpp
├── opencv.h
├── paramdict.cpp
├── paramdict.h
└── platform.h.in
├── toolchains
├── aarch64-linux-gnu.toolchain.cmake
├── arm-linux-gnueabi.toolchain.cmake
├── arm-linux-gnueabihf.toolchain.cmake
├── himix100.toolchain.cmake
├── hisiv300.toolchain.cmake
├── hisiv500.toolchain.cmake
├── host.gcc.toolchain.cmake
├── ios.toolchain.cmake
├── iossimxc.toolchain.cmake
├── iosxc.toolchain.cmake
└── pi3.toolchain.cmake
└── tools
├── CMakeLists.txt
├── caffe
├── CMakeLists.txt
├── caffe.proto
└── caffe2ncnn.cpp
├── darknet
└── readme.txt
├── mxnet
├── CMakeLists.txt
└── mxnet2ncnn.cpp
├── ncnn2mem.cpp
├── onnx
├── CMakeLists.txt
├── onnx.proto
└── onnx2ncnn.cpp
├── plugin
├── ImageWatchNCNN.natvis
├── README.md
└── snapshot.png
├── pytorch
└── readme.txt
└── tensorflow
├── CMakeLists.txt
├── attr_value.proto
├── function.proto
├── graph.proto
├── node_def.proto
├── op_def.proto
├── resource_handle.proto
├── tensor.proto
├── tensor_shape.proto
├── tensorflow2ncnn.cpp
├── types.proto
└── versions.proto
/.gitignore:
--------------------------------------------------------------------------------
1 | # CMake build directory
2 | *build*/
3 |
4 | # Backup files.
5 | *~
6 |
7 | # Prerequisites
8 | *.d
9 |
10 | # Compiled Object files
11 | *.slo
12 | *.lo
13 | *.o
14 | *.obj
15 |
16 | # Precompiled Headers
17 | *.gch
18 | *.pch
19 |
20 | # Compiled Dynamic libraries
21 | *.so
22 | *.dylib
23 | *.dll
24 |
25 | # Fortran module files
26 | *.mod
27 | *.smod
28 |
29 | # Compiled Static libraries
30 | *.lai
31 | *.la
32 | *.a
33 | *.lib
34 |
35 | # Executables
36 | *.exe
37 | *.out
38 | *.app
39 |
40 | # Compiled pb file
41 |
42 | *.pb.cc
43 | *.pb.h
44 |
45 | # Compiled framework
46 | ncnn.framework/
47 |
48 | # IDE
49 | .idea/
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 |
2 | ## Acknowledgements
3 | - Thanks to bug1989 [https://github.com/bug1989] for contributing the initial quantized int8 inference code and a large variety of device benchmarks
4 |
--------------------------------------------------------------------------------
/Info.plist:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | CFBundleName
6 | ncnn
7 | CFBundleIdentifier
8 | com.tencent.ncnn
9 | CFBundleVersion
10 | 1.0
11 | CFBundleShortVersionString
12 | 1.0
13 | CFBundleSignature
14 | ????
15 | CFBundlePackageType
16 | FMWK
17 |
18 |
19 |
--------------------------------------------------------------------------------
/benchmark/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src)
3 | include_directories(${CMAKE_CURRENT_BINARY_DIR}/../src)
4 |
5 | add_executable(benchncnn benchncnn.cpp)
6 | set_property(TARGET benchncnn PROPERTY COMPILE_FLAGS "-fpie")
7 | set_property(TARGET benchncnn PROPERTY LINK_FLAGS "-pie")
8 | target_link_libraries(benchncnn ncnn)
9 |
--------------------------------------------------------------------------------
/benchmark/alexnet.param:
--------------------------------------------------------------------------------
1 | 7767517
2 | 24 24
3 | Input data 0 1 data 0=227 1=227 2=3
4 | Convolution conv1 1 1 data conv1 0=96 1=11 2=1 3=4 4=0 5=1 6=34848
5 | ReLU relu1 1 1 conv1 conv1_relu1
6 | LRN norm1 1 1 conv1_relu1 norm1 0=0 1=5 2=0.000100 3=0.750000
7 | Pooling pool1 1 1 norm1 pool1 0=0 1=3 2=2 3=0 4=0
8 | ConvolutionDepthWise conv2 1 1 pool1 conv2 0=256 1=5 2=1 3=1 4=2 5=1 6=307200 7=2
9 | ReLU relu2 1 1 conv2 conv2_relu2
10 | LRN norm2 1 1 conv2_relu2 norm2 0=0 1=5 2=0.000100 3=0.750000
11 | Pooling pool2 1 1 norm2 pool2 0=0 1=3 2=2 3=0 4=0
12 | Convolution conv3 1 1 pool2 conv3 0=384 1=3 2=1 3=1 4=1 5=1 6=884736
13 | ReLU relu3 1 1 conv3 conv3_relu3
14 | ConvolutionDepthWise conv4 1 1 conv3_relu3 conv4 0=384 1=3 2=1 3=1 4=1 5=1 6=663552 7=2
15 | ReLU relu4 1 1 conv4 conv4_relu4
16 | ConvolutionDepthWise conv5 1 1 conv4_relu4 conv5 0=256 1=3 2=1 3=1 4=1 5=1 6=442368 7=2
17 | ReLU relu5 1 1 conv5 conv5_relu5
18 | Pooling pool5 1 1 conv5_relu5 pool5 0=0 1=3 2=2 3=0 4=0
19 | InnerProduct fc6 1 1 pool5 fc6 0=4096 1=1 2=37748736
20 | ReLU relu6 1 1 fc6 fc6_relu6
21 | Dropout drop6 1 1 fc6_relu6 fc6_drop6
22 | InnerProduct fc7 1 1 fc6_drop6 fc7 0=4096 1=1 2=16777216
23 | ReLU relu7 1 1 fc7 fc7_relu7
24 | Dropout drop7 1 1 fc7_relu7 fc7_drop7
25 | InnerProduct fc8 1 1 fc7_drop7 fc8 0=1000 1=1 2=4096000
26 | Softmax prob 1 1 fc8 prob 0=0
27 |
--------------------------------------------------------------------------------
/examples/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | find_package(OpenCV QUIET COMPONENTS core highgui imgproc imgcodecs)
3 | if(NOT OpenCV_FOUND)
4 | find_package(OpenCV REQUIRED COMPONENTS core highgui imgproc)
5 | endif()
6 |
7 | include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../src)
8 | include_directories(${CMAKE_CURRENT_BINARY_DIR}/../src)
9 |
10 | add_executable(squeezenet squeezenet.cpp)
11 | target_link_libraries(squeezenet ncnn ${OpenCV_LIBS})
12 |
13 | add_executable(fasterrcnn fasterrcnn.cpp)
14 | target_link_libraries(fasterrcnn ncnn ${OpenCV_LIBS})
15 |
16 | add_executable(rfcn rfcn.cpp)
17 | target_link_libraries(rfcn ncnn ${OpenCV_LIBS})
18 |
19 | add_executable(yolov2 yolov2.cpp)
20 | target_link_libraries(yolov2 ncnn ${OpenCV_LIBS})
21 |
22 | add_executable(yolov3 yolov3.cpp)
23 | target_link_libraries(yolov3 ncnn ${OpenCV_LIBS})
24 |
25 | add_executable(mobilenetv2ssdlite mobilenetv2ssdlite.cpp)
26 | target_link_libraries(mobilenetv2ssdlite ncnn ${OpenCV_LIBS})
27 |
28 | add_executable(mobilenetssd mobilenetssd.cpp)
29 | target_link_libraries(mobilenetssd ncnn ${OpenCV_LIBS})
30 |
31 | add_executable(squeezenetssd squeezenetssd.cpp)
32 | target_link_libraries(squeezenetssd ncnn ${OpenCV_LIBS})
33 |
34 | add_executable(shufflenetv2 shufflenetv2.cpp)
35 | target_link_libraries(shufflenetv2 ncnn ${OpenCV_LIBS})
36 |
--------------------------------------------------------------------------------
/examples/squeezencnn/AndroidManifest.xml:
--------------------------------------------------------------------------------
1 |
2 |
6 |
7 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/examples/squeezencnn/ant.properties:
--------------------------------------------------------------------------------
1 | # This file is used to override default values used by the Ant build system.
2 | #
3 | # This file must be checked into Version Control Systems, as it is
4 | # integral to the build system of your project.
5 |
6 | # This file is only used by the Ant script.
7 |
8 | # You can use this to override default values such as
9 | # 'source.dir' for the location of your java source folder and
10 | # 'out.dir' for the location of your output folder.
11 |
12 | # You can also use it to define how the release builds are signed by declaring
13 | # the following properties:
14 | # 'key.store' for the location of your keystore and
15 | # 'key.alias' for the name of the key to use.
16 | # The password will be asked during the build when you use the 'release' target.
17 |
18 | key.store=/home/nihui/osd/nihuini-release-key.keystore
19 | key.alias=nihuini
20 | key.store.password=nihuini
21 | key.alias.password=nihuini
22 |
--------------------------------------------------------------------------------
/examples/squeezencnn/assets/squeezenet_v1.1.bin:
--------------------------------------------------------------------------------
1 | ../../squeezenet_v1.1.bin
--------------------------------------------------------------------------------
/examples/squeezencnn/assets/squeezenet_v1.1.param.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiangxiluning/ncnn-tensorflow/4f84be38d58102529c94d87d48aac71e5739fa9e/examples/squeezencnn/assets/squeezenet_v1.1.param.bin
--------------------------------------------------------------------------------
/examples/squeezencnn/assets/synset_words.txt:
--------------------------------------------------------------------------------
1 | ../../synset_words.txt
--------------------------------------------------------------------------------
/examples/squeezencnn/jni/Android.mk:
--------------------------------------------------------------------------------
1 | LOCAL_PATH := $(call my-dir)
2 |
3 | # change this folder path to yours
4 | NCNN_INSTALL_PATH := /home/nihui/osd/ncnn/ncnn-android-lib
5 |
6 | include $(CLEAR_VARS)
7 | LOCAL_MODULE := ncnn
8 | LOCAL_SRC_FILES := $(NCNN_INSTALL_PATH)/$(TARGET_ARCH_ABI)/libncnn.a
9 | include $(PREBUILT_STATIC_LIBRARY)
10 |
11 | include $(CLEAR_VARS)
12 |
13 | LOCAL_MODULE := squeezencnn
14 | LOCAL_SRC_FILES := squeezencnn_jni.cpp
15 |
16 | LOCAL_C_INCLUDES := $(NCNN_INSTALL_PATH)/include
17 |
18 | LOCAL_STATIC_LIBRARIES := ncnn
19 |
20 | LOCAL_CFLAGS := -O2 -fvisibility=hidden -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math
21 | LOCAL_CPPFLAGS := -O2 -fvisibility=hidden -fvisibility-inlines-hidden -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math
22 | LOCAL_LDFLAGS += -Wl,--gc-sections
23 |
24 | LOCAL_CFLAGS += -fopenmp
25 | LOCAL_CPPFLAGS += -fopenmp
26 | LOCAL_LDFLAGS += -fopenmp
27 |
28 | LOCAL_LDLIBS := -lz -llog -ljnigraphics
29 |
30 | include $(BUILD_SHARED_LIBRARY)
31 |
--------------------------------------------------------------------------------
/examples/squeezencnn/jni/Application.mk:
--------------------------------------------------------------------------------
1 |
2 | # APP_STL := stlport_static
3 | APP_STL := gnustl_static
4 | # APP_ABI := armeabi armeabi-v7a
5 | APP_ABI := armeabi-v7a arm64-v8a
6 | APP_PLATFORM := android-9
7 | # NDK_TOOLCHAIN_VERSION := 4.9
8 |
--------------------------------------------------------------------------------
/examples/squeezencnn/local.properties:
--------------------------------------------------------------------------------
1 | # This file is automatically generated by Android Tools.
2 | # Do not modify this file -- YOUR CHANGES WILL BE ERASED!
3 | #
4 | # This file must *NOT* be checked into Version Control Systems,
5 | # as it contains information specific to your local configuration.
6 |
7 | # location of the SDK. This is only used by Ant
8 | # For customization when using a Version Control System, please read the
9 | # header note.
10 | sdk.dir=/home/nihui/osd/android-sdk-linux
11 |
--------------------------------------------------------------------------------
/examples/squeezencnn/proguard-project.txt:
--------------------------------------------------------------------------------
1 | # To enable ProGuard in your project, edit project.properties
2 | # to define the proguard.config property as described in that file.
3 | #
4 | # Add project specific ProGuard rules here.
5 | # By default, the flags in this file are appended to flags specified
6 | # in ${sdk.dir}/tools/proguard/proguard-android.txt
7 | # You can edit the include path and order by changing the ProGuard
8 | # include property in project.properties.
9 | #
10 | # For more details, see
11 | # http://developer.android.com/guide/developing/tools/proguard.html
12 |
13 | # Add any project specific keep options here:
14 |
15 | # If your project uses WebView with JS, uncomment the following
16 | # and specify the fully qualified class name to the JavaScript interface
17 | # class:
18 | #-keepclassmembers class fqcn.of.javascript.interface.for.webview {
19 | # public *;
20 | #}
21 |
--------------------------------------------------------------------------------
/examples/squeezencnn/project.properties:
--------------------------------------------------------------------------------
1 | # This file is automatically generated by Android Tools.
2 | # Do not modify this file -- YOUR CHANGES WILL BE ERASED!
3 | #
4 | # This file must be checked in Version Control Systems.
5 | #
6 | # To customize properties used by the Ant build system edit
7 | # "ant.properties", and override values to adapt the script to your
8 | # project structure.
9 | #
10 | # To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
11 | #proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
12 |
13 | # Project target.
14 | target=android-9
15 |
--------------------------------------------------------------------------------
/examples/squeezencnn/res/layout/main.xml:
--------------------------------------------------------------------------------
1 |
2 |
6 |
7 |
11 |
12 |
17 |
22 |
23 |
24 |
29 |
30 |
35 |
36 |
37 |
--------------------------------------------------------------------------------
/examples/squeezencnn/res/values/strings.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | squeezencnn
4 |
5 |
--------------------------------------------------------------------------------
/examples/squeezencnn/src/com/tencent/squeezencnn/SqueezeNcnn.java:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | package com.tencent.squeezencnn;
16 |
17 | import android.graphics.Bitmap;
18 | import android.content.Context;
19 |
20 | public class SqueezeNcnn
21 | {
22 | public native boolean Init(byte[] param, byte[] bin, byte[] words);
23 |
24 | public native String Detect(Bitmap bitmap);
25 |
26 | static {
27 | System.loadLibrary("squeezencnn");
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/examples/squeezenet_v1.1.bin:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiangxiluning/ncnn-tensorflow/4f84be38d58102529c94d87d48aac71e5739fa9e/examples/squeezenet_v1.1.bin
--------------------------------------------------------------------------------
/examples/squeezenet_v1.1.caffemodel:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiangxiluning/ncnn-tensorflow/4f84be38d58102529c94d87d48aac71e5739fa9e/examples/squeezenet_v1.1.caffemodel
--------------------------------------------------------------------------------
/images/128-ncnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiangxiluning/ncnn-tensorflow/4f84be38d58102529c94d87d48aac71e5739fa9e/images/128-ncnn.png
--------------------------------------------------------------------------------
/images/16-ncnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiangxiluning/ncnn-tensorflow/4f84be38d58102529c94d87d48aac71e5739fa9e/images/16-ncnn.png
--------------------------------------------------------------------------------
/images/256-ncnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiangxiluning/ncnn-tensorflow/4f84be38d58102529c94d87d48aac71e5739fa9e/images/256-ncnn.png
--------------------------------------------------------------------------------
/images/32-ncnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiangxiluning/ncnn-tensorflow/4f84be38d58102529c94d87d48aac71e5739fa9e/images/32-ncnn.png
--------------------------------------------------------------------------------
/images/64-ncnn.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiangxiluning/ncnn-tensorflow/4f84be38d58102529c94d87d48aac71e5739fa9e/images/64-ncnn.png
--------------------------------------------------------------------------------
/package.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 |
3 | NAME=ncnn
4 |
5 | ##### package android lib
6 | ANDROIDPKGNAME=${NAME}-android-lib
7 | rm -rf $ANDROIDPKGNAME
8 | mkdir -p $ANDROIDPKGNAME
9 | mkdir -p $ANDROIDPKGNAME/armeabi-v7a
10 | mkdir -p $ANDROIDPKGNAME/arm64-v8a
11 | mkdir -p $ANDROIDPKGNAME/include
12 | cp build-android-armv7/install/lib/lib${NAME}.a $ANDROIDPKGNAME/armeabi-v7a/
13 | cp build-android-aarch64/install/lib/lib${NAME}.a $ANDROIDPKGNAME/arm64-v8a/
14 | cp build-android-aarch64/install/include/* $ANDROIDPKGNAME/include/
15 | rm -f $ANDROIDPKGNAME.zip
16 | zip -9 -r $ANDROIDPKGNAME.zip $ANDROIDPKGNAME
17 |
18 | ##### package ios framework
19 | IOSPKGNAME=${NAME}.framework
20 | rm -rf $IOSPKGNAME
21 | mkdir -p $IOSPKGNAME/Versions/A/Headers
22 | mkdir -p $IOSPKGNAME/Versions/A/Resources
23 | ln -s A $IOSPKGNAME/Versions/Current
24 | ln -s Versions/Current/Headers $IOSPKGNAME/Headers
25 | ln -s Versions/Current/Resources $IOSPKGNAME/Resources
26 | ln -s Versions/Current/${NAME} $IOSPKGNAME/${NAME}
27 | lipo -create \
28 | build-ios/install/lib/lib${NAME}.a \
29 | build-ios-sim/install/lib/lib${NAME}.a \
30 | -o $IOSPKGNAME/Versions/A/${NAME}
31 | cp -r build-ios/install/include/* $IOSPKGNAME/Versions/A/Headers/
32 | cp Info.plist ${IOSPKGNAME}/Versions/A/Resources/
33 | rm -f $IOSPKGNAME.zip
34 | zip -9 -y -r $IOSPKGNAME.zip $IOSPKGNAME
35 |
36 |
--------------------------------------------------------------------------------
/src/benchmark.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef NCNN_BENCHMARK_H
16 | #define NCNN_BENCHMARK_H
17 |
18 | #include "platform.h"
19 | #include "mat.h"
20 | #include "layer.h"
21 |
22 | namespace ncnn {
23 |
24 | // get now timestamp in ms
25 | double get_current_time();
26 |
27 | #if NCNN_BENCHMARK
28 |
29 | void benchmark(const Layer* layer, double start, double end);
30 | void benchmark(const Layer* layer, const Mat& bottom_blob, Mat& top_blob, double start, double end);
31 |
32 | #endif // NCNN_BENCHMARK
33 |
34 | } // namespace ncnn
35 |
36 | #endif // NCNN_BENCHMARK_H
37 |
--------------------------------------------------------------------------------
/src/blob.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "blob.h"
16 |
17 | namespace ncnn {
18 |
19 | Blob::Blob()
20 | {
21 | producer = -1;
22 | }
23 |
24 | } // namespace ncnn
25 |
--------------------------------------------------------------------------------
/src/blob.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef NCNN_BLOB_H
16 | #define NCNN_BLOB_H
17 |
18 | #include
19 | #include
20 | #include "platform.h"
21 |
22 | namespace ncnn {
23 |
24 | class Blob
25 | {
26 | public:
27 | // empty
28 | Blob();
29 |
30 | public:
31 | #if NCNN_STRING
32 | // blob name
33 | std::string name;
34 | #endif // NCNN_STRING
35 | // layer index which produce this blob as output
36 | int producer;
37 | // layer index which need this blob as input
38 | std::vector consumers;
39 | };
40 |
41 | } // namespace ncnn
42 |
43 | #endif // NCNN_BLOB_H
44 |
--------------------------------------------------------------------------------
/src/cpu.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef NCNN_CPU_H
16 | #define NCNN_CPU_H
17 |
18 | namespace ncnn {
19 |
20 | // test optional cpu features
21 | // neon = armv7 neon or aarch64 asimd
22 | int cpu_support_arm_neon();
23 | // vfpv4 = armv7 fp16 + fma
24 | int cpu_support_arm_vfpv4();
25 | // asimdhp = aarch64 asimd half precision
26 | int cpu_support_arm_asimdhp();
27 |
28 | // cpu info
29 | int get_cpu_count();
30 |
31 | // bind all threads on little clusters if powersave enabled
32 | // affects HMP arch cpu like ARM big.LITTLE
33 | // only implemented on android at the moment
34 | // switching powersave is expensive and not thread-safe
35 | // 0 = all cores enabled(default)
36 | // 1 = only little clusters enabled
37 | // 2 = only big clusters enabled
38 | // return 0 if success for setter function
39 | int get_cpu_powersave();
40 | int set_cpu_powersave(int powersave);
41 |
42 | // misc function wrapper for openmp routines
43 | int get_omp_num_threads();
44 | void set_omp_num_threads(int num_threads);
45 |
46 | int get_omp_dynamic();
47 | void set_omp_dynamic(int dynamic);
48 |
49 | } // namespace ncnn
50 |
51 | #endif // NCNN_CPU_H
52 |
--------------------------------------------------------------------------------
/src/layer/absval.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "absval.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(AbsVal)
20 |
21 | AbsVal::AbsVal()
22 | {
23 | one_blob_only = true;
24 | support_inplace = true;
25 | }
26 |
27 | int AbsVal::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
28 | {
29 | int w = bottom_top_blob.w;
30 | int h = bottom_top_blob.h;
31 | int channels = bottom_top_blob.c;
32 | int size = w * h;
33 |
34 | #pragma omp parallel for num_threads(opt.num_threads)
35 | for (int q=0; q
17 | #include
18 |
19 | namespace ncnn {
20 |
21 | DEFINE_LAYER_CREATOR(ArgMax)
22 |
23 | ArgMax::ArgMax()
24 | {
25 | one_blob_only = true;
26 | }
27 |
28 | int ArgMax::load_param(const ParamDict& pd)
29 | {
30 | out_max_val = pd.get(0, 0);
31 | topk = pd.get(1, 1);
32 |
33 | return 0;
34 | }
35 |
36 | int ArgMax::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
37 | {
38 | int size = bottom_blob.total();
39 |
40 | if (out_max_val)
41 | top_blob.create(topk, 2, 4u, opt.blob_allocator);
42 | else
43 | top_blob.create(topk, 1, 4u, opt.blob_allocator);
44 | if (top_blob.empty())
45 | return -100;
46 |
47 | const float* ptr = bottom_blob;
48 |
49 | // partial sort topk with index
50 | // optional value
51 | std::vector< std::pair > vec;
52 | vec.resize(size);
53 | for (int i=0; i >());
60 |
61 | float* outptr = top_blob;
62 | if (out_max_val)
63 | {
64 | float* valptr = outptr + topk;
65 | for (int i=0; i
19 | #endif // __ARM_NEON
20 |
21 | namespace ncnn {
22 |
23 | DEFINE_LAYER_CREATOR(Bias_arm)
24 |
25 | int Bias_arm::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
26 | {
27 | int w = bottom_top_blob.w;
28 | int h = bottom_top_blob.h;
29 | int channels = bottom_top_blob.c;
30 | int size = w * h;
31 |
32 | const float* bias_ptr = bias_data;
33 | #pragma omp parallel for num_threads(opt.num_threads)
34 | for (int q=0; q> 2;
42 | int remain = size - (nn << 2);
43 | #else
44 | int remain = size;
45 | #endif // __ARM_NEON
46 |
47 | #if __ARM_NEON
48 | float32x4_t _bias = vdupq_n_f32(bias);
49 | for (; nn>0; nn--)
50 | {
51 | float32x4_t _p = vld1q_f32(ptr);
52 | float32x4_t _outp = vaddq_f32(_p, _bias);
53 | vst1q_f32(ptr, _outp);
54 |
55 | ptr += 4;
56 | }
57 | #endif // __ARM_NEON
58 |
59 | for (; remain>0; remain--)
60 | {
61 | *ptr = *ptr + bias;
62 |
63 | ptr++;
64 | }
65 | }
66 |
67 | return 0;
68 | }
69 |
70 | } // namespace ncnn
71 |
--------------------------------------------------------------------------------
/src/layer/arm/bias_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_BIAS_ARM_H
16 | #define LAYER_BIAS_ARM_H
17 |
18 | #include "bias.h"
19 |
20 | namespace ncnn {
21 |
// ARM-specific implementation of the Bias layer.
class Bias_arm : public Bias
{
public:
    // In-place forward; NEON-vectorized implementation in bias_arm.cpp.
    virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_BIAS_ARM_H
31 |
--------------------------------------------------------------------------------
/src/layer/arm/clip_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CLIP_ARM_H
16 | #define LAYER_CLIP_ARM_H
17 |
18 | #include "clip.h"
19 |
20 | namespace ncnn {
21 |
// ARM-specific implementation of the Clip layer.
class Clip_arm : public Clip
{
public:
    // In-place forward override; implementation in clip_arm.cpp.
    virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_CLIP_ARM_H
31 |
--------------------------------------------------------------------------------
/src/layer/arm/convolution_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CONVOLUTION_ARM_H
16 | #define LAYER_CONVOLUTION_ARM_H
17 |
18 | #include "convolution.h"
19 |
20 | namespace ncnn {
21 |
// Signature shared by the hand-optimized convolution kernels.
// NOTE(review): argument order is presumably (input, output, weight, bias,
// options) -- confirm against the kernel implementations.
typedef void (*conv_func)(const Mat&, Mat&, const Mat&, const Mat&, const Option&);

// ARM-specific implementation of the Convolution layer.
class Convolution_arm : public Convolution
{
public:
    virtual int load_param(const ParamDict& pd);

    virtual int load_model(const ModelBin& mb);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;
    // Runs a dilated convolution on top of a dense kernel `conv`.
    virtual int forwardDilation(const Mat& bottom_blob, Mat& top_blob, conv_func conv, const Option& opt) const;

public:
    // Kernel-selection flags -- presumably decided during model loading;
    // confirm in convolution_arm.cpp.
    bool use_winograd3x3;
    bool use_sgemm1x1;
    // Pre-transformed weight data for the specialized kernels
    // (winograd 3x3, 1x1 sgemm, 3x3 stride-2, and int8 variants).
    Mat weight_3x3_winograd64_data;
    Mat weight_1x1_sgemm_data;
    Mat weight_3x3s2_data;
    Mat weight_3x3s1_int8_data;
    Mat weight_1x1s1_sgemm_int8_data;
};
43 |
44 | } // namespace ncnn
45 |
46 | #endif // LAYER_CONVOLUTION_ARM_H
47 |
--------------------------------------------------------------------------------
/src/layer/arm/convolutiondepthwise_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CONVOLUTIONDEPTHWISE_ARM_H
16 | #define LAYER_CONVOLUTIONDEPTHWISE_ARM_H
17 |
18 | #include "convolutiondepthwise.h"
19 |
20 | namespace ncnn {
21 |
22 | class ConvolutionDepthWise_arm : public ConvolutionDepthWise
23 | {
24 | public:
25 | ConvolutionDepthWise_arm();
26 | virtual ~ConvolutionDepthWise_arm();
27 |
28 | virtual int load_model(const ModelBin& mb);
29 |
30 | virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;
31 |
32 | public:
33 | std::vector group_ops;
34 | };
35 |
36 | } // namespace ncnn
37 |
38 | #endif // LAYER_CONVOLUTIONDEPTHWISE_ARM_H
39 |
--------------------------------------------------------------------------------
/src/layer/arm/deconvolution_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_DECONVOLUTION_ARM_H
16 | #define LAYER_DECONVOLUTION_ARM_H
17 |
18 | #include "deconvolution.h"
19 |
20 | namespace ncnn {
21 |
// ARM-specific implementation of the Deconvolution layer.
class Deconvolution_arm : public Deconvolution
{
public:
    // Forward override; implementation in deconvolution_arm.cpp.
    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_DECONVOLUTION_ARM_H
31 |
--------------------------------------------------------------------------------
/src/layer/arm/deconvolutiondepthwise_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_DECONVOLUTIONDEPTHWISE_ARM_H
16 | #define LAYER_DECONVOLUTIONDEPTHWISE_ARM_H
17 |
18 | #include "deconvolutiondepthwise.h"
19 |
20 | namespace ncnn {
21 |
// ARM-specific implementation of depthwise deconvolution.
class DeconvolutionDepthWise_arm : public DeconvolutionDepthWise
{
public:
    // Forward override; implementation in deconvolutiondepthwise_arm.cpp.
    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_DECONVOLUTIONDEPTHWISE_ARM_H
31 |
--------------------------------------------------------------------------------
/src/layer/arm/dequantize_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_DEQUANTIZE_ARM_H
16 | #define LAYER_DEQUANTIZE_ARM_H
17 |
18 | #include "dequantize.h"
19 |
20 | namespace ncnn {
21 |
// ARM-specific implementation of the Dequantize layer.
class Dequantize_arm : public Dequantize
{
public:
    // In-place forward override; implementation in dequantize_arm.cpp.
    virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_DEQUANTIZE_ARM_H
31 |
--------------------------------------------------------------------------------
/src/layer/arm/eltwise_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_ELTWISE_ARM_H
16 | #define LAYER_ELTWISE_ARM_H
17 |
18 | #include "eltwise.h"
19 |
20 | namespace ncnn {
21 |
22 | class Eltwise_arm : public Eltwise
23 | {
24 | public:
25 | virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs, const Option& opt) const;
26 | };
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_ELTWISE_ARM_H
31 |
--------------------------------------------------------------------------------
/src/layer/arm/innerproduct_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_INNERPRODUCT_ARM_H
16 | #define LAYER_INNERPRODUCT_ARM_H
17 |
18 | #include "innerproduct.h"
19 |
20 | namespace ncnn {
21 |
// ARM-specific implementation of the InnerProduct (fully-connected) layer.
class InnerProduct_arm : public InnerProduct
{
public:
    // Forward override; implementation in innerproduct_arm.cpp.
    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_INNERPRODUCT_ARM_H
31 |
--------------------------------------------------------------------------------
/src/layer/arm/lrn_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_LRN_ARM_H
16 | #define LAYER_LRN_ARM_H
17 |
18 | #include "lrn.h"
19 |
20 | namespace ncnn {
21 |
// ARM-specific implementation of Local Response Normalization.
class LRN_arm : public LRN
{
public:
    // In-place forward override; implementation in lrn_arm.cpp.
    virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_LRN_ARM_H
31 |
--------------------------------------------------------------------------------
/src/layer/arm/pooling_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_POOLING_ARM_H
16 | #define LAYER_POOLING_ARM_H
17 |
18 | #include "pooling.h"
19 |
20 | namespace ncnn {
21 |
// ARM-specific implementation of the Pooling layer.
class Pooling_arm : public Pooling
{
public:
    // Forward override; implementation in pooling_arm.cpp.
    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_POOLING_ARM_H
31 |
--------------------------------------------------------------------------------
/src/layer/arm/prelu_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_PRELU_ARM_H
16 | #define LAYER_PRELU_ARM_H
17 |
18 | #include "prelu.h"
19 |
20 | namespace ncnn {
21 |
// ARM-specific implementation of the PReLU activation layer.
class PReLU_arm : public PReLU
{
public:
    // In-place forward override; implementation in prelu_arm.cpp.
    virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_PRELU_ARM_H
31 |
--------------------------------------------------------------------------------
/src/layer/arm/quantize_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_QUANTIZE_ARM_H
16 | #define LAYER_QUANTIZE_ARM_H
17 |
18 | #include "quantize.h"
19 |
20 | namespace ncnn {
21 |
// ARM-specific implementation of the Quantize layer.
class Quantize_arm : public Quantize
{
public:
    // Forward override; implementation in quantize_arm.cpp.
    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_QUANTIZE_ARM_H
31 |
--------------------------------------------------------------------------------
/src/layer/arm/relu_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_RELU_ARM_H
16 | #define LAYER_RELU_ARM_H
17 |
18 | #include "relu.h"
19 |
20 | namespace ncnn {
21 |
// ARM-specific implementation of the ReLU activation layer.
class ReLU_arm : public ReLU
{
public:
    // In-place forward override; implementation in relu_arm.cpp.
    virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_RELU_ARM_H
31 |
--------------------------------------------------------------------------------
/src/layer/arm/scale_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_SCALE_ARM_H
16 | #define LAYER_SCALE_ARM_H
17 |
18 | #include "scale.h"
19 |
20 | namespace ncnn {
21 |
// ARM-specific implementation of the Scale layer.
class Scale_arm : public Scale
{
public:
    // In-place forward override; implementation in scale_arm.cpp.
    virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_SCALE_ARM_H
31 |
--------------------------------------------------------------------------------
/src/layer/arm/sigmoid_arm.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "sigmoid_arm.h"
16 |
17 | #if __ARM_NEON
18 | #include
19 | #include "neon_mathfun.h"
20 | #endif // __ARM_NEON
21 |
22 | #include
23 |
24 | namespace ncnn {
25 |
26 | DEFINE_LAYER_CREATOR(Sigmoid_arm)
27 |
28 | int Sigmoid_arm::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
29 | {
30 | int w = bottom_top_blob.w;
31 | int h = bottom_top_blob.h;
32 | int channels = bottom_top_blob.c;
33 | int size = w * h;
34 |
35 | #pragma omp parallel for num_threads(opt.num_threads)
36 | for (int q=0; q> 2;
42 | int remain = size - (nn << 2);
43 | #else
44 | int remain = size;
45 | #endif // __ARM_NEON
46 |
47 | #if __ARM_NEON
48 | float32x4_t _one = vdupq_n_f32(1.f);
49 | for (; nn>0; nn--)
50 | {
51 | float32x4_t _p = vld1q_f32(ptr);
52 | _p = vnegq_f32(_p);
53 | _p = exp_ps(_p);
54 | _p = vaddq_f32(_p, _one);
55 | float32x4_t _outp = vrecpeq_f32(_p);
56 | _outp = vmulq_f32(vrecpsq_f32(_p, _outp), _outp);
57 | // _outp = vmulq_f32(vrecpsq_f32(_p, _outp), _outp);
58 | vst1q_f32(ptr, _outp);
59 |
60 | ptr += 4;
61 | }
62 | #endif // __ARM_NEON
63 | for (; remain>0; remain--)
64 | {
65 | *ptr = 1.f / (1.f + exp(-*ptr));
66 |
67 | ptr++;
68 | }
69 | }
70 |
71 | return 0;
72 | }
73 |
74 | } // namespace ncnn
75 |
--------------------------------------------------------------------------------
/src/layer/arm/sigmoid_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_SIGMOID_ARM_H
16 | #define LAYER_SIGMOID_ARM_H
17 |
18 | #include "sigmoid.h"
19 |
20 | namespace ncnn {
21 |
// ARM-specific implementation of the Sigmoid activation layer.
class Sigmoid_arm : public Sigmoid
{
public:
    // In-place forward; NEON-vectorized implementation in sigmoid_arm.cpp.
    virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_SIGMOID_ARM_H
31 |
--------------------------------------------------------------------------------
/src/layer/arm/softmax_arm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_SOFTMAX_ARM_H
16 | #define LAYER_SOFTMAX_ARM_H
17 |
18 | #include "softmax.h"
19 |
20 | namespace ncnn {
21 |
// ARM-specific implementation of the Softmax layer.
class Softmax_arm : public Softmax
{
public:
    // In-place forward override; implementation in softmax_arm.cpp.
    virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;
};
27 |
28 | } // namespace ncnn
29 |
30 | #endif // LAYER_SOFTMAX_ARM_H
31 |
--------------------------------------------------------------------------------
/src/layer/batchnorm.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_BATCHNORM_H
16 | #define LAYER_BATCHNORM_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Batch normalization layer: normalizes activations using stored
// per-channel statistics loaded from the model.
class BatchNorm : public Layer
{
public:
    BatchNorm();

    virtual int load_param(const ParamDict& pd);

    virtual int load_model(const ModelBin& mb);

    virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;

public:
    // param
    int channels;  // number of channels (= length of the per-channel data)
    float eps;     // numerical-stability epsilon added to the variance

    // model: per-channel scale, mean, variance and shift loaded from file
    Mat slope_data;
    Mat mean_data;
    Mat var_data;
    Mat bias_data;

    // Derived per-channel coefficients prepared after loading.
    // NOTE(review): presumably the folded affine form a + b*x computed
    // from the tensors above -- confirm in batchnorm.cpp.
    Mat a_data;
    Mat b_data;
};
47 |
48 | } // namespace ncnn
49 |
50 | #endif // LAYER_BATCHNORM_H
51 |
--------------------------------------------------------------------------------
/src/layer/bias.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "bias.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(Bias)
20 |
// Bias adds a learned per-channel constant to its single input blob;
// the operation supports in-place computation.
Bias::Bias()
{
    one_blob_only = true;
    support_inplace = true;
}
26 |
// Reads param id 0: the number of bias values (default 0).
// Always returns 0.
int Bias::load_param(const ParamDict& pd)
{
    bias_data_size = pd.get(0, 0);

    return 0;
}
33 |
// Loads bias_data_size bias values from the model binary.
// Returns 0 on success, -100 if the data could not be loaded.
int Bias::load_model(const ModelBin& mb)
{
    // second argument 1: presumably "load as float32" per the ModelBin
    // convention -- confirm against modelbin documentation
    bias_data = mb.load(bias_data_size, 1);
    if (bias_data.empty())
        return -100;

    return 0;
}
42 |
43 | int Bias::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
44 | {
45 | int w = bottom_top_blob.w;
46 | int h = bottom_top_blob.h;
47 | int channels = bottom_top_blob.c;
48 | int size = w * h;
49 |
50 | #pragma omp parallel for num_threads(opt.num_threads)
51 | for (int q=0; q& bottom_blobs, std::vector& top_blobs, const Option& opt) const;
30 |
31 | virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;
32 |
33 | enum {
34 | Operation_ADD = 0,
35 | Operation_SUB = 1,
36 | Operation_MUL = 2,
37 | Operation_DIV = 3,
38 | Operation_MAX = 4,
39 | Operation_MIN = 5,
40 | Operation_POW = 6,
41 | Operation_RSUB = 7,
42 | Operation_RDIV = 8
43 | };
44 |
45 | public:
46 | // param
47 | int op_type;
48 | int with_scalar;
49 | float b;
50 | };
51 |
52 | } // namespace ncnn
53 |
54 | #endif // LAYER_BINARYOP_H
55 |
--------------------------------------------------------------------------------
/src/layer/bnll.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "bnll.h"
16 | #include
17 |
18 | namespace ncnn {
19 |
20 | DEFINE_LAYER_CREATOR(BNLL)
21 |
22 | BNLL::BNLL()
23 | {
24 | one_blob_only = true;
25 | support_inplace = true;
26 | }
27 |
28 | int BNLL::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
29 | {
30 | int w = bottom_top_blob.w;
31 | int h = bottom_top_blob.h;
32 | int channels = bottom_top_blob.c;
33 | int size = w * h;
34 |
35 | #pragma omp parallel for num_threads(opt.num_threads)
36 | for (int q=0; q 0)
43 | ptr[i] = ptr[i] + log(1.f + exp(-ptr[i]));
44 | else
45 | ptr[i] = log(1.f + exp(ptr[i]));
46 | }
47 | }
48 |
49 | return 0;
50 | }
51 |
52 | } // namespace ncnn
53 |
--------------------------------------------------------------------------------
/src/layer/bnll.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_BNLL_H
16 | #define LAYER_BNLL_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// BNLL activation layer: elementwise y = log(1 + exp(x))
// (see the stable split-form implementation in bnll.cpp).
class BNLL : public Layer
{
public:
    BNLL();

    // Apply the activation to bottom_top_blob in place.
    virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;

public:
};
31 |
32 | } // namespace ncnn
33 |
34 | #endif // LAYER_BNLL_H
35 |
--------------------------------------------------------------------------------
/src/layer/clip.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "clip.h"
16 |
17 | #include
18 |
19 | namespace ncnn {
20 |
21 | DEFINE_LAYER_CREATOR(Clip)
22 |
23 | Clip::Clip()
24 | {
25 | one_blob_only = true;
26 | support_inplace = true;
27 | }
28 |
29 | int Clip::load_param(const ParamDict& pd)
30 | {
31 | min = pd.get(0, -FLT_MAX);
32 | max = pd.get(1, FLT_MAX);
33 |
34 | return 0;
35 | }
36 |
37 | int Clip::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
38 | {
39 | int w = bottom_top_blob.w;
40 | int h = bottom_top_blob.h;
41 | int channels = bottom_top_blob.c;
42 | int size = w * h;
43 |
44 | #pragma omp parallel for num_threads(opt.num_threads)
45 | for (int q=0; q max)
54 | ptr[i] = max;
55 | }
56 | }
57 |
58 | return 0;
59 | }
60 |
61 | } // namespace ncnn
62 |
--------------------------------------------------------------------------------
/src/layer/clip.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CLIP_H
16 | #define LAYER_CLIP_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Clip layer: clamps every element of the input into [min, max] in place.
class Clip : public Layer
{
public:
    Clip();

    // Reads min (param 0) and max (param 1); both default to the full float range.
    virtual int load_param(const ParamDict& pd);

    virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;

public:
    float min;  // lower clamp bound
    float max;  // upper clamp bound
};
35 |
36 | } // namespace ncnn
37 |
38 | #endif // LAYER_CLIP_H
39 |
--------------------------------------------------------------------------------
/src/layer/concat.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CONCAT_H
16 | #define LAYER_CONCAT_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class Concat : public Layer
23 | {
24 | public:
25 | Concat();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs, const Option& opt) const;
30 |
31 | public:
32 | int axis;
33 | };
34 |
35 | } // namespace ncnn
36 |
37 | #endif // LAYER_CONCAT_H
38 |
--------------------------------------------------------------------------------
/src/layer/convolution.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CONVOLUTION_H
16 | #define LAYER_CONVOLUTION_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// 2D convolution layer with optional bias and optional int8 inference path.
class Convolution : public Layer
{
public:
    Convolution();
    ~Convolution();

    // Read the geometry/hyper-parameters listed under "param" below.
    virtual int load_param(const ParamDict& pd);

    // Read weight_data / bias_data (and int8 scales — see .cpp) from the model.
    virtual int load_model(const ModelBin& mb);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;

public:
    // param
    int num_output;      // number of output channels
    int kernel_w;
    int kernel_h;
    int dilation_w;
    int dilation_h;
    int stride_w;
    int stride_h;
    int pad_w;
    int pad_h;
    int bias_term;       // non-zero when a bias vector is loaded

    int weight_data_size;  // total float count of weight_data

    int int8_scale_term;   // non-zero when int8 scales are present in the model

    // model
    Mat weight_data;
    Mat bias_data;

    float weight_data_int8_scale;
    float bottom_blob_int8_scale;

    bool use_int8_inference;  // presumably derived from int8_scale_term at load time — confirm in .cpp

    // helper layers for the int8 path (quantize input / dequantize output)
    ncnn::Layer* quantize;
    ncnn::Layer* dequantize;
};
63 |
64 | } // namespace ncnn
65 |
66 | #endif // LAYER_CONVOLUTION_H
67 |
--------------------------------------------------------------------------------
/src/layer/convolutiondepthwise.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CONVOLUTIONDEPTHWISE_H
16 | #define LAYER_CONVOLUTIONDEPTHWISE_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class ConvolutionDepthWise : public Layer
23 | {
24 | public:
25 | ConvolutionDepthWise();
26 | ~ConvolutionDepthWise();
27 |
28 | virtual int load_param(const ParamDict& pd);
29 |
30 | virtual int load_model(const ModelBin& mb);
31 |
32 | virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;
33 |
34 | public:
35 | // param
36 | int num_output;
37 | int kernel_w;
38 | int kernel_h;
39 | int dilation_w;
40 | int dilation_h;
41 | int stride_w;
42 | int stride_h;
43 | int pad_w;
44 | int pad_h;
45 | int bias_term;
46 |
47 | int weight_data_size;
48 | int group;
49 |
50 | int int8_scale_term;
51 |
52 | // model
53 | Mat weight_data;
54 | Mat bias_data;
55 |
56 | Mat weight_data_int8_scales;
57 | Mat bottom_blob_int8_scales;
58 |
59 | bool use_int8_inference;
60 |
61 | std::vector quantize_ops;
62 | std::vector dequantize_ops;
63 | };
64 |
65 | } // namespace ncnn
66 |
67 | #endif // LAYER_CONVOLUTIONDEPTHWISE_H
68 |
--------------------------------------------------------------------------------
/src/layer/crop.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CROP_H
16 | #define LAYER_CROP_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class Crop : public Layer
23 | {
24 | public:
25 | Crop();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;
30 |
31 | virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs, const Option& opt) const;
32 |
33 | public:
34 | int woffset;
35 | int hoffset;
36 | int coffset;
37 | int outw;
38 | int outh;
39 | int outc;
40 | };
41 |
42 | } // namespace ncnn
43 |
44 | #endif // LAYER_CROP_H
45 |
--------------------------------------------------------------------------------
/src/layer/deconvolution.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_DECONVOLUTION_H
16 | #define LAYER_DECONVOLUTION_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Transposed (de-)convolution layer.
class Deconvolution : public Layer
{
public:
    Deconvolution();

    // Read the geometry/hyper-parameters listed under "param" below.
    virtual int load_param(const ParamDict& pd);

    // Read weight_data / bias_data from the model file.
    virtual int load_model(const ModelBin& mb);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;

public:
    // param
    int num_output;      // number of output channels
    int kernel_w;
    int kernel_h;
    int dilation_w;
    int dilation_h;
    int stride_w;
    int stride_h;
    int pad_w;
    int pad_h;
    int bias_term;       // non-zero when a bias vector is loaded

    int weight_data_size;  // total float count of weight_data

    // model
    Mat weight_data;
    Mat bias_data;
};
52 |
53 | } // namespace ncnn
54 |
55 | #endif // LAYER_DECONVOLUTION_H
56 |
--------------------------------------------------------------------------------
/src/layer/deconvolutiondepthwise.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_DECONVOLUTIONDEPTHWISE_H
16 | #define LAYER_DECONVOLUTIONDEPTHWISE_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Depthwise/grouped transposed convolution layer.
class DeconvolutionDepthWise : public Layer
{
public:
    DeconvolutionDepthWise();

    // Read the geometry/hyper-parameters listed under "param" below.
    virtual int load_param(const ParamDict& pd);

    // Read weight_data / bias_data from the model file.
    virtual int load_model(const ModelBin& mb);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;

public:
    // param
    int num_output;      // number of output channels
    int kernel_w;
    int kernel_h;
    int dilation_w;
    int dilation_h;
    int stride_w;
    int stride_h;
    int pad_w;
    int pad_h;
    int bias_term;       // non-zero when a bias vector is loaded

    int weight_data_size;
    int group;           // number of groups

    // model
    Mat weight_data;
    Mat bias_data;
};
53 |
54 | } // namespace ncnn
55 |
56 | #endif // LAYER_DECONVOLUTIONDEPTHWISE_H
57 |
--------------------------------------------------------------------------------
/src/layer/dequantize.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_DEQUANTIZE_H
16 | #define LAYER_DEQUANTIZE_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Dequantize layer: converts int8/int32 activations back to float
// using `scale`, optionally adding a bias (in-place).
class Dequantize : public Layer
{
public:
    Dequantize();

    virtual int load_param(const ParamDict& pd);

    // Loads bias_data when bias_term is set — see .cpp.
    virtual int load_model(const ModelBin& mb);

    virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;

public:
    float scale;          // multiplicative dequantization scale
    int bias_term;        // non-zero when bias_data is present
    int bias_data_size;

    Mat bias_data;
};
40 |
41 | } // namespace ncnn
42 |
43 | #endif // LAYER_DEQUANTIZE_H
44 |
--------------------------------------------------------------------------------
/src/layer/detectionoutput.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_DETECTIONOUTPUT_H
16 | #define LAYER_DETECTIONOUTPUT_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class DetectionOutput : public Layer
23 | {
24 | public:
25 | DetectionOutput();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs, const Option& opt) const;
30 |
31 | public:
32 | int num_class;
33 | float nms_threshold;
34 | int nms_top_k;
35 | int keep_top_k;
36 | float confidence_threshold;
37 | float variances[4];
38 | };
39 |
40 | } // namespace ncnn
41 |
42 | #endif // LAYER_DETECTIONOUTPUT_H
43 |
--------------------------------------------------------------------------------
/src/layer/dropout.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "dropout.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(Dropout)
20 |
21 | Dropout::Dropout()
22 | {
23 | one_blob_only = true;
24 | support_inplace = true;
25 | }
26 |
27 | int Dropout::load_param(const ParamDict& pd)
28 | {
29 | scale = pd.get(0, 1.f);
30 |
31 | return 0;
32 | }
33 |
34 | int Dropout::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
35 | {
36 | if (scale == 1.f)
37 | {
38 | return 0;
39 | }
40 |
41 | int w = bottom_top_blob.w;
42 | int h = bottom_top_blob.h;
43 | int channels = bottom_top_blob.c;
44 | int size = w * h;
45 |
46 | #pragma omp parallel for num_threads(opt.num_threads)
47 | for (int q=0; q& bottom_blobs, std::vector& top_blobs, const Option& opt) const;
30 |
31 | enum { Operation_PROD = 0, Operation_SUM = 1, Operation_MAX = 2 };
32 |
33 | public:
34 | // param
35 | int op_type;
36 | Mat coeffs;
37 | };
38 |
39 | } // namespace ncnn
40 |
41 | #endif // LAYER_ELTWISE_H
42 |
--------------------------------------------------------------------------------
/src/layer/elu.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "elu.h"
16 | #include
17 |
18 | namespace ncnn {
19 |
20 | DEFINE_LAYER_CREATOR(ELU)
21 |
22 | ELU::ELU()
23 | {
24 | one_blob_only = true;
25 | support_inplace = true;
26 | }
27 |
28 | int ELU::load_param(const ParamDict& pd)
29 | {
30 | alpha = pd.get(0, 0.1f);
31 |
32 | return 0;
33 | }
34 |
35 | int ELU::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
36 | {
37 | int w = bottom_top_blob.w;
38 | int h = bottom_top_blob.h;
39 | int channels = bottom_top_blob.c;
40 | int size = w * h;
41 |
42 | #pragma omp parallel for num_threads(opt.num_threads)
43 | for (int q=0; q
17 |
18 | namespace ncnn {
19 |
20 | DEFINE_LAYER_CREATOR(Exp)
21 |
22 | Exp::Exp()
23 | {
24 | one_blob_only = true;
25 | support_inplace = true;
26 | }
27 |
28 | int Exp::load_param(const ParamDict& pd)
29 | {
30 | base = pd.get(0, -1.f);
31 | scale = pd.get(1, 1.f);
32 | shift = pd.get(2, 0.f);
33 |
34 | return 0;
35 | }
36 |
37 | int Exp::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
38 | {
39 | int w = bottom_top_blob.w;
40 | int h = bottom_top_blob.h;
41 | int channels = bottom_top_blob.c;
42 | int size = w * h;
43 |
44 | if (base == -1.f)
45 | {
46 | #pragma omp parallel for num_threads(opt.num_threads)
47 | for (int q=0; q
17 |
18 | namespace ncnn {
19 |
20 | DEFINE_LAYER_CREATOR(InstanceNorm)
21 |
22 | InstanceNorm::InstanceNorm()
23 | {
24 | one_blob_only = true;
25 | support_inplace = true;
26 | }
27 |
28 | int InstanceNorm::load_param(const ParamDict& pd)
29 | {
30 | channels = pd.get(0, 0);
31 | eps = pd.get(1, 0.001f);
32 |
33 | return 0;
34 | }
35 |
36 | int InstanceNorm::load_model(const ModelBin& mb)
37 | {
38 | gamma_data = mb.load(channels, 1);
39 | if (gamma_data.empty())
40 | return -100;
41 |
42 | beta_data = mb.load(channels, 1);
43 | if (beta_data.empty())
44 | return -100;
45 |
46 | return 0;
47 | }
48 |
49 | int InstanceNorm::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
50 | {
51 | // x = (x - mean) / (sqrt(var) + eps) * gamma + beta
52 |
53 | int w = bottom_top_blob.w;
54 | int h = bottom_top_blob.h;
55 | int size = w * h;
56 |
57 | #pragma omp parallel for num_threads(opt.num_threads)
58 | for (int q=0; q
17 |
18 | namespace ncnn {
19 |
20 | DEFINE_LAYER_CREATOR(Log)
21 |
22 | Log::Log()
23 | {
24 | one_blob_only = true;
25 | support_inplace = true;
26 | }
27 |
28 | int Log::load_param(const ParamDict& pd)
29 | {
30 | base = pd.get(0, -1.f);
31 | scale = pd.get(1, 1.f);
32 | shift = pd.get(2, 0.f);
33 |
34 | return 0;
35 | }
36 |
37 | int Log::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
38 | {
39 | int w = bottom_top_blob.w;
40 | int h = bottom_top_blob.h;
41 | int channels = bottom_top_blob.c;
42 | int size = w * h;
43 |
44 | if (base == -1.f)
45 | {
46 | #pragma omp parallel for num_threads(opt.num_threads)
47 | for (int q=0; q& bottom_blobs, std::vector& top_blobs, const Option& opt) const;
32 |
33 | public:
34 | // param
35 | int num_output;
36 | int weight_data_size;
37 |
38 | // model
39 | Mat weight_hc_data;
40 | Mat weight_xc_data;
41 | Mat bias_c_data;
42 | };
43 |
44 | } // namespace ncnn
45 |
46 | #endif // LAYER_LSTM_H
47 |
--------------------------------------------------------------------------------
/src/layer/memorydata.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "memorydata.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(MemoryData)
20 |
21 | MemoryData::MemoryData()
22 | {
23 | one_blob_only = false;
24 | support_inplace = false;
25 | }
26 |
27 | int MemoryData::load_param(const ParamDict& pd)
28 | {
29 | w = pd.get(0, 0);
30 | h = pd.get(1, 0);
31 | c = pd.get(2, 0);
32 |
33 | return 0;
34 | }
35 |
36 | int MemoryData::load_model(const ModelBin& mb)
37 | {
38 | if (c != 0)
39 | {
40 | data = mb.load(w, h, c, 1);
41 | }
42 | else if (h != 0)
43 | {
44 | data = mb.load(w, h, 1);
45 | }
46 | else if (w != 0)
47 | {
48 | data = mb.load(w, 1);
49 | }
50 | else // 0 0 0
51 | {
52 | data.create(1);
53 | }
54 | if (data.empty())
55 | return -100;
56 |
57 | return 0;
58 | }
59 |
60 | int MemoryData::forward(const std::vector& /*bottom_blobs*/, std::vector& top_blobs, const Option& opt) const
61 | {
62 | Mat& top_blob = top_blobs[0];
63 |
64 | top_blob = data.clone(opt.blob_allocator);
65 | if (top_blob.empty())
66 | return -100;
67 |
68 | return 0;
69 | }
70 |
71 | } // namespace ncnn
72 |
--------------------------------------------------------------------------------
/src/layer/memorydata.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_MEMORYDATA_H
16 | #define LAYER_MEMORYDATA_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
22 | class MemoryData : public Layer
23 | {
24 | public:
25 | MemoryData();
26 |
27 | virtual int load_param(const ParamDict& pd);
28 |
29 | virtual int load_model(const ModelBin& mb);
30 |
31 | virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs, const Option& opt) const;
32 |
33 | public:
34 | int w;
35 | int h;
36 | int c;
37 |
38 | Mat data;
39 | };
40 |
41 | } // namespace ncnn
42 |
43 | #endif // LAYER_MEMORYDATA_H
44 |
--------------------------------------------------------------------------------
/src/layer/mvn.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_MVN_H
16 | #define LAYER_MVN_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// MVN (mean-variance normalization) layer.
class MVN : public Layer
{
public:
    MVN();

    virtual int load_param(const ParamDict& pd);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;

public:
    int normalize_variance;  // non-zero: divide by standard deviation as well as subtracting the mean
    int across_channels;     // non-zero: compute statistics over all channels together
    float eps;               // small constant for numerical stability
};
36 |
37 | } // namespace ncnn
38 |
39 | #endif // LAYER_MVN_H
40 |
--------------------------------------------------------------------------------
/src/layer/normalize.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_NORMALIZE_H
16 | #define LAYER_NORMALIZE_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Normalize layer (as used by SSD): L2-style normalization with a
// learned per-channel or shared scale.
class Normalize : public Layer
{
public:
    Normalize();

    virtual int load_param(const ParamDict& pd);

    // Loads scale_data (scale_data_size elements).
    virtual int load_model(const ModelBin& mb);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;

public:
    // param
    int across_spatial;   // non-zero: normalize over the whole spatial extent
    int across_channel;   // non-zero: normalize across channels
    int channel_shared;   // non-zero: one scale value shared by all channels
    float eps;            // small constant for numerical stability
    int scale_data_size;

    Mat scale_data;       // learned scale factors
};
43 |
44 | } // namespace ncnn
45 |
46 | #endif // LAYER_NORMALIZE_H
47 |
--------------------------------------------------------------------------------
/src/layer/padding.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_PADDING_H
16 | #define LAYER_PADDING_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Padding layer: grows the input blob by the given number of rows/columns
// on each spatial edge.
class Padding : public Layer
{
public:
    Padding();

    // Reads the four edge sizes, type and value from the param dict.
    virtual int load_param(const ParamDict& pd);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;

public:
    // padding amount (in elements) on each edge
    int top;
    int bottom;
    int left;
    int right;
    int type;// 0=BORDER_CONSTANT 1=BORDER_REPLICATE
    float value;// presumably the fill value for BORDER_CONSTANT -- verify in padding.cpp
};
39 |
40 | } // namespace ncnn
41 |
42 | #endif // LAYER_PADDING_H
43 |
--------------------------------------------------------------------------------
/src/layer/permute.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_PERMUTE_H
16 | #define LAYER_PERMUTE_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Permute layer: reorders the axes of the input blob.
class Permute : public Layer
{
public:
    Permute();

    // Reads order_type from the param dict.
    virtual int load_param(const ParamDict& pd);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;

public:
    // encodes which axis permutation to apply; the mapping from value to
    // permutation lives in permute.cpp -- TODO confirm the encoding there
    int order_type;
};
34 |
35 | } // namespace ncnn
36 |
37 | #endif // LAYER_PERMUTE_H
38 |
--------------------------------------------------------------------------------
/src/layer/pooling.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_POOLING_H
16 | #define LAYER_POOLING_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Pooling layer: max or average pooling over spatial windows, with optional
// global pooling (one output value per channel).
class Pooling : public Layer
{
public:
    Pooling();

    // Reads kernel/stride/pad geometry and the pooling type from the param dict.
    virtual int load_param(const ParamDict& pd);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;

    enum { PoolMethod_MAX = 0, PoolMethod_AVE = 1 };

public:
    // param
    int pooling_type;    // PoolMethod_MAX or PoolMethod_AVE
    int kernel_w;
    int kernel_h;
    int stride_w;
    int stride_h;
    int pad_left;
    int pad_right;
    int pad_top;
    int pad_bottom;
    int global_pooling;  // nonzero: pool the whole feature map, ignoring geometry above
    int pad_mode;// 0=full 1=valid 2=SAME
};
47 |
48 | } // namespace ncnn
49 |
50 | #endif // LAYER_POOLING_H
51 |
--------------------------------------------------------------------------------
/src/layer/power.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "power.h"
16 | #include
17 |
18 | namespace ncnn {
19 |
20 | DEFINE_LAYER_CREATOR(Power)
21 |
Power::Power()
{
    // single input blob, transformed in place
    one_blob_only = true;
    support_inplace = true;
}
27 |
// Reads the three coefficients; defaults (power=1, scale=1, shift=0)
// make the layer an identity transform.
int Power::load_param(const ParamDict& pd)
{
    power = pd.get(0, 1.f);
    scale = pd.get(1, 1.f);
    shift = pd.get(2, 0.f);

    return 0;
}
36 |
37 | int Power::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
38 | {
39 | int w = bottom_top_blob.w;
40 | int h = bottom_top_blob.h;
41 | int channels = bottom_top_blob.c;
42 | int size = w * h;
43 |
44 | #pragma omp parallel for num_threads(opt.num_threads)
45 | for (int q=0; q& bottom_blobs, std::vector& top_blobs, const Option& opt) const;
30 |
31 | public:
32 | Mat min_sizes;
33 | Mat max_sizes;
34 | Mat aspect_ratios;
35 | float variances[4];
36 | int flip;
37 | int clip;
38 | int image_width;
39 | int image_height;
40 | float step_width;
41 | float step_height;
42 | float offset;
43 | };
44 |
45 | } // namespace ncnn
46 |
47 | #endif // LAYER_PRIORBOX_H
48 |
--------------------------------------------------------------------------------
/src/layer/proposal.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_PROPOSAL_H
16 | #define LAYER_PROPOSAL_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Proposal layer (Faster R-CNN RPN style): generates region proposals from
// anchor boxes and filters them with NMS -- details live in proposal.cpp.
class Proposal : public Layer
{
public:
    Proposal();

    // Reads the anchor/NMS parameters from the param dict.
    virtual int load_param(const ParamDict& pd);

    virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs, const Option& opt) const;

public:
    // param
    int feat_stride;     // stride of the feature map relative to the input image
    int base_size;       // base anchor size -- TODO confirm in proposal.cpp
    int pre_nms_topN;    // number of candidates kept before NMS
    int after_nms_topN;  // number of proposals kept after NMS
    float nms_thresh;    // IoU threshold used by NMS -- presumably; verify
    int min_size;        // minimum proposal size -- presumably; verify

    Mat ratios;          // anchor aspect ratios
    Mat scales;          // anchor scales

    Mat anchors;         // anchors derived from base_size/ratios/scales
};
45 |
46 | } // namespace ncnn
47 |
48 | #endif // LAYER_PROPOSAL_H
49 |
--------------------------------------------------------------------------------
/src/layer/psroipooling.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_PSROIPOOLING_H
16 | #define LAYER_PSROIPOOLING_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Position-sensitive ROI pooling (R-FCN style): pools each ROI into a
// pooled_width x pooled_height grid using position-sensitive score maps.
class PSROIPooling : public Layer
{
public:
    PSROIPooling();

    // Reads the pooled grid size, spatial scale and output dim from the param dict.
    virtual int load_param(const ParamDict& pd);

    virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs, const Option& opt) const;

public:
    int pooled_width;
    int pooled_height;
    float spatial_scale;  // maps ROI coordinates from image space to feature-map space
    int output_dim;       // channels of the pooled output -- TODO confirm in psroipooling.cpp
};
37 |
38 | } // namespace ncnn
39 |
40 | #endif // LAYER_PSROIPOOLING_H
41 |
--------------------------------------------------------------------------------
/src/layer/quantize.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_QUANTIZE_H
16 | #define LAYER_QUANTIZE_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Quantize layer: converts float activations to a quantized representation
// using a single scale factor -- conversion details live in quantize.cpp.
class Quantize : public Layer
{
public:
    Quantize();

    // Reads the quantization scale from the param dict.
    virtual int load_param(const ParamDict& pd);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;

public:
    float scale;  // multiplier applied during quantization -- presumably; verify in quantize.cpp
};
34 |
35 | } // namespace ncnn
36 |
37 | #endif // LAYER_QUANTIZE_H
38 |
--------------------------------------------------------------------------------
/src/layer/reduction.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_REDUCTION_H
16 | #define LAYER_REDUCTION_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Reduction layer: collapses the input with one of the reduction operations
// below (sum, absolute sum, sum of squares, mean, max, min, product).
class Reduction : public Layer
{
public:
    Reduction();

    // Reads operation, dim and coeff from the param dict.
    virtual int load_param(const ParamDict& pd);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;

    enum {
        ReductionOp_SUM = 0,
        ReductionOp_ASUM = 1,
        ReductionOp_SUMSQ = 2,
        ReductionOp_MEAN = 3,
        ReductionOp_MAX = 4,
        ReductionOp_MIN = 5,
        ReductionOp_PROD = 6
    };

public:
    // param
    int operation;  // one of the ReductionOp_* values above
    int dim;        // selects which axes are reduced -- TODO confirm encoding in reduction.cpp
    float coeff;    // presumably a multiplier applied to the result -- verify
};
47 |
48 | } // namespace ncnn
49 |
50 | #endif // LAYER_REDUCTION_H
51 |
--------------------------------------------------------------------------------
/src/layer/relu.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "relu.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(ReLU)
20 |
ReLU::ReLU()
{
    // single input blob, transformed in place
    one_blob_only = true;
    support_inplace = true;
}
26 |
// Reads the negative slope; the default 0.f selects the plain-ReLU fast
// path in forward_inplace, nonzero gives leaky ReLU.
int ReLU::load_param(const ParamDict& pd)
{
    slope = pd.get(0, 0.f);

    return 0;
}
33 |
34 | int ReLU::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
35 | {
36 | int w = bottom_top_blob.w;
37 | int h = bottom_top_blob.h;
38 | int channels = bottom_top_blob.c;
39 | int size = w * h;
40 |
41 | if (slope == 0.f)
42 | {
43 | #pragma omp parallel for num_threads(opt.num_threads)
44 | for (int q=0; q& bottom_blobs, std::vector& top_blobs, const Option& opt) const;
32 |
33 | public:
34 | // param
35 | int num_output;
36 | int weight_data_size;
37 |
38 | // model
39 | Mat weight_hh_data;
40 | Mat weight_xh_data;
41 | Mat weight_ho_data;
42 | Mat bias_h_data;
43 | Mat bias_o_data;
44 | };
45 |
46 | } // namespace ncnn
47 |
48 | #endif // LAYER_RNN_H
49 |
--------------------------------------------------------------------------------
/src/layer/roialign.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_ROIALIGN_H
16 | #define LAYER_ROIALIGN_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// ROI Align layer: pools each ROI into a fixed pooled_width x pooled_height
// grid, presumably with bilinear sampling (Mask R-CNN style) -- verify in
// roialign.cpp.
class ROIAlign : public Layer
{
public:
    ROIAlign();

    // Reads the pooled grid size and spatial scale from the param dict.
    virtual int load_param(const ParamDict& pd);

    virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs, const Option& opt) const;

public:
    int pooled_width;
    int pooled_height;
    float spatial_scale;  // maps ROI coordinates from image space to feature-map space
};
36 |
37 | } // namespace ncnn
38 |
39 | #endif // LAYER_ROIALIGN_H
40 |
--------------------------------------------------------------------------------
/src/layer/roipooling.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_ROIPOOLING_H
16 | #define LAYER_ROIPOOLING_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// ROI Pooling layer (Fast R-CNN style): max-pools each ROI into a fixed
// pooled_width x pooled_height grid -- pooling method lives in roipooling.cpp.
class ROIPooling : public Layer
{
public:
    ROIPooling();

    // Reads the pooled grid size and spatial scale from the param dict.
    virtual int load_param(const ParamDict& pd);

    virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs, const Option& opt) const;

public:
    int pooled_width;
    int pooled_height;
    float spatial_scale;  // maps ROI coordinates from image space to feature-map space
};
36 |
37 | } // namespace ncnn
38 |
39 | #endif // LAYER_ROIPOOLING_H
40 |
--------------------------------------------------------------------------------
/src/layer/scale.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_SCALE_H
16 | #define LAYER_SCALE_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Scale layer: multiplies the input by per-channel scale factors and
// optionally adds a per-channel bias.
class Scale : public Layer
{
public:
    Scale();

    // Reads scale_data_size and bias_term from the param dict.
    virtual int load_param(const ParamDict& pd);

    // Loads scale_data (and bias_data when bias_term) from the model blob.
    virtual int load_model(const ModelBin& mb);

    // Two-blob variant: presumably the second blob supplies the scale at
    // runtime -- verify in scale.cpp.
    virtual int forward_inplace(std::vector& bottom_top_blobs, const Option& opt) const;
    virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;

public:
    // param
    int scale_data_size;  // element count of scale_data
    int bias_term;        // nonzero: bias_data is present and applied

    // model
    Mat scale_data;
    Mat bias_data;
};
43 |
44 | } // namespace ncnn
45 |
46 | #endif // LAYER_SCALE_H
47 |
--------------------------------------------------------------------------------
/src/layer/shufflechannel.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "shufflechannel.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(ShuffleChannel)
20 |
ShuffleChannel::ShuffleChannel()
{
    // single input blob; output is a fresh blob (channels are copied, not
    // permuted in place)
    one_blob_only = true;
    support_inplace = false;
}
26 |
// Reads the group count; the default 1 makes the shuffle a no-op permutation.
int ShuffleChannel::load_param(const ParamDict& pd)
{
    group = pd.get(0, 1);

    return 0;
}
33 |
34 | int ShuffleChannel::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
35 | {
36 | int w = bottom_blob.w;
37 | int h = bottom_blob.h;
38 | int c = bottom_blob.c;
39 | size_t elemsize = bottom_blob.elemsize;
40 | int chs_per_group = c / group;
41 |
42 | if (c != chs_per_group * group)
43 | {
44 | // reject invalid group
45 | return -100;
46 | }
47 |
48 | top_blob.create(w, h, c, elemsize, opt.blob_allocator);
49 | if (top_blob.empty())
50 | return -100;
51 |
52 | const size_t feature_sz = w * h * elemsize;
53 | for (int i = 0; i != group; i++)
54 | {
55 | for (int j = 0; j != chs_per_group; j++)
56 | {
57 | int src_q = chs_per_group * i + j;
58 | int dst_q = group * j + i;
59 | memcpy(top_blob.channel(dst_q), bottom_blob.channel(src_q), feature_sz);
60 | }
61 | }
62 | return 0;
63 | }
64 |
65 | } // namespace ncnn
66 |
--------------------------------------------------------------------------------
/src/layer/shufflechannel.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_SHUFFLECHANNEL_H
16 | #define LAYER_SHUFFLECHANNEL_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// ShuffleNet channel shuffle: permutes channels so that channel
// (i * c/group + j) moves to (j * group + i), interleaving the groups.
class ShuffleChannel : public Layer
{
public:
    ShuffleChannel();

    // Reads the group count from the param dict (default 1).
    virtual int load_param(const ParamDict& pd);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;

public:
    int group;  // number of channel groups; must divide the input channel count
};
34 |
35 | } // namespace ncnn
36 |
37 | #endif // LAYER_SHUFFLECHANNEL_H
38 |
--------------------------------------------------------------------------------
/src/layer/sigmoid.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "sigmoid.h"
16 | #include
17 |
18 | namespace ncnn {
19 |
20 | DEFINE_LAYER_CREATOR(Sigmoid)
21 |
Sigmoid::Sigmoid()
{
    // single input blob, transformed in place; no parameters to load
    one_blob_only = true;
    support_inplace = true;
}
27 |
28 | int Sigmoid::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
29 | {
30 | int w = bottom_top_blob.w;
31 | int h = bottom_top_blob.h;
32 | int channels = bottom_top_blob.c;
33 | int size = w * h;
34 |
35 | #pragma omp parallel for num_threads(opt.num_threads)
36 | for (int q=0; q& bottom_blobs, std::vector& top_blobs, const Option& opt) const;
30 |
31 | public:
32 | Mat slices;
33 | int axis;
34 | };
35 |
36 | } // namespace ncnn
37 |
38 | #endif // LAYER_SLICE_H
39 |
--------------------------------------------------------------------------------
/src/layer/softmax.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_SOFTMAX_H
16 | #define LAYER_SOFTMAX_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Softmax layer: normalizes values to a probability distribution along the
// selected axis, in place.
class Softmax : public Layer
{
public:
    Softmax();

    // Reads the axis from the param dict.
    virtual int load_param(const ParamDict& pd);

    virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;

public:
    int axis;  // axis along which the softmax is computed -- TODO confirm encoding in softmax.cpp
};
34 |
35 | } // namespace ncnn
36 |
37 | #endif // LAYER_SOFTMAX_H
38 |
--------------------------------------------------------------------------------
/src/layer/split.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "split.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(Split)
20 |
Split::Split()
{
    // defaults kept: multiple output blobs, not in place -- forward fans the
    // single input out to every top blob
}
24 |
25 | int Split::forward(const std::vector& bottom_blobs, std::vector& top_blobs, const Option& /*opt*/) const
26 | {
27 | const Mat& bottom_blob = bottom_blobs[0];
28 | for (size_t i=0; i& bottom_blobs, std::vector& top_blobs, const Option& opt) const;
28 |
29 | public:
30 | };
31 |
32 | } // namespace ncnn
33 |
34 | #endif // LAYER_SPLIT_H
35 |
--------------------------------------------------------------------------------
/src/layer/spp.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_SPP_H
16 | #define LAYER_SPP_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Spatial pyramid pooling layer: pools the input at several pyramid levels
// and presumably concatenates the results into one vector -- verify in spp.cpp.
class SPP : public Layer
{
public:
    SPP();

    // Reads the pooling type and pyramid height from the param dict.
    virtual int load_param(const ParamDict& pd);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;

    enum { PoolMethod_MAX = 0, PoolMethod_AVE = 1 };

public:
    // param
    int pooling_type;    // PoolMethod_MAX or PoolMethod_AVE
    int pyramid_height;  // number of pyramid levels
};
38 |
39 | } // namespace ncnn
40 |
41 | #endif // LAYER_SPP_H
42 |
--------------------------------------------------------------------------------
/src/layer/squeeze.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "squeeze.h"
16 |
17 | namespace ncnn {
18 |
19 | DEFINE_LAYER_CREATOR(Squeeze)
20 |
Squeeze::Squeeze()
{
    // single input blob; output is produced via reshape, not in place
    one_blob_only = true;
    support_inplace = false;
}
26 |
// Reads the per-dimension squeeze flags; all default to 0 (squeeze nothing).
int Squeeze::load_param(const ParamDict& pd)
{
    squeeze_w = pd.get(0, 0);
    squeeze_h = pd.get(1, 0);
    squeeze_c = pd.get(2, 0);

    return 0;
}
35 |
// Removes singleton dimensions from bottom_blob according to the squeeze_*
// flags, using reshape (which may share the underlying data).
//
// NOTE(review): the three outer branches are mutually exclusive and are
// tried in the order c, h, w -- so e.g. when the squeeze_c branch is taken,
// a size-1 w dimension is NOT also removed unless squeeze_h && h == 1.
// This ordering is behavior-bearing; do not reorder the branches.
int Squeeze::forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    int dims = bottom_blob.dims;

    // default: pass the blob through unchanged
    top_blob = bottom_blob;

    if (squeeze_c && dims == 3 && channels == 1)
    {
        // drop the channel dim, and optionally a size-1 h as well
        if (squeeze_h && h == 1)
            top_blob = bottom_blob.reshape(w, opt.blob_allocator);
        else
            top_blob = bottom_blob.reshape(w, h, opt.blob_allocator);
    }
    else if (squeeze_h && dims >= 2 && h == 1)
    {
        // drop the h dim, and optionally a size-1 w as well
        if (squeeze_w && w == 1)
            top_blob = bottom_blob.reshape(channels, opt.blob_allocator);
        else
            top_blob = bottom_blob.reshape(w, channels, opt.blob_allocator);
    }
    else if (squeeze_w && dims >= 1 && w == 1)
    {
        // drop the w dim, and optionally a size-1 h as well
        if (squeeze_h && h == 1)
            top_blob = bottom_blob.reshape(channels, opt.blob_allocator);
        else
            top_blob = bottom_blob.reshape(h, channels, opt.blob_allocator);
    }

    if (top_blob.empty())
        return -100;

    return 0;
}
72 |
73 | } // namespace ncnn
74 |
--------------------------------------------------------------------------------
/src/layer/squeeze.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_SQUEEZE_H
16 | #define LAYER_SQUEEZE_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Squeeze layer: removes selected size-1 axes (w/h/c) from a blob.
// Which axes may be dropped is controlled by the squeeze_* flags.
class Squeeze : public Layer
{
public:
    Squeeze();

    // Loads the squeeze_w/h/c flags from the param dict.
    virtual int load_param(const ParamDict& pd);

    // Writes top_blob as bottom_blob reshaped with the selected size-1 axes
    // removed; returns 0 on success, negative on failure.
    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;

public:
    int squeeze_w; // non-zero: the w axis may be dropped when w == 1
    int squeeze_h; // non-zero: the h axis may be dropped when h == 1
    int squeeze_c; // non-zero: the channel axis may be dropped when c == 1
};
36 |
37 | } // namespace ncnn
38 |
39 | #endif // LAYER_SQUEEZE_H
40 |
--------------------------------------------------------------------------------
/src/layer/tanh.cpp:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #include "tanh.h"
16 | #include
17 |
18 | namespace ncnn {
19 |
20 | DEFINE_LAYER_CREATOR(TanH)
21 |
22 | TanH::TanH()
23 | {
24 | one_blob_only = true;
25 | support_inplace = true;
26 | }
27 |
28 | int TanH::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
29 | {
30 | int w = bottom_top_blob.w;
31 | int h = bottom_top_blob.h;
32 | int channels = bottom_top_blob.c;
33 | int size = w * h;
34 |
35 | #pragma omp parallel for num_threads(opt.num_threads)
36 | for (int q=0; q threshold ? 1.f : 0.f;
49 | }
50 | }
51 |
52 | return 0;
53 | }
54 |
55 | } // namespace ncnn
56 |
--------------------------------------------------------------------------------
/src/layer/threshold.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_THRESHOLD_H
16 | #define LAYER_THRESHOLD_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Threshold layer: in-place element-wise step function against a fixed
// threshold (elements map to 1.f/0.f; see the `> threshold ? 1.f : 0.f`
// kernel fragment in the .cpp).
class Threshold : public Layer
{
public:
    Threshold();

    // Loads `threshold` from the param dict.
    virtual int load_param(const ParamDict& pd);

    // Thresholds every element of bottom_top_blob in place.
    virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;

public:
    float threshold; // comparison value for the step function
};
34 |
35 | } // namespace ncnn
36 |
37 | #endif // LAYER_THRESHOLD_H
38 |
--------------------------------------------------------------------------------
/src/layer/tile.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_TILE_H
16 | #define LAYER_TILE_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// Tile layer. Parameters are loaded from the param dict; presumably the
// blob is repeated `tiles` times along axis `dim` — the implementation
// lives in tile.cpp (not shown here), so confirm there.
class Tile : public Layer
{
public:
    Tile();

    // Loads `dim` and `tiles` from the param dict.
    virtual int load_param(const ParamDict& pd);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;

public:
    int dim;   // axis to tile along (presumably — verify in tile.cpp)
    int tiles; // repetition count
};
35 |
36 | } // namespace ncnn
37 |
38 | #endif // LAYER_TILE_H
39 |
--------------------------------------------------------------------------------
/src/layer/unaryop.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_UNARYOP_H
16 | #define LAYER_UNARYOP_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// UnaryOp layer: applies the single unary operation selected by op_type
// to the blob, in place.
class UnaryOp : public Layer
{
public:
    UnaryOp();

    // Loads op_type from the param dict.
    virtual int load_param(const ParamDict& pd);

    // Applies the selected operation to bottom_top_blob in place.
    virtual int forward_inplace(Mat& bottom_top_blob, const Option& opt) const;

    // Supported operations; op_type holds exactly one of these values.
    enum {
        Operation_ABS = 0,
        Operation_NEG = 1,
        Operation_FLOOR = 2,
        Operation_CEIL = 3,
        Operation_SQUARE= 4,
        Operation_SQRT = 5,
        Operation_RSQRT = 6,
        Operation_EXP = 7,
        Operation_LOG = 8,
        Operation_SIN = 9,
        Operation_COS = 10,
        Operation_TAN = 11,
        Operation_ASIN = 12,
        Operation_ACOS = 13,
        Operation_ATAN = 14,
        Operation_RECIPROCAL = 15
    };

public:
    // param
    int op_type; // one of the Operation_* values above
};
54 |
55 | } // namespace ncnn
56 |
57 | #endif // LAYER_UNARYOP_H
58 |
--------------------------------------------------------------------------------
/src/layer/x86/convolution_x86.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CONVOLUTION_X86_H
16 | #define LAYER_CONVOLUTION_X86_H
17 |
18 | #include "convolution.h"
19 |
20 | namespace ncnn {
21 |
// Signature shared by the x86 convolution kernels.
// NOTE(review): parameter meanings (input, output, weights, bias, options)
// are inferred from the Convolution interface — confirm in convolution_x86.cpp.
typedef void (*conv_func)(const Mat&, Mat&, const Mat&, const Mat&, const Option&);

// x86-optimized Convolution implementation.
class Convolution_x86 : public Convolution
{
public:
    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;

    // Runs `conv` for a dilated convolution.
    // NOTE(review): semantics inferred from the name — verify in the .cpp.
    virtual int forwardDilation(const Mat& bottom_blob, Mat &top_blob, conv_func conv, const Option& opt) const;
};
30 |
31 | } // namespace ncnn
32 |
33 | #endif // LAYER_CONVOLUTION_X86_H
34 |
--------------------------------------------------------------------------------
/src/layer/x86/convolutiondepthwise_x86.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_CONVOLUTIONDEPTHWISE_X86_H
16 | #define LAYER_CONVOLUTIONDEPTHWISE_X86_H
17 |
18 | #include "convolutiondepthwise.h"
19 |
20 | namespace ncnn {
21 |
// x86 depthwise convolution implementation.
class ConvolutionDepthWise_x86 : public ConvolutionDepthWise
{
public:
    ConvolutionDepthWise_x86();
    virtual ~ConvolutionDepthWise_x86();

    virtual int load_model(const ModelBin& mb);

    virtual int forward(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const;

public:
    // Per-group sub-ops (presumably one op per convolution group).
    // NOTE(review): the template argument is missing in this copy of the
    // source (likely std::vector<ncnn::Layer*>) — verify against upstream.
    std::vector group_ops;
};
35 |
36 | } // namespace ncnn
37 |
38 | #endif // LAYER_CONVOLUTIONDEPTHWISE_X86_H
39 |
--------------------------------------------------------------------------------
/src/layer/yolodetectionoutput.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_YOLODETECTIONOUTPUT_H
16 | #define LAYER_YOLODETECTIONOUTPUT_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// YOLO (v1/v2-style) detection output layer.
class YoloDetectionOutput : public Layer
{
public:
    YoloDetectionOutput();
    ~YoloDetectionOutput();

    // Loads the detection parameters below from the param dict.
    virtual int load_param(const ParamDict& pd);

    // NOTE(review): the vector element type is missing in this copy of the
    // source (likely std::vector<Mat>) — verify against upstream.
    virtual int forward_inplace(std::vector& bottom_top_blobs, const Option& opt) const;

public:
    int num_class;              // number of object classes
    int num_box;                // boxes predicted per grid cell (presumably)
    float confidence_threshold; // detections below this score are dropped (presumably)
    float nms_threshold;        // overlap threshold for NMS (presumably)
    Mat biases;                 // anchor box sizes (presumably)

    // Internal softmax layer. NOTE(review): presumably created in the ctor
    // and destroyed in the dtor — confirm in the .cpp.
    ncnn::Layer* softmax;
};
41 |
42 | } // namespace ncnn
43 |
44 | #endif // LAYER_YOLODETECTIONOUTPUT_H
45 |
--------------------------------------------------------------------------------
/src/layer/yolov3detectionoutput.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef LAYER_YOLOV3DETECTIONOUTPUT_H
16 | #define LAYER_YOLOV3DETECTIONOUTPUT_H
17 |
18 | #include "layer.h"
19 |
20 | namespace ncnn {
21 |
// YOLOv3 detection output layer (multi-scale inputs).
class Yolov3DetectionOutput : public Layer
{
public:
    Yolov3DetectionOutput();
    ~Yolov3DetectionOutput();

    // Loads the detection parameters below from the param dict.
    virtual int load_param(const ParamDict& pd);

    // NOTE(review): the vector element types are missing in this copy of the
    // source (likely std::vector<Mat>) — verify against upstream.
    virtual int forward(const std::vector& bottom_blobs, std::vector& top_blobs, const Option& opt) const;

public:
    int num_class;              // number of object classes
    int num_box;                // boxes predicted per grid cell (presumably)
    float confidence_threshold; // detections below this score are dropped (presumably)
    float nms_threshold;        // overlap threshold for NMS (presumably)
    Mat biases;                 // anchor box sizes (presumably)
    Mat mask;                   // anchor indices used per scale (presumably)
    Mat anchors_scale;          // per-scale anchor scaling (presumably)
    int mask_group_num;
    // Internal softmax layer. NOTE(review): presumably created in the ctor
    // and destroyed in the dtor — confirm in the .cpp.
    ncnn::Layer* softmax;
};
43 |
44 | } // namespace ncnn
45 |
#endif // LAYER_YOLOV3DETECTIONOUTPUT_H
47 |
--------------------------------------------------------------------------------
/src/layer_declaration.h.in:
--------------------------------------------------------------------------------
1 | // Layer Declaration header
2 | //
3 | // This file is auto-generated by cmake, don't edit it.
4 |
5 | @layer_declaration@
6 |
--------------------------------------------------------------------------------
/src/layer_registry.h.in:
--------------------------------------------------------------------------------
1 | // Layer Registry header
2 | //
3 | // This file is auto-generated by cmake, don't edit it.
4 |
5 | @layer_registry@
6 |
--------------------------------------------------------------------------------
/src/layer_type.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef NCNN_LAYER_TYPE_H
16 | #define NCNN_LAYER_TYPE_H
17 |
18 | namespace ncnn {
19 |
// Built-in layer type ids. The enumerator list itself is generated by
// cmake into layer_type_enum.h (see layer_type_enum.h.in).
namespace LayerType {
enum
{
#include "layer_type_enum.h"
    CustomBit = (1<<8), // high bit marking user-registered layer types (presumably)
};
} // namespace LayerType
27 |
28 | } // namespace ncnn
29 |
30 | #endif // NCNN_LAYER_TYPE_H
31 |
--------------------------------------------------------------------------------
/src/layer_type_enum.h.in:
--------------------------------------------------------------------------------
1 | // Layer Type Enum header
2 | //
3 | // This file is auto-generated by cmake, don't edit it.
4 |
5 | @layer_type_enum@
6 |
--------------------------------------------------------------------------------
/src/modelbin.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef NCNN_MODELBIN_H
16 | #define NCNN_MODELBIN_H
17 |
18 | #include
19 | #include "mat.h"
20 | #include "platform.h"
21 |
22 | namespace ncnn {
23 |
24 | class Net;
// Abstract reader for layer weight data. Each load() returns a Mat of the
// requested size; `type` selects the element encoding (see below).
class ModelBin
{
public:
    // element type
    // 0 = auto
    // 1 = float32
    // 2 = float16
    // 3 = int8
    // load vec: 1-D blob of w elements
    virtual Mat load(int w, int type) const = 0;
    // load image: w x h blob
    virtual Mat load(int w, int h, int type) const;
    // load dim: w x h x c blob
    virtual Mat load(int w, int h, int c, int type) const;
};
40 |
#if NCNN_STDIO
// ModelBin that reads weight data from an open FILE*.
class ModelBinFromStdio : public ModelBin
{
public:
    // construct from file
    ModelBinFromStdio(FILE* binfp);

    virtual Mat load(int w, int type) const;

protected:
    // Borrowed file handle. NOTE(review): presumably not closed by this
    // class — confirm ownership in the .cpp.
    FILE* binfp;
};
#endif // NCNN_STDIO
54 |
// ModelBin that reads weight data from a memory buffer. `mem` is a
// reference to the caller's pointer — presumably advanced as data is
// consumed, so the caller observes the new position; confirm in the .cpp.
class ModelBinFromMemory : public ModelBin
{
public:
    // construct from external memory
    ModelBinFromMemory(const unsigned char*& mem);

    virtual Mat load(int w, int type) const;

protected:
    const unsigned char*& mem; // caller-owned read cursor
};
66 |
// ModelBin backed by a pre-built array of Mat weights. The mutable cursor
// suggests each load() hands out the next Mat in sequence — confirm in
// the .cpp.
class ModelBinFromMatArray : public ModelBin
{
public:
    // construct from weight blob array
    ModelBinFromMatArray(const Mat* weights);

    virtual Mat load(int w, int type) const;

protected:
    mutable const Mat* weights; // mutable so the const load() can advance it
};
78 |
79 | } // namespace ncnn
80 |
81 | #endif // NCNN_MODELBIN_H
82 |
--------------------------------------------------------------------------------
/src/paramdict.h:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
15 | #ifndef NCNN_PARAMDICT_H
16 | #define NCNN_PARAMDICT_H
17 |
18 | #include
19 | #include "mat.h"
20 | #include "platform.h"
21 |
22 | // at most 20 parameters
23 | #define NCNN_MAX_PARAM_COUNT 20
24 |
25 | namespace ncnn {
26 |
27 | class Net;
// Key/value store for layer parameters: integer ids index fixed slots
// (at most NCNN_MAX_PARAM_COUNT), each holding an int, a float, or a Mat.
class ParamDict
{
public:
    // empty
    ParamDict();

    // get int (def is returned when the id holds no loaded value — presumably)
    int get(int id, int def) const;
    // get float
    float get(int id, float def) const;
    // get array
    Mat get(int id, const Mat& def) const;

    // set int
    void set(int id, int i);
    // set float
    void set(int id, float f);
    // set array
    void set(int id, const Mat& v);

public:
    // Inference options carried alongside the parameters.
    // NOTE(review): presumably assigned externally by Net — confirm.
    int use_winograd_convolution;
    int use_sgemm_convolution;
    int use_int8_inference;

protected:
    friend class Net;

    // Resets all parameter slots.
    void clear();

#if NCNN_STDIO
#if NCNN_STRING
    // NOTE(review): the NCNN_STRING-guarded overloads presumably parse the
    // text .param format; the others the binary format — confirm in the .cpp.
    int load_param(FILE* fp);
    int load_param_mem(const char*& mem);
#endif // NCNN_STRING
    int load_param_bin(FILE* fp);
#endif // NCNN_STDIO
    int load_param(const unsigned char*& mem);

protected:
    // One slot per parameter id.
    struct
    {
        int loaded;                // non-zero once a value was stored
        union { int i; float f; }; // scalar payload
        Mat v;                     // array payload
    } params[NCNN_MAX_PARAM_COUNT];
};
75 |
76 | } // namespace ncnn
77 |
78 | #endif // NCNN_PARAMDICT_H
79 |
--------------------------------------------------------------------------------
/src/platform.h.in:
--------------------------------------------------------------------------------
1 | // Tencent is pleased to support the open source community by making ncnn available.
2 | //
3 | // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
4 | //
5 | // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 | // in compliance with the License. You may obtain a copy of the License at
7 | //
8 | // https://opensource.org/licenses/BSD-3-Clause
9 | //
10 | // Unless required by applicable law or agreed to in writing, software distributed
11 | // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 | // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 | // specific language governing permissions and limitations under the License.
14 |
#ifndef NCNN_PLATFORM_H
#define NCNN_PLATFORM_H

// Build-time feature switches; cmake substitutes each as 0 or 1.
// NCNN_STDIO: stdio-based loading (guards FILE* APIs in modelbin.h/paramdict.h)
#cmakedefine01 NCNN_STDIO
// NCNN_STRING: text .param support (guards string overloads in paramdict.h)
#cmakedefine01 NCNN_STRING
#cmakedefine01 NCNN_OPENCV
#cmakedefine01 NCNN_BENCHMARK
#cmakedefine01 NCNN_PIXEL
#cmakedefine01 NCNN_PIXEL_ROTATE

#endif // NCNN_PLATFORM_H
26 |
--------------------------------------------------------------------------------
/toolchains/aarch64-linux-gnu.toolchain.cmake:
--------------------------------------------------------------------------------
# Cross-compile toolchain: aarch64-linux-gnu (GCC).
# Set a system type cmake recognizes.
SET ( CMAKE_SYSTEM_NAME Linux )
SET ( CMAKE_SYSTEM_PROCESSOR aarch64 )
# aarch64-linux-gnu-gcc and aarch64-linux-gnu-g++ must be findable in $PATH:
SET ( CMAKE_C_COMPILER "aarch64-linux-gnu-gcc" )
SET ( CMAKE_CXX_COMPILER "aarch64-linux-gnu-g++" )

# set searching rules for the cross-compiler
SET ( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER )
SET ( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
SET ( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )

# set ${CMAKE_CXX_FLAGS} for the cross-compiled build
SET ( CMAKE_CXX_FLAGS "-std=c++11 -march=armv8-a -fopenmp ${CMAKE_CXX_FLAGS}" )

# other settings
add_definitions(-D__ARM_NEON)
# NOTE(review): __ANDROID__/ANDROID are defined although the target is
# linux-gnu — presumably to reuse Android code paths; confirm intent.
add_definitions(-D__ANDROID__)
SET ( ANDROID true)
20 |
--------------------------------------------------------------------------------
/toolchains/arm-linux-gnueabi.toolchain.cmake:
--------------------------------------------------------------------------------
# Cross-compile toolchain: arm-linux-gnueabi (softfp ABI, GCC).
# Set a system type cmake recognizes.
SET ( CMAKE_SYSTEM_NAME Linux )
SET ( CMAKE_SYSTEM_PROCESSOR arm )
# arm-linux-gnueabi-gcc and arm-linux-gnueabi-g++ must be findable in $PATH:
SET ( CMAKE_C_COMPILER "arm-linux-gnueabi-gcc" )
SET ( CMAKE_CXX_COMPILER "arm-linux-gnueabi-g++" )

# set searching rules for the cross-compiler
SET ( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER )
SET ( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
SET ( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )

# set ${CMAKE_CXX_FLAGS} for the cross-compiled build
SET ( CMAKE_CXX_FLAGS "-std=c++11 -march=armv7-a -mfloat-abi=softfp -mfpu=neon-vfpv4 -fopenmp ${CMAKE_CXX_FLAGS}" )

# other settings
add_definitions(-D__ARM_NEON)
# NOTE(review): __ANDROID__/ANDROID are defined although the target is
# linux-gnueabi — presumably to reuse Android code paths; confirm intent.
add_definitions(-D__ANDROID__)
SET ( ANDROID true)
20 |
--------------------------------------------------------------------------------
/toolchains/arm-linux-gnueabihf.toolchain.cmake:
--------------------------------------------------------------------------------
# Cross-compile toolchain: arm-linux-gnueabihf (hard-float ABI, GCC).
# Set a system type cmake recognizes.
SET ( CMAKE_SYSTEM_NAME Linux )
SET ( CMAKE_SYSTEM_PROCESSOR arm )
# arm-linux-gnueabihf-gcc and arm-linux-gnueabihf-g++ must be findable in $PATH:
SET ( CMAKE_C_COMPILER "arm-linux-gnueabihf-gcc" )
SET ( CMAKE_CXX_COMPILER "arm-linux-gnueabihf-g++" )

# set searching rules for the cross-compiler
SET ( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER )
SET ( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
SET ( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )

# set ${CMAKE_CXX_FLAGS} for the cross-compiled build
SET ( CMAKE_CXX_FLAGS "-std=c++11 -march=armv7-a -mfloat-abi=hard -mfpu=neon-vfpv4 -fopenmp ${CMAKE_CXX_FLAGS}" )

# other settings
add_definitions(-D__ARM_NEON)
# NOTE(review): __ANDROID__/ANDROID are defined although the target is
# linux-gnueabihf — presumably to reuse Android code paths; confirm intent.
add_definitions(-D__ANDROID__)
SET ( ANDROID true)
20 |
--------------------------------------------------------------------------------
/toolchains/himix100.toolchain.cmake:
--------------------------------------------------------------------------------
# Cross-compile toolchain: HiSilicon himix100 (aarch64) SDK.
# Set a system type cmake recognizes.
SET ( CMAKE_SYSTEM_NAME Linux )
SET ( CMAKE_SYSTEM_PROCESSOR aarch64 )
# The HiSilicon SDK installs its toolchain at the paths below:
SET ( CMAKE_C_COMPILER "/opt/hisi-linux/x86-arm/aarch64-himix100-linux/bin/aarch64-himix100-linux-gcc" )
SET ( CMAKE_CXX_COMPILER "/opt/hisi-linux/x86-arm/aarch64-himix100-linux/bin/aarch64-himix100-linux-g++" )
SET ( CMAKE_FIND_ROOT_PATH "/opt/hisi-linux/x86-arm/aarch64-himix100-linux" )

# set searching rules for the cross-compiler
SET ( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER )
SET ( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
SET ( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )

# set ${CMAKE_CXX_FLAGS} for the cross-compiled build
SET ( CMAKE_CXX_FLAGS "-std=c++11 -march=armv8-a -fopenmp ${CMAKE_CXX_FLAGS}" )

# other settings
add_definitions(-D__ARM_NEON)
# NOTE(review): __ANDROID__/ANDROID are defined although the target is a
# HiSilicon Linux SDK — presumably to reuse Android code paths; confirm.
add_definitions(-D__ANDROID__)
SET ( ANDROID true)
21 |
--------------------------------------------------------------------------------
/toolchains/hisiv300.toolchain.cmake:
--------------------------------------------------------------------------------
# Cross-compile toolchain: HiSilicon hisiv300 (armv7) SDK.
# Set a system type cmake recognizes.
SET ( CMAKE_SYSTEM_NAME Linux )
SET ( CMAKE_SYSTEM_PROCESSOR arm )
# The HiSilicon SDK installs its toolchain at the paths below:
SET ( CMAKE_C_COMPILER "/opt/hisi-linux/x86-arm/arm-hisiv300-linux/target/bin/arm-hisiv300-linux-gcc" )
SET ( CMAKE_CXX_COMPILER "/opt/hisi-linux/x86-arm/arm-hisiv300-linux/target/bin/arm-hisiv300-linux-g++" )
SET ( CMAKE_FIND_ROOT_PATH "/opt/hisi-linux/x86-arm/arm-hisiv300-linux" )

# set searching rules for the cross-compiler
SET ( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER )
SET ( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
SET ( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )

# set ${CMAKE_CXX_FLAGS} for the cross-compiled build
SET ( CMAKE_CXX_FLAGS "-std=c++11 -march=armv7-a -mfloat-abi=softfp -mfpu=neon-vfpv4 -fopenmp ${CMAKE_CXX_FLAGS}" )

# other settings
add_definitions(-D__ARM_NEON)
# NOTE(review): __ANDROID__/ANDROID are defined although the target is a
# HiSilicon Linux SDK — presumably to reuse Android code paths; confirm.
add_definitions(-D__ANDROID__)
SET ( ANDROID true)
21 |
--------------------------------------------------------------------------------
/toolchains/hisiv500.toolchain.cmake:
--------------------------------------------------------------------------------
# Cross-compile toolchain: HiSilicon hisiv500 (armv7) SDK.
# Set a system type cmake recognizes.
SET ( CMAKE_SYSTEM_NAME Linux )
SET ( CMAKE_SYSTEM_PROCESSOR arm )
# The HiSilicon SDK installs its toolchain at the paths below:
SET ( CMAKE_C_COMPILER "/opt/hisi-linux/x86-arm/arm-hisiv500-linux/target/bin/arm-hisiv500-linux-gcc" )
SET ( CMAKE_CXX_COMPILER "/opt/hisi-linux/x86-arm/arm-hisiv500-linux/target/bin/arm-hisiv500-linux-g++" )
SET ( CMAKE_FIND_ROOT_PATH "/opt/hisi-linux/x86-arm/arm-hisiv500-linux" )

# set searching rules for the cross-compiler
SET ( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER )
SET ( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
SET ( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )

# set ${CMAKE_CXX_FLAGS} for the cross-compiled build
SET ( CMAKE_CXX_FLAGS "-std=c++11 -march=armv7-a -mfloat-abi=softfp -mfpu=neon-vfpv4 -fopenmp ${CMAKE_CXX_FLAGS}" )

# other settings
add_definitions(-D__ARM_NEON)
# NOTE(review): __ANDROID__/ANDROID are defined although the target is a
# HiSilicon Linux SDK — presumably to reuse Android code paths; confirm.
add_definitions(-D__ANDROID__)
SET ( ANDROID true)
21 |
--------------------------------------------------------------------------------
/toolchains/host.gcc.toolchain.cmake:
--------------------------------------------------------------------------------
# Host build toolchain: plain native gcc/g++ on x86 Linux.
# Set a system type cmake recognizes.
SET ( CMAKE_SYSTEM_NAME Linux )
SET ( CMAKE_SYSTEM_PROCESSOR x86 )
# assumes gcc/g++ are installed and on $PATH:
SET ( CMAKE_C_COMPILER "gcc" )
SET ( CMAKE_CXX_COMPILER "g++" )

# set searching rules
SET ( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER )
SET ( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
SET ( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )

# set ${CMAKE_CXX_FLAGS}
SET ( CMAKE_CXX_FLAGS "-std=c++11 ${CMAKE_CXX_FLAGS}" )
15 |
--------------------------------------------------------------------------------
/toolchains/iossimxc.toolchain.cmake:
--------------------------------------------------------------------------------
# Cross-compile toolchain: iOS Simulator (i386/x86_64) via cctools-port clang.
# Standard settings
# set(UNIX True)
# set(Darwin True)
# set(IOS True)
set (CMAKE_SYSTEM_NAME Darwin)
set (CMAKE_SYSTEM_VERSION 1)
set (UNIX True)
set (APPLE True)
set (IOS True)

# suppress -rdynamic
# set(CMAKE_SYSTEM_NAME Generic)

set(CMAKE_C_COMPILER i386-apple-darwin11-clang)
set(CMAKE_CXX_COMPILER i386-apple-darwin11-clang++)

set(_CMAKE_TOOLCHAIN_PREFIX i386-apple-darwin11-)

# NOTE(review): hard-coded to one developer's cctools-port SDK location —
# adjust for your environment.
set(CMAKE_IOS_SDK_ROOT "/home/nihui/osd/cctools-port/usage_examples/ios_toolchain/target-sim/SDK/")

# Set the sysroot default to the most recent SDK
set(CMAKE_OSX_SYSROOT ${CMAKE_IOS_SDK_ROOT} CACHE PATH "Sysroot used for iOS Simulator support")

# set the architecture for iOS
# set(IOS_ARCH i386)
# set(IOS_ARCH x86_64)
set(IOS_ARCH i386;x86_64)

set(CMAKE_OSX_ARCHITECTURES ${IOS_ARCH} CACHE string "Build architecture for iOS Simulator")

# Set the find root to the iOS developer roots and to user defined paths
set(CMAKE_FIND_ROOT_PATH ${CMAKE_IOS_DEVELOPER_ROOT} ${CMAKE_IOS_SDK_ROOT} ${CMAKE_PREFIX_PATH} CACHE string "iOS Simulator find search path root")

# searching for frameworks only
set(CMAKE_FIND_FRAMEWORK FIRST)

# set up the default search directories for frameworks
set(CMAKE_SYSTEM_FRAMEWORK_PATH
    ${CMAKE_IOS_SDK_ROOT}/System/Library/Frameworks
)
41 |
--------------------------------------------------------------------------------
/toolchains/iosxc.toolchain.cmake:
--------------------------------------------------------------------------------
# Cross-compile toolchain: iOS device (armv7/arm64) via cctools-port clang.
# Standard settings
# set(UNIX True)
# set(Darwin True)
# set(IOS True)
set (CMAKE_SYSTEM_NAME Darwin)
set (CMAKE_SYSTEM_VERSION 1)
set (UNIX True)
set (APPLE True)
set (IOS True)

# suppress -rdynamic
# set(CMAKE_SYSTEM_NAME Generic)

set(CMAKE_C_COMPILER arm-apple-darwin11-clang)
set(CMAKE_CXX_COMPILER arm-apple-darwin11-clang++)

set(_CMAKE_TOOLCHAIN_PREFIX arm-apple-darwin11-)

# NOTE(review): hard-coded to one developer's cctools-port SDK location —
# adjust for your environment.
set(CMAKE_IOS_SDK_ROOT "/home/nihui/osd/cctools-port/usage_examples/ios_toolchain/target/SDK/")

# Set the sysroot default to the most recent SDK
set(CMAKE_OSX_SYSROOT ${CMAKE_IOS_SDK_ROOT} CACHE PATH "Sysroot used for iOS support")

# set the architecture for iOS
# set(IOS_ARCH arm64)
set(IOS_ARCH armv7;arm64)

set(CMAKE_OSX_ARCHITECTURES ${IOS_ARCH} CACHE string "Build architecture for iOS")

# Set the find root to the iOS developer roots and to user defined paths
set(CMAKE_FIND_ROOT_PATH ${CMAKE_IOS_DEVELOPER_ROOT} ${CMAKE_IOS_SDK_ROOT} ${CMAKE_PREFIX_PATH} CACHE string "iOS find search path root")

# searching for frameworks only
set(CMAKE_FIND_FRAMEWORK FIRST)

# set up the default search directories for frameworks
set(CMAKE_SYSTEM_FRAMEWORK_PATH
    ${CMAKE_IOS_SDK_ROOT}/System/Library/Frameworks
)
40 |
--------------------------------------------------------------------------------
/toolchains/pi3.toolchain.cmake:
--------------------------------------------------------------------------------
1 | # Toolchain file for building natively on a Raspberry Pi 3 (32-bit Linux).
2 | # Fixed: the variable was misspelled "CMAKE_SYSTEM_NANE" and set to "Android",
3 | # so the value was never actually applied; a Pi 3 runs Linux, not Android.
4 | SET(CMAKE_SYSTEM_NAME Linux)
5 | SET(CMAKE_SYSTEM_PROCESSOR "armv7l")
6 | # Kept for backward compatibility — presumably some build scripts key off this; TODO confirm
7 | SET(ANDROID_ARCH_NAME "arm")
8 | SET(UNIX true)
9 | SET(CMAKE_C_COMPILER "gcc")
10 | SET(CMAKE_CXX_COMPILER "g++")
11 |
--------------------------------------------------------------------------------
/tools/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | # Model converter tools; each subdirectory guards its own optional dependencies.
3 | add_subdirectory(caffe)
4 | add_subdirectory(mxnet)
5 | add_subdirectory(onnx)
6 | add_subdirectory(tensorflow)
7 |
8 | add_executable(ncnn2mem ncnn2mem.cpp)
9 |
10 | # ncnn public headers plus generated headers in the build tree. Previously three
11 | # overlapping directory-wide include_directories() calls (${CMAKE_SOURCE_DIR}/src
12 | # duplicated ${CMAKE_CURRENT_SOURCE_DIR}/../src); scoped to the target instead,
13 | # which matches the target_link_libraries(... PRIVATE ...) style below.
14 | target_include_directories(ncnn2mem PRIVATE
15 |     ${CMAKE_CURRENT_SOURCE_DIR}/../src
16 |     ${CMAKE_CURRENT_BINARY_DIR}/../src)
17 |
18 | target_link_libraries(ncnn2mem PRIVATE ncnn)
19 |
20 | if(COVERAGE)
21 |     target_link_libraries(ncnn2mem PRIVATE --coverage)
22 | endif()
23 |
--------------------------------------------------------------------------------
/tools/caffe/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | # Protobuf is optional: when missing, the converter is simply skipped with a warning.
3 | find_package(Protobuf)
4 |
5 | if(PROTOBUF_FOUND)
6 |     include_directories(${PROTOBUF_INCLUDE_DIR})
7 |     # Generated caffe.pb.h/caffe.pb.cc land in the build tree.
8 |     include_directories(${CMAKE_CURRENT_BINARY_DIR})
9 |     protobuf_generate_cpp(CAFFE_PROTO_SRCS CAFFE_PROTO_HDRS caffe.proto)
10 |     add_executable(caffe2ncnn caffe2ncnn.cpp ${CAFFE_PROTO_SRCS} ${CAFFE_PROTO_HDRS})
11 |     # Generated protobuf code needs C++11; CXX_STANDARD_REQUIRED keeps CMake from
12 |     # silently decaying to an older standard when the compiler lacks C++11.
13 |     set_target_properties(caffe2ncnn PROPERTIES CXX_STANDARD 11 CXX_STANDARD_REQUIRED ON)
14 |     target_link_libraries(caffe2ncnn ${PROTOBUF_LIBRARIES})
15 | else()
16 |     message(WARNING "Protobuf not found, caffe model convert tool won't be built")
17 | endif()
18 |
14 |
--------------------------------------------------------------------------------
/tools/darknet/readme.txt:
--------------------------------------------------------------------------------
1 | You can find the darknet2ncnn tool here
2 |
3 | https://github.com/xiangweizeng/darknet2ncnn
4 |
--------------------------------------------------------------------------------
/tools/mxnet/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | add_executable(mxnet2ncnn mxnet2ncnn.cpp) # no protobuf dependency, unlike the caffe/onnx/tensorflow converters — presumably parses the MXNet model files directly; confirm in mxnet2ncnn.cpp
3 |
--------------------------------------------------------------------------------
/tools/onnx/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | # Protobuf is optional: when missing, the converter is simply skipped with a warning.
3 | find_package(Protobuf)
4 |
5 | if(PROTOBUF_FOUND)
6 |     include_directories(${PROTOBUF_INCLUDE_DIR})
7 |     # Generated onnx.pb.h/onnx.pb.cc land in the build tree.
8 |     include_directories(${CMAKE_CURRENT_BINARY_DIR})
9 |     protobuf_generate_cpp(ONNX_PROTO_SRCS ONNX_PROTO_HDRS onnx.proto)
10 |     add_executable(onnx2ncnn onnx2ncnn.cpp ${ONNX_PROTO_SRCS} ${ONNX_PROTO_HDRS})
11 |     # Generated protobuf code needs C++11; CXX_STANDARD_REQUIRED keeps CMake from
12 |     # silently decaying to an older standard when the compiler lacks C++11.
13 |     set_target_properties(onnx2ncnn PROPERTIES CXX_STANDARD 11 CXX_STANDARD_REQUIRED ON)
14 |     target_link_libraries(onnx2ncnn ${PROTOBUF_LIBRARIES})
15 | else()
16 |     message(WARNING "Protobuf not found, onnx model convert tool won't be built")
17 | endif()
18 |
14 |
--------------------------------------------------------------------------------
/tools/plugin/ImageWatchNCNN.natvis:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 | {{FLOAT32, {c} x {w} x {h}}}
13 | {{FLOAT16, {c} x {w} x {h}}}
14 | {{INT8, {c} x {w} x {h}}}
15 |
16 |
17 | FLOAT32
18 |
19 |
20 | FLOAT16
21 |
22 |
23 | INT8
24 |
25 | c
26 | w
27 | h
28 | c
29 | ((float*)(data))
30 | data
31 | data
32 | w
33 | w*2
34 | w*4
35 |
36 |
37 |
38 |
--------------------------------------------------------------------------------
/tools/plugin/README.md:
--------------------------------------------------------------------------------
1 |
2 | ## NCNN Image Watch Plugin for Visual Studio
3 | Image Watch is a useful plugin for inspecting image data while debugging. This small add-on provides a ".natvis" file that adds ncnn::Mat support to Image Watch, so users can inspect an ncnn::Mat image while debugging, just like inspecting a cv::Mat via Image Watch.
4 |
5 | To use this plugin, move the "ImageWatchNCNN.natvis" file into the "C:/Users/${your user name}/Documents/Visual Studio ${VS_Version}/Visualizers" folder. If that folder does not exist, create it (for example: "C:\Users\nihui\Documents\Visual Studio 2017\Visualizers").
6 |
7 | 
8 |
9 | See the [Image Watch Help](https://imagewatch.azurewebsites.net/ImageWatchHelp/ImageWatchHelp.htm) page for more advanced usage tips for Image Watch (for example, extracting a single channel from a multi-channel result, such as getting the confidence heatmap from a forward result list {confidence, x1, y1, x2, y2}).
--------------------------------------------------------------------------------
/tools/plugin/snapshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jiangxiluning/ncnn-tensorflow/4f84be38d58102529c94d87d48aac71e5739fa9e/tools/plugin/snapshot.png
--------------------------------------------------------------------------------
/tools/pytorch/readme.txt:
--------------------------------------------------------------------------------
1 | You can find the pytorch2ncnn tool here
2 |
3 | https://github.com/starimeL/PytorchConverter
4 |
--------------------------------------------------------------------------------
/tools/tensorflow/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | # No REQUIRED here: find_package(Protobuf REQUIRED) aborts configuration when
3 | # protobuf is missing, which made the warning branch below unreachable dead
4 | # code. The caffe/onnx converters treat protobuf as optional; keep consistent.
5 | find_package(Protobuf)
6 |
7 | if(PROTOBUF_FOUND)
8 |     include_directories(${PROTOBUF_INCLUDE_DIR})
9 |     # Generated *.pb.h/*.pb.cc files land in the build tree.
10 |     include_directories(${CMAKE_CURRENT_BINARY_DIR})
11 |     protobuf_generate_cpp(TENSORFLOW_PROTO_SRCS TENSORFLOW_PROTO_HDRS
12 |         attr_value.proto
13 |         function.proto
14 |         graph.proto
15 |         node_def.proto
16 |         op_def.proto
17 |         resource_handle.proto
18 |         tensor.proto
19 |         tensor_shape.proto
20 |         types.proto
21 |         versions.proto
22 |     )
23 |     add_executable(tensorflow2ncnn tensorflow2ncnn.cpp ${TENSORFLOW_PROTO_SRCS} ${TENSORFLOW_PROTO_HDRS})
24 |     # Generated protobuf code needs C++11; CXX_STANDARD_REQUIRED keeps CMake from
25 |     # silently decaying to an older standard when the compiler lacks C++11.
26 |     set_target_properties(tensorflow2ncnn PROPERTIES CXX_STANDARD 11 CXX_STANDARD_REQUIRED ON)
27 |     target_link_libraries(tensorflow2ncnn ${PROTOBUF_LIBRARIES})
28 | else()
29 |     message(WARNING "Protobuf not found, tensorflow model convert tool won't be built")
30 | endif()
31 |
--------------------------------------------------------------------------------
/tools/tensorflow/graph.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package tensorflow;
4 | option cc_enable_arenas = true;
5 | option java_outer_classname = "GraphProtos";
6 | option java_multiple_files = true;
7 | option java_package = "org.tensorflow.framework";
8 |
9 | import "node_def.proto";
10 | import "function.proto";
11 | import "versions.proto";
12 |
13 | // Represents the graph of operations
14 | message GraphDef {
15 | repeated NodeDef node = 1;
16 |
17 | // Compatibility versions of the graph. See core/public/version.h for version
18 | // history. The GraphDef version is distinct from the TensorFlow version, and
19 | // each release of TensorFlow will support a range of GraphDef versions.
20 | VersionDef versions = 4;
21 |
22 | // Deprecated single version field; use versions above instead. Since all
23 | // GraphDef changes before "versions" was introduced were forward
24 | // compatible, this field is entirely ignored.
25 | int32 version = 3 [deprecated = true];
26 |
27 | // EXPERIMENTAL. DO NOT USE OR DEPEND ON THIS YET.
28 | //
29 | // "library" provides user-defined functions.
30 | //
31 | // Naming:
32 | // * library.function.name are in a flat namespace.
33 | // NOTE: We may need to change it to be hierarchical to support
34 | // different orgs. E.g.,
35 | // { "/google/nn", { ... }},
36 | // { "/google/vision", { ... }}
37 | // { "/org_foo/module_bar", { ... }}
38 | // map named_lib;
39 | // * If node[i].op is the name of one function in "library",
40 | // node[i] is deemed as a function call. Otherwise, node[i].op
41 | // must be a primitive operation supported by the runtime.
42 | //
43 | //
44 | // Function call semantics:
45 | //
46 | // * The callee may start execution as soon as some of its inputs
47 | // are ready. The caller may want to use Tuple() mechanism to
48 | // ensure all inputs are ready in the same time.
49 | //
50 | // * The consumer of return values may start executing as soon as
51 | // the return values the consumer depends on are ready. The
52 | // consumer may want to use Tuple() mechanism to ensure the
53 | // consumer does not start until all return values of the callee
54 | // function are ready.
55 | FunctionDefLibrary library = 2;
56 | };
57 |
--------------------------------------------------------------------------------
/tools/tensorflow/resource_handle.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package tensorflow;
4 | option cc_enable_arenas = true;
5 | option java_outer_classname = "ResourceHandle";
6 | option java_multiple_files = true;
7 | option java_package = "org.tensorflow.framework";
8 |
9 | // Protocol buffer representing a handle to a tensorflow resource. Handles are
10 | // not valid across executions, but can be serialized back and forth from within
11 | // a single run.
12 | message ResourceHandleProto {
13 | // Unique name for the device containing the resource.
14 | string device = 1;
15 |
16 | // Container in which this resource is placed.
17 | string container = 2;
18 |
19 | // Unique name of this resource.
20 | string name = 3;
21 |
22 | // Hash code for the type of the resource. Is only valid in the same device
23 | // and in the same execution.
24 | uint64 hash_code = 4;
25 |
26 | // For debug-only, the name of the type pointed to by this handle, if
27 | // available.
28 | string maybe_type_name = 5;
29 | };
30 |
--------------------------------------------------------------------------------
/tools/tensorflow/tensor_shape.proto:
--------------------------------------------------------------------------------
1 | // Protocol buffer representing the shape of tensors.
2 |
3 | syntax = "proto3";
4 | option cc_enable_arenas = true;
5 | option java_outer_classname = "TensorShapeProtos";
6 | option java_multiple_files = true;
7 | option java_package = "org.tensorflow.framework";
8 |
9 | package tensorflow;
10 |
11 | // Dimensions of a tensor.
12 | message TensorShapeProto {
13 | // One dimension of the tensor.
14 | message Dim {
15 | // Size of the tensor in that dimension.
16 | // This value must be >= -1, but values of -1 are reserved for "unknown"
17 | // shapes (values of -1 mean "unknown" dimension). Certain wrappers
18 | // that work with TensorShapeProto may fail at runtime when deserializing
19 | // a TensorShapeProto containing a dim value of -1.
20 | int64 size = 1;
21 |
22 | // Optional name of the tensor dimension.
23 | string name = 2;
24 | };
25 |
26 | // Dimensions of the tensor, such as {"input", 30}, {"output", 40}
27 | // for a 30 x 40 2D tensor. If an entry has size -1, this
28 | // corresponds to a dimension of unknown size. The names are
29 | // optional.
30 | //
31 | // The order of entries in "dim" matters: It indicates the layout of the
32 | // values in the tensor in-memory representation.
33 | //
34 | // The first entry in "dim" is the outermost dimension used to layout the
35 | // values, the last entry is the innermost dimension. This matches the
36 | // in-memory layout of RowMajor Eigen tensors.
37 | //
38 | // If "dim.size()" > 0, "unknown_rank" must be false.
39 | repeated Dim dim = 2;
40 |
41 | // If true, the number of dimensions in the shape is unknown.
42 | //
43 | // If true, "dim.size()" must be 0.
44 | bool unknown_rank = 3;
45 | };
46 |
--------------------------------------------------------------------------------
/tools/tensorflow/types.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package tensorflow;
4 | option cc_enable_arenas = true;
5 | option java_outer_classname = "TypesProtos";
6 | option java_multiple_files = true;
7 | option java_package = "org.tensorflow.framework";
8 |
9 | // LINT.IfChange
10 | enum DataType {
11 | // Not a legal value for DataType. Used to indicate a DataType field
12 | // has not been set.
13 | DT_INVALID = 0;
14 |
15 | // Data types that all computation devices are expected to be
16 | // capable to support.
17 | DT_FLOAT = 1;
18 | DT_DOUBLE = 2;
19 | DT_INT32 = 3;
20 | DT_UINT8 = 4;
21 | DT_INT16 = 5;
22 | DT_INT8 = 6;
23 | DT_STRING = 7;
24 | DT_COMPLEX64 = 8; // Single-precision complex
25 | DT_INT64 = 9;
26 | DT_BOOL = 10;
27 | DT_QINT8 = 11; // Quantized int8
28 | DT_QUINT8 = 12; // Quantized uint8
29 | DT_QINT32 = 13; // Quantized int32
30 | DT_BFLOAT16 = 14; // Float32 truncated to 16 bits. Only for cast ops.
31 | DT_QINT16 = 15; // Quantized int16
32 | DT_QUINT16 = 16; // Quantized uint16
33 | DT_UINT16 = 17;
34 | DT_COMPLEX128 = 18; // Double-precision complex
35 | DT_HALF = 19;
36 | DT_RESOURCE = 20;
37 | DT_VARIANT = 21; // Arbitrary C++ data types
38 |
39 | // TODO(josh11b): DT_GENERIC_PROTO = ??;
40 | // TODO(jeff,josh11b): DT_UINT64? DT_UINT32?
41 |
42 | // Do not use! These are only for parameters. Every enum above
43 | // should have a corresponding value below (verified by types_test).
44 | DT_FLOAT_REF = 101;
45 | DT_DOUBLE_REF = 102;
46 | DT_INT32_REF = 103;
47 | DT_UINT8_REF = 104;
48 | DT_INT16_REF = 105;
49 | DT_INT8_REF = 106;
50 | DT_STRING_REF = 107;
51 | DT_COMPLEX64_REF = 108;
52 | DT_INT64_REF = 109;
53 | DT_BOOL_REF = 110;
54 | DT_QINT8_REF = 111;
55 | DT_QUINT8_REF = 112;
56 | DT_QINT32_REF = 113;
57 | DT_BFLOAT16_REF = 114;
58 | DT_QINT16_REF = 115;
59 | DT_QUINT16_REF = 116;
60 | DT_UINT16_REF = 117;
61 | DT_COMPLEX128_REF = 118;
62 | DT_HALF_REF = 119;
63 | DT_RESOURCE_REF = 120;
64 | DT_VARIANT_REF = 121;
65 | }
66 | // LINT.ThenChange(https://www.tensorflow.org/code/tensorflow/c/c_api.h,https://www.tensorflow.org/code/tensorflow/go/tensor.go)
67 |
--------------------------------------------------------------------------------
/tools/tensorflow/versions.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package tensorflow;
4 | option cc_enable_arenas = true;
5 | option java_outer_classname = "VersionsProtos";
6 | option java_multiple_files = true;
7 | option java_package = "org.tensorflow.framework";
8 |
9 | // Version information for a piece of serialized data
10 | //
11 | // There are different types of versions for each type of data
12 | // (GraphDef, etc.), but they all have the same common shape
13 | // described here.
14 | //
15 | // Each consumer has "consumer" and "min_producer" versions (specified
16 | // elsewhere). A consumer is allowed to consume this data if
17 | //
18 | // producer >= min_producer
19 | // consumer >= min_consumer
20 | // consumer not in bad_consumers
21 | //
22 | message VersionDef {
23 | // The version of the code that produced this data.
24 | int32 producer = 1;
25 |
26 | // Any consumer below this version is not allowed to consume this data.
27 | int32 min_consumer = 2;
28 |
29 | // Specific consumer versions which are disallowed (e.g. due to bugs).
30 | repeated int32 bad_consumers = 3;
31 | };
32 |
--------------------------------------------------------------------------------