├── .gitignore
├── LICENSE
├── README.md
├── app
│   ├── .gitignore
│   ├── CMakeLists.txt
│   ├── build.gradle
│   ├── proguard-rules.pro
│   └── src
│       ├── androidTest
│       │   └── java
│       │       └── com
│       │           └── facesdk
│       │               └── ExampleInstrumentedTest.java
│       ├── main
│       │   ├── AndroidManifest.xml
│       │   ├── assets
│       │   │   ├── RFB-320-quant-ADMM-32.mnn
│       │   │   ├── RFB-320-quant-KL-5792.mnn
│       │   │   ├── RFB-320.mnn
│       │   │   ├── slim-320-quant-ADMM-50.mnn
│       │   │   └── slim-320.mnn
│       │   ├── cpp
│       │   │   ├── UltraFace.cpp
│       │   │   ├── UltraFace.hpp
│       │   │   ├── Ultra_jni.cpp
│       │   │   ├── include
│       │   │   │   ├── AutoTime.hpp
│       │   │   │   ├── Backend.hpp
│       │   │   │   ├── ErrorCode.hpp
│       │   │   │   ├── HalideRuntime.h
│       │   │   │   ├── ImageProcess.hpp
│       │   │   │   ├── Interpreter.hpp
│       │   │   │   ├── MNNDefine.h
│       │   │   │   ├── MNNForwardType.h
│       │   │   │   ├── MNNSharedContext.h
│       │   │   │   ├── Matrix.h
│       │   │   │   ├── NonCopyable.hpp
│       │   │   │   ├── Rect.h
│       │   │   │   ├── Tensor.hpp
│       │   │   │   └── revertMNNModel.hpp
│       │   │   ├── net.cpp
│       │   │   └── net.h
│       │   ├── java
│       │   │   └── com
│       │   │       └── facesdk
│       │   │           ├── FaceSDKNative.java
│       │   │           └── MainActivity.java
│       │   ├── jniLibs
│       │   │   ├── arm64-v8a
│       │   │   │   ├── libMNN.so
│       │   │   │   ├── libMNN_CL.so
│       │   │   │   └── libMNN_Express.so
│       │   │   └── armeabi-v7a
│       │   │       ├── libMNN.so
│       │   │       ├── libMNN_CL.so
│       │   │       └── libMNN_Express.so
│       │   └── res
│       │       ├── layout
│       │       │   ├── activity_main.xml
│       │       │   └── main.xml
│       │       ├── mipmap-hdpi
│       │       │   └── ic_launcher.png
│       │       ├── mipmap-mdpi
│       │       │   └── ic_launcher.png
│       │       ├── mipmap-xhdpi
│       │       │   └── ic_launcher.png
│       │       ├── mipmap-xxhdpi
│       │       │   └── ic_launcher.png
│       │       ├── mipmap-xxxhdpi
│       │       │   └── ic_launcher.png
│       │       ├── values-w820dp
│       │       │   └── dimens.xml
│       │       └── values
│       │           ├── colors.xml
│       │           ├── dimens.xml
│       │           ├── strings.xml
│       │           └── styles.xml
│       └── test
│           └── java
│               └── com
│                   └── facesdk
│                       └── ExampleUnitTest.java
├── build.gradle
├── gradle.properties
├── gradle
│   └── wrapper
│       ├── gradle-wrapper.jar
│       └── gradle-wrapper.properties
├── gradlew
├── gradlew.bat
├── res_img
│   └── result.jpg
└── settings.gradle
/.gitignore:
--------------------------------------------------------------------------------
1 | *.iml
2 | .gradle
3 | /local.properties
4 | /.idea
5 | .DS_Store
6 | /build
7 | /captures
8 | .externalNativeBuild
9 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 jackweiwang
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Face Detection for Android
2 | - v1.0
3 | - v1.1 removed the OpenCV dependency
4 | - v1.2 reduced inference time by roughly 20 ms
5 | ## Platform
6 | Android
7 | ## Based on
8 | [Ultra-Light-Fast-Generic-Face-Detector-1MB](https://github.com/Linzaer/Ultra-Light-Fast-Generic-Face-Detector-1MB) running on [MNN](https://github.com/alibaba/MNN)
9 |
10 | ## Build
11 | Open the project in Android Studio and build it there; the bundled Gradle wrapper (`./gradlew assembleDebug`) should also work from the command line.
12 |
13 | ## Results
14 | Device | Input resolution
15 | ------ | ----------------
16 | Lenovo PB2-690N | 320x240
17 |
18 | Model | Inference (ms)
19 | ----- | --------------
20 | RFB-320-quant-ADMM-32.mnn | 35
21 |
22 | ## MNN Library Version
23 | 0.2.2.1
24 |
--------------------------------------------------------------------------------
/app/.gitignore:
--------------------------------------------------------------------------------
1 | /build
2 |
--------------------------------------------------------------------------------
/app/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.4.1)
2 | #set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fopenmp")
3 | #set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp")
4 |
5 | #set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99 -fvisibility=hidden -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math -flax-vector-conversions")
6 | set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
7 | #set (CMAKE_LINKER_FLAGS "${CMAKE_LINKER_FLAGS} -Wl,--gc-sections")
8 |
9 | include_directories(src/main/cpp/include
10 | src/main/cpp/)
11 |
12 | #add_library(lib_opencv STATIC IMPORTED ) # import the libopencv_java3.so file
13 | #set_target_properties(lib_opencv
14 | # PROPERTIES
15 | # IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/src/main/jniLibs/${ANDROID_ABI}/libopencv_java3.so
16 | # )
17 |
18 | add_library(MNN SHARED IMPORTED)
19 | add_library(MNN_CL SHARED IMPORTED)
20 | #add_library(MNN_GL SHARED IMPORTED)
21 |
22 | set_target_properties(MNN
23 | PROPERTIES IMPORTED_LOCATION
24 | ${CMAKE_SOURCE_DIR}/src/main/jniLibs/${ANDROID_ABI}/libMNN.so)
25 | set_target_properties(MNN_CL
26 | PROPERTIES IMPORTED_LOCATION
27 | ${CMAKE_SOURCE_DIR}/src/main/jniLibs/${ANDROID_ABI}/libMNN_CL.so)
28 | #set_target_properties(MNN_GL
29 | # PROPERTIES IMPORTED_LOCATION
30 | # ${CMAKE_SOURCE_DIR}/src/main/jniLibs/${ANDROID_ABI}/libMNN_GL.so)
31 |
32 | add_library(facedetect SHARED src/main/cpp/Ultra_jni.cpp
33 | src/main/cpp/UltraFace.cpp
34 | src/main/cpp/net.cpp)
35 |
36 | find_library(log-lib log)
37 | find_library( jnigraphics-lib jnigraphics )
38 | add_definitions(-DMNN_USE_LOGCAT)
39 | target_link_libraries(facedetect
40 | MNN
41 | MNN_CL
42 | #MNN_GL
43 | #lib_opencv
44 | jnigraphics
45 | z
46 | ${log-lib})
47 |
48 |
49 |
--------------------------------------------------------------------------------
/app/build.gradle:
--------------------------------------------------------------------------------
1 | apply plugin: 'com.android.application'
2 |
3 | android {
4 | compileSdkVersion 28
5 | defaultConfig {
6 | applicationId "com.facesdk"
7 | minSdkVersion 14
8 | targetSdkVersion 28
9 | versionCode 1
10 | versionName "1.0"
11 | testInstrumentationRunner "android.support.test.runner.AndroidJUnitRunner"
12 |
13 | externalNativeBuild {
14 | cmake {
15 | arguments "-DANDROID_STL=c++_shared", "-DANDROID_ARM_NEON=TRUE", "-DANDROID_PLATFORM=android-21",
16 | "-DMNN_OPENCL=true", "-DMNN_OPENGL=true"
17 | abiFilters 'arm64-v8a', 'armeabi-v7a'
18 | }
19 | }
20 | }
21 | buildTypes {
22 | debug {
23 | ndk {
24 | abiFilters 'arm64-v8a', 'armeabi-v7a'
25 | }
26 | }
27 | release {
28 | minifyEnabled false
29 | proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
30 | }
31 | }
32 |
33 | externalNativeBuild {
34 | cmake {
35 | path "CMakeLists.txt"
36 | }
37 | }
38 |
39 |
40 | }
41 |
42 | dependencies {
43 | // implementation fileTree(dir: 'libs', include: ['*.jar'])
44 | // implementation 'com.android.support:appcompat-v7:28.0.0'
45 | // implementation 'com.android.support.constraint:constraint-layout:1.1.3'
46 | // testImplementation 'junit:junit:4.12'
47 | // androidTestImplementation 'com.android.support.test:runner:1.0.2'
48 | // androidTestImplementation 'com.android.support.test.espresso:espresso-core:3.0.2'
49 | // implementation 'com.android.support:design:28.0.0'
50 | // implementation 'com.android.support:cardview-v7:28.0.0'
51 |
52 | implementation fileTree(include: ['*.jar'], dir: 'libs')
53 | androidTestImplementation('com.android.support.test.espresso:espresso-core:3.0.2', {
54 | exclude group: 'com.android.support', module: 'support-annotations'
55 | })
56 | implementation 'com.android.support:appcompat-v7:28.0.0'
57 | implementation 'com.android.support:support-v4:28.0.0'
58 | testImplementation 'junit:junit:4.12'
59 | implementation 'com.google.android.gms:play-services-appindexing:8.4.0'
60 | }
61 |
--------------------------------------------------------------------------------
/app/proguard-rules.pro:
--------------------------------------------------------------------------------
1 | # Add project specific ProGuard rules here.
2 | # By default, the flags in this file are appended to flags specified
3 | # in D:\AndroidSDK/tools/proguard/proguard-android.txt
4 | # You can edit the include path and order by changing the proguardFiles
5 | # directive in build.gradle.
6 | #
7 | # For more details, see
8 | # http://developer.android.com/guide/developing/tools/proguard.html
9 |
10 | # Add any project specific keep options here:
11 |
12 | # If your project uses WebView with JS, uncomment the following
13 | # and specify the fully qualified class name to the JavaScript interface
14 | # class:
15 | #-keepclassmembers class fqcn.of.javascript.interface.for.webview {
16 | # public *;
17 | #}
18 |
--------------------------------------------------------------------------------
/app/src/androidTest/java/com/facesdk/ExampleInstrumentedTest.java:
--------------------------------------------------------------------------------
1 | package com.facesdk;
2 |
3 | import android.content.Context;
4 | import android.support.test.InstrumentationRegistry;
5 | import android.support.test.runner.AndroidJUnit4;
6 |
7 | import org.junit.Test;
8 | import org.junit.runner.RunWith;
9 |
10 | import static org.junit.Assert.*;
11 |
12 | /**
13 | * Instrumentation test, which will execute on an Android device.
14 | *
15 | * @see Testing documentation
16 | */
17 | @RunWith(AndroidJUnit4.class)
18 | public class ExampleInstrumentedTest {
19 | @Test
20 | public void useAppContext() throws Exception {
21 | // Context of the app under test.
22 | Context appContext = InstrumentationRegistry.getTargetContext();
23 |
24 | assertEquals("com.facesdk", appContext.getPackageName());
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/app/src/main/AndroidManifest.xml:
--------------------------------------------------------------------------------
1 |
2 |
4 |
5 |
6 |
7 |
8 |
9 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
23 |
26 |
27 |
28 |
--------------------------------------------------------------------------------
/app/src/main/assets/RFB-320-quant-ADMM-32.mnn:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jackweiwang/Android-FaceDetection-UltraNet-MNN/6f4e5e0184bbdfe6ed6bcf9b993d393bc03f18a2/app/src/main/assets/RFB-320-quant-ADMM-32.mnn
--------------------------------------------------------------------------------
/app/src/main/assets/RFB-320-quant-KL-5792.mnn:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jackweiwang/Android-FaceDetection-UltraNet-MNN/6f4e5e0184bbdfe6ed6bcf9b993d393bc03f18a2/app/src/main/assets/RFB-320-quant-KL-5792.mnn
--------------------------------------------------------------------------------
/app/src/main/assets/RFB-320.mnn:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jackweiwang/Android-FaceDetection-UltraNet-MNN/6f4e5e0184bbdfe6ed6bcf9b993d393bc03f18a2/app/src/main/assets/RFB-320.mnn
--------------------------------------------------------------------------------
/app/src/main/assets/slim-320-quant-ADMM-50.mnn:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jackweiwang/Android-FaceDetection-UltraNet-MNN/6f4e5e0184bbdfe6ed6bcf9b993d393bc03f18a2/app/src/main/assets/slim-320-quant-ADMM-50.mnn
--------------------------------------------------------------------------------
/app/src/main/assets/slim-320.mnn:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jackweiwang/Android-FaceDetection-UltraNet-MNN/6f4e5e0184bbdfe6ed6bcf9b993d393bc03f18a2/app/src/main/assets/slim-320.mnn
--------------------------------------------------------------------------------
/app/src/main/cpp/UltraFace.cpp:
--------------------------------------------------------------------------------
1 | // Created by Linzaer on 2019/11/15.
2 | // Copyright © 2019 Linzaer. All rights reserved.
3 |
4 | #define clip(x, y) (x < 0 ? 0 : (x > y ? y : x))
5 |
6 | #include "UltraFace.hpp"
7 | #define TAG "cpp"
8 | #define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
9 | using namespace std;
10 |
11 | UltraFace::UltraFace(std::string &mnn_path,
12 | int input_width, int input_length, int num_thread_,
13 | float score_threshold_, float iou_threshold_) {
14 | num_thread = num_thread_;
15 | score_threshold = score_threshold_;
16 | iou_threshold = iou_threshold_;
17 | in_w = input_width;
18 | in_h = input_length;
19 | w_h_list = {in_w, in_h};
20 |
21 | for (auto size : w_h_list) {
22 | std::vector<float> fm_item;
23 | for (float stride : strides) {
24 | fm_item.push_back(ceil(size / stride));
25 | }
26 | featuremap_size.push_back(fm_item);
27 | }
28 |
29 | for (auto size : w_h_list) {
30 | shrinkage_size.push_back(strides);
31 | }
32 | /* generate prior anchors */
33 | for (int index = 0; index < num_featuremap; index++) {
34 | float scale_w = in_w / shrinkage_size[0][index];
35 | float scale_h = in_h / shrinkage_size[1][index];
36 | for (int j = 0; j < featuremap_size[1][index]; j++) {
37 | for (int i = 0; i < featuremap_size[0][index]; i++) {
38 | float x_center = (i + 0.5) / scale_w;
39 | float y_center = (j + 0.5) / scale_h;
40 |
41 | for (float k : min_boxes[index]) {
42 | float w = k / in_w;
43 | float h = k / in_h;
44 | priors.push_back({clip(x_center, 1), clip(y_center, 1), clip(w, 1), clip(h, 1)});
45 | }
46 | }
47 | }
48 | }
49 | /* generate prior anchors finished */
50 |
51 | num_anchors = priors.size();
52 |
53 | ultra_net.load_param(mnn_path, num_thread);
54 | ultra_net.set_params(0, 1, mean_vals, norm_vals);
55 |
56 | }
57 |
58 | int UltraFace::detect(unsigned char *data, int width, int height, int channel, std::vector<FaceInfo> &face_list) {
59 |
60 |
61 | image_h = height;
62 | image_w = width;
63 |
64 | Inference_engine_tensor out;
65 |
66 | string scores = "scores";
67 | out.add_name(scores);
68 |
69 | string boxes = "boxes";
70 | out.add_name(boxes);
71 |
72 | ultra_net.infer_img(data, width, height, channel, in_w, in_h, out);
73 |
74 | std::vector<FaceInfo> bbox_collection;
75 | generateBBox(bbox_collection, out.score(0).get() , out.score(1).get());
76 | //LOGD("bbox_collection == %d", bbox_collection.size());
77 | nms(bbox_collection, face_list);
78 | return 0;
79 | }
80 |
81 | void UltraFace::generateBBox(std::vector<FaceInfo> &bbox_collection, float* scores, float* boxes) {
82 | for (int i = 0; i < num_anchors; i++) {
83 | if (scores[i * 2 + 1 ] > score_threshold) {
84 |
85 | FaceInfo rects;
86 | float x_center = boxes[i * 4] * center_variance * priors[i][2] + priors[i][0];
87 | float y_center = boxes[i * 4 + 1] * center_variance * priors[i][3] + priors[i][1];
88 | float w = exp(boxes[i * 4 + 2] * size_variance) * priors[i][2];
89 | float h = exp(boxes[i * 4 + 3] * size_variance) * priors[i][3];
90 |
91 | rects.x1 = clip(x_center - w / 2.0, 1) * image_w;
92 | rects.y1 = clip(y_center - h / 2.0, 1) * image_h;
93 | rects.x2 = clip(x_center + w / 2.0, 1) * image_w;
94 | rects.y2 = clip(y_center + h / 2.0, 1) * image_h;
95 | rects.score = clip(scores[i * 2 + 1 ], 1);
96 |
97 | bbox_collection.push_back(rects);
98 | }
99 | }
100 | }
101 |
102 | void UltraFace::nms(std::vector<FaceInfo> &input, std::vector<FaceInfo> &output, int type) {
103 | std::sort(input.begin(), input.end(), [](const FaceInfo &a, const FaceInfo &b) { return a.score > b.score; });
104 |
105 | int box_num = input.size();
106 |
107 | std::vector<int> merged(box_num, 0);
108 |
109 | for (int i = 0; i < box_num; i++) {
110 | if (merged[i])
111 | continue;
112 | std::vector<FaceInfo> buf;
113 |
114 | buf.push_back(input[i]);
115 | merged[i] = 1;
116 |
117 | float h0 = input[i].y2 - input[i].y1 + 1;
118 | float w0 = input[i].x2 - input[i].x1 + 1;
119 |
120 | float area0 = h0 * w0;
121 |
122 | for (int j = i + 1; j < box_num; j++) {
123 | if (merged[j])
124 | continue;
125 |
126 | float inner_x0 = input[i].x1 > input[j].x1 ? input[i].x1 : input[j].x1;
127 | float inner_y0 = input[i].y1 > input[j].y1 ? input[i].y1 : input[j].y1;
128 |
129 | float inner_x1 = input[i].x2 < input[j].x2 ? input[i].x2 : input[j].x2;
130 | float inner_y1 = input[i].y2 < input[j].y2 ? input[i].y2 : input[j].y2;
131 |
132 | float inner_h = inner_y1 - inner_y0 + 1;
133 | float inner_w = inner_x1 - inner_x0 + 1;
134 |
135 | if (inner_h <= 0 || inner_w <= 0)
136 | continue;
137 |
138 | float inner_area = inner_h * inner_w;
139 |
140 | float h1 = input[j].y2 - input[j].y1 + 1;
141 | float w1 = input[j].x2 - input[j].x1 + 1;
142 |
143 | float area1 = h1 * w1;
144 |
145 | float score;
146 |
147 | score = inner_area / (area0 + area1 - inner_area);
148 |
149 | if (score > iou_threshold) {
150 | merged[j] = 1;
151 | buf.push_back(input[j]);
152 | }
153 | }
154 | switch (type) {
155 | case hard_nms: {
156 | output.push_back(buf[0]);
157 | break;
158 | }
159 | case blending_nms: {
160 | float total = 0;
161 | for (int i = 0; i < buf.size(); i++) {
162 | total += exp(buf[i].score);
163 | }
164 | FaceInfo rects;
165 | memset(&rects, 0, sizeof(rects));
166 | for (int i = 0; i < buf.size(); i++) {
167 | float rate = exp(buf[i].score) / total;
168 | rects.x1 += buf[i].x1 * rate;
169 | rects.y1 += buf[i].y1 * rate;
170 | rects.x2 += buf[i].x2 * rate;
171 | rects.y2 += buf[i].y2 * rate;
172 | rects.score += buf[i].score * rate;
173 | }
174 | output.push_back(rects);
175 | break;
176 | }
177 | default: {
178 | printf("wrong type of nms.");
179 | exit(-1);
180 | }
181 | }
182 | }
183 | }
184 |
--------------------------------------------------------------------------------
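
The decode step in UltraFace::generateBBox above turns the network's regression output into normalized box corners: the predicted offsets shift the prior's center (scaled by center_variance = 0.1) and the predicted log-scales resize the prior (scaled by size_variance = 0.2). Below is a minimal standalone sketch of that formula; the prior and regression values are made-up numbers used purely for illustration, not outputs of the bundled models.

```cpp
// Standalone illustration of the box decoding used in UltraFace::generateBBox.
// The prior and regression numbers below are invented for the example.
#include <cmath>
#include <cstdio>

int main() {
    const float center_variance = 0.1f;  // same constants as UltraFace.hpp
    const float size_variance   = 0.2f;

    float prior[4] = {0.5f, 0.5f, 0.1f, 0.15f}; // hypothetical prior: cx, cy, w, h (normalized)
    float reg[4]   = {0.2f, -0.1f, 0.3f, 0.0f}; // hypothetical regression output: dx, dy, dw, dh

    // Shift the prior center and rescale the prior size.
    float cx = reg[0] * center_variance * prior[2] + prior[0]; // 0.5020
    float cy = reg[1] * center_variance * prior[3] + prior[1]; // 0.4985
    float w  = std::exp(reg[2] * size_variance) * prior[2];    // ~0.1062
    float h  = std::exp(reg[3] * size_variance) * prior[3];    // 0.1500

    // generateBBox then clips these corners to [0, 1] and multiplies by the
    // original image width/height to get pixel coordinates.
    printf("x1=%.4f y1=%.4f x2=%.4f y2=%.4f\n",
           cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2);
    return 0;
}
```
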
/app/src/main/cpp/UltraFace.hpp:
--------------------------------------------------------------------------------
1 | // Created by Linzaer on 2019/11/15.
2 | // Copyright © 2019 Linzaer. All rights reserved.
3 |
4 | #ifndef UltraFace_hpp
5 | #define UltraFace_hpp
6 |
7 | #pragma once
8 |
9 | #include <algorithm>
10 | #include <iostream>
11 | #include <string>
12 | #include <vector>
13 | #include <memory>
14 | #include "net.h"
15 |
16 | #define num_featuremap 4
17 | #define hard_nms 1
18 | #define blending_nms 2 /* blending NMS was proposed in the BlazeFace paper; it aims to minimize temporal jitter */
19 | typedef struct FaceInfo {
20 | float x1;
21 | float y1;
22 | float x2;
23 | float y2;
24 | float score;
25 |
26 | } FaceInfo;
27 |
28 | class UltraFace {
29 | public:
30 | UltraFace(std::string &mnn_path, int input_width, int input_length, int num_thread_ = 4, float score_threshold_ = 0.7, float iou_threshold_ = 0.35);
31 |
32 | //~UltraFace();
33 |
34 | int detect(unsigned char *raw_image, int width, int height, int channel, std::vector<FaceInfo> &face_list);
35 |
36 | void generateBBox(std::vector<FaceInfo> &bbox_collection, float* scores, float* boxes);
37 |
38 | void nms(std::vector<FaceInfo> &input, std::vector<FaceInfo> &output, int type = blending_nms);
39 |
40 | private:
41 | Inference_engine ultra_net;
42 |
43 | int num_thread;
44 | int image_w;
45 | int image_h;
46 |
47 | int in_w;
48 | int in_h;
49 | int num_anchors;
50 |
51 | float score_threshold;
52 | float iou_threshold;
53 |
54 | float mean_vals[3] = {127, 127, 127};
55 | float norm_vals[3] = {1.0 / 128, 1.0 / 128, 1.0 / 128};
56 |
57 | const float center_variance = 0.1;
58 | const float size_variance = 0.2;
59 | const std::vector<std::vector<float>> min_boxes = {
60 | {10.0f, 16.0f, 24.0f},
61 | {32.0f, 48.0f},
62 | {64.0f, 96.0f},
63 | {128.0f, 192.0f, 256.0f}};
64 | const std::vector<float> strides = {8.0, 16.0, 32.0, 64.0};
65 | std::vector<std::vector<float>> featuremap_size;
66 | std::vector<std::vector<float>> shrinkage_size;
67 | std::vector<int> w_h_list;
68 |
69 | std::vector<std::vector<float>> priors = {};
70 | };
71 |
72 | #endif /* UltraFace_hpp */
73 |
--------------------------------------------------------------------------------
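
For reference, here is a minimal sketch of how the UltraFace class declared above is driven. It mirrors the call pattern in Ultra_jni.cpp (320x240 network input, 4 threads, 0.65 score threshold); the model path and the image buffer are placeholders, and loading real pixel data is out of scope.

```cpp
// Minimal usage sketch of UltraFace, mirroring Ultra_jni.cpp.
#include <cstdio>
#include <string>
#include <vector>
#include "UltraFace.hpp"

int main() {
    // Hypothetical on-device location of one of the bundled models.
    std::string model_path = "/sdcard/facesdk/RFB-320-quant-ADMM-32.mnn";
    UltraFace detector(model_path, 320, 240, 4, 0.65f); // input w, input h, threads, score threshold

    // Placeholder packed-RGB image; a real caller would fill this with pixel data.
    int width = 640, height = 480, channel = 3;
    std::vector<unsigned char> rgb(width * height * channel, 0);

    std::vector<FaceInfo> faces;
    detector.detect(rgb.data(), width, height, channel, faces);

    for (const FaceInfo &f : faces) {
        printf("face: (%.1f, %.1f) - (%.1f, %.1f) score %.2f\n",
               f.x1, f.y1, f.x2, f.y2, f.score);
    }
    return 0;
}
```
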
/app/src/main/cpp/Ultra_jni.cpp:
--------------------------------------------------------------------------------
1 | #include <jni.h>
2 | #include <string>
3 | #include <vector>
4 | #include <android/log.h>
5 | #include <android/bitmap.h>
6 | #include "UltraFace.hpp"
7 |
8 | #define TAG "FaceSDKNative"
9 | #define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
10 |
11 | using namespace std;
12 |
13 | static UltraFace *ultra;
14 | bool detection_sdk_init_ok = false;
15 |
16 | extern "C" {
17 |
18 | JNIEXPORT jboolean JNICALL
19 | Java_com_facesdk_FaceSDKNative_FaceDetectionModelInit(JNIEnv *env, jobject instance,
20 | jstring faceDetectionModelPath_) {
21 | LOGD("JNI init native sdk");
22 | if (detection_sdk_init_ok) {
23 | LOGD("sdk already init");
24 | return true;
25 | }
26 | jboolean tRet = false;
27 | if (NULL == faceDetectionModelPath_) {
28 | LOGD("model dir is empty");
29 | return tRet;
30 | }
31 |
32 | // Get the directory that contains the model file (a path like /aaa/, not /aaa/bbb.bin)
33 | const char *faceDetectionModelPath = env->GetStringUTFChars(faceDetectionModelPath_, 0);
34 | if (NULL == faceDetectionModelPath) {
35 | LOGD("model dir is empty");
36 | return tRet;
37 | }
38 |
39 | string tFaceModelDir = faceDetectionModelPath;
40 | string tLastChar = tFaceModelDir.substr(tFaceModelDir.length()-1, 1);
41 | //RFB-320
42 | //RFB-320-quant-ADMM-32
43 | //RFB-320-quant-KL-5792
44 | //slim-320
45 | //slim-320-quant-ADMM-50
46 | // Quantized models must run on the CPU backend: in net.cpp set sch_config.type = (MNNForwardType)MNN_FORWARD_CPU
47 | // switch models by changing the file name below
48 | string str = tFaceModelDir + "RFB-320-quant-ADMM-32.mnn";
49 |
50 | ultra = new UltraFace(str, 320, 240, 4, 0.65 ); // config model input
51 |
52 | env->ReleaseStringUTFChars(faceDetectionModelPath_, faceDetectionModelPath);
53 | detection_sdk_init_ok = true;
54 | tRet = true;
55 |
56 | return tRet;
57 | }
58 |
59 | JNIEXPORT jintArray JNICALL
60 | Java_com_facesdk_FaceSDKNative_FaceDetect(JNIEnv *env, jobject instance, jbyteArray imageDate_,
61 | jint imageWidth, jint imageHeight, jint imageChannel) {
62 | if(!detection_sdk_init_ok){
63 | LOGD("sdk not init");
64 | return NULL;
65 | }
66 |
67 | int tImageDateLen = env->GetArrayLength(imageDate_);
68 | if(imageChannel == tImageDateLen / imageWidth / imageHeight){
69 | LOGD("imgW=%d, imgH=%d,imgC=%d",imageWidth,imageHeight,imageChannel);
70 | }
71 | else{
72 | LOGD("img data format error");
73 | return NULL;
74 | }
75 |
76 | jbyte *imageDate = env->GetByteArrayElements(imageDate_, NULL);
77 | if (NULL == imageDate){
78 | LOGD("img data is null");
79 | return NULL;
80 | }
81 |
82 | if(imageWidth<200||imageHeight<200){
83 | LOGD("img is too small");
84 | return NULL;
85 | }
86 |
87 |
88 | std::vector<FaceInfo> face_info;
89 | //detect face
90 | ultra ->detect((unsigned char*)imageDate, imageWidth, imageHeight, imageChannel, face_info );
91 |
92 | int32_t num_face = static_cast<int32_t>(face_info.size());
93 |
94 | int out_size = 1+num_face*4;
95 | int *allfaceInfo = new int[out_size];
96 | allfaceInfo[0] = num_face;
97 | for (int i=0; i<num_face; i++) {
98 | allfaceInfo[1+4*i] = face_info[i].x1;
99 | allfaceInfo[2+4*i] = face_info[i].y1;
100 | allfaceInfo[3+4*i] = face_info[i].x2;
101 | allfaceInfo[4+4*i] = face_info[i].y2;
102 | }
103 |
104 |
105 |
106 | jintArray tFaceInfo = env->NewIntArray(out_size);
107 | env->SetIntArrayRegion(tFaceInfo, 0, out_size, allfaceInfo);
108 | env->ReleaseByteArrayElements(imageDate_, imageDate, 0);
109 |
110 |
111 | delete [] allfaceInfo;
112 |
113 | return tFaceInfo;
114 | }
115 |
116 | JNIEXPORT jboolean JNICALL
117 | Java_com_facesdk_FaceSDKNative_FaceDetectionModelUnInit(JNIEnv *env, jobject instance) {
118 |
119 | jboolean tDetectionUnInit = false;
120 |
121 | if (!detection_sdk_init_ok) {
122 | LOGD("sdk not inited, do nothing");
123 | return true;
124 | }
125 |
126 | delete ultra;
127 |
128 | detection_sdk_init_ok = false;
129 |
130 | tDetectionUnInit = true;
131 |
132 | LOGD("sdk release ok");
133 |
134 | return tDetectionUnInit;
135 | }
136 |
137 | }
138 |
--------------------------------------------------------------------------------
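
FaceDetect above hands its results back to Java as one flat int array: element 0 holds the face count and each face contributes four values in FaceInfo field order (x1, y1, x2, y2). The sketch below unpacks that layout; it is written in C++ purely to document the format, since the real consumers (FaceSDKNative.java / MainActivity.java) are not reproduced in this dump.

```cpp
// Illustration of the flat result layout produced by FaceDetect:
// [num_face, x1, y1, x2, y2, x1, y1, x2, y2, ...]
#include <cstdio>
#include <vector>

void print_faces(const std::vector<int> &packed) {
    if (packed.empty()) return;
    int num_face = packed[0];
    for (int i = 0; i < num_face; i++) {
        int x1 = packed[1 + 4 * i];
        int y1 = packed[2 + 4 * i];
        int x2 = packed[3 + 4 * i];
        int y2 = packed[4 + 4 * i];
        printf("face %d: (%d, %d) - (%d, %d)\n", i, x1, y1, x2, y2);
    }
}

int main() {
    // Two hypothetical faces in the packed layout.
    std::vector<int> packed = {2, 10, 20, 110, 140, 200, 50, 280, 150};
    print_faces(packed);
    return 0;
}
```
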
/app/src/main/cpp/include/AutoTime.hpp:
--------------------------------------------------------------------------------
1 | //
2 | // AutoTime.hpp
3 | // MNN
4 | //
5 | // Created by MNN on 2018/07/27.
6 | // Copyright © 2018, Alibaba Group Holding Limited
7 | //
8 |
9 | #ifndef AutoTime_hpp
10 | #define AutoTime_hpp
11 |
12 | #include <stdint.h>
13 | #include <stdio.h>
14 | #include "MNNDefine.h"
15 |
16 | namespace MNN {
17 |
18 | /** time tracing util. prints duration between init and deinit. */
19 | class MNN_PUBLIC AutoTime {
20 | public:
21 | AutoTime(int line, const char* func);
22 | ~AutoTime();
23 | AutoTime(const AutoTime&) = delete;
24 | AutoTime(const AutoTime&&) = delete;
25 | AutoTime& operator=(const AutoTime&) = delete;
26 | AutoTime& operator=(const AutoTime&&) = delete;
27 |
28 | private:
29 | int mLine;
30 | char* mName;
31 | uint64_t mCurrentTime;
32 | };
33 | } // namespace MNN
34 |
35 | #ifdef MNN_OPEN_TIME_TRACE
36 | #define AUTOTIME MNN::AutoTime ___t(__LINE__, __func__)
37 | #else
38 | #define AUTOTIME
39 | #endif
40 |
41 | #endif /* AutoTime_hpp */
42 |
--------------------------------------------------------------------------------
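
AutoTime is MNN's RAII scope timer: it records the time at construction and logs the elapsed duration when it is destroyed, and the AUTOTIME macro instantiates one for the current scope whenever MNN_OPEN_TIME_TRACE is defined (otherwise it expands to nothing). A small sketch of wrapping a call with it; the timed function is a hypothetical stand-in, and the macro is normally enabled via a compiler flag rather than an in-source #define.

```cpp
// Sketch: timing a scope with MNN's AUTOTIME macro. Linking against libMNN.so
// is required because AutoTime's implementation lives there.
#define MNN_OPEN_TIME_TRACE   // usually passed as a compiler flag instead
#include "AutoTime.hpp"

static void run_inference_once() {
    // hypothetical stand-in for the work being timed, e.g. UltraFace::detect()
}

void timed_detect() {
    AUTOTIME;               // logs the duration of this scope when it ends
    run_inference_once();
}
```
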
/app/src/main/cpp/include/Backend.hpp:
--------------------------------------------------------------------------------
1 | //
2 | // Backend.hpp
3 | // MNN
4 | //
5 | // Created by MNN on 2018/07/06.
6 | // Copyright © 2018, Alibaba Group Holding Limited
7 | //
8 |
9 | #ifndef Backend_hpp
10 | #define Backend_hpp
11 |
12 | #include <stdio.h>
13 | #include <map>