├── .gitignore ├── .vscode └── settings.json ├── 0_Compile ├── Compile_Android.md ├── Compile_X86.md ├── cat.jpg ├── compile_tengine_android.sh ├── compile_tengine_x86.sh └── squeezenet_caffe.tmfile ├── 1_Convert ├── Convert_tmfile.md ├── convert_facedetect_onnx_2_tmfile.sh ├── face_detect.jpg └── version-RFB-320_simplified.onnx.tmfile ├── 2_FaceDetector ├── .gitignore ├── CMakeLists.txt ├── README.md ├── build.sh ├── imgs │ ├── 1.jpg │ ├── 2.jpg │ ├── 3.jpg │ └── 4.jpg ├── models │ └── version-RFB-320_simplified.tmfile ├── result2.jpg ├── run.sh ├── src │ ├── UltraFace.cpp │ ├── UltraFace.hpp │ └── main.cpp └── tengine │ ├── include │ └── tengine_c_api.h │ └── lib │ └── libtengine-lite.so ├── 3_FaceDetector_Android ├── Android │ ├── .gitignore │ ├── app │ │ ├── .gitignore │ │ ├── build.gradle │ │ ├── proguard-rules.pro │ │ └── src │ │ │ └── main │ │ │ ├── AndroidManifest.xml │ │ │ ├── assets │ │ │ ├── girl.jpeg │ │ │ ├── girls.jpg │ │ │ └── version-RFB-320_simplified.tmfile │ │ │ ├── cpp │ │ │ ├── AndroidLog.h │ │ │ ├── CMakeLists.txt │ │ │ ├── FaceDetector.cpp │ │ │ ├── UltraFace.cpp │ │ │ ├── UltraFace.hpp │ │ │ └── tengine │ │ │ │ └── include │ │ │ │ └── tengine_c_api.h │ │ │ ├── java │ │ │ └── com │ │ │ │ └── facesdk │ │ │ │ ├── FaceDetector.java │ │ │ │ ├── FaceInfo.java │ │ │ │ ├── activity │ │ │ │ ├── ClassifierActivity.java │ │ │ │ └── MainActivity.java │ │ │ │ └── utils │ │ │ │ ├── BitmapUtils.java │ │ │ │ ├── FaceUtils.java │ │ │ │ ├── FileUtils.java │ │ │ │ ├── ImageUtils.java │ │ │ │ ├── MagnifyEyeUtils.java │ │ │ │ ├── PermissionUtils.java │ │ │ │ ├── SensorEventUtil.java │ │ │ │ └── SmallFaceUtils.java │ │ │ └── res │ │ │ ├── layout │ │ │ ├── activity_classifier.xml │ │ │ └── activity_main.xml │ │ │ ├── mipmap-mdpi │ │ │ ├── camera_switch.png │ │ │ └── ic_launcher_round.png │ │ │ ├── values-v21 │ │ │ ├── dimens.xml │ │ │ └── styles.xml │ │ │ ├── values │ │ │ ├── colors.xml │ │ │ ├── dimens.xml │ │ │ ├── strings.xml │ │ │ ├── sttrs.xml │ │ │ └── 
styles.xml │ │ │ └── xml │ │ │ └── provider_path.xml │ ├── build.gradle │ ├── gradle.properties │ ├── gradle │ │ └── wrapper │ │ │ ├── gradle-wrapper.jar │ │ │ └── gradle-wrapper.properties │ ├── gradlew │ ├── gradlew.bat │ └── settings.gradle ├── README.md └── imgs │ ├── girls.jpg │ └── girls.png ├── 4_FaceDetector_Android_Camera ├── Andorid │ ├── .gitignore │ ├── app │ │ ├── .gitignore │ │ ├── build.gradle │ │ ├── proguard-rules.pro │ │ └── src │ │ │ └── main │ │ │ ├── AndroidManifest.xml │ │ │ ├── assets │ │ │ └── version-RFB-320_simplified.tmfile │ │ │ ├── cpp │ │ │ ├── AndroidLog.h │ │ │ ├── CMakeLists.txt │ │ │ ├── FaceDetector.cpp │ │ │ ├── UltraFace.cpp │ │ │ ├── UltraFace.hpp │ │ │ └── tengine │ │ │ │ └── include │ │ │ │ └── tengine_c_api.h │ │ │ ├── java │ │ │ └── com │ │ │ │ └── facesdk │ │ │ │ ├── FaceDetector.java │ │ │ │ ├── FaceInfo.java │ │ │ │ ├── activity │ │ │ │ ├── CameraActivity.java │ │ │ │ ├── ClassifierActivity.java │ │ │ │ └── MainActivity.java │ │ │ │ ├── camera │ │ │ │ └── CameraEngine.java │ │ │ │ ├── currencyview │ │ │ │ ├── AutoFitTextureView.java │ │ │ │ ├── CameraConnectionFragment.java │ │ │ │ ├── LegacyCameraConnectionFragment.java │ │ │ │ └── OverlayView.java │ │ │ │ └── utils │ │ │ │ ├── BitmapUtils.java │ │ │ │ ├── FoundationDraw.java │ │ │ │ ├── ImageUtils.java │ │ │ │ ├── LipDraw.java │ │ │ │ ├── PermissionUtils.java │ │ │ │ └── SensorEventUtil.java │ │ │ └── res │ │ │ ├── layout │ │ │ ├── activity_camera.xml │ │ │ ├── activity_main.xml │ │ │ └── camera_connection_fragment.xml │ │ │ ├── mipmap-mdpi │ │ │ ├── camera_switch.png │ │ │ └── ic_launcher_round.png │ │ │ ├── values-v21 │ │ │ ├── dimens.xml │ │ │ └── styles.xml │ │ │ ├── values │ │ │ ├── colors.xml │ │ │ ├── dimens.xml │ │ │ ├── strings.xml │ │ │ ├── sttrs.xml │ │ │ └── styles.xml │ │ │ └── xml │ │ │ └── provider_path.xml │ ├── build.gradle │ ├── gradle.properties │ ├── gradle │ │ └── wrapper │ │ │ ├── gradle-wrapper.jar │ │ │ └── gradle-wrapper.properties │ ├── 
gradlew │ ├── gradlew.bat │ └── settings.gradle ├── README.md └── imgs │ └── TengineKitDemo2.gif └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "files.associations": { 3 | "ostream": "cpp", 4 | "array": "cpp", 5 | "string": "cpp", 6 | "string_view": "cpp", 7 | "cctype": "cpp", 8 | "clocale": "cpp", 9 | "cmath": "cpp", 10 | "cstdarg": "cpp", 11 | "cstddef": "cpp", 12 | "cstdio": "cpp", 13 | "cstdlib": "cpp", 14 | "cstring": "cpp", 15 | "ctime": "cpp", 16 | "cwchar": "cpp", 17 | "cwctype": "cpp", 18 | "atomic": "cpp", 19 | "*.tcc": "cpp", 20 | "bitset": "cpp", 21 | "chrono": "cpp", 22 | "complex": "cpp", 23 | "cstdint": "cpp", 24 | "deque": "cpp", 25 | "list": "cpp", 26 | "unordered_map": "cpp", 27 | "vector": "cpp", 28 | "exception": "cpp", 29 | "algorithm": "cpp", 30 | "functional": "cpp", 31 | "iterator": "cpp", 32 | "map": "cpp", 33 | "memory": "cpp", 34 | "memory_resource": "cpp", 35 | "optional": "cpp", 36 | "ratio": "cpp", 37 | "set": "cpp", 38 | "system_error": "cpp", 39 | "tuple": "cpp", 40 | "type_traits": "cpp", 41 | "utility": "cpp", 42 | "fstream": "cpp", 43 | "initializer_list": "cpp", 44 | "iomanip": "cpp", 45 | "iosfwd": "cpp", 46 | "iostream": "cpp", 47 | "istream": "cpp", 48 | "limits": "cpp", 49 | "new": "cpp", 50 | "sstream": "cpp", 51 | "stdexcept": "cpp", 52 | "streambuf": "cpp", 53 | "thread": "cpp", 54 | "cinttypes": "cpp", 55 | "typeinfo": "cpp", 56 | "*.ipp": "cpp" 57 | } 58 | } -------------------------------------------------------------------------------- /0_Compile/Compile_Android.md: -------------------------------------------------------------------------------- 1 | # Tengine Android版本编译 2 | 3 | ## 环境 4 | 1. System: Ubuntu18.04 5 | 2. 
CMAKE: 3.15.3 6 | 7 | ## 下载Android NDK 8 | 9 | ``` 10 | wget ftp://ftp.openailab.net.cn/Tengine_android_build/android-ndk-r16b-linux-x86_64.zip 11 | unzip android-ndk-r16b-linux-x86_64.zip 12 | ``` 13 | 14 | ## 克隆Tengine源码 15 | 16 | ``` 17 | git clone https://github.com/OAID/Tengine.git 18 | ``` 19 | 20 | ## 编译 21 | 22 | ### 设置ANDROID_NDK环境变量 23 | **ANDROID_NDK为刚才解压的zip文件** 24 | ``` 25 | export ANDROID_NDK=/home/oal/ssd_data/workspace/tmp/android-ndk-r16b 26 | ``` 27 | 28 | ### 编译ARM32 29 | 30 | ``` 31 | cd Tengine 32 | mkdir build_android_32 33 | cd build_android_32 34 | cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI="armeabi-v7a" -DANDROID_PLATFORM=android-22 -DANDROID_STL=c++_shared -DANDROID_ARM_NEON=ON -DCONFIG_ARCH_ARM32=ON -DANDROID_ALLOW_UNDEFINED_SYMBOLS=TRUE .. 35 | ``` 36 | 如果输出下面内容,说明没有问题 37 | ``` 38 | -- Configuring done 39 | -- Generating done 40 | ``` 41 | 编译 42 | ``` 43 | make -j4 && make install 44 | ``` 45 | 46 | ### 编译ARM64 47 | 48 | ``` 49 | cd Tengine 50 | mkdir build_android_64 51 | cd build_android_64 52 | cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI="arm64-v8a" -DANDROID_PLATFORM=android-22 -DANDROID_STL=c++_shared -DANDROID_ARM_NEON=ON -DCONFIG_ARCH_ARM64=ON -DANDROID_ALLOW_UNDEFINED_SYMBOLS=TRUE .. 
53 | ``` 54 | 如果输出下面内容,说明没有问题 55 | ``` 56 | -- Configuring done 57 | -- Generating done 58 | ``` 59 | 编译 60 | ``` 61 | make -j4 && make install 62 | ``` 63 | 64 | ## 测试 65 | 66 | 绝大多数手机都支持ARM32,我们用编译好的ARM32版本测试编译出来的so有没有问题。 67 | 68 | 把 https://github.com/jiangzhongbo/Tengine_Tutorial/tree/master/0_Compile 中的cat.jpg,squeezenet_caffe.tmfile放到Tengine/build_android_32/install/bin目录下 69 | 70 | 然后push到手机的/data/local/tmp/目录下,这个tmp目录是Android系统的特殊目录,放在里面的文件可以被赋予可执行权限。 71 | 72 | ``` 73 | cd Tengine/build_android_32/install 74 | adb push ./lib/libtengine-lite.so /data/local/tmp/ 75 | adb push ./bin/tm_classification /data/local/tmp/ 76 | adb push ./bin/cat.jpg /data/local/tmp/ 77 | adb push ./bin/squeezenet_caffe.tmfile /data/local/tmp/ 78 | ``` 79 | **有些手机可能无法直接push到/data/local/tmp文件夹,这种情况下可以先push到/sdcard/文件夹,然后再mv到/data/local/tmp/** 80 | 81 | 用下面命令进入手机终端并进行测试 82 | ``` 83 | adb shell 84 | cd /data/local/tmp/ 85 | export LD_LIBRARY_PATH=. 86 | chmod +x tm_classification 87 | ./tm_classification -m squeezenet_caffe.tmfile -i cat.jpg 88 | ``` 89 | 输出 90 | ``` 91 | Image height not specified, use default 227 92 | Image width not specified, use default 227 93 | Scale value not specified, use default 1.0, 1.0, 1.0 94 | Mean value not specified, use default 104.0, 116.7, 122.7 95 | tengine-lite library version: 0.2-dev 96 | 97 | model file : squeezenet_caffe.tmfile 98 | image file : ./cat.jpg 99 | img_h, img_w, scale[3], mean[3] : 227 227 , 1.000 1.000 1.000, 104.0 116.7 122.7 100 | Repeat 1 times, thread 1, avg time 33.31 ms, max_time 33.31 ms, min_time 33.31 ms 101 | -------------------------------------- 102 | 0.273201, 281 103 | 0.267551, 282 104 | 0.181003, 278 105 | 0.081799, 285 106 | 0.072407, 151 107 | -------------------------------------- 108 | ``` 109 | 自此Tengine Android版本编译成功。 110 | 111 | ## 自动化编译脚本 112 | 113 | ``` 114 | wget ftp://ftp.openailab.net.cn/Tengine_android_build/android-ndk-r16b-linux-x86_64.zip 115 | unzip android-ndk-r16b-linux-x86_64.zip 116 | export 
ANDROID_NDK=`pwd`/android-ndk-r16b 117 | 118 | 119 | git clone https://github.com/OAID/Tengine.git 120 | git clone https://github.com/jiangzhongbo/Tengine_Tutorial 121 | cd Tengine 122 | 123 | mkdir build_android_32 124 | cd build_android_32 125 | cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI="armeabi-v7a" -DANDROID_PLATFORM=android-22 -DANDROID_STL=c++_shared -DANDROID_ARM_NEON=ON -DCONFIG_ARCH_ARM32=ON -DANDROID_ALLOW_UNDEFINED_SYMBOLS=TRUE .. 126 | make -j4 && make install 127 | 128 | cd ./install/bin 129 | cp ../../../../Tengine_Tutorial/0_Compile/cat.jpg ./ 130 | cp ../../../../Tengine_Tutorial/0_Compile/squeezenet_caffe.tmfile ./ 131 | 132 | cd ../../.. 133 | 134 | mkdir build_android_64 135 | cd build_android_64 136 | cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI="arm64-v8a" -DANDROID_PLATFORM=android-22 -DANDROID_STL=c++_shared -DANDROID_ARM_NEON=ON -DCONFIG_ARCH_ARM64=ON -DANDROID_ALLOW_UNDEFINED_SYMBOLS=TRUE .. 137 | make -j4 && make install 138 | 139 | cd ./install/bin 140 | cp ../../../../Tengine_Tutorial/0_Compile/cat.jpg ./ 141 | cp ../../../../Tengine_Tutorial/0_Compile/squeezenet_caffe.tmfile ./ 142 | 143 | ``` 144 | 145 | 为了方便,你可以直接 146 | ``` 147 | wget https://raw.githubusercontent.com/jiangzhongbo/Tengine_Tutorial/master/0_Compile/compile_tengine_android.sh 148 | sh compile_tengine_android.sh 149 | ``` 150 | 151 | ## 参考 152 | 153 | [https://github.com/OAID/Tengine/wiki/Tengine源码编译](https://github.com/OAID/Tengine/wiki/Tengine源码编译) 154 | -------------------------------------------------------------------------------- /0_Compile/Compile_X86.md: -------------------------------------------------------------------------------- 1 | # Tengine X86版本编译 2 | 3 | ## 环境 4 | 1. System: Ubuntu18.04 5 | 2. 
CMAKE: 3.15.3 6 | 7 | 为了确保后续执行没有问题,先执行 8 | ``` 9 | sudo apt-get install cmake make g++ git 10 | ``` 11 | 安装完后,终端中会显示下面内容 12 | ``` 13 | make 已经是最新版 14 | cmake 已经是最新版 15 | g++ 已经是最新版 16 | git 已经是最新版 17 | ``` 18 | ## 克隆Tengine源码 19 | 20 | ``` 21 | git clone https://github.com/OAID/Tengine.git 22 | ``` 23 | 24 | ## 编译 25 | 26 | ``` 27 | cd Tengine 28 | mkdir build 29 | cd build 30 | cmake .. 31 | ``` 32 | 如果输出下面内容,说明没有问题 33 | ``` 34 | -- Configuring done 35 | -- Generating done 36 | ``` 37 | 编译 38 | ``` 39 | make -j4 && make install 40 | ``` 41 | 42 | ## 测试 43 | 44 | 编译没有问题的话,把 https://github.com/jiangzhongbo/Tengine_Tutorial/tree/master/0_Compile 中的cat.jpg,squeezenet_caffe.tmfile放到Tengine/build/install/bin目录下,然后执行下面命令 45 | ``` 46 | cd Tengine/build/install/bin 47 | ./tm_classification -m squeezenet_caffe.tmfile -i ./cat.jpg 48 | ``` 49 | 输出 50 | ``` 51 | Image height not specified, use default 227 52 | Image width not specified, use default 227 53 | Scale value not specified, use default 1.0, 1.0, 1.0 54 | Mean value not specified, use default 104.0, 116.7, 122.7 55 | tengine-lite library version: 0.2-dev 56 | 57 | model file : squeezenet_caffe.tmfile 58 | image file : ./cat.jpg 59 | img_h, img_w, scale[3], mean[3] : 227 227 , 1.000 1.000 1.000, 104.0 116.7 122.7 60 | Repeat 1 times, thread 1, avg time 33.31 ms, max_time 33.31 ms, min_time 33.31 ms 61 | -------------------------------------- 62 | 0.273201, 281 63 | 0.267551, 282 64 | 0.181003, 278 65 | 0.081799, 285 66 | 0.072407, 151 67 | -------------------------------------- 68 | ``` 69 | 自此Tengine X86版本编译成功。 70 | 71 | ## 自动编译脚本 72 | 73 | ``` 74 | sudo apt-get install cmake make g++ git -y 75 | 76 | git clone https://github.com/OAID/Tengine.git 77 | git clone https://github.com/jiangzhongbo/Tengine_Tutorial 78 | cd Tengine 79 | mkdir build 80 | cd build 81 | cmake .. 
82 | make -j4 && make install 83 | 84 | cd ./install/bin 85 | cp ../../../../Tengine_Tutorial/0_Compile/cat.jpg ./ 86 | cp ../../../../Tengine_Tutorial/0_Compile/squeezenet_caffe.tmfile ./ 87 | 88 | ./tm_classification -m squeezenet_caffe.tmfile -i ./cat.jpg 89 | ``` 90 | 你可以直接 91 | ``` 92 | wget https://raw.githubusercontent.com/jiangzhongbo/Tengine_Tutorial/master/0_Compile/compile_tengine_x86.sh 93 | sh compile_tengine_x86.sh 94 | ``` 95 | 96 | ## 参考 97 | 98 | [https://github.com/OAID/Tengine/wiki/Tengine源码编译](https://github.com/OAID/Tengine/wiki/Tengine源码编译) -------------------------------------------------------------------------------- /0_Compile/cat.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiangzhongbo/Tengine_Tutorial/4b490907b6012b9bae0bfc1798bce73457dcd19f/0_Compile/cat.jpg -------------------------------------------------------------------------------- /0_Compile/compile_tengine_android.sh: -------------------------------------------------------------------------------- 1 | wget ftp://ftp.openailab.net.cn/Tengine_android_build/android-ndk-r16b-linux-x86_64.zip 2 | unzip android-ndk-r16b-linux-x86_64.zip 3 | export ANDROID_NDK=`pwd`/android-ndk-r16b 4 | 5 | git clone https://github.com/OAID/Tengine.git 6 | git clone https://github.com/jiangzhongbo/Tengine_Tutorial 7 | cd Tengine 8 | 9 | mkdir build_android_32 10 | cd build_android_32 11 | cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI="armeabi-v7a" -DANDROID_PLATFORM=android-22 -DANDROID_STL=c++_shared -DANDROID_ARM_NEON=ON -DCONFIG_ARCH_ARM32=ON -DANDROID_ALLOW_UNDEFINED_SYMBOLS=TRUE .. 12 | make -j4 && make install 13 | 14 | cd ./install/bin 15 | cp ../../../../Tengine_Tutorial/0_Compile/cat.jpg ./ 16 | cp ../../../../Tengine_Tutorial/0_Compile/squeezenet_caffe.tmfile ./ 17 | 18 | cd ../../.. 
19 | 20 | mkdir build_android_64 21 | cd build_android_64 22 | cmake -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI="arm64-v8a" -DANDROID_PLATFORM=android-22 -DANDROID_STL=c++_shared -DANDROID_ARM_NEON=ON -DCONFIG_ARCH_ARM64=ON -DANDROID_ALLOW_UNDEFINED_SYMBOLS=TRUE .. 23 | make -j4 && make install 24 | 25 | cd ./install/bin 26 | cp ../../../../Tengine_Tutorial/0_Compile/cat.jpg ./ 27 | cp ../../../../Tengine_Tutorial/0_Compile/squeezenet_caffe.tmfile ./ -------------------------------------------------------------------------------- /0_Compile/compile_tengine_x86.sh: -------------------------------------------------------------------------------- 1 | sudo apt-get install cmake make g++ git -y 2 | 3 | git clone https://github.com/OAID/Tengine.git 4 | git clone https://github.com/jiangzhongbo/Tengine_Tutorial 5 | cd Tengine 6 | mkdir build 7 | cd build 8 | cmake .. 9 | make -j4 && make install 10 | 11 | cd ./install/bin 12 | cp ../../../../Tengine_Tutorial/0_Compile/cat.jpg ./ 13 | cp ../../../../Tengine_Tutorial/0_Compile/squeezenet_caffe.tmfile ./ 14 | 15 | ./tm_classification -m squeezenet_caffe.tmfile -i ./cat.jpg -------------------------------------------------------------------------------- /0_Compile/squeezenet_caffe.tmfile: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiangzhongbo/Tengine_Tutorial/4b490907b6012b9bae0bfc1798bce73457dcd19f/0_Compile/squeezenet_caffe.tmfile -------------------------------------------------------------------------------- /1_Convert/Convert_tmfile.md: -------------------------------------------------------------------------------- 1 | # Tengine 转换模型 2 | 3 | ## 环境 4 | **System**: Ubuntu18.04 5 | 6 | **CMAKE**: 3.15.3 7 | 8 | 9 | ## 编译模型转化工具 10 | 11 | Tengine其实是提供Linux版本的二进制模型转化工具([下载](https://github.com/OAID/Tengine/releases)),为了让读者了解的更详细些,还是把工具编译过程写一下。 12 | 13 | ### 准备工作 14 | 15 | 为了确保后续执行没有问题,先执行 16 | ``` 17 | sudo apt 
install libprotobuf-dev protobuf-compiler 18 | ``` 19 | 安装完后,终端中会显示下面内容 20 | ``` 21 | libprotobuf-dev 已经是最新版 22 | protobuf-compiler 已经是最新版 23 | ``` 24 | 25 | ### 克隆Tengine-Convert-Tools源码 26 | 27 | ``` 28 | git clone https://github.com/OAID/Tengine-Convert-Tools 29 | ``` 30 | 31 | ### 编译 32 | 33 | ``` 34 | cd Tengine-Convert-Tools 35 | mkdir build && cd build 36 | cmake .. 37 | ``` 38 | 如果输出下面内容,说明没有问题 39 | ``` 40 | -- Configuring done 41 | -- Generating done 42 | ``` 43 | 编译 44 | ``` 45 | make -j4 && make install 46 | ``` 47 | 48 | 编译完成后该工具放在 **./build/install/bin** 49 | 50 | ## 转换模型 51 | 52 | ![](./face_detect.jpg) 53 | 54 | [Ultra-Light-Fast-Generic-Face-Detector-1MB](https://github.com/Linzaer/Ultra-Light-Fast-Generic-Face-Detector-1MB) 55 | 56 | 该模型是针对边缘计算设备设计的轻量人脸检测模型: 57 | 58 | 1. 在模型大小上,默认FP32精度下(.pth)文件大小为 1.04~1.1MB,推理框架int8量化后大小为 300KB 左右。 59 | 2. 在模型计算量上,320x240的输入分辨率下 90~109 MFlops左右。 60 | 3. 模型有两个版本,version-slim(主干精简速度略快),version-RFB(加入了修改后的RFB模块,精度更高)。 61 | 4. 提供320x240、640x480不同输入分辨率下使用widerface训练的预训练模型,更好的工作于不同的应用场景。 62 | 5. 
支持onnx导出。 63 | 64 | 我们将把该模型改为Tengine可用模型。 65 | 66 | 我们先下载Ultra-Light-Fast-Generic-Face-Detector-1MB 67 | 68 | ``` 69 | git clone https://github.com/Linzaer/Ultra-Light-Fast-Generic-Face-Detector-1MB 70 | ``` 71 | 72 | 将./Ultra-Light-Fast-Generic-Face-Detector-1MB/models/onnx下的version-RFB-320_simplified.onnx复制到./Tengine-Convert-Tools/build/install/bin 73 | 74 | ``` 75 | cp ./Ultra-Light-Fast-Generic-Face-Detector-1MB/models/onnx/version-RFB-320_simplified.onnx ./Tengine-Convert-Tools/build/install/bin/ 76 | ``` 77 | 78 | 这个模型是经过[onnx-simplifier](https://github.com/daquexian/onnx-simplifier)优化的,如果不操作此过程,可能会保留部分不支持算子。 79 | 80 | ### 转换 81 | 82 | ``` 83 | ./tm_convert_tool -f onnx -m version-RFB-320_simplified.onnx -o version-RFB-320_simplified.tmfile 84 | ``` 85 | 86 | 输出 87 | 88 | ``` 89 | Create tengine model file done: version-RFB-320_simplified.tmfile 90 | ``` 91 | 92 | 自此转化成功 93 | 94 | ## 自动编译脚本 95 | 96 | ``` 97 | sudo apt install libprotobuf-dev protobuf-compiler 98 | 99 | git clone https://github.com/OAID/Tengine-Convert-Tools 100 | git clone https://github.com/Linzaer/Ultra-Light-Fast-Generic-Face-Detector-1MB 101 | 102 | cd Tengine-Convert-Tools 103 | mkdir build && cd build 104 | cmake .. 105 | make -j4 && make install 106 | 107 | cd ../.. 
108 | 109 | cp ./Ultra-Light-Fast-Generic-Face-Detector-1MB/models/onnx/version-RFB-320_simplified.onnx ./Tengine-Convert-Tools/build/install/bin/ 110 | 111 | cd ./Tengine-Convert-Tools/build/install/bin/ 112 | 113 | ./tm_convert_tool -f onnx -m version-RFB-320_simplified.onnx -o version-RFB-320_simplified.tmfile 114 | ``` 115 | 116 | 你可以直接 117 | ``` 118 | wget https://raw.githubusercontent.com/jiangzhongbo/Tengine_Tutorial/master/1_Convert/convert_facedetect_onnx_2_tmfile.sh 119 | sh convert_facedetect_onnx_2_tmfile.sh 120 | ``` 121 | 122 | ## 参考 123 | 124 | [https://github.com/OAID/Tengine-Convert-Tools](https://github.com/OAID/Tengine-Convert-Tools) 125 | 126 | [github.com/daquexian/onnx-simplifier](https://github.com/daquexian/onnx-simplifier) 127 | 128 | [Ultra-Light-Fast-Generic-Face-Detector-1MB](https://github.com/Linzaer/Ultra-Light-Fast-Generic-Face-Detector-1MB) 129 | -------------------------------------------------------------------------------- /1_Convert/convert_facedetect_onnx_2_tmfile.sh: -------------------------------------------------------------------------------- 1 | sudo apt install libprotobuf-dev protobuf-compiler 2 | 3 | git clone https://github.com/OAID/Tengine-Convert-Tools 4 | git clone https://github.com/Linzaer/Ultra-Light-Fast-Generic-Face-Detector-1MB 5 | 6 | cd Tengine-Convert-Tools 7 | mkdir build && cd build 8 | cmake .. 9 | make -j4 && make install 10 | 11 | cd ../.. 
12 | 13 | cp ./Ultra-Light-Fast-Generic-Face-Detector-1MB/models/onnx/version-RFB-320_simplified.onnx ./Tengine-Convert-Tools/build/install/bin/ 14 | 15 | cd ./Tengine-Convert-Tools/build/install/bin/ 16 | 17 | ./tm_convert_tool -f onnx -m version-RFB-320_simplified.onnx -o version-RFB-320_simplified.tmfile -------------------------------------------------------------------------------- /1_Convert/face_detect.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiangzhongbo/Tengine_Tutorial/4b490907b6012b9bae0bfc1798bce73457dcd19f/1_Convert/face_detect.jpg -------------------------------------------------------------------------------- /1_Convert/version-RFB-320_simplified.onnx.tmfile: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiangzhongbo/Tengine_Tutorial/4b490907b6012b9bae0bfc1798bce73457dcd19f/1_Convert/version-RFB-320_simplified.onnx.tmfile -------------------------------------------------------------------------------- /2_FaceDetector/.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | ./build -------------------------------------------------------------------------------- /2_FaceDetector/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.10) 2 | project(Ultra-face-tengine) 3 | 4 | set(CMAKE_CXX_STANDARD 11) 5 | 6 | 7 | find_package(OpenCV REQUIRED) 8 | include_directories(tengine/include/) 9 | 10 | add_library(tengine SHARED IMPORTED) 11 | set_target_properties(tengine PROPERTIES IMPORTED_LOCATION ${CMAKE_SOURCE_DIR}/tengine/lib/libtengine-lite.so) 12 | 13 | add_executable(Ultra-face-tengine src/main.cpp src/UltraFace.cpp) 14 | target_link_libraries(Ultra-face-tengine tengine ${OpenCV_LIBS}) -------------------------------------------------------------------------------- 
/2_FaceDetector/build.sh: -------------------------------------------------------------------------------- 1 | rm -rf ./build 2 | mkdir build 3 | cd build 4 | cmake .. 5 | make -j8 -------------------------------------------------------------------------------- /2_FaceDetector/imgs/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiangzhongbo/Tengine_Tutorial/4b490907b6012b9bae0bfc1798bce73457dcd19f/2_FaceDetector/imgs/1.jpg -------------------------------------------------------------------------------- /2_FaceDetector/imgs/2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiangzhongbo/Tengine_Tutorial/4b490907b6012b9bae0bfc1798bce73457dcd19f/2_FaceDetector/imgs/2.jpg -------------------------------------------------------------------------------- /2_FaceDetector/imgs/3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiangzhongbo/Tengine_Tutorial/4b490907b6012b9bae0bfc1798bce73457dcd19f/2_FaceDetector/imgs/3.jpg -------------------------------------------------------------------------------- /2_FaceDetector/imgs/4.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiangzhongbo/Tengine_Tutorial/4b490907b6012b9bae0bfc1798bce73457dcd19f/2_FaceDetector/imgs/4.jpg -------------------------------------------------------------------------------- /2_FaceDetector/models/version-RFB-320_simplified.tmfile: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiangzhongbo/Tengine_Tutorial/4b490907b6012b9bae0bfc1798bce73457dcd19f/2_FaceDetector/models/version-RFB-320_simplified.tmfile -------------------------------------------------------------------------------- /2_FaceDetector/result2.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiangzhongbo/Tengine_Tutorial/4b490907b6012b9bae0bfc1798bce73457dcd19f/2_FaceDetector/result2.jpg -------------------------------------------------------------------------------- /2_FaceDetector/run.sh: -------------------------------------------------------------------------------- 1 | ./build/Ultra-face-tengine /home/oal/ssd_data/workspace/Tengine_Tutorial/2_FaceDetector/models/version-RFB-320_simplified.tmfile ./imgs/1.jpg -------------------------------------------------------------------------------- /2_FaceDetector/src/UltraFace.cpp: -------------------------------------------------------------------------------- 1 | #define clip(x, y) (x < 0 ? 0 : (x > y ? y : x)) 2 | 3 | #include "UltraFace.hpp" 4 | 5 | using namespace std; 6 | 7 | UltraFace::UltraFace(const std::string &tengine_path, 8 | int input_width, int input_length, int num_thread_, 9 | float score_threshold_, float iou_threshold_, int topk_) { 10 | num_thread = num_thread_; 11 | score_threshold = score_threshold_; 12 | iou_threshold = iou_threshold_; 13 | in_w = input_width; 14 | in_h = input_length; 15 | w_h_list = {in_w, in_h}; 16 | 17 | for (auto size : w_h_list) { 18 | std::vector fm_item; 19 | for (float stride : strides) { 20 | fm_item.push_back(ceil(size / stride)); 21 | } 22 | featuremap_size.push_back(fm_item); 23 | } 24 | 25 | for (auto size : w_h_list) { 26 | shrinkage_size.push_back(strides); 27 | } 28 | /* generate prior anchors */ 29 | for (int index = 0; index < num_featuremap; index++) { 30 | float scale_w = in_w / shrinkage_size[0][index]; 31 | float scale_h = in_h / shrinkage_size[1][index]; 32 | for (int j = 0; j < featuremap_size[1][index]; j++) { 33 | for (int i = 0; i < featuremap_size[0][index]; i++) { 34 | float x_center = (i + 0.5) / scale_w; 35 | float y_center = (j + 0.5) / scale_h; 36 | 37 | for (float k : min_boxes[index]) { 38 | float w = k / in_w; 39 | 
float h = k / in_h; 40 | priors.push_back({clip(x_center, 1), clip(y_center, 1), clip(w, 1), clip(h, 1)}); 41 | } 42 | } 43 | } 44 | } 45 | /* generate prior anchors finished */ 46 | 47 | num_anchors = priors.size(); 48 | 49 | if (init_tengine() != 0) 50 | { 51 | fprintf(stderr, "Initial tengine failed.\n"); 52 | exit(0); 53 | } 54 | 55 | cout< &face_list) { 92 | if (raw_image.empty()) { 93 | std::cout << "image is empty ,please check!" << std::endl; 94 | return -1; 95 | } 96 | 97 | image_h = raw_image.rows; 98 | image_w = raw_image.cols; 99 | 100 | int img_size = in_w * in_h * 3; 101 | float* input_data = ( float* )malloc(img_size * sizeof(float)); 102 | get_input_data_cv(raw_image, input_data, in_w, in_h, mean_vals, norm_vals, 0); 103 | 104 | if (set_tensor_buffer(input_tensor, input_data, (in_w * in_h * 3) * 4) < 0) 105 | { 106 | printf("Set input tensor buffer failed\n"); 107 | return -1; 108 | } 109 | 110 | auto start = chrono::steady_clock::now(); 111 | 112 | 113 | // run network 114 | if (run_graph(graph, 1) < 0) 115 | { 116 | printf("Run graph failed\n"); 117 | return -1; 118 | } 119 | 120 | // get output data 121 | 122 | string scores = "scores"; 123 | string boxes = "boxes"; 124 | tensor_t tensor_scores = get_graph_tensor(graph, scores.c_str()); 125 | tensor_t tensor_boxes = get_graph_tensor(graph, boxes.c_str()); 126 | 127 | std::vector bbox_collection; 128 | 129 | 130 | auto end = chrono::steady_clock::now(); 131 | chrono::duration elapsed = end - start; 132 | cout << "inference time:" << elapsed.count() << " s" << endl; 133 | 134 | generateBBox(bbox_collection, tensor_scores, tensor_boxes); 135 | nms(bbox_collection, face_list); 136 | 137 | free(input_data); 138 | 139 | return 0; 140 | } 141 | 142 | void UltraFace::generateBBox(std::vector &bbox_collection, tensor_t scores, tensor_t boxes) { 143 | float* scores_blob = ( float* )get_tensor_buffer(scores); 144 | float* boxes_blob = ( float* )get_tensor_buffer(boxes); 145 | for (int i = 0; i < 
num_anchors; i++) { 146 | if (scores_blob[i * 2 + 1] > score_threshold) { 147 | FaceInfo rects; 148 | float x_center = boxes_blob[i * 4] * center_variance * priors[i][2] + priors[i][0]; 149 | float y_center = boxes_blob[i * 4 + 1] * center_variance * priors[i][3] + priors[i][1]; 150 | float w = exp(boxes_blob[i * 4 + 2] * size_variance) * priors[i][2]; 151 | float h = exp(boxes_blob[i * 4 + 3] * size_variance) * priors[i][3]; 152 | 153 | rects.x1 = clip(x_center - w / 2.0, 1) * image_w; 154 | rects.y1 = clip(y_center - h / 2.0, 1) * image_h; 155 | rects.x2 = clip(x_center + w / 2.0, 1) * image_w; 156 | rects.y2 = clip(y_center + h / 2.0, 1) * image_h; 157 | rects.score = clip(scores_blob[i * 2 + 1], 1); 158 | bbox_collection.push_back(rects); 159 | } 160 | } 161 | } 162 | 163 | void UltraFace::nms(std::vector &input, std::vector &output, int type) { 164 | std::sort(input.begin(), input.end(), [](const FaceInfo &a, const FaceInfo &b) { return a.score > b.score; }); 165 | 166 | int box_num = input.size(); 167 | 168 | std::vector merged(box_num, 0); 169 | 170 | for (int i = 0; i < box_num; i++) { 171 | if (merged[i]) 172 | continue; 173 | std::vector buf; 174 | 175 | buf.push_back(input[i]); 176 | merged[i] = 1; 177 | 178 | float h0 = input[i].y2 - input[i].y1 + 1; 179 | float w0 = input[i].x2 - input[i].x1 + 1; 180 | 181 | float area0 = h0 * w0; 182 | 183 | for (int j = i + 1; j < box_num; j++) { 184 | if (merged[j]) 185 | continue; 186 | 187 | float inner_x0 = input[i].x1 > input[j].x1 ? input[i].x1 : input[j].x1; 188 | float inner_y0 = input[i].y1 > input[j].y1 ? input[i].y1 : input[j].y1; 189 | 190 | float inner_x1 = input[i].x2 < input[j].x2 ? input[i].x2 : input[j].x2; 191 | float inner_y1 = input[i].y2 < input[j].y2 ? 
input[i].y2 : input[j].y2; 192 | 193 | float inner_h = inner_y1 - inner_y0 + 1; 194 | float inner_w = inner_x1 - inner_x0 + 1; 195 | 196 | if (inner_h <= 0 || inner_w <= 0) 197 | continue; 198 | 199 | float inner_area = inner_h * inner_w; 200 | 201 | float h1 = input[j].y2 - input[j].y1 + 1; 202 | float w1 = input[j].x2 - input[j].x1 + 1; 203 | 204 | float area1 = h1 * w1; 205 | 206 | float score; 207 | 208 | score = inner_area / (area0 + area1 - inner_area); 209 | 210 | if (score > iou_threshold) { 211 | merged[j] = 1; 212 | buf.push_back(input[j]); 213 | } 214 | } 215 | switch (type) { 216 | case hard_nms: { 217 | output.push_back(buf[0]); 218 | break; 219 | } 220 | case blending_nms: { 221 | float total = 0; 222 | for (int i = 0; i < buf.size(); i++) { 223 | total += exp(buf[i].score); 224 | } 225 | FaceInfo rects; 226 | memset(&rects, 0, sizeof(rects)); 227 | for (int i = 0; i < buf.size(); i++) { 228 | float rate = exp(buf[i].score) / total; 229 | rects.x1 += buf[i].x1 * rate; 230 | rects.y1 += buf[i].y1 * rate; 231 | rects.x2 += buf[i].x2 * rate; 232 | rects.y2 += buf[i].y2 * rate; 233 | rects.score += buf[i].score * rate; 234 | } 235 | output.push_back(rects); 236 | break; 237 | } 238 | default: { 239 | printf("wrong type of nms."); 240 | exit(-1); 241 | } 242 | } 243 | } 244 | } 245 | 246 | void UltraFace::get_input_data_cv(const cv::Mat& sample, float* input_data, int img_w, int img_h, const float* mean, const float* scale, int swapRB) 247 | { 248 | cv::Mat img; 249 | if(sample.channels() == 4) 250 | { 251 | cv::cvtColor(sample, img, cv::COLOR_BGRA2BGR); 252 | } 253 | else if(sample.channels() == 1) 254 | { 255 | cv::cvtColor(sample, img, cv::COLOR_GRAY2BGR); 256 | } 257 | else if(sample.channels() == 3 && swapRB == 1) 258 | { 259 | cv::cvtColor(sample, img, cv::COLOR_BGR2RGB); 260 | } 261 | else 262 | { 263 | img = sample; 264 | } 265 | 266 | cv::resize(img, img, cv::Size(img_w, img_h)); 267 | img.convertTo(img, CV_32FC3); 268 | float* img_data = ( float* 
)img.data; 269 | int hw = img_w * img_h; 270 | for(int w = 0; w < img_w; w++) 271 | { 272 | for(int h = 0; h < img_h; h++) 273 | { 274 | for(int c = 0; c < 3; c++) 275 | { 276 | input_data[c * hw + w * img_h + h] = (*img_data - mean[c]) * scale[c]; 277 | img_data++; 278 | } 279 | } 280 | } 281 | } -------------------------------------------------------------------------------- /2_FaceDetector/src/UltraFace.hpp: -------------------------------------------------------------------------------- 1 | #ifndef UltraFace_hpp 2 | #define UltraFace_hpp 3 | 4 | #pragma once 5 | 6 | #include "tengine_c_api.h" 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #define num_featuremap 4 16 | #define hard_nms 1 17 | #define blending_nms 2 /* mix nms was been proposaled in paper blaze face, aims to minimize the temporal jitter*/ 18 | typedef struct FaceInfo { 19 | float x1; 20 | float y1; 21 | float x2; 22 | float y2; 23 | float score; 24 | 25 | } FaceInfo; 26 | 27 | class UltraFace { 28 | public: 29 | UltraFace(const std::string &tengine_path, 30 | int input_width, int input_length, int num_thread_ = 4, float score_threshold_ = 0.7, float iou_threshold_ = 0.3, 31 | int topk_ = -1); 32 | 33 | ~UltraFace(); 34 | 35 | int detect(cv::Mat &img, std::vector &face_list); 36 | 37 | private: 38 | void generateBBox(std::vector &bbox_collection, tensor_t scores, tensor_t boxes); 39 | 40 | void nms(std::vector &input, std::vector &output, int type = blending_nms); 41 | 42 | void get_input_data_cv(const cv::Mat& sample, float* input_data, int img_h, int img_w, const float* mean, const float* scale, int swapRB = 0); 43 | 44 | private: 45 | 46 | graph_t graph = nullptr; 47 | tensor_t input_tensor = nullptr; 48 | 49 | int num_thread; 50 | int image_w; 51 | int image_h; 52 | 53 | int in_w; 54 | int in_h; 55 | int num_anchors; 56 | 57 | float score_threshold; 58 | float iou_threshold; 59 | 60 | 61 | const float mean_vals[3] = {127, 127, 127}; 62 
| const float norm_vals[3] = {1.0 / 128, 1.0 / 128, 1.0 / 128}; 63 | 64 | const float center_variance = 0.1; 65 | const float size_variance = 0.2; 66 | const std::vector> min_boxes = { 67 | {10.0f, 16.0f, 24.0f}, 68 | {32.0f, 48.0f}, 69 | {64.0f, 96.0f}, 70 | {128.0f, 192.0f, 256.0f}}; 71 | const std::vector strides = {8.0, 16.0, 32.0, 64.0}; 72 | std::vector> featuremap_size; 73 | std::vector> shrinkage_size; 74 | std::vector w_h_list; 75 | 76 | std::vector> priors = {}; 77 | }; 78 | 79 | #endif /* UltraFace_hpp */ -------------------------------------------------------------------------------- /2_FaceDetector/src/main.cpp: -------------------------------------------------------------------------------- 1 | #include "UltraFace.hpp" 2 | #include 3 | #include 4 | 5 | using namespace std; 6 | 7 | int main(int argc, char **argv) { 8 | if (argc <= 2) { 9 | fprintf(stderr, "Usage: %s [image files...]\n", argv[0]); 10 | return 1; 11 | } 12 | 13 | string tengine_path = argv[1]; 14 | UltraFace ultraface(tengine_path, 320, 240, 4, 0.65); // config model input 15 | 16 | for (int i = 2; i < argc; i++) { 17 | string image_file = argv[i]; 18 | cout << "Processing " << image_file << endl; 19 | 20 | cv::Mat frame = cv::imread(image_file); 21 | auto start = chrono::steady_clock::now(); 22 | vector face_info; 23 | ultraface.detect(frame, face_info); 24 | 25 | cout << "face_info " << face_info.size() << endl; 26 | 27 | for (auto face : face_info) { 28 | cv::Point pt1(face.x1, face.y1); 29 | cv::Point pt2(face.x2, face.y2); 30 | cv::rectangle(frame, pt1, pt2, cv::Scalar(0, 255, 0), 2); 31 | } 32 | 33 | auto end = chrono::steady_clock::now(); 34 | chrono::duration elapsed = end - start; 35 | cout << "all time: " << elapsed.count() << " s" << endl; 36 | // cv::imshow("UltraFace", frame); 37 | // cv::waitKey(); 38 | string result_name = "result" + to_string(i) + ".jpg"; 39 | cv::imwrite(result_name, frame); 40 | } 41 | return 0; 42 | } 
-------------------------------------------------------------------------------- /2_FaceDetector/tengine/lib/libtengine-lite.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiangzhongbo/Tengine_Tutorial/4b490907b6012b9bae0bfc1798bce73457dcd19f/2_FaceDetector/tengine/lib/libtengine-lite.so -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/.gitignore: -------------------------------------------------------------------------------- 1 | # Built application files 2 | *.apk 3 | *.ap_ 4 | 5 | # Files for the ART/Dalvik VM 6 | *.dex 7 | 8 | # Java class files 9 | *.class 10 | 11 | # Generated files 12 | bin/ 13 | gen/ 14 | out/ 15 | 16 | # Gradle files 17 | .gradle/ 18 | build/ 19 | 20 | # Local configuration file (sdk path, etc) 21 | local.properties 22 | 23 | # Proguard folder generated by Eclipse 24 | proguard/ 25 | 26 | # Log Files 27 | *.log 28 | 29 | # Android Studio Navigation editor temp files 30 | .navigation/ 31 | 32 | # Android Studio captures folder 33 | captures/ 34 | 35 | # IntelliJ 36 | *.iml 37 | .idea/workspace.xml 38 | .idea/tasks.xml 39 | .idea/gradle.xml 40 | .idea/assetWizardSettings.xml 41 | .idea/dictionaries 42 | .idea/libraries 43 | .idea/caches 44 | 45 | # Keystore files 46 | # Uncomment the following line if you do not want to check your keystore files in. 47 | #*.jks 48 | 49 | # External native build folder generated in Android Studio 2.2 and later 50 | .externalNativeBuild 51 | 52 | # Google Services (e.g. 
APIs or Firebase) 53 | google-services.json 54 | 55 | # Freeline 56 | freeline.py 57 | freeline/ 58 | freeline_project_description.json 59 | 60 | # fastlane 61 | fastlane/report.xml 62 | fastlane/Preview.html 63 | fastlane/screenshots 64 | fastlane/test_output 65 | fastlane/readme.md 66 | 67 | .settings 68 | .idea 69 | .project -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/.gitignore: -------------------------------------------------------------------------------- 1 | /build 2 | libs/* 3 | .cxx -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/build.gradle: -------------------------------------------------------------------------------- 1 | apply plugin: 'com.android.application' 2 | 3 | android { 4 | compileSdkVersion 28 5 | buildToolsVersion '28.0.3' 6 | defaultConfig { 7 | applicationId "com.facesdk" 8 | minSdkVersion 19 9 | targetSdkVersion 28 10 | versionCode 1 11 | versionName "1.0" 12 | testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner" 13 | externalNativeBuild { 14 | cmake { 15 | cppFlags "" 16 | cppFlags "-std=c++11 -frtti -fexceptions" 17 | abiFilters 'armeabi-v7a', 'arm64-v8a' 18 | 19 | arguments "-DANDROID_TOOLCHAIN=clang" 20 | cFlags "-O2 -fvisibility=hidden -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math " 21 | cppFlags "-O2 -fvisibility=hidden -fvisibility-inlines-hidden -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math " 22 | 23 | //rtti(run-time type interface)运行时类型信息,这是编译器的一个特性,默认时关闭状态 24 | //如果需要在C/C++代码中调用Java的方法,需要手动开启此特性 25 | cppFlags "-frtti -fexceptions -std=c++11 -v -Wdeprecated-declarations" 26 | } 27 | } 28 | ndk { 29 | //声明启用Android日志, 在c/c++的源文件中使用的#include 日志将得到输出 30 | ldLibs "log" 31 | 32 | //声明创建指定cpu架构的so库 33 | //如果想在模拟器运行 加上 "x86" 34 | abiFilters 'armeabi-v7a', 'arm64-v8a' 35 | 36 | stl "gnustl_static" 37 
| } 38 | } 39 | buildTypes { 40 | release { 41 | minifyEnabled true 42 | shrinkResources true 43 | proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro' 44 | } 45 | 46 | debug { 47 | minifyEnabled false 48 | shrinkResources false 49 | proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro' 50 | } 51 | } 52 | externalNativeBuild { 53 | cmake { 54 | path 'src/main/cpp/CMakeLists.txt' 55 | version "3.10.2" 56 | } 57 | } 58 | lintOptions { 59 | checkReleaseBuilds false 60 | abortOnError false 61 | } 62 | sourceSets { 63 | main { 64 | jniLibs.srcDirs = ['libs'] 65 | } 66 | } 67 | repositories { 68 | flatDir { 69 | dirs 'libs' 70 | } 71 | } 72 | } 73 | 74 | 75 | dependencies { 76 | implementation fileTree(dir: 'libs', include: ['*.jar','*.aar']) 77 | implementation 'androidx.appcompat:appcompat:1.1.0' 78 | testImplementation 'junit:junit:4.12' 79 | androidTestImplementation 'androidx.test.ext:junit:1.1.1' 80 | androidTestImplementation 'androidx.test.espresso:espresso-core:3.1.1' 81 | implementation 'com.makeramen:roundedimageview:2.3.0' 82 | implementation 'com.google.android.material:material:1.0.0' 83 | 84 | implementation 'com.tengine.android:tenginekit:1.0.5' 85 | 86 | implementation 'com.android.support:appcompat-v7:28.0.0' 87 | implementation 'com.android.support:recyclerview-v7:28.0.0' 88 | implementation 'com.android.support:design:28.0.0' 89 | implementation 'com.android.support:support-v4:28.0.0' 90 | 91 | 92 | } -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/proguard-rules.pro: -------------------------------------------------------------------------------- 1 | # Add project specific ProGuard rules here. 2 | # You can control the set of applied configuration files using the 3 | # proguardFiles setting in build.gradle. 
4 | # 5 | # For more details, see 6 | # http://developer.android.com/guide/developing/tools/proguard.html 7 | 8 | # If your project uses WebView with JS, uncomment the following 9 | # and specify the fully qualified class name to the JavaScript interface 10 | # class: 11 | #-keepclassmembers class fqcn.of.javascript.interface.for.webview { 12 | # public *; 13 | #} 14 | 15 | # Uncomment this to preserve the line number information for 16 | # debugging stack traces. 17 | #-keepattributes SourceFile,LineNumberTable 18 | 19 | # If you keep the line number information, uncomment this to 20 | # hide the original source file name. 21 | #-renamesourcefileattribute SourceFile 22 | -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/AndroidManifest.xml: -------------------------------------------------------------------------------- 1 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 19 | 20 | 21 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 40 | 41 | 42 | 43 | -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/assets/girl.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiangzhongbo/Tengine_Tutorial/4b490907b6012b9bae0bfc1798bce73457dcd19f/3_FaceDetector_Android/Android/app/src/main/assets/girl.jpeg -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/assets/girls.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiangzhongbo/Tengine_Tutorial/4b490907b6012b9bae0bfc1798bce73457dcd19f/3_FaceDetector_Android/Android/app/src/main/assets/girls.jpg -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/assets/version-RFB-320_simplified.tmfile: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/jiangzhongbo/Tengine_Tutorial/4b490907b6012b9bae0bfc1798bce73457dcd19f/3_FaceDetector_Android/Android/app/src/main/assets/version-RFB-320_simplified.tmfile -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/cpp/AndroidLog.h: -------------------------------------------------------------------------------- 1 | #ifndef FACE_ANDROIDLOG_H 2 | #define FACE_ANDROIDLOG_H 3 | 4 | #include 5 | 6 | #define LOG_TAG "JNI_LOG" 7 | #define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG, __VA_ARGS__) 8 | #define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG,LOG_TAG, __VA_ARGS__) 9 | #define LOGW(...) __android_log_print(ANDROID_LOG_WARN,LOG_TAG, __VA_ARGS__) 10 | #define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG, __VA_ARGS__) 11 | #define LOGF(...) __android_log_print(ANDROID_LOG_FATAL,LOG_TAG, __VA_ARGS__) 12 | 13 | class AndroidLog{ 14 | public: 15 | }; 16 | #endif //FACE_ANDROIDLOG_H 17 | -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.10) 2 | project(FaceDetect) 3 | 4 | set ( 5 | CMAKE_CXX_FLAGS 6 | "${CMAKE_CXX_FLAGS} -std=c++11 -fstrict-aliasing -ffast-math -flax-vector-conversions -O1") 7 | set ( 8 | CMAKE_C_FLAGS 9 | "${CMAKE_C_FLAGS} -std=gnu99 -fvisibility=hidden -fomit-frame-pointer -fstrict-aliasing -ffunction-sections -fdata-sections -ffast-math -flax-vector-conversions") 10 | 11 | 12 | set(distribution_DIR ${CMAKE_SOURCE_DIR}/../../../libs/${ANDROID_ABI}) 13 | 14 | set(OpenCV_DIR "D:/Android_ENV/OpenCV-android-sdk/sdk/native/jni") 15 | find_package(OpenCV 3.4 REQUIRED) 16 | 17 | include_directories(tengine/include/) 18 | 
include_directories(D:/Android_ENV/OpenCV-android-sdk/sdk/native/jni/include) 19 | 20 | add_library(tengine SHARED IMPORTED) 21 | set_target_properties(tengine PROPERTIES IMPORTED_LOCATION ${distribution_DIR}/libtengine-lite.so) 22 | 23 | find_library( 24 | log-lib 25 | log ) 26 | 27 | add_library(FaceDetect SHARED UltraFace.cpp UltraFace.hpp FaceDetector.cpp AndroidLog.h) 28 | 29 | 30 | target_link_libraries(FaceDetect tengine ${OpenCV_LIBS} ${log-lib} z jnigraphics) -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/cpp/FaceDetector.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "UltraFace.hpp" 3 | #include "AndroidLog.h" 4 | extern "C"{ 5 | 6 | 7 | jfloatArray faces_to_floats(JNIEnv *env, std::vector &faces){ 8 | jfloatArray jarr = env->NewFloatArray(faces.size() * 5); 9 | jfloat *arr = env->GetFloatArrayElements(jarr, NULL); 10 | for (int i = 0; i < faces.size(); i++) { 11 | arr[5 * i + 0] = faces[i].x1; 12 | arr[5 * i + 1] = faces[i].y1; 13 | arr[5 * i + 2] = faces[i].x2; 14 | arr[5 * i + 3] = faces[i].y2; 15 | arr[5 * i + 4] = faces[i].score; 16 | } 17 | env->ReleaseFloatArrayElements(jarr, arr, 0); 18 | return jarr; 19 | } 20 | 21 | UltraFace *ultraface; 22 | JNIEXPORT void JNICALL 23 | Java_com_facesdk_FaceDetector_init(JNIEnv *env, jclass){ 24 | if(!ultraface){ 25 | ultraface = new UltraFace("/sdcard/OAL/version-RFB-320_simplified.tmfile", 320, 240, 4, 0.65); 26 | } 27 | } 28 | 29 | JNIEXPORT jfloatArray JNICALL 30 | Java_com_facesdk_FaceDetector_detect(JNIEnv *env, jclass, jbyteArray img, jint w, jint h){ 31 | jbyte* arr = env->GetByteArrayElements(img, 0); 32 | cv::Mat frame(h, w, CV_8UC4, (char *)arr); 33 | cv::Mat rgb; 34 | cv::cvtColor(frame, rgb, CV_RGBA2RGB); 35 | std::vector face_info; 36 | ultraface->detect(rgb, face_info); 37 | env->ReleaseByteArrayElements(img, arr, 0); 38 | return faces_to_floats(env, 
face_info); 39 | } 40 | 41 | JNIEXPORT void JNICALL 42 | Java_com_facesdk_FaceDetector_release(JNIEnv *env, jclass){ 43 | if(ultraface){ 44 | delete ultraface; 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/cpp/UltraFace.cpp: -------------------------------------------------------------------------------- 1 | #define clip(x, y) (x < 0 ? 0 : (x > y ? y : x)) 2 | 3 | #include "UltraFace.hpp" 4 | 5 | using namespace std; 6 | 7 | UltraFace::UltraFace(const std::string &tengine_path, 8 | int input_width, int input_length, int num_thread_, 9 | float score_threshold_, float iou_threshold_, int topk_) { 10 | num_thread = num_thread_; 11 | score_threshold = score_threshold_; 12 | iou_threshold = iou_threshold_; 13 | in_w = input_width; 14 | in_h = input_length; 15 | w_h_list = {in_w, in_h}; 16 | 17 | for (auto size : w_h_list) { 18 | std::vector fm_item; 19 | for (float stride : strides) { 20 | fm_item.push_back(ceil(size / stride)); 21 | } 22 | featuremap_size.push_back(fm_item); 23 | } 24 | 25 | for (auto size : w_h_list) { 26 | shrinkage_size.push_back(strides); 27 | } 28 | /* generate prior anchors */ 29 | for (int index = 0; index < num_featuremap; index++) { 30 | float scale_w = in_w / shrinkage_size[0][index]; 31 | float scale_h = in_h / shrinkage_size[1][index]; 32 | for (int j = 0; j < featuremap_size[1][index]; j++) { 33 | for (int i = 0; i < featuremap_size[0][index]; i++) { 34 | float x_center = (i + 0.5) / scale_w; 35 | float y_center = (j + 0.5) / scale_h; 36 | 37 | for (float k : min_boxes[index]) { 38 | float w = k / in_w; 39 | float h = k / in_h; 40 | priors.push_back({clip(x_center, 1), clip(y_center, 1), clip(w, 1), clip(h, 1)}); 41 | } 42 | } 43 | } 44 | } 45 | /* generate prior anchors finished */ 46 | 47 | num_anchors = priors.size(); 48 | 49 | if (init_tengine() != 0) 50 | { 51 | fprintf(stderr, "Initial tengine failed.\n"); 52 | exit(0); 53 | } 54 
| 55 | cout< &face_list) { 92 | if (raw_image.empty()) { 93 | std::cout << "image is empty ,please check!" << std::endl; 94 | return -1; 95 | } 96 | 97 | image_h = raw_image.rows; 98 | image_w = raw_image.cols; 99 | 100 | int img_size = in_w * in_h * 3; 101 | float* input_data = ( float* )malloc(img_size * sizeof(float)); 102 | get_input_data_cv(raw_image, input_data, in_w, in_h, mean_vals, norm_vals, 0); 103 | 104 | if (set_tensor_buffer(input_tensor, input_data, (in_w * in_h * 3) * 4) < 0) 105 | { 106 | printf("Set input tensor buffer failed\n"); 107 | return -1; 108 | } 109 | 110 | auto start = chrono::steady_clock::now(); 111 | 112 | 113 | // run network 114 | if (run_graph(graph, 1) < 0) 115 | { 116 | printf("Run graph failed\n"); 117 | return -1; 118 | } 119 | 120 | // get output data 121 | 122 | string scores = "scores"; 123 | string boxes = "boxes"; 124 | tensor_t tensor_scores = get_graph_tensor(graph, scores.c_str()); 125 | tensor_t tensor_boxes = get_graph_tensor(graph, boxes.c_str()); 126 | 127 | std::vector bbox_collection; 128 | 129 | 130 | auto end = chrono::steady_clock::now(); 131 | chrono::duration elapsed = end - start; 132 | cout << "inference time:" << elapsed.count() << " s" << endl; 133 | 134 | generateBBox(bbox_collection, tensor_scores, tensor_boxes); 135 | nms(bbox_collection, face_list); 136 | 137 | free(input_data); 138 | 139 | return 0; 140 | } 141 | 142 | void UltraFace::generateBBox(std::vector &bbox_collection, tensor_t scores, tensor_t boxes) { 143 | float* scores_blob = ( float* )get_tensor_buffer(scores); 144 | float* boxes_blob = ( float* )get_tensor_buffer(boxes); 145 | for (int i = 0; i < num_anchors; i++) { 146 | if (scores_blob[i * 2 + 1] > score_threshold) { 147 | FaceInfo rects; 148 | float x_center = boxes_blob[i * 4] * center_variance * priors[i][2] + priors[i][0]; 149 | float y_center = boxes_blob[i * 4 + 1] * center_variance * priors[i][3] + priors[i][1]; 150 | float w = exp(boxes_blob[i * 4 + 2] * size_variance) * 
priors[i][2]; 151 | float h = exp(boxes_blob[i * 4 + 3] * size_variance) * priors[i][3]; 152 | 153 | rects.x1 = clip(x_center - w / 2.0, 1) * image_w; 154 | rects.y1 = clip(y_center - h / 2.0, 1) * image_h; 155 | rects.x2 = clip(x_center + w / 2.0, 1) * image_w; 156 | rects.y2 = clip(y_center + h / 2.0, 1) * image_h; 157 | rects.score = clip(scores_blob[i * 2 + 1], 1); 158 | bbox_collection.push_back(rects); 159 | } 160 | } 161 | } 162 | 163 | void UltraFace::nms(std::vector &input, std::vector &output, int type) { 164 | std::sort(input.begin(), input.end(), [](const FaceInfo &a, const FaceInfo &b) { return a.score > b.score; }); 165 | 166 | int box_num = input.size(); 167 | 168 | std::vector merged(box_num, 0); 169 | 170 | for (int i = 0; i < box_num; i++) { 171 | if (merged[i]) 172 | continue; 173 | std::vector buf; 174 | 175 | buf.push_back(input[i]); 176 | merged[i] = 1; 177 | 178 | float h0 = input[i].y2 - input[i].y1 + 1; 179 | float w0 = input[i].x2 - input[i].x1 + 1; 180 | 181 | float area0 = h0 * w0; 182 | 183 | for (int j = i + 1; j < box_num; j++) { 184 | if (merged[j]) 185 | continue; 186 | 187 | float inner_x0 = input[i].x1 > input[j].x1 ? input[i].x1 : input[j].x1; 188 | float inner_y0 = input[i].y1 > input[j].y1 ? input[i].y1 : input[j].y1; 189 | 190 | float inner_x1 = input[i].x2 < input[j].x2 ? input[i].x2 : input[j].x2; 191 | float inner_y1 = input[i].y2 < input[j].y2 ? 
input[i].y2 : input[j].y2; 192 | 193 | float inner_h = inner_y1 - inner_y0 + 1; 194 | float inner_w = inner_x1 - inner_x0 + 1; 195 | 196 | if (inner_h <= 0 || inner_w <= 0) 197 | continue; 198 | 199 | float inner_area = inner_h * inner_w; 200 | 201 | float h1 = input[j].y2 - input[j].y1 + 1; 202 | float w1 = input[j].x2 - input[j].x1 + 1; 203 | 204 | float area1 = h1 * w1; 205 | 206 | float score; 207 | 208 | score = inner_area / (area0 + area1 - inner_area); 209 | 210 | if (score > iou_threshold) { 211 | merged[j] = 1; 212 | buf.push_back(input[j]); 213 | } 214 | } 215 | switch (type) { 216 | case hard_nms: { 217 | output.push_back(buf[0]); 218 | break; 219 | } 220 | case blending_nms: { 221 | float total = 0; 222 | for (int i = 0; i < buf.size(); i++) { 223 | total += exp(buf[i].score); 224 | } 225 | FaceInfo rects; 226 | memset(&rects, 0, sizeof(rects)); 227 | for (int i = 0; i < buf.size(); i++) { 228 | float rate = exp(buf[i].score) / total; 229 | rects.x1 += buf[i].x1 * rate; 230 | rects.y1 += buf[i].y1 * rate; 231 | rects.x2 += buf[i].x2 * rate; 232 | rects.y2 += buf[i].y2 * rate; 233 | rects.score += buf[i].score * rate; 234 | } 235 | output.push_back(rects); 236 | break; 237 | } 238 | default: { 239 | printf("wrong type of nms."); 240 | exit(-1); 241 | } 242 | } 243 | } 244 | } 245 | 246 | void UltraFace::get_input_data_cv(const cv::Mat& sample, float* input_data, int img_w, int img_h, const float* mean, const float* scale, int swapRB) 247 | { 248 | cv::Mat img; 249 | if(sample.channels() == 4) 250 | { 251 | cv::cvtColor(sample, img, cv::COLOR_BGRA2BGR); 252 | } 253 | else if(sample.channels() == 1) 254 | { 255 | cv::cvtColor(sample, img, cv::COLOR_GRAY2BGR); 256 | } 257 | else if(sample.channels() == 3 && swapRB == 1) 258 | { 259 | cv::cvtColor(sample, img, cv::COLOR_BGR2RGB); 260 | } 261 | else 262 | { 263 | img = sample; 264 | } 265 | 266 | cv::resize(img, img, cv::Size(img_w, img_h)); 267 | img.convertTo(img, CV_32FC3); 268 | float* img_data = ( float* 
)img.data; 269 | int hw = img_w * img_h; 270 | for(int w = 0; w < img_w; w++) 271 | { 272 | for(int h = 0; h < img_h; h++) 273 | { 274 | for(int c = 0; c < 3; c++) 275 | { 276 | input_data[c * hw + w * img_h + h] = (*img_data - mean[c]) * scale[c]; 277 | img_data++; 278 | } 279 | } 280 | } 281 | } -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/cpp/UltraFace.hpp: -------------------------------------------------------------------------------- 1 | #ifndef UltraFace_hpp 2 | #define UltraFace_hpp 3 | 4 | #pragma once 5 | 6 | #include "tengine_c_api.h" 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | 15 | #define num_featuremap 4 16 | #define hard_nms 1 17 | #define blending_nms 2 /* mix nms was been proposaled in paper blaze face, aims to minimize the temporal jitter*/ 18 | typedef struct FaceInfo { 19 | float x1; 20 | float y1; 21 | float x2; 22 | float y2; 23 | float score; 24 | 25 | } FaceInfo; 26 | 27 | class UltraFace { 28 | public: 29 | UltraFace(const std::string &tengine_path, 30 | int input_width, int input_length, int num_thread_ = 4, float score_threshold_ = 0.7, float iou_threshold_ = 0.3, 31 | int topk_ = -1); 32 | 33 | ~UltraFace(); 34 | 35 | int detect(cv::Mat &img, std::vector &face_list); 36 | 37 | private: 38 | void generateBBox(std::vector &bbox_collection, tensor_t scores, tensor_t boxes); 39 | 40 | void nms(std::vector &input, std::vector &output, int type = blending_nms); 41 | 42 | void get_input_data_cv(const cv::Mat& sample, float* input_data, int img_h, int img_w, const float* mean, const float* scale, int swapRB = 0); 43 | 44 | private: 45 | 46 | graph_t graph = nullptr; 47 | tensor_t input_tensor = nullptr; 48 | 49 | int num_thread; 50 | int image_w; 51 | int image_h; 52 | 53 | int in_w; 54 | int in_h; 55 | int num_anchors; 56 | 57 | float score_threshold; 58 | float iou_threshold; 59 | 60 | 61 | const float 
mean_vals[3] = {127, 127, 127}; 62 | const float norm_vals[3] = {1.0 / 128, 1.0 / 128, 1.0 / 128}; 63 | 64 | const float center_variance = 0.1; 65 | const float size_variance = 0.2; 66 | const std::vector> min_boxes = { 67 | {10.0f, 16.0f, 24.0f}, 68 | {32.0f, 48.0f}, 69 | {64.0f, 96.0f}, 70 | {128.0f, 192.0f, 256.0f}}; 71 | const std::vector strides = {8.0, 16.0, 32.0, 64.0}; 72 | std::vector> featuremap_size; 73 | std::vector> shrinkage_size; 74 | std::vector w_h_list; 75 | 76 | std::vector> priors = {}; 77 | }; 78 | 79 | #endif /* UltraFace_hpp */ -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/java/com/facesdk/FaceDetector.java: -------------------------------------------------------------------------------- 1 | package com.facesdk; 2 | 3 | import android.util.Log; 4 | 5 | import java.util.ArrayList; 6 | import java.util.List; 7 | 8 | public class FaceDetector { 9 | static { 10 | System.loadLibrary("FaceDetect"); 11 | } 12 | 13 | public static native void init(); 14 | 15 | public static native float[] detect(byte[] img, int w, int h); 16 | 17 | public static native void release(); 18 | 19 | public static List detectByBytes(byte[] img, int w, int h){ 20 | float[] data = detect(img, w, h); 21 | if(data != null && data.length % 5 == 0){ 22 | int num = data.length / 5; 23 | List faceInfos = new ArrayList<>(num); 24 | for(int i = 0; i < num; i++){ 25 | FaceInfo faceInfo = new FaceInfo(); 26 | faceInfo.x1 = data[i * 5 + 0]; 27 | faceInfo.y1 = data[i * 5 + 1]; 28 | faceInfo.x2 = data[i * 5 + 2]; 29 | faceInfo.y2 = data[i * 5 + 3]; 30 | faceInfo.score = data[i * 5 + 4]; 31 | faceInfos.add(faceInfo); 32 | } 33 | return faceInfos; 34 | } 35 | return null; 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/java/com/facesdk/FaceInfo.java: 
-------------------------------------------------------------------------------- 1 | package com.facesdk; 2 | 3 | import android.graphics.Rect; 4 | 5 | public class FaceInfo { 6 | public float x1; 7 | public float y1; 8 | public float x2; 9 | public float y2; 10 | public float score; 11 | 12 | public Rect getRect(){ 13 | return new Rect((int)x1, (int)y1, (int)x2, (int)y2); 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/java/com/facesdk/activity/ClassifierActivity.java: -------------------------------------------------------------------------------- 1 | package com.facesdk.activity; 2 | 3 | import android.graphics.Bitmap; 4 | import android.graphics.Canvas; 5 | import android.graphics.Color; 6 | import android.graphics.Paint; 7 | import android.graphics.Point; 8 | import android.graphics.drawable.BitmapDrawable; 9 | import android.graphics.drawable.Drawable; 10 | import android.os.Bundle; 11 | import android.util.Log; 12 | import android.widget.ImageView; 13 | 14 | import androidx.annotation.Nullable; 15 | import androidx.appcompat.app.AppCompatActivity; 16 | 17 | import com.facesdk.FaceDetector; 18 | import com.facesdk.FaceInfo; 19 | import com.facesdk.R; 20 | import com.facesdk.utils.BitmapUtils; 21 | import com.facesdk.utils.FileUtils; 22 | import com.tenginekit.AndroidConfig; 23 | import com.tenginekit.Face; 24 | import com.tenginekit.model.FaceLandmarkInfo; 25 | import com.tenginekit.model.FaceLandmarkPoint; 26 | 27 | import java.io.ByteArrayOutputStream; 28 | import java.io.InputStream; 29 | import java.nio.Buffer; 30 | import java.nio.ByteBuffer; 31 | import java.util.List; 32 | 33 | 34 | public class ClassifierActivity extends AppCompatActivity { 35 | private static final String TAG = "ClassifierActivity"; 36 | 37 | ImageView showImage; 38 | 39 | List faceLandmarks; 40 | private final Paint circlePaint = new Paint(); 41 | private Paint paint = new Paint(); 42 | 
43 | @Override 44 | protected void onCreate(@Nullable Bundle savedInstanceState) { 45 | super.onCreate(savedInstanceState); 46 | setContentView(R.layout.activity_classifier); 47 | FileUtils.copyAllAssets(this,"sdcard/OAL/"); 48 | 49 | onInit(); 50 | } 51 | 52 | public void onInit() { 53 | 54 | circlePaint.setAntiAlias(true); 55 | circlePaint.setColor(Color.YELLOW); 56 | circlePaint.setStrokeWidth((float) 3); 57 | circlePaint.setStyle(Paint.Style.STROKE); 58 | 59 | paint.setAntiAlias(true); 60 | paint.setColor(Color.RED); 61 | paint.setStrokeWidth((float) 5); 62 | paint.setStyle(Paint.Style.FILL); 63 | 64 | showImage = findViewById(R.id.show_image); 65 | 66 | Drawable d = null; 67 | Bitmap bb = null; 68 | 69 | 70 | try { 71 | d = Drawable.createFromStream(getAssets().open("girls.jpg"), null); 72 | showImage.setImageDrawable(d); 73 | bb = ((BitmapDrawable)d).getBitmap(); 74 | 75 | }catch (Exception e){ 76 | e.printStackTrace(); 77 | } 78 | 79 | 80 | byte[] girl = bitmap2Bytes(bb); 81 | 82 | FaceDetector.init(); 83 | 84 | List faceInfos = FaceDetector.detectByBytes(girl, showImage.getDrawable().getIntrinsicWidth(), showImage.getDrawable().getIntrinsicHeight()); 85 | FaceDetector.release(); 86 | 87 | 88 | 89 | Bitmap out_bitmap = Bitmap.createBitmap( 90 | showImage.getDrawable().getIntrinsicWidth(), 91 | showImage.getDrawable().getIntrinsicHeight(), 92 | Bitmap.Config.ARGB_8888); 93 | 94 | Canvas canvas = new Canvas(out_bitmap); 95 | canvas.drawBitmap(bb, 0,0 , null); 96 | 97 | if(faceInfos != null){ 98 | for(int i = 0; i < faceInfos.size(); i++){ 99 | canvas.drawRect(faceInfos.get(i).getRect(), circlePaint); 100 | } 101 | } 102 | 103 | showImage.setImageBitmap(out_bitmap); 104 | 105 | BitmapUtils.saveBitmap(out_bitmap, "/sdcard/girls.png"); 106 | } 107 | 108 | 109 | @Override 110 | public synchronized void onDestroy() { 111 | super.onDestroy(); 112 | com.tenginekit.Face.release(); 113 | } 114 | 115 | private byte[] bitmap2Bytes(Bitmap image) { 116 | // calculate how 
many bytes our image consists of 117 | int bytes = image.getByteCount(); 118 | ByteBuffer buffer = ByteBuffer.allocate(bytes); // Create a new buffer 119 | image.copyPixelsToBuffer(buffer); // Move the byte data to the buffer 120 | byte[] temp = buffer.array(); // Get the underlying array containing the 121 | return temp; 122 | } 123 | 124 | static private Bitmap bytes2bitmap(byte[] byteArray, int ImageW, int ImageH) { 125 | Bitmap image1 = Bitmap.createBitmap(ImageW,ImageH, Bitmap.Config.ARGB_8888); 126 | ByteBuffer buffer = ByteBuffer.wrap(byteArray); 127 | buffer.get(byteArray); 128 | Buffer temp = buffer.rewind(); 129 | 130 | image1.copyPixelsFromBuffer(temp); 131 | return image1; 132 | } 133 | } -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/java/com/facesdk/activity/MainActivity.java: -------------------------------------------------------------------------------- 1 | package com.facesdk.activity; 2 | 3 | import android.content.Intent; 4 | import android.content.pm.PackageManager; 5 | import android.os.Bundle; 6 | import android.view.View; 7 | import android.widget.ImageView; 8 | 9 | import androidx.annotation.Nullable; 10 | import androidx.appcompat.app.AppCompatActivity; 11 | 12 | import com.facesdk.R; 13 | import com.facesdk.utils.PermissionUtils; 14 | 15 | public class MainActivity extends AppCompatActivity implements View.OnClickListener{ 16 | 17 | public static String TAG = "MainActivity"; 18 | private ImageView mEffectVideo; 19 | 20 | @Override 21 | protected void onCreate(@Nullable Bundle savedInstanceState) { 22 | super.onCreate(savedInstanceState); 23 | 24 | setContentView(R.layout.activity_main); 25 | findViewById(R.id.detect).setOnClickListener(this); 26 | } 27 | 28 | @Override 29 | public void onClick(View v) { 30 | switch (v.getId()){ 31 | case R.id.detect: 32 | startVideoWithFaceDetected(); 33 | break; 34 | } 35 | } 36 | 37 | private void startVideoWithFaceDetected() 
{ 38 | PermissionUtils.checkPermission(this, new Runnable() { 39 | @Override 40 | public void run() { 41 | jumpToCameraActivity(); 42 | } 43 | }); 44 | } 45 | 46 | public void jumpToCameraActivity() 47 | { 48 | Intent intent = new Intent(MainActivity.this, ClassifierActivity.class); 49 | startActivity(intent); 50 | } 51 | 52 | @Override 53 | public void onRequestPermissionsResult(final int requestCode, final String[] permissions, final int[] grantResults) { 54 | if (requestCode == 1) { 55 | if (grantResults.length > 0 56 | && grantResults[0] == PackageManager.PERMISSION_GRANTED 57 | && grantResults[1] == PackageManager.PERMISSION_GRANTED) { 58 | jumpToCameraActivity(); 59 | } else { 60 | startVideoWithFaceDetected(); 61 | } 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/java/com/facesdk/utils/FaceUtils.java: -------------------------------------------------------------------------------- 1 | package com.facesdk.utils; 2 | 3 | import android.graphics.Path; 4 | 5 | import com.tenginekit.model.FaceLandmarkInfo; 6 | 7 | import java.util.ArrayList; 8 | import java.util.List; 9 | 10 | public class FaceUtils { 11 | int[][] triangles = { 12 | {1,160,4}, 13 | }; 14 | 15 | public static List getFaceTriangles(FaceLandmarkInfo fi){ 16 | List paths = new ArrayList<>(); 17 | Path path = new Path(); 18 | path.moveTo( 19 | fi.landmarks.get(0).X, 20 | fi.landmarks.get(0).Y 21 | ); 22 | path.lineTo( 23 | fi.landmarks.get(159).X, 24 | fi.landmarks.get(159).Y 25 | ); 26 | path.lineTo( 27 | fi.landmarks.get(3).X, 28 | fi.landmarks.get(3).Y 29 | ); 30 | path.close(); 31 | paths.add(path); 32 | return paths; 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/java/com/facesdk/utils/FileUtils.java: -------------------------------------------------------------------------------- 1 | package 
com.facesdk.utils; 2 | 3 | import android.content.Context; 4 | 5 | import java.io.File; 6 | import java.io.FileOutputStream; 7 | import java.io.InputStream; 8 | import java.io.RandomAccessFile; 9 | 10 | public class FileUtils { 11 | public static void copyAllAssets(Context context, String destination) { 12 | copyAssetsToDst(context, "", destination); 13 | } 14 | 15 | /** 16 | * @param context :application context 17 | * @param srcPath :the path of source file 18 | * @param dstPath :the path of destination 19 | */ 20 | private static void copyAssetsToDst(Context context, String srcPath, String dstPath) { 21 | try { 22 | String fileNames[] = context.getAssets().list(srcPath); 23 | if (fileNames.length > 0) { 24 | File file = new File(dstPath); 25 | file.mkdirs(); 26 | for (String fileName : fileNames) { 27 | if (srcPath != "") { 28 | copyAssetsToDst(context, srcPath + "/" + fileName, dstPath + "/" + fileName); 29 | } else { 30 | copyAssetsToDst(context, fileName, dstPath + "/" + fileName); 31 | } 32 | } 33 | } else { 34 | InputStream is = context.getAssets().open(srcPath); 35 | FileOutputStream fos = new FileOutputStream(new File(dstPath)); 36 | byte[] buffer = new byte[1024]; 37 | int byteCount = 0; 38 | while ((byteCount = is.read(buffer)) != -1) { 39 | fos.write(buffer, 0, byteCount); 40 | } 41 | fos.flush();//刷新缓冲区 42 | is.close(); 43 | fos.close(); 44 | } 45 | } catch (Exception e) { 46 | // TODO Auto-generated catch block 47 | e.printStackTrace(); 48 | } 49 | } 50 | 51 | public static void writeData(String filePath, String fileName, String content) { 52 | writeTxtToFile(content, filePath, fileName); 53 | } 54 | 55 | //写文件 56 | private static void writeTxtToFile(String content, String filePath, String fileName) { 57 | makeFilePath(filePath, fileName); 58 | 59 | String strFilePath = filePath + fileName; 60 | String strContent = content + "\r\n"; 61 | try { 62 | File file = new File(strFilePath); 63 | if (!file.exists()) { 64 | file.getParentFile().mkdirs(); 65 | 
file.createNewFile(); 66 | } 67 | RandomAccessFile raf = new RandomAccessFile(file, "rwd"); 68 | raf.seek(file.length()); 69 | raf.write(strContent.getBytes()); 70 | raf.close(); 71 | } catch (Exception e) { 72 | } 73 | } 74 | 75 | //创建文件 76 | private static File makeFilePath(String filePath, String fileName) { 77 | File file = null; 78 | makeRootDirectory(filePath); 79 | try { 80 | file = new File(filePath + fileName); 81 | if (!file.exists()) { 82 | file.createNewFile(); 83 | } 84 | } catch (Exception e) { 85 | e.printStackTrace(); 86 | } 87 | return file; 88 | } 89 | 90 | //创建文件夹 91 | private static void makeRootDirectory(String filePath) { 92 | File file = null; 93 | try { 94 | file = new File(filePath); 95 | if (!file.exists()) { 96 | file.mkdir(); 97 | } 98 | } catch (Exception e) { 99 | } 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/java/com/facesdk/utils/ImageUtils.java: -------------------------------------------------------------------------------- 1 | package com.facesdk.utils; 2 | 3 | public class ImageUtils { 4 | public static int getYUVByteSize(final int width, final int height) { 5 | // The luminance plane requires 1 byte per pixel. 6 | final int ySize = width * height; 7 | 8 | // The UV plane works on 2x2 blocks, so dimensions with odd size must be rounded up. 9 | // Each 2x2 block takes 2 bytes to encode, one each for U and V. 
10 | final int uvSize = ((width + 1) / 2) * ((height + 1) / 2) * 2; 11 | 12 | return ySize + uvSize; 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/java/com/facesdk/utils/MagnifyEyeUtils.java: -------------------------------------------------------------------------------- 1 | package com.facesdk.utils; 2 | 3 | import android.graphics.Bitmap; 4 | import android.graphics.Point; 5 | 6 | 7 | /** 8 | * @author by dingdegao 9 | * time 2017/9/29 16:03 10 | * function: 眼睛放大 11 | */ 12 | 13 | public class MagnifyEyeUtils { 14 | /** 15 | * 眼睛放大算法 16 | * @param bitmap 原来的bitmap 17 | * @param centerPoint 放大中心点 18 | * @param radius 放大半径 19 | * @param sizeLevel 放大力度 [0,4] 20 | * @return 放大眼睛后的图片 21 | */ 22 | public static Bitmap magnifyEye(Bitmap bitmap, Point centerPoint, int radius, float sizeLevel) { 23 | Bitmap dstBitmap = bitmap.copy(Bitmap.Config.RGB_565, true); 24 | int left = centerPoint.x - radius < 0 ? 0 : centerPoint.x - radius; 25 | int top = centerPoint.y - radius < 0 ? 0 : centerPoint.y - radius; 26 | int right = centerPoint.x + radius > bitmap.getWidth() ? bitmap.getWidth() - 1 : centerPoint.x + radius; 27 | int bottom = centerPoint.y + radius > bitmap.getHeight() ? 
bitmap.getHeight() - 1 : centerPoint.y + radius; 28 | int powRadius = radius * radius; 29 | 30 | int offsetX, offsetY, powDistance, powOffsetX, powOffsetY; 31 | 32 | int disX, disY; 33 | 34 | //当为负数时,为缩小 35 | float strength = (5 + sizeLevel * 2) / 10; 36 | 37 | for (int i = top; i <= bottom; i++) { 38 | offsetY = i - centerPoint.y; 39 | for (int j = left; j <= right; j++) { 40 | offsetX = j - centerPoint.x; 41 | powOffsetX = offsetX * offsetX; 42 | powOffsetY = offsetY * offsetY; 43 | powDistance = powOffsetX + powOffsetY; 44 | 45 | if (powDistance <= powRadius) { 46 | double distance = Math.sqrt(powDistance); 47 | double sinA = offsetX / distance; 48 | double cosA = offsetY / distance; 49 | 50 | double scaleFactor = distance / radius - 1; 51 | scaleFactor = (1 - scaleFactor * scaleFactor * (distance / radius) * strength); 52 | 53 | distance = distance * scaleFactor; 54 | disY = (int) (distance * cosA + centerPoint.y + 0.5); 55 | disY = checkY(disY, bitmap); 56 | disX = (int) (distance * sinA + centerPoint.x + 0.5); 57 | disX = checkX(disX, bitmap); 58 | //中心点不做处理 59 | if (!(j == centerPoint.x && i == centerPoint.y)) { 60 | dstBitmap.setPixel(j, i, bitmap.getPixel(disX, disY)); 61 | //dstBitmap.setPixel(j, i, Color.WHITE); 62 | } 63 | } 64 | } 65 | } 66 | return dstBitmap; 67 | } 68 | 69 | private static int checkY(int disY, Bitmap bitmap) { 70 | if (disY < 0) { 71 | disY = 0; 72 | } else if (disY >= bitmap.getHeight()) { 73 | disY = bitmap.getHeight() - 1; 74 | } 75 | return disY; 76 | } 77 | 78 | private static int checkX(int disX, Bitmap bitmap) { 79 | if (disX < 0) { 80 | disX = 0; 81 | } else if (disX >= bitmap.getWidth()) { 82 | disX = bitmap.getWidth() - 1; 83 | } 84 | return disX; 85 | } 86 | 87 | } 88 | -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/java/com/facesdk/utils/PermissionUtils.java: -------------------------------------------------------------------------------- 1 | 
package com.facesdk.utils; 2 | 3 | import android.Manifest; 4 | import android.app.Activity; 5 | import android.content.pm.PackageManager; 6 | import android.os.Build; 7 | 8 | import androidx.core.app.ActivityCompat; 9 | 10 | import java.util.ArrayList; 11 | import java.util.List; 12 | 13 | public class PermissionUtils { 14 | /** 15 | * 权限列表 16 | */ 17 | private static String[] permissionList = new String[]{ 18 | Manifest.permission.WRITE_EXTERNAL_STORAGE, 19 | Manifest.permission.READ_PHONE_STATE 20 | }; 21 | /*** 22 | * 权限请求结果code 23 | */ 24 | public static final int PERMISSIONS_REQUEST = 1; 25 | 26 | public static boolean checkPermission(Activity mActivity, Runnable callback) { 27 | List needPermission = new ArrayList<>(); 28 | if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) { 29 | for (int i = 0; i < permissionList.length; i++) { 30 | if (mActivity.checkSelfPermission(permissionList[i]) != PackageManager.PERMISSION_GRANTED) { 31 | needPermission.add(permissionList[i]); 32 | } 33 | } 34 | if (!needPermission.isEmpty()) { 35 | String[] permissions = needPermission.toArray(new String[needPermission.size()]); 36 | ActivityCompat.requestPermissions(mActivity,permissions,1); 37 | return false; 38 | } 39 | callback.run(); 40 | return true; 41 | } else { 42 | callback.run(); 43 | return true; 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/java/com/facesdk/utils/SensorEventUtil.java: -------------------------------------------------------------------------------- 1 | package com.facesdk.utils; 2 | 3 | import android.content.Context; 4 | import android.hardware.Sensor; 5 | import android.hardware.SensorEvent; 6 | import android.hardware.SensorEventListener; 7 | import android.hardware.SensorManager; 8 | import android.util.Log; 9 | 10 | public class SensorEventUtil implements SensorEventListener { 11 | private SensorManager mSensorManager; 12 | private Sensor 
mSensor; 13 | 14 | public int orientation = 0; 15 | 16 | public SensorEventUtil(Context context) { 17 | mSensorManager = (SensorManager) context.getSystemService(Context.SENSOR_SERVICE); 18 | mSensor = mSensorManager.getDefaultSensor(Sensor.TYPE_ACCELEROMETER);// TYPE_GRAVITY 19 | // 参数三,检测的精准度 20 | mSensorManager.registerListener(this, mSensor, SensorManager.SENSOR_DELAY_NORMAL);// SENSOR_DELAY_GAME 21 | } 22 | 23 | @Override 24 | public void onAccuracyChanged(Sensor sensor, int accuracy) { 25 | 26 | } 27 | 28 | @Override 29 | public void onSensorChanged(SensorEvent event) { 30 | final double G = 9.81; 31 | final double SQRT2 = 1.414213; 32 | if (event.sensor == null) { 33 | return; 34 | } 35 | 36 | if (event.sensor.getType() == Sensor.TYPE_ACCELEROMETER) { 37 | float x = event.values[0]; 38 | float y = event.values[1]; 39 | float z = event.values[2]; 40 | if (z >= G / SQRT2) { //screen is more likely lying on the table 41 | if (x >= G / 2) { 42 | orientation = 1; 43 | } else if (x <= -G / 2) { 44 | orientation = 2; 45 | } else if (y <= -G / 2) { 46 | orientation = 3; 47 | } else { 48 | orientation = 0; 49 | } 50 | } else { 51 | if (x >= G / SQRT2) { 52 | orientation = 1; 53 | } else if (x <= -G / SQRT2) { 54 | orientation = 2; 55 | } else if (y <= -G / SQRT2) { 56 | orientation = 3; 57 | } else { 58 | orientation = 0; 59 | } 60 | } 61 | } 62 | Log.d("@@@@", "onSensorChanged: " + orientation); 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/java/com/facesdk/utils/SmallFaceUtils.java: -------------------------------------------------------------------------------- 1 | package com.facesdk.utils; 2 | 3 | import android.graphics.Bitmap; 4 | import android.graphics.Canvas; 5 | import android.graphics.Paint; 6 | import android.graphics.Point; 7 | 8 | import java.util.List; 9 | 10 | /** 11 | * @author by dingdegao 12 | * function:瘦脸功能 13 | */ 14 | 15 | public class 
SmallFaceUtils { 16 | 17 | private static final int WIDTH = 200; 18 | private static final int HEIGHT = 200; 19 | 20 | /** 21 | * 瘦脸算法 22 | * 23 | * @param bitmap 原来的bitmap 24 | * @return 之后的图片 25 | */ 26 | public static Bitmap smallFaceMesh(Bitmap bitmap, List leftFacePoint,List rightFacePoint,Point centerPoint, int level) { 27 | 28 | //交点坐标的个数 29 | int COUNT = (WIDTH + 1) * (HEIGHT + 1); 30 | 31 | //用于保存COUNT的坐标 32 | float[] verts = new float[COUNT * 2]; 33 | 34 | 35 | float bmWidth = bitmap.getWidth(); 36 | float bmHeight = bitmap.getHeight(); 37 | 38 | int index = 0; 39 | for (int i = 0; i < HEIGHT + 1; i++) { 40 | float fy = bmHeight * i / HEIGHT; 41 | for (int j = 0; j < WIDTH + 1; j++) { 42 | float fx = bmWidth * j / WIDTH; 43 | //X轴坐标 放在偶数位 44 | verts[index * 2] = fx; 45 | //Y轴坐标 放在奇数位 46 | verts[index * 2 + 1] = fy; 47 | index += 1; 48 | } 49 | } 50 | int r = 180 + 15 * level; 51 | // warp(COUNT,verts,leftFacePoint.get(4).x,leftFacePoint.get(4).y,centerPoint.x,centerPoint.y,r); 52 | warp(COUNT,verts,leftFacePoint.get(15).x,leftFacePoint.get(15).y,centerPoint.x,centerPoint.y,r); 53 | 54 | // warp(COUNT,verts,rightFacePoint.get(4).x,rightFacePoint.get(4).y,centerPoint.x,centerPoint.y,r); 55 | warp(COUNT,verts,rightFacePoint.get(15).x,rightFacePoint.get(15).y,centerPoint.x,centerPoint.y,r); 56 | 57 | Bitmap resultBitmap = Bitmap.createBitmap(bitmap.getWidth(),bitmap.getHeight(), Bitmap.Config.ARGB_8888); 58 | Canvas canvas = new Canvas(resultBitmap); 59 | Paint paint = new Paint(); 60 | // canvas.drawBitmap(bitmap,0,0,paint); 61 | // paint.setColor(Color.RED); 62 | // canvas.drawCircle(leftFacePoint.get(16).x,leftFacePoint.get(16).y,3,paint); 63 | // canvas.drawCircle(leftFacePoint.get(46).x,leftFacePoint.get(46).y,3,paint); 64 | // canvas.drawCircle(rightFacePoint.get(16).x,rightFacePoint.get(16).y,3,paint); 65 | // canvas.drawCircle(rightFacePoint.get(46).x,rightFacePoint.get(46).y,3,paint); 66 | // canvas.drawCircle(centerPoint.x,centerPoint.y,3,paint); 67 
| canvas.drawBitmapMesh(bitmap,WIDTH, HEIGHT,verts,0,null,0,null); 68 | return resultBitmap; 69 | } 70 | 71 | private static void warp(int COUNT,float verts[],float startX, float startY, float endX, float endY,int r) { 72 | //level [0,4] 73 | 74 | //int r = 200; default 200 75 | 76 | //计算拖动距离 77 | float ddPull = (endX - startX) * (endX - startX) + (endY - startY) * (endY - startY); 78 | float dPull = (float) Math.sqrt(ddPull); 79 | //dPull = screenWidth - dPull >= 0.0001f ? screenWidth - dPull : 0.0001f; 80 | if(dPull < 2 * r){ 81 | dPull = 2 * r; 82 | } 83 | 84 | int powR = r * r; 85 | int index = 0; 86 | int offset = 1; 87 | for (int i = 0; i < HEIGHT + 1; i++) { 88 | for (int j = 0; j < WIDTH + 1; j++) { 89 | //边界区域不处理 90 | if(i < offset || i > HEIGHT - offset || j < offset || j > WIDTH - offset){ 91 | index = index + 1; 92 | continue; 93 | } 94 | //计算每个坐标点与触摸点之间的距离 95 | float dx = verts[index * 2] - startX; 96 | float dy = verts[index * 2 + 1] - startY; 97 | float dd = dx * dx + dy * dy; 98 | 99 | if (dd < powR) { 100 | //变形系数,扭曲度 101 | double e = (powR - dd) * (powR - dd) / ((powR - dd + dPull * dPull) * (powR - dd + dPull * dPull)); 102 | double pullX = e * (endX - startX); 103 | double pullY = e * (endY - startY); 104 | verts[index * 2] = (float) (verts[index * 2] + pullX); 105 | verts[index * 2 + 1] = (float) (verts[index * 2 + 1] + pullY); 106 | } 107 | else{ 108 | double e = 10; 109 | double pullX = e * (endX - startX); 110 | double pullY = e * (endY - startY); 111 | verts[index * 2] = (float) (verts[index * 2] + pullX); 112 | verts[index * 2 + 1] = (float) (verts[index * 2 + 1] + pullY); 113 | } 114 | index = index + 1; 115 | } 116 | } 117 | } 118 | 119 | } 120 | -------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/res/layout/activity_classifier.xml: -------------------------------------------------------------------------------- 1 | 7 | 8 | 13 | 14 | 21 | 22 | 23 | 24 | 
-------------------------------------------------------------------------------- /3_FaceDetector_Android/Android/app/src/main/res/layout/activity_main.xml: -------------------------------------------------------------------------------- 1 | 2 | 7 | 8 |