├── .gitignore ├── CMakeLists.txt ├── COMPILE.md ├── ChangeLog ├── LICENSE ├── README.md ├── aarch64-toolchain.cmake ├── build_android.sh ├── example ├── benchmark.cpp ├── detect-camera.cpp └── detect-image.cpp ├── images └── cnnresult.png ├── mobile ├── Android │ ├── FaceDetection │ │ ├── app │ │ │ ├── app.iml │ │ │ ├── build.gradle │ │ │ ├── proguard-rules.pro │ │ │ └── src │ │ │ │ ├── androidTest │ │ │ │ └── java │ │ │ │ │ └── org │ │ │ │ │ └── dp │ │ │ │ │ └── facedetection │ │ │ │ │ └── ExampleInstrumentedTest.kt │ │ │ │ ├── main │ │ │ │ ├── AndroidManifest.xml │ │ │ │ ├── assets │ │ │ │ │ ├── .DS_Store │ │ │ │ │ ├── test.jpg │ │ │ │ │ └── test2.jpg │ │ │ │ ├── cpp │ │ │ │ │ ├── CMakeLists.txt │ │ │ │ │ └── facedetectcnn-jni.cpp │ │ │ │ ├── java │ │ │ │ │ └── org │ │ │ │ │ │ └── dp │ │ │ │ │ │ ├── .DS_Store │ │ │ │ │ │ └── facedetection │ │ │ │ │ │ ├── Face.java │ │ │ │ │ │ └── MainActivity.kt │ │ │ │ └── res │ │ │ │ │ ├── .DS_Store │ │ │ │ │ ├── drawable-v24 │ │ │ │ │ └── ic_launcher_foreground.xml │ │ │ │ │ ├── drawable │ │ │ │ │ ├── .DS_Store │ │ │ │ │ └── ic_launcher_background.xml │ │ │ │ │ ├── layout │ │ │ │ │ └── activity_main.xml │ │ │ │ │ ├── mipmap-anydpi-v26 │ │ │ │ │ ├── ic_launcher.xml │ │ │ │ │ └── ic_launcher_round.xml │ │ │ │ │ ├── mipmap-hdpi │ │ │ │ │ ├── ic_launcher.png │ │ │ │ │ └── ic_launcher_round.png │ │ │ │ │ ├── mipmap-mdpi │ │ │ │ │ ├── ic_launcher.png │ │ │ │ │ └── ic_launcher_round.png │ │ │ │ │ ├── mipmap-xhdpi │ │ │ │ │ ├── ic_launcher.png │ │ │ │ │ └── ic_launcher_round.png │ │ │ │ │ ├── mipmap-xxhdpi │ │ │ │ │ ├── ic_launcher.png │ │ │ │ │ └── ic_launcher_round.png │ │ │ │ │ ├── mipmap-xxxhdpi │ │ │ │ │ ├── ic_launcher.png │ │ │ │ │ └── ic_launcher_round.png │ │ │ │ │ └── values │ │ │ │ │ ├── colors.xml │ │ │ │ │ ├── strings.xml │ │ │ │ │ └── styles.xml │ │ │ │ └── test │ │ │ │ └── java │ │ │ │ └── org │ │ │ │ └── dp │ │ │ │ └── facedetection │ │ │ │ └── ExampleUnitTest.kt │ │ ├── build.gradle │ │ ├── gradle.properties │ │ ├── 
gradle │ │ │ ├── .DS_Store │ │ │ └── wrapper │ │ │ │ ├── gradle-wrapper.jar │ │ │ │ └── gradle-wrapper.properties │ │ ├── gradlew │ │ ├── gradlew.bat │ │ ├── local.properties │ │ ├── release │ │ │ ├── .DS_Store │ │ │ └── facedetection.apk │ │ └── settings.gradle │ ├── screenshot1.jpg │ └── screenshot2.jpg ├── README.md └── iOS │ ├── FaceDetection.xcodeproj │ ├── project.pbxproj │ ├── project.xcworkspace │ │ ├── contents.xcworkspacedata │ │ ├── xcshareddata │ │ │ └── IDEWorkspaceChecks.plist │ │ └── xcuserdata │ │ │ └── Robin.xcuserdatad │ │ │ └── UserInterfaceState.xcuserstate │ └── xcuserdata │ │ └── Robin.xcuserdatad │ │ └── xcschemes │ │ └── xcschememanagement.plist │ ├── FaceDetection │ ├── AppDelegate.h │ ├── AppDelegate.m │ ├── Assets.xcassets │ │ ├── AppIcon.appiconset │ │ │ └── Contents.json │ │ └── Contents.json │ ├── Base.lproj │ │ ├── LaunchScreen.storyboard │ │ └── Main.storyboard │ ├── Info.plist │ ├── ViewController.h │ ├── ViewController.mm │ ├── main.m │ └── test.jpg │ ├── screenshot1.png │ └── screenshot2.png ├── opencv_dnn ├── README.md ├── cpp │ ├── CMakeLists.txt │ └── detect.cpp └── python │ └── detect.py ├── src ├── facedetectcnn-data.cpp ├── facedetectcnn-model.cpp ├── facedetectcnn.cpp └── facedetectcnn.h └── wu-thesis-facedetect.pdf /.gitignore: -------------------------------------------------------------------------------- 1 | model/ 2 | *.swp 3 | Makefile.Debug 4 | Makefile.Release 5 | Version.h 6 | Update/Update_*.xml 7 | *.bak 8 | *~ 9 | *.autosave 10 | *.qm 11 | *.user 12 | test.xml 13 | .CCodec.h.kate-swp 14 | .kdev4/ 15 | .kdev_include_paths 16 | .vs/ 17 | CMakeSettings.json 18 | build_ 19 | modul/ 20 | build-*/ 21 | build/ 22 | build_android/ 23 | build_unix_mingw/ 24 | build_windows_mingw/ 25 | build_unix/ 26 | build_windows_msvc/ 27 | *.ncb 28 | .metadata/ 29 | debug/ 30 | bin/ 31 | *.class 32 | .deps/ 33 | Makefile.in 34 | aclocal.m4 35 | config.guess 36 | config.h 37 | config.h.in 38 | config.h.in~ 39 | config.log 40 | 
config.status 41 | config.sub 42 | configure 43 | depcomp 44 | install-sh 45 | libtool 46 | ltmain.sh 47 | missing 48 | reachmonitor 49 | stamp-h1 50 | .deps/ 51 | Makefile.in 52 | aclocal.m4 53 | config.guess 54 | config.h 55 | config.h.in 56 | config.h.in~ 57 | config.log 58 | config.status 59 | config.sub 60 | configure 61 | depcomp 62 | install-sh 63 | libtool 64 | ltmain.sh 65 | missing 66 | stamp-h1 67 | *.bak 68 | *.bs 69 | *.la 70 | *.lo 71 | *.ft 72 | *.ft.1 73 | *.made 74 | *.o 75 | *.obj 76 | *.old 77 | *.orig 78 | *.out 79 | *.pdb 80 | *.rej 81 | .libs/ 82 | Makefile 83 | *.cdf 84 | *.cache 85 | *.obj 86 | *.ilk 87 | *.resources 88 | *.tlb 89 | *.tli 90 | *.tlh 91 | *.tmp 92 | *.rsp 93 | *.pgc 94 | *.pgd 95 | *.meta 96 | *.tlog 97 | *.manifest 98 | *.res 99 | *.pch 100 | *.exp 101 | *.idb 102 | *.rep 103 | *.xdc 104 | *.pdb 105 | *_manifest.rc 106 | *.bsc 107 | *.sbr 108 | *.opensdf 109 | *.sdf 110 | *.suo 111 | Debug/ 112 | release/ 113 | Release/ 114 | ipch/ 115 | rabbitim.kdev4 116 | *.pro.user.* 117 | Doxygen/ 118 | Doxyfile 119 | android/local.properties 120 | android/gradlew.* 121 | android/gradle.properties 122 | *.iml 123 | 124 | # Compiled Object files 125 | *.slo 126 | *.lo 127 | *.o 128 | *.obj 129 | 130 | # Precompiled Headers 131 | *.gch 132 | *.pch 133 | 134 | # Compiled Dynamic libraries 135 | *.so 136 | *.dylib 137 | *.dll 138 | 139 | # Fortran module files 140 | *.mod 141 | 142 | # Compiled Static libraries 143 | *.lai 144 | *.la 145 | *.a 146 | *.lib 147 | 148 | # Executables 149 | *.exe 150 | *.out 151 | *.app 152 | 153 | # Platform Specifics - auto generated files 154 | PlatformSpecifics/Windows/*.rc 155 | 156 | # Visual studio - project files 157 | *.sln 158 | *.suo 159 | *.vcxproj 160 | *.vcxproj.filters 161 | *.vcxproj.user 162 | 163 | # Visual Studio - Build Results 164 | [Dd]ebug/ 165 | [Rr]elease/ 166 | [Mm]in[Ss]ize[Rr]el/ 167 | [Rr]el[Ww]ith[Dd]eb[Ii]nfo/ 168 | 169 | # Visual Studio - Browsing Database File 170 | *.sdf 171 | 
*.opensdf 172 | 173 | #osx xcode 174 | DerivedData/ 175 | *.DS_Store 176 | *.build 177 | *.xcodeproj 178 | 179 | #CPACK related files 180 | CPackConfig-*.cmake 181 | _CPack_Packages/ 182 | 183 | #packages 184 | *.tar.gz 185 | *.zip 186 | 187 | android/.gradle/ 188 | android/.idea/ 189 | android/android.iml 190 | android/gradle/ 191 | android/gradlew 192 | 193 | **/__pycache__ 194 | .vscode -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # CMakeLists for libfacedetectcnn 2 | 3 | project(libfacedetection) 4 | 5 | cmake_minimum_required(VERSION 2.8.12) 6 | 7 | option(ENABLE_NEON "whether use neon, if use arm please set it on" OFF) 8 | option(ENABLE_AVX512 "use avx512" OFF) 9 | option(ENABLE_AVX2 "use avx2" ON) 10 | option(DEMO "build the demo" OFF) 11 | option(USE_OPENMP "Use OpenMP" ON) 12 | 13 | if (BUILD_SHARED_LIBS) 14 | add_definitions(-DBUILD_SHARED_LIBS) 15 | if (CMAKE_COMPILER_IS_GNUCXX AND NOT MINGW) 16 | # Just setting CMAKE_POSITION_INDEPENDENT_CODE should be enough to set 17 | # -fPIC for GCC but sometimes it still doesn't get set, so make sure it 18 | # does. 
19 | add_definitions("-fPIC") 20 | endif() 21 | set(CMAKE_POSITION_INDEPENDENT_CODE ON) 22 | endif() 23 | 24 | SET(BUILD_VERSION "v0.0.3") 25 | # Find Git Version Patch 26 | IF(EXISTS "${CMAKE_SOURCE_DIR}/.git") 27 | if(NOT GIT) 28 | SET(GIT $ENV{GIT}) 29 | endif() 30 | if(NOT GIT) 31 | FIND_PROGRAM(GIT NAMES git git.exe git.cmd) 32 | endif() 33 | IF(GIT) 34 | EXECUTE_PROCESS( 35 | WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} 36 | COMMAND ${GIT} describe --tags 37 | OUTPUT_VARIABLE GIT_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE 38 | ) 39 | if(NOT GIT_VERSION) 40 | EXECUTE_PROCESS( 41 | WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} 42 | COMMAND ${GIT} rev-parse --short HEAD 43 | OUTPUT_VARIABLE GIT_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE 44 | ) 45 | endif() 46 | IF(NOT GIT_VERSION) 47 | SET(BUILD_VERSION ${GIT_VERSION}) 48 | ENDIF() 49 | ENDIF() 50 | ENDIF() 51 | message("BUILD_VERSION:${BUILD_VERSION}") 52 | 53 | SET(fdt_base_dir ${PROJECT_SOURCE_DIR}) 54 | SET(fdt_src_dir ${fdt_base_dir}/src) 55 | SET(fdt_inc_dir ${fdt_base_dir}/src) 56 | 57 | SET(fdt_lib_name facedetection) 58 | 59 | FILE(GLOB_RECURSE fdt_source_files ${fdt_src_dir}/*.cpp) 60 | LIST(SORT fdt_source_files) 61 | 62 | SET(INSTALLHEADER_FILES ${fdt_inc_dir}/facedetectcnn.h) 63 | 64 | set(CMAKE_CXX_STANDARD 11) 65 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 66 | set(CMAKE_CXX_EXTENSIONS OFF) 67 | 68 | IF(MSVC) 69 | # This option is to enable the /MP switch for Visual Studio 2005 and above compilers 70 | OPTION(WIN32_USE_MP "Set to ON to build with the /MP option (Visual Studio 2005 and above)." 
ON) 71 | IF(WIN32_USE_MP) 72 | #SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP") 73 | add_compile_options(/MP) 74 | ENDIF(WIN32_USE_MP) 75 | add_compile_options("$<$:/utf-8>") 76 | add_compile_options("$<$:/utf-8>") 77 | ENDIF(MSVC) 78 | 79 | IF(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" 80 | OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang") 81 | #use -O3 to speedup 82 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3") 83 | ENDIF() 84 | 85 | if(ENABLE_AVX512) 86 | add_definitions(-D_ENABLE_AVX512) 87 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512bw") 88 | endif() 89 | 90 | if(ENABLE_AVX2) 91 | add_definitions(-D_ENABLE_AVX2) 92 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx2 -mfma") 93 | endif() 94 | 95 | if(ENABLE_NEON) 96 | message("Using ENON") 97 | add_definitions(-D_ENABLE_NEON) 98 | endif() 99 | 100 | if(USE_OPENMP) 101 | FIND_PACKAGE(OpenMP) 102 | if(OPENMP_FOUND) 103 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") 104 | set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}") 105 | endif() 106 | endif() 107 | 108 | INCLUDE_DIRECTORIES(${fdt_inc_dir}) 109 | 110 | include(CMakePackageConfigHelpers) 111 | include(GenerateExportHeader) 112 | include(GNUInstallDirs) 113 | 114 | # Create a library 115 | ADD_LIBRARY(${fdt_lib_name} ${fdt_source_files} ${INSTALLHEADER_FILES}) 116 | 117 | # Generate export header 118 | GENERATE_EXPORT_HEADER(${fdt_lib_name}) 119 | string(TOLOWER ${fdt_lib_name} LOWER_PROJECT_NAME) 120 | set(INSTALLHEADER_FILES ${INSTALLHEADER_FILES} 121 | ${CMAKE_CURRENT_BINARY_DIR}/${LOWER_PROJECT_NAME}_export.h) 122 | file(COPY ${CMAKE_CURRENT_BINARY_DIR}/${LOWER_PROJECT_NAME}_export.h 123 | DESTINATION ${CMAKE_BINARY_DIR}) 124 | 125 | include_directories(${fdt_lib_name} ${CMAKE_BINARY_DIR}) 126 | 127 | set_target_properties(${fdt_lib_name} PROPERTIES 128 | PUBLIC_HEADER "${INSTALLHEADER_FILES}" # Install head files 129 | VERSION ${BUILD_VERSION} 130 | ) 131 | 132 | # Install target 133 | INSTALL(TARGETS ${fdt_lib_name} 
134 | EXPORT ${fdt_lib_name}Config 135 | RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}" 136 | LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}" 137 | ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" 138 | PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/facedetection 139 | INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} 140 | ) 141 | export(TARGETS ${fdt_lib_name} 142 | APPEND FILE ${CMAKE_BINARY_DIR}/${fdt_lib_name}Config.cmake 143 | ) 144 | # Install cmake configure files 145 | install(EXPORT ${fdt_lib_name}Config 146 | DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${fdt_lib_name}" 147 | ) 148 | write_basic_package_version_file( 149 | "${CMAKE_BINARY_DIR}/${fdt_lib_name}ConfigVersion.cmake" 150 | VERSION ${BUILD_VERSION} 151 | COMPATIBILITY AnyNewerVersion) 152 | install(FILES "${CMAKE_BINARY_DIR}/${fdt_lib_name}ConfigVersion.cmake" DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${fdt_lib_name}") 153 | 154 | # Create demo. OpenCV is requred. 155 | if (DEMO) 156 | find_package(OpenCV REQUIRED) 157 | include_directories(${OpenCV_INCLUDE_DIRS}) 158 | 159 | add_executable(detect-image-demo ${fdt_base_dir}/example/detect-image.cpp) 160 | target_link_libraries(detect-image-demo ${fdt_lib_name} ${OpenCV_LIBS}) 161 | 162 | add_executable(detect-camera-demo ${fdt_base_dir}/example/detect-camera.cpp) 163 | target_link_libraries(detect-camera-demo ${fdt_lib_name} ${OpenCV_LIBS}) 164 | 165 | add_executable(benchmark ${fdt_base_dir}/example/benchmark.cpp) 166 | target_link_libraries(benchmark ${fdt_lib_name} ${OpenCV_LIBS}) 167 | endif() 168 | 169 | if (GSTREAMER) 170 | find_package(OpenCV REQUIRED) 171 | 172 | include(FindPkgConfig) 173 | pkg_search_module(GSTREAMER REQUIRED gstreamer-1.0) 174 | pkg_search_module(GSTREAMER_BASE REQUIRED gstreamer-base-1.0) 175 | pkg_search_module(GSTREAMER_VIDEO REQUIRED gstreamer-video-1.0) 176 | 177 | add_library(gstfacedetect SHARED 178 | example/libfacedetect.cpp 179 | ) 180 | 181 | include_directories(gstfacedetect PRIVATE 182 | 
${GSTREAMER_INCLUDE_DIRS} 183 | ${GSTREAMER_BASE_INCLUDE_DIRS} 184 | ${GSTREAMER_VIDEO_INCLUDE_DIRS} 185 | ${OpenCV_INCLUDE_DIRS} 186 | ) 187 | 188 | target_link_libraries(gstfacedetect 189 | ${GSTREAMER_LIBRARIES} 190 | ${GSTREAMER_BASE_LIBRARIES} 191 | ${GSTREAMER_VIDEO_LIBRARIES} 192 | ${OpenCV_LIBS} 193 | ${fdt_lib_shared} 194 | ) 195 | 196 | endif() 197 | 198 | message("CXX_FLAGS: ${CMAKE_CXX_FLAGS}") 199 | message("LINKER_FLAGS: ${CMAKE_EXE_LINKER_FLAGS}") 200 | message("AVX512 = ${ENABLE_AVX512}") 201 | message("AVX2 = ${ENABLE_AVX2}") 202 | message("NEON = ${ENABLE_NEON}") 203 | message("OpenMP = ${OPENMP_FOUND}") 204 | message("DEMO = ${DEMO}") 205 | -------------------------------------------------------------------------------- /COMPILE.md: -------------------------------------------------------------------------------- 1 | # How to compile the library 2 | 3 | ## Table of contents: 4 | - [Compilation](#compilation) 5 | - Windows 10 6 | - [with Visual Studio 2019 (MSVC)](#windows-10-with-visual-studio-2019) 7 | - [with MinGW](#windows-10-with-mingw) 8 | - [Linux/Ubuntu](#linux-or-ubuntu) 9 | - [Android](#android) 10 | - [OpenCV DNN](#opencv-dnn) 11 | - [Cross build for aarch64](#cross-build-for-aarch64) 12 | 13 | - [Usage](#usage) 14 | 15 | - [Example](#example) 16 | 17 | ## Compilation 18 | To use the library, you can do either of the following: 19 | - Copy the .cpp files in src folder directly to your project's source folder, and then compile them with the other files in your project. 20 | The source code are written in standard C/C++, so they should compile on any platform that supports C/C++; 21 | - Or follow the steps below to generate dynamic/static libraries under different environments. 22 | ### Windows 10 with Visual Studio 2019 23 | 0. Set up OpenCV with 4.51+ version. 24 | 1. Download libfacedetection and then run powershell terminal as administrator: 25 | 26 | cd libfacedetection 27 | mkdir build 28 | cd build 29 | cmake .. 
-DCMAKE_INSTALL_PREFIX=install -DBUILD_SHARED_LIBS=ON -DCMAKE_BUILD_TYPE=Release -DDEMO=OFF 30 | cmake --build . --config Release 31 | cmake --build . --config Release --target install 32 | 33 | Dynamic library(facedetection.dll) is generated. Then, to generate static library(lib), you need to set the parameter `BUILD_SHARED_LIBS` to `OFF` with the above commands. 34 | 2. To deploy the facedetection libraries in a Visual Studio C++ console app, in your console application's property pages, under Release mode (because the build type is Release), add the path of your `libfacedetection\build\install\include\facedetection` to VC++ Directories -> Include Directories and the path of your `libfacedetection\build\install\lib` to VC++ Directories -> Library Directories, and add `facedetection.lib` to Linker -> Input -> Additional Dependencies. 35 | 36 | 3. Add `#include "facedetectcnn.h"` to your source files. See [code example built with Visual Studio](#visual-studio-example). 37 | 38 | ### Windows 10 with MinGW 39 | 0. Set up OpenCV with 4.51+ version. 40 | 1. Same as the step 1 above with Visual Studio. `libfacedetection.so` is built with `BUILD_SHARED_LIBS=ON` and `libfacedetecion.a` is built with the variable set to `OFF`. 41 | 2. Set the environment variable `facedetection_DIR` to `path\to\libfacedetection\build`. 42 | 3. Use `find_package(facedetection)` in the CMakeLists of your project, or use `target_link_libraries( your-program /path/to/libfacedetection/build/install/lib/libfacedetection.so)`. 43 | 44 | find_package(facedetection) 45 | if(facedetection_FOUND) 46 | //your code 47 | endif() 48 | 4. Add `#include "facedetectcnn.h"` to your source files to use the libraries. 49 | 50 | ### Linux or Ubuntu 51 | 0. [Set up OpenCV](https://docs.opencv.org/4.5.2/d7/d9f/tutorial_linux_install.html) with 4.51+ version. 52 | 1. Same as the step 1 above with Visual Studio. 
`libfacedetection.so` is built with `BUILD_SHARED_LIBS=ON` and `libfacedetecion.a` is built with the variable set to `OFF`. 53 | 2. Set the environment variable `facedetection_DIR` to `path\to\libfacedetection\build`. 54 | 3. Use `target_link_libraries( your-program /path/to/libfacedetection/build/install/lib/libfacedetection.so)`in the CMakelists of your project to use the shared object. 55 | 4. Add `#include "facedetectcnn.h"` to your source files to use the libraries. See [code example built with GNU on Linux/Ubuntu](#linux-or-ubuntu-example). 56 | 57 | ### Android 58 | 1. Install ndk 59 | - Download and install to /home/android-ndk from https://developer.android.com/ndk/downloads 60 | - Setting environment variables 61 | 62 | export ANDROID_NDK=/home/android-ndk 63 | 64 | 2. Compile 65 | - The host is Linux / Ubuntu 66 | - Build 67 | 68 | mkdir build 69 | cd build 70 | cmake .. -DCMAKE_INSTALL_PREFIX=install \ 71 | -DCMAKE_BUILD_TYPE=MinSizeRel \ 72 | -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake \ 73 | -DANDROID_ABI="arm64-v8a" \ 74 | -DANDROID_PLATFORM=android-18 \ 75 | -DUSE_OPENMP=OFF \ 76 | -DENABLE_NEON=ON \ 77 | -DENABLE_AVX2=OFF \ 78 | -DDEMO=OFF 79 | cmake --build . --config MinSizeRel 80 | 81 | - Install 82 | 83 | cmake --build . --config MinSizeRel --target install/strip 84 | 85 | - The host is Windows 86 | - Build 87 | 88 | mkdir build 89 | cd build 90 | cmake .. -DCMAKE_INSTALL_PREFIX=%cd%\install -G"Unix Makefiles" -DCMAKE_BUILD_TYPE=MinSizeRel -DCMAKE_TOOLCHAIN_FILE=%ANDROID_NDK%/build/cmake/android.toolchain.cmake -DCMAKE_MAKE_PROGRAM=%ANDROID_NDK%/prebuilt/windows-x86_64/bin/make.exe -DANDROID_ABI=arm64-v8a -DANDROID_ARM_NEON=ON -DANDROID_PLATFORM=android-24 -DUSE_OPENMP=OFF -DENABLE_NEON=ON -DENABLE_AVX2=OFF -DDEMO=OFF 91 | cmake --build . --config MinSizeRel 92 | 93 | - Install 94 | 95 | cmake --build . 
--config MinSizeRel --target install/strip 96 | 97 | - msys2 or cygwin 98 | - Build 99 | 100 | mkdir build 101 | cd build 102 | cmake .. -DCMAKE_INSTALL_PREFIX=install \ 103 | -G"Unix Makefiles" \ 104 | -DCMAKE_BUILD_TYPE=MinSizeRel \ 105 | -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake \ 106 | -DCMAKE_MAKE_PROGRAM=${ANDROID_NDK}\prebuilt\windows-x86_64\bin\make.exe \ 107 | -DANDROID_ABI=arm64-v8a \ 108 | -DANDROID_ARM_NEON=ON \ 109 | -DUSE_OPENMP=OFF \ 110 | -DENABLE_NEON=ON \ 111 | -DENABLE_AVX2=OFF \ 112 | -DDEMO=OFF 113 | cmake --build . --config MinSizeRel 114 | 115 | - Install 116 | 117 | cmake --build . --config MinSizeRel --target install/strip 118 | 119 | + Parameter Description: https://developer.android.google.cn/ndk/guides/cmake 120 | + ANDROID_ABI: The following values can be taken: 121 | Goal ABI. If the target ABI is not specified, CMake uses armeabi-v7a by default. 122 | Valid ABI are: 123 | - armeabi:CPU with software floating point arithmetic based on ARMv5TE 124 | - armeabi-v7a:ARMv7-based device with hardware FPU instructions (VFP v3 D16) 125 | - armeabi-v7a with NEON:Same as armeabi-v7a, but with NEON floating point instructions enabled. This is equivalent to setting -DANDROID_ABI=armeabi-v7a and -DANDROID_ARM_NEON=ON. 126 | - arm64-v8a:ARMv8 AArch64 Instruction Set 127 | - x86:IA-32 Instruction Set 128 | - x86_64 - x86-64 Instruction Set 129 | + ANDROID_NDK The path of installed ndk in host 130 | + ANDROID_PLATFORM: For a full list of platform names and corresponding Android system images, see the [Android NDK Native API] (https://developer.android.google.com/ndk/guides/stable_apis.html) 131 | + ANDROID_ARM_MODE 132 | + ANDROID_ARM_NEON 133 | + ANDROID_STL:Specifies the STL that CMake should use. 
134 | - c++_shared: use libc++ shared library 135 | - c++_static: use libc++ static library 136 | - none: no stl 137 | - system: use system STL 138 | 139 | ### OpenCV DNN 140 | - To deploy libfacedetection with the OpenCV DNN module and ONNX model, see 141 | [face detection with OpenCV DNN](https://github.com/ShiqiYu/libfacedetection/tree/master/opencv_dnn). 142 | 143 | ### Cross build for aarch64 144 | 145 | 1. Set cross compiler for aarch64 (please refer to aarch64-toolchain.cmake). 146 | 2. Set opencv path since the example code depends on opencv. 147 | 148 | ```bash 149 | cmake \ 150 | -DENABLE_NEON=ON \ 151 | -DCMAKE_BUILD_TYPE=RELEASE \ 152 | -DCMAKE_TOOLCHAIN_FILE=../aarch64-toolchain.cmake \ 153 | .. 154 | 155 | make 156 | ``` 157 | 158 | 159 | ## Usage 160 | Here is an example of how to use the face detection model in C++: 161 | ```C++ 162 | #include "facedetect.h" 163 | #include 164 | 165 | #define DETECT_BUFFER_SIZE 0x20000 166 | 167 | int main() 168 | { 169 | int * pResults = NULL; 170 | unsigned char * pBuffer = (unsigned char *)malloc(DETECT_BUFFER_SIZE); 171 | Mat image = imread(file_path); 172 | 173 | /** 174 | The function that loads the face detection model. 175 | 176 | @param result_buffer Buffer memory for storing face detection results, whose size must be 0x20000 * bytes. 177 | @param rgb_image_data Input image, which must be BGR (three channels) instead of RGB image. 178 | @param width The width of the input image. 179 | @param height The height. 180 | @param step The step. 181 | @return An int pointer reflecting the face detection result, see Example for detailed usage. 
182 | */ 183 | int * pResults = facedetect_cnn(pBuffer, (unsigned char*)(image.ptr(0)), image.cols, image.rows, (int)image.step); 184 | } 185 | ``` 186 | 187 | ## Example 188 | 189 | + To build the ./example of libfacedetection: 190 | 191 | - Tips: 192 | * Please add facedetection_export.h file in the position where you copy your facedetectcnn.h files, add #define FACEDETECTION_EXPORT to facedetection_export.h file. See: [issues #222](https://github.com/ShiqiYu/libfacedetection/issues/222) 193 | * Please add -O3 to turn on optimizations when you compile the source code using g++. 194 | * Please choose 'Maximize Speed/-O2' when you compile the source code using Microsoft Visual Studio. 195 | * You can enable OpenMP to speedup. But the best solution is to call the detection function in different threads. 196 | 197 | ### Linux or Ubuntu example 198 | 199 | If using Linux/Ubuntu, you can: 200 | 201 | 0. [Generate libfacedetecion.so](#linux-or-ubuntu); 202 | 1. Add CMakeLists.txt: 203 | 204 | ``` 205 | cmake_minimum_required( VERSION 2.8 ) 206 | project( example ) 207 | find_package( OpenCV REQUIRED ) 208 | message(STATUS "OpenCV_LIBS = ${OpenCV_LIBS}") 209 | include_directories( ${OpenCV_INCLUDE_DIRS} ) 210 | add_executable( detect-image detect-image.cpp ) 211 | add_executable( detect-camera detect-camera.cpp ) 212 | target_link_libraries( detect-image ${OpenCV_LIBS} ) 213 | target_link_libraries( detect-image /libfacedetection/build/install/lib/libfacedetection.so ) 214 | target_link_libraries( detect-image /opencv/build/lib/libopencv_highgui.so ) 215 | target_link_libraries( detect-image /opencv/build/lib/libopencv_imgproc.so ) 216 | target_link_libraries( detect-image /opencv/build/lib/libopencv_core.so ) 217 | target_link_libraries( detect-image /opencv/build/lib/libopencv_imgcodecs.so ) 218 | target_link_libraries( detect-camera ${OpenCV_LIBS} ) 219 | target_link_libraries( detect-camera /libfacedetection/build/install/lib/libfacedetection.so ) 220 | 
target_link_libraries( detect-camera /opencv/build/lib/libopencv_highgui.so ) 221 | target_link_libraries( detect-camera /opencv/build/lib/libopencv_video.so ) 222 | target_link_libraries( detect-camera /opencv/build/lib/libopencv_imgproc.so ) 223 | target_link_libraries( detect-camera /opencv/build/lib/libopencv_core.so ) 224 | target_link_libraries( detect-camera /opencv/build/lib/libopencv_videoio.so ) 225 | ``` 226 | 227 | 2. CMake and make: 228 | 229 | ``` 230 | cd example 231 | mkdir build 232 | cd build 233 | cmake .. 234 | make 235 | // detect using an image 236 | ./detect-image 237 | // or detect using camera 238 | ./detect-camera 239 | ``` 240 | ### Visual Studio example 241 | 242 | If using Visual Studio 2019, you can: 243 | 244 | 0. [Generate facedetection.lib](#windows-10-with-visual-studio-2019) as well as facedetection.dll (to avoid errors); 245 | 1. You can either: 246 | - Add a similar CMakeLists, but instead linking the project to *.lib and compile the folder as a whole into a solution(.sln) that opens in Visual Studio. 247 | 248 | OR 249 | 250 | - Create a new project with C++ Console App template. Go to Project->Properties and select Release in Configuration, x64 in Platform, then do the following: add the path of your `libfacedetection\build\install\include\facedetection` (as well as your OpenCV include path) to VC++ Directories -> Include Directories and the path of your `libfacedetection\build\install\lib` (as well as your OpenCV lib path)to VC++ Directories -> Library Directories, and add `facedetection.lib` and other necessary dependencies to Linker -> Input -> Additional Dependencies. 251 | - Add one file in the example folder to the project's Source folder. To build another example, you can right-click the current Solution in the Solution Explorer, Add->New Project and follow the above step (or use property manager to copy-paste Property Sheet). 252 | 253 | 2. 
Build the solution and run the powershell terminal in Visual Studio: 254 | ``` 255 | cd x64/Release 256 | // detect using an image 257 | ./detect-image 258 | // or detect using camera 259 | ./detect-camera 260 | ``` 261 | 262 | 263 | 264 | + Sample output of detect-image 265 | ![Examples](/images/cnnresult.png "Detection example") 266 | 267 | + Third-party examples 268 | 269 | - FaceRecognizer: https://github.com/KangLin/FaceRecognizer 270 | This is a cross-platforms program. It has supported windows, linux, android, etc. 271 | -------------------------------------------------------------------------------- /ChangeLog: -------------------------------------------------------------------------------- 1 | 2022-09-14 2 | --------------------- 3 | * Update the model to v3. The new model is anchor-free instead of anchor-based. 4 | * The number of parameters is sharply dropped to only 55K. 5 | * The runtime speed has been improved to varying degrees on all platforms. 6 | * The performance on WIDER Face is improving from 0.856/0.842/0.727 to 0.887/0.871/0.768. 7 | 8 | 2021-06-09 9 | --------------------- 10 | * Now the model is depth-wise based. 11 | * The number of parameters is sharply dropped to only 85K. File size of the PyTorch model is 399KB, while ONNX is 337KB. 12 | * The runtime speed is slightly faster on X86 CPU, and 20% faster on ARM CPU. 13 | * int8-quantization is dropped, but float operations are carried out by AVX2/AVX512/NEON accordingly. 14 | 15 | 2020-02-27 16 | --------------------- 17 | * Update the model to v3. The computational cost similar with the previous one. 18 | * Now the algorithm can support 5 landmakr detection. 19 | * AVX512 support is added. 20 | 21 | 2019-09-14 22 | --------------------- 23 | * Update the model to v2. The computational cost is doubled. But the speed is almost the same with the previous one because int8 convolutional operation is carried out by AVX2. 24 | * NEON support is not finished. 
25 | 26 | 2019-03-13 27 | --------------------- 28 | * Release the source code and the model files. Removed the binary libary. 29 | 30 | 2018-11-17 31 | --------------------- 32 | * Replaced the AdaBoost methods with a CNN based one. 33 | 34 | 2017-02-24 35 | --------------------- 36 | * landmark detection speed reaches to 0.8ms per face. The former version is 1.7ms per face. 37 | 38 | 2017-01-20 39 | --------------------- 40 | * 68-point landmark detection added. 41 | 42 | 2016-11-24 43 | --------------------- 44 | * Added benchmark.cpp which can run face detection in multiple threads using OpenMP. 45 | 46 | 2016-11-16 47 | --------------------- 48 | * Bugs in the previous version were fixed. std::vector was removed from the API because it can cause error. 49 | 50 | 2016-11-10 51 | --------------------- 52 | * The API was updated. std::vector was involved. 53 | * The functions can be called in multiple threads at the same time. 54 | 55 | 2016-10-6 56 | --------------------- 57 | * The algorithm has been speeded up greatly (2x to 3x). 58 | * The true positive rates (FDDB) have been improved 1% to 2% at FP=100. 59 | * Multi-core parallelization has been disabled. The detection time is still the same. 60 | 61 | 2016-9-16 62 | --------------------- 63 | * Speedup again. 64 | * Change function name facedetect_frontal_tmp() to facedetect_frontal_surveillance(). This function now uses a new trained classifier which can achieve a higher detection speed. 65 | 66 | 2016-6-28 67 | --------------------- 68 | * 64-bit dll added since there are so many users request it. 69 | * An easter egg is hidden in the 64-bit dll. Can you find it? 70 | 71 | 2016-6-8 72 | --------------------- 73 | * Speedup 1.2x 74 | * Added an experimental function facedetect_frontal_tmp(). The function can gain a higher detection rate in video surveillance. 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | By downloading, copying, installing or using the software you agree to this license. 2 | If you do not agree to this license, do not download, install, 3 | copy or use the software. 4 | 5 | 6 | License Agreement For libfacedetection 7 | (3-clause BSD License) 8 | 9 | Copyright (c) 2015-2019, Shiqi Yu, all rights reserved. 10 | shiqi.yu@gmail.com 11 | 12 | Redistribution and use in source and binary forms, with or without modification, 13 | are permitted provided that the following conditions are met: 14 | 15 | * Redistributions of source code must retain the above copyright notice, 16 | this list of conditions and the following disclaimer. 17 | 18 | * Redistributions in binary form must reproduce the above copyright notice, 19 | this list of conditions and the following disclaimer in the documentation 20 | and/or other materials provided with the distribution. 21 | 22 | * Neither the names of the copyright holders nor the names of the contributors 23 | may be used to endorse or promote products derived from this software 24 | without specific prior written permission. 25 | 26 | This software is provided by the copyright holders and contributors "as is" and 27 | any express or implied warranties, including, but not limited to, the implied 28 | warranties of merchantability and fitness for a particular purpose are disclaimed. 
29 | In no event shall copyright holders or contributors be liable for any direct, 30 | indirect, incidental, special, exemplary, or consequential damages 31 | (including, but not limited to, procurement of substitute goods or services; 32 | loss of use, data, or profits; or business interruption) however caused 33 | and on any theory of liability, whether in contract, strict liability, 34 | or tort (including negligence or otherwise) arising in any way out of 35 | the use of this software, even if advised of the possibility of such damage. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # libfacedetection 2 | 3 | This is an open source library for CNN-based face detection in images. The CNN model has been converted to static variables in C source files. The source code does not depend on any other libraries. What you need is just a C++ compiler. You can compile the source code under Windows, Linux, ARM and any platform with a C++ compiler. 4 | 5 | SIMD instructions are used to speed up the detection. You can enable AVX2 if you use Intel CPU or NEON for ARM. 6 | 7 | The model files are provided in `src/facedetectcnn-data.cpp` (C++ arrays) & [the model (ONNX) from OpenCV Zoo](https://github.com/opencv/opencv_zoo/tree/master/models/face_detection_yunet). You can try our scripts (C++ & Python) in `opencv_dnn/` with the ONNX model. View the network architecture [here](https://netron.app/?url=https://raw.githubusercontent.com/ShiqiYu/libfacedetection.train/master/onnx/yunet*.onnx). 8 | 9 | Please note that OpenCV DNN does not support the latest version of YuNet with dynamic input shape. Please ensure you have the exact same input shape as the one in the ONNX model to run latest YuNet with OpenCV DNN. 10 | 11 | examples/detect-image.cpp and examples/detect-camera.cpp show how to use the library. 
12 | 13 | The library was trained by [libfacedetection.train](https://github.com/ShiqiYu/libfacedetection.train). 14 | 15 | ![Examples](/images/cnnresult.png "Detection example") 16 | 17 | ## How to use the code 18 | 19 | You can copy the files in directory src/ into your project, 20 | and compile them as the other files in your project. 21 | The source code is written in standard C/C++. 22 | It should be compiled at any platform which supports C/C++. 23 | 24 | Some tips: 25 | 26 | * Please add facedetection_export.h file in the position where you copy your facedetectcnn.h files, add #define FACEDETECTION_EXPORT to facedetection_export.h file. See: [issues #222](https://github.com/ShiqiYu/libfacedetection/issues/222) 27 | * Please add -O3 to turn on optimizations when you compile the source code using g++. 28 | * Please choose 'Maximize Speed/-O2' when you compile the source code using Microsoft Visual Studio. 29 | * You can enable OpenMP to speedup. But the best solution is to call the detection function in different threads. 30 | 31 | You can also compile the source code to a static or dynamic library, and then use it in your project. 
32 | 33 | [How to compile](COMPILE.md) 34 | 35 | 36 | ## CNN-based Face Detection on Intel CPU 37 | 38 | Using **AVX2** instructions 39 | | Method |Time | FPS |Time | FPS | 40 | |--------------------|--------------|-------------|--------------|-------------| 41 | | | X64 |X64 | X64 |X64 | 42 | | |Single-thread |Single-thread|Multi-thread |Multi-thread | 43 | |cnn (CPU, 640x480) | 50.02ms | 19.99 | 6.55ms | 152.65 | 44 | |cnn (CPU, 320x240) | 13.09ms | 76.39 | 1.82ms | 550.54 | 45 | |cnn (CPU, 160x120) | 3.61ms | 277.37 | 0.57ms | 1745.13 | 46 | |cnn (CPU, 128x96) | 2.11ms | 474.60 | 0.33ms | 2994.23 | 47 | 48 | Using **AVX512** instructions 49 | | Method |Time | FPS |Time | FPS | 50 | |--------------------|--------------|-------------|--------------|-------------| 51 | | | X64 |X64 | X64 |X64 | 52 | | |Single-thread |Single-thread|Multi-thread |Multi-thread | 53 | |cnn (CPU, 640x480) | 46.47ms | 21.52 | 6.39ms | 156.47 | 54 | |cnn (CPU, 320x240) | 12.10ms | 82.67 | 1.67ms | 599.31 | 55 | |cnn (CPU, 160x120) | 3.37ms | 296.47 | 0.46ms | 2155.80 | 56 | |cnn (CPU, 128x96) | 1.98ms | 504.72 | 0.31ms | 3198.63 | 57 | 58 | * Minimal face size ~10x10 59 | * Intel(R) Core(TM) i7-7820X CPU @ 3.60GHz 60 | * Multi-thread in 16 threads and 16 processors. 61 | 62 | 63 | ## CNN-based Face Detection on ARM Linux (Raspberry Pi 4 B) 64 | 65 | | Method |Time | FPS |Time | FPS | 66 | |--------------------|--------------|-------------|--------------|-------------| 67 | | |Single-thread |Single-thread|Multi-thread |Multi-thread | 68 | |cnn (CPU, 640x480) | 404.63ms | 2.47 | 125.47ms | 7.97 | 69 | |cnn (CPU, 320x240) | 105.73ms | 9.46 | 32.98ms | 30.32 | 70 | |cnn (CPU, 160x120) | 26.05ms | 38.38 | 7.91ms | 126.49 | 71 | |cnn (CPU, 128x96) | 15.06ms | 66.38 | 4.50ms | 222.28 | 72 | 73 | * Minimal face size ~10x10 74 | * Raspberry Pi 4 B, Broadcom BCM2835, Cortex-A72 (ARMv8) 64-bit SoC @ 1.5GHz 75 | * Multi-thread in 4 threads and 4 processors. 
76 | 77 | ## Performance on WIDER Face 78 | Run on default settings: scales=[1.], confidence_threshold=0.02, floating point: 79 | ``` 80 | AP_easy=0.887, AP_medium=0.871, AP_hard=0.768 81 | ``` 82 | 83 | ## Author 84 | * Shiqi Yu, 85 | 86 | ## Contributors 87 | All contributors who contribute at GitHub.com are listed [here](https://github.com/ShiqiYu/libfacedetection/graphs/contributors). 88 | 89 | The contributors who were not listed at GitHub.com: 90 | * Jia Wu (吴佳) 91 | * Dong Xu (徐栋) 92 | * Shengyin Wu (伍圣寅) 93 | 94 | ## Acknowledgment 95 | The work was partly supported by the Science Foundation of Shenzhen (Grant No. 20170504160426188). 96 | 97 | ## Citation 98 | 99 | The master thesis of Mr. Wei Wu. All details of the algorithm are in the thesis. The thesis can be downloaded at [吴伟硕士毕业论文](wu-thesis-facedetect.pdf) 100 | ``` 101 | @thesis{wu2023thesisyunet, 102 | author = {吴伟}, 103 | title = {面向边缘设备的高精度毫秒级人脸检测技术研究}, 104 | type = {硕士学位论文}, 105 | institution = {南方科技大学}, 106 | year = {2023}, 107 | } 108 | ``` 109 | 110 | The paper for the main idea of this repository https://link.springer.com/article/10.1007/s11633-023-1423-y. 111 | 112 | ``` 113 | @article{wu2023miryunet, 114 | title = {YuNet: A Tiny Millisecond-level Face Detector}, 115 | author = {Wu, Wei and Peng, Hanyang and Yu, Shiqi}, 116 | journal = {Machine Intelligence Research}, 117 | pages = {1--10}, 118 | year = {2023}, 119 | doi = {10.1007/s11633-023-1423-y}, 120 | publisher = {Springer} 121 | } 122 | ``` 123 | 124 | The survey paper on face detection to evaluate different methods. 
It can be open-accessed at https://ieeexplore.ieee.org/document/9580485 125 | ``` 126 | @article{feng2022face, 127 | author = {Feng, Yuantao and Yu, Shiqi and Peng, Hanyang and Li, Yan-Ran and Zhang, Jianguo}, 128 | journal = {IEEE Transactions on Biometrics, Behavior, and Identity Science}, 129 | title = {Detect Faces Efficiently: A Survey and Evaluations}, 130 | year = {2022}, 131 | volume = {4}, 132 | number = {1}, 133 | pages = {1-18}, 134 | doi = {10.1109/TBIOM.2021.3120412} 135 | } 136 | ``` 137 | 138 | The loss used in training is EIoU, a novel extended IoU. The paper can be open-accessed at https://ieeexplore.ieee.org/document/9429909. 139 | ``` 140 | @article{peng2021eiou, 141 | author = {Peng, Hanyang and Yu, Shiqi}, 142 | journal = {IEEE Transactions on Image Processing}, 143 | title = {A Systematic IoU-Related Method: Beyond Simplified Regression for Better Localization}, 144 | year = {2021}, 145 | volume = {30}, 146 | pages = {5032-5044}, 147 | doi = {10.1109/TIP.2021.3077144} 148 | } 149 | ``` 150 | -------------------------------------------------------------------------------- /aarch64-toolchain.cmake: -------------------------------------------------------------------------------- 1 | set(CMAKE_SYSTEM_NAME Linux) 2 | set(CMAKE_SYSTEM_VERSION 1) 3 | set(CMAKE_SYSTEM_PROCESSOR "aarch64") 4 | set(CMAKE_CXX_COMPILER "/opt/linaro/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-g++") 5 | set(CMAKE_C_COMPILER "/opt/linaro/gcc-linaro-6.3.1-2017.05-x86_64_aarch64-linux-gnu/bin/aarch64-linux-gnu-gcc") 6 | -------------------------------------------------------------------------------- /build_android.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -n "$1" ]; then 4 | ANDROID_NDK=$1 5 | fi 6 | if [ -z "${ANDROID_NDK}" ]; then 7 | echo "$0 ANDROID_NDK" 8 | exit -1 9 | fi 10 | 11 | if [ -z "${ANDROID_STL}" ]; then 12 | ANDROID_STL=c++_static 13 | fi 14 | 15 | if [ ! 
-d build_android ]; then 16 | mkdir -p build_android 17 | fi 18 | cd build_android 19 | 20 | cmake .. -G"Unix Makefiles" -DCMAKE_INSTALL_PREFIX=install \ 21 | -DCMAKE_BUILD_TYPE=Release \ 22 | -DCMAKE_VERBOSE_MAKEFILE=TRUE \ 23 | -DANDROID_ABI="arm64-v8a" \ 24 | -DANDROID_ARM_NEON=ON \ 25 | -DANDROID_PLATFORM=android-24 \ 26 | -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK}/build/cmake/android.toolchain.cmake \ 27 | -DANDROID_STL=${ANDROID_STL} \ 28 | -DENABLE_NEON=ON \ 29 | -DENABLE_AVX2=OFF 30 | 31 | cmake --build . --config Release -- -j`cat /proc/cpuinfo |grep 'cpu cores' |wc -l` 32 | 33 | cmake --build . --config Release --target install/strip -- -j`cat /proc/cpuinfo |grep 'cpu cores' |wc -l` 34 | 35 | cd .. 36 | -------------------------------------------------------------------------------- /example/benchmark.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include "facedetectcnn.h" 4 | 5 | #ifdef _OPENMP 6 | #include 7 | #endif 8 | 9 | using namespace cv; 10 | using namespace std; 11 | 12 | //define the buffer size. Do not change the size! 13 | #define DETECT_BUFFER_SIZE 0x20000 14 | using namespace cv; 15 | 16 | int main(int argc, char* argv[]) 17 | { 18 | if (argc != 2) 19 | { 20 | printf("Usage: %s \n", argv[0]); 21 | return -1; 22 | } 23 | //load an image and convert it to gray (single-channel) 24 | Mat image = imread(argv[1]); 25 | if (image.empty()) 26 | { 27 | fprintf(stderr, "Can not load the image file %s.\n", argv[1]); 28 | return -1; 29 | } 30 | 31 | #ifdef _OPENMP 32 | int num_thread = omp_get_num_procs(); 33 | omp_set_num_threads(num_thread); 34 | printf("There are %d threads, %d processors.\n", num_thread, omp_get_num_procs()); 35 | #else 36 | int num_thread = 1; 37 | printf("There is %d thread.\n", num_thread); 38 | #endif 39 | 40 | int * pResults = NULL; 41 | unsigned char * pBuffers[1024];//large enough 42 | 43 | //pBuffer is used in the detection functions. 
44 | //If you call functions in multiple threads, please create one buffer for each thread! 45 | unsigned char * p = (unsigned char *)malloc(DETECT_BUFFER_SIZE * num_thread); 46 | if (!p) 47 | { 48 | fprintf(stderr, "Can not alloc buffer.\n"); 49 | return -1; 50 | } 51 | 52 | for (int i = 0; i < num_thread; i++) 53 | pBuffers[i] = p + (DETECT_BUFFER_SIZE)*i; 54 | 55 | int total_count = 256; 56 | 57 | pResults = facedetect_cnn(pBuffers[0], image.ptr(0), (int)image.cols, (int)image.rows, (int)image.step); 58 | 59 | TickMeter tm; 60 | tm.start(); 61 | #ifdef _OPENMP 62 | #pragma omp parallel for 63 | #endif 64 | for (int i = 0; i < total_count; i++) 65 | { 66 | #ifdef _OPENMP 67 | int idx = omp_get_thread_num(); 68 | #else 69 | int idx = 0; 70 | #endif 71 | pResults = facedetect_cnn(pBuffers[idx], image.ptr(0), (int)image.cols, (int)image.rows, (int)image.step); 72 | } 73 | tm.stop(); 74 | double t = tm.getTimeMilli(); 75 | t /= total_count; 76 | printf("cnn facedetection average time = %.2fms | %.2f FPS\n", t, 1000.0 / t); 77 | 78 | //release the buffer 79 | free(p); 80 | 81 | return 0; 82 | } 83 | 84 | -------------------------------------------------------------------------------- /example/detect-camera.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | By downloading, copying, installing or using the software you agree to this license. 3 | If you do not agree to this license, do not download, install, 4 | copy or use the software. 5 | 6 | 7 | License Agreement For libfacedetection 8 | (3-clause BSD License) 9 | 10 | Copyright (c) 2018-2020, Shiqi Yu, all rights reserved. 11 | shiqi.yu@gmail.com 12 | 13 | Redistribution and use in source and binary forms, with or without modification, 14 | are permitted provided that the following conditions are met: 15 | 16 | * Redistributions of source code must retain the above copyright notice, 17 | this list of conditions and the following disclaimer. 
18 | 19 | * Redistributions in binary form must reproduce the above copyright notice, 20 | this list of conditions and the following disclaimer in the documentation 21 | and/or other materials provided with the distribution. 22 | 23 | * Neither the names of the copyright holders nor the names of the contributors 24 | may be used to endorse or promote products derived from this software 25 | without specific prior written permission. 26 | 27 | This software is provided by the copyright holders and contributors "as is" and 28 | any express or implied warranties, including, but not limited to, the implied 29 | warranties of merchantability and fitness for a particular purpose are disclaimed. 30 | In no event shall copyright holders or contributors be liable for any direct, 31 | indirect, incidental, special, exemplary, or consequential damages 32 | (including, but not limited to, procurement of substitute goods or services; 33 | loss of use, data, or profits; or business interruption) however caused 34 | and on any theory of liability, whether in contract, strict liability, 35 | or tort (including negligence or otherwise) arising in any way out of 36 | the use of this software, even if advised of the possibility of such damage. 37 | */ 38 | 39 | #include 40 | #include 41 | #include "facedetectcnn.h" 42 | 43 | //define the buffer size. Do not change the size! 44 | //0x9000 = 1024 * (16 * 2 + 4), detect 1024 face at most 45 | #define DETECT_BUFFER_SIZE 0x9000 46 | using namespace cv; 47 | using namespace std; 48 | 49 | int main(int argc, char* argv[]) 50 | { 51 | if(argc != 2) 52 | { 53 | printf("Usage: %s \n", argv[0]); 54 | return -1; 55 | } 56 | 57 | int * pResults = NULL; 58 | //pBuffer is used in the detection functions. 59 | //If you call functions in multiple threads, please create one buffer for each thread! 
60 | unsigned char * pBuffer = (unsigned char *)malloc(DETECT_BUFFER_SIZE); 61 | if(!pBuffer) 62 | { 63 | fprintf(stderr, "Can not alloc buffer.\n"); 64 | return -1; 65 | } 66 | 67 | 68 | VideoCapture cap; 69 | Mat im; 70 | 71 | if( isdigit(argv[1][0])) 72 | { 73 | cap.open(argv[1][0]-'0'); 74 | if(! cap.isOpened()) 75 | { 76 | cerr << "Cannot open the camera." << endl; 77 | return 0; 78 | } 79 | } 80 | 81 | if( cap.isOpened()) 82 | { 83 | while(true) 84 | { 85 | cap >> im; 86 | //cout << "Image size: " << im.rows << "X" << im.cols << endl; 87 | Mat image = im.clone(); 88 | 89 | /////////////////////////////////////////// 90 | // CNN face detection 91 | // Best detection rate 92 | ////////////////////////////////////////// 93 | //!!! The input image must be a BGR one (three-channel) instead of RGB 94 | //!!! DO NOT RELEASE pResults !!! 95 | TickMeter cvtm; 96 | cvtm.start(); 97 | 98 | pResults = facedetect_cnn(pBuffer, (unsigned char*)(image.ptr(0)), image.cols, image.rows, (int)image.step); 99 | 100 | cvtm.stop(); 101 | printf("time = %gms\n", cvtm.getTimeMilli()); 102 | 103 | printf("%d faces detected.\n", (pResults ? *pResults : 0)); 104 | Mat result_image = image.clone(); 105 | //print the detection results 106 | for(int i = 0; i < (pResults ? *pResults : 0); i++) 107 | { 108 | short * p = ((short*)(pResults+1)) + 16*i; 109 | int confidence = p[0]; 110 | int x = p[1]; 111 | int y = p[2]; 112 | int w = p[3]; 113 | int h = p[4]; 114 | 115 | //show the score of the face. 
Its range is [0-100] 116 | char sScore[256]; 117 | snprintf(sScore, 256, "%d", confidence); 118 | cv::putText(result_image, sScore, cv::Point(x, y-3), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0), 1); 119 | 120 | //draw face rectangle 121 | rectangle(result_image, Rect(x, y, w, h), Scalar(0, 255, 0), 2); 122 | //draw five face landmarks in different colors 123 | cv::circle(result_image, cv::Point(p[5], p[5 + 1]), 1, cv::Scalar(255, 0, 0), 2); 124 | cv::circle(result_image, cv::Point(p[5 + 2], p[5 + 3]), 1, cv::Scalar(0, 0, 255), 2); 125 | cv::circle(result_image, cv::Point(p[5 + 4], p[5 + 5]), 1, cv::Scalar(0, 255, 0), 2); 126 | cv::circle(result_image, cv::Point(p[5 + 6], p[5 + 7]), 1, cv::Scalar(255, 0, 255), 2); 127 | cv::circle(result_image, cv::Point(p[5 + 8], p[5 + 9]), 1, cv::Scalar(0, 255, 255), 2); 128 | 129 | //print the result 130 | printf("face %d: confidence=%d, [%d, %d, %d, %d] (%d,%d) (%d,%d) (%d,%d) (%d,%d) (%d,%d)\n", 131 | i, confidence, x, y, w, h, 132 | p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13],p[14]); 133 | 134 | } 135 | imshow("result", result_image); 136 | 137 | if((cv::waitKey(2)& 0xFF) == 'q') 138 | break; 139 | } 140 | } 141 | 142 | 143 | 144 | 145 | //release the buffer 146 | free(pBuffer); 147 | 148 | return 0; 149 | } 150 | -------------------------------------------------------------------------------- /example/detect-image.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | By downloading, copying, installing or using the software you agree to this license. 3 | If you do not agree to this license, do not download, install, 4 | copy or use the software. 5 | 6 | 7 | License Agreement For libfacedetection 8 | (3-clause BSD License) 9 | 10 | Copyright (c) 2018-2020, Shiqi Yu, all rights reserved. 
11 | shiqi.yu@gmail.com 12 | 13 | Redistribution and use in source and binary forms, with or without modification, 14 | are permitted provided that the following conditions are met: 15 | 16 | * Redistributions of source code must retain the above copyright notice, 17 | this list of conditions and the following disclaimer. 18 | 19 | * Redistributions in binary form must reproduce the above copyright notice, 20 | this list of conditions and the following disclaimer in the documentation 21 | and/or other materials provided with the distribution. 22 | 23 | * Neither the names of the copyright holders nor the names of the contributors 24 | may be used to endorse or promote products derived from this software 25 | without specific prior written permission. 26 | 27 | This software is provided by the copyright holders and contributors "as is" and 28 | any express or implied warranties, including, but not limited to, the implied 29 | warranties of merchantability and fitness for a particular purpose are disclaimed. 30 | In no event shall copyright holders or contributors be liable for any direct, 31 | indirect, incidental, special, exemplary, or consequential damages 32 | (including, but not limited to, procurement of substitute goods or services; 33 | loss of use, data, or profits; or business interruption) however caused 34 | and on any theory of liability, whether in contract, strict liability, 35 | or tort (including negligence or otherwise) arising in any way out of 36 | the use of this software, even if advised of the possibility of such damage. 37 | */ 38 | 39 | #include 40 | #include 41 | #include "facedetectcnn.h" 42 | 43 | //define the buffer size. Do not change the size! 
44 | //0x9000 = 1024 * (16 * 2 + 4), detect 1024 face at most 45 | #define DETECT_BUFFER_SIZE 0x9000 46 | using namespace cv; 47 | using namespace std; 48 | 49 | int main(int argc, char* argv[]) 50 | { 51 | if(argc != 2) 52 | { 53 | printf("Usage: %s \n", argv[0]); 54 | return -1; 55 | } 56 | 57 | //load an image and convert it to gray (single-channel) 58 | Mat image = imread(argv[1]); 59 | if(image.empty()) 60 | { 61 | fprintf(stderr, "Can not load the image file %s.\n", argv[1]); 62 | return -1; 63 | } 64 | 65 | int * pResults = NULL; 66 | //pBuffer is used in the detection functions. 67 | //If you call functions in multiple threads, please create one buffer for each thread! 68 | unsigned char * pBuffer = (unsigned char *)malloc(DETECT_BUFFER_SIZE); 69 | if(!pBuffer) 70 | { 71 | fprintf(stderr, "Can not alloc buffer.\n"); 72 | return -1; 73 | } 74 | 75 | 76 | /////////////////////////////////////////// 77 | // CNN face detection 78 | // Best detection rate 79 | ////////////////////////////////////////// 80 | //!!! The input image must be a BGR one (three-channel) instead of RGB 81 | //!!! DO NOT RELEASE pResults !!! 82 | TickMeter cvtm; 83 | cvtm.start(); 84 | 85 | pResults = facedetect_cnn(pBuffer, (unsigned char*)(image.ptr(0)), image.cols, image.rows, (int)image.step); 86 | 87 | cvtm.stop(); 88 | printf("time = %gms\n", cvtm.getTimeMilli()); 89 | 90 | printf("%d faces detected.\n", (pResults ? *pResults : 0)); 91 | Mat result_image = image.clone(); 92 | //print the detection results 93 | for(int i = 0; i < (pResults ? *pResults : 0); i++) 94 | { 95 | short * p = ((short*)(pResults + 1)) + 16*i; 96 | int confidence = p[0]; 97 | int x = p[1]; 98 | int y = p[2]; 99 | int w = p[3]; 100 | int h = p[4]; 101 | 102 | //show the score of the face. 
Its range is [0-100] 103 | char sScore[256]; 104 | snprintf(sScore, 256, "%d", confidence); 105 | cv::putText(result_image, sScore, cv::Point(x, y-3), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0), 1); 106 | //draw face rectangle 107 | rectangle(result_image, Rect(x, y, w, h), Scalar(0, 255, 0), 2); 108 | //draw five face landmarks in different colors 109 | cv::circle(result_image, cv::Point(p[5], p[5 + 1]), 1, cv::Scalar(255, 0, 0), 2); 110 | cv::circle(result_image, cv::Point(p[5 + 2], p[5 + 3]), 1, cv::Scalar(0, 0, 255), 2); 111 | cv::circle(result_image, cv::Point(p[5 + 4], p[5 + 5]), 1, cv::Scalar(0, 255, 0), 2); 112 | cv::circle(result_image, cv::Point(p[5 + 6], p[5 + 7]), 1, cv::Scalar(255, 0, 255), 2); 113 | cv::circle(result_image, cv::Point(p[5 + 8], p[5 + 9]), 1, cv::Scalar(0, 255, 255), 2); 114 | 115 | //print the result 116 | printf("face %d: confidence=%d, [%d, %d, %d, %d] (%d,%d) (%d,%d) (%d,%d) (%d,%d) (%d,%d)\n", 117 | i, confidence, x, y, w, h, 118 | p[5], p[6], p[7], p[8], p[9], p[10], p[11], p[12], p[13],p[14]); 119 | 120 | } 121 | imshow("result", result_image); 122 | 123 | waitKey(); 124 | 125 | //release the buffer 126 | free(pBuffer); 127 | 128 | return 0; 129 | } 130 | -------------------------------------------------------------------------------- /images/cnnresult.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/images/cnnresult.png -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/app.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 8 | 9 | 10 | 11 | 13 | 14 | 15 | 16 | 28 | 29 | 30 | 31 | 32 | 33 | 49 | 54 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 
90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | 178 | 179 | 180 | 181 | 182 | 183 | 184 | 185 | 186 | 187 | 188 | 189 | 190 | 191 | 192 | 193 | 194 | 195 | 196 | 197 | 198 | 199 | 200 | 201 | 202 | 203 | 204 | 205 | 206 | 207 | 208 | 209 | 210 | 211 | 212 | 213 | 214 | 215 | 216 | 217 | 218 | 219 | 220 | 221 | 222 | 223 | 224 | 225 | 226 | 227 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/build.gradle: -------------------------------------------------------------------------------- 1 | apply plugin: 'com.android.application' 2 | 3 | apply plugin: 'kotlin-android' 4 | 5 | apply plugin: 'kotlin-android-extensions' 6 | 7 | android { 8 | compileSdkVersion 28 9 | defaultConfig { 10 | applicationId "org.dp.facedetection" 11 | minSdkVersion 21 12 | targetSdkVersion 28 13 | versionCode 1 14 | versionName "1.0" 15 | testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner" 16 | externalNativeBuild { 17 | cmake { 18 | cppFlags "-fopenmp" 19 | arguments "-DANDROID_ARM_NEON=TRUE" 20 | abiFilters "armeabi-v7a","arm64-v8a" 21 | } 22 | } 23 | ndk{ 24 | abiFilters "armeabi-v7a","arm64-v8a" 25 | } 26 | } 27 | buildTypes { 28 | release { 29 | minifyEnabled false 30 | proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro' 31 | } 32 | } 33 | externalNativeBuild { 34 | cmake { 35 | path "src/main/cpp/CMakeLists.txt" 36 | } 37 | } 38 | } 39 | 40 | dependencies { 41 | implementation fileTree(dir: 'libs', 
include: ['*.jar']) 42 | implementation"org.jetbrains.kotlin:kotlin-stdlib-jdk8:$kotlin_version" 43 | implementation project(':opencv4') 44 | implementation 'androidx.appcompat:appcompat:1.3.0-alpha02' 45 | implementation 'androidx.core:core-ktx:1.5.0-alpha05' 46 | implementation 'androidx.constraintlayout:constraintlayout:2.0.4' 47 | testImplementation 'junit:junit:4.12' 48 | androidTestImplementation 'androidx.test:runner:1.3.1-alpha02' 49 | androidTestImplementation 'androidx.test.espresso:espresso-core:3.4.0-alpha02' 50 | } 51 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/proguard-rules.pro: -------------------------------------------------------------------------------- 1 | # Add project specific ProGuard rules here. 2 | # You can control the set of applied configuration files using the 3 | # proguardFiles setting in build.gradle. 4 | # 5 | # For more details, see 6 | # http://developer.android.com/guide/developing/tools/proguard.html 7 | 8 | # If your project uses WebView with JS, uncomment the following 9 | # and specify the fully qualified class name to the JavaScript interface 10 | # class: 11 | #-keepclassmembers class fqcn.of.javascript.interface.for.webview { 12 | # public *; 13 | #} 14 | 15 | # Uncomment this to preserve the line number information for 16 | # debugging stack traces. 17 | #-keepattributes SourceFile,LineNumberTable 18 | 19 | # If you keep the line number information, uncomment this to 20 | # hide the original source file name. 
21 | #-renamesourcefileattribute SourceFile 22 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/androidTest/java/org/dp/facedetection/ExampleInstrumentedTest.kt: -------------------------------------------------------------------------------- 1 | package org.dp.facedetection 2 | 3 | import androidx.test.InstrumentationRegistry 4 | import androidx.test.runner.AndroidJUnit4 5 | 6 | import org.junit.Test 7 | import org.junit.runner.RunWith 8 | 9 | import org.junit.Assert.* 10 | 11 | /** 12 | * Instrumented test, which will execute on an Android device. 13 | * 14 | * See [testing documentation](http://d.android.com/tools/testing). 15 | */ 16 | @RunWith(AndroidJUnit4::class) 17 | class ExampleInstrumentedTest { 18 | @Test 19 | fun useAppContext() { 20 | // Context of the app under test. 21 | val appContext = InstrumentationRegistry.getTargetContext() 22 | assertEquals("org.dp.facedetection", appContext.packageName) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/AndroidManifest.xml: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/assets/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/app/src/main/assets/.DS_Store -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/assets/test.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/app/src/main/assets/test.jpg -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/assets/test2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/app/src/main/assets/test2.jpg -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # CMakeLists for libfacedetectcnn 2 | 3 | project(facedetection) 4 | 5 | cmake_minimum_required(VERSION 2.8) 6 | option(ENABLE_OPENCV "use opencv" ON) 7 | option(ENABLE_OPENMP "use openmp" ON) 8 | option(ENABLE_INT8 "use int8" ON) 9 | option(ENABLE_AVX2 "use avx2" OFF) 10 | option(ENABLE_AVX512 "use avx512" OFF) 11 | option(ENABLE_NEON "whether use neon, if use arm please set it on" ON) 12 | option(DEMO "build the demo" OFF) 13 | add_definitions("-O3") 14 | 15 | if (BUILD_SHARED_LIBS) 16 | add_definitions(-DBUILD_SHARED_LIBS) 17 | if (CMAKE_COMPILER_IS_GNUCXX AND NOT MINGW) 18 | # Just setting CMAKE_POSITION_INDEPENDENT_CODE should be enough to set 19 | # -fPIC for GCC but sometimes it still doesn't get set, so make sure it 20 | # does. 
21 | add_definitions("-fPIC") 22 | endif() 23 | set(CMAKE_POSITION_INDEPENDENT_CODE ON) 24 | endif() 25 | 26 | SET(fdt_base_dir ${PROJECT_SOURCE_DIR}) 27 | SET(fdt_src_dir ${fdt_base_dir}/../../../../../../../src/) 28 | SET(fdt_inc_dir ${fdt_base_dir}/../../../../../../../src/) 29 | 30 | SET(fdt_lib_name facedetection) 31 | SET(fdt_lib_static ${fdt_lib_name}) 32 | SET(fdt_lib_shared ${fdt_lib_name}_shared) 33 | 34 | FILE(GLOB_RECURSE fdt_source_files ${fdt_src_dir}/*.cpp) 35 | FILE(GLOB_RECURSE jni_source_files ${fdt_base_dir}/facedetectcnn-jni.cpp) 36 | 37 | LIST(SORT fdt_source_files) 38 | 39 | SET(INSTALLHEADER_FILES ${fdt_inc_dir}/facedetectcnn.h) 40 | 41 | set(CMAKE_CXX_STANDARD 11) 42 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 43 | set(CMAKE_CXX_EXTENSIONS OFF) 44 | 45 | if(ENABLE_OPENMP) 46 | message("using openmp") 47 | add_definitions(-D_OPENMP) 48 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") 49 | set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}") 50 | endif() 51 | 52 | if(ENABLE_INT8) 53 | message("using int8") 54 | add_definitions(-D_ENABLE_INT8) 55 | endif() 56 | 57 | IF(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" 58 | OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang") 59 | message("use -O3 to speedup") 60 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3") 61 | ENDIF() 62 | 63 | if(ENABLE_AVX512) 64 | add_definitions(-D_ENABLE_AVX512) 65 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx512bw") 66 | endif() 67 | 68 | if(ENABLE_AVX2) 69 | add_definitions(-D_ENABLE_AVX2) 70 | set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mavx2 -mfma") 71 | endif() 72 | 73 | if(ENABLE_NEON) 74 | message("Using NEON") 75 | add_definitions(-D_ENABLE_NEON) 76 | endif() 77 | 78 | INCLUDE_DIRECTORIES(${fdt_inc_dir}) 79 | 80 | 81 | # Create a static library (.a) 82 | ADD_LIBRARY(${fdt_lib_static} STATIC ${fdt_source_files} ${jni_source_files}) 83 | 84 | # Create a shared library (.so) 85 | ADD_LIBRARY(${fdt_lib_shared} SHARED ${fdt_source_files} 
${jni_source_files}) 86 | SET_TARGET_PROPERTIES(${fdt_lib_shared} PROPERTIES OUTPUT_NAME "${fdt_lib_name}") 87 | SET_TARGET_PROPERTIES(${fdt_lib_shared} PROPERTIES PREFIX "lib") 88 | 89 | # Create demo. OpenCV is requred. 90 | if (ENABLE_OPENCV) 91 | SET(OpenCV_INCLUDE_DIRS ${PROJECT_SOURCE_DIR}/../../../../OpenCV-android-sdk/sdk/native/jni/include) 92 | SET(OpenCV_SHARED_LIBS ${PROJECT_SOURCE_DIR}/../../../../OpenCV-android-sdk/sdk/native/libs/${ANDROID_ABI}) 93 | SET(OpenCV_STATIC_LIBS ${PROJECT_SOURCE_DIR}/../../../../OpenCV-android-sdk/sdk/native/staticlibs/${ANDROID_ABI}) 94 | 95 | # find_package(OpenCV REQUIRED) 96 | include_directories(${OpenCV_INCLUDE_DIRS}) 97 | 98 | add_library(libopencv_java4 SHARED IMPORTED ) 99 | set_target_properties(libopencv_java4 PROPERTIES 100 | IMPORTED_LOCATION "${OpenCV_SHARED_LIBS}/libopencv_java4.so") 101 | 102 | # add_library(libopencv_calib3d STATIC IMPORTED ) 103 | # set_target_properties(libopencv_calib3d PROPERTIES 104 | # IMPORTED_LOCATION "${OpenCV_STATIC_LIBS}/libopencv_calib3d.a") 105 | # 106 | # add_library(libopencv_core STATIC IMPORTED ) 107 | # set_target_properties(libopencv_core PROPERTIES 108 | # IMPORTED_LOCATION "${OpenCV_STATIC_LIBS}/libopencv_core.a") 109 | # 110 | # add_library(libopencv_dnn STATIC IMPORTED ) 111 | # set_target_properties(libopencv_dnn PROPERTIES 112 | # IMPORTED_LOCATION "${OpenCV_STATIC_LIBS}/libopencv_dnn.a") 113 | # 114 | # add_library(libopencv_features2d STATIC IMPORTED ) 115 | # set_target_properties(libopencv_features2d PROPERTIES 116 | # IMPORTED_LOCATION "${OpenCV_STATIC_LIBS}/libopencv_features2d.a") 117 | # 118 | # add_library(libopencv_flann STATIC IMPORTED ) 119 | # set_target_properties(libopencv_flann PROPERTIES 120 | # IMPORTED_LOCATION "${OpenCV_STATIC_LIBS}/libopencv_flann.a") 121 | # 122 | # add_library(libopencv_highgui STATIC IMPORTED ) 123 | # set_target_properties(libopencv_highgui PROPERTIES 124 | # IMPORTED_LOCATION "${OpenCV_STATIC_LIBS}/libopencv_highgui.a") 
125 | # 126 | # add_library(libopencv_imgcodecs STATIC IMPORTED ) 127 | # set_target_properties(libopencv_imgcodecs PROPERTIES 128 | # IMPORTED_LOCATION "${OpenCV_STATIC_LIBS}/libopencv_imgcodecs.a") 129 | # 130 | # add_library(libopencv_imgproc STATIC IMPORTED ) 131 | # set_target_properties(libopencv_imgproc PROPERTIES 132 | # IMPORTED_LOCATION "${OpenCV_STATIC_LIBS}/libopencv_imgproc.a") 133 | # 134 | # add_library(libopencv_ml STATIC IMPORTED ) 135 | # set_target_properties(libopencv_ml PROPERTIES 136 | # IMPORTED_LOCATION "${OpenCV_STATIC_LIBS}/libopencv_ml.a") 137 | # 138 | # add_library(libopencv_objdetect STATIC IMPORTED ) 139 | # set_target_properties(libopencv_objdetect PROPERTIES 140 | # IMPORTED_LOCATION "${OpenCV_STATIC_LIBS}/libopencv_objdetect.a") 141 | # 142 | # add_library(libopencv_photo STATIC IMPORTED ) 143 | # set_target_properties(libopencv_photo PROPERTIES 144 | # IMPORTED_LOCATION "${OpenCV_STATIC_LIBS}/libopencv_photo.a") 145 | # 146 | # add_library(libopencv_stitching STATIC IMPORTED ) 147 | # set_target_properties(libopencv_stitching PROPERTIES 148 | # IMPORTED_LOCATION "${OpenCV_STATIC_LIBS}/libopencv_stitching.a") 149 | # 150 | # add_library(libopencv_video STATIC IMPORTED ) 151 | # set_target_properties(libopencv_video PROPERTIES 152 | # IMPORTED_LOCATION "${OpenCV_STATIC_LIBS}/libopencv_video.a") 153 | # 154 | # add_library(libopencv_videoio STATIC IMPORTED ) 155 | # set_target_properties(libopencv_videoio PROPERTIES 156 | # IMPORTED_LOCATION "${OpenCV_STATIC_LIBS}/libopencv_videoio.a") 157 | find_library( # Sets the name of the path variable. 158 | log-lib 159 | 160 | # Specifies the name of the NDK library that 161 | # you want CMake to locate. 
162 | log) 163 | target_link_libraries( 164 | ${fdt_lib_shared} 165 | # ${fdt_lib_static} 166 | libopencv_java4 167 | #编译静态 opencv 需要依赖的静态链接库 168 | # libopencv_calib3d 169 | # libopencv_core 170 | # libopencv_dnn 171 | # libopencv_features2d 172 | # libopencv_flann 173 | # libopencv_highgui 174 | # libopencv_imgcodecs 175 | # libopencv_imgproc 176 | # libopencv_ml 177 | # libopencv_objdetect 178 | # libopencv_photo 179 | # libopencv_stitching 180 | # libopencv_video 181 | # libopencv_videoio 182 | ${log-lib} 183 | ) 184 | endif() -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/cpp/facedetectcnn-jni.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "facedetectcnn.h" 5 | #include 6 | 7 | //define the buffer size. Do not change the size! 8 | #define DETECT_BUFFER_SIZE 0x20000 9 | using namespace cv; 10 | 11 | extern "C" { 12 | char *JNITag = const_cast("facedetection-jni"); 13 | 14 | JNIEXPORT jobjectArray JNICALL 15 | Java_org_dp_facedetection_MainActivity_facedetect(JNIEnv *env,jobject /* this */,jlong matAddr) 16 | { 17 | jobjectArray faceArgs = nullptr; 18 | Mat& img = *(Mat*)matAddr; 19 | Mat bgr = img.clone(); 20 | cvtColor(img, bgr, COLOR_RGBA2BGR); 21 | __android_log_print(ANDROID_LOG_ERROR, JNITag,"convert RGBA to BGR"); 22 | //load an image and convert it to gray (single-channel) 23 | if(bgr.empty()) 24 | { 25 | fprintf(stderr, "Can not convert image"); 26 | return faceArgs; 27 | } 28 | 29 | int * pResults = NULL; 30 | //pBuffer is used in the detection functions. 31 | //If you call functions in multiple threads, please create one buffer for each thread! 
32 | unsigned char * pBuffer = (unsigned char *)malloc(DETECT_BUFFER_SIZE); 33 | if(!pBuffer) 34 | { 35 | fprintf(stderr, "Can not alloc buffer.\n"); 36 | return faceArgs; 37 | } 38 | 39 | 40 | /////////////////////////////////////////// 41 | // CNN face detection 42 | // Best detection rate 43 | ////////////////////////////////////////// 44 | //!!! The input image must be a BGR one (three-channel) 45 | //!!! DO NOT RELEASE pResults !!! 46 | pResults = facedetect_cnn(pBuffer, (unsigned char*)(bgr.ptr(0)), bgr.cols, bgr.rows, (int)bgr.step); 47 | int numOfFaces = pResults ? *pResults : 0; 48 | __android_log_print(ANDROID_LOG_ERROR, JNITag,"%d faces detected.\n", numOfFaces); 49 | 50 | /** 51 | * 获取Face类以及其对于参数的签名 52 | */ 53 | jclass faceClass = env->FindClass("org/dp/facedetection/Face");//获取Face类 54 | jmethodID faceClassInitID = (env)->GetMethodID(faceClass, "", "()V"); 55 | jfieldID faceConfidence = env->GetFieldID(faceClass, "faceConfidence", "I");//获取int类型参数confidence 56 | jfieldID faceLandmarks = env->GetFieldID(faceClass, "faceLandmarks", "[Lorg/opencv/core/Point;");//获取List类型参数landmarks 57 | jfieldID faceRect = env->GetFieldID(faceClass, "faceRect","Lorg/opencv/core/Rect;");//获取faceRect签名 58 | /** 59 | * 获取RECT类以及对应参数的签名 60 | */ 61 | jclass rectClass = env->FindClass("org/opencv/core/Rect");//获取到RECT类 62 | jmethodID rectClassInitID = (env)->GetMethodID(rectClass, "", "()V"); 63 | jfieldID rect_x = env->GetFieldID(rectClass, "x", "I");//获取x的签名 64 | jfieldID rect_y = env->GetFieldID(rectClass, "y", "I");//获取y的签名 65 | jfieldID rect_width = env->GetFieldID(rectClass, "width", "I");//获取width的签名 66 | jfieldID rect_height = env->GetFieldID(rectClass, "height", "I");//获取height的签名 67 | 68 | /** 69 | * 获取Point类以及对应参数的签名 70 | */ 71 | jclass pointClass = env->FindClass("org/opencv/core/Point");//获取到Point类 72 | jmethodID pointClassInitID = (env)->GetMethodID(pointClass, "", "()V"); 73 | jfieldID point_x = env->GetFieldID(pointClass, "x", "D");//获取x的签名 74 | jfieldID 
point_y = env->GetFieldID(pointClass, "y", "D");//获取y的签名 75 | 76 | 77 | faceArgs = (env)->NewObjectArray(numOfFaces, faceClass, 0); 78 | //print the detection results 79 | for(int i = 0; i < (pResults ? *pResults : 0); i++) 80 | { 81 | short * p = ((short*)(pResults+1))+142*i; 82 | int confidence = p[0]; 83 | int x = p[1]; 84 | int y = p[2]; 85 | int w = p[3]; 86 | int h = p[4]; 87 | 88 | __android_log_print(ANDROID_LOG_ERROR, JNITag,"face %d rect=[%d, %d, %d, %d], confidence=%d\n",i,x,y,w,h,confidence); 89 | jobject newFace = (env)->NewObject(faceClass, faceClassInitID); 90 | jobject newRect = (env)->NewObject(rectClass, rectClassInitID); 91 | 92 | (env)->SetIntField(newRect, rect_x, x); 93 | (env)->SetIntField(newRect, rect_y, y); 94 | (env)->SetIntField(newRect, rect_width, w); 95 | (env)->SetIntField(newRect, rect_height, h); 96 | (env)->SetObjectField(newFace,faceRect,newRect); 97 | env->DeleteLocalRef(newRect); 98 | 99 | jobjectArray newPoints = (env)->NewObjectArray(5, pointClass, 0); 100 | for (int j = 5; j < 14; j += 2){ 101 | int p_x = p[j]; 102 | int p_y = p[j+1]; 103 | jobject newPoint = (env)->NewObject(pointClass, pointClassInitID); 104 | (env)->SetDoubleField(newPoint, point_x, (double)p_x); 105 | (env)->SetDoubleField(newPoint, point_y, (double)p_y); 106 | int index = (j-5)/2; 107 | (env)->SetObjectArrayElement(newPoints, index, newPoint); 108 | env->DeleteLocalRef(newPoint); 109 | __android_log_print(ANDROID_LOG_ERROR, JNITag,"landmark %d =[%f, %f]\n",index,(double)p_x,(double)p_y); 110 | } 111 | (env)->SetObjectField(newFace,faceLandmarks,newPoints); 112 | env->DeleteLocalRef(newPoints); 113 | 114 | (env)->SetIntField(newFace,faceConfidence,confidence); 115 | 116 | (env)->SetObjectArrayElement(faceArgs, i, newFace); 117 | env->DeleteLocalRef(newFace); 118 | 119 | } 120 | 121 | //release the buffer 122 | free(pBuffer); 123 | 124 | return faceArgs; 125 | } 126 | } -------------------------------------------------------------------------------- 
/mobile/Android/FaceDetection/app/src/main/java/org/dp/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/app/src/main/java/org/dp/.DS_Store -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/java/org/dp/facedetection/Face.java: -------------------------------------------------------------------------------- 1 | package org.dp.facedetection; 2 | 3 | import org.opencv.core.Point; 4 | 5 | import org.opencv.core.Rect; 6 | 7 | 8 | /** 9 | * Stay Hungry Stay Foolish 10 | * Author: dp on 2019/3/25 12:51 11 | */ 12 | public class Face { 13 | public Rect faceRect; 14 | public int faceConfidence; 15 | public Point[] faceLandmarks; 16 | } 17 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/java/org/dp/facedetection/MainActivity.kt: -------------------------------------------------------------------------------- 1 | package org.dp.facedetection 2 | 3 | import android.graphics.Bitmap 4 | import android.graphics.BitmapFactory 5 | import androidx.appcompat.app.AppCompatActivity 6 | import android.os.Bundle 7 | import kotlinx.android.synthetic.main.activity_main.* 8 | import org.opencv.android.Utils 9 | import org.opencv.core.MatOfRect 10 | import org.opencv.core.Point 11 | import org.opencv.core.Scalar 12 | import org.opencv.imgproc.Imgproc 13 | import org.opencv.imgproc.Imgproc.FONT_HERSHEY_SIMPLEX 14 | import java.io.IOException 15 | import kotlin.math.PI 16 | 17 | class MainActivity : AppCompatActivity() { 18 | override fun onCreate(savedInstanceState: Bundle?) 
{ 19 | super.onCreate(savedInstanceState) 20 | setContentView(R.layout.activity_main) 21 | 22 | // Example of a call to facedetect method 23 | testFacedetect() 24 | } 25 | 26 | fun testFacedetect() { 27 | val bmp = getImageFromAssets("test2.jpg") ?: return 28 | var str = "image size = ${bmp.width}x${bmp.height}\n" 29 | imageView.setImageBitmap(bmp) 30 | val mat = MatOfRect() 31 | val bmp2 = bmp.copy(bmp.config, true) 32 | Utils.bitmapToMat(bmp, mat) 33 | val FACE_RECT_COLOR = Scalar(255.0, 0.0, 0.0) 34 | val FACE_RECT_THICKNESS = 3 35 | val TEXT_SIZE = 2.0 36 | val startTime = System.currentTimeMillis() 37 | val facesArray = facedetect(mat.nativeObjAddr) 38 | str = str + "detectTime = ${System.currentTimeMillis() - startTime}ms\n" 39 | for (face in facesArray) { 40 | val text_pos = Point(face.faceRect.x.toDouble() - FACE_RECT_THICKNESS,face.faceRect.y - FACE_RECT_THICKNESS.toDouble()) 41 | Imgproc.putText(mat,face.faceConfidence.toString(),text_pos,FONT_HERSHEY_SIMPLEX,TEXT_SIZE,FACE_RECT_COLOR) 42 | Imgproc.rectangle(mat, face.faceRect, FACE_RECT_COLOR, FACE_RECT_THICKNESS) 43 | for (landmark in face.faceLandmarks){ 44 | Imgproc.circle(mat, landmark, FACE_RECT_THICKNESS, FACE_RECT_COLOR,-1,Imgproc.LINE_AA) 45 | } 46 | } 47 | str = str + "face num = ${facesArray.size}\n" 48 | Utils.matToBitmap(mat, bmp2) 49 | imageView.setImageBitmap(bmp2) 50 | textView.text = str 51 | } 52 | 53 | /** 54 | * A native method that is implemented by the 'libfacedetection' native library, 55 | * which is packaged with this application. 56 | */ 57 | external fun facedetect(matAddr: Long): Array 58 | 59 | 60 | companion object { 61 | 62 | // Used to load the 'facedetection' library on application startup. 63 | init { 64 | System.loadLibrary("facedetection") 65 | } 66 | } 67 | 68 | 69 | /** 70 | * read from Assets 71 | */ 72 | private fun getImageFromAssets(fileName:String):Bitmap? 73 | { 74 | var image:Bitmap? 
= null 75 | try { 76 | val stream = resources.assets.open(fileName) 77 | image = BitmapFactory.decodeStream(stream) 78 | stream.close() 79 | } catch (e: IOException) { 80 | e.printStackTrace() 81 | }finally { 82 | return image 83 | } 84 | } 85 | } -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/app/src/main/res/.DS_Store -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/drawable-v24/ic_launcher_foreground.xml: -------------------------------------------------------------------------------- 1 | 7 | 12 | 13 | 19 | 22 | 25 | 26 | 27 | 28 | 34 | 35 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/drawable/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/app/src/main/res/drawable/.DS_Store -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/drawable/ic_launcher_background.xml: -------------------------------------------------------------------------------- 1 | 2 | 8 | 10 | 12 | 14 | 16 | 18 | 20 | 22 | 24 | 26 | 28 | 30 | 32 | 34 | 36 | 38 | 40 | 42 | 44 | 46 | 48 | 50 | 52 | 54 | 56 | 58 | 60 | 62 | 64 | 66 | 68 | 70 | 72 | 74 | 75 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/layout/activity_main.xml: -------------------------------------------------------------------------------- 1 | 2 | 9 
| 10 | 16 | 25 | 26 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/mipmap-hdpi/ic_launcher.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/app/src/main/res/mipmap-hdpi/ic_launcher.png -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/mipmap-hdpi/ic_launcher_round.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/app/src/main/res/mipmap-hdpi/ic_launcher_round.png -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/mipmap-mdpi/ic_launcher.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/app/src/main/res/mipmap-mdpi/ic_launcher.png -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/mipmap-mdpi/ic_launcher_round.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/app/src/main/res/mipmap-mdpi/ic_launcher_round.png -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/mipmap-xhdpi/ic_launcher.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/app/src/main/res/mipmap-xhdpi/ic_launcher.png -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/mipmap-xxhdpi/ic_launcher.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/app/src/main/res/mipmap-xxhdpi/ic_launcher.png -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png 
-------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/values/colors.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | #008577 4 | #00574B 5 | #D81B60 6 | 7 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/values/strings.xml: -------------------------------------------------------------------------------- 1 | 2 | Facedetection 3 | 4 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/main/res/values/styles.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/app/src/test/java/org/dp/facedetection/ExampleUnitTest.kt: -------------------------------------------------------------------------------- 1 | package org.dp.facedetection 2 | 3 | import org.junit.Test 4 | 5 | import org.junit.Assert.* 6 | 7 
| /** 8 | * Example local unit test, which will execute on the development machine (host). 9 | * 10 | * See [testing documentation](http://d.android.com/tools/testing). 11 | */ 12 | class ExampleUnitTest { 13 | @Test 14 | fun addition_isCorrect() { 15 | assertEquals(4, 2 + 2) 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/build.gradle: -------------------------------------------------------------------------------- 1 | // Top-level build file where you can add configuration options common to all sub-projects/modules. 2 | 3 | buildscript { 4 | ext.kotlin_version = '1.3.72' 5 | repositories { 6 | google() 7 | jcenter() 8 | 9 | } 10 | dependencies { 11 | classpath 'com.android.tools.build:gradle:3.4.3' 12 | classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version" 13 | // NOTE: Do not place your application dependencies here; they belong 14 | // in the individual module build.gradle files 15 | } 16 | } 17 | 18 | allprojects { 19 | repositories { 20 | google() 21 | jcenter() 22 | 23 | } 24 | } 25 | 26 | task clean(type: Delete) { 27 | delete rootProject.buildDir 28 | } 29 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/gradle.properties: -------------------------------------------------------------------------------- 1 | # Project-wide Gradle settings. 2 | # IDE (e.g. Android Studio) users: 3 | # Gradle settings configured through the IDE *will override* 4 | # any settings specified in this file. 5 | # For more details on how to configure your build environment visit 6 | # http://www.gradle.org/docs/current/userguide/build_environment.html 7 | # Specifies the JVM arguments used for the daemon process. 8 | # The setting is particularly useful for tweaking memory settings. 9 | org.gradle.jvmargs=-Xmx1536m 10 | # When configured, Gradle will run in incubating parallel mode. 
11 | # This option should only be used with decoupled projects. More details, visit 12 | # http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects 13 | # org.gradle.parallel=true 14 | # AndroidX package structure to make it clearer which packages are bundled with the 15 | # Android operating system, and which are packaged with your app's APK 16 | # https://developer.android.com/topic/libraries/support-library/androidx-rn 17 | android.useAndroidX=true 18 | # Automatically convert third-party libraries to use AndroidX 19 | android.enableJetifier=true 20 | # Kotlin code style for this project: "official" or "obsolete": 21 | kotlin.code.style=official 22 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/gradle/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/gradle/.DS_Store -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/gradle/wrapper/gradle-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/gradle/wrapper/gradle-wrapper.jar -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/gradle/wrapper/gradle-wrapper.properties: -------------------------------------------------------------------------------- 1 | #Mon Mar 18 13:30:38 CST 2019 2 | distributionBase=GRADLE_USER_HOME 3 | distributionPath=wrapper/dists 4 | zipStoreBase=GRADLE_USER_HOME 5 | zipStorePath=wrapper/dists 6 | distributionUrl=https\://services.gradle.org/distributions/gradle-5.1.1-all.zip 7 | 
-------------------------------------------------------------------------------- /mobile/Android/FaceDetection/gradlew: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | ############################################################################## 4 | ## 5 | ## Gradle start up script for UN*X 6 | ## 7 | ############################################################################## 8 | 9 | # Attempt to set APP_HOME 10 | # Resolve links: $0 may be a link 11 | PRG="$0" 12 | # Need this for relative symlinks. 13 | while [ -h "$PRG" ] ; do 14 | ls=`ls -ld "$PRG"` 15 | link=`expr "$ls" : '.*-> \(.*\)$'` 16 | if expr "$link" : '/.*' > /dev/null; then 17 | PRG="$link" 18 | else 19 | PRG=`dirname "$PRG"`"/$link" 20 | fi 21 | done 22 | SAVED="`pwd`" 23 | cd "`dirname \"$PRG\"`/" >/dev/null 24 | APP_HOME="`pwd -P`" 25 | cd "$SAVED" >/dev/null 26 | 27 | APP_NAME="Gradle" 28 | APP_BASE_NAME=`basename "$0"` 29 | 30 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 31 | DEFAULT_JVM_OPTS="" 32 | 33 | # Use the maximum available, or set MAX_FD != -1 to use that value. 34 | MAX_FD="maximum" 35 | 36 | warn () { 37 | echo "$*" 38 | } 39 | 40 | die () { 41 | echo 42 | echo "$*" 43 | echo 44 | exit 1 45 | } 46 | 47 | # OS specific support (must be 'true' or 'false'). 48 | cygwin=false 49 | msys=false 50 | darwin=false 51 | nonstop=false 52 | case "`uname`" in 53 | CYGWIN* ) 54 | cygwin=true 55 | ;; 56 | Darwin* ) 57 | darwin=true 58 | ;; 59 | MINGW* ) 60 | msys=true 61 | ;; 62 | NONSTOP* ) 63 | nonstop=true 64 | ;; 65 | esac 66 | 67 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar 68 | 69 | # Determine the Java command to use to start the JVM. 
70 | if [ -n "$JAVA_HOME" ] ; then 71 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 72 | # IBM's JDK on AIX uses strange locations for the executables 73 | JAVACMD="$JAVA_HOME/jre/sh/java" 74 | else 75 | JAVACMD="$JAVA_HOME/bin/java" 76 | fi 77 | if [ ! -x "$JAVACMD" ] ; then 78 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 79 | 80 | Please set the JAVA_HOME variable in your environment to match the 81 | location of your Java installation." 82 | fi 83 | else 84 | JAVACMD="java" 85 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 86 | 87 | Please set the JAVA_HOME variable in your environment to match the 88 | location of your Java installation." 89 | fi 90 | 91 | # Increase the maximum file descriptors if we can. 92 | if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then 93 | MAX_FD_LIMIT=`ulimit -H -n` 94 | if [ $? -eq 0 ] ; then 95 | if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then 96 | MAX_FD="$MAX_FD_LIMIT" 97 | fi 98 | ulimit -n $MAX_FD 99 | if [ $? 
-ne 0 ] ; then 100 | warn "Could not set maximum file descriptor limit: $MAX_FD" 101 | fi 102 | else 103 | warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" 104 | fi 105 | fi 106 | 107 | # For Darwin, add options to specify how the application appears in the dock 108 | if $darwin; then 109 | GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" 110 | fi 111 | 112 | # For Cygwin, switch paths to Windows format before running java 113 | if $cygwin ; then 114 | APP_HOME=`cygpath --path --mixed "$APP_HOME"` 115 | CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` 116 | JAVACMD=`cygpath --unix "$JAVACMD"` 117 | 118 | # We build the pattern for arguments to be converted via cygpath 119 | ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` 120 | SEP="" 121 | for dir in $ROOTDIRSRAW ; do 122 | ROOTDIRS="$ROOTDIRS$SEP$dir" 123 | SEP="|" 124 | done 125 | OURCYGPATTERN="(^($ROOTDIRS))" 126 | # Add a user-defined pattern to the cygpath arguments 127 | if [ "$GRADLE_CYGPATTERN" != "" ] ; then 128 | OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" 129 | fi 130 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 131 | i=0 132 | for arg in "$@" ; do 133 | CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` 134 | CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option 135 | 136 | if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition 137 | eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` 138 | else 139 | eval `echo args$i`="\"$arg\"" 140 | fi 141 | i=$((i+1)) 142 | done 143 | case $i in 144 | (0) set -- ;; 145 | (1) set -- "$args0" ;; 146 | (2) set -- "$args0" "$args1" ;; 147 | (3) set -- "$args0" "$args1" "$args2" ;; 148 | (4) set -- "$args0" "$args1" "$args2" "$args3" ;; 149 | (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; 150 | (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; 151 | (7) set -- "$args0" "$args1" "$args2" "$args3" 
"$args4" "$args5" "$args6" ;; 152 | (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; 153 | (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; 154 | esac 155 | fi 156 | 157 | # Escape application args 158 | save () { 159 | for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done 160 | echo " " 161 | } 162 | APP_ARGS=$(save "$@") 163 | 164 | # Collect all arguments for the java command, following the shell quoting and substitution rules 165 | eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" 166 | 167 | # by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong 168 | if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then 169 | cd "$(dirname "$0")" 170 | fi 171 | 172 | exec "$JAVACMD" "$@" 173 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/gradlew.bat: -------------------------------------------------------------------------------- 1 | @if "%DEBUG%" == "" @echo off 2 | @rem ########################################################################## 3 | @rem 4 | @rem Gradle startup script for Windows 5 | @rem 6 | @rem ########################################################################## 7 | 8 | @rem Set local scope for the variables with windows NT shell 9 | if "%OS%"=="Windows_NT" setlocal 10 | 11 | set DIRNAME=%~dp0 12 | if "%DIRNAME%" == "" set DIRNAME=. 13 | set APP_BASE_NAME=%~n0 14 | set APP_HOME=%DIRNAME% 15 | 16 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 
17 | set DEFAULT_JVM_OPTS= 18 | 19 | @rem Find java.exe 20 | if defined JAVA_HOME goto findJavaFromJavaHome 21 | 22 | set JAVA_EXE=java.exe 23 | %JAVA_EXE% -version >NUL 2>&1 24 | if "%ERRORLEVEL%" == "0" goto init 25 | 26 | echo. 27 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 28 | echo. 29 | echo Please set the JAVA_HOME variable in your environment to match the 30 | echo location of your Java installation. 31 | 32 | goto fail 33 | 34 | :findJavaFromJavaHome 35 | set JAVA_HOME=%JAVA_HOME:"=% 36 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe 37 | 38 | if exist "%JAVA_EXE%" goto init 39 | 40 | echo. 41 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 42 | echo. 43 | echo Please set the JAVA_HOME variable in your environment to match the 44 | echo location of your Java installation. 45 | 46 | goto fail 47 | 48 | :init 49 | @rem Get command-line arguments, handling Windows variants 50 | 51 | if not "%OS%" == "Windows_NT" goto win9xME_args 52 | 53 | :win9xME_args 54 | @rem Slurp the command line arguments. 55 | set CMD_LINE_ARGS= 56 | set _SKIP=2 57 | 58 | :win9xME_args_slurp 59 | if "x%~1" == "x" goto execute 60 | 61 | set CMD_LINE_ARGS=%* 62 | 63 | :execute 64 | @rem Setup the command line 65 | 66 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar 67 | 68 | @rem Execute Gradle 69 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% 70 | 71 | :end 72 | @rem End local scope for the variables with windows NT shell 73 | if "%ERRORLEVEL%"=="0" goto mainEnd 74 | 75 | :fail 76 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of 77 | rem the _cmd.exe /c_ return code! 
78 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 79 | exit /b 1 80 | 81 | :mainEnd 82 | if "%OS%"=="Windows_NT" endlocal 83 | 84 | :omega 85 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/local.properties: -------------------------------------------------------------------------------- 1 | ## This file must *NOT* be checked into Version Control Systems, 2 | # as it contains information specific to your local configuration. 3 | # 4 | # Location of the SDK. This is only used by Gradle. 5 | # For customization when using a Version Control System, please read the 6 | # header note. 7 | #Mon Mar 18 13:30:40 CST 2019 8 | ndk.dir=/Users/dp/Library/Android/sdk/ndk-bundle 9 | sdk.dir=/Users/dp/Library/Android/sdk 10 | -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/release/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/release/.DS_Store -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/release/facedetection.apk: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/FaceDetection/release/facedetection.apk -------------------------------------------------------------------------------- /mobile/Android/FaceDetection/settings.gradle: -------------------------------------------------------------------------------- 1 | include ':app' 2 | 3 | def opencv_sdk='./OpenCV-android-sdk/sdk' 4 | include ':opencv4' 5 | project(':opencv4').projectDir = new File(opencv_sdk) -------------------------------------------------------------------------------- /mobile/Android/screenshot1.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/screenshot1.jpg -------------------------------------------------------------------------------- /mobile/Android/screenshot2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/Android/screenshot2.jpg -------------------------------------------------------------------------------- /mobile/README.md: -------------------------------------------------------------------------------- 1 | ## Mobile support for iOS and Android. 2 | 3 | 4 | ## iOS 5 | 6 | Updated for the latest libfacedetection! I tried it on iOS and it ran successfully. 7 | 8 | 1. Download or clone this lib to your computer; 9 | 2. Create a new Xcode project; 10 | 3. Add this lib's src to your project; 11 | 4. Add the system lib `libc++.tbd` and any other frameworks you need (e.g. AVFoundation.framework etc.); 12 | 5. Download `opencv2.framework` and add it to your project; 13 | 6. Follow the example file to write the code. 14 | 15 | **!!!** 16 | 1. modify `facedetectcnn.h` 17 | ```objc 18 | //#define _ENABLE_AVX2 //Please enable it if X64 CPU 19 | #define _ENABLE_NEON //Please enable it if ARM CPU 20 | //#include "facedetection_export.h" 21 | #define FACEDETECTION_EXPORT 22 | ``` 23 | 24 | 2. modify `.m` to `.mm` 25 | 3. import lib in your `.mm` 26 | ```objc 27 | #import 28 | #import 29 | #import "ViewController.h" 30 | #import "facedetectcnn.h" 31 | ``` 32 | 4. modify 33 | > you must import the `opencv2/opencv.hpp` first !!! 34 | 35 | 36 | MyCode: 37 | ```objc 38 | #import 39 | #import 40 | #import "ViewController.h" 41 | #import "facedetectcnn.h" 42 | 43 | //define the buffer size. Do not change the size!
44 | #define DETECT_BUFFER_SIZE 0x20000 45 | using namespace cv; 46 | 47 | @implementation ViewController 48 | 49 | - (UIImage *)loadImageAndDectect:(const char *)image_file{ 50 | Mat img = imread(image_file); 51 | if (img.empty()) { 52 | fprintf(stderr, "Can not load the image file %s.\n", image_file); 53 | return nil; 54 | } 55 | 56 | int *pResults = NULL; 57 | unsigned char * pBuffer = (unsigned char *)malloc(DETECT_BUFFER_SIZE); 58 | if (!pBuffer) { 59 | fprintf(stderr, "Can not alloc buffer.\n"); 60 | return nil; 61 | } 62 | pResults = facedetect_cnn(pBuffer, (unsigned char *)(img.ptr(0)), img.cols, img.rows, (int)img.step); 63 | printf("%d faces detected.\n", (pResults ? *pResults : 0)); 64 | Mat result_cnn = img.clone();; 65 | //print the detection results 66 | for(int i = 0; i < (pResults ? *pResults : 0); i++) 67 | { 68 | short * p = ((short*)(pResults+1))+142*i; 69 | int confidence = p[0]; 70 | int x = p[1]; 71 | int y = p[2]; 72 | int w = p[3]; 73 | int h = p[4]; 74 | 75 | printf("face%drect=[%d, %d, %d, %d], confidence=%d\n",i,x,y,w,h,confidence); 76 | rectangle(result_cnn, cv::Rect(x, y, w, h), Scalar(0, 255, 0), 2); 77 | string str = to_string(confidence); 78 | putText(result_cnn, str, cv::Point(x-2,y-2), cv::FONT_HERSHEY_SIMPLEX, 1.0,Scalar(0, 255, 0)); 79 | for (int j = 5; j < 14; j += 2) { 80 | int p_x = p[j]; 81 | int p_y = p[j+1]; 82 | printf("landmark%d=[%d, %d]\n",(j-5)/2,p_x,p_y); 83 | circle(result_cnn, cv::Point(p_x,p_y), 2,Scalar(255, 0, 0),-1,cv::LINE_AA); 84 | } 85 | } 86 | 87 | free(pBuffer); 88 | 89 | return MatToUIImage(result_cnn); 90 | } 91 | 92 | 93 | - (void)viewDidLoad { 94 | [super viewDidLoad]; 95 | // Do any additional setup after loading the view, typically from a nib. 
96 | 97 | UIImageView *imageView = [[UIImageView alloc] initWithFrame:[UIScreen mainScreen].bounds]; 98 | imageView.contentMode = UIViewContentModeScaleAspectFit; 99 | [self.view addSubview:imageView]; 100 | 101 | NSString *path = [[NSBundle mainBundle] pathForResource:@"test" ofType:@".jpg"]; 102 | 103 | imageView.image = [self loadImageAndDectect:[path UTF8String]]; 104 | } 105 | 106 | 107 | @end 108 | ``` 109 | ![](https://raw.githubusercontent.com/dpmaycry/libfacedetection/master/mobile/iOS/screenshot1.png) 110 | 111 | ![](https://raw.githubusercontent.com/dpmaycry/libfacedetection/master/mobile/iOS/screenshot2.png) 112 | 113 | ## Android 114 | 115 | This ports the latest libfacedetection to Android; it runs successfully and simplifies the use of OpenCV to only 3 steps. 116 | 117 | An APK is also provided in the `Android/FaceDetection/release` folder, so you can simply install it on your Android device to test it. 118 | 119 | The CMakeLists.txt has been modified for Android and configured for OpenCV, so all you need to do is add OpenCV for Android to it and run it. 120 | 121 | Here are the steps for developers: 122 | 123 | 1. Clone this project and make sure CMake, NDK and LLDB (if you need to debug C++ code) are installed. 124 | 2. Download the OpenCV SDK for Android from [OpenCV-release](https://opencv.org/releases.html) and unzip `OpenCV-android-sdk` to the root dir of this project. 125 | 3. modify `facedetectcnn.h` 126 | 127 | ```c++ 128 | //#define _ENABLE_AVX2 //Please enable it if X64 CPU 129 | #define _ENABLE_NEON //Please enable it if ARM CPU 130 | //#include "facedetection_export.h" 131 | #define FACEDETECTION_EXPORT 132 | ``` 133 | 4. Run it!
134 | 135 | ![](https://raw.githubusercontent.com/dpmaycry/libfacedetection/master/mobile/Android/screenshot1.jpg) 136 | 137 | ![](https://raw.githubusercontent.com/dpmaycry/libfacedetection/master/mobile/Android/screenshot2.jpg) -------------------------------------------------------------------------------- /mobile/iOS/FaceDetection.xcodeproj/project.pbxproj: -------------------------------------------------------------------------------- 1 | // !$*UTF8*$! 2 | { 3 | archiveVersion = 1; 4 | classes = { 5 | }; 6 | objectVersion = 50; 7 | objects = { 8 | 9 | /* Begin PBXBuildFile section */ 10 | 00E75F61223B3E9C000D9FE7 /* AppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = 00E75F60223B3E9C000D9FE7 /* AppDelegate.m */; }; 11 | 00E75F64223B3E9C000D9FE7 /* ViewController.mm in Sources */ = {isa = PBXBuildFile; fileRef = 00E75F63223B3E9C000D9FE7 /* ViewController.mm */; }; 12 | 00E75F67223B3E9C000D9FE7 /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 00E75F65223B3E9C000D9FE7 /* Main.storyboard */; }; 13 | 00E75F69223B3E9D000D9FE7 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 00E75F68223B3E9D000D9FE7 /* Assets.xcassets */; }; 14 | 00E75F6C223B3E9D000D9FE7 /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 00E75F6A223B3E9D000D9FE7 /* LaunchScreen.storyboard */; }; 15 | 00E75F6F223B3E9D000D9FE7 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 00E75F6E223B3E9D000D9FE7 /* main.m */; }; 16 | 00E75F7C223B3EAC000D9FE7 /* facedetectcnn.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 00E75F77223B3EAC000D9FE7 /* facedetectcnn.cpp */; }; 17 | 00E75F7E223B3EAC000D9FE7 /* facedetectcnn-model.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 00E75F7A223B3EAC000D9FE7 /* facedetectcnn-model.cpp */; }; 18 | 00E75F80223B443C000D9FE7 /* opencv2.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 00E75F7F223B443C000D9FE7 /* opencv2.framework */; }; 19 | 00E75F83223B4538000D9FE7 /* libc++.tbd in 
Frameworks */ = {isa = PBXBuildFile; fileRef = 00E75F82223B4538000D9FE7 /* libc++.tbd */; }; 20 | 00E75F85223B4540000D9FE7 /* AVFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 00E75F84223B4540000D9FE7 /* AVFoundation.framework */; }; 21 | 00E75F87223B4546000D9FE7 /* CoreImage.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 00E75F86223B4546000D9FE7 /* CoreImage.framework */; }; 22 | 00E75F89223B4552000D9FE7 /* CoreGraphics.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 00E75F88223B4552000D9FE7 /* CoreGraphics.framework */; }; 23 | 00E75F8B223B455A000D9FE7 /* Accelerate.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 00E75F8A223B455A000D9FE7 /* Accelerate.framework */; }; 24 | 00E75F8D223B49BF000D9FE7 /* test.jpg in Resources */ = {isa = PBXBuildFile; fileRef = 00E75F8C223B49BF000D9FE7 /* test.jpg */; }; 25 | E230684C26AD9EBD006D69F6 /* facedetectcnn-data.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E230684B26AD9EBD006D69F6 /* facedetectcnn-data.cpp */; }; 26 | E2597524259228D1000B0C45 /* facedetectcnn.h in Sources */ = {isa = PBXBuildFile; fileRef = 00E75F78223B3EAC000D9FE7 /* facedetectcnn.h */; }; 27 | /* End PBXBuildFile section */ 28 | 29 | /* Begin PBXFileReference section */ 30 | 00E75F5C223B3E9C000D9FE7 /* FaceDetection.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = FaceDetection.app; sourceTree = BUILT_PRODUCTS_DIR; }; 31 | 00E75F5F223B3E9C000D9FE7 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = ""; }; 32 | 00E75F60223B3E9C000D9FE7 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = ""; }; 33 | 00E75F62223B3E9C000D9FE7 /* ViewController.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ViewController.h; sourceTree = ""; }; 34 | 00E75F63223B3E9C000D9FE7 /* ViewController.mm */ = 
{isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = ViewController.mm; sourceTree = ""; }; 35 | 00E75F66223B3E9C000D9FE7 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = ""; }; 36 | 00E75F68223B3E9D000D9FE7 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = ""; }; 37 | 00E75F6B223B3E9D000D9FE7 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/LaunchScreen.storyboard; sourceTree = ""; }; 38 | 00E75F6D223B3E9D000D9FE7 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; 39 | 00E75F6E223B3E9D000D9FE7 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = ""; }; 40 | 00E75F77223B3EAC000D9FE7 /* facedetectcnn.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = facedetectcnn.cpp; sourceTree = ""; }; 41 | 00E75F78223B3EAC000D9FE7 /* facedetectcnn.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = facedetectcnn.h; sourceTree = ""; }; 42 | 00E75F7A223B3EAC000D9FE7 /* facedetectcnn-model.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "facedetectcnn-model.cpp"; sourceTree = ""; }; 43 | 00E75F7F223B443C000D9FE7 /* opencv2.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; path = opencv2.framework; sourceTree = ""; }; 44 | 00E75F82223B4538000D9FE7 /* libc++.tbd */ = {isa = PBXFileReference; lastKnownFileType = "sourcecode.text-based-dylib-definition"; name = "libc++.tbd"; path = "usr/lib/libc++.tbd"; sourceTree = SDKROOT; }; 45 | 00E75F84223B4540000D9FE7 /* AVFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = 
AVFoundation.framework; path = System/Library/Frameworks/AVFoundation.framework; sourceTree = SDKROOT; }; 46 | 00E75F86223B4546000D9FE7 /* CoreImage.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreImage.framework; path = System/Library/Frameworks/CoreImage.framework; sourceTree = SDKROOT; }; 47 | 00E75F88223B4552000D9FE7 /* CoreGraphics.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = CoreGraphics.framework; path = System/Library/Frameworks/CoreGraphics.framework; sourceTree = SDKROOT; }; 48 | 00E75F8A223B455A000D9FE7 /* Accelerate.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Accelerate.framework; path = System/Library/Frameworks/Accelerate.framework; sourceTree = SDKROOT; }; 49 | 00E75F8C223B49BF000D9FE7 /* test.jpg */ = {isa = PBXFileReference; lastKnownFileType = image.jpeg; path = test.jpg; sourceTree = ""; }; 50 | E230684B26AD9EBD006D69F6 /* facedetectcnn-data.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "facedetectcnn-data.cpp"; sourceTree = ""; }; 51 | /* End PBXFileReference section */ 52 | 53 | /* Begin PBXFrameworksBuildPhase section */ 54 | 00E75F59223B3E9C000D9FE7 /* Frameworks */ = { 55 | isa = PBXFrameworksBuildPhase; 56 | buildActionMask = 2147483647; 57 | files = ( 58 | 00E75F8B223B455A000D9FE7 /* Accelerate.framework in Frameworks */, 59 | 00E75F89223B4552000D9FE7 /* CoreGraphics.framework in Frameworks */, 60 | 00E75F87223B4546000D9FE7 /* CoreImage.framework in Frameworks */, 61 | 00E75F85223B4540000D9FE7 /* AVFoundation.framework in Frameworks */, 62 | 00E75F83223B4538000D9FE7 /* libc++.tbd in Frameworks */, 63 | 00E75F80223B443C000D9FE7 /* opencv2.framework in Frameworks */, 64 | ); 65 | runOnlyForDeploymentPostprocessing = 0; 66 | }; 67 | /* End PBXFrameworksBuildPhase section */ 68 | 69 | /* Begin PBXGroup section */ 70 | 00E75F53223B3E9C000D9FE7 = { 71 | isa = PBXGroup; 
72 | children = ( 73 | 00E75F5E223B3E9C000D9FE7 /* FaceDetection */, 74 | 00E75F5D223B3E9C000D9FE7 /* Products */, 75 | 00E75F81223B4537000D9FE7 /* Frameworks */, 76 | ); 77 | sourceTree = ""; 78 | }; 79 | 00E75F5D223B3E9C000D9FE7 /* Products */ = { 80 | isa = PBXGroup; 81 | children = ( 82 | 00E75F5C223B3E9C000D9FE7 /* FaceDetection.app */, 83 | ); 84 | name = Products; 85 | sourceTree = ""; 86 | }; 87 | 00E75F5E223B3E9C000D9FE7 /* FaceDetection */ = { 88 | isa = PBXGroup; 89 | children = ( 90 | 00E75F8C223B49BF000D9FE7 /* test.jpg */, 91 | 00E75F75223B3EAC000D9FE7 /* src */, 92 | 00E75F5F223B3E9C000D9FE7 /* AppDelegate.h */, 93 | 00E75F60223B3E9C000D9FE7 /* AppDelegate.m */, 94 | 00E75F62223B3E9C000D9FE7 /* ViewController.h */, 95 | 00E75F63223B3E9C000D9FE7 /* ViewController.mm */, 96 | 00E75F7F223B443C000D9FE7 /* opencv2.framework */, 97 | 00E75F65223B3E9C000D9FE7 /* Main.storyboard */, 98 | 00E75F68223B3E9D000D9FE7 /* Assets.xcassets */, 99 | 00E75F6A223B3E9D000D9FE7 /* LaunchScreen.storyboard */, 100 | 00E75F6D223B3E9D000D9FE7 /* Info.plist */, 101 | 00E75F6E223B3E9D000D9FE7 /* main.m */, 102 | ); 103 | path = FaceDetection; 104 | sourceTree = ""; 105 | }; 106 | 00E75F75223B3EAC000D9FE7 /* src */ = { 107 | isa = PBXGroup; 108 | children = ( 109 | E230684B26AD9EBD006D69F6 /* facedetectcnn-data.cpp */, 110 | 00E75F77223B3EAC000D9FE7 /* facedetectcnn.cpp */, 111 | 00E75F78223B3EAC000D9FE7 /* facedetectcnn.h */, 112 | 00E75F7A223B3EAC000D9FE7 /* facedetectcnn-model.cpp */, 113 | ); 114 | name = src; 115 | path = ../../../src; 116 | sourceTree = ""; 117 | }; 118 | 00E75F81223B4537000D9FE7 /* Frameworks */ = { 119 | isa = PBXGroup; 120 | children = ( 121 | 00E75F8A223B455A000D9FE7 /* Accelerate.framework */, 122 | 00E75F88223B4552000D9FE7 /* CoreGraphics.framework */, 123 | 00E75F86223B4546000D9FE7 /* CoreImage.framework */, 124 | 00E75F84223B4540000D9FE7 /* AVFoundation.framework */, 125 | 00E75F82223B4538000D9FE7 /* libc++.tbd */, 126 | ); 127 | name = Frameworks; 
128 | sourceTree = ""; 129 | }; 130 | /* End PBXGroup section */ 131 | 132 | /* Begin PBXNativeTarget section */ 133 | 00E75F5B223B3E9C000D9FE7 /* FaceDetection */ = { 134 | isa = PBXNativeTarget; 135 | buildConfigurationList = 00E75F72223B3E9D000D9FE7 /* Build configuration list for PBXNativeTarget "FaceDetection" */; 136 | buildPhases = ( 137 | 00E75F58223B3E9C000D9FE7 /* Sources */, 138 | 00E75F59223B3E9C000D9FE7 /* Frameworks */, 139 | 00E75F5A223B3E9C000D9FE7 /* Resources */, 140 | ); 141 | buildRules = ( 142 | ); 143 | dependencies = ( 144 | ); 145 | name = FaceDetection; 146 | productName = FaceDetection; 147 | productReference = 00E75F5C223B3E9C000D9FE7 /* FaceDetection.app */; 148 | productType = "com.apple.product-type.application"; 149 | }; 150 | /* End PBXNativeTarget section */ 151 | 152 | /* Begin PBXProject section */ 153 | 00E75F54223B3E9C000D9FE7 /* Project object */ = { 154 | isa = PBXProject; 155 | attributes = { 156 | LastUpgradeCheck = 1010; 157 | ORGANIZATIONNAME = TendCloud; 158 | TargetAttributes = { 159 | 00E75F5B223B3E9C000D9FE7 = { 160 | CreatedOnToolsVersion = 10.1; 161 | }; 162 | }; 163 | }; 164 | buildConfigurationList = 00E75F57223B3E9C000D9FE7 /* Build configuration list for PBXProject "FaceDetection" */; 165 | compatibilityVersion = "Xcode 9.3"; 166 | developmentRegion = en; 167 | hasScannedForEncodings = 0; 168 | knownRegions = ( 169 | en, 170 | Base, 171 | ); 172 | mainGroup = 00E75F53223B3E9C000D9FE7; 173 | productRefGroup = 00E75F5D223B3E9C000D9FE7 /* Products */; 174 | projectDirPath = ""; 175 | projectRoot = ""; 176 | targets = ( 177 | 00E75F5B223B3E9C000D9FE7 /* FaceDetection */, 178 | ); 179 | }; 180 | /* End PBXProject section */ 181 | 182 | /* Begin PBXResourcesBuildPhase section */ 183 | 00E75F5A223B3E9C000D9FE7 /* Resources */ = { 184 | isa = PBXResourcesBuildPhase; 185 | buildActionMask = 2147483647; 186 | files = ( 187 | 00E75F6C223B3E9D000D9FE7 /* LaunchScreen.storyboard in Resources */, 188 | 00E75F69223B3E9D000D9FE7 
/* Assets.xcassets in Resources */, 189 | 00E75F8D223B49BF000D9FE7 /* test.jpg in Resources */, 190 | 00E75F67223B3E9C000D9FE7 /* Main.storyboard in Resources */, 191 | ); 192 | runOnlyForDeploymentPostprocessing = 0; 193 | }; 194 | /* End PBXResourcesBuildPhase section */ 195 | 196 | /* Begin PBXSourcesBuildPhase section */ 197 | 00E75F58223B3E9C000D9FE7 /* Sources */ = { 198 | isa = PBXSourcesBuildPhase; 199 | buildActionMask = 2147483647; 200 | files = ( 201 | E230684C26AD9EBD006D69F6 /* facedetectcnn-data.cpp in Sources */, 202 | E2597524259228D1000B0C45 /* facedetectcnn.h in Sources */, 203 | 00E75F7E223B3EAC000D9FE7 /* facedetectcnn-model.cpp in Sources */, 204 | 00E75F64223B3E9C000D9FE7 /* ViewController.mm in Sources */, 205 | 00E75F7C223B3EAC000D9FE7 /* facedetectcnn.cpp in Sources */, 206 | 00E75F6F223B3E9D000D9FE7 /* main.m in Sources */, 207 | 00E75F61223B3E9C000D9FE7 /* AppDelegate.m in Sources */, 208 | ); 209 | runOnlyForDeploymentPostprocessing = 0; 210 | }; 211 | /* End PBXSourcesBuildPhase section */ 212 | 213 | /* Begin PBXVariantGroup section */ 214 | 00E75F65223B3E9C000D9FE7 /* Main.storyboard */ = { 215 | isa = PBXVariantGroup; 216 | children = ( 217 | 00E75F66223B3E9C000D9FE7 /* Base */, 218 | ); 219 | name = Main.storyboard; 220 | sourceTree = ""; 221 | }; 222 | 00E75F6A223B3E9D000D9FE7 /* LaunchScreen.storyboard */ = { 223 | isa = PBXVariantGroup; 224 | children = ( 225 | 00E75F6B223B3E9D000D9FE7 /* Base */, 226 | ); 227 | name = LaunchScreen.storyboard; 228 | sourceTree = ""; 229 | }; 230 | /* End PBXVariantGroup section */ 231 | 232 | /* Begin XCBuildConfiguration section */ 233 | 00E75F70223B3E9D000D9FE7 /* Debug */ = { 234 | isa = XCBuildConfiguration; 235 | buildSettings = { 236 | ALWAYS_SEARCH_USER_PATHS = NO; 237 | CLANG_ANALYZER_NONNULL = YES; 238 | CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; 239 | CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; 240 | CLANG_CXX_LIBRARY = "libc++"; 241 | CLANG_ENABLE_MODULES = YES; 242 | 
CLANG_ENABLE_OBJC_ARC = YES; 243 | CLANG_ENABLE_OBJC_WEAK = YES; 244 | CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; 245 | CLANG_WARN_BOOL_CONVERSION = YES; 246 | CLANG_WARN_COMMA = YES; 247 | CLANG_WARN_CONSTANT_CONVERSION = YES; 248 | CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; 249 | CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; 250 | CLANG_WARN_DOCUMENTATION_COMMENTS = YES; 251 | CLANG_WARN_EMPTY_BODY = YES; 252 | CLANG_WARN_ENUM_CONVERSION = YES; 253 | CLANG_WARN_INFINITE_RECURSION = YES; 254 | CLANG_WARN_INT_CONVERSION = YES; 255 | CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; 256 | CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; 257 | CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; 258 | CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; 259 | CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; 260 | CLANG_WARN_STRICT_PROTOTYPES = YES; 261 | CLANG_WARN_SUSPICIOUS_MOVE = YES; 262 | CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; 263 | CLANG_WARN_UNREACHABLE_CODE = YES; 264 | CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; 265 | CODE_SIGN_IDENTITY = "iPhone Developer"; 266 | COPY_PHASE_STRIP = NO; 267 | DEBUG_INFORMATION_FORMAT = dwarf; 268 | ENABLE_STRICT_OBJC_MSGSEND = YES; 269 | ENABLE_TESTABILITY = YES; 270 | GCC_C_LANGUAGE_STANDARD = gnu11; 271 | GCC_DYNAMIC_NO_PIC = NO; 272 | GCC_NO_COMMON_BLOCKS = YES; 273 | GCC_OPTIMIZATION_LEVEL = 0; 274 | GCC_PREPROCESSOR_DEFINITIONS = ( 275 | "DEBUG=1", 276 | "$(inherited)", 277 | ); 278 | GCC_WARN_64_TO_32_BIT_CONVERSION = YES; 279 | GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; 280 | GCC_WARN_UNDECLARED_SELECTOR = YES; 281 | GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; 282 | GCC_WARN_UNUSED_FUNCTION = YES; 283 | GCC_WARN_UNUSED_VARIABLE = YES; 284 | IPHONEOS_DEPLOYMENT_TARGET = 10.1; 285 | MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE; 286 | MTL_FAST_MATH = YES; 287 | ONLY_ACTIVE_ARCH = YES; 288 | SDKROOT = iphoneos; 289 | }; 290 | name = Debug; 291 | }; 292 | 00E75F71223B3E9D000D9FE7 /* Release */ = { 293 | isa = XCBuildConfiguration; 294 | buildSettings 
= { 295 | ALWAYS_SEARCH_USER_PATHS = NO; 296 | CLANG_ANALYZER_NONNULL = YES; 297 | CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; 298 | CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; 299 | CLANG_CXX_LIBRARY = "libc++"; 300 | CLANG_ENABLE_MODULES = YES; 301 | CLANG_ENABLE_OBJC_ARC = YES; 302 | CLANG_ENABLE_OBJC_WEAK = YES; 303 | CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; 304 | CLANG_WARN_BOOL_CONVERSION = YES; 305 | CLANG_WARN_COMMA = YES; 306 | CLANG_WARN_CONSTANT_CONVERSION = YES; 307 | CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; 308 | CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; 309 | CLANG_WARN_DOCUMENTATION_COMMENTS = YES; 310 | CLANG_WARN_EMPTY_BODY = YES; 311 | CLANG_WARN_ENUM_CONVERSION = YES; 312 | CLANG_WARN_INFINITE_RECURSION = YES; 313 | CLANG_WARN_INT_CONVERSION = YES; 314 | CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; 315 | CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; 316 | CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; 317 | CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; 318 | CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; 319 | CLANG_WARN_STRICT_PROTOTYPES = YES; 320 | CLANG_WARN_SUSPICIOUS_MOVE = YES; 321 | CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; 322 | CLANG_WARN_UNREACHABLE_CODE = YES; 323 | CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; 324 | CODE_SIGN_IDENTITY = "iPhone Developer"; 325 | COPY_PHASE_STRIP = NO; 326 | DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; 327 | ENABLE_NS_ASSERTIONS = NO; 328 | ENABLE_STRICT_OBJC_MSGSEND = YES; 329 | GCC_C_LANGUAGE_STANDARD = gnu11; 330 | GCC_NO_COMMON_BLOCKS = YES; 331 | GCC_WARN_64_TO_32_BIT_CONVERSION = YES; 332 | GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; 333 | GCC_WARN_UNDECLARED_SELECTOR = YES; 334 | GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; 335 | GCC_WARN_UNUSED_FUNCTION = YES; 336 | GCC_WARN_UNUSED_VARIABLE = YES; 337 | IPHONEOS_DEPLOYMENT_TARGET = 10.1; 338 | MTL_ENABLE_DEBUG_INFO = NO; 339 | MTL_FAST_MATH = YES; 340 | SDKROOT = iphoneos; 341 | VALIDATE_PRODUCT = YES; 342 | }; 343 | name = 
Release; 344 | }; 345 | 00E75F73223B3E9D000D9FE7 /* Debug */ = { 346 | isa = XCBuildConfiguration; 347 | buildSettings = { 348 | ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; 349 | CODE_SIGN_STYLE = Automatic; 350 | DEVELOPMENT_TEAM = EXQD8EZF59; 351 | FRAMEWORK_SEARCH_PATHS = ( 352 | "$(inherited)", 353 | "$(PROJECT_DIR)/FaceDetection", 354 | ); 355 | INFOPLIST_FILE = FaceDetection/Info.plist; 356 | LD_RUNPATH_SEARCH_PATHS = ( 357 | "$(inherited)", 358 | "@executable_path/Frameworks", 359 | ); 360 | PRODUCT_BUNDLE_IDENTIFIER = org.dp.FaceDetection; 361 | PRODUCT_NAME = "$(TARGET_NAME)"; 362 | TARGETED_DEVICE_FAMILY = "1,2"; 363 | }; 364 | name = Debug; 365 | }; 366 | 00E75F74223B3E9D000D9FE7 /* Release */ = { 367 | isa = XCBuildConfiguration; 368 | buildSettings = { 369 | ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; 370 | CODE_SIGN_STYLE = Automatic; 371 | DEVELOPMENT_TEAM = EXQD8EZF59; 372 | FRAMEWORK_SEARCH_PATHS = ( 373 | "$(inherited)", 374 | "$(PROJECT_DIR)/FaceDetection", 375 | ); 376 | INFOPLIST_FILE = FaceDetection/Info.plist; 377 | LD_RUNPATH_SEARCH_PATHS = ( 378 | "$(inherited)", 379 | "@executable_path/Frameworks", 380 | ); 381 | PRODUCT_BUNDLE_IDENTIFIER = org.dp.FaceDetection; 382 | PRODUCT_NAME = "$(TARGET_NAME)"; 383 | TARGETED_DEVICE_FAMILY = "1,2"; 384 | }; 385 | name = Release; 386 | }; 387 | /* End XCBuildConfiguration section */ 388 | 389 | /* Begin XCConfigurationList section */ 390 | 00E75F57223B3E9C000D9FE7 /* Build configuration list for PBXProject "FaceDetection" */ = { 391 | isa = XCConfigurationList; 392 | buildConfigurations = ( 393 | 00E75F70223B3E9D000D9FE7 /* Debug */, 394 | 00E75F71223B3E9D000D9FE7 /* Release */, 395 | ); 396 | defaultConfigurationIsVisible = 0; 397 | defaultConfigurationName = Release; 398 | }; 399 | 00E75F72223B3E9D000D9FE7 /* Build configuration list for PBXNativeTarget "FaceDetection" */ = { 400 | isa = XCConfigurationList; 401 | buildConfigurations = ( 402 | 00E75F73223B3E9D000D9FE7 /* Debug */, 403 | 
00E75F74223B3E9D000D9FE7 /* Release */, 404 | ); 405 | defaultConfigurationIsVisible = 0; 406 | defaultConfigurationName = Release; 407 | }; 408 | /* End XCConfigurationList section */ 409 | }; 410 | rootObject = 00E75F54223B3E9C000D9FE7 /* Project object */; 411 | } 412 | -------------------------------------------------------------------------------- /mobile/iOS/FaceDetection.xcodeproj/project.xcworkspace/contents.xcworkspacedata: -------------------------------------------------------------------------------- 1 | 2 | 4 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /mobile/iOS/FaceDetection.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | IDEDidComputeMac32BitWarning 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /mobile/iOS/FaceDetection.xcodeproj/project.xcworkspace/xcuserdata/Robin.xcuserdatad/UserInterfaceState.xcuserstate: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/iOS/FaceDetection.xcodeproj/project.xcworkspace/xcuserdata/Robin.xcuserdatad/UserInterfaceState.xcuserstate -------------------------------------------------------------------------------- /mobile/iOS/FaceDetection.xcodeproj/xcuserdata/Robin.xcuserdatad/xcschemes/xcschememanagement.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | SchemeUserState 6 | 7 | FaceDetection.xcscheme_^#shared#^_ 8 | 9 | orderHint 10 | 0 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /mobile/iOS/FaceDetection/AppDelegate.h: -------------------------------------------------------------------------------- 1 | // 2 | // AppDelegate.h 3 | // 
FaceDetection 4 | // 5 | // Created by Robin on 2019/3/15. 6 | // Copyright © 2019 . All rights reserved. 7 | // 8 | 9 | #import 10 | 11 | @interface AppDelegate : UIResponder 12 | 13 | @property (strong, nonatomic) UIWindow *window; 14 | 15 | 16 | @end 17 | 18 | -------------------------------------------------------------------------------- /mobile/iOS/FaceDetection/AppDelegate.m: -------------------------------------------------------------------------------- 1 | // 2 | // AppDelegate.m 3 | // FaceDetection 4 | // 5 | // Created by Robin on 2019/3/15. 6 | // Copyright © 2019 . All rights reserved. 7 | // 8 | 9 | #import "AppDelegate.h" 10 | 11 | @interface AppDelegate () 12 | 13 | @end 14 | 15 | @implementation AppDelegate 16 | 17 | 18 | - (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions { 19 | // Override point for customization after application launch. 20 | return YES; 21 | } 22 | 23 | 24 | - (void)applicationWillResignActive:(UIApplication *)application { 25 | // Sent when the application is about to move from active to inactive state. This can occur for certain types of temporary interruptions (such as an incoming phone call or SMS message) or when the user quits the application and it begins the transition to the background state. 26 | // Use this method to pause ongoing tasks, disable timers, and invalidate graphics rendering callbacks. Games should use this method to pause the game. 27 | } 28 | 29 | 30 | - (void)applicationDidEnterBackground:(UIApplication *)application { 31 | // Use this method to release shared resources, save user data, invalidate timers, and store enough application state information to restore your application to its current state in case it is terminated later. 32 | // If your application supports background execution, this method is called instead of applicationWillTerminate: when the user quits. 
33 | } 34 | 35 | 36 | - (void)applicationWillEnterForeground:(UIApplication *)application { 37 | // Called as part of the transition from the background to the active state; here you can undo many of the changes made on entering the background. 38 | } 39 | 40 | 41 | - (void)applicationDidBecomeActive:(UIApplication *)application { 42 | // Restart any tasks that were paused (or not yet started) while the application was inactive. If the application was previously in the background, optionally refresh the user interface. 43 | } 44 | 45 | 46 | - (void)applicationWillTerminate:(UIApplication *)application { 47 | // Called when the application is about to terminate. Save data if appropriate. See also applicationDidEnterBackground:. 48 | } 49 | 50 | 51 | @end 52 | -------------------------------------------------------------------------------- /mobile/iOS/FaceDetection/Assets.xcassets/AppIcon.appiconset/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "images" : [ 3 | { 4 | "idiom" : "iphone", 5 | "size" : "20x20", 6 | "scale" : "2x" 7 | }, 8 | { 9 | "idiom" : "iphone", 10 | "size" : "20x20", 11 | "scale" : "3x" 12 | }, 13 | { 14 | "idiom" : "iphone", 15 | "size" : "29x29", 16 | "scale" : "2x" 17 | }, 18 | { 19 | "idiom" : "iphone", 20 | "size" : "29x29", 21 | "scale" : "3x" 22 | }, 23 | { 24 | "idiom" : "iphone", 25 | "size" : "40x40", 26 | "scale" : "2x" 27 | }, 28 | { 29 | "idiom" : "iphone", 30 | "size" : "40x40", 31 | "scale" : "3x" 32 | }, 33 | { 34 | "idiom" : "iphone", 35 | "size" : "60x60", 36 | "scale" : "2x" 37 | }, 38 | { 39 | "idiom" : "iphone", 40 | "size" : "60x60", 41 | "scale" : "3x" 42 | }, 43 | { 44 | "idiom" : "ipad", 45 | "size" : "20x20", 46 | "scale" : "1x" 47 | }, 48 | { 49 | "idiom" : "ipad", 50 | "size" : "20x20", 51 | "scale" : "2x" 52 | }, 53 | { 54 | "idiom" : "ipad", 55 | "size" : "29x29", 56 | "scale" : "1x" 57 | }, 58 | { 59 | "idiom" : "ipad", 60 | "size" : "29x29", 61 | "scale" : 
"2x" 62 | }, 63 | { 64 | "idiom" : "ipad", 65 | "size" : "40x40", 66 | "scale" : "1x" 67 | }, 68 | { 69 | "idiom" : "ipad", 70 | "size" : "40x40", 71 | "scale" : "2x" 72 | }, 73 | { 74 | "idiom" : "ipad", 75 | "size" : "76x76", 76 | "scale" : "1x" 77 | }, 78 | { 79 | "idiom" : "ipad", 80 | "size" : "76x76", 81 | "scale" : "2x" 82 | }, 83 | { 84 | "idiom" : "ipad", 85 | "size" : "83.5x83.5", 86 | "scale" : "2x" 87 | }, 88 | { 89 | "idiom" : "ios-marketing", 90 | "size" : "1024x1024", 91 | "scale" : "1x" 92 | } 93 | ], 94 | "info" : { 95 | "version" : 1, 96 | "author" : "xcode" 97 | } 98 | } -------------------------------------------------------------------------------- /mobile/iOS/FaceDetection/Assets.xcassets/Contents.json: -------------------------------------------------------------------------------- 1 | { 2 | "info" : { 3 | "version" : 1, 4 | "author" : "xcode" 5 | } 6 | } -------------------------------------------------------------------------------- /mobile/iOS/FaceDetection/Base.lproj/LaunchScreen.storyboard: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /mobile/iOS/FaceDetection/Base.lproj/Main.storyboard: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /mobile/iOS/FaceDetection/Info.plist: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | CFBundleDevelopmentRegion 6 | $(DEVELOPMENT_LANGUAGE) 7 | CFBundleExecutable 8 | $(EXECUTABLE_NAME) 9 | CFBundleIdentifier 10 | $(PRODUCT_BUNDLE_IDENTIFIER) 11 | CFBundleInfoDictionaryVersion 12 | 6.0 13 | 
CFBundleName 14 | $(PRODUCT_NAME) 15 | CFBundlePackageType 16 | APPL 17 | CFBundleShortVersionString 18 | 1.0 19 | CFBundleVersion 20 | 1 21 | LSRequiresIPhoneOS 22 | 23 | UILaunchStoryboardName 24 | LaunchScreen 25 | UIMainStoryboardFile 26 | Main 27 | UIRequiredDeviceCapabilities 28 | 29 | armv7 30 | 31 | UISupportedInterfaceOrientations 32 | 33 | UIInterfaceOrientationPortrait 34 | UIInterfaceOrientationLandscapeLeft 35 | UIInterfaceOrientationLandscapeRight 36 | 37 | UISupportedInterfaceOrientations~ipad 38 | 39 | UIInterfaceOrientationPortrait 40 | UIInterfaceOrientationPortraitUpsideDown 41 | UIInterfaceOrientationLandscapeLeft 42 | UIInterfaceOrientationLandscapeRight 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /mobile/iOS/FaceDetection/ViewController.h: -------------------------------------------------------------------------------- 1 | // 2 | // ViewController.h 3 | // FaceDetection 4 | // 5 | // Created by Robin on 2019/3/15. 6 | // Copyright © 2019 . All rights reserved. 7 | // 8 | 9 | #import 10 | 11 | @interface ViewController : UIViewController 12 | 13 | 14 | @end 15 | 16 | -------------------------------------------------------------------------------- /mobile/iOS/FaceDetection/ViewController.mm: -------------------------------------------------------------------------------- 1 | // 2 | // ViewController.m 3 | // FaceDetection 4 | // 5 | // Created by Robin on 2019/3/15. 6 | // Copyright © 2019 . All rights reserved. 7 | // 8 | 9 | #import 10 | #import 11 | #import "ViewController.h" 12 | #import "facedetectcnn.h" 13 | 14 | //define the buffer size. Do not change the size! 
#define DETECT_BUFFER_SIZE 0x20000
using namespace cv;

@implementation ViewController

// Load an image from disk, run CNN face detection on it, and return a new
// RGB UIImage with every detected face drawn as a rectangle plus its
// confidence score and five facial landmarks.
// Returns nil if the image cannot be loaded or the buffer allocation fails.
- (UIImage *)loadImageAndDectect:(const char *)image_file{
    Mat img = imread(image_file);
    if (img.empty()) {
        fprintf(stderr, "Can not load the image file %s.\n", image_file);
        return nil;
    }

    int *pResults = NULL;
    //working buffer for the detector; its size is dictated by the library
    unsigned char * pBuffer = (unsigned char *)malloc(DETECT_BUFFER_SIZE);
    if (!pBuffer) {
        fprintf(stderr, "Can not alloc buffer.\n");
        return nil;
    }
    pResults = facedetect_cnn(pBuffer, (unsigned char *)(img.ptr(0)), img.cols, img.rows, (int)img.step);
    printf("%d faces detected.\n", (pResults ? *pResults : 0));
    Mat result_cnn = img.clone();
    cvtColor(img, result_cnn, cv::COLOR_BGR2RGB);
    //print the detection results
    for(int i = 0; i < (pResults ? *pResults : 0); i++)
    {
        //each face record is 16 shorts: score, x, y, w, h and 5 landmark
        //(x, y) pairs. BUG FIX: the stride must match facedetect_cnn() in
        //src/facedetectcnn-model.cpp, which writes 16 shorts per face; the
        //old 142-short stride read garbage for every face after the first.
        short * p = ((short*)(pResults+1))+16*i;
        int confidence = p[0];
        int x = p[1];
        int y = p[2];
        int w = p[3];
        int h = p[4];

        printf("face%drect=[%d, %d, %d, %d], confidence=%d\n",i,x,y,w,h,confidence);
        rectangle(result_cnn, cv::Rect(x, y, w, h), Scalar(0, 255, 0), 2);
        std::string str = std::to_string(confidence);
        putText(result_cnn, str, cv::Point(x-2,y-2), cv::FONT_HERSHEY_SIMPLEX, 1.0,Scalar(0, 255, 0));
        //landmarks live at p[5..14] as five interleaved (x, y) pairs
        for (int j = 5; j < 14; j += 2) {
            int p_x = p[j];
            int p_y = p[j+1];
            printf("landmark%d=[%d, %d]\n",(j-5)/2,p_x,p_y);
            circle(result_cnn, cv::Point(p_x,p_y), 2,Scalar(255, 0, 0),-1,cv::LINE_AA);
        }
    }

    free(pBuffer);

    return MatToUIImage(result_cnn);
}


- (void)viewDidLoad {
    [super viewDidLoad];
    // Show the detection result full screen, keeping the aspect ratio.

    UIImageView *imageView = [[UIImageView alloc] initWithFrame:[UIScreen mainScreen].bounds];
    imageView.contentMode = UIViewContentModeScaleAspectFit;
    [self.view addSubview:imageView];

    // NOTE(review): pathForResource:ofType: is conventionally called with
    // @"jpg" (no leading dot); @".jpg" appears to work here but should be
    // confirmed on device.
    NSString *path = [[NSBundle mainBundle] pathForResource:@"test" ofType:@".jpg"];

    imageView.image = [self loadImageAndDectect:[path UTF8String]];
}


@end
-------------------------------------------------------------------------------- /mobile/iOS/FaceDetection/main.m: --------------------------------------------------------------------------------
//
//  main.m
//  FaceDetection
//
//  Created by Robin on 2019/3/15.
//  Copyright © 2019 . All rights reserved.
//

#import <UIKit/UIKit.h>
#import "AppDelegate.h"

int main(int argc, char * argv[]) {
    @autoreleasepool {
        return UIApplicationMain(argc, argv, nil, NSStringFromClass([AppDelegate class]));
    }
}
-------------------------------------------------------------------------------- /mobile/iOS/FaceDetection/test.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/iOS/FaceDetection/test.jpg -------------------------------------------------------------------------------- /mobile/iOS/screenshot1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/iOS/screenshot1.png -------------------------------------------------------------------------------- /mobile/iOS/screenshot2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/mobile/iOS/screenshot2.png
-------------------------------------------------------------------------------- /opencv_dnn/README.md: --------------------------------------------------------------------------------
# Deploy libfacedetection with OpenCV

Example to deploy libfacedetection with OpenCV's FaceDetectorYN in both Python and C++.

Please note that OpenCV DNN does not support the latest version of YuNet with dynamic input shape. Please ensure you have the exact same input shape as the one in the ONNX model to run latest YuNet with OpenCV DNN.

***Important Notes***:
- Install OpenCV >= 4.5.4 to have the API `FaceDetectorYN`.
- Download the ONNX model from [OpenCV Zoo](https://github.com/opencv/opencv_zoo/tree/master/models/face_detection_yunet).

Environment tested:
- System: Ubuntu 18.04 LTS / 20.04 LTS
- OpenCV >= 4.5.4
- Python >= 3.6

## Python
1. Install `numpy` and `opencv-python`.
    ```shell
    pip install numpy
    pip install "opencv-python>=4.5.4.58"
    ```
2. Run demo. For more options, run `python python/detect.py --help`.
    ```shell
    # detect on an image
    python python/detect.py --model=/path/to/yunet.onnx --input=/path/to/example/image
    # detect on default camera
    python python/detect.py --model=/path/to/yunet.onnx
    ```

## C++
1. Build the example with `cmake`:
    ```shell
    cd cpp
    mkdir build && cd build
    cmake .. # NOTE: if cmake failed finding OpenCV, add the option `-DCMAKE_PREFIX_PATH=/path/to/opencv/install`
    make
    ```
2. 
Run the example: 39 | ```shell 40 | # detect on an image 41 | ./detect -m/path/to/yunet.onnx -i=/path/to/image 42 | # detect on default camera 43 | ./detect -m/path/to/yunet.onnx 44 | ``` 45 | -------------------------------------------------------------------------------- /opencv_dnn/cpp/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.12) 2 | 3 | project(libfacedetection_opencvdnn) 4 | 5 | # OpenCV 6 | find_package(OpenCV 4.5.4 REQUIRED) 7 | include_directories(${OpenCV_INCLUDE_DIRS}) 8 | 9 | add_executable(detect detect.cpp) 10 | target_link_libraries(detect ${OpenCV_LIBS}) -------------------------------------------------------------------------------- /opencv_dnn/cpp/detect.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include 7 | 8 | // using namespace cv; 9 | using namespace std; 10 | 11 | static cv::Mat visualize(cv::Mat input, cv::Mat faces, bool print_flag=false, double fps=-1, int thickness=2) 12 | { 13 | cv::Mat output = input.clone(); 14 | 15 | if (fps > 0) { 16 | cv::putText(output, cv::format("FPS: %.2f", fps), cv::Point2i(0, 15), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0)); 17 | } 18 | 19 | for (int i = 0; i < faces.rows; i++) 20 | { 21 | if (print_flag) { 22 | cout << "Face " << i 23 | << ", top-left coordinates: (" << faces.at(i, 0) << ", " << faces.at(i, 1) << "), " 24 | << "box width: " << faces.at(i, 2) << ", box height: " << faces.at(i, 3) << ", " 25 | << "score: " << faces.at(i, 14) << "\n"; 26 | } 27 | 28 | // Draw bounding box 29 | cv::rectangle(output, cv::Rect2i(int(faces.at(i, 0)), int(faces.at(i, 1)), int(faces.at(i, 2)), int(faces.at(i, 3))), cv::Scalar(0, 255, 0), thickness); 30 | // Draw landmarks 31 | cv::circle(output, cv::Point2i(int(faces.at(i, 4)), int(faces.at(i, 5))), 2, cv::Scalar(255, 0, 0), thickness); 32 | cv::circle(output, 
cv::Point2i(int(faces.at(i, 6)), int(faces.at(i, 7))), 2, cv::Scalar( 0, 0, 255), thickness); 33 | cv::circle(output, cv::Point2i(int(faces.at(i, 8)), int(faces.at(i, 9))), 2, cv::Scalar( 0, 255, 0), thickness); 34 | cv::circle(output, cv::Point2i(int(faces.at(i, 10)), int(faces.at(i, 11))), 2, cv::Scalar(255, 0, 255), thickness); 35 | cv::circle(output, cv::Point2i(int(faces.at(i, 12)), int(faces.at(i, 13))), 2, cv::Scalar( 0, 255, 255), thickness); 36 | // Put score 37 | cv::putText(output, cv::format("%.4f", faces.at(i, 14)), cv::Point2i(int(faces.at(i, 0)), int(faces.at(i, 1))+15), cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 255, 0)); 38 | } 39 | return output; 40 | } 41 | 42 | int main(int argc, char ** argv) 43 | { 44 | cv::CommandLineParser parser(argc, argv, 45 | "{help h | | Print this message.}" 46 | "{input i | | Path to the input image. Omit for detecting on default camera.}" 47 | "{backend_id | 0 | Backend to run on. 0: default, 1: Halide, 2: Intel's Inference Engine, 3: OpenCV, 4: VKCOM, 5: CUDA}" 48 | "{target_id | 0 | Target to run on. 0: CPU, 1: OpenCL, 2: OpenCL FP16, 3: Myriad, 4: Vulkan, 5: FPGA, 6: CUDA, 7: CUDA FP16, 8: HDDL}" 49 | "{model m | yunet.onnx | Path to the model. Download yunet.onnx in https://github.com/ShiqiYu/libfacedetection.train/tree/master/tasks/task1/onnx.}" 50 | "{score_threshold | 0.9 | Filter out faces of score < score_threshold.}" 51 | "{nms_threshold | 0.3 | Suppress bounding boxes of iou >= nms_threshold.}" 52 | "{top_k | 5000 | Keep top_k bounding boxes before NMS.}" 53 | "{save s | false | Set true to save results. This flag is invalid when using camera.}" 54 | "{vis v | true | Set true to open a window for result visualization. 
This flag is invalid when using camera.}" 55 | ); 56 | if (argc == 1 || parser.has("help")) 57 | { 58 | parser.printMessage(); 59 | return -1; 60 | } 61 | 62 | cv::String modelPath = parser.get("model"); 63 | int backendId = parser.get("backend_id"); 64 | int targetId = parser.get("target_id"); 65 | 66 | float scoreThreshold = parser.get("score_threshold"); 67 | float nmsThreshold = parser.get("nms_threshold"); 68 | int topK = parser.get("top_k"); 69 | 70 | bool save = parser.get("save"); 71 | bool vis = parser.get("vis"); 72 | 73 | // Initialize FaceDetectorYN 74 | cv::Ptr detector = cv::FaceDetectorYN::create(modelPath, "", cv::Size(320, 320), scoreThreshold, nmsThreshold, topK, backendId, targetId); 75 | 76 | // If input is an image 77 | if (parser.has("input")) 78 | { 79 | cv::String input = parser.get("input"); 80 | cv::Mat image = cv::imread(input); 81 | 82 | detector->setInputSize(image.size()); 83 | cv::Mat faces; 84 | detector->detect(image, faces); 85 | 86 | cv::Mat vis_image = visualize(image, faces, true); 87 | if(save) 88 | { 89 | cout << "result.jpg saved.\n"; 90 | cv::imwrite("result.jpg", vis_image); 91 | } 92 | if (vis) 93 | { 94 | cv::namedWindow(input, cv::WINDOW_AUTOSIZE); 95 | cv::imshow(input, vis_image); 96 | cv::waitKey(0); 97 | } 98 | } 99 | else 100 | { 101 | int deviceId = 0; 102 | cv::VideoCapture cap; 103 | cap.open(deviceId, cv::CAP_ANY); 104 | int frameWidth = int(cap.get(cv::CAP_PROP_FRAME_WIDTH)); 105 | int frameHeight = int(cap.get(cv::CAP_PROP_FRAME_HEIGHT)); 106 | detector->setInputSize(cv::Size(frameWidth, frameHeight)); 107 | 108 | cv::Mat frame; 109 | cv::TickMeter tm; 110 | while(cv::waitKey(1) < 0) // Press any key to exit 111 | { 112 | if (!cap.read(frame)) 113 | { 114 | cerr << "No frames grabbed!\n"; 115 | break; 116 | } 117 | 118 | cv::Mat faces; 119 | tm.start(); 120 | detector->detect(frame, faces); 121 | tm.stop(); 122 | 123 | cv::Mat vis_frame = visualize(frame, faces, false, tm.getFPS()); 124 | 125 | 
imshow("libfacedetection demo", vis_frame); 126 | 127 | tm.reset(); 128 | } 129 | } 130 | } -------------------------------------------------------------------------------- /opencv_dnn/python/detect.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | import numpy as np 4 | import cv2 as cv 5 | 6 | def str2bool(v: str) -> bool: 7 | if v.lower() in ['true', 'yes', 'on', 'y', 't']: 8 | return True 9 | elif v.lower() in ['false', 'no', 'off', 'n', 'f']: 10 | return False 11 | else: 12 | raise NotImplementedError 13 | 14 | def visualize(image, faces, print_flag=False, fps=None): 15 | output = image.copy() 16 | 17 | if fps: 18 | cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) 19 | 20 | for idx, face in enumerate(faces): 21 | if print_flag: 22 | print('Face {}, top-left coordinates: ({:.0f}, {:.0f}), box width: {:.0f}, box height {:.0f}, score: {:.2f}'.format(idx, face[0], face[1], face[2], face[3], face[-1])) 23 | 24 | coords = face[:-1].astype(np.int32) 25 | # Draw face bounding box 26 | cv.rectangle(output, (coords[0], coords[1]), (coords[0]+coords[2], coords[1]+coords[3]), (0, 255, 0), 2) 27 | # Draw landmarks 28 | cv.circle(output, (coords[4], coords[5]), 2, (255, 0, 0), 2) 29 | cv.circle(output, (coords[6], coords[7]), 2, (0, 0, 255), 2) 30 | cv.circle(output, (coords[8], coords[9]), 2, (0, 255, 0), 2) 31 | cv.circle(output, (coords[10], coords[11]), 2, (255, 0, 255), 2) 32 | cv.circle(output, (coords[12], coords[13]), 2, (0, 255, 255), 2) 33 | # Put score 34 | cv.putText(output, '{:.4f}'.format(face[-1]), (coords[0], coords[1]+15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)) 35 | 36 | return output 37 | 38 | def main(): 39 | backends = (cv.dnn.DNN_BACKEND_DEFAULT, 40 | cv.dnn.DNN_BACKEND_HALIDE, 41 | cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, 42 | cv.dnn.DNN_BACKEND_OPENCV) 43 | targets = (cv.dnn.DNN_TARGET_CPU, 44 | cv.dnn.DNN_TARGET_OPENCL, 45 | 
cv.dnn.DNN_TARGET_OPENCL_FP16, 46 | cv.dnn.DNN_TARGET_MYRIAD) 47 | 48 | parser = argparse.ArgumentParser(description='A demo for running libfacedetection using OpenCV\'s DNN module.') 49 | parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int, 50 | help='Choose one of computation backends: ' 51 | '%d: automatically (by default), ' 52 | '%d: Halide language (http://halide-lang.org/), ' 53 | '%d: Intel\'s Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), ' 54 | '%d: OpenCV implementation' % backends) 55 | parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, 56 | help='Choose one of target computation devices: ' 57 | '%d: CPU target (by default), ' 58 | '%d: OpenCL, ' 59 | '%d: OpenCL fp16 (half-float precision), ' 60 | '%d: VPU' % targets) 61 | # Location 62 | parser.add_argument('--input', '-i', help='Path to the image. Omit to call default camera') 63 | parser.add_argument('--model', '-m', type=str, help='Path to .onnx model file.') 64 | # Inference parameters 65 | parser.add_argument('--score_threshold', default=0.6, type=float, help='Threshold for filtering out faces with conf < conf_thresh.') 66 | parser.add_argument('--nms_threshold', default=0.3, type=float, help='Threshold for non-max suppression.') 67 | parser.add_argument('--top_k', default=5000, type=int, help='Keep keep_top_k for results outputing.') 68 | # Result 69 | parser.add_argument('--vis', default=True, type=str2bool, help='Set True to visualize the result image. Invalid when using camera.') 70 | parser.add_argument('--save', default=False, type=str2bool, help='Set True to save as result.jpg. 
Invalid when using camera.') 71 | args = parser.parse_args() 72 | 73 | # Instantiate yunet 74 | yunet = cv.FaceDetectorYN.create( 75 | model=args.model, 76 | config='', 77 | input_size=(320, 320), 78 | score_threshold=args.score_threshold, 79 | nms_threshold=args.nms_threshold, 80 | top_k=5000, 81 | backend_id=args.backend, 82 | target_id=args.target 83 | ) 84 | 85 | if args.input is not None: 86 | image = cv.imread(args.input) 87 | 88 | yunet.setInputSize((image.shape[1], image.shape[0])) 89 | _, faces = yunet.detect(image) # faces: None, or nx15 np.array 90 | 91 | vis_image = visualize(image, faces) 92 | if args.save: 93 | print('result.jpg saved.') 94 | cv.imwrite('result.jpg', vis_image) 95 | if args.vis: 96 | cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE) 97 | cv.imshow(args.input, vis_image) 98 | cv.waitKey(0) 99 | else: 100 | device_id = 0 101 | cap = cv.VideoCapture(device_id) 102 | frame_w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH)) 103 | frame_h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT)) 104 | yunet.setInputSize([frame_w, frame_h]) 105 | 106 | tm = cv.TickMeter() 107 | while cv.waitKey(1) < 0: 108 | has_frame, frame = cap.read() 109 | if not has_frame: 110 | print('No frames grabbed!') 111 | 112 | tm.start() 113 | _, faces = yunet.detect(frame) # # faces: None, or nx15 np.array 114 | tm.stop() 115 | 116 | frame = visualize(frame, faces, fps=tm.getFPS()) 117 | cv.imshow('libfacedetection demo', frame) 118 | 119 | tm.reset() 120 | 121 | if __name__ == '__main__': 122 | main() -------------------------------------------------------------------------------- /src/facedetectcnn-model.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | By downloading, copying, installing or using the software you agree to this license. 3 | If you do not agree to this license, do not download, install, 4 | copy or use the software. 
5 | 6 | 7 | License Agreement For libfacedetection 8 | (3-clause BSD License) 9 | 10 | Copyright (c) 2018-2021, Shiqi Yu, all rights reserved. 11 | shiqi.yu@gmail.com 12 | 13 | Redistribution and use in source and binary forms, with or without modification, 14 | are permitted provided that the following conditions are met: 15 | 16 | * Redistributions of source code must retain the above copyright notice, 17 | this list of conditions and the following disclaimer. 18 | 19 | * Redistributions in binary form must reproduce the above copyright notice, 20 | this list of conditions and the following disclaimer in the documentation 21 | and/or other materials provided with the distribution. 22 | 23 | * Neither the names of the copyright holders nor the names of the contributors 24 | may be used to endorse or promote products derived from this software 25 | without specific prior written permission. 26 | 27 | This software is provided by the copyright holders and contributors "as is" and 28 | any express or implied warranties, including, but not limited to, the implied 29 | warranties of merchantability and fitness for a particular purpose are disclaimed. 30 | In no event shall copyright holders or contributors be liable for any direct, 31 | indirect, incidental, special, exemplary, or consequential damages 32 | (including, but not limited to, procurement of substitute goods or services; 33 | loss of use, data, or profits; or business interruption) however caused 34 | and on any theory of liability, whether in contract, strict liability, 35 | or tort (including negligence or otherwise) arising in any way out of 36 | the use of this software, even if advised of the possibility of such damage. 
37 | */ 38 | 39 | 40 | #include "facedetectcnn.h" 41 | 42 | 43 | #if 0 44 | #include 45 | cv::TickMeter cvtm; 46 | #define TIME_START cvtm.reset();cvtm.start(); 47 | #define TIME_END(FUNCNAME) cvtm.stop(); printf(FUNCNAME);printf("=%g\n", cvtm.getTimeMilli()); 48 | #else 49 | #define TIME_START 50 | #define TIME_END(FUNCNAME) 51 | #endif 52 | 53 | 54 | #define NUM_CONV_LAYER 53 55 | 56 | extern ConvInfoStruct param_pConvInfo[NUM_CONV_LAYER]; 57 | Filters g_pFilters[NUM_CONV_LAYER]; 58 | 59 | bool param_initialized = false; 60 | 61 | void init_parameters() 62 | { 63 | for(int i = 0; i < NUM_CONV_LAYER; i++) 64 | g_pFilters[i] = param_pConvInfo[i]; 65 | } 66 | 67 | std::vector objectdetect_cnn(unsigned char * rgbImageData, int width, int height, int step) 68 | { 69 | 70 | TIME_START; 71 | if (!param_initialized) 72 | { 73 | init_parameters(); 74 | param_initialized = true; 75 | } 76 | TIME_END("init"); 77 | 78 | 79 | TIME_START; 80 | auto fx = setDataFrom3x3S2P1to1x1S1P0FromImage(rgbImageData, width, height, 3, step); 81 | TIME_END("convert data"); 82 | 83 | /***************CONV0*********************/ 84 | TIME_START; 85 | fx = convolution(fx, g_pFilters[0]); 86 | TIME_END("conv_head"); 87 | 88 | TIME_START; 89 | fx = convolutionDP(fx, g_pFilters[1], g_pFilters[2]); 90 | TIME_END("conv0"); 91 | 92 | TIME_START; 93 | fx = maxpooling2x2S2(fx); 94 | TIME_END("pool0"); 95 | 96 | /***************CONV1*********************/ 97 | TIME_START; 98 | fx = convolution4layerUnit(fx, g_pFilters[3], g_pFilters[4], g_pFilters[5], g_pFilters[6]); 99 | TIME_END("conv1"); 100 | 101 | /***************CONV2*********************/ 102 | TIME_START; 103 | fx = convolution4layerUnit(fx, g_pFilters[7], g_pFilters[8], g_pFilters[9], g_pFilters[10]); 104 | TIME_END("conv2"); 105 | 106 | /***************CONV3*********************/ 107 | TIME_START; 108 | fx = maxpooling2x2S2(fx); 109 | TIME_END("pool3"); 110 | 111 | TIME_START; 112 | auto fb1 = convolution4layerUnit(fx, g_pFilters[11], 
g_pFilters[12], g_pFilters[13], g_pFilters[14]); 113 | TIME_END("conv3"); 114 | 115 | /***************CONV4*********************/ 116 | TIME_START; 117 | fx = maxpooling2x2S2(fb1); 118 | TIME_END("pool4"); 119 | 120 | TIME_START; 121 | auto fb2 = convolution4layerUnit(fx, g_pFilters[15], g_pFilters[16], g_pFilters[17], g_pFilters[18]); 122 | TIME_END("conv4"); 123 | 124 | /***************CONV5*********************/ 125 | TIME_START; 126 | fx = maxpooling2x2S2(fb2); 127 | TIME_END("pool5"); 128 | 129 | TIME_START; 130 | auto fb3 = convolution4layerUnit(fx, g_pFilters[19], g_pFilters[20], g_pFilters[21], g_pFilters[22]); 131 | TIME_END("conv5"); 132 | 133 | CDataBlob pred_reg[3], pred_cls[3], pred_kps[3], pred_obj[3]; 134 | /***************branch5*********************/ 135 | TIME_START; 136 | fb3 = convolutionDP(fb3, g_pFilters[27], g_pFilters[28]); 137 | pred_cls[2] = convolutionDP(fb3, g_pFilters[33], g_pFilters[34], false); 138 | pred_reg[2] = convolutionDP(fb3, g_pFilters[39], g_pFilters[40], false); 139 | pred_kps[2] = convolutionDP(fb3, g_pFilters[51], g_pFilters[52], false); 140 | pred_obj[2] = convolutionDP(fb3, g_pFilters[45], g_pFilters[46], false); 141 | TIME_END("branch5"); 142 | 143 | /*****************add5*********************/ 144 | TIME_START; 145 | fb2 = elementAdd(upsampleX2(fb3), fb2); 146 | TIME_END("add5"); 147 | 148 | /*****************add6*********************/ 149 | TIME_START; 150 | fb2 = convolutionDP(fb2, g_pFilters[25], g_pFilters[26]); 151 | pred_cls[1] = convolutionDP(fb2, g_pFilters[31], g_pFilters[32], false); 152 | pred_reg[1] = convolutionDP(fb2, g_pFilters[37], g_pFilters[38], false); 153 | pred_kps[1] = convolutionDP(fb2, g_pFilters[49], g_pFilters[50], false); 154 | pred_obj[1] = convolutionDP(fb2, g_pFilters[43], g_pFilters[44], false); 155 | TIME_END("branch4"); 156 | 157 | /*****************add4*********************/ 158 | TIME_START; 159 | fb1 = elementAdd(upsampleX2(fb2), fb1); 160 | TIME_END("add4"); 161 | 162 | 
/***************branch3*********************/ 163 | TIME_START; 164 | fb1 = convolutionDP(fb1, g_pFilters[23], g_pFilters[24]); 165 | pred_cls[0] = convolutionDP(fb1, g_pFilters[29], g_pFilters[30], false); 166 | pred_reg[0] = convolutionDP(fb1, g_pFilters[35], g_pFilters[36], false); 167 | pred_kps[0] = convolutionDP(fb1, g_pFilters[47], g_pFilters[48], false); 168 | pred_obj[0] = convolutionDP(fb1, g_pFilters[41], g_pFilters[42], false); 169 | TIME_END("branch3"); 170 | 171 | /***************PRIORBOX*********************/ 172 | TIME_START; 173 | auto prior3 = meshgrid(fb1.cols, fb1.rows, 8); 174 | auto prior4 = meshgrid(fb2.cols, fb2.rows, 16); 175 | auto prior5 = meshgrid(fb3.cols, fb3.rows, 32); 176 | TIME_END("prior"); 177 | /***************PRIORBOX*********************/ 178 | 179 | TIME_START; 180 | bbox_decode(pred_reg[0], prior3, 8); 181 | bbox_decode(pred_reg[1], prior4, 16); 182 | bbox_decode(pred_reg[2], prior5, 32); 183 | 184 | kps_decode(pred_kps[0], prior3, 8); 185 | kps_decode(pred_kps[1], prior4, 16); 186 | kps_decode(pred_kps[2], prior5, 32); 187 | 188 | auto cls = concat3(blob2vector(pred_cls[0]), blob2vector(pred_cls[1]), blob2vector(pred_cls[2])); 189 | auto reg = concat3(blob2vector(pred_reg[0]), blob2vector(pred_reg[1]), blob2vector(pred_reg[2])); 190 | auto kps = concat3(blob2vector(pred_kps[0]), blob2vector(pred_kps[1]), blob2vector(pred_kps[2])); 191 | auto obj = concat3(blob2vector(pred_obj[0]), blob2vector(pred_obj[1]), blob2vector(pred_obj[2])); 192 | 193 | sigmoid(cls); 194 | sigmoid(obj); 195 | TIME_END("decode") 196 | 197 | TIME_START; 198 | std::vector facesInfo = detection_output(cls, reg, kps, obj, 0.45f, 0.2f, 1000, 512); 199 | TIME_END("detection output") 200 | return facesInfo; 201 | } 202 | 203 | int* facedetect_cnn(unsigned char * result_buffer, //buffer memory for storing face detection results, !!its size must be 0x9000 Bytes!! 
204 | unsigned char * rgb_image_data, int width, int height, int step) //input image, it must be BGR (three-channel) image! 205 | { 206 | 207 | if (!result_buffer) 208 | { 209 | fprintf(stderr, "%s: null buffer memory.\n", __FUNCTION__); 210 | return NULL; 211 | } 212 | //clear memory 213 | result_buffer[0] = 0; 214 | result_buffer[1] = 0; 215 | result_buffer[2] = 0; 216 | result_buffer[3] = 0; 217 | 218 | std::vector faces = objectdetect_cnn(rgb_image_data, width, height, step); 219 | 220 | int num_faces =(int)faces.size(); 221 | num_faces = MIN(num_faces, 1024); //1024 = 0x9000 / (16 * 2 + 4) 222 | 223 | int * pCount = (int *)result_buffer; 224 | pCount[0] = num_faces; 225 | 226 | for (int i = 0; i < num_faces; i++) 227 | { 228 | //copy data 229 | short * p = ((short*)(result_buffer + 4)) + 16 * size_t(i); 230 | p[0] = (short)(faces[i].score * 100); 231 | p[1] = (short)faces[i].x; 232 | p[2] = (short)faces[i].y; 233 | p[3] = (short)faces[i].w; 234 | p[4] = (short)faces[i].h; 235 | //copy landmarks 236 | for (int lmidx = 0; lmidx < 10; lmidx++) 237 | { 238 | p[5 + lmidx] = (short)faces[i].lm[lmidx]; 239 | } 240 | } 241 | 242 | return pCount; 243 | } 244 | -------------------------------------------------------------------------------- /src/facedetectcnn.h: -------------------------------------------------------------------------------- 1 | /* 2 | By downloading, copying, installing or using the software you agree to this license. 3 | If you do not agree to this license, do not download, install, 4 | copy or use the software. 5 | 6 | 7 | License Agreement For libfacedetection 8 | (3-clause BSD License) 9 | 10 | Copyright (c) 2018-2021, Shiqi Yu, all rights reserved. 
11 | shiqi.yu@gmail.com 12 | 13 | Redistribution and use in source and binary forms, with or without modification, 14 | are permitted provided that the following conditions are met: 15 | 16 | * Redistributions of source code must retain the above copyright notice, 17 | this list of conditions and the following disclaimer. 18 | 19 | * Redistributions in binary form must reproduce the above copyright notice, 20 | this list of conditions and the following disclaimer in the documentation 21 | and/or other materials provided with the distribution. 22 | 23 | * Neither the names of the copyright holders nor the names of the contributors 24 | may be used to endorse or promote products derived from this software 25 | without specific prior written permission. 26 | 27 | This software is provided by the copyright holders and contributors "as is" and 28 | any express or implied warranties, including, but not limited to, the implied 29 | warranties of merchantability and fitness for a particular purpose are disclaimed. 30 | In no event shall copyright holders or contributors be liable for any direct, 31 | indirect, incidental, special, exemplary, or consequential damages 32 | (including, but not limited to, procurement of substitute goods or services; 33 | loss of use, data, or profits; or business interruption) however caused 34 | and on any theory of liability, whether in contract, strict liability, 35 | or tort (including negligence or otherwise) arising in any way out of 36 | the use of this software, even if advised of the possibility of such damage. 37 | */ 38 | 39 | #pragma once 40 | 41 | #include "facedetection_export.h" 42 | 43 | //#define _ENABLE_AVX512 //Please enable it if X64 CPU 44 | //#define _ENABLE_AVX2 //Please enable it if X64 CPU 45 | //#define _ENABLE_NEON //Please enable it if ARM CPU 46 | 47 | 48 | FACEDETECTION_EXPORT int * facedetect_cnn(unsigned char * result_buffer, //buffer memory for storing face detection results, !!its size must be 0x20000 Bytes!! 
49 | unsigned char * rgb_image_data, int width, int height, int step); //input image, it must be BGR (three channels) insteed of RGB image! 50 | 51 | /* 52 | DO NOT EDIT the following code if you don't really understand it. 53 | */ 54 | #if defined(_ENABLE_AVX512) || defined(_ENABLE_AVX2) 55 | #include 56 | #endif 57 | 58 | 59 | #if defined(_ENABLE_NEON) 60 | #include "arm_neon.h" 61 | //NEON does not support UINT8*INT8 dot product 62 | //to conver the input data to range [0, 127], 63 | //and then use INT8*INT8 dot product 64 | #define _MAX_UINT8_VALUE 127 65 | #else 66 | #define _MAX_UINT8_VALUE 255 67 | #endif 68 | 69 | #if defined(_ENABLE_AVX512) 70 | #define _MALLOC_ALIGN 512 71 | #elif defined(_ENABLE_AVX2) 72 | #define _MALLOC_ALIGN 256 73 | #else 74 | #define _MALLOC_ALIGN 128 75 | #endif 76 | 77 | #if defined(_ENABLE_AVX512)&& defined(_ENABLE_NEON) 78 | #error Cannot enable the two of AVX512 and NEON at the same time. 79 | #endif 80 | #if defined(_ENABLE_AVX2)&& defined(_ENABLE_NEON) 81 | #error Cannot enable the two of AVX and NEON at the same time. 82 | #endif 83 | #if defined(_ENABLE_AVX512)&& defined(_ENABLE_AVX2) 84 | #error Cannot enable the two of AVX512 and AVX2 at the same time. 85 | #endif 86 | 87 | 88 | #if defined(_OPENMP) 89 | #include 90 | #endif 91 | 92 | #include 93 | #include 94 | #include 95 | #include 96 | 97 | void* myAlloc(size_t size); 98 | void myFree_(void* ptr); 99 | #define myFree(ptr) (myFree_(*(ptr)), *(ptr)=0); 100 | 101 | #ifndef MIN 102 | # define MIN(a,b) ((a) > (b) ? (b) : (a)) 103 | #endif 104 | 105 | #ifndef MAX 106 | # define MAX(a,b) ((a) < (b) ? 
(b) : (a)) 107 | #endif 108 | 109 | typedef struct FaceRect_ 110 | { 111 | float score; 112 | int x; 113 | int y; 114 | int w; 115 | int h; 116 | int lm[10]; 117 | }FaceRect; 118 | 119 | typedef struct ConvInfoStruct_ { 120 | int channels; 121 | int num_filters; 122 | bool is_depthwise; 123 | bool is_pointwise; 124 | bool with_relu; 125 | float* pWeights; 126 | float* pBiases; 127 | }ConvInfoStruct; 128 | 129 | 130 | 131 | template 132 | class CDataBlob 133 | { 134 | public: 135 | int rows; 136 | int cols; 137 | int channels; //in element 138 | int channelStep; //in byte 139 | T * data; 140 | 141 | public: 142 | CDataBlob() { 143 | rows = 0; 144 | cols = 0; 145 | channels = 0; 146 | channelStep = 0; 147 | data = nullptr; 148 | } 149 | CDataBlob(int r, int c, int ch) 150 | { 151 | data = nullptr; 152 | create(r, c, ch); 153 | //#warning "confirm later" 154 | setZero(); 155 | } 156 | ~CDataBlob() 157 | { 158 | setNULL(); 159 | } 160 | 161 | CDataBlob(CDataBlob &&other) { 162 | data = other.data; 163 | other.data = nullptr; 164 | rows = other.rows; 165 | cols = other.cols; 166 | channels = other.channels; 167 | channelStep = other.channelStep; 168 | } 169 | 170 | CDataBlob &operator=(CDataBlob &&other) { 171 | this->~CDataBlob(); 172 | new (this) CDataBlob(std::move(other)); 173 | return *this; 174 | } 175 | 176 | void setNULL() 177 | { 178 | if (data) 179 | myFree(&data); 180 | rows = cols = channels = channelStep = 0; 181 | data = nullptr; 182 | } 183 | 184 | void setZero() 185 | { 186 | if(data) 187 | memset(data, 0, channelStep * rows * cols); 188 | } 189 | 190 | inline bool isEmpty() const 191 | { 192 | return (rows <= 0 || cols <= 0 || channels == 0 || data == nullptr); 193 | } 194 | 195 | bool create(int r, int c, int ch) 196 | { 197 | setNULL(); 198 | 199 | rows = r; 200 | cols = c; 201 | channels = ch; 202 | 203 | //alloc space for int8 array 204 | int remBytes = (sizeof(T)* channels) % (_MALLOC_ALIGN / 8); 205 | if (remBytes == 0) 206 | this->channelStep = 
channels * sizeof(T); 207 | else 208 | this->channelStep = (channels * sizeof(T)) + (_MALLOC_ALIGN / 8) - remBytes; 209 | data = (T*)myAlloc(size_t(rows) * cols * this->channelStep); 210 | 211 | if (data == nullptr) 212 | { 213 | std::cerr << "Failed to alloc memeory for uint8 data blob: " 214 | << rows << "*" 215 | << cols << "*" 216 | << channels << std::endl; 217 | return false; 218 | } 219 | 220 | //memset(data, 0, width * height * channelStep); 221 | 222 | //the following code is faster than memset 223 | //but not only the padding bytes are set to zero. 224 | //BE CAREFUL!!! 225 | //#if defined(_OPENMP) 226 | //#pragma omp parallel for 227 | //#endif 228 | // for (int r = 0; r < this->rows; r++) 229 | // { 230 | // for (int c = 0; c < this->cols; c++) 231 | // { 232 | // int pixel_end = this->channelStep / sizeof(T); 233 | // T * pI = this->ptr(r, c); 234 | // for (int ch = this->channels; ch < pixel_end; ch++) 235 | // pI[ch] = 0; 236 | // } 237 | // } 238 | 239 | return true; 240 | } 241 | 242 | inline T * ptr(int r, int c) 243 | { 244 | if( r < 0 || r >= this->rows || c < 0 || c >= this->cols ) 245 | return nullptr; 246 | 247 | return (this->data + (size_t(r) * this->cols + c) * this->channelStep /sizeof(T)); 248 | } 249 | inline const T * ptr(int r, int c) const 250 | { 251 | if( r < 0 || r >= this->rows || c < 0 || c >= this->cols ) 252 | return nullptr; 253 | 254 | return (this->data + (size_t(r) * this->cols + c) * this->channelStep /sizeof(T)); 255 | } 256 | 257 | inline const T getElement(int r, int c, int ch) const 258 | { 259 | if (this->data) 260 | { 261 | if (r >= 0 && r < this->rows && 262 | c >= 0 && c < this->cols && 263 | ch >= 0 && ch < this->channels) 264 | { 265 | const T * p = this->ptr(r, c); 266 | return (p[ch]); 267 | } 268 | } 269 | 270 | return (T)(0); 271 | } 272 | 273 | friend std::ostream &operator<<(std::ostream &output, CDataBlob &dataBlob) 274 | { 275 | output << "DataBlob Size (channels, rows, cols) = (" 276 | << 
dataBlob.channels 277 | << ", " << dataBlob.rows 278 | << ", " << dataBlob.cols 279 | << ")" << std::endl; 280 | if( dataBlob.rows * dataBlob.cols * dataBlob.channels <= 16) 281 | { //print the elements only when the total number is less than 64 282 | for (int ch = 0; ch < dataBlob.channels; ch++) 283 | { 284 | output << "Channel " << ch << ": " << std::endl; 285 | 286 | for (int r = 0; r < dataBlob.rows; r++) 287 | { 288 | output << "("; 289 | for (int c = 0; c < dataBlob.cols; c++) 290 | { 291 | T * p = dataBlob.ptr(r, c); 292 | 293 | if(sizeof(T)<4) 294 | output << (int)(p[ch]); 295 | else 296 | output << p[ch]; 297 | 298 | if (c != dataBlob.cols - 1) 299 | output << ", "; 300 | } 301 | output << ")" << std::endl; 302 | } 303 | } 304 | } 305 | else{ 306 | output << "(" ; 307 | int idx = 0; 308 | bool outloop = false; 309 | for(int r = 0; r < dataBlob.rows && !outloop; ++r) { 310 | for(int c = 0; c < dataBlob.cols && !outloop; ++c) { 311 | for(int ch = 0; ch < dataBlob.channels && !outloop; ++ch) { 312 | output << dataBlob.getElement(r, c, ch) << ", "; 313 | ++idx; 314 | if(idx >= 16) { 315 | outloop = true; 316 | } 317 | } 318 | } 319 | } 320 | output << "..., " 321 | << dataBlob.getElement(dataBlob.rows-1, dataBlob.cols-1, dataBlob.channels-1) << ")" 322 | << std::endl; 323 | float max_it = -500.f; 324 | float min_it = 500.f; 325 | for(int r = 0; r < dataBlob.rows; ++r) { 326 | for(int c = 0; c < dataBlob.cols; ++c) { 327 | for(int ch = 0; ch < dataBlob.channels; ++ch) { 328 | max_it = std::max(max_it, dataBlob.getElement(r, c, ch)); 329 | min_it = std::min(min_it, dataBlob.getElement(r, c, ch)); 330 | } 331 | } 332 | } 333 | output << "max_it: " << max_it << " min_it: " << min_it << std::endl; 334 | } 335 | return output; 336 | } 337 | }; 338 | 339 | template 340 | class Filters{ 341 | public: 342 | int channels; 343 | int num_filters; 344 | bool is_depthwise; 345 | bool is_pointwise; 346 | bool with_relu; 347 | CDataBlob weights; 348 | CDataBlob biases; 349 | 
350 | Filters() 351 | { 352 | channels = 0; 353 | num_filters = 0; 354 | is_depthwise = false; 355 | is_pointwise = false; 356 | with_relu = true; 357 | } 358 | 359 | Filters & operator=(ConvInfoStruct & convinfo) 360 | { 361 | if (typeid(float) != typeid(T)) 362 | { 363 | std::cerr << "The data type must be float in this version." << std::endl; 364 | return *this; 365 | } 366 | if (typeid(float*) != typeid(convinfo.pWeights) || 367 | typeid(float*) != typeid(convinfo.pBiases)) 368 | { 369 | std::cerr << "The data type of the filter parameters must be float in this version." << std::endl; 370 | return *this; 371 | } 372 | 373 | this->channels = convinfo.channels; 374 | this->num_filters = convinfo.num_filters; 375 | this->is_depthwise = convinfo.is_depthwise; 376 | this->is_pointwise = convinfo.is_pointwise; 377 | this->with_relu = convinfo.with_relu; 378 | 379 | if(!this->is_depthwise && this->is_pointwise) //1x1 point wise 380 | { 381 | this->weights.create(1, num_filters, channels); 382 | } 383 | else if(this->is_depthwise && !this->is_pointwise) //3x3 depth wise 384 | { 385 | this->weights.create(1, 9, channels); 386 | } 387 | else 388 | { 389 | std::cerr << "Unsupported filter type. Only 1x1 point-wise and 3x3 depth-wise are supported." 
<< std::endl; 390 | return *this; 391 | } 392 | 393 | this->biases.create(1, 1, num_filters); 394 | 395 | //the format of convinfo.pWeights/biases must meet the format in this->weigths/biases 396 | for(int fidx = 0; fidx < this->weights.cols; fidx++) 397 | memcpy(this->weights.ptr(0,fidx), 398 | convinfo.pWeights + channels * fidx , 399 | channels * sizeof(T)); 400 | memcpy(this->biases.ptr(0,0), convinfo.pBiases, sizeof(T) * this->num_filters); 401 | 402 | return *this; 403 | } 404 | 405 | }; 406 | 407 | std::vector objectdetect_cnn(const unsigned char* rgbImageData, int with, int height, int step); 408 | 409 | CDataBlob setDataFrom3x3S2P1to1x1S1P0FromImage(const unsigned char* inputData, int imgWidth, int imgHeight, int imgChannels, int imgWidthStep, int padDivisor=32); 410 | CDataBlob convolution(const CDataBlob& inputData, const Filters& filters, bool do_relu = true); 411 | CDataBlob convolutionDP(const CDataBlob& inputData, 412 | const Filters& filtersP, const Filters& filtersD, bool do_relu = true); 413 | CDataBlob convolution4layerUnit(const CDataBlob& inputData, 414 | const Filters& filtersP1, const Filters& filtersD1, 415 | const Filters& filtersP2, const Filters& filtersD2, bool do_relu = true); 416 | CDataBlob maxpooling2x2S2(const CDataBlob& inputData); 417 | 418 | CDataBlob elementAdd(const CDataBlob& inputData1, const CDataBlob& inputData2); 419 | CDataBlob upsampleX2(const CDataBlob& inputData); 420 | 421 | CDataBlob meshgrid(int feature_width, int feature_height, int stride, float offset=0.0f); 422 | 423 | // TODO implement in SIMD 424 | void bbox_decode(CDataBlob& bbox_pred, const CDataBlob& priors, int stride); 425 | void kps_decode(CDataBlob& bbox_pred, const CDataBlob& priors, int stride); 426 | 427 | template 428 | CDataBlob blob2vector(const CDataBlob &inputData); 429 | 430 | template 431 | CDataBlob concat3(const CDataBlob& inputData1, const CDataBlob& inputData2, const CDataBlob& inputData3); 432 | 433 | // TODO implement in SIMD 434 | void 
sigmoid(CDataBlob& inputData); 435 | 436 | std::vector detection_output(const CDataBlob& cls, 437 | const CDataBlob& reg, 438 | const CDataBlob& kps, 439 | const CDataBlob& obj, 440 | float overlap_threshold, float confidence_threshold, int top_k, int keep_top_k); -------------------------------------------------------------------------------- /wu-thesis-facedetect.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ShiqiYu/libfacedetection/46caf3219d02cc9407c6ac5ce7284c7b1a1bda3a/wu-thesis-facedetect.pdf --------------------------------------------------------------------------------