├── .Utils.cpp.swp
├── Analyser.cpp
├── Analyser.h
├── AnalyserFactory.cpp
├── AnalyserFactory.h
├── BlinkAnalyser.cpp
├── BlinkAnalyser.h
├── BoundingBox.cpp
├── BoundingBox.h
├── Controller.cpp
├── Controller.h
├── Detector.cpp
├── Detector.h
├── ESR.hpp
├── ESR
│   ├── Fern.cpp
│   ├── FernCascade.cpp
│   └── ShapeRegressor.cpp
├── EyeDetector.cpp
├── EyeDetector.h
├── FaceAligner.cpp
├── FaceAligner.h
├── FaceDetector.cpp
├── FaceDetector.h
├── LivenessDetection3.pro
├── LivenessDetection3.pro.user
├── LivenessDetection3.pro.user.c3631ef
├── OpenMouthAnalyser.cpp
├── OpenMouthAnalyser.h
├── OpticalFlowCalculater.cpp
├── OpticalFlowCalculater.h
├── Utils.cpp
├── Utils.h
├── WebcamCapture.cpp
├── WebcamCapture.h
├── YawAnalyser.cpp
├── YawAnalyser.h
├── main.cpp
├── mainwindow.cpp
├── mainwindow.h
├── mainwindow.ui
├── 请张嘴.wav
├── 请眨眼.wav
└── 请跟随滑块摇头.wav

--------------------------------------------------------------------------------
/.Utils.cpp.swp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/netbeen/LivenessDetection3/ece2b5054e95b4bb69db0143fcb1d9b5eec572c1/.Utils.cpp.swp

--------------------------------------------------------------------------------
/Analyser.cpp:
--------------------------------------------------------------------------------
#include "Analyser.h"

Analyser::Analyser()
{

}

--------------------------------------------------------------------------------
/Analyser.h:
--------------------------------------------------------------------------------
#ifndef ANALYSER_H
#define ANALYSER_H

#include <QObject>

class Analyser : public QObject
{
    Q_OBJECT
public:
    Analyser();

public slots:
    virtual void start() = 0;

signals:
    void updateSlider(int percentage);
};

#endif // ANALYSER_H

--------------------------------------------------------------------------------
/AnalyserFactory.cpp:
--------------------------------------------------------------------------------
#include "AnalyserFactory.h"

// Factory constructor: register the names of the supported analyser types
AnalyserFactory::AnalyserFactory()
{
    analyserType.insert("blink");
    analyserType.insert("yaw");
    analyserType.insert("openMouth");
}

Analyser* AnalyserFactory::createAnalyser(std::string inputType){
    Analyser* newAnalyser = nullptr;    // return nullptr for an unknown type instead of an uninitialized pointer
    if(inputType.compare("blink") == 0){
        newAnalyser = new BlinkAnalyser();
    }else if(inputType.compare("yaw") == 0){
        newAnalyser = new YawAnalyser();
    }else if(inputType.compare("openMouth") == 0){
        newAnalyser = new OpenMouthAnalyser();
    }
    return newAnalyser;
}

--------------------------------------------------------------------------------
/AnalyserFactory.h:
--------------------------------------------------------------------------------
#ifndef ANALYSERFACTORY_H
#define ANALYSERFACTORY_H

#include <set>
#include "BlinkAnalyser.h"
#include "YawAnalyser.h"
#include "OpenMouthAnalyser.h"

class AnalyserFactory
{
public:
    AnalyserFactory();
    std::set<std::string> analyserType;
    static Analyser* createAnalyser(std::string);
};

#endif // ANALYSERFACTORY_H

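For orientation, a minimal sketch of how a caller is expected to drive the factory. This snippet is hypothetical (not a file from the repository); in the real application the Controller below performs this wiring across threads.

#include "AnalyserFactory.h"
#include <iostream>

void runOneChallenge() {
    // Look up one of the registered type names ("blink", "yaw", "openMouth").
    Analyser* analyser = AnalyserFactory::createAnalyser("blink");
    if (analyser == nullptr) {     // unknown type name
        std::cout << "unknown analyser type" << std::endl;
        return;
    }
    // The Controller normally moves the analyser onto its own QThread and
    // triggers start() through a queued signal; calling it directly also works.
    analyser->start();
}
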
--------------------------------------------------------------------------------
/BlinkAnalyser.cpp:
--------------------------------------------------------------------------------
#include "BlinkAnalyser.h"
#include "QThread"
#include <QSound>

// Blink analyser constructor
BlinkAnalyser::BlinkAnalyser():blinkCount(0),isEyesOpen(false),blinkThreshold(6),timeoutTimeMs(10000)
{
    webcamCapture = WebcamCapture::getInstance();
    faceDetector = new FaceDetector();
    eyeDetector = new EyeDetector();
}

// Start the blink-detection routine
void BlinkAnalyser::start(){
    QSound::play("/home/netbeen/QtWorkspace/LivenessDetection3/请眨眼.wav");
    timeoutTimer = new QTimer();
    QObject::connect(timeoutTimer,SIGNAL(timeout()),this,SLOT(timeout()));
    std::cout << "BlinkAnalyser at " << QThread::currentThreadId() << std::endl;
    QObject::connect(webcamCapture,SIGNAL(newImageCaptured(cv::Mat)),this,SLOT(receiveNewFrame(cv::Mat)));
    timeoutTimer->start(timeoutTimeMs);
}

void BlinkAnalyser::receiveNewFrame(cv::Mat newFrame){
    cv::cvtColor(newFrame,this->grayImage,cv::COLOR_BGR2GRAY);
    if (this->faceDetector->detect(this->grayImage,5, this->faceBoundingBox)) {    // run the FaceDetector; a face was found
        this->upperImage = grayImage(this->faceBoundingBox.returnUpperRect());     // crop the upper part of the face (where the eyes are)
        if(this->eyeDetector->detect(this->upperImage,this->eyesRects)){           // run the EyeDetector; eyes were found
            if(this->isEyesOpen == false){                                         // closed-to-open transition: score one blink
                this->isEyesOpen = true;
                this->blinkCount++;
            }
            Utils::drawRect(this->grayImage, eyesRects,this->faceBoundingBox.returnRect().x,this->faceBoundingBox.returnRect().y);
        }else{                                                                     // no eyes found
            this->isEyesOpen = false;
        }
        Utils::drawRect(this->grayImage,this->faceBoundingBox);
    }else{                                                                         // no face found: reset the counter
        this->blinkCount = 0;
    }
    if(blinkCount > this->blinkThreshold){
        this->success();
    }
    //cv::moveWindow("BlinkAnalyser",200,160);
    //cv::imshow("BlinkAnalyser", grayImage);
}

// Slot for the blink timeout timer
void BlinkAnalyser::timeout(){
    timeoutTimer->stop();
    QObject::disconnect(webcamCapture,SIGNAL(newImageCaptured(cv::Mat)),this,SLOT(receiveNewFrame(cv::Mat)));    // stop receiving webcam frames
    //cv::destroyAllWindows();
    emit this->done(false);
    std::cout << "BlinkAnalyser Time out!" << std::endl;
}

// Called when enough blinks have been counted
void BlinkAnalyser::success(){
    timeoutTimer->stop();
    QObject::disconnect(webcamCapture,SIGNAL(newImageCaptured(cv::Mat)),this,SLOT(receiveNewFrame(cv::Mat)));    // stop receiving webcam frames
    //cv::destroyAllWindows();
    emit this->done(true);
    std::cout << "BlinkAnalyser success!" << std::endl;
}

--------------------------------------------------------------------------------
/BlinkAnalyser.h:
--------------------------------------------------------------------------------
#ifndef BLINKANALYSER_H
#define BLINKANALYSER_H

#include "Analyser.h"
#include "WebcamCapture.h"
#include "EyeDetector.h"
#include "FaceDetector.h"
#include "Utils.h"
#include <QTimer>

class BlinkAnalyser : public Analyser
{
    Q_OBJECT
public:
    BlinkAnalyser();

private:
    WebcamCapture* webcamCapture;
    int blinkCount;
    EyeDetector* eyeDetector;
    FaceDetector* faceDetector;
    cv::Mat grayImage;
    BoundingBox faceBoundingBox;
    std::vector<cv::Rect> eyesRects;
    cv::Mat upperImage;
    bool isEyesOpen;
    const int blinkThreshold;
    QTimer* timeoutTimer;
    void success();
    const int timeoutTimeMs;

public slots:
    virtual void start();
    void receiveNewFrame(cv::Mat newFrame);

private slots:
    void timeout();

signals:
    void done(bool result);
};

#endif // BLINKANALYSER_H

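The counting logic in receiveNewFrame is a two-state machine: a blink is scored each time the eyes reappear after a frame where they were not detected. A standalone sketch of just that logic (hypothetical, for illustration):

#include <vector>
#include <iostream>

// Counts closed-to-open transitions in a sequence of per-frame eye detections.
int countBlinks(const std::vector<bool>& eyesDetectedPerFrame) {
    int blinkCount = 0;
    bool isEyesOpen = false;
    for (bool eyesDetected : eyesDetectedPerFrame) {
        if (eyesDetected && !isEyesOpen) {
            blinkCount++;              // eyes reappeared: one transition scored
        }
        isEyesOpen = eyesDetected;
    }
    return blinkCount;
}

int main() {
    // Open, blink (closed), open again -> 2 transitions are scored, because the
    // very first "open" frame also counts as a transition, as in the class above.
    std::vector<bool> frames = {true, true, false, true, true};
    std::cout << countBlinks(frames) << std::endl;    // prints 2
}
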
--------------------------------------------------------------------------------
/BoundingBox.cpp:
--------------------------------------------------------------------------------
#include "BoundingBox.h"

BoundingBox::~BoundingBox()
{
}

BoundingBox::BoundingBox(): startX(0),startY(0),width(0),height(0),centerX(0),centerY(0){
}

cv::Point_<double> BoundingBox::getStartPoint(){
    cv::Point_<double> point;
    point.x = startX;
    point.y = startY;
    return point;
}

cv::Point_<double> BoundingBox::getEndPoint(){
    cv::Point_<double> point;
    point.x = startX + width;
    point.y = startY + height;
    return point;
}

bool BoundingBox::isInBoudingBox(cv::Point_<double> pt){
    if(pt.x > startX && pt.x < (startX + width)){
        if(pt.y > startY && pt.y < (startY + height)){
            return true;
        }
    }
    return false;
}

cv::Rect BoundingBox::returnRect(){
    cv::Rect objectRect;
    objectRect.x = startX;
    objectRect.y = startY;
    objectRect.width = width;
    objectRect.height = height;
    return objectRect;
}

// Upper 60% of the box: the region searched for the eyes
cv::Rect BoundingBox::returnUpperRect(){
    cv::Rect objectRect;
    objectRect.x = startX;
    objectRect.y = startY;
    objectRect.width = width;
    objectRect.height = height * 0.6;
    return objectRect;
}

--------------------------------------------------------------------------------
/BoundingBox.h:
--------------------------------------------------------------------------------
#ifndef BOUNDINGBOX_H
#define BOUNDINGBOX_H

#include <opencv2/core/core.hpp>

class BoundingBox
{
public:
    BoundingBox();
    ~BoundingBox();
    double startX;
    double startY;
    double width;
    double height;
    double centerX;
    double centerY;
    cv::Point_<double> getStartPoint();
    cv::Point_<double> getEndPoint();
    bool isInBoudingBox(cv::Point_<double> pt);
    cv::Rect returnRect();
    cv::Rect returnUpperRect();
};

#endif // BOUNDINGBOX_H

--------------------------------------------------------------------------------
/Controller.cpp:
--------------------------------------------------------------------------------
#include "Controller.h"

// Definition and initialization of the singleton's static pointer
Controller* Controller::ptr2Controller = nullptr;

Controller::Controller():currentAnalyseIndex(-1)    // -1 wraps around for a size_t; the first increment brings it to 0
{
    this->opticalFlowCalculater = OpticalFlowCalculater::getInstance();
    this->opticalFlowCalculaterThread = new QThread();
    this->opticalFlowCalculater->moveToThread(this->opticalFlowCalculaterThread);
    this->opticalFlowCalculaterThread->start();

    this->faceAligner = FaceAligner::getInstance();    // load the faceAligner (and its model file) ahead of time
    faceAlignerThread = new QThread();
    faceAligner->moveToThread(faceAlignerThread);
    faceAlignerThread->start();

    analyserFactory = new AnalyserFactory();
    QThread::connect(this,SIGNAL(startNextAnalyserSignal()),this,SLOT(startNextAnalyserSlot()));    // the Controller drives its own slot through this signal
    webcamCapture = WebcamCapture::getInstance();
    webcamThread = new QThread();
    webcamCapture->moveToThread(webcamThread);
    webcamThread->start();
    QObject::connect(this,SIGNAL(webcamStart()),webcamCapture,SLOT(start()));
}

// Singleton accessor
Controller* Controller::getInstance(){
    if(Controller::ptr2Controller == nullptr){
        Controller::ptr2Controller = new Controller();
    }
    return Controller::ptr2Controller;
}

void Controller::start(){
    std::cout << "Controller at " << QThread::currentThreadId() << std::endl;
    this->currentAnalyseIndex = -1;
    analyserOrder.clear();
    for(std::string elemStr : analyserFactory->analyserType){
        this->analyserOrder.push_back(elemStr);
    }
    Utils::randomizeVector(this->analyserOrder);    // randomize the order of the liveness challenges
    std::cout << "------------------------" << std::endl;
    for(std::string elemStr : this->analyserOrder){
        std::cout << elemStr << std::endl;
    }
    std::cout << "------------------------" << std::endl;
    emit this->webcamStart();               // start the webcam
    emit this->startNextAnalyserSignal();
}

void Controller::startNextAnalyserSlot(){
    this->currentAnalyseIndex++;
    if(this->currentAnalyseIndex < this->analyserOrder.size()){
        Analyser* analyser = AnalyserFactory::createAnalyser(this->analyserOrder.at(this->currentAnalyseIndex));
        QObject::connect(this,SIGNAL(analyserStartSignal()),analyser,SLOT(start()));
        analyserVector.push_back(analyser);

        analyserThread = new QThread();
        analyser->moveToThread(analyserThread);
        QObject::connect(analyser,SIGNAL(done(bool)),this,SLOT(receiveAnalyserResultSlot(bool)));
        QObject::connect(analyser,SIGNAL(updateSlider(int)),this,SLOT(receiveSliderPercentage(int)));
        analyserThread->start();
        emit this->analyserStartSignal();
        QObject::disconnect(this,SIGNAL(analyserStartSignal()),analyser,SLOT(start()));    // detach the start signal so the next analyser is not triggered twice

    }else{
        std::cout << "All over" << std::endl;
    }
}

void Controller::receiveAnalyserResultSlot(bool result){
    if(result == true){
        emit this->startNextAnalyserSignal();
    }else{
        std::cout << "Controller deny" << std::endl;
    }
}

void Controller::receiveSliderPercentage(int percentage){
    emit this->updateSlider(percentage);
}

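Controller relies throughout on the standard Qt worker-object pattern: each collaborator is a plain QObject moved onto its own QThread, and every cross-thread call goes through a queued signal/slot connection rather than a direct method call. A minimal self-contained sketch of that pattern (hypothetical names, not project code):

#include <QCoreApplication>
#include <QThread>
#include <QTimer>
#include <QDebug>

int main(int argc, char** argv) {
    QCoreApplication app(argc, argv);
    qDebug() << "main thread:  " << QThread::currentThreadId();

    QThread thread;
    QObject worker;                  // stand-in for WebcamCapture / FaceAligner / an Analyser
    worker.moveToThread(&thread);    // functors connected with `worker` as context now run on `thread`
    thread.start();

    QTimer timer;
    timer.setSingleShot(true);
    // Cross-thread, queued delivery: the functor executes on the worker's thread,
    // which is how Controller's analyserStartSignal() reaches Analyser::start().
    QObject::connect(&timer, &QTimer::timeout, &worker, [&app]() {
        qDebug() << "worker thread:" << QThread::currentThreadId();
        QMetaObject::invokeMethod(&app, "quit", Qt::QueuedConnection);
    });
    timer.start(0);

    int rc = app.exec();
    thread.quit();
    thread.wait();
    return rc;
}
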
--------------------------------------------------------------------------------
/Controller.h:
--------------------------------------------------------------------------------
#ifndef CONTROLLER_H
#define CONTROLLER_H

#include <QThread>
#include <iostream>
#include "AnalyserFactory.h"
#include "WebcamCapture.h"
#include "FaceAligner.h"
#include "OpticalFlowCalculater.h"
#include "Utils.h"

class Controller : public QObject
{
    Q_OBJECT
public:
    static Controller* getInstance();

private:
    Controller();
    static Controller* ptr2Controller;
    AnalyserFactory* analyserFactory;
    std::vector<std::string> analyserOrder;    // the randomized order of the liveness challenges
    size_t currentAnalyseIndex;
    std::vector<Analyser*> analyserVector;
    QThread* analyserThread;
    QThread* webcamThread;
    QThread* faceAlignerThread;
    WebcamCapture* webcamCapture;
    FaceAligner* faceAligner;
    OpticalFlowCalculater* opticalFlowCalculater;
    QThread* opticalFlowCalculaterThread;

signals:
    void startNextAnalyserSignal();
    void analyserStartSignal();
    void webcamStart();
    void updateSlider(int percentage);

public slots:
    void start();
    void startNextAnalyserSlot();
    void receiveAnalyserResultSlot(bool result);
    void receiveSliderPercentage(int percentage);

};

#endif // CONTROLLER_H

--------------------------------------------------------------------------------
/Detector.cpp:
--------------------------------------------------------------------------------
#include "Detector.h"

Detector::Detector()
{

}

Detector::~Detector()
{

}

--------------------------------------------------------------------------------
/Detector.h:
--------------------------------------------------------------------------------
#ifndef DETECTOR_H
#define DETECTOR_H

#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "BoundingBox.h"

class Detector
{
public:
    Detector();
    virtual ~Detector();
protected:
    cv::CascadeClassifier cascadeClassifier;
};

#endif // DETECTOR_H

--------------------------------------------------------------------------------
/ESR.hpp:
--------------------------------------------------------------------------------
/*
 * ESR.hpp
 *
 *  Created on: Oct 15, 2014
 *      Author: netbeen
 */

#ifndef ESR_HPP_
#define ESR_HPP_

#include <opencv2/core/core.hpp>
#include <fstream>
//#include
#include <vector>
#include "BoundingBox.h"

class Fern {

private:
    int fern_pixel_num_;
    int landmark_num_;
    cv::Mat_<int> selected_nearest_landmark_index_;
    cv::Mat_<double> threshold_;
    cv::Mat_<int> selected_pixel_index_;
    cv::Mat_<double> selected_pixel_locations_;
    std::vector<cv::Mat_<double> > bin_output_;
public:
    std::vector<cv::Mat_<double> > Train(const std::vector<std::vector<double> >& candidate_pixel_intensity, const cv::Mat_<double>& covariance, const cv::Mat_<double>& candidate_pixel_locations, const cv::Mat_<int>& nearest_landmark_index, const std::vector<cv::Mat_<double> >& regression_targets, int fern_pixel_num);
    cv::Mat_<double> predict(const cv::Mat_<uchar>& image, const cv::Mat_<double>& shape, const cv::Mat_<double>& rotation, const BoundingBox& bounding_box, double scale);
    void read(std::ifstream& fin);
    void Write(std::ofstream& fout);
};

class FernCascade {
public:
    std::vector<cv::Mat_<double> > train(const std::vector<cv::Mat_<uchar> >& images, const std::vector<cv::Mat_<double> >& current_shapes, const std::vector<cv::Mat_<double> >& ground_truth_shapes, const std::vector<BoundingBox>& bounding_box, const cv::Mat_<double>& mean_shape, int second_level_num, int candidate_pixel_num, int fern_pixel_num);
    cv::Mat_<double> predict(const cv::Mat_<uchar>& image, const BoundingBox& bounding_box, const cv::Mat_<double>& mean_shape, const cv::Mat_<double>& shape);
    void read(std::ifstream& fin);
    void Write(std::ofstream& fout);
private:
    std::vector<Fern> ferns_;
    int second_level_num_;
};

class ShapeRegressor {
public:
    ShapeRegressor();
    void train(const std::vector<cv::Mat_<uchar> >& images, const std::vector<cv::Mat_<double> >& ground_truth_shapes, const std::vector<BoundingBox>& bounding_box, int first_level_num, int second_level_num, int candidate_pixel_num, int fern_pixel_num, int initial_num);
    cv::Mat_<double> predict(const cv::Mat_<uchar>& image, const BoundingBox& bounding_box, int initial_num);
    void read(std::ifstream& fin);
    void Write(std::ofstream& fout);
    void load(std::string path);
    void save(std::string path);
private:
    int first_level_num_;
    int landmark_num_;
    std::vector<FernCascade> fernCascades;
    cv::Mat_<double> meanShape;
    std::vector<cv::Mat_<double> > training_shapes_;
    std::vector<BoundingBox> bounding_box_;
};

#endif /* ESR_HPP_ */

--------------------------------------------------------------------------------
/ESR/Fern.cpp:
--------------------------------------------------------------------------------
/*
Author: Bi Sai
Date: 2014/06/18
This program is a reimplementation of algorithms in "Face Alignment by Explicit
Shape Regression" by Cao et al.
If you find any bugs, please email me: soundsilencebisai-at-gmail-dot-com

Copyright (c) 2014 Bi Sai
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#include "ESR.hpp"

using namespace std;
using namespace cv;

vector<Mat_<double> > Fern::Train(const vector<vector<double> >& candidate_pixel_intensity, const Mat_<double>& covariance, const Mat_<double>& candidate_pixel_locations,
        const Mat_<int>& nearest_landmark_index, const vector<Mat_<double> >& regression_targets, int fern_pixel_num) {
    // selected_pixel_index_: fern_pixel_num*2 matrix, the index of selected pixel pairs in the fern
    // selected_pixel_locations_: fern_pixel_num*4 matrix, the locations of selected pixel pairs
    // stored in the format (x_1,y_1,x_2,y_2) for each row
    fern_pixel_num_ = fern_pixel_num;
    landmark_num_ = regression_targets[0].rows;
    selected_pixel_index_.create(fern_pixel_num, 2);
    selected_pixel_locations_.create(fern_pixel_num, 4);
    selected_nearest_landmark_index_.create(fern_pixel_num, 2);
    int candidate_pixel_num = candidate_pixel_locations.rows;

    // select pixel pairs from candidate pixels; this selection is based on the correlation between pixel
    // densities and regression targets
    // for details, please refer to "Face Alignment by Explicit Shape Regression"
    // threshold_: thresholds for each pair of pixels in the fern

    threshold_.create(fern_pixel_num, 1);
    // get a random direction
    RNG random_generator(getTickCount());
    for (int i = 0; i < fern_pixel_num; i++) {
        // RNG random_generator(i);
        Mat_<double> random_direction(landmark_num_ * 2, 1);
        random_generator.fill(random_direction, RNG::UNIFORM, -1.1, 1.1);

        normalize(random_direction, random_direction);
        // Mat_<double> projection_result(regression_targets.size(),1);
        vector<double> projection_result;
        // project regression targets along the random direction
        for (int j = 0; j < static_cast<int>(regression_targets.size()); j++) {
            double temp = 0;
            for (int k = 0; k < regression_targets[j].rows; k++) {
                temp = temp + regression_targets[j](k, 0) * random_direction(2 * k) + regression_targets[j](k, 1) * random_direction(2 * k + 1);
            }
            projection_result.push_back(temp);
        }

        Mat_<double> covariance_projection_density(candidate_pixel_num, 1);
        for (int j = 0; j < candidate_pixel_num; j++) {
            covariance_projection_density(j) = Utils::calculate_covariance(projection_result, candidate_pixel_intensity[j]);
        }

        // find the pixel pair whose intensity difference correlates most with the projected targets
        double max_correlation = -1;
        int max_pixel_index_1 = 0;
        int max_pixel_index_2 = 0;
        for (int j = 0; j < candidate_pixel_num; j++) {
            for (int k = 0; k < candidate_pixel_num; k++) {
                double temp1 = covariance(j, j) + covariance(k, k) - 2 * covariance(j, k);    // variance of the intensity difference P_j - P_k
                if (abs(temp1) < 1e-10) {
                    continue;
                }
                bool flag = false;
                for (int p = 0; p < i; p++) {
                    if (j == selected_pixel_index_(p, 0) && k == selected_pixel_index_(p, 1)) {
                        flag = true;
                        break;
                    } else if (j == selected_pixel_index_(p, 1) && k == selected_pixel_index_(p, 0)) {
                        flag = true;
                        break;
                    }
                }
                if (flag) {    // this pair was already selected for an earlier bit of the fern
                    continue;
                }
                double temp = (covariance_projection_density(j) - covariance_projection_density(k)) / sqrt(temp1);
                if (abs(temp) > max_correlation) {
                    max_correlation = abs(temp);    // store the magnitude so later pairs are compared against |correlation|
                    max_pixel_index_1 = j;
                    max_pixel_index_2 = k;
                }
            }
        }

        selected_pixel_index_(i, 0) = max_pixel_index_1;
        selected_pixel_index_(i, 1) = max_pixel_index_2;
        selected_pixel_locations_(i, 0) = candidate_pixel_locations(max_pixel_index_1, 0);
        selected_pixel_locations_(i, 1) = candidate_pixel_locations(max_pixel_index_1, 1);
        selected_pixel_locations_(i, 2) = candidate_pixel_locations(max_pixel_index_2, 0);
        selected_pixel_locations_(i, 3) = candidate_pixel_locations(max_pixel_index_2, 1);
        selected_nearest_landmark_index_(i, 0) = nearest_landmark_index(max_pixel_index_1);
        selected_nearest_landmark_index_(i, 1) = nearest_landmark_index(max_pixel_index_2);

        // get the threshold for this pair
        double max_diff = -1;
        for (int j = 0; j < static_cast<int>(candidate_pixel_intensity[max_pixel_index_1].size()); j++) {
            double temp = candidate_pixel_intensity[max_pixel_index_1][j] - candidate_pixel_intensity[max_pixel_index_2][j];
            if (abs(temp) > max_diff) {
                max_diff = abs(temp);
            }
        }

        threshold_(i) = random_generator.uniform(-0.2 * max_diff, 0.2 * max_diff);
    }

    // determine the bin of each training shape
    vector<vector<int> > shapes_in_bin;
    int bin_num = pow(2.0, fern_pixel_num);
    shapes_in_bin.resize(bin_num);
    for (int i = 0; i < static_cast<int>(regression_targets.size()); i++) {
        int index = 0;
        for (int j = 0; j < fern_pixel_num; j++) {
            double density_1 = candidate_pixel_intensity[selected_pixel_index_(j, 0)][i];
            double density_2 = candidate_pixel_intensity[selected_pixel_index_(j, 1)][i];
            if (density_1 - density_2 >= threshold_(j)) {
                index = index + pow(2.0, j);
            }
        }
        shapes_in_bin[index].push_back(i);
    }

    // get bin output
    vector<Mat_<double> > prediction;
    prediction.resize(regression_targets.size());
    bin_output_.resize(bin_num);
    for (int i = 0; i < bin_num; i++) {
        Mat_<double> temp = Mat::zeros(landmark_num_, 2, CV_64FC1);
        int bin_size = shapes_in_bin[i].size();
        for (int j = 0; j < bin_size; j++) {
            int index = shapes_in_bin[i][j];
            temp = temp + regression_targets[index];
        }
        if (bin_size == 0) {
            bin_output_[i] = temp;
            continue;
        }
        temp = (1.0 / ((1.0 + 1000.0 / bin_size) * bin_size)) * temp;    // shrinkage-regularized mean of the bin
        bin_output_[i] = temp;
        for (int j = 0; j < bin_size; j++) {
            int index = shapes_in_bin[i][j];
            prediction[index] = temp;
        }
    }
    return prediction;
}

void Fern::Write(ofstream& fout) {
    fout << fern_pixel_num_ << endl;
    fout << landmark_num_ << endl;
    for (int i = 0; i < fern_pixel_num_; i++) {
        fout << selected_pixel_locations_(i, 0) << " " << selected_pixel_locations_(i, 1) << " " << selected_pixel_locations_(i, 2) << " " << selected_pixel_locations_(i, 3) << " " << endl;
        fout << selected_nearest_landmark_index_(i, 0) << endl;
        fout << selected_nearest_landmark_index_(i, 1) << endl;
        fout << threshold_(i) << endl;
    }
    for (int i = 0; i < static_cast<int>(bin_output_.size()); i++) {
        for (int j = 0; j < bin_output_[i].rows; j++) {
            fout << bin_output_[i](j, 0) << " " << bin_output_[i](j, 1) << " ";
        }
        fout << endl;
    }

}

void Fern::read(ifstream& fin) {
    fin >> fern_pixel_num_;    // the first read is at line 2696 of the model file; the value there is 5 (role unclear at annotation time)
    fin >> landmark_num_;      // the next read is at line 2697 of the model file; the value there is 29 (role unclear at annotation time)
    selected_nearest_landmark_index_.create(fern_pixel_num_, 2);
    selected_pixel_locations_.create(fern_pixel_num_, 4);
    threshold_.create(fern_pixel_num_, 1);
    for (int i = 0; i < fern_pixel_num_; i++) {
        fin >> selected_pixel_locations_(i, 0) >> selected_pixel_locations_(i, 1) >> selected_pixel_locations_(i, 2) >> selected_pixel_locations_(i, 3);    // four values initialize selected_pixel_locations_
        fin >> selected_nearest_landmark_index_(i, 0) >> selected_nearest_landmark_index_(i, 1);    // two values initialize selected_nearest_landmark_index_
        fin >> threshold_(i);    // one value initializes threshold_
    }

    int binNum = pow(2.0, fern_pixel_num_);
    for (int i = 0; i < binNum; i++) {
        Mat_<double> temp(landmark_num_, 2);
        for (int j = 0; j < landmark_num_; j++) {
            fin >> temp(j, 0) >> temp(j, 1);
        }
        bin_output_.push_back(temp);    // append this bin's output to the vector
    }
}

Mat_<double> Fern::predict(const Mat_<uchar>& image, const Mat_<double>& shape, const Mat_<double>& rotation, const BoundingBox& bounding_box, double scale) {
    int index = 0;
    for (int i = 0; i < fern_pixel_num_; i++) {
        int nearest_landmark_index_1 = selected_nearest_landmark_index_(i, 0);
        int nearest_landmark_index_2 = selected_nearest_landmark_index_(i, 1);
        double x = selected_pixel_locations_(i, 0);
        double y = selected_pixel_locations_(i, 1);
        double project_x = scale * (rotation(0, 0) * x + rotation(0, 1) * y) * bounding_box.width / 2.0 + shape(nearest_landmark_index_1, 0);
        double project_y = scale * (rotation(1, 0) * x + rotation(1, 1) * y) * bounding_box.height / 2.0 + shape(nearest_landmark_index_1, 1);

        project_x = std::max(0.0, std::min((double) project_x, image.cols - 1.0));    // clamp to the image border
        project_y = std::max(0.0, std::min((double) project_y, image.rows - 1.0));
        double intensity_1 = (int) (image((int) project_y, (int) project_x));

        x = selected_pixel_locations_(i, 2);
        y = selected_pixel_locations_(i, 3);
        project_x = scale * (rotation(0, 0) * x + rotation(0, 1) * y) * bounding_box.width / 2.0 + shape(nearest_landmark_index_2, 0);
        project_y = scale * (rotation(1, 0) * x + rotation(1, 1) * y) * bounding_box.height / 2.0 + shape(nearest_landmark_index_2, 1);
        project_x = std::max(0.0, std::min((double) project_x, image.cols - 1.0));
        project_y = std::max(0.0, std::min((double) project_y, image.rows - 1.0));
        double intensity_2 = (int) (image((int) project_y, (int) project_x));

        if (intensity_1 - intensity_2 >= threshold_(i)) {
            index = index + (int) (pow(2, i));
        }
    }
    return bin_output_[index];

}

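Fern::predict reduces an image to a single bin index: each of the fern_pixel_num_ intensity comparisons contributes one bit, giving 2^N bins for N pixel pairs. A standalone sketch of just the indexing step (hypothetical values, assuming the pair intensities have already been sampled):

#include <vector>
#include <cstdio>

// Builds the fern bin index: bit i is set iff pair i's intensity difference
// clears its threshold.
int fernBinIndex(const std::vector<double>& intensity1,
                 const std::vector<double>& intensity2,
                 const std::vector<double>& threshold) {
    int index = 0;
    for (size_t i = 0; i < threshold.size(); i++) {
        if (intensity1[i] - intensity2[i] >= threshold[i]) {
            index |= (1 << i);
        }
    }
    return index;
}

int main() {
    // Three pairs -> 8 bins; pairs 0 and 2 pass their thresholds: index 0b101 = 5.
    std::vector<double> a = {120, 60, 200}, b = {100, 90, 150}, t = {10, 10, 10};
    std::printf("%d\n", fernBinIndex(a, b, t));    // prints 5
}
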
--------------------------------------------------------------------------------
/ESR/FernCascade.cpp:
--------------------------------------------------------------------------------
/*
Author: Bi Sai
Date: 2014/06/18
This program is a reimplementation of algorithms in "Face Alignment by Explicit
Shape Regression" by Cao et al.
If you find any bugs, please email me: soundsilencebisai-at-gmail-dot-com

Copyright (c) 2014 Bi Sai
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#include "ESR.hpp"

using namespace std;
using namespace cv;

vector<Mat_<double> > FernCascade::train(const vector<Mat_<uchar> >& images, const vector<Mat_<double> >& current_shapes, const vector<Mat_<double> >& ground_truth_shapes, const vector<BoundingBox>& bounding_box, const Mat_<double>& mean_shape, int secondLevelNum, int candidate_pixel_num, int fern_pixel_num) {
    Mat_<double> candidate_pixel_locations(candidate_pixel_num, 2);
    Mat_<int> nearest_landmark_index(candidate_pixel_num, 1);
    vector<Mat_<double> > regressionTargets;
    RNG random_generator(getTickCount());
    second_level_num_ = secondLevelNum;

    // calculate regression targets: the difference between ground truth shapes and current shapes
    // candidate_pixel_locations: the locations of candidate pixels, indexed relative to the nearest landmark on the mean shape
    regressionTargets.resize(current_shapes.size());
    for (int i = 0; i < static_cast<int>(current_shapes.size()); i++) {
        regressionTargets[i] = Utils::projectShape(ground_truth_shapes[i], bounding_box[i]) - Utils::projectShape(current_shapes[i], bounding_box[i]);
        Mat_<double> rotation;
        double scale;
        Utils::SimilarityTransform(mean_shape, Utils::projectShape(current_shapes[i], bounding_box[i]), rotation, scale);
        transpose(rotation, rotation);    // a rotation matrix is orthonormal, so its transpose is its inverse

        regressionTargets[i] = scale * regressionTargets[i] * rotation;
    }

    // get candidate pixel locations; please refer to 'shape-indexed features'
    for (int i = 0; i < candidate_pixel_num; i++) {
        double x = random_generator.uniform(-1.0, 1.0);
        double y = random_generator.uniform(-1.0, 1.0);
        if (x * x + y * y > 1.0) {    // rejection-sample the unit disc
            i--;
            continue;
        }
        // find the nearest landmark index
        double min_dist = 1e10;
        int min_index = 0;
        for (int j = 0; j < mean_shape.rows; j++) {
            double temp = pow(mean_shape(j, 0) - x, 2.0) + pow(mean_shape(j, 1) - y, 2.0);
            if (temp < min_dist) {
                min_dist = temp;
                min_index = j;
            }
        }
        candidate_pixel_locations(i, 0) = x - mean_shape(min_index, 0);
        candidate_pixel_locations(i, 1) = y - mean_shape(min_index, 1);
        nearest_landmark_index(i) = min_index;
    }

    // get densities of candidate pixels for each image
    // for densities: each row is the pixel densities at each candidate pixel for an image
    // Mat_<double> densities(images.size(), candidate_pixel_num);
    vector<vector<double> > densities;
    densities.resize(candidate_pixel_num);
    for (int i = 0; i < static_cast<int>(images.size()); i++) {
        Mat_<double> rotation;
        double scale;
        Mat_<double> temp = Utils::projectShape(current_shapes[i], bounding_box[i]);
        Utils::SimilarityTransform(temp, mean_shape, rotation, scale);
        for (int j = 0; j < candidate_pixel_num; j++) {
            double project_x = rotation(0, 0) * candidate_pixel_locations(j, 0) + rotation(0, 1) * candidate_pixel_locations(j, 1);
            double project_y = rotation(1, 0) * candidate_pixel_locations(j, 0) + rotation(1, 1) * candidate_pixel_locations(j, 1);
            project_x = scale * project_x * bounding_box[i].width / 2.0;
            project_y = scale * project_y * bounding_box[i].height / 2.0;
            int index = nearest_landmark_index(j);
            int real_x = project_x + current_shapes[i](index, 0);
            int real_y = project_y + current_shapes[i](index, 1);
            real_x = std::max(0.0, std::min((double) real_x, images[i].cols - 1.0));
            real_y = std::max(0.0, std::min((double) real_y, images[i].rows - 1.0));
            densities[j].push_back((int) images[i](real_y, real_x));
        }
    }

    // calculate the covariance between densities at each pair of candidate pixels
    Mat_<double> covariance(candidate_pixel_num, candidate_pixel_num);
    Mat_<double> mean;
    for (int i = 0; i < candidate_pixel_num; i++) {
        for (int j = i; j < candidate_pixel_num; j++) {
            double correlation_result = Utils::calculate_covariance(densities[i], densities[j]);
            covariance(i, j) = correlation_result;
            covariance(j, i) = correlation_result;
        }
    }

    // train ferns
    vector<Mat_<double> > prediction;
    prediction.resize(regressionTargets.size());
    for (int i = 0; i < static_cast<int>(regressionTargets.size()); i++) {
        prediction[i] = Mat::zeros(mean_shape.rows, 2, CV_64FC1);
    }
    ferns_.resize(secondLevelNum);
    for (int i = 0; i < secondLevelNum; i++) {
        cout << "Training ferns: " << i + 1 << " out of " << secondLevelNum << endl;
        vector<Mat_<double> > temp = ferns_[i].Train(densities, covariance, candidate_pixel_locations, nearest_landmark_index, regressionTargets, fern_pixel_num);
        // update regression targets
        for (int j = 0; j < static_cast<int>(temp.size()); j++) {
            prediction[j] = prediction[j] + temp[j];
            regressionTargets[j] = regressionTargets[j] - temp[j];
        }
    }

    for (int i = 0; i < static_cast<int>(prediction.size()); i++) {
        Mat_<double> rotation;
        double scale;
        Utils::SimilarityTransform(Utils::projectShape(current_shapes[i], bounding_box[i]), mean_shape, rotation, scale);
        transpose(rotation, rotation);
        prediction[i] = scale * prediction[i] * rotation;
    }
    return prediction;
}

void FernCascade::read(ifstream& fin) {
    fin >> second_level_num_;    // read the number of second-level regressors
    ferns_.resize(second_level_num_);
    for (int i = 0; i < second_level_num_; i++) {
        ferns_[i].read(fin);
    }
}

void FernCascade::Write(ofstream& fout) {
    fout << second_level_num_ << endl;
    for (int i = 0; i < second_level_num_; i++) {
        ferns_[i].Write(fout);
    }
}

Mat_<double> FernCascade::predict(const Mat_<uchar>& image, const BoundingBox& bounding_box, const Mat_<double>& mean_shape, const Mat_<double>& shape) {
    Mat_<double> result = Mat::zeros(shape.rows, 2, CV_64FC1);
    Mat_<double> rotation;
    double scale;
    Utils::SimilarityTransform(Utils::projectShape(shape, bounding_box), mean_shape, rotation, scale);
    for (int i = 0; i < second_level_num_; i++) {
        result = result + ferns_[i].predict(image, shape, rotation, bounding_box, scale);
    }
    Utils::SimilarityTransform(Utils::projectShape(shape, bounding_box), mean_shape, rotation, scale);
    transpose(rotation, rotation);
    result = scale * result * rotation;

    return result;
}

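The inner training loop above is plain gradient boosting: each fern fits the current residuals, its output is added to the running prediction, and the residuals shrink accordingly. A minimal numeric sketch of that additive update (hypothetical, with scalars standing in for the shape matrices):

#include <cstdio>

int main() {
    double target = 10.0;       // stand-in for a regression target (ground truth minus current shape)
    double prediction = 0.0;
    double residual = target;
    // Each "fern" here simply regresses half of the remaining residual.
    for (int i = 0; i < 5; i++) {
        double fernOutput = 0.5 * residual;
        prediction += fernOutput;    // accumulate, as prediction[j] += temp[j]
        residual -= fernOutput;      // shrink, as regressionTargets[j] -= temp[j]
        std::printf("stage %d: prediction=%.4f residual=%.4f\n", i, prediction, residual);
    }
}
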
--------------------------------------------------------------------------------
/ESR/ShapeRegressor.cpp:
--------------------------------------------------------------------------------
/*
Author: Bi Sai
Date: 2014/06/18
This program is a reimplementation of algorithms in "Face Alignment by Explicit
Shape Regression" by Cao et al.
If you find any bugs, please email me: soundsilencebisai-at-gmail-dot-com

Copyright (c) 2014 Bi Sai
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "ESR.hpp"

using namespace std;
using namespace cv;

ShapeRegressor::ShapeRegressor() {
    first_level_num_ = 0;
    landmark_num_ = 0;
}

/**
 * @param images gray scale images
 * @param ground_truth_shapes a vector of N*2 matrices, where N is the number of landmarks
 * @param bounding_box BoundingBox of faces
 * @param first_level_num number of first level regressors
 * @param second_level_num number of second level regressors
 * @param candidate_pixel_num number of pixels to be selected as features
 * @param fern_pixel_num number of pixel pairs in a fern
 * @param initial_num number of initial shapes for each input image
 */

void ShapeRegressor::read(ifstream& fin) {
    fin >> first_level_num_;    // line 1 of the model file: the number of first-level cascades
    fin >> landmark_num_;       // line 2: the number of landmarks

    meanShape = Mat::zeros(landmark_num_, 2, CV_64FC1);    // CV_64FC1 is a 64-bit floating-point channel, i.e. double on all platforms
    for (int i = 0; i < landmark_num_; i++) {    // read 29 coordinate pairs (58 numbers) as the initial mean shape
        fin >> meanShape(i, 0) >> meanShape(i, 1);
    }

    int trainingNum;
    fin >> trainingNum;    // line 4: the number of training images
    training_shapes_.resize(trainingNum);
    bounding_box_.resize(trainingNum);

    for (int i = 0; i < trainingNum; i++) {    // read the per-image parameters in groups of two lines
        BoundingBox temp;
        fin >> temp.startX >> temp.startY >> temp.width >> temp.height >> temp.centerX >> temp.centerY;    // first line of a group: the 6 bounding-box parameters
        bounding_box_[i] = temp;

        Mat_<double> temp1(landmark_num_, 2);
        for (int j = 0; j < landmark_num_; j++) {    // second line of a group: 29 coordinate pairs (58 numbers), the landmark positions
            fin >> temp1(j, 0) >> temp1(j, 1);
        }
        training_shapes_[i] = temp1;
    }    // this reads up to line 2694 of the model file

    fernCascades.resize(first_level_num_);
    for (int i = 0; i < first_level_num_; i++) {
        fernCascades[i].read(fin);
    }
}

Mat_<double> ShapeRegressor::predict(const Mat_<uchar>& image, const BoundingBox& bounding_box, int initial_num) {
    // generate multiple initializations
    Mat_<double> result = Mat::zeros(landmark_num_, 2, CV_64FC1);
    RNG random_generator(getTickCount());
    for (int i = 0; i < initial_num; i++) {
        random_generator = RNG(i);
        int index = random_generator.uniform(0, static_cast<int>(training_shapes_.size()));    // uniform() draws from the given range
        Mat_<double> currentShape = training_shapes_[index];
        BoundingBox currentBoundingBox = bounding_box_[index];
        currentShape = Utils::projectShape(currentShape, currentBoundingBox);    // projectShape normalizes the shape into the (-1,1) bounding-box frame
        currentShape = Utils::reProjectShape(currentShape, bounding_box);        // reProjectShape maps the normalized shape into the real bounding box
        for (int j = 0; j < first_level_num_; j++) {
            Mat_<double> prediction = fernCascades[j].predict(image, bounding_box, meanShape, currentShape);
            // update the current shape
            currentShape = prediction + Utils::projectShape(currentShape, bounding_box);
            currentShape = Utils::reProjectShape(currentShape, bounding_box);
        }
        result = result + currentShape;
    }
    return 1.0 / initial_num * result;    // average over the initializations
}

void ShapeRegressor::load(string path) {
    cout << "Loading model..." << endl;
    ifstream fin;
    fin.open(path.c_str());
    this->read(fin);
    fin.close();
    cout << "Model loaded successfully..." << endl;
}

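Pieced together from the read() methods above, the serialized model is one whitespace-separated text file. A hedged summary of the layout, reconstructed from the parsing code rather than from any spec:

/*
Model file layout (as implied by ShapeRegressor::read, FernCascade::read, Fern::read):
    first_level_num_
    landmark_num_
    landmark_num_ coordinate pairs: the mean shape (x y)
    trainingNum
    per training image:
        startX startY width height centerX centerY
        landmark_num_ coordinate pairs: ground-truth landmarks (x y)
    per first-level cascade (first_level_num_ of them):
        second_level_num_
        per fern (second_level_num_ of them):
            fern_pixel_num_
            landmark_num_
            per pixel pair: x1 y1 x2 y2, then nearest-landmark index 1,
                nearest-landmark index 2, then the threshold
            2^fern_pixel_num_ bin outputs, each landmark_num_ (x y) pairs
*/
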
--------------------------------------------------------------------------------
/EyeDetector.cpp:
--------------------------------------------------------------------------------
#include "EyeDetector.h"

EyeDetector::EyeDetector()
{
    cv::String eyeDetectorPath = "/usr/local/share/OpenCV/haarcascades/haarcascade_eye.xml";
    this->cascadeClassifier.load(eyeDetectorPath);
}

EyeDetector::~EyeDetector()
{

}

bool EyeDetector::detect(cv::Mat& grayImg, std::vector<cv::Rect>& eyesRects) {
    this->cascadeClassifier.detectMultiScale(grayImg, eyesRects, 2, 2, 0 | cv::CASCADE_DO_CANNY_PRUNING | cv::CASCADE_DO_ROUGH_SEARCH | cv::CASCADE_SCALE_IMAGE, cv::Size(40, 40));
    if (eyesRects.empty()) {
        return false;
    } else {
        return true;
    }
}

--------------------------------------------------------------------------------
/EyeDetector.h:
--------------------------------------------------------------------------------
#ifndef EYEDETECTOR_H
#define EYEDETECTOR_H

#include "Detector.h"

class EyeDetector : public Detector
{
public:
    EyeDetector();
    virtual ~EyeDetector();
    bool detect(cv::Mat& grayImg, std::vector<cv::Rect>& eyesRects);
};

#endif // EYEDETECTOR_H

--------------------------------------------------------------------------------
/FaceAligner.cpp:
--------------------------------------------------------------------------------
#include "FaceAligner.h"

FaceAligner* FaceAligner::ptr2FaceAligner = nullptr;

// Singleton accessor
FaceAligner* FaceAligner::getInstance(){
    if(FaceAligner::ptr2FaceAligner == nullptr){
        FaceAligner::ptr2FaceAligner = new FaceAligner();
    }
    return FaceAligner::ptr2FaceAligner;
}

FaceAligner::FaceAligner():initial_number(20),landmarkNum(114)
{
    regressor = ShapeRegressor();
    regressor.load("/home/netbeen/LivenessDetectionTrainFile/model-Helen114-HaarAlt2-10-120.txt");
}

FaceAligner::~FaceAligner()
{

}

void FaceAligner::doAlignment(cv::Mat grayImg, BoundingBox boundingBox){
    this->currentShape = regressor.predict(grayImg, boundingBox, this->initial_number);
    emit this->alignmentCompete(this->currentShape);
    return;
}

cv::Mat_<double> FaceAligner::getCurrentShape(){
    return currentShape;
}

--------------------------------------------------------------------------------
/FaceAligner.h:
--------------------------------------------------------------------------------
#ifndef FACEALIGNER_H
#define FACEALIGNER_H

#include <QObject>
#include <opencv2/core/core.hpp>
#include "ESR.hpp"
#include "BoundingBox.h"

class FaceAligner : public QObject
{
    Q_OBJECT
public:
    static FaceAligner* getInstance();
    cv::Mat_<double> getCurrentShape();

private:
    static FaceAligner* ptr2FaceAligner;
    FaceAligner();
    ~FaceAligner();
    const int initial_number;
    const int landmarkNum;
    ShapeRegressor regressor;
    cv::Mat_<double> currentShape;

    cv::Mat grayImg;
    BoundingBox boundingBox;

signals:
    void alignmentCompete(cv::Mat_<double>);

public slots:
    void doAlignment(cv::Mat grayImg, BoundingBox boundingBox);
};

#endif // FACEALIGNER_H

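FaceAligner is consumed asynchronously through signals, but the underlying regressor call is synchronous. A hypothetical direct-call sketch of the same flow, useful for exercising the model outside the Qt plumbing (the image name and bounding-box values are made up; the model path and the 20 initializations are the project's own):

#include <opencv2/core/core.hpp>
#include <opencv2/imgcodecs/imgcodecs.hpp>
#include "ESR.hpp"

int main() {
    cv::Mat gray = cv::imread("face.jpg", cv::IMREAD_GRAYSCALE);    // hypothetical test image
    if (gray.empty()) return 1;

    BoundingBox box;    // normally filled in by FaceDetector (next file)
    box.startX = 100; box.startY = 80; box.width = 200; box.height = 200;
    box.centerX = box.startX + box.width / 2.0;
    box.centerY = box.startY + box.height / 2.0;

    ShapeRegressor regressor;
    regressor.load("/home/netbeen/LivenessDetectionTrainFile/model-Helen114-HaarAlt2-10-120.txt");
    cv::Mat_<double> shape = regressor.predict(gray, box, 20);    // 20 initializations, as FaceAligner uses
    // shape is a 114x2 matrix of landmark (x, y) coordinates in image space
    return 0;
}
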
--------------------------------------------------------------------------------
/FaceDetector.cpp:
--------------------------------------------------------------------------------
#include "FaceDetector.h"

FaceDetector::FaceDetector()
{
    cv::String faceDetectorPath = "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt2.xml";
    this->cascadeClassifier.load(faceDetectorPath);
}

FaceDetector::~FaceDetector()
{

}

// Face detection: run the cascade on a downscaled copy, then map the hit back to full resolution
bool FaceDetector::detect(cv::Mat& grayImg, double scale, BoundingBox& boundingBox){
    std::vector<cv::Rect> faces;
    cv::Mat smallImg = cv::Mat(cvRound(grayImg.rows / scale), cvRound(grayImg.cols / scale), CV_8UC1);
    resize(grayImg, smallImg, smallImg.size(), 0, 0, cv::INTER_CUBIC);
    cascadeClassifier.detectMultiScale(smallImg, faces, 1.1, 5, 0 | cv::CASCADE_DO_CANNY_PRUNING | cv::CASCADE_FIND_BIGGEST_OBJECT | cv::CASCADE_DO_ROUGH_SEARCH | cv::CASCADE_SCALE_IMAGE, cv::Size(30, 30));
    if (faces.empty()) {
        return false;
    } else {
        boundingBox.startX = cvRound(faces[0].x * scale);
        boundingBox.startY = cvRound(faces[0].y * scale);
        boundingBox.width = cvRound((faces[0].width) * scale);
        boundingBox.height = cvRound((faces[0].height) * scale);
        boundingBox.centerX = boundingBox.startX + boundingBox.width / 2.0;
        boundingBox.centerY = boundingBox.startY + boundingBox.height / 2.0;
        return true;
    }
}

--------------------------------------------------------------------------------
/FaceDetector.h:
--------------------------------------------------------------------------------
#ifndef FACEDETECTOR_H
#define FACEDETECTOR_H

#include "Detector.h"

class FaceDetector : public Detector
{
public:
    FaceDetector();
    virtual ~FaceDetector();
    bool detect(cv::Mat& grayImg, double scale, BoundingBox& boundingBox);
};

#endif // FACEDETECTOR_H

--------------------------------------------------------------------------------
/LivenessDetection3.pro:
--------------------------------------------------------------------------------
#-------------------------------------------------
#
# Project created by QtCreator 2015-07-09T22:01:46
#
#-------------------------------------------------

QT       += core gui
QT       += multimedia
CONFIG   += c++11

greaterThan(QT_MAJOR_VERSION, 4): QT += widgets

TARGET = LivenessDetection3
TEMPLATE = app


SOURCES += main.cpp\
    mainwindow.cpp \
    WebcamCapture.cpp \
    Controller.cpp \
    Analyser.cpp \
    BlinkAnalyser.cpp \
    AnalyserFactory.cpp \
    Detector.cpp \
    EyeDetector.cpp \
    FaceDetector.cpp \
    BoundingBox.cpp \
    Utils.cpp \
    OpenMouthAnalyser.cpp \
    ESR/Fern.cpp \
    ESR/FernCascade.cpp \
    ESR/ShapeRegressor.cpp \
    FaceAligner.cpp \
    YawAnalyser.cpp \
    OpticalFlowCalculater.cpp

HEADERS  += mainwindow.h \
    WebcamCapture.h \
    Controller.h \
    Analyser.h \
    BlinkAnalyser.h \
    AnalyserFactory.h \
    Detector.h \
    EyeDetector.h \
    FaceDetector.h \
    BoundingBox.h \
    Utils.h \
    OpenMouthAnalyser.h \
    ESR.hpp \
    FaceAligner.h \
    YawAnalyser.h \
    OpticalFlowCalculater.h

FORMS    += mainwindow.ui

LIBS += /usr/local/lib/libopencv_core.so \
    /usr/local/lib/libopencv_imgproc.so \
    /usr/local/lib/libopencv_highgui.so \
    /usr/local/lib/libopencv_objdetect.so \
    /usr/local/lib/libopencv_video.so \
    /usr/local/lib/libopencv_videoio.so \
    /usr/local/lib/libopencv_imgcodecs.so

--------------------------------------------------------------------------------
/LivenessDetection3.pro.user:
--------------------------------------------------------------------------------
(Qt Creator per-user settings file, XML; the markup was lost in extraction and the remaining tag values are not meaningful on their own. The surviving values record EnvironmentId {5533173e-5918-4812-9f26-15a15dc7d903}, the "Desktop Qt 5.5.1 GCC 64bit" kit, Debug/Release/Profile build configurations with qmake and Make build/clean steps under /home/netbeen/build-LivenessDetection3-Desktop_Qt_5_5_1_GCC_64bit-{Debug,Release,Profile}, a local deploy configuration, valgrind analyzer defaults, and a run configuration for /home/netbeen/LivenessDetection3/LivenessDetection3.pro.)

--------------------------------------------------------------------------------
/LivenessDetection3.pro.user.c3631ef:
--------------------------------------------------------------------------------
(Qt Creator per-user settings file, XML; the markup was lost in extraction, as above. The surviving values record EnvironmentId {c3631ef5-b297-43b8-85e3-308f847beab4}, the "Desktop Qt 5.4.2 GCC 64bit" kit, Debug/Release build configurations with qmake and Make build/clean steps under /home/netbeen/QtWorkspace/build-LivenessDetection3-Desktop_Qt_5_4_2_GCC_64bit-{Debug,Release}, a local deploy configuration, valgrind analyzer defaults, and a run configuration for /home/netbeen/QtWorkspace/LivenessDetection3/LivenessDetection3.pro.)

--------------------------------------------------------------------------------
/OpenMouthAnalyser.cpp:
--------------------------------------------------------------------------------
#include "OpenMouthAnalyser.h"
#include <QSound>
#include <QThread>

// Constructor
OpenMouthAnalyser::OpenMouthAnalyser():timeoutTimeMs(10000),isCurrentAlignmentValid(false),openMouthThreshold(2.5)
{
    webcamCapture = WebcamCapture::getInstance();
    faceDetector = new FaceDetector();
    faceAligner = FaceAligner::getInstance();
    QObject::connect(this,SIGNAL(doAlignment(cv::Mat,BoundingBox)),faceAligner,SLOT(doAlignment(cv::Mat,BoundingBox)));
    QObject::connect(faceAligner,SIGNAL(alignmentCompete(cv::Mat_<double>)),this,SLOT(receiveNewAlignment(cv::Mat_<double>)));
}

// Start: hook this analyser up to the webcam's frame signal
void OpenMouthAnalyser::start(){
    QSound::play("/home/netbeen/QtWorkspace/LivenessDetection3/请张嘴.wav");
    timeoutTimer = new QTimer();
    QObject::connect(timeoutTimer,SIGNAL(timeout()),this,SLOT(timeout()));    // hook up the timer event
    std::cout << "OpenMouthAnalyser at " << QThread::currentThreadId() << std::endl;
    QObject::connect(webcamCapture,SIGNAL(newImageCaptured(cv::Mat)),this,SLOT(receiveNewFrame(cv::Mat)));
    timeoutTimer->start(timeoutTimeMs);

}

// Slot: a new webcam frame arrived
void OpenMouthAnalyser::receiveNewFrame(cv::Mat newFrame){
    cv::cvtColor(newFrame,this->grayImage,cv::COLOR_BGR2GRAY);
    if (this->faceDetector->detect(this->grayImage,5, this->faceBoundingBox)) {    // run the FaceDetector; a face was found
        emit this->doAlignment(this->grayImage, this->faceBoundingBox);

        Utils::drawRect(this->grayImage,this->faceBoundingBox);
        if(this->isCurrentAlignmentValid == true){
            Utils::drawPoint(this->grayImage,this->currentAlignment);
            // mouth is "open" when the vertical gap between landmark rows 93 and 107
            // exceeds openMouthThreshold times the gap between rows 65 and 93
            if((this->currentAlignment(107,1) - this->currentAlignment(93,1)) > (this->currentAlignment(93,1) - this->currentAlignment(65,1)) * this->openMouthThreshold ){
                this->success();
            }
        }
    }else{    // no face found

    }
    //cv::moveWindow("OpenMouthAnalyser",200,160);
    //cv::imshow("OpenMouthAnalyser", grayImage);
}

// Slot: the timer expired before an open mouth was detected
void OpenMouthAnalyser::timeout(){
    timeoutTimer->stop();
    QObject::disconnect(webcamCapture,SIGNAL(newImageCaptured(cv::Mat)),this,SLOT(receiveNewFrame(cv::Mat)));    // stop receiving webcam frames
    //cv::destroyAllWindows();
    std::cout << "OpenMouthAnalyser Time out!" << std::endl;
    emit this->done(false);
}

// Called when an open mouth has been detected
void OpenMouthAnalyser::success(){
    timeoutTimer->stop();
    QObject::disconnect(webcamCapture,SIGNAL(newImageCaptured(cv::Mat)),this,SLOT(receiveNewFrame(cv::Mat)));    // stop receiving webcam frames
    cv::destroyAllWindows();
    std::cout << "OpenMouthAnalyser success!" << std::endl;
    emit this->done(true);
}

// Slot: a new alignment result arrived
void OpenMouthAnalyser::receiveNewAlignment(cv::Mat_<double> newAlignment){
    this->currentAlignment = newAlignment;
    this->isCurrentAlignmentValid = true;
}

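The open-mouth test above is a scale-invariant ratio of two vertical landmark distances, so it needs no absolute pixel threshold. A standalone sketch of the check (the y values are hypothetical; rows 65, 93 and 107 follow the project's 114-point layout):

#include <cstdio>

// Ratio test: the gap between rows 93 and 107 must exceed `threshold`
// times the gap between rows 65 and 93.
bool isMouthOpen(double y65, double y93, double y107, double threshold) {
    return (y107 - y93) > (y93 - y65) * threshold;
}

int main() {
    // Closed mouth: a 6px gap against a 20px reference distance -> false.
    std::printf("%d\n", isMouthOpen(100.0, 120.0, 126.0, 2.5));
    // Open mouth: a 60px gap against the same reference -> true.
    std::printf("%d\n", isMouthOpen(100.0, 120.0, 180.0, 2.5));
}
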
--------------------------------------------------------------------------------
/OpenMouthAnalyser.h:
--------------------------------------------------------------------------------
#ifndef OPENMOUTHANALYSER_H
#define OPENMOUTHANALYSER_H

#include <QTimer>
#include "Analyser.h"
#include "WebcamCapture.h"
#include "FaceDetector.h"
#include "FaceAligner.h"
#include "Utils.h"

class OpenMouthAnalyser : public Analyser
{
    Q_OBJECT
public:
    OpenMouthAnalyser();

private:
    WebcamCapture* webcamCapture;
    FaceDetector* faceDetector;
    cv::Mat grayImage;
    BoundingBox faceBoundingBox;
    QTimer* timeoutTimer;
    const int timeoutTimeMs;
    FaceAligner* faceAligner;
    void success();
    cv::Mat_<double> currentAlignment;
    bool isCurrentAlignmentValid;
    const float openMouthThreshold;

public slots:
    virtual void start();
    void receiveNewFrame(cv::Mat newFrame);
    void receiveNewAlignment(cv::Mat_<double>);

private slots:
    void timeout();

signals:
    void done(bool result);
    void doAlignment(cv::Mat grayImage, BoundingBox boundingBox);
};

#endif // OPENMOUTHANALYSER_H
--------------------------------------------------------------------------------
/OpticalFlowCalculater.cpp:
--------------------------------------------------------------------------------
#include "OpticalFlowCalculater.h"

OpticalFlowCalculater* OpticalFlowCalculater::ptr2OpticalFlowCalculater = nullptr;

OpticalFlowCalculater::OpticalFlowCalculater()
{
    this->previousFrame.release();
}

// Singleton accessor
OpticalFlowCalculater* OpticalFlowCalculater::getInstance(){
    if(OpticalFlowCalculater::ptr2OpticalFlowCalculater == nullptr){
        OpticalFlowCalculater::ptr2OpticalFlowCalculater = new OpticalFlowCalculater();
    }
    return OpticalFlowCalculater::ptr2OpticalFlowCalculater;
}

// Compute dense optical flow between the previous frame and this one.
// The frame is cloned before being stored: the caller reuses its cv::Mat
// buffer for every capture, so keeping a shallow reference would make
// previousFrame alias the current frame; it must also be advanced after
// every computation so flow is measured between consecutive frames.
void OpticalFlowCalculater::doCalc(cv::Mat grayImg){
    if(this->previousFrame.empty()){
        this->previousFrame = grayImg.clone();
        emit this->calcCompete(false, this->flow);    // no flow available on the very first frame
    }else{
        calcOpticalFlowFarneback(this->previousFrame, grayImg, this->flow, 0.5, 3, 40, 3, 7, 1.5, 0);
        this->previousFrame = grayImg.clone();        // advance the reference frame
        emit this->calcCompete(true, this->flow);
    }
}
--------------------------------------------------------------------------------
/OpticalFlowCalculater.h:
--------------------------------------------------------------------------------
#ifndef OPTICALFLOWCALCULATER_H
#define OPTICALFLOWCALCULATER_H

#include <QObject>
#include <opencv2/opencv.hpp>

class OpticalFlowCalculater : public QObject
{
    Q_OBJECT
public:
    static OpticalFlowCalculater* getInstance();

private:
    OpticalFlowCalculater();
    static OpticalFlowCalculater* ptr2OpticalFlowCalculater;
    cv::Mat previousFrame;
    cv::Mat flow;

public slots:
    void doCalc(cv::Mat grayImg);

signals:
    void calcCompete(bool isValid, cv::Mat flow);
};

#endif // OPTICALFLOWCALCULATER_H
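YawAnalyser::separateNormAndAngle (later in this listing) splits each flow vector into a magnitude and a phase angle pixel by pixel. OpenCV can do the same split in one vectorized call; a sketch of the equivalent decomposition, assuming the CV_32FC2 field that calcOpticalFlowFarneback produces. One convention difference to note: cartToPolar returns atan2(dy, dx), whereas the hand-rolled loop below uses atan2(dx, dy), i.e. an angle measured from the vertical.

#include <opencv2/opencv.hpp>

// Split a dense flow field (CV_32FC2: dx in channel 0, dy in channel 1)
// into per-pixel magnitude and angle planes in one call.
void splitFlow(const cv::Mat& flow, cv::Mat& magnitude, cv::Mat& angle) {
    std::vector<cv::Mat> channels(2);
    cv::split(flow, channels);    // channels[0] = dx, channels[1] = dy
    cv::cartToPolar(channels[0], channels[1], magnitude, angle);    // angle in radians, [0, 2*pi)
}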
--------------------------------------------------------------------------------
/Utils.cpp:
--------------------------------------------------------------------------------
#include "Utils.h"
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <iostream>

using namespace std;
using namespace cv;

Utils::Utils()
{

}

// Draw a set of landmark points onto `input`
void Utils::drawPoint(cv::Mat& input, cv::Mat_<double>& shape){
    for (int i = 0; i < shape.rows; i++) {
        circle(input, cv::Point2d(shape(i, 0), shape(i, 1)), 3, cv::Scalar(0, 255, 0), -1, 8, 0);
    }
}

// Draw a bounding-box rectangle onto `input`
void Utils::drawRect(cv::Mat& input, BoundingBox& boundingBox){
    cv::rectangle(input, boundingBox.returnRect(), cv::Scalar(0, 255, 255), 3, 8, 0);
    return;
}

// Draw rectangles onto `input`, shifted by an offset
void Utils::drawRect(cv::Mat &input, std::vector<cv::Rect>& eyesRects, int offsetX, int offsetY){
    for(cv::Rect& rectElem : eyesRects){
        rectElem.x += offsetX;    // add the face box's X offset
        rectElem.y += offsetY;    // add the face box's Y offset
        cv::rectangle(input, rectElem, cv::Scalar(0, 255, 255), 3, 8, 0);
    }
}

// Shuffle the elements of a vector in place
void Utils::randomizeVector(std::vector<int>& inputVector){    // element type was lost in extraction; int is assumed here
    const int swapCount = inputVector.size();
    for(int i = 0; i < swapCount; ++i){
        std::swap(inputVector.at(random()%inputVector.size()), inputVector.at(random()%inputVector.size()));
    }
}

// Compute the mean shape over all training shapes
Mat_<double> Utils::getMeanShape(const vector<Mat_<double> >& shapes, const vector<BoundingBox>& bounding_box) {
    cout << "Starting GetMeanShape..." << endl;
    Mat_<double> result = Mat::zeros(shapes[0].rows, 2, CV_64FC1);
    for (int i = 0; i < static_cast<int>(shapes.size()); i++) {
        result = result + projectShape(shapes[i], bounding_box[i]);
    }
    result = 1.0 / shapes.size() * result;
    return result;
}

// Map a shape into box-centred coordinates (each axis normalized to [-1, 1])
Mat_<double> Utils::projectShape(const Mat_<double>& shape, const BoundingBox& bounding_box) {
    Mat_<double> temp(shape.rows, 2);
    for (int j = 0; j < shape.rows; j++) {
        temp(j, 0) = (shape(j, 0) - bounding_box.centerX) / (bounding_box.width / 2.0);
        temp(j, 1) = (shape(j, 1) - bounding_box.centerY) / (bounding_box.height / 2.0);
    }
    return temp;
}

// Map a normalized shape back into image coordinates
Mat_<double> Utils::reProjectShape(const Mat_<double>& shape, const BoundingBox& bounding_box) {
    Mat_<double> temp(shape.rows, 2);
    for (int j = 0; j < shape.rows; j++) {
        temp(j, 0) = (shape(j, 0) * bounding_box.width / 2.0 + bounding_box.centerX);
        temp(j, 1) = (shape(j, 1) * bounding_box.height / 2.0 + bounding_box.centerY);
    }
    return temp;
}

void Utils::SimilarityTransform(const Mat_<double>& shape1, const Mat_<double>& shape2, Mat_<double>& rotation, double& scale) {
    rotation = Mat::zeros(2, 2, CV_64FC1);
    scale = 0;

    // center the data
    double center_x_1 = 0;
    double center_y_1 = 0;
    double center_x_2 = 0;
    double center_y_2 = 0;
    for (int i = 0; i < shape1.rows; i++) {
        center_x_1 += shape1(i, 0);
        center_y_1 += shape1(i, 1);
        center_x_2 += shape2(i, 0);
        center_y_2 += shape2(i, 1);
    }
    center_x_1 /= shape1.rows;
    center_y_1 /= shape1.rows;
    center_x_2 /= shape2.rows;
    center_y_2 /= shape2.rows;

    Mat_<double> temp1 = shape1.clone();
    Mat_<double> temp2 = shape2.clone();
    for (int i = 0; i < shape1.rows; i++) {
        temp1(i, 0) -= center_x_1;
        temp1(i, 1) -= center_y_1;
        temp2(i, 0) -= center_x_2;
        temp2(i, 1) -= center_y_2;
    }

    Mat_<double> covariance1, covariance2;
    Mat_<double> mean1, mean2;
    // calculate covariance matrices
    cv::calcCovarMatrix(temp1, covariance1, mean1, CV_COVAR_COLS);    // covariance of temp1
    cv::calcCovarMatrix(temp2, covariance2, mean2, CV_COVAR_COLS);

    double s1 = sqrt(norm(covariance1));
    double s2 = sqrt(norm(covariance2));
    scale = s1 / s2;
    temp1 = 1.0 / s1 * temp1;
    temp2 = 1.0 / s2 * temp2;

    double num = 0;
    double den = 0;
    for (int i = 0; i < shape1.rows; i++) {
        num = num + temp1(i, 1) * temp2(i, 0) - temp1(i, 0) * temp2(i, 1);
        den = den + temp1(i, 0) * temp2(i, 0) + temp1(i, 1) * temp2(i, 1);
    }
    double norm = sqrt(num * num + den * den);
    double sin_theta = num / norm;
    double cos_theta = den / norm;
    rotation(0, 0) = cos_theta;
    rotation(0, 1) = -sin_theta;
    rotation(1, 0) = sin_theta;
    rotation(1, 1) = cos_theta;
}

// Covariance of two samples
double Utils::calculate_covariance(const vector<double>& v_1, const vector<double>& v_2) {
    assert(v_1.size() == v_2.size());
    assert(v_1.size() != 0);
    double sum_1 = 0;
    double sum_2 = 0;
    double exp_1 = 0;
    double exp_2 = 0;
    double exp_3 = 0;
    for (int i = 0; i < static_cast<int>(v_1.size()); i++) {
        sum_1 += v_1[i];
        sum_2 += v_2[i];
    }
    exp_1 = sum_1 / v_1.size();
    exp_2 = sum_2 / v_2.size();
    for (int i = 0; i < static_cast<int>(v_1.size()); i++) {
        exp_3 = exp_3 + (v_1[i] - exp_1) * (v_2[i] - exp_2);
    }
    return exp_3 / v_1.size();
}

// Standard deviation
double Utils::calculateStandardDeviation(const vector<double>& v) {
    assert(v.size() != 0);
    double sum = 0;
    double squareSum = 0;
    for(double elem : v){
        sum += elem;
        squareSum += elem*elem;
    }
    double expectation = sum / v.size();
    double squareSumExpectation = squareSum / v.size();
    return sqrt(squareSumExpectation - (expectation*expectation));
}

// Pearson correlation coefficient
double Utils::calculatePearsonCorrelation(const vector<double>& v_1, const vector<double>& v_2) {
    assert(v_1.size() == v_2.size());
    assert(v_1.size() != 0);
    double cov = Utils::calculate_covariance(v_1,v_2);
    double standardDeviation1 = Utils::calculateStandardDeviation(v_1);
    double standardDeviation2 = Utils::calculateStandardDeviation(v_2);
    return cov/(standardDeviation1*standardDeviation2);
}
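calculatePearsonCorrelation is the statistic the yaw test is built on, so it is worth sanity-checking in isolation. A minimal check, assuming the Utils declarations above are linked in and C++11 initializer lists (which the rest of the code already relies on); the inputs are chosen so the expected coefficients of +1 and -1 can be verified by hand:

#include <cassert>
#include <cmath>
#include <vector>
#include "Utils.h"

int main() {
    std::vector<double> up   = {1.0, 2.0, 3.0};
    std::vector<double> same = {2.0, 4.0, 6.0};   // perfectly correlated with `up`
    std::vector<double> down = {3.0, 2.0, 1.0};   // perfectly anti-correlated with `up`

    assert(std::fabs(Utils::calculatePearsonCorrelation(up, same) - 1.0) < 1e-9);
    assert(std::fabs(Utils::calculatePearsonCorrelation(up, down) + 1.0) < 1e-9);
    return 0;
}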
--------------------------------------------------------------------------------
/Utils.h:
--------------------------------------------------------------------------------
#ifndef UTILS_H
#define UTILS_H

#include <vector>
#include <opencv2/opencv.hpp>
#include "BoundingBox.h"

class Utils
{
public:
    static void drawRect(cv::Mat& input, BoundingBox& boundingBox);
    static void drawRect(cv::Mat &input, std::vector<cv::Rect>& eyesRects, int offsetX, int offsetY);
    static void drawPoint(cv::Mat& input, cv::Mat_<double>& shape);
    static void randomizeVector(std::vector<int>& inputVector);    // element type lost in extraction; int assumed
    static double calculateStandardDeviation(const std::vector<double>& v);
    static double calculatePearsonCorrelation(const std::vector<double>& v_1, const std::vector<double>& v_2);

    static cv::Mat_<double> getMeanShape(const std::vector<cv::Mat_<double> >& shapes, const std::vector<BoundingBox>& bounding_box);
    static cv::Mat_<double> projectShape(const cv::Mat_<double>& shape, const BoundingBox& bounding_box);
    static cv::Mat_<double> reProjectShape(const cv::Mat_<double>& shape, const BoundingBox& bounding_box);
    static void SimilarityTransform(const cv::Mat_<double>& shape1, const cv::Mat_<double>& shape2, cv::Mat_<double>& rotation, double& scale);
    static double calculate_covariance(const std::vector<double>& v_1, const std::vector<double>& v_2);

private:
    Utils();
};

#endif // UTILS_H
--------------------------------------------------------------------------------
/WebcamCapture.cpp:
--------------------------------------------------------------------------------
#include "WebcamCapture.h"
#include <cstdlib>

WebcamCapture* WebcamCapture::ptr2WebcamCapture = nullptr;

// Private constructor (singleton): open the default camera
WebcamCapture::WebcamCapture()
{
    if (cap.open(0)) {
        cap.set(cv::CAP_PROP_FRAME_WIDTH,800);
        cap.set(cv::CAP_PROP_FRAME_HEIGHT,600);
        cap.set(cv::CAP_PROP_FPS, 30);
    }else{
        exit(2);    // no camera available
    }
}

// Singleton accessor
WebcamCapture* WebcamCapture::getInstance(){
    if(WebcamCapture::ptr2WebcamCapture == nullptr){
        WebcamCapture::ptr2WebcamCapture = new WebcamCapture();
    }
    return WebcamCapture::ptr2WebcamCapture;
}

WebcamCapture::~WebcamCapture()
{
    if(cap.isOpened()){
        cap.release();
    }
}

// Capture loop: grab frames forever and broadcast them
void WebcamCapture::start(){
    forever{
        cv::Mat rawImg;
        cap >> rawImg;
        //cv::cvtColor(rawImg,rawImg,cv::COLOR_BGR2RGB);    // convert to RGB (left disabled; MainWindow converts for display)
        emit newImageCaptured(rawImg);
    }
}
--------------------------------------------------------------------------------
/WebcamCapture.h:
--------------------------------------------------------------------------------
#ifndef WEBCAMCAPTURE_H
#define WEBCAMCAPTURE_H

#include <QObject>
#include <opencv2/opencv.hpp>

class WebcamCapture:public QObject
{
    Q_OBJECT

public:
    static WebcamCapture* getInstance();

public slots:
    void start();

signals:
    void newImageCaptured(cv::Mat newImage);

private:
    static WebcamCapture* ptr2WebcamCapture;
    WebcamCapture();
    ~WebcamCapture();
    cv::VideoCapture cap;
};

#endif // WEBCAMCAPTURE_H
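WebcamCapture::start blocks in an endless grab loop, so it must live on its own QThread; run on the GUI thread it would starve the event loop and freeze the window. That wiring happens outside these files; a minimal sketch of the standard moveToThread pattern, assuming nothing beyond the WebcamCapture API shown above:

#include <QThread>
#include "WebcamCapture.h"

// Drive the singleton capture loop on a worker thread. start() never
// returns, so it must not be invoked on the GUI thread directly.
void startCaptureThread() {
    WebcamCapture* webcam = WebcamCapture::getInstance();
    QThread* captureThread = new QThread();
    webcam->moveToThread(captureThread);
    // queued invocation: start() begins once the thread's event loop is up
    QObject::connect(captureThread, SIGNAL(started()), webcam, SLOT(start()));
    captureThread->start();
}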
--------------------------------------------------------------------------------
/YawAnalyser.cpp:
--------------------------------------------------------------------------------
#include "YawAnalyser.h"
#include <QSound>
#include <QThread>
#include <cmath>
#include <iostream>

// Constructor
YawAnalyser::YawAnalyser():totalProgressTimeMS(5000),isCurrentAlignmentValid(false),isOpticalFlowCalculaterBusy(false),sliderPhase(0),isProgressTimeout(false)
{
    this->webcamCapture = WebcamCapture::getInstance();
    this->faceDetector = new FaceDetector();
    faceAligner = FaceAligner::getInstance();
    QObject::connect(this,SIGNAL(doAlignment(cv::Mat,BoundingBox)),faceAligner,SLOT(doAlignment(cv::Mat,BoundingBox)));
    QObject::connect(faceAligner,SIGNAL(alignmentCompete(cv::Mat_<double>)),this,SLOT(receiveNewAlignment(cv::Mat_<double>)));

    this->opticalFlowCalculater = OpticalFlowCalculater::getInstance();
    QObject::connect(this,SIGNAL(doCalcOpticalFlow(cv::Mat)),this->opticalFlowCalculater,SLOT(doCalc(cv::Mat)));
    QObject::connect(opticalFlowCalculater,SIGNAL(calcCompete(bool,cv::Mat)),this,SLOT(receiveNewOpticalFlow(bool,cv::Mat)));

    this->norm = cv::Mat(600,800, CV_32FC1);       // per-pixel flow magnitude
    this->phaseAngle = cv::Mat(600,800, CV_32FC1); // per-pixel flow phase angle
    this->zoneMap = cv::Mat(600,800,CV_8UC1);      // per-pixel zone label (face / left / right background)
}

// Entry point
void YawAnalyser::start(){
    QSound::play("/home/netbeen/QtWorkspace/LivenessDetection3/请跟随滑块摇头.wav");
    std::cout << "YawAnalyser at " << QThread::currentThreadId() << std::endl;
    QObject::connect(webcamCapture,SIGNAL(newImageCaptured(cv::Mat)),this,SLOT(receiveNewFrame(cv::Mat)));    // attach to the webcam signal
    isProgressTimeout=false;
    this->progressTimer = new QTimer();
    QObject::connect(this->progressTimer,SIGNAL(timeout()),this,SLOT(progressTimeout()));    // bind the timer event
    this->updateSliderTimer = new QTimer();
    QObject::connect(this->updateSliderTimer,SIGNAL(timeout()),this,SLOT(updateSliderTimeout()));
    this->progressTimer->start(totalProgressTimeMS);
    this->updateSliderTimer->start(totalProgressTimeMS/1000);    // one slider tick every 5 ms
}

// The progress timer has expired: allow the run to end. To absorb processing
// latency, the test actually finishes the next time the slider passes 50.
void YawAnalyser::progressTimeout(){
    isProgressTimeout=true;
}

// Slider-update timer tick: currentY sweeps 0..100 sinusoidally, completing
// two full oscillations over the 1000 ticks (5 seconds) of the run.
void YawAnalyser::updateSliderTimeout(){
    float currentY = 50*(sin(this->sliderPhase/1000*4*3.1415926)+1);
    this->sliderPhase++;
    emit this->updateSlider(currentY);
    if(isProgressTimeout && currentY <= 50){
        this->finish();
    }
}

// The yaw test is over: report the statistics and the result
void YawAnalyser::finish(){
    this->progressTimer->stop();
    this->updateSliderTimer->stop();
    QObject::disconnect(webcamCapture,SIGNAL(newImageCaptured(cv::Mat)),this,SLOT(receiveNewFrame(cv::Mat)));    // detach from the webcam signal
    QObject::disconnect(this,SIGNAL(doCalcOpticalFlow(cv::Mat)),this->opticalFlowCalculater,SLOT(doCalc(cv::Mat)));    // detach the optical-flow request signal
    QObject::disconnect(opticalFlowCalculater,SIGNAL(calcCompete(bool,cv::Mat)),this,SLOT(receiveNewOpticalFlow(bool,cv::Mat)));    // detach the optical-flow result signal
    //cv::destroyAllWindows();
    std::cout << "YawAnalyser finished!" << std::endl;
    std::cout << this->leftBackgroundNormalVector.size() << std::endl;
    std::cout << Utils::calculatePearsonCorrelation(this->faceNormalVector,this->leftBackgroundNormalVector) << std::endl;
    std::cout << Utils::calculatePearsonCorrelation(this->faceNormalVector,this->rightBackgroundNormalVector) << std::endl;
    std::cout << Utils::calculatePearsonCorrelation(this->facePhaseVector,this->leftBackgroundPhaseVector) << std::endl;
    std::cout << Utils::calculatePearsonCorrelation(this->facePhaseVector,this->rightBackgroundPhaseVector) << std::endl;
    // NOTE: the correlations are only printed; the result emitted below does not yet depend on them.
    emit this->done(true);
}

//static int count = 0;

// Slot: a new webcam frame has arrived
void YawAnalyser::receiveNewFrame(cv::Mat newFrame){
    cv::cvtColor(newFrame,this->grayImage,cv::COLOR_BGR2GRAY);
    this->imageToDisplay = this->grayImage.clone();
    if (this->faceDetector->detect(this->grayImage,5, this->faceBoundingBox)) {    // run FaceDetector; a face was found
        emit this->doAlignment(this->grayImage, this->faceBoundingBox);    // request face alignment

        if(this->isCurrentAlignmentValid == true){
            if(isOpticalFlowCalculaterBusy){
                // optical-flow worker busy: skip this round
            }else{
                isOpticalFlowCalculaterBusy = true;
                emit this->doCalcOpticalFlow(this->grayImage);    // request optical-flow computation
                this->calculateZoneMap();
            }
            Utils::drawPoint(this->imageToDisplay,this->currentAlignment);
        }
        Utils::drawRect(this->imageToDisplay,this->faceBoundingBox);    // draw the face bounding box
    }else{    // no face detected: nothing to do this frame

    }
    //std::cout << count++ << std::endl;
    //cv::moveWindow("Yaw",0,160);
    //cv::imshow("Yaw", imageToDisplay);
}

// Slot: a new alignment has arrived
void YawAnalyser::receiveNewAlignment(cv::Mat_<double> newAlignment){
    this->currentAlignment = newAlignment;
    this->isCurrentAlignmentValid = true;
}

// Slot: a new optical-flow field has arrived
void YawAnalyser::receiveNewOpticalFlow(bool isOpticalFlowValid, cv::Mat newOpticalFlow){
    if(isOpticalFlowValid){
        //displayZoneMap();    // DEBUG: show the zone map

        this->currentOpticalFlow = newOpticalFlow;
        this->separateNormAndAngle();
        this->recordIntoVectors();

        /*cv::moveWindow("norm",600,160);
        cv::imshow("norm", this->norm);
        cv::moveWindow("phaseAngle",600,760);
        cv::imshow("phaseAngle", this->phaseAngle);*/
    }else{

    }
    this->isOpticalFlowCalculaterBusy = false;
}
// Split the flow field into per-pixel magnitude and phase angle
void YawAnalyser::separateNormAndAngle(){
    for(int rowIndex = 0; rowIndex < this->currentOpticalFlow.rows; ++rowIndex){
        for(int columnIndex = 0; columnIndex < this->currentOpticalFlow.cols; ++columnIndex){
            cv::Vec2f flow_at_point = this->currentOpticalFlow.at<cv::Vec2f>(rowIndex, columnIndex);
            float normValue = std::sqrt(std::pow(flow_at_point[0],2)+std::pow(flow_at_point[1],2));
            this->norm.at<float>(rowIndex, columnIndex)=normValue;
            float angleValue = atan2(flow_at_point[0],flow_at_point[1]);    // note the atan2(dx,dy) order: the angle is measured from the vertical
            //uchar angleValueUchar = static_cast<uchar>(((angleValue/3.1415926*180)/180*127)+127);    // map the float range -pi..pi onto uchar 0..255 for display
            //this->phaseAngle.at<uchar>(rowIndex, columnIndex)=angleValueUchar;
            this->phaseAngle.at<float>(rowIndex, columnIndex)=angleValue;
        }
    }
}

// Build the zone map. (In the OpenCV coordinate convention used here, X runs
// along the top edge of the screen and Y down the left edge.)
void YawAnalyser::calculateZoneMap(){
    for(int rowIndex = 0; rowIndex < this->grayImage.rows; ++rowIndex){
        for(int columnIndex = 0; columnIndex < this->grayImage.cols; ++columnIndex){
            if(columnIndex > this->currentAlignment.at<double>(0,0) && columnIndex < this->currentAlignment.at<double>(40,0)
                    && rowIndex > this->faceBoundingBox.startY && rowIndex < this->faceBoundingBox.startY+this->faceBoundingBox.height
                    && this->isSinglePointInFace(rowIndex,columnIndex)){
                this->zoneMap.at<uchar>(rowIndex,columnIndex)=zone::face;
            }else if(columnIndex > this->currentAlignment.at<double>(0,0)-this->faceBoundingBox.width/2 && columnIndex < this->faceBoundingBox.startX && columnIndex < this->currentAlignment.at<double>(0,0) && rowIndex < this->faceBoundingBox.startY+this->faceBoundingBox.height){
                this->zoneMap.at<uchar>(rowIndex,columnIndex)=zone::leftBackground;
            }else if(columnIndex < this->currentAlignment.at<double>(40,0)+this->faceBoundingBox.width/2 && columnIndex > this->faceBoundingBox.startX+this->faceBoundingBox.width && columnIndex > this->currentAlignment.at<double>(40,0) && rowIndex < this->faceBoundingBox.startY+this->faceBoundingBox.height){
                this->zoneMap.at<uchar>(rowIndex,columnIndex)=zone::rightBackground;
            }
            else{
                this->zoneMap.at<uchar>(rowIndex,columnIndex)=zone::noArea;
            }
        }
    }
}

// Test whether a pixel lies inside the face outline (keypoints 0-40 appear to trace the face contour)
bool YawAnalyser::isSinglePointInFace(int rowIndex, int columnIndex){
    if(columnIndex <= this->currentAlignment.at<double>(20,0)){
        for(int keypointIndex = 0; keypointIndex <= 20; keypointIndex++){
            if(rowIndex > this->currentAlignment.at<double>(keypointIndex,1)){
                continue;
            }else{
                if(columnIndex < this->currentAlignment.at<double>(keypointIndex,0)){
                    return false;
                }else{
                    return true;
                }
            }
        }
        return false;
    }else{
        for(int keypointIndex = 40; keypointIndex >= 20; keypointIndex--){
            if(rowIndex > this->currentAlignment.at<double>(keypointIndex,1)){
                continue;
            }else{
                if(columnIndex > this->currentAlignment.at<double>(keypointIndex,0)){
                    return false;
                }else{
                    return true;
                }
            }
        }
        return false;
    }
}

// DEBUG: render the zone map
void YawAnalyser::displayZoneMap(){
    cv::Vec3b blue = {255,0,0};
    cv::Vec3b green = {0,255,0};
    cv::Vec3b green2 = {150,200,150};
    cv::Vec3b red = {0,0,255};

    cv::Mat zoneMapToDisplay = cv::Mat(600,800, CV_8UC3);
    for(int rowIndex = 0; rowIndex < zoneMapToDisplay.rows; ++rowIndex){
        for(int columnIndex = 0; columnIndex < zoneMapToDisplay.cols; ++columnIndex){
            if(this->zoneMap.at<uchar>(rowIndex,columnIndex) == zone::face){
                zoneMapToDisplay.at<cv::Vec3b>(rowIndex, columnIndex)=blue;
            }else if(this->zoneMap.at<uchar>(rowIndex,columnIndex) == zone::leftBackground){
                zoneMapToDisplay.at<cv::Vec3b>(rowIndex, columnIndex)=green;
            }else if(this->zoneMap.at<uchar>(rowIndex,columnIndex) == zone::rightBackground){
                zoneMapToDisplay.at<cv::Vec3b>(rowIndex, columnIndex)=green2;
            }
            else{
                zoneMapToDisplay.at<cv::Vec3b>(rowIndex, columnIndex)=red;
            }
        }
    }
    cv::moveWindow("ZoneMap",1200,160);
    cv::imshow("ZoneMap",zoneMapToDisplay);
}
// Accumulate this frame's per-zone mean flow magnitude and phase angle into the time-series vectors
void YawAnalyser::recordIntoVectors(){
    float sumNormFace = 0;
    float sumNormLeftground = 0;
    float sumNormRightground = 0;

    float sumPhaseFace = 0;
    float sumPhaseLeftground = 0;
    float sumPhaseRightground = 0;

    int countFace = 0;
    int countLeftground = 0;
    int countRightground = 0;
    for(int rowIndex = 0; rowIndex < this->norm.rows; ++rowIndex){
        for(int columnIndex = 0; columnIndex < this->norm.cols; ++columnIndex){
            if(this->zoneMap.at<uchar>(rowIndex,columnIndex) == zone::face){
                countFace++;
                sumNormFace += norm.at<float>(rowIndex, columnIndex);
                sumPhaseFace += phaseAngle.at<float>(rowIndex,columnIndex);
            }else if(this->zoneMap.at<uchar>(rowIndex,columnIndex) == zone::leftBackground){
                countLeftground++;
                sumNormLeftground += norm.at<float>(rowIndex, columnIndex);
                sumPhaseLeftground += phaseAngle.at<float>(rowIndex,columnIndex);
            }else if(this->zoneMap.at<uchar>(rowIndex,columnIndex) == zone::rightBackground){
                countRightground++;
                sumNormRightground += norm.at<float>(rowIndex, columnIndex);
                sumPhaseRightground += phaseAngle.at<float>(rowIndex,columnIndex);
            }
        }
    }
    // NOTE: assumes each zone contained at least one pixel this frame; an empty
    // zone would make the corresponding mean NaN and poison the correlations.
    float expectationNormFace = sumNormFace/countFace;
    float expectationNormLeftground = sumNormLeftground/countLeftground;
    float expectationNormRightground = sumNormRightground/countRightground;

    this->faceNormalVector.push_back(expectationNormFace);
    this->leftBackgroundNormalVector.push_back(expectationNormLeftground);
    this->rightBackgroundNormalVector.push_back(expectationNormRightground);

    this->facePhaseVector.push_back(sumPhaseFace/countFace);
    this->leftBackgroundPhaseVector.push_back(sumPhaseLeftground/countLeftground);
    this->rightBackgroundPhaseVector.push_back(sumPhaseRightground/countRightground);
}
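As noted in finish(), the four Pearson coefficients are printed but the analyser still emits done(true) unconditionally. The idea behind this kind of flow-correlation test is that a real, three-dimensional head moves differently from its background, while a flat photograph or replayed screen moves together with it. A hedged sketch of such a decision rule — the 0.6 cutoff and the restriction to the magnitude correlations are illustrative assumptions, not values taken from this project:

#include <vector>
#include "Utils.h"

// Illustrative liveness decision from face-vs-background flow correlations.
// High correlation means the face region moves with its background, which is
// what a photo or a replayed video held in front of the camera produces.
// The threshold is a placeholder and would need tuning on real captures.
bool looksLive(const std::vector<double>& faceNorm,
               const std::vector<double>& leftBgNorm,
               const std::vector<double>& rightBgNorm,
               double maxAllowedCorrelation = 0.6) {
    double corrLeft  = Utils::calculatePearsonCorrelation(faceNorm, leftBgNorm);
    double corrRight = Utils::calculatePearsonCorrelation(faceNorm, rightBgNorm);
    return corrLeft < maxAllowedCorrelation && corrRight < maxAllowedCorrelation;
}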
--------------------------------------------------------------------------------
/YawAnalyser.h:
--------------------------------------------------------------------------------
#ifndef YAWANALYSER_H
#define YAWANALYSER_H

#include <QTimer>
#include "Analyser.h"
#include "WebcamCapture.h"
#include "FaceDetector.h"
#include "FaceAligner.h"
#include "OpticalFlowCalculater.h"
#include "Utils.h"

class YawAnalyser : public Analyser
{
    Q_OBJECT
public:
    YawAnalyser();

private:
    const int totalProgressTimeMS;
    QTimer* progressTimer;
    QTimer* updateSliderTimer;
    WebcamCapture* webcamCapture;
    FaceDetector* faceDetector;
    FaceAligner* faceAligner;
    cv::Mat grayImage;
    cv::Mat imageToDisplay;
    BoundingBox faceBoundingBox;
    OpticalFlowCalculater* opticalFlowCalculater;
    bool isCurrentAlignmentValid;
    bool isOpticalFlowCalculaterBusy;
    cv::Mat_<uchar> zoneMap;
    double sliderPhase;
    bool isProgressTimeout;

    std::vector<double> faceNormalVector;
    std::vector<double> leftBackgroundNormalVector;
    std::vector<double> rightBackgroundNormalVector;

    std::vector<double> facePhaseVector;
    std::vector<double> leftBackgroundPhaseVector;
    std::vector<double> rightBackgroundPhaseVector;

    bool isSinglePointInFace(int rowIndex, int columnIndex);
    void recordIntoVectors();
    cv::Mat_<double> currentAlignment;
    cv::Mat currentOpticalFlow;
    void separateNormAndAngle();
    cv::Mat norm;
    cv::Mat phaseAngle;
    void calculateZoneMap();
    void displayZoneMap();

    enum zone{noArea, face, leftBackground, rightBackground};

public slots:
    virtual void start();
    void receiveNewFrame(cv::Mat newFrame);
    void receiveNewAlignment(cv::Mat_<double>);
    void receiveNewOpticalFlow(bool isOpticalFlowValid, cv::Mat newOpticalFlow);

private slots:
    void finish();
    void updateSliderTimeout();
    void progressTimeout();

signals:
    void done(bool result);
    void doAlignment(cv::Mat grayImage, BoundingBox boundingBox);
    void doCalcOpticalFlow(cv::Mat);

};

#endif // YAWANALYSER_H
--------------------------------------------------------------------------------
/main.cpp:
--------------------------------------------------------------------------------
#include "mainwindow.h"
#include "BoundingBox.h"
#include <QApplication>
#include <opencv2/opencv.hpp>

int main(int argc, char *argv[])
{
    QApplication a(argc, argv);

    // Register the value types that cross thread boundaries through queued
    // signal/slot connections (webcam frames, face boxes, alignments).
    qRegisterMetaType<cv::Mat>("cv::Mat");
    qRegisterMetaType<BoundingBox>("BoundingBox");
    qRegisterMetaType<cv::Mat_<double> >("cv::Mat_<double>");

    MainWindow w;
    w.show();

    return a.exec();
}
--------------------------------------------------------------------------------
/mainwindow.cpp:
--------------------------------------------------------------------------------
#include "mainwindow.h"
#include "ui_mainwindow.h"

MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    webcamCapture = WebcamCapture::getInstance();
    QObject::connect(webcamCapture,SIGNAL(newImageCaptured(cv::Mat)),this,SLOT(updateImage(cv::Mat)));

    controller = Controller::getInstance();
    QObject::connect(this,SIGNAL(startToRunButtonClicked()),controller,SLOT(start()));
    QObject::connect(controller,SIGNAL(updateSlider(int)),this,SLOT(receiveSliderPercentage(int)));    // slider percentage forwarded from the controller

    ui->setupUi(this);
}

MainWindow::~MainWindow()
{
    delete ui;
}

void MainWindow::updateImage(cv::Mat newImage){
    cv::cvtColor(newImage,newImage,cv::COLOR_BGR2RGB);    // convert OpenCV's BGR channel order to RGB for display
    QImage displayImage = QImage( static_cast<const uchar*>(newImage.data), newImage.cols, newImage.rows, QImage::Format_RGB888 );
    ui->imageLabel->setPixmap( QPixmap::fromImage(displayImage) );
}

void MainWindow::on_startToRunButton_clicked()
{
    emit this->startToRunButtonClicked();
}

void MainWindow::receiveSliderPercentage(int percentage){
    this->ui->horizontalSlider->setValue(percentage);
}
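One subtlety in MainWindow::updateImage: the three-argument QImage constructor assumes each scanline is 32-bit aligned. That happens to hold for the 800x600 frames used here (800 * 3 bytes per row is divisible by 4) but silently skews the picture for other widths. A defensive variant that passes the Mat's real stride explicitly — a sketch whose behaviour is identical for this project's frame size:

void MainWindow::updateImage(cv::Mat newImage){
    cv::cvtColor(newImage, newImage, cv::COLOR_BGR2RGB);
    // Pass newImage.step so QImage uses the Mat's actual bytes-per-line,
    // which stays correct for widths whose row size is not a multiple of 4.
    QImage displayImage(newImage.data, newImage.cols, newImage.rows,
                        static_cast<int>(newImage.step), QImage::Format_RGB888);
    ui->imageLabel->setPixmap(QPixmap::fromImage(displayImage));
}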
--------------------------------------------------------------------------------
/mainwindow.h:
--------------------------------------------------------------------------------
#ifndef MAINWINDOW_H
#define MAINWINDOW_H

#include <QMainWindow>
#include <opencv2/opencv.hpp>
#include "WebcamCapture.h"
#include "Controller.h"

namespace Ui {
class MainWindow;
}

class MainWindow : public QMainWindow
{
    Q_OBJECT

public:
    explicit MainWindow(QWidget *parent = 0);
    ~MainWindow();

private:
    Ui::MainWindow *ui;
    WebcamCapture* webcamCapture;
    Controller* controller;

signals:
    void startToRunButtonClicked();

public slots:
    void updateImage(cv::Mat newImage);
    void receiveSliderPercentage(int percentage);

private slots:
    void on_startToRunButton_clicked();
};

#endif // MAINWINDOW_H
--------------------------------------------------------------------------------
/mainwindow.ui:
--------------------------------------------------------------------------------
[Qt Designer form; the XML markup was lost in extraction. Recoverable layout: a 1160x794 main window titled "LivenessDetection2"; an 800x600 imageLabel with a thin #666666 border showing the video feed; a styled full-width horizontalSlider (80 px groove, initial value 49, Qt::Horizontal) across the top; a "Video Capture" group box framing the feed; a Start push button (startToRunButton); frame-rate readouts "Main FR:" (mainFrameRateLabel), "Alignment FR:" (faceAlignmentFrameRate) and "Flow FR:" (opticalFlowFrameRate); a "Parament Setting" group box on the left; and a menu bar with Tools and Help menus whose actions are 参数设置... ("Parameter settings...") and 关于... ("About...").]
--------------------------------------------------------------------------------
/请张嘴.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/netbeen/LivenessDetection3/ece2b5054e95b4bb69db0143fcb1d9b5eec572c1/请张嘴.wav
--------------------------------------------------------------------------------
/请眨眼.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/netbeen/LivenessDetection3/ece2b5054e95b4bb69db0143fcb1d9b5eec572c1/请眨眼.wav
--------------------------------------------------------------------------------
/请跟随滑块摇头.wav:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/netbeen/LivenessDetection3/ece2b5054e95b4bb69db0143fcb1d9b5eec572c1/请跟随滑块摇头.wav
--------------------------------------------------------------------------------