
OpenCV + dlib Face Landmark Detection (C++ Version)
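The program below opens the default webcam with OpenCV, runs dlib's frontal face detector on every frame, fits dlib's 68-point shape predictor to each detected face, and draws the face rectangles and landmark points onto the frame before showing it in a window. Press q to quit.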

#include <opencv2/opencv.hpp>

#include <dlib/opencv.h>
#include <dlib/image_processing/frontal_face_detector.h>
#include <dlib/image_processing.h>

#include <iostream>
#include <string>
#include <vector>
#include <time.h>

// ---------------------------------------------------------------------------
int main(int argc, char** argv)
{
    try
    {
        cv::VideoCapture cvCap(0);

        if (!cvCap.isOpened())
        {
            std::cout << "Cannot open the camera." << std::endl;
            return 1;
        }

        //cvCap.set(cv::CAP_PROP_FRAME_WIDTH, 640);
        //cvCap.set(cv::CAP_PROP_FRAME_HEIGHT, 480);

        // Load the face detector and the 68-point landmark (shape predictor) model.
        dlib::frontal_face_detector dlibSvmFaceDetector;
        dlib::shape_predictor dlibSpFaceLandmark;

        std::vector<dlib::rectangle> dlibRectsFaces;
        std::vector<dlib::full_object_detection> dlibDetsShapes;

        cv::Mat cvImgFrame;
        cv::Mat cvImgFrameGray;

        clock_t clkBegin;
        clock_t clkEnd;

        dlibSvmFaceDetector = dlib::get_frontal_face_detector();
        dlib::deserialize("./data/shape_predictor_68_face_landmarks.dat") >>
            dlibSpFaceLandmark;
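        // Note: shape_predictor_68_face_landmarks.dat is not bundled with dlib's
        // source code; it is distributed separately (e.g. as
        // http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2) and must
        // be unpacked into ./data/ before the program is run.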

        // Grab and process frames until the user presses 'q' or the camera stops delivering frames.
        while (true)
        {
            clkBegin = clock();

            // Grab a frame from the camera.

            if (!cvCap.read(cvImgFrame))
            {
                break;
            }

            cv::flip(cvImgFrame, cvImgFrame, 1);
            cv::cvtColor(cvImgFrame, cvImgFrameGray, cv::COLOR_BGR2GRAY);
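            // Wrap the grayscale frame for dlib. dlib::cv_image only references the
            // cv::Mat's pixel buffer, so no image data is copied here.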
            dlib::cv_image<unsigned char> dlibImgFrameGray(cvImgFrameGray);

            // Detect faces with dlib's HOG-based frontal face detector.
            dlibRectsFaces = dlibSvmFaceDetector(dlibImgFrameGray);
            dlibDetsShapes.clear();

            // Find the landmarks of each face.
            for (unsigned int idxFace = 0; idxFace < dlibRectsFaces.size();
                idxFace++)
            {
                dlibDetsShapes.push_back(dlibSpFaceLandmark(dlibImgFrameGray,
                    dlibRectsFaces[idxFace]));

                cv::rectangle(cvImgFrame, cv::Rect(
                    dlibRectsFaces[idxFace].left(),
                    dlibRectsFaces[idxFace].top(),
                    dlibRectsFaces[idxFace].width(),
                    dlibRectsFaces[idxFace].height()),
                    cv::Scalar(0, 255, 0), 1);

                for (unsigned long idxLandmark = 0;
                    idxLandmark < dlibSpFaceLandmark.num_parts();
                    idxLandmark++)
                {
                    cv::circle(cvImgFrame, cv::Point(
                        dlibDetsShapes[idxFace].part(idxLandmark).x(),
                        dlibDetsShapes[idxFace].part(idxLandmark).y()),
                        1, cv::Scalar(0, 0, 255), -1);
                }

            }
            clkEnd = clock();
            // clock() counts processor ticks; convert to milliseconds via CLOCKS_PER_SEC.
            std::cout << "Running time: "
                << 1000.0 * (clkEnd - clkBegin) / CLOCKS_PER_SEC
                << " ms" << std::endl;

            // Display it all on the screen
            cv::imshow("webcam", cvImgFrame);

            if ('q' == (char)(0xFF & cv::waitKey(30)))
            {
                cv::destroyAllWindows();
                cvCap.release();
                break;
            }
        }
    }
    catch (dlib::serialization_error& e)
    {
        std::cout << "You need dlib's default face landmarking model file " <<
            "to run this example." << std::endl;
        std::cout << std::endl << e.what() << std::endl;
    }
    catch (std::exception& e)
    {
        std::cout << "\nexception thrown!" << std::endl;
        std::cout << e.what() << std::endl;
    }

    return 0;
}
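If the landmark coordinates are needed for further OpenCV processing rather than only for drawing, it can be convenient to convert each dlib::full_object_detection into an OpenCV container first. The sketch below is only an illustration; the helper name dlibShapeToPoints is not part of dlib or OpenCV. It uses the same part()/x()/y() accessors as the drawing loop above.

#include <vector>

#include <opencv2/core.hpp>
#include <dlib/image_processing.h>

// Collect the predicted landmark positions of one face into an OpenCV-friendly
// std::vector<cv::Point>. Hypothetical helper for illustration only.
static std::vector<cv::Point> dlibShapeToPoints(
    const dlib::full_object_detection& shape)
{
    std::vector<cv::Point> points;
    points.reserve(shape.num_parts());

    for (unsigned long idx = 0; idx < shape.num_parts(); idx++)
    {
        points.push_back(cv::Point(
            static_cast<int>(shape.part(idx).x()),
            static_cast<int>(shape.part(idx).y())));
    }
    return points;
}

The resulting vector can be passed directly to routines such as cv::convexHull or cv::boundingRect, for example to crop the face region around the detected landmarks.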