dlib + OpenCV 3.2: face detection, face landmark detection (68 points), and face pose estimation — 人脸检测、人脸关键点检测(68点)、人脸位姿估计

匿名 (未验证) 提交于 2019-12-03 00:22:01
#include <dirent.h> #include <string.h>  #include <vector> #include <string> #include <iostream> #include <chrono>  #include <dlib/opencv.h> #include <opencv2/highgui/highgui.hpp> #include <opencv2/calib3d/calib3d.hpp> #include <opencv2/imgproc/imgproc.hpp> #include <dlib/image_processing/frontal_face_detector.h> #include <dlib/image_processing/render_face_detections.h> #include <dlib/image_processing.h>   using namespace std;  //Intrisics can be calculated using opencv sample code under opencv/sources/samples/cpp/tutorial_code/calib3d //Normally, you can also apprximate fx and fy by image width, cx by half image width, cy by half image height instead //没有相机参数的话,可以使用宽度估计焦距,cxcy分别为宽度和高度的一半 double K[9] = { 12.80000e+002, 0.0, 6.40000000e+002, 0.0, 12.800000e+002, 3.60000000e+002, 0.0, 0.0, 1.0 }; double D[5] = { 7.0834633684407095e-002, 6.9140193737175351e-002, 0.0, 0.0, -1.3073460323689292e+000 }; //读图片 vector<string> imageSequence; void LoadImages(const std::string& strPathToSequence, std::vector< std::string >& strImage) {     std::string absolutePath = strPathToSequence;     DIR* dir = opendir(absolutePath.c_str());     struct dirent* ptr;     while ((ptr = readdir(dir)) != NULL) {         //if(ptr->d_name != "." 
&& ptr->d_name != "..")         if (strcmp(ptr->d_name,".") != 0 && strcmp(ptr->d_name,"..") != 0) {             std::string subDirect = strPathToSequence;             if(ptr->d_type == 8) {                 //cout<<ptr->d_name<<endl;                 std::string loadPath = subDirect + "/";                 loadPath += ptr->d_name;                 //              std::cout<<loadPath<<std::endl;                 strImage.push_back(loadPath);              }         }     } } //argv1是.dat文件所在的路径,可以在[http://dlib.net/files/](http://dlib.net/files/)上下载 shape_predictor_68_face_landmarks.dat.bz2文件并使用bzip2 -d解压。 //argv2是图片所在的文件目录 int main(int argc, char** argv) {     dlib::frontal_face_detector detector = dlib::get_frontal_face_detector();     dlib::shape_predictor predictor;     dlib::deserialize(argv[1]) >> predictor;      //fill in cam intrinsics and distortion coefficients     cv::Mat cam_matrix = cv::Mat(3, 3, CV_64FC1, K);     cv::Mat dist_coeffs = cv::Mat(5, 1, CV_64FC1, D);      //fill in 3D ref points(world coordinates), model referenced from http://aifi.isr.uc.pt/Downloads/OpenGL/glAnthropometric3DModel.cpp     std::vector<cv::Point3d> object_pts;     object_pts.push_back(cv::Point3d(6.825897, 6.760612, 4.402142));     //#33 left brow left corner     object_pts.push_back(cv::Point3d(1.330353, 7.122144, 6.903745));     //#29 left brow right corner     object_pts.push_back(cv::Point3d(-1.330353, 7.122144, 6.903745));    //#34 right brow left corner     object_pts.push_back(cv::Point3d(-6.825897, 6.760612, 4.402142));    //#38 right brow right corner     object_pts.push_back(cv::Point3d(5.311432, 5.485328, 3.987654));     //#13 left eye left corner     object_pts.push_back(cv::Point3d(1.789930, 5.393625, 4.413414));     //#17 left eye right corner     object_pts.push_back(cv::Point3d(-1.789930, 5.393625, 4.413414));    //#25 right eye left corner     object_pts.push_back(cv::Point3d(-5.311432, 5.485328, 3.987654));    //#21 right eye right corner     
object_pts.push_back(cv::Point3d(2.005628, 1.409845, 6.165652));     //#55 nose left corner     object_pts.push_back(cv::Point3d(-2.005628, 1.409845, 6.165652));    //#49 nose right corner     object_pts.push_back(cv::Point3d(2.774015, -2.080775, 5.048531));    //#43 mouth left corner     object_pts.push_back(cv::Point3d(-2.774015, -2.080775, 5.048531));   //#39 mouth right corner     object_pts.push_back(cv::Point3d(0.000000, -3.116408, 6.097667));    //#45 mouth central bottom corner     object_pts.push_back(cv::Point3d(0.000000, -7.415691, 4.070434));    //#6 chin corner      //2D ref points(image coordinates), referenced from detected facial feature     std::vector<cv::Point2d> image_pts;      //result     cv::Mat rotation_vec;                           //3 x 1     cv::Mat rotation_mat;                           //3 x 3 R     cv::Mat translation_vec;                        //3 x 1 T     cv::Mat pose_mat = cv::Mat(3, 4, CV_64FC1);     //3 x 4 R | T     cv::Mat euler_angle = cv::Mat(3, 1, CV_64FC1);      //reproject 3D points world coordinate axis to verify result pose     std::vector<cv::Point3d> reprojectsrc;     reprojectsrc.push_back(cv::Point3d(10.0, 10.0, 10.0));     reprojectsrc.push_back(cv::Point3d(10.0, 10.0, -10.0));     reprojectsrc.push_back(cv::Point3d(10.0, -10.0, -10.0));     reprojectsrc.push_back(cv::Point3d(10.0, -10.0, 10.0));     reprojectsrc.push_back(cv::Point3d(-10.0, 10.0, 10.0));     reprojectsrc.push_back(cv::Point3d(-10.0, 10.0, -10.0));     reprojectsrc.push_back(cv::Point3d(-10.0, -10.0, -10.0));     reprojectsrc.push_back(cv::Point3d(-10.0, -10.0, 10.0));      //reprojected 2D points     std::vector<cv::Point2d> reprojectdst;     reprojectdst.resize(8);      //temp buf for decomposeProjectionMatrix()     cv::Mat out_intrinsics = cv::Mat(3, 3, CV_64FC1);     cv::Mat out_rotation = cv::Mat(3, 3, CV_64FC1);     cv::Mat out_translation = cv::Mat(3, 1, CV_64FC1);      //text on screen     std::ostringstream outtext;       
LoadImages(argv[2],imageSequence);     //main loop     int order = 0;     for(auto& file : imageSequence)     {          // Grab a frame         cv::Mat ori,temp;         //  cap >> temp;         //使用灰度图,并缩小图片大小,这里原图是1280*720,只使用右边一半,然后再resize宽高各一半,可以显著提高运行速度。         ori = cv::imread(file,0);//grayscale         ori(cv::Rect(640,0,640,720)).copyTo(temp);         resize(temp, cv::Size(320,360));          if(temp.rows <= 0 || temp.cols <= 0)             break;          order += 1;         const auto loadframeTimeend = std::chrono::high_resolution_clock::now();         //参见dlib图片格式说明,rgb图,bgr_pixel是其中一种么,灰度图使用unsigned char         // dlib::cv_image<dlib::bgr_pixel> cimg(temp);         dlib::cv_image<unsigned char> cimg(temp);          // Detect faces         std::vector<dlib::rectangle> faces = detector(cimg);         const auto detectTimeend = std::chrono::high_resolution_clock::now();         const auto detectTimeSec = (double)std::chrono::duration_cast<std::chrono::nanoseconds>(detectTimeend - loadframeTimeend).count()* 1e-9;         cout<< "detect face time: " <<std::to_string(detectTimeSec) << " seconds."<<endl;         // Find the pose of each face         if (faces.size() > 0)             {             //track features             dlib::full_object_detection shape = predictor(cimg, faces[0]);              //draw features             for (unsigned int i = 0; i < 68; ++i)                 {                 cv::circle(temp, cv::Point(shape.part(i).x(), shape.part(i).y()), 2, cv::Scalar(0, 0, 255), -1);                 }              //fill in 2D ref points, annotations follow https://ibug.doc.ic.ac.uk/resources/300-W/             image_pts.push_back(cv::Point2d(shape.part(17).x(), shape.part(17).y())); //#17 left brow left corner             image_pts.push_back(cv::Point2d(shape.part(21).x(), shape.part(21).y())); //#21 left brow right corner             image_pts.push_back(cv::Point2d(shape.part(22).x(), shape.part(22).y())); //#22 right brow left corner           
            // Remaining 2D landmarks, in one-to-one order with object_pts.
            image_pts.push_back(cv::Point2d(shape.part(26).x(), shape.part(26).y())); //#26 right brow right corner
            image_pts.push_back(cv::Point2d(shape.part(36).x(), shape.part(36).y())); //#36 left eye left corner
            image_pts.push_back(cv::Point2d(shape.part(39).x(), shape.part(39).y())); //#39 left eye right corner
            image_pts.push_back(cv::Point2d(shape.part(42).x(), shape.part(42).y())); //#42 right eye left corner
            image_pts.push_back(cv::Point2d(shape.part(45).x(), shape.part(45).y())); //#45 right eye right corner
            image_pts.push_back(cv::Point2d(shape.part(31).x(), shape.part(31).y())); //#31 nose left corner
            image_pts.push_back(cv::Point2d(shape.part(35).x(), shape.part(35).y())); //#35 nose right corner
            image_pts.push_back(cv::Point2d(shape.part(48).x(), shape.part(48).y())); //#48 mouth left corner
            image_pts.push_back(cv::Point2d(shape.part(54).x(), shape.part(54).y())); //#54 mouth right corner
            image_pts.push_back(cv::Point2d(shape.part(57).x(), shape.part(57).y())); //#57 mouth central bottom corner
            image_pts.push_back(cv::Point2d(shape.part(8).x(), shape.part(8).y()));   //#8 chin corner

            //calc pose: rotation_vec/translation_vec map the 3D model points onto
            //the observed 2D landmarks under the camera intrinsics.
            cv::solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs, rotation_vec, translation_vec);

            //reproject the world-space cube corners with the recovered pose.
            cv::projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix, dist_coeffs, reprojectdst);

            //draw axis: the 12 edges of the reprojected cube (4 far-face edges,
            //4 near-face edges, 4 connecting edges).
            cv::line(temp, reprojectdst[0], reprojectdst[1], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[1], reprojectdst[2], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[2], reprojectdst[3], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[3], reprojectdst[0], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[4], reprojectdst[5], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[5], reprojectdst[6], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[6], reprojectdst[7], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[7], reprojectdst[4], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[0], reprojectdst[4], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[1], reprojectdst[5], cv::Scalar(0, 0, 255));
            cv::line(temp, reprojectdst[2], reprojectdst[6], cv::Scalar(0, 0, 255));
            // NOTE(review): the scraped source is truncated mid-statement here — the
            // rest of main() (this last cube edge, the Euler-angle decomposition that
            // pose_mat/euler_angle/out_* were declared for, display, and loop close)
            // is missing and must be recovered from the original article/repository.
            cv::line(temp, reprojectdst[3], reprojectdst[7], cv::Scalar(0, 0,
标签
易学教程内所有资源均来自网络或用户发布的内容,如有违反法律规定的内容欢迎反馈
该文章没有解决你所遇到的问题?点击提问,说说你的问题,让更多的人一起探讨吧!