SeetaFace2代码阅读

时间:2020-05-20
本文章向大家介绍SeetaFace2代码阅读,主要包括SeetaFace2代码阅读使用实例、应用技巧、基本知识点总结和需要注意事项,具有一定的参考价值,需要的朋友可以参考一下。

SeetaFace2代码阅读

一、Face Alignment

人脸对齐做仿射变换:

  1. bool face_crop_core_ex( 
  2. const uint8_t *image_data, int image_width, int image_height, int image_channels, 
  3. uint8_t *crop_data, int crop_width, int crop_height, 
  4. const float *points, int points_num, 
  5. const float *mean_shape, int mean_shape_width, int mean_shape_height, 
  6. int pad_top, int pad_bottom, int pad_left, int pad_right, 
  7. float *final_points, 
  8. SAMPLING_TYPE type, 
  9. PADDING_TYPE ptype ) 
  10. { 
  11. //std::unique_ptr<double[]> transformation(new double[TFORM_SIZE]); 
  12. double transformation[TFORM_SIZE]; 
  13. bool check1 = transformation_maker( 
  14. crop_width, crop_height, 
  15. points, points_num, mean_shape, mean_shape_width, mean_shape_height, 
  16. transformation ); 
  17. if( !check1 ) return false; 
  18. bool check2 = spatial_transform( image_data, image_width, image_height, image_channels, 
  19. crop_data, crop_width, crop_height, 
  20. transformation, 
  21. pad_top, pad_bottom, pad_left, pad_right, 
  22. type, 
  23. ptype ); 
  24. if( !check2 ) return false; 
  25. bool check3 = true; 
  26. if( final_points ) 
  27. { 
  28. check3 = caculate_final_points( points, points_num, 
  29. transformation, 
  30. pad_top, pad_left, final_points ); 
  31. } 
  32. if( !check3 ) return false; 
  33. return true; 
  34. } 

二、Face Tracker

FaceTracker::Track函数中调用了FaceTracker::Implement::DetectV2函数得到所有跟踪的人脸:

  1. SeetaTrackingFaceInfoArray FaceTracker::Implement::DetectV2(const SeetaImageData &image, int frame_no) const { 
  2. auto &faces = Detect(image, frame_no); 
  3. tracked_faces.clear(); 
  4. for (auto &face : faces) { 
  5. SeetaTrackingFaceInfo info; 
  6. info.PID = face.PID; 
  7. info.score = face.conf; 
  8. info.frame_no = face.frame_no; 
  9. info.pos = face.pos; 
  10.  
  11. tracked_faces.push_back(info); 
  12. } 
  13. SeetaTrackingFaceInfoArray result = {nullptr, 0}; 
  14. result.data = tracked_faces.data(); 
  15. result.size = tracked_faces.size(); 
  16.  
  17. return result; 
  18. } 

DetectV2函数调用Detect函数,使用pFD人脸检测器进行人脸检测,检测出来的人脸分别与之前保存的人脸计算IOU,然后对IOU进行降序排序。设置iou thresh为0.3,大于此值表示匹配上,否则匹配失败,把它当作一个新的人脸;匹配成功后,如果IOU小于0.5,取两帧的平均坐标作为新坐标,否则说明重合度很高,直接采用新检测框的坐标。

  1. const std::vector<TrackedFace> &FaceTracker::Implement::Detect(const SeetaImageData &image, int frame_no) const { 
  2. if (!this->pFD) { 
  3. pre_tracked_faces.clear(); 
  4. return pre_tracked_faces; 
  5. } 
  6. if (frame_no < 0) { 
  7. frame_no = this->frame_no; 
  8. ++this->frame_no; 
  9. } 
  10.  
  11. int num = 0; 
  12. auto face_array = this->pFD->detect(image); 
  13. std::vector<SeetaRect> faces; 
  14. num = int(face_array.size); 
  15. for (int i = 0; i < num; ++i) { 
  16. faces.push_back(face_array.data[i].pos); 
  17. } 
  18.  
  19. // prepare scored trakced faces 
  20. std::deque<ScoredTrackedFace> scored_tracked_faces(pre_tracked_faces.begin(), pre_tracked_faces.end()); 
  21. std::vector<TrackedFace> now_trakced_faces; 
  22.  
  23. for (int i = 0; i < num; ++i) { 
  24. auto &face = faces[i]; 
  25. for (auto &scored_tracked_face : scored_tracked_faces) { 
  26. scored_tracked_face.iou_score = IoU(scored_tracked_face.face.pos, face); 
  27. std::cout << scored_tracked_face.iou_score << std::endl; 
  28. } 
  29. if (scored_tracked_faces.size() > 1) { 
  30. std::partial_sort(scored_tracked_faces.begin(), scored_tracked_faces.begin() + 1, 
  31. scored_tracked_faces.end(), 
  32. [](const ScoredTrackedFace &a, const ScoredTrackedFace &b) { 
  33. return a.iou_score > b.iou_score; 
  34. }); 
  35. } 
  36. if (!scored_tracked_faces.empty() && scored_tracked_faces.front().iou_score > this->min_score) { 
  37. ScoredTrackedFace matched_face = scored_tracked_faces.front(); 
  38. scored_tracked_faces.pop_front(); 
  39. TrackedFace &tracked_face = matched_face.face; 
  40. if (matched_face.iou_score < max_score) { 
  41. tracked_face.pos.x = (tracked_face.pos.x + face.x) / 2; 
  42. tracked_face.pos.y = (tracked_face.pos.y + face.y) / 2; 
  43. tracked_face.pos.width = (tracked_face.pos.width + face.width) / 2; 
  44. tracked_face.pos.height = (tracked_face.pos.height + face.height) / 2; 
  45. } else { 
  46. tracked_face.pos = face; 
  47. } 
  48. tracked_face.conf = face_array.data[i].score; 
  49. tracked_face.frame_no = frame_no; 
  50. now_trakced_faces.push_back(tracked_face); 
  51. } else { 
  52. TrackedFace tracked_face; 
  53. tracked_face.pos = face; 
  54. tracked_face.PID = max_PID; 
  55. tracked_face.conf = face_array.data[i].score; 
  56. tracked_face.frame_no = frame_no; 
  57. max_PID++; 
  58. now_trakced_faces.push_back(tracked_face); 
  59. } 
  60. } 
  61.  
  62. pre_tracked_faces = now_trakced_faces; 
  63.  
  64. return pre_tracked_faces; 
  65. } 

三、Face Quality Assessor

质量评估主要分为四个方面:

  1. brightness
  2. resolution
  3. pose
  4. clarity

1. brightness

求face区域所有像素的平均亮度值,满足40到180则通过检测

2. resolution

人脸框大于80
人脸框高/宽比在0.9~1.8之间

3. pose

满足如下条件:

  1. static const float roll0 = 1 / 3.0f; 
  2. static const float yaw0 = 0.5f; 
  3. static const float pitch0 = 0.5f; 
  4.  
  5. return roll < roll0 && yaw < yaw0 && pitch < pitch0; 

4. clarity

清晰度检测使用如下算法进行评估,参照如下论文:

The blur effect: perception and estimation with a new no-reference perceptual blur metric

四、整体流程

总体流程如下:


face recognition flow
  1. seeta::FaceTracker * m_tracker; 
  2. m_tracker = new seeta::FaceTracker(fd_model, videowidth,videoheight); 
  3. m_tracker->SetMinFaceSize(100); //set(seeta::FaceTracker::PROPERTY_MIN_FACE_SIZE, 100); 
  4. m_tracker->SetMinFaceSize(gparamters.MinFaceSize); 
  5. m_tracker->SetThreshold(gparamters.Fd_Threshold); 
  6. m_tracker->SetVideoSize(gparamters.VideoWidth, gparamters.VideoHeight); 
  7.  
  8. auto faces = m_tracker->Track(image); 
  9. //qDebug() << "-----track size:" << faces.size; 
  10. if( faces.size > 0 ) 
  11. { 
  12. m_mutex.lock(); 
  13. if(!m_readimage) 
  14. { 
  15. clone_image(image, *m_mainImage); 
  16. //cv::Mat tmpmat; 
  17. //cv::cvtColor(mat, tmpmat, cv::COLOR_BGR2RGB); 
  18. m_mainmat = mat2.clone();//tmpmat.clone(); 
  19. m_mainfaceinfos.clear(); 
  20. for(int i=0; i<faces.size; i++) 
  21. { 
  22. m_mainfaceinfos.push_back(faces.data[i]); 
  23. } 
  24. m_readimage = true; 
  25. } 
  26. m_mutex.unlock(); 
  27.  
  28.  
  29. for(int i=0; i<faces.size; i++) 
  30. { 
  31. auto &face = faces.data[i].pos; 
  32. //std::cout << "Clarity = " << clarity << ", Reality = " << reality << std::endl; 
  33. //auto end = system_clock::now(); 
  34. //auto duration = duration_cast<microseconds>(end - start); 
  35. //int spent = duration.count() / 1000; 
  36. //std::string str = std::to_string(spent); 
  37. //str = stateOfTheFace + " " + str; 
  38. //cv::putText( mat, str.c_str(), cv::Point( face.x, face.y - 10 ), cv::FONT_HERSHEY_SIMPLEX, 1, color, 2 ); 
  39.  
  40. //cv::rectangle( mat, cv::Rect( face.x, face.y, face.width, face.height ), color, 2, 8, 0 ); 
  41. cv::rectangle( mat2, cv::Rect( face.x, face.y, face.width, face.height ), color, 2, 8, 0 ); 
  42. } 
  43. } 

原文地址:https://www.cnblogs.com/gr-nick/p/12924667.html