API说明:
cv::CascadeClassifier::detectMultiScale(
    InputArray image,                  //输入灰度图像
    CV_OUT std::vector<Rect>& objects, //返回目标的外接矩形
    double scaleFactor = 1.1,          //检测的尺度跳变量,这个值越大,某些尺寸的对象无法被检测,但检测更快
    int minNeighbors = 3,              //有多少个重叠的检测标记才被认为有效
    int flags = 0,                     //新版本中没用
    Size minSize = Size(),             //目标的最小尺寸
    Size maxSize = Size()              //目标的最大尺寸,可以排除尺寸不合理的对象
);
利用opencv自带的数据进行人脸检测:
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

// Path to OpenCV's pre-trained frontal-face Haar cascade data.
String fileName = "D:/opencv3.1/opencv/build/etc/haarcascades/haarcascade_frontalface_alt.xml";
CascadeClassifier face_classifier; // cascade classifier for faces

// Detect faces in a still image with OpenCV's bundled Haar cascade and
// draw a red rectangle around each detection.
int main(int argc, char** argv) {
	// Load the trained cascade data; nothing can be detected without it.
	if (!face_classifier.load(fileName)) {
		printf("could not load face feature data...\n");
		return -1;
	}
	Mat src = imread("D:/vcprojects/images/test.png");
	if (src.empty()) {
		printf("could not load image...\n");
		return -1;
	}
	imshow("input image", src);

	// Haar cascades operate on single-channel images; histogram
	// equalization raises contrast and improves detection stability.
	Mat gray;
	cvtColor(src, gray, COLOR_BGR2GRAY);
	equalizeHist(gray, gray);

	// Multi-scale detection: scale step 1.2, at least 3 overlapping
	// neighbor detections to accept, minimum object size 30x30.
	vector<Rect> faces;
	face_classifier.detectMultiScale(gray, faces, 1.2, 3, 0, Size(30, 30));

	// Draw each detected face as a red rectangle (BGR: 0,0,255).
	for (size_t t = 0; t < faces.size(); t++) {
		rectangle(src, faces[static_cast<int>(t)], Scalar(0, 0, 255), 2, 8, 0);
	}
	imshow("detect faces", src);
	waitKey(0);
	return 0;
}
进阶:人眼检测
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

// Cascade classifiers for faces and eyes, loaded from OpenCV's
// bundled Haar training data.
CascadeClassifier face_cascader;
CascadeClassifier eye_cascader;
String facefile = "D:/opencv3.1/opencv/build/etc/haarcascades/haarcascade_frontalface_alt.xml";
String eyefile = "D:/opencv3.1/opencv/build/etc/haarcascades/haarcascade_eye.xml";

// Detect faces in the live camera stream, then detect eyes only inside
// the upper half of each face ROI — eyes must lie within a face, so
// shrinking the search region improves both accuracy and speed.
int main(int argc, char** argv) {
	if (!face_cascader.load(facefile)) {
		printf("could not load face feature data...\n");
		return -1;
	}
	if (!eye_cascader.load(eyefile)) {
		printf("could not load eye feature data...\n");
		return -1;
	}
	namedWindow("camera-demo", CV_WINDOW_AUTOSIZE);
	VideoCapture capture(0); // default camera
	Mat frame;
	Mat gray;
	vector<Rect> faces;
	vector<Rect> eyes;
	while (capture.read(frame)) {
		// Preprocess: grayscale + histogram equalization for the cascade.
		cvtColor(frame, gray, COLOR_BGR2GRAY);
		equalizeHist(gray, gray);
		face_cascader.detectMultiScale(gray, faces, 1.2, 3, 0, Size(30, 30));
		for (size_t t = 0; t < faces.size(); t++) {
			// Restrict the eye search to the top half of the face.
			Rect roi;
			roi.x = faces[static_cast<int>(t)].x;
			roi.y = faces[static_cast<int>(t)].y;
			roi.width = faces[static_cast<int>(t)].width;
			roi.height = faces[static_cast<int>(t)].height / 2;
			Mat faceROI = frame(roi);
			eye_cascader.detectMultiScale(faceROI, eyes, 1.2, 3, 0, Size(20, 20));
			for (size_t k = 0; k < eyes.size(); k++) {
				// Eye coordinates are relative to the ROI; translate
				// them back into full-frame coordinates before drawing.
				Rect rect;
				rect.x = faces[static_cast<int>(t)].x + eyes[k].x;
				rect.y = faces[static_cast<int>(t)].y + eyes[k].y;
				rect.width = eyes[k].width;
				rect.height = eyes[k].height;
				rectangle(frame, rect, Scalar(0, 0, 255), 2, 8, 0);
			}
			rectangle(frame, faces[static_cast<int>(t)], Scalar(255, 0, 0), 2, 8, 0);
		}
		imshow("camera-demo", frame);
		char c = waitKey(30);
		if (c == 27) { // ESC quits
			break;
		}
	}
	waitKey(0);
	return 0;
}
级联分类器+模板匹配提高检测的稳定性,实现眼睛的追踪:
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

// Cascade data files. NOTE(review): left and right eye both use the
// generic haarcascade_eye.xml here; OpenCV also ships dedicated
// lefteye/righteye 2splits cascades that could be substituted.
String facefile = "D:/opencv3.1/opencv/build/etc/haarcascades/haarcascade_frontalface_alt.xml";
String lefteyefile = "D:/opencv3.1/opencv/build/etc/haarcascades/haarcascade_eye.xml";
String righteyefile = "D:/opencv3.1/opencv/build/etc/haarcascades/haarcascade_eye.xml";
CascadeClassifier face_detector;
CascadeClassifier leftyeye_detector;
CascadeClassifier righteye_detector;
Rect leftEye, rightEye;

// Track one eye by template matching.
//   im:   search image (the eye ROI cut from the current frame)
//   tpl:  eye template captured when the eye was first detected
//   rect: in: previous bounding rect; out: updated rect position
// On a confident match (score > 0.75) the rect is shifted to the match
// location (translated from ROI to image coordinates); otherwise the
// rect is zeroed to signal a lost track.
void trackEye(Mat& im, Mat& tpl, Rect& rect) {
	// The match-score map is (W-w+1) x (H-h+1).
	Mat result;
	int result_cols = im.cols - tpl.cols + 1;
	int result_rows = im.rows - tpl.rows + 1;
	result.create(result_rows, result_cols, CV_32FC1);
	matchTemplate(im, tpl, result, TM_CCORR_NORMED);

	// Locate the best match; TM_CCORR_NORMED peaks at the maximum.
	double minval, maxval;
	Point minloc, maxloc;
	minMaxLoc(result, &minval, &maxval, &minloc, &maxloc);
	if (maxval > 0.75) {
		// Convert the ROI-relative match location to image coordinates.
		rect.x = rect.x + maxloc.x;
		rect.y = rect.y + maxloc.y;
	}
	else {
		rect.x = rect.y = rect.width = rect.height = 0; // track lost
	}
}

// Combine cascade detection (to acquire each eye once) with template
// matching (to follow it between frames) for a more stable eye track.
int main(int argc, char** argv) {
	// Load all feature data up front; abort if any file is missing.
	if (!face_detector.load(facefile)) {
		printf("could not load data file...\n");
		return -1;
	}
	if (!leftyeye_detector.load(lefteyefile)) {
		printf("could not load data file...\n");
		return -1;
	}
	if (!righteye_detector.load(righteyefile)) {
		printf("could not load data file...\n");
		return -1;
	}
	Mat frame;
	VideoCapture capture(0); // default camera
	namedWindow("demo-win", CV_WINDOW_AUTOSIZE);
	Mat gray;
	vector<Rect> faces;
	vector<Rect> eyes;
	Mat lefttpl, righttpl; // eye templates, captured on first detection
	while (capture.read(frame)) {
		flip(frame, frame, 1); // mirror for a natural selfie view
		cvtColor(frame, gray, COLOR_BGR2GRAY);
		equalizeHist(gray, gray);
		face_detector.detectMultiScale(gray, faces, 1.1, 3, 0, Size(30, 30));
		for (size_t t = 0; t < faces.size(); t++) {
			rectangle(frame, faces[t], Scalar(255, 0, 0), 2, 8, 0);

			// Offsets carve the eye band out of the face: skip the top
			// quarter (forehead) and an eighth on each side.
			int offsety = faces[t].height / 4;
			int offsetx = faces[t].width / 8;
			int eyeheight = faces[t].height / 2 - offsety;
			int eyewidth = faces[t].width / 2 - offsetx;

			// --- left eye region ---
			Rect leftRect;
			leftRect.x = faces[t].x + offsetx;
			leftRect.y = faces[t].y + offsety;
			leftRect.width = eyewidth;
			leftRect.height = eyeheight;
			Mat leftRoi = gray(leftRect);
			leftyeye_detector.detectMultiScale(leftRoi, eyes, 1.1, 3, 0, Size(20, 20));
			if (lefttpl.empty()) {
				// No template yet: take the first cascade hit as the
				// template and remember its absolute position.
				if (eyes.size()) {
					leftRect = eyes[0] + Point(leftRect.x, leftRect.y);
					lefttpl = gray(leftRect);
					rectangle(frame, leftRect, Scalar(0, 0, 255), 2, 8, 0);
				}
			}
			else {
				// Template exists: track by matching inside the ROI.
				leftEye.x = leftRect.x;
				leftEye.y = leftRect.y;
				trackEye(leftRoi, lefttpl, leftEye);
				if (leftEye.x > 0 && leftEye.y > 0) {
					leftEye.width = lefttpl.cols;
					leftEye.height = lefttpl.rows;
					rectangle(frame, leftEye, Scalar(0, 0, 255), 2, 8, 0);
				}
			}

			// --- right eye region (right half of the face) ---
			Rect rightRect;
			rightRect.x = faces[t].x + faces[t].width / 2;
			rightRect.y = faces[t].y + offsety;
			rightRect.width = eyewidth;
			rightRect.height = eyeheight;
			Mat rightRoi = gray(rightRect);
			righteye_detector.detectMultiScale(rightRoi, eyes, 1.1, 3, 0, Size(20, 20));
			if (righttpl.empty()) {
				if (eyes.size()) {
					rightRect = eyes[0] + Point(rightRect.x, rightRect.y);
					righttpl = gray(rightRect);
					rectangle(frame, rightRect, Scalar(0, 0, 255), 2, 8, 0);
				}
			}
			else {
				// Track the right eye by template matching.
				rightEye.x = rightRect.x;
				rightEye.y = rightRect.y;
				trackEye(rightRoi, righttpl, rightEye);
				if (rightEye.x > 0 && rightEye.y > 0) {
					rightEye.width = righttpl.cols;
					rightEye.height = righttpl.rows;
					rectangle(frame, rightEye, Scalar(0, 0, 255), 2, 8, 0);
				}
			}
		}
		imshow("demo-win", frame);
		char c = waitKey(100);
		if (c == 27) { // ESC quits
			break;
		}
	}
	// release resource
	capture.release();
	waitKey(0);
	return 0;
}
自定义级联分类器的训练和使用:待续
opencv_createsamples 命令行参数:
-vec <vec_file_name>
-img <image_file_name>
-bg <background_file_name>
-num <number_of_samples>
-bgcolor <background_color>
-bgthresh <background_color_threshold>
-inv
-randinv
-maxidev <max_intensity_deviation>
-maxxangle <max_x_rotation_angle>
-maxyangle <max_y_rotation_angle>
-maxzangle <max_z_rotation_angle>
-show
-w <sample_width>
-h <sample_height>