1. In the OpenCV installation directory you can find the official sample code that ships with OpenCV, located under the ...\opencv\sources\samples directory.


The folder named c holds sample programs for older versions such as OpenCV 1.0; the folder named cpp holds sample programs for newer versions such as OpenCV 2.x.

The path ...\opencv\sources\samples\cpp\tutorial_code holds the sample programs that accompany the official tutorials, organized by OpenCV component module.

Running the sample programs

1. Color object tracking: Camshift

#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <iostream>
#include <ctype.h>

using namespace cv;
using namespace std;

Mat image;

bool backprojMode = false;
bool selectObject = false;
int trackObject = 0;
bool showHist = true;
Point origin;
Rect selection;
int vmin = 10, vmax = 256, smin = 30;

static void onMouse(int event, int x, int y, int, void*)
{
    if (selectObject)
    {
        selection.x = MIN(x, origin.x);
        selection.y = MIN(y, origin.y);
        selection.width = std::abs(x - origin.x);
        selection.height = std::abs(y - origin.y);

        selection &= Rect(0, 0, image.cols, image.rows);
    }

    switch (event)
    {
    case CV_EVENT_LBUTTONDOWN:
        origin = Point(x, y);
        selection = Rect(x, y, 0, 0);
        selectObject = true;
        break;
    case CV_EVENT_LBUTTONUP:
        selectObject = false;
        if (selection.width > 0 && selection.height > 0)
            trackObject = -1;
        break;
    }
}

static void help()
{
    cout << "\nThis is a demo that shows mean-shift based tracking\n"
        "You select a color objects such as your face and it tracks it.\n"
        "This reads from video camera (0 by default, or the camera number the user enters\n"
        "Usage: \n"
        "   ./camshiftdemo [camera number]\n";

    cout << "\n\nHot keys: \n"
        "\tESC - quit the program\n"
        "\tc - stop the tracking\n"
        "\tb - switch to/from backprojection view\n"
        "\th - show/hide object histogram\n"
        "\tp - pause video\n"
        "To initialize tracking, select the object with mouse\n";
}

const char* keys =
{
    "{1|  | 0 | camera number}"
};

int main(int argc, const char** argv)
{
    help();

    VideoCapture cap;
    Rect trackWindow;
    int hsize = 16;
    float hranges[] = { 0, 180 };
    const float* phranges = hranges;
    CommandLineParser parser(argc, argv, keys);
    int camNum = parser.get<int>("1");

    cap.open(camNum);

    if (!cap.isOpened())
    {
        help();
        cout << "***Could not initialize capturing...***\n";
        cout << "Current parameter's value: \n";
        parser.printParams();
        return -1;
    }

    namedWindow("Histogram", 0);
    namedWindow("CamShift Demo", 0);
    setMouseCallback("CamShift Demo", onMouse, 0);
    createTrackbar("Vmin", "CamShift Demo", &vmin, 256, 0);
    createTrackbar("Vmax", "CamShift Demo", &vmax, 256, 0);
    createTrackbar("Smin", "CamShift Demo", &smin, 256, 0);

    Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
    bool paused = false;

    for (;;)
    {
        if (!paused)
        {
            cap >> frame;
            if (frame.empty())
                break;
        }

        frame.copyTo(image);

        if (!paused)
        {
            cvtColor(image, hsv, COLOR_BGR2HSV);

            if (trackObject)
            {
                int _vmin = vmin, _vmax = vmax;

                inRange(hsv, Scalar(0, smin, MIN(_vmin, _vmax)),
                    Scalar(180, 256, MAX(_vmin, _vmax)), mask);
                int ch[] = { 0, 0 };
                hue.create(hsv.size(), hsv.depth());
                mixChannels(&hsv, 1, &hue, 1, ch, 1);

                if (trackObject < 0)
                {
                    Mat roi(hue, selection), maskroi(mask, selection);
                    calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
                    normalize(hist, hist, 0, 255, CV_MINMAX);

                    trackWindow = selection;
                    trackObject = 1;

                    histimg = Scalar::all(0);
                    int binW = histimg.cols / hsize;
                    Mat buf(1, hsize, CV_8UC3);
                    for (int i = 0; i < hsize; i++)
                        buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180. / hsize), 255, 255);
                    cvtColor(buf, buf, CV_HSV2BGR);

                    for (int i = 0; i < hsize; i++)
                    {
                        int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows / 255);
                        rectangle(histimg, Point(i*binW, histimg.rows),
                            Point((i + 1)*binW, histimg.rows - val),
                            Scalar(buf.at<Vec3b>(i)), -1, 8);
                    }
                }

                calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
                backproj &= mask;
                RotatedRect trackBox = CamShift(backproj, trackWindow,
                    TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1));
                if (trackWindow.area() <= 1)
                {
                    int cols = backproj.cols, rows = backproj.rows, r = (MIN(cols, rows) + 5) / 6;
                    trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
                        trackWindow.x + r, trackWindow.y + r) &
                        Rect(0, 0, cols, rows);
                }

                if (backprojMode)
                    cvtColor(backproj, image, COLOR_GRAY2BGR);
                ellipse(image, trackBox, Scalar(0, 0, 255), 3, CV_AA);
            }
        }
        else if (trackObject < 0)
            paused = false;

        if (selectObject && selection.width > 0 && selection.height > 0)
        {
            Mat roi(image, selection);
            bitwise_not(roi, roi);
        }

        imshow("CamShift Demo", image);
        imshow("Histogram", histimg);

        char c = (char)waitKey(10);
        if (c == 27)
            break;
        switch (c)
        {
        case 'b':
            backprojMode = !backprojMode;
            break;
        case 'c':
            trackObject = 0;
            histimg = Scalar::all(0);
            break;
        case 'h':
            showHist = !showHist;
            if (!showHist)
                destroyWindow("Histogram");
            else
                namedWindow("Histogram", 1);
            break;
        case 'p':
            paused = !paused;
            break;
        default:
            ;
        }
    }

    return 0;
}

The program tracks a target in the video read from the camera according to the hue spectrum of the region you select with the mouse. It mainly uses the CamShift algorithm, short for "Continuously Adaptive Mean-SHIFT", an improvement over the MeanShift algorithm, hence the name continuously adaptive MeanShift.
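
Stripped of the UI code, one tracking iteration of the demo above comes down to back-projecting the hue histogram of the selected region and handing the result to cv::CamShift. The following is only a minimal sketch of that inner step, not a standalone program: it assumes hist, hue, mask, phranges, trackWindow and image have already been prepared exactly as in the full listing.

// One CamShift iteration, condensed from the demo above (assumes hist, hue,
// mask, phranges, trackWindow and image were set up as in the full listing).
Mat backproj;
calcBackProject(&hue, 1, 0, hist, backproj, &phranges);  // per-pixel probability of belonging to the target
backproj &= mask;                                        // suppress pixels outside the valid saturation/value range
RotatedRect trackBox = CamShift(backproj, trackWindow,
    TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1));
ellipse(image, trackBox, Scalar(0, 0, 255), 3, CV_AA);   // draw the adaptively resized, rotated track window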
2. Optical flow

Optical flow is currently an important method for analyzing moving images. Optical flow describes the velocity of motion of patterns in a time-varying image: when an object moves, the brightness pattern of the corresponding points in the image moves with it. This apparent motion of the image brightness pattern is the optical flow. Optical flow expresses how the image changes and, since it carries information about the target's motion, an observer can use it to determine how the target is moving.
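
Before reading the full program below, it may help to see the two calls it is built around: detecting good features to track and pushing them through the pyramidal Lucas-Kanade tracker. This is only a sketch of that core, assuming prevGray and gray are two consecutive grayscale frames and output is the image to draw on.

// Core of sparse Lucas-Kanade optical flow between two consecutive frames
// (assumes prevGray, gray and output already exist, as in the program below).
vector<Point2f> prevPts, nextPts;
vector<uchar> status;   // 1 if the flow for a point was found, 0 otherwise
vector<float> err;

goodFeaturesToTrack(prevGray, prevPts, 500, 0.01, 10);          // detect up to 500 corners
calcOpticalFlowPyrLK(prevGray, gray, prevPts, nextPts, status, err);

for (size_t i = 0; i < nextPts.size(); i++)
    if (status[i])                                              // keep only successfully tracked points
        line(output, prevPts[i], nextPts[i], Scalar(0, 0, 255));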

//--------------------------------------[Program notes]-------------------------------------------
//      Description: companion example program 09 of the OpenCV2 edition of the book《OpenCV3编程入门》
//      Brief: official sample program from the Samples folder of the OpenCV installation directory -
//             moving-object detection with the optical flow method
//      Operating system used for development and testing: Windows 7 64bit
//      IDE version used for development and testing: Visual Studio 2010
//      OpenCV version used for development and testing: 2.4.9
//      Revised in November 2014 by @浅墨_毛星云
//------------------------------------------------------------------------------------------------

/************************************************************************
* Copyright(c) 2011  Yang Xian
* All rights reserved.
*
* File:    opticalFlow.cpp
* Brief:   moving-object detection with the LK optical flow method
* Version: 1.0
* Author:  Yang Xian
* Email:   [email protected]
* Date:    2011/11/18
* History:
************************************************************************/

//---------------------------------[Header files and namespaces]----------------------------------
//      Description: header files and namespaces used by the program
//-------------------------------------------------------------------------------------------------
#include <opencv2/video/video.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>

#include <iostream>
#include <cstdio>

using namespace std;
using namespace cv;

//-----------------------------------[Global function declarations]-------------------------------
//      Description: declarations of the global functions
//-------------------------------------------------------------------------------------------------
void tracking(Mat &frame, Mat &output);
bool addNewPoints();
bool acceptTrackedPoint(int i);

//-----------------------------------[Global variable declarations]-------------------------------
//      Description: declarations of the global variables
//-------------------------------------------------------------------------------------------------
string window_name = "optical flow tracking";
Mat gray;                     // current frame
Mat gray_prev;                // previous (reference) frame
vector<Point2f> points[2];    // points[0]: original positions of the features, points[1]: new positions
vector<Point2f> initial;      // initial positions of the tracked points
vector<Point2f> features;     // detected features
int maxCount = 500;           // maximum number of features to detect
double qLevel = 0.01;         // quality level for feature detection
double minDist = 10.0;        // minimum distance between two features
vector<uchar> status;         // tracking status: 1 if the flow for a feature was found, 0 otherwise
vector<float> err;

//--------------------------------[help( ) function]----------------------------------------------
//      Description: prints help information
//-------------------------------------------------------------------------------------------------
static void help()
{
    // Print a welcome message and the OpenCV version
    cout << "\n\n\t\t\tThank you very much for purchasing the book《OpenCV3编程入门》!\n"
        << "\n\n\t\t\tThis is companion example program 09 of the OpenCV2 edition of the book\n"
        << "\n\n\t\t\t   Current OpenCV version: " << CV_VERSION
        << "\n\n  ----------------------------------------------------------------------------";
}

//-----------------------------------[main( ) function]-------------------------------------------
//      Description: entry point of the console application; the program starts here
//-------------------------------------------------------------------------------------------------
int main()
{
    Mat frame;
    Mat result;

    VideoCapture capture("1.avi");

    help();
    if (capture.isOpened())    // check that the video was opened successfully
    {
        while (true)
        {
            capture >> frame;

            if (!frame.empty())
            {
                tracking(frame, result);
            }
            else
            {
                printf(" --(!) No captured frame -- Break!");
                break;
            }

            int c = waitKey(50);
            if ((char)c == 27)
            {
                break;
            }
        }
    }
    return 0;
}

//-------------------------------------------------------------------------------------------------
//      function:  tracking
//      brief:     track feature points from frame to frame
//      parameter: frame  - input video frame
//                 output - video frame with the tracking result drawn on it
//      return:    void
//-------------------------------------------------------------------------------------------------
void tracking(Mat &frame, Mat &output)
{
    cvtColor(frame, gray, CV_BGR2GRAY);
    frame.copyTo(output);

    // Add new feature points if needed
    if (addNewPoints())
    {
        goodFeaturesToTrack(gray, features, maxCount, qLevel, minDist);
        points[0].insert(points[0].end(), features.begin(), features.end());
        initial.insert(initial.end(), features.begin(), features.end());
    }

    if (gray_prev.empty())
    {
        gray.copyTo(gray_prev);
    }
    // L-K optical flow motion estimation
    calcOpticalFlowPyrLK(gray_prev, gray, points[0], points[1], status, err);
    // Discard badly tracked feature points
    int k = 0;
    for (size_t i = 0; i < points[1].size(); i++)
    {
        if (acceptTrackedPoint(i))
        {
            initial[k] = initial[i];
            points[1][k++] = points[1][i];
        }
    }
    points[1].resize(k);
    initial.resize(k);
    // Draw the feature points and their motion trails
    for (size_t i = 0; i < points[1].size(); i++)
    {
        line(output, initial[i], points[1][i], Scalar(0, 0, 255));
        circle(output, points[1][i], 3, Scalar(0, 255, 0), -1);
    }

    // Use the current tracking result as the reference for the next frame
    swap(points[1], points[0]);
    swap(gray_prev, gray);

    imshow(window_name, output);
}

//-------------------------------------------------------------------------------------------------
//      function:  addNewPoints
//      brief:     decide whether new feature points should be added
//      parameter:
//      return:    true if new points should be added
//-------------------------------------------------------------------------------------------------
bool addNewPoints()
{
    return points[0].size() <= 10;
}

//-------------------------------------------------------------------------------------------------
//      function:  acceptTrackedPoint
//      brief:     decide which tracked points are accepted
//      parameter:
//      return:    true if the flow was found and the point moved far enough
//-------------------------------------------------------------------------------------------------
bool acceptTrackedPoint(int i)
{
    return status[i] && ((abs(points[0][i].x - points[1][i].x) + abs(points[0][i].y - points[1][i].y)) > 2);
}

Note that a source video file named 1.avi is required.
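
If no such video file is at hand, a one-line change (purely a suggestion, not part of the original listing) lets the same program read from the default camera instead:

// Replace the line VideoCapture capture("1.avi"); in main() with:
VideoCapture capture(0);    // index 0 opens the first available camera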

3. Point tracking: lkdemo

The source is also in the corresponding directory, in the file ...\opencv\sources\samples\cpp\lkdemo.cpp. Once the program is running it automatically opens the camera; press the 'r' key to start automatic point tracking and you can see the effect. If you move an object in front of the camera, the points on the object move along with it.

#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <iostream>
#include <ctype.h>

using namespace cv;
using namespace std;

static void help()
{
    // print a welcome message, and the OpenCV version
    cout << "\nThis is a demo of Lukas-Kanade optical flow lkdemo(),\n"
        "Using OpenCV version " << CV_VERSION << endl;
    cout << "\nIt uses camera by default, but you can provide a path to video as an argument.\n";
    cout << "\nHot keys: \n"
        "\tESC - quit the program\n"
        "\tr - auto-initialize tracking\n"
        "\tc - delete all the points\n"
        "\tn - switch the \"night\" mode on/off\n"
        "To add/remove a feature point click it\n" << endl;
}

Point2f point;
bool addRemovePt = false;

static void onMouse(int event, int x, int y, int /*flags*/, void* /*param*/)
{
    if (event == CV_EVENT_LBUTTONDOWN)
    {
        point = Point2f((float)x, (float)y);
        addRemovePt = true;
    }
}

int main(int argc, char** argv)
{
    help();

    VideoCapture cap;
    TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);
    Size subPixWinSize(10, 10), winSize(31, 31);

    const int MAX_COUNT = 500;
    bool needToInit = false;
    bool nightMode = false;

    if (argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        cap.open(argc == 2 ? argv[1][0] - '0' : 0);
    else if (argc == 2)
        cap.open(argv[1]);

    if (!cap.isOpened())
    {
        cout << "Could not initialize capturing...\n";
        return 0;
    }

    namedWindow("LK Demo", 1);
    setMouseCallback("LK Demo", onMouse, 0);

    Mat gray, prevGray, image;
    vector<Point2f> points[2];

    for (;;)
    {
        Mat frame;
        cap >> frame;
        if (frame.empty())
            break;

        frame.copyTo(image);
        cvtColor(image, gray, COLOR_BGR2GRAY);

        if (nightMode)
            image = Scalar::all(0);

        if (needToInit)
        {
            // automatic initialization
            goodFeaturesToTrack(gray, points[1], MAX_COUNT, 0.01, 10, Mat(), 3, 0, 0.04);
            cornerSubPix(gray, points[1], subPixWinSize, Size(-1, -1), termcrit);
            addRemovePt = false;
        }
        else if (!points[0].empty())
        {
            vector<uchar> status;
            vector<float> err;
            if (prevGray.empty())
                gray.copyTo(prevGray);
            calcOpticalFlowPyrLK(prevGray, gray, points[0], points[1], status, err, winSize,
                3, termcrit, 0, 0.001);
            size_t i, k;
            for (i = k = 0; i < points[1].size(); i++)
            {
                if (addRemovePt)
                {
                    if (norm(point - points[1][i]) <= 5)
                    {
                        addRemovePt = false;
                        continue;
                    }
                }

                if (!status[i])
                    continue;

                points[1][k++] = points[1][i];
                circle(image, points[1][i], 3, Scalar(0, 255, 0), -1, 8);
            }
            points[1].resize(k);
        }

        if (addRemovePt && points[1].size() < (size_t)MAX_COUNT)
        {
            vector<Point2f> tmp;
            tmp.push_back(point);
            cornerSubPix(gray, tmp, winSize, cvSize(-1, -1), termcrit);
            points[1].push_back(tmp[0]);
            addRemovePt = false;
        }

        needToInit = false;
        imshow("LK Demo", image);

        char c = (char)waitKey(10);
        if (c == 27)
            break;
        switch (c)
        {
        case 'r':
            needToInit = true;
            break;
        case 'c':
            points[0].clear();
            points[1].clear();
            break;
        case 'n':
            nightMode = !nightMode;
            break;
        }

        std::swap(points[1], points[0]);
        cv::swap(prevGray, gray);
    }

    return 0;
}

4. Face detection: objectDetection
Face detection is one of the most important applications of image processing and OpenCV, and OpenCV provides a dedicated official tutorial and code explaining how to implement it. This sample program uses the objdetect module to detect faces in the camera video stream; it is located under ...\opencv\sources\samples\cpp\tutorial_code\objectDetection. Note that the files haarcascade_eye_tree_eyeglasses.xml and haarcascade_frontalface_alt.xml from ...\opencv\sources\data\haarcascades must be copied into the same directory as the source file for the program to run correctly. Run the program and point your face at the camera, or hold up a photo for it to capture, and you will see that the program accurately detects the face and circles it in color.
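
The heart of the program below is just two steps: load the Haar cascade and call detectMultiScale on an equalized grayscale frame. A minimal sketch of that core, assuming the cascade XML file sits next to the executable and frame holds one BGR image from the camera (the rectangle drawing is a simplification of the ellipse used in the full listing):

// Condensed face-detection core (assumptions stated above).
CascadeClassifier face_cascade;
CV_Assert(face_cascade.load("haarcascade_frontalface_alt.xml"));

Mat frame_gray;
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);            // improve contrast before detection

vector<Rect> faces;
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2,
    0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
for (size_t i = 0; i < faces.size(); i++)
    rectangle(frame, faces[i], Scalar(255, 0, 255), 2);  // mark each detected face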

//--------------------------------------[Program notes]-------------------------------------------
//      Description: companion example program 11 of the OpenCV2 edition of the book《OpenCV3编程入门》
//      Brief: official sample program from the Samples folder of the OpenCV installation directory -
//             face detection
//      Operating system used for development and testing: Windows 7 64bit
//      IDE version used for development and testing: Visual Studio 2010
//      OpenCV version used for development and testing: 2.4.9
//      Revised in November 2014 by @浅墨_毛星云
//------------------------------------------------------------------------------------------------

/**
* @file ObjectDetection.cpp
* @author A. Huaman ( based in the classic facedetect.cpp in samples/c )
* @brief A simplified version of facedetect.cpp, show how to load a cascade classifier and how to find objects (Face + eyes) in a video stream
*/

//---------------------------------[Header files and namespaces]----------------------------------
//      Description: header files and namespaces used by the program
//-------------------------------------------------------------------------------------------------
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

#include <iostream>
#include <stdio.h>

using namespace std;
using namespace cv;

void detectAndDisplay(Mat frame);

//--------------------------------[Global variable declarations]----------------------------------
//      Description: declarations of the global variables
//-------------------------------------------------------------------------------------------------
// Note: the files "haarcascade_frontalface_alt.xml" and "haarcascade_eye_tree_eyeglasses.xml"
// must be copied into the project directory.
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);

//--------------------------------[ShowHelpText( ) function]--------------------------------------
//      Description: prints help information
//-------------------------------------------------------------------------------------------------
static void ShowHelpText()
{
    // Print a welcome message and the OpenCV version
    cout << "\n\n\t\t\tThank you very much for purchasing the book《OpenCV3编程入门》!\n"
        << "\n\n\t\t\tThis is companion example program 11 of the OpenCV2 edition of the book\n"
        << "\n\n\t\t\t   Current OpenCV version: " << CV_VERSION
        << "\n\n  ----------------------------------------------------------------------------";
}

//-----------------------------------[main( ) function]-------------------------------------------
//      Description: entry point of the console application; the program starts here
//-------------------------------------------------------------------------------------------------
int main(void)
{
    VideoCapture capture;
    Mat frame;

    //-- 1. Load the cascades
    if (!face_cascade.load(face_cascade_name)){ printf("--(!)Error loading\n"); return -1; };
    if (!eyes_cascade.load(eyes_cascade_name)){ printf("--(!)Error loading\n"); return -1; };

    //-- 2. Read the video stream
    capture.open(0);
    ShowHelpText();
    if (capture.isOpened())
    {
        for (;;)
        {
            capture >> frame;

            //-- 3. Apply the classifier to the frame
            if (!frame.empty())
            {
                detectAndDisplay(frame);
            }
            else
            {
                printf(" --(!) No captured frame -- Break!"); break;
            }

            int c = waitKey(10);
            if ((char)c == 'c') { break; }
        }
    }
    return 0;
}

void detectAndDisplay(Mat frame)
{
    std::vector<Rect> faces;
    Mat frame_gray;

    cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
    equalizeHist(frame_gray, frame_gray);

    //-- Detect faces
    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));

    for (size_t i = 0; i < faces.size(); i++)
    {
        Point center(faces[i].x + faces[i].width / 2, faces[i].y + faces[i].height / 2);
        ellipse(frame, center, Size(faces[i].width / 2, faces[i].height / 2), 0, 0, 360, Scalar(255, 0, 255), 2, 8, 0);

        Mat faceROI = frame_gray(faces[i]);
        std::vector<Rect> eyes;

        //-- In each face, detect the eyes
        eyes_cascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));

        for (size_t j = 0; j < eyes.size(); j++)
        {
            Point eye_center(faces[i].x + eyes[j].x + eyes[j].width / 2, faces[i].y + eyes[j].y + eyes[j].height / 2);
            int radius = cvRound((eyes[j].width + eyes[j].height)*0.25);
            circle(frame, eye_center, radius, Scalar(255, 0, 0), 4, 8, 0);
        }
    }
    //-- Show the final result
    imshow(window_name, frame);
}

5. Introduction to support vector machines

In OpenCV's machine learning module, two official sample programs are provided. The first trains an SVM classifier with the CvSVM::train function; the second mainly explains how to define the SVM optimization problem when the training data is not linearly separable.
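
Reduced to its essentials, the first example is the classic train/predict pattern of the old C++ ML API (CvSVM). The sketch below shows just that pattern, using the same four toy training points as the full listing; the query point (300, 20) is an arbitrary value added here purely for illustration.

// Minimal CvSVM train/predict sketch (OpenCV 2.x ML API).
#include <opencv2/core/core.hpp>
#include <opencv2/ml/ml.hpp>
#include <cstdio>
using namespace cv;

int main()
{
    // Four 2-D training samples with labels +1 / -1
    float trainingData[4][2] = { { 501, 10 }, { 255, 10 }, { 501, 255 }, { 10, 501 } };
    float labels[4] = { 1.0f, -1.0f, -1.0f, -1.0f };
    Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
    Mat labelsMat(4, 1, CV_32FC1, labels);

    // Linear C-SVC, as in the official sample
    CvSVMParams params;
    params.svm_type = CvSVM::C_SVC;
    params.kernel_type = CvSVM::LINEAR;
    params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);

    CvSVM svm;
    svm.train(trainingDataMat, labelsMat, Mat(), Mat(), params);

    // Classify a new point; the predicted label is +1 or -1
    Mat sample = (Mat_<float>(1, 2) << 300.0f, 20.0f);
    printf("predicted label: %f\n", svm.predict(sample));
    return 0;
}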

//--------------------------------------[Program notes]-------------------------------------------
//      Description: companion example program 12 of the OpenCV2 edition of the book《OpenCV3编程入门》
//      Brief: official sample program from the Samples folder of the OpenCV installation directory -
//             introduction to support vector machines (SVM)
//      Operating system used for testing: Windows 7 64bit
//      IDE version used for testing: Visual Studio 2010
//      OpenCV version used for testing: 2.4.9
//      Revised in November 2014 by @浅墨_毛星云
//------------------------------------------------------------------------------------------------

//---------------------------------[Header files and namespaces]----------------------------------
//      Description: header files and namespaces used by the program
//-------------------------------------------------------------------------------------------------
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ml/ml.hpp>

using namespace cv;

//-----------------------------------[ShowHelpText( ) function]-----------------------------------
//      Description: prints some help information
//-------------------------------------------------------------------------------------------------
void ShowHelpText()
{
    // Print a welcome message and the OpenCV version
    printf("\n\n\t\t\tThank you very much for purchasing the book《OpenCV3编程入门》!\n");
    printf("\n\n\t\t\tThis is companion example program 12 of the OpenCV2 edition of the book\n");
    printf("\n\n\t\t\t   Current OpenCV version: " CV_VERSION);
    printf("\n\n  ----------------------------------------------------------------------------\n");
}

//-----------------------------------[main( ) function]-------------------------------------------
//      Description: entry point of the console application; the program starts here
//-------------------------------------------------------------------------------------------------
int main()
{
    // Data for visual representation
    int width = 512, height = 512;
    Mat image = Mat::zeros(height, width, CV_8UC3);

    // Set up training data
    float labels[4] = { 1.0, -1.0, -1.0, -1.0 };
    Mat labelsMat(4, 1, CV_32FC1, labels);

    float trainingData[4][2] = { { 501, 10 }, { 255, 10 }, { 501, 255 }, { 10, 501 } };
    Mat trainingDataMat(4, 2, CV_32FC1, trainingData);

    ShowHelpText();

    // Set up the SVM's parameters
    CvSVMParams params;
    params.svm_type = CvSVM::C_SVC;
    params.kernel_type = CvSVM::LINEAR;
    params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);

    // Train the SVM
    CvSVM SVM;
    SVM.train(trainingDataMat, labelsMat, Mat(), Mat(), params);

    Vec3b green(0, 255, 0), blue(255, 0, 0);
    // Show the decision regions given by the SVM
    for (int i = 0; i < image.rows; ++i)
        for (int j = 0; j < image.cols; ++j)
        {
            Mat sampleMat = (Mat_<float>(1, 2) << i, j);
            float response = SVM.predict(sampleMat);

            if (response == 1)
                image.at<Vec3b>(j, i) = green;
            else if (response == -1)
                image.at<Vec3b>(j, i) = blue;
        }

    // Show the training data
    int thickness = -1;
    int lineType = 8;
    circle(image, Point(501, 10), 5, Scalar(0, 0, 0), thickness, lineType);
    circle(image, Point(255, 10), 5, Scalar(255, 255, 255), thickness, lineType);
    circle(image, Point(501, 255), 5, Scalar(255, 255, 255), thickness, lineType);
    circle(image, Point(10, 501), 5, Scalar(255, 255, 255), thickness, lineType);

    // Show the support vectors
    thickness = 2;
    lineType = 8;
    int c = SVM.get_support_vector_count();

    for (int i = 0; i < c; ++i)
    {
        const float* v = SVM.get_support_vector(i);
        circle(image, Point((int)v[0], (int)v[1]), 6, Scalar(128, 128, 128), thickness, lineType);
    }

    imwrite("result.png", image);        // save the image
    imshow("SVM Simple Example", image); // show the image
    waitKey(0);
    return 0;
}

The program above shows the decision regions, the training points and the support vectors, and also saves the result to result.png.

//--------------------------------------[Program notes]-------------------------------------------
//      Description: companion example program 13 of the OpenCV2 edition of the book《OpenCV3编程入门》
//      Brief: official sample program from the Samples folder of the OpenCV installation directory -
//             support vector machines (SVM) for non-linearly separable data
//      Operating system used for testing: Windows 7 64bit
//      IDE version used for testing: Visual Studio 2010
//      OpenCV version used for testing: 2.4.9
//      Revised in November 2014 by @浅墨_毛星云
//------------------------------------------------------------------------------------------------

//---------------------------------[Header files and namespaces]----------------------------------
//      Description: header files and namespaces used by the program
//------------------------------------------------------------------------------------------------
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/ml/ml.hpp>

#define NTRAINING_SAMPLES   100     // number of training samples per class
#define FRAC_LINEAR_SEP     0.9f    // fraction of the samples that make up the linearly separable part

using namespace cv;
using namespace std;

//-----------------------------------[ShowHelpText( ) function]-----------------------------------
//      Description: prints some help information
//------------------------------------------------------------------------------------------------
void ShowHelpText()
{
    // Print a welcome message and the OpenCV version
    printf("\n\n\t\t\tThank you very much for purchasing the book《OpenCV3编程入门》!\n");
    printf("\n\n\t\t\tThis is companion example program 13 of the OpenCV2 edition of the book\n");
    printf("\n\n\t\t\t   Current OpenCV version: " CV_VERSION);
    printf("\n\n  ----------------------------------------------------------------------------\n");
}

//-----------------------------------[main( ) function]-------------------------------------------
//      Description: entry point of the console application; the program starts here
//-------------------------------------------------------------------------------------------------
int main()
{
    // Set up the parameters of the visual representation
    const int WIDTH = 512, HEIGHT = 512;
    Mat I = Mat::zeros(HEIGHT, WIDTH, CV_8UC3);

    ShowHelpText();

    //--------------------- [1] Set up the training data randomly ---------------------------------
    Mat trainData(2 * NTRAINING_SAMPLES, 2, CV_32FC1);
    Mat labels(2 * NTRAINING_SAMPLES, 1, CV_32FC1);

    RNG rng(100); // random value generator

    // Set up the linearly separable part of the training data
    int nLinearSamples = (int)(FRAC_LINEAR_SEP * NTRAINING_SAMPLES);

    // Generate random points for Class 1
    Mat trainClass = trainData.rowRange(0, nLinearSamples);
    // The x coordinate of the points is in [0, 0.4)
    Mat c = trainClass.colRange(0, 1);
    rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(0.4 * WIDTH));
    // The y coordinate of the points is in [0, 1)
    c = trainClass.colRange(1, 2);
    rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));

    // Generate random points for Class 2
    trainClass = trainData.rowRange(2 * NTRAINING_SAMPLES - nLinearSamples, 2 * NTRAINING_SAMPLES);
    // The x coordinate of the points is in [0.6, 1]
    c = trainClass.colRange(0, 1);
    rng.fill(c, RNG::UNIFORM, Scalar(0.6*WIDTH), Scalar(WIDTH));
    // The y coordinate of the points is in [0, 1)
    c = trainClass.colRange(1, 2);
    rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));

    //------------------ Set up the non-linearly separable part of the training data --------------

    // Generate random points for Class 1 and Class 2
    trainClass = trainData.rowRange(nLinearSamples, 2 * NTRAINING_SAMPLES - nLinearSamples);
    // The x coordinate of the points is in [0.4, 0.6)
    c = trainClass.colRange(0, 1);
    rng.fill(c, RNG::UNIFORM, Scalar(0.4*WIDTH), Scalar(0.6*WIDTH));
    // The y coordinate of the points is in [0, 1)
    c = trainClass.colRange(1, 2);
    rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));

    //------------------------- Set up the class labels -------------------------------------------
    labels.rowRange(0, NTRAINING_SAMPLES).setTo(1);                        // Class 1
    labels.rowRange(NTRAINING_SAMPLES, 2 * NTRAINING_SAMPLES).setTo(2);    // Class 2

    //------------------------ 2. Set up the parameters of the SVM --------------------------------
    CvSVMParams params;
    params.svm_type = SVM::C_SVC;
    params.C = 0.1;
    params.kernel_type = SVM::LINEAR;
    params.term_crit = TermCriteria(CV_TERMCRIT_ITER, (int)1e7, 1e-6);

    //------------------------ 3. Train the SVM ---------------------------------------------------
    cout << "Starting training process" << endl;
    CvSVM svm;
    svm.train(trainData, labels, Mat(), Mat(), params);
    cout << "Finished training process" << endl;

    //------------------------ 4. Show the decision regions ---------------------------------------
    Vec3b green(0, 100, 0), blue(100, 0, 0);
    for (int i = 0; i < I.rows; ++i)
        for (int j = 0; j < I.cols; ++j)
        {
            Mat sampleMat = (Mat_<float>(1, 2) << i, j);
            float response = svm.predict(sampleMat);

            if (response == 1)    I.at<Vec3b>(j, i) = green;
            else if (response == 2)    I.at<Vec3b>(j, i) = blue;
        }

    //----------------------- 5. Show the training data -------------------------------------------
    int thick = -1;
    int lineType = 8;
    float px, py;
    // Class 1
    for (int i = 0; i < NTRAINING_SAMPLES; ++i)
    {
        px = trainData.at<float>(i, 0);
        py = trainData.at<float>(i, 1);
        circle(I, Point((int)px, (int)py), 3, Scalar(0, 255, 0), thick, lineType);
    }
    // Class 2
    for (int i = NTRAINING_SAMPLES; i < 2 * NTRAINING_SAMPLES; ++i)
    {
        px = trainData.at<float>(i, 0);
        py = trainData.at<float>(i, 1);
        circle(I, Point((int)px, (int)py), 3, Scalar(255, 0, 0), thick, lineType);
    }

    //------------------------- 6. Show the support vectors ---------------------------------------
    thick = 2;
    lineType = 8;
    int x = svm.get_support_vector_count();

    for (int i = 0; i < x; ++i)
    {
        const float* v = svm.get_support_vector(i);
        circle(I, Point((int)v[0], (int)v[1]), 6, Scalar(128, 128, 128), thick, lineType);
    }

    imwrite("result.png", I);                       // save the image to a file
    imshow("SVM for Non-Linear Training Data", I);  // show the final window
    waitKey(0);
    return 0;
}
