I want to find a particular object in an image.

It is like face detection, except that the thing I want to detect is not a face but some other object.

So I use the SURF algorithm to find keypoints and the FLANN matcher to match them.

But how do I know whether the two images are a good match?

My idea is that if the distribution of the source image's keypoints and the distribution of the template image's keypoints are very similar, then the two images match. But how do I compare them?

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>   // SurfFeatureDetector, SurfDescriptorExtractor
#include <stdio.h>

using namespace cv;
using namespace std;

int main( int argc, char** argv )
{
    std::string templateStr = "D:\\template2.jpg";
    std::string srcString = "D:\\IMG_0284.jpg";
    Mat img_1 = imread(templateStr, CV_LOAD_IMAGE_GRAYSCALE );
    Mat img_2 = imread(srcString, CV_LOAD_IMAGE_GRAYSCALE );


    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 500;

    SurfFeatureDetector detector( minHessian );

    std::vector<KeyPoint> keypoints_1, keypoints_2;
    detector.detect( img_1, keypoints_1 );
    detector.detect( img_2, keypoints_2 );


    //show keypoint,only test
    Mat img_11 = imread(templateStr, CV_LOAD_IMAGE_GRAYSCALE );
    Mat img_21 = imread(srcString, CV_LOAD_IMAGE_GRAYSCALE );

    drawKeypoints (img_11, keypoints_1, img_11, cv::Scalar::all(0), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    drawKeypoints (img_21, keypoints_2, img_21, cv::Scalar::all(0), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

    cv::namedWindow ("img_11");
    cv::imshow ("img_11",img_11);
    cv::namedWindow ("img_21");
    cv::imshow ("img_21",img_21);
    cv::waitKey (0);



    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat descriptors_1, descriptors_2;
    extractor.compute( img_1, keypoints_1, descriptors_1 );
    extractor.compute( img_2, keypoints_2, descriptors_2 );

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector<DMatch> matches;
    double tt = (double)getTickCount();   // start a timer (tt is not used further in this snippet)
    matcher.match( descriptors_1, descriptors_2, matches );

    double max_dist = 0; double min_dist = 100;

    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_1.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist )
            min_dist = dist;
        if( dist > max_dist )
            max_dist = dist;
    }

    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

    //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
    //-- PS.- radiusMatch can also be used here.
    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors_1.rows; i++ )
    {
        if( matches[i].distance < 3*min_dist )
        {
            good_matches.push_back( matches[i]);
        }
    }

    //-- Draw only "good" matches
    Mat img_matches;
    drawMatches( img_1, keypoints_1, img_2, keypoints_2, good_matches, img_matches, \
        Scalar::all(-1), Scalar::all(-1),vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    // Now I have two groups of matched keypoints, keypoints_1 and keypoints_2.
    // keypoints_1 are the template image's keypoints,
    // keypoints_2 are the source image's keypoints.
    // So how do I compare the distribution of keypoints_1 with that of keypoints_2?
    // If the distributions of the two groups are very similar, I will consider the two images a match.

    return 0;

}
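
For reference, one common way to turn the matched keypoints above into a yes/no decision is to check whether the matches are geometrically consistent, for example by fitting a homography with RANSAC and counting the inliers. The sketch below is only an illustration of that idea, not the method used in the question: it reuses keypoints_1, keypoints_2 and good_matches from the code above, assumes OpenCV 2.4.x, and the helper name looksLikeAMatch and the inlier threshold are made up and would need tuning.

#include <opencv2/calib3d/calib3d.hpp>   // findHomography, CV_RANSAC

// Sketch only: decide whether the "good" matches agree on a single geometric transform.
bool looksLikeAMatch( const std::vector<cv::KeyPoint>& keypoints_1,
                      const std::vector<cv::KeyPoint>& keypoints_2,
                      const std::vector<cv::DMatch>& good_matches,
                      int minInliers )                 // hypothetical threshold, e.g. 10
{
    if( good_matches.size() < 4 )                      // a homography needs at least 4 point pairs
        return false;

    std::vector<cv::Point2f> pts_1, pts_2;
    for( size_t i = 0; i < good_matches.size(); i++ )
    {
        pts_1.push_back( keypoints_1[ good_matches[i].queryIdx ].pt );  // template image point
        pts_2.push_back( keypoints_2[ good_matches[i].trainIdx ].pt );  // source image point
    }

    // Fit a homography with RANSAC; inlierMask marks the matches consistent with it.
    std::vector<uchar> inlierMask;
    cv::Mat H = cv::findHomography( pts_1, pts_2, CV_RANSAC, 3.0, inlierMask );
    if( H.empty() )
        return false;

    return cv::countNonZero( inlierMask ) >= minInliers;   // enough geometrically consistent matches
}

Called as looksLikeAMatch(keypoints_1, keypoints_2, good_matches, 10) after the good_matches loop, this returns true only when a sizeable subset of the matches fits one perspective transform, which is a stricter criterion than descriptor distances alone.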

I am using OpenCV 2.4.9 and VS 2010.

Best answer

  • You can try the code below; it may help you.
    #include <opencv2/nonfree/nonfree.hpp>
    #include <opencv2/highgui/highgui.hpp>   // imread, imshow, waitKey
    #include <iostream>
    #include <ctime>
    #include <stdio.h>
    using namespace cv;
    using namespace std;
    
    int main(int argc, const char *argv[])
    {
       double ratio = 0.9;
    
       Mat image1 = imread("Image1_path");
       Mat image2 = imread("Image2_path");
    
       Ptr<FeatureDetector> detector;
       Ptr<DescriptorExtractor> extractor;
    
      // The ORB detector defaults to 500 keypoints; this can be changed via its parameters.
      detector = FeatureDetector::create("ORB");
      extractor = DescriptorExtractor::create("ORB");
    
      vector<KeyPoint> keypoints1, keypoints2;
      detector->detect(image1, keypoints1);
      detector->detect(image2, keypoints2);
    
      cout << "# keypoints of image1 :" << keypoints1.size() << endl;
      cout << "# keypoints of image2 :" << keypoints2.size() << endl;
    
      Mat descriptors1,descriptors2;
      extractor->compute(image1,keypoints1,descriptors1);
      extractor->compute(image2,keypoints2,descriptors2);
    
      cout << "Descriptors size :" << descriptors1.cols << ":"<< descriptors1.rows << endl;
    
      vector< vector<DMatch> > matches12, matches21;
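      // Match in both directions (1->2 and 2->1) with k = 2 nearest neighbours;
      // both sets are needed for the ratio test and the symmetric test below.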
      Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
      matcher->knnMatch( descriptors1, descriptors2, matches12, 2);
      matcher->knnMatch( descriptors2, descriptors1, matches21, 2);
    
      //BFMatcher bfmatcher(NORM_L2, true);
      //vector<DMatch> matches;
      //bfmatcher.match(descriptors1, descriptors2, matches);
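      // Min/max distance of each query's best match (printed for information; not used below).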
      double max_dist = 0; double min_dist = 100;
      for( int i = 0; i < (int)matches12.size(); i++)
      {
          double dist = matches12[i][0].distance;   // distance of the best (nearest-neighbour) match
          if(dist < min_dist)
             min_dist = dist;
          if(dist > max_dist)
             max_dist = dist;
      }
      printf("-- Max dist : %f \n", max_dist);
      printf("-- Min dist : %f \n", min_dist);
      cout << "Matches1-2:" << matches12.size() << endl;
      cout << "Matches2-1:" << matches21.size() << endl;
    
      std::vector<DMatch> good_matches1, good_matches2;
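      // Ratio test (Lowe): keep a match only if its best distance is clearly
      // smaller than the distance to the second-best candidate.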
      for(int i=0; i < matches12.size(); i++)
      {
          if(matches12[i][0].distance < ratio * matches12[i][1].distance)
             good_matches1.push_back(matches12[i][0]);
      }
    
      for(int i=0; i < matches21.size(); i++)
      {
          if(matches21[i][0].distance < ratio * matches21[i][1].distance)
             good_matches2.push_back(matches21[i][0]);
      }
    
      cout << "Good matches1:" << good_matches1.size() << endl;
      cout << "Good matches2:" << good_matches2.size() << endl;
    
     // Symmetric (cross-check) test: keep only matches found in both directions (1->2 and 2->1)
     std::vector<DMatch> better_matches;
     for(int i=0; i<good_matches1.size(); i++)
     {
         for(int j=0; j<good_matches2.size(); j++)
         {
             if(good_matches1[i].queryIdx == good_matches2[j].trainIdx && good_matches2[j].queryIdx == good_matches1[i].trainIdx)
             {
                 better_matches.push_back(DMatch(good_matches1[i].queryIdx, good_matches1[i].trainIdx, good_matches1[i].distance));
                 break;
             }
         }
     }
    
     cout << "Better matches:" << better_matches.size() << endl;
    
     // show it on an image
     Mat output;
     drawMatches(image1, keypoints1, image2, keypoints2, better_matches, output);
     imshow("Matches result",output);
     waitKey(0);
    
     return 0;
    }
    
  • To decide whether the images really match, you can apply a threshold: for example, if better_matches.size() > threshold, treat it as a good match (see the short sketch after this list).
  • Regarding "c++ - How to compare two groups of keypoints in OpenCV", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/35194681/
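
A minimal sketch of that threshold decision, reusing the better_matches vector from the answer's code above; the cut-off value of 20 is only a placeholder to tune on your own template and source images:

    // Sketch only: final decision based on the number of symmetric "better" matches.
    const size_t matchThreshold = 20;   // placeholder value; tune it for your images
    bool imagesMatch = better_matches.size() > matchThreshold;
    cout << (imagesMatch ? "Images match" : "Images do not match") << endl;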
