opencv - How to find a more accurate ellipse based on the currently detected ellipse

I fitted an ellipse to the edge extracted from the red ball, but the result is not accurate.

I extract the red ball based on the HSV color space, but the result always misses the ball's outline (perhaps because the outline is much darker than the rest of the ball).
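
For reference, a minimal sketch of the HSV red-thresholding step (the helper name and the exact ranges below are illustrative assumptions and usually need tuning): red wraps around the hue axis in OpenCV's 8-bit HSV, so two hue bands have to be combined, and a lower V bound helps keep the darker rim pixels.

#include "opencv2/opencv.hpp"

// Illustrative sketch only: threshold red in HSV; the ranges are assumptions to be tuned.
cv::Mat extractRed(const cv::Mat& bgr)
{
    cv::Mat hsv, lowerRed, upperRed, mask;
    cv::cvtColor(bgr, hsv, cv::COLOR_BGR2HSV);
    // Red hue wraps around 0/180 in OpenCV's 8-bit HSV, so combine both ends.
    cv::inRange(hsv, cv::Scalar(0,   80, 50), cv::Scalar(10,  255, 255), lowerRed);
    cv::inRange(hsv, cv::Scalar(160, 80, 50), cv::Scalar(179, 255, 255), upperRed);
    cv::bitwise_or(lowerRed, upperRed, mask);   // a low V bound keeps darker rim pixels
    return mask;
}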

Is there a good way to fit a better ellipse to this ball? I want to find an ellipse that encloses the red ball as accurately as possible.

It would be best if this can be done with existing OpenCV functions.

Best Answer

I have solved this problem. It is still not very stable, but it works most of the time.

Source images. All of these images can be detected: https://www.dropbox.com/sh/daerty94kv5k2n7/AABu9Axewe6mL0NdEX2nG5MIa?dl=0

First, a coarse ellipse is fitted from color alone.

Then the ellipse is re-fitted using both color and edges.

Video link: https://www.youtube.com/watch?v=q0TQYREm9uA

Here is the source code. It works in three phases: (1) convert the image to HSV and threshold the red color, (2) fit a coarse ellipse to the largest red contour, and (3) mask a region around that coarse ellipse, run Canny edge detection inside it, and re-fit a tighter ellipse from the edge contours; at the end the ellipse is also printed as a conic (3x3 matrix) equation.

#include <iostream>
#include <limits>
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
    if (argc < 6)
    {
        cout << "Usage: fitellipse <input_image> <out_final> <out_edges> <out_coarse> <out_mask>" << endl;
        return 0;
    }

    cv::Mat capturedImage = imread(argv[1]);

    if( capturedImage.empty() )
    {
        cout << "Couldn't open image " << argv[1] << endl;
        return 0;
    }

/*============================= Phase 1: Convert Color Space from BGR to HSV =====================================================*/
    cv::Mat imgHSV;
    cv::cvtColor(capturedImage, imgHSV, cv::COLOR_BGR2HSV); //Convert the captured frame from BGR to HSV

    cv::Mat imgGray;
    cv::cvtColor(capturedImage, imgGray, cv::COLOR_BGR2GRAY); // imread loads BGR, so convert from BGR (not RGB); imgGray is not used further below

    cv::Mat imgThresholded;
    cv::inRange(imgHSV, cv::Scalar(160, 80, 70), cv::Scalar(179, 255, 255), imgThresholded); //Threshold the image

    //morphological opening
    cv::erode(imgThresholded, imgThresholded, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(7, 7)) );
    cv::dilate( imgThresholded, imgThresholded, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(7, 7)) );
    //morphological closing (removes small holes from the foreground)
    cv::dilate( imgThresholded, imgThresholded, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(7, 7)) );
    cv::erode(imgThresholded, imgThresholded, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(7, 7)) );
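    // (Opening removes small isolated specks from the red mask; closing fills small holes
    //  inside the ball region, leaving one large solid blob for contour extraction.)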

    namedWindow("imgThresholded", WINDOW_NORMAL);
    imshow("imgThresholded", imgThresholded);

/*============================= Phase 2: Fit a coarse ellipse based on red color ======================================================*/
    vector<vector<cv::Point> > contours;
    cv::findContours(imgThresholded, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cv::Point(0,0));

    size_t index = 0;
    size_t largestSize = 0;
    for(size_t i = 0; i < contours.size(); i++)
    {
        if (contours[i].size() > largestSize)
        {
            largestSize = contours[i].size();
            index = i;
        }
    }

    if (contours.empty() || contours[index].size() < 6)
    {
        cout << "Do not have enough points" << endl;
        return -1;
    }

    cv::Mat imgContour;
    cv::Mat(contours[index]).convertTo(imgContour, CV_32F);
    cv::RotatedRect coarseEllipse = cv::fitEllipse(imgContour);

    cv::Mat capturedImageClone = capturedImage.clone();

    ellipse(capturedImageClone, coarseEllipse.center, coarseEllipse.size*0.5f, coarseEllipse.angle, 0.0, 360.0, cv::Scalar(0,255,255), 3, CV_AA);

    namedWindow("capturedImageClone", CV_WINDOW_NORMAL);
    imshow("capturedImageClone", capturedImageClone);

/*============================= Phase 3: Re-fit a final ellipse based on combination of color and edge ===============================*/

    double cxc = coarseEllipse.center.x;
    double cyc = coarseEllipse.center.y;
    double ca = coarseEllipse.size.height/2;
    double cb = coarseEllipse.size.width/2;

    cv::Mat mask(capturedImage.rows, capturedImage.cols, CV_8UC3, cv::Scalar(0,0,0));
    cv::circle(mask, cv::Point(coarseEllipse.center.x, coarseEllipse.center.y), coarseEllipse.size.height/2 + 100, cv::Scalar(255,255,255), -1);
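    // The circular mask keeps a generous region (coarse radius + 100 px, a heuristic margin)
    // around the detected ball, so the later Canny pass only sees edges near the coarse ellipse.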
    cv::Mat imgMask;
    cv::Mat edges;

    cv::bitwise_and(capturedImage, mask, imgMask);

    namedWindow("imgMask", CV_WINDOW_NORMAL);
    imshow("imgMask", imgMask);

    cv::GaussianBlur(imgMask, edges, cv::Size(5,5), 0);
    cv::Canny(edges, edges, 50, 100);

    namedWindow("edges", CV_WINDOW_NORMAL);
    imshow("edges", edges);

    cv::findContours(edges, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cv::Point(0,0));

    index = contours.size();    // sentinel: no suitable edge contour selected yet
    double centerDistance = (numeric_limits<double>::max)();
    double abRatio = (numeric_limits<double>::max)();
    cv::RotatedRect finalEllipse;

    for (size_t i = 0; i < contours.size(); i++)
    {
        // Skip small contours and, as a heuristic, the last two contours that were found.
        if (contours[i].size() < 500 || i == contours.size() - 1 || i == contours.size() - 2)
            continue;

        cv::Mat(contours[i]).convertTo(imgContour, CV_32F);
        cv::RotatedRect tmpEllipse = cv::fitEllipse(imgContour);

        double txc = tmpEllipse.center.x;
        double tyc = tmpEllipse.center.y;
        double ta = tmpEllipse.size.height/2;
        double tb = tmpEllipse.size.width/2;

        double tmpDis = (cxc - txc) * (cxc - txc) + (cyc - tyc) * (cyc - tyc);

        // Prefer the contour whose fitted ellipse is centered closest to the coarse ellipse,
        // is as round as possible, and is no smaller than the coarse ellipse.
        if (tmpDis < centerDistance && fabs(tb/ta - 1) < abRatio && ta > ca && tb > cb)
        {
            centerDistance = tmpDis;
            abRatio = fabs(tb/ta - 1);
            index = i;
            finalEllipse = tmpEllipse;
        }

   }

    if (index == contours.size())   // fall back to the coarse ellipse if no edge contour qualified
        finalEllipse = coarseEllipse;

    ellipse(capturedImage, finalEllipse.center, finalEllipse.size*0.5f, finalEllipse.angle, 0.0, 360.0, cv::Scalar(0,255,255), 3, CV_AA);

    double xc = finalEllipse.center.x;                  // center x
    double yc = finalEllipse.center.y;                  // center y
    double theta = finalEllipse.angle * CV_PI / 180.0;  // rotation angle (fitEllipse returns degrees, sin/cos need radians)
    double a = finalEllipse.size.height / 2;            // semi-major axis: a
    double b = finalEllipse.size.width / 2;             // semi-minor axis: b

    double A = a * a * sin(theta) * sin(theta) + b * b * cos(theta) * cos(theta);
    double B = 2 * (b * b - a * a) * sin(theta) * cos(theta);
    double C = a * a * cos(theta) * cos(theta) + b * b * sin(theta) * sin(theta);
    double D = -2 * A * xc - B * yc;
    double E = -B * xc - 2 * C * yc;
    double F = A * xc * xc + B * xc * yc + C * yc * yc - a * a * b * b;
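    // The six coefficients above come from expanding the rotated-ellipse equation
    //   ((x-xc)*cos(theta) + (y-yc)*sin(theta))^2 / a^2
    // + ((x-xc)*sin(theta) - (y-yc)*cos(theta))^2 / b^2 = 1
    // into the general conic form A*x^2 + B*x*y + C*y^2 + D*x + E*y + F = 0
    // (as written, the formula takes the semi-axis a along the direction theta).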

    // Normalize the conic coefficients so that the constant term F becomes 1.
    A = A/F;
    B = B/F;
    C = C/F;
    D = D/F;
    E = E/F;
    F = F/F;

    double ellipseArray[3][3] = {{A, B/2, D/2},
                             {B/2, C, E/2},
                             {D/2, E/2, F}};

    cv::Mat ellipseMatrix(3,3,CV_64FC1, ellipseArray);
    cout << ellipseMatrix << endl;
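
    // Optional sanity check (illustrative only, not needed by the pipeline): any point lying
    // on the fitted ellipse should give p^T * M * p ~= 0 for the conic matrix M above.
    double t = 30.0 * CV_PI / 180.0;                                    // arbitrary parametric angle
    double px = xc + a * cos(t) * cos(theta) - b * sin(t) * sin(theta); // point on the ellipse
    double py = yc + a * cos(t) * sin(theta) + b * sin(t) * cos(theta);
    cv::Mat p = (cv::Mat_<double>(3, 1) << px, py, 1.0);
    cv::Mat residual = p.t() * ellipseMatrix * p;                       // 1x1 matrix, value ~ 0
    cout << "conic residual at a sample point: " << residual.at<double>(0, 0) << endl;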

    namedWindow("capturedImage", CV_WINDOW_NORMAL);
    imshow("capturedImage", capturedImage);

    imwrite(argv[2], capturedImage);      // input image with the final ellipse drawn
    imwrite(argv[3], edges);              // Canny edge image
    imwrite(argv[4], capturedImageClone); // input image with the coarse (color-only) ellipse
    imwrite(argv[5], imgMask);            // masked region used for edge detection

    waitKey(0);
    return 0;
}
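
As a side note: cv::ellipse also has an overload that accepts the RotatedRect from fitEllipse directly, and it draws the same ellipse as the center/half-size/angle call used above, e.g.:

    cv::ellipse(capturedImage, finalEllipse, cv::Scalar(0, 255, 255), 3);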

Regarding "opencv - How to find a more accurate ellipse based on the currently detected ellipse", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/32901120/
