I know this is a duplicate post, but I'm still stuck on the implementation.
I followed several guides on the internet on how to detect a document in an image with OpenCV and Java.
The first approach I came up with was to use findContours after some preprocessing (blurring, edge detection). Once I have all the contours, I find the largest one and assume it is the rectangle I am looking for. However, this fails in some cases, for example when the document is not fully captured, e.g. one corner is missing.
After several more attempts with additional processing that still did not work, I found that the Hough Line transform makes things easier. At this point I have all the lines in the image, but I still don't know how to get from them to the rectangle of interest.
Here is the implementation code I have so far:
Approach 1: using findContours

    Mat grayImage = new Mat();
    Mat detectedEdges = new Mat();
    // convert to grayscale
    Imgproc.cvtColor(frame, grayImage, Imgproc.COLOR_BGR2GRAY);
    // reduce noise with a 3x3 kernel
    // Imgproc.blur(grayImage, detectedEdges, new Size(3, 3));
    Imgproc.medianBlur(grayImage, detectedEdges, 9);
    // Imgproc.equalizeHist(detectedEdges, detectedEdges);
    // Imgproc.GaussianBlur(detectedEdges, detectedEdges, new Size(5, 5), 0, 0, Core.BORDER_DEFAULT);
    Mat edges = new Mat();
    // canny detector, with ratio of lower:upper threshold of 3:1
    Imgproc.Canny(detectedEdges, edges, this.threshold.getValue(), this.threshold.getValue() * 3, 3, true);
    // makes the object in white bigger
    Imgproc.dilate(edges, edges, new Mat(), new Point(-1, -1), 1); // 1
    Image imageToShow = Utils.mat2Image(edges);
    updateImageView(cannyFrame, imageToShow);
    /// Find contours
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(edges, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    // loop over the contours
    MatOfPoint2f approxCurve;
    double maxArea = 0;
    int maxId = -1;
    for (MatOfPoint contour : contours) {
        MatOfPoint2f temp = new MatOfPoint2f(contour.toArray());
        double area = Imgproc.contourArea(contour);
        approxCurve = new MatOfPoint2f();
        Imgproc.approxPolyDP(temp, approxCurve, Imgproc.arcLength(temp, true) * 0.02, true);
        if (approxCurve.total() == 4 && area >= maxArea) {
            double maxCosine = 0;
            List<Point> curves = approxCurve.toList();
            for (int j = 2; j < 5; j++) {
                double cosine = Math.abs(angle(curves.get(j % 4), curves.get(j - 2), curves.get(j - 1)));
                maxCosine = Math.max(maxCosine, cosine);
            }
            if (maxCosine < 0.3) {
                maxArea = area;
                maxId = contours.indexOf(contour);
            }
        }
    }
    // only draw the result if a quadrilateral-like contour was actually found,
    // otherwise contours.get(-1) would throw
    if (maxId >= 0) {
        MatOfPoint maxMatOfPoint = contours.get(maxId);
        MatOfPoint2f maxMatOfPoint2f = new MatOfPoint2f(maxMatOfPoint.toArray());
        RotatedRect rect = Imgproc.minAreaRect(maxMatOfPoint2f);
        System.out.println("Rect angle: " + rect.angle);
        Point[] points = new Point[4];
        rect.points(points);
        for (int i = 0; i < 4; ++i) {
            Imgproc.line(frame, points[i], points[(i + 1) % 4], new Scalar(255, 255, 25), 3);
        }
    }

    Mat dest = new Mat();
    frame.copyTo(dest, frame);
    return dest;

Approach 2: using the Hough Line transform
    // STEP 1: Edge detection
    Mat grayImage = new Mat();
    Mat detectedEdges = new Mat();
    Vector<Point> start = new Vector<Point>();
    Vector<Point> end = new Vector<Point>();
    // convert to grayscale
    Imgproc.cvtColor(frame, grayImage, Imgproc.COLOR_BGR2GRAY);
    // reduce noise with a 3x3 kernel
    // Imgproc.blur(grayImage, detectedEdges, new Size(3, 3));
    Imgproc.medianBlur(grayImage, detectedEdges, 9);
    // Imgproc.equalizeHist(detectedEdges, detectedEdges);
    // Imgproc.GaussianBlur(detectedEdges, detectedEdges, new Size(5, 5), 0, 0, Core.BORDER_DEFAULT);
    // AdaptiveThreshold -> classify as either black or white
    // Imgproc.adaptiveThreshold(detectedEdges, detectedEdges, 255, Imgproc.ADAPTIVE_THRESH_MEAN_C, Imgproc.THRESH_BINARY, 5, 2);
    // Imgproc.Sobel(detectedEdges, detectedEdges, -1, 1, 0);
    Mat edges = new Mat();
    // canny detector, with ratio of lower:upper threshold of 3:1
    Imgproc.Canny(detectedEdges, edges, this.threshold.getValue(), this.threshold.getValue() * 3, 3, true);
    // apply gaussian blur to smoothen lines of dots
    Imgproc.GaussianBlur(edges, edges, new org.opencv.core.Size(5, 5), 5);
    // makes the object in white bigger
    Imgproc.dilate(edges, edges, new Mat(), new Point(-1, -1), 1); // 1
    Image imageToShow = Utils.mat2Image(edges);
    updateImageView(cannyFrame, imageToShow);
    // STEP 2: Line detection
    // Do Hough line
    Mat lines = new Mat();
    int minLineSize = 50;
    int lineGap = 10;
    Imgproc.HoughLinesP(edges, lines, 1, Math.PI / 720, (int) this.threshold.getValue(), this.minLineSize.getValue(), lineGap);
    System.out.println("MinLineSize: " + this.minLineSize.getValue());
    System.out.println(lines.rows());
    for (int i = 0; i < lines.rows(); i++) {
        double[] val = lines.get(i, 0);
        Point tmpStartP = new Point(val[0], val[1]);
        Point tmpEndP = new Point(val[2], val[3]);
        start.add(tmpStartP);
        end.add(tmpEndP);
        Imgproc.line(frame, tmpStartP, tmpEndP, new Scalar(255, 255, 0), 2);
    }

    Mat dest = new Mat();
    frame.copyTo(dest, frame);
    return dest;

[HoughLine result image 1]
[HoughLine result image 2]

How can I detect the desired rectangle from the Hough Line results?
Can someone give me the next steps to complete the Hough Line transform approach?
Any help is appreciated. I have been stuck on this for a while.

Thank you for reading.

Best answer

This answer is pretty much a mix of two other answers I have posted (here and here). But the pipeline I used in those answers can be improved a bit for your case, so I think it is worth posting a new answer.
There are many ways to achieve what you want. However, I don't think line detection with HoughLinesP is needed here. This is the pipeline I used on your samples:
Step 1: Detect edges

  • Resize the input image if it is too large (I noticed that this pipeline works better on a down-scaled version of the input image)
  • Blur the grayscale input and detect edges with the Canny filter

Step 2: Find the corners of the card

  • Compute the contours
  • Sort the contours by length and keep only the largest one
  • Generate the convex hull of this contour
  • Use approxPolyDP to simplify the convex hull (it should give a quadrilateral)
  • Create a mask out of the approximated polygon
  • Return the 4 points of the quadrilateral

Step 3: Homography

  • Use findHomography to find the transformation (homography) of the paper sheet, using the 4 corner points found in Step 2
  • Warp the input image with the computed homography matrix

NOTE: Of course, once you have found the corners of the paper sheet on the down-scaled version of the input image, you can easily compute their positions on the full-sized input image, so that the warped paper sheet has the best possible resolution; a minimal sketch of that rescaling step follows.
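Here is a minimal, untested Java sketch of that rescaling step, assuming the OpenCV Java bindings used in the question. The method name scaleCornersToFullSize and the scale parameter (the factor used to shrink the input, e.g. the 0.1 in the C++ code below) are placeholders of mine, not part of the answer's code.

    // assumes: import java.util.ArrayList; import java.util.List; import org.opencv.core.Point;
    // maps corners detected on the down-scaled image back onto the full-sized input
    static List<Point> scaleCornersToFullSize(List<Point> smallCorners, double scale) {
        List<Point> fullCorners = new ArrayList<>();
        for (Point p : smallCorners) {
            // the image was shrunk by 'scale', so divide to go back to full resolution
            fullCorners.add(new Point(p.x / scale, p.y / scale));
        }
        return fullCorners;
    }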
And here is the result:
[result image]
    #include <opencv2/opencv.hpp> // core, imgproc, calib3d, highgui
    #include <numeric>            // std::iota
    #include <vector>

    using namespace cv;
    using namespace std;

    // Returns the simplified convex hull of the longest contour (ideally the
    // 4 corners of the sheet), or an empty vector if none was found.
    vector<Point> getQuadrilateral(Mat & grayscale, Mat& output)
    {
        Mat approxPoly_mask(grayscale.rows, grayscale.cols, CV_8UC1);
        approxPoly_mask = Scalar(0);

        vector<vector<Point>> contours;
        findContours(grayscale, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
    
        vector<int> indices(contours.size());
        iota(indices.begin(), indices.end(), 0);
    
        sort(indices.begin(), indices.end(), [&contours](int lhs, int rhs) {
            return contours[lhs].size() > contours[rhs].size();
        });
    
        /// Find the convex hull object for each contour
        vector<vector<Point> >hull(1);
        convexHull(Mat(contours[indices[0]]), hull[0], false);
    
        vector<vector<Point>> polygon(1);
        approxPolyDP(hull[0], polygon[0], 20, true);
        drawContours(approxPoly_mask, polygon, 0, Scalar(255));
        imshow("approxPoly_mask", approxPoly_mask);
    
        if (polygon[0].size() >= 4) // we found the 4 corners
        {
            return(polygon[0]);
        }
    
        return(vector<Point>());
    }
    
    
    int main(int argc, char** argv)
    {
    
        Mat input = imread("papersheet1.JPG");
        resize(input, input, Size(), 0.1, 0.1);
        Mat input_grey;
        cvtColor(input, input_grey, COLOR_BGR2GRAY); // COLOR_BGR2GRAY instead of the legacy CV_BGR2GRAY constant
        Mat threshold1;
        Mat edges;
        blur(input_grey, input_grey, Size(3, 3));
        Canny(input_grey, edges, 30, 100);
    
    
        vector<Point> card_corners = getQuadrilateral(edges, input);
        Mat warpedCard(400, 300, CV_8UC3);
        if (card_corners.size() == 4)
        {
            Mat homography = findHomography(card_corners, vector<Point>{Point(warpedCard.cols, warpedCard.rows), Point(0, warpedCard.rows), Point(0, 0), Point(warpedCard.cols, 0)});
            warpPerspective(input, warpedCard, homography, Size(warpedCard.cols, warpedCard.rows));
        }
    
        imshow("warped card", warpedCard);
        imshow("edges", edges);
        imshow("input", input);
        waitKey(0);
    
        return 0;
    }
    
This is C++ code, but it should not be hard to translate it to Java.
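To save some typing, here is a rough, untested Java sketch of the same pipeline, assuming the OpenCV 3.x+ Java bindings (the ones that provide Imgcodecs, Imgproc and Calib3d). The class name, file names, and the epsilon/threshold values simply mirror the C++ above; treat it as a starting point rather than a drop-in solution.

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    import org.opencv.calib3d.Calib3d;
    import org.opencv.core.Core;
    import org.opencv.core.CvType;
    import org.opencv.core.Mat;
    import org.opencv.core.MatOfInt;
    import org.opencv.core.MatOfPoint;
    import org.opencv.core.MatOfPoint2f;
    import org.opencv.core.Point;
    import org.opencv.core.Size;
    import org.opencv.imgcodecs.Imgcodecs;
    import org.opencv.imgproc.Imgproc;

    public class DocumentDetector {

        static {
            System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        }

        // Step 2 of the pipeline: longest contour -> convex hull -> approxPolyDP
        static List<Point> getQuadrilateral(Mat edges) {
            List<MatOfPoint> contours = new ArrayList<>();
            Imgproc.findContours(edges, contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);
            if (contours.isEmpty()) {
                return new ArrayList<>();
            }
            // keep only the contour with the most points, like the sort-by-length in the C++ version
            MatOfPoint largest = contours.stream().max(Comparator.comparingLong(Mat::total)).get();

            // the Java binding returns hull point *indices*, so look the points up
            MatOfInt hullIndices = new MatOfInt();
            Imgproc.convexHull(largest, hullIndices, false);
            List<Point> contourPoints = largest.toList();
            List<Point> hullPoints = new ArrayList<>();
            for (int idx : hullIndices.toArray()) {
                hullPoints.add(contourPoints.get(idx));
            }

            // simplify the hull; with a suitable epsilon this should be a quadrilateral
            MatOfPoint2f approx = new MatOfPoint2f();
            Imgproc.approxPolyDP(new MatOfPoint2f(hullPoints.toArray(new Point[0])), approx, 20, true);
            return approx.toList();
        }

        public static void main(String[] args) {
            // Step 1: down-scale, grayscale, blur, Canny
            Mat input = Imgcodecs.imread("papersheet1.JPG");
            Imgproc.resize(input, input, new Size(), 0.1, 0.1, Imgproc.INTER_AREA);
            Mat gray = new Mat();
            Imgproc.cvtColor(input, gray, Imgproc.COLOR_BGR2GRAY);
            Imgproc.blur(gray, gray, new Size(3, 3));
            Mat edges = new Mat();
            Imgproc.Canny(gray, edges, 30, 100);

            // Step 2: find the 4 corners
            List<Point> corners = getQuadrilateral(edges);

            // Step 3: homography + perspective warp
            Mat warped = new Mat(400, 300, CvType.CV_8UC3);
            if (corners.size() == 4) {
                MatOfPoint2f src = new MatOfPoint2f(corners.toArray(new Point[0]));
                MatOfPoint2f dst = new MatOfPoint2f(
                        new Point(warped.cols(), warped.rows()),
                        new Point(0, warped.rows()),
                        new Point(0, 0),
                        new Point(warped.cols(), 0));
                Mat homography = Calib3d.findHomography(src, dst);
                Imgproc.warpPerspective(input, warped, homography, warped.size());
                Imgcodecs.imwrite("warpedCard.png", warped);
            }
        }
    }

Note that the destination corner order mirrors the C++ answer; if approxPolyDP returns the corners in a different order for your image, the warped result will come out rotated or mirrored.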
