简体   繁体   中英

How to detect rectangle from HoughLines transform in OpenCV Java

I know this is a duplicated post but I am still stuck on the implementation. I followed some guides on the internet on how to detect a document in an image in OpenCV and Java. The first approach I came up with was to use findContours after pre-processing the image (blur, edge detection); after getting all the contours, I can find the largest contour and assume that it is the rectangle I am looking for. However, this fails in some cases, e.g. when the document is not fully captured, such as a missing corner. After trying several times with additional processing that did not work at all, I found that the HoughLine transform makes it easier. Now I have all the lines inside the image, but I still do not know what to do next to define the rectangle of interest. Here is the implementation code I have so far: Approach 1: Using findContours

// Approach 1: find the largest near-rectangular contour after edge detection
// and draw its minimum-area bounding rectangle onto `frame`.
Mat grayImage = new Mat();
    Mat detectedEdges = new Mat();
    // convert to grayscale
    Imgproc.cvtColor(frame, grayImage, Imgproc.COLOR_BGR2GRAY);
    // median blur suppresses salt-and-pepper noise while keeping edges sharper
    // than a box blur would
    Imgproc.medianBlur(grayImage, detectedEdges, 9);
    Mat edges = new Mat();
    // Canny detector with the recommended lower:upper threshold ratio of 1:3
    Imgproc.Canny(detectedEdges, edges, this.threshold.getValue(), this.threshold.getValue() * 3, 3, true);
    // dilate to close small gaps in the detected edges
    Imgproc.dilate(edges, edges, new Mat(), new Point(-1, -1), 1);
    Image imageToShow = Utils.mat2Image(edges);
    updateImageView(cannyFrame, imageToShow);
    // Find all contours in the edge map.
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(edges, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    // Keep the largest 4-vertex polygon whose corner angles are all close to
    // 90 degrees (cosine close to 0).
    double maxArea = 0;
    int maxId = -1;
    for (int i = 0; i < contours.size(); i++) {
        MatOfPoint contour = contours.get(i);
        MatOfPoint2f temp = new MatOfPoint2f(contour.toArray());
        double area = Imgproc.contourArea(contour);
        MatOfPoint2f approxCurve = new MatOfPoint2f();
        Imgproc.approxPolyDP(temp, approxCurve, Imgproc.arcLength(temp, true) * 0.02, true);
        if (approxCurve.total() == 4 && area >= maxArea) {
            double maxCosine = 0;
            List<Point> curves = approxCurve.toList();
            for (int j = 2; j < 5; j++) {
                // cosine of the angle at each vertex; near 0 means near 90 degrees
                double cosine = Math.abs(angle(curves.get(j % 4), curves.get(j - 2), curves.get(j - 1)));
                maxCosine = Math.max(maxCosine, cosine);
            }
            if (maxCosine < 0.3) {
                maxArea = area;
                // FIX: use the loop index directly instead of the O(n)
                // contours.indexOf(contour) lookup
                maxId = i;
            }
        }
    }
    // FIX: guard against the case where no candidate rectangle was found —
    // the original called contours.get(-1), which throws.
    if (maxId >= 0) {
        MatOfPoint2f maxMatOfPoint2f = new MatOfPoint2f(contours.get(maxId).toArray());
        RotatedRect rect = Imgproc.minAreaRect(maxMatOfPoint2f);
        System.out.println("Rect angle: " + rect.angle);
        Point[] points = new Point[4];
        rect.points(points);
        for (int i = 0; i < 4; ++i) {
            Imgproc.line(frame, points[i], points[(i + 1) % 4], new Scalar(255, 255, 25), 3);
        }
    }

    Mat dest = new Mat();
    // FIX: copy unconditionally. Passing `frame` itself as the mask argument
    // silently drops every pure-black pixel of the source image.
    frame.copyTo(dest);
    return dest;

Approach 2: Using HoughLine transform

// Approach 2 — STEP 1: edge detection (same preprocessing as approach 1),
// then STEP 2: probabilistic Hough transform to extract line segments.
    Mat grayImage = new Mat();
    Mat detectedEdges = new Mat();
    // endpoints of every detected segment, kept for later processing
    Vector<Point> start = new Vector<Point>();
    Vector<Point> end = new Vector<Point>();
    // convert to grayscale
    Imgproc.cvtColor(frame, grayImage, Imgproc.COLOR_BGR2GRAY);
    // median blur suppresses noise while keeping edges sharp
    Imgproc.medianBlur(grayImage, detectedEdges, 9);
    Mat edges = new Mat();
    // Canny detector with the recommended lower:upper threshold ratio of 1:3
    Imgproc.Canny(detectedEdges, edges, this.threshold.getValue(), this.threshold.getValue() * 3, 3, true);
    // Gaussian blur smears the binary edge map so chains of dots merge into
    // continuous lines before the Hough transform
    Imgproc.GaussianBlur(edges, edges, new org.opencv.core.Size(5, 5), 5);
    // dilate to thicken the white edge pixels
    Imgproc.dilate(edges, edges, new Mat(), new Point(-1, -1), 1);
    Image imageToShow = Utils.mat2Image(edges);
    updateImageView(cannyFrame, imageToShow);
    // STEP 2: probabilistic Hough transform. Each row of `lines` holds one
    // segment as (x1, y1, x2, y2). Angle resolution Math.PI / 720 = 0.25 deg.
    Mat lines = new Mat();
    // FIX: removed the unused local `int minLineSize = 50;` — it was shadowed
    // by this.minLineSize, which is what is actually passed below.
    int lineGap = 10;
    Imgproc.HoughLinesP(edges, lines, 1, Math.PI / 720, (int) this.threshold.getValue(), this.minLineSize.getValue(), lineGap);
    System.out.println("MinLineSize: " + this.minLineSize.getValue());
    System.out.println(lines.rows());
    for (int i = 0; i < lines.rows(); i++) {
        double[] val = lines.get(i, 0);
        Point tmpStartP = new Point(val[0], val[1]);
        Point tmpEndP = new Point(val[2], val[3]);
        start.add(tmpStartP);
        end.add(tmpEndP);
        Imgproc.line(frame, tmpStartP, tmpEndP, new Scalar(255, 255, 0), 2);
    }

    Mat dest = new Mat();
    // FIX: copy unconditionally. Passing `frame` itself as the mask argument
    // silently drops every pure-black pixel of the source image.
    frame.copyTo(dest);
    return dest;

HoughLine result 1 HoughLine result 2

How do I detect the needed rectangle from the HoughLine result? Can someone give me the next step to complete the HoughLine transform approach? Any help is appreciated; I have been stuck with this for a while.

Thanks you for reading this.

This answer is pretty much a mix of two other answers ( here and here ) I posted. But the pipeline I used for the other answers can be a little bit improved for your case. So I think it's worth posting a new answer.

There are many ways to achieve what you want. However, I don't think that line detection with HoughLinesP is needed here. So here is the pipeline I used on your samples:

Step 1: Detect edges

  • Resize the input image if it's too large (I noticed that this pipeline works better on down scaled version of a given input image)
  • Blur grayscale input and detect edges with Canny filter

Step 2: Find the card's corners

  • Compute the contours
  • Sort the contours by length and only keep the largest one
  • Generate the convex hull of this contour
  • Use approxPolyDP to simplify the convex hull (this should give a quadrilateral )
  • Create a mask out of the approximate polygon
  • return the 4 points of the quadrilateral

Step 3: Homography

  • Use findHomography to find the affine transformation of your paper sheet (with the 4 corner points found at Step 2 )
  • Warp the input image using the computed homography matrix

NOTE : Of course, once you have found the corners of the paper sheet on the down scaled version of the input image, you can easily compute the position of the corners on the full sized input image. This, in order to have the best resolution for the warped paper sheet.

And here is the result: (image: warped paper sheet)

// Locates the paper sheet in an edge image: takes the longest outer contour,
// builds its convex hull, and simplifies the hull into a polygon. Returns the
// polygon's corner points when it has at least 4 vertices, otherwise an empty
// vector.
// NOTE(review): `output` is currently unused — kept for interface stability.
vector<Point> getQuadrilateral(Mat & grayscale, Mat& output)
{
    // Debug mask image, initially all black.
    Mat approxPoly_mask(grayscale.rows, grayscale.cols, CV_8UC1);
    approxPoly_mask = Scalar(0);

    // Collect every outer contour of the edge map.
    vector<vector<Point>> contours;
    findContours(grayscale, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);

    // Pick the contour with the most points (i.e. the longest one) with a
    // direct scan instead of sorting an index array.
    size_t longest = 0;
    for (size_t i = 1; i < contours.size(); ++i)
    {
        if (contours[i].size() > contours[longest].size())
        {
            longest = i;
        }
    }

    /// Convex hull of the longest contour.
    vector<vector<Point>> hull(1);
    convexHull(Mat(contours[longest]), hull[0], false);

    // Simplify the hull — for a paper sheet this should collapse into a
    // quadrilateral. Draw it on the debug mask for inspection.
    vector<vector<Point>> polygon(1);
    approxPolyDP(hull[0], polygon[0], 20, true);
    drawContours(approxPoly_mask, polygon, 0, Scalar(255));
    imshow("approxPoly_mask", approxPoly_mask);

    if (polygon[0].size() >= 4) // we found the 4 corners
    {
        return polygon[0];
    }

    return vector<Point>();
}


// Pipeline demo: load a photo of a paper sheet, detect its 4 corners, and
// warp it into an upright 300x400 view.
int main(int argc, char** argv)
{

    Mat input = imread("papersheet1.JPG");
    // Down-scale hard: the detection works better on a small image, and the
    // found corners could later be re-projected onto the full-size input.
    resize(input, input, Size(), 0.1, 0.1);
    Mat input_grey;
    cvtColor(input, input_grey, CV_BGR2GRAY);
    // FIX: removed the unused local `Mat threshold1;`.
    Mat edges;
    // Light blur before Canny reduces spurious edges.
    blur(input_grey, input_grey, Size(3, 3));
    Canny(input_grey, edges, 30, 100);

    // Find the sheet's corners on the edge map.
    vector<Point> card_corners = getQuadrilateral(edges, input);

    // Warp the sheet into a fixed 300x400 canvas when exactly 4 corners
    // were found; the destination corners are ordered to match the order
    // the detector returns them in.
    Mat warpedCard(400, 300, CV_8UC3);
    if (card_corners.size() == 4)
    {
        Mat homography = findHomography(card_corners, vector<Point>{Point(warpedCard.cols, warpedCard.rows), Point(0, warpedCard.rows), Point(0, 0), Point(warpedCard.cols, 0)});
        warpPerspective(input, warpedCard, homography, Size(warpedCard.cols, warpedCard.rows));
    }

    imshow("warped card", warpedCard);
    imshow("edges", edges);
    imshow("input", input);
    waitKey(0);

    return 0;
}

This is C++ code, but it shouldn't be too hard to translate into Java.

The technical post webpages of this site follow the CC BY-SA 4.0 protocol. If you need to reprint, please indicate the site URL or the original address.Any question please contact:yoyou2525@163.com.

 
粤ICP备18138465号  © 2020-2024 STACKOOM.COM