Example usage for org.opencv.imgproc Imgproc approxPolyDP

Introduction

On this page you can find example usage of org.opencv.imgproc Imgproc approxPolyDP.

Prototype

public static void approxPolyDP(MatOfPoint2f curve, MatOfPoint2f approxCurve, double epsilon, boolean closed) 
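
Before the project examples below, here is a minimal, self-contained sketch of the call itself. The test curve and the 0.02 * perimeter epsilon are illustrative choices (a common convention that several examples below also use), not taken from any particular project:

import org.opencv.core.Core;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.imgproc.Imgproc;

public class ApproxPolyDPExample {
    public static void main(String[] args) {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // A square traced with one extra, nearly collinear vertex at (50, 2).
        MatOfPoint2f curve = new MatOfPoint2f(new Point(0, 0), new Point(50, 2),
                new Point(100, 0), new Point(100, 100), new Point(0, 100));

        // Epsilon is commonly chosen as a small fraction of the curve's perimeter.
        double epsilon = 0.02 * Imgproc.arcLength(curve, true);

        // Simplify the closed curve: vertices that deviate from the simplified
        // polygon by less than epsilon are dropped (Ramer-Douglas-Peucker).
        MatOfPoint2f approx = new MatOfPoint2f();
        Imgproc.approxPolyDP(curve, approx, epsilon, true);

        // The near-collinear vertex is removed, leaving the four corners.
        System.out.println("before: " + curve.total() + ", after: " + approx.total());
    }
}

Note that approxPolyDP operates on MatOfPoint2f (float coordinates), while Imgproc.findContours returns MatOfPoint (integer coordinates), so contours must be converted first - as most of the examples below do.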

Usage

From source file:objectdetection.ObjectDetector.java

public void findObjects() {

    preProcessImg();
    // Find external contours in the edge image; the hierarchy output is not used.
    Imgproc.findContours(imgCanny, contours, new Mat(), RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

    for (MatOfPoint mop : contours) {
        MatOfPoint2f m2p = new MatOfPoint2f(mop.toArray());
        double peri = Imgproc.arcLength(m2p, true);
        Imgproc.approxPolyDP(m2p, m2p, 0.02 * peri, true);
        //Imgproc.drawContours(img, contours, -1, new Scalar(0, 0, 255), 2);

        float area = img.width() * img.height();
        Rect rect = Imgproc.boundingRect(mop);
        objList.add(rect);
        //if (rect.height * rect.width > area*5/100) {
        Imgproc.rectangle(img, rect.tl(), rect.br(), new Scalar(255, 0, 0));
        //}
    }
    Collections.sort(objList, new Comparator<Rect>() {
        @Override
        public int compare(Rect r1, Rect r2) {
            return Double.compare(r2.area(), r1.area());
        }

    });

    List<Rect> arr = objList;

    Rect bigRect = arr.get(0);
    Rect bigRect2 = arr.get(1);

    while (!equals(bigRect, bigRect2)) {
        bigRect2 = bigRect;
        for (int i = 1; i < arr.size(); ++i) {
            if (doOverlap(bigRect, arr.get(i))) {
                bigRect = union(bigRect, arr.get(i));
                arr.remove(i);
                break;
            }
        }

    }

    Imgproc.rectangle(img, bigRect.tl(), bigRect.br(), new Scalar(255, 255, 0));
    mainRect = bigRect;
}

From source file:opencv.CaptchaDetection.java

/***
 * Find the number regions in the captcha image and extract each one as an ROI.
 * @param src source image
 * @return list of ROI sub-images, one per detected number region
 */
private static List<Mat> find_number(Mat src) {
    Mat src_tmp = src.clone();

    // Dilate to thicken the strokes
    Imgproc.dilate(src_tmp, src_tmp, new Mat());

    // Canny edge detection
    Mat canny_edge = new Mat();
    Imgproc.blur(src_tmp, src_tmp, new Size(3, 3));
    Imgproc.Canny(src_tmp, canny_edge, 50, 150, 3, false);

    // Find contours
    List<MatOfPoint> contours = new ArrayList<>();
    Imgproc.findContours(canny_edge, contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);

    List<Rect> boundRect = new ArrayList<>();

    // Approximate each contour and keep those with a large enough bounding rect
    for (int i = 0; i < contours.size(); i++) {
        MatOfPoint2f tmp_mp2f_1 = new MatOfPoint2f();
        MatOfPoint2f tmp_mp2f_2 = new MatOfPoint2f();

        contours.get(i).convertTo(tmp_mp2f_1, CvType.CV_32FC2);

        Imgproc.approxPolyDP(tmp_mp2f_1, tmp_mp2f_2, 3, true);

        tmp_mp2f_2.convertTo(contours.get(i), CvType.CV_32S);

        Rect rect = Imgproc.boundingRect(contours.get(i));

        //if (rect.area() > 300)
        //out.println("h : " + rect.height + ", w : " + rect.width + ", aera :  " + rect.area());

        if (rect.height >= 21 && rect.width >= 21 && rect.area() >= 700)
            boundRect.add(rect);
    }

    // Draw the bounding rectangles on the working image
    for (Rect rect : boundRect) {
        Scalar color = new Scalar(128);
        Imgproc.rectangle(src_tmp, rect.tl(), rect.br(), color, 2, 8, 0);
    }

    // Sort the bounding rectangles
    Collections.sort(boundRect, rectSort);

    List<Mat> numRoi = new ArrayList<>();
    for (Rect rect : boundRect)
        numRoi.add(src.submat(rect));

    //for (Mat roi : numRoi) 
    //showResult(roi, "roi");

    return numRoi;
}

From source file:org.ar.rubik.ImageRecognizer.java

License:Open Source License

/**
 * On Camera Frame
 * 
 * Process frame image through Rubik Face recognition possibly resulting in a state change.
 * 
 *  (non-Javadoc)
 * @see org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2#onCameraFrame(org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame)
 */
@Override
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {

    //      Log.e(Constants.TAG, "CV Thread ID = " + Thread.currentThread().getId());

    // Just display error message if it is non-null.
    if (errorImage != null)
        return errorImage;

    Mat image = inputFrame.rgba();
    Size imageSize = image.size();
    Log.v(Constants.TAG_CAL, "Input Frame width=" + imageSize.width + " height=" + imageSize.height);
    if (imageSize.width != stateModel.openCVSize.width || imageSize.height != stateModel.openCVSize.height)
        Log.e(Constants.TAG_CAL, "State Model openCVSize does not agree with input frame!");

    // Save or Recall image as requested
    switch (MenuAndParams.imageSourceMode) {
    case NORMAL:
        break;
    case SAVE_NEXT:
        Util.saveImage(image);
        MenuAndParams.imageSourceMode = ImageSourceModeEnum.NORMAL;
        break;
    case PLAYBACK:
        image = Util.recallImage();
    default:
        break;
    }

    // Calculate and display Frames Per Second
    long newTimeStamp = System.currentTimeMillis();
    if (framesPerSecondTimeStamp > 0) {
        long frameTime = newTimeStamp - framesPerSecondTimeStamp;
        double framesPerSecond = 1000.0 / frameTime;
        String string = String.format("%4.1f FPS", framesPerSecond);
        Core.putText(image, string, new Point(50, 700), Constants.FontFace, 2, ColorTileEnum.WHITE.cvColor, 2);
    }
    framesPerSecondTimeStamp = newTimeStamp;

    try {

        // Initialize
        RubikFace rubikFace = new RubikFace();
        rubikFace.profiler.markTime(Profiler.Event.START);
        Log.i(Constants.TAG, "============================================================================");

        /* **********************************************************************
         * **********************************************************************
         * Return Original Image
         */
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.DIRECT) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            return annotation.drawAnnotation(image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Process to Grey Scale
         * 
         * This algorithm finds and highlights areas that are all of nearly
         * the same hue.  In particular, cube faces should be highlighted.
         */
        Mat greyscale_image = new Mat();
        Imgproc.cvtColor(image, greyscale_image, Imgproc.COLOR_BGR2GRAY);
        rubikFace.profiler.markTime(Profiler.Event.GREYSCALE);
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.GREYSCALE) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(greyscale_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Gaussian Filter Blur prevents getting a lot of false hits 
         */
        Mat blur_image = new Mat();

        int kernelSize = (int) MenuAndParams.gaussianBlurKernelSizeParam.value;
        kernelSize = kernelSize % 2 == 0 ? kernelSize + 1 : kernelSize; // make odd
        Imgproc.GaussianBlur(greyscale_image, blur_image, new Size(kernelSize, kernelSize), -1, -1);
        rubikFace.profiler.markTime(Profiler.Event.GAUSSIAN);
        greyscale_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.GAUSSIAN) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(blur_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Canny Edge Detection
         */
        Mat canny_image = new Mat();
        Imgproc.Canny(blur_image, canny_image, MenuAndParams.cannyLowerThresholdParam.value,
                MenuAndParams.cannyUpperThresholdParam.value, 3, // Sobel aperture size: 3 (i.e., a 3x3 Sobel matrix) seems to be the typical value used in the literature.
                false); // use cheap gradient calculation: norm = |dI/dx| + |dI/dy|
        rubikFace.profiler.markTime(Profiler.Event.EDGE);
        blur_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.CANNY) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(canny_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Dilation Image Process
         */
        Mat dilate_image = new Mat();
        Imgproc.dilate(canny_image, dilate_image, Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(
                MenuAndParams.dilationKernelSizeParam.value, MenuAndParams.dilationKernelSizeParam.value)));
        rubikFace.profiler.markTime(Profiler.Event.DILATION);
        canny_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.DILATION) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(dilate_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Contour Generation 
         */
        List<MatOfPoint> contours = new LinkedList<MatOfPoint>();
        Mat hierarchy = new Mat();
        Imgproc.findContours(dilate_image, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE); // Note: tried other TC89 options, but no significant change or improvement on cpu time.
        rubikFace.profiler.markTime(Profiler.Event.CONTOUR);
        dilate_image.release();

        // Create gray scale image but in RGB format, and then add yellow colored contours on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.CONTOUR) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 4);
            Imgproc.drawContours(rgba_gray_image, contours, -1, ColorTileEnum.YELLOW.cvColor, 3);
            Core.putText(rgba_gray_image, "Num Contours: " + contours.size(), new Point(500, 50),
                    Constants.FontFace, 4, ColorTileEnum.RED.cvColor, 4);
            gray_image.release();
            image.release();
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Polygon Detection
         */
        List<Rhombus> polygonList = new LinkedList<Rhombus>();
        for (MatOfPoint contour : contours) {

            // Keep only counter-clockwise contours.  A clockwise contour is reported as a negative number.
            double contourArea = Imgproc.contourArea(contour, true);
            if (contourArea < 0.0)
                continue;

            // Keep only reasonable area contours
            if (contourArea < MenuAndParams.minimumContourAreaParam.value)
                continue;

            // Float, rather than double, coordinates are required by the approximate polygon detection algorithm.
            MatOfPoint2f contour2f = new MatOfPoint2f();
            MatOfPoint2f polygon2f = new MatOfPoint2f();
            MatOfPoint polygon = new MatOfPoint();

            // Make a polygon out of a contour with the provided epsilon accuracy parameter.
            // It uses the Douglas-Peucker algorithm http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
            contour.convertTo(contour2f, CvType.CV_32FC2);
            Imgproc.approxPolyDP(contour2f, polygon2f, MenuAndParams.polygonEpsilonParam.value, // The maximum distance between the original curve and its approximation.
                    true); // Resulting polygon representation is "closed": its first and last vertices are connected.
            polygon2f.convertTo(polygon, CvType.CV_32S);

            polygonList.add(new Rhombus(polygon));
        }

        rubikFace.profiler.markTime(Profiler.Event.POLYGON);

        // Create gray scale image but in RGB format, and then add yellow colored polygons on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.POLYGON) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 4);
            for (Rhombus polygon : polygonList)
                polygon.draw(rgba_gray_image, ColorTileEnum.YELLOW.cvColor);
            Core.putText(rgba_gray_image, "Num Polygons: " + polygonList.size(), new Point(500, 50),
                    Constants.FontFace, 3, ColorTileEnum.RED.cvColor, 4);
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Rhombus Tile Recognition
         * 
         * From polygon list, produces a list of suitable Parallelograms (Rhombi).
         */
        Log.i(Constants.TAG, String.format("Rhombus:   X    Y   Area   a-a  b-a a-l b-l gamma"));
        List<Rhombus> rhombusList = new LinkedList<Rhombus>();
        // Get only valid Rhombus(es) : actually parallelograms.
        for (Rhombus rhombus : polygonList) {
            rhombus.qualify();
            if (rhombus.status == Rhombus.StatusEnum.VALID)
                rhombusList.add(rhombus);
        }

        // Filtering w.r.t. Rhombus set characteristics
        Rhombus.removedOutlierRhombi(rhombusList);

        rubikFace.profiler.markTime(Profiler.Event.RHOMBUS);

        // Create gray scale image but in RGB format, and then add yellow colored Rhombi (parallelograms) on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.RHOMBUS) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 4);
            for (Rhombus rhombus : rhombusList)
                rhombus.draw(rgba_gray_image, ColorTileEnum.YELLOW.cvColor);
            Core.putText(rgba_gray_image, "Num Rhombus: " + rhombusList.size(), new Point(500, 50),
                    Constants.FontFace, 4, ColorTileEnum.RED.cvColor, 4);
            gray_image.release();
            image.release();
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Face Recognition
         * 
         * Takes a collection of Rhombus objects and determines if a valid
         * Rubik Face can be determined from them, and then also determines 
         * initial color for all nine tiles. 
         */
        rubikFace.processRhombuses(rhombusList, image);
        Log.i(Constants.TAG, "Face Solution = " + rubikFace.faceRecognitionStatus);
        rubikFace.profiler.markTime(Profiler.Event.FACE);
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.FACE_DETECT) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            return annotation.drawAnnotation(image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Cube Pose Estimation
         * 
         * Reconstruct the Rubik Cube 3D location and orientation in GL space coordinates.
         */
        if (rubikFace.faceRecognitionStatus == FaceRecognitionStatusEnum.SOLVED) {

            // Obtain Cube Pose from Face Grid information.
            stateModel.cubePose = CubePoseEstimator.poseEstimation(rubikFace, image, stateModel);

            // Process measurement update on Kalman Filter (if it exists).
            KalmanFilter kalmanFilter = stateModel.kalmanFilter;
            if (kalmanFilter != null)
                kalmanFilter.measurementUpdate(stateModel.cubePose, System.currentTimeMillis());

            // Process measurement update on Kalman Filter ALSM (if it exists).
            KalmanFilterALSM kalmanFilterALSM = stateModel.kalmanFilterALSM;
            if (kalmanFilterALSM != null)
                kalmanFilterALSM.measurementUpdate(stateModel.cubePose, System.currentTimeMillis());
        } else {
            stateModel.cubePose = null;
        }
        rubikFace.profiler.markTime(Profiler.Event.POSE);

        /* **********************************************************************
         * **********************************************************************
         * Application State Machine
         * 
         * Will provide user instructions.
         * Will determine when we are on-face and off-face
         * Will determine when we are on-new-face
         * Will change state 
         */
        appStateMachine.onFaceEvent(rubikFace);
        rubikFace.profiler.markTime(Profiler.Event.CONTROLLER);
        rubikFace.profiler.markTime(Profiler.Event.TOTAL);

        // Normal return point.
        stateModel.activeRubikFace = rubikFace;
        return annotation.drawAnnotation(image);

        // =+= Issue: how to get stdio to print as error and not warning in logcat?
    } catch (CvException e) {
        Log.e(Constants.TAG, "CvException: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "CvException: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    } catch (Exception e) {
        Log.e(Constants.TAG, "Exception: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "Exception: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    } catch (Error e) {
        Log.e(Constants.TAG, "Error: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "Error: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    }

    return annotation.drawAnnotation(image);
}

From source file:org.lasarobotics.vision.detection.PrimitiveDetection.java

License:Open Source License

/**
 * Locate rectangles in an image
 *
 * @param grayImage Grayscale image
 * @return Rectangle locations
 */
public RectangleLocationResult locateRectangles(Mat grayImage) {
    Mat gray = grayImage.clone();

    //Filter out some noise by halving then doubling size
    Filter.downsample(gray, 2);
    Filter.upsample(gray, 2);

    //Mat is short for Matrix, and here it is used to store an image.
    //A Mat is n-dimensional, but as an image it is two-dimensional.
    Mat cacheHierarchy = new Mat();
    Mat grayTemp = new Mat();
    List<Rectangle> rectangles = new ArrayList<>();
    List<Contour> contours = new ArrayList<>();

    //This finds the edges using a Canny Edge Detector
    //It is passed the grayscale image, a temp Mat, the lower detection threshold for an edge,
    //the higher detection threshold, the aperture (blurring) of the image - higher is better
    //for long, smooth edges - and whether a more accurate (but time-expensive) gradient
    //should be used (true = more accurate)
    //Note: the edges are stored in "grayTemp", which is an image where everything
    //is black except for gray-scale lines delineating the edges.
    Imgproc.Canny(gray, grayTemp, 0, THRESHOLD_CANNY, APERTURE_CANNY, true);
    //make the white edge lines twice as big, while leaving the image size constant
    //(dilate the edge image "grayTemp", which findContours reads below)
    Filter.dilate(grayTemp, 2);

    List<MatOfPoint> contoursTemp = new ArrayList<>();
    //Find contours - the parameters here are very important to compression and retention
    //grayTemp is the image from which the contours are found,
    //contoursTemp is where the resultant contours are stored (note: color is not retained),
    //cacheHierarchy is the parent-child relationship between the contours (e.g. a contour
    //inside of another is its child),
    //Imgproc.RETR_LIST disables the hierarchical relationships being returned,
    //Imgproc.CHAIN_APPROX_SIMPLE means that the contour is compressed from a massive chain of
    //paired coordinates to just the endpoints of each segment (e.g. an up-right rectangular
    //contour is encoded with 4 points.)
    Imgproc.findContours(grayTemp, contoursTemp, cacheHierarchy, Imgproc.RETR_LIST,
            Imgproc.CHAIN_APPROX_SIMPLE);
    //MatOfPoint2f is a MatOfPoint (matrix of points) represented by floats instead of ints
    MatOfPoint2f approx = new MatOfPoint2f();
    //For each contour, test whether the contour is a rectangle
    //List<Contour> contours = new ArrayList<>()
    for (MatOfPoint co : contoursTemp) {
        //converting the MatOfPoint to MatOfPoint2f
        MatOfPoint2f matOfPoint2f = new MatOfPoint2f(co.toArray());
        //converting the matrix to a Contour
        Contour c = new Contour(co);

        //Attempt to fit the contour to the best polygon
        //input: matOfPoint2f, which is the contour found earlier
        //output: approx, the MatOfPoint2f that holds the new polygon with fewer vertices
        //basically, it smooths out the edges using the third parameter as its approximation accuracy
        //final parameter determines whether the new approximation must be closed (true=closed)
        Imgproc.approxPolyDP(matOfPoint2f, approx, c.arcLength(true) * EPLISON_APPROX_TOLERANCE_FACTOR, true);

        //converting the MatOfPoint2f to a contour
        Contour approxContour = new Contour(approx);

        //Make sure the contour is big enough, closed (convex), and has exactly 4 points
        if (approx.toArray().length == 4 && Math.abs(approxContour.area()) > 1000 && approxContour.isClosed()) {

            //TODO contours and rectangles array may not match up, but why would they?
            contours.add(approxContour);

            //Check each angle to be approximately 90 degrees
            //Done by comparing the three points constituting the angle of each corner
            double maxCosine = 0;
            for (int j = 2; j < 5; j++) {
                double cosine = Math.abs(MathUtil.angle(approx.toArray()[j % 4], approx.toArray()[j - 2],
                        approx.toArray()[j - 1]));
                maxCosine = Math.max(maxCosine, cosine);
            }

            if (maxCosine < MAX_COSINE_VALUE) {
                //Convert the points to a rectangle instance
                rectangles.add(new Rectangle(approx.toArray()));
            }
        }
    }

    return new RectangleLocationResult(contours, rectangles);
}

From source file:org.usfirst.frc.team2084.CMonster2016.vision.Target.java

License:Open Source License

/**
 * Creates a new possible target based on the specified blob and calculates
 * its score.
 *
 * @param contour the shape of the possible target
 * @param grayImage grayscale image used to refine the corner locations
 */
public Target(MatOfPoint contour, Mat grayImage) {
    // Simplify contour to make the corner finding algorithm work better
    MatOfPoint2f fContour = new MatOfPoint2f();
    contour.convertTo(fContour, CvType.CV_32F);
    Imgproc.approxPolyDP(fContour, fContour, VisionParameters.getGoalApproxPolyEpsilon(), true);
    fContour.convertTo(contour, CvType.CV_32S);

    this.contour = contour;

    // Check area, and don't do any calculations if it is not valid
    if (validArea = validateArea()) {

        // Find a bounding rectangle
        RotatedRect rect = Imgproc.minAreaRect(fContour);

        Point[] rectPoints = new Point[4];
        rect.points(rectPoints);

        for (int j = 0; j < rectPoints.length; j++) {
            Point rectPoint = rectPoints[j];

            double minDistance = Double.MAX_VALUE;
            Point point = null;

            for (int i = 0; i < contour.rows(); i++) {
                Point contourPoint = new Point(contour.get(i, 0));
                double dist = distance(rectPoint, contourPoint);
                if (dist < minDistance) {
                    minDistance = dist;
                    point = contourPoint;
                }
            }

            rectPoints[j] = point;
        }
        MatOfPoint2f rectMat = new MatOfPoint2f(rectPoints);
        // Refine the corners to improve accuracy
        Imgproc.cornerSubPix(grayImage, rectMat, new Size(4, 10), new Size(-1, -1),
                new TermCriteria(TermCriteria.EPS + TermCriteria.COUNT, 30, 0.1));
        rectPoints = rectMat.toArray();

        // Identify each corner
        SortedMap<Double, List<Point>> x = new TreeMap<>();
        Arrays.stream(rectPoints).forEach((p) -> {
            List<Point> points;
            if ((points = x.get(p.x)) == null) {
                x.put(p.x, points = new LinkedList<>());
            }
            points.add(p);
        });

        int i = 0;
        for (Iterator<List<Point>> it = x.values().iterator(); it.hasNext();) {
            List<Point> s = it.next();

            for (Point p : s) {
                switch (i) {
                case 0:
                    topLeft = p;
                    break;
                case 1:
                    bottomLeft = p;
                    break;
                case 2:
                    topRight = p;
                    break;
                case 3:
                    bottomRight = p;
                }
                i++;
            }
        }

        // Organize corners
        if (topLeft.y > bottomLeft.y) {
            Point p = bottomLeft;
            bottomLeft = topLeft;
            topLeft = p;
        }

        if (topRight.y > bottomRight.y) {
            Point p = bottomRight;
            bottomRight = topRight;
            topRight = p;
        }

        // Create corners for centroid calculation
        corners = new MatOfPoint2f(rectPoints);

        // Calculate center
        Moments moments = Imgproc.moments(corners);
        center = new Point(moments.m10 / moments.m00, moments.m01 / moments.m00);

        // Put the points in the correct order for solvePNP
        rectPoints[0] = topLeft;
        rectPoints[1] = topRight;
        rectPoints[2] = bottomLeft;
        rectPoints[3] = bottomRight;
        // Recreate corners in the new order
        corners = new MatOfPoint2f(rectPoints);

        widthTop = distance(topLeft, topRight);
        widthBottom = distance(bottomLeft, bottomRight);
        width = (widthTop + widthBottom) / 2.0;
        heightLeft = distance(topLeft, bottomLeft);
        heightRight = distance(topRight, bottomRight);
        height = (heightLeft + heightRight) / 2.0;

        Mat tvec = new Mat();

        // Calculate target's location
        Calib3d.solvePnP(OBJECT_POINTS, corners, CAMERA_MAT, DISTORTION_MAT, rotation, tvec, false,
                Calib3d.CV_P3P);

        // =======================================
        // Position and Orientation Transformation
        // =======================================

        double armAngle = VisionResults.getArmAngle();

        // Flip y axis to point upward
        Core.multiply(tvec, SIGN_NORMALIZATION_MATRIX, tvec);

        // Shift origin to arm pivot point, on the robot's centerline
        CoordinateMath.translate(tvec, CAMERA_X_OFFSET, CAMERA_Y_OFFSET, ARM_LENGTH);

        // Align axes with ground
        CoordinateMath.rotateX(tvec, -armAngle);
        Core.add(rotation, new MatOfDouble(armAngle, 0, 0), rotation);

        // Shift origin to robot center of rotation
        CoordinateMath.translate(tvec, 0, ARM_PIVOT_Y_OFFSET, -ARM_PIVOT_Z_OFFSET);

        double xPosFeet = tvec.get(0, 0)[0];
        double yPosFeet = tvec.get(1, 0)[0];
        double zPosFeet = tvec.get(2, 0)[0];

        // Old less effective aiming heading and distance calculation
        // double pixelsToFeet = TARGET_WIDTH / width;

        // distance = (TARGET_WIDTH * HighGoalProcessor.IMAGE_SIZE.width
        // / (2 * width ** Math.tan(VisionParameters.getFOVAngle() / 2)));
        // double xPosFeet = (center.x - (HighGoalProcessor.IMAGE_SIZE.width
        // / 2)) * pixelsToFeet;
        // double yPosFeet = -(center.y -
        // (HighGoalProcessor.IMAGE_SIZE.height / 2)) * pixelsToFeet;

        distance = Math.sqrt(xPosFeet * xPosFeet + zPosFeet * zPosFeet);

        position = new Point3(xPosFeet, yPosFeet, zPosFeet);

        xGoalAngle = Math.atan(xPosFeet / zPosFeet);
        yGoalAngle = Math.atan(yPosFeet / zPosFeet);

        validate();
        score = calculateScore();
    } else {
        valid = false;
    }
}

From source file:simeav.filtros.instanciaciones.DetectorModulosEstandar.java

@Override
public Mat detectarModulos(Mat original, Diagrama diagrama) {
    Imgproc.blur(original, original, new Size(15, 15));
    original = Utils.dilate(original);
    Mat jerarquia = new Mat();
    ArrayList<MatOfPoint> contornos = new ArrayList<>();
    Imgproc.findContours(original.clone(), contornos, jerarquia, Imgproc.RETR_CCOMP,
            Imgproc.CHAIN_APPROX_SIMPLE);
    ArrayList<MatOfPoint> cp = new ArrayList<>(contornos.size());
    Map<Integer, Rect> rectangulos = new HashMap<>();
    Integer id_cuadrado = 0;
    Mat resultado = Mat.zeros(original.size(), CvType.CV_8U);
    for (int i = contornos.size() - 1; i >= 0; i--) {
        if (jerarquia.get(0, i)[3] > -1) {
            MatOfPoint2f contorno2f = new MatOfPoint2f();
            contorno2f.fromList(contornos.get(i).toList());
            MatOfPoint2f c = new MatOfPoint2f();
            Imgproc.approxPolyDP(contorno2f, c, 3, true);
            cp.add(new MatOfPoint(c.toArray()));
            int lados = cp.get(cp.size() - 1).height();
            if (lados >= 4 && lados < 12) {
                rectangulos.put(id_cuadrado, Imgproc.boundingRect(new MatOfPoint(c.toArray())));
                Point tl = new Point(rectangulos.get(id_cuadrado).tl().x - 20,
                        rectangulos.get(id_cuadrado).tl().y - 20);
                Point br = new Point(rectangulos.get(id_cuadrado).br().x + 20,
                        rectangulos.get(id_cuadrado).br().y + 20);
                Core.rectangle(resultado, tl, br, new Scalar(255, 255, 255), -1);
                diagrama.addModulo(id_cuadrado, new Rect(tl, br));
                Imgproc.drawContours(resultado, contornos, i, new Scalar(0, 0, 0), -1);
                id_cuadrado++;
            }
        }
    }
    return resultado;
}

From source file:tv.danmaku.ijk.media.example.activities.VideoActivity.java

License:Apache License

public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();

    //        return mRgba;
    //        iThreshold = 10000;

    //Imgproc.blur(mRgba, mRgba, new Size(5,5));
    Imgproc.GaussianBlur(mRgba, mRgba, new org.opencv.core.Size(3, 3), 1, 1);
    //Imgproc.medianBlur(mRgba, mRgba, 3);

    if (!mIsColorSelected)
        return mRgba;

    List<MatOfPoint> contours = mDetector.getContours();
    mDetector.process(mRgba);

    Log.d(TAG, "Contours count: " + contours.size());

    if (contours.size() <= 0) {
        return mRgba;
    }

    RotatedRect rect = Imgproc.minAreaRect(new MatOfPoint2f(contours.get(0).toArray()));

    double boundWidth = rect.size.width;
    double boundHeight = rect.size.height;
    int boundPos = 0;

    for (int i = 1; i < contours.size(); i++) {
        rect = Imgproc.minAreaRect(new MatOfPoint2f(contours.get(i).toArray()));
        if (rect.size.width * rect.size.height > boundWidth * boundHeight) {
            boundWidth = rect.size.width;
            boundHeight = rect.size.height;
            boundPos = i;
        }
    }

    Rect boundRect = Imgproc.boundingRect(new MatOfPoint(contours.get(boundPos).toArray()));
    Imgproc.rectangle(mRgba, boundRect.tl(), boundRect.br(), CONTOUR_COLOR_WHITE, 2, 8, 0);

    Log.d(TAG, " Row start [" + (int) boundRect.tl().y + "] row end [" + (int) boundRect.br().y
            + "] Col start [" + (int) boundRect.tl().x + "] Col end [" + (int) boundRect.br().x + "]");

    int rectHeightThresh = 0;
    double a = boundRect.br().y - boundRect.tl().y;
    a = a * 0.7;
    a = boundRect.tl().y + a;

    Log.d(TAG, " A [" + a + "] br y - tl y = [" + (boundRect.br().y - boundRect.tl().y) + "]");

    //Core.rectangle( mRgba, boundRect.tl(), boundRect.br(), CONTOUR_COLOR, 2, 8, 0 );
    Imgproc.rectangle(mRgba, boundRect.tl(), new Point(boundRect.br().x, a), CONTOUR_COLOR, 2, 8, 0);

    MatOfPoint2f pointMat = new MatOfPoint2f();
    Imgproc.approxPolyDP(new MatOfPoint2f(contours.get(boundPos).toArray()), pointMat, 3, true);
    contours.set(boundPos, new MatOfPoint(pointMat.toArray()));

    MatOfInt hull = new MatOfInt();
    MatOfInt4 convexDefect = new MatOfInt4();
    Imgproc.convexHull(new MatOfPoint(contours.get(boundPos).toArray()), hull);

    if (hull.toArray().length < 3)
        return mRgba;

    Imgproc.convexityDefects(new MatOfPoint(contours.get(boundPos).toArray()), hull, convexDefect);

    List<MatOfPoint> hullPoints = new LinkedList<MatOfPoint>();
    List<Point> listPo = new LinkedList<Point>();
    for (int j = 0; j < hull.toList().size(); j++) {
        listPo.add(contours.get(boundPos).toList().get(hull.toList().get(j)));
    }

    MatOfPoint e = new MatOfPoint();
    e.fromList(listPo);
    hullPoints.add(e);

    List<MatOfPoint> defectPoints = new LinkedList<MatOfPoint>();
    List<Point> listPoDefect = new LinkedList<Point>();
    for (int j = 0; j < convexDefect.toList().size(); j = j + 4) {
        Point farPoint = contours.get(boundPos).toList().get(convexDefect.toList().get(j + 2));
        Integer depth = convexDefect.toList().get(j + 3);
        if (depth > iThreshold && farPoint.y < a) {
            listPoDefect.add(contours.get(boundPos).toList().get(convexDefect.toList().get(j + 2)));
        }
        Log.d(TAG, "defects [" + j + "] " + convexDefect.toList().get(j + 3));
    }

    MatOfPoint e2 = new MatOfPoint();
    e2.fromList(listPoDefect);
    defectPoints.add(e2);

    Log.d(TAG, "hull: " + hull.toList());
    Log.d(TAG, "defects: " + convexDefect.toList());

    Imgproc.drawContours(mRgba, hullPoints, -1, CONTOUR_COLOR, 3);

    int defectsTotal = (int) convexDefect.total();
    Log.d(TAG, "Defect total " + defectsTotal);

    this.numberOfFingers = listPoDefect.size();
    if (this.numberOfFingers > 5)
        this.numberOfFingers = 5;

    mHandler.post(mUpdateFingerCountResults);

    for (Point p : listPoDefect) {
        Imgproc.circle(mRgba, p, 6, new Scalar(255, 0, 255));
    }

    return mRgba;
}

From source file:video.PictureAnalyser.java

public List<MatOfPoint> getConturs(Scalar low, Scalar high, Mat img) {

    Mat imgThresholded = new Mat();
    Mat imgThresholded2 = new Mat();
    Core.inRange(img, low, high, imgThresholded);
    if (low.val[0] < 0) {
        low.val[0] = 180 + low.val[0];
        high.val[0] = 179;

        Core.inRange(img, low, high, imgThresholded2);
        Core.bitwise_or(imgThresholded, imgThresholded2, imgThresholded);

    }
    if (high.val[0] > 179) {
        low.val[0] = 0;
        high.val[0] = high.val[0] - 180;

        Core.inRange(img, low, high, imgThresholded2);
        Core.bitwise_or(imgThresholded, imgThresholded2, imgThresholded);
    }

    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    int dilation_size = 3;
    Mat element1 = Imgproc.getStructuringElement(Imgproc.MORPH_RECT,
            new Size(2 * dilation_size + 1, 2 * dilation_size + 1));
    Imgproc.dilate(imgThresholded, imgThresholded, element1);
    Imgproc.findContours(imgThresholded, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    MatOfPoint2f approxCurve = new MatOfPoint2f();

    for (int i = 0; i < contours.size(); i++) {
        MatOfPoint2f contour2f = new MatOfPoint2f(contours.get(i).toArray());
        double approxDistance = Imgproc.arcLength(contour2f, true) * 0.02;
        Imgproc.approxPolyDP(contour2f, approxCurve, approxDistance, true);
        MatOfPoint points = new MatOfPoint(approxCurve.toArray());
        Rect rect = Imgproc.boundingRect(points);
        int area = (rect.width) * (rect.height);
        // discard the contour if its bounding-box area is too small
        if (area <= 500) {
            contours.remove(i);
            i--;
        }
    }

    return contours;
}