Example usage for org.opencv.core MatOfPoint2f MatOfPoint2f

Introduction

On this page you can find example usages of the org.opencv.core MatOfPoint2f default constructor, MatOfPoint2f().

Prototype

public MatOfPoint2f() 
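
Before the usage examples, a minimal, self-contained sketch of constructing and populating a MatOfPoint2f may help; the point values below are arbitrary placeholders, and the snippet assumes the OpenCV native library is available on the library path.

import java.util.Arrays;
import java.util.List;

import org.opencv.core.Core;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;

public class MatOfPoint2fSketch {
    public static void main(String[] args) {
        // The OpenCV native library must be loaded before any Mat is created.
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

        // Empty constructor, then bulk-fill from a List<Point>.
        MatOfPoint2f fromList = new MatOfPoint2f();
        fromList.fromList(Arrays.asList(new Point(0, 0), new Point(10, 5)));

        // Varargs constructor from individual points.
        MatOfPoint2f fromPoints = new MatOfPoint2f(new Point(0, 0), new Point(10, 5));

        // Read the contents back out.
        Point[] asArray = fromPoints.toArray();
        List<Point> asList = fromPoints.toList();
        System.out.println(asArray.length + " points: " + asList);
    }
}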

Usage

From source file:org.ar.rubik.CubeReconstructor.java

License:Open Source License

/**
 * Pose Estimation
 * 
 * Deduce real-world cube coordinates and rotation.
 * 
 * @param rubikFace
 * @param image 
 * @param stateModel 
 */
public void poseEstimation(RubikFace rubikFace, Mat image, StateModel stateModel) {

    if (rubikFace == null)
        return;

    if (rubikFace.faceRecognitionStatus != FaceRecognitionStatusEnum.SOLVED)
        return;

    LeastMeansSquare lmsResult = rubikFace.lmsResult;

    if (lmsResult == null)
        return;

    // OpenCV pose estimation requires at least four points.
    if (rubikFace.rhombusList.size() < 4)
        return;

    // List of real world point and screen points that correspond.
    List<Point3> objectPointsList = new ArrayList<Point3>(9);
    List<Point> imagePointsList = new ArrayList<Point>(9);

    // Create list of image (in 2D) and object (in 3D) points.
    // Loop over Rubik Face Tiles/Rhombi
    for (int n = 0; n < 3; n++) {
        for (int m = 0; m < 3; m++) {

            Rhombus rhombus = rubikFace.faceRhombusArray[n][m];

            // Only use this cell if its Rhombus is non-null.
            if (rhombus != null) {

                // Obtain center of Rhombus in screen image coordinates
                // Convention:
                //  o X is zero on the left, and increases to the right.
                //  o Y is zero on the top and increases downward.
                Point imagePoint = new Point(rhombus.center.x, rhombus.center.y);
                imagePointsList.add(imagePoint);

                // Convert the actual (array) indices N and M to the conceptual
                // indices used in the design doc, so that:
                //  o the nn vector points to the right and upwards.
                //  o the mm vector points to the left and upwards.
                int mm = 2 - n;
                int nn = 2 - m;

                // Calculate center of Tile in OpenCV World Space Coordinates
                // Convention:
                //  o X is zero in the center, and increases to the left.
                //  o Y is zero in the center and increases downward.
                //  o Z is zero (at the world coordinate origin) and increases away from the camera.
                float x = (1 - mm) * 0.66666f;
                float y = -1.0f;
                float z = -1.0f * (1 - nn) * 0.666666f;
                Point3 objectPoint = new Point3(x, y, z);
                objectPointsList.add(objectPoint);
            }
        }
    }

    // Cast image point list into OpenCV Matrix.
    MatOfPoint2f imagePoints = new MatOfPoint2f();
    imagePoints.fromList(imagePointsList);

    // Cast object point list into OpenCV Matrix.
    MatOfPoint3f objectPoints = new MatOfPoint3f();
    objectPoints.fromList(objectPointsList);

    Mat cameraMatrix = stateModel.cameraParameters.getOpenCVCameraMatrix();
    MatOfDouble distCoeffs = new MatOfDouble();
    Mat rvec = new Mat();
    Mat tvec = new Mat();

    //      Log.e(Constants.TAG, "Image Points: " + imagePoints.dump());
    //      Log.e(Constants.TAG, "Object Points: " + objectPoints.dump());

    //      boolean result = 
    Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);

    Log.v(Constants.TAG, String.format("Open CV Rotation Vector x=%4.2f y=%4.2f z=%4.2f", rvec.get(0, 0)[0],
            rvec.get(1, 0)[0], rvec.get(2, 0)[0]));

    // Convert from OpenCV to OpenGL World Coordinates
    x = +1.0f * (float) tvec.get(0, 0)[0];
    y = -1.0f * (float) tvec.get(1, 0)[0];
    z = -1.0f * (float) tvec.get(2, 0)[0];

    // =+= Add manual offset correction to translation  
    x += MenuAndParams.xTranslationOffsetParam.value;
    y += MenuAndParams.yTranslationOffsetParam.value;
    z += MenuAndParams.zTranslationOffsetParam.value;

    // Convert the Rotation Vector from the OpenCV axis polarity convention to the OpenGL convention
    rvec.put(1, 0, -1.0f * rvec.get(1, 0)[0]);
    rvec.put(2, 0, -1.0f * rvec.get(2, 0)[0]);

    // =+= Add manual offset correction to Rotation
    rvec.put(0, 0, rvec.get(0, 0)[0] + MenuAndParams.xRotationOffsetParam.value * Math.PI / 180.0); // X rotation
    rvec.put(1, 0, rvec.get(1, 0)[0] + MenuAndParams.yRotationOffsetParam.value * Math.PI / 180.0); // Y rotation
    rvec.put(2, 0, rvec.get(2, 0)[0] + MenuAndParams.zRotationOffsetParam.value * Math.PI / 180.0); // Z rotation

    // Create an OpenCV Rotation Matrix (3x3) from the Rotation Vector
    Mat rMatrix = new Mat(3, 3, CvType.CV_32FC1);
    Calib3d.Rodrigues(rvec, rMatrix);
    Log.v(Constants.TAG, "Rodrigues Matrix: " + rMatrix.dump());

    /*
     * Create an OpenGL Rotation Matrix
     * Notes:
     *   o  OpenGL matrices are stored in column-major order.
     *   o  The OpenCV Rodrigues Rotation Matrix is 3x3, whereas the OpenGL Rotation Matrix is 4x4.
     */

    // Initialize all Rotational Matrix elements to zero.
    for (int i = 0; i < 16; i++)
        rotationMatrix[i] = 0.0f;

    // Initialize element [3][3] to 1.0: i.e., the "w" component in homogeneous coordinates
    rotationMatrix[3 * 4 + 3] = 1.0f;

    // Copy OpenCV matrix to OpenGL matrix element by element.
    for (int r = 0; r < 3; r++)
        for (int c = 0; c < 3; c++)
            rotationMatrix[r + c * 4] = (float) (rMatrix.get(r, c)[0]);

    // Diagnostics
    for (int r = 0; r < 4; r++)
        Log.v(Constants.TAG, String.format("Rotation Matrix  r=%d  [%5.2f  %5.2f  %5.2f  %5.2f]", r,
                rotationMatrix[r + 0], rotationMatrix[r + 4], rotationMatrix[r + 8], rotationMatrix[r + 12]));

    //      Log.e(Constants.TAG, "Result: " + result);
    //      Log.e(Constants.TAG, "Camera: " + cameraMatrix.dump());
    //      Log.e(Constants.TAG, "Rotation: " + rvec.dump());
    //      Log.e(Constants.TAG, "Translation: " + tvec.dump());

    //      // Reporting in OpenGL World Coordinates
    //      Core.rectangle(image, new Point(0, 50), new Point(1270, 150), Constants.ColorBlack, -1);
    //      Core.putText(image, String.format("Translation  x=%4.2f y=%4.2f z=%4.2f", x, y, z), new Point(50, 100), Constants.FontFace, 3, Constants.ColorWhite, 3);
    //      Core.putText(image, String.format("Rotation     x=%4.0f y=%4.0f z=%4.0f", cubeXrotation, cubeYrotation, cubeZrotation), new Point(50, 150), Constants.FontFace, 3, Constants.ColorWhite, 3);
}
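
The MatOfPoint2f-specific pattern in the example above reduces to a few lines: collect corresponding 2D screen points and 3D model points in plain Lists, wrap them with fromList(), and hand them to solvePnP. A minimal sketch under those assumptions, with the point lists and camera matrix passed in as parameters:

static void estimatePose(List<Point> imagePointsList, List<Point3> objectPointsList, Mat cameraMatrix) {
    MatOfPoint2f imagePoints = new MatOfPoint2f();
    imagePoints.fromList(imagePointsList);       // List<Point>: screen coordinates
    MatOfPoint3f objectPoints = new MatOfPoint3f();
    objectPoints.fromList(objectPointsList);     // List<Point3>: model coordinates
    Mat rvec = new Mat();                        // receives the rotation vector
    Mat tvec = new Mat();                        // receives the translation vector
    Calib3d.solvePnP(objectPoints, imagePoints, cameraMatrix, new MatOfDouble(), rvec, tvec);
}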

From source file:org.ar.rubik.ImageRecognizer.java

License:Open Source License

/**
 * On Camera Frame
 * 
 * Process frame image through Rubik Face recognition possibly resulting in a state change.
 * 
 *  (non-Javadoc)
 * @see org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2#onCameraFrame(org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame)
 */
@Override
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {

    //      Log.e(Constants.TAG, "CV Thread ID = " + Thread.currentThread().getId());

    // Just display error message if it is non-null.
    if (errorImage != null)
        return errorImage;

    Mat image = inputFrame.rgba();
    Size imageSize = image.size();
    Log.v(Constants.TAG_CAL, "Input Frame width=" + imageSize.width + " height=" + imageSize.height);
    if (imageSize.width != stateModel.openCVSize.width || imageSize.height != stateModel.openCVSize.height)
        Log.e(Constants.TAG_CAL, "State Model openCVSize does not agree with input frame!");

    // Save or Recall image as requested
    switch (MenuAndParams.imageSourceMode) {
    case NORMAL:
        break;
    case SAVE_NEXT:
        Util.saveImage(image);
        MenuAndParams.imageSourceMode = ImageSourceModeEnum.NORMAL;
        break;
    case PLAYBACK:
        image = Util.recallImage();
    default:
        break;
    }

    // Calculate and display Frames Per Second
    long newTimeStamp = System.currentTimeMillis();
    if (framesPerSecondTimeStamp > 0) {
        long frameTime = newTimeStamp - framesPerSecondTimeStamp;
        double framesPerSecond = 1000.0 / frameTime;
        String string = String.format("%4.1f FPS", framesPerSecond);
        Core.putText(image, string, new Point(50, 700), Constants.FontFace, 2, ColorTileEnum.WHITE.cvColor, 2);
    }
    framesPerSecondTimeStamp = newTimeStamp;

    try {

        // Initialize
        RubikFace rubikFace = new RubikFace();
        rubikFace.profiler.markTime(Profiler.Event.START);
        Log.i(Constants.TAG, "============================================================================");

        /* **********************************************************************
         * **********************************************************************
         * Return Original Image
         */
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.DIRECT) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            return annotation.drawAnnotation(image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Process to Grey Scale
         * 
         * This algorithm highlights areas that are all of nearly
         * the same hue.  In particular, cube faces should be highlighted.
         */
        Mat greyscale_image = new Mat();
        Imgproc.cvtColor(image, greyscale_image, Imgproc.COLOR_BGR2GRAY);
        rubikFace.profiler.markTime(Profiler.Event.GREYSCALE);
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.GREYSCALE) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(greyscale_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Gaussian Filter Blur prevents getting a lot of false hits 
         */
        Mat blur_image = new Mat();

        int kernelSize = (int) MenuAndParams.gaussianBlurKernelSizeParam.value;
        kernelSize = kernelSize % 2 == 0 ? kernelSize + 1 : kernelSize; // make odd
        Imgproc.GaussianBlur(greyscale_image, blur_image, new Size(kernelSize, kernelSize), -1, -1);
        rubikFace.profiler.markTime(Profiler.Event.GAUSSIAN);
        greyscale_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.GAUSSIAN) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(blur_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Canny Edge Detection
         */
        Mat canny_image = new Mat();
        Imgproc.Canny(blur_image, canny_image, MenuAndParams.cannyLowerThresholdParam.value,
                MenuAndParams.cannyUpperThresholdParam.value, 3, // Sobel aperture size; a 3x3 Sobel matrix is the value typically used in the literature.
                false); // Use the cheap gradient calculation: norm = |dI/dx| + |dI/dy|
        rubikFace.profiler.markTime(Profiler.Event.EDGE);
        blur_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.CANNY) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(canny_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Dilation Image Process
         */
        Mat dilate_image = new Mat();
        Imgproc.dilate(canny_image, dilate_image, Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(
                MenuAndParams.dilationKernelSizeParam.value, MenuAndParams.dilationKernelSizeParam.value)));
        rubikFace.profiler.markTime(Profiler.Event.DILATION);
        canny_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.DILATION) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(dilate_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Contour Generation 
         */
        List<MatOfPoint> contours = new LinkedList<MatOfPoint>();
        Mat hierarchy = new Mat();
        Imgproc.findContours(dilate_image, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE); // Note: tried other TC89 options, but no significant change or improvement in CPU time.
        rubikFace.profiler.markTime(Profiler.Event.CONTOUR);
        dilate_image.release();

        // Create a gray-scale image in RGBA format, and then draw yellow contours on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.CONTOUR) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 4);
            Imgproc.drawContours(rgba_gray_image, contours, -1, ColorTileEnum.YELLOW.cvColor, 3);
            Core.putText(rgba_gray_image, "Num Contours: " + contours.size(), new Point(500, 50),
                    Constants.FontFace, 4, ColorTileEnum.RED.cvColor, 4);
            gray_image.release();
            image.release();
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Polygon Detection
         */
        List<Rhombus> polygonList = new LinkedList<Rhombus>();
        for (MatOfPoint contour : contours) {

            // Keep only counter clockwise contours.  A clockwise contour is reported as a negative number.
            double contourArea = Imgproc.contourArea(contour, true);
            if (contourArea < 0.0)
                continue;

            // Keep only reasonable area contours
            if (contourArea < MenuAndParams.minimumContourAreaParam.value)
                continue;

            // Floating point (rather than double) is required by the approximate polygon detection algorithm.
            MatOfPoint2f contour2f = new MatOfPoint2f();
            MatOfPoint2f polygon2f = new MatOfPoint2f();
            MatOfPoint polygon = new MatOfPoint();

            // Make a polygon out of a contour with the provided epsilon accuracy parameter.
            // This uses the Douglas-Peucker algorithm: http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
            contour.convertTo(contour2f, CvType.CV_32FC2);
            Imgproc.approxPolyDP(contour2f, polygon2f, MenuAndParams.polygonEpsilonParam.value, // The maximum distance between the original curve and its approximation.
                    true); // The resulting polygon representation is "closed": its first and last vertices are connected.
            polygon2f.convertTo(polygon, CvType.CV_32S);

            polygonList.add(new Rhombus(polygon));
        }

        rubikFace.profiler.markTime(Profiler.Event.POLYGON);

        // Create a gray-scale image in RGBA format, and then draw yellow polygons on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.POLYGON) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 4);
            for (Rhombus polygon : polygonList)
                polygon.draw(rgba_gray_image, ColorTileEnum.YELLOW.cvColor);
            Core.putText(rgba_gray_image, "Num Polygons: " + polygonList.size(), new Point(500, 50),
                    Constants.FontFace, 3, ColorTileEnum.RED.cvColor, 4);
            gray_image.release();
            image.release();
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Rhombus Tile Recognition
         * 
         * From polygon list, produces a list of suitable Parallelograms (Rhombi).
         */
        Log.i(Constants.TAG, String.format("Rhombus:   X    Y   Area   a-a  b-a a-l b-l gamma"));
        List<Rhombus> rhombusList = new LinkedList<Rhombus>();
        // Get only valid Rhombus(es) : actually parallelograms.
        for (Rhombus rhombus : polygonList) {
            rhombus.qualify();
            if (rhombus.status == Rhombus.StatusEnum.VALID)
                rhombusList.add(rhombus);
        }

        // Filtering w.r.t. Rhombus set characteristics
        Rhombus.removedOutlierRhombi(rhombusList);

        rubikFace.profiler.markTime(Profiler.Event.RHOMBUS);

        // Create a gray-scale image in RGBA format, and then draw yellow Rhombi (parallelograms) on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.RHOMBUS) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 4);
            for (Rhombus rhombus : rhombusList)
                rhombus.draw(rgba_gray_image, ColorTileEnum.YELLOW.cvColor);
            Core.putText(rgba_gray_image, "Num Rhombus: " + rhombusList.size(), new Point(500, 50),
                    Constants.FontFace, 4, ColorTileEnum.RED.cvColor, 4);
            gray_image.release();
            image.release();
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Face Recognition
         * 
         * Takes a collection of Rhombus objects and determines if a valid
         * Rubik Face can be determined from them, and then also determines 
         * initial color for all nine tiles. 
         */
        rubikFace.processRhombuses(rhombusList, image);
        Log.i(Constants.TAG, "Face Solution = " + rubikFace.faceRecognitionStatus);
        rubikFace.profiler.markTime(Profiler.Event.FACE);
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.FACE_DETECT) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            return annotation.drawAnnotation(image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Cube Pose Estimation
         * 
         * Reconstruct the Rubik Cube 3D location and orientation in GL space coordinates.
         */
        if (rubikFace.faceRecognitionStatus == FaceRecognitionStatusEnum.SOLVED) {

            // Obtain Cube Pose from Face Grid information.
            stateModel.cubePose = CubePoseEstimator.poseEstimation(rubikFace, image, stateModel);

            // Process measurement update on Kalman Filter (if it exists).
            KalmanFilter kalmanFilter = stateModel.kalmanFilter;
            if (kalmanFilter != null)
                kalmanFilter.measurementUpdate(stateModel.cubePose, System.currentTimeMillis());

            // Process measurement update on Kalman Filter ALSM (if it exists).
            KalmanFilterALSM kalmanFilterALSM = stateModel.kalmanFilterALSM;
            if (kalmanFilterALSM != null)
                kalmanFilterALSM.measurementUpdate(stateModel.cubePose, System.currentTimeMillis());
        } else {
            stateModel.cubePose = null;
        }
        rubikFace.profiler.markTime(Profiler.Event.POSE);

        /* **********************************************************************
         * **********************************************************************
         * Application State Machine
         * 
         * Will provide user instructions.
         * Will determine when we are on-face and off-face
         * Will determine when we are on-new-face
         * Will change state 
         */
        appStateMachine.onFaceEvent(rubikFace);
        rubikFace.profiler.markTime(Profiler.Event.CONTROLLER);
        rubikFace.profiler.markTime(Profiler.Event.TOTAL);

        // Normal return point.
        stateModel.activeRubikFace = rubikFace;
        return annotation.drawAnnotation(image);

        // =+= Issue: how to get stdio to print as error and not warning in logcat?
    } catch (CvException e) {
        Log.e(Constants.TAG, "CvException: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "CvException: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    } catch (Exception e) {
        Log.e(Constants.TAG, "Exception: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "Exception: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    } catch (Error e) {
        Log.e(Constants.TAG, "Error: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "Error: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    }

    return annotation.drawAnnotation(image);
}
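
The recurring MatOfPoint2f idiom in the example above is the polygon-approximation step: approxPolyDP accepts only floating-point points, so each integer MatOfPoint contour is converted to a MatOfPoint2f, simplified, and converted back. A condensed sketch of that round trip, with epsilon standing in for the accuracy parameter:

static MatOfPoint approximatePolygon(MatOfPoint contour, double epsilon) {
    MatOfPoint2f contour2f = new MatOfPoint2f();
    contour.convertTo(contour2f, CvType.CV_32FC2);            // int points -> float points
    MatOfPoint2f approx2f = new MatOfPoint2f();
    Imgproc.approxPolyDP(contour2f, approx2f, epsilon, true); // Douglas-Peucker simplification, closed polygon
    MatOfPoint polygon = new MatOfPoint();
    approx2f.convertTo(polygon, CvType.CV_32S);               // float points -> int points
    return polygon;
}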

From source file:org.lasarobotics.vision.detection.ObjectDetection.java

License:Open Source License

/**
 * Draw the object's location
 *
 * @param output         Image to draw on
 * @param objectAnalysis Object analysis information
 * @param sceneAnalysis  Scene analysis information
 */
public static void drawObjectLocation(Mat output, ObjectAnalysis objectAnalysis, SceneAnalysis sceneAnalysis) {
    List<Point> ptsObject = new ArrayList<>();
    List<Point> ptsScene = new ArrayList<>();

    KeyPoint[] keypointsObject = objectAnalysis.keypoints.toArray();
    KeyPoint[] keypointsScene = sceneAnalysis.keypoints.toArray();

    DMatch[] matches = sceneAnalysis.matches.toArray();

    for (DMatch match : matches) {
        //Get the keypoints from these matches
        ptsObject.add(keypointsObject[match.queryIdx].pt);
        ptsScene.add(keypointsScene[match.trainIdx].pt);
    }

    MatOfPoint2f matObject = new MatOfPoint2f();
    matObject.fromList(ptsObject);

    MatOfPoint2f matScene = new MatOfPoint2f();
    matScene.fromList(ptsScene);

    //Calculate homography of object in scene
    Mat homography = Calib3d.findHomography(matObject, matScene, Calib3d.RANSAC, 5.0f);

    //Create the unscaled array of corners, representing the object size
    Point[] cornersObject = new Point[4];
    cornersObject[0] = new Point(0, 0);
    cornersObject[1] = new Point(objectAnalysis.object.cols(), 0);
    cornersObject[2] = new Point(objectAnalysis.object.cols(), objectAnalysis.object.rows());
    cornersObject[3] = new Point(0, objectAnalysis.object.rows());

    MatOfPoint2f cornersSceneMatrix = new MatOfPoint2f(); // allocated by perspectiveTransform below
    MatOfPoint2f cornersObjectMatrix = new MatOfPoint2f(cornersObject);

    //Transform the object coordinates to the scene coordinates by the homography matrix
    Core.perspectiveTransform(cornersObjectMatrix, cornersSceneMatrix, homography);

    //Mat transform = Imgproc.getAffineTransform(cornersObjectMatrix, cornersSceneMatrix);

    //Draw the lines of the object on the scene
    Point[] cornersScene = cornersSceneMatrix.toArray();
    final ColorRGBA lineColor = new ColorRGBA("#00ff00");
    Drawing.drawLine(output, new Point(cornersScene[0].x + objectAnalysis.object.cols(), cornersScene[0].y),
            new Point(cornersScene[1].x + objectAnalysis.object.cols(), cornersScene[1].y), lineColor, 5);
    Drawing.drawLine(output, new Point(cornersScene[1].x + objectAnalysis.object.cols(), cornersScene[1].y),
            new Point(cornersScene[2].x + objectAnalysis.object.cols(), cornersScene[2].y), lineColor, 5);
    Drawing.drawLine(output, new Point(cornersScene[2].x + objectAnalysis.object.cols(), cornersScene[2].y),
            new Point(cornersScene[3].x + objectAnalysis.object.cols(), cornersScene[3].y), lineColor, 5);
    Drawing.drawLine(output, new Point(cornersScene[3].x + objectAnalysis.object.cols(), cornersScene[3].y),
            new Point(cornersScene[0].x + objectAnalysis.object.cols(), cornersScene[0].y), lineColor, 5);
}
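
The example above is the standard homography pattern with MatOfPoint2f: matched point lists go in via fromList(), findHomography computes the transform, and perspectiveTransform maps the object corners into the scene. A condensed sketch, with the matched point lists and a placeholder object size passed as parameters:

static MatOfPoint2f projectObjectCorners(List<Point> ptsObject, List<Point> ptsScene, double w, double h) {
    MatOfPoint2f matObject = new MatOfPoint2f();
    matObject.fromList(ptsObject);
    MatOfPoint2f matScene = new MatOfPoint2f();
    matScene.fromList(ptsScene);
    Mat homography = Calib3d.findHomography(matObject, matScene, Calib3d.RANSAC, 5.0);

    // w and h are placeholder object dimensions in pixels.
    MatOfPoint2f objectCorners = new MatOfPoint2f(
            new Point(0, 0), new Point(w, 0), new Point(w, h), new Point(0, h));
    MatOfPoint2f sceneCorners = new MatOfPoint2f();  // allocated by perspectiveTransform
    Core.perspectiveTransform(objectCorners, sceneCorners, homography);
    return sceneCorners;
}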

From source file:org.lasarobotics.vision.detection.PrimitiveDetection.java

License:Open Source License

/**
 * Locate rectangles in an image
 *
 * @param grayImage Grayscale image
 * @return Rectangle locations
 */
public RectangleLocationResult locateRectangles(Mat grayImage) {
    Mat gray = grayImage.clone();

    //Filter out some noise by halving then doubling size
    Filter.downsample(gray, 2);
    Filter.upsample(gray, 2);

    //Mat is short for Matrix, and is used here to store an image.
    //It is n-dimensional but, as an image, two-dimensional.
    Mat cacheHierarchy = new Mat();
    Mat grayTemp = new Mat();
    List<Rectangle> rectangles = new ArrayList<>();
    List<Contour> contours = new ArrayList<>();

    //This finds the edges using a Canny edge detector.
    //It is given the grayscale image, a temp Mat, the lower detection threshold for an edge,
    //the higher detection threshold, the aperture (blurring) of the image - higher is better
    //for long, smooth edges - and whether a more accurate (but time-expensive) gradient
    //should be used (true = more accurate).
    //Note: the edges are stored in "grayTemp", an image where everything is black
    //except for gray-scale lines delineating the edges.
    Imgproc.Canny(gray, grayTemp, 0, THRESHOLD_CANNY, APERTURE_CANNY, true);
    //make the white lines twice as big, while leaving the image size constant
    Filter.dilate(grayTemp, 2);

    List<MatOfPoint> contoursTemp = new ArrayList<>();
    //Find contours - the parameters here are very important to compression and retention
    //grayTemp is the image from which the contours are found,
    //contoursTemp is where the resultant contours are stored (note: color is not retained),
    //cacheHierarchy is the parent-child relationship between the contours (e.g. a contour
    //inside of another is its child),
    //Imgproc.RETR_LIST disables the hierarchical relationships being returned,
    //Imgproc.CHAIN_APPROX_SIMPLE means that the contour is compressed from a massive chain of
    //paired coordinates to just the endpoints of each segment (e.g. an up-right rectangular
    //contour is encoded with 4 points.)
    Imgproc.findContours(grayTemp, contoursTemp, cacheHierarchy, Imgproc.RETR_LIST,
            Imgproc.CHAIN_APPROX_SIMPLE);
    //MatOfPoint2f is a MatOfPoint (matrix of points) represented by floats instead of ints.
    MatOfPoint2f approx = new MatOfPoint2f();
    //For each contour, test whether the contour is a rectangle
    //List<Contour> contours = new ArrayList<>()
    for (MatOfPoint co : contoursTemp) {
        //converting the MatOfPoint to MatOfPoint2f
        MatOfPoint2f matOfPoint2f = new MatOfPoint2f(co.toArray());
        //converting the matrix to a Contour
        Contour c = new Contour(co);

        //Attempt to fit the contour to the best polygon
        //input: matOfPoint2f, which is the contour found earlier
        //output: approx, which is the MatOfPoint2f that holds the new polygon that has less vertices
        //basically, it smooths out the edges using the third parameter as its approximation accuracy
        //final parameter determines whether the new approximation must be closed (true=closed)
        Imgproc.approxPolyDP(matOfPoint2f, approx, c.arcLength(true) * EPLISON_APPROX_TOLERANCE_FACTOR, true);

        //converting the MatOfPoint2f to a contour
        Contour approxContour = new Contour(approx);

        //Make sure the contour is large enough, closed, and has exactly 4 points
        if (approx.toArray().length == 4 && Math.abs(approxContour.area()) > 1000 && approxContour.isClosed()) {

            //TODO contours and rectangles array may not match up, but why would they?
            contours.add(approxContour);

            //Check each angle to be approximately 90 degrees
            //Done by comparing the three points constituting the angle of each corner
            double maxCosine = 0;
            for (int j = 2; j < 5; j++) {
                double cosine = Math.abs(MathUtil.angle(approx.toArray()[j % 4], approx.toArray()[j - 2],
                        approx.toArray()[j - 1]));
                maxCosine = Math.max(maxCosine, cosine);
            }

            if (maxCosine < MAX_COSINE_VALUE) {
                //Convert the points to a rectangle instance
                rectangles.add(new Rectangle(approx.toArray()));
            }
        }
    }

    return new RectangleLocationResult(contours, rectangles);
}

From source file:org.openpnp.vision.FluentCv.java

License:Open Source License

public FluentCv getContourRects(List<MatOfPoint> contours, List<RotatedRect> rects) {
    for (int i = 0; i < contours.size(); i++) {
        MatOfPoint2f contour_ = new MatOfPoint2f();
        contours.get(i).convertTo(contour_, CvType.CV_32FC2);
        if (contour_.empty()) {
            continue;
        }
        RotatedRect rect = Imgproc.minAreaRect(contour_);
        rects.add(rect);
    }
    return this;
}

From source file:org.pooledtimeseries.PoT.java

License:Apache License

static ArrayList<double[][][]> getOpticalHistograms(Path filename, int w_d, int h_d, int o_d)
        throws PoTException {
    ArrayList<double[][][]> histograms = new ArrayList<double[][][]>();

    try {
        LOG.info("opening video file " + filename.toString());
        VideoCapture capture = new VideoCapture(filename.toString());

        if (!capture.isOpened()) {
            LOG.warning("video file " + filename.getFileName() + " could not be opened.");
            double[][][] hist = new double[w_d][h_d][o_d];
            histograms.add(hist);
        } else {
            // variables for processing images
            Mat original_frame = new Mat();

            Mat frame = new Mat();
            Mat frame_gray = new Mat();
            Mat prev_frame_gray = new Mat();
            MatOfPoint2f flow = new MatOfPoint2f();

            // computing a list of histogram of optical flows (i.e. a list of 5*5*8
            // arrays)
            for (int frame_index = 0;; frame_index++) {
                // capturing the video images
                capture.read(original_frame);

                if (original_frame.empty()) {
                    if (frame_index == 0) {
                        throw new PoTException("Could not read the video file");
                    } else
                        break;
                } else {
                    // resizing the captured frame and converting it to the gray scale
                    // image.
                    Imgproc.resize(original_frame, frame, new Size(frame_width, frame_height));
                    Imgproc.cvtColor(frame, frame_gray, Imgproc.COLOR_BGR2GRAY);

                    double[][][] hist = new double[w_d][h_d][o_d];
                    histograms.add(hist);

                    // from frame #2
                    if (frame_index > 0) {
                        // calculate optical flows
                        Video.calcOpticalFlowFarneback(prev_frame_gray, frame_gray, flow, 0.5, 1, 10, 2, 7, 1.5,
                                0); // 0.5, 1, 15, 2, 7, 1.5, 0

                        // update histogram of optical flows
                        updateOpticalHistogram(histograms.get(frame_index), flow);
                    }

                    Mat temp_frame = prev_frame_gray;
                    prev_frame_gray = frame_gray;
                    frame_gray = temp_frame;
                }
            }

            capture.release();
        }
    } catch (Exception e) {
        e.printStackTrace();
        LOG.log(Level.SEVERE, "Exception in getOpticalHistograms ", e);
    }
    return histograms;
}
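
Note the less common use in this example: since MatOfPoint2f is ultimately just a two-channel float Mat (CV_32FC2), it can serve as the dense flow output of calcOpticalFlowFarneback, where each element is a (dx, dy) displacement rather than an image coordinate. A minimal sketch, assuming two consecutive grayscale frames:

static MatOfPoint2f denseFlow(Mat prevGray, Mat gray) {
    MatOfPoint2f flow = new MatOfPoint2f();  // a CV_32FC2 Mat, filled by the call below
    Video.calcOpticalFlowFarneback(prevGray, gray, flow, 0.5, 1, 10, 2, 7, 1.5, 0);
    double[] d = flow.get(0, 0);             // d[0] = x displacement, d[1] = y displacement at (0, 0)
    return flow;
}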

From source file:org.usfirst.frc.team2084.CMonster2016.vision.Target.java

License:Open Source License

/**
 * Creates a new possible target based on the specified contour and calculates
 * its score.
 *
 * @param contour the shape of the possible target
 * @param grayImage the grayscale image used for corner refinement
 */
public Target(MatOfPoint contour, Mat grayImage) {
    // Simplify contour to make the corner finding algorithm work better
    MatOfPoint2f fContour = new MatOfPoint2f();
    contour.convertTo(fContour, CvType.CV_32F);
    Imgproc.approxPolyDP(fContour, fContour, VisionParameters.getGoalApproxPolyEpsilon(), true);
    fContour.convertTo(contour, CvType.CV_32S);

    this.contour = contour;

    // Check area, and don't do any calculations if it is not valid
    if (validArea = validateArea()) {

        // Find a bounding rectangle
        RotatedRect rect = Imgproc.minAreaRect(fContour);

        Point[] rectPoints = new Point[4];
        rect.points(rectPoints);

        for (int j = 0; j < rectPoints.length; j++) {
            Point rectPoint = rectPoints[j];

            double minDistance = Double.MAX_VALUE;
            Point point = null;

            for (int i = 0; i < contour.rows(); i++) {
                Point contourPoint = new Point(contour.get(i, 0));
                double dist = distance(rectPoint, contourPoint);
                if (dist < minDistance) {
                    minDistance = dist;
                    point = contourPoint;
                }
            }

            rectPoints[j] = point;
        }
        MatOfPoint2f rectMat = new MatOfPoint2f(rectPoints);
        // Refine the corners to improve accuracy
        Imgproc.cornerSubPix(grayImage, rectMat, new Size(4, 10), new Size(-1, -1),
                new TermCriteria(TermCriteria.EPS + TermCriteria.COUNT, 30, 0.1));
        rectPoints = rectMat.toArray();

        // Identify each corner
        SortedMap<Double, List<Point>> x = new TreeMap<>();
        Arrays.stream(rectPoints).forEach((p) -> {
            List<Point> points;
            if ((points = x.get(p.x)) == null) {
                x.put(p.x, points = new LinkedList<>());
            }
            points.add(p);
        });

        int i = 0;
        for (Iterator<List<Point>> it = x.values().iterator(); it.hasNext();) {
            List<Point> s = it.next();

            for (Point p : s) {
                switch (i) {
                case 0:
                    topLeft = p;
                    break;
                case 1:
                    bottomLeft = p;
                    break;
                case 2:
                    topRight = p;
                    break;
                case 3:
                    bottomRight = p;
                }
                i++;
            }
        }

        // Organize corners
        if (topLeft.y > bottomLeft.y) {
            Point p = bottomLeft;
            bottomLeft = topLeft;
            topLeft = p;
        }

        if (topRight.y > bottomRight.y) {
            Point p = bottomRight;
            bottomRight = topRight;
            topRight = p;
        }

        // Create corners for centroid calculation
        corners = new MatOfPoint2f(rectPoints);

        // Calculate center
        Moments moments = Imgproc.moments(corners);
        center = new Point(moments.m10 / moments.m00, moments.m01 / moments.m00);

        // Put the points in the correct order for solvePNP
        rectPoints[0] = topLeft;
        rectPoints[1] = topRight;
        rectPoints[2] = bottomLeft;
        rectPoints[3] = bottomRight;
        // Recreate corners in the new order
        corners = new MatOfPoint2f(rectPoints);

        widthTop = distance(topLeft, topRight);
        widthBottom = distance(bottomLeft, bottomRight);
        width = (widthTop + widthBottom) / 2.0;
        heightLeft = distance(topLeft, bottomLeft);
        heightRight = distance(topRight, bottomRight);
        height = (heightLeft + heightRight) / 2.0;

        Mat tvec = new Mat();

        // Calculate target's location
        Calib3d.solvePnP(OBJECT_POINTS, corners, CAMERA_MAT, DISTORTION_MAT, rotation, tvec, false,
                Calib3d.CV_P3P);

        // =======================================
        // Position and Orientation Transformation
        // =======================================

        double armAngle = VisionResults.getArmAngle();

        // Flip y axis to point upward
        Core.multiply(tvec, SIGN_NORMALIZATION_MATRIX, tvec);

        // Shift origin to arm pivot point, on the robot's centerline
        CoordinateMath.translate(tvec, CAMERA_X_OFFSET, CAMERA_Y_OFFSET, ARM_LENGTH);

        // Align axes with ground
        CoordinateMath.rotateX(tvec, -armAngle);
        Core.add(rotation, new MatOfDouble(armAngle, 0, 0), rotation);

        // Shift origin to robot center of rotation
        CoordinateMath.translate(tvec, 0, ARM_PIVOT_Y_OFFSET, -ARM_PIVOT_Z_OFFSET);

        double xPosFeet = tvec.get(0, 0)[0];
        double yPosFeet = tvec.get(1, 0)[0];
        double zPosFeet = tvec.get(2, 0)[0];

        // Old less effective aiming heading and distance calculation
        // double pixelsToFeet = TARGET_WIDTH / width;

        // distance = (TARGET_WIDTH * HighGoalProcessor.IMAGE_SIZE.width
        // / (2 * width * Math.tan(VisionParameters.getFOVAngle() / 2)));
        // double xPosFeet = (center.x - (HighGoalProcessor.IMAGE_SIZE.width
        // / 2)) * pixelsToFeet;
        // double yPosFeet = -(center.y -
        // (HighGoalProcessor.IMAGE_SIZE.height / 2)) * pixelsToFeet;

        distance = Math.sqrt(xPosFeet * xPosFeet + zPosFeet * zPosFeet);

        position = new Point3(xPosFeet, yPosFeet, zPosFeet);

        xGoalAngle = Math.atan(xPosFeet / zPosFeet);
        yGoalAngle = Math.atan(yPosFeet / zPosFeet);

        validate();
        score = calculateScore();
    } else {
        valid = false;
    }
}
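
Because cornerSubPix refines point coordinates in place, MatOfPoint2f acts as a read-write buffer in the example above: rough corner estimates go in, refined ones come out via toArray(). A condensed sketch, with roughCorners standing in for the initial estimates:

static Point[] refineCorners(Mat grayImage, Point[] roughCorners) {
    MatOfPoint2f corners = new MatOfPoint2f(roughCorners);
    Imgproc.cornerSubPix(grayImage, corners, new Size(5, 5), new Size(-1, -1),
            new TermCriteria(TermCriteria.EPS + TermCriteria.COUNT, 30, 0.1));
    return corners.toArray();                // refined to sub-pixel accuracy
}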

From source file:overwatchteampicker.OverwatchTeamPicker.java

public static ReturnValues findImage(String template, String source, int flag) {
    File lib = null;
    BufferedImage image = null;
    try {
        image = ImageIO.read(new File(source));
    } catch (Exception e) {
        e.printStackTrace();
    }

    String os = System.getProperty("os.name");
    String bitness = System.getProperty("sun.arch.data.model");

    if (os.toUpperCase().contains("WINDOWS")) {
        if (bitness.endsWith("64")) {
            lib = new File("C:\\Users\\POWERUSER\\Downloads\\opencv\\build\\java\\x64\\"
                    + System.mapLibraryName("opencv_java2413"));
        } else {
            lib = new File("libs//x86//" + System.mapLibraryName("opencv_java2413"));
        }
    }
    // Note: only Windows library paths are handled above; on other platforms
    // lib remains null and the following load will throw a NullPointerException.
    System.load(lib.getAbsolutePath());
    String tempObject = "images\\hero_templates\\" + template + ".png";
    String source_pic = source;
    Mat objectImage = Highgui.imread(tempObject, Highgui.CV_LOAD_IMAGE_GRAYSCALE);
    Mat sceneImage = Highgui.imread(source_pic, Highgui.CV_LOAD_IMAGE_GRAYSCALE);

    MatOfKeyPoint objectKeyPoints = new MatOfKeyPoint();
    FeatureDetector featureDetector = FeatureDetector.create(FeatureDetector.SURF);
    featureDetector.detect(objectImage, objectKeyPoints);
    KeyPoint[] keypoints = objectKeyPoints.toArray();
    MatOfKeyPoint objectDescriptors = new MatOfKeyPoint();
    DescriptorExtractor descriptorExtractor = DescriptorExtractor.create(DescriptorExtractor.SURF);
    descriptorExtractor.compute(objectImage, objectKeyPoints, objectDescriptors);

    // Create the matrix for output image.
    Mat outputImage = new Mat(objectImage.rows(), objectImage.cols(), Highgui.CV_LOAD_IMAGE_COLOR);
    Scalar newKeypointColor = new Scalar(255, 0, 0);
    Features2d.drawKeypoints(objectImage, objectKeyPoints, outputImage, newKeypointColor, 0);

    // Match object image with the scene image
    MatOfKeyPoint sceneKeyPoints = new MatOfKeyPoint();
    MatOfKeyPoint sceneDescriptors = new MatOfKeyPoint();
    featureDetector.detect(sceneImage, sceneKeyPoints);
    descriptorExtractor.compute(sceneImage, sceneKeyPoints, sceneDescriptors);

    Mat matchoutput = new Mat(sceneImage.rows() * 2, sceneImage.cols() * 2, Highgui.CV_LOAD_IMAGE_COLOR);
    Scalar matchesColor = new Scalar(0, 255, 25);

    List<MatOfDMatch> matches = new LinkedList<MatOfDMatch>();
    DescriptorMatcher descriptorMatcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
    descriptorMatcher.knnMatch(objectDescriptors, sceneDescriptors, matches, 2);

    LinkedList<DMatch> goodMatchesList = new LinkedList<DMatch>();

    float nndrRatio = .78f;

    for (int i = 0; i < matches.size(); i++) {
        MatOfDMatch matofDMatch = matches.get(i);
        DMatch[] dmatcharray = matofDMatch.toArray();
        DMatch m1 = dmatcharray[0];
        DMatch m2 = dmatcharray[1];

        if (m1.distance <= m2.distance * nndrRatio) {
            goodMatchesList.addLast(m1);

        }
    }

    if (goodMatchesList.size() >= 4) {

        List<KeyPoint> objKeypointlist = objectKeyPoints.toList();
        List<KeyPoint> scnKeypointlist = sceneKeyPoints.toList();

        LinkedList<Point> objectPoints = new LinkedList<>();
        LinkedList<Point> scenePoints = new LinkedList<>();

        for (int i = 0; i < goodMatchesList.size(); i++) {
            objectPoints.addLast(objKeypointlist.get(goodMatchesList.get(i).queryIdx).pt);
            scenePoints.addLast(scnKeypointlist.get(goodMatchesList.get(i).trainIdx).pt);
        }

        MatOfPoint2f objMatOfPoint2f = new MatOfPoint2f();
        objMatOfPoint2f.fromList(objectPoints);
        MatOfPoint2f scnMatOfPoint2f = new MatOfPoint2f();
        scnMatOfPoint2f.fromList(scenePoints);

        Mat homography = Calib3d.findHomography(objMatOfPoint2f, scnMatOfPoint2f, Calib3d.RANSAC, 3);

        Mat obj_corners = new Mat(4, 1, CvType.CV_32FC2);
        Mat scene_corners = new Mat(4, 1, CvType.CV_32FC2);

        obj_corners.put(0, 0, new double[] { 0, 0 });
        obj_corners.put(1, 0, new double[] { objectImage.cols(), 0 });
        obj_corners.put(2, 0, new double[] { objectImage.cols(), objectImage.rows() });
        obj_corners.put(3, 0, new double[] { 0, objectImage.rows() });

        Core.perspectiveTransform(obj_corners, scene_corners, homography);

        Mat img = Highgui.imread(source_pic, Highgui.CV_LOAD_IMAGE_COLOR);

        Core.line(img, new Point(scene_corners.get(0, 0)), new Point(scene_corners.get(1, 0)),
                new Scalar(0, 255, 255), 4);
        Core.line(img, new Point(scene_corners.get(1, 0)), new Point(scene_corners.get(2, 0)),
                new Scalar(255, 255, 0), 4);
        Core.line(img, new Point(scene_corners.get(2, 0)), new Point(scene_corners.get(3, 0)),
                new Scalar(0, 255, 0), 4);
        Core.line(img, new Point(scene_corners.get(3, 0)), new Point(scene_corners.get(0, 0)),
                new Scalar(0, 255, 0), 4);

        MatOfDMatch goodMatches = new MatOfDMatch();
        goodMatches.fromList(goodMatchesList);

        Features2d.drawMatches(objectImage, objectKeyPoints, sceneImage, sceneKeyPoints, goodMatches,
                matchoutput, matchesColor, newKeypointColor, new MatOfByte(), 2);
        if (new Point(scene_corners.get(0, 0)).x < new Point(scene_corners.get(1, 0)).x
                && new Point(scene_corners.get(0, 0)).y < new Point(scene_corners.get(2, 0)).y) {
            System.out.println("found " + template);
            Highgui.imwrite("points.jpg", outputImage);
            Highgui.imwrite("matches.jpg", matchoutput);
            Highgui.imwrite("final.jpg", img);

            if (flag == 0) {
                ReturnValues retVal = null;
                int y = (int) new Point(scene_corners.get(3, 0)).y;
                int yHeight = (int) new Point(scene_corners.get(3, 0)).y
                        - (int) new Point(scene_corners.get(2, 0)).y;
                if (y < image.getHeight() * .6) { // if the found hero is in the upper portion of the image, return point 3,0
                    retVal = new ReturnValues(y + (int) (image.getHeight() * .01), yHeight);
                } else { // if the found hero is in the lower portion of the image, return point 2,0
                    y = (int) new Point(scene_corners.get(2, 0)).y;
                    retVal = new ReturnValues(y + (int) (image.getHeight() * .3), yHeight);
                }
                return retVal;
            } else if (flag == 1) {
                int[] xPoints = new int[4];
                int[] yPoints = new int[4];

                xPoints[0] = (int) (new Point(scene_corners.get(0, 0)).x);
                xPoints[1] = (int) (new Point(scene_corners.get(1, 0)).x);
                xPoints[2] = (int) (new Point(scene_corners.get(2, 0)).x);
                xPoints[3] = (int) (new Point(scene_corners.get(3, 0)).x);

                yPoints[0] = (int) (new Point(scene_corners.get(0, 0)).y);
                yPoints[1] = (int) (new Point(scene_corners.get(1, 0)).y);
                yPoints[2] = (int) (new Point(scene_corners.get(2, 0)).y);
                yPoints[3] = (int) (new Point(scene_corners.get(3, 0)).y);

                ReturnValues retVal = new ReturnValues(xPoints, yPoints);
                return retVal;

            }
        }
    }
    return null;

}

From source file:simeav.filtros.instanciaciones.DetectorModulosEstandar.java

@Override
public Mat detectarModulos(Mat original, Diagrama diagrama) {
    Imgproc.blur(original, original, new Size(15, 15));
    original = Utils.dilate(original);
    Mat jerarquia = new Mat();
    ArrayList<MatOfPoint> contornos = new ArrayList<>();
    Imgproc.findContours(original.clone(), contornos, jerarquia, Imgproc.RETR_CCOMP,
            Imgproc.CHAIN_APPROX_SIMPLE);
    ArrayList<MatOfPoint> cp = new ArrayList<>(contornos.size());
    Map<Integer, Rect> rectangulos = new HashMap<>();
    Integer id_cuadrado = 0;
    Mat resultado = Mat.zeros(original.size(), CvType.CV_8U);
    // Walk the contours from last to first, keeping only those that have a parent in the hierarchy.
    for (int i = contornos.size() - 1; i >= 0; i--) {
        if (jerarquia.get(0, i)[3] > -1) {
            MatOfPoint2f contorno2f = new MatOfPoint2f();
            contorno2f.fromList(contornos.get(i).toList());
            MatOfPoint2f c = new MatOfPoint2f();
            Imgproc.approxPolyDP(contorno2f, c, 3, true);
            cp.add(new MatOfPoint(c.toArray()));
            int lados = cp.get(cp.size() - 1).height(); // number of vertices of the approximated polygon
            if ((4 <= lados) && lados < 12) {
                rectangulos.put(id_cuadrado, Imgproc.boundingRect(new MatOfPoint(c.toArray())));
                Point tl = new Point(rectangulos.get(id_cuadrado).tl().x - 20,
                        rectangulos.get(id_cuadrado).tl().y - 20);
                Point br = new Point(rectangulos.get(id_cuadrado).br().x + 20,
                        rectangulos.get(id_cuadrado).br().y + 20);
                Core.rectangle(resultado, tl, br, new Scalar(255, 255, 255), -1);
                diagrama.addModulo(id_cuadrado, new Rect(tl, br));
                Imgproc.drawContours(resultado, contornos, i, new Scalar(0, 0, 0), -1);
                id_cuadrado++;
            }
        }
    }
    return resultado;
}

From source file:simeav.filtros.instanciaciones.SeparadorTextoEstandar.java

@Override
public void separarTexto(Mat origin, int umbral_radio) {
    Mat original = origin.clone();
    imagenSinTexto = original.clone();
    imagenTexto = Mat.zeros(original.size(), CvType.CV_8U);
    imagenTexto = imagenTexto.setTo(new Scalar(255, 255, 255));
    Mat imGrises = new Mat();
    Mat bw = new Mat();
    Imgproc.cvtColor(original, imGrises, Imgproc.COLOR_BGR2GRAY);
    Imgproc.GaussianBlur(imGrises, bw, new Size(1, 1), 0);
    Imgproc.threshold(bw, bw, 200, 250, Imgproc.THRESH_BINARY_INV);
    ArrayList<MatOfPoint> contornos = Utils.detectarContornos(bw);
    for (int i = 0; i < contornos.size(); i++) {
        MatOfPoint2f contorno2f = new MatOfPoint2f();
        contorno2f.fromList(contornos.get(i).toList());
        float[] radius = new float[1];
        Point centro = new Point();
        Imgproc.minEnclosingCircle(contorno2f, centro, radius);
        double a = Imgproc.contourArea(contornos.get(i));
        if (radius[0] <= umbral_radio) {
            Imgproc.drawContours(imagenSinTexto, contornos, i, new Scalar(255, 255, 255), -1);
            Imgproc.drawContours(imagenTexto, contornos, i, new Scalar(0, 0, 0), -1);
        }
    }
}
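
Like approxPolyDP, minEnclosingCircle requires floating-point input, so each MatOfPoint contour is wrapped in a MatOfPoint2f first. A condensed sketch of the filtering test above, with radiusThreshold standing in for umbral_radio:

static boolean isSmallBlob(MatOfPoint contour, double radiusThreshold) {
    MatOfPoint2f contour2f = new MatOfPoint2f();
    contour2f.fromList(contour.toList());
    float[] radius = new float[1];
    Point center = new Point();
    Imgproc.minEnclosingCircle(contour2f, center, radius);
    return radius[0] <= radiusThreshold;     // small blobs are treated as text
}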