Example usage for org.opencv.core MatOfPoint MatOfPoint

List of usage examples for org.opencv.core MatOfPoint MatOfPoint

Introduction

On this page you can find example usage for org.opencv.core MatOfPoint MatOfPoint.

Prototype

public MatOfPoint() 

Source Link

Usage

From source file:OCV_ConvexHull.java

License:Open Source License

@Override
public void run(ImageProcessor ip) {
    // Collect the coordinates of every non-zero (foreground) pixel.
    final byte[] pixels = (byte[]) ip.getPixels();
    final int width = ip.getWidth();
    final int height = ip.getHeight();

    final ArrayList<Point> foreground = new ArrayList<Point>();

    for (int row = 0; row < height; row++) {
        final int rowOffset = width * row;
        for (int col = 0; col < width; col++) {
            if (pixels[rowOffset + col] != 0) {
                foreground.add(new Point((double) col, (double) row));
            }
        }
    }

    // An all-background image has no hull; nothing to show.
    if (foreground.isEmpty()) {
        return;
    }

    final MatOfPoint pts = new MatOfPoint();
    pts.fromList(foreground);
    final MatOfInt hull = new MatOfInt();
    Imgproc.convexHull(pts, hull, enCW);
    showData(pts, hull);
}

From source file:OCV_BoundingRect.java

License:Open Source License

@Override
public void run(ImageProcessor ip) {
    // Gather the coordinates of all foreground (non-zero) pixels.
    final byte[] pixels = (byte[]) ip.getPixels();
    final int width = ip.getWidth();
    final int height = ip.getHeight();
    final int sliceNumber = ip.getSliceNumber();

    final ArrayList<Point> foreground = new ArrayList<Point>();

    for (int row = 0; row < height; row++) {
        final int rowOffset = width * row;
        for (int col = 0; col < width; col++) {
            if (pixels[rowOffset + col] != 0) {
                foreground.add(new Point(col, row));
            }
        }
    }

    // An empty mask has no bounding rectangle; bail out quietly.
    if (foreground.isEmpty()) {
        return;
    }

    final MatOfPoint pts = new MatOfPoint();
    pts.fromList(foreground);
    showData(Imgproc.boundingRect(pts), sliceNumber);
}

From source file:com.github.rosjava_catkin_package_a.ARLocROS.ComputePose.java

License:Apache License

/**
 * Detects AR markers in the given camera image and estimates the camera pose
 * with RANSAC-based solvePnP.
 *
 * @param rvec   output rotation vector (written by solvePnPRansac)
 * @param tvec   output translation vector (written by solvePnPRansac)
 * @param image2 input camera frame; detected markers are outlined in red on it
 * @return true if at least one marker was detected and a pose with inliers was
 *         found, false otherwise
 *
 * Fix: the second 2D corner of each marker was previously built as
 * (vertex2d[1].x, vertex2d[2].y) — mixing coordinates of two different
 * vertices — which fed a corrupted 2D/3D correspondence into solvePnPRansac.
 * It now correctly uses vertex2d[1].y. The unused local MatOfPoint/list were
 * also removed (dead code).
 */
public boolean computePose(Mat rvec, Mat tvec, Mat image2) throws NyARException, FileNotFoundException {
    // convert image to NyAR style for processing
    final INyARRgbRaster imageRaster = NyARImageHelper.createFromMat(image2);

    // create new marker system configuration
    i_config = new NyARMarkerSystemConfig(i_param);
    markerSystemState = new NyARMarkerSystem(i_config);
    // Create wrapper that passes cam pictures to marker system
    cameraSensorWrapper = new NyARSensor(i_screen_size);
    ids = new int[markerPatterns.size()];
    patternmap = new HashMap<>();
    for (int i = 0; i < markerPatterns.size(); i++) {
        // create marker description from pattern file and add to marker system
        ids[i] = markerSystemState.addARMarker(arCodes.get(i), 25, markerConfig.getMarkerSize());
        patternmap.put(ids[i], markerPatterns.get(i));
    }

    cameraSensorWrapper.update(imageRaster);
    markerSystemState.update(cameraSensorWrapper);

    // init 3D point list
    final List<Point3> points3dlist = new ArrayList<>();
    final List<Point> points2dlist = new ArrayList<>();

    for (final int id : ids) {
        // process only if this marker has been detected with sufficient confidence
        if (markerSystemState.isExistMarker(id) && markerSystemState.getConfidence(id) > 0.7) {
            // read and add the four 2D corner points of the marker
            final NyARIntPoint2d[] vertex2d = markerSystemState.getMarkerVertex2D(id);
            points2dlist.add(new Point(vertex2d[0].x, vertex2d[0].y));
            // BUG FIX: previously used vertex2d[2].y here instead of vertex2d[1].y.
            points2dlist.add(new Point(vertex2d[1].x, vertex2d[1].y));
            points2dlist.add(new Point(vertex2d[2].x, vertex2d[2].y));
            points2dlist.add(new Point(vertex2d[3].x, vertex2d[3].y));

            // read and add corresponding 3D points
            points3dlist.addAll(markerConfig.create3dpointlist(patternmap.get(id)));
            // draw red rectangle around detected marker
            Core.rectangle(image2, new Point(vertex2d[0].x, vertex2d[0].y),
                    new Point(vertex2d[2].x, vertex2d[2].y), new Scalar(0, 0, 255));
        }

    }
    // load 2D and 3D points to Mats for solvePNP
    final MatOfPoint3f objectPoints = new MatOfPoint3f();
    objectPoints.fromList(points3dlist);
    final MatOfPoint2f imagePoints = new MatOfPoint2f();
    imagePoints.fromList(points2dlist);

    if (visualization) {
        // show image with markers detected
        Imshow.show(image2);
    }

    // do not call solvePNP with empty input data (no markers detected)
    if (points2dlist.isEmpty()) {
        return false;
    }

    Mat inliers = new Mat();

    Calib3d.solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, 300, 5, 16,
            inliers, Calib3d.CV_P3P);
    ARLoc.getLog().info("Points detected: " + points2dlist.size() + " inliers: " + inliers.size());
    // avoid publishing a zero pose if localization failed (no inliers)
    if (inliers.rows() == 0) {
        return false;
    }

    return true;
}

From source file:com.mycompany.objectdetection.ObjectDetector.java

/**
 * Computes the smallest upright rectangle containing both input rectangles.
 */
public Rect union(Rect r1, Rect r2) {
    // The bounding rectangle of the four corner points of both
    // rectangles is exactly their union.
    MatOfPoint corners = new MatOfPoint(r1.tl(), r1.br(), r2.tl(), r2.br());
    return Imgproc.boundingRect(corners);
}

From source file:emotion.Eyebrow.java

/**
 * Locates the inner and outer eyebrow end points in an eye-region image and
 * stores them (offset into full-image coordinates) on EyeRegion.
 *
 * Performance fix: the original code called MatOfPoint.toArray() once per
 * point inside the contour-averaging loop and repeatedly inside the feature
 * scan — each call copies the whole Mat to a Java array, giving O(n^2)
 * copying. The conversions are now hoisted out of the loops; the computed
 * values are unchanged.
 *
 * @param img          eye-region image (BGR); crosses are drawn onto it
 * @param rightEyeFlag true = treat as right eye, false = left eye
 */
public static void Harris(Mat img, boolean rightEyeFlag) {
    // Harris point extraction: binarize, clean up with open morphology.
    Mat harrisTestimg;
    harrisTestimg = img.clone();
    cvtColor(harrisTestimg, harrisTestimg, Imgproc.COLOR_BGR2GRAY);
    threshold(harrisTestimg, harrisTestimg, 200, 255, Imgproc.THRESH_BINARY_INV);
    Mat struct = Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3));
    erode(harrisTestimg, harrisTestimg, struct);
    dilate(harrisTestimg, harrisTestimg, struct);
    imwrite("intermediateHaaris.jpg", harrisTestimg);
    harrisTestimg.convertTo(harrisTestimg, CV_8UC1);
    ArrayList<MatOfPoint> contours = new ArrayList<>();
    Mat hierarchy = new Mat();

    Imgproc.findContours(harrisTestimg, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_NONE);

    float[] averageY = new float[contours.size()];
    for (int i = 0; i < contours.size(); ++i) {
        // Hoisted: convert the contour to an array once, not once per point.
        Point[] contourPts = contours.get(i).toArray();
        // Mean of the Y coordinates for this contour.
        for (int j = 0; j < contourPts.length; ++j) {
            averageY[i] += (int) contourPts[j].y;
        }
        averageY[i] /= contourPts.length;

        // Keep contours that lie in the upper half of the image (eyebrow zone)
        // and are at least as long as the image is wide; erase the rest.
        if (averageY[i] <= img.height() / 2 && contourPts.length >= img.width())
            Imgproc.drawContours(harrisTestimg, contours, i, new Scalar(255, 255, 255));
        else
            Imgproc.drawContours(harrisTestimg, contours, i, new Scalar(0, 0, 0));
    }

    MatOfPoint features = new MatOfPoint();
    Imgproc.goodFeaturesToTrack(harrisTestimg, features, 100, 0.00001, 0);

    // We keep just the 2 extreme points: leftmost [0] and rightmost [1]
    // among features in the upper half of the image.
    Point eyebrowsPoints[] = new Point[2];
    Point[] featurePts = features.toArray(); // hoisted: single Mat -> array copy
    if (featurePts.length > 0) {
        eyebrowsPoints[0] = new Point(harrisTestimg.width() / 2, 0);
        eyebrowsPoints[1] = new Point(harrisTestimg.width() / 2, 0);
    }
    for (Point feature : featurePts) {
        if (feature.x < eyebrowsPoints[0].x && feature.y < harrisTestimg.height() / 2) {
            eyebrowsPoints[0] = feature;
        }
        if (feature.x > eyebrowsPoints[1].x && feature.y < harrisTestimg.height() / 2) {
            eyebrowsPoints[1] = feature;
        }
    }
    StaticFunctions.drawCross(img, eyebrowsPoints[1], StaticFunctions.Features.EYEBROWS_ENDS);
    StaticFunctions.drawCross(img, eyebrowsPoints[0], StaticFunctions.Features.EYEBROWS_ENDS);
    imwrite("testHaris.jpg", img);
    // Translate from eye-region coordinates to full-image coordinates.
    if (rightEyeFlag) {
        EyeRegion.rightInnerEyebrowsCorner = eyebrowsPoints[0];
        EyeRegion.rightInnerEyebrowsCorner.x += Eye.rightRect.x;
        EyeRegion.rightInnerEyebrowsCorner.y += Eye.rightRect.y;

        EyeRegion.rightOuterEyebrowsCorner = eyebrowsPoints[1];
        EyeRegion.rightOuterEyebrowsCorner.x += Eye.rightRect.x;
        EyeRegion.rightOuterEyebrowsCorner.y += Eye.rightRect.y;
    } else {
        EyeRegion.leftInnerEyebrowsCorner = eyebrowsPoints[1];
        EyeRegion.leftInnerEyebrowsCorner.x += Eye.leftRect.x;
        EyeRegion.leftInnerEyebrowsCorner.y += Eye.leftRect.y;

        EyeRegion.leftOuterEyebrowsCorner = eyebrowsPoints[0];
        EyeRegion.leftOuterEyebrowsCorner.x += Eye.leftRect.x;
        EyeRegion.leftOuterEyebrowsCorner.y += Eye.leftRect.y;
    }
}

From source file:opencv_ext.TGG_OpenCV_Util.java

/**
 * Maps convex-hull indices back to their coordinates on the contour.
 *
 * @param hull    indices (into the contour) of the hull's corner points
 * @param contour the contour the indices refer to
 * @return a MatOfPoint holding the hull corner coordinates
 */
private static MatOfPoint hull2Points(MatOfInt hull, MatOfPoint contour) {
    final List<Point> contourPoints = contour.toList();
    final ArrayList<Point> hullPoints = new ArrayList<>();
    // Look up each hull index in the contour's point list.
    for (int index : hull.toArray()) {
        hullPoints.add(contourPoints.get(index));
    }
    MatOfPoint result = new MatOfPoint();
    result.fromList(hullPoints);
    return result;
}

From source file:org.ar.rubik.ImageRecognizer.java

License:Open Source License

/**
 * On Camera Frame/*from   w  w  w .j a  va  2s .  c o  m*/
 * 
 * Process frame image through Rubik Face recognition possibly resulting in a state change.
 * 
 *  (non-Javadoc)
 * @see org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2#onCameraFrame(org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame)
 */
@Override
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {

    //      Log.e(Constants.TAG, "CV Thread ID = " + Thread.currentThread().getId());

    // Just display error message if it is non-null.
    if (errorImage != null)
        return errorImage;

    Mat image = inputFrame.rgba();
    Size imageSize = image.size();
    Log.v(Constants.TAG_CAL, "Input Frame width=" + imageSize.width + " height=" + imageSize.height);
    if (imageSize.width != stateModel.openCVSize.width || imageSize.height != stateModel.openCVSize.height)
        Log.e(Constants.TAG_CAL, "State Model openCVSize does not agree with input frame!");

    // Save or Recall image as requested
    switch (MenuAndParams.imageSourceMode) {
    case NORMAL:
        break;
    case SAVE_NEXT:
        Util.saveImage(image);
        MenuAndParams.imageSourceMode = ImageSourceModeEnum.NORMAL;
        break;
    case PLAYBACK:
        image = Util.recallImage();
    default:
        break;
    }

    // Calculate and display Frames Per Second
    long newTimeStamp = System.currentTimeMillis();
    if (framesPerSecondTimeStamp > 0) {
        long frameTime = newTimeStamp - framesPerSecondTimeStamp;
        double framesPerSecond = 1000.0 / frameTime;
        String string = String.format("%4.1f FPS", framesPerSecond);
        Core.putText(image, string, new Point(50, 700), Constants.FontFace, 2, ColorTileEnum.WHITE.cvColor, 2);
    }
    framesPerSecondTimeStamp = newTimeStamp;

    try {

        // Initialize
        RubikFace rubikFace = new RubikFace();
        rubikFace.profiler.markTime(Profiler.Event.START);
        Log.i(Constants.TAG, "============================================================================");

        /* **********************************************************************
         * **********************************************************************
         * Return Original Image
         */
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.DIRECT) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            return annotation.drawAnnotation(image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Process to Grey Scale
         * 
         * This algorithm finds highlights areas that are all of nearly
         * the same hue.  In particular, cube faces should be highlighted.
         */
        Mat greyscale_image = new Mat();
        Imgproc.cvtColor(image, greyscale_image, Imgproc.COLOR_BGR2GRAY);
        rubikFace.profiler.markTime(Profiler.Event.GREYSCALE);
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.GREYSCALE) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(greyscale_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Gaussian Filter Blur prevents getting a lot of false hits 
         */
        Mat blur_image = new Mat();

        int kernelSize = (int) MenuAndParams.gaussianBlurKernelSizeParam.value;
        kernelSize = kernelSize % 2 == 0 ? kernelSize + 1 : kernelSize; // make odd
        Imgproc.GaussianBlur(greyscale_image, blur_image, new Size(kernelSize, kernelSize), -1, -1);
        rubikFace.profiler.markTime(Profiler.Event.GAUSSIAN);
        greyscale_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.GAUSSIAN) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(blur_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Canny Edge Detection
         */
        Mat canny_image = new Mat();
        Imgproc.Canny(blur_image, canny_image, MenuAndParams.cannyLowerThresholdParam.value,
                MenuAndParams.cannyUpperThresholdParam.value, 3, // Sobel Aperture size.  This seems to be typically value used in the literature: i.e., a 3x3 Sobel Matrix.
                false); // use cheap gradient calculation: norm =|dI/dx|+|dI/dy|
        rubikFace.profiler.markTime(Profiler.Event.EDGE);
        blur_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.CANNY) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(canny_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Dilation Image Process
         */
        Mat dilate_image = new Mat();
        Imgproc.dilate(canny_image, dilate_image, Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(
                MenuAndParams.dilationKernelSizeParam.value, MenuAndParams.dilationKernelSizeParam.value)));
        rubikFace.profiler.markTime(Profiler.Event.DILATION);
        canny_image.release();
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.DILATION) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            image.release();
            return annotation.drawAnnotation(dilate_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Contour Generation 
         */
        List<MatOfPoint> contours = new LinkedList<MatOfPoint>();
        Mat heirarchy = new Mat();
        Imgproc.findContours(dilate_image, contours, heirarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE); // Note: tried other TC89 options, but no significant change or improvement on cpu time.
        rubikFace.profiler.markTime(Profiler.Event.CONTOUR);
        dilate_image.release();

        // Create gray scale image but in RGB format, and then added yellow colored contours on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.CONTOUR) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 3);
            Imgproc.drawContours(rgba_gray_image, contours, -1, ColorTileEnum.YELLOW.cvColor, 3);
            Core.putText(rgba_gray_image, "Num Contours: " + contours.size(), new Point(500, 50),
                    Constants.FontFace, 4, ColorTileEnum.RED.cvColor, 4);
            gray_image.release();
            image.release();
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Polygon Detection
         */
        List<Rhombus> polygonList = new LinkedList<Rhombus>();
        for (MatOfPoint contour : contours) {

            // Keep only counter clockwise contours.  A clockwise contour is reported as a negative number.
            double contourArea = Imgproc.contourArea(contour, true);
            if (contourArea < 0.0)
                continue;

            // Keep only reasonable area contours
            if (contourArea < MenuAndParams.minimumContourAreaParam.value)
                continue;

            // Floating, instead of Double, for some reason required for approximate polygon detection algorithm.
            MatOfPoint2f contour2f = new MatOfPoint2f();
            MatOfPoint2f polygone2f = new MatOfPoint2f();
            MatOfPoint polygon = new MatOfPoint();

            // Make a Polygon out of a contour with provide Epsilon accuracy parameter.
            // It uses the Douglas-Peucker algorithm http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
            contour.convertTo(contour2f, CvType.CV_32FC2);
            Imgproc.approxPolyDP(contour2f, polygone2f, MenuAndParams.polygonEpsilonParam.value, // The maximum distance between the original curve and its approximation.
                    true); // Resulting polygon representation is "closed:" its first and last vertices are connected.
            polygone2f.convertTo(polygon, CvType.CV_32S);

            polygonList.add(new Rhombus(polygon));
        }

        rubikFace.profiler.markTime(Profiler.Event.POLYGON);

        // Create gray scale image but in RGB format, and then add yellow colored polygons on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.POLYGON) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 4);
            for (Rhombus polygon : polygonList)
                polygon.draw(rgba_gray_image, ColorTileEnum.YELLOW.cvColor);
            Core.putText(rgba_gray_image, "Num Polygons: " + polygonList.size(), new Point(500, 50),
                    Constants.FontFace, 3, ColorTileEnum.RED.cvColor, 4);
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Rhombus Tile Recognition
         * 
         * From polygon list, produces a list of suitable Parallelograms (Rhombi).
         */
        Log.i(Constants.TAG, String.format("Rhombus:   X    Y   Area   a-a  b-a a-l b-l gamma"));
        List<Rhombus> rhombusList = new LinkedList<Rhombus>();
        // Get only valid Rhombus(es) : actually parallelograms.
        for (Rhombus rhombus : polygonList) {
            rhombus.qualify();
            if (rhombus.status == Rhombus.StatusEnum.VALID)
                rhombusList.add(rhombus);
        }

        // Filtering w.r.t. Rhmobus set characteristics
        Rhombus.removedOutlierRhombi(rhombusList);

        rubikFace.profiler.markTime(Profiler.Event.RHOMBUS);

        // Create gray scale image but in RGB format, and then add yellow colored Rhombi (parallelograms) on top.
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.RHOMBUS) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            Mat gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Mat rgba_gray_image = new Mat(imageSize, CvType.CV_8UC4);
            Imgproc.cvtColor(image, gray_image, Imgproc.COLOR_RGB2GRAY);
            Imgproc.cvtColor(gray_image, rgba_gray_image, Imgproc.COLOR_GRAY2BGRA, 4);
            for (Rhombus rhombus : rhombusList)
                rhombus.draw(rgba_gray_image, ColorTileEnum.YELLOW.cvColor);
            Core.putText(rgba_gray_image, "Num Rhombus: " + rhombusList.size(), new Point(500, 50),
                    Constants.FontFace, 4, ColorTileEnum.RED.cvColor, 4);
            gray_image.release();
            image.release();
            return annotation.drawAnnotation(rgba_gray_image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Face Recognition
         * 
         * Takes a collection of Rhombus objects and determines if a valid
         * Rubik Face can be determined from them, and then also determines 
         * initial color for all nine tiles. 
         */
        rubikFace.processRhombuses(rhombusList, image);
        Log.i(Constants.TAG, "Face Solution = " + rubikFace.faceRecognitionStatus);
        rubikFace.profiler.markTime(Profiler.Event.FACE);
        if (MenuAndParams.imageProcessMode == ImageProcessModeEnum.FACE_DETECT) {
            stateModel.activeRubikFace = rubikFace;
            rubikFace.profiler.markTime(Profiler.Event.TOTAL);
            return annotation.drawAnnotation(image);
        }

        /* **********************************************************************
         * **********************************************************************
         * Cube Pose Estimation
         * 
         * Reconstruct the Rubik Cube 3D location and orientation in GL space coordinates.
         */
        if (rubikFace.faceRecognitionStatus == FaceRecognitionStatusEnum.SOLVED) {

            // Obtain Cube Pose from Face Grid information.
            stateModel.cubePose = CubePoseEstimator.poseEstimation(rubikFace, image, stateModel);

            // Process measurement update on Kalman Filter (if it exists).
            KalmanFilter kalmanFilter = stateModel.kalmanFilter;
            if (kalmanFilter != null)
                kalmanFilter.measurementUpdate(stateModel.cubePose, System.currentTimeMillis());

            // Process measurement update on Kalman Filter ALSM (if it exists).
            KalmanFilterALSM kalmanFilterALSM = stateModel.kalmanFilterALSM;
            if (kalmanFilter != null)
                kalmanFilterALSM.measurementUpdate(stateModel.cubePose, System.currentTimeMillis());
        } else {
            stateModel.cubePose = null;
        }
        rubikFace.profiler.markTime(Profiler.Event.POSE);

        /* **********************************************************************
         * **********************************************************************
         * Application State Machine
         * 
         * Will provide user instructions.
         * Will determine when we are on-face and off-face
         * Will determine when we are on-new-face
         * Will change state 
         */
        appStateMachine.onFaceEvent(rubikFace);
        rubikFace.profiler.markTime(Profiler.Event.CONTROLLER);
        rubikFace.profiler.markTime(Profiler.Event.TOTAL);

        // Normal return point.
        stateModel.activeRubikFace = rubikFace;
        return annotation.drawAnnotation(image);

        // =+= Issue: how to get stdio to print as error and not warning in logcat?
    } catch (CvException e) {
        Log.e(Constants.TAG, "CvException: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "CvException: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    } catch (Exception e) {
        Log.e(Constants.TAG, "Exception: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "Exception: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    } catch (Error e) {
        Log.e(Constants.TAG, "Error: " + e.getMessage());
        e.printStackTrace();
        errorImage = new Mat(imageSize, CvType.CV_8UC4);
        Core.putText(errorImage, "Error: " + e.getMessage(), new Point(50, 50), Constants.FontFace, 2,
                ColorTileEnum.WHITE.cvColor, 2);
        int i = 1;
        for (StackTraceElement element : e.getStackTrace())
            Core.putText(errorImage, element.toString(), new Point(50, 50 + 50 * i++), Constants.FontFace, 2,
                    ColorTileEnum.WHITE.cvColor, 2);
    }

    return annotation.drawAnnotation(image);
}

From source file:org.openpnp.vision.FluentCv.java

License:Open Source License

/**
 * Merges all contours into one point set and computes its rotated rects.
 * Note: the input contour list is cleared as a side effect.
 */
public FluentCv getContourMaxRects(List<MatOfPoint> contours, List<RotatedRect> rect) {
    // Flatten every contour into one combined point list.
    List<Point> allPoints = new ArrayList<>();
    for (MatOfPoint contour : contours) {
        List<Point> contourPoints = new ArrayList<>();
        Converters.Mat_to_vector_Point(contour, contourPoints);
        allPoints.addAll(contourPoints);
    }
    // The caller's list is consumed; continue with one merged contour.
    contours.clear();
    MatOfPoint merged = new MatOfPoint();
    merged.fromList(allPoints);

    return getContourRects(Collections.singletonList(merged), rect);
}

From source file:org.sahyagiri.rpi.opencv.DetectCar.java

License:Mozilla Public License

/**
 * Concatenates the points of all given contours into a single MatOfPoint.
 */
private MatOfPoint combineContourPoints(ArrayList<MatOfPoint> contours) {
    List<Point> merged = new ArrayList<Point>();
    for (MatOfPoint contour : contours) {
        merged.addAll(contour.toList());
    }
    MatOfPoint result = new MatOfPoint();
    result.fromList(merged);
    return result;
}

From source file:org.usfirst.frc.team2084.CMonster2016.vision.BallProcessor.java

License:Open Source License

/**
 * Thresholds the camera image in HSV space, finds external contours, and
 * draws both the contours and their convex hulls onto the image.
 *
 * Fix: the hull MatOfPoint was constructed empty (zero allocated rows), so
 * every hull.put(row, 0, ...) wrote out of bounds — the Java Mat bindings
 * silently ignore out-of-range writes — leaving all hulls empty and
 * drawContours drawing nothing. The mat is now allocated to the hull size
 * before being filled.
 *
 * @param cameraImage BGR frame; contours and hulls are drawn onto it
 */
@Override
public void process(Mat cameraImage) {
    // Blur first so the HSV threshold is less noisy.
    Imgproc.blur(cameraImage, hsvImage, new Size(20, 20));

    Imgproc.cvtColor(hsvImage, hsvImage, Imgproc.COLOR_BGR2HSV);

    // Threshold image to find blue/green
    Core.inRange(hsvImage, thresholdMin, thresholdMax, thresholdImage);

    // Morphological open to remove small speckles.
    Imgproc.erode(thresholdImage, thresholdImage, KERNEL);
    Imgproc.dilate(thresholdImage, thresholdImage, KERNEL);

    // findContours modifies its input, so work on a copy.
    thresholdImage.copyTo(contourImage);

    contours.clear();
    hulls.clear();

    Imgproc.findContours(contourImage, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
    Imgproc.drawContours(cameraImage, contours, -1, CONTOUR_COLOR);

    for (int i = 0; i < contours.size(); i++) {
        MatOfPoint contour = contours.get(i);
        MatOfInt hullIndices = new MatOfInt();
        MatOfPoint hull;
        hulls.add(hull = new MatOfPoint());
        Imgproc.convexHull(contour, hullIndices);

        // BUG FIX: allocate rows before put(); writes into an empty Mat are
        // silently dropped by the Java bindings.
        hull.alloc(hullIndices.rows());
        for (int c = 0; c < hullIndices.rows(); c++) {
            int v = (int) hullIndices.get(c, 0)[0];
            hull.put(c, 0, contour.get(v, 0));
        }
    }

    Imgproc.drawContours(cameraImage, hulls, -1, CONTOUR_COLOR);

    debugImage("Threshold Image", thresholdImage);
    // debugImage("Contour Image", contourImage);
}