Example usage for org.opencv.core Point Point

List of usage examples for org.opencv.core Point Point

Introduction

On this page you can find example usages for org.opencv.core Point's Point constructor.

Prototype

public Point(double x, double y) 

Source Link

Usage

From source file:com.example.sarthuak.opencv.MainActivity.java

public Mat onCameraFrame(CvCameraViewFrame inputFrame) {

    // Dispatch on the view mode selected elsewhere (snapshot the field once).
    final int viewMode = mViewMode;
    switch (viewMode) {

    case VIEW_MODE_RGBA:
        // input frame has RGBA format
        mRgba = inputFrame.rgba();
        break;
    case VIEW_MODE_CANNY:
        // input frame has gray scale format
        mRgba = inputFrame.rgba();
        Imgproc.Canny(inputFrame.gray(), mRgbaF, 80, 100);
        Imgproc.cvtColor(mRgbaF, mRgba, Imgproc.COLOR_GRAY2RGBA, 4);
        break;

    case VIEW_MODE_ocr:
        startActivity(new Intent(this, ScanLicensePlateActivity.class));
        break;

    case VIEW_MODE_new:
        // BUGFIX: the original declared a local `Mat mRgba;` here, shadowing
        // the field of the same name for the rest of the switch block. The
        // captured frame went into the local, so `return mRgba` below returned
        // the stale (possibly never-assigned) field. Assign the field directly.
        mRgba = inputFrame.rgba();
        drawing = mRgba.clone();

        // NOTE: mRgbaT aliases `drawing`, so every step below also rewrites
        // `drawing` in place — including the image passed to findContours.
        mRgbaT = drawing;

        Imgproc.cvtColor(drawing, mRgbaT, Imgproc.COLOR_BGR2GRAY);

        // NOTE(review): a 1x1 Gaussian kernel is effectively a no-op blur;
        // kept as-is to preserve the original pipeline.
        org.opencv.core.Size s = new Size(1, 1);
        Imgproc.GaussianBlur(mRgbaT, mRgbaT, s, 0, 0);

        // Edge detection, then dilation to close small gaps before contouring.
        Imgproc.Canny(mRgbaT, mRgbaT, 100, 255);
        Mat element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(5, 5));
        Imgproc.dilate(mRgbaT, mRgbaT, element);
        List<MatOfPoint> contours = new ArrayList<>();

        Imgproc.findContours(drawing, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE,
                new Point(0, 0));

        // Locate the largest contour by area.
        double maxArea = -1;
        int maxAreaIdx = -1;
        for (int idx = 0; idx < contours.size(); idx++) {
            double contourarea = Imgproc.contourArea(contours.get(idx));
            if (contourarea > maxArea) {
                maxArea = contourarea;
                maxAreaIdx = idx;
            }
        }

        // Guard: with no contours, drawContours(-1) would mean "draw all";
        // only draw when a largest contour actually exists.
        if (maxAreaIdx >= 0) {
            Imgproc.drawContours(mRgba, contours, maxAreaIdx, new Scalar(255, 0, 0), 5);
        }
        break;

    }
    return mRgba; // the frame to display for this camera callback

}

From source file:com.github.mbillingr.correlationcheck.ImageProcessor.java

License:Open Source License

public List<Point> extractPoints() {
    // Buffers for the grayscale and thresholded stages of the pipeline.
    Mat grayscale = new Mat();
    Mat thresholded = new Mat();

    // 3x3 structuring element shared by both morphology passes.
    Mat morphKernel = Mat.ones(3, 3, CvType.CV_8UC1);

    debugreset();

    // Perspective-corrected input; keep a copy for later inspection.
    Mat frame = load_transformed();
    working_image = frame.clone();
    debugsave(frame, "source");

    // Grayscale -> blur -> adaptive threshold (inverted binary).
    Imgproc.cvtColor(frame, grayscale, Imgproc.COLOR_RGB2GRAY);
    debugsave(grayscale, "grayscale");

    Imgproc.GaussianBlur(grayscale, grayscale, new Size(15, 15), 0);
    debugsave(grayscale, "blurred");

    //Imgproc.equalizeHist(gray, gray);
    //debugsave(gray, "equalized");

    Imgproc.adaptiveThreshold(grayscale, thresholded, 255, Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C,
            Imgproc.THRESH_BINARY_INV, 129, 5);
    //Imgproc.threshold(gray, binary, 0, 255, Imgproc.THRESH_BINARY_INV + Imgproc.THRESH_OTSU);
    //Imgproc.threshold(gray, binary, 128, 255, Imgproc.THRESH_BINARY_INV);
    debugsave(thresholded, "binary");

    // Close then open to remove speckle noise while keeping blob shapes.
    Imgproc.morphologyEx(thresholded, thresholded, Imgproc.MORPH_CLOSE, morphKernel);
    debugsave(thresholded, "closed");

    Imgproc.morphologyEx(thresholded, thresholded, Imgproc.MORPH_OPEN, morphKernel);
    debugsave(thresholded, "opened");

    List<MatOfPoint> outlines = new ArrayList<>();
    Mat topology = new Mat();
    // NOTE: findContours modifies its input image in place.
    Imgproc.findContours(thresholded, outlines, topology, Imgproc.RETR_TREE, Imgproc.CHAIN_APPROX_SIMPLE);
    Imgproc.drawContours(frame, outlines, -1, new Scalar(0, 0, 255), 3);
    debugsave(frame, "contours");

    // Keep only blobs that are roughly square-ish, small, and plausibly dot-sized.
    List<PointAndArea> candidates = new ArrayList<>();
    for (MatOfPoint outline : outlines) {
        MatOfPoint2f outline2f = new MatOfPoint2f();
        outline2f.fromArray(outline.toArray());
        RotatedRect box = Imgproc.minAreaRect(outline2f);

        double blobArea = Imgproc.contourArea(outline);

        boolean aspectOk = box.size.width / box.size.height < 3 && box.size.height / box.size.width < 3;
        boolean sizeOk = box.size.width < 64 && box.size.height < 64;
        boolean areaOk = blobArea > 9 && blobArea < 10000;
        if (aspectOk && sizeOk && areaOk) {
            candidates.add(new PointAndArea((int) blobArea, box.center));
        }
    }

    List<Point> accepted = new ArrayList<>();

    // Visit candidates from largest to smallest; stop accepting once the area
    // drops below half of the previously accepted one.
    Collections.sort(candidates);
    Collections.reverse(candidates);
    int lastArea = -1;
    for (PointAndArea candidate : candidates) {
        Log.i("area", Integer.toString(candidate.area));
        if (lastArea == -1 || candidate.area >= lastArea / 2) {
            lastArea = candidate.area;
            Imgproc.circle(frame, candidate.point, 10, new Scalar(0, 255, 0), 5);
            // Normalize with axes swapped and flipped, matching the
            // coordinate convention used by the perspective correction.
            accepted.add(new Point(1 - candidate.point.y / work_height, 1 - candidate.point.x / work_width));
        }
    }
    debugsave(frame, "circles");

    return accepted;
}

From source file:com.github.mbillingr.correlationcheck.ImageProcessor.java

License:Open Source License

void setPerspectiveCorrection(float ax, float ay, float bx, float by, float cx, float cy, float dx, float dy) {
    // Pack the four corner coordinates into Points and delegate to the
    // list-based overload, preserving the a, b, c, d order.
    double[][] corners = { { ax, ay }, { bx, by }, { cx, cy }, { dx, dy } };

    List<Point> refpoints = new ArrayList<>();
    for (double[] corner : corners) {
        refpoints.add(new Point(corner[0], corner[1]));
    }

    setPerspectiveCorrection(refpoints);
}

From source file:com.github.mbillingr.correlationcheck.ImageProcessor.java

License:Open Source License

void setPerspectiveCorrection(List<Point> refpoints) {
    // Scale the normalized reference points (y flipped) up to raw image
    // pixel coordinates.
    List<Point> sources = new ArrayList<>();
    for (Point ref : refpoints) {
        double px = ref.x * raw_width;
        double py = (1 - ref.y) * raw_height;
        sources.add(new Point(px, py));
    }
    Mat srcMat = Converters.vector_Point2f_to_Mat(sources);

    // Fixed destination corners of the working image; the order here must
    // correspond to the order of the incoming reference points.
    List<Point> targets = new ArrayList<>();
    targets.add(new Point(0, work_height));
    targets.add(new Point(0, 0));
    targets.add(new Point(work_width, work_height));
    targets.add(new Point(work_width, 0));
    Mat dstMat = Converters.vector_Point2f_to_Mat(targets);

    perspective_transform = Imgproc.getPerspectiveTransform(srcMat, dstMat);
}

From source file:com.github.mbillingr.correlationcheck.MarkplotActivity.java

License:Open Source License

public List<Point> getPerspectivePoints() {
    // Convert each marker's view position into normalized coordinates with
    // the axes swapped (y/height becomes x, x/width becomes y), matching the
    // convention of the list-based setPerspectiveCorrection overload.
    List<Point> normalized = new ArrayList<>();
    for (Marker marker : markers) {
        normalized.add(new Point(marker.getY() / getHeight(), marker.getX() / getWidth()));
    }
    return normalized;
}

From source file:com.github.mbillingr.correlationcheck.Statistics.java

License:Open Source License

static Point sum(List<Point> points) {
    // Component-wise sum of all points; returns (0, 0) for an empty list.
    double totalX = 0;
    double totalY = 0;
    for (Point p : points) {
        totalX += p.x;
        totalY += p.y;
    }
    return new Point(totalX, totalY);
}

From source file:com.github.mbillingr.correlationcheck.Statistics.java

License:Open Source License

static Point squaresum(List<Point> points) {
    // Component-wise sum of squares; returns (0, 0) for an empty list.
    double totalX = 0;
    double totalY = 0;
    for (Point p : points) {
        totalX += p.x * p.x;
        totalY += p.y * p.y;
    }
    return new Point(totalX, totalY);
}

From source file:com.github.rosjava_catkin_package_a.ARLocROS.ComputePose.java

License:Apache License

public boolean computePose(Mat rvec, Mat tvec, Mat image2) throws NyARException, FileNotFoundException {
    // convert image to NyAR style for processing
    final INyARRgbRaster imageRaster = NyARImageHelper.createFromMat(image2);

    // create new marker system configuration
    i_config = new NyARMarkerSystemConfig(i_param);
    markerSystemState = new NyARMarkerSystem(i_config);
    // Create wrapper that passes cam pictures to marker system
    cameraSensorWrapper = new NyARSensor(i_screen_size);
    ids = new int[markerPatterns.size()];
    patternmap = new HashMap<>();
    for (int i = 0; i < markerPatterns.size(); i++) {
        // create marker description from pattern file and add to marker
        // system
        ids[i] = markerSystemState.addARMarker(arCodes.get(i), 25, markerConfig.getMarkerSize());
        patternmap.put(ids[i], markerPatterns.get(i));
    }

    cameraSensorWrapper.update(imageRaster);
    markerSystemState.update(cameraSensorWrapper);

    // init 3D point list
    final List<Point3> points3dlist = new ArrayList<>();
    final List<Point> points2dlist = new ArrayList<>();

    for (final int id : ids) {
        // process only if this marker has been detected with confidence > 0.7
        if (markerSystemState.isExistMarker(id) && markerSystemState.getConfidence(id) > 0.7) {
            // Read and add the marker's four 2D corner points.
            // BUGFIX: the original built the second corner as
            // new Point(vertex2d[1].x, vertex2d[2].y) — mixing coordinates of
            // two different vertices — which corrupted the 2D/3D
            // correspondences fed to solvePnPRansac. Each vertex now
            // contributes its own x and y. (An unused MatOfPoint/List built
            // here in the original was removed as dead code.)
            final NyARIntPoint2d[] vertex2d = markerSystemState.getMarkerVertex2D(id);
            for (int v = 0; v < 4; v++) {
                points2dlist.add(new Point(vertex2d[v].x, vertex2d[v].y));
            }

            // read and add corresponding 3D points
            points3dlist.addAll(markerConfig.create3dpointlist(patternmap.get(id)));
            // draw red rectangle around detected marker
            Core.rectangle(image2, new Point(vertex2d[0].x, vertex2d[0].y),
                    new Point(vertex2d[2].x, vertex2d[2].y), new Scalar(0, 0, 255));
        }

    }
    // load 2D and 3D points to Mats for solvePNP
    final MatOfPoint3f objectPoints = new MatOfPoint3f();
    objectPoints.fromList(points3dlist);
    final MatOfPoint2f imagePoints = new MatOfPoint2f();
    imagePoints.fromList(points2dlist);

    if (visualization) {
        // show image with markers detected
        Imshow.show(image2);
    }

    // do not call solvePNP with empty input data (no markers detected)
    if (points2dlist.isEmpty()) {
        return false;
    }

    // RANSAC-based pose estimation (more robust to outlier correspondences)
    Mat inliers = new Mat();

    Calib3d.solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, 300, 5, 16,
            inliers, Calib3d.CV_P3P);
    ARLoc.getLog().info("Points detected: " + points2dlist.size() + " inliers: " + inliers.size());
    // avoid publishing a zero pose if localization failed (no inliers found)
    if (inliers.rows() == 0) {
        return false;
    }

    return true;
}

From source file:com.ibm.streamsx.edgevideo.device.AbstractFaceDetectApp.java

License:Open Source License

protected void renderImages(Mat rgbFrame, MatOfRect faceRects, List<Mat> faces) {
    // Outline every detected face in green, then push the frame to the panel.
    final Scalar green = new Scalar(0, 255, 0);
    for (Rect face : faceRects.toArray()) {
        Point topLeft = new Point(face.x, face.y);
        Point bottomRight = new Point(face.x + face.width, face.y + face.height);
        Imgproc.rectangle(rgbFrame, topLeft, bottomRight, green);
    }
    faceDetectPanel.matToBufferedImage(rgbFrame);
    faceDetectPanel.repaint();

    // Optionally render each cropped face in the detections panel.
    if (renderDetections) {
        detectedFacesPanel.clear();
        for (Mat face : faces) {
            // TODO handle rendering multiple detections / images in the panel
            detectedFacesPanel.matToBufferedImage(face);
        }
        detectedFacesPanel.repaint();
    }
}

From source file:com.ibm.streamsx.edgevideo.device.wipRecognition.WIP_NonEdgentFaceDetectApp.java

License:Open Source License

protected void renderImages(Mat rgbFrame, MatOfRect faceRects, List<Mat> faces, List<Prediction> predictions) {
    // Outline every detected face in green, then push the frame to the panel.
    final Scalar green = new Scalar(0, 255, 0);
    for (Rect face : faceRects.toArray()) {
        Point topLeft = new Point(face.x, face.y);
        Point bottomRight = new Point(face.x + face.width, face.y + face.height);
        Imgproc.rectangle(rgbFrame, topLeft, bottomRight, green);
    }

    // TODO add recognition prediction info label to image

    faceDetectPanel.matToBufferedImage(rgbFrame);
    faceDetectPanel.repaint();

    // Optionally render each cropped face in the detections panel.
    if (renderDetections) {
        detectedFacesPanel.clear();
        for (Mat face : faces) {
            // TODO handle rendering multiple detections / images in the panel
            detectedFacesPanel.matToBufferedImage(face);
        }
        detectedFacesPanel.repaint();
    }
}