Example usage for org.opencv.core Core norm

List of usage examples for org.opencv.core Core norm

Introduction

On this page you can find example usages of the org.opencv.core Core.norm method.

Prototype

public static double norm(Mat src1, int normType, Mat mask) 

Source Link

Usage

From source file:Retrive.java

public Double Find_dist(Mat query, Mat img_corpse) {
    // L2 (Euclidean) distance between the two matrices:
    // Core.norm(a, b, NORM_L2) computes ||a - b||_2.
    return Core.norm(img_corpse, query, Core.NORM_L2);
}

From source file:ch.zhaw.facerecognitionlibrary.Helpers.FaceDetection.java

License:Open Source License

public Eyes getEyes(Mat img) {
    // Detects exactly one eye in each vertical half of the face image and
    // returns their center points, the distance between them, and the roll
    // angle of the line connecting them. Returns null unless each half
    // yields exactly one detection.
    // NOTE(review): "rightHalf" covers x in [0, cols/2) — the image's LEFT
    // side; presumably this is the subject's right eye in a frontal view.
    // Confirm against the detector/camera setup.
    double halfWidth = img.cols() / 2;  // integer division before widening to double
    double height = img.rows();
    double[] values = new double[4];
    values[0] = 0;
    values[1] = 0;
    values[2] = halfWidth;
    values[3] = height;
    Rect rightHalf = new Rect(values);
    // Deliberate array reuse: only x changes; width/height stay halfWidth x
    // height, so leftHalf is the right-hand half of the image.
    values[0] = halfWidth;
    Rect leftHalf = new Rect(values);
    MatOfRect rightEyes = new MatOfRect();
    MatOfRect leftEyes = new MatOfRect();

    // Run each cascade classifier on its own half of the image.
    Mat rightHalfImg = img.submat(rightHalf);
    rightEyeDetector.detectMultiScale(rightHalfImg, rightEyes);
    Mat leftHalfImg = img.submat(leftHalf);
    leftEyeDetector.detectMultiScale(leftHalfImg, leftEyes);

    // Reject ambiguous frames: require exactly one detection per half.
    if (rightEyes.empty() || leftEyes.empty() || rightEyes.toArray().length > 1
            || leftEyes.toArray().length > 1) {
        return null;
    }

    Rect rightEye = rightEyes.toArray()[0];
    Rect leftEye = leftEyes.toArray()[0];

    // Eye centers in full-image coordinates; left-half detections are offset
    // by cols/2. All of this is integer arithmetic (int fields, int division)
    // before the values are widened to float by the MatOfFloat constructor.
    MatOfFloat rightPoint = new MatOfFloat(rightEye.x + rightEye.width / 2, rightEye.y + rightEye.height / 2);
    MatOfFloat leftPoint = new MatOfFloat(img.cols() / 2 + leftEye.x + leftEye.width / 2,
            leftEye.y + leftEye.height / 2);

    // Angle of the inter-eye line (fastAtan2 — degrees per OpenCV docs) and
    // the Euclidean distance between the two center points.
    MatOfFloat diff = new MatOfFloat();
    Core.subtract(leftPoint, rightPoint, diff);
    double angle = Core.fastAtan2(diff.toArray()[1], diff.toArray()[0]);
    double dist = Core.norm(leftPoint, rightPoint, Core.NORM_L2);
    Eyes eyes = new Eyes(dist, rightPoint, leftPoint, angle);
    return eyes;
}

From source file:ch.zhaw.facerecognitionlibrary.Recognition.Eigenfaces.java

License:Open Source License

public String recognize(Mat img, String expectedLabel) {
    // Projects the input face image into the eigenface subspace and returns
    // the label of the nearest stored training projection (1-nearest-neighbour
    // under the L2 norm). expectedLabel is only recorded via addImage.
    img = img.reshape(1, 1);            // flatten to a single row vector
    // Subtract mean face (Psi, learned during training) in float precision
    img.convertTo(img, CvType.CV_32F);
    Core.subtract(img, Psi, img);
    // Project to subspace
    Mat projected = getFeatureVector(img);
    // Save all points of image for tSNE
    img.convertTo(img, CvType.CV_8U);
    addImage(projected, expectedLabel, true);
    // Distance from this projection to every training projection
    // (Omega holds one row per training image).
    Mat distance = new Mat(Omega.rows(), 1, CvType.CV_64FC1);
    for (int i = 0; i < Omega.rows(); i++) {
        double dist = Core.norm(projected.row(0), Omega.row(i), Core.NORM_L2);
        distance.put(i, 0, dist);
    }
    // sortIdx produces row indices ordered by ascending distance.
    // NOTE(review): sortedDist is declared CV_8UC1 but sortIdx writes integer
    // indices (CV_32SC1) and reallocates the output Mat, so the declared type
    // here is misleading though harmless — verify against OpenCV docs.
    Mat sortedDist = new Mat(Omega.rows(), 1, CvType.CV_8UC1);
    Core.sortIdx(distance, sortedDist, Core.SORT_EVERY_COLUMN + Core.SORT_ASCENDING);
    // Give back the name of the found person (index 0 = closest match)
    int index = (int) (sortedDist.get(0, 0)[0]);
    return labelMap.getKey(labelList.get(index));
}

From source file:com.android.cts.verifier.sensors.RVCVXCheckAnalyzer.java

License:Apache License

/**
 * Analyze video frames using computer vision approach and generate an ArrayList&lt;AttitudeRec&gt;.
 *
 * Each decoded frame is searched for a 4x11 asymmetric circle grid; when the
 * grid is found, the camera pose relative to it is estimated with solvePnP
 * and recorded as roll/pitch/yaw. Frames where the grid is missing, the pose
 * cannot be solved, or the reprojection error exceeds the threshold are
 * skipped.
 *
 * @param recs  output ArrayList of AttitudeRec
 * @return index of the last decoded frame, or -1 if the decoded frame size
 *         does not match the recorded metadata.
 *         NOTE(review): i starts at -1 and is incremented once per decoded
 *         frame, so the return value is frameCount - 1, not the total frame
 *         count the original comment claimed — confirm what callers expect.
 */
private int analyzeVideo(ArrayList<AttitudeRec> recs) {
    VideoMetaInfo meta = new VideoMetaInfo(new File(mPath, "videometa.json"));

    int decimation = 1;
    boolean use_timestamp = true;

    // roughly determine if decimation is necessary: drop frames so the
    // effective rate is close to DECIMATION_FPS_TARGET
    if (meta.fps > DECIMATION_FPS_TARGET) {
        decimation = (int) (meta.fps / DECIMATION_FPS_TARGET);
        meta.fps /= decimation;
    }

    VideoDecoderForOpenCV videoDecoder = new VideoDecoderForOpenCV(new File(mPath, "video.mp4"), decimation);

    Mat frame;
    Mat gray = new Mat();
    int i = -1;  // frame index; -1 so the first frame becomes 0 after i++

    Size frameSize = videoDecoder.getSize();

    if (frameSize.width != meta.frameWidth || frameSize.height != meta.frameHeight) {
        // this is very unlikely: decoded size disagrees with recorded metadata
        return -1;
    }

    if (TRACE_VIDEO_ANALYSIS) {
        Debug.startMethodTracing("cvprocess");
    }

    Size patternSize = new Size(4, 11);  // asymmetric circle grid dimensions

    // focal length in pixels, derived from the recorded field of view
    float fc = (float) (meta.frameWidth / 2.0 / Math.tan(meta.fovWidth / 2.0));
    Mat camMat = cameraMatrix(fc, new Size(frameSize.width / 2, frameSize.height / 2));
    MatOfDouble coeff = new MatOfDouble(); // dummy — zero lens distortion assumed

    MatOfPoint2f centers = new MatOfPoint2f();
    MatOfPoint3f grid = asymmetricalCircleGrid(patternSize);
    Mat rvec = new MatOfFloat();
    Mat tvec = new MatOfFloat();

    MatOfPoint2f reprojCenters = new MatOfPoint2f();

    if (LOCAL_LOGV) {
        Log.v(TAG, "Camera Mat = \n" + camMat.dump());
    }

    long startTime = System.nanoTime();
    long[] ts = new long[1];  // out-param: frame timestamp from the decoder

    while ((frame = videoDecoder.getFrame(ts)) != null) {
        if (LOCAL_LOGV) {
            Log.v(TAG, "got a frame " + i);
        }

        // a -1 timestamp means the decoder cannot supply timestamps; fall
        // back to index/fps timing for the rest of the video
        if (use_timestamp && ts[0] == -1) {
            use_timestamp = false;
        }

        // has to be in front, as there are cases where execution
        // will skip the later part of this while
        i++;

        // convert to gray manually as by default findCirclesGridDefault uses COLOR_BGR2GRAY
        Imgproc.cvtColor(frame, gray, Imgproc.COLOR_RGB2GRAY);

        boolean foundPattern = Calib3d.findCirclesGrid(gray, patternSize, centers,
                Calib3d.CALIB_CB_ASYMMETRIC_GRID);

        if (!foundPattern) {
            // skip to next frame
            continue;
        }

        if (OUTPUT_DEBUG_IMAGE) {
            Calib3d.drawChessboardCorners(frame, patternSize, centers, true);
        }

        // figure out the extrinsic parameters using real ground truth 3D points and the pixel
        // position of blobs found in findCircleGrid, an estimated camera matrix and
        // no-distortion are assumed.
        boolean foundSolution = Calib3d.solvePnP(grid, centers, camMat, coeff, rvec, tvec, false,
                Calib3d.CV_ITERATIVE);

        if (!foundSolution) {
            // skip to next frame
            if (LOCAL_LOGV) {
                Log.v(TAG, "cannot find pnp solution in frame " + i + ", skipped.");
            }
            continue;
        }

        // reproject points to for evaluation of result accuracy of solvePnP
        Calib3d.projectPoints(grid, rvec, tvec, camMat, coeff, reprojCenters);

        // error is the L2 norm over all grid points of the difference between
        // detected and reprojected centers (units: pixels)
        double error = Core.norm(centers, reprojCenters, Core.NORM_L2);

        if (LOCAL_LOGV) {
            Log.v(TAG, "Found attitude, re-projection error = " + error);
        }

        // if error is reasonable, add it into the results. use ratio to frame height to avoid
        // discriminating higher definition videos
        if (error < REPROJECTION_THREASHOLD_RATIO * frameSize.height) {
            double[] rv = new double[3];
            double timestamp;

            rvec.get(0, 0, rv);
            if (use_timestamp) {
                timestamp = (double) ts[0] / 1e6;  // decoder timestamp to seconds
            } else {
                timestamp = (double) i / meta.fps; // fall back to index / fps
            }
            if (LOCAL_LOGV)
                Log.v(TAG, String.format("Added frame %d  ts = %f", i, timestamp));
            // rodr2rpy: Rodrigues rotation vector -> roll/pitch/yaw
            recs.add(new AttitudeRec(timestamp, rodr2rpy(rv)));
        }

        if (OUTPUT_DEBUG_IMAGE) {
            Calib3d.drawChessboardCorners(frame, patternSize, reprojCenters, true);
            Imgcodecs.imwrite(Environment.getExternalStorageDirectory().getPath() + "/RVCVRecData/DebugCV/img"
                    + i + ".png", frame);
        }
    }

    if (LOCAL_LOGV) {
        Log.v(TAG, "Finished decoding");
    }

    if (TRACE_VIDEO_ANALYSIS) {
        Debug.stopMethodTracing();
    }

    if (LOCAL_LOGV) {
        // time analysis
        double totalTime = (System.nanoTime() - startTime) / 1e9;
        Log.i(TAG, "Total time: " + totalTime + "s, Per frame time: " + totalTime / i);
    }
    return i;
}

From source file:de.vion.eyetracking.cameracalib.calibration.opencv.CameraCalibrator.java

private double computeReprojectionErrors(List<Mat> objectPoints, List<Mat> rvecs, List<Mat> tvecs,
        Mat perViewErrors) {
    // Computes the RMS reprojection error over all calibration views.
    // For each view, the buffered detected corners are compared (L2 norm)
    // against the points reprojected from the estimated pose. The per-view
    // RMS error is written into perViewErrors (CV_32FC1, one row per view);
    // the overall RMS over every point across all views is returned.
    MatOfDouble distCoeffs = new MatOfDouble(this.mDistortionCoefficients);
    MatOfPoint2f projected = new MatOfPoint2f();
    float[] perView = new float[objectPoints.size()];

    double sumSquaredError = 0;
    int pointCount = 0;
    for (int view = 0; view < objectPoints.size(); view++) {
        // Reproject the 3D model points through the pose estimated for this view.
        Calib3d.projectPoints(new MatOfPoint3f(objectPoints.get(view)), rvecs.get(view), tvecs.get(view),
                this.mCameraMatrix, distCoeffs, projected);
        double err = Core.norm(this.mCornersBuffer.get(view), projected, Core.NORM_L2);

        int pointsInView = objectPoints.get(view).rows();
        perView[view] = (float) Math.sqrt(err * err / pointsInView);
        sumSquaredError += err * err;
        pointCount += pointsInView;
    }

    perViewErrors.create(objectPoints.size(), 1, CvType.CV_32FC1);
    perViewErrors.put(0, 0, perView);

    return Math.sqrt(sumSquaredError / pointCount);
}