Example usage for org.opencv.android Utils bitmapToMat

Introduction

On this page you can find example usages for org.opencv.android Utils bitmapToMat.

Prototype

public static void bitmapToMat(Bitmap bmp, Mat mat) 

Document

Short form of bitmapToMat(bmp, mat, unPremultiplyAlpha = false).
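
A minimal sketch of both overloads (assuming the OpenCV native library has already been initialized, e.g. via OpenCVLoader.initDebug()):

import android.graphics.Bitmap;

import org.opencv.android.Utils;
import org.opencv.core.Mat;

public class BitmapToMatSketch {
    // Converts an ARGB_8888 (or RGB_565) Bitmap to an OpenCV Mat. The result
    // is a CV_8UC4 Mat with channels in RGBA order; bitmapToMat (re)allocates
    // the destination as needed.
    public static Mat toMat(Bitmap bmp) {
        Mat mat = new Mat();
        Utils.bitmapToMat(bmp, mat);          // short form: unPremultiplyAlpha = false
        // Utils.bitmapToMat(bmp, mat, true); // long form: un-premultiply the alpha channel first
        return mat;
    }
}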

Usage

From source file:de.hu_berlin.informatik.spws2014.mapever.entzerrung.EntzerrungsView.java

License:Open Source License

/**
 * Use corner detection algorithm to find and set corners automatically.
 */
public void calcCornersWithDetector() {
    // Compute the bitmap that was downscaled for the corner-detection (CD) algorithm
    Bitmap bmp32 = getCDScaledBitmap();

    if (bmp32 == null || getImageWidth() <= 0) {
        Log.e("EntzerrungsView/calcCornersWithDetector",
                bmp32 == null ? "getCDScaledBitmap() returned null!" : "getImageWidth() is nonpositive!");
        calcCornerDefaults();
        return;
    }

    // Cast to float so the scale factor is not truncated by integer division
    float sampleSize = (float) getImageWidth() / bmp32.getWidth();

    org.opencv.core.Point[] corner_points;

    try {
        Mat imgMat = new Mat();
        Utils.bitmapToMat(bmp32, imgMat);
        Mat greyMat = new Mat();
        Imgproc.cvtColor(imgMat, greyMat, Imgproc.COLOR_RGB2GRAY);

        corner_points = CornerDetector.guess_corners(greyMat);
    } catch (CvException e) {
        Log.w("EntzerrungsView/calcCornersWithDetector", "Corner detection failed with CvException");
        e.printStackTrace();

        // it seems that the image type is not supported by the corner detection algorithm (GIF?)
        // it won't be deskewable either, so deactivate that feature
        showCorners(false);
        imageTypeSupportsDeskew = false;

        calcCornerDefaults();
        return;
    } catch (UnsatisfiedLinkError e) {
        Log.w("EntzerrungsView/calcCornersWithDetector", "OpenCV not available");
        openCVLoadError = true;
        calcCornerDefaults();
        return;
    }

    // Fall back to default corners on failure
    if (corner_points == null) {
        Log.w("EntzerrungsView/calcCornersWithDetector", "Corner detection returned null");
        calcCornerDefaults();
        return;
    }

    // Scale the coordinates back up to the original image size
    for (int i = 0; i < corner_points.length; i++) {
        corner_points[i].x *= sampleSize;
        corner_points[i].y *= sampleSize;
    }

    Log.d("Corner points", "0: " + corner_points[0] + " 1: " + corner_points[1] + " 2: " + corner_points[2]
            + " 3: " + corner_points[3]);

    // Use the algorithm's result as the corner points
    corners[0].setPosition(corner_points[0]);
    corners[1].setPosition(corner_points[1]);
    corners[2].setPosition(corner_points[2]);
    corners[3].setPosition(corner_points[3]);

    // Sort them (although they should already be sorted...?)
    sortCorners();

    punkte_gesetzt = true;
}
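
One note on the conversion above: for an ARGB_8888 bitmap, bitmapToMat produces a four-channel RGBA Mat. cvtColor tolerates four-channel input with COLOR_RGB2GRAY, but COLOR_RGBA2GRAY matches the actual channel layout; a sketch of the greyscale step:

    Mat imgMat = new Mat();
    Utils.bitmapToMat(bmp32, imgMat);           // CV_8UC4, RGBA order
    Mat greyMat = new Mat();
    Imgproc.cvtColor(imgMat, greyMat, Imgproc.COLOR_RGBA2GRAY);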

From source file:edu.sfsu.cs.orange.ocr.OcrRecognizeAsyncTask.java

License:Apache License

@Override
protected Boolean doInBackground(Void... arg0) {
    long start = System.currentTimeMillis();
    Bitmap bitmap = activity.getCameraManager().buildLuminanceSource(data, width, height)
            .renderCroppedGreyscaleBitmap();

    String textResult;
    Mat image = new Mat();
    Utils.bitmapToMat(bitmap, image);
    Mat gray = new Mat();
    Utils.bitmapToMat(bitmap, gray);

    Mat background = new Mat();
    Utils.bitmapToMat(bitmap, background); //to test with BinarizeBG
    Mat finalimage = new Mat();
    Utils.bitmapToMat(bitmap, finalimage);

    //image.convertTo( gray,CvType.CV_8UC1);
    //image.convertTo(image,CvType.CV_64F);
    try {
        Imgcodecs.imwrite("/storage/emulated/0/DCIM/orig.jpg", image);
        OpencvNativeClass.BinarizeShafait(gray.getNativeObjAddr(), image.getNativeObjAddr());

        Imgcodecs.imwrite("/storage/emulated/0/DCIM/binarized.jpg", image);
        Utils.matToBitmap(image, bitmap);

        //Pix fimage = ReadFile.readBitmap(bitmap);
        //fimage = Binarize.otsuAdaptiveThreshold(fimage);

        //float angle = Skew.findSkew(fimage);
        //Log.i("Skew: ", Float.toString(angle));
        //double deg2rad = 3.14159265 / 180.;

        //fimage = Rotate.rotate(fimage, angle);

        //bitmap = WriteFile.writeBitmap(fimage);

        Mat skewed = new Mat();

        //Utils.bitmapToMat(bitmap,skewed);
        //Imgcodecs.imwrite("/storage/emulated/0/DCIM/deskewed.jpg", skewed);

        baseApi.setImage(ReadFile.readBitmap(bitmap));

        textResult = baseApi.getUTF8Text();
        timeRequired = System.currentTimeMillis() - start;

        // Check for failure to recognize text
        if (textResult == null || textResult.equals("")) {
            return false;
        }

        ocrResult = new OcrResult();
        ocrResult.setWordConfidences(baseApi.wordConfidences());
        ocrResult.setMeanConfidence(baseApi.meanConfidence());
        ocrResult.setRegionBoundingBoxes(baseApi.getRegions().getBoxRects());
        ocrResult.setTextlineBoundingBoxes(baseApi.getTextlines().getBoxRects());
        ocrResult.setWordBoundingBoxes(baseApi.getWords().getBoxRects());
        ocrResult.setStripBoundingBoxes(baseApi.getStrips().getBoxRects());

        // Iterate through the results.
        final ResultIterator iterator = baseApi.getResultIterator();
        int[] lastBoundingBox;
        ArrayList<Rect> charBoxes = new ArrayList<Rect>();
        iterator.begin();
        do {
            lastBoundingBox = iterator.getBoundingBox(PageIteratorLevel.RIL_SYMBOL);
            Rect lastRectBox = new Rect(lastBoundingBox[0], lastBoundingBox[1], lastBoundingBox[2],
                    lastBoundingBox[3]);
            charBoxes.add(lastRectBox);
        } while (iterator.next(PageIteratorLevel.RIL_SYMBOL));
        iterator.delete();
        ocrResult.setCharacterBoundingBoxes(charBoxes);

    } catch (RuntimeException e) {
        Log.e("OcrRecognizeAsyncTask",
                "Caught RuntimeException in request to Tesseract. Setting state to CONTINUOUS_STOPPED.");
        e.printStackTrace();
        try {
            baseApi.clear();
            activity.stopHandler();
        } catch (NullPointerException e1) {
            // Continue
        }
        return false;
    }
    timeRequired = System.currentTimeMillis() - start;
    ocrResult.setBitmap(bitmap);
    // Rebuild the recognized text, dropping empty lines and avoiding a trailing newline
    String[] temp = textResult.split("\n");
    if (temp.length != 0)
        textResult = "";
    for (int i = 0; i < temp.length; i++) {
        if (temp[i].length() != 0) {
            if (i < temp.length - 1) {
                textResult = textResult + temp[i] + "\n";
            } else
                textResult = textResult + temp[i];
        }
    }
    String textResult2 = ParsingNativeClass.ParseAddress(textResult);
    Log.d("Return parsing", textResult2);
    ocrResult.setViewtext(textResult);
    ocrResult.setText(textResult2);
    ocrResult.setRecognitionTimeRequired(timeRequired);
    return true;
}
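
Since image, gray, background, and finalimage above all start from the same bitmap, one conversion plus Mat.clone() would arguably be cleaner (a sketch, not what the source does):

    Mat image = new Mat();
    Utils.bitmapToMat(bitmap, image);
    Mat gray = image.clone();        // independent deep copies of the same pixels
    Mat background = image.clone();
    Mat finalimage = image.clone();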

From source file:ftclib.FtcVuforia.java

License:Open Source License

/**
 * This method gets a frame from the frame queue and returns the image that matches the format specified by the
 * configVideoSource method.
 *
 * @param frame specifies the frame object to hold image.
 * @return true if success, false otherwise.
 */
@Override
public boolean getFrame(Mat frame) {
    boolean success = false;

    try {
        VuforiaLocalizer.CloseableFrame closeableFrame = localizer.getFrameQueue().take();

        for (int i = 0; i < closeableFrame.getNumImages(); i++) {
            Image image = closeableFrame.getImage(i);
            if (image.getWidth() == imageWidth && image.getHeight() == imageHeight
                    && image.getFormat() == PIXEL_FORMAT.RGB565) {
                Bitmap bm = Bitmap.createBitmap(image.getWidth(), image.getHeight(), Bitmap.Config.RGB_565);
                bm.copyPixelsFromBuffer(image.getPixels());
                Utils.bitmapToMat(bm, frame);
                break;
            }
        }

        closeableFrame.close();
        success = true;
    } catch (InterruptedException e) {
        e.printStackTrace();
    }

    return success;
}
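
For reference, the frame captured above can be rendered back to a Bitmap with the inverse helper, since bitmapToMat leaves the Mat as CV_8UC4 (a sketch):

    Bitmap preview = Bitmap.createBitmap(frame.cols(), frame.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(frame, preview);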

From source file:info.jmfavreau.bifrostcore.imageprocessing.ImageToColor.java

License:Open Source License

public Scalar process(Bitmap bmp) {
    // convert the image to OpenCV format
    Log.d("bifrostcore", "create original image");
    Mat original_alpha = new Mat();
    Assert.assertNotNull(original_alpha);
    Utils.bitmapToMat(bmp, original_alpha);
    // remove alpha
    Mat original = new Mat();
    Imgproc.cvtColor(original_alpha, original, Imgproc.COLOR_RGBA2RGB, 0);
    Log.d("bifrostcore", "image size: " + String.valueOf(original.total()));

    // compute an ROI
    Mat roi = compute_roi(original);

    Log.d("bifrostcore", "smooth image");
    // smooth the image
    Mat smoothed = smooth_image(original);

    Log.d("bifrostcore", "convert to hsv");
    Mat hsv = toHSV(smoothed);

    Log.d("bifrostcore", "extract main region");
    // extract main region using histogram
    Mat main_region = extract_main_region(hsv, roi);

    // threshold to preserve only the most significant regions
    Mat main_region_threshold = threshold_mask(main_region);
    saveImage(main_region_threshold);

    Log.d("bifrostcore", "return mean value");
    // return the mean value
    return Core.mean(original, main_region_threshold);
}
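
Core.mean returns a Scalar of per-channel means over the masked pixels, in the Mat's channel order (RGB here, after the COLOR_RGBA2RGB conversion). A usage sketch, with imageToColor as a hypothetical instance of this class:

    Scalar mean = imageToColor.process(bmp); // hypothetical instance
    int color = android.graphics.Color.rgb(
            (int) mean.val[0], (int) mean.val[1], (int) mean.val[2]);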

From source file:karthiknr.TextID.ProcessAsyncActivity.java

License:Apache License

@Override
protected Bitmap doInBackground(Object... params) {

    try {

        if (params.length < 2) {
            Log.e(TAG, "Error passing parameter to execute - missing params");
            return null;
        }

        if (!(params[0] instanceof Context) || !(params[1] instanceof Bitmap)) {
            Log.e(TAG, "Error passing parameter to execute(context, bitmap)");
            return null;
        }

        context = (Context) params[0];

        bmp = (Bitmap) params[1];

        if (context == null || bmp == null) {
            Log.e(TAG, "Error passed null parameter to execute(context, bitmap)");
            return null;
        }

        Log.v(TAG, "Saving original bitmap");
        FileOutputStream out = null;
        try {
            out = new FileOutputStream(DATA_PATH + "/oocr.png");
            bmp.compress(Bitmap.CompressFormat.PNG, 100, out);
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            try {
                if (out != null) {
                    out.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }

        Log.v(TAG, "Starting Processing");

        //OpenCV Warping
        Bitmap mutableBitmap = bmp.copy(Bitmap.Config.ARGB_8888, true);

        Mat imgSource = new Mat(mutableBitmap.getHeight(), mutableBitmap.getWidth(), CvType.CV_8UC1);
        Utils.bitmapToMat(mutableBitmap, imgSource);
        Mat startM = findWarpedMat(imgSource);

        Mat sourceImage = new Mat(mutableBitmap.getHeight(), mutableBitmap.getWidth(), CvType.CV_8UC1);
        Utils.bitmapToMat(mutableBitmap, sourceImage);
        Mat warpedMat = warpImage(sourceImage, startM);

        Bitmap resultBitmap = Bitmap.createBitmap(warpedMat.cols(), warpedMat.rows(), Bitmap.Config.ARGB_8888);
        Utils.matToBitmap(warpedMat, resultBitmap);

        Log.v(TAG, "Got warped bitmap");
        Log.v(TAG, "Saving warped bitmap");

        out = null;
        try {
            out = new FileOutputStream(DATA_PATH + "/wocr.png");
            resultBitmap.compress(Bitmap.CompressFormat.PNG, 100, out);
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            try {
                if (out != null) {
                    out.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }

        return resultBitmap;

    } catch (Exception ex) {
        Log.d(TAG, "Error: " + ex + "\n" + ex.getMessage());
    }

    return null;
}
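
A small point on the Mat constructors above: bitmapToMat reallocates its destination to CV_8UC4 (RGBA) to match the ARGB_8888 bitmap, so the CV_8UC1 type passed to the constructors is discarded. A plain no-argument constructor is equivalent:

    Mat imgSource = new Mat(); // bitmapToMat (re)creates this as CV_8UC4
    Utils.bitmapToMat(mutableBitmap, imgSource);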

From source file:mineshcvit.opendocscanner.CropImage.java

License:Apache License

private void onSaveClicked() throws Exception {
    // TODO this code needs to change to use the decode/crop/encode single
    // step api so that the whole (possibly large) bitmap doesn't have to
    // be read into memory
    if (mSaving)
        return;

    if (mCrop == null)
        return;

    mSaving = true;

    //   Rect r = mCrop.getCropRect();
    final float[] trapezoid = mCrop.getTrapezoid();
    Log.w("myApp", "onsaveclicekd, trap[0] is " + trapezoid[0]);
    Log.w("myApp", "onsaveclicekd, trap[1] is " + trapezoid[1]);

    Log.w("myApp", "onsaveclicekd, trap[2] is " + trapezoid[2]);

    Log.w("myApp", "onsaveclicekd, trap[3] is " + trapezoid[3]);

    Log.w("myApp", "onsaveclicekd, trap[4] is " + trapezoid[4]);

    Log.w("myApp", "onsaveclicekd, trap[5] is " + trapezoid[5]);

    Log.w("myApp", "onsaveclicekd, trap[6] is " + trapezoid[6]);
    Log.w("myApp", "onsaveclicekd, trap[7] is " + trapezoid[7]);

    /// Reference approach for perspective correction

    // minesh:
    // Find the bounding rectangle of the quadrilateral; the new image, to which
    // the perspective-corrected matrix will be plotted, is created at this size.
    final RectF perspectiveCorrectedBoundingRect = new RectF(mCrop.getPerspectiveCorrectedBoundingRect());

    //dimension of the new image
    int result_width = (int) perspectiveCorrectedBoundingRect.width();
    int result_height = (int) perspectiveCorrectedBoundingRect.height();

    Log.w("myApp", "bounding rect width is " + result_width);
    Log.w("myApp", "bounding rect height " + result_height);

    // Mat(rows, cols, type) takes rows (height) first
    Mat inputMat = new Mat(mBitmap.getHeight(), mBitmap.getWidth(), CvType.CV_8UC4);
    Utils.bitmapToMat(mBitmap, inputMat);
    final Mat outputMat = new Mat(result_height, result_width, CvType.CV_8UC4);

    // The four source points of the quad
    Point ocvPIn1 = new Point((int) trapezoid[0], (int) trapezoid[1]); // top left
    Point ocvPIn2 = new Point((int) trapezoid[6], (int) trapezoid[7]); // bottom left
    Point ocvPIn3 = new Point((int) trapezoid[4], (int) trapezoid[5]); // bottom right
    Point ocvPIn4 = new Point((int) trapezoid[2], (int) trapezoid[3]); // top right

    List<Point> source = new ArrayList<Point>();
    source.add(ocvPIn1);
    source.add(ocvPIn2);
    source.add(ocvPIn3);
    source.add(ocvPIn4);

    Mat startM = Converters.vector_Point2f_to_Mat(source);

    // Points in the destination image
    Point ocvPOut1 = new Point(0, 0); // top left
    Point ocvPOut2 = new Point(0, result_height); // bottom left
    Point ocvPOut3 = new Point(result_width, result_height); // bottom right
    Point ocvPOut4 = new Point(result_width, 0); // top right

    List<Point> dest = new ArrayList<Point>();
    dest.add(ocvPOut1);
    dest.add(ocvPOut2);
    dest.add(ocvPOut3);
    dest.add(ocvPOut4);
    Mat endM = Converters.vector_Point2f_to_Mat(dest);

    Mat perspectiveTransform = Imgproc.getPerspectiveTransform(startM, endM);
    Imgproc.warpPerspective(inputMat, outputMat, perspectiveTransform, new Size(result_width, result_height),
            Imgproc.INTER_CUBIC);

    Imgcodecs.imwrite(mImagePath, outputMat);

    Intent intent = new Intent();
    setResult(RESULT_OK, intent);
    finish();
}
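
Note the argument order used above: the OpenCV Mat constructor is Mat(rows, cols, type), i.e. height before width, the opposite of Bitmap.createBitmap(width, height, config). For example:

    Mat m = new Mat(480, 640, CvType.CV_8UC4); // 480 rows (height) x 640 cols (width)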

From source file:mineshcvit.opendocscanner.CropImage.java

License:Apache License

private void makeDefault() {

    // minesh: finding the largest rect in the given image

    //Mat grayImage= Imgcodecs.imread(IMAGE_PATH, Imgcodecs.CV_LOAD_IMAGE_GRAYSCALE);


    Mat imgSource = new Mat();

    Utils.bitmapToMat(mBitmap, imgSource);
    //  Utils.bitmapToMat(bmp32, imgMAT);

    Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BGR2GRAY);

    //Mat imgSource = Imgcodecs.imread(mImagePath,Imgcodecs.CV_LOAD_IMAGE_GRAYSCALE);
    Log.w("myApp", "image path from isnde makedefault() is " + mImagePath);

    int matwidth = imgSource.width();
    int matheight = imgSource.height();

    Log.w("myApp", "mat image width, from makedefault() is " + matwidth);
    Log.w("myApp", "mat image height from, makedefault() is " + matheight);

    Mat imageBin = new Mat();

    double threshold = Imgproc.threshold(imgSource, imageBin, 0, 255, Imgproc.THRESH_OTSU);
    Log.w("myApp", "otsu threshold is " + threshold);

    // For Canny, the higher threshold is Otsu's threshold and the lower threshold is half of it
    Imgproc.Canny(imgSource.clone(), imgSource, threshold * 0.5, threshold);

    // Imgcodecs.imwrite(mImagePath, imgSource);

    // int canny_height=imgSource.height();
    //   int canny_width=imgSource.width();

    // Log.w("myApp", "canny image height is "+canny_height);

    Imgproc.GaussianBlur(imgSource, imgSource, new org.opencv.core.Size(3, 3), 3);
    // find the contours
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    //MatVector contours = new MatVector();

    Imgproc.findContours(imgSource, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);

    double maxArea = -1;
    if (contours.isEmpty()) {
        Log.w("myApp", "no contours found");
        return; // contours.get(0) below would throw on an empty list
    }
    MatOfPoint temp_contour = contours.get(0); // start from the first contour
    MatOfPoint2f approxCurve = new MatOfPoint2f();

    for (int idx = 0; idx < contours.size(); idx++) {
        temp_contour = contours.get(idx);
        double contourarea = Imgproc.contourArea(temp_contour);
        // compare this contour to the previous largest contour found
        if (contourarea > maxArea) {
            // check if this contour is a square
            MatOfPoint2f new_mat = new MatOfPoint2f(temp_contour.toArray());
            int contourSize = (int) temp_contour.total();
            MatOfPoint2f approxCurve_temp = new MatOfPoint2f();
            Imgproc.approxPolyDP(new_mat, approxCurve_temp, contourSize * 0.05, true);
            if (approxCurve_temp.total() == 4) {
                maxArea = contourarea;
                approxCurve = approxCurve_temp;
            }
        }
    }
    double[] temp_double = approxCurve.get(0, 0);
    Point p1 = new Point(temp_double[0], temp_double[1]);
    // Core.circle(imgSource,p1,55,new Scalar(0,0,255));
    // Imgproc.warpAffine(sourceImage, dummy, rotImage,sourceImage.size());
    temp_double = approxCurve.get(1, 0);
    Point p2 = new Point(temp_double[0], temp_double[1]);
    // Core.circle(imgSource,p2,150,new Scalar(255,255,255));
    temp_double = approxCurve.get(2, 0);
    Point p3 = new Point(temp_double[0], temp_double[1]);
    // Core.circle(imgSource,p3,200,new Scalar(255,0,0));
    temp_double = approxCurve.get(3, 0);
    Point p4 = new Point(temp_double[0], temp_double[1]);
    // Core.circle(imgSource,p4,100,new Scalar(0,0,255));
    ArrayList<Point> source = new ArrayList<Point>();
    ArrayList<Point> topPoints = new ArrayList<Point>();
    ArrayList<Point> bottomPoints = new ArrayList<Point>();
    ArrayList<Point> sortedPoints = new ArrayList<Point>();

    source.add(p1);
    source.add(p2);
    source.add(p3);
    source.add(p4);

    Collections.sort(source, new Comparator<Point>() {

        public int compare(Point o1, Point o2) {
            return Double.compare(o1.y, o2.y);
        }
    });

    topPoints.add(source.get(0));
    topPoints.add(source.get(1));

    Collections.sort(topPoints, new Comparator<Point>() {

        public int compare(Point o1, Point o2) {
            return Double.compare(o1.x, o2.x);
        }
    });

    bottomPoints.add(source.get(2));
    bottomPoints.add(source.get(3));

    Collections.sort(bottomPoints, new Comparator<Point>() {

        public int compare(Point o1, Point o2) {
            return Double.compare(o1.x, o2.x);
        }
    });

    sortedPoints.add(topPoints.get(0));//top left
    sortedPoints.add(bottomPoints.get(0));//bottom left
    sortedPoints.add(bottomPoints.get(1));//bottom right
    sortedPoints.add(topPoints.get(1));//top right

    /*
    C++ code to sort the points:

    void sortCorners(std::vector<cv::Point2f>& corners, cv::Point2f center)
    {
        std::vector<cv::Point2f> top, bot;

        for (int i = 0; i < corners.size(); i++)
        {
            if (corners[i].y < center.y)
                top.push_back(corners[i]);
            else
                bot.push_back(corners[i]);
        }

        cv::Point2f tl = top[0].x > top[1].x ? top[1] : top[0];
        cv::Point2f tr = top[0].x > top[1].x ? top[0] : top[1];
        cv::Point2f bl = bot[0].x > bot[1].x ? bot[1] : bot[0];
        cv::Point2f br = bot[0].x > bot[1].x ? bot[0] : bot[1];

        corners.clear();
        corners.push_back(tl);
        corners.push_back(tr);
        corners.push_back(br);
        corners.push_back(bl);
    }

    ...

    // Get mass center
    cv::Point2f center(0, 0);
    for (int i = 0; i < corners.size(); i++)
        center += corners[i];

    center *= (1. / corners.size());
    sortCorners(corners, center);
    */

    // p1 to p4 are in anticlockwise order, starting from the top left

    // double s=source.get(0).x;

    int width = mBitmap.getWidth();
    int height = mBitmap.getHeight();

    Log.w("myApp", "bitmap width is " + width);
    Log.w("myApp", "bitmap height is " + height);

    Rect imageRect = new Rect(0, 0, width, height);

    // make the default size about 4/5 of the width or height

    /*
            
            int cropWidth = Math.min(width, height) * 4 / 5;
            int cropHeight = cropWidth;
            
            
            int x = (width - cropWidth) / 2;
            int y = (height - cropHeight) / 2;
            
            RectF cropRect = new RectF(x, y, x + cropWidth, y + cropHeight);
            
    */
    /// To test the points order

    /*
    Point p1 = new Point(1.0*x,1.0*y );
    Point p2 = new Point(1.0*x+150.0,1.0*y+1.0*cropHeight);
            
    Point p3 = new Point(1.0*x+1.0*cropWidth,1.0*y+1.0*cropHeight);
            
    Point p4 = new Point(1.0*x+1.0*cropWidth,1.0*y);
            
    ArrayList<Point> source = new ArrayList<Point>();
    source.add(p1);
    source.add(p2);
    source.add(p3);
    source.add(p4);
            
    */

    Log.w("myApp",
            "from inside makedeafult inside cropimage calss, default crop rect values are set and now highlight view will be initiated ");

    HighlightView hv = new HighlightView(mImageView, imageRect, sortedPoints);

    Log.w("myApp", "higlight view initiated; done");

    mImageView.add(hv);
    Log.w("myApp", "add hv is done; done");

    mImageView.invalidate();
    mCrop = hv;

    Log.w("myApp", "mcrop=hv donee");
    mCrop.setFocus(true);
}
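
The approxPolyDP tolerance above is 5% of the contour's point count; a more common choice is a fraction of the contour's perimeter, so inside the loop the call could instead read (a sketch):

    double epsilon = 0.02 * Imgproc.arcLength(new_mat, true); // 2% of the closed perimeter
    Imgproc.approxPolyDP(new_mat, approxCurve_temp, epsilon, true);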

From source file:net.hydex11.opencvinteropexample.MainActivity.java

License:Open Source License

private void example() {
    RenderScript mRS = RenderScript.create(this);

    // Loads input image
    Bitmap inputBitmap = BitmapFactory.decodeResource(getResources(), R.drawable.houseimage);

    // Puts input image inside an OpenCV mat
    Mat inputMat = new Mat();
    Utils.bitmapToMat(inputBitmap, inputMat);

    Mat outputMat = new Mat(inputMat.size(), inputMat.type());

    // Testing bitmap, used to test that the OpenCV mat actually has bitmap data inside
    Bitmap initialBitmap = Bitmap.createBitmap(inputMat.width(), inputMat.height(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(inputMat, initialBitmap);

    // Retrieve OpenCV mat data address
    long inputMatDataAddress = inputMat.dataAddr();
    long outputMatDataAddress = outputMat.dataAddr();

    // Creates a RS type that matches the input mat one.
    Element element = Element.RGBA_8888(mRS);
    Type.Builder tb = new Type.Builder(mRS, element);
    tb.setX(inputMat.width());
    tb.setY(inputMat.height());

    Type inputMatType = tb.create();

    // Creates a RenderScript allocation that uses directly the OpenCV input mat address
    Allocation inputAllocation = createTypedAllocationWithDataPointer(mRS, inputMatType, inputMatDataAddress);
    Allocation outputAllocation = createTypedAllocationWithDataPointer(mRS, inputMatType, outputMatDataAddress);

    // Define a simple convolve script
    // Note: here, ANY kernel can be applied!
    ScriptIntrinsicConvolve3x3 convolve3x3 = ScriptIntrinsicConvolve3x3.create(mRS, element);

    float[] convolveCoefficients = new float[9];
    convolveCoefficients[0] = 1;
    convolveCoefficients[2] = 1;
    convolveCoefficients[5] = 1;
    convolveCoefficients[6] = 1;
    convolveCoefficients[8] = 1;
    convolve3x3.setCoefficients(convolveCoefficients);

    convolve3x3.setInput(inputAllocation);
    convolve3x3.forEach(outputAllocation);

    mRS.finish();

    // Converts the result to a bitmap
    Bitmap cvOutputBitmap = Bitmap.createBitmap(outputMat.width(), outputMat.height(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(outputMat, cvOutputBitmap);

    // Testing bitmap, used to test the RenderScript output allocation contents
    // Note: it is placed here because the copyTo function clears the input buffer
    Bitmap rsOutputBitmap = Bitmap.createBitmap(outputMat.width(), outputMat.height(), Bitmap.Config.ARGB_8888);
    outputAllocation.copyTo(rsOutputBitmap);

    // Testing bitmap, used to test that RenderScript input allocation pointed to the OpenCV mat
    // Note: it is placed here because the copyTo function clears the input buffer
    Bitmap rsInitialBitmap = Bitmap.createBitmap(inputMat.width(), inputMat.height(), Bitmap.Config.ARGB_8888);
    inputAllocation.copyTo(rsInitialBitmap);

    // Display input and output
    ImageView originalImageIV = (ImageView) findViewById(R.id.imageView);
    ImageView inputRSImageIV = (ImageView) findViewById(R.id.imageView2);
    ImageView outputRSImageIV = (ImageView) findViewById(R.id.imageView3);
    ImageView outputCVIV = (ImageView) findViewById(R.id.imageView4);

    originalImageIV.setImageBitmap(initialBitmap);
    inputRSImageIV.setImageBitmap(rsInitialBitmap);
    outputRSImageIV.setImageBitmap(rsOutputBitmap);
    outputCVIV.setImageBitmap(cvOutputBitmap);

}
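
The zero-copy interop here hinges on Mat.dataAddr(): the RenderScript Allocation wraps the Mat's native buffer directly (createTypedAllocationWithDataPointer is a helper defined elsewhere in this example, not a stock RenderScript API). Sharing the pointer is only safe while the Mat stays alive and its data is contiguous, which can be asserted up front (a sketch):

    if (!inputMat.isContinuous()) {
        throw new IllegalStateException("Mat data must be continuous to share its pointer");
    }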

From source file:nz.ac.auckland.lablet.vision.CamShiftTracker.java

License:Open Source License

/**
 * Gets the location of an object in a frame. Assumes you have called setRegionOfInterest,
 * which informs CamShiftTracker which object to track.
 *
 * @param frame The frame to search for the object in.
 * @return The location and bounds of the object, represented by a Rect.
 */
public Rect getObjectLocation(Bitmap frame) {
    Mat image = new Mat();
    Utils.bitmapToMat(frame, image);

    //        Mat out = new Mat(image.rows(), image.cols(), image.type());
    //        image.convertTo(out, -1, 2.0, 2.0);
    //        image = out;

    toHsv(image, hsvMin, hsvMax);

    ArrayList<Mat> hsvs = new ArrayList<>();
    hsvs.add(hsv);

    Imgproc.calcBackProject(hsvs, new MatOfInt(0), hist, backproj, ranges, 1);
    Core.bitwise_and(backproj, mask, backproj);

    try {
        Rect tempTrackWindow = trackWindow.clone();
        RotatedRect result = Video.CamShift(backproj, trackWindow, termCriteria);

        if (result.size.equals(new Size(0, 0)) && result.angle == 0 && result.center.equals(new Point(0, 0))) {
            trackWindow = tempTrackWindow;
            return null;
        }
    } catch (Exception e) {
        Log.e(TAG, "Shit went down: ", e);
        return null;
    }

    return trackWindow.clone();
}
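
A usage sketch, with tracker as a hypothetical CamShiftTracker instance whose region of interest has already been set; getObjectLocation returns null when CamShift degenerates or throws, so callers should check:

    Rect location = tracker.getObjectLocation(frameBitmap); // hypothetical instance and frame
    if (location != null) {
        Log.d(TAG, "object at (" + location.x + ", " + location.y + "), size "
                + location.width + "x" + location.height);
    }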

From source file:nz.ac.auckland.lablet.vision.CamShiftTracker.java

License:Open Source License

/**
 * Internally sets the region of interest (ROI) to track.
 * Only needs to be set once, unless the region of interest changes.
 *
 * @param frame The frame to extract the ROI from.
 * @param x The x coordinate of the ROI (top left).
 * @param y The y coordinate of the ROI (top left).
 * @param width The width of the ROI.
 * @param height The height of the ROI.
 */
public void setRegionOfInterest(Bitmap frame, int x, int y, int width, int height) {
    size = new Size(frame.getWidth(), frame.getHeight());
    hsv = new Mat(size, CvType.CV_8UC3);
    hue = new Mat(size, CvType.CV_8UC3);
    mask = new Mat(size, CvType.CV_8UC3);
    hist = new Mat(size, CvType.CV_8UC3);
    bgr = new Mat();
    backproj = new Mat();

    Mat image = new Mat();
    Utils.bitmapToMat(frame, image);

    //        Mat out = new Mat(image.rows(), image.cols(), image.type());
    //        image.convertTo(out, -1, 2.0, 2.0);
    //        image = out;

    trackWindow = new Rect(x, y, width, height);

    Mat bgrRoi = image.submat(trackWindow);

    Pair<Scalar, Scalar> minMaxHsv = getMinMaxHsv(bgrRoi, 2);
    hsvMin = minMaxHsv.first;
    hsvMax = minMaxHsv.second;

    toHsv(image, hsvMin, hsvMax);

    Mat hsvRoi = hsv.submat(trackWindow);
    Mat maskRoi = mask.submat(trackWindow);

    ArrayList<Mat> hsvRois = new ArrayList<>();
    hsvRois.add(hsvRoi);
    Imgproc.calcHist(hsvRois, new MatOfInt(0), maskRoi, hist, histSize, ranges);
    Core.normalize(hist, hist, 0, 255, Core.NORM_MINMAX);
}
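
Taken together with getObjectLocation above, the intended call sequence appears to be: select the ROI once on a reference frame, then query each subsequent frame. A sketch under that assumption (the frame bitmaps and ROI values are hypothetical):

    CamShiftTracker tracker = new CamShiftTracker();
    tracker.setRegionOfInterest(firstFrame, roiX, roiY, roiWidth, roiHeight);
    Rect found = tracker.getObjectLocation(nextFrame); // null if the object was lost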