Example usage for org.opencv.core Mat depth

Introduction

This page collects example usages of org.opencv.core.Mat.depth() from open source projects.

Prototype

public int depth() 
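
Mat.depth() returns the per-channel depth identifier of the matrix, one of the CvType constants CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F or CV_64F, independent of the number of channels. A minimal sketch illustrating this (assuming the OpenCV native library has already been loaded):

Mat bgr = new Mat(4, 4, CvType.CV_8UC3);    // 3-channel, 8-bit unsigned
Mat gray32 = new Mat(4, 4, CvType.CV_32F);  // 1-channel, 32-bit float

System.out.println(bgr.depth() == CvType.CV_8U);      // true: depth ignores the channel count
System.out.println(gray32.depth() == CvType.CV_32F);  // true
System.out.println(bgr.channels());                   // 3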

Usage

From source file:OCV_Sobel.java

License:Open Source License

@Override
public void run(ImageProcessor ip) {
    if (ip.getBitDepth() == 8) {
        // srcdst
        int imw = ip.getWidth();
        int imh = ip.getHeight();
        byte[] srcdst_bytes = (byte[]) ip.getPixels();

        // mat
        Mat src_mat = new Mat(imh, imw, CvType.CV_8UC1);
        Mat dst_mat = new Mat(imh, imw, CvType.CV_8UC1);

        // run
        src_mat.put(0, 0, srcdst_bytes);
        Imgproc.Sobel(src_mat, dst_mat, src_mat.depth(), dx, dy, ksize, scale, delta,
                INT_BORDERTYPE[indBorderType]);
        dst_mat.get(0, 0, srcdst_bytes);
    } else if (ip.getBitDepth() == 16) {
        // srcdst
        int imw = ip.getWidth();
        int imh = ip.getHeight();
        short[] srcdst_shorts = (short[]) ip.getPixels();

        // mat
        Mat src_mat = new Mat(imh, imw, CvType.CV_16S);
        Mat dst_mat = new Mat(imh, imw, CvType.CV_16S);

        // run
        src_mat.put(0, 0, srcdst_shorts);
        Imgproc.Sobel(src_mat, dst_mat, src_mat.depth(), dx, dy, ksize, scale, delta,
                INT_BORDERTYPE[indBorderType]);
        dst_mat.get(0, 0, srcdst_shorts);
    } else if (ip.getBitDepth() == 32) {
        // srcdst
        int imw = ip.getWidth();
        int imh = ip.getHeight();
        float[] srcdst_floats = (float[]) ip.getPixels();

        // mat
        Mat src_mat = new Mat(imh, imw, CvType.CV_32F);
        Mat dst_mat = new Mat(imh, imw, CvType.CV_32F);

        // run
        src_mat.put(0, 0, srcdst_floats);
        Imgproc.Sobel(src_mat, dst_mat, src_mat.depth(), dx, dy, ksize, scale, delta,
                INT_BORDERTYPE[indBorderType]);
        dst_mat.get(0, 0, srcdst_floats);
    } else {
        IJ.error("Wrong image format");
    }
}
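
In the example above the source depth is passed back to Imgproc.Sobel as ddepth, so the output Mat keeps the input format; for 8-bit input this clips negative gradient responses to zero. A minimal sketch of the common alternative, computing into a deeper destination and converting back afterwards (hypothetical variable names, assuming src is a CV_8UC1 Mat):

// Widen the destination depth to 16-bit signed so negative responses survive.
Mat grad16 = new Mat();
Imgproc.Sobel(src, grad16, CvType.CV_16S, 1, 0, 3, 1.0, 0.0, Core.BORDER_DEFAULT);

// Take absolute values and scale back to 8-bit for display or further processing.
Mat grad8 = new Mat();
Core.convertScaleAbs(grad16, grad8);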

From source file:OCV_Laplacian.java

License:Open Source License

@Override
public void run(ImageProcessor ip) {
    if (ip.getBitDepth() == 8) {
        // srcdst
        int imw = ip.getWidth();
        int imh = ip.getHeight();
        byte[] srcdst_bytes = (byte[]) ip.getPixels();

        // mat
        Mat src_mat = new Mat(imh, imw, CvType.CV_8UC1);
        Mat dst_mat = new Mat(imh, imw, CvType.CV_8UC1);

        // run
        src_mat.put(0, 0, srcdst_bytes);
        Imgproc.Laplacian(src_mat, dst_mat, dst_mat.depth(), ksize, scale, delta,
                INT_BORDERTYPE[indBorderType]);
        dst_mat.get(0, 0, srcdst_bytes);
    } else if (ip.getBitDepth() == 16) {
        // srcdst
        int imw = ip.getWidth();
        int imh = ip.getHeight();
        short[] srcdst_shorts = (short[]) ip.getPixels();

        // mat
        Mat src_mat = new Mat(imh, imw, CvType.CV_16S);
        Mat dst_mat = new Mat(imh, imw, CvType.CV_16S);

        // run
        src_mat.put(0, 0, srcdst_shorts);
        Imgproc.Laplacian(src_mat, dst_mat, dst_mat.depth(), ksize, scale, delta,
                INT_BORDERTYPE[indBorderType]);
        dst_mat.get(0, 0, srcdst_shorts);
    } else if (ip.getBitDepth() == 24) {
        // srcdst
        int imw = ip.getWidth();
        int imh = ip.getHeight();
        int[] srcdst_ints = (int[]) ip.getPixels();

        // mat
        Mat src_mat = new Mat(imh, imw, CvType.CV_8UC3);
        Mat dst_mat = new Mat(imh, imw, CvType.CV_8UC3);

        // run
        OCV__LoadLibrary.intarray2mat(srcdst_ints, src_mat, imw, imh);
        Imgproc.Laplacian(src_mat, dst_mat, dst_mat.depth(), ksize, scale, delta,
                INT_BORDERTYPE[indBorderType]);
        OCV__LoadLibrary.mat2intarray(dst_mat, srcdst_ints, imw, imh);
    } else if (ip.getBitDepth() == 32) {
        // srcdst
        int imw = ip.getWidth();
        int imh = ip.getHeight();
        float[] srcdst_floats = (float[]) ip.getPixels();

        // mat
        Mat src_mat = new Mat(imh, imw, CvType.CV_32F);
        Mat dst_mat = new Mat(imh, imw, CvType.CV_32F);

        // run
        src_mat.put(0, 0, srcdst_floats);
        Imgproc.Laplacian(src_mat, dst_mat, dst_mat.depth(), ksize, scale, delta,
                INT_BORDERTYPE[indBorderType]);
        dst_mat.get(0, 0, srcdst_floats);
    } else {
        IJ.error("Wrong image format");
    }
}

From source file:classes.ObjectFinder.java

private void backprojectObjectHistogram() {

    // Converting the current frame to HSV color space
    Mat hsvImage = new Mat(this.objectImage.size(), CvType.CV_8UC3);

    Imgproc.cvtColor(this.inputFrame, hsvImage, Imgproc.COLOR_BGR2HSV);

    // Getting the pixels that are in the specified ranges
    int hmin = this.thresholdsVector.get(0);
    int hmax = this.thresholdsVector.get(1);
    int smin = this.thresholdsVector.get(2);
    int smax = this.thresholdsVector.get(3);
    int vmin = this.thresholdsVector.get(4);
    int vmax = this.thresholdsVector.get(5);

    Mat maskImage = new Mat(this.objectImage.size(), CvType.CV_8UC1);
    Core.inRange(hsvImage, new Scalar(hmin, smin, vmin), new Scalar(hmax, smax, vmax), maskImage);

    // Taking the hue channel of the image
    Mat hueImage = new Mat(hsvImage.size(), hsvImage.depth());

    MatOfInt fromto = new MatOfInt(0, 0);
    Core.mixChannels(Arrays.asList(hsvImage), Arrays.asList(hueImage), fromto);

    // Backprojecting the histogram over that hue channel image
    MatOfFloat ranges = new MatOfFloat(0, 180);
    MatOfInt channels = new MatOfInt(0);

    Imgproc.calcBackProject(Arrays.asList(hueImage), channels, this.objectHistogram, this.backprojectionImage,
            ranges, 1);

    Core.bitwise_and(backprojectionImage, maskImage, backprojectionImage);

}

From source file:com.ibm.streamsx.edgevideo.device.edgent.JsonMat.java

License:Open Source License

public static JsonObject toJsonObject(Mat mat) {
    JsonObject jo = new JsonObject();
    jo.addProperty("width", mat.width());
    jo.addProperty("height", mat.height());
    jo.addProperty("type", mat.type());
    jo.addProperty("channels", mat.channels());
    jo.addProperty("depth", mat.depth());
    jo.addProperty("mat", base64MimeEncodeMat(mat));
    return jo;
}
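
On the receiving side the serialized header fields can be read back to rebuild an empty Mat of matching shape; note that type() already encodes both the depth and the channel count, so the separate depth and channels properties mainly serve as a sanity check. A minimal sketch, assuming Gson's JsonObject and leaving the pixel payload to the counterpart of base64MimeEncodeMat:

int width = jo.get("width").getAsInt();
int height = jo.get("height").getAsInt();
int type = jo.get("type").getAsInt();        // already encodes depth and channels
Mat restored = new Mat(height, width, type);
// Filling restored requires the inverse of base64MimeEncodeMat (not shown here).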

From source file:com.trandi.opentld.tld.PatchGenerator.java

License:Apache License

/**
 * Generates a randomly transformed patch of the input image: warps it by the
 * affine transform T, then optionally applies a Gaussian blur and additive
 * Gaussian noise.
 *
 * @param image source image
 * @param T affine transform passed to warpAffine
 * @param patch OUTPUT destination patch
 * @param patchSize size of the generated patch
 * @param rng random number generator (used to pick the blur kernel size)
 */
void generate(final Mat image, final Mat T, Mat patch, Size patchSize, final RNG rng) {
    patch.create(patchSize, image.type());
    if (backgroundMin != backgroundMax) {
        Core.randu(patch, backgroundMin, backgroundMax);
        // TODO if that null scalar OK or should it be new Scalar(0) ?
        Imgproc.warpAffine(image, patch, T, patchSize, Imgproc.INTER_LINEAR, Core.BORDER_TRANSPARENT, null);
    } else {
        Imgproc.warpAffine(image, patch, T, patchSize, Imgproc.INTER_LINEAR, Core.BORDER_CONSTANT,
                new Scalar(backgroundMin));
    }

    int ksize = randomBlur ? rng.nextInt() % 9 - 5 : 0;
    if (ksize > 0) {
        ksize = ksize * 2 + 1;
        Imgproc.GaussianBlur(patch, patch, new Size(ksize, ksize), 0, 0);
    }

    if (noiseRange > 0) {
        final Mat noise = new Mat(patchSize, image.type());
        int delta = (image.depth() == CvType.CV_8U ? 128 : (image.depth() == CvType.CV_16U ? 32768 : 0));
        Core.randn(noise, delta, noiseRange);

        // TODO this was different !!
        Core.addWeighted(patch, 1, noise, 1, -delta, patch);

        //           if( backgroundMin != backgroundMax )
        //               addWeighted(patch, 1, noise, 1, -delta, patch);
        //           else
        //           {
        //               for( int i = 0; i < patchSize.height; i++ )
        //               {
        //                   uchar* prow = patch.ptr<uchar>(i);
        //                   const uchar* nrow =  noise.ptr<uchar>(i);
        //                   for( int j = 0; j < patchSize.width; j++ )
        //                       if( prow[j] != backgroundMin )
        //                           prow[j] = saturate_cast<uchar>(prow[j] + nrow[j] - delta);
        //               }
        //           }
    }
}

From source file:cx.uni.jk.mms.iaip.filter.MatHelper.java

License:Open Source License

/**
 * Converts any Mat with 1/3/4 channels to an 8 bit BufferedImage with the
 * same number of channels. If the input Mat is not CvType.CV_8U it is
 * converted to such, with values clamped to [0..255].
 *
 * @param mat
 * @return the image
 */
public static BufferedImage convertMatTo8BitBufferedImage(Mat mat) {
    Mat byteMat;
    if (mat.depth() != CvType.CV_8U) {
        /** conversion to 8 bit Mat */
        byteMat = new MatOfByte();
        mat.convertTo(byteMat, CvType.CV_8U);
    } else {
        byteMat = mat; // just a reference!
    }

    /** encode to .bmp file in memory */
    MatOfByte fileMat = new MatOfByte();
    Highgui.imencode(".bmp", byteMat, fileMat);

    /** use file as input stream for BufferedImage */
    byte[] byteArray = fileMat.toArray();
    BufferedImage bufferedImage = null;
    try {
        InputStream in = new ByteArrayInputStream(byteArray);
        bufferedImage = ImageIO.read(in);
    } catch (Exception e) {
        logger.severe(e.getStackTrace().toString());
        System.exit(e.hashCode());
    }

    return bufferedImage;
}
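
A minimal usage sketch for the helper above, assuming the same OpenCV 2.4-style Highgui API used in the method and a hypothetical input file; ImageIO.write can throw IOException, which the caller has to handle:

// Load an image and force it to a non-8-bit depth to exercise the conversion branch.
Mat floatMat = Highgui.imread("input.png", Highgui.CV_LOAD_IMAGE_GRAYSCALE);
floatMat.convertTo(floatMat, CvType.CV_32F);

// Convert to an 8-bit BufferedImage and save a preview.
BufferedImage preview = MatHelper.convertMatTo8BitBufferedImage(floatMat);
ImageIO.write(preview, "png", new File("preview.png"));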

From source file:cx.uni.jk.mms.iaip.mat.MatModel.java

License:Open Source License

/**
 * Loads an image from a file into this model.
 *
 * The image file type must be supported by ImageIO and must be 8 bit gray
 * scale due to limitations of the used methods. The image must be of even
 * width and even height in order to be processed by OpenCV's DCT/IDCT
 * methods.
 * 
 * This implementation uses {@link Path} instead of {@link File} so that
 * images can also be read from inside the jar.
 * 
 * @param path
 * @throws IllegalSizeException
 * @throws IOException
 * @throws UnsupportedImageTypeException
 */
public void loadImage(Path path) throws IllegalSizeException, IOException, UnsupportedImageTypeException {
    this.logger
            .fine(String.format("MatModel \"%s\" loading image from path %s", this.getName(), path.toString()));

    Mat matRead = null;

    matRead = this.loadAndDecodeImageWithJavaImageIO(path);
    // matRead = loadImageWithJavaImageIOAndDecodeWithOpenCV(path);
    // matRead = loadImageWithOpenCV(path);

    this.logger.finer("image type = " + matRead.type());
    this.logger.finer("image channels = " + matRead.channels());
    this.logger.finer("image depth = " + matRead.depth());

    /** images must have size larger than 0x0 */
    if (matRead.width() <= 0 || matRead.height() <= 0) {
        throw new IllegalSizeException("Image must have width and height > 0.");
    }

    /** DCT/IDCT requires even width and even height */
    if (matRead.width() % 2 == 1 || matRead.height() % 2 == 1) {
        throw new IllegalSizeException("Image must have even width and even height to perform DCT/IDCT.");
    }

    /** we need a float mat to do DCT/IDCT */
    this.mat = matRead; // just a reference
    this.logger.finer("convert to internal format");
    this.mat.convertTo(this.mat, MAT_TYPE);
    this.logger.finer("image type = " + this.mat.type());
    this.logger.finer("image channels = " + this.mat.channels());
    this.logger.finer("image depth = " + this.mat.depth());

    /** remember last file loaded successfully */
    this.lastPath = path;
}

From source file:qupath.opencv.processing.PixelImageCV.java

License:Open Source License

public PixelImageCV(Mat mat) {
    // Extract dimensions and pixels
    this.width = (int) mat.size().width;
    this.height = (int) mat.size().height;

    pixels = new float[(int) mat.total()];
    if (mat.depth() == CvType.CV_32F)
        mat.get(0, 0, pixels);
    else {
        Mat mat2 = new Mat();
        mat.convertTo(mat2, CvType.CV_32F);
        mat2.get(0, 0, pixels);
    }
}

From source file:qupath.opencv.processing.PixelImageCV.java

License:Open Source License

public void put(Mat mat) {
    if (mat.depth() == CvType.CV_32F)
        mat.put(0, 0, pixels);
    else {
        Mat mat2 = new Mat(new Size(width, height), CvType.CV_32F);
        mat2.put(0, 0, pixels);
        mat2.convertTo(mat, mat.depth());
    }
}

From source file:syncleus.dann.data.video.PatchGenerator.java

License:Apache License

/**
 * Generates a randomly transformed patch of the input image: warps it by the
 * affine transform T, then optionally applies a Gaussian blur and additive
 * Gaussian noise.
 *
 * @param image source image
 * @param T affine transform passed to warpAffine
 * @param patch OUTPUT destination patch
 * @param patchSize size of the generated patch
 * @param rng random number generator (used to pick the blur kernel size)
 */
void generate(final Mat image, final Mat T, Mat patch, Size patchSize, final RNG rng) {
    patch.create(patchSize, image.type());
    if (backgroundMin != backgroundMax) {
        Core.randu(patch, backgroundMin, backgroundMax);
        // TODO if that null scalar OK or should it be new Scalar(0) ?
        Imgproc.warpAffine(image, patch, T, patchSize, Imgproc.INTER_LINEAR, Imgproc.BORDER_TRANSPARENT, null);
    } else {
        Imgproc.warpAffine(image, patch, T, patchSize, Imgproc.INTER_LINEAR, Imgproc.BORDER_CONSTANT,
                new Scalar(backgroundMin));
    }

    int ksize = randomBlur ? rng.nextInt() % 9 - 5 : 0;
    if (ksize > 0) {
        ksize = ksize * 2 + 1;
        Imgproc.GaussianBlur(patch, patch, new Size(ksize, ksize), 0, 0);
    }

    if (noiseRange > 0) {
        final Mat noise = new Mat(patchSize, image.type());
        int delta = (image.depth() == CvType.CV_8U ? 128 : (image.depth() == CvType.CV_16U ? 32768 : 0));
        Core.randn(noise, delta, noiseRange);

        // TODO this was different !!
        Core.addWeighted(patch, 1, noise, 1, -delta, patch);

        //           if( backgroundMin != backgroundMax )
        //               addWeighted(patch, 1, noise, 1, -delta, patch);
        //           else
        //           {
        //               for( int i = 0; i < patchSize.height; i++ )
        //               {
        //                   uchar* prow = patch.ptr<uchar>(i);
        //                   const uchar* nrow =  noise.ptr<uchar>(i);
        //                   for( int j = 0; j < patchSize.width; j++ )
        //                       if( prow[j] != backgroundMin )
        //                           prow[j] = saturate_cast<uchar>(prow[j] + nrow[j] - delta);
        //               }
        //           }
    }
}