Example usage for org.opencv.imgproc Imgproc warpAffine

List of usage examples for org.opencv.imgproc Imgproc warpAffine

Introduction

On this page you can find example usage for org.opencv.imgproc Imgproc.warpAffine.

Prototype

public static void warpAffine(Mat src, Mat dst, Mat M, Size dsize, int flags, int borderMode,
            Scalar borderValue) 

Source Link

Usage

From source file:classes.TextExtractor.java

/**
 * Rotates the working image by {@code roiAngle} on an enlarged canvas, then crops
 * and saves the region that {@code roi} maps to, and runs text extraction on it.
 *
 * @param roi      region of interest in the un-rotated image
 * @param roiAngle rotation angle in degrees (as accepted by getRotationMatrix2D)
 * @throws Exception propagated from image saving / text extraction
 */
public void extractText(Rect roi, double roiAngle) throws Exception {

    Point roiTopLeft = roi.tl();

    // Axis-aligned canvas size that fully contains the rotated image.
    double radians = Math.toRadians(roiAngle);
    double sin = Math.abs(Math.sin(radians));
    double cos = Math.abs(Math.cos(radians));

    int newWidth = (int) (image.width() * cos + image.height() * sin);
    int newHeight = (int) (image.width() * sin + image.height() * cos);

    Point center = new Point(newWidth / 2, newHeight / 2);
    Size targetSize = new Size(newWidth, newHeight);

    Mat intermediateImage = new Mat(targetSize, image.type());

    // Center the original image inside the enlarged canvas.
    int offsetX = (newWidth - image.width()) / 2;
    int offsetY = (newHeight - image.height()) / 2;

    Point paddedTopLeft = new Point(roiTopLeft.x + offsetX, roiTopLeft.y + offsetY);

    Mat containerImage = intermediateImage.submat(offsetY, offsetY + image.height(), offsetX,
            offsetX + image.width());
    image.copyTo(containerImage);

    Mat rotationMatrix = Imgproc.getRotationMatrix2D(center, roiAngle, 1.0);

    // Where the ROI's top-left corner lands after rotation.
    Point transformedTopLeft = transformPoint(paddedTopLeft, rotationMatrix);

    Mat rotatedImage = new Mat();
    Imgproc.warpAffine(intermediateImage, rotatedImage, rotationMatrix, targetSize, Imgproc.INTER_LINEAR,
            Imgproc.BORDER_CONSTANT, new Scalar(0));

    ImageUtils.saveImage(rotatedImage, imageID + "_rotatedImage.png", request);

    // Clamp the transformed ROI to the rotated image bounds. The original code
    // only clamped width/height against the right/bottom edges; a top-left that
    // lands outside the image (e.g. negative coordinates) made the submat
    // constructor below throw.
    double clampedX = Math.max(0, Math.min(transformedTopLeft.x, rotatedImage.width()));
    double clampedY = Math.max(0, Math.min(transformedTopLeft.y, rotatedImage.height()));
    double adjustedWidth = Math.min(roi.size().width, rotatedImage.width() - clampedX);
    double adjustedHeight = Math.min(roi.size().height, rotatedImage.height() - clampedY);

    Rect newROI = new Rect(new Point(clampedX, clampedY), new Size(adjustedWidth, adjustedHeight));

    Mat extractedROI = new Mat(rotatedImage, newROI);

    String fileName = ImageUtils.saveImage(extractedROI, imageID + "_ROI.png", request);

    extractText(fileName);
}

From source file:com.trandi.opentld.tld.PatchGenerator.java

License:Apache License

/**
 * /*from  w w  w. j  av  a 2s . c  o m*/
 * @param image
 * @param T
 * @param patch OUTPUT
 * @param patchSize
 */
void generate(final Mat image, final Mat T, Mat patch, Size patchSize, final RNG rng) {
    patch.create(patchSize, image.type());
    if (backgroundMin != backgroundMax) {
        Core.randu(patch, backgroundMin, backgroundMax);
        // TODO if that null scalar OK or should it be new Scalar(0) ?
        Imgproc.warpAffine(image, patch, T, patchSize, Imgproc.INTER_LINEAR, Core.BORDER_TRANSPARENT, null);
    } else {
        Imgproc.warpAffine(image, patch, T, patchSize, Imgproc.INTER_LINEAR, Core.BORDER_CONSTANT,
                new Scalar(backgroundMin));
    }

    int ksize = randomBlur ? rng.nextInt() % 9 - 5 : 0;
    if (ksize > 0) {
        ksize = ksize * 2 + 1;
        Imgproc.GaussianBlur(patch, patch, new Size(ksize, ksize), 0, 0);
    }

    if (noiseRange > 0) {
        final Mat noise = new Mat(patchSize, image.type());
        int delta = (image.depth() == CvType.CV_8U ? 128 : (image.depth() == CvType.CV_16U ? 32768 : 0));
        Core.randn(noise, delta, noiseRange);

        // TODO this was different !!
        Core.addWeighted(patch, 1, noise, 1, -delta, patch);

        //           if( backgroundMin != backgroundMax )
        //               addWeighted(patch, 1, noise, 1, -delta, patch);
        //           else
        //           {
        //               for( int i = 0; i < patchSize.height; i++ )
        //               {
        //                   uchar* prow = patch.ptr<uchar>(i);
        //                   const uchar* nrow =  noise.ptr<uchar>(i);
        //                   for( int j = 0; j < patchSize.width; j++ )
        //                       if( prow[j] != backgroundMin )
        //                           prow[j] = saturate_cast<uchar>(prow[j] + nrow[j] - delta);
        //               }
        //           }
    }
}

From source file:logic.analyzer.AnalyzerIF.java

/**
 * Virtual (intended to be overridden by concrete analyzers).
 * Rotates the frame according to detected eye irises: re-tracks the eye templates,
 * computes the angle between eye centers, builds a rotation matrix and warps both
 * the color and gray frames so the eyes become horizontal.
 * @param in_baseDist reference inter-eye baseline distance
 * @param in_boundRect eye bounding-rectangle size used in the sanity ratio check -- TODO confirm units
 * @param in_out_trackRectArr tracked rectangles, updated in place by trackRectArr
 * @param out_trackTemplateMatArr OUTPUT tracking template matrices
 * @param in_pointLocationArr detected eye-center points (drawing assumes length 2)
 * @param in_color color used for the debug overlays
 * @return -1 if detected irises are not appropriate or computed eye centers angle is wrong, 
 * 0 if rotation matrix is not detected, 1 if success
 */
public int rotateFrameAndTrackTemplate(double in_baseDist, int in_boundRect, Rect[] in_out_trackRectArr,
        Mat[] out_trackTemplateMatArr, Point[] in_pointLocationArr, Scalar in_color) {
    //check if tracked objects lost location: the baseline/rect ratio must stay
    //within the configured thresholds, otherwise tracking has drifted
    double tmp = in_baseDist / (double) in_boundRect;

    if (tmp < Parameters.eyeRectAndBaseDiffMinThresh || tmp > Parameters.eyeRectAndBaseDiffMaxThresh) {

        LOG.warn("baseDst: condition FAIL");

        return -1;
    }

    //re-track the templates; abort if tracking fails
    if (!trackRectArr(container.grayFrame, container.origFrame, in_out_trackRectArr, out_trackTemplateMatArr,
            in_pointLocationArr, in_color))
        return -1;

    //save new centers to feature in container
    container.features.eyeBrowCenterPointArr = in_pointLocationArr;

    if (in_pointLocationArr.length == 2) {
        Drawer.drawRectanglePair(container.origFrame, in_out_trackRectArr[0], in_out_trackRectArr[1], in_color);
        Drawer.drawTrackedEyeCenters(container.origFrame, in_pointLocationArr[0], in_pointLocationArr[1]);
    }

    //rotate images: color for watching, gray for further processing 
    //(eye templates rotate by themselves)
    //NOTE(review): getRotationMat appears to update prevAlpha as a side effect;
    //the checks below rely on that -- confirm before reordering
    container.rotationMat = getRotationMat(container.origFrame, container.faceRect, in_pointLocationArr[0],
            in_pointLocationArr[1], prevAlpha);

    if (prevAlpha != null && prevAlpha.x < 0) {
        LOG.warn("PrevAlpha: RESET");
        prevAlpha = new Point(0.0, 0.0);
        return -1;
    }

    if (container.rotationMat == null) {
        LOG.warn("Rotation angle is out of +/- " + Parameters.maxIrisAngle);
        return 0;
    }

    //save rot angle to features in container (presumably prevAlpha.y holds the angle)
    container.features.faceRotAngle = prevAlpha.y;

    //warp both frames in place with the same rotation matrix
    Imgproc.warpAffine(container.origFrame, container.origFrame, container.rotationMat,
            container.origFrame.size(), Imgproc.INTER_LINEAR, Imgproc.BORDER_CONSTANT, new Scalar(0));

    Imgproc.warpAffine(container.grayFrame, container.grayFrame, container.rotationMat,
            container.grayFrame.size(), Imgproc.INTER_LINEAR, Imgproc.BORDER_CONSTANT, new Scalar(0));

    //recompute eyebrow interocular distance from the rotated bounding rects
    container.eyeBrowBaseDst = Math
            .abs(container.eyeBrowBoundRectArr[0].x + container.eyeBrowBoundRectArr[0].width / 2
                    - (container.eyeBrowBoundRectArr[1].x + container.eyeBrowBoundRectArr[1].width / 2));

    LOG.info("eyeBrowBaseDst = " + container.eyeBrowBaseDst);

    return 1;
}

From source file:samples.LWF.java

/**
 * Warps the triangle {@code from} of {@code mat} onto the triangle {@code to}
 * of the output canvas {@code lienzo}, masking the warp to the destination
 * triangle and accumulating it into the canvas.
 *
 * Reference: https://www.learnopencv.com/warp-one-triangle-to-another-using-opencv-c-python/
 *
 * @param mat         source image
 * @param from        3 source-triangle vertices as {x, y} pairs
 * @param to          3 destination-triangle vertices as {x, y} pairs
 * @param coeficients unused here -- kept for interface compatibility
 * @param lienzo      output canvas, accumulated into in place
 * @param escala      unused here -- kept for interface compatibility
 * @param gap         unused here -- kept for interface compatibility
 */
private static void affine(Mat mat, double[][] from, double[][] to, double[][] coeficients, Mat lienzo,
        double escala, double gap) {

    // Bounding rectangles of the source and destination triangles.
    Rect r1 = Imgproc.boundingRect(new MatOfPoint(new Point(from[0][0], from[0][1]),
            new Point(from[1][0], from[1][1]), new Point(from[2][0], from[2][1])));
    Rect r2 = Imgproc.boundingRect(new MatOfPoint(new Point(to[0][0], to[0][1]), new Point(to[1][0], to[1][1]),
            new Point(to[2][0], to[2][1])));

    // Triangle vertices expressed relative to their bounding rectangles.
    MatOfPoint2f tri1Cropped = new MatOfPoint2f(new Point(from[0][0] - r1.x, from[0][1] - r1.y),
            new Point(from[1][0] - r1.x, from[1][1] - r1.y), new Point(from[2][0] - r1.x, from[2][1] - r1.y));

    // fillConvexPoly needs integer points, getAffineTransform needs float points.
    MatOfPoint tri2CroppedInt = new MatOfPoint(new Point(to[0][0] - r2.x, to[0][1] - r2.y),
            new Point(to[1][0] - r2.x, to[1][1] - r2.y), new Point(to[2][0] - r2.x, to[2][1] - r2.y));

    MatOfPoint2f tri2Cropped = new MatOfPoint2f(new Point((to[0][0] - r2.x), (to[0][1] - r2.y)),
            new Point((to[1][0] - r2.x), (to[1][1] - r2.y)), new Point((to[2][0] - r2.x), (to[2][1] - r2.y)));

    // Apply warpImage to small rectangular patches.
    Mat img1Cropped = mat.submat(r1);

    // Given a pair of triangles, find the affine transform.
    Mat warpMat = Imgproc.getAffineTransform(tri1Cropped, tri2Cropped);

    // Apply the affine transform just found to the source patch.
    // BUG FIX: the previous call passed 0 as `flags`, INTER_LINEAR as
    // `borderMode`, and new Scalar(BORDER_TRANSPARENT) as the border *value*.
    // Correct order is warpAffine(src, dst, M, dsize, flags, borderMode,
    // borderValue); the C++ original uses INTER_LINEAR + BORDER_REFLECT_101.
    Mat img2Cropped = Mat.zeros(r2.height, r2.width, img1Cropped.type());
    Imgproc.warpAffine(img1Cropped, img2Cropped, warpMat, img2Cropped.size(), Imgproc.INTER_LINEAR,
            Core.BORDER_REFLECT_101, new Scalar(0));

    // Build a mask of the destination triangle (1 inside, 0 outside).
    Mat mask = Mat.zeros(r2.height, r2.width, CvType.CV_8UC3);
    Imgproc.fillConvexPoly(mask, tri2CroppedInt, new Scalar(1.0, 1.0, 1.0), 16, 0);

    // Keep only the triangular region and accumulate it into the canvas.
    Core.multiply(img2Cropped, mask, img2Cropped);
    Core.add(lienzo.submat(r2), img2Cropped, lienzo.submat(r2));
}

From source file:syncleus.dann.data.video.PatchGenerator.java

License:Apache License

/**
 * Renders an affine-transformed patch of {@code image} into {@code patch},
 * with optional random blur and additive Gaussian noise.
 *
 * @param image     source image
 * @param T         2x3 affine transform
 * @param patch     OUTPUT patch, re-created to patchSize and the image type
 * @param patchSize requested patch size
 * @param rng       random generator used for the blur-kernel draw
 */
void generate(final Mat image, final Mat T, Mat patch, Size patchSize, final RNG rng) {
    patch.create(patchSize, image.type());

    final boolean constantBackground = backgroundMin == backgroundMax;
    if (!constantBackground) {
        // Randomize the background first; the transparent border keeps those
        // pixels wherever the warp does not cover them.
        Core.randu(patch, backgroundMin, backgroundMax);
        // TODO confirm whether null is acceptable here or new Scalar(0) is needed
        Imgproc.warpAffine(image, patch, T, patchSize, Imgproc.INTER_LINEAR, Imgproc.BORDER_TRANSPARENT, null);
    } else {
        Imgproc.warpAffine(image, patch, T, patchSize, Imgproc.INTER_LINEAR, Imgproc.BORDER_CONSTANT,
                new Scalar(backgroundMin));
    }

    // Half-kernel drawn from [-13, 3] (Java % preserves sign); blur only when positive.
    int kernelHalf = randomBlur ? rng.nextInt() % 9 - 5 : 0;
    if (kernelHalf > 0) {
        int kernelSize = kernelHalf * 2 + 1;
        Imgproc.GaussianBlur(patch, patch, new Size(kernelSize, kernelSize), 0, 0);
    }

    if (noiseRange > 0) {
        final Mat noise = new Mat(patchSize, image.type());
        // Mid-range offset for unsigned depths so subtracting delta below
        // makes the noise zero-mean after the weighted add.
        final int delta = image.depth() == CvType.CV_8U ? 128
                : image.depth() == CvType.CV_16U ? 32768 : 0;
        Core.randn(noise, delta, noiseRange);
        // NOTE(review): the ported C++ applied noise only to non-background
        // pixels in the constant-background case; this port adds it everywhere.
        Core.addWeighted(patch, 1, noise, 1, -delta, patch);
    }
}