Example usage for com.amazonaws.services.rekognition.model DetectFacesResult getFaceDetails

Introduction

This page shows example usage of com.amazonaws.services.rekognition.model.DetectFacesResult.getFaceDetails().

Prototype


public java.util.List<FaceDetail> getFaceDetails() 

Documentation

Details of each face found in the image.
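
A minimal, self-contained sketch of calling DetectFaces and reading getFaceDetails(). The file name photo.jpg and the default-credentials client builder are assumptions made for illustration; the servlet below shows the same call against an AEM asset rendition.

import java.io.FileInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;

import com.amazonaws.services.rekognition.AmazonRekognition;
import com.amazonaws.services.rekognition.AmazonRekognitionClientBuilder;
import com.amazonaws.services.rekognition.model.Attribute;
import com.amazonaws.services.rekognition.model.BoundingBox;
import com.amazonaws.services.rekognition.model.DetectFacesRequest;
import com.amazonaws.services.rekognition.model.DetectFacesResult;
import com.amazonaws.services.rekognition.model.FaceDetail;
import com.amazonaws.services.rekognition.model.Image;
import com.amazonaws.util.IOUtils;

public class DetectFacesExample {

    public static void main(String[] args) throws IOException {
        // Load the image to analyze (photo.jpg is a placeholder path for this sketch).
        ByteBuffer imageBytes;
        try (FileInputStream in = new FileInputStream("photo.jpg")) {
            imageBytes = ByteBuffer.wrap(IOUtils.toByteArray(in));
        }

        // Client built from the default credential and region provider chain.
        AmazonRekognition rekognition = AmazonRekognitionClientBuilder.defaultClient();

        DetectFacesRequest request = new DetectFacesRequest()
                .withImage(new Image().withBytes(imageBytes))
                .withAttributes(Attribute.ALL);

        DetectFacesResult result = rekognition.detectFaces(request);

        // getFaceDetails() returns one FaceDetail per face found in the image.
        List<FaceDetail> faceDetails = result.getFaceDetails();
        for (FaceDetail face : faceDetails) {
            BoundingBox box = face.getBoundingBox();
            System.out.printf("face at left=%.2f top=%.2f width=%.2f height=%.2f (confidence %.1f%%)%n",
                    box.getLeft(), box.getTop(), box.getWidth(), box.getHeight(), face.getConfidence());
        }
    }
}

The bounding box values are ratios of the overall image width and height, which is why the servlet below multiplies them by the layer dimensions before cropping.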

Usage

From source file: com.razorfish.fluent.contentintelligence.core.servlets.SmartCropServlet.java

License: Apache License

@Override
protected final void doGet(final SlingHttpServletRequest request, final SlingHttpServletResponse response)
        throws ServletException, IOException {

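    // The first two URL selectors carry the target crop width and height.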
    String[] selectors = request.getRequestPathInfo().getSelectors();

    int sizeX = Integer.parseInt(selectors[0]);
    int sizeY = Integer.parseInt(selectors[1]);
    String extension = request.getRequestPathInfo().getExtension();
    String imagePath = request.getRequestPathInfo().getResourcePath().substring(0,
            request.getRequestPathInfo().getResourcePath().indexOf("."));

    log.info("received" + Arrays.toString(selectors) + " : " + extension + " : " + imagePath);

    String type = getImageType(extension);
    if (type == null) {
        response.sendError(404, "Image type not supported");
        return;
    }
    response.setContentType(type);

    ImageContext context = new ImageContext(request, type);

    Resource resource = context.request.getResourceResolver().getResource(imagePath + "." + extension);
    Asset asset = resource.adaptTo(Asset.class);

    log.info("asset : " + asset.getPath());

    log.info("resource : " + resource.getPath() + "type " + resource.getResourceType());
    Image image = new Image(resource);

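    // Default crop box covers the whole image; Rekognition bounding boxes are ratios of the image dimensions.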
    float x1 = 0, y1 = 0, x2 = 1, y2 = 1;

    if (isAsset(resource) || isRendition(resource)) {
        image.setFileReference(image.getPath());
        Rendition r = null;
        if (isAsset(resource)) {
            r = DamUtil.resolveToAsset(resource).getOriginal();
        } else {
            r = resource.adaptTo(Rendition.class);
        }
        byte[] data = new byte[(int) r.getSize()];

        int numbytesread = r.getStream().read(data);
        log.debug("Read : {} of {}", numbytesread, r.getSize());
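        // Send the original rendition bytes to Rekognition and request all facial attributes.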
        DetectFacesRequest dfrequest = new DetectFacesRequest()
                .withImage(
                        new com.amazonaws.services.rekognition.model.Image().withBytes(ByteBuffer.wrap(data)))
                .withAttributes(Attribute.ALL);

        AmazonRekognitionClient rekognitionClient = new AmazonRekognitionClient(
                new ProfileCredentialsProvider().getCredentials());
        rekognitionClient.setSignerRegionOverride("us-east-1");

        DetectFacesResult result = rekognitionClient.detectFaces(dfrequest);

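        // getFaceDetails() returns one FaceDetail per detected face; use the first face's bounding box as the crop target.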
        List<FaceDetail> faceDetails = result.getFaceDetails();
        if (!faceDetails.isEmpty()) {
            log.info("result " + Arrays.toString(faceDetails.toArray()));
            x1 = faceDetails.get(0).getBoundingBox().getLeft();
            y1 = faceDetails.get(0).getBoundingBox().getTop();
            x2 = x1 + faceDetails.get(0).getBoundingBox().getWidth();
            y2 = y1 + faceDetails.get(0).getBoundingBox().getHeight();

        }
    }

    if (!image.hasContent()) {
        response.sendError(404);
        return;
    }

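    // Load the image as a layer, clamp the requested size, crop around the detected face, then resize.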
    Layer layer;
    try {
        log.info("image : " + image.getMimeType());
        layer = image.getLayer(true, false, true);
        int ratioY = layer.getHeight();
        int ratioX = layer.getWidth();

        if (sizeX > ratioX) {
            sizeX = ratioX;
        }

        if (sizeY > ratioY) {
            sizeY = ratioY;
        }

        log.info("baseline : " + sizeX + "," + sizeY);
        x1 = (int) Math.ceil(x1 * ratioX);
        y1 = (int) Math.ceil(y1 * ratioY);
        x2 = (int) Math.ceil(x2 * ratioX);
        y2 = (int) Math.ceil(y2 * ratioY);

        log.info("detected : " + (int) x1 + "," + (int) y1 + "," + (int) x2 + "," + (int) y2);
        log.info("calculated : " + (int) (x2 - x1) + "," + (int) (y2 - y1));

        // check if the crop target is bigger than bounding box, if so crop at a larger size
        if ((x2 - x1) < sizeX) {
            x1 = x1 - ((sizeX - (x2 - x1)) / 2);
            x2 = x2 + ((sizeX - (x2 - x1)) / 2);
            log.info("x adj : " + (int) x1 + "," + (int) y1 + "," + (int) x2 + "," + (int) y2);
            log.info("x adj : " + (int) (x2 - x1) + "," + (int) (y2 - y1));
        }

        if ((y2 - y1) < sizeY) {
            y1 = y1 - ((sizeY - (y2 - y1)) / 2);
            y2 = y2 + ((sizeY - (y2 - y1)) / 2);

            log.info("y adj : " + (int) x1 + "," + (int) y1 + "," + (int) x2 + "," + (int) y2);
            log.info("y adj : " + (int) (x2 - x1) + "," + (int) (y2 - y1));
        }

        //ensure we are still within the image boundaries   
        if (x1 < 0) {
            x2 = x2 - x1;
            x1 = 0;
        }
        if (y1 < 0) {
            y2 = y2 - y1;
            y1 = 0;
        }
        if (x2 > ratioX) {
            x1 = x1 - (x2 - ratioX);
            x2 = ratioX;
        }
        if (y2 > ratioY) {
            y1 = y1 - (y2 - ratioY);
            y2 = ratioY;
        }

        //TODO - handle negative values for bounding box - http://docs.aws.amazon.com/rekognition/latest/dg/API_BoundingBox.html
        log.info("resolved : " + (int) x1 + "," + (int) y1 + "," + (int) x2 + "," + (int) y2);
        layer.crop(ImageHelper.getCropRect((int) x1 + "," + (int) y1 + "," + (int) x2 + "," + (int) y2,
                image.getPath()));

        //after cropping the face, resize to the target size if needed
        layer.resize(sizeX, sizeY);
        //layer.crop(ImageHelper.getCropRect("225,121,525,421", image.getPath()));

        double quality = image.getMimeType().equals("image/gif") ? 255 : 1.0;
        layer.write(image.getMimeType(), quality, response.getOutputStream());
    } catch (RepositoryException e) {
        log.error("Could not create layer" + e);
        e.printStackTrace();
    }

    response.flushBuffer();
}