Example usage for com.amazonaws.services.s3 AmazonS3 putObject

Introduction

On this page you can find example usage for com.amazonaws.services.s3 AmazonS3 putObject.

Prototype

public PutObjectResult putObject(String bucketName, String key, InputStream input, ObjectMetadata metadata)
        throws SdkClientException, AmazonServiceException;

Document

Uploads the specified input stream and object metadata to Amazon S3 under the specified bucket and key name.
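
A minimal sketch of a call (the bucket name, key, and payload are placeholders; AmazonS3ClientBuilder assumes SDK 1.11+):

AmazonS3 s3 = AmazonS3ClientBuilder.standard().build();

byte[] payload = "hello world".getBytes(StandardCharsets.UTF_8);
ObjectMetadata metadata = new ObjectMetadata();
// Setting the content length up front lets the client stream the upload
// instead of buffering the whole input in memory.
metadata.setContentLength(payload.length);

PutObjectResult result = s3.putObject("my-bucket", "path/to/key.txt",
        new ByteArrayInputStream(payload), metadata);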

Usage

From source file: ch.admin.isb.hermes5.tools.filebackup.FileBackup.java

License: Apache License

public void run(String bucketName, String accessKey, String secretKey, String source, String targetPrefix,
        Long retentionPeriod, String topicArn, String snsEndpoint, String s3Endpoint) {

    AmazonS3 s3 = s3(accessKey, secretKey, s3Endpoint);

    AmazonSNS sns = sns(accessKey, secretKey, snsEndpoint);

    List<String> errors = new ArrayList<String>();
    String[] list = new File(source).list();
    if (list == null) {
        // File.list() returns null when the source is not a readable directory.
        list = new String[0];
    }
    for (String string : list) {
        File file = new File(source + "/" + string);
        System.out.print(timestamp() + " Backing up " + file.getAbsolutePath() + " to " + bucketName + "/"
                + targetPrefix + string + "...");
        try {
            byte[] data = readFileToByteArray(file);
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentLength(data.length);
            s3.putObject(bucketName, targetPrefix + string, new ByteArrayInputStream(data), metadata);
            System.out.println("done");
            long lastModified = file.lastModified();
            long now = System.currentTimeMillis();
            if (retentionPeriod > 0 && differenceInDays(lastModified, now) > retentionPeriod) {
                System.out.println(timestamp() + " File " + source + "/" + string
                        + " is removed because it is older than " + retentionPeriod + " days.");
                boolean delete = file.delete();
                if (!delete) {
                    errors.add("Unable to delete " + file.getAbsolutePath());
                }
            }
        } catch (Exception e) {
            System.out.println("failed " + e.getMessage());
            errors.add(timestamp() + " Problem Backing up " + file.getAbsolutePath() + " to " + bucketName + "/"
                    + targetPrefix + string + "\n" + getStackTrace(e));
        }
    }

    if (errors.size() > 0) {
        StringBuilder sb = new StringBuilder();
        for (String string : errors) {
            sb.append(string).append("\n");
        }
        try {
            sendMessageThroughSNS(topicArn, sns, sb.toString(), "Problem with backup");
        } catch (Exception e) {
            System.out.println(timestamp() + "ERROR: unable to report issue " + sb.toString());
            e.printStackTrace();
        }
    }

}
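
Note that this example reads each file fully into memory in order to set the content length; for file sources the SDK also provides putObject(String bucketName, String key, File file), which determines the length itself and streams from disk.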

From source file: ch.hesso.master.sweetcity.utils.PictureUtils.java

License: Apache License

public static Key uploadPicture(Bitmap picture, GoogleAccountCredential googleCredential) {
    if (picture == null)
        return null;

    try {
        ClientConfiguration clientConfig = new ClientConfiguration();
        clientConfig.setProtocol(Protocol.HTTP);
        AmazonS3 s3Connection = new AmazonS3Client(AWS_CREDENTIALS, clientConfig);
        s3Connection.setEndpoint(ConstantsAWS.S3_END_POINT);

        ObjectMetadata pictureMetadata = new ObjectMetadata();

        String key = String.format(ConstantsAWS.S3_PICTURE_NAME_FORMAT,
                googleCredential.getSelectedAccountName(), Constants.DATE_FORMAT_IMAGE.format(new Date()));

        s3Connection.putObject(ConstantsAWS.S3_BUCKET_NAME, key, ImageUtils.bitmapToInputStream(picture),
                pictureMetadata);

        return new Key(key);
    } catch (Exception e) {
        Log.d(Constants.PROJECT_NAME, e.toString());
    }

    return null;
}

From source file: com.amediamanager.config.S3ConfigurationProvider.java

License: Apache License

@Override
public void persistNewProperty(String key, String value) {
    if (this.properties != null) {
        this.properties.put(key, value);
        AmazonS3 s3Client = new AmazonS3Client();
        try {
            s3Client.putObject(this.bucket, this.key,
                    IOUtils.toInputStream(this.propsToString(this.properties)), null);
        } catch (AmazonS3Exception ase) {
            LOG.error("Error persisting config from s3://{}/{}", new Object[] { this.bucket, this.key, ase });
        }
    } else {
        LOG.error("Could not persist new property because this.properties is null.");
    }
}

From source file: com.BoomPi.ImageResizeHandlerFromS3.java

License: Apache License

private String putResizedImageToS3(ImageProcess jpegImageProcess, String s3ObjectKey, int width, int height)
        throws IOException {
    ByteArrayOutputStream os = jpegImageProcess.resize(width, height).getOutPutStream();
    try (InputStream is = new ByteArrayInputStream(os.toByteArray())) {

        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(os.size());
        meta.setContentType(jpegImageProcess.getImageMime());

        String dstKey = String.join("_", "resized", s3ObjectKey);

        AmazonS3 s3Client = new AmazonS3Client();

        System.out.println("Writing to: " + dstBucket + "/" + dstKey);
        s3Client.putObject(dstBucket, dstKey, is, meta);
    }
    return "Ok";
}

From source file: com.cloud.utils.S3Utils.java

License: Apache License

public static boolean canReadWriteBucket(final ClientOptions clientOptions, final String bucketName) {

    assert clientOptions != null;
    assert isNotBlank(bucketName);

    try {

        final AmazonS3 client = acquireClient(clientOptions);

        final String fileContent = "testing put and delete";
        final byte[] contentBytes = fileContent.getBytes();
        final InputStream inputStream = new ByteArrayInputStream(contentBytes);
        final String key = UUID.randomUUID().toString() + ".txt";

        final ObjectMetadata metadata = new ObjectMetadata();
        // The content length must be the byte count, not the character count.
        metadata.setContentLength(contentBytes.length);

        client.putObject(bucketName, key, inputStream, metadata);
        client.deleteObject(bucketName, key);

        return true;

    } catch (AmazonClientException e) {

        return false;

    }

}

From source file: com.dustindoloff.s3websitedeploy.Main.java

License: Apache License

private static boolean upload(final AmazonS3 s3Client, final String bucket, final ZipFile zipFile) {
    boolean failed = false;
    final Enumeration<? extends ZipEntry> entries = zipFile.entries();
    while (entries.hasMoreElements()) {
        final ZipEntry entry = entries.nextElement();
        // Use a fresh ObjectMetadata per entry; each PutObjectRequest keeps a reference to it.
        final ObjectMetadata data = new ObjectMetadata();
        data.setContentLength(entry.getSize());
        try {
            s3Client.putObject(bucket, entry.getName(), zipFile.getInputStream(entry), data);
        } catch (final AmazonClientException | IOException e) {
            failed = true;
        }
    }
    return !failed;
}

From source file: com.easarrive.aws.plugins.common.service.impl.S3Service.java

License: Open Source License

/**
 * {@inheritDoc}
 */
@Override
public PutObjectResult putObject(AmazonS3 client, String bucketName, String key, InputStream input,
        ObjectMetadata metadata) {
    if (client == null) {
        return null;
    } else if (StringUtil.isEmpty(bucketName)) {
        return null;
    } else if (StringUtil.isEmpty(key)) {
        return null;
    } else if (input == null) {
        return null;
    }
    if (metadata == null) {
        metadata = new ObjectMetadata();
    }
    PutObjectResult result = null;
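    // Check-then-put: upload only when the key is absent. The check is not
    // atomic, so a concurrent writer could still create the object in between.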
    if (!client.doesObjectExist(bucketName, key)) {
        result = client.putObject(bucketName, key, input, metadata);
    }
    return result;
}

From source file: com.handywedge.binarystore.store.aws.BinaryStoreManagerImpl.java

License: MIT License

@Override
public BinaryInfo upload(StorageInfo storage, BinaryInfo binary, InputStream inStream) throws StoreException {
    logger.debug("={}", storage);
    logger.debug("?={}", binary);

    AmazonS3 s3client = getS3Client(binary.getBucketName());

    ObjectMetadata oMetadata = new ObjectMetadata();
    oMetadata.setContentType(binary.getContentType());

    // Initiate the multipart upload.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(binary.getBucketName(),
            binary.getFileName(), oMetadata);
    InitiateMultipartUploadResult initResponse = s3client.initiateMultipartUpload(initRequest);

    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        long written = IOUtils.copyLarge(inStream, baos, 0, BINARY_PART_SIZE_5MB);

        byte[] data = baos.toByteArray();
        InputStream awsInputStream = new ByteArrayInputStream(data);

        if (written < BINARY_PART_SIZE_5MB) {
            oMetadata.setContentLength(written);
            s3client.putObject(binary.getBucketName(), binary.getFileName(), awsInputStream, oMetadata);
        } else {
            int firstByte = 0;
            int partNumber = 1;
            boolean isFirstChunck = true;
            boolean overSizeLimit = false;
            List<PartETag> partETags = new ArrayList<PartETag>();
            InputStream firstChunck = new ByteArrayInputStream(data);
            PushbackInputStream chunckableInputStream = new PushbackInputStream(inStream, 1);

            long maxSize = BINARY_PART_SIZE_5MB * 1024L; // 5GB; multiply as long to avoid int overflow
            String maxSizeStr = "5GB";
            String prefix = MDC.get("requestId");
            while (-1 != (firstByte = chunckableInputStream.read())) {
                long partSize = 0;
                chunckableInputStream.unread(firstByte);
                File tempFile = File.createTempFile(prefix.concat("-part").concat(String.valueOf(partNumber)),
                        null);
                tempFile.deleteOnExit();
                try (OutputStream os = new BufferedOutputStream(
                        new FileOutputStream(tempFile.getAbsolutePath()))) {

                    if (isFirstChunck) {
                        partSize = IOUtils.copyLarge(firstChunck, os, 0, (BINARY_PART_SIZE_5MB));
                        isFirstChunck = false;
                    } else {
                        partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (BINARY_PART_SIZE_5MB));
                    }
                    written += partSize;

                    if (written > maxSize) { // 5GB
                        overSizeLimit = true;
                        logger.warn("OVERSIZED FILE ({}). STARTING ABORT", written);
                        break;
                    }
                }

                boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
                if (!isLastPart) {
                    chunckableInputStream.unread(firstByte);
                }

                oMetadata.setContentLength(partSize);

                // try-with-resources ensures the part's stream is closed even if the upload fails.
                try (FileInputStream chunk = new FileInputStream(tempFile)) {
                    UploadPartRequest uploadRequest = new UploadPartRequest()
                            .withBucketName(binary.getBucketName()).withKey(binary.getFileName())
                            .withUploadId(initResponse.getUploadId()).withObjectMetadata(oMetadata)
                            .withInputStream(chunk).withPartSize(partSize).withPartNumber(partNumber)
                            .withLastPart(isLastPart);
                    UploadPartResult result = s3client.uploadPart(uploadRequest);
                    partETags.add(result.getPartETag());
                }
                partNumber++;
            }

            if (overSizeLimit) {
                ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(
                        binary.getBucketName());
                MultipartUploadListing listResult = s3client.listMultipartUploads(listRequest);

                int timesIterated = 20;
                // loop and abort all the multipart uploads
                while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {
                    s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                            binary.getFileName(), initResponse.getUploadId()));
                    Thread.sleep(1000);
                    timesIterated--;
                    listResult = s3client.listMultipartUploads(listRequest);
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }
                if (timesIterated == 0) {
                    logger.warn("Files parts that couldn't be aborted in 20 seconds are:");
                    Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads()
                            .iterator();
                    while (multipartUploadIterator.hasNext()) {
                        logger.warn(multipartUploadIterator.next().getKey());
                    }
                }
                throw new StoreException(HttpStatus.SC_REQUEST_TOO_LONG, ErrorClassification.UPLOAD_TOO_LARGE,
                        maxSizeStr);
            } else {
                CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                        binary.getBucketName(), binary.getFileName(), initResponse.getUploadId(), partETags);

                CompleteMultipartUploadResult comMPUResult = s3client.completeMultipartUpload(compRequest);
                logger.debug("CompleteMultipartUploadResult={}", comMPUResult);
            }
        }
    } catch (AmazonServiceException ase) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ase,
                binary.toString());
    } catch (AmazonClientException ace) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ace,
                binary.toString());
    } catch (IOException ioe) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, ioe,
                binary.toString());
    } catch (InterruptedException itre) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, itre,
                binary.toString());
    } finally {
        if (inStream != null) {
            try {
                inStream.close();
            } catch (Exception e) {
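                // Ignore failures while closing the source stream.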
            }
        }
    }

    return getBinaryInfo(s3client, binary.getBucketName(), binary.getFileName());
}
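
For comparison, the SDK's TransferManager automates the part splitting, upload, and abort handling that this example performs by hand. A minimal sketch (the bucket, key, stream, and length variables are illustrative; TransferManagerBuilder assumes SDK 1.11+):

TransferManager tm = TransferManagerBuilder.standard().build();

ObjectMetadata meta = new ObjectMetadata();
meta.setContentLength(contentLength); // a known size lets the manager choose part sizes

Upload upload = tm.upload("my-bucket", "my-key", inputStream, meta);
upload.waitForCompletion(); // blocks until done; throws on failure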

From source file: com.kodemore.aws.s3.KmS3Uploader.java

License: Open Source License

/**
 * Upload the data from the input stream to the remote s3 repository.
 * The toPath (at s3) should NOT begin with a slash (/).
 */
public void upload(String bucketName, String toPath, InputStream is) {
    ObjectMetadata meta = new ObjectMetadata();

    AmazonS3 s3 = createClient();
    s3.putObject(bucketName, toPath, is, meta);
}
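
Because the ObjectMetadata here carries no content length, the client must buffer the entire stream in memory to compute it (the SDK logs a warning to that effect), so this pattern is best reserved for small payloads.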

From source file: gov.usgs.cida.iplover.util.ImageStorage.java

public static String save(byte[] parsedImage, String uuid) throws IOException {

    AmazonS3 s3 = prepS3Client();

    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(parsedImage.length);

    LOG.trace("Setting up image key.");
    //Build key, split by date uploaded
    Date d = new Date();
    String fname = uuid + ".jpg";
    String fileId = simp.format(d) + "/" + fname;
    String fileKey = KEY_BASE + "/" + fileId;

    s3.putObject(BUCKET_NAME, fileKey, new ByteArrayInputStream(parsedImage), metadata);

    LOG.trace("Image uploaded.");

    //The date directory and filename are the "unique key"
    return (fileId);
}