Example usage for com.amazonaws.services.s3.model MultipartUpload getInitiated

Introduction

On this page you can find example usage for com.amazonaws.services.s3.model MultipartUpload getInitiated.

Prototype

public Date getInitiated() 

Document

Returns the date at which this upload was initiated.
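
A minimal sketch of the call, shown before the full examples below. The bucket name "my-bucket" and the default client construction are assumptions made here for illustration and are not taken from any of the source files:

AmazonS3Client s3 = new AmazonS3Client();
MultipartUploadListing listing = s3.listMultipartUploads(new ListMultipartUploadsRequest("my-bucket"));
for (MultipartUpload upload : listing.getMultipartUploads()) {
    // getInitiated() returns a java.util.Date for each in-progress upload.
    System.out.println(upload.getKey() + " was initiated at " + upload.getInitiated());
}

In practice the returned date is usually compared against a cutoff to decide whether an unfinished upload is stale and should be aborted, as most of the examples below do.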

Usage

From source file: baldrickv.s3streamingtool.S3CleanupMultipart.java

License: Open Source License

public static void cleanup(S3StreamConfig config) throws Exception {

    AmazonS3Client s3 = config.getS3Client();
    String bucket = config.getS3Bucket();

    // List the in-progress multipart uploads for the bucket.
    ListMultipartUploadsRequest list_req = new ListMultipartUploadsRequest(bucket);

    List<MultipartUpload> list = s3.listMultipartUploads(list_req).getMultipartUploads();

    Scanner scan = new Scanner(System.in);

    // Show each upload's details and ask interactively whether to abort it.
    for (MultipartUpload mu : list) {
        System.out.println("-----------------------");
        System.out.println("  bucket: " + bucket);
        System.out.println("  key: " + mu.getKey());
        System.out.println("  uploadId: " + mu.getUploadId());
        System.out.println("  initiated at: " + mu.getInitiated());
        System.out.println("  initiated by: " + mu.getInitiator());
        System.out.println("-----------------------");

        System.out.print("Abort this upload [y|N]? ");
        String result = scan.nextLine().trim().toLowerCase();
        if (result.equals("y")) {
            AbortMultipartUploadRequest abort = new AbortMultipartUploadRequest(bucket, mu.getKey(),
                    mu.getUploadId());

            s3.abortMultipartUpload(abort);
            System.out.println("Aborted upload");
        } else {
            System.out.println("Leaving this one alone");

        }

    }

}

From source file: com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java

License: Open Source License

@Override
public ListMultipartUploadsResponseType listMultipartUploads(ListMultipartUploadsType request)
        throws S3Exception {
    ListMultipartUploadsResponseType reply = request.getReply();
    User requestUser = getRequestUser(request);
    OsgInternalS3Client internalS3Client = null;

    String bucketName = request.getBucket();
    ListMultipartUploadsRequest listMultipartUploadsRequest = new ListMultipartUploadsRequest(bucketName);
    listMultipartUploadsRequest.setMaxUploads(request.getMaxUploads());
    listMultipartUploadsRequest.setKeyMarker(request.getKeyMarker());
    listMultipartUploadsRequest.setDelimiter(request.getDelimiter());
    listMultipartUploadsRequest.setPrefix(request.getPrefix());
    listMultipartUploadsRequest.setUploadIdMarker(request.getUploadIdMarker());
    try {
        internalS3Client = getS3Client(requestUser);
        AmazonS3Client s3Client = internalS3Client.getS3Client();

        MultipartUploadListing listing = s3Client.listMultipartUploads(listMultipartUploadsRequest);
        reply.setBucket(listing.getBucketName());
        reply.setKeyMarker(listing.getKeyMarker());
        reply.setUploadIdMarker(listing.getUploadIdMarker());
        reply.setNextKeyMarker(listing.getNextKeyMarker());
        reply.setNextUploadIdMarker(listing.getNextUploadIdMarker());
        reply.setMaxUploads(listing.getMaxUploads());
        reply.setIsTruncated(listing.isTruncated());
        reply.setPrefix(listing.getPrefix());
        reply.setDelimiter(listing.getDelimiter());

        List<String> commonPrefixes = listing.getCommonPrefixes();
        List<MultipartUpload> multipartUploads = listing.getMultipartUploads();

        List<com.eucalyptus.storage.msgs.s3.Upload> uploads = reply.getUploads();
        List<CommonPrefixesEntry> prefixes = reply.getCommonPrefixes();

        for (MultipartUpload multipartUpload : multipartUploads) {
            uploads.add(new com.eucalyptus.storage.msgs.s3.Upload(multipartUpload.getKey(),
                    multipartUpload.getUploadId(),
                    new Initiator(multipartUpload.getInitiator().getId(),
                            multipartUpload.getInitiator().getDisplayName()),
                    new CanonicalUser(multipartUpload.getOwner().getId(),
                            multipartUpload.getOwner().getDisplayName()),
                    multipartUpload.getStorageClass(), multipartUpload.getInitiated()));
        }
        for (String commonPrefix : commonPrefixes) {
            prefixes.add(new CommonPrefixesEntry(commonPrefix));
        }
        return reply;
    } catch (AmazonServiceException e) {
        LOG.debug("Error from backend", e);
        throw S3ExceptionMapper.fromAWSJavaSDK(e);
    }
}

From source file: org.apache.nifi.processors.aws.s3.PutS3Object.java

License: Apache License

protected MultipartUploadListing getS3AgeoffListAndAgeoffLocalState(final ProcessContext context,
        final AmazonS3Client s3, final long now) {
    final long ageoff_interval = context.getProperty(MULTIPART_S3_AGEOFF_INTERVAL)
            .asTimePeriod(TimeUnit.MILLISECONDS);
    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions().getValue();
    final Long maxAge = context.getProperty(MULTIPART_S3_MAX_AGE).asTimePeriod(TimeUnit.MILLISECONDS);
    final long ageCutoff = now - maxAge;

    final List<MultipartUpload> ageoffList = new ArrayList<>();
    if ((lastS3AgeOff.get() < now - ageoff_interval) && s3BucketLock.tryLock()) {
        try {

            ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucket);
            MultipartUploadListing listing = s3.listMultipartUploads(listRequest);
            for (MultipartUpload upload : listing.getMultipartUploads()) {
                long uploadTime = upload.getInitiated().getTime();
                if (uploadTime < ageCutoff) {
                    ageoffList.add(upload);
                }
            }

            // ageoff any local state
            ageoffLocalState(ageCutoff);
            lastS3AgeOff.set(System.currentTimeMillis());
        } catch (AmazonClientException e) {
            if (e instanceof AmazonS3Exception && ((AmazonS3Exception) e).getStatusCode() == 403
                    && ((AmazonS3Exception) e).getErrorCode().equals("AccessDenied")) {
                getLogger().warn("AccessDenied checking S3 Multipart Upload list for {}: {} "
                        + "** The configured user does not have the s3:ListBucketMultipartUploads permission "
                        + "for this bucket, S3 ageoff cannot occur without this permission.  Next ageoff check "
                        + "time is being advanced by interval to prevent checking on every upload **",
                        new Object[] { bucket, e.getMessage() });
                lastS3AgeOff.set(System.currentTimeMillis());
            } else {
                getLogger().error("Error checking S3 Multipart Upload list for {}: {}",
                        new Object[] { bucket, e.getMessage() });
            }
        } finally {
            s3BucketLock.unlock();
        }
    }
    MultipartUploadListing result = new MultipartUploadListing();
    result.setBucketName(bucket);
    result.setMultipartUploads(ageoffList);
    return result;
}

From source file: org.apache.nifi.processors.aws.s3.PutS3Object.java

License: Apache License

protected void abortS3MultipartUpload(final AmazonS3Client s3, final String bucket,
        final MultipartUpload upload) {
    final String uploadKey = upload.getKey();
    final String uploadId = upload.getUploadId();
    final AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucket, uploadKey,
            uploadId);
    try {
        s3.abortMultipartUpload(abortRequest);
        getLogger().info("Aborting out of date multipart upload, bucket {} key {} ID {}, initiated {}",
                new Object[] { bucket, uploadKey, uploadId, logFormat.format(upload.getInitiated()) });
    } catch (AmazonClientException ace) {
        getLogger().info("Error trying to abort multipart upload from bucket {} with key {} and ID {}: {}",
                new Object[] { bucket, uploadKey, uploadId, ace.getMessage() });
    }
}

From source file: org.finra.dm.dao.impl.S3DaoImpl.java

License: Apache License

/**
 * {@inheritDoc}
 */
@Override
public int abortMultipartUploads(S3FileTransferRequestParamsDto params, Date thresholdDate) {
    AmazonS3Client s3Client = null;
    int abortedMultipartUploadsCount = 0;

    try {
        // Create an Amazon S3 client.
        s3Client = getAmazonS3(params);

        // List upload markers. Null implies initial list request.
        String uploadIdMarker = null;
        String keyMarker = null;

        boolean truncated;
        do {
            // Create the list multipart request, optionally using the last markers.
            ListMultipartUploadsRequest request = new ListMultipartUploadsRequest(params.getS3BucketName());
            request.setUploadIdMarker(uploadIdMarker);
            request.setKeyMarker(keyMarker);

            // Request the multipart upload listing.
            MultipartUploadListing uploadListing = s3Operations
                    .listMultipartUploads(TransferManager.appendSingleObjectUserAgent(request), s3Client);

            for (MultipartUpload upload : uploadListing.getMultipartUploads()) {
                if (upload.getInitiated().compareTo(thresholdDate) < 0) {
                    // Abort the upload.
                    s3Operations.abortMultipartUpload(
                            TransferManager.appendSingleObjectUserAgent(new AbortMultipartUploadRequest(
                                    params.getS3BucketName(), upload.getKey(), upload.getUploadId())),
                            s3Client);

                    // Log the information about the aborted multipart upload.
                    LOGGER.info(String.format(
                            "Aborted S3 multipart upload for \"%s\" object key initiated at [%s] in \"%s\" S3 bucket.",
                            upload.getKey(), upload.getInitiated(), params.getS3BucketName()));

                    // Increment the counter.
                    abortedMultipartUploadsCount++;
                }
            }

            // Determine whether there are more uploads to list.
            truncated = uploadListing.isTruncated();
            if (truncated) {
                // Record the next list markers for the follow-up request.
                uploadIdMarker = uploadListing.getNextUploadIdMarker();
                keyMarker = uploadListing.getNextKeyMarker();
            }
        } while (truncated);
    } finally {
        // Shutdown the Amazon S3 client instance to release resources.
        if (s3Client != null) {
            s3Client.shutdown();
        }
    }

    return abortedMultipartUploadsCount;
}

From source file: org.finra.herd.dao.impl.S3DaoImpl.java

License: Apache License

@Override
public int abortMultipartUploads(S3FileTransferRequestParamsDto params, Date thresholdDate) {
    // Create an Amazon S3 client.
    AmazonS3Client s3Client = getAmazonS3(params);
    int abortedMultipartUploadsCount = 0;

    try {
        // List upload markers. Null implies initial list request.
        String uploadIdMarker = null;
        String keyMarker = null;

        boolean truncated;
        do {
            // Create the list multipart request, optionally using the last markers.
            ListMultipartUploadsRequest request = new ListMultipartUploadsRequest(params.getS3BucketName());
            request.setUploadIdMarker(uploadIdMarker);
            request.setKeyMarker(keyMarker);

            // Request the multipart upload listing.
            MultipartUploadListing uploadListing = s3Operations
                    .listMultipartUploads(TransferManager.appendSingleObjectUserAgent(request), s3Client);

            for (MultipartUpload upload : uploadListing.getMultipartUploads()) {
                if (upload.getInitiated().compareTo(thresholdDate) < 0) {
                    // Abort the upload.
                    s3Operations.abortMultipartUpload(
                            TransferManager.appendSingleObjectUserAgent(new AbortMultipartUploadRequest(
                                    params.getS3BucketName(), upload.getKey(), upload.getUploadId())),
                            s3Client);

                    // Log the information about the aborted multipart upload.
                    LOGGER.info(
                            "Aborted S3 multipart upload. s3Key=\"{}\" s3BucketName=\"{}\" s3MultipartUploadInitiatedDate=\"{}\"",
                            upload.getKey(), params.getS3BucketName(), upload.getInitiated());

                    // Increment the counter.
                    abortedMultipartUploadsCount++;
                }
            }

            // Determine whether there are more uploads to list.
            truncated = uploadListing.isTruncated();
            if (truncated) {
                // Record the list markers.
                uploadIdMarker = uploadListing.getNextUploadIdMarker();
                keyMarker = uploadListing.getNextKeyMarker();
            }
        } while (truncated);
    } finally {
        // Shutdown the Amazon S3 client instance to release resources.
        s3Client.shutdown();
    }

    return abortedMultipartUploadsCount;
}

From source file: org.icgc.dcc.storage.server.repository.UploadCleanupService.java

License: Open Source License

private boolean isStale(MultipartUpload upload) {
    val started = upload.getInitiated().toInstant();
    val threshold = Instant.now().minus(expiration, DAYS);
    return started.isBefore(threshold);
}

From source file: org.icgc.dcc.storage.server.repository.UploadCleanupService.java

License: Open Source License

private static String formatUpload(MultipartUpload upload) {
    return String.format(
            "uploadId = %s, key = %s, initiated = %s, owner = %s, initiator = %s, storageClass = %s",
            upload.getUploadId(), upload.getKey(), upload.getInitiated(), upload.getOwner(),
            upload.getInitiator(), upload.getStorageClass());
}