Example usage for com.amazonaws.services.s3.model MultipartUploadListing getMultipartUploads

Introduction

On this page you can find example usage of com.amazonaws.services.s3.model.MultipartUploadListing.getMultipartUploads().

Prototype

public List<MultipartUpload> getMultipartUploads() 

Document

Returns the list of multipart uploads.
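
Before the project examples below, here is a minimal, self-contained sketch of the typical call pattern. The bucket name "my-bucket" is a placeholder, and the default-credential client setup is an assumption:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ListMultipartUploadsRequest;
import com.amazonaws.services.s3.model.MultipartUpload;
import com.amazonaws.services.s3.model.MultipartUploadListing;

public class ListUploadsSketch {
    public static void main(String[] args) {
        // Assumes credentials and region come from the default provider chain.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // "my-bucket" is a placeholder bucket name.
        ListMultipartUploadsRequest request = new ListMultipartUploadsRequest("my-bucket");

        MultipartUploadListing listing;
        do {
            listing = s3.listMultipartUploads(request);
            for (MultipartUpload upload : listing.getMultipartUploads()) {
                System.out.printf("key=%s uploadId=%s initiated=%s%n",
                        upload.getKey(), upload.getUploadId(), upload.getInitiated());
            }
            // A truncated listing is paged with the "next" markers.
            request.setKeyMarker(listing.getNextKeyMarker());
            request.setUploadIdMarker(listing.getNextUploadIdMarker());
        } while (listing.isTruncated());
    }
}

The examples that follow apply this same listing-and-iterating pattern to service replies, upload-existence checks, age-off sweeps, and abort loops.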

Usage

From source file: com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java

License: Open Source License
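
This Eucalyptus OSG provider method forwards the incoming ListMultipartUploads request to the backend S3 client, then copies each field of the resulting MultipartUploadListing, including every entry from getMultipartUploads(), into the service reply.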

@Override
public ListMultipartUploadsResponseType listMultipartUploads(ListMultipartUploadsType request)
        throws S3Exception {
    ListMultipartUploadsResponseType reply = request.getReply();
    User requestUser = getRequestUser(request);
    OsgInternalS3Client internalS3Client = null;

    String bucketName = request.getBucket();
    ListMultipartUploadsRequest listMultipartUploadsRequest = new ListMultipartUploadsRequest(bucketName);
    listMultipartUploadsRequest.setMaxUploads(request.getMaxUploads());
    listMultipartUploadsRequest.setKeyMarker(request.getKeyMarker());
    listMultipartUploadsRequest.setDelimiter(request.getDelimiter());
    listMultipartUploadsRequest.setPrefix(request.getPrefix());
    listMultipartUploadsRequest.setUploadIdMarker(request.getUploadIdMarker());
    try {
        internalS3Client = getS3Client(requestUser);
        AmazonS3Client s3Client = internalS3Client.getS3Client();

        MultipartUploadListing listing = s3Client.listMultipartUploads(listMultipartUploadsRequest);
        reply.setBucket(listing.getBucketName());
        reply.setKeyMarker(listing.getKeyMarker());
        reply.setUploadIdMarker(listing.getUploadIdMarker());
        reply.setNextKeyMarker(listing.getNextKeyMarker());
        reply.setNextUploadIdMarker(listing.getNextUploadIdMarker());
        reply.setMaxUploads(listing.getMaxUploads());
        reply.setIsTruncated(listing.isTruncated());
        reply.setPrefix(listing.getPrefix());
        reply.setDelimiter(listing.getDelimiter());

        List<String> commonPrefixes = listing.getCommonPrefixes();
        List<MultipartUpload> multipartUploads = listing.getMultipartUploads();

        List<com.eucalyptus.storage.msgs.s3.Upload> uploads = reply.getUploads();
        List<CommonPrefixesEntry> prefixes = reply.getCommonPrefixes();

        for (MultipartUpload multipartUpload : multipartUploads) {
            uploads.add(new com.eucalyptus.storage.msgs.s3.Upload(multipartUpload.getKey(),
                    multipartUpload.getUploadId(),
                    new Initiator(multipartUpload.getInitiator().getId(),
                            multipartUpload.getInitiator().getDisplayName()),
                    new CanonicalUser(multipartUpload.getOwner().getId(),
                            multipartUpload.getOwner().getDisplayName()),
                    multipartUpload.getStorageClass(), multipartUpload.getInitiated()));
        }
        for (String commonPrefix : commonPrefixes) {
            prefixes.add(new CommonPrefixesEntry(commonPrefix));
        }
        return reply;
    } catch (AmazonServiceException e) {
        LOG.debug("Error from backend", e);
        throw S3ExceptionMapper.fromAWSJavaSDK(e);
    }
}

From source file: com.handywedge.binarystore.store.aws.BinaryStoreManagerImpl.java

License: MIT License
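
This upload method writes payloads smaller than 5 MB with a single putObject call and streams larger ones as 5 MB multipart parts. When the running total exceeds the 5 GB cap, it breaks out and repeatedly issues aborts, polling getMultipartUploads() until the listing drains or 20 attempts have elapsed.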

@Override
public BinaryInfo upload(StorageInfo storage, BinaryInfo binary, InputStream inStream) throws StoreException {
    logger.debug("={}", storage);
    logger.debug("?={}", binary);

    AmazonS3 s3client = getS3Client(binary.getBucketName());

    ObjectMetadata oMetadata = new ObjectMetadata();
    oMetadata.setContentType(binary.getContentType());

    // Initiate the multipart upload.
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(binary.getBucketName(),
            binary.getFileName(), oMetadata);
    InitiateMultipartUploadResult initResponse = s3client.initiateMultipartUpload(initRequest);

    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        long written = IOUtils.copyLarge(inStream, baos, 0, BINARY_PART_SIZE_5MB);

        byte[] data = baos.toByteArray();
        InputStream awsInputStream = new ByteArrayInputStream(data);

        if (written < BINARY_PART_SIZE_5MB) {
            oMetadata.setContentLength(written);
            s3client.putObject(binary.getBucketName(), binary.getFileName(), awsInputStream, oMetadata);
        } else {
            int firstByte = 0;
            int partNumber = 1;
            boolean isFirstChunck = true;
            boolean overSizeLimit = false;
            List<PartETag> partETags = new ArrayList<PartETag>();
            InputStream firstChunck = new ByteArrayInputStream(data);
            PushbackInputStream chunckableInputStream = new PushbackInputStream(inStream, 1);

            long maxSize = BINARY_PART_SIZE_5MB * 1024; // 5 MB * 1024 = 5 GB
            String maxSizeStr = "5GB";
            String prefix = MDC.get("requestId");
            while (-1 != (firstByte = chunckableInputStream.read())) {
                long partSize = 0;
                chunckableInputStream.unread(firstByte);
                File tempFile = File.createTempFile(prefix.concat("-part").concat(String.valueOf(partNumber)),
                        null);
                tempFile.deleteOnExit();
                try (OutputStream os = new BufferedOutputStream(
                        new FileOutputStream(tempFile.getAbsolutePath()))) {

                    if (isFirstChunck) {
                        partSize = IOUtils.copyLarge(firstChunck, os, 0, (BINARY_PART_SIZE_5MB));
                        isFirstChunck = false;
                    } else {
                        partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (BINARY_PART_SIZE_5MB));
                    }
                    written += partSize;

                    if (written > maxSize) { // 5GB
                        overSizeLimit = true;
                        logger.warn("OVERSIZED FILE ({}). STARTING ABORT", written);
                        break;
                    }
                }

                FileInputStream chunk = new FileInputStream(tempFile);
                Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
                if (!isLastPart) {
                    chunckableInputStream.unread(firstByte);
                }

                oMetadata.setContentLength(partSize);

                UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(binary.getBucketName())
                        .withKey(binary.getFileName()).withUploadId(initResponse.getUploadId())
                        .withObjectMetadata(oMetadata).withInputStream(chunk).withPartSize(partSize)
                        .withPartNumber(partNumber).withLastPart(isLastPart);
                UploadPartResult result = s3client.uploadPart(uploadRequest);
                partETags.add(result.getPartETag());
                partNumber++;
            }

            if (overSizeLimit) {
                ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(
                        binary.getBucketName());
                MultipartUploadListing listResult = s3client.listMultipartUploads(listRequest);

                int timesIterated = 20;
                // loop and abort all the multipart uploads
                while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {
                    s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                            binary.getFileName(), initResponse.getUploadId()));
                    Thread.sleep(1000);
                    timesIterated--;
                    listResult = s3client.listMultipartUploads(listRequest);
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }
                if (timesIterated == 0) {
                    logger.warn("Files parts that couldn't be aborted in 20 seconds are:");
                    Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads()
                            .iterator();
                    while (multipartUploadIterator.hasNext()) {
                        logger.warn(multipartUploadIterator.next().getKey());
                    }
                }
                throw new StoreException(HttpStatus.SC_REQUEST_TOO_LONG, ErrorClassification.UPLOAD_TOO_LARGE,
                        maxSizeStr);
            } else {
                CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
                        binary.getBucketName(), binary.getFileName(), initResponse.getUploadId(), partETags);

                CompleteMultipartUploadResult comMPUResult = s3client.completeMultipartUpload(compRequest);
                logger.debug("CompleteMultipartUploadResult={}", comMPUResult);
            }
        }
    } catch (AmazonServiceException ase) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ase,
                binary.toString());
    } catch (AmazonClientException ace) {
        s3client.abortMultipartUpload(new AbortMultipartUploadRequest(binary.getBucketName(),
                binary.getFileName(), initResponse.getUploadId()));
        throw new StoreException(HttpStatus.SC_BAD_REQUEST, ErrorClassification.UPLOAD_FAIL, ace,
                binary.toString());
    } catch (IOException ioe) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, ioe,
                binary.toString());
    } catch (InterruptedException itre) {
        throw new StoreException(HttpStatus.SC_INTERNAL_SERVER_ERROR, ErrorClassification.UPLOAD_FAIL, itre,
                binary.toString());
    } finally {
        if (inStream != null) {
            try {
                inStream.close();
            } catch (Exception e) {
                // ignore failures while closing the stream
            }
        }
    }

    return getBinaryInfo(s3client, binary.getBucketName(), binary.getFileName());
}

From source file: org.apache.nifi.processors.aws.s3.PutS3Object.java

License: Apache License
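
NiFi's PutS3Object processor scans getMultipartUploads() to check whether a locally tracked upload ID still exists on the S3 side.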

protected boolean localUploadExistsInS3(final AmazonS3Client s3, final String bucket,
        final MultipartState localState) {
    ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucket);
    MultipartUploadListing listing = s3.listMultipartUploads(listRequest);
    for (MultipartUpload upload : listing.getMultipartUploads()) {
        if (upload.getUploadId().equals(localState.getUploadId())) {
            return true;
        }
    }
    return false;
}

From source file: org.apache.nifi.processors.aws.s3.PutS3Object.java

License: Apache License
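
This companion method aborts every upload returned by the age-off listing built in the next example.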

protected void ageoffS3Uploads(final ProcessContext context, final AmazonS3Client s3, final long now) {
    MultipartUploadListing oldUploads = getS3AgeoffListAndAgeoffLocalState(context, s3, now);
    for (MultipartUpload upload : oldUploads.getMultipartUploads()) {
        abortS3MultipartUpload(s3, oldUploads.getBucketName(), upload);
    }
}

From source file: org.apache.nifi.processors.aws.s3.PutS3Object.java

License: Apache License
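
Here the processor lists the bucket's in-progress uploads, collects those initiated before the age cutoff, and returns them wrapped in a new MultipartUploadListing via setMultipartUploads(). An AccessDenied error merely advances the age-off clock so the permission check is not repeated on every upload.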

protected MultipartUploadListing getS3AgeoffListAndAgeoffLocalState(final ProcessContext context,
        final AmazonS3Client s3, final long now) {
    final long ageoff_interval = context.getProperty(MULTIPART_S3_AGEOFF_INTERVAL)
            .asTimePeriod(TimeUnit.MILLISECONDS);
    final String bucket = context.getProperty(BUCKET).evaluateAttributeExpressions().getValue();
    final Long maxAge = context.getProperty(MULTIPART_S3_MAX_AGE).asTimePeriod(TimeUnit.MILLISECONDS);
    final long ageCutoff = now - maxAge;

    final List<MultipartUpload> ageoffList = new ArrayList<>();
    if ((lastS3AgeOff.get() < now - ageoff_interval) && s3BucketLock.tryLock()) {
        try {

            ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucket);
            MultipartUploadListing listing = s3.listMultipartUploads(listRequest);
            for (MultipartUpload upload : listing.getMultipartUploads()) {
                long uploadTime = upload.getInitiated().getTime();
                if (uploadTime < ageCutoff) {
                    ageoffList.add(upload);
                }
            }

            // ageoff any local state
            ageoffLocalState(ageCutoff);
            lastS3AgeOff.set(System.currentTimeMillis());
        } catch (AmazonClientException e) {
            if (e instanceof AmazonS3Exception && ((AmazonS3Exception) e).getStatusCode() == 403
                    && ((AmazonS3Exception) e).getErrorCode().equals("AccessDenied")) {
                getLogger().warn("AccessDenied checking S3 Multipart Upload list for {}: {} "
                        + "** The configured user does not have the s3:ListBucketMultipartUploads permission "
                        + "for this bucket, S3 ageoff cannot occur without this permission.  Next ageoff check "
                        + "time is being advanced by interval to prevent checking on every upload **",
                        new Object[] { bucket, e.getMessage() });
                lastS3AgeOff.set(System.currentTimeMillis());
            } else {
                getLogger().error("Error checking S3 Multipart Upload list for {}: {}",
                        new Object[] { bucket, e.getMessage() });
            }
        } finally {
            s3BucketLock.unlock();
        }
    }
    MultipartUploadListing result = new MultipartUploadListing();
    result.setBucketName(bucket);
    result.setMultipartUploads(ageoffList);
    return result;
}

From source file: org.apache.usergrid.services.assets.data.AWSBinaryStore.java

License: Apache License
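
Usergrid's binary store follows the same pattern as the handywedge example above: payloads under 5 MB go through putObject, larger ones through a multipart upload, and an oversized upload is aborted in a loop that polls getMultipartUploads() until the listing drains or 20 attempts have elapsed.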

@Override
public void write(final UUID appId, final Entity entity, InputStream inputStream) throws Exception {

    String uploadFileName = AssetUtils.buildAssetKey(appId, entity);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    long written = IOUtils.copyLarge(inputStream, baos, 0, FIVE_MB);

    byte[] data = baos.toByteArray();

    InputStream awsInputStream = new ByteArrayInputStream(data);

    final Map<String, Object> fileMetadata = AssetUtils.getFileMetadata(entity);
    fileMetadata.put(AssetUtils.LAST_MODIFIED, System.currentTimeMillis());

    String mimeType = AssetMimeHandler.get().getMimeType(entity, data);

    Boolean overSizeLimit = false;

    EntityManager em = emf.getEntityManager(appId);

    if (written < FIVE_MB) { // total smaller than 5mb

        ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(written);
        om.setContentType(mimeType);
        PutObjectResult result = null;
        result = getS3Client().putObject(bucketName, uploadFileName, awsInputStream, om);

        String md5sum = Hex.encodeHexString(Base64.decodeBase64(result.getContentMd5()));
        String eTag = result.getETag();

        fileMetadata.put(AssetUtils.CONTENT_LENGTH, written);

        if (md5sum != null)
            fileMetadata.put(AssetUtils.CHECKSUM, md5sum);
        fileMetadata.put(AssetUtils.E_TAG, eTag);

        em.update(entity);

    } else { // bigger than 5mb... dump 5 mb tmp files and upload from them
        written = 0; // reset: nothing has actually been written to S3 yet
        int partNumber = 1;
        int firstByte = 0;
        Boolean isFirstChunck = true;
        List<PartETag> partETags = new ArrayList<PartETag>();

        //get the s3 client in order to initialize the multipart request
        getS3Client();
        InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName,
                uploadFileName);
        InitiateMultipartUploadResult initResponse = getS3Client().initiateMultipartUpload(initRequest);

        InputStream firstChunck = new ByteArrayInputStream(data);
        PushbackInputStream chunckableInputStream = new PushbackInputStream(inputStream, 1);

        // determine max size file allowed, default to 50mb
        long maxSizeBytes = 50 * FileUtils.ONE_MB;
        String maxSizeMbString = properties.getProperty("usergrid.binary.max-size-mb", "50");
        if (StringUtils.isNumeric(maxSizeMbString)) {
            maxSizeBytes = Long.parseLong(maxSizeMbString) * FileUtils.ONE_MB;
        }

        // always allow files up to 5mb
        if (maxSizeBytes < 5 * FileUtils.ONE_MB) {
            maxSizeBytes = 5 * FileUtils.ONE_MB;
        }

        while (-1 != (firstByte = chunckableInputStream.read())) {
            long partSize = 0;
            chunckableInputStream.unread(firstByte);
            File tempFile = File.createTempFile(
                    entity.getUuid().toString().concat("-part").concat(String.valueOf(partNumber)), "tmp");

            tempFile.deleteOnExit();
            OutputStream os = null;
            try {
                os = new BufferedOutputStream(new FileOutputStream(tempFile.getAbsolutePath()));

                if (isFirstChunck) {
                    partSize = IOUtils.copyLarge(firstChunck, os, 0, (FIVE_MB));
                    isFirstChunck = false;
                } else {
                    partSize = IOUtils.copyLarge(chunckableInputStream, os, 0, (FIVE_MB));
                }
                written += partSize;

                if (written > maxSizeBytes) {
                    overSizeLimit = true;
                    logger.error("OVERSIZED FILE ({}). STARTING ABORT", written);
                    break;
                    //set flag here and break out of loop to run abort
                }
            } finally {
                IOUtils.closeQuietly(os);
            }

            FileInputStream chunk = new FileInputStream(tempFile);

            Boolean isLastPart = -1 == (firstByte = chunckableInputStream.read());
            if (!isLastPart)
                chunckableInputStream.unread(firstByte);

            UploadPartRequest uploadRequest = new UploadPartRequest().withUploadId(initResponse.getUploadId())
                    .withBucketName(bucketName).withKey(uploadFileName).withInputStream(chunk)
                    .withPartNumber(partNumber).withPartSize(partSize).withLastPart(isLastPart);
            partETags.add(getS3Client().uploadPart(uploadRequest).getPartETag());
            partNumber++;
        }

        //check for flag here then abort.
        if (overSizeLimit) {

            AbortMultipartUploadRequest abortRequest = new AbortMultipartUploadRequest(bucketName,
                    uploadFileName, initResponse.getUploadId());

            ListMultipartUploadsRequest listRequest = new ListMultipartUploadsRequest(bucketName);

            MultipartUploadListing listResult = getS3Client().listMultipartUploads(listRequest);

            // update the entity with the error.
            try {
                logger.error("starting update of entity due to oversized asset");
                fileMetadata.put("error", "Asset size is larger than max size of " + maxSizeBytes);
                em.update(entity);
            } catch (Exception e) {
                logger.error("Error updating entity with error message", e);
            }

            int timesIterated = 20;
            //loop and abort all the multipart uploads
            while (listResult.getMultipartUploads().size() != 0 && timesIterated > 0) {

                getS3Client().abortMultipartUpload(abortRequest);
                Thread.sleep(1000);
                timesIterated--;
                listResult = getS3Client().listMultipartUploads(listRequest);
                if (logger.isDebugEnabled()) {
                    logger.debug("Files that haven't been aborted are: {}",
                            listResult.getMultipartUploads().listIterator().toString());
                }

            }
            if (timesIterated == 0) {
                logger.error("Files parts that couldn't be aborted in 20 seconds are:");
                Iterator<MultipartUpload> multipartUploadIterator = listResult.getMultipartUploads().iterator();
                while (multipartUploadIterator.hasNext()) {
                    logger.error(multipartUploadIterator.next().getKey());
                }
            }
        } else {
            CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(bucketName,
                    uploadFileName, initResponse.getUploadId(), partETags);
            CompleteMultipartUploadResult amazonResult = getS3Client().completeMultipartUpload(request);
            fileMetadata.put(AssetUtils.CONTENT_LENGTH, written);
            fileMetadata.put(AssetUtils.E_TAG, amazonResult.getETag());
            em.update(entity);
        }
    }
}

From source file: org.finra.dm.dao.impl.S3DaoImpl.java

License: Apache License
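
This DAO method pages through a bucket's in-progress multipart uploads using key and upload-ID markers and aborts every upload initiated before the given threshold date, returning the number of uploads aborted.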

/**
 * {@inheritDoc}
 */
@Override
public int abortMultipartUploads(S3FileTransferRequestParamsDto params, Date thresholdDate) {
    AmazonS3Client s3Client = null;
    int abortedMultipartUploadsCount = 0;

    try {
        // Create an Amazon S3 client.
        s3Client = getAmazonS3(params);

        // List upload markers. Null implies initial list request.
        String uploadIdMarker = null;
        String keyMarker = null;

        boolean truncated;
        do {
            // Create the list multipart request, optionally using the last markers.
            ListMultipartUploadsRequest request = new ListMultipartUploadsRequest(params.getS3BucketName());
            request.setUploadIdMarker(uploadIdMarker);
            request.setKeyMarker(keyMarker);

            // Request the multipart upload listing.
            MultipartUploadListing uploadListing = s3Operations
                    .listMultipartUploads(TransferManager.appendSingleObjectUserAgent(request), s3Client);

            for (MultipartUpload upload : uploadListing.getMultipartUploads()) {
                if (upload.getInitiated().compareTo(thresholdDate) < 0) {
                    // Abort the upload.
                    s3Operations.abortMultipartUpload(
                            TransferManager.appendSingleObjectUserAgent(new AbortMultipartUploadRequest(
                                    params.getS3BucketName(), upload.getKey(), upload.getUploadId())),
                            s3Client);

                    // Log the information about the aborted multipart upload.
                    LOGGER.info(String.format(
                            "Aborted S3 multipart upload for \"%s\" object key initiated at [%s] in \"%s\" S3 bucket.",
                            upload.getKey(), upload.getInitiated(), params.getS3BucketName()));

                    // Increment the counter.
                    abortedMultipartUploadsCount++;
                }
            }

            // Determine whether there are more uploads to list.
            truncated = uploadListing.isTruncated();
            if (truncated) {
                // Record the next list markers.
                uploadIdMarker = uploadListing.getNextUploadIdMarker();
                keyMarker = uploadListing.getNextKeyMarker();
            }
        } while (truncated);
    } finally {
        // Shutdown the Amazon S3 client instance to release resources.
        if (s3Client != null) {
            s3Client.shutdown();
        }
    }

    return abortedMultipartUploadsCount;
}

From source file: org.finra.herd.dao.impl.S3DaoImpl.java

License: Apache License
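
The herd variant of the previous method follows the same pattern, advancing the pagination markers with getNextUploadIdMarker() and getNextKeyMarker() between pages.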

@Override
public int abortMultipartUploads(S3FileTransferRequestParamsDto params, Date thresholdDate) {
    // Create an Amazon S3 client.
    AmazonS3Client s3Client = getAmazonS3(params);
    int abortedMultipartUploadsCount = 0;

    try {
        // List upload markers. Null implies initial list request.
        String uploadIdMarker = null;
        String keyMarker = null;

        boolean truncated;
        do {
            // Create the list multipart request, optionally using the last markers.
            ListMultipartUploadsRequest request = new ListMultipartUploadsRequest(params.getS3BucketName());
            request.setUploadIdMarker(uploadIdMarker);
            request.setKeyMarker(keyMarker);

            // Request the multipart upload listing.
            MultipartUploadListing uploadListing = s3Operations
                    .listMultipartUploads(TransferManager.appendSingleObjectUserAgent(request), s3Client);

            for (MultipartUpload upload : uploadListing.getMultipartUploads()) {
                if (upload.getInitiated().compareTo(thresholdDate) < 0) {
                    // Abort the upload.
                    s3Operations.abortMultipartUpload(
                            TransferManager.appendSingleObjectUserAgent(new AbortMultipartUploadRequest(
                                    params.getS3BucketName(), upload.getKey(), upload.getUploadId())),
                            s3Client);

                    // Log the information about the aborted multipart upload.
                    LOGGER.info(
                            "Aborted S3 multipart upload. s3Key=\"{}\" s3BucketName=\"{}\" s3MultipartUploadInitiatedDate=\"{}\"",
                            upload.getKey(), params.getS3BucketName(), upload.getInitiated());

                    // Increment the counter.
                    abortedMultipartUploadsCount++;
                }
            }

            // Determine whether there are more uploads to list.
            truncated = uploadListing.isTruncated();
            if (truncated) {
                // Record the list markers.
                uploadIdMarker = uploadListing.getNextUploadIdMarker();
                keyMarker = uploadListing.getNextKeyMarker();
            }
        } while (truncated);
    } finally {
        // Shutdown the Amazon S3 client instance to release resources.
        s3Client.shutdown();
    }

    return abortedMultipartUploadsCount;
}