Example usage for com.amazonaws.services.s3.model S3ObjectSummary getLastModified

List of usage examples for com.amazonaws.services.s3.model S3ObjectSummary getLastModified

Introduction

On this page you can find example usages of com.amazonaws.services.s3.model S3ObjectSummary getLastModified.

Prototype

public Date getLastModified() 

Document

Gets the date when, according to Amazon S3, this object was last modified.
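
Before the harvested examples below, here is a minimal, self-contained sketch of the pattern most of them share: page through a bucket listing and read each summary's last-modified date. This is only an illustration; the bucket name is a placeholder, and it assumes default credentials and region are configured for the AWS SDK for Java v1.

import java.util.Date;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;

public class LastModifiedListingSketch {
    public static void main(String[] args) {
        // Placeholder bucket; assumes credentials/region come from the default provider chain.
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        ObjectListing listing = s3.listObjects("example-bucket");
        while (true) {
            for (S3ObjectSummary summary : listing.getObjectSummaries()) {
                Date lastModified = summary.getLastModified();
                System.out.println(summary.getKey() + " was last modified at " + lastModified);
            }
            if (!listing.isTruncated()) {
                break;
            }
            // S3 returns at most 1,000 keys per response, so fetch the next page.
            listing = s3.listNextBatchOfObjects(listing);
        }
    }
}

Most of the examples that follow repeat this isTruncated()/next-page loop in one form or another before acting on getLastModified().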

Usage

From source file:cloudExplorer.BucketClass.java

License:Open Source License

String getObjectInfo(String key, String access_key, String secret_key, String bucket, String endpoint,
        String process) {
    AWSCredentials credentials = new BasicAWSCredentials(access_key, secret_key);
    AmazonS3 s3Client = new AmazonS3Client(credentials,
            new ClientConfiguration().withSignerOverride("S3SignerType"));
    s3Client.setEndpoint(endpoint);
    objectlist = null;

    try {

        ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucket);
        ObjectListing objectListing;
        do {
            objectListing = s3Client.listObjects(listObjectsRequest);

            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {

                if (process.contains("objectsize")) {
                    if (objectSummary.getKey().contains(key)) {
                        objectlist = String.valueOf(objectSummary.getSize());
                        break;
                    }
                }

                if (process.contains("objectdate")) {
                    if (objectSummary.getKey().contains(key)) {
                        objectlist = String.valueOf(objectSummary.getLastModified());
                        break;
                    }

                }
            }
            listObjectsRequest.setMarker(objectListing.getNextMarker());
        } while (objectListing.isTruncated());

    } catch (Exception listBucket) {
        mainFrame.jTextArea1.append("\n" + listBucket.getMessage());
    }

    return objectlist;
}
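
A note on the design above: the method lists the entire bucket just to report one key's size or date. When the exact key is known up front, a single HEAD request is the more direct option. The following is only a sketch, with placeholder bucket/key names and default SDK credentials, and is not part of the original source.

import java.util.Date;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class SingleObjectLastModifiedSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        // HEAD request for a single, fully known key; no bucket listing needed.
        ObjectMetadata meta = s3.getObjectMetadata("example-bucket", "path/to/object.txt");
        Date lastModified = meta.getLastModified();
        System.out.println("Last modified: " + lastModified);
    }
}

The listing approach above remains the right tool when only a partial key is known (the contains(key) match) or when many objects are inspected in one pass.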

From source file:com.altoukhov.svsync.fileviews.S3FileSpace.java

License:Apache License

@Override
protected Snapshot scan(List<Pattern> filters) {
    try {
        Map<String, FileSnapshot> files = new LinkedHashMap<>();
        Set<String> dirs = new HashSet<>();

        ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName)
                .withPrefix(rootPath.isEmpty() ? "" : rootPath + "/");

        ObjectListing objectListing;

        do {
            objectListing = listObjects(listObjectsRequest);
            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {

                if (isExcluded(objectSummary.getKey()) || isFiltered(objectSummary.getKey(), filters))
                    continue;

                if (objectSummary.getKey().endsWith("/")) {
                    String filePath = trimPath(objectSummary.getKey());
                    filePath = filePath.equals(rootPath) ? ""
                            : filePath.substring(rootPath.length() + (rootPath.isEmpty() ? 0 : 1));
                    dirs.add(filePath);
                    System.out
                            .println(String.format("Scanning s3://%s/%s", bucketName, objectSummary.getKey()));
                } else {
                    String fileName = objectSummary.getKey();
                    String filePath = "";

                    if (fileName.contains("/")) {
                        int fileNameSplitIndex = fileName.lastIndexOf("/");
                        filePath = fileName.substring(0, fileNameSplitIndex);
                        fileName = fileName.substring(fileNameSplitIndex + 1);

                        filePath = filePath.equals(rootPath) ? ""
                                : filePath.substring(rootPath.length() + (rootPath.isEmpty() ? 0 : 1));
                    }

                    if (filePath.equals("")) {
                        filePath = fileName;
                    } else {
                        filePath = filePath + "/" + fileName;
                    }

                    ObjectMetadata meta = getObjectInfo(objectSummary);
                    String lmd = meta.getUserMetaDataOf("lmd");

                    Date lastModified = (lmd == null) ? objectSummary.getLastModified()
                            : new Date(Long.parseLong(lmd));

                    FileSnapshot file = new FileSnapshot(fileName, objectSummary.getSize(),
                            new DateTime(lastModified), filePath);
                    files.put(filePath, file);
                }
            }
            listObjectsRequest.setMarker(objectListing.getNextMarker());
        } while (objectListing.isTruncated());

        Snapshot snapshot = new Snapshot(files, dirs);
        return snapshot;
    } catch (AmazonClientException ex) {
        System.out.println("Failed to scan file space");
        System.out.println(ex.getMessage());
    }

    return null;
}

From source file:com.appdynamics.monitors.s3.AWSS3Monitor.java

License:Apache License

/**
 * This method calls the Amazon web service to get the required S3 statistics,
 * sets the values based on the configured unit, and returns the result.
 *
 * @param buckets
 * @param amazonS3Client
 * @return Map<String, String>
 * @throws TaskExecutionException
 */
private Map<String, String> getS3Result(List<Bucket> buckets, AmazonS3Client amazonS3Client)
        throws TaskExecutionException {
    // Declaring result variables with default values
    long size = 0;
    long count = 0;
    Date lastModified = new Date(0);

    try {
        // Fetching all bucket names if passed buckets is null
        if (buckets == null) {
            logger.debug("Calling Webservice to list all buckets");
            buckets = amazonS3Client.listBuckets();
        }

        // Looping over all buckets
        for (Bucket bucket : buckets) {

            logger.debug("Getting data for bucket: " + bucket.getName());

            ObjectListing objectListing = null;

            do {
                // Getting objectListing while calling it for the first time
                if (objectListing == null) {
                    logger.debug("Calling Webservice to get objectlisting for first time");
                    objectListing = amazonS3Client.listObjects(bucket.getName());
                } else {
                    // Calling listNextBatchOfObjects if previous response
                    // is truncated
                    logger.debug("Calling Webservice to get objectlisting subsequent time");
                    objectListing = amazonS3Client.listNextBatchOfObjects(objectListing);
                }

                // Incrementing the count
                count += objectListing.getObjectSummaries().size();

                // Looping over all objects
                for (S3ObjectSummary s3ObjectSummary : objectListing.getObjectSummaries()) {
                    // Incrementing size
                    size += s3ObjectSummary.getSize();

                    // Setting last modified if lastModifiedDate is latest
                    if (lastModified.before(s3ObjectSummary.getLastModified())) {
                        lastModified = s3ObjectSummary.getLastModified();
                    }
                }
            }

            // Continuing till objectListing is complete
            while (objectListing.isTruncated());
        }

    } catch (AmazonS3Exception exception) {
        logger.error("AmazonS3Exception occurred", exception);
        throw new TaskExecutionException("Sending S3 metric failed due to AmazonS3Exception");
    }

    return getResultWithRequiredUnit(size, count, lastModified);
}

From source file:com.athena.dolly.web.aws.s3.S3Service.java

License:Open Source License

private S3Dto makeDto(String bucketName, S3ObjectSummary objectSummary) {
    S3Dto dto = new S3Dto();

    // Default value setting
    dto.setBucketName(bucketName);
    dto.setLastModified(date2String(objectSummary.getLastModified(), "yyyy/MM/dd a KK:mm"));
    dto.setSize((objectSummary.getSize() / 1024) + "K");
    dto.setDataType(checkDataType(objectSummary.getKey()));

    // Calculate position

    String current = "";
    String dataType = "file";
    String parent = "";

    String key = objectSummary.getKey();

    dto.setUrl(presignedUrl(bucketName, key).toString());
    // 1. lastIndexOf("/") == -1 is root directory's file
    int pos = key.lastIndexOf("/");
    if (pos == -1) { // root file

    } else { // This is directory or file. Apply filter
        current = key.substring(0, pos);
        key = key.substring(pos + 1);

        if (key.equals("")) {
            key = "..";
            dataType = "folder";
        }
        if (current.lastIndexOf("/") != -1) {
            parent = current.substring(0, current.lastIndexOf("/"));
        }
    }

    dto.setKey(key);
    dto.setDataType(dataType);
    dto.setParent(parent);

    return dto;
}

From source file:com.conductor.s3.S3InputFormatUtils.java

License:Apache License

/**
 * Efficiently gets the Hadoop {@link org.apache.hadoop.fs.FileStatus} for all S3 files under the provided
 * {@code dirs}.
 * 
 * @param s3Client
 *            s3 client
 * @param blockSize
 *            the block size
 * @param dirs
 *            the dirs to search through
 * @return the {@link org.apache.hadoop.fs.FileStatus} version of all S3 files under {@code dirs}
 */
static List<FileStatus> getFileStatuses(final AmazonS3 s3Client, final long blockSize, final Path... dirs) {
    final List<FileStatus> result = Lists.newArrayList();
    for (final Path dir : dirs) {
        // get bucket and prefix from path
        final String bucket = S3HadoopUtils.getBucketFromPath(dir.toString());
        final String prefix = S3HadoopUtils.getKeyFromPath(dir.toString());
        // list request
        final ListObjectsRequest req = new ListObjectsRequest().withMaxKeys(Integer.MAX_VALUE)
                .withBucketName(bucket).withPrefix(prefix);
        // recursively page through all objects under the path
        for (ObjectListing listing = s3Client.listObjects(req); listing.getObjectSummaries()
                .size() > 0; listing = s3Client.listNextBatchOfObjects(listing)) {
            for (final S3ObjectSummary summary : listing.getObjectSummaries()) {
                final Path path = new Path(
                        String.format("s3n://%s/%s", summary.getBucketName(), summary.getKey()));
                if (S3_PATH_FILTER.accept(path)) {
                    result.add(new FileStatus(summary.getSize(), false, 1, blockSize,
                            summary.getLastModified().getTime(), path));
                }
            }
            // don't need to check the next listing if this one is not truncated
            if (!listing.isTruncated()) {
                break;
            }
        }
    }
    return result;
}

From source file:com.epam.dlab.module.aws.S3FileList.java

License:Apache License

private boolean matchBillingRegexAndWasNotProcessed(S3ObjectSummary o) {
    return reportPattern.matcher(o.getKey()).matches()
            && !moduleData.wasProcessed(o.getKey(), o.getLastModified(), extractDatePrefix(reportPattern, o));
}

From source file:com.eucalyptus.objectstorage.providers.s3.S3ProviderClient.java

License:Open Source License

@Override
public ListBucketResponseType listBucket(ListBucketType request) throws S3Exception {
    ListBucketResponseType reply = request.getReply();
    User requestUser = getRequestUser(request);
    OsgInternalS3Client internalS3Client = null;
    try {
        internalS3Client = getS3Client(requestUser);
        AmazonS3Client s3Client = internalS3Client.getS3Client();
        ListObjectsRequest listRequest = new ListObjectsRequest();
        listRequest.setBucketName(request.getBucket());
        listRequest.setDelimiter(Strings.isNullOrEmpty(request.getDelimiter()) ? null : request.getDelimiter());
        listRequest.setMarker(Strings.isNullOrEmpty(request.getMarker()) ? null : request.getMarker());
        listRequest.setMaxKeys((request.getMaxKeys() == null ? null : Integer.parseInt(request.getMaxKeys())));
        listRequest.setPrefix(Strings.isNullOrEmpty(request.getPrefix()) ? null : request.getPrefix());

        ObjectListing response = s3Client.listObjects(listRequest);

        /* Non-optional, must have non-null values */
        reply.setName(request.getBucket());
        reply.setMaxKeys(response.getMaxKeys());
        reply.setMarker(response.getMarker() == null ? "" : response.getMarker());
        reply.setPrefix(response.getPrefix() == null ? "" : response.getPrefix());
        reply.setIsTruncated(response.isTruncated());

        /* Optional */
        reply.setNextMarker(response.getNextMarker());
        reply.setDelimiter(response.getDelimiter());
        if (reply.getContents() == null) {
            reply.setContents(new ArrayList<ListEntry>());
        }
        if (reply.getCommonPrefixesList() == null) {
            reply.setCommonPrefixesList(new ArrayList<CommonPrefixesEntry>());
        }

        for (S3ObjectSummary obj : response.getObjectSummaries()) {
            //Add entry, note that the canonical user is set based on requesting user, not returned user
            reply.getContents()
                    .add(new ListEntry(obj.getKey(),
                            DateFormatter.dateToHeaderFormattedString(obj.getLastModified()), obj.getETag(),
                            obj.getSize(), getCanonicalUser(requestUser), obj.getStorageClass()));
        }

        if (response.getCommonPrefixes() != null && response.getCommonPrefixes().size() > 0) {
            reply.setCommonPrefixesList(new ArrayList<CommonPrefixesEntry>());

            for (String s : response.getCommonPrefixes()) {
                reply.getCommonPrefixesList().add(new CommonPrefixesEntry(s));
            }
        }

        return reply;
    } catch (AmazonServiceException e) {
        LOG.debug("Error from backend", e);
        throw S3ExceptionMapper.fromAWSJavaSDK(e);
    }
}

From source file:com.facebook.presto.hive.PrestoS3FileSystem.java

License:Apache License

private Iterator<LocatedFileStatus> statusFromObjects(List<S3ObjectSummary> objects) {
    List<LocatedFileStatus> list = new ArrayList<>();
    for (S3ObjectSummary object : objects) {
        if (!object.getKey().endsWith("/")) {
            FileStatus status = new FileStatus(object.getSize(), false, 1, BLOCK_SIZE.toBytes(),
                    object.getLastModified().getTime(), qualifiedPath(new Path("/" + object.getKey())));
            list.add(createLocatedFileStatus(status));
        }
    }
    return list.iterator();
}

From source file:com.facebook.presto.kinesis.s3config.S3TableConfigClient.java

License:Apache License

/**
 * Connect to S3 directory to look for new or updated table definitions and then
 * update the map.
 */
protected void updateTablesFromS3() {
    long now = System.currentTimeMillis();

    List<S3ObjectSummary> objectList = this.getObjectSummaries();
    AmazonS3Client s3client = this.clientManager.getS3Client();
    AmazonS3URI directoryURI = new AmazonS3URI(this.bucketUrl);

    // Build map of "deltas" which in the end contains new definitions and deleted tables
    HashMap<String, KinesisStreamDescription> deltasMap = new HashMap<String, KinesisStreamDescription>();
    internalMapLock.readLock().lock();
    try {
        Iterator<String> keysIter = this.internalMap.keySet().iterator();
        while (keysIter.hasNext()) {
            deltasMap.put(keysIter.next(), dummyStreamDesc);
        }
    } finally {
        internalMapLock.readLock().unlock();
    }

    for (S3ObjectSummary objInfo : objectList) {
        if (!deltasMap.containsKey(objInfo.getKey()) || objInfo.getLastModified().getTime() >= this.lastCheck) {
            // New or updated file, so we must read from AWS
            try {
                if (objInfo.getKey().endsWith("/")) {
                    continue;
                }

                log.info("Getting : %s - %s", objInfo.getBucketName(), objInfo.getKey());
                S3Object object = s3client
                        .getObject(new GetObjectRequest(objInfo.getBucketName(), objInfo.getKey()));

                StringBuilder resultStr = new StringBuilder("");
                try (BufferedReader reader = new BufferedReader(
                        new InputStreamReader(object.getObjectContent()))) {
                    boolean hasMore = true;
                    while (hasMore) {
                        String line = reader.readLine();
                        if (line != null) {
                            resultStr.append(line);
                        } else {
                            hasMore = false;
                        }
                    }

                    KinesisStreamDescription table = streamDescriptionCodec.fromJson(resultStr.toString());

                    deltasMap.put(objInfo.getKey(), table);
                    log.info("Put table description into the map from %s : %s.%s", objInfo.getKey(),
                            table.getSchemaName(), table.getTableName());
                } catch (IOException iox) {
                    log.error("Problem reading input stream from object.", iox);
                } catch (IllegalArgumentException iax) {
                    // Note: this gets thrown by airlift json library when the input is malformed.
                    log.error("Invalid JSON table description.", iax);
                }
            } catch (AmazonServiceException ase) {
                StringBuilder sb = new StringBuilder();
                sb.append("Caught an AmazonServiceException, which means your request made it ");
                sb.append("to Amazon S3, but was rejected with an error response for some reason.\n");
                sb.append("Error Message:    " + ase.getMessage());
                sb.append("HTTP Status Code: " + ase.getStatusCode());
                sb.append("AWS Error Code:   " + ase.getErrorCode());
                sb.append("Error Type:       " + ase.getErrorType());
                sb.append("Request ID:       " + ase.getRequestId());
                log.error(sb.toString(), ase);
            } catch (AmazonClientException ace) {
                StringBuilder sb = new StringBuilder();
                sb.append("Caught an AmazonClientException, " + "which means the client encountered "
                        + "an internal error while trying to communicate" + " with S3, "
                        + "such as not being able to access the network.");
                sb.append("Error Message: " + ace.getMessage());
                log.error(sb.toString(), ace);
            }
        } else if (deltasMap.containsKey(objInfo.getKey())) {
            deltasMap.remove(objInfo.getKey());
        }
    } // end loop through object descriptions

    // Deltas: key pointing to dummy means delete, key pointing to other object means update.
    // This approach lets us delete and update while shortening the locked critical section.
    Iterator<Map.Entry<String, KinesisStreamDescription>> deltasIter = deltasMap.entrySet().iterator();
    internalMapLock.writeLock().lock();
    try {
        while (deltasIter.hasNext()) {
            Map.Entry<String, KinesisStreamDescription> entry = deltasIter.next();
            if (entry.getValue().getTableName().equals("__DUMMY__")) {
                this.internalMap.remove(entry.getKey());
            } else {
                this.internalMap.put(entry.getKey(), entry.getValue());
            }
        }
    } finally {
        internalMapLock.writeLock().unlock();
    }

    log.info("Completed updating table definitions from S3.");
    this.lastCheck = now;

    return;
}

From source file:com.github.lbroudoux.elasticsearch.river.s3.connector.S3Connector.java

License:Apache License

/**
 * Selects and retrieves summaries of objects in the bucket, under the given path prefix,
 * whose modification date is newer than lastScanTime.
 * @param lastScanTime Last modification date filter
 * @return Summaries of picked objects.
 */
public S3ObjectSummaries getObjectSummaries(String riverName, Long lastScanTime, String initialScanBookmark,
        boolean trackS3Deletions) {
    List<String> keys = new ArrayList<String>();
    List<S3ObjectSummary> result = new ArrayList<S3ObjectSummary>();
    boolean initialScan = initialScanBookmark != null;

    if (initialScan) {
        trackS3Deletions = false;
        logger.info("{}: resuming initial scan of {} from {}", riverName, pathPrefix, initialScanBookmark);
    } else {
        logger.info("{}: checking {} for changes since {}", riverName, pathPrefix, lastScanTime);
    }

    // Store the scan time to return before doing big queries...
    Long lastScanTimeToReturn = System.currentTimeMillis();

    if (lastScanTime == null || initialScan) {
        lastScanTime = 0L;
    }

    ListObjectsRequest request = new ListObjectsRequest().withBucketName(bucketName).withPrefix(pathPrefix)
            .withEncodingType("url");
    ObjectListing listing = s3Client.listObjects(request);
    //logger.debug("Listing: {}", listing);
    int keyCount = 0;
    boolean scanTruncated = false;
    String lastKey = null;

    while (!listing.getObjectSummaries().isEmpty() || listing.isTruncated()) {
        List<S3ObjectSummary> summaries = listing.getObjectSummaries();
        // if (logger.isDebugEnabled()) {
        //    logger.debug("Found {} items in this listObjects page", summaries.size());
        // }

        for (S3ObjectSummary summary : summaries) {
            if (logger.isDebugEnabled()) {
                logger.debug("Getting {} last modified on {}", summary.getKey(), summary.getLastModified());
            }

            if (trackS3Deletions) {
                keys.add(summary.getKey());
            }

            if (summary.getLastModified().getTime() > lastScanTime
                    && result.size() < MAX_NEW_RESULTS_TO_INDEX_ON_RUN) {
                // logger.debug("  Picked !");

                if (!initialScan || initialScanBookmark.compareTo(summary.getKey()) < 0) {
                    logger.debug("  Picked {}", summary.getKey());
                    result.add(summary);
                    lastKey = summary.getKey();
                }

            } else if (!scanTruncated && result.size() == MAX_NEW_RESULTS_TO_INDEX_ON_RUN) {
                logger.info("{}: only indexing up to {} new objects on this indexing run", riverName,
                        MAX_NEW_RESULTS_TO_INDEX_ON_RUN);
                // initialScan = true;
                scanTruncated = true;

                if (!trackS3Deletions) {
                    // No need to keep iterating through all keys if we aren't doing deleteOnS3 
                    break;
                }
            }

            keyCount += 1;
        }

        if (initialScan && scanTruncated && !trackS3Deletions) {
            break;
        }

        listing = s3Client.listNextBatchOfObjects(listing);
    }

    // Wrap results and latest scan time.
    if (scanTruncated) {
        logger.info("{}: scan truncated for speed: {} files ({} new)", riverName, keyCount, result.size());
    } else {
        logger.info("{}: complete scan: {} files ({} new)", riverName, keyCount, result.size());
    }

    return new S3ObjectSummaries(lastScanTimeToReturn, lastKey, scanTruncated, trackS3Deletions, result, keys);
}