Example usage for com.amazonaws.services.s3.model CopyObjectRequest setNewObjectMetadata

Introduction

On this page you can find example usages of the com.amazonaws.services.s3.model CopyObjectRequest setNewObjectMetadata method, drawn from open-source projects.

Prototype

public void setNewObjectMetadata(ObjectMetadata newObjectMetadata) 

Document

Sets the object metadata to use for the new, copied object.
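
Before the project examples below, here is a minimal, self-contained sketch of the call in isolation; the bucket names, keys, and metadata values are hypothetical placeholders. When a CopyObjectRequest carries new object metadata, the SDK issues the copy with a REPLACE metadata directive, so the target object receives exactly the supplied ObjectMetadata rather than a copy of the source's. A common idiom in the examples below is to copy an object onto itself with new metadata, updating its metadata in place.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CopyObjectRequest;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class CopyWithNewMetadataExample {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // Hypothetical bucket and key names, for illustration only.
        CopyObjectRequest request = new CopyObjectRequest("source-bucket", "source-key",
                "target-bucket", "target-key");

        // Setting new metadata turns the operation into a metadata REPLACE:
        // the target gets this metadata instead of the source object's.
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentType("application/json");
        meta.addUserMetadata("origin", "copy-example");
        request.setNewObjectMetadata(meta);

        s3.copyObject(request);
    }
}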

Usage

From source file: alluxio.underfs.s3a.S3AUnderFileSystem.java

License: Apache License

/**
 * Copies an object to another key.
 *
 * @param src the source key to copy
 * @param dst the destination key to copy to
 * @return true if the operation was successful, false otherwise
 */
private boolean copy(String src, String dst) {
    src = stripPrefixIfPresent(src);
    dst = stripPrefixIfPresent(dst);
    LOG.debug("Copying {} to {}", src, dst);
    // Retry the copy a few times, in case transient AWS internal errors occur during the copy.
    int retries = 3;
    for (int i = 0; i < retries; i++) {
        try {
            CopyObjectRequest request = new CopyObjectRequest(mBucketName, src, mBucketName, dst);
            if (Configuration.getBoolean(PropertyKey.UNDERFS_S3A_SERVER_SIDE_ENCRYPTION_ENABLED)) {
                ObjectMetadata meta = new ObjectMetadata();
                meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
                request.setNewObjectMetadata(meta);
            }
            mManager.copy(request).waitForCopyResult();
            return true;
        } catch (AmazonClientException | InterruptedException e) {
            LOG.error("Failed to copy file {} to {}", src, dst, e);
            if (i != retries - 1) {
                LOG.error("Retrying copying file {} to {}", src, dst);
            }
        }
    }
    LOG.error("Failed to copy file {} to {}, after {} retries", src, dst, retries);
    return false;
}

From source file: com.altoukhov.svsync.fileviews.S3FileSpace.java

License: Apache License

@Override
public boolean moveFile(String oldPath, String newPath) {

    try {
        ObjectMetadata meta = s3.getObjectMetadata(bucketName, toAbsoluteFilePath(oldPath));
        CopyObjectRequest copyRequest = new CopyObjectRequest(bucketName, toAbsoluteFilePath(oldPath),
                bucketName, toAbsoluteFilePath(newPath));
        copyRequest.setNewObjectMetadata(meta);
        s3.copyObject(copyRequest);
        s3.deleteObject(bucketName, toAbsoluteFilePath(oldPath));
    } catch (AmazonClientException ex) {
        return false;
    }

    return true;
}

From source file: com.emc.ecs.sync.target.S3Target.java

License: Open Source License

@Override
public void filter(SyncObject obj) {
    try {
        // skip the root of the bucket since it obviously exists
        if ("".equals(rootKey + obj.getRelativePath())) {
            log.debug("Target is bucket root; skipping");
            return;
        }

        // some sync objects lazy-load their metadata (i.e. AtmosSyncObject)
        // since this may be a timed operation, ensure it loads outside of other timed operations
        if (!(obj instanceof S3ObjectVersion) || !((S3ObjectVersion) obj).isDeleteMarker())
            obj.getMetadata();

        // Compute target key
        String targetKey = getTargetKey(obj);
        obj.setTargetIdentifier(AwsS3Util.fullPath(bucketName, targetKey));

        if (includeVersions) {
            ListIterator<S3ObjectVersion> sourceVersions = s3Source.versionIterator((S3SyncObject) obj);
            ListIterator<S3ObjectVersion> targetVersions = versionIterator(obj);

            boolean newVersions = false, replaceVersions = false;
            if (force) {
                replaceVersions = true;
            } else {

                // special workaround for bug where objects are listed, but they have no versions
                if (sourceVersions.hasNext()) {

                    // check count and etag/delete-marker to compare version chain
                    while (sourceVersions.hasNext()) {
                        S3ObjectVersion sourceVersion = sourceVersions.next();

                        if (targetVersions.hasNext()) {
                            S3ObjectVersion targetVersion = targetVersions.next();

                            if (sourceVersion.isDeleteMarker()) {

                                if (!targetVersion.isDeleteMarker())
                                    replaceVersions = true;
                            } else {

                                if (targetVersion.isDeleteMarker())
                                    replaceVersions = true;

                                else if (!sourceVersion.getETag().equals(targetVersion.getETag()))
                                    replaceVersions = true; // different checksum
                            }

                        } else if (!replaceVersions) { // source has new versions, but existing target versions are ok
                            newVersions = true;
                            sourceVersions.previous(); // back up one
                            putIntermediateVersions(sourceVersions, targetKey); // add any new intermediary versions (current is added below)
                        }
                    }

                    if (targetVersions.hasNext())
                        replaceVersions = true; // target has more versions

                    if (!newVersions && !replaceVersions) {
                        log.info("Source and target versions are the same. Skipping {}", obj.getRelativePath());
                        return;
                    }
                }
            }

            // something's off; must delete all versions of the object
            if (replaceVersions) {
                log.info(
                        "[{}]: version history differs between source and target; replacing target version history with that from source.",
                        obj.getRelativePath());

                // collect versions in target
                List<DeleteObjectsRequest.KeyVersion> deleteVersions = new ArrayList<>();
                while (targetVersions.hasNext())
                    targetVersions.next(); // move cursor to end
                while (targetVersions.hasPrevious()) { // go in reverse order
                    S3ObjectVersion version = targetVersions.previous();
                    deleteVersions.add(new DeleteObjectsRequest.KeyVersion(targetKey, version.getVersionId()));
                }

                // batch delete all versions in target
                log.debug("[{}]: deleting all versions in target", obj.getRelativePath());
                s3.deleteObjects(new DeleteObjectsRequest(bucketName).withKeys(deleteVersions));

                // replay version history in target
                while (sourceVersions.hasPrevious())
                    sourceVersions.previous(); // move cursor to beginning
                putIntermediateVersions(sourceVersions, targetKey);
            }

        } else { // normal sync (no versions)
            Date sourceLastModified = obj.getMetadata().getModificationTime();
            long sourceSize = obj.getMetadata().getContentLength();

            // Get target metadata.
            ObjectMetadata destMeta = null;
            try {
                destMeta = s3.getObjectMetadata(bucketName, targetKey);
            } catch (AmazonS3Exception e) {
                if (e.getStatusCode() != 404)
                    throw new RuntimeException("Failed to check target key '" + targetKey + "' : " + e, e);
            }

            if (!force && obj.getFailureCount() == 0 && destMeta != null) {

                // Check overwrite
                Date destLastModified = destMeta.getLastModified();
                long destSize = destMeta.getContentLength();

                if (destLastModified.equals(sourceLastModified) && sourceSize == destSize) {
                    log.info("Source and target the same.  Skipping {}", obj.getRelativePath());
                    return;
                }
                if (destLastModified.after(sourceLastModified)) {
                    log.info("Target newer than source.  Skipping {}", obj.getRelativePath());
                    return;
                }
            }
        }

        // at this point we know we are going to write the object
        // Put [current object version]
        if (obj instanceof S3ObjectVersion && ((S3ObjectVersion) obj).isDeleteMarker()) {

            // object has version history, but is currently deleted
            log.debug("[{}]: deleting object in target to replicate delete marker in source.",
                    obj.getRelativePath());
            s3.deleteObject(bucketName, targetKey);
        } else {
            putObject(obj, targetKey);

            // if object has new metadata after the stream (i.e. encryption checksum), we must update S3 again
            if (obj.requiresPostStreamMetadataUpdate()) {
                log.debug("[{}]: updating metadata after sync as required", obj.getRelativePath());
                CopyObjectRequest cReq = new CopyObjectRequest(bucketName, targetKey, bucketName, targetKey);
                cReq.setNewObjectMetadata(AwsS3Util.s3MetaFromSyncMeta(obj.getMetadata()));
                s3.copyObject(cReq);
            }
        }
    } catch (Exception e) {
        throw new RuntimeException("Failed to store object: " + e, e);
    }
}

From source file: com.openkm.util.backup.RepositoryS3Backup.java

License: Open Source License

/**
 * Performs a recursive repository content export with metadata
 */
private static ImpExpStats backupHelper(String token, String fldPath, AmazonS3 s3, String bucket,
        boolean metadata, Writer out, InfoDecorator deco)
        throws FileNotFoundException, PathNotFoundException, AccessDeniedException, ParseException,
        NoSuchGroupException, RepositoryException, IOException, DatabaseException {
    log.info("backup({}, {}, {}, {}, {}, {})", new Object[] { token, fldPath, bucket, metadata, out, deco });
    ImpExpStats stats = new ImpExpStats();
    DocumentModule dm = ModuleManager.getDocumentModule();
    FolderModule fm = ModuleManager.getFolderModule();
    MetadataAdapter ma = MetadataAdapter.getInstance(token);
    Gson gson = new Gson();

    for (Iterator<Document> it = dm.getChildren(token, fldPath).iterator(); it.hasNext();) {
        File tmpDoc = null;
        InputStream is = null;
        FileOutputStream fos = null;
        boolean upload = true;

        try {
            Document docChild = it.next();
            String path = docChild.getPath().substring(1);
            ObjectMetadata objMeta = new ObjectMetadata();

            if (Config.REPOSITORY_CONTENT_CHECKSUM) {
                if (exists(s3, bucket, path)) {
                    objMeta = s3.getObjectMetadata(bucket, path);

                    if (docChild.getActualVersion().getChecksum().equals(objMeta.getETag())) {
                        upload = false;
                    }
                }
            }

            if (upload) {
                tmpDoc = FileUtils.createTempFileFromMime(docChild.getMimeType());
                fos = new FileOutputStream(tmpDoc);
                is = dm.getContent(token, docChild.getPath(), false);
                IOUtils.copy(is, fos);
                PutObjectRequest request = new PutObjectRequest(bucket, path, tmpDoc);

                if (metadata) {
                    // Metadata
                    DocumentMetadata dmd = ma.getMetadata(docChild);
                    String json = gson.toJson(dmd);
                    objMeta.addUserMetadata("okm", json);
                }

                request.setMetadata(objMeta);
                s3.putObject(request);
                out.write(deco.print(docChild.getPath(), docChild.getActualVersion().getSize(), null));
                out.flush();
            } else {
                if (metadata) {
                    // Metadata
                    DocumentMetadata dmd = ma.getMetadata(docChild);
                    String json = gson.toJson(dmd);
                    objMeta.addUserMetadata("okm", json);

                    // Update object metadata
                    CopyObjectRequest copyObjReq = new CopyObjectRequest(bucket, path, bucket, path);
                    copyObjReq.setNewObjectMetadata(objMeta);
                    s3.copyObject(copyObjReq);
                }

                log.info("Don't need to upload document {}", docChild.getPath());
            }

            // Stats
            stats.setSize(stats.getSize() + docChild.getActualVersion().getSize());
            stats.setDocuments(stats.getDocuments() + 1);
        } finally {
            IOUtils.closeQuietly(is);
            IOUtils.closeQuietly(fos);
            FileUtils.deleteQuietly(tmpDoc);
        }
    }

    for (Iterator<Folder> it = fm.getChildren(token, fldPath).iterator(); it.hasNext();) {
        InputStream is = null;

        try {
            Folder fldChild = it.next();
            String path = fldChild.getPath().substring(1) + "/";
            is = new ByteArrayInputStream(new byte[0]);
            ObjectMetadata objMeta = new ObjectMetadata();
            objMeta.setContentLength(0);
            PutObjectRequest request = new PutObjectRequest(bucket, path, is, objMeta);

            // Metadata
            if (metadata) {
                FolderMetadata fmd = ma.getMetadata(fldChild);
                String json = gson.toJson(fmd);
                objMeta.addUserMetadata("okm", json);
            }

            request.setMetadata(objMeta);
            s3.putObject(request);

            ImpExpStats tmp = backupHelper(token, fldChild.getPath(), s3, bucket, metadata, out, deco);

            // Stats
            stats.setSize(stats.getSize() + tmp.getSize());
            stats.setDocuments(stats.getDocuments() + tmp.getDocuments());
            stats.setFolders(stats.getFolders() + tmp.getFolders() + 1);
            stats.setOk(stats.isOk() && tmp.isOk());
        } finally {
            IOUtils.closeQuietly(is);
        }
    }

    log.debug("backupHelper: {}", stats);
    return stats;
}

From source file: com.tango.BucketSyncer.KeyJobs.S32S3KeyCopyJob.java

License: Apache License

boolean keyCopied(ObjectMetadata sourceMetadata, AccessControlList objectAcl) {
    boolean copied = false;
    String key = summary.getKey();
    MirrorOptions options = context.getOptions();
    boolean verbose = options.isVerbose();
    int maxRetries = options.getMaxRetries();
    MirrorStats stats = context.getStats();
    for (int tries = 0; tries < maxRetries; tries++) {
        if (verbose) {
            log.info("copying (try # {}): {} to: {}", new Object[] { tries, key, keydest });
        }
        final CopyObjectRequest request = new CopyObjectRequest(options.getSourceBucket(), key,
                options.getDestinationBucket(), keydest);
        request.setNewObjectMetadata(sourceMetadata);
        if (options.isCrossAccountCopy()) {
            request.setCannedAccessControlList(CannedAccessControlList.BucketOwnerFullControl);
        } else {
            request.setAccessControlList(objectAcl);
        }
        try {
            stats.copyCount.incrementAndGet();
            client.copyObject(request);
            stats.bytesCopied.addAndGet(sourceMetadata.getContentLength());
            if (verbose) {
                log.info("successfully copied (on try #{}): {} to: {}", new Object[] { tries, key, keydest });
            }
            copied = true;
            break;
        } catch (AmazonS3Exception s3e) {
            // a 404 status here indicates a problem with the bucket name
            if (s3e.getStatusCode() == HttpStatus.SC_NOT_FOUND) {
                log.error("Failed to access S3 bucket. Check bucket name: ", s3e);
                System.exit(1);
            }
            log.error("s3 exception copying (try #{}) {} to: {}: {}",
                    new Object[] { tries, key, keydest, s3e });
        } catch (Exception e) {
            log.error("unexpected exception copying (try #{}) {} to: {}: {}",
                    new Object[] { tries, key, keydest, e });
        }
        try {
            Thread.sleep(10);
        } catch (InterruptedException e) {
            log.error("interrupted while waiting to retry key: {}: {}", key, e);
            return copied;
        }
    }
    return copied;
}

From source file: com.upplication.s3fs.S3FileSystemProvider.java

License: Open Source License

@Override
public void copy(Path source, Path target, CopyOption... options) throws IOException {
    Preconditions.checkArgument(source instanceof S3Path, "source must be an instance of %s",
            S3Path.class.getName());
    Preconditions.checkArgument(target instanceof S3Path, "target must be an instance of %s",
            S3Path.class.getName());

    if (isSameFile(source, target)) {
        return;
    }

    S3Path s3Source = (S3Path) source;
    S3Path s3Target = (S3Path) target;
    /*
     * Preconditions.checkArgument(!s3Source.isDirectory(),
     * "copying directories is not yet supported: %s", source); // TODO
     * Preconditions.checkArgument(!s3Target.isDirectory(),
     * "copying directories is not yet supported: %s", target); // TODO
     */
    ImmutableSet<CopyOption> actualOptions = ImmutableSet.copyOf(options);
    verifySupportedOptions(EnumSet.of(StandardCopyOption.REPLACE_EXISTING), actualOptions);

    if (!actualOptions.contains(StandardCopyOption.REPLACE_EXISTING)) {
        if (exists(s3Target)) {
            throw new FileAlreadyExistsException(format("target already exists: %s", target));
        }
    }

    AmazonS3Client client = s3Source.getFileSystem().getClient();

    final ObjectMetadata sourceObjMetadata = s3Source.getFileSystem().getClient()
            .getObjectMetadata(s3Source.getBucket(), s3Source.getKey());
    final S3MultipartOptions opts = props != null ? new S3MultipartOptions<>(props) : new S3MultipartOptions();
    final int chunkSize = opts.getChunkSize();
    final long length = sourceObjMetadata.getContentLength();

    if (length <= chunkSize) {

        CopyObjectRequest copyObjRequest = new CopyObjectRequest(s3Source.getBucket(), s3Source.getKey(),
                s3Target.getBucket(), s3Target.getKey());
        if (sourceObjMetadata.getSSEAlgorithm() != null) {
            ObjectMetadata targetObjectMetadata = new ObjectMetadata();
            targetObjectMetadata.setSSEAlgorithm(sourceObjMetadata.getSSEAlgorithm());
            copyObjRequest.setNewObjectMetadata(targetObjectMetadata);
        }

        client.copyObject(copyObjRequest);
    } else {
        client.multipartCopyObject(s3Source, s3Target, length, opts);
    }
}

From source file: org.apache.beam.sdk.io.aws.s3.S3FileSystem.java

License: Apache License

@VisibleForTesting
CopyObjectResult atomicCopy(S3ResourceId sourcePath, S3ResourceId destinationPath,
        ObjectMetadata sourceObjectMetadata) throws AmazonClientException {
    CopyObjectRequest copyObjectRequest = new CopyObjectRequest(sourcePath.getBucket(), sourcePath.getKey(),
            destinationPath.getBucket(), destinationPath.getKey());
    copyObjectRequest.setNewObjectMetadata(sourceObjectMetadata);
    copyObjectRequest.setStorageClass(options.getS3StorageClass());
    copyObjectRequest.setSourceSSECustomerKey(options.getSSECustomerKey());
    copyObjectRequest.setDestinationSSECustomerKey(options.getSSECustomerKey());
    return amazonS3.get().copyObject(copyObjectRequest);
}

From source file: org.apache.hadoop.fs.s3a.S3AFileSystem.java

License: Apache License

private void copyFile(String srcKey, String dstKey) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("copyFile " + srcKey + " -> " + dstKey);
    }

    TransferManagerConfiguration transferConfiguration = new TransferManagerConfiguration();
    transferConfiguration.setMultipartCopyPartSize(partSize);

    TransferManager transfers = new TransferManager(s3);
    transfers.setConfiguration(transferConfiguration);

    ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
    final ObjectMetadata dstom = srcom.clone();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }

    CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
    copyObjectRequest.setCannedAccessControlList(cannedACL);
    copyObjectRequest.setNewObjectMetadata(dstom);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            }
        }
    };

    Copy copy = transfers.copy(copyObjectRequest);
    copy.addProgressListener(progressListener);
    try {
        copy.waitForCopyResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    } finally {
        transfers.shutdownNow(false);
    }
}

From source file: org.apache.hadoop.fs.s3r.S3RFileSystem.java

License: Apache License

private void copyFile(String srcKey, String dstKey) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("copyFile " + srcKey + " -> " + dstKey);
    }

    ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
    final ObjectMetadata dstom = srcom.clone();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
    copyObjectRequest.setCannedAccessControlList(cannedACL);
    copyObjectRequest.setNewObjectMetadata(dstom);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            default:
                break;
            }
        }
    };

    Copy copy = transfers.copy(copyObjectRequest);
    copy.addProgressListener(progressListener);
    try {
        copy.waitForCopyResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    }
}

From source file: org.apache.jackrabbit.aws.ext.ds.S3Backend.java

License: Apache License

@Override
public boolean exists(DataIdentifier identifier, boolean touch) throws DataStoreException {
    long start = System.currentTimeMillis();
    String key = getKeyName(identifier);
    ObjectMetadata objectMetaData = null;
    boolean retVal = false;
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        objectMetaData = s3service.getObjectMetadata(bucket, key);
        if (objectMetaData != null) {
            retVal = true;
            if (touch) {
                CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
                copReq.setNewObjectMetadata(objectMetaData);
                s3service.copyObject(copReq);
                LOG.debug("[{}] touched took [{}] ms. ", identifier, (System.currentTimeMillis() - start));
            }
        } else {
            retVal = false;
        }

    } catch (AmazonServiceException e) {
        if (e.getStatusCode() == 404) {
            retVal = false;
        } else {
            throw new DataStoreException("Error occured to find exists for key [" + identifier.toString() + "]",
                    e);
        }
    } catch (Exception e) {
        throw new DataStoreException("Error occured to find exists for key  " + identifier.toString(), e);
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.debug("exists [{}]: [{}] took [{}] ms.",
            new Object[] { identifier, retVal, (System.currentTimeMillis() - start) });
    return retVal;
}