Usage examples for com.amazonaws.services.s3.model.CopyObjectRequest#setNewObjectMetadata
public void setNewObjectMetadata(ObjectMetadata newObjectMetadata)
From source file:org.apache.jackrabbit.aws.ext.ds.S3Backend.java
License: Apache License
private void write(DataIdentifier identifier, File file, boolean asyncUpload, AsyncUploadCallback callback) throws DataStoreException { String key = getKeyName(identifier); ObjectMetadata objectMetaData = null; long start = System.currentTimeMillis(); ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); try {// ww w .ja v a 2s . c om Thread.currentThread().setContextClassLoader(getClass().getClassLoader()); // check if the same record already exists try { objectMetaData = s3service.getObjectMetadata(bucket, key); } catch (AmazonServiceException ase) { if (ase.getStatusCode() != 404) { throw ase; } } if (objectMetaData != null) { long l = objectMetaData.getContentLength(); if (l != file.length()) { throw new DataStoreException( "Collision: " + key + " new length: " + file.length() + " old length: " + l); } LOG.debug("[{}]'s exists, lastmodified = [{}]", key, objectMetaData.getLastModified().getTime()); CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key); copReq.setNewObjectMetadata(objectMetaData); s3service.copyObject(copReq); LOG.debug("lastModified of [{}] updated successfully.", identifier); if (callback != null) { callback.onSuccess(new AsyncUploadResult(identifier, file)); } } if (objectMetaData == null) { try { // start multipart parallel upload using amazon sdk Upload up = tmx.upload(new PutObjectRequest(bucket, key, file)); // wait for upload to finish if (asyncUpload) { up.addProgressListener(new S3UploadProgressListener(up, identifier, file, callback)); LOG.debug("added upload progress listener to identifier [{}]", identifier); } else { up.waitForUploadResult(); LOG.debug("synchronous upload to identifier [{}] completed.", identifier); if (callback != null) { callback.onSuccess(new AsyncUploadResult(identifier, file)); } } } catch (Exception e2) { if (!asyncUpload) { callback.onAbort(new AsyncUploadResult(identifier, file)); } throw new DataStoreException("Could not upload " + key, e2); } } } finally { if 
(contextClassLoader != null) { Thread.currentThread().setContextClassLoader(contextClassLoader); } } LOG.debug("write of [{}], length=[{}], in async mode [{}], in [{}]ms", new Object[] { identifier, file.length(), asyncUpload, (System.currentTimeMillis() - start) }); }
From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java
License: Apache License
/**
 * Checks whether an object for {@code identifier} exists in the bucket and, when
 * {@code touch} is set, refreshes its last-modified time via an in-place copy.
 *
 * @param identifier record identifier the S3 key is derived from
 * @param touch      when {@code true}, also update the object's last-modified time
 * @return {@code true} if the object exists, {@code false} on 404/403
 * @throws DataStoreException on any other failure
 */
@Override
public boolean exists(DataIdentifier identifier, boolean touch) throws DataStoreException {
    final long start = System.currentTimeMillis();
    final String key = getKeyName(identifier);
    final ClassLoader previousLoader = Thread.currentThread().getContextClassLoader();
    boolean found = false;
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        ObjectMetadata metadata = s3service.getObjectMetadata(bucket, key);
        if (metadata != null) {
            found = true;
            if (touch) {
                // a same-source/same-target copy is how S3 refreshes the timestamp
                CopyObjectRequest copyRequest = new CopyObjectRequest(bucket, key, bucket, key);
                copyRequest.setNewObjectMetadata(metadata);
                Copy inPlaceCopy = tmx.copy(s3ReqDecorator.decorate(copyRequest));
                inPlaceCopy.waitForCopyResult();
                LOG.debug("[{}] touched took [{}] ms. ", identifier, (System.currentTimeMillis() - start));
            }
        }
    } catch (AmazonServiceException e) {
        // 404 (missing) and 403 (no access) both count as "does not exist"
        if (e.getStatusCode() != 404 && e.getStatusCode() != 403) {
            throw new DataStoreException(
                    "Error occured to find exists for key [" + identifier.toString() + "]", e);
        }
        found = false;
    } catch (Exception e) {
        throw new DataStoreException("Error occured to find exists for key " + identifier.toString(), e);
    } finally {
        if (previousLoader != null) {
            Thread.currentThread().setContextClassLoader(previousLoader);
        }
    }
    LOG.debug("exists [{}]: [{}] took [{}] ms.",
            new Object[] { identifier, found, (System.currentTimeMillis() - start) });
    return found;
}
From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java
License: Apache License
/**
 * Refreshes the last-modified time of the object for {@code identifier} when its
 * current last-modified time is older than {@code minModifiedDate}.
 *
 * @param identifier      record identifier the S3 key is derived from
 * @param minModifiedDate threshold in milliseconds; {@code <= 0} disables touching
 * @throws DataStoreException if the touch fails
 */
@Override
public void touch(DataIdentifier identifier, long minModifiedDate) throws DataStoreException {
    ClassLoader previousLoader = Thread.currentThread().getContextClassLoader();
    try {
        final long start = System.currentTimeMillis();
        final String key = getKeyName(identifier);
        boolean touchRequired = minModifiedDate > 0 && minModifiedDate > getLastModified(identifier);
        if (touchRequired) {
            // copy the object onto itself with empty metadata to bump the timestamp
            CopyObjectRequest copyRequest = new CopyObjectRequest(bucket, key, bucket, key);
            copyRequest.setNewObjectMetadata(new ObjectMetadata());
            Copy inPlaceCopy = tmx.copy(s3ReqDecorator.decorate(copyRequest));
            inPlaceCopy.waitForCompletion();
            LOG.debug("[{}] touched. time taken [{}] ms ",
                    new Object[] { identifier, (System.currentTimeMillis() - start) });
        } else {
            LOG.trace("[{}] touch not required. time taken [{}] ms ",
                    new Object[] { identifier, (System.currentTimeMillis() - start) });
        }
    } catch (Exception e) {
        throw new DataStoreException("Error occured in touching key [" + identifier.toString() + "]", e);
    } finally {
        if (previousLoader != null) {
            Thread.currentThread().setContextClassLoader(previousLoader);
        }
    }
}
From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3Backend.java
License: Apache License
/**
 * Writes {@code file} to S3 under the key derived from {@code identifier}.
 * If an object with the same key already exists, its length is verified and its
 * last-modified time is refreshed via an in-place server-side copy; otherwise the
 * file is uploaded through the TransferManager, either asynchronously (completion
 * reported via a progress listener) or synchronously.
 *
 * @param identifier  record identifier the S3 key is derived from
 * @param file        local file whose contents are stored
 * @param asyncUpload when {@code true}, return immediately and report completion
 *                    through {@code callback}; otherwise block until the upload finishes
 * @param callback    completion/abort callback; may be {@code null}
 * @throws DataStoreException on a length collision or any upload failure
 */
private void write(DataIdentifier identifier, File file, boolean asyncUpload, AsyncUploadCallback callback)
        throws DataStoreException {
    String key = getKeyName(identifier);
    ObjectMetadata objectMetaData = null;
    long start = System.currentTimeMillis();
    // Switch the thread-context class loader for the duration of the SDK calls;
    // restored in the finally block below.
    ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        // check if the same record already exists; 404/403 both mean "treat as absent"
        try {
            objectMetaData = s3service.getObjectMetadata(bucket, key);
        } catch (AmazonServiceException ase) {
            if (!(ase.getStatusCode() == 404 || ase.getStatusCode() == 403)) {
                throw ase;
            }
        }
        if (objectMetaData != null) {
            // record exists: lengths must match, then refresh its last-modified time
            long l = objectMetaData.getContentLength();
            if (l != file.length()) {
                throw new DataStoreException(
                        "Collision: " + key + " new length: " + file.length() + " old length: " + l);
            }
            LOG.debug("[{}]'s exists, lastmodified = [{}]", key, objectMetaData.getLastModified().getTime());
            // copy the object onto itself (decorated, e.g. for encryption) to touch it
            CopyObjectRequest copReq = new CopyObjectRequest(bucket, key, bucket, key);
            copReq.setNewObjectMetadata(objectMetaData);
            Copy copy = tmx.copy(s3ReqDecorator.decorate(copReq));
            try {
                copy.waitForCopyResult();
                LOG.debug("lastModified of [{}] updated successfully.", identifier);
                if (callback != null) {
                    callback.onSuccess(new AsyncUploadResult(identifier, file));
                }
            } catch (Exception e2) {
                // report the failure (with its cause) to the callback before rethrowing
                AsyncUploadResult asyncUpRes = new AsyncUploadResult(identifier, file);
                asyncUpRes.setException(e2);
                if (callback != null) {
                    callback.onAbort(asyncUpRes);
                }
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
        if (objectMetaData == null) {
            try {
                // start multipart parallel upload using amazon sdk
                Upload up = tmx.upload(s3ReqDecorator.decorate(new PutObjectRequest(bucket, key, file)));
                // wait for upload to finish
                if (asyncUpload) {
                    // async: completion is reported through the progress listener
                    up.addProgressListener(new S3UploadProgressListener(up, identifier, file, callback));
                    LOG.debug("added upload progress listener to identifier [{}]", identifier);
                } else {
                    up.waitForUploadResult();
                    LOG.debug("synchronous upload to identifier [{}] completed.", identifier);
                    if (callback != null) {
                        callback.onSuccess(new AsyncUploadResult(identifier, file));
                    }
                }
            } catch (Exception e2) {
                // report the failure (with its cause) to the callback before rethrowing
                AsyncUploadResult asyncUpRes = new AsyncUploadResult(identifier, file);
                asyncUpRes.setException(e2);
                if (callback != null) {
                    callback.onAbort(asyncUpRes);
                }
                throw new DataStoreException("Could not upload " + key, e2);
            }
        }
    } finally {
        if (contextClassLoader != null) {
            Thread.currentThread().setContextClassLoader(contextClassLoader);
        }
    }
    LOG.debug("write of [{}], length=[{}], in async mode [{}], in [{}]ms",
            new Object[] { identifier, file.length(), asyncUpload, (System.currentTimeMillis() - start) });
}
From source file:org.apache.jackrabbit.oak.blob.cloud.aws.s3.S3RequestDecorator.java
License: Apache License
/** * Set encryption in {@link CopyObjectRequest} */// w w w.j a v a 2 s . c om public CopyObjectRequest decorate(CopyObjectRequest request) { switch (getDataEncryption()) { case SSE_S3: ObjectMetadata metadata = request.getNewObjectMetadata() == null ? new ObjectMetadata() : request.getNewObjectMetadata(); metadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); request.setNewObjectMetadata(metadata); break; case NONE: break; } return request; }
From source file:org.duracloud.s3storage.S3StorageProvider.java
License: Apache License
/**
 * Rewrites the stored properties of an S3 object by copying it onto itself with
 * the given metadata, then reapplies the ACL captured before the copy.
 *
 * @param bucketName  bucket holding the object
 * @param contentId   key of the object to update
 * @param objMetadata new metadata to store on the object
 * @throws StorageException if the update fails and the content exists
 */
private void updateObjectProperties(String bucketName, String contentId, ObjectMetadata objMetadata) {
    try {
        // capture the ACL first so it can be restored after the copy
        AccessControlList previousAcl = s3Client.getObjectAcl(bucketName, contentId);
        CopyObjectRequest inPlaceCopy = new CopyObjectRequest(bucketName, contentId, bucketName, contentId);
        inPlaceCopy.setStorageClass(DEFAULT_STORAGE_CLASS);
        inPlaceCopy.setNewObjectMetadata(objMetadata);
        s3Client.copyObject(inPlaceCopy);
        s3Client.setObjectAcl(bucketName, contentId, previousAcl);
    } catch (AmazonClientException e) {
        // a missing object raises its own, more specific exception
        throwIfContentNotExist(bucketName, contentId);
        String err = "Could not update metadata for content " + contentId + " in S3 bucket " + bucketName
                + " due to error: " + e.getMessage();
        throw new StorageException(err, e, NO_RETRY);
    }
}
From source file:org.elasticsearch.cloud.aws.blobstore.S3BlobContainer.java
License: Apache License
@Override public void move(String sourceBlobName, String targetBlobName) throws IOException { try {// w w w . java 2 s .c o m CopyObjectRequest request = new CopyObjectRequest(blobStore.bucket(), buildKey(sourceBlobName), blobStore.bucket(), buildKey(targetBlobName)); if (blobStore.serverSideEncryption()) { ObjectMetadata objectMetadata = new ObjectMetadata(); objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION); request.setNewObjectMetadata(objectMetadata); } blobStore.client().copyObject(request); blobStore.client().deleteObject(blobStore.bucket(), buildKey(sourceBlobName)); } catch (AmazonS3Exception e) { throw new IOException(e); } }
From source file:org.elasticsearch.repositories.s3.S3BlobContainer.java
License: Apache License
/**
 * Moves a blob by copying it to the target key and then deleting the source,
 * running both S3 calls inside a privileged block. Applies AES-256 server-side
 * encryption metadata when the store requires it.
 * NOTE(review): copy-then-delete is not atomic — a failure between the two
 * calls leaves both blobs present.
 *
 * @param sourceBlobName blob to move
 * @param targetBlobName destination blob name
 * @throws IOException wrapping any {@link AmazonS3Exception}
 */
@Override
public void move(String sourceBlobName, String targetBlobName) throws IOException {
    try {
        CopyObjectRequest copy = new CopyObjectRequest(blobStore.bucket(), buildKey(sourceBlobName),
                blobStore.bucket(), buildKey(targetBlobName));
        if (blobStore.serverSideEncryption()) {
            ObjectMetadata encrypted = new ObjectMetadata();
            encrypted.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            copy.setNewObjectMetadata(encrypted);
        }
        // both calls need elevated socket permissions under the security manager
        SocketAccess.doPrivilegedVoid(() -> {
            blobStore.client().copyObject(copy);
            blobStore.client().deleteObject(blobStore.bucket(), buildKey(sourceBlobName));
        });
    } catch (AmazonS3Exception e) {
        throw new IOException(e);
    }
}
From source file:org.fcrepo.modeshape.binary.S3BinaryStore.java
License: Apache License
@Override protected void storeMimeType(BinaryValue binaryValue, String mimeType) throws BinaryStoreException { try {// w w w . jav a2 s. co m String key = binaryValue.getKey().toString(); ObjectMetadata metadata = s3Client.getObjectMetadata(bucketName, key); metadata.setContentType(mimeType); // Update the object in place CopyObjectRequest copyRequest = new CopyObjectRequest(bucketName, key, bucketName, key); copyRequest.setNewObjectMetadata(metadata); s3Client.copyObject(copyRequest); } catch (AmazonClientException e) { throw new BinaryStoreException(e); } }
From source file:org.fcrepo.modeshape.binary.S3BinaryStore.java
License: Apache License
private void setS3ObjectUserProperty(BinaryKey binaryKey, String metadataKey, String metadataValue) throws BinaryStoreException { try {/* www . j av a 2s . c om*/ String key = binaryKey.toString(); ObjectMetadata metadata = s3Client.getObjectMetadata(bucketName, key); Map<String, String> userMetadata = metadata.getUserMetadata(); if (null != metadataValue && metadataValue.equals(userMetadata.get(metadataKey))) { return; // The key/value pair already exists in user metadata, skip update } userMetadata.put(metadataKey, metadataValue); metadata.setUserMetadata(userMetadata); // Update the object in place CopyObjectRequest copyRequest = new CopyObjectRequest(bucketName, key, bucketName, key); copyRequest.setNewObjectMetadata(metadata); s3Client.copyObject(copyRequest); } catch (AmazonClientException e) { throw new BinaryStoreException(e); } }