Example usage for com.amazonaws.services.s3.model ObjectMetadata setContentLength

List of usage examples for com.amazonaws.services.s3.model ObjectMetadata setContentLength

Introduction

On this page you can find example usage of com.amazonaws.services.s3.model ObjectMetadata setContentLength.

Prototype

public void setContentLength(long contentLength) 

Document

Sets the Content-Length HTTP header indicating the size of the associated object in bytes.
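
For orientation, the snippet below is a minimal sketch of a typical call site; it is not taken from the projects listed under Usage, and it assumes an existing AmazonS3 client named s3Client plus placeholder bucket and key names.

// Minimal sketch (assumed names: s3Client, example-bucket, example/key.txt).
byte[] payload = "hello world".getBytes(java.nio.charset.StandardCharsets.UTF_8);

ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentLength(payload.length); // size of the object in bytes
metadata.setContentType("text/plain");

// With the length set, the SDK can send Content-Length for the stream upload.
s3Client.putObject(new PutObjectRequest("example-bucket", "example/key.txt",
        new java.io.ByteArrayInputStream(payload), metadata));

Supplying the length matters most when uploading from an InputStream: without it, the v1 SDK typically has to buffer the stream contents in memory to determine the size, which is why most of the examples below set it explicitly.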

Usage

From source file:S3DataManager.java

License:Open Source License

public UploadToS3Output uploadSourceToS3(AbstractBuild build, Launcher launcher, BuildListener listener)
        throws Exception {
    Validation.checkS3SourceUploaderConfig(projectName, workspace);

    SCM scm = build.getProject().getScm();
    if (scm.getType().equals("hudson.scm.NullSCM")) {
        throw new Exception("Select a valid option in Source Code Management.");
    }
    scm.checkout(build, launcher, workspace, listener, null, null);
    String localfileName = this.projectName + "-" + "source.zip";
    String sourceFilePath = workspace.getRemote();
    String zipFilePath = sourceFilePath.substring(0, sourceFilePath.lastIndexOf("/")) + "/" + localfileName;
    File zipFile = new File(zipFilePath);

    if (!zipFile.getParentFile().exists()) {
        boolean dirMade = zipFile.getParentFile().mkdirs();
        if (!dirMade) {
            throw new Exception("Unable to create directory: " + zipFile.getParentFile().getAbsolutePath());
        }
    }

    ZipOutputStream out = new ZipOutputStream(new FileOutputStream(zipFilePath));
    try {
        zipSource(sourceFilePath, out, sourceFilePath);
    } finally {
        out.close();
    }

    File sourceZipFile = new File(zipFilePath);
    PutObjectRequest putObjectRequest = new PutObjectRequest(s3InputBucket, s3InputKey, sourceZipFile);

    // Add MD5 checksum as S3 Object metadata
    String zipFileMD5;
    try (FileInputStream fis = new FileInputStream(zipFilePath)) {
        zipFileMD5 = new String(org.apache.commons.codec.binary.Base64.encodeBase64(DigestUtils.md5(fis)),
                "UTF-8");
    }
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentMD5(zipFileMD5);
    objectMetadata.setContentLength(sourceZipFile.length());
    putObjectRequest.setMetadata(objectMetadata);

    LoggingHelper.log(listener, "Uploading code to S3 at location " + putObjectRequest.getBucketName() + "/"
            + putObjectRequest.getKey() + ". MD5 checksum is " + zipFileMD5);
    PutObjectResult putObjectResult = s3Client.putObject(putObjectRequest);

    return new UploadToS3Output(putObjectRequest.getBucketName() + "/" + putObjectRequest.getKey(),
            putObjectResult.getVersionId());
}

From source file:alluxio.underfs.s3a.S3AOutputStream.java

License:Apache License

@Override
public void close() throws IOException {
    if (mClosed) {
        return;
    }
    mLocalOutputStream.close();
    try {
        // Generate the object metadata by setting server side encryption, md5 checksum, the file
        // length, and encoding as octet stream since no assumptions are made about the file type
        ObjectMetadata meta = new ObjectMetadata();
        if (SSE_ENABLED) {
            meta.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        }
        if (mHash != null) {
            meta.setContentMD5(new String(Base64.encode(mHash.digest())));
        }
        meta.setContentLength(mFile.length());
        meta.setContentEncoding(Mimetypes.MIMETYPE_OCTET_STREAM);

        // Generate the put request and wait for the transfer manager to complete the upload, then
        // delete the temporary file on the local machine
        PutObjectRequest putReq = new PutObjectRequest(mBucketName, mKey, mFile).withMetadata(meta);
        mManager.upload(putReq).waitForUploadResult();
        if (!mFile.delete()) {
            LOG.error("Failed to delete temporary file @ {}", mFile.getPath());
        }
    } catch (Exception e) {
        LOG.error("Failed to upload {}. Temporary file @ {}", mKey, mFile.getPath());
        throw new IOException(e);
    }

    // Set the closed flag, close can be retried until mFile.delete is called successfully
    mClosed = true;
}

From source file:alluxio.underfs.s3a.S3AUnderFileSystem.java

License:Apache License

/**
 * Creates a directory flagged file with the key and folder suffix.
 *
 * @param key the key to create a folder
 * @return true if the operation was successful, false otherwise
 */
private boolean mkdirsInternal(String key) {
    try {
        String keyAsFolder = convertToFolderName(stripPrefixIfPresent(key));
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(0);
        meta.setContentMD5(DIR_HASH);
        meta.setContentType(Mimetypes.MIMETYPE_OCTET_STREAM);
        mClient.putObject(
                new PutObjectRequest(mBucketName, keyAsFolder, new ByteArrayInputStream(new byte[0]), meta));
        return true;
    } catch (AmazonClientException e) {
        LOG.error("Failed to create directory: {}", key, e);
        return false;
    }
}

From source file:be.ugent.intec.halvade.uploader.AWSUploader.java

License:Open Source License

public void Upload(String key, InputStream input, long size) throws InterruptedException {
    ObjectMetadata meta = new ObjectMetadata();
    if (SSE)
        meta.setServerSideEncryption(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    meta.setContentLength(size);
    Upload upload = tm.upload(existingBucketName, key, input, meta);

    try {
        // Or you can block and wait for the upload to finish
        upload.waitForCompletion();
        Logger.DEBUG("Upload complete.");
    } catch (AmazonClientException amazonClientException) {
        Logger.DEBUG("Unable to upload file, upload was aborted.");
        Logger.EXCEPTION(amazonClientException);
    }
}

From source file:be.ugent.intec.halvade.uploader.CopyOfAWSUploader.java

License:Open Source License

public void Upload(String key, InputStream input, long size) throws InterruptedException {
    ObjectMetadata meta = new ObjectMetadata();
    meta.setServerSideEncryption(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    meta.setContentLength(size);
    Upload upload = tm.upload(existingBucketName, key, input, meta);

    try {
        // Or you can block and wait for the upload to finish
        upload.waitForCompletion();
        Logger.DEBUG("Upload complete.");
    } catch (AmazonClientException amazonClientException) {
        Logger.DEBUG("Unable to upload file, upload was aborted.");
        Logger.EXCEPTION(amazonClientException);
    }
}

From source file:biz.k11i.S3GyazoController.java

License:Open Source License

@RequestMapping(value = "/upload.cgi", method = RequestMethod.POST)
@ResponseBody
String upload(@RequestParam("imagedata") MultipartFile imagedata) throws IOException {
    if (imagedata.isEmpty()) {
        String message = "imagedata is empty";
        logger.warn(message);
        throw new BadRequestException(message);
    }

    byte[] bytes = imagedata.getBytes();
    String hash = generateHash(bytes);
    String filename = String.format("%s.png", hash);

    try (InputStream input = imagedata.getInputStream()) {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setContentType("image/png");
        objectMetadata.setContentLength(bytes.length);

        PutObjectRequest req = new PutObjectRequest(bucket, filename, input, objectMetadata)
                .withCannedAcl(CannedAccessControlList.PublicRead);

        amazonS3Client.putObject(req);
    }

    String result = urlPrefix + filename;
    logger.info("New image uploaded {}", result);
    return result;
}

From source file:ch.admin.isb.hermes5.persistence.s3.S3RemoteAdapter.java

License:Apache License

@Override
@Asynchronous
@Logged
public Future<Void> addFile(InputStream file, long size, String path) {
    try {
        ObjectMetadata metadata = new ObjectMetadata();

        metadata.setContentLength(size);
        metadata.setContentType(mimeTypeUtil.getMimeType(path));
        s3.putObject(new PutObjectRequest(bucketName.getStringValue(), path, file, metadata)
                .withCannedAcl(CannedAccessControlList.PublicRead));
        return new AsyncResult<Void>(null);
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (file != null) {
            try {
                file.close();
            } catch (IOException e) {
                // ignore: nothing useful can be done if closing the input stream fails here
            }
        }
    }
}

From source file:ch.admin.isb.hermes5.tools.filebackup.FileBackup.java

License:Apache License

public void run(String bucketName, String accessKey, String secretKey, String source, String targetPrefix,
        Long retentionPeriod, String topicArn, String snsEndpoint, String s3Endpoint) {

    AmazonS3 s3 = s3(accessKey, secretKey, s3Endpoint);

    AmazonSNS sns = sns(accessKey, secretKey, snsEndpoint);

    List<String> errors = new ArrayList<String>();
    String[] list = new File(source).list();
    for (String string : list) {
        File file = new File(source + "/" + string);
        System.out.print(timestamp() + " Backing up " + file.getAbsolutePath() + " to " + bucketName + "/"
                + targetPrefix + string + "...");
        try {
            byte[] data = readFileToByteArray(file);
            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentLength(data.length);
            s3.putObject(bucketName, targetPrefix + string, new ByteArrayInputStream(data), metadata);
            System.out.println("done");
            long lastModified = file.lastModified();
            long now = System.currentTimeMillis();
            if (retentionPeriod > 0 && differenceInDays(lastModified, now) > retentionPeriod) {
                System.out.println(timestamp() + " File " + source + "/" + string
                        + " is removed because it is older than " + retentionPeriod + " days.");
                boolean delete = file.delete();
                if (!delete) {
                    errors.add("Unable to delete " + file.getAbsolutePath());
                }
            }
        } catch (Exception e) {
            System.out.println("failed " + e.getMessage());
            errors.add(timestamp() + " Problem Backing up " + file.getAbsolutePath() + " to " + bucketName + "/"
                    + targetPrefix + string + "\n" + getStackTrace(e));
        }
    }

    if (errors.size() > 0) {
        StringBuilder sb = new StringBuilder();
        for (String string : errors) {
            sb.append(string).append("\n");
        }
        try {
            sendMessageThroughSNS(topicArn, sns, sb.toString(), "Problem with backup");
        } catch (Exception e) {
            System.out.println(timestamp() + "ERROR: unable to report issue " + sb.toString());
            e.printStackTrace();
        }
    }

}

From source file:ch.myniva.gradle.caching.s3.internal.AwsS3BuildCacheService.java

License:Apache License

@Override
public void store(BuildCacheKey key, BuildCacheEntryWriter writer) {
    final String bucketPath = getBucketPath(key);
    logger.info("Start storing cache entry '{}' in S3 bucket", bucketPath);
    ObjectMetadata meta = new ObjectMetadata();
    meta.setContentType(BUILD_CACHE_CONTENT_TYPE);

    try (ByteArrayOutputStream os = new ByteArrayOutputStream()) {
        writer.writeTo(os);
        meta.setContentLength(os.size());
        try (InputStream is = new ByteArrayInputStream(os.toByteArray())) {
            PutObjectRequest request = getPutObjectRequest(bucketPath, meta, is);
            if (this.reducedRedundancy) {
                request.withStorageClass(StorageClass.ReducedRedundancy);
            }
            s3.putObject(request);
        }
    } catch (IOException e) {
        throw new BuildCacheException("Error while storing cache object in S3 bucket", e);
    }
}

From source file:com.adeptj.modules.aws.s3.internal.AwsS3Service.java

License:Apache License

/**
 * {@inheritDoc}
 */
@Override
public S3Response createFolder(String bucketName, String folderName) {
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(0);
    try {
        return new S3Response().withPutObjectResult(this.s3Client.putObject(new PutObjectRequest(bucketName,
                folderName + PATH_SEPARATOR, new ByteArrayInputStream(new byte[0]), objectMetadata)));
    } catch (RuntimeException ex) {
        LOGGER.error("Exception while creating folder!!", ex);
        throw new AwsException(ex.getMessage(), ex);
    }
}