List of usage examples for com.amazonaws.services.s3.model ObjectMetadata ObjectMetadata
public ObjectMetadata()
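Before the harvested examples, here is a minimal sketch of the write-path pattern they all share. The client setup, bucket, key, and metadata values are hypothetical placeholders, not taken from any source file below:

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;

public class ObjectMetadataSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        byte[] body = "hello".getBytes(StandardCharsets.UTF_8);

        // ObjectMetadata carries the HTTP headers and user metadata for the PUT.
        ObjectMetadata md = new ObjectMetadata();
        md.setContentLength(body.length);        // a known length lets the SDK stream without buffering
        md.setContentType("text/plain");
        md.addUserMetadata("origin", "example"); // sent as the x-amz-meta-origin header

        s3.putObject(new PutObjectRequest("my-bucket", "my-key",
                new ByteArrayInputStream(body), md));
    }
}

The examples that follow are all variations of this construct, populate, put sequence.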
From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java
License:Open Source License
@Override
public void uploadFile(File f, String to, String pp) throws IOException {
    this.s3clientLock.readLock().lock();
    try {
        InputStream in = null;
        while (to.startsWith(File.separator))
            to = to.substring(1);
        String pth = pp + "/" + EncyptUtils.encString(to, Main.chunkStoreEncryptionEnabled);
        SDFSLogger.getLog().info("uploading " + f.getPath() + " to " + to + " pth " + pth);
        boolean isDir = false;
        boolean isSymlink = false;
        if (!OSValidator.isWindows()) {
            isDir = Files.readAttributes(f.toPath(), PosixFileAttributes.class, LinkOption.NOFOLLOW_LINKS)
                    .isDirectory();
            isSymlink = Files.readAttributes(f.toPath(), PosixFileAttributes.class, LinkOption.NOFOLLOW_LINKS)
                    .isSymbolicLink();
        } else {
            isDir = f.isDirectory();
        }
        if (isSymlink) {
            try {
                HashMap<String, String> metaData = new HashMap<String, String>();
                metaData.put("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
                metaData.put("lastmodified", Long.toString(f.lastModified()));
                String slp = EncyptUtils.encString(Files.readSymbolicLink(f.toPath()).toFile().getPath(),
                        Main.chunkStoreEncryptionEnabled);
                metaData.put("symlink", slp);
                ObjectMetadata md = new ObjectMetadata();
                md.setContentType("binary/octet-stream");
                md.setContentLength(pth.getBytes().length);
                md.setUserMetadata(metaData);
                PutObjectRequest req = new PutObjectRequest(this.name, pth,
                        new ByteArrayInputStream(pth.getBytes()), md);
                s3Service.putObject(req);
                if (this.isClustered())
                    this.checkoutFile(pth);
            } catch (Exception e1) {
                throw new IOException(e1);
            }
        } else if (isDir) {
            HashMap<String, String> metaData = FileUtils.getFileMetaData(f, Main.chunkStoreEncryptionEnabled);
            metaData.put("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
            metaData.put("lastmodified", Long.toString(f.lastModified()));
            metaData.put("directory", "true");
            ObjectMetadata md = new ObjectMetadata();
            md.setContentType("binary/octet-stream");
            md.setContentLength(pth.getBytes().length);
            md.setUserMetadata(metaData);
            try {
                PutObjectRequest req = new PutObjectRequest(this.name, pth,
                        new ByteArrayInputStream(pth.getBytes()), md);
                s3Service.putObject(req);
                if (this.isClustered())
                    this.checkoutFile(pth);
            } catch (Exception e1) {
                SDFSLogger.getLog().error("error uploading", e1);
                throw new IOException(e1);
            }
        } else {
            String rnd = RandomGUID.getGuid();
            File p = new File(this.staged_sync_location, rnd);
            File z = new File(this.staged_sync_location, rnd + ".z");
            File e = new File(this.staged_sync_location, rnd + ".e");
            while (z.exists()) {
                rnd = RandomGUID.getGuid();
                p = new File(this.staged_sync_location, rnd);
                z = new File(this.staged_sync_location, rnd + ".z");
                e = new File(this.staged_sync_location, rnd + ".e");
            }
            try {
                BufferedInputStream is = new BufferedInputStream(new FileInputStream(f));
                BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(p));
                IOUtils.copy(is, os);
                os.flush();
                os.close();
                is.close();
                if (Main.compress) {
                    CompressionUtils.compressFile(p, z);
                    p.delete();
                    p = z;
                }
                byte[] ivb = null;
                if (Main.chunkStoreEncryptionEnabled) {
                    try {
                        ivb = PassPhrase.getByteIV();
                        EncryptUtils.encryptFile(p, e, new IvParameterSpec(ivb));
                    } catch (Exception e1) {
                        throw new IOException(e1);
                    }
                    p.delete();
                    p = e;
                }
                String objName = pth;
                ObjectMetadata md = new ObjectMetadata();
                Map<String, String> umd = FileUtils.getFileMetaData(f, Main.chunkStoreEncryptionEnabled);
                md.setUserMetadata(umd);
                md.addUserMetadata("lz4compress", Boolean.toString(Main.compress));
                md.addUserMetadata("encrypt", Boolean.toString(Main.chunkStoreEncryptionEnabled));
                if (ivb != null)
                    md.addUserMetadata("ivspec", BaseEncoding.base64().encode(ivb));
                md.addUserMetadata("lastmodified", Long.toString(f.lastModified()));
                if (simpleS3) {
                    md.setContentType("binary/octet-stream");
                    in = new BufferedInputStream(new FileInputStream(p), 32768);
                    try {
                        if (md5sum) {
                            byte[] md5Hash = ServiceUtils.computeMD5Hash(in);
                            in.close();
                            String mds = BaseEncoding.base64().encode(md5Hash);
                            md.setContentMD5(mds);
                            md.addUserMetadata("md5sum", mds);
                        }
                    } catch (NoSuchAlgorithmException e2) {
                        SDFSLogger.getLog().error("while hashing", e2);
                        throw new IOException(e2);
                    }
                    in = new FileInputStream(p);
                    md.setContentLength(p.length());
                    try {
                        PutObjectRequest req = new PutObjectRequest(this.name, objName, in, md);
                        s3Service.putObject(req);
                        if (this.isClustered())
                            this.checkoutFile(pth);
                        SDFSLogger.getLog().debug(
                                "uploaded=" + f.getPath() + " lm=" + md.getUserMetadata().get("lastmodified"));
                    } catch (AmazonS3Exception e1) {
                        if (e1.getStatusCode() == 409) {
                            try {
                                s3Service.deleteObject(this.name, objName);
                                this.uploadFile(f, to, pp);
                                return;
                            } catch (Exception e2) {
                                throw new IOException(e2);
                            }
                        } else {
                            throw new IOException(e1);
                        }
                    } catch (Exception e1) {
                        // SDFSLogger.getLog().error("error uploading", e1);
                        throw new IOException(e1);
                    }
                } else {
                    try {
                        md.setContentType("binary/octet-stream");
                        in = new BufferedInputStream(new FileInputStream(p), 32768);
                        byte[] md5Hash = ServiceUtils.computeMD5Hash(in);
                        in.close();
                        String mds = BaseEncoding.base64().encode(md5Hash);
                        md.setContentMD5(mds);
                        md.addUserMetadata("md5sum", mds);
                        in = new BufferedInputStream(new FileInputStream(p), 32768);
                        md.setContentLength(p.length());
                        PutObjectRequest req = new PutObjectRequest(this.name, objName, in, md);
                        multiPartUpload(req);
                        if (this.isClustered())
                            this.checkoutFile(pth);
                    } catch (AmazonS3Exception e1) {
                        if (e1.getStatusCode() == 409) {
                            try {
                                s3Service.deleteObject(this.name, objName);
                                this.uploadFile(f, to, pp);
                                return;
                            } catch (Exception e2) {
                                throw new IOException(e2);
                            }
                        } else {
                            throw new IOException(e1);
                        }
                    } catch (Exception e1) {
                        // SDFSLogger.getLog().error("error uploading", e1);
                        throw new IOException(e1);
                    }
                }
            } finally {
                try {
                    if (in != null)
                        in.close();
                } finally {
                    p.delete();
                    z.delete();
                    e.delete();
                }
            }
        }
    } finally {
        this.s3clientLock.readLock().unlock();
    }
}
From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java
License:Open Source License
@Override
public void checkoutFile(String name) throws IOException {
    String pth = "claims/" + name + "/"
            + EncyptUtils.encHashArchiveName(Main.DSEID, Main.chunkStoreEncryptionEnabled);
    this.s3clientLock.readLock().lock();
    try {
        byte[] b = Long.toString(System.currentTimeMillis()).getBytes();
        ObjectMetadata om = new ObjectMetadata();
        String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(b));
        om.setContentMD5(mds);
        om.addUserMetadata("md5sum", mds);
        om.setContentLength(b.length);
        PutObjectRequest creq = new PutObjectRequest(this.name, pth, new ByteArrayInputStream(b), om);
        s3Service.putObject(creq);
    } catch (AmazonS3Exception e1) {
        if (e1.getStatusCode() == 409) {
            try {
                s3Service.deleteObject(this.name, pth);
                this.checkoutFile(name);
                return;
            } catch (Exception e2) {
                throw new IOException(e2);
            }
        } else {
            throw new IOException(e1);
        }
    } catch (Exception e1) {
        // SDFSLogger.getLog().error("error uploading", e1);
        throw new IOException(e1);
    } finally {
        this.s3clientLock.readLock().unlock();
    }
}
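The snippet above also demonstrates the Content-MD5 integrity check: the MD5 digest of the payload is base64-encoded and set on the metadata, so S3 recomputes the hash server-side and rejects a corrupted PUT. A minimal sketch of just that pattern, reusing the same ServiceUtils and BaseEncoding helpers the snippet relies on (payload is illustrative, error handling elided):

byte[] b = "claim".getBytes();
// computeMD5Hash returns the raw 16-byte digest; the Content-MD5 header must be base64
String mds = BaseEncoding.base64().encode(ServiceUtils.computeMD5Hash(b));
ObjectMetadata om = new ObjectMetadata();
om.setContentMD5(mds);          // S3 fails the PUT if the received body hashes differently
om.setContentLength(b.length);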
From source file:org.openflamingo.fs.s3.S3ObjectProvider.java
License:Apache License
@Override
public boolean mkdir(String path) {
    Assert.hasLength(path, "Please enter the path");
    String bucket = S3Utils.getBucket(path);
    String relativePath = S3Utils.getObjectKey(path);
    try {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(0);
        InputStream emptyContent = new ByteArrayInputStream(new byte[0]);
        PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, relativePath, emptyContent, metadata);
        awsClient.putObject(putObjectRequest);
        /* auditService.mkdir(FileSystemType.S3, username, path); */
        return true;
    } catch (AmazonServiceException ase) {
        throw new FileSystemException("Cannot create the directory.", ase);
    } catch (AmazonClientException ace) {
        throw new FileSystemException("Cannot create the directory.", ace);
    }
}
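S3 has no real directories; the mkdir above follows the common convention of writing a zero-byte object whose key stands in for the folder. A minimal sketch of the marker on its own (bucket and key are hypothetical; many S3 browsers expect the marker key to end in "/"):

ObjectMetadata metadata = new ObjectMetadata();
metadata.setContentLength(0); // empty body; the key itself is the information
awsClient.putObject(new PutObjectRequest("my-bucket", "logs/2024/",
        new ByteArrayInputStream(new byte[0]), metadata));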
From source file:org.openflamingo.fs.s3.S3ObjectProvider.java
License:Apache License
public boolean save(InputStream is, long size, String path) {
    Assert.notNull(is, "Please enter the input stream.");
    Assert.hasLength(path, "Please enter the path.");
    try {
        String bucket = S3Utils.getBucket(path);
        String key = StringUtils.remove(path, "/" + bucket + "/");
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setHeader(Headers.CONTENT_LENGTH, size);
        awsClient.putObject(new PutObjectRequest(bucket, key, is, metadata));
        return true;
    } catch (AmazonServiceException ase) {
        System.out.println("Caught an AmazonServiceException, which means your request made it "
                + "to Amazon S3, but was rejected with an error response for some reason.");
        System.out.println("Error Message: " + ase.getMessage());
        System.out.println("HTTP Status Code: " + ase.getStatusCode());
        System.out.println("AWS Error Code: " + ase.getErrorCode());
        System.out.println("Error Type: " + ase.getErrorType());
        System.out.println("Request ID: " + ase.getRequestId());
        throw new FileSystemException("Cannot save the file.", ase);
    } catch (AmazonClientException ace) {
        System.out.println("Caught an AmazonClientException, which means the client encountered "
                + "an internal error while trying to communicate with S3, "
                + "such as not being able to access the network.");
        System.out.println("Error Message: " + ace.getMessage());
        throw new FileSystemException("Cannot save the file.", ace);
    }
}
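A side note on the snippet above: setHeader(Headers.CONTENT_LENGTH, size) populates the same Content-Length entry in the metadata's header map that the typed setter writes, so the two forms below should be interchangeable (an assumption based on how ObjectMetadata stores its headers; the typed setter is the more common choice):

metadata.setHeader(Headers.CONTENT_LENGTH, size); // raw header form used above
metadata.setContentLength(size);                  // equivalent typed setter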
From source file:org.openinfinity.cloud.domain.repository.deployer.BucketRepositoryAWSImpl.java
License:Apache License
/**
 * Creates a bucket and stores the given input stream in it.
 *
 * @param inputStream Represents the file input stream.
 * @param bucketName  Represents the name of the bucket.
 * @param key         Represents the key under which to store the new object.
 * @param metadataMap Represents the user metadata to attach to the object.
 * @return key Defines the created resource key.
 */
public String createBucket(InputStream inputStream, String bucketName, String key,
        Map<String, String> metadataMap) {
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setUserMetadata(metadataMap);
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, key, inputStream, objectMetadata);
    simpleStorageService.createBucket(bucketName);
    simpleStorageService.putObject(putObjectRequest);
    return key;
}
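User metadata attached this way persists on the object and can be read back without fetching the body. A minimal round-trip sketch of the read path, assuming simpleStorageService is the AmazonS3 client used above and "deployedby" is a hypothetical key from metadataMap:

// HEAD request: retrieves only the metadata, not the object body
ObjectMetadata stored = simpleStorageService.getObjectMetadata(bucketName, key);
// keys come back without the x-amz-meta- prefix, typically lower-cased
Map<String, String> userMeta = stored.getUserMetadata();
String deployedBy = userMeta.get("deployedby");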
From source file:org.pieShare.pieDrive.adapter.s3.S3Adapter.java
@Override
public synchronized void upload(PieDriveFile file, InputStream stream) throws AdaptorException {
    try {
        ObjectMetadata meta = new ObjectMetadata();
        meta.setContentLength(file.getSize());
        PutObjectRequest req = new PutObjectRequest(bucketName, file.getUuid(), stream, meta);
        // req.getRequestClientOptions().setReadLimit(64);
        s3Auth.getClient().putObject(req);
        // Thread.sleep(2000);
        PieLogger.trace(S3Adapter.class, "{} uploaded", file.getUuid());
    } catch (AmazonServiceException ase) {
        throw new AdaptorException(ase);
    } catch (AmazonClientException ace) {
        throw new AdaptorException(ace);
    } catch (Exception e) {
    }
}
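The commented-out setReadLimit line above points at a real constraint: when putObject is handed a raw InputStream, the SDK can only retry a failed transmission if it can rewind the stream, and the mark/reset read limit bounds how far back it can seek. A hedged sketch of that knob, reusing the names from the snippet (the limit value is illustrative):

PutObjectRequest req = new PutObjectRequest(bucketName, file.getUuid(), stream, meta);
// allow the SDK to rewind up to ~5 MB of already-sent data on a retry;
// a stream that cannot be reset past this point makes the retry fail instead
req.getRequestClientOptions().setReadLimit(5 * 1024 * 1024 + 1);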
From source file:org.plos.repo.service.S3StoreService.java
License:Open Source License
@Override
public boolean saveUploadedObject(Bucket bucket, UploadInfo uploadInfo, RepoObject repoObject) {
    int retries = 5;
    int tryCount = 0;
    int waitSecond = 4;

    ObjectMapper m = new ObjectMapper();
    Map<String, java.lang.Object> propsObj = m.convertValue(repoObject, Map.class);
    Map<String, String> propsStr = new HashMap<>();
    for (Map.Entry<String, java.lang.Object> entry : propsObj.entrySet()) {
        try {
            if (entry.getValue() == null) {
                propsStr.put(entry.getKey(), "");
            } else {
                propsStr.put(entry.getKey(), entry.getValue().toString());
            }
        } catch (ClassCastException cce) {
            log.error("Problem converting object to metadata", cce);
        }
    }

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(uploadInfo.getSize());
    objectMetadata.setUserMetadata(propsStr);

    File tempFile = new File(uploadInfo.getTempLocation());
    PutObjectRequest putObjectRequest = new PutObjectRequest(bucket.getBucketName(), uploadInfo.getChecksum(),
            tempFile);
    putObjectRequest.withCannedAcl(CannedAccessControlList.PublicRead);
    putObjectRequest.setMetadata(objectMetadata);

    while (tryCount < retries) {
        try {
            s3Client.putObject(putObjectRequest);
            // TODO: check result and do something about it
            tempFile.delete();
            return true;
        } catch (Exception e) {
            tryCount++;
            log.error("Error during putObject", e);
            try {
                Thread.sleep(waitSecond * 1000);
            } catch (Exception e2) {
            }
        }
    }
    return false;
}
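As an alternative to a hand-rolled retry loop like the one above, the SDK ships TransferManager, which handles multipart splitting and retries for large uploads. A minimal sketch, assuming s3Client and putObjectRequest are the objects already configured in the snippet:

import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

TransferManager tm = TransferManagerBuilder.standard().withS3Client(s3Client).build();
Upload upload = tm.upload(putObjectRequest); // returns immediately; the upload runs in the background
upload.waitForCompletion();                  // blocks until done, throws on failure
tm.shutdownNow(false);                       // false: keep the shared S3 client alive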
From source file:org.rdswitchboard.harvesters.pmh.Harvester.java
License:Open Source License
public void downloadRecords(SetStatus set) throws HarvesterException, UnsupportedEncodingException, IOException,
        InterruptedException, XPathExpressionException, SAXException, ParserConfigurationException {
    // Generate the URL of the request
    String url = null;
    if (set.hasToken()) {
        try {
            url = repoUrl
                    + String.format(URL_LIST_RECORDS_RESUMPTION_TOKEN, URLEncoder.encode(set.getToken(), "UTF-8"));
        } catch (UnsupportedEncodingException e) {
            e.printStackTrace();
        }
    }
    if (null == url) {
        if (!set.hasName())
            url = repoUrl + String.format(URL_LIST_DEFAULT_RECORDS, metadataPrefix);
        else
            url = repoUrl
                    + String.format(URL_LIST_RECORDS, URLEncoder.encode(set.getName(), "UTF-8"), metadataPrefix);
    }

    System.out.println("Downloading records: " + url);

    String xml = null;

    // Get the XML document
    URLConnection conn = new URL(url).openConnection();
    if (connectionTimeout > 0)
        conn.setConnectTimeout(connectionTimeout);
    if (readTimeout > 0)
        conn.setReadTimeout(readTimeout);
    try (InputStream is = conn.getInputStream()) {
        if (null != is)
            xml = IOUtils.toString(is, StandardCharsets.UTF_8.name());
    }

    // Check that an XML document has been returned and that it has a valid root element
    if (null == xml)
        throw new HarvesterException("The XML document is empty");

    Document doc = dbf.newDocumentBuilder().parse(new InputSource(new StringReader(xml)));

    // Extract the root node
    Node root = (Node) XPATH_OAI_PMH.evaluate(doc, XPathConstants.NODE);
    if (null == root)
        throw new HarvesterException("The document is not an OAI:PMH file");

    // Check for an error node
    Node error = (Node) XPATH_ERROR.evaluate(root, XPathConstants.NODE);
    if (null != error && error instanceof Element) {
        String code = ((Element) error).getAttribute("code");
        String message = ((Element) error).getTextContent();
        if (ERR_NO_RECORDS_MATCH.equals(code)) {
            System.out.println("Error: The set is empty");
            set.setFiles(0);
            set.resetToken();
            return;
        } else
            throw new HarvesterException(code, message);
    }

    Node nodeToken = (Node) XPATH_RECORDS_RESUMPTION_TOKEN.evaluate(root, XPathConstants.NODE);
    if (null != nodeToken && nodeToken instanceof Element) {
        String tokenString = ((Element) nodeToken).getTextContent();
        if (null != tokenString && !tokenString.isEmpty())
            set.setToken(tokenString);
        else
            set.resetToken();
        set.setCursor(((Element) nodeToken).getAttribute("cursor"));
        set.setSize(((Element) nodeToken).getAttribute("completeListSize"));
        set.dumpToken(System.out);
    } else
        set.resetToken();

    String filePath = repoPrefix + "/" + metadataPrefix + "/" + harvestDate + "/" + set.getNameSafe() + "/"
            + set.getFiles() + ".xml";

    if (StringUtils.isNullOrEmpty(bucketName)) {
        FileUtils.writeStringToFile(new File(folderName, filePath), xml);
    } else {
        byte[] bytes = xml.getBytes(StandardCharsets.UTF_8);

        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentEncoding(StandardCharsets.UTF_8.name());
        metadata.setContentType("text/xml");
        metadata.setContentLength(bytes.length);

        InputStream inputStream = new ByteArrayInputStream(bytes);
        PutObjectRequest request = new PutObjectRequest(bucketName, filePath, inputStream, metadata);
        s3client.putObject(request);
    }

    set.incFiles();
}
From source file:org.rdswitchboard.harvesters.pmh.Harvester.java
License:Open Source License
/**
 * Alternative function to organize the harvest process. The difference from the other
 * function is in data storage: the harvest2 variant stores files in the raw format in
 * which they come from the server.
 * The harvesting methods should never be mixed. The harvesting folder must be wiped out
 * if switching to this method, or the function will fail.
 * @throws Exception
 */
public boolean harvest() throws Exception {
    if (StringUtils.isNullOrEmpty(metadataPrefix))
        throw new IllegalArgumentException("The OAI:PMH Metadata Prefix can not be empty");

    System.out.println("Downloading set list");

    boolean result = false;
    if (null == whiteList || whiteList.isEmpty()) {
        System.out.println(
                "There is no whitelist found. Proceeding with downloading the list of all available sets.");
        // download all sets in the repository
        Map<String, String> mapSets = listSets();
        if (null == mapSets || mapSets.isEmpty()) {
            System.out.println("Processing default set");
            result = harvestSet(new SetStatus(null, "Default"));
        } else {
            result = false;
            for (Map.Entry<String, String> entry : mapSets.entrySet()) {
                SetStatus set = new SetStatus(entry.getKey().trim(),
                        URLDecoder.decode(entry.getValue(), StandardCharsets.UTF_8.name()));
                // if a black list exists and the item is blacklisted, continue
                if (null != blackList && blackList.contains(set)) {
                    set.setFiles(-2);
                    saveSetStats(set); // set was ignored
                    continue;
                }
                System.out.println("Processing set: "
                        + URLDecoder.decode(entry.getValue(), StandardCharsets.UTF_8.name()));
                if (!harvestSet(set)) {
                    System.err.println(
                            "The harvesting job has been aborted due to an error. If you want harvesting to be continued, please set option 'fail.on.error' to 'false' in the configuration file");
                    result = false;
                    break;
                } else
                    result = true;
            }
        }
    } else {
        for (String item : whiteList) {
            if (!harvestSet(new SetStatus(item, item))) {
                System.err.println(
                        "The harvesting job has been aborted due to an error. If you want harvesting to be continued, please set option 'fail.on.error' to 'false' in the configuration file");
                result = false;
                break;
            } else
                result = true;
        }
    }

    if (result) {
        String filePath = repoPrefix + "/" + metadataPrefix + "/latest.txt";
        if (StringUtils.isNullOrEmpty(bucketName)) {
            FileUtils.writeStringToFile(new File(folderName, filePath), harvestDate);
        } else {
            byte[] bytes = harvestDate.getBytes(StandardCharsets.UTF_8);

            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentEncoding(StandardCharsets.UTF_8.name());
            metadata.setContentType("text/plain");
            metadata.setContentLength(bytes.length);

            InputStream inputStream = new ByteArrayInputStream(bytes);
            PutObjectRequest request = new PutObjectRequest(bucketName, filePath, inputStream, metadata);
            s3client.putObject(request);
        }
    }
    return result;
}
From source file:org.researchgraph.crossref.CrossRef.java
private void saveCacheFile(String file, String json) throws IOException {
    if (null != file && null != json && !json.isEmpty()) {
        if (null != cache) {
            FileUtils.write(new File(cache, file), json);
        } else if (null != s3Client) {
            byte[] bytes = json.getBytes(StandardCharsets.UTF_8);

            ObjectMetadata metadata = new ObjectMetadata();
            metadata.setContentEncoding(StandardCharsets.UTF_8.name());
            metadata.setContentType("text/json");
            metadata.setContentLength(bytes.length);

            InputStream inputStream = new ByteArrayInputStream(bytes);
            s3Client.putObject(new PutObjectRequest(s3Bucket, getS3Key(file), inputStream, metadata));
        }
    }
}
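The last three examples repeat the same bytes-to-S3 block almost verbatim. If that pattern were factored out, it would reduce to one small helper; a hedged sketch (the method name and parameter list are hypothetical, not from any source above):

private void putString(AmazonS3 s3, String bucket, String key, String body, String contentType) {
    byte[] bytes = body.getBytes(StandardCharsets.UTF_8);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentEncoding(StandardCharsets.UTF_8.name());
    metadata.setContentType(contentType);
    metadata.setContentLength(bytes.length); // required for stream uploads to avoid buffering the whole body
    s3.putObject(new PutObjectRequest(bucket, key, new ByteArrayInputStream(bytes), metadata));
}

Called as, for example, putString(s3client, bucketName, filePath, xml, "text/xml") from downloadRecords.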