Usage examples for com.amazonaws.services.s3.model.ObjectMetadata#getUserMetaDataOf
public String getUserMetaDataOf(String key)
From source file:com.altoukhov.svsync.fileviews.S3FileSpace.java
License:Apache License
/**
 * Scans the S3 bucket under {@code rootPath} and builds a {@link Snapshot} of all
 * non-excluded, non-filtered objects. Keys ending in "/" are treated as directory
 * markers; all other keys become {@link FileSnapshot} entries keyed by their path
 * relative to {@code rootPath}.
 *
 * @param filters patterns used by {@code isFiltered} to skip matching keys
 * @return the populated snapshot, or {@code null} if the S3 listing failed
 */
@Override
protected Snapshot scan(List<Pattern> filters) {
    try {
        Map<String, FileSnapshot> files = new LinkedHashMap<>();
        Set<String> dirs = new HashSet<>();

        ListObjectsRequest listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName)
                .withPrefix(rootPath.isEmpty() ? "" : rootPath + "/");

        ObjectListing objectListing;
        do {
            objectListing = listObjects(listObjectsRequest);
            for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
                if (isExcluded(objectSummary.getKey()) || isFiltered(objectSummary.getKey(), filters))
                    continue;

                if (objectSummary.getKey().endsWith("/")) {
                    // Directory marker object: record its root-relative path.
                    String filePath = trimPath(objectSummary.getKey());
                    filePath = filePath.equals(rootPath) ? ""
                            : filePath.substring(rootPath.length() + (rootPath.isEmpty() ? 0 : 1));
                    dirs.add(filePath);
                    System.out.println(String.format("Scanning s3://%s/%s", bucketName, objectSummary.getKey()));
                } else {
                    String fileName = objectSummary.getKey();
                    String filePath = "";

                    if (fileName.contains("/")) {
                        int fileNameSplitIndex = fileName.lastIndexOf("/");
                        filePath = fileName.substring(0, fileNameSplitIndex);
                        fileName = fileName.substring(fileNameSplitIndex + 1);
                        filePath = filePath.equals(rootPath) ? ""
                                : filePath.substring(rootPath.length() + (rootPath.isEmpty() ? 0 : 1));
                    }

                    if (filePath.equals("")) {
                        filePath = fileName;
                    } else {
                        filePath = filePath + "/" + fileName;
                    }

                    ObjectMetadata meta = getObjectInfo(objectSummary);
                    // Prefer the original last-modified timestamp stored by the
                    // uploader in the "lmd" user metadata (epoch millis); fall back
                    // to the S3 object timestamp when it is absent or malformed.
                    String lmd = meta.getUserMetaDataOf("lmd");
                    Date lastModified = objectSummary.getLastModified();
                    if (lmd != null) {
                        try {
                            lastModified = new Date(Long.parseLong(lmd));
                        } catch (NumberFormatException nfe) {
                            // Malformed "lmd" value: previously this aborted the whole
                            // scan; keep the S3-reported timestamp instead.
                        }
                    }

                    FileSnapshot file = new FileSnapshot(fileName, objectSummary.getSize(),
                            new DateTime(lastModified), filePath);
                    files.put(filePath, file);
                }
            }
            listObjectsRequest.setMarker(objectListing.getNextMarker());
        } while (objectListing.isTruncated());

        return new Snapshot(files, dirs);
    } catch (AmazonClientException ex) {
        System.out.println("Failed to scan file space");
        System.out.println(ex.getMessage());
    }
    return null;
}
From source file:com.ibm.stocator.fs.cos.COSAPIClient.java
License:Apache License
/**
 * Resolves a {@link FileStatus} for the given object key, consulting the in-memory
 * cache first. As a side effect, records whether the object was created by Stocator
 * (user metadata {@code data-origin=stocator}) in {@code mCachedSparkOriginated}.
 *
 * @param key object key within {@code mBucket}
 * @param path filesystem path corresponding to the key (used as the cache key)
 * @return the cached or freshly created file status
 * @throws AmazonS3Exception if the metadata request fails
 */
private FileStatus getFileStatusKeyBased(String key, Path path) throws AmazonS3Exception {
    LOG.trace("internal method - get file status by key {}, path {}", key, path);
    FileStatus cachedFS = memoryCache.getFileStatus(path.toString());
    if (cachedFS != null) {
        return cachedFS;
    }
    ObjectMetadata meta = mClient.getObjectMetadata(mBucket, key);
    // getUserMetaDataOf returns the String value or null; the constant-first
    // equals is null-safe, so no explicit null check (or cast) is needed.
    String sparkOrigin = meta.getUserMetaDataOf("data-origin");
    boolean stocatorCreated = "stocator".equals(sparkOrigin);
    mCachedSparkOriginated.put(key, Boolean.valueOf(stocatorCreated));
    FileStatus fs = createFileStatus(meta.getContentLength(), key, meta.getLastModified(), path);
    memoryCache.putFileStatus(path.toString(), fs);
    return fs;
}
From source file:com.ibm.stocator.fs.cos.COSAPIClient.java
License:Apache License
/**
 * Checks if the container/object exists and verifies that it carries
 * {@code data-origin=stocator} user metadata. If so, the object was created by Spark.
 *
 * @param objectKey the key of the object (must end with "/" to be considered)
 * @param path the object path
 * @return true if the object was created by Spark
 */
private boolean isSparkOrigin(String objectKey, String path) {
    LOG.debug("check spark origin for {}", objectKey);
    // Only directory-style keys (trailing slash) qualify; strip the slash for lookup.
    if (!objectKey.endsWith("/")) {
        LOG.debug("Key {} has no slash. Return false", objectKey);
        return false;
    }
    objectKey = objectKey.substring(0, objectKey.length() - 1);

    if (mCachedSparkOriginated.containsKey(objectKey)) {
        boolean res = mCachedSparkOriginated.get(objectKey).booleanValue();
        LOG.debug("found cached for spark origin for {}. Status {}", objectKey, res);
        return res;
    }

    String key = getRealKey(objectKey);
    Boolean sparkOriginated = Boolean.FALSE;
    ObjectMetadata objMetadata = getObjectMetadata(key);
    if (objMetadata != null) {
        // getUserMetaDataOf already returns a String (or null); the original
        // Object variable and (String) cast were redundant.
        String sparkOrigin = objMetadata.getUserMetaDataOf("data-origin");
        if ("stocator".equals(sparkOrigin)) {
            sparkOriginated = Boolean.TRUE;
        }
    }
    // NOTE(review): the cache is probed with objectKey above but populated with
    // getRealKey(objectKey) here; if getRealKey is not the identity these entries
    // can never be hit — confirm which key the cache is meant to use.
    mCachedSparkOriginated.put(key, sparkOriginated);
    LOG.debug("spark origin for {} is {} non cached", objectKey, sparkOriginated.booleanValue());
    return sparkOriginated.booleanValue();
}
From source file:com.netflix.hollow.example.consumer.infrastructure.S3BlobRetriever.java
License:Apache License
/**
 * Builds an {@link S3Blob} handle for a snapshot of the given version, reading the
 * destination state id from the object's "to_state" user metadata.
 */
private HollowBlob knownSnapshotBlob(long desiredVersion) {
    final String objectName = S3Publisher.getS3ObjectName(blobNamespace, "snapshot", desiredVersion);
    final ObjectMetadata metadata = s3.getObjectMetadata(bucketName, objectName);
    final long toState = Long.parseLong(metadata.getUserMetaDataOf("to_state"));
    return new S3Blob(objectName, toState);
}
From source file:com.netflix.hollow.example.consumer.infrastructure.S3BlobRetriever.java
License:Apache License
/**
 * Builds an {@link S3Blob} handle for a delta of the given type starting at
 * {@code fromVersion}, reading the transition endpoints from the object's
 * "from_state"/"to_state" user metadata.
 */
private HollowBlob knownDeltaBlob(String fileType, long fromVersion) {
    final String objectName = S3Publisher.getS3ObjectName(blobNamespace, fileType, fromVersion);
    final ObjectMetadata metadata = s3.getObjectMetadata(bucketName, objectName);
    final long fromState = Long.parseLong(metadata.getUserMetaDataOf("from_state"));
    final long toState = Long.parseLong(metadata.getUserMetaDataOf("to_state"));
    return new S3Blob(objectName, fromState, toState);
}
From source file:org.apereo.portal.portlets.dynamicskin.storage.s3.AwsS3DynamicSkinService.java
License:Apache License
/**
 * Reports whether the CSS file for the given skin instance already exists in the
 * S3 bucket AND was generated from the same skin configuration, by comparing the
 * unique token stored in the object's user metadata against the current one.
 */
@Override
protected boolean innerSkinCssFileExists(DynamicSkinInstanceData data) {
    final String objectKey = this.getCssObjectKey(data);
    log.info(ATTEMPTING_TO_GET_FILE_METADATA_FROM_AWS_S3_LOG_MSG, this.awsS3BucketConfig.getBucketName(),
            objectKey);
    final ObjectMetadata metadata = this.getMetadataFromAwsS3Bucket(objectKey);
    log.info(FILE_METADATA_RETRIEVED_FROM_AWS_S3_LOG_MSG, this.awsS3BucketConfig.getBucketName(), objectKey);
    if (metadata == null) {
        // No such object in the bucket.
        return false;
    }
    final String storedToken = metadata.getUserMetaDataOf(SKIN_UNIQUE_TOKEN_METADATA_KEY);
    return this.getUniqueToken(data).equals(storedToken);
}
From source file:org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java
License:Open Source License
@Override public void downloadFile(String nm, File to, String pp) throws IOException { this.s3clientLock.readLock().lock(); try {//ww w . j av a2s. c o m while (nm.startsWith(File.separator)) nm = nm.substring(1); String rnd = RandomGUID.getGuid(); File p = new File(this.staged_sync_location, rnd); File z = new File(this.staged_sync_location, rnd + ".uz"); File e = new File(this.staged_sync_location, rnd + ".de"); while (z.exists()) { rnd = RandomGUID.getGuid(); p = new File(this.staged_sync_location, rnd); z = new File(this.staged_sync_location, rnd + ".uz"); e = new File(this.staged_sync_location, rnd + ".de"); } if (nm.startsWith(File.separator)) nm = nm.substring(1); String haName = EncyptUtils.encString(nm, Main.chunkStoreEncryptionEnabled); Map<String, String> mp = null; byte[] shash = null; try { if (this.simpleS3) { S3Object obj = null; SDFSLogger.getLog().debug("downloading " + pp + "/" + haName); obj = s3Service.getObject(this.name, pp + "/" + haName); BufferedInputStream in = new BufferedInputStream(obj.getObjectContent()); BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(p)); IOUtils.copy(in, out); out.flush(); out.close(); in.close(); ObjectMetadata omd = s3Service.getObjectMetadata(name, pp + "/" + haName); mp = this.getUserMetaData(omd); SDFSLogger.getLog().debug("mp sz=" + mp.size()); try { if (obj != null) obj.close(); } catch (Exception e1) { } } else { SDFSLogger.getLog().debug("downloading " + pp + "/" + haName); this.multiPartDownload(pp + "/" + haName, p); ObjectMetadata omd = s3Service.getObjectMetadata(name, pp + "/" + haName); mp = this.getUserMetaData(omd); if (md5sum && mp.containsKey("md5sum")) { shash = BaseEncoding.base64().decode(omd.getUserMetaDataOf("md5sum")); } } if (shash != null && !FileUtils.fileValid(p, shash)) throw new IOException("file " + p.getPath() + " is corrupt"); boolean encrypt = false; boolean lz4compress = false; if (mp.containsKey("encrypt")) { encrypt = 
Boolean.parseBoolean(mp.get("encrypt")); } if (mp.containsKey("lz4compress")) { lz4compress = Boolean.parseBoolean(mp.get("lz4compress")); } byte[] ivb = null; if (mp.containsKey("ivspec")) { ivb = BaseEncoding.base64().decode(mp.get("ivspec")); } SDFSLogger.getLog().debug("compress=" + lz4compress + " " + mp.get("lz4compress")); if (mp.containsKey("symlink")) { if (OSValidator.isWindows()) throw new IOException("unable to restore symlinks to windows"); else { String spth = EncyptUtils.decString(mp.get("symlink"), encrypt); Path srcP = Paths.get(spth); Path dstP = Paths.get(to.getPath()); Files.createSymbolicLink(dstP, srcP); } } else if (mp.containsKey("directory")) { to.mkdirs(); FileUtils.setFileMetaData(to, mp, encrypt); p.delete(); } else { if (encrypt) { if (ivb != null) { EncryptUtils.decryptFile(p, e, new IvParameterSpec(ivb)); } else { EncryptUtils.decryptFile(p, e); } p.delete(); p = e; } if (lz4compress) { CompressionUtils.decompressFile(p, z); p.delete(); p = z; } File parent = to.getParentFile(); if (!parent.exists()) parent.mkdirs(); BufferedInputStream is = new BufferedInputStream(new FileInputStream(p)); BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(to)); IOUtils.copy(is, os); os.flush(); os.close(); is.close(); FileUtils.setFileMetaData(to, mp, encrypt); SDFSLogger.getLog().debug("updated " + to + " sz=" + to.length()); } } catch (Exception e1) { throw new IOException(e1); } finally { p.delete(); z.delete(); e.delete(); } } finally { this.s3clientLock.readLock().unlock(); } }