List of usage examples for com.amazonaws.services.s3.model GetObjectRequest setRange
public void setRange(long start, long end)
Sets the optional inclusive byte range within the desired object that will be downloaded by this request.
From source file: org.caboclo.clients.AmazonClient.java
License:Open Source License
@Override public void getNextPart(MultiPartDownload mpd) throws IOException { RemoteFile remoteFile = mpd.getRemotePath(); File file = mpd.getFile();// w w w. ja v a 2 s. c o m long start = mpd.getOffset() + 1; long end = mpd.getOffset() + Constants.CHUNK_DOWNLOAD_SIZE - 1; boolean finished = false; if (end > mpd.getRemotePath().getSize()) { end = mpd.getRemotePath().getSize(); finished = true; } String child = remoteFile.getPath(); GetObjectRequest rangeObjectRequest = new GetObjectRequest(getBucketName(), child); rangeObjectRequest.setRange(start, end); S3Object obj = s3.getObject(rangeObjectRequest); BufferedInputStream bis = new BufferedInputStream(obj.getObjectContent()); ByteArrayOutputStream baos = new ByteArrayOutputStream(Constants.CHUNK_DOWNLOAD_SIZE); writeStreamToByteStream(bis, baos); appendFile(file, baos.toByteArray()); mpd.setOffset(end); mpd.setFinished(finished); }
From source file: org.gradle.internal.resource.transport.aws.s3.S3Client.java
License:Apache License
private S3Object doGetS3Object(URI uri, boolean isLightWeight) { S3RegionalResource s3RegionalResource = new S3RegionalResource(uri); String bucketName = s3RegionalResource.getBucketName(); String s3BucketKey = s3RegionalResource.getKey(); configureClient(s3RegionalResource); GetObjectRequest getObjectRequest = new GetObjectRequest(bucketName, s3BucketKey); if (isLightWeight) { //Skip content download getObjectRequest.setRange(0, 0); }// www .java 2 s . c o m try { return amazonS3Client.getObject(getObjectRequest); } catch (AmazonServiceException e) { String errorCode = e.getErrorCode(); if (null != errorCode && errorCode.equalsIgnoreCase("NoSuchKey")) { return null; } throw ResourceExceptions.getFailed(uri, e); } }
From source file: org.opendedup.sdfs.filestore.cloud.BatchAwsS3ChunkStore.java
License:Open Source License
public byte[] getBytes(long id, int from, int to) throws IOException, DataArchivedException { // SDFSLogger.getLog().info("Downloading " + id); // SDFSLogger.getLog().info("Current readers :" + rr.incrementAndGet()); String haName = EncyptUtils.encHashArchiveName(id, Main.chunkStoreEncryptionEnabled); this.s3clientLock.readLock().lock(); S3Object sobj = null;/* www . j ava2 s .com*/ byte[] data = null; // int ol = 0; try { long tm = System.currentTimeMillis(); // ObjectMetadata omd = s3Service.getObjectMetadata(this.name, // "blocks/" + haName); // Map<String, String> mp = this.getUserMetaData(omd); // ol = Integer.parseInt(mp.get("compressedsize")); // if (ol <= to) { // to = ol; // SDFSLogger.getLog().info("change to=" + to); // } int cl = (int) to - from; GetObjectRequest gr = new GetObjectRequest(this.name, "blocks/" + haName); gr.setRange(from, to); sobj = s3Service.getObject(gr); InputStream in = sobj.getObjectContent(); data = new byte[cl]; IOUtils.readFully(in, data); IOUtils.closeQuietly(in); double dtm = (System.currentTimeMillis() - tm) / 1000d; double bps = (cl / 1024) / dtm; SDFSLogger.getLog().debug("read [" + id + "] at " + bps + " kbps"); // mp = this.getUserMetaData(omd); /* * try { mp.put("lastaccessed", * Long.toString(System.currentTimeMillis())); * omd.setUserMetadata(mp); CopyObjectRequest req = new * CopyObjectRequest(this.name, "blocks/" + haName, this.name, * "blocks/" + haName) .withNewObjectMetadata(omd); * s3Service.copyObject(req); } catch (Exception e) { * SDFSLogger.getLog().debug("error setting last accessed", e); } */ /* * if (mp.containsKey("deleted")) { boolean del = * Boolean.parseBoolean((String) mp.get("deleted")); if (del) { * S3Object kobj = s3Service.getObject(this.name, "keys/" + haName); * * int claims = this.getClaimedObjects(kobj, id); * * int delobj = 0; if (mp.containsKey("deleted-objects")) { delobj = * Integer.parseInt((String) mp .get("deleted-objects")) - claims; * if (delobj < 0) delobj = 0; } 
mp.remove("deleted"); * mp.put("deleted-objects", Integer.toString(delobj)); * mp.put("suspect", "true"); omd.setUserMetadata(mp); * CopyObjectRequest req = new CopyObjectRequest(this.name, "keys/" * + haName, this.name, "keys/" + haName) * .withNewObjectMetadata(omd); s3Service.copyObject(req); int _size * = Integer.parseInt((String) mp.get("size")); int _compressedSize * = Integer.parseInt((String) mp .get("compressedsize")); * HashBlobArchive.currentLength.addAndGet(_size); * HashBlobArchive.compressedLength.addAndGet(_compressedSize); * SDFSLogger.getLog().warn( "Reclaimed [" + claims + * "] blocks marked for deletion"); kobj.close(); } } */ dtm = (System.currentTimeMillis() - tm) / 1000d; bps = (cl / 1024) / dtm; } catch (AmazonS3Exception e) { if (e.getErrorCode().equalsIgnoreCase("InvalidObjectState")) throw new DataArchivedException(id, null); else { SDFSLogger.getLog().error( "unable to get block [" + id + "] at [blocks/" + haName + "] pos " + from + " to " + to, e); throw e; } } catch (Exception e) { throw new IOException(e); } finally { try { if (sobj != null) { sobj.close(); } } catch (Exception e) { } this.s3clientLock.readLock().unlock(); } return data; }
From source file: surrey.repository.impl.S3RepositoryFile.java
License:Open Source License
/**
 * Opens a stream over a byte range of the backing S3 object.
 *
 * @param start  first byte to read (0-based)
 * @param length number of bytes to read
 * @return a stream limited to the requested range
 * @throws IOException if the download cannot be started
 * @see surrey.repository.RepositoryFile#getInputStream(long, long)
 */
@Override
public InputStream getInputStream(long start, long length) throws IOException {
    GetObjectRequest request = new GetObjectRequest(bucketName, key);
    // setRange is inclusive on both ends, hence start + length - 1.
    request.setRange(start, start + length - 1);
    S3Object object = transferManager.getAmazonS3Client().getObject(request);
    return new S3AbortingInputStream(object.getObjectContent(), length);
}