List of usage examples for com.google.common.io.ByteSource
protected ByteSource()
Constructor for use by subclasses.
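Every example on this page follows the same core pattern: subclass ByteSource anonymously and implement openStream() to return a fresh InputStream on each call. A minimal self-contained sketch of that pattern (the string payload is illustrative):

import com.google.common.io.ByteSource;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class ByteSourceExample {
    public static void main(String[] args) throws IOException {
        // A ByteSource is a factory for InputStreams: each call to
        // openStream() should return a fresh stream over the same bytes.
        ByteSource source = new ByteSource() {
            @Override
            public InputStream openStream() {
                return new ByteArrayInputStream("hello".getBytes(StandardCharsets.UTF_8));
            }
        };

        byte[] bytes = source.read();   // reads the entire stream into a byte[]
        long size = source.size();      // opens a stream and counts the bytes
        String text = source.asCharSource(StandardCharsets.UTF_8).read();
        System.out.printf("%d bytes (%d read): %s%n", size, bytes.length, text);
    }
}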
From source file:org.apache.druid.indexing.common.tasklogs.FileTaskLogs.java
@Override
public Optional<ByteSource> streamTaskReports(final String taskid) {
    final File file = fileForTask(taskid, "report.json");
    if (file.exists()) {
        return Optional.of(new ByteSource() {
            @Override
            public InputStream openStream() throws IOException {
                return LogUtils.streamFile(file, 0);
            }
        });
    } else {
        return Optional.absent();
    }
}
From source file:net.snowflake.client.jdbc.FileBackedOutputStream.java
/**
 * Creates a new instance that uses the given file threshold, and optionally
 * resets the data when the {@link ByteSource} returned by
 * {@link #asByteSource} is finalized.
 *
 * @param fileThreshold the number of bytes before the stream should switch to
 *     buffering to a file
 * @param resetOnFinalize if true, the {@link #reset} method will be called
 *     when the {@link ByteSource} returned by {@link #asByteSource} is
 *     finalized
 */
public FileBackedOutputStream(int fileThreshold, boolean resetOnFinalize) {
    this.fileThreshold = fileThreshold;
    this.resetOnFinalize = resetOnFinalize;
    memory = new MemoryOutput();
    out = memory;

    if (resetOnFinalize) {
        source = new ByteSource() {
            @Override
            public InputStream openStream() throws IOException {
                return openInputStream();
            }

            @Override
            protected void finalize() {
                try {
                    reset();
                } catch (Throwable t) {
                    t.printStackTrace(System.err);
                }
            }
        };
    } else {
        source = new ByteSource() {
            @Override
            public InputStream openStream() throws IOException {
                return openInputStream();
            }
        };
    }
}
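This Snowflake class mirrors Guava's own com.google.common.io.FileBackedOutputStream, which exposes the same asByteSource() contract. A short usage sketch of the Guava original (threshold and payload are illustrative):

import com.google.common.io.ByteSource;
import com.google.common.io.FileBackedOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class FileBackedExample {
    public static void main(String[] args) throws IOException {
        // Buffers up to 1 KiB in memory, then spills to a temp file.
        FileBackedOutputStream out = new FileBackedOutputStream(1024);
        try {
            out.write("buffered data".getBytes(StandardCharsets.UTF_8));
            ByteSource source = out.asByteSource();
            System.out.println(source.asCharSource(StandardCharsets.UTF_8).read());
        } finally {
            out.reset(); // releases the buffer or deletes the temp file
        }
    }
}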
From source file:com.google.gerrit.server.account.AddSshKey.java
public Response<SshKeyInfo> apply(IdentifiedUser user, Input input)
        throws BadRequestException, IOException, ConfigInvalidException {
    if (input == null) {
        input = new Input();
    }
    if (input.raw == null) {
        throw new BadRequestException("SSH public key missing");
    }

    final RawInput rawKey = input.raw;
    String sshPublicKey = new ByteSource() {
        @Override
        public InputStream openStream() throws IOException {
            return rawKey.getInputStream();
        }
    }.asCharSource(UTF_8).read();

    try {
        AccountSshKey sshKey = authorizedKeys.addKey(user.getAccountId(), sshPublicKey);
        try {
            addKeyFactory.create(user, sshKey).send();
        } catch (EmailException e) {
            log.error("Cannot send SSH key added message to "
                    + user.getAccount().getPreferredEmail(), e);
        }
        sshKeyCache.evict(user.getUserName());
        return Response.<SshKeyInfo>created(GetSshKeys.newSshKeyInfo(sshKey));
    } catch (InvalidSshKeyException e) {
        throw new BadRequestException(e.getMessage());
    }
}
From source file:org.apache.druid.storage.google.GoogleTaskLogs.java
private Optional<ByteSource> streamTaskFile(final String taskid, final long offset, String taskKey)
        throws IOException {
    try {
        if (!storage.exists(config.getBucket(), taskKey)) {
            return Optional.absent();
        }

        final long length = storage.size(config.getBucket(), taskKey);

        return Optional.of(new ByteSource() {
            @Override
            public InputStream openStream() throws IOException {
                try {
                    // A positive offset reads from that position; a negative
                    // offset addresses from the end of the object.
                    final long start;
                    if (offset > 0 && offset < length) {
                        start = offset;
                    } else if (offset < 0 && (-1 * offset) < length) {
                        start = length + offset;
                    } else {
                        start = 0;
                    }

                    InputStream stream =
                            new GoogleByteSource(storage, config.getBucket(), taskKey).openStream();
                    stream.skip(start);
                    return stream;
                } catch (Exception e) {
                    throw new IOException(e);
                }
            }
        });
    } catch (IOException e) {
        throw new IOE(e, "Failed to stream logs from: %s", taskKey);
    }
}
From source file:org.apache.druid.storage.azure.AzureTaskLogs.java
private Optional<ByteSource> streamTaskFile(final String taskid, final long offset, String taskKey)
        throws IOException {
    final String container = config.getContainer();
    try {
        if (!azureStorage.getBlobExists(container, taskKey)) {
            return Optional.absent();
        }

        return Optional.of(new ByteSource() {
            @Override
            public InputStream openStream() throws IOException {
                try {
                    // Same offset convention as the Google implementation:
                    // negative offsets address from the end of the blob.
                    final long start;
                    final long length = azureStorage.getBlobLength(container, taskKey);
                    if (offset > 0 && offset < length) {
                        start = offset;
                    } else if (offset < 0 && (-1 * offset) < length) {
                        start = length + offset;
                    } else {
                        start = 0;
                    }

                    InputStream stream = azureStorage.getBlobInputStream(container, taskKey);
                    stream.skip(start);
                    return stream;
                } catch (Exception e) {
                    throw new IOException(e);
                }
            }
        });
    } catch (StorageException | URISyntaxException e) {
        throw new IOE(e, "Failed to stream logs from: %s", taskKey);
    }
}
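Both the Google and Azure examples hand-roll offset handling, including negative offsets that mean "read the last N bytes". For sources whose length is known, ByteSource.slice offers a view-based alternative; a sketch of equivalent tail reading over a local file (the file name is illustrative):

import com.google.common.io.ByteSource;
import com.google.common.io.Files;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class TailReadExample {
    // Returns a view of the last `tailBytes` bytes of the file (or the whole
    // file if it is shorter), mirroring the negative-offset handling above.
    static ByteSource tail(File file, long tailBytes) throws IOException {
        ByteSource whole = Files.asByteSource(file);
        long length = whole.size();
        long start = Math.max(0, length - tailBytes);
        return whole.slice(start, length - start);
    }

    public static void main(String[] args) throws IOException {
        File log = new File("task.log"); // illustrative path
        System.out.println(tail(log, 4096).asCharSource(StandardCharsets.UTF_8).read());
    }
}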
From source file:org.apache.druid.storage.s3.S3DataSegmentPuller.java
FileUtils.FileCopyResult getSegmentFiles(final S3Coords s3Coords, final File outDir)
        throws SegmentLoadingException {
    log.info("Pulling index at path[%s] to outDir[%s]", s3Coords, outDir);

    if (!isObjectInBucket(s3Coords)) {
        throw new SegmentLoadingException("IndexFile[%s] does not exist.", s3Coords);
    }

    try {
        org.apache.commons.io.FileUtils.forceMkdir(outDir);

        final URI uri = URI.create(StringUtils.format("s3://%s/%s", s3Coords.bucket, s3Coords.path));
        final ByteSource byteSource = new ByteSource() {
            @Override
            public InputStream openStream() throws IOException {
                try {
                    return buildFileObject(uri).openInputStream();
                } catch (AmazonServiceException e) {
                    if (e.getCause() != null) {
                        if (S3Utils.S3RETRY.apply(e)) {
                            throw new IOException("Recoverable exception", e);
                        }
                    }
                    throw Throwables.propagate(e);
                }
            }
        };

        if (CompressionUtils.isZip(s3Coords.path)) {
            final FileUtils.FileCopyResult result =
                    CompressionUtils.unzip(byteSource, outDir, S3Utils.S3RETRY, false);
            log.info("Loaded %d bytes from [%s] to [%s]", result.size(), s3Coords.toString(),
                    outDir.getAbsolutePath());
            return result;
        }
        if (CompressionUtils.isGz(s3Coords.path)) {
            final String fname = Files.getNameWithoutExtension(uri.getPath());
            final File outFile = new File(outDir, fname);
            final FileUtils.FileCopyResult result =
                    CompressionUtils.gunzip(byteSource, outFile, S3Utils.S3RETRY);
            log.info("Loaded %d bytes from [%s] to [%s]", result.size(), s3Coords.toString(),
                    outFile.getAbsolutePath());
            return result;
        }
        throw new IAE("Do not know how to load file type at [%s]", uri.toString());
    } catch (Exception e) {
        try {
            org.apache.commons.io.FileUtils.deleteDirectory(outDir);
        } catch (IOException ioe) {
            log.warn(ioe, "Failed to remove output directory [%s] for segment pulled from [%s]",
                    outDir.getAbsolutePath(), s3Coords.toString());
        }
        throw new SegmentLoadingException(e, e.getMessage());
    }
}
From source file:com.facebook.buck.util.zip.Zip.java
/** Walks the file tree rooted in baseDirectory to create zip entries */
public static void walkBaseDirectoryToCreateEntries(
        ProjectFilesystem filesystem,
        Map<String, Pair<CustomZipEntry, Optional<Path>>> entries,
        Path baseDir,
        ImmutableSet<Path> paths,
        boolean junkPaths,
        ZipCompressionLevel compressionLevel)
        throws IOException {
    // Since filesystem traversals can be non-deterministic, sort the entries we find into
    // a tree map before writing them out.
    FileVisitor<Path> pathFileVisitor = new SimpleFileVisitor<Path>() {
        private boolean isSkipFile(Path file) {
            return !paths.isEmpty() && !paths.contains(file);
        }

        private String getEntryName(Path path) {
            Path relativePath = junkPaths ? path.getFileName() : baseDir.relativize(path);
            return MorePaths.pathWithUnixSeparators(relativePath);
        }

        private CustomZipEntry getZipEntry(String entryName, Path path, BasicFileAttributes attr)
                throws IOException {
            boolean isDirectory = filesystem.isDirectory(path);
            if (isDirectory) {
                entryName += "/";
            }

            CustomZipEntry entry = new CustomZipEntry(entryName);
            // We want deterministic ZIPs, so avoid mtimes.
            entry.setFakeTime();
            entry.setCompressionLevel(
                    isDirectory ? ZipCompressionLevel.NONE.getValue() : compressionLevel.getValue());
            // If we're using STORED files, we must manually set the CRC, size, and compressed size.
            if (entry.getMethod() == ZipEntry.STORED && !isDirectory) {
                entry.setSize(attr.size());
                entry.setCompressedSize(attr.size());
                entry.setCrc(new ByteSource() {
                    @Override
                    public InputStream openStream() throws IOException {
                        return filesystem.newFileInputStream(path);
                    }
                }.hash(Hashing.crc32()).padToLong());
            }

            long externalAttributes = filesystem.getFileAttributesForZipEntry(path);
            LOG.verbose("Setting mode for entry %s path %s to 0x%08X", entryName, path, externalAttributes);
            entry.setExternalAttributes(externalAttributes);
            return entry;
        }

        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
            if (!isSkipFile(file)) {
                CustomZipEntry entry = getZipEntry(getEntryName(file), file, attrs);
                entries.put(entry.getName(), new Pair<>(entry, Optional.of(file)));
            }
            return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
            if (!dir.equals(baseDir) && !isSkipFile(dir)) {
                CustomZipEntry entry = getZipEntry(getEntryName(dir), dir, attrs);
                entries.put(entry.getName(), new Pair<>(entry, Optional.empty()));
            }
            return FileVisitResult.CONTINUE;
        }
    };

    filesystem.walkRelativeFileTree(baseDir, pathFileVisitor);
}
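The hash(Hashing.crc32()) call above is plain ByteSource API rather than anything Buck-specific. A standalone sketch of the same streaming CRC-32 computation (the file path is illustrative):

import com.google.common.hash.Hashing;
import com.google.common.io.Files;
import java.io.File;
import java.io.IOException;

public class Crc32Example {
    public static void main(String[] args) throws IOException {
        File file = new File("build/output.bin"); // illustrative path
        // Files.asByteSource gives a ByteSource view of the file; hash()
        // streams it through the CRC-32 function without loading the whole
        // file into memory, and padToLong() yields the value ZipEntry expects.
        long crc = Files.asByteSource(file).hash(Hashing.crc32()).padToLong();
        System.out.printf("CRC-32: %08X%n", crc);
    }
}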
From source file:org.apache.druid.storage.hdfs.tasklog.HdfsTaskLogs.java
private Optional<ByteSource> streamTaskFile(final Path path, final long offset) throws IOException {
    final FileSystem fs = path.getFileSystem(hadoopConfig);
    if (fs.exists(path)) {
        return Optional.of(new ByteSource() {
            @Override
            public InputStream openStream() throws IOException {
                log.info("Reading task log from: %s", path);
                // A negative offset means "tail": seek to length + offset,
                // clamped at zero.
                final long seekPos;
                if (offset < 0) {
                    final FileStatus stat = fs.getFileStatus(path);
                    seekPos = Math.max(0, stat.getLen() + offset);
                } else {
                    seekPos = offset;
                }
                final FSDataInputStream inputStream = fs.open(path);
                inputStream.seek(seekPos);
                log.info("Read task log from: %s (seek = %,d)", path, seekPos);
                return inputStream;
            }
        });
    } else {
        return Optional.absent();
    }
}
From source file:com.b2international.commons.FileUtils.java
/**
 * Creates a temporary file by copying the content of the original file, given
 * as an input stream, under the given file name. The temporary file will be
 * deleted on graceful JVM halt.
 * <p>Callers are responsible for closing the input stream.
 * @param is the input stream to the file.
 * @param fileName the file name. Can be {@code null}; if {@code null}, a
 *     random UUID is used as the temporary file name.
 * @return the temporary copy of the file.
 * @throws RuntimeException if the copy fails.
 * @see File#deleteOnExit()
 */
public static File copyContentToTempFile(final InputStream is, final String fileName) {
    checkNotNull(is, "is");
    try {
        final File tmpDirectory = com.google.common.io.Files.createTempDir();
        final File tmpFile = new File(tmpDirectory, isEmpty(fileName) ? randomUUID().toString() : fileName);
        tmpFile.deleteOnExit();
        // Note: this ByteSource hands back the caller's stream, so it can
        // only be read once; copyTo consumes it immediately.
        new ByteSource() {
            @Override
            public InputStream openStream() throws IOException {
                return is;
            }
        }.copyTo(Files.asByteSink(tmpFile));
        return tmpFile;
    } catch (final IOException e) {
        throw new RuntimeException(e);
    }
}
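The copyTo(ByteSink) call above is the standard way to drain a ByteSource into a file. A minimal standalone sketch (both paths are illustrative):

import com.google.common.io.ByteSource;
import com.google.common.io.Files;
import java.io.File;
import java.io.IOException;

public class CopyToExample {
    public static void main(String[] args) throws IOException {
        File src = new File("in.dat");  // illustrative
        File dst = new File("out.dat"); // illustrative
        ByteSource source = Files.asByteSource(src);
        // copyTo opens the source stream, writes all bytes to the sink,
        // closes both, and returns the number of bytes copied.
        long copied = source.copyTo(Files.asByteSink(dst));
        System.out.println("Copied " + copied + " bytes");
    }
}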
From source file:org.opendaylight.controller.cluster.io.FileBackedOutputStream.java
/**
 * Returns a readable {@link ByteSource} view of the data that has been written to this stream. This stream is
 * closed and further attempts to write to it will result in an IOException.
 *
 * @return a ByteSource instance
 * @throws IOException if close fails
 */
@Nonnull
public synchronized ByteSource asByteSource() throws IOException {
    close();

    if (source == null) {
        source = new ByteSource() {
            @Override
            public InputStream openStream() throws IOException {
                synchronized (FileBackedOutputStream.this) {
                    if (file != null) {
                        return Files.newInputStream(file.toPath());
                    } else {
                        return new ByteArrayInputStream(memory.getBuffer(), 0, memory.getCount());
                    }
                }
            }

            @Override
            public long size() {
                return count;
            }
        };
    }
    return source;
}
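Overriding size() as above spares Guava its fallback of opening a stream and counting the bytes. When the data is already in memory, ByteSource.wrap gives the same behavior out of the box; a short sketch (the payload is illustrative):

import com.google.common.io.ByteSource;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class WrapExample {
    public static void main(String[] args) throws IOException {
        byte[] data = "in-memory payload".getBytes(StandardCharsets.UTF_8);
        // ByteSource.wrap creates a view over the array; size() is known
        // up front, with no stream opened.
        ByteSource source = ByteSource.wrap(data);
        System.out.println(source.size());
        System.out.println(source.asCharSource(StandardCharsets.UTF_8).read());
    }
}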