Example usage for com.google.common.io Closer create

List of usage examples for com.google.common.io Closer create

Introduction

On this page you can find example usage for com.google.common.io Closer create.

Prototype

public static Closer create() 

Document

Creates a new Closer.

Usage
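
The project examples below all follow the same basic idiom: create a Closer, register each resource as it is opened, rethrow any failure through the Closer so that a later exception from close() cannot mask it, and close everything in a finally block (or with try-with-resources, since Closer implements Closeable). Here is a minimal sketch of that idiom that copies one file to another; the CloserExample class, the copyFile method, and its path arguments are hypothetical placeholders for illustration only:

import com.google.common.io.ByteStreams;
import com.google.common.io.Closer;

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public class CloserExample {

    // Hypothetical helper: copies one file to another, closing both streams
    // through a single Closer.
    static void copyFile(String from, String to) throws IOException {
        Closer closer = Closer.create();
        try {
            InputStream in = closer.register(new FileInputStream(from));
            OutputStream out = closer.register(new FileOutputStream(to));
            ByteStreams.copy(in, out);
        } catch (Throwable t) {
            // rethrow() records t as the primary exception, so a failure in
            // close() is suppressed rather than masking it.
            throw closer.rethrow(t);
        } finally {
            closer.close();
        }
    }
}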

From source file:com.googlecode.jmxtrans.model.output.support.HttpOutputWriter.java

private void consumeInputStreams(HttpURLConnection httpURLConnection) throws IOException {
    Closer closer = Closer.create();
    try {
        InputStream in = closer.register(httpURLConnection.getInputStream());
        InputStream err = closer.register(httpURLConnection.getErrorStream());
        copy(in, nullOutputStream());
        if (err != null)
            copy(err, nullOutputStream());
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
}

From source file:alluxio.client.block.stream.BlockInStream.java

/**
 * Creates an instance of remote {@link BlockInStream} that reads from a remote worker.
 *
 * @param blockId the block ID
 * @param blockSize the block size
 * @param workerNetAddress the worker network address
 * @param context the file system context
 * @param options the options
 * @throws IOException if it fails to create an instance
 * @return the {@link BlockInStream} created
 */
// TODO(peis): Use options idiom (ALLUXIO-2579).
public static BlockInStream createRemoteBlockInStream(long blockId, long blockSize,
        WorkerNetAddress workerNetAddress, FileSystemContext context, InStreamOptions options)
        throws IOException {
    Closer closer = Closer.create();
    try {
        BlockWorkerClient blockWorkerClient = closer
                .register(context.createBlockWorkerClient(workerNetAddress));
        LockBlockResource lockBlockResource = closer
                .register(blockWorkerClient.lockBlock(blockId, LockBlockOptions.defaults()));
        PacketInStream inStream = closer.register(PacketInStream.createNettyPacketInStream(context,
                blockWorkerClient.getDataServerAddress(), blockId, lockBlockResource.getResult().getLockId(),
                blockWorkerClient.getSessionId(), blockSize, false, Protocol.RequestType.ALLUXIO_BLOCK));
        blockWorkerClient.accessBlock(blockId);
        return new BlockInStream(inStream, blockWorkerClient, closer, options);
    } catch (AlluxioException | IOException e) {
        CommonUtils.closeQuietly(closer);
        throw CommonUtils.castToIOException(e);
    }
}

From source file:org.pantsbuild.tools.jar.JarEntryCopier.java

/**
 * Copy a jar entry to an output file without decompressing and re-compressing the entry when
 * it is {@link ZipEntry#DEFLATED}.
 *
 * @param jarOut The jar file being created or appended to.
 * @param name The resource name to write.
 * @param jarIn The input JarFile.
 * @param jarEntry The entry extracted from <code>jarIn</code>.  The compression method passed in
 *     to this entry is preserved in the output file.
 * @throws IOException if there is a problem reading from {@code jarIn} or writing to
 *     {@code jarOut}.
 */
static void copyEntry(JarOutputStream jarOut, String name, JarFile jarIn, JarEntry jarEntry)
        throws IOException {

    JarEntry outEntry = new JarEntry(jarEntry);
    ZE_NAME.set(outEntry, name);

    if (outEntry.isDirectory()) {
        outEntry.setMethod(ZipEntry.STORED);
        outEntry.setSize(0);
        outEntry.setCompressedSize(0);
        outEntry.setCrc(0);
        jarOut.putNextEntry(outEntry);
        jarOut.closeEntry();
    } else if (jarEntry.getMethod() == ZipEntry.STORED) {
        Closer closer = Closer.create();
        try {
            InputStream is = closer.register(jarIn.getInputStream(jarEntry));
            jarOut.putNextEntry(outEntry);
            ByteStreams.copy(is, jarOut);
        } catch (IOException e) {
            throw closer.rethrow(e);
        } finally {
            closer.close();
        }
        jarOut.closeEntry();
    } else {
        Closer closer = Closer.create();
        try {
            // Grab the underlying stream so we can read the compressed bytes.
            FilterInputStream zis = (FilterInputStream) closer.register(jarIn.getInputStream(jarEntry));
            InputStream is = FIS_IN.get(zis);

            // Start it as a DEFLATE....
            jarOut.putNextEntry(outEntry);

            // But swap out the method to STORED so the bytes don't get compressed.
            // This works because ZipFile doesn't make a defensive copy.
            outEntry.setMethod(ZipEntry.STORED);
            outEntry.setSize(jarEntry.getCompressedSize());
            ByteStreams.copy(is, jarOut);
        } catch (IOException e) {
            throw closer.rethrow(e);
        } finally {
            closer.close();
        }

        // The internal CRC is now wrong, so hack it before we close the entry.
        CRC_VALUE.set(ZOS_CRC.get(jarOut), (int) jarEntry.getCrc());
        jarOut.closeEntry();

        // Restore entry back to normal, so it will be written out correctly at the end.
        outEntry.setMethod(ZipEntry.DEFLATED);
        outEntry.setSize(jarEntry.getSize());
    }
}

From source file:io.bazel.rules.closure.worker.PersistentWorker.java

private int runProgram(PrintStream output, Map<Path, HashCode> digests) throws InterruptedException {
    AtomicBoolean failed = new AtomicBoolean();
    try (Closer closer = Closer.create()) {
        component.newActionComponentBuilder().args(new ArrayList<>(arguments)).closer(closer)
                .inputDigests(digests).output(output).failed(failed).build().program().run();
    } catch (Exception e) {
        if (Utilities.wasInterrupted(e)) {
            throw new InterruptedException();
        }
        output.printf("ERROR: Program threw uncaught exception with args: %s%n",
                Joiner.on(' ').join(arguments));
        e.printStackTrace(output);
        return 1;
    }
    return failed.get() ? 1 : 0;
}

From source file:net.ripe.rpki.commons.crypto.crl.X509Crl.java

private static X509CRL makeX509CRLFromEncoded(byte[] encoded) {
    final X509CRL crl;
    if (null != encoded) {
        try {
            final Closer closer = Closer.create();
            try {
                final ByteArrayInputStream in = new ByteArrayInputStream(encoded);
                final CertificateFactory factory = CertificateFactory.getInstance("X.509");
                crl = (X509CRL) factory.generateCRL(in);
            } catch (final CertificateException e) {
                throw closer.rethrow(new IllegalArgumentException(e));
            } catch (final CRLException e) {
                throw closer.rethrow(new IllegalArgumentException(e));
            } catch (final Throwable t) {
                throw closer.rethrow(t);
            } finally {
                closer.close();
            }
        } catch (final IOException e) {
            throw new RuntimeException("Error managing CRL I/O stream", e);
        }
    } else {
        crl = null;
    }
    return crl;
}

From source file:com.android.build.gradle.internal.transforms.MultiStreamJarTransform.java

private static void jarFolder(@NonNull File folder, @NonNull File jarFile) throws IOException {
    Closer closer = Closer.create();
    try {

        FileOutputStream fos = closer.register(new FileOutputStream(jarFile));
        JarOutputStream jos = closer.register(new JarOutputStream(fos));

        final byte[] buffer = new byte[8192];
        processFolder(jos, "", folder, buffer);

    } finally {
        closer.close();
    }
}

From source file:tachyon.client.RemoteBlockOutStream.java

/**
 * @param file the file the block belongs to
 * @param opType the OutStream's write type
 * @param blockIndex the index of the block in the file
 * @param initialBytes the initial size bytes that will be allocated to the block. This is unused
 *                     for now, since the data server will allocate space matching the size of the
 *                     first write.
 * @param tachyonConf the TachyonConf instance for this file output stream.
 * @throws IOException
 */
RemoteBlockOutStream(TachyonFile file, WriteType opType, int blockIndex, long initialBytes,
        TachyonConf tachyonConf) throws IOException {
    super(file, opType, tachyonConf);

    if (!opType.isCache()) {
        throw new IOException("RemoteBlockOutStream only supports WriteType.CACHE. opType: " + opType);
    }

    mBlockIndex = blockIndex;
    mBlockCapacityBytes = mFile.getBlockSizeByte();
    mBlockId = mFile.getBlockId(mBlockIndex);
    mCloser = Closer.create();

    // Create a local buffer.
    mBufferBytes = mTachyonConf.getBytes(Constants.USER_FILE_BUFFER_BYTES, Constants.MB);
    mBuffer = ByteBuffer.allocate(Ints.checkedCast(mBufferBytes));

    // Open the remote writer.
    mRemoteWriter = mCloser.register(RemoteBlockWriter.Factory.createRemoteBlockWriter(tachyonConf));
    mRemoteWriter.open(mTachyonFS.getWorkerDataServerAddress(), mBlockId, mTachyonFS.getUserId());
    mClosed = false;
}

From source file:org.apache.gobblin.writer.PartitionedDataWriter.java

public PartitionedDataWriter(DataWriterBuilder<S, D> builder, final State state) throws IOException {
    this.isSpeculativeAttemptSafe = true;
    this.isWatermarkCapable = true;
    this.baseWriterId = builder.getWriterId();
    this.closer = Closer.create();
    this.writerBuilder = builder;
    this.controlMessageHandler = new PartitionDataWriterMessageHandler();
    this.partitionWriters = CacheBuilder.newBuilder().build(new CacheLoader<GenericRecord, DataWriter<D>>() {
        @Override
        public DataWriter<D> load(final GenericRecord key) throws Exception {
            /* wrap the data writer to allow the option to close the writer on flush */
            return PartitionedDataWriter.this.closer.register(new InstrumentedPartitionedDataWriterDecorator<>(
                    new CloseOnFlushWriterWrapper<D>(new Supplier<DataWriter<D>>() {
                        @Override
                        public DataWriter<D> get() {
                            try {
                                return createPartitionWriter(key);
                            } catch (IOException e) {
                                throw new RuntimeException("Error creating writer", e);
                            }
                        }
                    }, state), state, key));
        }
    });

    if (state.contains(ConfigurationKeys.WRITER_PARTITIONER_CLASS)) {
        Preconditions.checkArgument(builder instanceof PartitionAwareDataWriterBuilder,
                String.format("%s was specified but the writer %s does not support partitioning.",
                        ConfigurationKeys.WRITER_PARTITIONER_CLASS, builder.getClass().getCanonicalName()));

        try {
            this.shouldPartition = true;
            this.builder = Optional.of(PartitionAwareDataWriterBuilder.class.cast(builder));
            this.partitioner = Optional.of(WriterPartitioner.class.cast(ConstructorUtils.invokeConstructor(
                    Class.forName(state.getProp(ConfigurationKeys.WRITER_PARTITIONER_CLASS)), state,
                    builder.getBranches(), builder.getBranch())));
            Preconditions.checkArgument(
                    this.builder.get().validatePartitionSchema(this.partitioner.get().partitionSchema()),
                    String.format("Writer %s does not support schema from partitioner %s",
                            builder.getClass().getCanonicalName(),
                            this.partitioner.getClass().getCanonicalName()));
        } catch (ReflectiveOperationException roe) {
            throw new IOException(roe);
        }
    } else {
        this.shouldPartition = false;
        // Support configuration to close the DataWriter on flush to allow publishing intermediate results in a task
        CloseOnFlushWriterWrapper closeOnFlushWriterWrapper = new CloseOnFlushWriterWrapper<D>(
                new Supplier<DataWriter<D>>() {
                    @Override
                    public DataWriter<D> get() {
                        try {
                            return builder.withWriterId(PartitionedDataWriter.this.baseWriterId + "_"
                                    + PartitionedDataWriter.this.writerIdSuffix++).build();
                        } catch (IOException e) {
                            throw new RuntimeException("Error creating writer", e);
                        }
                    }
                }, state);
        DataWriter<D> dataWriter = (DataWriter) closeOnFlushWriterWrapper.getDecoratedObject();

        InstrumentedDataWriterDecorator<D> writer = this.closer
                .register(new InstrumentedDataWriterDecorator<>(closeOnFlushWriterWrapper, state));

        this.isSpeculativeAttemptSafe = this.isDataWriterForPartitionSafe(dataWriter);
        this.isWatermarkCapable = this.isDataWriterWatermarkCapable(dataWriter);
        this.partitionWriters.put(NON_PARTITIONED_WRITER_KEY, writer);
        this.partitioner = Optional.absent();
        this.builder = Optional.absent();
    }
}

From source file:com.hujiang.gradle.plugin.android.aspectjx.JarMerger.java

public void addJar(@NonNull File file, boolean removeEntryTimestamp) throws IOException {
    init();

    Closer localCloser = Closer.create();
    try {
        FileInputStream fis = localCloser.register(new FileInputStream(file));
        ZipInputStream zis = localCloser.register(new ZipInputStream(fis));

        // loop on the entries of the jar file package and put them in the final jar
        ZipEntry entry;
        while ((entry = zis.getNextEntry()) != null) {
            // do not take directories or anything inside a potential META-INF folder.
            if (entry.isDirectory()) {
                continue;
            }

            String name = entry.getName();
            if (filter != null && !filter.checkEntry(name)) {
                continue;
            }

            JarEntry newEntry;

            // Preserve the STORED method of the input entry.
            if (entry.getMethod() == JarEntry.STORED) {
                newEntry = new JarEntry(entry);
            } else {
                // Create a new entry so that the compressed len is recomputed.
                newEntry = new JarEntry(name);
            }
            if (removeEntryTimestamp) {
                newEntry.setTime(0);
            }

            // add the entry to the jar archive
            jarOutputStream.putNextEntry(newEntry);

            // read the content of the entry from the input stream, and write it into the archive.
            int count;
            while ((count = zis.read(buffer)) != -1) {
                jarOutputStream.write(buffer, 0, count);
            }

            // close the entries for this file
            jarOutputStream.closeEntry();
            zis.closeEntry();
        }
    } catch (IZipEntryFilter.ZipAbortException e) {
        throw new IOException(e);
    } finally {
        localCloser.close();
    }
}

From source file:org.ow2.proactive_grid_cloud_portal.dataspace.util.VFSZipper.java

public static void zip(FileObject file, OutputStream out) throws IOException {
    Closer closer = Closer.create();
    try {
        closer.register(out);
        InputStream in = file.getContent().getInputStream();
        closer.register(in);
        ByteStreams.copy(in, out);
    } catch (IOException ioe) {
        throw closer.rethrow(ioe);
    } finally {
        closer.close();
    }
}