Example usage for com.google.common.io Closer rethrow

List of usage examples for com.google.common.io Closer rethrow

Introduction

On this page you can find an example usage of com.google.common.io Closer's rethrow method.

Prototype

public RuntimeException rethrow(Throwable e) throws IOException 

Source Link

Document

Stores the given throwable and rethrows it.

Usage

From source file:com.tinspx.util.io.ByteUtils.java

/**
 * Copies at most {@code limit} bytes from {@code from} into {@code to},
 * returning the total number of bytes copied. {@code to} is not closed.
 *
 * @param from the source to read bytes from
 * @param to the destination to copy bytes read from {@code from} into
 * @param limit the maximum number of bytes to copy
 * @return the total number of bytes copied from {@code from} to {@code to}
 * @throws IOException if an IOException occurs
 * @throws NullPointerException if either {@code from} or {@code to} is null
 * @throws IllegalArgumentException if {@code limit} is negative
 */
@ThreadLocalArray(8192)
public static long copy(@NonNull ByteSource from, @NonNull @WillNotClose WritableByteChannel to, long limit)
        throws IOException {
    checkLimit(limit);
    final Closer closer = Closer.create();
    try {
        // Plain stream copy unless the source can hand us a channel directly.
        if (!(from instanceof ChannelSource)) {
            return copy(closer.register(from.openStream()), to, limit);
        }
        return copy(closer.register(((ChannelSource) from).openChannel()), to, limit);
    } catch (Throwable thrown) {
        // Route through Closer so close() failures don't mask the primary exception.
        throw closer.rethrow(thrown);
    } finally {
        closer.close();
    }
}

From source file:com.tinspx.util.io.ChannelSource.java

/**
 * Copies this source's bytes into {@code sink}, preferring a channel-to-channel
 * transfer when both ends prefer channels; otherwise copies via a flushed stream.
 * Returns the number of bytes copied.
 */
@Override
public long copyTo(ByteSink sink) throws IOException {
    Closer closer = Closer.create();
    try {
        boolean useChannel = preferChannel()
                && sink instanceof ChannelSink
                && ((ChannelSink) sink).preferChannel();
        if (useChannel) {
            return copyTo(closer.register(((ChannelSink) sink).openChannel()));
        }
        OutputStream stream = closer.register(sink.openStream());
        long copied = copyTo(stream);
        // Flush before returning so buffered bytes reach the sink even though
        // the stream is closed by the Closer afterwards.
        stream.flush();
        return copied;
    } catch (Throwable thrown) {
        throw closer.rethrow(thrown);
    } finally {
        closer.close();
    }
}

From source file:org.apache.gobblin.yarn.YarnService.java

/**
 * Serializes the current user's security tokens into a {@link ByteBuffer} for
 * launching containers.
 *
 * <p>The AM->RM token is removed from the credentials <em>before</em> serialization
 * so that containers cannot access it. (The original code removed the token only
 * after calling {@code writeTokenStorageToStream}, so the serialized buffer that
 * containers received still contained the AM->RM token.)
 *
 * @return a ByteBuffer wrapping the serialized token storage
 * @throws IOException if writing the token storage to the buffer fails
 */
private ByteBuffer getSecurityTokens() throws IOException {
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    Closer closer = Closer.create();
    try {
        // Remove the AM->RM token so that containers cannot access it.
        // This must happen before serialization below.
        Iterator<Token<?>> tokenIterator = credentials.getAllTokens().iterator();
        while (tokenIterator.hasNext()) {
            Token<?> token = tokenIterator.next();
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                tokenIterator.remove();
            }
        }

        DataOutputBuffer dataOutputBuffer = closer.register(new DataOutputBuffer());
        credentials.writeTokenStorageToStream(dataOutputBuffer);

        return ByteBuffer.wrap(dataOutputBuffer.getData(), 0, dataOutputBuffer.getLength());
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
}

From source file:com.tinspx.util.io.ByteUtils.java

/**
 * Provides an alternative to {@link ByteSource#read()}.
 */
@ThreadLocalArray(8192)
public static byte[] toByteArray(ByteSource source) throws IOException {
    final Closer closer = Closer.create();
    try {
        boolean sizeKnown = source instanceof ChannelSource && ((ChannelSource) source).hasKnownSize();
        if (sizeKnown) {
            // Known size lets the sized overload allocate the result array exactly once.
            return toByteArray(closer.register(source.openStream()), checkByteSourceSize(source));
        }
        return toByteArray(closer.register(source.openStream()));
    } catch (Throwable thrown) {
        throw closer.rethrow(thrown);
    } finally {
        closer.close();
    }
}

From source file:gobblin.runtime.AbstractJobLauncher.java

/**
 * Cleanup the left-over staging data possibly from the previous run of the job that may have failed
 * and not cleaned up its staging data.
 *
 * Property {@link ConfigurationKeys#CLEANUP_STAGING_DATA_PER_TASK} controls whether to cleanup
 * staging data per task, or to cleanup entire job's staging data at once.
 *
 * Staging data will not be cleaned if the job has unfinished {@link CommitSequence}s.
 */
private void cleanLeftoverStagingData(WorkUnitStream workUnits, JobState jobState) throws JobException {
    if (jobState.getPropAsBoolean(ConfigurationKeys.CLEANUP_STAGING_DATA_BY_INITIALIZER, false)) {
        // Clean up will be done by initializer.
        return;
    }

    // A failure while checking commit sequences is fatal for this method (JobException),
    // unlike the cleanup failures below which are deliberately logged and swallowed.
    try {
        if (!canCleanStagingData(jobState)) {
            LOG.error("Job " + jobState.getJobName()
                    + " has unfinished commit sequences. Will not clean up staging data.");
            return;
        }
    } catch (IOException e) {
        throw new JobException("Failed to check unfinished commit sequences", e);
    }

    try {
        if (this.jobContext.shouldCleanupStagingDataPerTask()) {
            if (workUnits.isSafeToMaterialize()) {
                Closer closer = Closer.create();
                Map<String, ParallelRunner> parallelRunners = Maps.newHashMap();
                try {
                    // Per-task cleanup: flatten all work units and clean each task's
                    // staging data; ParallelRunners created along the way are registered
                    // with the closer and shut down in the finally block.
                    for (WorkUnit workUnit : JobLauncherUtils
                            .flattenWorkUnits(workUnits.getMaterializedWorkUnitCollection())) {
                        JobLauncherUtils.cleanTaskStagingData(new WorkUnitState(workUnit, jobState), LOG,
                                closer, parallelRunners);
                    }
                } catch (Throwable t) {
                    // Guava Closer pattern: record the primary failure so close() cannot mask it.
                    throw closer.rethrow(t);
                } finally {
                    closer.close();
                }
            } else {
                throw new RuntimeException("Work unit streams do not support cleaning staging data per task.");
            }
        } else {
            JobLauncherUtils.cleanJobStagingData(jobState, LOG);
        }
    } catch (Throwable t) {
        // Catch Throwable instead of just IOException to make sure failure of this won't affect the current run
        LOG.error("Failed to clean leftover staging data", t);
    }
}

From source file:com.tinspx.util.io.ByteUtils.java

/**
 * Copies at most {@code limit} bytes from {@code from} into {@code to},
 * returning the total number of bytes copied. {@code from} is not closed.
 * /*  w  ww.j  a  v a 2s  .  co  m*/
 * @param from the source to read bytes from
 * @param to the destination to copy bytes read from {@code from} into
 * @param limit the maximum number of bytes to copy
 * @return the total number of bytes copied from {@code from} to {@code to}
 * @throws IOException if an IOException occurs
 * @throws NullPointerException if either {@code from} or {@code to} is null
 * @throws IllegalArgumentException if {@code limit} is negative
 */
@ThreadLocalArray(8192)
public static long copy(@NonNull @WillNotClose ReadableByteChannel from, @NonNull ByteSink to, long limit)
        throws IOException {
    checkLimit(limit);
    final Closer closer = Closer.create();
    try {
        if (to instanceof ChannelSink) {
            return copy(from, closer.register(((ChannelSink) to).openChannel()), limit);
        } else {
            OutputStream out = closer.register(to.openStream());
            long total = copy(from, out, limit);
            out.flush();
            return total;
        }
    } catch (Throwable e) {
        throw closer.rethrow(e);
    } finally {
        closer.close();
    }
}

From source file:gobblin.runtime.Task.java

/**
 * Publishes this task's data through a {@link SingleTaskDataPublisher} built from the
 * task's configured publisher class. Any failure is recorded on the task state and
 * rethrown via the {@link Closer} so the publisher is still closed.
 *
 * @throws IOException if publishing or closing the publisher fails
 */
private void publishTaskData() throws IOException {
    Closer closer = Closer.create();
    try {
        Class<? extends DataPublisher> publisherClass = getTaskPublisherClass();
        SingleTaskDataPublisher publisher =
                closer.register(SingleTaskDataPublisher.getInstance(publisherClass, this.taskState));

        LOG.info("Publishing data from task " + this.taskId);
        publisher.publish(this.taskState);
    } catch (ClassCastException e) {
        // The configured publisher does not extend SingleTaskDataPublisher; log a
        // targeted message before the common failure handling.
        LOG.error(String.format("To publish data in task, the publisher class must extend %s",
                SingleTaskDataPublisher.class.getSimpleName()), e);
        this.taskState.setTaskFailureException(e);
        throw closer.rethrow(e);
    } catch (Throwable t) {
        this.taskState.setTaskFailureException(t);
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
}

From source file:org.apache.gobblin.runtime.AbstractJobLauncher.java

/**
 * Cleanup the left-over staging data possibly from the previous run of the job that may have failed
 * and not cleaned up its staging data.
 *
 * Property {@link ConfigurationKeys#CLEANUP_STAGING_DATA_PER_TASK} controls whether to cleanup
 * staging data per task, or to cleanup entire job's staging data at once.
 *
 * Staging data will not be cleaned if the job has unfinished {@link CommitSequence}s.
 */
private void cleanLeftoverStagingData(WorkUnitStream workUnits, JobState jobState) throws JobException {
    if (jobState.getPropAsBoolean(ConfigurationKeys.CLEANUP_STAGING_DATA_BY_INITIALIZER, false)) {
        // Clean up will be done by initializer.
        return;
    }

    // A failure while checking commit sequences is fatal for this method (JobException),
    // unlike the cleanup failures below which are deliberately logged and swallowed.
    try {
        if (!canCleanStagingData(jobState)) {
            LOG.error("Job " + jobState.getJobName()
                    + " has unfinished commit sequences. Will not clean up staging data.");
            return;
        }
    } catch (IOException e) {
        throw new JobException("Failed to check unfinished commit sequences", e);
    }

    try {
        if (this.jobContext.shouldCleanupStagingDataPerTask()) {
            if (workUnits.isSafeToMaterialize()) {
                Closer closer = Closer.create();
                Map<String, ParallelRunner> parallelRunners = Maps.newHashMap();
                try {
                    // Per-task cleanup: flatten all work units and clean each task's
                    // staging data; ParallelRunners created along the way are registered
                    // with the closer and shut down in the finally block.
                    for (WorkUnit workUnit : JobLauncherUtils
                            .flattenWorkUnits(workUnits.getMaterializedWorkUnitCollection())) {
                        JobLauncherUtils.cleanTaskStagingData(new WorkUnitState(workUnit, jobState), LOG,
                                closer, parallelRunners);
                    }
                } catch (Throwable t) {
                    // Guava Closer pattern: record the primary failure so close() cannot mask it.
                    throw closer.rethrow(t);
                } finally {
                    closer.close();
                }
            } else {
                throw new RuntimeException("Work unit streams do not support cleaning staging data per task.");
            }
        } else {
            // Optionally purge old job data first, then the job-level staging data.
            if (jobState.getPropAsBoolean(ConfigurationKeys.CLEANUP_OLD_JOBS_DATA,
                    ConfigurationKeys.DEFAULT_CLEANUP_OLD_JOBS_DATA)) {
                JobLauncherUtils.cleanUpOldJobData(jobState, LOG, jobContext.getStagingDirProvided(),
                        jobContext.getOutputDirProvided());
            }
            JobLauncherUtils.cleanJobStagingData(jobState, LOG);
        }
    } catch (Throwable t) {
        // Catch Throwable instead of just IOException to make sure failure of this won't affect the current run
        LOG.error("Failed to clean leftover staging data", t);
    }
}

From source file:org.pantsbuild.tools.jar.JarBuilder.java

/**
 * Applies {@code visitor} to every entry of {@code jarFile}.
 *
 * @param jarFile the jar file whose entries are enumerated
 * @param visitor the visitor invoked once per entry
 * @throws IOException if the jar cannot be opened, read, or closed
 */
private void enumerateJarEntries(File jarFile, JarEntryVisitor visitor) throws IOException {

    Closer jarFileCloser = Closer.create();
    JarFile jar = JarFileUtil.openJarFile(jarFileCloser, jarFile);
    try {
        for (Enumeration<JarEntry> entries = jar.entries(); entries.hasMoreElements();) {
            visitor.visit(entries.nextElement());
        }
    } catch (Throwable t) {
        // Catch Throwable, not just IOException: the Closer pattern requires every
        // failure (including RuntimeException/Error from the visitor) to be recorded
        // via rethrow() so close() suppresses secondary exceptions instead of
        // masking the primary one. The original caught only IOException, letting
        // unchecked exceptions bypass the Closer entirely.
        throw jarFileCloser.rethrow(t);
    } finally {
        jarFileCloser.close();
    }
}

From source file:org.jclouds.vsphere.functions.CreateOrGetTagsId.java

/**
 * Looks up the jclouds tag and group custom field definitions on the vSphere
 * instance, caching them in {@code customFieldDefMap} and creating via
 * {@code addCustomFieldDef} any that do not already exist.
 *
 * <p>The service-instance client is registered with a {@link Closer} so it is
 * released even when the lookup or creation fails.
 */
@Inject
public synchronized void start() {
    Closer closer = Closer.create();
    VSphereServiceInstance client = serviceInstance.get();
    closer.register(client);
    try {
        try {
            CustomFieldDef[] customFieldDefs = client.getInstance().getCustomFieldsManager().getField();
            if (null != customFieldDefs) {
                // Cache any pre-existing jclouds tag/group field definitions.
                for (CustomFieldDef field : customFieldDefs) {
                    if (field.getName().equalsIgnoreCase(VSphereConstants.JCLOUDS_TAGS)) {
                        customFieldDefMap.put(VSphereConstants.JCLOUDS_TAGS, field);
                    } else if (field.getName().equalsIgnoreCase(VSphereConstants.JCLOUDS_GROUP)) {
                        customFieldDefMap.put(VSphereConstants.JCLOUDS_GROUP, field);
                    }
                }
            }
            // Create any definition that was not found above.
            if (!customFieldDefMap.containsKey(VSphereConstants.JCLOUDS_TAGS))
                customFieldDefMap.put(VSphereConstants.JCLOUDS_TAGS,
                        client.getInstance().getCustomFieldsManager()
                                .addCustomFieldDef(VSphereConstants.JCLOUDS_TAGS, null, null, null));
            if (!customFieldDefMap.containsKey(VSphereConstants.JCLOUDS_GROUP))
                customFieldDefMap.put(VSphereConstants.JCLOUDS_GROUP,
                        client.getInstance().getCustomFieldsManager()
                                .addCustomFieldDef(VSphereConstants.JCLOUDS_GROUP, null, null, null));
        } catch (Throwable t) {
            throw closer.rethrow(t);
        } finally {
            closer.close();
        }
    } catch (IOException e) {
        // Throwables.propagate always throws, but throwing its return value makes
        // that explicit to the compiler and to readers; the original discarded the
        // result, which reads as if execution could continue past this point.
        throw Throwables.propagate(e);
    }
}