Example usage for com.google.common.io Closer.create()

Introduction

On this page you can find usage examples for com.google.common.io Closer.create().

Prototype

public static Closer create() 

Document

Creates a new Closer.
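
Before the project examples below, here is a minimal, self-contained sketch of the standard Closer idiom described in the Guava documentation: create the Closer, register each resource, rethrow any failure through the Closer, and close it in a finally block. The readFirstLine method and the file path are illustrative only and do not appear in the examples that follow.

import com.google.common.io.Closer;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

public class CloserExample {
    static String readFirstLine(String path) throws IOException {
        Closer closer = Closer.create();
        try {
            // Resources registered with the Closer are closed in reverse registration order.
            BufferedReader reader = closer.register(new BufferedReader(new FileReader(path)));
            return reader.readLine();
        } catch (Throwable t) {
            // rethrow() records the primary exception so that close() can suppress
            // any secondary exception thrown while closing.
            throw closer.rethrow(t);
        } finally {
            closer.close();
        }
    }
}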

Usage

From source file:org.gradle.caching.internal.LocalDirectoryBuildCacheService.java

@Override
public void store(final BuildCacheKey key, final BuildCacheEntryWriter result) throws BuildCacheException {
    persistentCache.useCache(new Runnable() {
        @Override
        public void run() {
            File file = getFile(key.getHashCode());
            try {
                Closer closer = Closer.create();
                OutputStream output = closer.register(new FileOutputStream(file));
                try {
                    result.writeTo(output);
                } finally {
                    closer.close();
                }
            } catch (IOException ex) {
                throw new UncheckedIOException(ex);
            }
        }
    });
}

From source file:org.gradle.caching.internal.LocalDirectoryBuildCache.java

@Override
public void store(final BuildCacheKey key, final BuildCacheEntryWriter result) throws BuildCacheException {
    persistentCache.useCache("store build cache entry", new Runnable() {
        @Override
        public void run() {
            File file = getFile(key.getHashCode());
            try {
                Closer closer = Closer.create();
                OutputStream output = closer.register(new FileOutputStream(file));
                try {
                    result.writeTo(output);
                } finally {
                    closer.close();
                }
            } catch (IOException ex) {
                throw new UncheckedIOException(ex);
            }
        }
    });
}

From source file:org.apache.jackrabbit.oak.run.ResetClusterIdCommand.java

@Override
public void execute(String... args) throws Exception {
    OptionParser parser = new OptionParser();
    OptionSpec segmentTar = parser.accepts("segment-tar", "Use oak-segment-tar instead of oak-segment");
    OptionSet options = parser.parse(args);

    if (options.nonOptionArguments().isEmpty()) {
        System.out.println("usage: resetclusterid {<path>|<mongo-uri>}");
        System.exit(1);
    }

    String source = options.nonOptionArguments().get(0).toString();

    Closer closer = Closer.create();
    try {
        NodeStore store;
        if (args[0].startsWith(MongoURI.MONGODB_PREFIX)) {
            MongoClientURI uri = new MongoClientURI(source);
            MongoClient client = new MongoClient(uri);
            final DocumentNodeStore dns = new DocumentMK.Builder().setMongoDB(client.getDB(uri.getDatabase()))
                    .getNodeStore();
            closer.register(Utils.asCloseable(dns));
            store = dns;
        } else if (options.has(segmentTar)) {
            store = SegmentTarUtils.bootstrapNodeStore(source, closer);
        } else {
            FileStore fs = openFileStore(source);
            closer.register(Utils.asCloseable(fs));
            store = SegmentNodeStore.builder(fs).build();
        }

        deleteClusterId(store);
    } catch (Throwable e) {
        throw closer.rethrow(e);
    } finally {
        closer.close();
    }

}

From source file:org.apache.gobblin.runtime.SafeDatasetCommit.java

@Override
public Void call() throws Exception {
    if (this.datasetState.getState() == JobState.RunningState.COMMITTED) {
        log.info(this.datasetUrn + " have been committed.");
        return null;
    }
    metricContext = Instrumented.getMetricContext(datasetState, SafeDatasetCommit.class);

    finalizeDatasetStateBeforeCommit(this.datasetState);
    Class<? extends DataPublisher> dataPublisherClass;
    try (Closer closer = Closer.create()) {
        dataPublisherClass = JobContext.getJobDataPublisherClass(this.jobContext.getJobState()).or(
                (Class<? extends DataPublisher>) Class.forName(ConfigurationKeys.DEFAULT_DATA_PUBLISHER_TYPE));
        if (!canCommitDataset(datasetState)) {
            log.warn(String.format("Not committing dataset %s of job %s with commit policy %s and state %s",
                    this.datasetUrn, this.jobContext.getJobId(), this.jobContext.getJobCommitPolicy(),
                    this.datasetState.getState()));
            checkForUnpublishedWUHandling(this.datasetUrn, this.datasetState, dataPublisherClass, closer);
            throw new RuntimeException(
                    String.format("Not committing dataset %s of job %s with commit policy %s and state %s",
                            this.datasetUrn, this.jobContext.getJobId(), this.jobContext.getJobCommitPolicy(),
                            this.datasetState.getState()));
        }
    } catch (ReflectiveOperationException roe) {
        log.error("Failed to instantiate data publisher for dataset %s of job %s.", this.datasetUrn,
                this.jobContext.getJobId(), roe);
        throw new RuntimeException(roe);
    } finally {
        maySubmitFailureEvent(datasetState);
    }

    if (this.isJobCancelled) {
        log.info("Executing commit steps although job is cancelled due to job commit policy: "
                + this.jobContext.getJobCommitPolicy());
    }

    Optional<CommitSequence.Builder> commitSequenceBuilder = Optional.absent();
    boolean canPersistStates = true;
    try (Closer closer = Closer.create()) {
        if (this.shouldCommitDataInJob) {
            log.info(String.format("Committing dataset %s of job %s with commit policy %s and state %s",
                    this.datasetUrn, this.jobContext.getJobId(), this.jobContext.getJobCommitPolicy(),
                    this.datasetState.getState()));

            ListMultimap<TaskFactoryWrapper, TaskState> taskStatesByFactory = groupByTaskFactory(
                    this.datasetState);

            for (Map.Entry<TaskFactoryWrapper, Collection<TaskState>> entry : taskStatesByFactory.asMap()
                    .entrySet()) {
                TaskFactory taskFactory = entry.getKey().getTaskFactory();

                if (this.deliverySemantics == DeliverySemantics.EXACTLY_ONCE) {
                    if (taskFactory != null) {
                        throw new RuntimeException(
                                "Custom task factories do not support exactly once delivery semantics.");
                    }
                    generateCommitSequenceBuilder(this.datasetState, entry.getValue());
                } else {
                    DataPublisher publisher;

                    if (taskFactory == null) {
                        publisher = DataPublisherFactory.get(dataPublisherClass.getName(),
                                this.jobContext.getJobState(), this.jobContext.getJobBroker());

                        // non-threadsafe publishers are not shareable and are not retained in the broker, so register them with
                        // the closer
                        if (!DataPublisherFactory.isPublisherCacheable(publisher)) {
                            closer.register(publisher);
                        }
                    } else {
                        // NOTE: sharing of publishers is not supported when they are instantiated through the TaskFactory.
                        // This should be revisited if sharing is required.
                        publisher = taskFactory.createDataPublisher(this.datasetState);
                    }

                    if (this.isJobCancelled) {
                        if (publisher.canBeSkipped()) {
                            log.warn(publisher.getClass() + " will be skipped.");
                        } else {
                            canPersistStates = false;
                            throw new RuntimeException(
                                    "Cannot persist state upon cancellation because publisher has unfinished work and cannot be skipped.");
                        }
                    } else if (this.isMultithreaded && !publisher.isThreadSafe()) {
                        log.warn(String.format(
                                "Gobblin is set up to parallelize publishing, however the publisher %s is not thread-safe. "
                                        + "Falling back to serial publishing.",
                                publisher.getClass().getName()));
                        safeCommitDataset(entry.getValue(), publisher);
                    } else {
                        commitDataset(entry.getValue(), publisher);
                    }
                }
            }
            this.datasetState.setState(JobState.RunningState.COMMITTED);
        } else {
            if (this.datasetState.getState() == JobState.RunningState.SUCCESSFUL) {
                this.datasetState.setState(JobState.RunningState.COMMITTED);
            }
        }
    } catch (Throwable throwable) {
        log.error(String.format("Failed to commit dataset state for dataset %s of job %s", this.datasetUrn,
                this.jobContext.getJobId()), throwable);
        throw new RuntimeException(throwable);
    } finally {
        try {
            finalizeDatasetState(datasetState, datasetUrn);
            maySubmitFailureEvent(datasetState);
            maySubmitLineageEvent(datasetState);
            if (commitSequenceBuilder.isPresent()) {
                buildAndExecuteCommitSequence(commitSequenceBuilder.get(), datasetState, datasetUrn);
                datasetState.setState(JobState.RunningState.COMMITTED);
            } else if (canPersistStates) {
                persistDatasetState(datasetUrn, datasetState);
            }

        } catch (IOException | RuntimeException ioe) {
            log.error(String.format("Failed to persist dataset state for dataset %s of job %s", datasetUrn,
                    this.jobContext.getJobId()), ioe);
            throw new RuntimeException(ioe);
        }
    }
    return null;
}

From source file:alluxio.client.file.UnderFileSystemFileOutStream.java

/**
 * Constructor for an under file system file output stream.
 *
 * @param context the file system context
 * @param address address of the worker
 * @param ufsFileId the worker specific file id
 */
private UnderFileSystemFileOutStream(FileSystemContext context, InetSocketAddress address, long ufsFileId) {
    mContext = context;
    mBuffer = allocateBuffer();
    mAddress = address;
    mUfsFileId = ufsFileId;
    mFlushedBytes = 0;
    mWrittenBytes = 0;
    mClosed = false;
    mCloser = Closer.create();
    mWriter = mCloser.register(UnderFileSystemFileWriter.Factory.create(mContext));
}
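
This example keeps the Closer as an instance field and registers the writer on it instead of closing anything in the constructor. A hypothetical sketch of the matching close() for such a class (not the actual Alluxio implementation) would simply delegate to the Closer, which closes every registered resource in reverse registration order:

@Override
public void close() throws IOException {
    if (mClosed) {
        return; // already closed, nothing to release
    }
    mClosed = true;
    // Closes mWriter and anything else registered with mCloser.
    mCloser.close();
}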

From source file:com.google.devtools.build.android.dexer.DexFileSplitter.java

@VisibleForTesting
static void splitIntoShards(Options options) throws IOException {
    checkArgument(!options.minimalMainDex || options.mainDexListFile != null,
            "--minimal-main-dex not allowed without --main-dex-list");

    if (!Files.exists(options.outputDirectory)) {
        Files.createDirectories(options.outputDirectory);
    }

    ImmutableSet<String> classesInMainDex = options.mainDexListFile != null
            ? ImmutableSet.copyOf(Files.readAllLines(options.mainDexListFile, UTF_8))
            : null;
    try (Closer closer = Closer.create();
            DexFileSplitter out = new DexFileSplitter(options.outputDirectory, options.maxNumberOfIdxPerDex)) {
        // 1. Scan inputs in order and keep first occurrence of each class, keeping all zips open.
        // We don't process anything yet so we can shard in sorted order, which is what dx would do
        // if presented with a single jar containing all the given inputs.
        // TODO(kmb): Abandon alphabetic sorting to process each input fully before moving on (still
        // requires scanning inputs twice for main dex list).
        LinkedHashMap<String, ZipFile> deduped = new LinkedHashMap<>();
        for (Path inputArchive : options.inputArchives) {
            ZipFile zip = closer.register(new ZipFile(inputArchive.toFile()));
            zip.stream().filter(ZipEntryPredicates.suffixes(".dex", ".class"))
                    .forEach(e -> deduped.putIfAbsent(e.getName(), zip));
        }
        ImmutableList<Map.Entry<String, ZipFile>> files = deduped.entrySet().stream()
                .sorted(Comparator.comparing(e -> e.getKey(), ZipEntryComparator::compareClassNames))
                .collect(ImmutableList.toImmutableList());

        // 2. Process each class in desired order, rolling from shard to shard as needed.
        if (classesInMainDex == null || classesInMainDex.isEmpty()) {
            out.processDexFiles(files, Predicates.alwaysTrue());
        } else {
            // To honor --main_dex_list make two passes:
            // 1. process only the classes listed in the given file
            // 2. process the remaining files
            Predicate<String> mainDexFilter = ZipEntryPredicates.classFileNameFilter(classesInMainDex);
            out.processDexFiles(files, mainDexFilter);
            // Fail if main_dex_list is too big, following dx's example
            checkState(out.shardsWritten() == 0,
                    "Too many classes listed in main dex list file " + "%s, main dex capacity exceeded",
                    options.mainDexListFile);
            if (options.minimalMainDex) {
                out.nextShard(); // Start new .dex file if requested
            }
            out.processDexFiles(files, Predicates.not(mainDexFilter));
        }
    }
}

From source file:gobblin.compaction.hive.AvroExternalTable.java

private List<HiveAttribute> getAttributesFromAvroSchemaFile() throws IOException {
    Closer closer = Closer.create();
    try {
        InputStream schemaInputStream = closer
                .register(new HdfsReader(this.schemaLocationInHdfs).getInputStream());
        Schema schema = new Schema.Parser().parse(schemaInputStream);
        return parseSchema(schema);
    } finally {
        closer.close();
    }
}

From source file:com.googlecode.jmxtrans.guice.JmxTransModule.java

@Provides
@Inject
Scheduler scheduler(JmxTransConfiguration configuration, GuiceJobFactory jobFactory)
        throws SchedulerException, IOException {
    StdSchedulerFactory serverSchedFact = new StdSchedulerFactory();
    Closer closer = Closer.create();
    try {
        InputStream stream;
        if (configuration.getQuartzPropertiesFile() == null) {
            stream = closer.register(JmxTransModule.class.getResourceAsStream("/quartz.server.properties"));
        } else {
            stream = closer.register(new FileInputStream(configuration.getQuartzPropertiesFile()));
        }
        serverSchedFact.initialize(stream);
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
    Scheduler scheduler = serverSchedFact.getScheduler();
    scheduler.setJobFactory(jobFactory);
    return scheduler;
}

From source file:com.tinspx.util.io.ChannelSource.java

@Override
public long copyTo(ByteSink sink) throws IOException {
    Closer closer = Closer.create();
    try {
        if (preferChannel() && sink instanceof ChannelSink && ((ChannelSink) sink).preferChannel()) {
            return copyTo(closer.register(((ChannelSink) sink).openChannel()));
        } else {
            OutputStream out = closer.register(sink.openStream());
            long total = copyTo(out);
            out.flush();
            return total;
        }
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
}

From source file:com.netflix.servo.publish.FileMetricObserver.java

/** {@inheritDoc} */
public void updateImpl(List<Metric> metrics) {
    Preconditions.checkNotNull(metrics);
    File file = new File(dir, fileFormat.format(new Date(clock.now())));
    Closer closer = Closer.create();
    Writer out = null;
    try {
        try {
            LOGGER.debug("writing {} metrics to file {}", metrics.size(), file);
            OutputStream fileOut = new FileOutputStream(file, true);
            if (compress) {
                fileOut = new GZIPOutputStream(fileOut);
            }
            out = closer.register(new OutputStreamWriter(fileOut, "UTF-8"));
            for (Metric m : metrics) {
                out.append(m.getConfig().getName()).append('\t').append(m.getConfig().getTags().toString())
                        .append('\t').append(m.getValue().toString()).append('\n');
            }
        } catch (Throwable t) {
            throw closer.rethrow(t);
        } finally {
            closer.close();
        }
    } catch (IOException e) {
        incrementFailedCount();
        LOGGER.error("failed to write update to file " + file, e);
    }
}