Example usage for com.google.common.io Closer create

Introduction

On this page you can find example usage for com.google.common.io.Closer.create().

Prototype

public static Closer create() 

Document

Creates a new Closer.
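
Before the project-specific examples below, here is a minimal, self-contained sketch of the idiom Closer.create() supports (the file names in.txt and out.txt are placeholders, not taken from any example on this page): each resource is registered as it is opened, failures are routed through rethrow so that exceptions thrown while closing are recorded as suppressed rather than masking the original, and everything is closed in the finally block. Closer also implements Closeable, so several of the examples below use it in a try-with-resources statement instead.

import com.google.common.io.Closer;

import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public class CloserSketch {
    public static void copy() throws IOException {
        Closer closer = Closer.create();
        try {
            // register() returns its argument, so a resource can be opened
            // and registered in a single expression.
            InputStream in = closer.register(new FileInputStream("in.txt"));
            OutputStream out = closer.register(new FileOutputStream("out.txt"));
            byte[] buf = new byte[8192];
            int n;
            while ((n = in.read(buf)) != -1) {
                out.write(buf, 0, n);
            }
        } catch (Throwable t) {
            // rethrow() records t as the primary exception; anything thrown
            // by close() afterwards is attached as a suppressed exception.
            throw closer.rethrow(t);
        } finally {
            // Closes all registered resources, in reverse registration order.
            closer.close();
        }
    }
}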

Usage

From source file:com.seyren.core.service.notification.IrcCatNotificationService.java

private void sendMessage(String ircCatHost, int ircCatPort, String message, String channel) throws IOException {
    Socket socket = new Socket(ircCatHost, ircCatPort);
    Closer closer = Closer.create();
    try {
        Writer out = closer
                .register(new OutputStreamWriter(socket.getOutputStream(), Charset.forName("UTF-8")));
        out.write(format("%s %s\n", channel, message));
        out.flush();
    } catch (IOException e) {
        socket.close();
        throw closer.rethrow(e);
    } finally {
        closer.close();
    }
}

From source file:org.apache.gobblin.instrumented.writer.InstrumentedDataWriterBase.java

protected InstrumentedDataWriterBase(State state, Optional<Class<?>> classTag) {
    this.closer = Closer.create();
    this.instrumentationEnabled = GobblinMetrics.isEnabled(state);
    this.metricContext = this.closer
            .register(Instrumented.getMetricContext(state, classTag.or(this.getClass())));

    if (this.instrumentationEnabled) {
        this.writerMetricsUpdater = Optional.of(buildWriterMetricsUpdater());
        scheduleWriterMetricsUpdater(this.writerMetricsUpdater.get(), getWriterMetricsUpdaterInterval(state));
    } else {
        this.writerMetricsUpdater = Optional.absent();
    }

    regenerateMetrics();
}
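
Unlike the notification example above, this constructor keeps the Closer in a field: the MetricContext registered here is released later, presumably when the writer's own close() method delegates to closer.close() (that method is not shown in this excerpt). Tying helper resources to the lifetime of a long-lived object this way is a common use of a field-scoped Closer.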

From source file:org.apache.gobblin.metrics.reporter.ConfiguredScheduledReporter.java

public ConfiguredScheduledReporter(Builder<?> builder, Config config) {
    super(builder.name, config);
    this.rateUnit = builder.rateUnit;
    this.durationUnit = builder.durationUnit;
    this.rateFactor = builder.rateUnit.toSeconds(1);
    this.durationFactor = 1.0 / builder.durationUnit.toNanos(1);
    this.tags = ImmutableMap.copyOf(builder.tags);
    this.closer = Closer.create();
    this.metricContextName = builder.metricContextName;
    this.metricsPrefix = builder.metricsPrefix;
}

From source file:org.grouplens.lenskit.cli.TrainModel.java

@Override
public void execute() throws IOException, RecommenderBuildException {
    LenskitConfiguration dataConfig = input.getConfiguration();
    LenskitRecommenderEngineBuilder builder = LenskitRecommenderEngine.newBuilder();
    for (LenskitConfiguration config : environment.loadConfigurations(getConfigFiles())) {
        builder.addConfiguration(config);
    }
    builder.addConfiguration(dataConfig, ModelDisposition.EXCLUDED);

    Stopwatch timer = Stopwatch.createStarted();
    LenskitRecommenderEngine engine = builder.build();
    timer.stop();
    logger.info("built model in {}", timer);
    File output = getOutputFile();
    logger.info("writing model to {}", output);
    Closer closer = Closer.create();
    try {
        OutputStream stream = closer.register(new FileOutputStream(output));
        if (LKFileUtils.isCompressed(output)) {
            stream = closer.register(new GZIPOutputStream(stream));
        }
        engine.write(stream);
    } catch (Throwable th) {
        throw closer.rethrow(th);
    } finally {
        closer.close();
    }
}

From source file:com.android.builder.files.IncrementalRelativeFileSets.java

/**
 * Reads a zip file and adds all files in the zip to a new incremental relative set. The
 * status of each file is set to {@code status}.
 *
 * @param zip the zip file to read, must be a valid, existing zip file
 * @param status the status to set the files to
 * @return the file set
 * @throws IOException failed to read the zip file
 */
@NonNull
public static ImmutableMap<RelativeFile, FileStatus> fromZip(@NonNull File zip, FileStatus status)
        throws IOException {
    Preconditions.checkArgument(zip.isFile(), "!zip.isFile()");

    Set<RelativeFile> files = Sets.newHashSet();

    Closer closer = Closer.create();
    try {
        ZFile zipReader = closer.register(new ZFile(zip));
        for (StoredEntry entry : zipReader.entries()) {
            if (entry.getType() == StoredEntryType.FILE) {
                File file = new File(zip,
                        FileUtils.toSystemDependentPath(entry.getCentralDirectoryHeader().getName()));
                files.add(new RelativeFile(zip, file));
            }
        }
    } catch (Throwable t) {
        throw closer.rethrow(t, IOException.class);
    } finally {
        closer.close();
    }

    Map<RelativeFile, FileStatus> map = Maps.asMap(files, Functions.constant(status));
    return ImmutableMap.copyOf(map);
}
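
Note the two-argument overload closer.rethrow(t, IOException.class): it declares the given exception class in its throws clause in addition to IOException, so a Closer block can propagate a checked exception type of the caller's choosing. With IOException.class, as here, it behaves like the one-argument rethrow(t).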

From source file:com.jive.myco.seyren.core.service.notification.IrcCatNotificationService.java

private void sendMessage(String ircCatHost, int ircCatPort, String message, String channel) throws IOException {
    Socket socket = new Socket(ircCatHost, ircCatPort);
    Closer closer = Closer.create();
    try {
        Writer out = closer.register(new OutputStreamWriter(socket.getOutputStream()));
        out.write(format("%s %s\n", channel, message));
        out.flush();
    } catch (IOException e) {
        socket.close();
        throw closer.rethrow(e);
    } finally {
        closer.close();
    }
}

From source file:org.apache.gobblin.instrumented.extractor.InstrumentedExtractorBase.java

protected InstrumentedExtractorBase(WorkUnitState workUnitState, Optional<Class<?>> classTag) {
    super();
    this.closer = Closer.create();

    this.instrumentationEnabled = GobblinMetrics.isEnabled(workUnitState);

    this.metricContext = this.closer.register(Instrumented.getMetricContext(workUnitState,
            classTag.or(this.getClass()), generateTags(workUnitState)));

    regenerateMetrics();
}

From source file:alluxio.master.journal.ufs.UfsJournalSystem.java

@Override
public void stopInternal() {
    Closer closer = Closer.create();
    for (UfsJournal journal : mJournals.values()) {
        closer.register(journal);
    }
    RetryPolicy retry = ExponentialTimeBoundedRetry.builder().withMaxDuration(Duration.ofMinutes(1))
            .withInitialSleep(Duration.ofMillis(100)).withMaxSleep(Duration.ofSeconds(3)).build();
    IOException exception = null;
    while (retry.attempt()) {
        try {
            closer.close();
            return;
        } catch (IOException e) {
            exception = e;
            LOG.warn("Failed to close journal: {}", e.toString());
        }
    }
    if (exception != null) {
        throw new RuntimeException(exception);
    }
}
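
Here the Closer is used for fan-out rather than scoping: every journal is registered so that a single close() call attempts to close them all, and the close is retried under a time-bounded exponential backoff policy before giving up.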

From source file:gobblin.runtime.SafeDatasetCommit.java

@Override
public Void call() throws Exception {
    if (this.datasetState.getState() == JobState.RunningState.COMMITTED) {
        log.info(this.datasetUrn + " has been committed.");
        return null;
    }
    finalizeDatasetStateBeforeCommit(this.datasetState);
    Class<? extends DataPublisher> dataPublisherClass;
    try (Closer closer = Closer.create()) {
        dataPublisherClass = JobContext.getJobDataPublisherClass(this.jobContext.getJobState()).or(
                (Class<? extends DataPublisher>) Class.forName(ConfigurationKeys.DEFAULT_DATA_PUBLISHER_TYPE));
        if (!canCommitDataset(datasetState)) {
            log.warn(String.format("Not committing dataset %s of job %s with commit policy %s and state %s",
                    this.datasetUrn, this.jobContext.getJobId(), this.jobContext.getJobCommitPolicy(),
                    this.datasetState.getState()));
            checkForUnpublishedWUHandling(this.datasetUrn, this.datasetState, dataPublisherClass, closer);
            throw new RuntimeException(
                    String.format("Not committing dataset %s of job %s with commit policy %s and state %s",
                            this.datasetUrn, this.jobContext.getJobId(), this.jobContext.getJobCommitPolicy(),
                            this.datasetState.getState()));
        }
    } catch (ReflectiveOperationException roe) {
        log.error("Failed to instantiate data publisher for dataset %s of job %s.", this.datasetUrn,
                this.jobContext.getJobId(), roe);
        throw new RuntimeException(roe);
    }

    if (this.isJobCancelled) {
        log.info("Executing commit steps although job is cancelled due to job commit policy: "
                + this.jobContext.getJobCommitPolicy());
    }

    Optional<CommitSequence.Builder> commitSequenceBuilder = Optional.absent();
    boolean canPersistStates = true;
    try (Closer closer = Closer.create()) {
        if (this.shouldCommitDataInJob) {
            log.info(String.format("Committing dataset %s of job %s with commit policy %s and state %s",
                    this.datasetUrn, this.jobContext.getJobId(), this.jobContext.getJobCommitPolicy(),
                    this.datasetState.getState()));

            ListMultimap<TaskFactoryWrapper, TaskState> taskStatesByFactory = groupByTaskFactory(
                    this.datasetState);

            for (Map.Entry<TaskFactoryWrapper, Collection<TaskState>> entry : taskStatesByFactory.asMap()
                    .entrySet()) {
                TaskFactory taskFactory = entry.getKey().getTaskFactory();

                if (this.deliverySemantics == DeliverySemantics.EXACTLY_ONCE) {
                    if (taskFactory != null) {
                        throw new RuntimeException(
                                "Custom task factories do not support exactly once delivery semantics.");
                    }
                    generateCommitSequenceBuilder(this.datasetState, entry.getValue());
                } else {
                    DataPublisher publisher = taskFactory == null
                            ? closer.register(DataPublisher.getInstance(dataPublisherClass,
                                    this.jobContext.getJobState()))
                            : taskFactory.createDataPublisher(this.datasetState);
                    if (this.isJobCancelled) {
                        if (publisher.canBeSkipped()) {
                            log.warn(publisher.getClass() + " will be skipped.");
                        } else {
                            canPersistStates = false;
                            throw new RuntimeException(
                                    "Cannot persist state upon cancellation because publisher has unfinished work and cannot be skipped.");
                        }
                    } else if (this.isMultithreaded && !publisher.isThreadSafe()) {
                        log.warn(String.format(
                                "Gobblin is set up to parallelize publishing, however the publisher %s is not thread-safe. "
                                        + "Falling back to serial publishing.",
                                publisher.getClass().getName()));
                        safeCommitDataset(entry.getValue(), publisher);
                    } else {
                        commitDataset(entry.getValue(), publisher);
                    }
                }
            }
            this.datasetState.setState(JobState.RunningState.COMMITTED);
        } else {
            if (this.datasetState.getState() == JobState.RunningState.SUCCESSFUL) {
                this.datasetState.setState(JobState.RunningState.COMMITTED);
            }
        }
    } catch (ReflectiveOperationException roe) {
        log.error(String.format("Failed to instantiate data publisher for dataset %s of job %s.",
                this.datasetUrn, this.jobContext.getJobId()), roe);
        throw new RuntimeException(roe);
    } catch (Throwable throwable) {
        log.error(String.format("Failed to commit dataset state for dataset %s of job %s", this.datasetUrn,
                this.jobContext.getJobId()), throwable);
        throw new RuntimeException(throwable);
    } finally {
        try {
            finalizeDatasetState(datasetState, datasetUrn);
            if (commitSequenceBuilder.isPresent()) {
                buildAndExecuteCommitSequence(commitSequenceBuilder.get(), datasetState, datasetUrn);
                datasetState.setState(JobState.RunningState.COMMITTED);
            } else if (canPersistStates) {
                persistDatasetState(datasetUrn, datasetState);
            }
        } catch (IOException | RuntimeException ioe) {
            log.error(String.format("Failed to persist dataset state for dataset %s of job %s", datasetUrn,
                    this.jobContext.getJobId()), ioe);
            throw new RuntimeException(ioe);
        }
    }
    return null;
}
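
Both try blocks in this example rely on Closer implementing Closeable: try (Closer closer = Closer.create()) { ... } closes whatever has been registered when the block exits, so the explicit catch/rethrow/finally boilerplate of the earlier examples is not needed.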

From source file:com.facebook.buck.distributed.build_slave.RuleKeyDivergenceRunnerFactory.java

/** Creates DistBuildModeRunner to be used for rule key divergence checks */
public static DistBuildModeRunner createRunner(StampedeId stampedeId, BuildSlaveRunId buildSlaveRunId,
        Clock clock, DistBuildService distBuildService, DelegateAndGraphsInitializer initializer,
        RuleKeyConfiguration ruleKeyConfiguration, RuleKeyCacheScope<RuleKey> ruleKeyCacheScope,
        WeightedListeningExecutorService executorService, BuckEventBus eventBus, DistBuildState state,
        Cell rootCell, UnconfiguredBuildTargetFactory unconfiguredBuildTargetFactory) {
    return new AbstractDistBuildModeRunner() {
        @Override
        public ListenableFuture<?> getAsyncPrepFuture() {
            return Futures.immediateFuture(null);
        }

        @Override
        public ExitCode runAndReturnExitCode(HeartbeatService heartbeatService)
                throws IOException, InterruptedException {

            try (Closer closer = Closer.create()) {
                closer.register(heartbeatService.addCallback("ReportCoordinatorAlive",
                        createHeartbeatCallback(stampedeId, distBuildService)));

                try {
                    List<Pair<BuildRule, RuleKey>> rulesAndKeys = calculateDefaultRuleKeys(
                            getTopLevelTargetsToBuild(state, rootCell, unconfiguredBuildTargetFactory),
                            initializer, ruleKeyConfiguration, ruleKeyCacheScope, executorService, eventBus);

                    List<BuildSlaveEvent> ruleKeyCalculatedEvents = rulesAndKeys.stream().map(rk -> {
                        RuleKeyCalculatedEvent event = new RuleKeyCalculatedEvent();
                        event.setBuildTarget(rk.getFirst().getFullyQualifiedName());
                        event.setDefaultRuleKey(rk.getSecond().getHashCode().toString());

                        BuildSlaveEvent buildSlaveEvent = new BuildSlaveEvent();
                        buildSlaveEvent.setEventType(BuildSlaveEventType.RULE_KEY_CALCULATED_EVENT);
                        buildSlaveEvent.setRuleKeyCalculatedEvent(event);

                        return buildSlaveEvent;
                    }).collect(Collectors.toList());

                    List<List<BuildSlaveEvent>> ruleKeyCalculationBatches = Lists
                            .partition(ruleKeyCalculatedEvents, RULE_CALCULATION_EVENTS_PER_FRONTEND_REQUEST);

                    for (List<BuildSlaveEvent> ruleKeyCalculateBatch : ruleKeyCalculationBatches) {
                        distBuildService.uploadBuildSlaveEvents(stampedeId, buildSlaveRunId,
                                ruleKeyCalculateBatch);
                    }

                    // Ensure client doesn't wait for timeout before completing
                    distBuildService.sendAllBuildRulesPublishedEvent(stampedeId, buildSlaveRunId,
                            clock.currentTimeMillis());
                    distBuildService.setFinalBuildStatus(stampedeId, BuildStatus.FINISHED_SUCCESSFULLY,
                            "Rule key divergence check complete");
                    return ExitCode.SUCCESS;

                } catch (ExecutionException | IOException e) {
                    LOG.error(e, "Failed to calculate rule keys");
                    distBuildService.setFinalBuildStatus(stampedeId, BuildStatus.FAILED,
                            "Could not compute or publish rule keys");
                    return ExitCode.FATAL_GENERIC;
                }
            }
        }
    };
}