Example usage for com.google.common.io Closer create

Introduction

On this page you can find example usage for com.google.common.io Closer.create().

Prototype

public static Closer create() 

Document

Creates a new Closer.
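
A minimal sketch of the canonical pattern from the Guava Closer documentation, assuming the usual com.google.common.io.Closer and java.io imports; openInputStream() and openOutputStream() are hypothetical stand-ins for whatever resources you open:

public void copy() throws IOException {
    Closer closer = Closer.create();
    try {
        // resources are closed in the reverse order of registration
        InputStream in = closer.register(openInputStream());    // hypothetical helper
        OutputStream out = closer.register(openOutputStream()); // hypothetical helper
        // ... use in and out ...
    } catch (Throwable t) {
        // rethrow propagates t while still letting close() run in the finally block
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
}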

Usage

From source file: org.lenskit.eval.traintest.TrainTestExperiment.java

/**
 * Run the experiment.
 * @return The global aggregate results from the experiment.
 */
public Table execute() {
    try {
        try {
            resultCloser = Closer.create();
            logger.debug("setting up output");
            ExperimentOutputLayout layout = makeExperimentOutputLayout();
            openOutputs(layout);
            for (EvalTask task : tasks) {
                task.start(layout);
            }

            logger.debug("gathering jobs");
            buildJobGraph();
            int nthreads = getThreadCount();
            if (nthreads > 1) {
                logger.info("running with {} threads", nthreads);
                runJobGraph(nthreads);
            } else {
                logger.info("running in a single thread");
                runJobList();
            }

            logger.info("train-test evaluation complete");
            // done before closing, but that is ok
            return resultBuilder.build();
        } catch (Throwable th) { //NOSONAR using closer
            throw resultCloser.rethrow(th);
        } finally {
            outputLayout = null;
            // FIXME Handle exceptions in task shutdown cleanly
            for (EvalTask task : tasks) {
                task.finish();
            }
            resultBuilder = null;
            resultCloser.close();
        }
    } catch (IOException ex) {
        throw new EvaluationException("I/O error in evaluation", ex);
    }
}

From source file: org.apache.gobblin.runtime.JobLauncherTestHelper.java

/**
 * Test a job run in which the task with the matching suffix is skipped.
 * @param jobProps job properties
 * @param skippedTaskSuffix the suffix for the task that is skipped
 */
public void runTestWithSkippedTask(Properties jobProps, String skippedTaskSuffix) throws Exception {
    String jobName = jobProps.getProperty(ConfigurationKeys.JOB_NAME_KEY);
    String jobId = JobLauncherUtils.newJobId(jobName).toString();
    jobProps.setProperty(ConfigurationKeys.JOB_ID_KEY, jobId);
    jobProps.setProperty(ConfigurationKeys.PUBLISH_DATA_AT_JOB_LEVEL, Boolean.FALSE.toString());
    jobProps.setProperty(ConfigurationKeys.JOB_COMMIT_POLICY_KEY, "successful");
    jobProps.setProperty(ConfigurationKeys.MAX_TASK_RETRIES_KEY, "0");

    Closer closer = Closer.create();
    try {
        JobLauncher jobLauncher = closer
                .register(JobLauncherFactory.newJobLauncher(this.launcherProps, jobProps));
        jobLauncher.launchJob(null);
    } finally {
        closer.close();
    }

    List<JobState.DatasetState> datasetStateList = this.datasetStateStore.getAll(jobName,
            sanitizeJobNameForDatasetStore(jobId) + ".jst");
    JobState jobState = datasetStateList.get(0);

    Assert.assertEquals(jobState.getState(), JobState.RunningState.COMMITTED);
    // one task is skipped out of 4
    Assert.assertEquals(jobState.getCompletedTasks(), 3);
    for (TaskState taskState : jobState.getTaskStates()) {
        if (taskState.getTaskId().endsWith(skippedTaskSuffix)) {
            Assert.assertEquals(taskState.getWorkingState(), WorkUnitState.WorkingState.PENDING);
        } else {
            Assert.assertEquals(taskState.getWorkingState(), WorkUnitState.WorkingState.COMMITTED);
            Assert.assertEquals(taskState.getPropAsLong(ConfigurationKeys.WRITER_RECORDS_WRITTEN),
                    TestExtractor.TOTAL_RECORDS);
        }
    }
}

From source file: gobblin.example.wikipedia.WikipediaExtractor.java

private JsonElement performHttpQuery(String rootUrl, Map<String, String> query)
        throws URISyntaxException, IOException {
    if (null == this.httpClient) {
        this.httpClient = createHttpClient();
    }
    HttpUriRequest req = createHttpRequest(rootUrl, query);

    Closer closer = Closer.create();

    StringBuilder sb = new StringBuilder();
    try {
        HttpResponse response = sendHttpRequest(req, this.httpClient);
        if (response instanceof CloseableHttpResponse) {
            closer.register((CloseableHttpResponse) response);
        }
        BufferedReader br = closer
                .register(new BufferedReader(new InputStreamReader(response.getEntity().getContent(),
                        ConfigurationKeys.DEFAULT_CHARSET_ENCODING)));
        String line;
        while ((line = br.readLine()) != null) {
            sb.append(line + "\n");
        }
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        try {
            closer.close();
        } catch (IOException e) {
            LOG.error("IOException in Closer.close() while performing query " + req + ": " + e, e);
        }
    }

    if (Strings.isNullOrEmpty(sb.toString())) {
        LOG.warn("Received empty response for query: " + req);
        return new JsonObject();
    }

    JsonElement jsonElement = GSON.fromJson(sb.toString(), JsonElement.class);
    return jsonElement;

}

From source file: org.apache.gobblin.instrumented.Instrumented.java

public Instrumented(State state, Class<?> klazz, List<Tag<?>> tags) {
    this.closer = Closer.create();
    this.instrumentationEnabled = GobblinMetrics.isEnabled(state);
    this.metricContext = this.closer.register(getMetricContext(state, klazz, tags));
}

From source file: eu.interedition.collatex.cli.Engine.java

@Override
public void close() throws IOException {
    final Closer closer = Closer.create();
    try {
        if (out != null) {
            closer.register(out).flush();
        }
        if (log != null) {
            closer.register(log).flush();
        }
    } finally {
        closer.close();
    }
    if (errorOccurred && (outFile != null) && outFile.isFile()) {
        //noinspection ResultOfMethodCallIgnored
        outFile.delete();
    }
}

From source file: org.apache.gobblin.data.management.copy.hive.HiveCopyEntityHelper.java

HiveCopyEntityHelper(HiveDataset dataset, CopyConfiguration configuration, FileSystem targetFs)
        throws IOException {

    try (Closer closer = Closer.create()) {
        log.info("Finding copy entities for table " + dataset.table.getCompleteName());

        this.eventSubmitter = new EventSubmitter.Builder(dataset.getMetricContext(), "hive.dataset.copy")
                .build();
        MultiTimingEvent multiTimer = closer
                .register(new MultiTimingEvent(this.eventSubmitter, "HiveCopySetup", true));

        this.startTime = System.currentTimeMillis();

        this.dataset = dataset;
        this.configuration = configuration;
        this.targetFs = targetFs;

        this.targetPathHelper = new HiveTargetPathHelper(this.dataset);
        this.hiveRegProps = new HiveRegProps(new State(this.dataset.getProperties()));
        this.targetURI = Optional
                .fromNullable(this.dataset.getProperties().getProperty(TARGET_METASTORE_URI_KEY));
        this.targetClientPool = HiveMetastoreClientPool.get(this.dataset.getProperties(), this.targetURI);
        this.targetDatabase = Optional
                .fromNullable(this.dataset.getProperties().getProperty(TARGET_DATABASE_KEY))
                .or(this.dataset.table.getDbName());
        this.existingEntityPolicy = ExistingEntityPolicy.valueOf(this.dataset.getProperties()
                .getProperty(EXISTING_ENTITY_POLICY_KEY, DEFAULT_EXISTING_ENTITY_POLICY).toUpperCase());
        this.unmanagedDataPolicy = UnmanagedDataPolicy.valueOf(this.dataset.getProperties()
                .getProperty(UNMANAGED_DATA_POLICY_KEY, DEFAULT_UNMANAGED_DATA_POLICY).toUpperCase());

        this.deleteMethod = this.dataset.getProperties().containsKey(DELETE_FILES_ON_DEREGISTER)
                ? DeregisterFileDeleteMethod.valueOf(
                        this.dataset.getProperties().getProperty(DELETE_FILES_ON_DEREGISTER).toUpperCase())
                : DEFAULT_DEREGISTER_DELETE_METHOD;

        if (this.dataset.getProperties().containsKey(COPY_PARTITION_FILTER_GENERATOR)) {
            try {
                PartitionFilterGenerator generator = GobblinConstructorUtils.invokeFirstConstructor(
                        (Class<PartitionFilterGenerator>) Class.forName(
                                this.dataset.getProperties().getProperty(COPY_PARTITION_FILTER_GENERATOR)),
                        Lists.<Object>newArrayList(this.dataset.getProperties()), Lists.newArrayList());
                this.partitionFilter = Optional.of(generator.getFilter(this.dataset));
                log.info(String.format("Dynamic partition filter for table %s: %s.",
                        this.dataset.table.getCompleteName(), this.partitionFilter.get()));
            } catch (ReflectiveOperationException roe) {
                throw new IOException(roe);
            }
        } else {
            this.partitionFilter = Optional
                    .fromNullable(this.dataset.getProperties().getProperty(COPY_PARTITIONS_FILTER_CONSTANT));
        }

        // Initialize extended partition filter
        if (this.dataset.getProperties().containsKey(HIVE_PARTITION_EXTENDED_FILTER_TYPE)) {
            String filterType = dataset.getProperties().getProperty(HIVE_PARTITION_EXTENDED_FILTER_TYPE);
            try {
                Config config = ConfigFactory.parseProperties(this.dataset.getProperties());
                this.hivePartitionExtendedFilter = Optional
                        .of(new ClassAliasResolver<>(HivePartitionExtendedFilterFactory.class)
                                .resolveClass(filterType).newInstance().createFilter(config));
            } catch (ReflectiveOperationException roe) {
                log.error("Error: Could not find filter with alias " + filterType);
                closer.close();
                throw new IOException(roe);
            }
        } else {
            this.hivePartitionExtendedFilter = Optional.absent();
        }

        try {
            this.fastPartitionSkip = this.dataset.getProperties().containsKey(FAST_PARTITION_SKIP_PREDICATE)
                    ? Optional.of(GobblinConstructorUtils.invokeFirstConstructor(
                            (Class<Predicate<HivePartitionFileSet>>) Class.forName(
                                    this.dataset.getProperties().getProperty(FAST_PARTITION_SKIP_PREDICATE)),
                            Lists.<Object>newArrayList(this), Lists.newArrayList()))
                    : Optional.<Predicate<HivePartitionFileSet>>absent();

            this.fastTableSkip = this.dataset.getProperties().containsKey(FAST_TABLE_SKIP_PREDICATE)
                    ? Optional.of(GobblinConstructorUtils.invokeFirstConstructor(
                            (Class<Predicate<HiveCopyEntityHelper>>) Class.forName(
                                    this.dataset.getProperties().getProperty(FAST_TABLE_SKIP_PREDICATE)),
                            Lists.newArrayList()))
                    : Optional.<Predicate<HiveCopyEntityHelper>>absent();

        } catch (ReflectiveOperationException roe) {
            closer.close();
            throw new IOException(roe);
        }

        Map<String, HiveMetastoreClientPool> namedPools = ImmutableMap.of(source_client,
                this.dataset.clientPool, target_client, this.targetClientPool);

        multiTimer.nextStage(Stages.GET_TABLES);
        try (HiveMetastoreClientPool.MultiClient multiClient = HiveMetastoreClientPool
                .safeGetClients(namedPools)) {

            if (multiClient.getClient(target_client).tableExists(this.targetDatabase,
                    this.dataset.table.getTableName())) {
                this.existingTargetTable = Optional.of(new Table(multiClient.getClient(target_client)
                        .getTable(this.targetDatabase, this.dataset.table.getTableName())));
            } else {
                this.existingTargetTable = Optional.absent();
            }

            // Constructing CommitStep object for table registration
            Path targetPath = getTargetLocation(dataset.fs, this.targetFs, dataset.table.getDataLocation(),
                    Optional.<Partition>absent());
            this.targetTable = getTargetTable(this.dataset.table, targetPath);
            HiveSpec tableHiveSpec = new SimpleHiveSpec.Builder<>(targetPath)
                    .withTable(HiveMetaStoreUtils.getHiveTable(this.targetTable.getTTable())).build();

            CommitStep tableRegistrationStep = new HiveRegisterStep(this.targetURI, tableHiveSpec,
                    this.hiveRegProps);
            this.tableRegistrationStep = Optional.of(tableRegistrationStep);

            if (this.existingTargetTable.isPresent() && this.existingTargetTable.get().isPartitioned()) {
                checkPartitionedTableCompatibility(this.targetTable, this.existingTargetTable.get());
            }
            if (HiveUtils.isPartitioned(this.dataset.table)) {
                this.sourcePartitions = HiveUtils.getPartitionsMap(multiClient.getClient(source_client),
                        this.dataset.table, this.partitionFilter, this.hivePartitionExtendedFilter);
                HiveAvroCopyEntityHelper.updatePartitionAttributesIfAvro(this.targetTable,
                        this.sourcePartitions, this);

                // Note: this must be mutable, so we copy the map
                this.targetPartitions = this.existingTargetTable.isPresent()
                        ? Maps.newHashMap(HiveUtils.getPartitionsMap(multiClient.getClient(target_client),
                                this.existingTargetTable.get(), this.partitionFilter,
                                this.hivePartitionExtendedFilter))
                        : Maps.<List<String>, Partition>newHashMap();
            } else {
                this.sourcePartitions = Maps.newHashMap();
                this.targetPartitions = Maps.newHashMap();
            }

        } catch (TException te) {
            closer.close();
            throw new IOException("Failed to generate work units for table " + dataset.table.getCompleteName(),
                    te);
        }
    }
}
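
The example above manages the Closer with try-with-resources instead of the explicit rethrow/finally idiom, which works because Closer implements Closeable. A minimal sketch of that style (someFile is a hypothetical java.io.File):

try (Closer closer = Closer.create()) {
    InputStream in = closer.register(new FileInputStream(someFile)); // someFile: hypothetical
    // ... read from in; every registered resource is closed when the block exits ...
}

Note that rethrow() plays no part in this style; exception handling follows the normal try-with-resources semantics.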

From source file: com.android.builder.internal.packaging.OldPackager.java

/**
 * Incrementally updates resources in the packaging. The resources can be added or removed,
 * depending on the changes made to the file. Updating an archive file as modified will update
 * the entries, but will not remove archive entries that are no longer in the archive.
 *
 * @param file the archive file (zip)
 * @param modificationType the type of file modification
 * @param isIgnored the filter to apply to the contents of the archive; the filter is applied
 * before processing: filtered-out files are treated exactly like nonexistent files; the filter
 * applies to the path stored in the zip
 * @throws PackagerException failed to update the package
 */
public void updateResourceArchive(@NonNull File file, @NonNull FileModificationType modificationType,
        @NonNull final Predicate<String> isIgnored) throws PackagerException {
    Preconditions.checkNotNull(mApkCreator, "mApkCreator == null");

    if (modificationType == FileModificationType.NEW || modificationType == FileModificationType.CHANGED) {
        try {
            Closer closer = Closer.create();
            try {
                /*
                 * Note that ZipAbortException has to be masked because it is not allowed in
                 * the Predicate interface.
                 */
                Predicate<String> newIsIgnored = input -> {
                    try {
                        if (!mNoJavaClassZipFilter.checkEntry(input)) {
                            return true;
                        }
                    } catch (ZipAbortException e) {
                        throw new RuntimeException(e);
                    }

                    return isIgnored.apply(input);
                };

                mApkCreator.writeZip(file, null, newIsIgnored::apply);
            } catch (Throwable t) {
                throw closer.rethrow(t, ZipAbortException.class);
            } finally {
                closer.close();
            }
        } catch (IOException e) {
            throw new PackagerException(e);
        }
    }
}
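
The catch block above uses the two-argument rethrow(t, ZipAbortException.class), which lets that checked exception type propagate unwrapped in addition to IOException. A minimal sketch of the same idiom with a hypothetical MyCheckedException:

void doWork() throws IOException, MyCheckedException { // MyCheckedException is hypothetical
    Closer closer = Closer.create();
    try {
        // ... work that may throw MyCheckedException, IOException, or unchecked exceptions ...
    } catch (Throwable t) {
        // declaring MyCheckedException allows it to be rethrown as-is
        throw closer.rethrow(t, MyCheckedException.class);
    } finally {
        closer.close();
    }
}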

From source file: org.glowroot.agent.live.ClasspathCache.java

@GuardedBy("this")
private void loadClassNamesFromJarFile(File jarFile, Location location,
        Multimap<String, Location> newClassNameLocations) throws IOException {
    Closer closer = Closer.create();
    try {
        InputStream in = closer.register(new FileInputStream(jarFile));
        JarInputStream jarIn = closer.register(new JarInputStream(in));
        loadClassNamesFromManifestClassPath(jarIn, jarFile, newClassNameLocations);
        loadClassNamesFromJarInputStream(jarIn, "", location, newClassNameLocations);
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
}

From source file: org.grouplens.lenskit.eval.graph.GraphDumper.java

/**
 * Render a graph to a file.
 *
 * @param graph The graph to render.
 * @param graphvizFile The file to write the graph to.
 * @throws IOException
 */
public static void renderGraph(DAGNode<Component, Dependency> graph, File graphvizFile)
        throws IOException, RecommenderBuildException {
    logger.debug("graph has {} nodes", graph.getReachableNodes().size());
    logger.debug("simulating instantiation");
    RecommenderInstantiator instantiator = RecommenderInstantiator.create(graph);
    DAGNode<Component, Dependency> unshared = instantiator.simulate();
    logger.debug("unshared graph has {} nodes", unshared.getReachableNodes().size());
    Closer close = Closer.create();
    try {
        Writer writer = close.register(new FileWriter(graphvizFile));
        GraphWriter gw = close.register(new GraphWriter(writer));
        GraphDumper dumper = new GraphDumper(graph, unshared.getReachableNodes(), gw);
        logger.debug("writing root node");
        String rid = dumper.setRoot(graph);
        // process each other node & add an edge
        for (DAGEdge<Component, Dependency> e : graph.getOutgoingEdges()) {
            DAGNode<Component, Dependency> target = e.getTail();
            Component csat = target.getLabel();
            if (!satIsNull(csat.getSatisfaction())) {
                logger.debug("processing node {}", csat.getSatisfaction());
                String id = dumper.process(target);
                gw.putEdge(EdgeBuilder.create(rid, id).set("arrowhead", "vee").build());
            }
        }
        // and we're done
        dumper.finish();
    } catch (Throwable th) {
        throw close.rethrow(th);
    } finally {
        close.close();
    }
}

From source file: org.gbif.ipt.task.GenerateDCAT.java

/**
 * This method loads the DCAT prefixes from dcat.properties.
 */
private Map<String, String> loadDCATPrefixes() {
    HashMap<String, String> prefixes = new HashMap<String, String>();
    Closer closer = Closer.create();
    try {
        InputStreamUtils streamUtils = new InputStreamUtils();
        InputStream configStream = streamUtils.classpathStream(PREFIXES_PROPERTIES);
        if (configStream == null) {
            LOG.error("Could not load DCAT prefixes from file: " + PREFIXES_PROPERTIES);
        } else {
            Properties properties = new Properties();
            properties.load(configStream);
            for (Map.Entry<Object, Object> entry : properties.entrySet()) {
                String key = StringUtils.trim((String) entry.getKey());
                String value = StringUtils.trim((String) entry.getValue());
                if (key != null && value != null) {
                    prefixes.put(key, value);
                } else {
                    throw new InvalidConfigException(InvalidConfigException.TYPE.INVALID_PROPERTIES_FILE,
                            "Invalid properties file: " + PREFIXES_PROPERTIES);
                }
            }
            LOG.debug("Loaded DCAT prefixes: " + prefixes.toString());
        }
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        try {
            closer.close();
        } catch (IOException e) {
            LOG.error("Failed to close input stream on DCAT prefixes file: " + PREFIXES_PROPERTIES);
        }
    }
    return prefixes;
}