List of usage examples for com.google.common.io.Closer.create()
public static Closer create()
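Closer.create() returns a new Closer to which resources can be added with register() and which closes them all, in reverse registration order, when close() is called. The canonical usage pattern, which recurs throughout the examples below, is the try / catch (Throwable) / finally shape from the Closer Javadoc: rethrow() records the primary exception before rethrowing it, so that close() can suppress (or, on Java 6, log) any secondary exception thrown while closing. A minimal sketch, assuming two hypothetical factory methods openInput() and openOutput():

public static void copyExample() throws IOException {
    Closer closer = Closer.create();
    try {
        InputStream in = closer.register(openInput());    // hypothetical
        OutputStream out = closer.register(openOutput()); // hypothetical
        ByteStreams.copy(in, out);
    } catch (Throwable t) {
        // Record t as the primary exception and rethrow it; rethrow() is
        // declared to throw IOException so checked exceptions propagate.
        throw closer.rethrow(t);
    } finally {
        // Closes registered resources in reverse registration order.
        closer.close();
    }
}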
From source file:gobblin.data.management.conversion.hive.validation.ValidationJob.java
/**
 * Execute Hive queries using {@link HiveJdbcConnector} and validate results.
 * @param queries Queries to execute.
 */
@SuppressWarnings("unused")
private List<Long> getValidationOutputFromHiveJdbc(List<String> queries) throws IOException {
    if (null == queries || queries.size() == 0) {
        log.warn("No queries specified to be executed");
        return Collections.emptyList();
    }

    Statement statement = null;
    List<Long> rowCounts = Lists.newArrayList();
    Closer closer = Closer.create();
    try {
        HiveJdbcConnector hiveJdbcConnector = HiveJdbcConnector.newConnectorWithProps(props);
        statement = hiveJdbcConnector.getConnection().createStatement();

        for (String query : queries) {
            log.info("Executing query: " + query);
            boolean result = statement.execute(query);
            if (result) {
                ResultSet resultSet = statement.getResultSet();
                if (resultSet.next()) {
                    rowCounts.add(resultSet.getLong(1));
                }
            } else {
                log.warn("Query output for: " + query + " : " + result);
            }
        }
    } catch (SQLException e) {
        throw new RuntimeException(e);
    } finally {
        try {
            closer.close();
        } catch (Exception e) {
            log.warn("Could not close HiveJdbcConnector", e);
        }
        if (null != statement) {
            try {
                statement.close();
            } catch (SQLException e) {
                log.warn("Could not close Hive statement", e);
            }
        }
    }
    return rowCounts;
}
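Note that unlike the two getValidationOutputFromHive variants below, this version never registers the HiveJdbcConnector with the closer, so closer.close() has nothing to close and the connector is left open. The registration done in the later variants is presumably what was intended here:

HiveJdbcConnector hiveJdbcConnector = closer.register(HiveJdbcConnector.newConnectorWithProps(props));

Also, the Statement has to be closed by hand: Closer.register() accepts only java.io.Closeable, and java.sql.Statement implements AutoCloseable but not Closeable.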
From source file:org.glowroot.agent.live.ClasspathCache.java
private static byte[] getBytesFromJarFileInsideJarFile(String name, File jarFile,
        String jarFileInsideJarFile) throws IOException {
    String path = jarFile.getPath();
    URI uri;
    try {
        uri = new URI("jar", "file:" + path + "!/" + jarFileInsideJarFile, "");
    } catch (URISyntaxException e) {
        // this is a programmatic error
        throw new RuntimeException(e);
    }

    Closer closer = Closer.create();
    try {
        InputStream in = closer.register(uri.toURL().openStream());
        JarInputStream jarIn = closer.register(new JarInputStream(in));
        JarEntry jarEntry;
        while ((jarEntry = jarIn.getNextJarEntry()) != null) {
            if (jarEntry.isDirectory()) {
                continue;
            }
            if (jarEntry.getName().equals(name)) {
                return ByteStreams.toByteArray(jarIn);
            }
        }
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
    throw new UnsupportedOperationException();
}
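Returning from inside the try block is safe in this pattern: the finally clause still runs closer.close(), which releases both registered streams. The trailing UnsupportedOperationException is only reached when no entry named name was found in the nested jar.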
From source file:com.facebook.buck.android.exopackage.RealAndroidDevice.java
private void doMultiInstall(String filesType, Map<Path, Path> installPaths) throws Exception {
    Closer closer = Closer.create();
    BuckInitiatedInstallReceiver receiver = new BuckInitiatedInstallReceiver(closer, filesType, installPaths);

    String command = "umask 022 && " + agent.get().getAgentCommand() + "multi-receive-file " + "-" + " "
            + agentPort + " " + "1" + ECHO_COMMAND_SUFFIX;
    LOG.debug("Executing %s", command);

    // If we fail to execute the command, stash the exception. My experience during development
    // has been that the exception from checkReceiverOutput is more actionable.
    Exception shellException = null;
    try {
        device.executeShellCommand(command, receiver);
    } catch (Exception e) {
        shellException = e;
    }

    // Close the client socket, if we opened it.
    closer.close();

    if (receiver.getError().isPresent()) {
        Exception prev = shellException;
        shellException = receiver.getError().get();
        if (prev != null) {
            shellException.addSuppressed(prev);
        }
    }

    try {
        checkReceiverOutput(command, receiver);
    } catch (Exception e) {
        if (shellException != null) {
            e.addSuppressed(shellException);
        }
        throw e;
    }

    if (shellException != null) {
        throw shellException;
    }

    for (Path targetFileName : installPaths.keySet()) {
        chmod644(targetFileName);
    }
}
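Two details are worth noting in this example. First, the Closer is handed to the BuckInitiatedInstallReceiver, which registers the client socket it may open while the shell command runs; the unconditional closer.close() afterwards releases that socket whether or not the command succeeded. Second, the exception bookkeeping deliberately makes the receiver's error the primary exception and attaches the earlier shell exception via addSuppressed(), so neither failure is lost.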
From source file:gobblin.data.management.conversion.hive.validation.ValidationJob.java
/**
 * Execute Hive queries using {@link HiveJdbcConnector} and validate results.
 * @param queries Queries to execute.
 */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SQL_NONCONSTANT_STRING_PASSED_TO_EXECUTE",
        justification = "Temporary fix")
private List<Long> getValidationOutputFromHive(List<String> queries) throws IOException {
    if (null == queries || queries.size() == 0) {
        log.warn("No queries specified to be executed");
        return Collections.emptyList();
    }

    List<Long> rowCounts = Lists.newArrayList();
    Closer closer = Closer.create();
    try {
        HiveJdbcConnector hiveJdbcConnector = closer.register(HiveJdbcConnector.newConnectorWithProps(props));
        for (String query : queries) {
            String hiveOutput = "hiveConversionValidationOutput_" + UUID.randomUUID().toString();
            Path hiveTempDir = new Path("/tmp" + Path.SEPARATOR + hiveOutput);
            query = "INSERT OVERWRITE DIRECTORY '" + hiveTempDir + "' " + query;
            log.info("Executing query: " + query);
            try {
                if (this.hiveSettings.size() > 0) {
                    hiveJdbcConnector
                            .executeStatements(this.hiveSettings.toArray(new String[this.hiveSettings.size()]));
                }
                hiveJdbcConnector.executeStatements("SET hive.exec.compress.output=false",
                        "SET hive.auto.convert.join=false", query);

                FileStatus[] fileStatusList = this.fs.listStatus(hiveTempDir);
                List<FileStatus> files = new ArrayList<>();
                for (FileStatus fileStatus : fileStatusList) {
                    if (fileStatus.isFile()) {
                        files.add(fileStatus);
                    }
                }

                if (files.size() > 1) {
                    log.warn("Found more than one output file. Should have been one.");
                } else if (files.size() == 0) {
                    log.warn("Found no output file. Should have been one.");
                } else {
                    String theString = IOUtils.toString(
                            new InputStreamReader(this.fs.open(files.get(0).getPath()), Charsets.UTF_8));
                    log.info("Found row count: " + theString.trim());
                    if (StringUtils.isBlank(theString.trim())) {
                        rowCounts.add(0L);
                    } else {
                        try {
                            rowCounts.add(Long.parseLong(theString.trim()));
                        } catch (NumberFormatException e) {
                            throw new RuntimeException("Could not parse Hive output: " + theString.trim(), e);
                        }
                    }
                }
            } finally {
                if (this.fs.exists(hiveTempDir)) {
                    log.debug("Deleting temp dir: " + hiveTempDir);
                    this.fs.delete(hiveTempDir, true);
                }
            }
        }
    } catch (SQLException e) {
        throw new RuntimeException(e);
    } finally {
        try {
            closer.close();
        } catch (Exception e) {
            log.warn("Could not close HiveJdbcConnector", e);
        }
    }
    return rowCounts;
}
From source file:org.apache.gobblin.data.management.conversion.hive.validation.ValidationJob.java
/**
 * Execute Hive queries using {@link HiveJdbcConnector} and validate results.
 * @param queries Queries to execute.
 */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SQL_NONCONSTANT_STRING_PASSED_TO_EXECUTE",
        justification = "Temporary fix")
private List<Long> getValidationOutputFromHive(List<String> queries) throws IOException {
    if (null == queries || queries.size() == 0) {
        log.warn("No queries specified to be executed");
        return Collections.emptyList();
    }

    List<Long> rowCounts = Lists.newArrayList();
    Closer closer = Closer.create();
    try {
        HiveJdbcConnector hiveJdbcConnector = closer.register(HiveJdbcConnector.newConnectorWithProps(props));
        for (String query : queries) {
            String hiveOutput = "hiveConversionValidationOutput_" + UUID.randomUUID().toString();
            Path hiveTempDir = new Path("/tmp" + Path.SEPARATOR + hiveOutput);
            query = "INSERT OVERWRITE DIRECTORY '" + hiveTempDir + "' " + query;
            log.info("Executing query: " + query);
            try {
                if (this.hiveSettings.size() > 0) {
                    hiveJdbcConnector
                            .executeStatements(this.hiveSettings.toArray(new String[this.hiveSettings.size()]));
                }
                hiveJdbcConnector.executeStatements("SET hive.exec.compress.output=false",
                        "SET hive.auto.convert.join=false", query);

                FileStatus[] fileStatusList = this.fs.listStatus(hiveTempDir);
                List<FileStatus> files = new ArrayList<>();
                for (FileStatus fileStatus : fileStatusList) {
                    if (fileStatus.isFile()) {
                        files.add(fileStatus);
                    }
                }

                if (files.size() > 1) {
                    log.warn("Found more than one output file. Should have been one.");
                } else if (files.size() == 0) {
                    log.warn("Found no output file. Should have been one.");
                } else {
                    String theString = IOUtils.toString(
                            new InputStreamReader(this.fs.open(files.get(0).getPath()), Charsets.UTF_8));
                    log.info("Found row count: " + theString.trim());
                    if (StringUtils.isBlank(theString.trim())) {
                        rowCounts.add(0L);
                    } else {
                        try {
                            rowCounts.add(Long.parseLong(theString.trim()));
                        } catch (NumberFormatException e) {
                            throw new RuntimeException("Could not parse Hive output: " + theString.trim(), e);
                        }
                    }
                }
            } finally {
                if (this.fs.exists(hiveTempDir)) {
                    log.debug("Deleting temp dir: " + hiveTempDir);
                    this.fs.delete(hiveTempDir, true);
                }
            }
        }
    } catch (SQLException e) {
        log.warn("Execution failed for query set " + queries.toString(), e);
    } finally {
        try {
            closer.close();
        } catch (Exception e) {
            log.warn("Could not close HiveJdbcConnector", e);
        }
    }
    return rowCounts;
}
From source file:gobblin.yarn.GobblinYarnAppLauncher.java
private void setupSecurityTokens(ContainerLaunchContext containerLaunchContext) throws IOException {
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    String tokenRenewer = this.yarnConfiguration.get(YarnConfiguration.RM_PRINCIPAL);
    if (tokenRenewer == null || tokenRenewer.length() == 0) {
        throw new IOException("Failed to get master Kerberos principal for the RM to use as renewer");
    }

    // For now, only getting tokens for the default file-system.
    Token<?>[] tokens = this.fs.addDelegationTokens(tokenRenewer, credentials);
    if (tokens != null) {
        for (Token<?> token : tokens) {
            LOGGER.info("Got delegation token for " + this.fs.getUri() + "; " + token);
        }
    }

    Closer closer = Closer.create();
    try {
        DataOutputBuffer dataOutputBuffer = closer.register(new DataOutputBuffer());
        credentials.writeTokenStorageToStream(dataOutputBuffer);
        ByteBuffer fsTokens = ByteBuffer.wrap(dataOutputBuffer.getData(), 0, dataOutputBuffer.getLength());
        containerLaunchContext.setTokens(fsTokens);
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
}
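The managed resource here is Hadoop's DataOutputBuffer, an in-memory output stream, so close() is unlikely to fail in practice; the Closer idiom is still used so the buffer is reliably released and any exception from writeTokenStorageToStream remains the primary one.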
From source file:org.gbif.occurrence.download.oozie.ArchiveBuilder.java
/**
 * Creates a Map of dataset UUIDs to their record counts.
 */
private Map<UUID, Integer> readDatasetCounts(Path citationSrc) throws IOException {
    // the hive query result is a directory with one or more files - read them all into a uuid set
    Map<UUID, Integer> srcDatasets = Maps.newHashMap(); // map of uuids to occurrence counts
    FileStatus[] citFiles = hdfs.listStatus(citationSrc);
    int invalidUuids = 0;
    Closer closer = Closer.create();
    for (FileStatus fs : citFiles) {
        if (!fs.isDirectory()) {
            BufferedReader citationReader = new BufferedReader(
                    new InputStreamReader(hdfs.open(fs.getPath()), Charsets.UTF_8));
            closer.register(citationReader);
            try {
                String line = citationReader.readLine();
                while (line != null) {
                    if (!Strings.isNullOrEmpty(line)) {
                        // we also catch errors for every dataset so we don't break the loop
                        try {
                            Iterator<String> iter = TAB_SPLITTER.split(line).iterator();
                            // play safe and make sure we got a uuid - even though our api doesn't require it
                            UUID key = UUID.fromString(iter.next());
                            Integer count = Integer.parseInt(iter.next());
                            srcDatasets.put(key, count);
                            // small downloads persist dataset usages while building the citations file
                            if (!isSmallDownload) {
                                persistDatasetUsage(count, downloadId, key);
                            }
                        } catch (IllegalArgumentException e) {
                            // ignore invalid UUIDs
                            LOG.info("Found invalid UUID as datasetId {}", line);
                            invalidUuids++;
                        }
                    }
                    line = citationReader.readLine();
                }
            } finally {
                closer.close();
            }
        }
    }
    if (invalidUuids > 0) {
        LOG.info("Found {} invalid dataset UUIDs", invalidUuids);
    } else {
        LOG.info("All {} dataset UUIDs are valid", srcDatasets.size());
    }
    return srcDatasets;
}
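Note that closer.close() is called in the per-file finally block, so each reader is closed as soon as its file has been read rather than when the method exits. A per-file try-with-resources (or a single close after the loop) would arguably express the intent more conventionally, but the shape shown here works because close() simply drains whatever has been registered so far.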
From source file:com.tinspx.util.io.ByteUtils.java
/**
 * Copies at most {@code limit} bytes from {@code from} into {@code to},
 * returning the total number of bytes copied. {@code to} is not closed or
 * flushed.
 *
 * @param from the source to read bytes from
 * @param to the destination to copy bytes read from {@code from} into
 * @param limit the maximum number of bytes to copy
 * @return the total number of bytes copied from {@code from} to {@code to}
 * @throws IOException if an IOException occurs
 * @throws NullPointerException if either {@code from} or {@code to} is null
 * @throws IllegalArgumentException if {@code limit} is negative
 */
@ThreadLocalArray(8192)
public static long copy(@NonNull ByteSource from, @NonNull PrimitiveSink to, long limit) throws IOException {
    checkLimit(limit);
    final Closer closer = Closer.create();
    try {
        return copy(closer.register(from.openStream()), to, limit);
    } catch (Throwable e) {
        throw closer.rethrow(e);
    } finally {
        closer.close();
    }
}
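Since com.google.common.hash.Hasher implements PrimitiveSink, one plausible use of this method is hashing a bounded prefix of a file. A hedged usage sketch (the file name is hypothetical):

// Hash at most the first 1 MiB of a file without loading it all into memory.
Hasher hasher = Hashing.murmur3_128().newHasher();
long copied = ByteUtils.copy(Files.asByteSource(new File("data.bin")), hasher, 1L << 20);
HashCode hash = hasher.hash();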
From source file:gobblin.runtime.AbstractJobLauncher.java
/**
 * Cleanup the left-over staging data possibly from the previous run of the job that may have failed
 * and not cleaned up its staging data.
 *
 * Property {@link ConfigurationKeys#CLEANUP_STAGING_DATA_PER_TASK} controls whether to cleanup
 * staging data per task, or to cleanup entire job's staging data at once.
 *
 * Staging data will not be cleaned if the job has unfinished {@link CommitSequence}s.
 */
private void cleanLeftoverStagingData(WorkUnitStream workUnits, JobState jobState) throws JobException {
    if (jobState.getPropAsBoolean(ConfigurationKeys.CLEANUP_STAGING_DATA_BY_INITIALIZER, false)) {
        // Clean up will be done by initializer.
        return;
    }

    try {
        if (!canCleanStagingData(jobState)) {
            LOG.error("Job " + jobState.getJobName()
                    + " has unfinished commit sequences. Will not clean up staging data.");
            return;
        }
    } catch (IOException e) {
        throw new JobException("Failed to check unfinished commit sequences", e);
    }

    try {
        if (this.jobContext.shouldCleanupStagingDataPerTask()) {
            if (workUnits.isSafeToMaterialize()) {
                Closer closer = Closer.create();
                Map<String, ParallelRunner> parallelRunners = Maps.newHashMap();
                try {
                    for (WorkUnit workUnit : JobLauncherUtils
                            .flattenWorkUnits(workUnits.getMaterializedWorkUnitCollection())) {
                        JobLauncherUtils.cleanTaskStagingData(new WorkUnitState(workUnit, jobState), LOG,
                                closer, parallelRunners);
                    }
                } catch (Throwable t) {
                    throw closer.rethrow(t);
                } finally {
                    closer.close();
                }
            } else {
                throw new RuntimeException("Work unit streams do not support cleaning staging data per task.");
            }
        } else {
            JobLauncherUtils.cleanJobStagingData(jobState, LOG);
        }
    } catch (Throwable t) {
        // Catch Throwable instead of just IOException to make sure failure of this won't affect the current run
        LOG.error("Failed to clean leftover staging data", t);
    }
}
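The pattern worth copying here is ownership: the Closer (and the parallelRunners map) is passed into JobLauncherUtils.cleanTaskStagingData, so resources the helper creates across many work units are registered on the caller's Closer and released exactly once, in the caller's finally block. A minimal sketch of that caller-owned-Closer pattern, with hypothetical names:

// Hypothetical helper: opens a stream but leaves its lifetime to the caller
// by registering it on the caller-supplied Closer.
static InputStream openRegistered(java.nio.file.Path path, Closer closer) throws IOException {
    return closer.register(java.nio.file.Files.newInputStream(path));
}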
From source file:org.pantsbuild.tools.jar.JarBuilder.java
/**
 * As an optimization, use {@link JarEntryCopier} to copy one jar file to
 * another without decompressing and recompressing.
 *
 * @param writer target to copy JAR file entries to.
 * @param entries entries that came from a jar file
 */
private void copyJarFiles(JarWriter writer, Iterable<ReadableJarEntry> entries) throws IOException {
    // Walk the entries to bucketize by input jar file names
    Multimap<JarSource, ReadableJarEntry> jarEntries = HashMultimap.create();
    for (ReadableJarEntry entry : entries) {
        Preconditions.checkState(entry.getSource() instanceof JarSource);
        jarEntries.put((JarSource) entry.getSource(), entry);
    }

    // Copy the data from each jar input file to the output
    for (JarSource source : jarEntries.keySet()) {
        Closer jarFileCloser = Closer.create();
        try {
            final InputSupplier<JarFile> jarSupplier = jarFileCloser
                    .register(new JarSupplier(new File(source.name())));
            JarFile jarFile = jarSupplier.getInput();
            for (ReadableJarEntry readableJarEntry : jarEntries.get(source)) {
                JarEntry jarEntry = readableJarEntry.getJarEntry();
                String resource = jarEntry.getName();
                writer.copy(resource, jarFile, jarEntry);
            }
        } catch (IOException ex) {
            throw jarFileCloser.rethrow(ex);
        } finally {
            jarFileCloser.close();
        }
    }
}
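For jarFileCloser.register(new JarSupplier(...)) to compile, JarSupplier must implement both InputSupplier<JarFile> and Closeable, which is what lets the Closer close the underlying JarFile when done. Note also that InputSupplier is deprecated in newer Guava releases in favor of ByteSource, so this particular shape is mostly of historical interest.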