Example usage for com.google.common.io Closeables close

List of usage examples for com.google.common.io Closeables close

Introduction

On this page you can find example usage for com.google.common.io Closeables close.

Prototype

public static void close(@Nullable Closeable closeable, boolean swallowIOException) throws IOException 

Document

Closes a Closeable, with control over whether an IOException may be thrown.
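
The typical pattern, described in the Guava Javadoc and visible in most of the examples below, is to track whether the try block completed normally and pass that flag as swallowIOException: if the body already threw, an IOException raised by close() is logged and swallowed so it cannot mask the original exception; otherwise it is rethrown. The following minimal sketch illustrates the idiom; the class name, method name, and path parameter are placeholders, not taken from any of the examples on this page.

import com.google.common.io.Closeables;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class CloseablesCloseExample {

    // Reads the first byte of a file using the "threw" idiom.
    public static int readFirstByte(String path) throws IOException {
        InputStream in = new FileInputStream(path);
        boolean threw = true;
        try {
            int first = in.read();
            threw = false;
            return first;
        } finally {
            // threw == true: the body failed, so any IOException from close() is
            // logged and swallowed instead of masking the original exception.
            // threw == false: the body succeeded, so a failure in close() propagates.
            Closeables.close(in, threw);
        }
    }
}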

Usage

From source file: org.kitesdk.tools.JobClasspathHelper.java

/**
 * @param conf
 *            Configuration object for the Job. Used to get the FileSystem associated with it.
 * @param libDir
 *            Destination directory in the FileSystem (usually HDFS) where the libs are uploaded and looked up.
 * @param classesToInclude
 *            Classes that are needed by the job. JarFinder will look for the jar containing these classes.
 * @throws Exception
 */
public void prepareClasspath(final Configuration conf, final Path libDir, Class<?>... classesToInclude)
        throws Exception {
    FileSystem fs = FileSystem.get(conf);
    List<Class<?>> classList = new ArrayList<Class<?>>(Arrays.asList(classesToInclude));
    Map<String, String> jarMd5Map = new TreeMap<String, String>();
    // for each class we use JarFinder to locate the jar in the local classpath.
    for (Class<?> clz : classList) {
        if (clz != null) {
            String localJarPath = JarFinder.getJar(clz);
            // we don't want to upload the same jar twice
            if (!jarMd5Map.containsKey(localJarPath)) {
                // We should not push core Hadoop classes with this tool.
                // Should this be the developer's responsibility, or should we
                // keep this guard here?
                if (!clz.getName().startsWith("org.apache.hadoop.")) {
                    // we compute the MD5 sum of the local jar
                    InputStream in = new FileInputStream(localJarPath);
                    boolean threw = true;
                    try {
                        String md5sum = DigestUtils.md5Hex(in);
                        jarMd5Map.put(localJarPath, md5sum);
                        threw = false;
                    } finally {
                        Closeables.close(in, threw);
                    }
                } else {
                    LOG.info("Ignoring {}, since it looks like it's from Hadoop's core libs", localJarPath);
                }
            }
        }
    }

    for (Entry<String, String> entry : jarMd5Map.entrySet()) {
        Path localJarPath = new Path(entry.getKey());
        String jarFilename = localJarPath.getName();
        String localMd5sum = entry.getValue();
        LOG.info("Jar {}. MD5 : [{}]", localJarPath, localMd5sum);

        Path remoteJarPath = new Path(libDir, jarFilename);
        Path remoteMd5Path = new Path(libDir, jarFilename + ".md5");

        // If the jar file does not exist in HDFS or if the MD5 file does not exist in HDFS,
        // we force the upload of the jar.
        if (!fs.exists(remoteJarPath) || !fs.exists(remoteMd5Path)) {
            copyJarToHDFS(fs, localJarPath, localMd5sum, remoteJarPath, remoteMd5Path);
        } else {
            // If the jar exists, we validate the MD5 file.
            // If the MD5 sum is different, we upload the jar.
            FSDataInputStream md5FileStream = null;

            String remoteMd5sum = "";
            try {
                md5FileStream = fs.open(remoteMd5Path);
                byte[] md5bytes = new byte[32];
                if (32 == md5FileStream.read(md5bytes)) {
                    remoteMd5sum = new String(md5bytes, Charsets.UTF_8);
                }
            } finally {
                if (md5FileStream != null) {
                    md5FileStream.close();
                }
            }

            if (localMd5sum.equals(remoteMd5sum)) {
                LOG.info("Jar {} already exists [{}] and md5sum are equals", jarFilename,
                        remoteJarPath.toUri().toASCIIString());
            } else {
                LOG.info("Jar {} already exists [{}] and md5sum are different!", jarFilename,
                        remoteJarPath.toUri().toASCIIString());
                copyJarToHDFS(fs, localJarPath, localMd5sum, remoteJarPath, remoteMd5Path);
            }

        }
        // In all cases we want to add the jar to the DistributedCache's classpath.
        DistributedCache.addFileToClassPath(remoteJarPath, conf, fs);
    }
    // and we create the symlink (was necessary in earlier versions of Hadoop)
    DistributedCache.createSymlink(conf);
}

From source file: com.cloudera.cdk.tools.JobClasspathHelper.java

/**
 * @param conf
 *            Configuration object for the Job. Used to get the FileSystem associated with it.
 * @param libDir
 *            Destination directory in the FileSystem (usually HDFS) where the libs are uploaded and looked up.
 * @param classesToInclude
 *            Classes that are needed by the job. JarFinder will look for the jar containing these classes.
 * @throws Exception
 */
public void prepareClasspath(final Configuration conf, final Path libDir, Class<?>... classesToInclude)
        throws Exception {
    FileSystem fs = FileSystem.get(conf);
    List<Class<?>> classList = new ArrayList<Class<?>>(Arrays.asList(classesToInclude));
    Map<String, String> jarMd5Map = new TreeMap<String, String>();
    // for each class we use JarFinder to locate the jar in the local classpath.
    for (Class<?> clz : classList) {
        if (clz != null) {
            String localJarPath = JarFinder.getJar(clz);
            // we don't want to upload the same jar twice
            if (!jarMd5Map.containsKey(localJarPath)) {
                // We should not push core Hadoop classes with this tool.
                // Should this be the developer's responsibility, or should we
                // keep this guard here?
                if (!clz.getName().startsWith("org.apache.hadoop.")) {
                    // we compute the MD5 sum of the local jar
                    InputStream in = new FileInputStream(localJarPath);
                    boolean threw = true;
                    try {
                        String md5sum = DigestUtils.md5Hex(in);
                        jarMd5Map.put(localJarPath, md5sum);
                        threw = false;
                    } finally {
                        Closeables.close(in, threw);
                    }
                } else {
                    logger.info("Ignoring {}, since it looks like it's from Hadoop's core libs", localJarPath);
                }
            }
        }
    }

    for (Entry<String, String> entry : jarMd5Map.entrySet()) {
        Path localJarPath = new Path(entry.getKey());
        String jarFilename = localJarPath.getName();
        String localMd5sum = entry.getValue();
        logger.info("Jar {}. MD5 : [{}]", localJarPath, localMd5sum);

        Path remoteJarPath = new Path(libDir, jarFilename);
        Path remoteMd5Path = new Path(libDir, jarFilename + ".md5");

        // If the jar file does not exist in HDFS or if the MD5 file does not exist in HDFS,
        // we force the upload of the jar.
        if (!fs.exists(remoteJarPath) || !fs.exists(remoteMd5Path)) {
            copyJarToHDFS(fs, localJarPath, localMd5sum, remoteJarPath, remoteMd5Path);
        } else {
            // If the jar exists, we validate the MD5 file.
            // If the MD5 sum is different, we upload the jar.
            FSDataInputStream md5FileStream = null;

            String remoteMd5sum = "";
            try {
                md5FileStream = fs.open(remoteMd5Path);
                byte[] md5bytes = new byte[32];
                if (32 == md5FileStream.read(md5bytes)) {
                    remoteMd5sum = new String(md5bytes, Charsets.UTF_8);
                }
            } finally {
                if (md5FileStream != null) {
                    md5FileStream.close();
                }
            }

            if (localMd5sum.equals(remoteMd5sum)) {
                logger.info("Jar {} already exists [{}] and md5sum are equals", jarFilename,
                        remoteJarPath.toUri().toASCIIString());
            } else {
                logger.info("Jar {} already exists [{}] and md5sum are different!", jarFilename,
                        remoteJarPath.toUri().toASCIIString());
                copyJarToHDFS(fs, localJarPath, localMd5sum, remoteJarPath, remoteMd5Path);
            }

        }
        // In all cases we want to add the jar to the DistributedCache's classpath.
        DistributedCache.addFileToClassPath(remoteJarPath, conf, fs);
    }
    // and we create the symlink (was necessary in earlier versions of Hadoop)
    DistributedCache.createSymlink(conf);
}

From source file: org.apache.parquet.cli.commands.ConvertCommand.java

@Override
@SuppressWarnings("unchecked")
public int run() throws IOException {
    Preconditions.checkArgument(targets != null && targets.size() == 1, "A data file is required.");

    String source = targets.get(0);

    CompressionCodecName codec = Codecs.parquetCodec(compressionCodecName);

    Schema schema;
    if (avroSchemaFile != null) {
        schema = Schemas.fromAvsc(open(avroSchemaFile));
    } else {
        schema = getAvroSchema(source);
    }
    Schema projection = filterSchema(schema, columns);

    Path outPath = qualifiedPath(outputPath);
    FileSystem outFS = outPath.getFileSystem(getConf());
    if (overwrite && outFS.exists(outPath)) {
        console.debug("Deleting output file {} (already exists)", outPath);
        outFS.delete(outPath);
    }

    Iterable<Record> reader = openDataFile(source, projection);
    boolean threw = true;
    long count = 0;
    try {
        try (ParquetWriter<Record> writer = AvroParquetWriter.<Record>builder(qualifiedPath(outputPath))
                .withWriterVersion(v2 ? PARQUET_2_0 : PARQUET_1_0).withConf(getConf())
                .withCompressionCodec(codec).withRowGroupSize(rowGroupSize)
                .withDictionaryPageSize(dictionaryPageSize < 64 ? 64 : dictionaryPageSize)
                .withDictionaryEncoding(dictionaryPageSize != 0).withPageSize(pageSize)
                .withDataModel(GenericData.get()).withSchema(projection).build()) {
            for (Record record : reader) {
                writer.write(record);
                count += 1;
            }
        }
        threw = false;
    } catch (RuntimeException e) {
        throw new RuntimeException("Failed on record " + count, e);
    } finally {
        if (reader instanceof Closeable) {
            Closeables.close((Closeable) reader, threw);
        }
    }

    return 0;
}

From source file: org.openqa.selenium.io.FileHandler.java

private static void copyFile(File from, File to, Filter onlyCopy) throws IOException {
    if (!onlyCopy.isRequired(from)) {
        return;
    }

    FileChannel out = null;
    FileChannel in = null;
    try {
        in = new FileInputStream(from).getChannel();
        out = new FileOutputStream(to).getChannel();
        final long length = in.size();

        final long copied = in.transferTo(0, in.size(), out);
        if (copied != length) {
            throw new IOException("Could not transfer all bytes.");
        }
    } finally {
        Closeables.close(out, false);
        Closeables.close(in, false);
    }
}

From source file: com.ml.ira.algos.AdaptiveLogisticModelParameters.java

public static AdaptiveLogisticModelParameters loadFromFile(File in) throws IOException {
    InputStream input = new FileInputStream(in);
    try {
        return loadFromStream(input);
    } finally {
        Closeables.close(input, true);
    }
}

From source file: org.plista.kornakapi.core.training.SemanticModel.java

/**
 * Method to save the model.
 * @throws IOException
 */
public void safe(String safeKey) throws IOException {
    /**
     * New model training changes the key. Inference can only save the model if its key is still valid, i.e. no new model has been computed between the start and end of the inference job.
     */
    if (!this.key.equals(safeKey)) {
        if (log.isInfoEnabled()) {
            log.info("Storing model Failed. Modelkey Changed");
        }
        return;
    }

    if (itemFeatures != null) {
        Path model = path.suffix("/itemFeature.model");
        Writer w = SequenceFile.createWriter(fs, lconf, model, Text.class, VectorWritable.class);
        for (String itemid : itemFeatures.keySet()) {
            Text id = new Text();
            VectorWritable val = new VectorWritable();
            id.set(itemid);
            val.set(itemFeatures.get(itemid));
            w.append(id, val);
        }
        Closeables.close(w, false);
    }
    if (indexItem != null) {
        Path model = path.suffix("/indexItem.model");
        Writer w = SequenceFile.createWriter(fs, lconf, model, IntWritable.class, Text.class);
        for (Integer itemid : indexItem.keySet()) {
            IntWritable key = new IntWritable();
            Text val = new Text();
            key.set(itemid);
            val.set(indexItem.get(itemid));
            w.append(key, val);
        }
        Closeables.close(w, false);
    }
    if (itemIndex != null) {
        Path model = path.suffix("/itemIndex.model");
        Writer w = SequenceFile.createWriter(fs, lconf, model, Text.class, IntWritable.class);
        for (String itemid : itemIndex.keySet()) {
            IntWritable val = new IntWritable();
            Text key = new Text();
            key.set(itemid);
            val.set(itemIndex.get(itemid));
            w.append(key, val);
        }
        Closeables.close(w, false);
    }
    if (log.isInfoEnabled()) {
        log.info("LDA Model Safed");
    }
}

From source file: org.qcri.pca.Norm2Job.java

public double loadResult(Path outputDirPath, Configuration conf) throws IOException {
    Path finalNumberFile = new Path(outputDirPath, "part-r-00000");
    SequenceFileIterator<NullWritable, DoubleWritable> iterator = new SequenceFileIterator<NullWritable, DoubleWritable>(
            finalNumberFile, true, conf);
    double norm2;
    try {
        Pair<NullWritable, DoubleWritable> next = iterator.next();
        norm2 = next.getSecond().get();
        if (iterator.hasNext())
            throw new IOException("More than one value after norm2Job!");
    } finally {
        Closeables.close(iterator, false);
    }
    return norm2;
}

From source file: org.apache.hama.ml.ann.NeuralNetwork.java

/**
 * Read the model meta-data from the specified location.
 * @throws IOException
 */
protected void readFromModel() throws IOException {
    Preconditions.checkArgument(this.modelPath != null, "Model path has not been set.");
    Configuration conf = new Configuration();
    FSDataInputStream is = null;
    try {
        URI uri = new URI(this.modelPath);
        FileSystem fs = FileSystem.get(uri, conf);
        is = new FSDataInputStream(fs.open(new Path(modelPath)));
        this.readFields(is);
    } catch (URISyntaxException e) {
        e.printStackTrace();
    } finally {
        Closeables.close(is, false);
    }
}

From source file: org.kitesdk.data.filesystem.FileSystemMetadataProvider.java

@Override
public DatasetDescriptor load(String name) {
    Preconditions.checkNotNull(name, "Dataset name cannot be null");

    logger.debug("Loading dataset metadata name:{}", name);

    final Path metadataPath = pathForMetadata(name);
    checkExists(rootFileSystem, metadataPath);

    InputStream inputStream = null;
    Properties properties = new Properties();
    DatasetDescriptor.Builder builder = new DatasetDescriptor.Builder();
    Path descriptorPath = new Path(metadataPath, DESCRIPTOR_FILE_NAME);

    boolean threw = true;
    try {
        inputStream = rootFileSystem.open(descriptorPath);
        properties.load(inputStream);
        threw = false;
    } catch (IOException e) {
        throw new MetadataProviderException(
                "Unable to load descriptor file:" + descriptorPath + " for dataset:" + name, e);
    } finally {
        try {
            Closeables.close(inputStream, threw);
        } catch (IOException e) {
            throw new MetadataProviderException(e);
        }
    }

    if (properties.containsKey(FORMAT_FIELD_NAME)) {
        builder.format(Accessor.getDefault().newFormat(properties.getProperty(FORMAT_FIELD_NAME)));
    }
    if (properties.containsKey(PARTITION_EXPRESSION_FIELD_NAME)) {
        builder.partitionStrategy(
                Accessor.getDefault().fromExpression(properties.getProperty(PARTITION_EXPRESSION_FIELD_NAME)));
    }
    Path schemaPath = new Path(metadataPath, SCHEMA_FILE_NAME);
    try {
        builder.schemaUri(rootFileSystem.makeQualified(schemaPath).toUri());
    } catch (IOException e) {
        throw new MetadataProviderException("Unable to load schema file:" + schemaPath + " for dataset:" + name,
                e);
    }

    final Path location;
    if (properties.containsKey(LOCATION_FIELD_NAME)) {
        // the location should always be written by this library and validated
        // when the descriptor is first created.
        location = new Path(properties.getProperty(LOCATION_FIELD_NAME));
    } else {
        // backwards-compatibility: older versions didn't write this property
        location = pathForDataset(name);
    }
    builder.location(location);

    // custom properties
    for (String property : properties.stringPropertyNames()) {
        if (!RESERVED_PROPERTIES.contains(property)) {
            builder.property(property, properties.getProperty(property));
        }
    }

    return builder.build();
}

From source file: com.kixeye.chassis.bootstrap.Application.java

private void closeConfiguration(org.apache.commons.configuration.Configuration configuration)
        throws IOException {
    if (configuration instanceof CompositeConfiguration) {
        CompositeConfiguration config = (CompositeConfiguration) configuration;
        for (int i = 0; i < config.getNumberOfConfigurations(); i++) {
            closeConfiguration(config.getConfiguration(i));
        }
    } else if (configuration instanceof AggregatedConfiguration) {
        AggregatedConfiguration config = (AggregatedConfiguration) configuration;
        for (int i = 0; i < config.getNumberOfConfigurations(); i++) {
            closeConfiguration(config.getConfiguration(i));
        }
    } else {
        if (configuration instanceof DynamicWatchedConfiguration) {
            DynamicWatchedConfiguration dynamicWatchedConfiguration = (DynamicWatchedConfiguration) configuration;
            if (dynamicWatchedConfiguration.getSource() instanceof Closeable) {
                Closeables.close((Closeable) dynamicWatchedConfiguration.getSource(), true);
            }
        }
    }
}