Example usage for org.apache.hadoop.io IOUtils closeStream

List of usage examples for org.apache.hadoop.io IOUtils closeStream

Introduction

On this page you can find example usages of org.apache.hadoop.io IOUtils closeStream.

Prototype

public static void closeStream(java.io.Closeable stream) 

Source Link

Document

Closes the stream, ignoring any Throwable.
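
A minimal sketch of the typical pattern, assuming a default Configuration and a hypothetical HDFS path /tmp/example.txt: the stream is closed in a finally block via closeStream, which ignores any Throwable and tolerates a null argument, so cleanup cannot mask an exception thrown in the try block.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class CloseStreamSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        FSDataInputStream in = null;
        try {
            in = fs.open(new Path("/tmp/example.txt")); // hypothetical path
            IOUtils.copyBytes(in, System.out, 4096, false);
        } finally {
            // Safe even if 'in' is null or open() failed: closeStream swallows any Throwable.
            IOUtils.closeStream(in);
        }
    }
}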

Usage

From source file:com.mozilla.grouperfish.transforms.coclustering.display.WriteCoClusteringOutput.java

License:Apache License

private void loadCentroids() throws IOException {
    Text k = new Text();
    Cluster v = new Cluster();
    CoCluster c;
    SequenceFile.Reader currReader = null;
    try {
        fs = FileSystem.get(clustersPath.toUri(), conf);
        for (FileStatus status : fs.listStatus(clustersPath)) {
            Path p = status.getPath();
            if (!status.isDir() && !p.getName().startsWith("_")) {
                try {
                    currReader = new SequenceFile.Reader(fs, p, conf);
                    while (currReader.next(k, v)) {
                        c = new CoCluster(v.getCenter(), v.getMeasure());
                        coclusters.put(v.getId(), c);
                    }
                } finally {
                    IOUtils.closeStream(currReader);
                }
            }
        }
    } catch (IOException ie) {
        LOG.error("Error while reading clusters", ie);

    } finally {
        if (currReader != null) {
            IOUtils.closeStream(currReader);
        }
        if (fs != null) {
            fs.close();
        }
    }
}

From source file:com.mozilla.grouperfish.transforms.coclustering.display.WriteCoClusteringOutput.java

License:Apache License

private void loadPoints() throws IOException {
    SequenceFile.Reader currReader = null;
    IntWritable k = new IntWritable();
    CoCluster currCluster;
    int currVID;
    WeightedVectorWritable wvw = new WeightedVectorWritable();
    try {
        fs = FileSystem.get(clusteredPointsPath.toUri(), conf);
        for (FileStatus status : fs.listStatus(clusteredPointsPath)) {
            Path p = status.getPath();
            if (!status.isDir() && !p.getName().startsWith("_")) {
                try {
                    currReader = new SequenceFile.Reader(fs, p, conf);
                    while (currReader.next(k, wvw)) {
                        currCluster = coclusters.get(k.get());
                        NamedVector v = (NamedVector) wvw.getVector();
                        currVID = Integer.parseInt(v.getName());
                        if (docIDMap.containsKey(currVID)) {
                            currCluster.put(v, docIDMap.get(currVID), true);
                        } else if (featureIDMap.containsKey(currVID)) {
                            currCluster.put(v, featureIDMap.get(currVID), false);
                        } else {
                            LOG.error("Key not feature or document!");
                        }
                    }
                } finally {
                    if (currReader != null) {
                        IOUtils.closeStream(currReader);
                    }
                }
            }
        }
    } catch (IOException ie) {
        LOG.info("Error while reading points", ie);
    } catch (ClassCastException ce) {
        LOG.info("NamedVectors possibly not used", ce);
    } finally {
        if (currReader != null) {
            IOUtils.closeStream(currReader);
        }
        if (fs != null) {
            fs.close();
        }
    }
}

From source file:com.mvdb.etl.consumer.SequenceFileConsumer.java

License:Apache License

@Override
public boolean flushAndClose() {
    if (writer != null) {
        IOUtils.closeStream(writer);
    }
    return true;
}

From source file:com.mvdb.etl.dao.impl.JdbcGenericDAO.java

License:Apache License

@Override
public boolean scan2(String objectName, File snapshotDirectory) {
    String hadoopLocalFS = "file:///";
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", hadoopLocalFS);
    String dataFileName = "data-" + objectName + ".dat";
    File dataFile = new File(snapshotDirectory, dataFileName);
    Path path = new Path(dataFile.getAbsolutePath());

    FileSystem fs;
    try {
        fs = FileSystem.get(conf);
        SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);

        Text key = new Text();
        BytesWritable value = new BytesWritable();
        while (reader.next(key, value)) {
            byte[] bytes = value.getBytes();
            ByteArrayInputStream bis = new ByteArrayInputStream(bytes);
            ObjectInputStream ois = new ObjectInputStream(bis);
            GenericDataRecord dr = (GenericDataRecord) ois.readObject();
            System.out.println(dr.toString());
        }

        IOUtils.closeStream(reader);
    } catch (IOException e) {
        logger.error("scan2():", e);
        return false;
    } catch (ClassNotFoundException e) {
        logger.error("scan2():", e);
        return false;
    }

    return true;
}

From source file:com.mvdb.platform.action.ScanDBTable.java

License:Apache License

public static boolean scan(String dataFileName) {
    File dataFile = new File(dataFileName);
    String hadoopLocalFS = "file:///";
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", hadoopLocalFS);
    //        String dataFileName = "data-" + objectName + ".dat";
    //        File dataFile = new File(snapshotDirectory, dataFileName);
    Path path = new Path(dataFile.getAbsolutePath());

    FileSystem fs;
    try {
        fs = FileSystem.get(conf);
        SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);

        Text key = new Text();
        BytesWritable value = new BytesWritable();
        while (reader.next(key, value)) {
            byte[] bytes = value.getBytes();
            ByteArrayInputStream bis = new ByteArrayInputStream(bytes);
            ObjectInputStream ois = new ObjectInputStream(bis);
            Object object = ois.readObject();
            if (object instanceof GenericDataRecord) {
                GenericDataRecord dr = (GenericDataRecord) object;
                System.out.println(dr.toString());
            }
            if (object instanceof MultiVersionRecord) {
                MultiVersionRecord mvr = (MultiVersionRecord) object;
                System.out.println(mvr.toString());
            }
        }

        IOUtils.closeStream(reader);
    } catch (IOException e) {
        logger.error("scan2():", e);
        return false;
    } catch (ClassNotFoundException e) {
        logger.error("scan2():", e);
        return false;
    }

    return true;
}

From source file:com.mvdb.scratch.HadoopClient.java

License:Apache License

public static void readSequenceFile(String sequenceFileName, String hadoopFS) throws IOException {
    Path path = new Path(sequenceFileName);
    conf.set("fs.defaultFS", hadoopFS);
    FileSystem fs = FileSystem.get(conf);

    SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);

    IntWritable key = new IntWritable(); // this could be the wrong type
    BytesWritable value = new BytesWritable(); // also could be wrong

    while (reader.next(key, value)) {
        System.out.println(key + ":" + new String(value.getBytes()));
    }

    IOUtils.closeStream(reader);
}

From source file:com.mvdb.scratch.HadoopClient.java

License:Apache License

/**
 * Convert the lines of text in a file to binary and write to a Hadoop
 * sequence file.
 * 
 * @param dataFile File containing lines of text
 * @param sequenceFileName Name of the sequence file to create
 * @param hadoopFS Hadoop file system
 * 
 * @throws IOException
 */
public static void writeToSequenceFile(File dataFile, String sequenceFileName, String hadoopFS)
        throws IOException {

    IntWritable key = null;
    BytesWritable value = null;

    conf.set("fs.defaultFS", hadoopFS);
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path(sequenceFileName);

    if ((conf != null) && (dataFile != null) && (dataFile.exists())) {
        SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, path, IntWritable.class,
                BytesWritable.class);

        List<String> lines = FileUtils.readLines(dataFile);

        for (int i = 0; i < lines.size(); i++) {
            value = new BytesWritable(lines.get(i).getBytes());
            key = new IntWritable(i);
            writer.append(key, value);
        }
        IOUtils.closeStream(writer);
    }
}

From source file:com.ramsane.samplehadoop.ReadFile.java

public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:9000");
    FSDataInputStream is = null;
    try {
        // get file system object....
        FileSystem fs = FileSystem.get(conf);
        is = fs.open(new Path("/big"));
        IOUtils.copyBytes(is, System.out, 4096, false);
    } catch (IOException ex) {
        System.out.println(ex.getMessage());
    } finally {
        IOUtils.closeStream(is);
    }
}

From source file:com.ramsane.samplehadoop.ReadTwice.java

public static void main(String[] args) {
    Configuration cfg = new Configuration();
    cfg.set("fs.defaultFS", "hdfs://localhost:9000");
    FSDataInputStream in = null;
    try {
        FileSystem fs = FileSystem.get(cfg);
        in = fs.open(new Path("/big"));
        System.out.println("First TIme...");
        IOUtils.copyBytes(in, System.out, 4096, false);
        System.out.println("Second time..");
        in.seek(0);
        IOUtils.copyBytes(in, System.out, 4096, false);
    } catch (IOException ex) {
        System.out.println(ex.getMessage());
    } finally {
        IOUtils.closeStream(in);
    }
}

From source file:com.taobao.datax.plugins.writer.fsewriter.FseWriter.java

License:Open Source License

private void closeAll() {
    try {
        IOUtils.closeStream(fs);
    } catch (Exception e) {
        throw new DataExchangeException(
                String.format("FseWriter closing filesystem failed: %s,%s", e.getMessage(), e.getCause()));
    }
}