Example usage for org.apache.hadoop.fs FileSystem open

Introduction

This page collects usage examples for org.apache.hadoop.fs FileSystem open.

Prototype

public FSDataInputStream open(Path f) throws IOException

Document

Opens an FSDataInputStream at the indicated Path.
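
Before the real-world usages below, here is a minimal, self-contained sketch of a typical call; the path used is hypothetical, and any Hadoop-supported URI would do. FSDataInputStream is Closeable, so try-with-resources releases it automatically.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class OpenExample {
    public static void main(String[] args) throws IOException {
        // Hypothetical input path; replace with a real file URI.
        Path path = new Path("hdfs:///tmp/example.txt");
        FileSystem fs = path.getFileSystem(new Configuration());

        // FSDataInputStream is Closeable, so try-with-resources closes it.
        try (FSDataInputStream in = fs.open(path);
                BufferedReader reader = new BufferedReader(
                        new InputStreamReader(in, StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                System.out.println(line);
            }
        }
    }
}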

Usage

From source file:com.fullcontact.sstable.hadoop.IndexOffsetScanner.java

License:Apache License

/**
 * Hadoop fs based version.
 *
 * @param path File path.
 * @param fileSystem File system.
 */
public IndexOffsetScanner(final Path path, final FileSystem fileSystem) {
    closer = Closer.create();
    try {
        final FSDataInputStream inputStream = fileSystem.open(path);
        this.input = closer.register(new DataInputStream(new FastBufferedInputStream(inputStream)));
    } catch (IOException e) {
        throw new IOError(e);
    }
}

From source file:com.fullcontact.sstable.index.SSTableIndexIndex.java

License:Apache License

/**
 * Read an existing index. Reads and returns the index index, which is a list of chunks defined by the Cassandra
 * Index.db file along with the configured split size.
 *
 * @param fileSystem Hadoop file system.
 * @param sstablePath SSTable Index.db path.
 * @return Index of chunks.
 * @throws IOException if the index file cannot be read.
 */
public static SSTableIndexIndex readIndex(final FileSystem fileSystem, final Path sstablePath)
        throws IOException {
    final Closer closer = Closer.create();
    final Path indexPath = sstablePath.suffix(SSTABLE_INDEX_SUFFIX);

    // Detonate if we don't have an index.
    final FSDataInputStream inputStream = closer.register(fileSystem.open(indexPath));

    final SSTableIndexIndex indexIndex = new SSTableIndexIndex();
    try {
        while (inputStream.available() != 0) {
            indexIndex.add(inputStream.readLong(), inputStream.readLong());
        }
    } finally {
        closer.close();
    }

    return indexIndex;
}
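
One caveat with the loop above: InputStream#available() is not a reliable end-of-file test in general; HDFS streams happen to report the bytes remaining in the file, but the InputStream contract does not guarantee that. A sketch that bounds the loop by the file length instead (the helper name is illustrative, not from the project):

import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IndexReadSketch {
    /** Reads (offset, end) long pairs until the file's known length is exhausted. */
    public static void readPairs(FileSystem fs, Path indexPath) throws IOException {
        long fileLength = fs.getFileStatus(indexPath).getLen();
        try (FSDataInputStream in = fs.open(indexPath)) {
            while (in.getPos() < fileLength) {
                long offset = in.readLong();
                long end = in.readLong();
                // ... record the chunk (offset, end) ...
            }
        }
    }
}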

From source file:com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HoplogUtil.java

License:Apache License

public static long readCleanUpIntervalMillis(FileSystem fs, Path cleanUpIntervalPath) throws IOException {
    if (fs.exists(cleanUpIntervalPath)) {
        FSDataInputStream input = new FSDataInputStream(fs.open(cleanUpIntervalPath));
        long intervalDurationMillis = input.readLong();
        input.close();
        return intervalDurationMillis;
    } else {
        return -1L;
    }
}
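
Since FileSystem#open already returns an FSDataInputStream, the wrapping constructor above is redundant. A tighter equivalent, as a sketch rather than the project's code:

import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CleanUpIntervalSketch {
    public static long readCleanUpIntervalMillis(FileSystem fs, Path cleanUpIntervalPath) throws IOException {
        if (!fs.exists(cleanUpIntervalPath)) {
            return -1L;
        }
        // open() already returns an FSDataInputStream; no wrapper needed,
        // and try-with-resources closes the stream even if readLong() throws.
        try (FSDataInputStream input = fs.open(cleanUpIntervalPath)) {
            return input.readLong();
        }
    }
}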

From source file:com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HoplogUtil.java

License:Apache License

public static void exposeCleanupIntervalMillis(FileSystem fs, Path path, long intervalDurationMillis) {
    FSDataInputStream input = null;
    FSDataOutputStream output = null;
    try {
        if (fs.exists(path)) {
            input = new FSDataInputStream(fs.open(path));
            if (intervalDurationMillis == input.readLong()) {
                input.close();
                return;
            }
            input.close();
            fs.delete(path, true);
        }
        output = fs.create(path);
        output.writeLong(intervalDurationMillis);
        output.close();
    } catch (IOException e) {
        return;
    } finally {
        try {
            if (input != null) {
                input.close();
            }
            if (output != null) {
                output.close();
            }
        } catch (IOException e2) {
            // Ignore failures while closing streams.
        }
    }
}

From source file:com.geneix.bottle.WordRecordReader.java

License:Apache License

public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException {
    if (LOG.isInfoEnabled()) {
        LOG.info("Initializing WordRecordReader");
    }
    FileSplit split = (FileSplit) genericSplit;
    Configuration job = context.getConfiguration();
    this.maxWordLength = job.getInt(MAX_WORD_LENGTH, Integer.MAX_VALUE);
    start = split.getStart();
    end = start + split.getLength();
    final Path file = split.getPath();

    // open the file and seek to the start of the split
    final FileSystem fs = file.getFileSystem(job);
    fileIn = fs.open(file);

    CompressionCodec codec = new CompressionCodecFactory(job).getCodec(file);
    if (null != codec) {
        throw new IOException("Cannot handle compressed files right now");
    } else {
        fileIn.seek(start);
        in = new WordReader(fileIn, job);
        filePosition = fileIn;
    }
    // If this is not the first split, we always throw away first record
    // because we always (except the last split) read one extra line in
    // next() method.
    if (start != 0) {
        start += in.readWord(new Text(), 0, maxBytesToConsume(start));
    }
    this.pos = start;
}

From source file:com.github.bskaggs.mapreduce.flowfile.AbstractFlowFileV3RecordReader.java

License:Apache License

@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    FileSplit fileSplit = (FileSplit) split;

    Path file = fileSplit.getPath();
    FileSystem fs = file.getFileSystem(context.getConfiguration());
    fileStream = fs.open(file);

    startPos = fileSplit.getStart();
    nextPos = startPos;
    length = fileSplit.getLength();
    lastPos = nextPos + length;
}

From source file:com.github.dryangkun.hbase.tidx.hive.HBaseSerDeHelper.java

License:Apache License

/**
 * Reads the Avro schema from the given HDFS URL.
 */
public static Schema getSchemaFromFS(String schemaFSUrl, Configuration conf) throws SerDeException {
    FSDataInputStream in = null;
    FileSystem fs = null;
    try {
        fs = FileSystem.get(new URI(schemaFSUrl), conf);
        in = fs.open(new Path(schemaFSUrl));
        Schema s = Schema.parse(in);
        return s;
    } catch (URISyntaxException | IOException e) {
        throw new SerDeException("Failure reading schema from filesystem", e);
    } finally {
        IOUtils.closeQuietly(in);
    }
}
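
Schema.parse(InputStream) is deprecated in more recent Avro releases in favor of the Parser API. Under that assumption (Avro 1.5 or later on the classpath), an equivalent sketch:

import java.io.IOException;
import java.io.InputStream;
import java.net.URI;

import org.apache.avro.Schema;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SchemaReadSketch {
    public static Schema getSchemaFromFS(String schemaFSUrl, Configuration conf) throws IOException {
        // URI.create throws an unchecked exception, so no URISyntaxException handling is needed.
        FileSystem fs = FileSystem.get(URI.create(schemaFSUrl), conf);
        try (InputStream in = fs.open(new Path(schemaFSUrl))) {
            return new Schema.Parser().parse(in);
        }
    }
}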

From source file:com.github.jmabuin.blaspark.io.IO.java

License:Open Source License

public static DenseVector readVectorFromFileInHDFS(String file, Configuration conf) {

    try {
        FileSystem fs = FileSystem.get(conf);

        Path pt = new Path(file);

        BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(pt)));
        String line;
        line = br.readLine();

        double[] vector = null;

        boolean arrayInfo = true;

        int i = 0;

        while (line != null) {

            if (arrayInfo && line.charAt(0) == '%') {
                // Still in the header: skip comment lines starting with '%'.
            } else if (arrayInfo) {
                // First non-comment line holds the vector size.
                arrayInfo = false;
                String[] matrixInfo = line.split(" ");
                vector = new double[Integer.parseInt(matrixInfo[0])];
            } else {
                vector[i] = Double.parseDouble(line);
                i++;
            }

            line = br.readLine();
        }

        br.close();

        return new DenseVector(vector);

    } catch (IOException e) {
        LOG.error("Error in " + IO.class.getName() + ": " + e.getMessage());
        e.printStackTrace();
        System.exit(1);
    }

    return null;
}

From source file:com.github.libsml.commons.util.HadoopUtils.java

License:Apache License

public static String readString(Path path, Configuration conf) throws IOException {
    FileSystem fs = path.getFileSystem(conf);
    FileStatus[] statuses = fs.listStatus(path);
    StringBuilder re = new StringBuilder();
    for (FileStatus status : statuses) {
        if (status.isFile() && !status.getPath().getName().equals("_SUCCESS")) {
            FSDataInputStream streaming = fs.open(status.getPath());
            BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(streaming));
            re.append(bufferedReader.readLine() + System.lineSeparator());
        }
    }
    return re.toString();
}
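
Note that the helper above keeps only the first line of each file and never closes the streams it opens. A variant that drains each file and releases its resources, sketched under the same assumptions:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadStringSketch {
    public static String readString(Path path, Configuration conf) throws IOException {
        FileSystem fs = path.getFileSystem(conf);
        StringBuilder re = new StringBuilder();
        for (FileStatus status : fs.listStatus(path)) {
            if (status.isFile() && !status.getPath().getName().equals("_SUCCESS")) {
                // try-with-resources closes the reader (and underlying stream) per file.
                try (BufferedReader reader = new BufferedReader(
                        new InputStreamReader(fs.open(status.getPath()), StandardCharsets.UTF_8))) {
                    String line;
                    while ((line = reader.readLine()) != null) {
                        re.append(line).append(System.lineSeparator());
                    }
                }
            }
        }
        return re.toString();
    }
}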

From source file:com.github.sadikovi.netflowlib.NetFlowReaderSuite.java

License:Apache License

private FSDataInputStream getTestStream(String file) throws IOException {
    Configuration conf = new Configuration(false);
    Path path = new Path(file);
    FileSystem fs = path.getFileSystem(conf);
    return fs.open(path);
}