Example usage for org.apache.hadoop.fs.FileSystem.getConf

List of usage examples for org.apache.hadoop.fs.FileSystem.getConf

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem.getConf().

Prototype

@Override
public Configuration getConf()
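
A minimal, self-contained sketch of a typical call (not taken from the examples below; the class name and the printed property key are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class GetConfSketch {
    public static void main(String[] args) throws Exception {
        // Build a client-side Configuration and obtain the default FileSystem for it.
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // getConf() returns the Configuration the FileSystem was initialized with,
        // so effective settings such as fs.defaultFS can be read back from it.
        Configuration effective = fs.getConf();
        System.out.println("fs.defaultFS = " + effective.get("fs.defaultFS"));

        fs.close();
    }
}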


Usage

From source file: BooleanRetrievalCompressed.java

License: Apache License

private void initialize(String indexPath, String collectionPath, FileSystem fs) throws IOException {
    index = new MapFile.Reader(new Path(indexPath + "/part-r-00000"), fs.getConf());
    collection = fs.open(new Path(collectionPath));
    stack = new Stack<Set<Integer>>();
}

From source file: clone.ReadSequenceFile.java

License: Apache License

private static int readSequenceFile(Path path, FileSystem fs, int max) throws IOException {
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, fs.getConf());

    System.out.println("Reading " + path + "...\n");
    try {
        System.out.println("Key type: " + reader.getKeyClass().toString());
        System.out.println("Value type: " + reader.getValueClass().toString() + "\n");
    } catch (Exception e) {
        throw new RuntimeException("Error loading key/value class", e);
    }

    Writable key = null, value;
    int n = 0;
    try {
        if (Tuple.class.isAssignableFrom(reader.getKeyClass())) {
            key = TUPLE_FACTORY.newTuple();
        } else {
            key = (Writable) reader.getKeyClass().newInstance();
        }

        if (Tuple.class.isAssignableFrom(reader.getValueClass())) {
            value = TUPLE_FACTORY.newTuple();
        } else {
            value = (Writable) reader.getValueClass().newInstance();
        }

        while (reader.next(key, value)) {
            System.out.println("Record " + n);
            System.out.println("Key: " + key + "\nValue: " + value);
            System.out.println("----------------------------------------");
            n++;

            if (n >= max)
                break;
        }
        reader.close();
        System.out.println(n + " records read.\n");
    } catch (Exception e) {
        e.printStackTrace();
    }

    return n;
}

From source file: com.alibaba.jstorm.hdfs.spout.SequenceFileReader.java

License: Apache License

public SequenceFileReader(FileSystem fs, Path file, Map conf) throws IOException {
    super(fs, file);
    int bufferSize = !conf.containsKey(BUFFER_SIZE) ? DEFAULT_BUFF_SIZE
            : Integer.parseInt(conf.get(BUFFER_SIZE).toString());
    this.reader = new SequenceFile.Reader(fs.getConf(), SequenceFile.Reader.file(file),
            SequenceFile.Reader.bufferSize(bufferSize));
    this.key = (Key) ReflectionUtils.newInstance(reader.getKeyClass(), fs.getConf());
    this.value = (Value) ReflectionUtils.newInstance(reader.getValueClass(), fs.getConf());
    this.offset = new SequenceFileReader.Offset(0, 0, 0);
}

From source file: com.alibaba.jstorm.hdfs.spout.SequenceFileReader.java

License: Apache License

public SequenceFileReader(FileSystem fs, Path file, Map conf, String offset) throws IOException {
    super(fs, file);
    int bufferSize = !conf.containsKey(BUFFER_SIZE) ? DEFAULT_BUFF_SIZE
            : Integer.parseInt(conf.get(BUFFER_SIZE).toString());
    this.offset = new SequenceFileReader.Offset(offset);
    this.reader = new SequenceFile.Reader(fs.getConf(), SequenceFile.Reader.file(file),
            SequenceFile.Reader.bufferSize(bufferSize));
    this.key = (Key) ReflectionUtils.newInstance(reader.getKeyClass(), fs.getConf());
    this.value = (Value) ReflectionUtils.newInstance(reader.getValueClass(), fs.getConf());
    skipToOffset(this.reader, this.offset, this.key);
}

From source file: com.asakusafw.bulkloader.cache.CacheBuildTest.java

License: Apache License

private Collection<TestDataModel> collectContent(FileSystem fs, FileStatus status) throws IOException {
    Collection<TestDataModel> results = new ArrayList<>();
    try (ModelInput<TestDataModel> input = TemporaryStorage.openInput(fs.getConf(), TestDataModel.class,
            status.getPath())) {
        TestDataModel model = new TestDataModel();
        while (input.readTo(model)) {
            results.add(model.copy());
        }
    }
    return results;
}

From source file: com.asakusafw.bulkloader.collector.ExportFileSendTest.java

License: Apache License

@SuppressWarnings("unchecked")
private File prepareInput(String path) throws IOException {
    File result = folder.newFile();
    Path p = new Path(new File(path).toURI());
    FileSystem fs = p.getFileSystem(new Configuration());
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, p, fs.getConf());
    try {
        Writable buffer = (Writable) reader.getValueClass().newInstance();
        ModelOutput<Writable> output = (ModelOutput<Writable>) TemporaryStorage.openOutput(fs.getConf(),
                reader.getValueClass(), new BufferedOutputStream(new FileOutputStream(result)));
        try {
            while (reader.next(NullWritable.get(), buffer)) {
                output.write(buffer);
            }
        } finally {
            output.close();
        }
    } catch (Exception e) {
        throw new AssertionError(e);
    } finally {
        reader.close();
    }
    return result;
}

From source file: com.awcoleman.StandaloneJava.AvroCombinerByBlock.java

License: Apache License

public long getBlockSize() throws IOException {

    Configuration conf = new Configuration();

    /*
          //Block Size from XML conf files (is null if not explicitly defined)
          Configuration conf = new Configuration();
          conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));  //BigTop path, change to yours
          conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));
          System.out.println("Default block size from conf xml files: "+conf.get("dfs.blocksize"));  //Default blocksize
    */

    //FS blocksize
    FileSystem hdfs = FileSystem.get(conf);
    Configuration cconf = hdfs.getConf();
    String block = cconf.get("dfs.blocksize");
    long blocksize = Long.parseLong(block);
    //System.out.println("FS hdfs block size: "+block+" ( "+blocksize+" )");

    /*      
          //Block Size of file (not valid for directories - returns 0. Not sure how to get default directory block size if overridden from fs default)
          Path myFile = new Path("/myFile");
          FileStatus fileStatus = hdfs.getFileStatus(myFile);
          long fileBlockSize = fileStatus.getBlockSize();
          System.out.println("Input directory block size: "+fileBlockSize);   
    */
    return blocksize;
}
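
Note that Long.parseLong on the raw "dfs.blocksize" string throws a NumberFormatException if the value is configured with a size suffix such as "128m", which newer Hadoop versions accept. A shorter alternative sketch (the method name and the path are illustrative, not part of the project above) asks the FileSystem directly:

public long getBlockSizeViaApi() throws IOException {
    Configuration conf = new Configuration();
    FileSystem hdfs = FileSystem.get(conf);
    // getDefaultBlockSize(Path) returns the effective default block size in bytes
    // for the filesystem owning the given path; the root path is only an illustration.
    return hdfs.getDefaultBlockSize(new Path("/"));
}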

From source file: com.bigdog.hadoop.hdfs.HDFS_Test.java

public void listConfig() {
    Configuration config = new Configuration();
    FileSystem fs;
    try {
        fs = FileSystem.get(config);
        Iterator<Entry<String, String>> entrys = fs.getConf().iterator();
        while (entrys.hasNext()) {
            Entry<String, String> item = entrys.next();
            System.out.println(item.getKey() + ": " + item.getValue());
        }
    } catch (IOException ex) {
        Logger.getLogger(HDFS_Test.class.getName()).log(Level.SEVERE, null, ex);
    }

}

From source file: com.cloudera.CacheTool.java

License: Apache License

public static void createFile(FileSystem fs, Path fileName, long fileLen) throws IOException {
    int bufferLen = 1024;
    assert bufferLen > 0;
    if (!fs.mkdirs(fileName.getParent())) {
        throw new IOException("Mkdirs failed to create " + fileName.getParent().toString());
    }
    FSDataOutputStream out = null;
    try {
        out = fs.create(fileName, true,
                fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), (short) 1,
                fs.getDefaultBlockSize(fileName));
        if (fileLen > 0) {
            byte[] toWrite = new byte[bufferLen];
            Random rb = new Random(0);
            long bytesToWrite = fileLen;
            while (bytesToWrite > 0) {
                rb.nextBytes(toWrite);
                int bytesToWriteNext = (bufferLen < bytesToWrite) ? bufferLen : (int) bytesToWrite;

                out.write(toWrite, 0, bytesToWriteNext);
                bytesToWrite -= bytesToWriteNext;
            }
        }
    } finally {
        if (out != null) {
            out.close();
        }
    }
}

From source file: com.cloudera.cdk.data.TestDatasetDescriptor.java

License: Apache License

@Test
public void testSchemaFromHdfs() throws IOException {
    FileSystem fs = getDFS();

    // copy a schema to HDFS
    Path schemaPath = fs.makeQualified(new Path("schema.avsc"));
    FSDataOutputStream out = fs.create(schemaPath);
    IOUtils.copyBytes(USER_SCHEMA_URL.toURL().openStream(), out, fs.getConf());
    out.close();

    // build a schema using the HDFS path and check it's the same
    Schema schema = new DatasetDescriptor.Builder().schemaUri(schemaPath.toUri()).build().getSchema();

    Assert.assertEquals(USER_SCHEMA, schema);
}