Example usage for org.apache.hadoop.hdfs DistributedFileSystem create

List of usage examples for org.apache.hadoop.hdfs DistributedFileSystem create

Introduction

On this page you can find example usage of org.apache.hadoop.hdfs DistributedFileSystem create.

Prototype

public FSDataOutputStream create(Path f, boolean overwrite) throws IOException 

Document

Create an FSDataOutputStream at the indicated Path.
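
The two-argument overload overwrites any existing file at the path when overwrite is true, and fails if the file already exists when overwrite is false. A minimal sketch of calling it directly, assuming a cluster reachable at hdfs://localhost:8020 (the URI and file path below are hypothetical):

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class CreateExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:8020"); // hypothetical NameNode address

        // FileSystem.get returns a DistributedFileSystem when fs.defaultFS is an hdfs:// URI
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

        Path path = new Path("/tmp/create-example.txt"); // hypothetical path
        // overwrite = true: replace the file if it already exists
        FSDataOutputStream out = dfs.create(path, true);
        out.write("hello hdfs".getBytes(StandardCharsets.UTF_8));
        out.close();

        dfs.close();
    }
}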

Usage

From source file:io.hops.erasure_coding.TestUtil.java

License:Apache License

public static void createRandomFile(DistributedFileSystem dfs, Path path, long seed, int blockCount,
        int blockSize) throws IOException {
    FSDataOutputStream out = dfs.create(path, new EncodingPolicy("src", (short) 1));
    byte[] buffer = randomBytes(seed, blockCount, blockSize);
    out.write(buffer, 0, buffer.length);
    out.close();
}

From source file:io.hops.erasure_coding.Util.java

License:Apache License

public static void createRandomFile(DistributedFileSystem dfs, Path path, long seed, int blockCount,
        int blockSize, EncodingPolicy policy) throws IOException {
    FSDataOutputStream out = dfs.create(path, policy);
    byte[] buffer = Util.randomBytes(seed, blockCount, blockSize);
    out.write(buffer, 0, buffer.length);
    out.close();
}

From source file:org.apache.tajo.storage.TestByteBufLineReader.java

License:Apache License

@Test
public void testReaderWithDFS() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitClusterUp();

    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    Path tablePath = new Path("/testReaderWithDFS");
    Path filePath = new Path(tablePath, "data.dat");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();
        FSDataOutputStream out = fs.create(filePath, true);
        out.write(LINE.getBytes(Charset.defaultCharset()));
        out.write('\n');
        out.close();

        assertTrue(fs.exists(filePath));
        FSDataInputStream inputStream = fs.open(filePath);
        assertTrue(inputStream.getWrappedStream() instanceof ByteBufferReadable);

        ByteBufLineReader lineReader = new ByteBufLineReader(new FSDataInputChannel(inputStream));
        assertEquals(LINE, lineReader.readLine());
        lineReader.seek(0);
        assertEquals(LINE, lineReader.readLine());
        assertNull(lineReader.readLine());

        lineReader.close();
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}