Example usage for org.apache.hadoop.hdfs DistributedFileSystem create

List of usage examples for org.apache.hadoop.hdfs DistributedFileSystem create

Introduction

On this page you can find an example usage of org.apache.hadoop.hdfs DistributedFileSystem create.

Prototype

public FSDataOutputStream create(Path f) throws IOException 

Source Link

Document

Create an FSDataOutputStream at the indicated Path.

Usage

From source file:backup.integration.MiniClusterTestBase.java

License: Apache License

/**
 * Writes test data to {@code path} in two phases: first via {@code create},
 * then via {@code append}, calling {@code hsync()} after every chunk so each
 * write is durably flushed to the DataNodes as a separate sync point.
 *
 * @param fileSystem the HDFS instance to write to
 * @param path       destination file path (created, then appended to)
 * @throws IOException          on any filesystem failure
 * @throws InterruptedException if the inter-sync sleep is interrupted
 */
private void writeFile(DistributedFileSystem fileSystem, Path path) throws IOException, InterruptedException {
    // Convert once and pin UTF-8 so the bytes do not depend on the platform
    // default charset ("abc" is ASCII, so the payload is identical either way).
    final byte[] payload = "abc".getBytes(java.nio.charset.StandardCharsets.UTF_8);
    // Phase 1: create the file, writing and hsync-ing ten chunks with a short
    // pause between syncs so each hsync lands as a distinct durability point.
    try (FSDataOutputStream outputStream = fileSystem.create(path)) {
        for (int i = 0; i < 10; i++) {
            outputStream.write(payload);
            outputStream.hsync();
            Thread.sleep(50);
        }
    }
    // Phase 2: reopen in append mode and repeat, exercising the append path.
    try (FSDataOutputStream outputStream = fileSystem.append(path)) {
        for (int i = 0; i < 10; i++) {
            outputStream.write(payload);
            outputStream.hsync();
            Thread.sleep(50);
        }
    }
}

From source file:io.hops.erasure_coding.TestLocalEncodingManagerImpl.java

License:Apache License

/**
 * Writes a random test file, raids it with the SRC codec via
 * {@link LocalEncodingManager}, and verifies the parity file is readable.
 *
 * @throws IOException on any filesystem failure outside the checked open
 */
@Test
public void testRaidFiles() throws IOException {
    DistributedFileSystem dfs = (DistributedFileSystem) getFileSystem();
    // try-with-resources: the original leaked the output stream whenever
    // write() threw before the explicit close().
    try (FSDataOutputStream stm = dfs.create(testFile)) {
        byte[] buffer = Util.randomBytes(seed, TEST_BLOCK_COUNT, DFS_TEST_BLOCK_SIZE);
        stm.write(buffer, 0, buffer.length);
    }

    LocalEncodingManager encodingManager = new LocalEncodingManager(conf);

    Codec codec = Util.getCodec(Util.Codecs.SRC);
    BaseEncodingManager.Statistics stats = new BaseEncodingManager.Statistics();
    assertTrue(encodingManager.doFileRaid(conf, testFile, parityFile, codec, stats, RaidUtils.NULL_PROGRESSABLE,
            1, 1));
    try {
        // Close immediately: the original left the opened input stream dangling.
        dfs.open(parityFile).close();
    } catch (IOException e) {
        // Include the cause so a failure is diagnosable from the test report.
        fail("Couldn't open parity file under given path: " + e);
    }
}

From source file:io.hops.erasure_coding.Util.java

License:Apache License

/**
 * Creates a file at {@code path} filled with deterministic pseudo-random data.
 *
 * @param dfs        filesystem to create the file on
 * @param path       destination path
 * @param seed       RNG seed, so the content is reproducible across runs
 * @param blockCount number of blocks of random data to generate
 * @param blockSize  size of each block in bytes
 * @throws IOException on any filesystem failure
 */
public static void createRandomFile(DistributedFileSystem dfs, Path path, long seed, int blockCount,
        int blockSize) throws IOException {
    // try-with-resources guarantees the stream is closed even when write()
    // throws — the original leaked the stream on failure.
    try (FSDataOutputStream out = dfs.create(path)) {
        byte[] buffer = Util.randomBytes(seed, blockCount, blockSize);
        out.write(buffer, 0, buffer.length);
    }
}

From source file:se.sics.gvod.stream.system.hops.SetupExperiment.java

License:Open Source License

/**
 * Sets up the experiment directory layout on a HopsFS cluster and seeds the
 * upload directory with a deterministic pseudo-random data file.
 *
 * @param args unused
 * @throws IOException                   on any filesystem failure
 * @throws HashUtil.HashBuilderException propagated from the declared signature
 */
public static void main(String[] args) throws IOException, HashUtil.HashBuilderException {
    String hopsURL = "bbc1.sics.se:26801";
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", hopsURL);
    // try-with-resources: the original only closed fs on the success path,
    // leaking the connection on any earlier exception.
    try (DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(conf)) {
        String path = "/experiment";
        if (!fs.isDirectory(new Path(path))) {
            fs.mkdirs(new Path(path));
        } else {
            // Directory already exists: wipe it so every run starts clean.
            fs.delete(new Path(path), true);
            fs.mkdirs(new Path(path));
        }
        String uploadDirPath = path + "/upload";
        fs.mkdirs(new Path(uploadDirPath));
        String downloadDirPath = path + "/download";
        fs.mkdirs(new Path(downloadDirPath));

        String dataFile = uploadDirPath + "/file";
        // Fixed seed keeps the generated file identical across runs.
        Random rand = new Random(1234);
        // Buffer hoisted out of the loop: nextBytes() fully overwrites it
        // every iteration, so reallocating per pass was pure garbage churn.
        byte[] data = new byte[1024];
        try (FSDataOutputStream out = fs.create(new Path(dataFile))) {
            for (int i = 0; i < fileSize / pieceSize; i++) {
                rand.nextBytes(data);
                out.write(data);
                out.flush();
            }
            System.err.println("created file - expected:" + fileSize + " created:" + out.size());
        } catch (IOException ex) {
            throw new RuntimeException(ex);
        }
    }
}