Example usage for org.apache.hadoop.hdfs MiniDFSCluster HDFS_MINIDFS_BASEDIR

Introduction

This page collects example usages of the org.apache.hadoop.hdfs MiniDFSCluster field HDFS_MINIDFS_BASEDIR, drawn from several open source projects.

Prototype

public static final String HDFS_MINIDFS_BASEDIR = "hdfs.minidfs.basedir";

Document

Configuration option to set the data dir: hdfs.minidfs.basedir. Tests set it so the mini-cluster stores its NameNode and DataNode data under a directory they control, rather than the default location derived from the test.build.data system property.
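
Below is a minimal sketch of the pattern the examples on this page share; the directory name is illustrative and not taken from any of the source files:

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Point the mini-cluster's storage at a throwaway directory instead of the
// default derived from the test.build.data system property, then start it.
File baseDir = new File("target/minidfs-example").getAbsoluteFile();
Configuration conf = new Configuration();
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
    FileSystem fs = cluster.getFileSystem();
    // ... exercise the mini HDFS instance through fs ...
} finally {
    cluster.shutdown();
}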

Usage

From source file: org.apache.coheigea.bigdata.hdfs.ranger.HDFSRangerTest.java

License: Apache License

@org.junit.BeforeClass
public static void setup() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    conf.set("dfs.namenode.inode.attributes.provider.class", RangerHdfsAuthorizer.class.getName());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();
    defaultFs = conf.get("fs.defaultFS");
}
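
The setup methods on this page generally omit the matching teardown; a typical @AfterClass counterpart (a sketch, not part of the source file) simply stops the cluster:

@org.junit.AfterClass
public static void cleanup() throws Exception {
    // Shut down the NameNode and DataNodes and release their ports.
    if (hdfsCluster != null) {
        hdfsCluster.shutdown();
    }
}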

From source file: org.apache.coheigea.camel.hdfs.HDFSTest.java

License: Apache License

@org.junit.BeforeClass
public static void setup() throws Exception {
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    conf.set("fs.defaultFS", "hdfs://localhost:43678");
    conf.set("dfs.namenode.http-address", "hdfs://localhost:43678");
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();
    defaultFs = conf.get("fs.defaultFS");

    FileSystem fileSystem = hdfsCluster.getFileSystem();

    // Write a file
    final Path file = new Path("/tmp/tmpdir/data-file");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();

    // fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));

    // Set the port so that it's available to the Camel context as a system property
    System.setProperty("port", defaultFs.substring(defaultFs.lastIndexOf(':') + 1));
}

From source file: org.apache.druid.indexer.HdfsClasspathSetupTest.java

License: Apache License

@BeforeClass
public static void setupStatic() throws IOException {
    hdfsTmpDir = File.createTempFile("hdfsClasspathSetupTest", "dir");
    if (!hdfsTmpDir.delete()) {
        throw new IOE("Unable to delete hdfsTmpDir [%s]", hdfsTmpDir.getAbsolutePath());
    }
    conf = new Configuration(true);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsTmpDir.getAbsolutePath());
    miniCluster = new MiniDFSCluster.Builder(conf).build();
}

From source file: org.apache.druid.segment.loading.HdfsFileTimestampVersionFinderTest.java

License: Apache License

@BeforeClass
public static void setupStatic() throws IOException {
    hdfsTmpDir = File.createTempFile("hdfsHandlerTest", "dir");
    if (!hdfsTmpDir.delete()) {
        throw new IOE("Unable to delete hdfsTmpDir [%s]", hdfsTmpDir.getAbsolutePath());
    }
    conf = new Configuration(true);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsTmpDir.getAbsolutePath());
    miniCluster = new MiniDFSCluster.Builder(conf).build();

    final File tmpFile = File.createTempFile("hdfsHandlerTest", ".data");
    tmpFile.delete();
    try {
        Files.copy(new ByteArrayInputStream(pathByteContents), tmpFile.toPath());
        try (OutputStream stream = miniCluster.getFileSystem().create(filePath)) {
            Files.copy(tmpFile.toPath(), stream);
        }
    } finally {
        tmpFile.delete();
    }
}

From source file: org.apache.falcon.cluster.util.MiniHdfsClusterUtil.java

License: Apache License

public static MiniDFSCluster initMiniDfs(int port, File baseDir) throws Exception {
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    builder.nameNodePort(port);
    return builder.build();
}
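
A caller might use this helper as follows; the port number and temporary directory are illustrative assumptions, not taken from the Falcon source:

// Hypothetical caller: start a mini-cluster on a fixed NameNode port,
// then shut it down when the test is finished.
File baseDir = java.nio.file.Files.createTempDirectory("minidfs").toFile();
MiniDFSCluster cluster = MiniHdfsClusterUtil.initMiniDfs(54310, baseDir);
try {
    FileSystem fs = cluster.getFileSystem();
    // ... run assertions against fs ...
} finally {
    cluster.shutdown();
}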

From source file: org.apache.flink.hdfstests.ContinuousFileMonitoringFunctionITCase.java

License: Apache License

@Before
public void createHDFS() {
    try {
        baseDir = new File("./target/hdfs/hdfsTesting").getAbsoluteFile();
        FileUtil.fullyDelete(baseDir);

        org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
        hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
        hdConf.set("dfs.block.size", String.valueOf(1048576)); // this is the minimum we can set.

        MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
        hdfsCluster = builder.build();

        hdfsURI = "hdfs://" + hdfsCluster.getURI().getHost() + ":" + hdfsCluster.getNameNodePort() + "/";
        hdfs = new org.apache.hadoop.fs.Path(hdfsURI).getFileSystem(hdConf);

    } catch (Throwable e) {
        e.printStackTrace();
        Assert.fail("Test failed " + e.getMessage());
    }
}

From source file: org.apache.flink.hdfstests.ContinuousFileMonitoringTest.java

License: Apache License

@BeforeClass
public static void createHDFS() {
    try {
        baseDir = new File("./target/hdfs/hdfsTesting").getAbsoluteFile();
        FileUtil.fullyDelete(baseDir);

        org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
        hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
        hdConf.set("dfs.block.size", String.valueOf(1048576)); // this is the minimum we can set.

        MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
        hdfsCluster = builder.build();

        hdfsURI = "hdfs://" + hdfsCluster.getURI().getHost() + ":" + hdfsCluster.getNameNodePort() + "/";
        hdfs = new org.apache.hadoop.fs.Path(hdfsURI).getFileSystem(hdConf);

    } catch (Throwable e) {
        e.printStackTrace();
        Assert.fail("Test failed " + e.getMessage());
    }
}

From source file: org.apache.flink.hdfstests.ContinuousFileProcessingFrom11MigrationTest.java

License: Apache License

@BeforeClass
public static void createHDFS() {
    try {
        baseDir = tempFolder.newFolder().getAbsoluteFile();
        FileUtil.fullyDelete(baseDir);

        Configuration hdConf = new Configuration();
        hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
        hdConf.set("dfs.block.size", String.valueOf(1048576)); // this is the minimum we can set.

        MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
        hdfsCluster = builder.build();

        hdfsURI = "hdfs://" + hdfsCluster.getURI().getHost() + ":" + hdfsCluster.getNameNodePort() + "/";
        hdfs = new org.apache.hadoop.fs.Path(hdfsURI).getFileSystem(hdConf);

    } catch (Throwable e) {
        e.printStackTrace();
        Assert.fail("Test failed " + e.getMessage());
    }
}

From source file: org.apache.flink.hdfstests.ContinuousFileProcessingTest.java

License: Apache License

@BeforeClass
public static void createHDFS() {
    try {
        File hdfsDir = tempFolder.newFolder();

        org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
        hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsDir.getAbsolutePath());
        hdConf.set("dfs.block.size", String.valueOf(1048576)); // this is the minimum we can set.

        MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
        hdfsCluster = builder.build();

        hdfsURI = "hdfs://" + hdfsCluster.getURI().getHost() + ":" + hdfsCluster.getNameNodePort() + "/";
        hdfs = new org.apache.hadoop.fs.Path(hdfsURI).getFileSystem(hdConf);

    } catch (Throwable e) {
        e.printStackTrace();
        Assert.fail("Test failed " + e.getMessage());
    }
}

From source file: org.apache.flink.hdfstests.DistributedCacheDfsTest.java

License: Apache License

@BeforeClass
public static void setup() throws Exception {
    File dataDir = TEMP_FOLDER.newFolder();

    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();

    String hdfsURI = "hdfs://"
            + NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
            + "/";

    FileSystem dfs = FileSystem.get(new URI(hdfsURI));
    testFile = writeFile(dfs, dfs.getHomeDirectory(), "testFile");

    testDir = new Path(dfs.getHomeDirectory(), "testDir");
    dfs.mkdirs(testDir);
    writeFile(dfs, testDir, "testFile1");
    writeFile(dfs, testDir, "testFile2");
}
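
The writeFile helper referenced above is not part of this excerpt; a plausible implementation (an assumption, not the actual Flink code) would simply create a small file at the given location and return its path, using the same Hadoop imports as the surrounding test:

// Hypothetical stand-in for the writeFile helper used in the setup above.
private static Path writeFile(FileSystem fs, Path parent, String name) throws IOException {
    Path file = new Path(parent, name);
    try (FSDataOutputStream out = fs.create(file)) {
        out.writeUTF("test data for " + name);
    }
    return file;
}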