Example usage for org.apache.hadoop.hdfs MiniDFSCluster HDFS_MINIDFS_BASEDIR


Introduction

On this page you can find example usages of the org.apache.hadoop.hdfs.MiniDFSCluster field HDFS_MINIDFS_BASEDIR.

Prototype

public static final String HDFS_MINIDFS_BASEDIR = "hdfs.minidfs.basedir";

Document

Configuration option to set the data dir.
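
This key names the local directory under which MiniDFSCluster places its NameNode and DataNode storage; the examples below set it so that test data stays inside a temporary or build directory instead of the default location. As a minimal sketch of the pattern (the method name and the /tmp path are illustrative, not taken from the examples below):

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public static void runAgainstMiniCluster() throws Exception {
    Configuration conf = new Configuration();
    // Point the mini cluster at a disposable base directory (illustrative path).
    File baseDir = new File("/tmp/minidfs-test").getAbsoluteFile();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        // Exercise the cluster here, e.g. through cluster.getFileSystem().
    } finally {
        cluster.shutdown();
    }
}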

Usage

From source file: org.apache.flink.hdfstests.FileStateBackendTest.java

License: Apache License

@BeforeClass
public static void createHDFS() {
    try {
        TEMP_DIR = new File(ConfigConstants.DEFAULT_TASK_MANAGER_TMP_PATH, UUID.randomUUID().toString());

        Configuration hdConf = new Configuration();
        hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEMP_DIR.getAbsolutePath());
        MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
        HDFS_CLUSTER = builder.build();

        HDFS_ROOT_URI = "hdfs://" + HDFS_CLUSTER.getURI().getHost() + ":" + HDFS_CLUSTER.getNameNodePort()
                + "/";

        FS = FileSystem.get(new URI(HDFS_ROOT_URI));
    } catch (Exception e) {
        e.printStackTrace();
        fail("Could not create HDFS mini cluster " + e.getMessage());
    }
}
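
Each @BeforeClass setup on this page has a matching teardown that the excerpts omit; a hypothetical @AfterClass counterpart to the example above (reusing its HDFS_CLUSTER field) would be:

@AfterClass
public static void destroyHDFS() {
    // Hypothetical teardown; the excerpt above does not show it.
    if (HDFS_CLUSTER != null) {
        HDFS_CLUSTER.shutdown();
    }
}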

From source file: org.apache.flink.hdfstests.FsNegativeRunningJobsRegistryTest.java

License: Apache License

@BeforeClass
public static void createHDFS() throws Exception {
    final File tempDir = TEMP_DIR.newFolder();

    Configuration hdConf = new Configuration();
    hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tempDir.getAbsolutePath());

    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
    HDFS_CLUSTER = builder.build();

    HDFS_ROOT_PATH = new Path(
            "hdfs://" + HDFS_CLUSTER.getURI().getHost() + ":" + HDFS_CLUSTER.getNameNodePort() + "/");
}

From source file: org.apache.flink.hdfstests.HDFSTest.java

License: Apache License

@Before
public void createHDFS() {
    try {
        Configuration hdConf = new Configuration();

        File baseDir = new File("./target/hdfs/hdfsTest").getAbsoluteFile();
        FileUtil.fullyDelete(baseDir);
        hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
        MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
        hdfsCluster = builder.build();

        hdfsURI = "hdfs://" + hdfsCluster.getURI().getHost() + ":" + hdfsCluster.getNameNodePort() + "/";

        hdPath = new org.apache.hadoop.fs.Path("/test");
        hdfs = hdPath.getFileSystem(hdConf);
        FSDataOutputStream stream = hdfs.create(hdPath);
        for (int i = 0; i < 10; i++) {
            stream.write("Hello HDFS\n".getBytes());
        }
        stream.close();

    } catch (Throwable e) {
        e.printStackTrace();
        Assert.fail("Test failed " + e.getMessage());
    }
}

From source file: org.apache.flink.runtime.fs.hdfs.HadoopRecoverableWriterTest.java

License: Apache License

@BeforeClass
public static void createHDFS() throws Exception {
    final File baseDir = TEMP_FOLDER.newFolder();

    final Configuration hdConf = new Configuration();
    hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

    final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
    hdfsCluster = builder.build();

    final org.apache.hadoop.fs.FileSystem hdfs = hdfsCluster.getFileSystem();

    fileSystem = new HadoopFileSystem(hdfs);
    basePath = new Path(hdfs.getUri() + "/tests");
}

From source file: org.apache.flink.runtime.fs.hdfs.HdfsBehaviorTest.java

License: Apache License

@BeforeClass
public static void createHDFS() throws Exception {
    final File baseDir = TMP.newFolder();

    Configuration hdConf = new Configuration();
    hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
    hdfsCluster = builder.build();

    org.apache.hadoop.fs.FileSystem hdfs = hdfsCluster.getFileSystem();
    fs = new HadoopFileSystem(hdfs);

    basePath = new Path(hdfs.getUri().toString() + "/tests");
}

From source file: org.apache.flink.streaming.connectors.fs.bucketing.BucketingSinkFaultTolerance2ITCase.java

License: Apache License

@BeforeClass
public static void createHDFS() throws IOException {
    Configuration conf = new Configuration();

    File dataDir = tempFolder.newFolder();

    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();

    dfs = hdfsCluster.getFileSystem();

    outPath = "hdfs://"
            + NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
            + "/string-non-rolling-out-no-checkpoint";
}

From source file: org.apache.flink.streaming.connectors.fs.bucketing.BucketingSinkFaultToleranceITCase.java

License: Apache License

@BeforeClass
public static void createHDFS() throws IOException {
    Configuration conf = new Configuration();

    File dataDir = tempFolder.newFolder();

    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();

    dfs = hdfsCluster.getFileSystem();

    outPath = "hdfs://"
            + NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
            + "/string-non-rolling-out";
}

From source file: org.apache.flink.streaming.connectors.fs.bucketing.BucketingSinkTest.java

License: Apache License

@BeforeClass
public static void createHDFS() throws IOException {
    Configuration conf = new Configuration();

    File dataDir = tempFolder.newFolder();

    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();

    dfs = hdfsCluster.getFileSystem();

    hdfsURI = "hdfs://"
            + NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
            + "/";
}

From source file: org.apache.flink.streaming.connectors.fs.RollingSinkSecuredITCase.java

License: Apache License

@BeforeClass
public static void startSecureCluster() throws Exception {

    skipIfHadoopVersionIsNotAppropriate();

    LOG.info("starting secure cluster environment for testing");

    dataDir = tempFolder.newFolder();

    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dataDir.getAbsolutePath());

    SecureTestEnvironment.prepare(tempFolder);

    populateSecureConfigurations();

    Configuration flinkConfig = new Configuration();
    flinkConfig.setString(SecurityOptions.KERBEROS_LOGIN_KEYTAB, SecureTestEnvironment.getTestKeytab());
    flinkConfig.setString(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL,
            SecureTestEnvironment.getHadoopServicePrincipal());

    SecurityUtils.SecurityConfiguration ctx = new SecurityUtils.SecurityConfiguration(flinkConfig, conf);
    try {
        TestingSecurityContext.install(ctx, SecureTestEnvironment.getClientSecurityConfigurationMap());
    } catch (Exception e) {
        throw new RuntimeException("Exception occurred while setting up secure test context.", e);
    }

    File hdfsSiteXML = new File(dataDir.getAbsolutePath() + "/hdfs-site.xml");

    FileWriter writer = new FileWriter(hdfsSiteXML);
    conf.writeXml(writer);
    writer.flush();
    writer.close();

    Map<String, String> map = new HashMap<String, String>(System.getenv());
    map.put("HADOOP_CONF_DIR", hdfsSiteXML.getParentFile().getAbsolutePath());
    TestBaseUtils.setEnv(map);

    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    builder.checkDataNodeAddrConfig(true);
    builder.checkDataNodeHostConfig(true);
    hdfsCluster = builder.build();

    dfs = hdfsCluster.getFileSystem();

    hdfsURI = "hdfs://"
            + NetUtils.hostAndPortToUrlString(hdfsCluster.getURI().getHost(), hdfsCluster.getNameNodePort())
            + "/";

    startSecureFlinkClusterWithRecoveryModeEnabled();
}
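
A note on this example's design: serializing the effective configuration to hdfs-site.xml and pointing HADOOP_CONF_DIR at its directory makes the secure settings visible to client code that loads its Hadoop configuration from the environment rather than receiving the Configuration object directly.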

From source file: org.apache.flink.tachyon.FileStateHandleTest.java

License: Apache License

@Before
public void createHDFS() {
    try {
        Configuration hdConf = new Configuration();

        File baseDir = new File("./target/hdfs/filestatehandletest").getAbsoluteFile();
        FileUtil.fullyDelete(baseDir);
        hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
        MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
        hdfsCluster = builder.build();

        hdfsURI = "hdfs://" + hdfsCluster.getURI().getHost() + ":" + hdfsCluster.getNameNodePort() + "/";

        hdPath = new org.apache.hadoop.fs.Path("/StateHandleTest");
        hdfs = hdPath.getFileSystem(hdConf);
        hdfs.mkdirs(hdPath);

    } catch (Throwable e) {
        e.printStackTrace();
        Assert.fail("Test failed " + e.getMessage());
    }
}