Example usage for org.apache.hadoop.hdfs MiniDFSCluster.Builder waitSafeMode

List of usage examples for org.apache.hadoop.hdfs MiniDFSCluster.Builder waitSafeMode

Introduction

On this page you can find example usage for org.apache.hadoop.hdfs MiniDFSCluster.Builder waitSafeMode.

Prototype

public Builder waitSafeMode(boolean val)

To view the source code for org.apache.hadoop.hdfs MiniDFSCluster.Builder waitSafeMode, click the Source Link.
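
A minimal standalone sketch of how waitSafeMode is typically used when building a MiniDFSCluster; the base directory path and node count below are illustrative, not taken from the examples on this page:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class WaitSafeModeExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new HdfsConfiguration();
        // Base directory for the cluster's data; an illustrative path.
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-example");

        // waitSafeMode(true) makes build() block until the NameNode has
        // left safe mode, so the returned cluster is immediately writable.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                .numDataNodes(1)
                .format(true)
                .waitSafeMode(true)
                .build();
        try {
            FileSystem fs = cluster.getFileSystem();
            System.out.println("Mini DFS up at " + fs.getUri());
        } finally {
            cluster.shutdown();
        }
    }
}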

Usage

From source file:org.apache.tajo.storage.raw.TestDirectRawFile.java

License:Apache License

@BeforeClass
public static void setUpClass() throws IOException, InterruptedException {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);

    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(new HdfsConfiguration(conf));
    builder.numDataNodes(1);
    builder.format(true);
    builder.manageNameDfsDirs(true);
    builder.manageDataDfsDirs(true);
    builder.waitSafeMode(true);
    cluster = builder.build();

    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    localFs = FileSystem.getLocal(new TajoConf());
}

From source file:org.apache.tajo.TajoTestingCluster.java

License:Apache License

/**
 * Start a minidfscluster.
 * Can only create one.
 * @param servers How many DNs to start.
 * @param dir Where to home your dfs cluster.
 * @param hosts hostnames DNs to run on.
 * @throws Exception
 * @see {@link #shutdownMiniDFSCluster()}
 * @return The mini dfs cluster created.
 * @throws java.io.IOException
 */
public MiniDFSCluster startMiniDFSCluster(int servers, File dir, final String hosts[]) throws IOException {

    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dir.toString());
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, false);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(new HdfsConfiguration(conf));
    builder.hosts(hosts);
    builder.numDataNodes(servers);
    builder.format(true);
    builder.manageNameDfsDirs(true);
    builder.manageDataDfsDirs(true);
    builder.waitSafeMode(true);
    this.dfsCluster = builder.build();

    // Set this just-started cluster as our filesystem.
    this.defaultFS = this.dfsCluster.getFileSystem();
    this.conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, defaultFS.getUri().toString());
    this.conf.setVar(TajoConf.ConfVars.ROOT_DIR, defaultFS.getUri() + "/tajo");
    isDFSRunning = true;
    return this.dfsCluster;
}
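
Assuming a TajoTestingCluster instance named util is already available, a caller might use this method roughly as follows; the base directory and the null hosts argument are illustrative, not taken from the Tajo test suite:

// Start a single-DataNode HDFS under a per-run base directory.
// startMiniDFSCluster() sets waitSafeMode(true), so build() returns
// only after the NameNode has left safe mode.
File baseDir = new File("target/test-data/dfs-" + System.nanoTime());
MiniDFSCluster dfsCluster = util.startMiniDFSCluster(1, baseDir, null);
try {
    FileSystem fs = dfsCluster.getFileSystem();
    // ... run tests against fs ...
} finally {
    dfsCluster.shutdown();
}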

From source file:org.apache.vxquery.xtest.MiniDFS.java

License:Apache License

public void startHDFS() throws IOException {

    FileSystem lfs = FileSystem.getLocal(new Configuration());
    JobConf conf = new JobConf();
    String PATH_TO_HADOOP_CONF = "src/test/resources/hadoop/conf";
    Path hdfs_conf = new Path(PATH_TO_HADOOP_CONF);
    if (!lfs.exists(hdfs_conf)) {
        PATH_TO_HADOOP_CONF = "vxquery-xtest/src/test/resources/hadoop/conf";
        hdfs_conf = new Path(PATH_TO_HADOOP_CONF);
        if (!lfs.exists(hdfs_conf)) {
            PATH_TO_HADOOP_CONF = "../vxquery-xtest/src/test/resources/hadoop/conf";
            hdfs_conf = new Path(PATH_TO_HADOOP_CONF);
        }
    }
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/core-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/mapred-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/hdfs-site.xml"));
    int numDataNodes = 1;
    int nameNodePort = 40000;

    // cleanup artifacts created on the local file system
    lfs.delete(new Path("build"), true);
    System.setProperty("hadoop.log.dir", "logs");
    MiniDFSCluster.Builder build = new MiniDFSCluster.Builder(conf);
    build.nameNodePort(nameNodePort);
    build.nameNodeHttpPort(nameNodePort + 34);
    build.numDataNodes(numDataNodes);
    build.checkExitOnShutdown(true);
    build.startupOption(StartupOption.REGULAR);
    build.format(true);
    build.waitSafeMode(true);
    dfsCluster = build.build();

    FileSystem dfs = FileSystem.get(conf);
    String DATA_PATH = "src/test/resources/TestSources/ghcnd";
    Path src = new Path(DATA_PATH);
    if (!lfs.exists(src)) {
        DATA_PATH = "vxquery-xtest/src/test/resources/TestSources/ghcnd";
        src = new Path(DATA_PATH);
        if (!lfs.exists(src)) {
            DATA_PATH = "../vxquery-xtest/src/test/resources/TestSources/ghcnd";
            src = new Path(DATA_PATH);
        }
    }
    dfs.mkdirs(new Path("/tmp"));
    Path dest = new Path("/tmp/vxquery-hdfs-test");
    dfs.copyFromLocalFile(src, dest);
    if (dfs.exists(dest)) {
        System.err.println("Test files copied to HDFS successfully");
    }
}
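
Setup methods like these are normally paired with a teardown that stops the mini cluster; a minimal sketch, assuming a static cluster field and JUnit 4 annotations as in the first example above:

@AfterClass
public static void tearDownClass() {
    // shutdown() stops the NameNode and all DataNodes and releases
    // the ports and local directories the cluster was using.
    if (cluster != null) {
        cluster.shutdown();
        cluster = null;
    }
}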