Example usage for org.apache.hadoop.hdfs MiniDFSCluster.Builder numDataNodes

List of usage examples for org.apache.hadoop.hdfs MiniDFSCluster.Builder numDataNodes

Introduction

In this page you can find the example usage for org.apache.hadoop.hdfs MiniDFSCluster.Builder numDataNodes.

Prototype

int numDataNodes

To view the source code for org.apache.hadoop.hdfs MiniDFSCluster.Builder numDataNodes, click the Source Link below.

Click Source Link

Usage

From source file:edu.uci.ics.asterix.test.runtime.HDFSCluster.java

License:Apache License

/**
 * Instantiates the (Mini) DFS cluster with the configured number of datanodes.
 * Once the cluster is up, test data is loaded into HDFS.
 * Called prior to running the Runtime test suite.
 */
public void setup() throws Exception {
    // Pull the Hadoop site files into the shared Configuration, in the same
    // order the cluster expects them.
    for (String site : new String[] { "core-site.xml", "mapred-site.xml", "hdfs-site.xml" }) {
        conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/" + site));
    }
    cleanupLocal();
    // The pre-2.x MiniDFSCluster constructor is deprecated; use the fluent Builder.
    dfsCluster = new MiniDFSCluster.Builder(conf)
            .nameNodePort(nameNodePort)
            .numDataNodes(numDataNodes)
            .startupOption(StartupOption.REGULAR)
            .build();
    dfs = FileSystem.get(conf);
    loadData();
}

From source file:edu.uci.ics.hyracks.hdfs.MiniDFSClusterFactory.java

License:Apache License

/**
 * Creates and starts a mini DFS cluster with one datanode per node controller.
 *
 * @param conf       the Hadoop configuration the cluster should use
 * @param numberOfNC the number of datanodes to start
 * @return the running {@link MiniDFSCluster}
 * @throws HyracksDataException if the cluster fails to start for any reason
 */
public MiniDFSCluster getMiniDFSCluster(Configuration conf, int numberOfNC) throws HyracksDataException {
    try {
        return new MiniDFSCluster.Builder(conf).numDataNodes(numberOfNC).build();
    } catch (Exception e) {
        // Surface any startup failure as the Hyracks-specific checked exception,
        // preserving the original cause.
        throw new HyracksDataException(e);
    }
}

From source file:org.apache.asterix.test.runtime.HDFSCluster.java

License:Apache License

/**
 * Boots the mini DFS cluster and loads the test data found under basePath.
 *
 * @param basePath root directory that contains the Hadoop conf and test data
 * @throws Exception if the cluster cannot be started or data loading fails
 */
public void setup(String basePath) throws Exception {
    // Register the three Hadoop site files relative to the supplied base path.
    for (String site : new String[] { "core-site.xml", "mapred-site.xml", "hdfs-site.xml" }) {
        conf.addResource(new Path(basePath + PATH_TO_HADOOP_CONF + "/" + site));
    }
    cleanupLocal();
    setLoggingLevel(Level.WARN);
    dfsCluster = new MiniDFSCluster.Builder(conf)
            .nameNodePort(nameNodePort)
            .numDataNodes(numDataNodes)
            .startupOption(StartupOption.REGULAR)
            .build();
    dfs = FileSystem.get(conf);
    loadData(basePath);
}

From source file:org.apache.tajo.storage.raw.TestDirectRawFile.java

License:Apache License

/**
 * One-time setup: starts a freshly formatted single-datanode mini DFS cluster
 * rooted in a per-run random directory, then captures the DFS and local
 * filesystems for the tests.
 */
@BeforeClass
public static void setUpClass() throws IOException, InterruptedException {
    final Configuration conf = new HdfsConfiguration();
    // A random suffix keeps repeated runs from colliding on the same base dir.
    final String testDataPath = TEST_PATH + "/" + UUID.randomUUID();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);

    // Format on startup and block until the namenode leaves safe mode.
    cluster = new MiniDFSCluster.Builder(new HdfsConfiguration(conf))
            .numDataNodes(1)
            .format(true)
            .manageNameDfsDirs(true)
            .manageDataDfsDirs(true)
            .waitSafeMode(true)
            .build();

    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    localFs = FileSystem.getLocal(new TajoConf());
}

From source file:org.apache.tajo.TajoTestingCluster.java

License:Apache License

/**
 * Starts a mini DFS cluster. Only one may be created.
 *
 * @param servers number of datanodes to start
 * @param dir home directory for the DFS cluster
 * @param hosts hostnames for the datanodes to run on
 * @return the mini DFS cluster that was created
 * @throws IOException if the cluster fails to start
 * @see #shutdownMiniDFSCluster()
 */
public MiniDFSCluster startMiniDFSCluster(int servers, File dir, final String hosts[]) throws IOException {

    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dir.toString());
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, false);
    this.dfsCluster = new MiniDFSCluster.Builder(new HdfsConfiguration(conf))
            .hosts(hosts)
            .numDataNodes(servers)
            .format(true)
            .manageNameDfsDirs(true)
            .manageDataDfsDirs(true)
            .waitSafeMode(true)
            .build();

    // Point this harness' default filesystem at the just-started cluster.
    this.defaultFS = this.dfsCluster.getFileSystem();
    this.conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, defaultFS.getUri().toString());
    this.conf.setVar(TajoConf.ConfVars.ROOT_DIR, defaultFS.getUri() + "/tajo");
    isDFSRunning = true;
    return this.dfsCluster;
}

From source file:org.apache.vxquery.xtest.MiniDFS.java

License:Apache License

/**
 * Starts a local mini HDFS cluster and copies the GHCND test data into it
 * under /tmp/vxquery-hdfs-test.
 *
 * The Hadoop conf directory and the test-data directory are both resolved
 * relative to whichever directory the build happens to run from (module dir,
 * aggregator dir, or a sibling), so the same duplicated fallback logic is
 * factored into {@link #firstExistingPath(FileSystem, String...)}.
 *
 * @throws IOException if the cluster cannot be started or files cannot be copied
 */
public void startHDFS() throws IOException {

    FileSystem lfs = FileSystem.getLocal(new Configuration());
    JobConf conf = new JobConf();
    String hadoopConfDir = firstExistingPath(lfs,
            "src/test/resources/hadoop/conf",
            "vxquery-xtest/src/test/resources/hadoop/conf",
            "../vxquery-xtest/src/test/resources/hadoop/conf");
    conf.addResource(new Path(hadoopConfDir + "/core-site.xml"));
    conf.addResource(new Path(hadoopConfDir + "/mapred-site.xml"));
    conf.addResource(new Path(hadoopConfDir + "/hdfs-site.xml"));
    int numDataNodes = 1;
    int nameNodePort = 40000;

    // cleanup artifacts created on the local file system by earlier runs
    lfs.delete(new Path("build"), true);
    System.setProperty("hadoop.log.dir", "logs");
    dfsCluster = new MiniDFSCluster.Builder(conf)
            .nameNodePort(nameNodePort)
            .nameNodeHttpPort(nameNodePort + 34)
            .numDataNodes(numDataNodes)
            .checkExitOnShutdown(true)
            .startupOption(StartupOption.REGULAR)
            .format(true)
            .waitSafeMode(true)
            .build();

    FileSystem dfs = FileSystem.get(conf);
    String dataPath = firstExistingPath(lfs,
            "src/test/resources/TestSources/ghcnd",
            "vxquery-xtest/src/test/resources/TestSources/ghcnd",
            "../vxquery-xtest/src/test/resources/TestSources/ghcnd");
    Path src = new Path(dataPath);
    dfs.mkdirs(new Path("/tmp"));
    Path dest = new Path("/tmp/vxquery-hdfs-test");
    dfs.copyFromLocalFile(src, dest);
    if (dfs.exists(dest)) {
        System.err.println("Test files copied to HDFS successfully");
    }
}

/**
 * Returns the first candidate path that exists on the given filesystem.
 * When none exist, returns the last candidate unchecked — this matches the
 * original fallback behavior of trying progressively higher directories and
 * settling on the final guess.
 *
 * @param fs         filesystem used for the existence checks
 * @param candidates candidate paths, most-specific first
 * @return the first existing candidate, or the last candidate if none exist
 * @throws IOException if an existence check fails
 */
private static String firstExistingPath(FileSystem fs, String... candidates) throws IOException {
    for (String candidate : candidates) {
        if (fs.exists(new Path(candidate))) {
            return candidate;
        }
    }
    return candidates[candidates.length - 1];
}

From source file:org.zuinnote.hadoop.office.example.MapReduceExcelInputIntegrationTest.java

License:Apache License

/**
 * One-time setup for the integration tests: boots an HDFS MiniCluster rooted
 * in a fresh temporary directory, then starts a MiniMRYarnCluster on top of it.
 * A JVM shutdown hook deletes the temporary directory tree on exit.
 *
 * @throws IOException if the temp directory or HDFS cluster cannot be created
 */
@BeforeAll
public static void oneTimeSetUp() throws IOException {
    // Create temporary directory for HDFS base and shutdownhook 
    // create temp directory
    tmpPath = Files.createTempDirectory(tmpPrefix);
    // create shutdown hook to remove temp files (=HDFS MiniCluster) after shutdown, may need to rethink to avoid many threads are created
    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                // Depth-first walk: files are deleted first, then each directory
                // once it is empty (postVisitDirectory).
                Files.walkFileTree(tmpPath, new SimpleFileVisitor<java.nio.file.Path>() {

                    @Override
                    public FileVisitResult visitFile(java.nio.file.Path file, BasicFileAttributes attrs)
                            throws IOException {
                        Files.delete(file);
                        return FileVisitResult.CONTINUE;
                    }

                    @Override
                    public FileVisitResult postVisitDirectory(java.nio.file.Path dir, IOException e)
                            throws IOException {
                        if (e == null) {
                            Files.delete(dir);
                            return FileVisitResult.CONTINUE;
                        }
                        // Propagate the traversal error instead of swallowing it.
                        throw e;
                    }
                });
            } catch (IOException e) {
                throw new RuntimeException(
                        "Error temporary files in following path could not be deleted " + tmpPath, e);
            }
        }
    }));
    // Create Configuration
    Configuration conf = new Configuration();
    // create HDFS cluster
    File baseDir = new File(tmpPath.toString()).getAbsoluteFile();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    dfsCluster = builder.numDataNodes(NOOFDATANODES).build();
    // create Yarn cluster
    // NOTE(review): clusterConf is never read afterwards; constructing
    // YarnConfiguration may still matter for registering yarn-default.xml /
    // yarn-site.xml as default resources before the conf lookups below —
    // confirm before removing this line.
    YarnConfiguration clusterConf = new YarnConfiguration(conf);
    // fs.defaultFS must point at the just-started HDFS cluster so the MR jobs
    // submitted by the tests resolve paths against it.
    conf.set("fs.defaultFS", dfsCluster.getFileSystem().getUri().toString());
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    miniCluster = new MiniMRYarnCluster(CLUSTERNAME, NOOFNODEMANAGERS, STARTTIMELINESERVER);
    miniCluster.init(conf);
    miniCluster.start();
}