Example usage for org.apache.hadoop.hdfs MiniDFSCluster MiniDFSCluster

Introduction

On this page you can find example usages of the org.apache.hadoop.hdfs.MiniDFSCluster constructor MiniDFSCluster(Configuration, int, boolean, String[]). The constructor is deprecated, but it still appears widely in test code, as the examples below show.

Prototype

@Deprecated 
public MiniDFSCluster(Configuration conf, int numDataNodes, boolean format, String[] racks) throws IOException 

Document

Modify the config and start up the servers.
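
A minimal, self-contained sketch of this constructor in action (the base directory, the /tmp path, and the final shutdown are illustrative assumptions, not taken from any particular example below):

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDFSClusterExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Keep the mini cluster's name node and data node directories under
        // a local test directory (illustrative path).
        conf.set("hdfs.minidfs.basedir", new File("target/hdfs").getAbsolutePath());

        // One data node, format the file system on startup, default rack
        // assignment (null racks).
        MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
        try {
            FileSystem fs = cluster.getFileSystem();
            fs.mkdirs(new Path("/tmp"));
            System.out.println("NameNode listening on port " + cluster.getNameNodePort());
        } finally {
            cluster.shutdown();
        }
    }
}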

Usage

From source file:org.apache.sentry.binding.solr.HdfsTestUtil.java

License:Apache License

public static MiniDFSCluster setupClass(String dataDir) throws Exception {
    File dir = new File(dataDir);
    dir.mkdirs();

    savedLocale = Locale.getDefault();
    // TODO: we HACK around HADOOP-9643
    Locale.setDefault(Locale.ENGLISH);

    int dataNodes = 2;

    Configuration conf = new Configuration();
    conf.set("dfs.block.access.token.enable", "false");
    conf.set("dfs.permissions.enabled", "false");
    conf.set("hadoop.security.authentication", "simple");
    conf.set("hdfs.minidfs.basedir", dir.getAbsolutePath() + File.separator + "hdfsBaseDir");
    conf.set("dfs.namenode.name.dir", dir.getAbsolutePath() + File.separator + "nameNodeNameDir");

    System.setProperty("test.build.data",
            dir.getAbsolutePath() + File.separator + "hdfs" + File.separator + "build");
    System.setProperty("test.cache.data",
            dir.getAbsolutePath() + File.separator + "hdfs" + File.separator + "cache");
    System.setProperty("solr.lock.type", "hdfs");

    MiniDFSCluster dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);

    return dfsCluster;
}
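
This setup leaves cleanup to the caller. A plausible teardown counterpart (a sketch only, assuming the savedLocale field saved above; the teardownClass method itself is hypothetical and not part of the source file):

public static void teardownClass(MiniDFSCluster dfsCluster) throws Exception {
    // Shut the mini cluster down, then undo the global state the setup touched.
    if (dfsCluster != null) {
        dfsCluster.shutdown();
    }
    System.clearProperty("test.build.data");
    System.clearProperty("test.cache.data");
    System.clearProperty("solr.lock.type");
    // Restore the locale that was changed to work around HADOOP-9643.
    Locale.setDefault(savedLocale);
}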

From source file:org.apache.sentry.tests.e2e.solr.HdfsTestUtil.java

License:Apache License

public static MiniDFSCluster setupClass(String dataDir) throws Exception {
    LuceneTestCase.assumeFalse("HDFS tests were disabled by -Dtests.disableHdfs",
            Boolean.parseBoolean(System.getProperty("tests.disableHdfs", "false")));
    File dir = new File(dataDir);
    dir.mkdirs();

    savedLocale = Locale.getDefault();
    // TODO: we HACK around HADOOP-9643
    Locale.setDefault(Locale.ENGLISH);

    int dataNodes = 2;

    Configuration conf = new Configuration();
    conf.set("dfs.block.access.token.enable", "false");
    conf.set("dfs.permissions.enabled", "false");
    conf.set("hadoop.security.authentication", "simple");
    conf.set("hdfs.minidfs.basedir", dir.getAbsolutePath() + File.separator + "hdfsBaseDir");
    conf.set("dfs.namenode.name.dir", dir.getAbsolutePath() + File.separator + "nameNodeNameDir");

    System.setProperty("test.build.data",
            dir.getAbsolutePath() + File.separator + "hdfs" + File.separator + "build");
    System.setProperty("test.cache.data",
            dir.getAbsolutePath() + File.separator + "hdfs" + File.separator + "cache");
    System.setProperty("solr.lock.type", "hdfs");

    MiniDFSCluster dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);

    SolrTestCaseJ4.useFactory("org.apache.solr.core.HdfsDirectoryFactory");

    return dfsCluster;
}

From source file:org.apache.solr.hadoop.MorphlineBasicMiniMRTest.java

License:Apache License

@BeforeClass
public static void setupClass() throws Exception {
    if (System.getProperty("hadoop.log.dir") == null) {
        System.setProperty("hadoop.log.dir", "target");
    }
    int taskTrackers = 2;
    int dataNodes = 2;
    //    String proxyUser = System.getProperty("user.name");
    //    String proxyGroup = "g";
    //    StringBuilder sb = new StringBuilder();
    //    sb.append("127.0.0.1,localhost");
    //    for (InetAddress i : InetAddress.getAllByName(InetAddress.getLocalHost().getHostName())) {
    //      sb.append(",").append(i.getCanonicalHostName());
    //    }

    System.setProperty("solr.hdfs.blockcache.enabled", "false");

    JobConf conf = new JobConf();
    conf.set("dfs.block.access.token.enable", "false");
    conf.set("dfs.permissions", "true");
    conf.set("hadoop.security.authentication", "simple");

    dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
    FileSystem fileSystem = dfsCluster.getFileSystem();
    fileSystem.mkdirs(new Path("/tmp"));
    fileSystem.mkdirs(new Path("/user"));
    fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
    fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
    fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
    fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
    String nnURI = fileSystem.getUri().toString();
    int numDirs = 1;
    String[] racks = null;
    String[] hosts = null;

    mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
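
The matching @AfterClass teardown is not shown here; a sketch of one (hypothetical, using the static mrCluster and dfsCluster fields assigned above) would stop the MapReduce cluster before the HDFS cluster it writes to:

@AfterClass
public static void teardownClass() throws Exception {
    // Stop MapReduce first: its job tracker and task trackers hold
    // references into the file system below.
    if (mrCluster != null) {
        mrCluster.shutdown();
        mrCluster = null;
    }
    if (dfsCluster != null) {
        dfsCluster.shutdown();
        dfsCluster = null;
    }
}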

From source file:org.apache.solr.hadoop.MorphlineGoLiveMiniMRTest.java

License:Apache License

@BeforeClass
public static void setupClass() throws Exception {
    //    if (isYarn()) {
    //      org.junit.Assume.assumeTrue(false); // ignore test on Yarn until CDH-10420 is fixed
    //    }
    if (System.getProperty("hadoop.log.dir") == null) {
        System.setProperty("hadoop.log.dir", "target");
    }
    int taskTrackers = 2;
    int dataNodes = 2;

    System.setProperty("solr.hdfs.blockcache.enabled", "false");

    JobConf conf = new JobConf();
    conf.set("dfs.block.access.token.enable", "false");
    conf.set("dfs.permissions", "true");
    conf.set("hadoop.security.authentication", "simple");

    createTempDir();
    System.setProperty("test.build.data", dataDir + File.separator + "hdfs" + File.separator + "build");
    System.setProperty("test.cache.data", dataDir + File.separator + "hdfs" + File.separator + "cache");

    dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
    FileSystem fileSystem = dfsCluster.getFileSystem();
    fileSystem.mkdirs(new Path("/tmp"));
    fileSystem.mkdirs(new Path("/user"));
    fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
    fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
    fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
    fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
    String nnURI = fileSystem.getUri().toString();
    int numDirs = 1;
    String[] racks = null;
    String[] hosts = null;

    mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}

From source file:org.apache.sqoop.test.hadoop.HadoopMiniClusterRunner.java

License:Apache License

@SuppressWarnings("deprecation")
@Override
public void start() throws Exception {
    System.setProperty("test.build.data", getDataDir());
    LOG.info("test.build.data set to: " + getDataDir());

    System.setProperty("hadoop.log.dir", getLogDir());
    LOG.info("log dir set to: " + getLogDir());

    // Start DFS server
    LOG.info("Starting DFS cluster...");
    dfsCluster = new MiniDFSCluster(config, 1, true, null);
    if (dfsCluster.isClusterUp()) {
        LOG.info("Started DFS cluster on port: " + dfsCluster.getNameNodePort());
    } else {
        LOG.error("Could not start DFS cluster");
    }

    // Start MR server
    LOG.info("Starting MR cluster");
    mrCluster = new MiniMRCluster(0, 0, 1, dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null,
            new JobConf(config));
    LOG.info("Started MR cluster");
    config = prepareConfiguration(mrCluster.createJobConf());
}

From source file:org.datanucleus.test.BaseTest.java

License:Open Source License

@BeforeClass
public static void beforeClass() throws Exception {
    conf = new HBaseConfiguration();
    dfsCluster = new MiniDFSCluster(conf, 2, true, (String[]) null);

    // mangle the conf so that the fs parameter points to the minidfs we
    // just started up
    final FileSystem filesystem = dfsCluster.getFileSystem();
    conf.set("fs.default.name", filesystem.getUri().toString());
    final Path parentdir = filesystem.getHomeDirectory();
    conf.set(HConstants.HBASE_DIR, parentdir.toString());
    filesystem.mkdirs(parentdir);
    FSUtils.setVersion(filesystem, parentdir);

    preHBaseClusterSetup();
    hBaseClusterSetup();
    postHBaseClusterSetup();

    pm.set(PMF.get().getPersistenceManager());
}

From source file:org.elasticsearch.plugin.hadoop.hdfs.MiniHDFSCluster.java

License:Apache License

@SuppressForbidden(reason = "Hadoop is messy")
public static void main(String[] args) throws Exception {
    FileUtil.fullyDelete(new File(System.getProperty("test.build.data", "build/test/data"), "dfs/"));
    // MiniHadoopClusterManager.main(new String[] { "-nomr" });
    Configuration cfg = new Configuration();
    cfg.set(DataNode.DATA_DIR_PERMISSION_KEY, "666");
    cfg.set("dfs.replication", "0");
    MiniDFSCluster dfsCluster = new MiniDFSCluster(cfg, 1, true, null);
    FileSystem fs = dfsCluster.getFileSystem();
    System.out.println(fs.getClass());
    System.out.println(fs.getUri());
    System.out.println(dfsCluster.getHftpFileSystem().getClass());

    // dfsCluster.shutdown();
}

From source file:org.goldenorb.io.checkpoint.CheckPointDataTest.java

License:Apache License

/**
 * Set up the MiniDFSCluster.
 */
@BeforeClass
public static void setUpCluster() throws Exception {
    Configuration conf = new Configuration(true);
    cluster = new MiniDFSCluster(conf, 1, true, null);
    fs = cluster.getFileSystem();
}

From source file:org.goldenorb.io.TestInputSplitAllocatorDFS.java

License:Apache License

@BeforeClass
public static void setUpCluster() throws Exception {
    Configuration conf = new Configuration(true);
    conf.set("dfs.block.size", "16384");
    cluster = new MiniDFSCluster(conf, 3, true, null);
    fs = cluster.getFileSystem();
}

From source file:org.goldenorb.OrbRunnerTest.java

License:Apache License

@BeforeClass
public static void setUpCluster() throws Exception {
    orbConf.setOrbClusterName("TestOrbCluster");
    orbConf.setOrbZooKeeperQuorum("localhost:21810");
    cluster = new MiniDFSCluster(orbConf, 3, true, null);
    orbConf.set("fs.default.name", "hdfs://localhost:" + cluster.getNameNodePort());
}