Example usage for org.apache.hadoop.hdfs MiniDFSCluster MiniDFSCluster

Introduction

On this page you can find example usage for the org.apache.hadoop.hdfs MiniDFSCluster constructor MiniDFSCluster(Configuration conf, int numDataNodes, boolean format, String[] racks).

Prototype

@Deprecated 
public MiniDFSCluster(Configuration conf, int numDataNodes, boolean format, String[] racks) throws IOException 

Document

Modify the config and start up the servers.
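
This constructor is deprecated. On Hadoop 2 and later the same setup is normally written with MiniDFSCluster.Builder; several of the examples below keep the deprecated form for Hadoop 1 compatibility. A minimal sketch of the Builder equivalent, assuming a three-node cluster as in the first example below:

Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(3) // same role as the numDataNodes argument
        .format(true)    // same role as the format argument
        .racks(null)     // same role as the racks argument
        .build();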

Usage

From source file: org.goldenorb.TestJobManager.java

License: Apache License

@BeforeClass
public static void setUp() throws IOException {
    OrbConfiguration orbConf = new OrbConfiguration(true);
    cluster = new MiniDFSCluster(orbConf, 3, true, null);
}
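
The matching teardown is not part of this source; a minimal sketch of an @AfterClass counterpart, assuming the static cluster field assigned above:

@AfterClass
public static void tearDownCluster() {
    // stop the DataNodes and the NameNode started by setUp()
    if (cluster != null) {
        cluster.shutdown();
    }
}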

From source file: org.hadoop.tdg.TestPseudoHadoop.java

License: Apache License

@Before
public void setUp() throws IOException {
    Configuration configuration = new Configuration();
    if (System.getProperty("test.build.data") == null) {
        System.setProperty("test.build.data", "/tmp");
    }
    cluster = new MiniDFSCluster(configuration, 1, true, null);
    fs = cluster.getFileSystem();
    copyFileWithProgress();
}

From source file: org.kitesdk.data.MiniDFSTest.java

License: Apache License

@BeforeClass
@SuppressWarnings("deprecation")
public static void setupFS() throws IOException {
    if (cluster == null) {
        Configuration c = new Configuration();
        c.setBoolean("dfs.webhdfs.enabled", true);
        // if this fails with "The directory is already locked" set umask to 0022
        cluster = new MiniDFSCluster(c, 1, true, null);
        //cluster = new MiniDFSCluster.Builder(new Configuration()).build();
        dfs = getFS.invoke(cluster);
        conf = new Configuration(dfs.getConf());
        lfs = FileSystem.getLocal(conf);
    }
}
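
The reflective getFS handle is not defined in this extract. A sketch of how it might be declared (hypothetical; the actual definition is not shown): reflection avoids a compile-time dependency on the return type of MiniDFSCluster.getFileSystem(), which differs between Hadoop 1 and Hadoop 2.

private static final Method getFS; // java.lang.reflect.Method
static {
    try {
        // resolve MiniDFSCluster.getFileSystem() at runtime
        getFS = MiniDFSCluster.class.getMethod("getFileSystem");
    } catch (NoSuchMethodException e) {
        throw new ExceptionInInitializerError(e);
    }
}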

From source file: org.kitesdk.data.oozie.MiniDFSTest.java

License: Apache License

@BeforeClass
@SuppressWarnings("deprecation")
public static void setupFS() throws IOException {
    if (cluster == null) {
        Configuration c = new Configuration();
        c.setBoolean("dfs.webhdfs.enabled", true);

        String user = System.getProperty("user.name");
        c.set("hadoop.proxyuser." + user + ".hosts", "*");
        c.set("hadoop.proxyuser." + user + ".groups", "*");

        // if this fails with "The directory is already locked" set umask to 0022
        cluster = new MiniDFSCluster(c, 1, true, null);
        //cluster = new MiniDFSCluster.Builder(new Configuration()).build();
        dfs = getFS.invoke(cluster);
        conf = new Configuration(dfs.getConf());
        lfs = FileSystem.getLocal(conf);
    }
}

From source file: org.kitesdk.morphline.hadoop.core.MiniDFSTest.java

License: Apache License

@BeforeClass
public static void setupFS() throws IOException {
    final Configuration conf = new Configuration();
    cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
    // Builder is not compatible with hadoop1
    //cluster = new MiniDFSCluster.Builder(conf).build();
    dfs = getFS.invoke(cluster);
    lfs = FileSystem.getLocal(conf);
}

From source file: org.kitesdk.morphline.hadoop.rcfile.ReadRCFileTest.java

License: Apache License

@BeforeClass
public static void setupFS() throws IOException {
    final Configuration conf = new Configuration();
    cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
    // Builder is not compatible with hadoop1
    //cluster = new MiniDFSCluster.Builder(conf).build();
    dfs = getFS.invoke(cluster);
}

From source file: org.pentaho.hdfs.vfs.test.HDFSVFSTest.java

License: Open Source License

@BeforeClass
public static void beforeClass() throws IOException {
    fsManager = VFS.getManager();
    final Configuration dfsConf = new Configuration();
    dfsConf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".users", "users");
    dfsConf.set("hadoop.proxyuser.users.ip-addresses", "localhost");
    dfsConf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".ip-addresses", "localhost");
    cluster = new MiniDFSCluster(dfsConf, 2, true, null);
    cluster.waitActive();

    FileSystem hdfs = cluster.getFileSystem();
    HDFSFileSystem.setMockHDFSFileSystem(hdfs);
}

From source file: org.springframework.data.hadoop.test.support.StandaloneHadoopCluster.java

License: Apache License

@SuppressWarnings("deprecation")
@Override
public void start() throws IOException {
    log.info("Checking if cluster=" + clusterName + " needs to be started");
    synchronized (this.startupShutdownMonitor) {
        if (started) {
            return;
        }

        // TODO: fix for MAPREDUCE-2785
        // I don't like doing it this way at all, but until we
        // find a better fix, just set the system property.
        // Who knows what kind of problems this will cause!
        // Keeping this here as a reminder for the next person
        // who cleans up the mess.
        String tmpDir = getTmpDir();
        System.setProperty("hadoop.log.dir", tmpDir);

        // need to get unique dir per cluster
        System.setProperty("test.build.data", "build/test/data/" + clusterName);

        log.info("Starting cluster=" + clusterName);

        Configuration config = new JobConf();

        // umask trick
        String umask = getCurrentUmask(tmpDir, config);
        if (umask != null) {
            log.info("Setting expected umask to " + umask);
            config.set("dfs.datanode.data.dir.perm", umask);
        }

        // the dfs cluster updates the config;
        // newer dfs clusters use the builder pattern, but we
        // need to support older versions in the same code.
        // there are ramifications if the deprecated methods
        // are removed in a future release.
        dfsCluster = new MiniDFSCluster(config, nodes, true, null);

        // we need to ask the mr cluster for its config after init
        // returns; in this case it is not guaranteed that the passed
        // config has been updated.
        // we do all this via a compatibility class that uses
        // reflection, because at this point we don't know
        // the exact runtime implementation

        FileSystem fs = dfsCluster.getFileSystem();
        log.info("Dfs cluster uri= " + fs.getUri());

        mrClusterObject = MiniMRClusterCompat.instantiateCluster(this.getClass(), nodes, config, fs,
                this.getClass().getClassLoader());

        configuration = MiniMRClusterCompat.getConfiguration(mrClusterObject);

        // set default uri again in case it wasn't updated
        FileSystem.setDefaultUri(configuration, fs.getUri());

        log.info("Started cluster=" + clusterName);
        started = true;
    }
}
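
The comments above explain why this class sticks to the deprecated constructor and resolves the MR cluster through reflection. A hedged sketch (not the actual MiniMRClusterCompat code, which is not shown here) of how a reflection-based fallback between the Builder API and the deprecated constructor could look:

private static MiniDFSCluster createDfsCluster(Configuration conf, int nodes) throws IOException {
    try {
        // prefer the Builder API where it exists (Hadoop 2+)
        Class<?> builderClass = Class.forName("org.apache.hadoop.hdfs.MiniDFSCluster$Builder");
        Object builder = builderClass.getConstructor(Configuration.class).newInstance(conf);
        builderClass.getMethod("numDataNodes", int.class).invoke(builder, nodes);
        return (MiniDFSCluster) builderClass.getMethod("build").invoke(builder);
    } catch (ClassNotFoundException e) {
        // Hadoop 1: no Builder, fall back to the deprecated constructor
        return new MiniDFSCluster(conf, nodes, true, null);
    } catch (ReflectiveOperationException e) {
        throw new IOException(e);
    }
}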

From source file: pathmerge.linear.MergePathH1Test.java

License: Apache License

private void startHadoop() throws IOException {
    FileSystem lfs = FileSystem.getLocal(new Configuration());
    lfs.delete(new Path("build"), true);
    System.setProperty("hadoop.log.dir", "logs");
    dfsCluster = new MiniDFSCluster(conf, 2, true, null);
    dfs = dfsCluster.getFileSystem();
    mrCluster = new MiniMRCluster(4, dfs.getUri().toString(), 2);

    Path src = new Path(DATA_PATH);
    Path dest = new Path(HDFS_PATH + "/");
    dfs.mkdirs(dest);
    dfs.copyFromLocalFile(src, dest);
    Path data = new Path(HDFS_PATH_MERGED + "/");
    dfs.mkdirs(data);

    DataOutputStream confOutput = new DataOutputStream(new FileOutputStream(new File(HADOOP_CONF_PATH)));
    conf.writeXml(confOutput);
    confOutput.flush();
    confOutput.close();
}
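
The shutdown path is not included in the snippet; a brief sketch of a matching cleanup, assuming the dfsCluster and mrCluster fields assigned above:

private void stopHadoop() {
    // stop the MR cluster first, then the DFS cluster it runs against
    mrCluster.shutdown();
    dfsCluster.shutdown();
}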

From source file: pathmerge.log.MergePathH2Test.java

License: Apache License

private void startHadoop() throws IOException {
    FileSystem lfs = FileSystem.getLocal(new Configuration());
    lfs.delete(new Path("build"), true);
    System.setProperty("hadoop.log.dir", "logs");
    dfsCluster = new MiniDFSCluster(conf, 2, true, null);
    dfs = dfsCluster.getFileSystem();
    mrCluster = new MiniMRCluster(4, dfs.getUri().toString(), 2);

    Path src = new Path(DATA_PATH);
    Path dest = new Path(HDFS_PATH + "/");
    dfs.mkdirs(dest);
    dfs.copyFromLocalFile(src, dest);
    Path data = new Path(HDFA_PATH_DATA + "/");
    dfs.mkdirs(data);

    DataOutputStream confOutput = new DataOutputStream(new FileOutputStream(new File(HADOOP_CONF_PATH)));
    conf.writeXml(confOutput);
    confOutput.flush();
    confOutput.close();
}