Example usage for org.apache.hadoop.hdfs MiniDFSCluster MiniDFSCluster

Introduction

On this page you can find example usages of the org.apache.hadoop.hdfs MiniDFSCluster constructor.

Prototype

@Deprecated 
public MiniDFSCluster(Configuration conf, int numDataNodes, boolean format, String[] racks) throws IOException 

Document

Modify the config and start up the servers. Note that this constructor is marked @Deprecated; newer Hadoop releases recommend MiniDFSCluster.Builder instead.
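
A minimal, hedged sketch of the lifecycle the examples below share (class and variable names are illustrative, not taken from any of the sources): construct the cluster, read back the HDFS URL that the constructor writes into the passed Configuration, and shut the cluster down when finished.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDFSClusterSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Start a one-datanode cluster, formatting its storage directories;
        // the constructor fills in fs.default.name on the passed Configuration.
        MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
        try {
            String hdfsUrl = conf.get("fs.default.name");
            FileSystem fs = cluster.getFileSystem();
            fs.mkdirs(new Path("/tmp/example"));
            System.out.println("Mini HDFS running at " + hdfsUrl);
        } finally {
            cluster.shutdown();
        }
    }
}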

Usage

From source file: org.apache.ivory.converter.OozieFeedMapperTest.java

License: Apache License

@BeforeClass
public void setUpDFS() throws Exception {
    Configuration conf = new Configuration();
    System.setProperty("test.build.data", "target/" + "cluster1" + "/data");
    srcMiniDFS = new MiniDFSCluster(conf, 1, true, null);
    String srcHdfsUrl = conf.get("fs.default.name");

    System.setProperty("test.build.data", "target/" + "cluster2" + "/data");
    conf = new Configuration();
    trgMiniDFS = new MiniDFSCluster(conf, 1, true, null);
    String trgHdfsUrl = conf.get("fs.default.name");

    cleanupStore();

    srcCluster = (Cluster) storeEntity(EntityType.CLUSTER, SRC_CLUSTER_PATH);
    ClusterHelper.getInterface(srcCluster, Interfacetype.WRITE).setEndpoint(srcHdfsUrl);

    trgCluster = (Cluster) storeEntity(EntityType.CLUSTER, TRG_CLUSTER_PATH);
    ClusterHelper.getInterface(trgCluster, Interfacetype.WRITE).setEndpoint(trgHdfsUrl);

    feed = (Feed) storeEntity(EntityType.FEED, FEED);

}
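
Note the test.build.data system property being reset before each constructor call: MiniDFSCluster derives its on-disk storage location from that property, which is how the two clusters here are kept in separate directories (the XTestCase example further down relies on the same trick).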

From source file: org.apache.ivory.converter.OozieProcessMapperLateProcessTest.java

License: Apache License

@BeforeClass
public void setUpDFS() throws Exception {

    cleanupStore();

    conf = new Configuration();
    dfsCluster = new MiniDFSCluster(conf, 1, true, null);
    hdfsUrl = conf.get("fs.default.name");

    Cluster cluster = (Cluster) EntityType.CLUSTER.getUnmarshaller()
            .unmarshal(this.getClass().getResource(CLUSTER_XML));
    ClusterHelper.getInterface(cluster, Interfacetype.WRITE).setEndpoint(hdfsUrl);

    store.publish(EntityType.CLUSTER, cluster);

    Feed feed1 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(this.getClass().getResource(FEED1_XML));
    Feed feed2 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(this.getClass().getResource(FEED2_XML));
    Feed feed3 = (Feed) EntityType.FEED.getUnmarshaller().unmarshal(this.getClass().getResource(FEED3_XML));

    store.publish(EntityType.FEED, feed1);
    store.publish(EntityType.FEED, feed2);
    store.publish(EntityType.FEED, feed3);

    Process process1 = (Process) EntityType.PROCESS.getUnmarshaller()
            .unmarshal(this.getClass().getResource(PROCESS1_XML));
    store.publish(EntityType.PROCESS, process1);
    Process process2 = (Process) EntityType.PROCESS.getUnmarshaller()
            .unmarshal(this.getClass().getResource(PROCESS2_XML));
    store.publish(EntityType.PROCESS, process2);

}

From source file: org.apache.ivory.converter.OozieProcessMapperTest.java

License: Apache License

@BeforeClass
public void setUpDFS() throws Exception {
    Configuration conf = new Configuration();
    new MiniDFSCluster(conf, 1, true, null);
    hdfsUrl = conf.get("fs.default.name");
}
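
This example discards the cluster reference, so nothing can ever shut the cluster down. A hedged variant that keeps the reference for teardown (the dfsCluster field and the @AfterClass hook are illustrative additions, not part of the original source):

private MiniDFSCluster dfsCluster;

@BeforeClass
public void setUpDFS() throws Exception {
    Configuration conf = new Configuration();
    dfsCluster = new MiniDFSCluster(conf, 1, true, null);
    // The constructor populates fs.default.name on the Configuration it is given.
    hdfsUrl = conf.get("fs.default.name");
}

@AfterClass
public void tearDownDFS() {
    if (dfsCluster != null) {
        dfsCluster.shutdown();
    }
}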

From source file: org.apache.ivory.entity.parser.ClusterEntityParserTest.java

License: Apache License

@BeforeClass
public void init() throws Exception {
    conf.set("hadoop.log.dir", "/tmp");
    this.dfsCluster = new MiniDFSCluster(conf, 1, true, null);
}

From source file: org.apache.ivory.entity.parser.FeedUpdateTest.java

License: Apache License

@BeforeClass
public void init() throws Exception {
    conf.set("hadoop.log.dir", "/tmp");
    this.dfsCluster = new MiniDFSCluster(conf, 1, true, null);
    setup();
}

From source file: org.apache.ivory.messaging.FeedProducerTest.java

License: Apache License

@BeforeClass
public void setup() throws Exception {

    this.dfsCluster = new MiniDFSCluster(conf, 1, true, null);
    logFile = new Path(conf.get("fs.default.name"), "/ivory/feed/agg-logs/instance-2012-01-01-10-00.csv");

    args = new String[] { "-" + ARG.entityName.getArgName(), TOPIC_NAME,
            "-" + ARG.feedNames.getArgName(), "click-logs",
            "-" + ARG.feedInstancePaths.getArgName(), "/click-logs/10/05/05/00/20",
            "-" + ARG.workflowId.getArgName(), "workflow-01-00",
            "-" + ARG.runId.getArgName(), "1",
            "-" + ARG.nominalTime.getArgName(), "2011-01-01-01-00",
            "-" + ARG.timeStamp.getArgName(), "2012-01-01-01-00",
            "-" + ARG.brokerUrl.getArgName(), BROKER_URL,
            "-" + ARG.brokerImplClass.getArgName(), BROKER_IMPL_CLASS,
            "-" + ARG.entityType.getArgName(), "FEED",
            "-" + ARG.operation.getArgName(), "DELETE",
            "-" + ARG.logFile.getArgName(), logFile.toString(),
            "-" + ARG.topicName.getArgName(), TOPIC_NAME,
            "-" + ARG.status.getArgName(), "SUCCEEDED",
            "-" + ARG.brokerTTL.getArgName(), "10",
            "-" + ARG.cluster.getArgName(), "corp" };

    broker = new BrokerService();
    broker.setUseJmx(true);
    broker.setDataDirectory("target/activemq");
    broker.addConnector(BROKER_URL);
    broker.start();
}
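
A matching teardown for this setup would presumably stop both the broker and the mini cluster; a sketch under that assumption (the method name is illustrative):

@AfterClass
public void tearDown() throws Exception {
    broker.stop(); // BrokerService.stop() declares a checked Exception
    dfsCluster.shutdown(); // release the mini HDFS cluster
}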

From source file: org.apache.jena.tdbloader3.AbstractMiniMRClusterTest.java

License: Apache License

public static void startCluster() throws IOException {
    FileUtils.deleteDirectory(new File("build/test"));

    Configuration configuration = new Configuration();
    // These are attempts to avoid permission problems in the ./build directory used by tests;
    // none of them worked, so the generated configuration XML is patched below instead.
    // configuration.setBoolean("dfs.permissions", false);
    // configuration.set("dfs.datanode.data.dir.perm", "755");
    // "dfs.umask=022"
    System.setProperty("hadoop.log.dir", "build/test/logs");
    dfsCluster = new MiniDFSCluster(configuration, numNodes, true, null);
    mrCluster = new MiniMRCluster(numNodes, dfsCluster.getFileSystem().getUri().toString(), 1);

    // Generate Hadoop configuration
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    mrCluster.createJobConf().writeXml(baos);
    String cfg = baos.toString();
    cfg = cfg.replace("<name>dfs.permissions</name><value>true</value>",
            "<name>dfs.permissions</name><value>false</value>");
    FileOutputStream out = new FileOutputStream(config);
    out.write(cfg.getBytes());
    out.close();

    // Copy testing data onto (H)DFS
    fs = dfsCluster.getFileSystem();
    fs.copyFromLocalFile(new Path("src/test/resources"), new Path("src/test/resources"));
}

From source file: org.apache.oozie.test.XTestCase.java

License: Apache License

private void setUpEmbeddedHadoop(String testCaseDir) throws Exception {
    if (dfsCluster == null && mrCluster == null) {
        if (System.getProperty("hadoop.log.dir") == null) {
            System.setProperty("hadoop.log.dir", testCaseDir);
        }
        int taskTrackers = 2;
        int dataNodes = 2;
        String oozieUser = getOozieUser();
        JobConf conf = createDFSConfig();
        String[] userGroups = new String[] { getTestGroup(), getTestGroup2() };
        UserGroupInformation.createUserForTesting(oozieUser, userGroups);
        UserGroupInformation.createUserForTesting(getTestUser(), userGroups);
        UserGroupInformation.createUserForTesting(getTestUser2(), userGroups);
        UserGroupInformation.createUserForTesting(getTestUser3(), new String[] { "users" });

        try {
            dfsCluster = new MiniDFSCluster(conf, dataNodes, true, null);
            FileSystem fileSystem = dfsCluster.getFileSystem();
            fileSystem.mkdirs(new Path("target/test-data"));
            fileSystem.mkdirs(new Path("target/test-data" + "/minicluster/mapred"));
            fileSystem.mkdirs(new Path("/user"));
            fileSystem.mkdirs(new Path("/tmp"));
            fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
            fileSystem.setPermission(new Path("target/test-data"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("target/test-data" + "/minicluster"),
                    FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("target/test-data" + "/minicluster/mapred"),
                    FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
            String nnURI = fileSystem.getUri().toString();
            int numDirs = 1;
            String[] racks = null;
            String[] hosts = null;
            mrCluster = new MiniMRCluster(0, 0, taskTrackers, nnURI, numDirs, racks, hosts, null, conf);
            JobConf jobConf = mrCluster.createJobConf();
            System.setProperty(OOZIE_TEST_JOB_TRACKER, jobConf.get("mapred.job.tracker"));
            String rmAddress = jobConf.get("yarn.resourcemanager.address");
            if (rmAddress != null) {
                System.setProperty(OOZIE_TEST_JOB_TRACKER, rmAddress);
            }
            System.setProperty(OOZIE_TEST_NAME_NODE, jobConf.get("fs.default.name"));
            ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
        } catch (Exception ex) {
            shutdownMiniCluster();
            throw ex;
        }
        new MiniClusterShutdownMonitor().start();
    }
}

From source file: org.apache.oozie.test.XTestCase.java

License: Apache License

private void setUpEmbeddedHadoop2() throws Exception {
    if (dfsCluster != null && dfsCluster2 == null) {
        // Trick the dfs location for MiniDFSCluster, since it doesn't accept a location as input
        String testBuildDataSaved = System.getProperty("test.build.data", "build/test/data");
        try {
            System.setProperty("test.build.data", FilenameUtils.concat(testBuildDataSaved, "2"));
            // Only the DFS cluster is created, based on current need
            dfsCluster2 = new MiniDFSCluster(createDFSConfig(), 2, true, null);
            FileSystem fileSystem = dfsCluster2.getFileSystem();
            fileSystem.mkdirs(new Path("target/test-data"));
            fileSystem.mkdirs(new Path("/user"));
            fileSystem.mkdirs(new Path("/tmp"));
            fileSystem.setPermission(new Path("target/test-data"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
            fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
            System.setProperty(OOZIE_TEST_NAME_NODE2, fileSystem.getConf().get("fs.default.name"));
        } catch (Exception ex) {
            shutdownMiniCluster2();
            throw ex;
        } finally {
            // Restore previous value
            System.setProperty("test.build.data", testBuildDataSaved);
        }
    }
}

From source file: org.apache.pig.test.MiniCluster.java

License: Apache License

@Override
protected void setupMiniDfsAndMrClusters() {
    try {
        System.setProperty("hadoop.log.dir", "build/test/logs");
        final int dataNodes = 4; // There will be 4 data nodes
        final int taskTrackers = 4; // There will be 4 task tracker nodes

        // Create the configuration hadoop-site.xml file
        File conf_dir = new File("build/classes/");
        conf_dir.mkdirs();
        File conf_file = new File(conf_dir, "hadoop-site.xml");

        conf_file.delete();

        // Builds and starts the mini dfs and mapreduce clusters
        Configuration config = new Configuration();
        if (FBUtilities.isWindows())
            config.set("fs.file.impl", WindowsLocalFileSystem.class.getName());
        m_dfs = new MiniDFSCluster(config, dataNodes, true, null);
        m_fileSys = m_dfs.getFileSystem();
        m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1);

        // Write the necessary config info to hadoop-site.xml
        m_conf = m_mr.createJobConf();
        m_conf.setInt("mapred.submit.replication", 2);
        m_conf.set("dfs.datanode.address", "0.0.0.0:0");
        m_conf.set("dfs.datanode.http.address", "0.0.0.0:0");
        m_conf.set("mapred.map.max.attempts", "2");
        m_conf.set("mapred.reduce.max.attempts", "2");
        m_conf.set("pig.jobcontrol.sleep", "100");
        try (OutputStream os = new FileOutputStream(conf_file)) {
            m_conf.writeXml(os);
        }

        // Set the system properties needed by Pig
        System.setProperty("cluster", m_conf.get("mapred.job.tracker"));
        System.setProperty("namenode", m_conf.get("fs.default.name"));
        System.setProperty("junit.hadoop.conf", conf_dir.getPath());
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}