Usage examples for the org.apache.hadoop.hdfs MiniDFSCluster constructor:
@Deprecated public MiniDFSCluster(Configuration conf, int numDataNodes, boolean format, String[] racks) throws IOException
From source file:pl.edu.icm.coansys.heeut.MiniCluster.java
License: Apache License
@Override protected void setupMiniDfsAndMrClusters() { try {/* w w w . j ava2 s .c om*/ final int dataNodes = 2; // There will be 2 data nodes final int taskTrackers = 2; // There will be 2 task tracker nodes System.setProperty("hadoop.log.dir", "test-logs"); // Create the configuration hadoop-site.xml file File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/"); conf_dir.mkdirs(); File conf_file = new File(conf_dir, "hadoop-site.xml"); conf_file.delete(); // Builds and starts the mini dfs and mapreduce clusters Configuration config = new Configuration(); m_dfs = new MiniDFSCluster(config, dataNodes, true, null); m_fileSys = m_dfs.getFileSystem(); m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1); // Write the necessary config info to hadoop-site.xml m_conf = m_mr.createJobConf(); m_conf.setInt("mapred.submit.replication", 2); m_conf.set("dfs.datanode.address", "0.0.0.0:0"); m_conf.set("dfs.datanode.http.address", "0.0.0.0:0"); m_conf.set("mapred.map.max.attempts", "2"); m_conf.set("mapred.reduce.max.attempts", "2"); m_conf.writeXml(new FileOutputStream(conf_file)); // Set the system properties needed by Pig System.setProperty("cluster", m_conf.get("mapred.job.tracker")); System.setProperty("namenode", m_conf.get("fs.default.name")); System.setProperty("junit.hadoop.conf", conf_dir.getPath()); } catch (IOException e) { throw new RuntimeException(e); } }
From source file:test.gov.jgi.meta.MiniCluster.java
License: Apache License
private void setupMiniDfsAndMrClusters() { try {//from w w w. j av a2s.c o m final int dataNodes = 4; // There will be 4 data nodes final int taskTrackers = 4; // There will be 4 task tracker nodes Configuration config = new Configuration(); // Builds and starts the mini dfs and mapreduce clusters m_dfs = new MiniDFSCluster(config, dataNodes, true, null); m_fileSys = m_dfs.getFileSystem(); m_mr = new MiniMRCluster(taskTrackers, m_fileSys.getUri().toString(), 1); // Create the configuration hadoop-site.xml file File conf_dir = new File(System.getProperty("user.home"), "pigtest/conf/"); conf_dir.mkdirs(); File conf_file = new File(conf_dir, "hadoop-site.xml"); // Write the necessary config info to hadoop-site.xml m_conf = m_mr.createJobConf(); m_conf.setInt("mapred.submit.replication", 2); m_conf.set("dfs.datanode.address", "0.0.0.0:0"); m_conf.set("dfs.datanode.http.address", "0.0.0.0:0"); m_conf.writeXml(new FileOutputStream(conf_file)); // Set the system properties needed by Pig System.setProperty("cluster", m_conf.get("mapred.job.tracker")); System.setProperty("namenode", m_conf.get("fs.default.name")); System.setProperty("junit.hadoop.conf", conf_dir.getPath()); } catch (IOException e) { throw new RuntimeException(e); } }
From source file:uk.bl.wa.hadoop.datasets.WARCDatasetGeneratorIntegrationTest.java
License: Open Source License
/**
 * Per-test fixture: spins up a single-node mini DFS and mini MapReduce
 * cluster, then stages the test WARC files on the cluster.
 *
 * @throws Exception if cluster startup or file staging fails
 */
@Before
public void setUp() throws Exception {
    // Print out the full config for debugging purposes:
    // Config index_conf = ConfigFactory.load();
    // LOG.debug(index_conf.root().render());
    log.warn("Spinning up test cluster...");
    // Make sure the log folder exists, otherwise the test will fail.
    new File("target/test-logs").mkdirs();
    System.setProperty("hadoop.log.dir", "target/test-logs");
    // Pin the SAX parser to the JDK-bundled Xerces implementation.
    System.setProperty("javax.xml.parsers.SAXParserFactory",
            "com.sun.org.apache.xerces.internal.jaxp.SAXParserFactoryImpl");
    Configuration conf = new Configuration();
    // Keep the mini-DFS storage under target/ so a clean build removes it.
    System.setProperty("test.build.data", new File("target/mini-dfs").getAbsolutePath());
    dfsCluster = new MiniDFSCluster(conf, 1, true, null);
    // NOTE(review): FileSystem.makeQualified() returns a qualified Path and
    // has no side effect, so these two calls discard their result — the
    // intent was presumably `input = ...makeQualified(input)`; confirm.
    dfsCluster.getFileSystem().makeQualified(input);
    dfsCluster.getFileSystem().makeQualified(output);
    mrCluster = new MiniMRCluster(1, getFileSystem().getUri().toString(), 1);
    // Prepare for tests: stage each test WARC on the mini cluster.
    for (String filename : testWarcs) {
        copyFileToTestCluster(getFileSystem(), input,
                "../warc-indexer/src/test/resources/", filename);
    }
    log.warn("Spun up test cluster.");
}
From source file:uk.bl.wa.hadoop.indexer.mdx.MDXSeqSampleGeneratorIntegrationTest.java
License: Open Source License
/**
 * Per-test fixture: spins up a single-node mini DFS and mini MapReduce
 * cluster, then stages the test WARC files on the cluster.
 *
 * @throws java.lang.Exception if cluster startup or file staging fails
 */
@Before
public void setUp() throws Exception {
    log.warn("Spinning up test cluster...");
    // Make sure the log folder exists, otherwise the test will fail.
    new File("target/test-logs").mkdirs();
    System.setProperty("hadoop.log.dir", "target/test-logs");
    // Pin the SAX parser to the JDK-bundled Xerces implementation.
    System.setProperty("javax.xml.parsers.SAXParserFactory",
            "com.sun.org.apache.xerces.internal.jaxp.SAXParserFactoryImpl");
    Configuration conf = new Configuration();
    dfsCluster = new MiniDFSCluster(conf, 1, true, null);
    // NOTE(review): FileSystem.makeQualified() returns a qualified Path and
    // has no side effect, so these two calls discard their result — the
    // intent was presumably `input = ...makeQualified(input)`; confirm.
    dfsCluster.getFileSystem().makeQualified(input);
    dfsCluster.getFileSystem().makeQualified(output);
    mrCluster = new MiniMRCluster(1, dfsCluster.getFileSystem().getUri().toString(), 1);
    // Prepare for tests: stage each test WARC on the mini cluster.
    for (String filename : testWarcs) {
        WARCMDXGeneratorIntegrationTest.copyFileToTestCluster(dfsCluster.getFileSystem(),
                input, "src/test/resources/", filename);
    }
    log.warn("Spun up test cluster.");
}
From source file:uk.bl.wa.hadoop.mapreduce.MapReduceTestBaseClass.java
License: Open Source License
/**
 * One-time fixture (shared by all tests in subclasses): spins up a
 * single-node mini DFS and mini MapReduce cluster, then stages the test
 * WARC files on the cluster.
 *
 * @throws Exception if cluster startup or file staging fails
 */
@BeforeClass
public static void setUp() throws Exception { // static
    // Print out the full config for debugging purposes:
    // Config index_conf = ConfigFactory.load();
    // LOG.debug(index_conf.root().render());
    log.warn("Spinning up test cluster...");
    // Make sure the log folder exists, otherwise the test will fail.
    new File("target/test-logs").mkdirs();
    System.setProperty("hadoop.log.dir", "target/test-logs");
    // Pin the SAX parser to the JDK-bundled Xerces implementation.
    System.setProperty("javax.xml.parsers.SAXParserFactory",
            "com.sun.org.apache.xerces.internal.jaxp.SAXParserFactoryImpl");
    Configuration conf = new Configuration();
    dfsCluster = new MiniDFSCluster(conf, 1, true, null);
    // NOTE(review): FileSystem.makeQualified() returns a qualified Path and
    // has no side effect, so these two calls discard their result — the
    // intent was presumably `input = ...makeQualified(input)`; confirm.
    dfsCluster.getFileSystem().makeQualified(input);
    dfsCluster.getFileSystem().makeQualified(output);
    mrCluster = new MiniMRCluster(1, getFileSystem().getUri().toString(), 1);
    // Prepare for tests: stage each test WARC on the mini cluster.
    for (String filename : testWarcs) {
        copyFileToTestCluster(filename, "../warc-indexer/src/test/resources/");
    }
    log.warn("Spun up test cluster.");
}
From source file:uk.bl.wa.hadoop.mapreduce.warcstats.WARCStatsToolIntegrationTest.java
License: Open Source License
/**
 * Per-test fixture: spins up a single-node mini DFS and mini MapReduce
 * cluster, then stages the test WARC files on the cluster.
 *
 * @throws Exception if cluster startup or file staging fails
 */
@Before
public void setUp() throws Exception {
    // Print out the full config for debugging purposes:
    // Config index_conf = ConfigFactory.load();
    // LOG.debug(index_conf.root().render());
    log.warn("Spinning up test cluster...");
    // Make sure the log folder exists, otherwise the test will fail.
    new File("target/test-logs").mkdirs();
    System.setProperty("hadoop.log.dir", "target/test-logs");
    // Pin the SAX parser to the JDK-bundled Xerces implementation.
    System.setProperty("javax.xml.parsers.SAXParserFactory",
            "com.sun.org.apache.xerces.internal.jaxp.SAXParserFactoryImpl");
    Configuration conf = new Configuration();
    dfsCluster = new MiniDFSCluster(conf, 1, true, null);
    // NOTE(review): FileSystem.makeQualified() returns a qualified Path and
    // has no side effect, so these two calls discard their result — the
    // intent was presumably `input = ...makeQualified(input)`; confirm.
    dfsCluster.getFileSystem().makeQualified(input);
    dfsCluster.getFileSystem().makeQualified(output);
    mrCluster = new MiniMRCluster(1, getFileSystem().getUri().toString(), 1);
    // Prepare for tests: stage each test WARC on the mini cluster.
    for (String filename : testWarcs) {
        copyFileToTestCluster(filename);
    }
    log.warn("Spun up test cluster.");
}