List of usage examples for the org.apache.hadoop.yarn.server.MiniYARNCluster constructor
public MiniYARNCluster(String testName, int numNodeManagers, int numLocalDirs, int numLogDirs)
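The constructor takes a cluster/test name, the number of NodeManagers, and the number of local and log directories per NodeManager. Before the project-specific examples below, here is a minimal sketch of the typical lifecycle (init with a YarnConfiguration, start, read back the effective configuration, stop); the method name, cluster name, and counts are illustrative only and not taken from any of the listed source files.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;

public static void exampleMiniYarnClusterLifecycle() {
    // One NodeManager, one local dir and one log dir per NodeManager; values are illustrative.
    MiniYARNCluster miniCluster = new MiniYARNCluster("ExampleTest", 1, 1, 1);
    miniCluster.init(new YarnConfiguration()); // ports (RM address etc.) are assigned dynamically
    miniCluster.start();

    // The effective configuration, including the dynamically chosen RM address,
    // is what tests should hand to YarnClient or to the application code under test.
    Configuration effectiveConf = miniCluster.getConfig();

    // ... run the test against effectiveConf ...

    miniCluster.stop();
}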
From source file:com.yahoo.storm.yarn.TestIntegration.java
License:Open Source License
@SuppressWarnings({ "rawtypes", "unchecked" }) @BeforeClass//from w ww .ja va 2 s. c o m public static void setup() { try { zkServer = new EmbeddedZKServer(); zkServer.start(); LOG.info("Starting up MiniYARN cluster"); if (yarnCluster == null) { yarnCluster = new MiniYARNCluster(TestIntegration.class.getName(), 2, 1, 1); Configuration conf = new YarnConfiguration(); conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512); conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 2 * 1024); yarnCluster.init(conf); yarnCluster.start(); } sleep(2000); Configuration miniyarn_conf = yarnCluster.getConfig(); yarn_site_xml = testConf.createYarnSiteConfig(miniyarn_conf); storm_home = testConf.stormHomePath(); LOG.info("Will be using storm found on PATH at " + storm_home); //create a storm configuration file with zkport final Map storm_conf = Config.readStormConfig(); storm_conf.put(backtype.storm.Config.STORM_ZOOKEEPER_PORT, zkServer.port()); storm_conf_file = testConf.createConfigFile(storm_conf); List<String> cmd = java.util.Arrays.asList("bin/storm-yarn", "launch", storm_conf_file.toString(), "--stormZip", "lib/storm.zip", "--appname", "storm-on-yarn-test", "--output", "target/appId.txt"); execute(cmd); //wait for Storm cluster to be fully luanched sleep(15000); BufferedReader reader = new BufferedReader(new FileReader("target/appId.txt")); appId = reader.readLine(); reader.close(); if (appId != null) appId = appId.trim(); LOG.info("application ID:" + appId); } catch (Exception ex) { LOG.error("setup failure", ex); Assert.assertEquals(null, ex); } }
From source file:edu.uci.ics.asterix.aoya.test.YARNCluster.java
License:Apache License
/**
 * Instantiates the (Mini) DFS Cluster with the configured number of datanodes.
 * Post instantiation, data is loaded to HDFS.
 * Called prior to running the Runtime test suite.
 */
public void setup() throws Exception {
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/core-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/mapred-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/hdfs-site.xml"));
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "target/integrationts/data");
    cleanupLocal();
    //this constructor is deprecated in hadoop 2.x
    //dfsCluster = new MiniDFSCluster(nameNodePort, conf, numDataNodes, true, true, StartupOption.REGULAR, null);
    miniCluster = new MiniYARNCluster("Asterix_testing", numDataNodes, 1, 1);
    miniCluster.init(conf);
    dfs = FileSystem.get(conf);
}
From source file:gobblin.yarn.GobblinYarnAppLauncherTest.java
License:Apache License
@BeforeClass
public void setUp() throws Exception {
    // Set java home in environment since it isn't set on some systems
    String javaHome = System.getProperty("java.home");
    setEnv("JAVA_HOME", javaHome);

    final YarnConfiguration clusterConf = new YarnConfiguration();
    clusterConf.set("yarn.resourcemanager.connect.max-wait.ms", "10000");

    MiniYARNCluster miniYARNCluster = this.closer.register(new MiniYARNCluster("TestCluster", 1, 1, 1));
    miniYARNCluster.init(clusterConf);
    miniYARNCluster.start();

    // YARN client should not be started before the Resource Manager is up
    AssertWithBackoff.create().logger(LOG).timeoutMs(10000).assertTrue(new Predicate<Void>() {
        @Override
        public boolean apply(Void input) {
            return !clusterConf.get(YarnConfiguration.RM_ADDRESS).contains(":0");
        }
    }, "Waiting for RM");

    this.yarnClient = this.closer.register(YarnClient.createYarnClient());
    this.yarnClient.init(clusterConf);
    this.yarnClient.start();

    // Use a random ZK port
    TestingServer testingZKServer = this.closer.register(new TestingServer(-1));
    LOG.info("Testing ZK Server listening on: " + testingZKServer.getConnectString());

    // the zk port is dynamically configured
    try (PrintWriter pw = new PrintWriter("dynamic.conf")) {
        File dir = new File("target/dummydir"); // dummy directory specified in configuration
        dir.mkdir();
        pw.println("gobblin.cluster.zk.connection.string=\"" + testingZKServer.getConnectString() + "\"");
        pw.println("jobconf.fullyQualifiedPath=\"" + dir.getAbsolutePath() + "\"");
    }

    // YARN config is dynamic and needs to be passed to other processes
    try (OutputStream os = new FileOutputStream(new File("yarn-site.xml"))) {
        clusterConf.writeXml(os);
    }

    this.curatorFramework = TestHelper.createZkClient(testingZKServer, this.closer);

    URL url = GobblinYarnAppLauncherTest.class.getClassLoader()
            .getResource(GobblinYarnAppLauncherTest.class.getSimpleName() + ".conf");
    Assert.assertNotNull(url, "Could not find resource " + url);

    this.config = ConfigFactory.parseURL(url).withValue("gobblin.cluster.zk.connection.string",
            ConfigValueFactory.fromAnyRef(testingZKServer.getConnectString())).resolve();
    String zkConnectionString = this.config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    this.helixManager = HelixManagerFactory.getZKHelixManager(
            this.config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY),
            TestHelper.TEST_HELIX_INSTANCE_NAME, InstanceType.CONTROLLER, zkConnectionString);

    this.gobblinYarnAppLauncher = new GobblinYarnAppLauncher(this.config, clusterConf);
}
From source file:org.apache.asterix.aoya.test.YARNCluster.java
License:Apache License
/**
 * Instantiates the (Mini) DFS Cluster with the configured number of datanodes.
 * Post instantiation, data is loaded to HDFS.
 * Called prior to running the Runtime test suite.
 */
public void setup() throws Exception {
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/core-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/mapred-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/hdfs-site.xml"));
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "target/integrationts/data");
    cleanupLocal();
    //this constructor is deprecated in hadoop 2.x
    //dfsCluster = new MiniDFSCluster(nameNodePort, conf, numDataNodes, true, true, StartupOption.REGULAR, null);
    miniCluster = new MiniYARNCluster("Asterix_testing", numDataNodes, 1, 1);
    miniCluster.init(conf);
}
From source file:org.apache.flink.yarn.YarnTestBase.java
License:Apache License
public static void startYARNWithConfig(Configuration conf) {
    // set the home directory to a tmp directory. Flink on YARN is using the home dir to distribute the file
    File homeDir = null;
    try {
        homeDir = tmp.newFolder();
    } catch (IOException e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }
    System.setProperty("user.home", homeDir.getAbsolutePath());

    String uberjarStartLoc = "..";
    LOG.info("Trying to locate uberjar in {}", new File(uberjarStartLoc));
    flinkUberjar = findFile(uberjarStartLoc, new RootDirFilenameFilter());
    Assert.assertNotNull("Flink uberjar not found", flinkUberjar);
    String flinkDistRootDir = flinkUberjar.getParentFile().getParent();
    flinkLibFolder = flinkUberjar.getParentFile(); // the uberjar is located in lib/
    Assert.assertNotNull("Flink flinkLibFolder not found", flinkLibFolder);
    Assert.assertTrue("lib folder not found", flinkLibFolder.exists());
    Assert.assertTrue("lib folder not found", flinkLibFolder.isDirectory());

    if (!flinkUberjar.exists()) {
        Assert.fail("Unable to locate yarn-uberjar.jar");
    }

    try {
        LOG.info("Starting up MiniYARNCluster");
        if (yarnCluster == null) {
            yarnCluster = new MiniYARNCluster(conf.get(YarnTestBase.TEST_CLUSTER_NAME_KEY), NUM_NODEMANAGERS, 1, 1);
            yarnCluster.init(conf);
            yarnCluster.start();
        }

        Map<String, String> map = new HashMap<String, String>(System.getenv());

        File flinkConfDirPath = findFile(flinkDistRootDir, new ContainsName(new String[] { "flink-conf.yaml" }));
        Assert.assertNotNull(flinkConfDirPath);
        map.put("FLINK_CONF_DIR", flinkConfDirPath.getParent());

        File yarnConfFile = writeYarnSiteConfigXML(conf);
        map.put("YARN_CONF_DIR", yarnConfFile.getParentFile().getAbsolutePath());
        map.put("IN_TESTS", "yes we are in tests"); // see FlinkYarnClient() for more info
        TestBaseUtils.setEnv(map);

        Assert.assertTrue(yarnCluster.getServiceState() == Service.STATE.STARTED);

        // wait for the nodeManagers to connect
        while (!yarnCluster.waitForNodeManagersToConnect(500)) {
            LOG.info("Waiting for Nodemanagers to connect");
        }
    } catch (Exception ex) {
        ex.printStackTrace();
        LOG.error("setup failure", ex);
        Assert.fail();
    }
}
From source file:org.apache.giraph.yarn.TestYarnJob.java
License:Apache License
/**
 * Initialize the MiniYARNCluster for the integration test.
 */
private void initYarnCluster() {
    cluster = new MiniYARNCluster(TestYarnJob.class.getName(), 1, 1, 1);
    cluster.init(new ImmutableClassesGiraphConfiguration(conf));
    cluster.start();
}
From source file:org.apache.gobblin.yarn.GobblinYarnAppLauncherTest.java
License:Apache License
@BeforeClass
public void setUp() throws Exception {
    // Set java home in environment since it isn't set on some systems
    String javaHome = System.getProperty("java.home");
    setEnv("JAVA_HOME", javaHome);

    final YarnConfiguration clusterConf = new YarnConfiguration();
    clusterConf.set("yarn.resourcemanager.connect.max-wait.ms", "10000");

    MiniYARNCluster miniYARNCluster = this.closer.register(new MiniYARNCluster("TestCluster", 1, 1, 1));
    miniYARNCluster.init(clusterConf);
    miniYARNCluster.start();

    // YARN client should not be started before the Resource Manager is up
    AssertWithBackoff.create().logger(LOG).timeoutMs(10000).assertTrue(new Predicate<Void>() {
        @Override
        public boolean apply(Void input) {
            return !clusterConf.get(YarnConfiguration.RM_ADDRESS).contains(":0");
        }
    }, "Waiting for RM");

    this.yarnClient = this.closer.register(YarnClient.createYarnClient());
    this.yarnClient.init(clusterConf);
    this.yarnClient.start();

    // Use a random ZK port
    TestingServer testingZKServer = this.closer.register(new TestingServer(-1));
    LOG.info("Testing ZK Server listening on: " + testingZKServer.getConnectString());

    // the zk port is dynamically configured
    try (PrintWriter pw = new PrintWriter(DYNAMIC_CONF_PATH)) {
        File dir = new File("target/dummydir"); // dummy directory specified in configuration
        dir.mkdir();
        pw.println("gobblin.cluster.zk.connection.string=\"" + testingZKServer.getConnectString() + "\"");
        pw.println("jobconf.fullyQualifiedPath=\"" + dir.getAbsolutePath() + "\"");
    }

    // YARN config is dynamic and needs to be passed to other processes
    try (OutputStream os = new FileOutputStream(new File(YARN_SITE_XML_PATH))) {
        clusterConf.writeXml(os);
    }

    this.curatorFramework = TestHelper.createZkClient(testingZKServer, this.closer);

    URL url = GobblinYarnAppLauncherTest.class.getClassLoader()
            .getResource(GobblinYarnAppLauncherTest.class.getSimpleName() + ".conf");
    Assert.assertNotNull(url, "Could not find resource " + url);

    this.config = ConfigFactory.parseURL(url).withValue("gobblin.cluster.zk.connection.string",
            ConfigValueFactory.fromAnyRef(testingZKServer.getConnectString())).resolve();
    String zkConnectionString = this.config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    this.helixManager = HelixManagerFactory.getZKHelixManager(
            this.config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY),
            TestHelper.TEST_HELIX_INSTANCE_NAME, InstanceType.CONTROLLER, zkConnectionString);

    this.gobblinYarnAppLauncher = new GobblinYarnAppLauncher(this.config, clusterConf);
}
From source file:org.apache.gobblin.yarn.YarnServiceTest.java
License:Apache License
@BeforeClass
public void setUp() throws Exception {
    // Set java home in environment since it isn't set on some systems
    String javaHome = System.getProperty("java.home");
    setEnv("JAVA_HOME", javaHome);

    this.clusterConf = new YarnConfiguration();
    this.clusterConf.set(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, "100");
    this.clusterConf.set(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, "10000");
    this.clusterConf.set(YarnConfiguration.YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_TIMEOUT_MS, "60000");

    this.yarnCluster = this.closer.register(new MiniYARNCluster("YarnServiceTestCluster", 4, 1, 1));
    this.yarnCluster.init(this.clusterConf);
    this.yarnCluster.start();

    // YARN client should not be started before the Resource Manager is up
    AssertWithBackoff.create().logger(LOG).timeoutMs(10000).assertTrue(new Predicate<Void>() {
        @Override
        public boolean apply(Void input) {
            return !clusterConf.get(YarnConfiguration.RM_ADDRESS).contains(":0");
        }
    }, "Waiting for RM");

    this.yarnClient = this.closer.register(YarnClient.createYarnClient());
    this.yarnClient.init(this.clusterConf);
    this.yarnClient.start();

    URL url = YarnServiceTest.class.getClassLoader()
            .getResource(YarnServiceTest.class.getSimpleName() + ".conf");
    Assert.assertNotNull(url, "Could not find resource " + url);
    this.config = ConfigFactory.parseURL(url).resolve();

    // Start a dummy application manager so that the YarnService can use the AM-RM token.
    startApp();

    // create and start the test yarn service
    this.yarnService = new TestYarnService(this.config, "testApp", "appId", this.clusterConf,
            FileSystem.getLocal(new Configuration()), this.eventBus);
    this.yarnService.startUp();
}
From source file:org.apache.twill.yarn.TwillTester.java
License:Apache License
@Override
protected void before() throws Throwable {
    tmpFolder.create();

    // Starts Zookeeper
    zkServer = InMemoryZKServer.builder().setDataDir(tmpFolder.newFolder()).build();
    zkServer.startAndWait();

    // Start YARN mini cluster
    File miniDFSDir = tmpFolder.newFolder();
    LOG.info("Starting Mini DFS on path {}", miniDFSDir);
    Configuration fsConf = new HdfsConfiguration(new Configuration());
    fsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, miniDFSDir.getAbsolutePath());
    dfsCluster = new MiniDFSCluster.Builder(fsConf).numDataNodes(1).build();

    Configuration conf = new YarnConfiguration(dfsCluster.getFileSystem().getConf());

    if (YarnUtils.getHadoopVersion().equals(YarnUtils.HadoopVersions.HADOOP_20)) {
        conf.set("yarn.resourcemanager.scheduler.class",
                "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler");
    } else {
        conf.set("yarn.resourcemanager.scheduler.class",
                "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler");
        conf.set("yarn.scheduler.capacity.resource-calculator",
                "org.apache.hadoop.yarn.util.resource.DominantResourceCalculator");
        conf.setBoolean("yarn.scheduler.include-port-in-node-name", true);
    }
    conf.set("yarn.nodemanager.vmem-pmem-ratio", "20.1");
    conf.set("yarn.nodemanager.vmem-check-enabled", "false");
    conf.set("yarn.scheduler.minimum-allocation-mb", "128");
    conf.set("yarn.nodemanager.delete.debug-delay-sec", "3600");

    cluster = new MiniYARNCluster("test-cluster", 3, 1, 1);
    cluster.init(conf);
    cluster.start();

    this.config = new YarnConfiguration(cluster.getConfig());

    twillRunner = createTwillRunnerService();
    twillRunner.start();

    yarnAppClient = new VersionDetectYarnAppClientFactory().create(conf);
    yarnAppClient.startAndWait();
}
From source file:org.apache.twill.yarn.YarnTestUtils.java
License:Apache License
private static final void init(File folder) throws IOException {
    // Starts Zookeeper
    zkServer = InMemoryZKServer.builder().build();
    zkServer.startAndWait();

    // Start YARN mini cluster
    LOG.info("Starting Mini DFS on path {}", folder);
    Configuration fsConf = new HdfsConfiguration(new Configuration());
    fsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, folder.getAbsolutePath());
    dfsCluster = new MiniDFSCluster.Builder(fsConf).numDataNodes(1).build();

    Configuration conf = new YarnConfiguration(dfsCluster.getFileSystem().getConf());

    if (YarnUtils.getHadoopVersion().equals(YarnUtils.HadoopVersions.HADOOP_20)) {
        conf.set("yarn.resourcemanager.scheduler.class",
                "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler");
    } else {
        conf.set("yarn.resourcemanager.scheduler.class",
                "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler");
        conf.set("yarn.scheduler.capacity.resource-calculator",
                "org.apache.hadoop.yarn.util.resource.DominantResourceCalculator");
        conf.setBoolean("yarn.scheduler.include-port-in-node-name", true);
    }
    conf.set("yarn.nodemanager.vmem-pmem-ratio", "20.1");
    conf.set("yarn.nodemanager.vmem-check-enabled", "false");
    conf.set("yarn.scheduler.minimum-allocation-mb", "128");
    conf.set("yarn.nodemanager.delete.debug-delay-sec", "3600");

    cluster = new MiniYARNCluster("test-cluster", 3, 1, 1);
    cluster.init(conf);
    cluster.start();

    config = new YarnConfiguration(cluster.getConfig());

    runnerService = createTwillRunnerService();
    runnerService.startAndWait();

    yarnAppClient = new VersionDetectYarnAppClientFactory().create(conf);
    yarnAppClient.start();
}