List of usage examples for org.apache.hadoop.yarn.api.protocolrecords GetApplicationsRequest newInstance
@Public @Stable public static GetApplicationsRequest newInstance(EnumSet<YarnApplicationState> applicationStates)
The request from clients to get a report of Applications matching the given application states in the cluster from the ResourceManager.
From source file:io.hops.metadata.util.TestHopYarnAPIUtilities.java
License:Apache License
@Test(timeout = 60000) public void testAppSubmissionAndNodeUpdate() throws Exception { MockRM rm = new MockRM(conf); rm.start();//from w w w . j ava 2 s . c o m ClientRMService rmService = rm.getClientRMService(); GetApplicationsRequest getRequest = GetApplicationsRequest .newInstance(EnumSet.of(YarnApplicationState.KILLED)); ApplicationId appId1 = getApplicationId(100); ApplicationId appId2 = getApplicationId(101); ApplicationACLsManager mockAclsManager = mock(ApplicationACLsManager.class); when(mockAclsManager.checkAccess(UserGroupInformation.getCurrentUser(), ApplicationAccessType.VIEW_APP, null, appId1)).thenReturn(true); SubmitApplicationRequest submitRequest1 = mockSubmitAppRequest(appId1, null, null); SubmitApplicationRequest submitRequest2 = mockSubmitAppRequest(appId2, null, null); try { rmService.submitApplication(submitRequest1); rmService.submitApplication(submitRequest2); } catch (YarnException e) { Assert.fail("Exception is not expected."); } assertEquals("Incorrect number of apps in the RM", 0, rmService.getApplications(getRequest).getApplicationList().size()); Thread.sleep(1000); //test persistance of schedulerapplication Map<String, SchedulerApplication> schedulerApplications = RMUtilities.getSchedulerApplications(); assertEquals("db does not contain good number of schedulerApplications", 2, schedulerApplications.size()); MockNM nm1 = rm.registerNode("host1:1234", 5120); MockNM nm2 = rm.registerNode("host2:5678", 10240); NodeHeartbeatResponse nodeHeartbeat = nm1.nodeHeartbeat(true); Assert.assertEquals(4000, nodeHeartbeat.getNextHeartBeatInterval()); NodeHeartbeatResponse nodeHeartbeat2 = nm2.nodeHeartbeat(true); Assert.assertEquals(4000, nodeHeartbeat2.getNextHeartBeatInterval()); Thread.sleep(2000); rm.stop(); Thread.sleep(2000); }
From source file:io.hops.metadata.util.TestHopYarnAPIUtilities.java
License:Apache License
@Test(timeout = 30000) public void testForceKillApplication() throws Exception { MockRM rm = new MockRM(conf); rm.start();// w ww . j a va2 s. c om ClientRMService rmService = rm.getClientRMService(); GetApplicationsRequest getRequest = GetApplicationsRequest .newInstance(EnumSet.of(YarnApplicationState.KILLED)); ApplicationId appId1 = getApplicationId(100); ApplicationACLsManager mockAclsManager = mock(ApplicationACLsManager.class); when(mockAclsManager.checkAccess(UserGroupInformation.getCurrentUser(), ApplicationAccessType.VIEW_APP, null, appId1)).thenReturn(true); SubmitApplicationRequest submitRequest1 = mockSubmitAppRequest(appId1, null, null); try { rmService.submitApplication(submitRequest1); } catch (YarnException e) { Assert.fail("Exception is not expected."); } assertEquals("Incorrect number of apps in the RM", 0, rmService.getApplications(getRequest).getApplicationList().size()); Thread.sleep(1000); //TODO: check what have to be present in the db Thread.sleep(2000); rm.stop(); Thread.sleep(2000); }
From source file:org.apache.zeppelin.integration.FlinkIntegrationTest.java
License:Apache License
@Test public void testLocalMode() throws IOException, YarnException, InterpreterException { InterpreterSetting flinkInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("flink"); flinkInterpreterSetting.setProperty("FLINK_HOME", flinkHome); flinkInterpreterSetting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath()); testInterpreterBasics();// w w w . j av a2s .c o m // no yarn application launched GetApplicationsRequest request = GetApplicationsRequest .newInstance(EnumSet.of(YarnApplicationState.RUNNING)); GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService() .getApplications(request); assertEquals(0, response.getApplicationList().size()); interpreterSettingManager.close(); }
From source file:org.apache.zeppelin.integration.FlinkIntegrationTest.java
License:Apache License
public void testYarnMode() throws IOException, InterpreterException, YarnException { InterpreterSetting flinkInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("flink"); flinkInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath()); flinkInterpreterSetting.setProperty("FLINK_HOME", flinkHome); flinkInterpreterSetting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath()); flinkInterpreterSetting.setProperty("flink.execution.mode", "YARN"); testInterpreterBasics();/*from ww w. j a v a 2 s.c o m*/ // 1 yarn application launched GetApplicationsRequest request = GetApplicationsRequest .newInstance(EnumSet.of(YarnApplicationState.RUNNING)); GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService() .getApplications(request); assertEquals(1, response.getApplicationList().size()); interpreterSettingManager.close(); }
From source file:org.apache.zeppelin.integration.SparkIntegrationTest.java
License:Apache License
@Test public void testLocalMode() throws IOException, YarnException, InterpreterException, XmlPullParserException { InterpreterSetting sparkInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("spark"); sparkInterpreterSetting.setProperty("master", "local[*]"); sparkInterpreterSetting.setProperty("SPARK_HOME", sparkHome); sparkInterpreterSetting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath()); sparkInterpreterSetting.setProperty("zeppelin.spark.useHiveContext", "false"); sparkInterpreterSetting.setProperty("zeppelin.pyspark.useIPython", "false"); sparkInterpreterSetting.setProperty("zeppelin.spark.scala.color", "false"); sparkInterpreterSetting.setProperty("zeppelin.spark.deprecatedMsg.show", "false"); testInterpreterBasics();/*ww w.j a v a 2 s .co m*/ // no yarn application launched GetApplicationsRequest request = GetApplicationsRequest .newInstance(EnumSet.of(YarnApplicationState.RUNNING)); GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService() .getApplications(request); assertEquals(0, response.getApplicationList().size()); interpreterSettingManager.close(); }
From source file:org.apache.zeppelin.integration.SparkIntegrationTest.java
License:Apache License
@Test public void testYarnClientMode() throws IOException, YarnException, InterruptedException, InterpreterException, XmlPullParserException { InterpreterSetting sparkInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("spark"); sparkInterpreterSetting.setProperty("master", "yarn-client"); sparkInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath()); sparkInterpreterSetting.setProperty("SPARK_HOME", sparkHome); sparkInterpreterSetting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath()); sparkInterpreterSetting.setProperty("zeppelin.spark.useHiveContext", "false"); sparkInterpreterSetting.setProperty("zeppelin.pyspark.useIPython", "false"); sparkInterpreterSetting.setProperty("PYSPARK_PYTHON", getPythonExec()); sparkInterpreterSetting.setProperty("spark.driver.memory", "512m"); sparkInterpreterSetting.setProperty("zeppelin.spark.scala.color", "false"); sparkInterpreterSetting.setProperty("zeppelin.spark.deprecatedMsg.show", "false"); testInterpreterBasics();//w ww . j av a2 s. co m // 1 yarn application launched GetApplicationsRequest request = GetApplicationsRequest .newInstance(EnumSet.of(YarnApplicationState.RUNNING)); GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService() .getApplications(request); assertEquals(1, response.getApplicationList().size()); interpreterSettingManager.close(); waitForYarnAppCompleted(30 * 1000); }
From source file:org.apache.zeppelin.integration.SparkIntegrationTest.java
License:Apache License
/**
 * Polls the RM once a second until no RUNNING YARN applications remain, or the
 * timeout elapses, and fails the test if the app never completed.
 *
 * Fix: an {@link InterruptedException} during the sleep was swallowed with
 * {@code printStackTrace()} and polling continued; now the interrupt status is
 * restored and the wait stops, per standard interrupt-handling practice.
 *
 * @param timeout maximum wait in milliseconds
 * @throws YarnException if querying the ResourceManager fails
 */
private void waitForYarnAppCompleted(int timeout) throws YarnException {
    long start = System.currentTimeMillis();
    boolean yarnAppCompleted = false;
    while ((System.currentTimeMillis() - start) < timeout) {
        GetApplicationsRequest request =
            GetApplicationsRequest.newInstance(EnumSet.of(YarnApplicationState.RUNNING));
        GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager()
            .getClientRMService().getApplications(request);
        if (response.getApplicationList().isEmpty()) {
            yarnAppCompleted = true;
            break;
        }
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            // Restore the interrupt flag and stop waiting instead of swallowing it.
            Thread.currentThread().interrupt();
            break;
        }
    }
    assertTrue("Yarn app is not completed in " + timeout + " milliseconds.", yarnAppCompleted);
}
From source file:org.apache.zeppelin.integration.SparkIntegrationTest.java
License:Apache License
@Test public void testYarnClusterMode() throws IOException, YarnException, InterruptedException, InterpreterException, XmlPullParserException { InterpreterSetting sparkInterpreterSetting = interpreterSettingManager.getInterpreterSettingByName("spark"); sparkInterpreterSetting.setProperty("master", "yarn-cluster"); sparkInterpreterSetting.setProperty("HADOOP_CONF_DIR", hadoopCluster.getConfigPath()); sparkInterpreterSetting.setProperty("SPARK_HOME", sparkHome); sparkInterpreterSetting.setProperty("ZEPPELIN_CONF_DIR", zeppelin.getZeppelinConfDir().getAbsolutePath()); sparkInterpreterSetting.setProperty("zeppelin.spark.useHiveContext", "false"); sparkInterpreterSetting.setProperty("zeppelin.pyspark.useIPython", "false"); sparkInterpreterSetting.setProperty("PYSPARK_PYTHON", getPythonExec()); sparkInterpreterSetting.setProperty("spark.driver.memory", "512m"); sparkInterpreterSetting.setProperty("zeppelin.spark.scala.color", "false"); sparkInterpreterSetting.setProperty("zeppelin.spark.deprecatedMsg.show", "false"); testInterpreterBasics();/*from w w w .java2 s .co m*/ // 1 yarn application launched GetApplicationsRequest request = GetApplicationsRequest .newInstance(EnumSet.of(YarnApplicationState.RUNNING)); GetApplicationsResponse response = hadoopCluster.getYarnCluster().getResourceManager().getClientRMService() .getApplications(request); assertEquals(1, response.getApplicationList().size()); interpreterSettingManager.close(); waitForYarnAppCompleted(30 * 1000); }