List of usage examples for org.apache.hadoop.yarn.api.records ApplicationReport getYarnApplicationState
@Public @Stable public abstract YarnApplicationState getYarnApplicationState();
Returns the YarnApplicationState of the application.
From source file: org.apache.tez.dag.api.client.DAGClientImpl.java
License:Apache License
/**
 * Marks the DAG as completed when the owning YARN application has reached a
 * terminal state (FINISHED, FAILED, or KILLED). A null report leaves the
 * completion flag untouched.
 */
private void checkAndSetDagCompletionStatus() {
    ApplicationReport report = realClient.getApplicationReportInternal();
    if (report == null) {
        return;
    }
    // Once the application itself is terminal, the DAG cannot still be running.
    switch (report.getYarnApplicationState()) {
    case FINISHED:
    case FAILED:
    case KILLED:
        dagCompleted = true;
        break;
    default:
        break;
    }
}
From source file:org.apache.tez.dag.api.client.rpc.DAGClientRPCImpl.java
License:Apache License
/**
 * Fetches the current {@link ApplicationReport} for this client's application.
 *
 * @return the report from the framework client
 * @throws TezException if the underlying YARN lookup fails
 * @throws IOException  on communication errors
 */
ApplicationReport getAppReport() throws IOException, TezException {
    final ApplicationReport report;
    try {
        report = frameworkClient.getApplicationReport(appId);
    } catch (YarnException e) {
        // Surface YARN-level failures as the Tez client exception type.
        throw new TezException(e);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("App: " + appId + " in state: " + report.getYarnApplicationState());
    }
    return report;
}
From source file:org.apache.tez.mapreduce.TestMRRJobsDAGApi.java
License:Apache License
/**
 * Stops the given Tez session, then polls YARN until the backing application
 * reaches a terminal state and asserts it finished successfully.
 *
 * Fixes over the original: the polling loop now sleeps between RM calls
 * instead of busy-spinning, and the YarnClient is stopped in a finally block
 * instead of being leaked.
 */
private void stopAndVerifyYarnApp(TezClient tezSession) throws TezException, IOException, YarnException {
    ApplicationId appId = tezSession.getAppMasterApplicationId();
    tezSession.stop();
    Assert.assertEquals(TezAppMasterStatus.SHUTDOWN, tezSession.getAppMasterStatus());

    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(mrrTezCluster.getConfig());
    yarnClient.start();
    try {
        ApplicationReport appReport;
        while (true) {
            appReport = yarnClient.getApplicationReport(appId);
            YarnApplicationState state = appReport.getYarnApplicationState();
            if (state == YarnApplicationState.FINISHED
                || state == YarnApplicationState.FAILED
                || state == YarnApplicationState.KILLED) {
                break;
            }
            try {
                // Back off between polls; the original spun in a tight loop.
                Thread.sleep(100L);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new YarnException("Interrupted while waiting for app " + appId + " to finish", e);
            }
        }
        Assert.assertEquals(YarnApplicationState.FINISHED, appReport.getYarnApplicationState());
        Assert.assertEquals(FinalApplicationStatus.SUCCEEDED, appReport.getFinalApplicationStatus());
    } finally {
        yarnClient.stop();
    }
}
From source file:org.apache.tez.mapreduce.TestMRRJobsDAGApi.java
License:Apache License
/**
 * Core driver for the MRR sleep-job DAG submission tests. Builds a three-stage
 * (map -> intermediate reduce -> reduce) sleep DAG from MR configs and submits
 * it either via a non-session AM or via RPC to a (possibly reused) session.
 *
 * @param dagViaRPC                submit the DAG through a Tez session (RPC) rather
 *                                 than a one-shot non-session AM
 * @param killDagWhileRunning      kill the DAG/session once it reaches RUNNING
 * @param closeSessionBeforeSubmit stop the session before submitting and verify the
 *                                 app shuts down cleanly (returns null in that path)
 * @param reUseTezSession          existing session to reuse; null to create a new one
 *                                 (only valid when dagViaRPC is true)
 * @param genSplitsInAM            generate input splits in the AM instead of via
 *                                 legacy pre-generated splits
 * @param initializerClass         custom InputInitializer; null selects
 *                                 MRInputAMSplitGenerator when genSplitsInAM is set
 * @param additionalLocalResources extra AM local resources to register before submit
 * @return final DAG state, or null when closeSessionBeforeSubmit short-circuits
 */
public State testMRRSleepJobDagSubmitCore(boolean dagViaRPC, boolean killDagWhileRunning,
        boolean closeSessionBeforeSubmit, TezClient reUseTezSession, boolean genSplitsInAM,
        Class<? extends InputInitializer> initializerClass,
        Map<String, LocalResource> additionalLocalResources)
        throws IOException, InterruptedException, TezException, ClassNotFoundException, YarnException {
    LOG.info("\n\n\nStarting testMRRSleepJobDagSubmit().");

    // --- Stage configs: 1 map, 1 intermediate reduce, 1 final reduce, all sleeping 1ms.
    JobConf stage1Conf = new JobConf(mrrTezCluster.getConfig());
    JobConf stage2Conf = new JobConf(mrrTezCluster.getConfig());
    JobConf stage3Conf = new JobConf(mrrTezCluster.getConfig());

    stage1Conf.setLong(MRRSleepJob.MAP_SLEEP_TIME, 1);
    stage1Conf.setInt(MRRSleepJob.MAP_SLEEP_COUNT, 1);
    stage1Conf.setInt(MRJobConfig.NUM_MAPS, 1);
    stage1Conf.set(MRJobConfig.MAP_CLASS_ATTR, SleepMapper.class.getName());
    stage1Conf.set(MRJobConfig.MAP_OUTPUT_KEY_CLASS, IntWritable.class.getName());
    stage1Conf.set(MRJobConfig.MAP_OUTPUT_VALUE_CLASS, IntWritable.class.getName());
    stage1Conf.set(MRJobConfig.INPUT_FORMAT_CLASS_ATTR, SleepInputFormat.class.getName());
    stage1Conf.set(MRJobConfig.PARTITIONER_CLASS_ATTR, MRRSleepJobPartitioner.class.getName());

    stage2Conf.setLong(MRRSleepJob.REDUCE_SLEEP_TIME, 1);
    stage2Conf.setInt(MRRSleepJob.REDUCE_SLEEP_COUNT, 1);
    stage2Conf.setInt(MRJobConfig.NUM_REDUCES, 1);
    stage2Conf.set(MRJobConfig.REDUCE_CLASS_ATTR, ISleepReducer.class.getName());
    stage2Conf.set(MRJobConfig.MAP_OUTPUT_KEY_CLASS, IntWritable.class.getName());
    stage2Conf.set(MRJobConfig.MAP_OUTPUT_VALUE_CLASS, IntWritable.class.getName());
    stage2Conf.set(MRJobConfig.PARTITIONER_CLASS_ATTR, MRRSleepJobPartitioner.class.getName());

    stage3Conf.setLong(MRRSleepJob.REDUCE_SLEEP_TIME, 1);
    stage3Conf.setInt(MRRSleepJob.REDUCE_SLEEP_COUNT, 1);
    stage3Conf.setInt(MRJobConfig.NUM_REDUCES, 1);
    stage3Conf.set(MRJobConfig.REDUCE_CLASS_ATTR, SleepReducer.class.getName());
    stage3Conf.set(MRJobConfig.MAP_OUTPUT_KEY_CLASS, IntWritable.class.getName());
    stage3Conf.set(MRJobConfig.MAP_OUTPUT_VALUE_CLASS, IntWritable.class.getName());

    // Translate the MR-style configs into their Tez equivalents.
    MRHelpers.translateMRConfToTez(stage1Conf);
    MRHelpers.translateMRConfToTez(stage2Conf);
    MRHelpers.translateMRConfToTez(stage3Conf);
    MRHelpers.configureMRApiUsage(stage1Conf);
    MRHelpers.configureMRApiUsage(stage2Conf);
    MRHelpers.configureMRApiUsage(stage3Conf);

    // Random staging dir per run to avoid collisions between test invocations.
    Path remoteStagingDir = remoteFs
            .makeQualified(new Path("/tmp", String.valueOf(new Random().nextInt(100000))));
    TezClientUtils.ensureStagingDirExists(conf, remoteStagingDir);

    UserPayload stage1Payload = TezUtils.createUserPayloadFromConf(stage1Conf);
    UserPayload stage2Payload = TezUtils.createUserPayloadFromConf(stage2Conf);
    UserPayload stage3Payload = TezUtils.createUserPayloadFromConf(stage3Conf);

    DAG dag = DAG.create("testMRRSleepJobDagSubmit-" + random.nextInt(1000));

    // When generating splits in the AM, fall back to MRInputAMSplitGenerator
    // unless the caller supplied a custom initializer.
    Class<? extends InputInitializer> inputInitializerClazz = genSplitsInAM
            ? (initializerClass == null ? MRInputAMSplitGenerator.class : initializerClass)
            : null;
    LOG.info("Using initializer class: " + initializerClass);

    // --- Input data source: legacy pre-generated splits vs AM-side generation.
    DataSourceDescriptor dsd;
    if (!genSplitsInAM) {
        dsd = MRInputHelpers.configureMRInputWithLegacySplitGeneration(stage1Conf, remoteStagingDir, true);
    } else {
        if (initializerClass == null) {
            dsd = MRInputLegacy.createConfigBuilder(stage1Conf, SleepInputFormat.class).build();
        } else {
            InputInitializerDescriptor iid = InputInitializerDescriptor.create(inputInitializerClazz.getName());
            dsd = MRInputLegacy.createConfigBuilder(stage1Conf, SleepInputFormat.class)
                    .setCustomInitializerDescriptor(iid).build();
        }
    }

    // --- Vertices (map / ireduce / reduce), 256MB x 1 core each.
    Vertex stage1Vertex = Vertex.create("map",
            ProcessorDescriptor.create(MapProcessor.class.getName()).setUserPayload(stage1Payload),
            dsd.getNumberOfShards(), Resource.newInstance(256, 1));
    stage1Vertex.addDataSource("MRInput", dsd);
    Vertex stage2Vertex = Vertex.create("ireduce",
            ProcessorDescriptor.create(ReduceProcessor.class.getName()).setUserPayload(stage2Payload), 1,
            Resource.newInstance(256, 1));
    Vertex stage3Vertex = Vertex.create("reduce",
            ProcessorDescriptor.create(ReduceProcessor.class.getName()).setUserPayload(stage3Payload), 1,
            Resource.newInstance(256, 1));
    stage3Conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_CONVERT_USER_PAYLOAD_TO_HISTORY_TEXT, true);
    DataSinkDescriptor dataSinkDescriptor = MROutputLegacy
            .createConfigBuilder(stage3Conf, NullOutputFormat.class).build();
    Assert.assertFalse(dataSinkDescriptor.getOutputDescriptor().getHistoryText().isEmpty());
    stage3Vertex.addDataSink("MROutput", dataSinkDescriptor);

    // TODO env, resources

    dag.addVertex(stage1Vertex);
    dag.addVertex(stage2Vertex);
    dag.addVertex(stage3Vertex);

    // --- Scatter-gather edges: map -> ireduce and ireduce -> reduce.
    Edge edge1 = Edge.create(stage1Vertex, stage2Vertex,
            EdgeProperty.create(DataMovementType.SCATTER_GATHER, DataSourceType.PERSISTED,
                    SchedulingType.SEQUENTIAL,
                    OutputDescriptor.create(OrderedPartitionedKVOutput.class.getName()).setUserPayload(stage2Payload),
                    InputDescriptor.create(OrderedGroupedInputLegacy.class.getName()).setUserPayload(stage2Payload)));
    Edge edge2 = Edge.create(stage2Vertex, stage3Vertex,
            EdgeProperty.create(DataMovementType.SCATTER_GATHER, DataSourceType.PERSISTED,
                    SchedulingType.SEQUENTIAL,
                    OutputDescriptor.create(OrderedPartitionedKVOutput.class.getName()).setUserPayload(stage3Payload),
                    InputDescriptor.create(OrderedGroupedInputLegacy.class.getName()).setUserPayload(stage3Payload)));

    dag.addEdge(edge1);
    dag.addEdge(edge2);

    TezConfiguration tezConf = new TezConfiguration(mrrTezCluster.getConfig());
    tezConf.set(TezConfiguration.TEZ_AM_STAGING_DIR, remoteStagingDir.toString());

    DAGClient dagClient = null;
    boolean reuseSession = reUseTezSession != null;
    TezClient tezSession = null;
    // Session reuse only makes sense for RPC submission.
    if (!dagViaRPC) {
        Preconditions.checkArgument(reuseSession == false);
    }
    if (!reuseSession) {
        TezConfiguration tempTezconf = new TezConfiguration(tezConf);
        if (!dagViaRPC) {
            tempTezconf.setBoolean(TezConfiguration.TEZ_AM_SESSION_MODE, false);
        } else {
            tempTezconf.setBoolean(TezConfiguration.TEZ_AM_SESSION_MODE, true);
        }
        tezSession = TezClient.create("testsession", tempTezconf);
        tezSession.start();
    } else {
        tezSession = reUseTezSession;
    }

    if (!dagViaRPC) {
        // TODO Use utility method post TEZ-205 to figure out AM arguments etc.
        dagClient = tezSession.submitDAG(dag);
    }

    // Path that stops the session before submission: wait for the app to reach
    // RUNNING, shut the session down, then verify the app finishes cleanly.
    if (dagViaRPC && closeSessionBeforeSubmit) {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(mrrTezCluster.getConfig());
        yarnClient.start();
        boolean sentKillSession = false;
        while (true) {
            Thread.sleep(500l);
            ApplicationReport appReport = yarnClient
                    .getApplicationReport(tezSession.getAppMasterApplicationId());
            if (appReport == null) {
                continue;
            }
            YarnApplicationState appState = appReport.getYarnApplicationState();
            if (!sentKillSession) {
                if (appState == YarnApplicationState.RUNNING) {
                    tezSession.stop();
                    sentKillSession = true;
                }
            } else {
                if (appState == YarnApplicationState.FINISHED || appState == YarnApplicationState.KILLED
                        || appState == YarnApplicationState.FAILED) {
                    LOG.info("Application completed after sending session shutdown"
                            + ", yarnApplicationState=" + appState + ", finalAppStatus="
                            + appReport.getFinalApplicationStatus());
                    // A voluntary shutdown must still end FINISHED/SUCCEEDED.
                    Assert.assertEquals(YarnApplicationState.FINISHED, appState);
                    Assert.assertEquals(FinalApplicationStatus.SUCCEEDED,
                            appReport.getFinalApplicationStatus());
                    break;
                }
            }
        }
        yarnClient.stop();
        return null;
    }

    if (dagViaRPC) {
        LOG.info("Submitting dag to tez session with appId=" + tezSession.getAppMasterApplicationId()
                + " and Dag Name=" + dag.getName());
        if (additionalLocalResources != null) {
            tezSession.addAppMasterLocalFiles(additionalLocalResources);
        }
        dagClient = tezSession.submitDAG(dag);
        Assert.assertEquals(TezAppMasterStatus.RUNNING, tezSession.getAppMasterStatus());
    }

    // Poll until completion, optionally killing the DAG/session while RUNNING.
    DAGStatus dagStatus = dagClient.getDAGStatus(null);
    while (!dagStatus.isCompleted()) {
        LOG.info("Waiting for job to complete. Sleeping for 500ms."
                + " Current state: " + dagStatus.getState());
        Thread.sleep(500l);
        if (killDagWhileRunning && dagStatus.getState() == DAGStatus.State.RUNNING) {
            LOG.info("Killing running dag/session");
            if (dagViaRPC) {
                tezSession.stop();
            } else {
                dagClient.tryKillDAG();
            }
        }
        dagStatus = dagClient.getDAGStatus(null);
    }
    // Only tear down sessions this call created; reused sessions belong to the caller.
    if (!reuseSession) {
        tezSession.stop();
    }
    return dagStatus.getState();
}
From source file:org.apache.tez.test.MiniTezCluster.java
License:Apache License
private void waitForAppsToFinish() { YarnClient yarnClient = YarnClient.createYarnClient(); yarnClient.init(getConfig());//from ww w .j av a2s. c o m yarnClient.start(); try { while (true) { List<ApplicationReport> appReports = yarnClient.getApplications(); Collection<ApplicationReport> unCompletedApps = Collections2.filter(appReports, new Predicate<ApplicationReport>() { @Override public boolean apply(ApplicationReport appReport) { return EnumSet .of(YarnApplicationState.NEW, YarnApplicationState.NEW_SAVING, YarnApplicationState.SUBMITTED, YarnApplicationState.ACCEPTED, YarnApplicationState.RUNNING) .contains(appReport.getYarnApplicationState()); } }); if (unCompletedApps.size() == 0) { break; } LOG.info("wait for applications to finish in MiniTezCluster"); Thread.sleep(1000); } } catch (Exception e) { e.printStackTrace(); } finally { yarnClient.stop(); } }
From source file:org.apache.tez.test.TestExceptionPropagation.java
License:Apache License
/** * verify the diagnostics in {@link DAGStatus} is correct in non-session mode, * and also verify that diagnostics from {@link DAGStatus} should match that * from {@link ApplicationReport}/* w w w. j a v a 2 s .c om*/ * * @throws Exception */ @Test(timeout = 120000) public void testExceptionPropagationNonSession() throws Exception { try { startMiniTezCluster(); startNonSessionClient(); ExceptionLocation exLocation = ExceptionLocation.EM_GetNumSourceTaskPhysicalOutputs; LOG.info("NonSession mode, Test for Exception from:" + exLocation.name()); DAG dag = createDAG(exLocation); DAGClient dagClient = tezClient.submitDAG(dag); DAGStatus dagStatus = dagClient.waitForCompletion(); String diagnostics = StringUtils.join(dagStatus.getDiagnostics(), ","); LOG.info("Diagnostics:" + diagnostics); assertTrue(diagnostics.contains(exLocation.name())); // wait for app complete (unregisterApplicationMaster is done) ApplicationId appId = tezClient.getAppMasterApplicationId(); YarnClient yarnClient = YarnClient.createYarnClient(); yarnClient.init(tezConf); yarnClient.start(); Set<YarnApplicationState> FINAL_APPLICATION_STATES = EnumSet.of(YarnApplicationState.KILLED, YarnApplicationState.FAILED, YarnApplicationState.FINISHED); ApplicationReport appReport = null; while (true) { appReport = yarnClient.getApplicationReport(appId); Thread.sleep(1000); LOG.info("FinalAppStatus:" + appReport.getFinalApplicationStatus()); LOG.info("Diagnostics from appReport:" + appReport.getDiagnostics()); if (FINAL_APPLICATION_STATES.contains(appReport.getYarnApplicationState())) { break; } } // wait for 1 second and call getApplicationReport again to ensure get the // diagnostics // TODO remove it after YARN-2560 Thread.sleep(1000); appReport = yarnClient.getApplicationReport(appId); LOG.info("FinalAppStatus:" + appReport.getFinalApplicationStatus()); LOG.info("Diagnostics from appReport:" + appReport.getDiagnostics()); assertTrue(appReport.getDiagnostics().contains(exLocation.name())); // use "\n" as 
separator, because we also use it in Tez internally when // assembling the application diagnostics. assertEquals(StringUtils.join(dagStatus.getDiagnostics(), "\n").trim(), appReport.getDiagnostics().trim()); } finally { stopNonSessionClient(); Thread.sleep(10 * 1000); stopTezMiniCluster(); } }
From source file:org.apache.tez.tests.MiniTezClusterWithTimeline.java
License:Apache License
private void waitForAppsToFinish() { YarnClient yarnClient = YarnClient.createYarnClient(); yarnClient.init(getConfig());//from w ww .j a v a 2 s . c om yarnClient.start(); try { while (true) { List<ApplicationReport> appReports = yarnClient.getApplications(); Collection<ApplicationReport> unCompletedApps = Collections2.filter(appReports, new Predicate<ApplicationReport>() { @Override public boolean apply(ApplicationReport appReport) { return EnumSet .of(YarnApplicationState.NEW, YarnApplicationState.NEW_SAVING, YarnApplicationState.SUBMITTED, YarnApplicationState.ACCEPTED, YarnApplicationState.RUNNING) .contains(appReport.getYarnApplicationState()); } }); if (unCompletedApps.size() == 0) { break; } LOG.info("wait for applications to finish in MiniTezClusterWithTimeline"); Thread.sleep(1000); } } catch (Exception e) { e.printStackTrace(); } finally { yarnClient.stop(); } }
From source file:org.apache.tez.tests.TestExternalTezServicesErrors.java
License:Apache License
/**
 * Runs a JoinValidate DAG configured to trigger a fatal plugin error, asserts
 * the DAG ends in ERROR with the expected diagnostics, then verifies via YARN
 * that the application itself ends FINISHED with a FAILED final status and
 * carries the expected diagnostics on its attempt report.
 *
 * @param methodName          used to name the session and tag log lines
 * @param lhsExecutionContext execution context for the left-hand-side vertex
 * @param dagNameSuffix       suffix appended to the DAG name
 * @param expectedDiagMessages messages that must appear in the diagnostics
 */
private void testFatalError(String methodName, Vertex.VertexExecutionContext lhsExecutionContext,
        String dagNameSuffix, List<String> expectedDiagMessages)
        throws IOException, TezException, YarnException, InterruptedException {
    TezConfiguration tezClientConf = new TezConfiguration(extServiceTestHelper.getConfForJobs());
    TezClient tezClient = TezClient
            .newBuilder(TestExternalTezServicesErrors.class.getSimpleName() + methodName + "_session",
                    tezClientConf)
            .setIsSession(true).setServicePluginDescriptor(servicePluginsDescriptor).build();

    // appId stays null if the session fails before submission; the YARN-side
    // verification below is skipped in that case.
    ApplicationId appId = null;
    try {
        tezClient.start();
        LOG.info("TezSessionStarted for " + methodName);
        tezClient.waitTillReady();
        LOG.info("TezSession ready for submission for " + methodName);

        JoinValidateConfigured joinValidate = new JoinValidateConfigured(EXECUTION_CONTEXT_DEFAULT,
                lhsExecutionContext, EXECUTION_CONTEXT_EXT_SERVICE_PUSH, EXECUTION_CONTEXT_EXT_SERVICE_PUSH,
                dagNameSuffix);

        DAG dag = joinValidate.createDag(new TezConfiguration(extServiceTestHelper.getConfForJobs()),
                HASH_JOIN_EXPECTED_RESULT_PATH, HASH_JOIN_OUTPUT_PATH, 3);

        DAGClient dagClient = tezClient.submitDAG(dag);
        DAGStatus dagStatus = dagClient
                .waitForCompletionWithStatusUpdates(Sets.newHashSet(StatusGetOpts.GET_COUNTERS));
        // The injected fatal error must surface as an ERROR DAG state.
        assertEquals(DAGStatus.State.ERROR, dagStatus.getState());
        boolean foundDiag = false;
        for (String diag : dagStatus.getDiagnostics()) {
            foundDiag = checkDiag(diag, expectedDiagMessages);
            if (foundDiag) {
                break;
            }
        }
        appId = tezClient.getAppMasterApplicationId();
        assertTrue(foundDiag);
    } catch (InterruptedException e) {
        // NOTE(review): interrupt is swallowed here without restoring the
        // interrupt flag, and execution falls through to the YARN verification
        // below — presumably intentional for the test, but worth confirming.
        e.printStackTrace();
    } finally {
        tezClient.stop();
    }

    // Verify the state of the application.
    if (appId != null) {
        YarnClient yarnClient = YarnClient.createYarnClient();
        try {
            yarnClient.init(tezClientConf);
            yarnClient.start();
            // Poll every 200ms until the app reaches a terminal state.
            ApplicationReport appReport = yarnClient.getApplicationReport(appId);
            YarnApplicationState appState = appReport.getYarnApplicationState();
            while (!EnumSet
                    .of(YarnApplicationState.FINISHED, YarnApplicationState.FAILED, YarnApplicationState.KILLED)
                    .contains(appState)) {
                Thread.sleep(200L);
                appReport = yarnClient.getApplicationReport(appId);
                appState = appReport.getYarnApplicationState();
            }

            // TODO Workaround for YARN-4554. AppReport does not provide diagnostics - need to fetch them from ApplicationAttemptReport
            ApplicationAttemptId appAttemptId = appReport.getCurrentApplicationAttemptId();
            ApplicationAttemptReport appAttemptReport = yarnClient.getApplicationAttemptReport(appAttemptId);
            String diag = appAttemptReport.getDiagnostics();
            // The AM unregisters with FAILED, so YARN reports FINISHED + FAILED.
            assertEquals(FinalApplicationStatus.FAILED, appReport.getFinalApplicationStatus());
            assertEquals(YarnApplicationState.FINISHED, appReport.getYarnApplicationState());
            checkDiag(diag, expectedDiagMessages);
        } finally {
            yarnClient.stop();
        }
    }
}
From source file:org.deeplearning4j.iterativereduce.runtime.yarn.client.Client.java
License:Apache License
/**
 * Submits the IterativeReduce application master to YARN and blocks until the
 * application reaches a terminal state.
 *
 * Flow: load the job properties file (args[0] or the default), validate it,
 * check the input path exists, obtain an ApplicationId from the RM, copy
 * resources and config to the filesystem, submit the AM, then poll the RM
 * every 2 seconds until the app finishes.
 *
 * TODO: consider the scenarios where we dont get enough containers
 * - we need to re-submit the job till we get the containers alloc'd
 *
 * @param args optional: args[0] is the path to the configuration file
 * @return 0 on FINISHED/SUCCEEDED, -1 on failure or kill
 */
@Override
public int run(String[] args) throws Exception {
    if (args.length < 1)
        LOG.info("No configuration file specified, using default (" + ConfigFields.DEFAULT_CONFIG_FILE + ")");

    long startTime = System.currentTimeMillis();
    String configFile = (args.length < 1) ? ConfigFields.DEFAULT_CONFIG_FILE : args[0];
    Properties props = new Properties();
    Configuration conf = getConf();

    // NOTE(review): the FileInputStream below is never closed — presumably a
    // short-lived client process, but worth confirming.
    try {
        FileInputStream fis = new FileInputStream(configFile);
        props.load(fis);
    } catch (FileNotFoundException ex) {
        throw ex; // TODO: be nice
    } catch (IOException ex) {
        throw ex; // TODO: be nice
    }

    // Make sure we have some bare minimums
    ConfigFields.validateConfig(props);

    if (LOG.isDebugEnabled()) {
        LOG.debug("Loaded configuration: ");
        for (Map.Entry<Object, Object> entry : props.entrySet()) {
            LOG.debug(entry.getKey() + "=" + entry.getValue());
        }
    }

    // TODO: make sure input file(s), libs, etc. actually exist!
    // Ensure our input path exists
    Path p = new Path(props.getProperty(ConfigFields.APP_INPUT_PATH));
    FileSystem fs = FileSystem.get(conf);
    if (!fs.exists(p))
        throw new FileNotFoundException("Input path not found: " + p.toString() + " (in " + fs.getUri() + ")");

    LOG.info("Using input path: " + p.toString());

    // Connect
    ResourceManagerHandler rmHandler = new ResourceManagerHandler(conf, null);
    rmHandler.getClientResourceManager();

    // Create an Application request/ID
    ApplicationId appId = rmHandler.getApplicationId(); // Our AppId
    // Spaces in the app name break YARN resource paths; replace with underscores.
    String appName = props.getProperty(ConfigFields.APP_NAME, ConfigFields.DEFAULT_APP_NAME).replace(' ', '_');

    LOG.info("Got an application, id=" + appId + ", appName=" + appName);

    // Copy resources to [HD]FS
    LOG.debug("Copying resources to filesystem");
    Utils.copyLocalResourcesToFs(props, conf, appId, appName); // Local resources
    Utils.copyLocalResourceToFs(configFile, ConfigFields.APP_CONFIG_FILE, conf, appId, appName); // Config file

    try {
        Utils.copyLocalResourceToFs("log4j.properties", "log4j.properties", conf, appId, appName); // Log4j
    } catch (FileNotFoundException ex) {
        // Best-effort: the AM can run without a log4j config.
        LOG.warn("log4j.properties file not found");
    }

    // Create our context
    List<String> commands = Utils.getMasterCommand(conf, props);
    Map<String, LocalResource> localResources = Utils.getLocalResourcesForApplication(conf, appId, appName,
            props, LocalResourceVisibility.APPLICATION);

    // Submit app
    rmHandler.submitApplication(appId, appName, Utils.getEnvironment(conf, props), localResources, commands,
            Integer.parseInt(props.getProperty(ConfigFields.YARN_MEMORY, "512")));

    /*
     * TODO:
     * - look at updating this code region to make sure job is submitted!
     */

    StopWatch watch = new StopWatch();
    watch.start();

    // Wait for app to complete, polling the RM every 2 seconds.
    while (true) {
        Thread.sleep(2000);

        ApplicationReport report = rmHandler.getApplicationReport(appId);
        LOG.info("IterativeReduce report: " + " appId=" + appId.getId() + ", state: "
                + report.getYarnApplicationState().toString() + ", Running Time: " + watch.toString());

        // report.getDiagnostics()

        if (YarnApplicationState.FINISHED == report.getYarnApplicationState()) {
            LOG.info("Application finished in " + (System.currentTimeMillis() - startTime) + "ms");

            if (FinalApplicationStatus.SUCCEEDED == report.getFinalApplicationStatus()) {
                LOG.info("Application completed succesfully.");
                return 0;
            } else {
                LOG.info("Application completed with en error: " + report.getDiagnostics());
                return -1;
            }
        } else if (YarnApplicationState.FAILED == report.getYarnApplicationState()
                || YarnApplicationState.KILLED == report.getYarnApplicationState()) {
            LOG.info("Application completed with a failed or killed state: " + report.getDiagnostics());
            return -1;
        }
    }
}
From source file:org.dknight.app.UnmanagedAMLauncher.java
License:Apache License
/**
 * Submits an unmanaged application master to the ResourceManager, launches the
 * AM process locally once the app is ACCEPTED, then waits for the application
 * to reach a terminal state.
 *
 * @return true if the app ended FINISHED with final status SUCCEEDED
 * @throws IOException   on communication errors
 * @throws YarnException on YARN-side failures
 */
public boolean run() throws IOException, YarnException {
    LOG.info("Starting Client");
    // Connect to ResourceManager
    rmClient.start();
    try {
        // Create launch context for app master
        LOG.info("Setting up application submission context for ASM");
        ApplicationSubmissionContext appContext = rmClient.createApplication()
                .getApplicationSubmissionContext();

        ApplicationId appId = appContext.getApplicationId();
        // set the application name
        appContext.setApplicationName(appName);

        // Set the priority for the application master
        Priority pri = Records.newRecord(Priority.class);
        pri.setPriority(amPriority);
        appContext.setPriority(pri);

        // Set the queue to which this application is to be submitted in the RM
        appContext.setQueue(amQueue);

        // Set up the container launch context for the application master.
        // For an unmanaged AM this context is empty — YARN does not launch it.
        ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
        appContext.setAMContainerSpec(amContainer);

        // unmanaged AM
        appContext.setUnmanagedAM(true);
        LOG.info("Setting unmanaged AM");

        // Submit the application to the applications manager
        LOG.info("Submitting application to ASM");
        rmClient.submitApplication(appContext);

        // Monitor until the app is ACCEPTED, at which point an attempt exists
        // and the AM can be launched locally.
        ApplicationReport appReport = monitorApplication(appId, EnumSet.of(YarnApplicationState.ACCEPTED));
        ApplicationAttemptId attemptId = appReport.getCurrentApplicationAttemptId();
        LOG.info("Launching application with id: " + attemptId);

        // launch AM
        launchAM(attemptId);

        // Monitor the application for end state
        appReport = monitorApplication(appId, EnumSet.of(YarnApplicationState.KILLED,
                YarnApplicationState.FAILED, YarnApplicationState.FINISHED));
        YarnApplicationState appState = appReport.getYarnApplicationState();
        FinalApplicationStatus appStatus = appReport.getFinalApplicationStatus();

        LOG.info("App ended with state: " + appReport.getYarnApplicationState() + " and status: " + appStatus);

        // Success requires both the terminal state and the final status to agree.
        boolean success;
        if (YarnApplicationState.FINISHED == appState && FinalApplicationStatus.SUCCEEDED == appStatus) {
            LOG.info("Application has completed successfully.");
            success = true;
        } else {
            LOG.info("Application did finished unsuccessfully." + " YarnState=" + appState.toString()
                    + ", FinalStatus=" + appStatus.toString());
            success = false;
        }
        return success;
    } finally {
        rmClient.stop();
    }
}