List of usage examples for org.apache.hadoop.yarn.api.records FinalApplicationStatus UNDEFINED
FinalApplicationStatus UNDEFINED
To view the source code for org.apache.hadoop.yarn.api.records FinalApplicationStatus UNDEFINED, click the Source Link.
From source file:com.datatorrent.stram.cli.ApexCli.java
License:Apache License
private JSONObject getResource(StramAgent.StramUriSpec uriSpec, ApplicationReport appReport, WebServicesClient.WebServicesHandler handler) { if (appReport == null) { throw new CliException("No application selected"); }//from w ww .ja va2 s .c o m if (StringUtils.isEmpty(appReport.getTrackingUrl()) || appReport.getFinalApplicationStatus() != FinalApplicationStatus.UNDEFINED) { appReport = null; throw new CliException("Application terminated"); } WebServicesClient wsClient = new WebServicesClient(); try { return stramAgent.issueStramWebRequest(wsClient, appReport.getApplicationId().toString(), uriSpec, handler); } catch (Exception e) { // check the application status as above may have failed due application termination etc. if (appReport == currentApp) { currentApp = assertRunningApp(appReport); } throw new CliException( "Failed to request web service for appid " + appReport.getApplicationId().toString(), e); } }
From source file:com.datatorrent.stram.cli.ApexCliShutdownCommandTest.java
License:Apache License
private ApplicationReport mockRunningApplicationReport(String appId, String appName) { ApplicationReport app = mock(ApplicationReport.class); ApplicationId applicationId = mock(ApplicationId.class); when(applicationId.toString()).thenReturn(appId); when(app.getApplicationId()).thenReturn(applicationId); when(app.getName()).thenReturn(appName); when(app.getYarnApplicationState()).thenReturn(YarnApplicationState.RUNNING); when(app.getFinalApplicationStatus()).thenReturn(FinalApplicationStatus.UNDEFINED); when(app.getTrackingUrl()).thenReturn("http://example.com"); return app;//from w ww .j av a2 s. co m }
From source file:io.amient.yarn1.YarnClient.java
License:Open Source License
/**
 * This method should be called by the implementing application static main
 * method. It does all the work around creating a yarn application and
 * submitting the request to the yarn resource manager. The class given in
 * the appClass argument will be run inside the yarn-allocated master
 * container.
 *
 * @param appConfig       application configuration; also carries yarn1.* settings
 * @param masterClass     the ApplicationMaster implementation to launch
 * @param args            arguments forwarded to the master
 * @param awaitCompletion when true, block and poll until the app reaches a final state
 * @throws Exception on any failure reading configuration or talking to YARN
 */
public static void submitApplicationMaster(Properties appConfig, Class<? extends YarnMaster> masterClass,
        String[] args, Boolean awaitCompletion) throws Exception {
    log.info("Yarn1 App Configuration:");
    for (Object param : appConfig.keySet()) {
        log.info(param.toString() + " = " + appConfig.get(param).toString());
    }
    String yarnConfigPath = appConfig.getProperty("yarn1.site", "/etc/hadoop");
    String masterClassName = masterClass.getName();
    appConfig.setProperty("yarn1.master.class", masterClassName);
    String applicationName = appConfig.getProperty("yarn1.application.name", masterClassName);
    log.info("--------------------------------------------------------------");
    // Local mode: run the master in-process instead of submitting to YARN.
    if (Boolean.valueOf(appConfig.getProperty("yarn1.local.mode", "false"))) {
        YarnMaster.run(appConfig, args);
        return;
    }
    // Resource requirements for the ApplicationMaster container.
    int masterPriority = Integer.valueOf(
            appConfig.getProperty("yarn1.master.priority", String.valueOf(YarnMaster.DEFAULT_MASTER_PRIORITY)));
    int masterMemoryMb = Integer.valueOf(appConfig.getProperty("yarn1.master.memory.mb",
            String.valueOf(YarnMaster.DEFAULT_MASTER_MEMORY_MB)));
    int masterNumCores = Integer.valueOf(
            appConfig.getProperty("yarn1.master.num.cores", String.valueOf(YarnMaster.DEFAULT_MASTER_CORES)));
    String queue = appConfig.getProperty("yarn1.queue");
    // Load the Hadoop/YARN site configuration from the configured directory.
    Configuration yarnConfig = new YarnConfiguration();
    yarnConfig.addResource(new FileInputStream(yarnConfigPath + "/core-site.xml"));
    yarnConfig.addResource(new FileInputStream(yarnConfigPath + "/hdfs-site.xml"));
    yarnConfig.addResource(new FileInputStream(yarnConfigPath + "/yarn-site.xml"));
    // Overlay every application property onto the YARN configuration.
    for (Map.Entry<Object, Object> entry : appConfig.entrySet()) {
        yarnConfig.set(entry.getKey().toString(), entry.getValue().toString());
    }
    final org.apache.hadoop.yarn.client.api.YarnClient yarnClient = org.apache.hadoop.yarn.client.api.YarnClient
            .createYarnClient();
    yarnClient.init(yarnConfig);
    yarnClient.start();
    for (NodeReport report : yarnClient.getNodeReports(NodeState.RUNNING)) {
        log.debug("Node report:" + report.getNodeId() + " @ " + report.getHttpAddress() + " | "
                + report.getCapability());
    }
    log.info("Submitting application master class " + masterClassName);
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    final ApplicationId appId = appResponse.getApplicationId();
    if (appId == null) {
        // Could not obtain an application id from the RM; abort the JVM.
        System.exit(111);
    } else {
        // Record the application id parts so the master can reconstruct it.
        appConfig.setProperty("am.timestamp", String.valueOf(appId.getClusterTimestamp()));
        appConfig.setProperty("am.id", String.valueOf(appId.getId()));
    }
    YarnClient.distributeResources(yarnConfig, appConfig, applicationName);
    String masterJvmArgs = appConfig.getProperty("yarn1.master.jvm.args", "");
    YarnContainerContext masterContainer = new YarnContainerContext(yarnConfig, appConfig, masterJvmArgs,
            masterPriority, masterMemoryMb, masterNumCores, applicationName, YarnMaster.class, args);
    // Describe and submit the AM container to the resource manager.
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    appContext.setApplicationName(masterClassName);
    appContext.setResource(masterContainer.capability);
    appContext.setPriority(masterContainer.priority);
    appContext.setQueue(queue);
    appContext.setApplicationType(appConfig.getProperty("yarn1.application.type", "YARN"));
    appContext.setAMContainerSpec(masterContainer.createContainerLaunchContext());
    log.info("Master container spec: " + masterContainer.capability);
    yarnClient.submitApplication(appContext);
    ApplicationReport report = yarnClient.getApplicationReport(appId);
    log.info("Tracking URL: " + report.getTrackingUrl());
    if (awaitCompletion) {
        // Best-effort kill of the YARN application if this JVM is interrupted
        // while still waiting for completion.
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                if (!yarnClient.isInState(Service.STATE.STOPPED)) {
                    log.info("Killing yarn application in shutdown hook");
                    try {
                        yarnClient.killApplication(appId);
                    } catch (Throwable e) {
                        log.error("Failed to kill yarn application - please check YARN Resource Manager", e);
                    }
                }
            }
        });
        float lastProgress = -0.0f;
        // Poll the RM every 10s, logging progress changes, until the app
        // reports a final status other than UNDEFINED.
        while (true) {
            try {
                Thread.sleep(10000);
                report = yarnClient.getApplicationReport(appId);
                if (lastProgress != report.getProgress()) {
                    lastProgress = report.getProgress();
                    log.info(report.getApplicationId() + " " + (report.getProgress() * 100.00) + "% "
                            + (System.currentTimeMillis() - report.getStartTime()) + "(ms) "
                            + report.getDiagnostics());
                }
                if (!report.getFinalApplicationStatus().equals(FinalApplicationStatus.UNDEFINED)) {
                    log.info(report.getApplicationId() + " " + report.getFinalApplicationStatus());
                    log.info("Tracking url: " + report.getTrackingUrl());
                    log.info("Finish time: " + ((System.currentTimeMillis() - report.getStartTime()) / 1000)
                            + "(s)");
                    break;
                }
            } catch (Throwable e) {
                // Any polling error is treated as fatal: kill the app and
                // back off briefly before the next loop iteration.
                log.error("Master Heart Beat Error - terminating", e);
                yarnClient.killApplication(appId);
                Thread.sleep(2000);
            }
        }
        yarnClient.stop();
        // Non-zero exit code when the application did not succeed.
        if (!report.getFinalApplicationStatus().equals(FinalApplicationStatus.SUCCEEDED)) {
            System.exit(112);
        }
    }
    yarnClient.stop();
}
From source file:io.hops.hopsworks.common.jobs.flink.AbstractYarnClusterDescriptor.java
License:Apache License
/**
 * Attaches to an already-running Flink-on-YARN session identified by the
 * given application id and returns a cluster client for it.
 * Fails if the YARN application has already reached a final status.
 */
@Override
public YarnClusterClient retrieve(String applicationID) {
    try {
        // check if required Hadoop environment variables are set. If not, warn user
        if (System.getenv("HADOOP_CONF_DIR") == null && System.getenv("YARN_CONF_DIR") == null) {
            LOG.warn("Neither the HADOOP_CONF_DIR nor the YARN_CONF_DIR environment variable is set."
                    + "The Flink YARN Client needs one of these to be set to properly load the Hadoop "
                    + "configuration for accessing YARN.");
        }
        final ApplicationId yarnAppId = ConverterUtils.toApplicationId(applicationID);
        final YarnClient yarnClient = getYarnClient();
        final ApplicationReport appReport = yarnClient.getApplicationReport(yarnAppId);
        // UNDEFINED final status means the application is still running;
        // any other value means it already finished.
        if (appReport.getFinalApplicationStatus() != FinalApplicationStatus.UNDEFINED) {
            // Flink cluster is not running anymore
            LOG.error("The application {} doesn't run anymore. It has previously completed with final status: {}",
                    applicationID, appReport.getFinalApplicationStatus());
            throw new RuntimeException("The Yarn application " + applicationID + " doesn't run anymore.");
        }
        LOG.info("Found application JobManager host name '{}' and port '{}' from supplied application id '{}'",
                appReport.getHost(), appReport.getRpcPort(), applicationID);
        // Point the Flink configuration at the running JobManager.
        flinkConfiguration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, appReport.getHost());
        flinkConfiguration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, appReport.getRpcPort());
        return createYarnClusterClient(this, yarnClient, appReport, flinkConfiguration, sessionFilesDir, false);
    } catch (Exception e) {
        throw new RuntimeException("Couldn't retrieve Yarn cluster", e);
    }
}
From source file:org.apache.flink.yarn.YarnFlinkResourceManager.java
License:Apache License
/** * Converts a Flink application status enum to a YARN application status enum. * @param status The Flink application status. * @return The corresponding YARN application status. *///from w ww.j a v a 2 s . co m private FinalApplicationStatus getYarnStatus(ApplicationStatus status) { if (status == null) { return FinalApplicationStatus.UNDEFINED; } else { switch (status) { case SUCCEEDED: return FinalApplicationStatus.SUCCEEDED; case FAILED: return FinalApplicationStatus.FAILED; case CANCELED: return FinalApplicationStatus.KILLED; default: return FinalApplicationStatus.UNDEFINED; } } }
From source file:org.apache.oozie.action.hadoop.TestLauncherMain.java
License:Apache License
/**
 * Verifies that checkAndKillChildYarnJobs only reports applications that are
 * still active: the mocked report returns UNDEFINED, then FAILED, then KILLED
 * for the three application ids, so exactly the first (still-running) app
 * should be killed and returned.
 */
@Test
public void testKillChildYarnJobs() throws Exception {
    YarnClient yc = Mockito.mock(YarnClient.class);
    ApplicationReport ar = Mockito.mock(ApplicationReport.class);
    Mockito.when(yc.getApplicationReport(Mockito.any(ApplicationId.class))).thenReturn(ar);
    // Consecutive calls see one live app followed by two finished ones.
    Mockito.when(ar.getFinalApplicationStatus()).thenReturn(FinalApplicationStatus.UNDEFINED)
            .thenReturn(FinalApplicationStatus.FAILED).thenReturn(FinalApplicationStatus.KILLED);
    ApplicationId appz[] = { ApplicationId.newInstance(System.currentTimeMillis(), 1),
            ApplicationId.newInstance(System.currentTimeMillis(), 2),
            ApplicationId.newInstance(System.currentTimeMillis(), 3) };
    Collection<ApplicationId> result = LauncherMain.checkAndKillChildYarnJobs(yc, null, Arrays.asList(appz));
    assertEquals(1, result.size());
    assertEquals(appz[0].getId(), result.iterator().next().getId());
}
From source file:org.apache.tajo.master.rm.RMContainerAllocator.java
License:Apache License
/**
 * Stops the allocator and unregisters the ApplicationMaster, mapping the
 * query's state to a YARN final application status.
 */
public void stop() {
    stopped.set(true);
    super.stop();
    FinalApplicationStatus finishState = FinalApplicationStatus.UNDEFINED;
    QueryState state = context.getQuery().getState();
    if (state == QueryState.QUERY_SUCCEEDED) {
        finishState = FinalApplicationStatus.SUCCEEDED;
    } else if (state == QueryState.QUERY_KILLED || (state == QueryState.QUERY_RUNNING)) {
        // NOTE(review): a still-RUNNING query is reported as KILLED here —
        // presumably because stop() during execution implies termination; confirm.
        finishState = FinalApplicationStatus.KILLED;
    } else if (state == QueryState.QUERY_FAILED || state == QueryState.QUERY_ERROR) {
        finishState = FinalApplicationStatus.FAILED;
    }
    try {
        // NOTE(review): tracking URL is hard-coded to localhost:1234 — looks
        // like a placeholder; verify against the real tracking endpoint.
        unregisterApplicationMaster(finishState, "", "http://localhost:1234");
    } catch (YarnRemoteException e) {
        LOG.error(e);
    }
}
From source file:org.apache.tajo.master.rm.YarnRMContainerAllocator.java
License:Apache License
public void stop() { if (stopped.get()) { return;//from ww w .j ava 2s .c o m } LOG.info("un-registering ApplicationMaster(QueryMaster):" + appAttemptId); stopped.set(true); try { FinalApplicationStatus status = FinalApplicationStatus.UNDEFINED; Query query = context.getQuery(); if (query != null) { TajoProtos.QueryState state = query.getState(); if (state == TajoProtos.QueryState.QUERY_SUCCEEDED) { status = FinalApplicationStatus.SUCCEEDED; } else if (state == TajoProtos.QueryState.QUERY_FAILED || state == TajoProtos.QueryState.QUERY_ERROR) { status = FinalApplicationStatus.FAILED; } else if (state == TajoProtos.QueryState.QUERY_ERROR) { status = FinalApplicationStatus.FAILED; } } unregisterApplicationMaster(status, "tajo query finished", null); } catch (Exception e) { LOG.error(e.getMessage(), e); } allocatorThread.interrupt(); LOG.info("un-registered ApplicationMAster(QueryMaster) stopped:" + appAttemptId); super.stop(); }
From source file:org.apache.tajo.master.rm.YarnTajoResourceManager.java
License:Apache License
@Override public void stopQueryMaster(QueryId queryId) { try {// w ww . j av a 2s . c om FinalApplicationStatus appStatus = FinalApplicationStatus.UNDEFINED; QueryInProgress queryInProgress = masterContext.getQueryJobManager().getQueryInProgress(queryId); if (queryInProgress == null) { return; } TajoProtos.QueryState state = queryInProgress.getQueryInfo().getQueryState(); if (state == TajoProtos.QueryState.QUERY_SUCCEEDED) { appStatus = FinalApplicationStatus.SUCCEEDED; } else if (state == TajoProtos.QueryState.QUERY_FAILED || state == TajoProtos.QueryState.QUERY_ERROR) { appStatus = FinalApplicationStatus.FAILED; } else if (state == TajoProtos.QueryState.QUERY_ERROR) { appStatus = FinalApplicationStatus.FAILED; } FinishApplicationMasterRequest request = recordFactory .newRecordInstance(FinishApplicationMasterRequest.class); request.setFinalApplicationStatus(appStatus); request.setDiagnostics("QueryMaster shutdown by TajoMaster."); rmClient.finishApplicationMaster(request); } catch (Exception e) { LOG.error(e.getMessage(), e); } }
From source file:org.apache.tez.client.LocalClient.java
License:Apache License
protected FinalApplicationStatus convertDAGAppMasterStateToFinalYARNState(DAGAppMasterState dagAppMasterState) { switch (dagAppMasterState) { case NEW:// ww w .ja va2 s . co m case INITED: case RECOVERING: case IDLE: case RUNNING: return FinalApplicationStatus.UNDEFINED; case SUCCEEDED: return FinalApplicationStatus.SUCCEEDED; case FAILED: return FinalApplicationStatus.FAILED; case KILLED: return FinalApplicationStatus.KILLED; case ERROR: return FinalApplicationStatus.FAILED; default: return FinalApplicationStatus.UNDEFINED; } }