Usage examples for org.apache.hadoop.yarn.api.records.Resource#getVirtualCores()
@Public @Evolving public abstract int getVirtualCores();
From source file:org.apache.sysml.yarn.ropt.YarnClusterAnalyzer.java
License:Apache License
public static double getClusterUtilization() throws IOException { double util = 0; try {//from w ww.java 2 s . co m if (_client == null) _client = createYarnClient(); List<NodeReport> nodesReport = _client.getNodeReports(); double maxMem = 0; double currMem = 0; long maxCores = 0; long currCores = 0; for (NodeReport node : nodesReport) { Resource max = node.getCapability(); Resource used = node.getUsed(); maxMem += max.getMemory(); currMem += used.getMemory(); maxCores += max.getVirtualCores(); currCores += used.getVirtualCores(); } util = Math.max(Math.min(1, currMem / maxMem), //memory util Math.min(1, (double) currCores / maxCores)); //vcore util } catch (Exception ex) { throw new IOException(ex); } return util; }
From source file:org.apache.sysml.yarn.ropt.YarnClusterAnalyzer.java
License:Apache License
/** * Analyzes properties of Yarn cluster and Hadoop configurations. * /*from www . j a v a 2 s. c o m*/ * @param yarnClient hadoop yarn client * @param conf hadoop yarn configuration * @param verbose output info to standard output */ public static void analyzeYarnCluster(YarnClient yarnClient, YarnConfiguration conf, boolean verbose) { try { List<NodeReport> nodesReport = yarnClient.getNodeReports(); if (verbose) System.out.println("There are " + nodesReport.size() + " nodes in the cluster"); if (nodesReport.isEmpty()) throw new YarnException("There are zero available nodes in the yarn cluster"); nodesMaxPhySorted = new ArrayList<>(nodesReport.size()); clusterTotalMem = 0; clusterTotalCores = 0; clusterTotalNodes = 0; minimumMRContainerPhyMB = -1; for (NodeReport node : nodesReport) { Resource resource = node.getCapability(); Resource used = node.getUsed(); if (used == null) used = Resource.newInstance(0, 0); int mb = resource.getMemory(); int cores = resource.getVirtualCores(); if (mb <= 0) throw new YarnException("A node has non-positive memory " + mb); int myMinMRPhyMB = mb / cores / CPU_HYPER_FACTOR; if (minimumMRContainerPhyMB < myMinMRPhyMB) minimumMRContainerPhyMB = myMinMRPhyMB; // minimumMRContainerPhyMB needs to be the largest among the mins clusterTotalMem += (long) mb * 1024 * 1024; nodesMaxPhySorted.add((long) mb * 1024 * 1024); clusterTotalCores += cores; clusterTotalNodes++; if (verbose) System.out.println("\t" + node.getNodeId() + " has " + mb + " MB (" + used.getMemory() + " MB used) memory and " + resource.getVirtualCores() + " (" + used.getVirtualCores() + " used) cores"); } Collections.sort(nodesMaxPhySorted, Collections.reverseOrder()); nodesMaxBudgetSorted = new ArrayList<>(nodesMaxPhySorted.size()); for (int i = 0; i < nodesMaxPhySorted.size(); i++) nodesMaxBudgetSorted.add(ResourceOptimizer.phyToBudget(nodesMaxPhySorted.get(i))); _remotePar = nodesReport.size(); if (_remotePar == 0) throw new YarnException("There are no available nodes in 
the yarn cluster"); // Now get the default cluster settings _remoteMRSortMem = (1024 * 1024) * conf.getLong(MRConfigurationNames.MR_TASK_IO_SORT_MB, 100); //100MB //handle jvm max mem (map mem budget is relevant for map-side distcache and parfor) //(for robustness we probe both: child and map configuration parameters) String javaOpts1 = conf.get(MRConfigurationNames.MR_CHILD_JAVA_OPTS); //internally mapred/mapreduce synonym String javaOpts2 = conf.get(MRConfigurationNames.MR_MAP_JAVA_OPTS, null); //internally mapred/mapreduce synonym String javaOpts3 = conf.get(MRConfigurationNames.MR_REDUCE_JAVA_OPTS, null); //internally mapred/mapreduce synonym if (javaOpts2 != null) //specific value overrides generic _remoteJVMMaxMemMap = extractMaxMemoryOpt(javaOpts2); else _remoteJVMMaxMemMap = extractMaxMemoryOpt(javaOpts1); if (javaOpts3 != null) //specific value overrides generic _remoteJVMMaxMemReduce = extractMaxMemoryOpt(javaOpts3); else _remoteJVMMaxMemReduce = extractMaxMemoryOpt(javaOpts1); //HDFS blocksize String blocksize = conf.get(MRConfigurationNames.DFS_BLOCKSIZE, "134217728"); _blocksize = Long.parseLong(blocksize); minimalPhyAllocate = (long) 1024 * 1024 * conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB); maximumPhyAllocate = (long) 1024 * 1024 * conf.getInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB); mrAMPhy = (long) conf.getInt(MRConfigurationNames.YARN_APP_MR_AM_RESOURCE_MB, 1536) * 1024 * 1024; } catch (Exception e) { throw new RuntimeException("Unable to analyze yarn cluster ", e); } /* * This is for AppMaster to query available resource in the cluster during heartbeat * AMRMClient<ContainerRequest> rmClient = AMRMClient.createAMRMClient(); rmClient.init(conf); rmClient.start(); AllocateResponse response = rmClient.allocate(0); int nodeCount = response.getNumClusterNodes(); Resource resource = 
response.getAvailableResources(); List<NodeReport> nodeUpdate = response.getUpdatedNodes(); LOG.info("This is a " + nodeCount + " node cluster with totally " + resource.getMemory() + " memory and " + resource.getVirtualCores() + " cores"); LOG.info(nodereport.size() + " updatedNode reports received"); for (NodeReport node : nodeUpdate) { resource = node.getCapability(); LOG.info(node.getNodeId() + " updated with " + resource.getMemory() + " memory and " + resource.getVirtualCores() + " cores"); }*/ }
From source file:org.apache.tez.dag.api.DAG.java
License:Apache License
@Private public synchronized DAGPlan createDag(Configuration tezConf, Credentials extraCredentials, Map<String, LocalResource> tezJarResources, LocalResource binaryConfig, boolean tezLrsAsArchive, Map<String, String> additionalConfigs) { verify(true);/*w w w . ja v a 2 s.c om*/ DAGPlan.Builder dagBuilder = DAGPlan.newBuilder(); dagBuilder.setName(this.name); if (this.dagInfo != null && !this.dagInfo.isEmpty()) { dagBuilder.setDagInfo(this.dagInfo); } if (!vertexGroups.isEmpty()) { for (VertexGroup av : vertexGroups) { GroupInfo groupInfo = av.getGroupInfo(); PlanVertexGroupInfo.Builder groupBuilder = PlanVertexGroupInfo.newBuilder(); groupBuilder.setGroupName(groupInfo.getGroupName()); for (Vertex v : groupInfo.getMembers()) { groupBuilder.addGroupMembers(v.getName()); } groupBuilder.addAllOutputs(groupInfo.outputs); for (Map.Entry<String, InputDescriptor> entry : groupInfo.edgeMergedInputs.entrySet()) { groupBuilder.addEdgeMergedInputs( PlanGroupInputEdgeInfo.newBuilder().setDestVertexName(entry.getKey()) .setMergedInput(DagTypeConverters.convertToDAGPlan(entry.getValue()))); } dagBuilder.addVertexGroups(groupBuilder); } } Credentials dagCredentials = new Credentials(); if (extraCredentials != null) { dagCredentials.mergeAll(extraCredentials); } dagCredentials.mergeAll(credentials); if (!commonTaskLocalFiles.isEmpty()) { dagBuilder.addAllLocalResource(DagTypeConverters.convertToDAGPlan(commonTaskLocalFiles)); } Preconditions.checkArgument(topologicalVertexStack.size() == vertices.size(), "size of topologicalVertexStack is:" + topologicalVertexStack.size() + " while size of vertices is:" + vertices.size() + ", make sure they are the same in order to sort the vertices"); while (!topologicalVertexStack.isEmpty()) { Vertex vertex = vertices.get(topologicalVertexStack.pop()); // infer credentials, resources and parallelism from data source Resource vertexTaskResource = vertex.getTaskResource(); if (vertexTaskResource == null) { vertexTaskResource = 
Resource.newInstance( tezConf.getInt(TezConfiguration.TEZ_TASK_RESOURCE_MEMORY_MB, TezConfiguration.TEZ_TASK_RESOURCE_MEMORY_MB_DEFAULT), tezConf.getInt(TezConfiguration.TEZ_TASK_RESOURCE_CPU_VCORES, TezConfiguration.TEZ_TASK_RESOURCE_CPU_VCORES_DEFAULT)); } Map<String, LocalResource> vertexLRs = Maps.newHashMap(); vertexLRs.putAll(vertex.getTaskLocalFiles()); List<DataSourceDescriptor> dataSources = vertex.getDataSources(); for (DataSourceDescriptor dataSource : dataSources) { if (dataSource.getCredentials() != null) { dagCredentials.addAll(dataSource.getCredentials()); } if (dataSource.getAdditionalLocalFiles() != null) { TezCommonUtils.addAdditionalLocalResources(dataSource.getAdditionalLocalFiles(), vertexLRs, "Vertex " + vertex.getName()); } } if (tezJarResources != null) { TezCommonUtils.addAdditionalLocalResources(tezJarResources, vertexLRs, "Vertex " + vertex.getName()); } if (binaryConfig != null) { vertexLRs.put(TezConstants.TEZ_PB_BINARY_CONF_NAME, binaryConfig); } int vertexParallelism = vertex.getParallelism(); VertexLocationHint vertexLocationHint = vertex.getLocationHint(); if (dataSources.size() == 1) { DataSourceDescriptor dataSource = dataSources.get(0); if (vertexParallelism == -1 && dataSource.getNumberOfShards() > -1) { vertexParallelism = dataSource.getNumberOfShards(); } if (vertexLocationHint == null && dataSource.getLocationHint() != null) { vertexLocationHint = dataSource.getLocationHint(); } } if (vertexParallelism == -1) { Preconditions.checkState(vertexLocationHint == null, "Cannot specify vertex location hint without specifying vertex parallelism. Vertex: " + vertex.getName()); } else if (vertexLocationHint != null) { Preconditions.checkState(vertexParallelism == vertexLocationHint.getTaskLocationHints().size(), "vertex task location hint must equal vertex parallelism. 
Vertex: " + vertex.getName()); } for (DataSinkDescriptor dataSink : vertex.getDataSinks()) { if (dataSink.getCredentials() != null) { dagCredentials.addAll(dataSink.getCredentials()); } } VertexPlan.Builder vertexBuilder = VertexPlan.newBuilder(); vertexBuilder.setName(vertex.getName()); vertexBuilder.setType(PlanVertexType.NORMAL); // vertex type is implicitly NORMAL until TEZ-46. vertexBuilder .setProcessorDescriptor(DagTypeConverters.convertToDAGPlan(vertex.getProcessorDescriptor())); if (vertex.getInputs().size() > 0) { for (RootInputLeafOutput<InputDescriptor, InputInitializerDescriptor> input : vertex.getInputs()) { vertexBuilder.addInputs(DagTypeConverters.convertToDAGPlan(input)); } } if (vertex.getOutputs().size() > 0) { for (RootInputLeafOutput<OutputDescriptor, OutputCommitterDescriptor> output : vertex .getOutputs()) { vertexBuilder.addOutputs(DagTypeConverters.convertToDAGPlan(output)); } } if (vertex.getConf() != null && vertex.getConf().size() > 0) { ConfigurationProto.Builder confBuilder = ConfigurationProto.newBuilder(); for (Map.Entry<String, String> entry : vertex.getConf().entrySet()) { PlanKeyValuePair.Builder keyValueBuilder = PlanKeyValuePair.newBuilder(); keyValueBuilder.setKey(entry.getKey()); keyValueBuilder.setValue(entry.getValue()); confBuilder.addConfKeyValues(keyValueBuilder); } vertexBuilder.setVertexConf(confBuilder); } //task config PlanTaskConfiguration.Builder taskConfigBuilder = PlanTaskConfiguration.newBuilder(); taskConfigBuilder.setNumTasks(vertexParallelism); taskConfigBuilder.setMemoryMb(vertexTaskResource.getMemory()); taskConfigBuilder.setVirtualCores(vertexTaskResource.getVirtualCores()); taskConfigBuilder.setJavaOpts( TezClientUtils.addDefaultsToTaskLaunchCmdOpts(vertex.getTaskLaunchCmdOpts(), tezConf)); taskConfigBuilder.setTaskModule(vertex.getName()); if (!vertexLRs.isEmpty()) { taskConfigBuilder.addAllLocalResource(DagTypeConverters.convertToDAGPlan(vertexLRs)); } Map<String, String> taskEnv = 
Maps.newHashMap(vertex.getTaskEnvironment()); TezYARNUtils.setupDefaultEnv(taskEnv, tezConf, TezConfiguration.TEZ_TASK_LAUNCH_ENV, TezConfiguration.TEZ_TASK_LAUNCH_ENV_DEFAULT, tezLrsAsArchive); for (Map.Entry<String, String> entry : taskEnv.entrySet()) { PlanKeyValuePair.Builder envSettingBuilder = PlanKeyValuePair.newBuilder(); envSettingBuilder.setKey(entry.getKey()); envSettingBuilder.setValue(entry.getValue()); taskConfigBuilder.addEnvironmentSetting(envSettingBuilder); } if (vertexLocationHint != null) { if (vertexLocationHint.getTaskLocationHints() != null) { for (TaskLocationHint hint : vertexLocationHint.getTaskLocationHints()) { PlanTaskLocationHint.Builder taskLocationHintBuilder = PlanTaskLocationHint.newBuilder(); // we can allow this later on if needed if (hint.getAffinitizedTask() != null) { throw new TezUncheckedException( "Task based affinity may not be specified via the DAG API"); } if (hint.getHosts() != null) { taskLocationHintBuilder.addAllHost(hint.getHosts()); } if (hint.getRacks() != null) { taskLocationHintBuilder.addAllRack(hint.getRacks()); } vertexBuilder.addTaskLocationHint(taskLocationHintBuilder); } } } if (vertex.getVertexManagerPlugin() != null) { vertexBuilder.setVertexManagerPlugin( DagTypeConverters.convertToDAGPlan(vertex.getVertexManagerPlugin())); } for (Edge inEdge : vertex.getInputEdges()) { vertexBuilder.addInEdgeId(inEdge.getId()); } for (Edge outEdge : vertex.getOutputEdges()) { vertexBuilder.addOutEdgeId(outEdge.getId()); } vertexBuilder.setTaskConfig(taskConfigBuilder); dagBuilder.addVertex(vertexBuilder); } for (Edge edge : edges) { EdgePlan.Builder edgeBuilder = EdgePlan.newBuilder(); edgeBuilder.setId(edge.getId()); edgeBuilder.setInputVertexName(edge.getInputVertex().getName()); edgeBuilder.setOutputVertexName(edge.getOutputVertex().getName()); edgeBuilder.setDataMovementType( DagTypeConverters.convertToDAGPlan(edge.getEdgeProperty().getDataMovementType())); edgeBuilder.setDataSourceType( 
DagTypeConverters.convertToDAGPlan(edge.getEdgeProperty().getDataSourceType())); edgeBuilder.setSchedulingType( DagTypeConverters.convertToDAGPlan(edge.getEdgeProperty().getSchedulingType())); edgeBuilder.setEdgeSource(DagTypeConverters.convertToDAGPlan(edge.getEdgeProperty().getEdgeSource())); edgeBuilder.setEdgeDestination( DagTypeConverters.convertToDAGPlan(edge.getEdgeProperty().getEdgeDestination())); if (edge.getEdgeProperty().getDataMovementType() == DataMovementType.CUSTOM) { if (edge.getEdgeProperty().getEdgeManagerDescriptor() != null) { edgeBuilder.setEdgeManager( DagTypeConverters.convertToDAGPlan(edge.getEdgeProperty().getEdgeManagerDescriptor())); } // else the AM will deal with this. } dagBuilder.addEdge(edgeBuilder); } ConfigurationProto.Builder confProtoBuilder = ConfigurationProto.newBuilder(); if (dagAccessControls != null) { Configuration aclConf = new Configuration(false); dagAccessControls.serializeToConfiguration(aclConf); Iterator<Entry<String, String>> aclConfIter = aclConf.iterator(); while (aclConfIter.hasNext()) { Entry<String, String> entry = aclConfIter.next(); PlanKeyValuePair.Builder kvp = PlanKeyValuePair.newBuilder(); kvp.setKey(entry.getKey()); kvp.setValue(entry.getValue()); TezConfiguration.validateProperty(entry.getKey(), Scope.DAG); confProtoBuilder.addConfKeyValues(kvp); } } if (additionalConfigs != null && !additionalConfigs.isEmpty()) { for (Entry<String, String> entry : additionalConfigs.entrySet()) { PlanKeyValuePair.Builder kvp = PlanKeyValuePair.newBuilder(); kvp.setKey(entry.getKey()); kvp.setValue(entry.getValue()); TezConfiguration.validateProperty(entry.getKey(), Scope.DAG); confProtoBuilder.addConfKeyValues(kvp); } } if (this.dagConf != null && !this.dagConf.isEmpty()) { for (Entry<String, String> entry : this.dagConf.entrySet()) { PlanKeyValuePair.Builder kvp = PlanKeyValuePair.newBuilder(); kvp.setKey(entry.getKey()); kvp.setValue(entry.getValue()); confProtoBuilder.addConfKeyValues(kvp); } } 
dagBuilder.setDagConf(confProtoBuilder); if (dagCredentials != null) { dagBuilder.setCredentialsBinary(DagTypeConverters.convertCredentialsToProto(dagCredentials)); TezCommonUtils.logCredentials(LOG, dagCredentials, "dag"); } return dagBuilder.build(); }
From source file:org.apache.tez.dag.app.dag.impl.DAGSchedulerMRR.java
License:Apache License
/**
 * Schedules one task attempt under the MRR (map-reduce-reduce) policy.
 * Tracks a current "partitioner" (upstream) vertex and a current
 * "shuffler" (downstream) vertex; shuffle-side attempts are queued and
 * released gradually while partition-side attempts go straight through.
 *
 * @param event scheduler update carrying the attempt to schedule
 * @throws TezUncheckedException if the attempt's vertex is neither the
 *         current partitioner, the current shuffler, nor an ancestor retry
 */
@Override
public void scheduleTask(DAGEventSchedulerUpdate event) {
    TaskAttempt attempt = event.getAttempt();
    Vertex vertex = dag.getVertex(attempt.getVertexID());
    int vertexDistanceFromRoot = vertex.getDistanceFromRoot();

    LOG.info("Schedule task: " + attempt.getID());

    if (currentPartitioner == null) {
        // First vertex seen becomes the partitioner.
        currentPartitioner = vertex;
        currentShufflerDepth = vertexDistanceFromRoot;
        assert realPartitionerResource == null;
        Resource partitionerResource = currentPartitioner.getTaskResource();
        realPartitionerResource = Resource.newInstance(partitionerResource.getMemory(),
                partitionerResource.getVirtualCores());
        LOG.info(vertex.getVertexId() + " is new partitioner at depth " + vertexDistanceFromRoot);
    } else if (currentShuffler == null && vertexDistanceFromRoot > currentShufflerDepth) {
        // Not the partitioner, no shuffler set yet, and deeper than the
        // current shuffler depth: this must be the new shuffler.
        currentShuffler = vertex;
        currentShufflerDepth = vertexDistanceFromRoot;
        assert realShufflerResource == null;
        Resource shufflerResource = currentShuffler.getTaskResource();
        realShufflerResource = Resource.newInstance(shufflerResource.getMemory(),
                shufflerResource.getVirtualCores());
        LOG.info(vertex.getVertexId() + " is new shuffler at depth " + currentShufflerDepth);
    }

    if (currentShuffler == vertex) {
        // Shuffle tasks are held back and released in controlled batches.
        pendingShuffleTasks.add(attempt);
        unassignedShuffleTasks.add(attempt.getTaskID());
        schedulePendingShuffles(getNumShufflesToSchedule());
        return;
    }

    if (currentPartitioner == vertex) {
        unassignedPartitionTasks.add(attempt.getTaskID());
    }

    // Sanity check: the task should be a partitioner, a shuffler, or a
    // retry of an ancestor vertex.
    if (currentPartitioner != vertex && currentShuffler != vertex
            && vertexDistanceFromRoot >= currentPartitioner.getDistanceFromRoot()) {
        String message = vertex.getVertexId() + " is neither the " + " current partitioner: "
                + currentPartitioner.getVertexId() + " nor the current shuffler: "
                + currentShuffler.getVertexId();
        LOG.fatal(message);
        throw new TezUncheckedException(message);
    }

    scheduleTaskAttempt(attempt);
}
From source file:org.apache.tez.dag.app.rm.LlapTaskSchedulerService.java
License:Apache License
/**
 * Aggregates the capacity of every live LLAP service instance.
 * Dead instances are skipped (and logged). Reads under the read lock.
 *
 * @return a new Resource holding the summed memory and vcores
 */
@Override
public Resource getTotalResources() {
    int totalMemory = 0;
    int totalVcores = 0;
    readLock.lock();
    try {
        for (ServiceInstance inst : activeInstances.getAll().values()) {
            if (!inst.isAlive()) {
                LOG.info("Ignoring dead instance " + inst);
                continue;
            }
            Resource r = inst.getResource();
            LOG.info("Found instance " + inst);
            totalMemory += r.getMemory();
            totalVcores += r.getVirtualCores();
        }
    } finally {
        readLock.unlock();
    }
    return Resource.newInstance(totalMemory, totalVcores);
}
From source file:org.apache.tez.dag.app.rm.LlapTaskSchedulerService.java
License:Apache License
/** * The difference between this and getTotalResources() is that this only gives currently free * resource instances, while the other lists all the instances that may become available in a * while.// w w w . j a va 2 s .c o m */ @Override public Resource getAvailableResources() { // need a state store eventually for current state & measure backoffs int memory = 0; int vcores = 0; readLock.lock(); try { for (Entry<ServiceInstance, NodeInfo> entry : instanceToNodeMap.entrySet()) { if (entry.getKey().isAlive() && !entry.getValue().isDisabled()) { Resource r = entry.getKey().getResource(); memory += r.getMemory(); vcores += r.getVirtualCores(); } } } finally { readLock.unlock(); } return Resource.newInstance(memory, vcores); }
From source file:org.apache.tez.dag.app.rm.YarnTaskSchedulerService.java
License:Apache License
/**
 * Checks whether a request of size {@code arg0} fits within {@code arg1}:
 * both the memory and the vcore requirement must be no larger than the
 * corresponding available amount.
 *
 * @param arg0 requested resource
 * @param arg1 available resource
 * @return true if arg0 fits entirely inside arg1
 */
boolean canFit(Resource arg0, Resource arg1) {
    // Idiom: return the comparison directly instead of
    // if (cond) { return true; } return false;
    return arg0.getMemory() <= arg1.getMemory()
            && arg0.getVirtualCores() <= arg1.getVirtualCores();
}
From source file:org.apache.tez.mapreduce.hadoop.TestMRHelpers.java
License:Apache License
/**
 * Verifies container resource construction for MR mappers and reducers:
 * first that an unconfigured JobConf yields the MR defaults, then that
 * explicitly configured vcores/memory values take precedence.
 */
@Test(timeout = 5000)
public void testContainerResourceConstruction() {
    JobConf conf = new JobConf(new Configuration());

    // Phase 1: no explicit settings — expect the MRJobConfig defaults.
    Resource mapResource = MRHelpers.getResourceForMRMapper(conf);
    Resource reduceResource = MRHelpers.getResourceForMRReducer(conf);
    Assert.assertEquals(MRJobConfig.DEFAULT_MAP_CPU_VCORES, mapResource.getVirtualCores());
    Assert.assertEquals(MRJobConfig.DEFAULT_MAP_MEMORY_MB, mapResource.getMemory());
    Assert.assertEquals(MRJobConfig.DEFAULT_REDUCE_CPU_VCORES, reduceResource.getVirtualCores());
    Assert.assertEquals(MRJobConfig.DEFAULT_REDUCE_MEMORY_MB, reduceResource.getMemory());

    // Phase 2: explicit settings must override the defaults.
    conf.setInt(MRJobConfig.MAP_CPU_VCORES, 2);
    conf.setInt(MRJobConfig.MAP_MEMORY_MB, 123);
    conf.setInt(MRJobConfig.REDUCE_CPU_VCORES, 20);
    conf.setInt(MRJobConfig.REDUCE_MEMORY_MB, 1234);
    mapResource = MRHelpers.getResourceForMRMapper(conf);
    reduceResource = MRHelpers.getResourceForMRReducer(conf);
    Assert.assertEquals(2, mapResource.getVirtualCores());
    Assert.assertEquals(123, mapResource.getMemory());
    Assert.assertEquals(20, reduceResource.getVirtualCores());
    Assert.assertEquals(1234, reduceResource.getMemory());
}
From source file:org.apache.twill.internal.yarn.Hadoop21YarnAMClient.java
License:Apache License
/**
 * Clamps a requested capability to the cluster maximum, mutating and
 * returning the given resource. Each adjustment is logged.
 *
 * @param resource requested capability (mutated in place)
 * @return the same resource instance, with vcores and memory capped
 */
@Override
protected Resource adjustCapability(Resource resource) {
    // Cap virtual cores at the cluster maximum.
    int cores = resource.getVirtualCores();
    int updatedCores = Math.min(cores, maxCapability.getVirtualCores());
    if (cores != updatedCores) {
        resource.setVirtualCores(updatedCores);
        LOG.info("Adjust virtual cores requirement from {} to {}.", cores, updatedCores);
    }

    // Cap memory at the cluster maximum.
    int memory = resource.getMemory();
    int updatedMemory = Math.min(memory, maxCapability.getMemory());
    if (memory != updatedMemory) {
        resource.setMemory(updatedMemory);
        // BUG FIX: the original logged resource.getMemory() AFTER setMemory(),
        // so the "from" and "to" values in the message were always identical.
        LOG.info("Adjust memory requirement from {} to {} MB.", memory, updatedMemory);
    }
    return resource;
}
From source file:org.apache.twill.internal.yarn.Hadoop21YarnAppClient.java
License:Apache License
@Override public ProcessLauncher<ApplicationMasterInfo> createLauncher(TwillSpecification twillSpec, @Nullable String schedulerQueue) throws Exception { // Request for new application YarnClientApplication application = yarnClient.createApplication(); final GetNewApplicationResponse response = application.getNewApplicationResponse(); final ApplicationId appId = response.getApplicationId(); // Setup the context for application submission final ApplicationSubmissionContext appSubmissionContext = application.getApplicationSubmissionContext(); appSubmissionContext.setApplicationId(appId); appSubmissionContext.setApplicationName(twillSpec.getName()); if (schedulerQueue != null) { appSubmissionContext.setQueue(schedulerQueue); }//from w w w . j a v a2 s. c o m // TODO: Make it adjustable through TwillSpec (TWILL-90) // Set the resource requirement for AM final Resource capability = adjustMemory(response, Resource.newInstance(Constants.APP_MASTER_MEMORY_MB, 1)); ApplicationMasterInfo appMasterInfo = new ApplicationMasterInfo(appId, capability.getMemory(), capability.getVirtualCores()); ApplicationSubmitter submitter = new ApplicationSubmitter() { @Override public ProcessController<YarnApplicationReport> submit(YarnLaunchContext context) { ContainerLaunchContext launchContext = context.getLaunchContext(); appSubmissionContext.setAMContainerSpec(launchContext); appSubmissionContext.setResource(capability); appSubmissionContext.setMaxAppAttempts(2); try { yarnClient.submitApplication(appSubmissionContext); return new ProcessControllerImpl(yarnClient, appId); } catch (Exception e) { LOG.error("Failed to submit application {}", appId, e); throw Throwables.propagate(e); } } }; return new ApplicationMasterProcessLauncher(appMasterInfo, submitter); }