Usage examples for org.apache.hadoop.yarn.api.records.NodeState.RUNNING

NodeState.RUNNING is the enum value for a node that is up and able to run containers. The examples below, drawn from open-source projects, show how the value is used in practice, most often as the filter argument to YarnClient.getNodeReports(NodeState.RUNNING) to list a cluster's active nodes.
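All of the snippets share one core pattern: create and start a YarnClient, then ask the ResourceManager for reports on nodes in the RUNNING state. The following minimal sketch isolates that pattern. It is our own illustration, not taken from the projects below; the class name ListRunningNodes and the printed fields are arbitrary choices, while the YarnClient calls are the standard API used in every example.

import java.util.List;

import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ListRunningNodes {
    public static void main(String[] args) throws Exception {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(new YarnConfiguration()); // picks up yarn-site.xml from the classpath
        yarnClient.start();
        try {
            // Ask the ResourceManager only for nodes currently in the RUNNING state.
            List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);
            for (NodeReport node : nodes) {
                System.out.println(node.getNodeId() + " capability=" + node.getCapability());
            }
        } finally {
            yarnClient.stop();
        }
    }
}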
From source file: cn.edu.buaa.act.petuumOnYarn.ApplicationMaster.java
License: Apache License
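This ApplicationMaster method lists the RUNNING nodes, keeps those whose capability covers the requested container memory and vcores, and returns false if the cluster cannot supply the requested number of suitable nodes.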
private boolean getAvaliableNodes() {
    List<NodeReport> clusterNodeReports;
    try {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(conf);
        yarnClient.start();
        clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
        for (NodeReport node : clusterNodeReports) {
            LOG.info("node infos:" + node.getHttpAddress());
        }

        avaliableNodeList = new ArrayList<NodeReport>();
        if (numNodes <= clusterNodeReports.size()) {
            for (NodeReport node : clusterNodeReports) {
                if (node.getCapability().getMemory() >= containerMemory
                        && node.getCapability().getVirtualCores() >= containerVirtualCores) {
                    avaliableNodeList.add(node);
                }
            }
            if (avaliableNodeList.size() >= numNodes)
                numTotalContainers = numNodes;
            else {
                LOG.error("Resource isn't enough");
                return false;
            }
        } else {
            LOG.error("cluster nodes isn't enough");
            return false;
        }
    } catch (Exception e) {
        LOG.error(e.getMessage());
        LOG.error(e.getStackTrace());
        return false;
    }
    return true;
}
From source file: cn.edu.buaa.act.petuumOnYarn.Client.java
License: Apache License
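Here the client logs cluster metrics, RUNNING node reports, queue information, and queue ACLs, caps the AM memory and vcore asks at the cluster maximum, stages the AM jar and a script on HDFS, builds the AM classpath and launch command, attaches delegation tokens when security is enabled, and submits and monitors the application.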
/**
 * Main run function for the client
 *
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {
    LOG.info("Running Client");
    yarnClient.start();

    String[] s;
    s = conf.getStrings(YarnConfiguration.RM_ADDRESS);
    for (String ss : s)
        LOG.info("RM address: " + ss);

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers() + ", nodeIdHost" + node.getNodeId().getHost());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();

    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capabililty of resources in this cluster " + maxVCores);

    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value."
                + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setKeepContainersAcrossApplicationAttempts(keepContainers);
    appContext.setApplicationName(appName);

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of
    // the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(conf);
    YarnUtil.copyAndAddToLocalResources(fs, appMasterJar, petuumHDFSPathPrefix, appMasterJarPath,
            localResources, null);
    scriptHDFSPath = YarnUtil.copyToHDFS(fs, scriptPath, petuumHDFSPathPrefix, launchPath, null);

    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        YarnUtil.copyAndAddToLocalResources(fs, log4jPropFile, petuumHDFSPathPrefix, log4jPath,
                localResources, null);
    }

    // Set the env variables to be setup in the env where the application
    // master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$$())
            .append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) {
        classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--container_vcores " + String.valueOf(containerVirtualCores));
    vargs.add("--num_nodes " + String.valueOf(numNodes));
    vargs.add("--start_port " + String.valueOf(startPort));
    vargs.add("--priority " + String.valueOf(workerPriority));
    vargs.add("--script_hdfs_path " + scriptHDFSPath);

    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(localResources, env, commands,
            null, null, null);

    // Set up resource type requirements
    // For now, both memory and vcores are supported, so we set memory and
    // vcores requirements
    Resource capability = Resource.newInstance(amMemory, amVCores);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        // Note: Credentials class is marked as LimitedPrivate for HDFS and
        // MapReduce
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Priority.newInstance(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp =
    // applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on
    // success or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");
    yarnClient.submitApplication(appContext);

    // Monitor the application
    currentTime = System.currentTimeMillis();
    LOG.info("submit AM in " + (currentTime - startTime) + "ms");
    return monitorApplication(appId);
}
From source file: com.bigjob.Client.java
License: Apache License
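This client follows the same submission flow as the previous example, additionally staging a shell script, shell command, and arguments as local resources and passing the script's HDFS location, length, and timestamp to the AM through environment variables; it returns immediately after submission instead of monitoring.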
/**
 * Main run function for the client
 *
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {
    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM (RM)" + ", numNodeManagers="
            + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capabililty of resources in this cluster " + maxVCores);

    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value."
                + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    // if (dfsUrl!=null && dfsUrl.equals("")==false){
    //     conf.set("fs.defaultFS", dfsUrl);
    // }
    FileSystem fs = FileSystem.get(conf);
    addToLocalResources(fs, appMasterJar, appMasterJarPath, appId.getId(), localResources, null);

    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        addToLocalResources(fs, log4jPropFile, log4jPath, appId.getId(), localResources, null);
    }

    // The shell script has to be made available on the final container(s)
    // where it will be executed.
    // To do this, we need to first copy into the filesystem that is visible
    // to the yarn framework.
    // We do not need to set this as a local resource for the application
    // master as the application master does not need it.
    String hdfsShellScriptLocation = "";
    long hdfsShellScriptLen = 0;
    long hdfsShellScriptTimestamp = 0;
    if (!shellScriptPath.isEmpty()) {
        Path shellSrc = new Path(shellScriptPath);
        String shellPathSuffix = appName + "/" + appId.getId() + "/"
                + (Shell.WINDOWS ? windowBatPath : linuxShellPath);
        Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
        fs.copyFromLocalFile(false, true, shellSrc, shellDst);
        hdfsShellScriptLocation = shellDst.toUri().toString();
        FileStatus shellFileStatus = fs.getFileStatus(shellDst);
        hdfsShellScriptLen = shellFileStatus.getLen();
        hdfsShellScriptTimestamp = shellFileStatus.getModificationTime();
    }

    if (!shellCommand.isEmpty()) {
        addToLocalResources(fs, null, shellCommandPath, appId.getId(), localResources, shellCommand);
    }

    if (shellArgs.length > 0) {
        addToLocalResources(fs, null, shellArgsPath, appId.getId(), localResources,
                StringUtils.join(shellArgs, " "));
    }

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    //amContainer.setContainerTokens(containerToken);

    // Set the env variables to be setup in the env where the application master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // put location of shell script into env
    // using the env info, the application master will create the correct local resource for the
    // eventual containers that will be launched to execute the shell scripts
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION, hdfsShellScriptLocation);
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP, Long.toString(hdfsShellScriptTimestamp));
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN, Long.toString(hdfsShellScriptLen));

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--container_vcores " + String.valueOf(containerVirtualCores));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    vargs.add("--priority " + String.valueOf(shellCmdPriority));

    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    // For now, both memory and vcores are supported, so we set memory and
    // vcores requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    capability.setVirtualCores(amVCores);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on success
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");
    yarnClient.submitApplication(appContext);

    // TODO
    // Try submitting the same request again
    // app submission failure?

    // Monitor the application
    //return monitorApplication(appId);
    System.out.println("ApplicationId:" + appId);
    return true;
}
From source file: com.cfets.door.yarn.jboss.JBossClient.java
License: Apache License
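This client submits a JBoss ApplicationMaster: it logs RUNNING node reports and queue information, builds the jar and optional log4j LocalResource records by hand, assembles an AM launch command that includes admin credentials, and then submits and monitors the application.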
/**
 * Main run function for the client
 *
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {
    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();

    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName(appName);

    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    FileSystem fs = FileSystem.get(conf);
    Path src = new Path(appJar);
    String pathSuffix = appName + File.separator + appId.getId() + File.separator
            + JBossConstants.JBOSS_ON_YARN_APP;
    Path dst = new Path(fs.getHomeDirectory(), pathSuffix);
    jbossAppUri = dst.toUri().toString();
    fs.copyFromLocalFile(false, true, src, dst);
    FileStatus destStatus = fs.getFileStatus(dst);
    LocalResource amJarRsrc = Records.newRecord(LocalResource.class);

    amJarRsrc.setType(LocalResourceType.FILE);
    amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    amJarRsrc.setTimestamp(destStatus.getModificationTime());
    amJarRsrc.setSize(destStatus.getLen());
    localResources.put(JBossConstants.JBOSS_ON_YARN_APP, amJarRsrc);

    if (!log4jPropFile.isEmpty()) {
        Path log4jSrc = new Path(log4jPropFile);
        Path log4jDst = new Path(fs.getHomeDirectory(), "log4j.props");
        fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
        FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
        LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
        log4jRsrc.setType(LocalResourceType.FILE);
        log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
        log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
        log4jRsrc.setSize(log4jFileStatus.getLen());
        localResources.put("log4j.properties", log4jRsrc);
    }

    amContainer.setLocalResources(localResources);

    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    vargs.add("-Xmx" + amMemory + "m");
    vargs.add(appMasterMainClass);
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    vargs.add("--priority " + String.valueOf(shellCmdPriority));
    vargs.add("--admin_user " + adminUser);
    vargs.add("--admin_password " + adminPassword);
    vargs.add("--jar " + jbossAppUri);

    if (debugFlag) {
        vargs.add("--debug");
    }

    vargs.add("1>" + JBossConstants.JBOSS_CONTAINER_LOG_DIR + "/JBossApplicationMaster.stdout");
    vargs.add("2>" + JBossConstants.JBOSS_CONTAINER_LOG_DIR + "/JBossApplicationMaster.stderr");

    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    appContext.setResource(capability);

    appContext.setAMContainerSpec(amContainer);

    Priority pri = Records.newRecord(Priority.class);
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    appContext.setQueue(amQueue);

    LOG.info("Submitting the application to ASM");
    yarnClient.submitApplication(appContext);

    return monitorApplication(appId);
}
From source file: com.cloudera.llama.am.MiniLlama.java
License: Apache License
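MiniLlama collects the cluster's NodeIds either directly from an embedded mini YARN cluster or, on a real cluster, from the RUNNING node reports of a short-lived YarnClient.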
private List<NodeId> getYarnNodeIds(Configuration conf) throws Exception {
    List<NodeId> list = new ArrayList<NodeId>();
    if (miniYarn != null) {
        int clusterNodes = getConf().getInt(MINI_CLUSTER_NODES_KEY, 1);
        for (int i = 0; i < clusterNodes; i++) {
            list.add(miniYarn.getNodeManager(i).getNMContext().getNodeId());
        }
    } else {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(conf);
        yarnClient.start();
        List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);
        for (int i = 0; i < nodes.size(); i++) {
            list.add(nodes.get(i).getNodeId());
        }
        yarnClient.stop();
    }
    return list;
}
From source file: com.cloudera.llama.am.yarn.YarnRMConnector.java
License: Apache License
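YarnRMConnector registers the AM with the ResourceManager, shares a single NMTokenCache between the NMClient and the AMRMClient, and seeds its node map with the capabilities of every node currently in the RUNNING state.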
private void _registerSchedulerAndCreateNMClient(String queue) throws Exception {
    NMTokenCache nmTokenCache = new NMTokenCache();
    nmClient = NMClient.createNMClient();
    nmClient.setNMTokenCache(nmTokenCache);
    nmClient.init(yarnConf);
    nmClient.start();
    LOG.debug("Started NMClient, AM '{}' with scheduler for '{}' queue", appId, queue);
    int heartbeatInterval = getConf().getInt(HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERNAL_DEFAULT);
    AMRMClient<LlamaContainerRequest> amRmClient = AMRMClient.createAMRMClient();
    amRmClient.setNMTokenCache(nmTokenCache);
    amRmClientAsync = AMRMClientAsync.createAMRMClientAsync(amRmClient, heartbeatInterval,
            YarnRMConnector.this);
    amRmClientAsync.init(yarnConf);
    amRmClientAsync.start();
    String urlWithoutScheme = getConf().get(ADVERTISED_TRACKING_URL_KEY, "http://")
            .substring("http://".length());
    RegisterApplicationMasterResponse response = amRmClientAsync.registerApplicationMaster(
            getConf().get(ADVERTISED_HOSTNAME_KEY, ""), getConf().getInt(ADVERTISED_PORT_KEY, 0),
            urlWithoutScheme);
    maxResource = response.getMaximumResourceCapability();
    nodes = Collections.synchronizedMap(new HashMap<String, Resource>());
    for (NodeReport nodeReport : yarnClient.getNodeReports()) {
        if (nodeReport.getNodeState() == NodeState.RUNNING) {
            String nodeKey = getNodeName(nodeReport.getNodeId());
            nodes.put(nodeKey, nodeReport.getCapability());
            LOG.debug("Added node '{}' with '{}' cpus and '{}' memory", nodeKey,
                    nodeReport.getCapability().getVirtualCores(), nodeReport.getCapability().getMemory());
        }
    }
    LOG.debug("Registered with scheduler, AM '{}' for '{}' queue", appId, queue);
}
From source file: com.cloudera.llama.am.yarn.YarnRMConnector.java
License: Apache License
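getNodes() returns node information either freshly from the YarnClient, filtered to RUNNING nodes, or from the connector's continuously updated node map.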
@Override
public List<NodeInfo> getNodes() throws LlamaException {
    List<NodeInfo> ret = new ArrayList<NodeInfo>();
    try {
        if (nodes == null) {
            // Get it from the yarn client.
            List<NodeReport> nodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
            for (NodeReport nodeReport : nodeReports) {
                Resource resource = nodeReport.getCapability();
                NodeInfo nodeInfo = new NodeInfo(getNodeName(nodeReport.getNodeId()),
                        resource.getVirtualCores(), resource.getMemory());
                ret.add(nodeInfo);
            }
        } else {
            // Get it from the nodes structure which is being kept up to date.
            synchronized (nodes) {
                for (Map.Entry<String, Resource> entry : nodes.entrySet()) {
                    Resource nodeReport = entry.getValue();
                    NodeInfo nodeInfo = new NodeInfo(entry.getKey(), nodeReport.getVirtualCores(),
                            nodeReport.getMemory());
                    ret.add(nodeInfo);
                }
            }
        }
        return ret;
    } catch (Throwable ex) {
        throw new LlamaException(ex, ErrorCode.AM_CANNOT_GET_NODES, appId);
    }
}
From source file: com.cloudera.llama.am.yarn.YarnRMConnector.java
License: Apache License
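The onNodesUpdated callback keeps that node map in sync: nodes reported as RUNNING are added or refreshed, and all others are removed.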
@Override
public void onNodesUpdated(List<NodeReport> nodeReports) {
    LOG.debug("Received nodes update for '{}' nodes", nodeReports.size());
    for (NodeReport nodeReport : nodeReports) {
        if (nodeReport.getNodeState() == NodeState.RUNNING) {
            String nodeKey = getNodeName(nodeReport.getNodeId());
            nodes.put(nodeKey, nodeReport.getCapability());
            LOG.debug("Added node '{}' with '{}' cpus and '{}' memory", nodeKey,
                    nodeReport.getCapability().getVirtualCores(), nodeReport.getCapability().getMemory());
        } else {
            LOG.debug("Removed node '{}'", nodeReport.getNodeId());
            nodes.remove(getNodeName(nodeReport.getNodeId()));
        }
    }
}
From source file: com.datatorrent.stram.AffinityRulesTest.java
License: Apache License
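This test builds ten synthetic node reports in the RUNNING state and verifies that an anti-affinity rule on O2 places each of its five partitions in its own container on a distinct host.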
@Test
public void testOperatorPartitionsAntiAffinity() {
    LogicalPlan dag = new LogicalPlan();
    TestGeneratorInputOperator o1 = dag.addOperator("O1", new TestGeneratorInputOperator());
    GenericTestOperator o2 = dag.addOperator("O2", new GenericTestOperator());
    GenericTestOperator o3 = dag.addOperator("O3", new GenericTestOperator());

    dag.addStream("stream1", o1.outport, o2.inport1);
    dag.addStream("stream2", o2.outport1, o3.inport1);

    dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER,
            new StatelessPartitioner<GenericTestOperator>(5));
    AffinityRulesSet ruleSet = new AffinityRulesSet();
    // Valid case:
    List<AffinityRule> rules = new ArrayList<>();
    ruleSet.setAffinityRules(rules);
    AffinityRule rule1 = new AffinityRule(Type.ANTI_AFFINITY, Locality.NODE_LOCAL, false, "O2", "O2");
    rules.add(rule1);
    dag.setAttribute(DAGContext.AFFINITY_RULES_SET, ruleSet);
    dag.validate();
    dag.getAttributes().put(com.datatorrent.api.Context.DAGContext.APPLICATION_PATH,
            testMeta.getAbsolutePath());
    dag.setAttribute(OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());

    StreamingContainerManager scm = new StreamingContainerManager(dag);

    // Check Physical Plan assigns anti-affinity preferences correctly
    for (ContainerStartRequest csr : scm.containerStartRequests) {
        PTContainer container = csr.container;
        // Check that for containers for O2 partitions, anti-affinity preferences
        // are set properly
        if (container.getOperators().get(0).getName().equals("O2")) {
            Assert.assertEquals("Anti-affinity containers set should have 4 containers for other partitions ",
                    4, container.getStrictAntiPrefs().size());
            for (PTContainer c : container.getStrictAntiPrefs()) {
                for (PTOperator operator : c.getOperators()) {
                    Assert.assertEquals("Partion for O2 should be Anti Prefs", "O2", operator.getName());
                }
            }
        }
    }

    // Check resource handler assigns different hosts for each partition
    ResourceRequestHandler rr = new ResourceRequestHandler();

    int containerMem = 1000;
    Map<String, NodeReport> nodeReports = Maps.newHashMap();
    for (int i = 0; i < 10; i++) {
        String hostName = "host" + i;
        NodeReport nr = BuilderUtils.newNodeReport(BuilderUtils.newNodeId(hostName, 0), NodeState.RUNNING,
                "httpAddress", "rackName", BuilderUtils.newResource(0, 0),
                BuilderUtils.newResource(containerMem * 2, 2), 0, null, 0);
        nodeReports.put(nr.getNodeId().getHost(), nr);
    }

    // set resources
    rr.updateNodeReports(Lists.newArrayList(nodeReports.values()));
    Set<String> partitionHostNames = new HashSet<>();
    for (ContainerStartRequest csr : scm.containerStartRequests) {
        String host = rr.getHost(csr, true);
        csr.container.host = host;
        if (csr.container.getOperators().get(0).getName().equals("O2")) {
            Assert.assertNotNull("Host name should not be null", host);
            LOG.info("Partition {} for operator O2 has host = {} ", csr.container.getId(), host);
            Assert.assertTrue("Each Partition should have a different host",
                    !partitionHostNames.contains(host));
            partitionHostNames.add(host);
        }
    }
}
From source file: com.datatorrent.stram.AffinityRulesTest.java
License: Apache License
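This test registers two RUNNING nodes and verifies that an anti-affinity rule between O1 and O2, combined with a host locality constraint pinning O1 to host1, places O2 on host2.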
@Test
public void testAntiAffinityInOperators() {
    LogicalPlan dag = new LogicalPlan();
    dag.getAttributes().put(com.datatorrent.api.Context.DAGContext.APPLICATION_PATH,
            testMeta.getAbsolutePath());
    dag.setAttribute(OperatorContext.STORAGE_AGENT, new MemoryStorageAgent());
    GenericTestOperator o1 = dag.addOperator("O1", GenericTestOperator.class);
    dag.setOperatorAttribute(o1, OperatorContext.MEMORY_MB, 256);
    GenericTestOperator o2 = dag.addOperator("O2", GenericTestOperator.class);
    dag.setOperatorAttribute(o2, OperatorContext.MEMORY_MB, 256);

    dag.getMeta(o1).getAttributes().put(OperatorContext.LOCALITY_HOST, "host1");
    AffinityRulesSet ruleSet = new AffinityRulesSet();
    List<AffinityRule> rules = new ArrayList<>();
    ruleSet.setAffinityRules(rules);
    AffinityRule rule1 = new AffinityRule(Type.ANTI_AFFINITY, Locality.NODE_LOCAL, false, "O1", "O2");
    rules.add(rule1);
    dag.setAttribute(DAGContext.AFFINITY_RULES_SET, ruleSet);

    dag.addStream("o1_outport1", o1.outport1, o2.inport1); // .setLocality(Locality.NODE_LOCAL);

    StreamingContainerManager scm = new StreamingContainerManager(dag);

    ResourceRequestHandler rr = new ResourceRequestHandler();

    int containerMem = 1000;
    Map<String, NodeReport> nodeReports = Maps.newHashMap();
    NodeReport nr = BuilderUtils.newNodeReport(BuilderUtils.newNodeId("host1", 0), NodeState.RUNNING,
            "httpAddress", "rackName", BuilderUtils.newResource(0, 0),
            BuilderUtils.newResource(containerMem * 2, 2), 0, null, 0);
    nodeReports.put(nr.getNodeId().getHost(), nr);
    nr = BuilderUtils.newNodeReport(BuilderUtils.newNodeId("host2", 0), NodeState.RUNNING, "httpAddress",
            "rackName", BuilderUtils.newResource(0, 0), BuilderUtils.newResource(containerMem * 2, 2), 0,
            null, 0);
    nodeReports.put(nr.getNodeId().getHost(), nr);

    // set resources
    rr.updateNodeReports(Lists.newArrayList(nodeReports.values()));

    for (ContainerStartRequest csr : scm.containerStartRequests) {
        String host = rr.getHost(csr, true);
        csr.container.host = host;
        if (csr.container.getOperators().get(0).getName().equals("O1")) {
            Assert.assertEquals("Hosts set to host1 for Operator O1", "host1", host);
        }
        if (csr.container.getOperators().get(0).getName().equals("O2")) {
            Assert.assertEquals("Hosts set to host2 for Operator O2", "host2", host);
        }
    }
}