List of usage examples for org.apache.hadoop.yarn.conf.YarnConfiguration.YARN_APPLICATION_CLASSPATH

The field is declared as:

String YARN_APPLICATION_CLASSPATH

Each example below follows the same basic pattern: read the configured classpath entries with Configuration.getStrings(...), fall back to a default list when the key is unset, and join the entries into the CLASSPATH environment variable of a container that is about to be launched.
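Distilled from the examples that follow, here is a minimal, self-contained sketch of that pattern. The ClasspathEnvSketch class and buildEnv method are names invented for illustration; the Configuration, YarnConfiguration, and ApplicationConstants calls are the same Hadoop APIs the real examples use.

import java.io.File;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ClasspathEnvSketch {

    // Builds the environment map a YARN client would hand to a
    // ContainerLaunchContext: $CLASSPATH, the container working directory,
    // then every entry configured under yarn.application.classpath.
    public static Map<String, String> buildEnv(Configuration conf) {
        StringBuilder classPathEnv = new StringBuilder(ApplicationConstants.Environment.CLASSPATH.$())
                .append(File.pathSeparatorChar).append("./*");
        // getStrings falls back to the stock Hadoop classpath when the key is unset
        for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
                YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
            classPathEnv.append(File.pathSeparatorChar).append(c.trim());
        }
        Map<String, String> env = new HashMap<String, String>();
        env.put("CLASSPATH", classPathEnv.toString());
        return env;
    }
}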
From source file:org.apache.hama.bsp.YARNBSPJobClient.java
License:Apache License
@Override
protected RunningJob launchJob(BSPJobID jobId, BSPJob normalJob, Path submitJobFile, FileSystem pFs)
        throws IOException {
    YARNBSPJob job = (YARNBSPJob) normalJob;

    LOG.info("Submitting job...");
    if (getConf().get("bsp.child.mem.in.mb") == null) {
        LOG.warn("BSP child memory has not been set, YARN will guess your needs or use default values.");
    }

    FileSystem fs = pFs;
    if (fs == null) {
        fs = FileSystem.get(getConf());
    }

    if (getConf().get("bsp.user.name") == null) {
        String s = getUnixUserName();
        getConf().set("bsp.user.name", s);
        LOG.debug("Retrieved username: " + s);
    }

    yarnClient.start();
    try {
        YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
        LOG.info("Got cluster metric info from ASM" + ", numNodeManagers="
                + clusterMetrics.getNumNodeManagers());

        List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
        LOG.info("Got cluster node info from ASM");
        for (NodeReport node : clusterNodeReports) {
            LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress="
                    + node.getHttpAddress() + ", nodeRackName=" + node.getRackName()
                    + ", nodeNumContainers=" + node.getNumContainers());
        }

        QueueInfo queueInfo = yarnClient.getQueueInfo("default");
        LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
                + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
                + ", queueApplicationCount=" + queueInfo.getApplications().size()
                + ", queueChildQueueCount=" + queueInfo.getChildQueues().size());

        List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
        for (QueueUserACLInfo aclInfo : listAclInfo) {
            for (QueueACL userAcl : aclInfo.getUserAcls()) {
                LOG.info("User ACL info for queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                        + userAcl.name());
            }
        }

        // Get a new application id
        YarnClientApplication app = yarnClient.createApplication();

        // Create a new ApplicationSubmissionContext
        //ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);
        ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
        id = appContext.getApplicationId();

        // Set the application name
        appContext.setApplicationName(job.getJobName());

        // Create a new container launch context for the AM's container
        ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

        // Define the local resources required
        Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

        // Let's assume the jar we need for our ApplicationMaster is available in
        // HDFS at a certain known path to us and we want to make it available to
        // the ApplicationMaster in the launched container
        if (job.getJar() == null) {
            throw new IllegalArgumentException("Jar must be set in order to run the application!");
        }

        Path jarPath = new Path(job.getJar());
        jarPath = fs.makeQualified(jarPath);
        getConf().set("bsp.jar", jarPath.makeQualified(fs.getUri(), jarPath).toString());

        FileStatus jarStatus = fs.getFileStatus(jarPath);
        LocalResource amJarRsrc = Records.newRecord(LocalResource.class);
        amJarRsrc.setType(LocalResourceType.FILE);
        amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(jarPath));
        amJarRsrc.setTimestamp(jarStatus.getModificationTime());
        amJarRsrc.setSize(jarStatus.getLen());

        // This creates a symlink in the working directory
        localResources.put(YARNBSPConstants.APP_MASTER_JAR_PATH, amJarRsrc);

        // Add Hama-related jar files to local resources for the container
        List<File> hamaJars;
        if (System.getProperty("hama.home.dir") != null)
            hamaJars = localJarfromPath(System.getProperty("hama.home.dir"));
        else
            hamaJars = localJarfromPath(getConf().get("hama.home.dir"));
        String hamaPath = getSystemDir() + "/hama";
        for (File fileEntry : hamaJars) {
            addToLocalResources(fs, fileEntry.getCanonicalPath(), hamaPath, fileEntry.getName(),
                    localResources);
        }

        // Set the local resources into the launch context
        amContainer.setLocalResources(localResources);

        // Set up the environment needed for the launch context
        Map<String, String> env = new HashMap<String, String>();
        // Assuming our classes or jars are available as local resources in the
        // working directory from which the command will be run, we need to append
        // "." to the path.
        // By default, all the hadoop-specific classpaths will already be available
        // in $CLASSPATH, so we should be careful not to overwrite it.
        StringBuilder classPathEnv = new StringBuilder(ApplicationConstants.Environment.CLASSPATH.$())
                .append(File.pathSeparatorChar).append("./*");
        for (String c : yarnConf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
                YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
            classPathEnv.append(File.pathSeparatorChar);
            classPathEnv.append(c.trim());
        }

        env.put(YARNBSPConstants.HAMA_YARN_LOCATION, jarPath.toUri().toString());
        env.put(YARNBSPConstants.HAMA_YARN_SIZE, Long.toString(jarStatus.getLen()));
        env.put(YARNBSPConstants.HAMA_YARN_TIMESTAMP, Long.toString(jarStatus.getModificationTime()));
        env.put(YARNBSPConstants.HAMA_LOCATION, hamaPath);
        env.put("CLASSPATH", classPathEnv.toString());
        amContainer.setEnvironment(env);

        // Set the necessary command to execute on the allocated container
        Vector<CharSequence> vargs = new Vector<CharSequence>(5);
        vargs.add("${JAVA_HOME}/bin/java");
        vargs.add("-cp " + classPathEnv);
        vargs.add(ApplicationMaster.class.getCanonicalName());
        vargs.add(submitJobFile.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString());
        vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/hama-appmaster.stdout");
        vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/hama-appmaster.stderr");

        // Get final command
        StringBuilder command = new StringBuilder();
        for (CharSequence str : vargs) {
            command.append(str).append(" ");
        }

        List<String> commands = new ArrayList<String>();
        commands.add(command.toString());
        amContainer.setCommands(commands);
        LOG.debug("Start command: " + command);

        Resource capability = Records.newRecord(Resource.class);
        // We have at least 3 threads, each consuming 1 MB, for each BSP task,
        // plus a base usage of 100 MB
        capability.setMemory(3 * job.getNumBspTask() + getConf().getInt("hama.appmaster.memory.mb", 100));
        LOG.info("Set memory for the application master to " + capability.getMemory() + "mb!");

        // Set the container launch content into the ApplicationSubmissionContext
        appContext.setResource(capability);

        // Set up security tokens
        if (UserGroupInformation.isSecurityEnabled()) {
            // Note: the Credentials class is marked as LimitedPrivate for HDFS and MapReduce
            Credentials credentials = new Credentials();
            String tokenRenewer = yarnConf.get(YarnConfiguration.RM_PRINCIPAL);
            if (tokenRenewer == null || tokenRenewer.length() == 0) {
                throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
            }

            // For now, only getting tokens for the default file system.
            final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
            if (tokens != null) {
                for (Token<?> token : tokens) {
                    LOG.info("Got dt for " + fs.getUri() + "; " + token);
                }
            }
            DataOutputBuffer dob = new DataOutputBuffer();
            credentials.writeTokenStorageToStream(dob);
            ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
            amContainer.setTokens(fsTokens);
        }

        appContext.setAMContainerSpec(amContainer);

        // Create the request to send to the ApplicationsManager
        ApplicationId appId = appContext.getApplicationId();
        yarnClient.submitApplication(appContext);

        return monitorApplication(appId) ? new NetworkedJob() : null;
    } catch (YarnException e) {
        e.printStackTrace();
        return null;
    }
}
From source file:org.apache.helix.provisioning.yarn.AppLauncher.java
License:Apache License
public boolean launch() throws Exception {
    LOG.info("Running Client");
    yarnClient.start();

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();

    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);

    // Set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    _appId = appContext.getApplicationId();
    _appMasterConfig.setAppId(_appId.getId());
    String appName = _applicationSpec.getAppName();
    _appMasterConfig.setAppName(appName);
    _appMasterConfig.setApplicationSpecFactory(_applicationSpecFactory.getClass().getCanonicalName());
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    LOG.info("Copy Application archive file from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(_conf);

    // Get packages for each component
    Map<String, URI> packages = new HashMap<String, URI>();
    packages.put(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString(), appMasterArchive.toURI());
    packages.put(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString(), _yamlConfigFile.toURI());
    for (String serviceName : _applicationSpec.getServices()) {
        packages.put(serviceName, _applicationSpec.getServicePackage(serviceName));
    }
    Map<String, Path> hdfsDest = new HashMap<String, Path>();
    Map<String, String> classpathMap = new HashMap<String, String>();
    for (String name : packages.keySet()) {
        URI uri = packages.get(name);
        Path dst = copyToHDFS(fs, name, uri);
        hdfsDest.put(name, dst);
        String classpath = generateClasspathAfterExtraction(name, new File(uri));
        classpathMap.put(name, classpath);
        _appMasterConfig.setClasspath(name, classpath);
        String serviceMainClass = _applicationSpec.getServiceMainClass(name);
        if (serviceMainClass != null) {
            _appMasterConfig.setMainClass(name, serviceMainClass);
        }
    }

    // Get YAML files describing all workflows to immediately start
    Map<String, URI> workflowFiles = new HashMap<String, URI>();
    List<TaskConfig> taskConfigs = _applicationSpec.getTaskConfigs();
    if (taskConfigs != null) {
        for (TaskConfig taskConfig : taskConfigs) {
            URI configUri = taskConfig.getYamlURI();
            if (taskConfig.name != null && configUri != null) {
                workflowFiles.put(taskConfig.name, taskConfig.getYamlURI());
            }
        }
    }

    // Set local resources for the application master:
    // local files or archives as needed.
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    LocalResource appMasterPkg = setupLocalResource(fs,
            hdfsDest.get(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString()));
    LocalResource appSpecFile = setupLocalResource(fs,
            hdfsDest.get(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString()));
    localResources.put(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString(), appMasterPkg);
    localResources.put(AppMasterConfig.AppEnvironment.APP_SPEC_FILE.toString(), appSpecFile);
    for (String name : workflowFiles.keySet()) {
        URI uri = workflowFiles.get(name);
        Path dst = copyToHDFS(fs, name, uri);
        LocalResource taskLocalResource = setupLocalResource(fs, dst);
        localResources.put(AppMasterConfig.AppEnvironment.TASK_CONFIG_FILE.toString() + "_" + name,
                taskLocalResource);
    }

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    // amContainer.setContainerTokens(containerToken);

    // Add AppMaster.jar location to classpath.
    // At some point we should not be required to add
    // the hadoop-specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*").append(File.pathSeparatorChar);
    classPathEnv.append(classpathMap.get(AppMasterConfig.AppEnvironment.APP_MASTER_PKG.toString()));
    for (String c : _conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // Add the runtime classpath needed for tests to work
    if (_conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }
    LOG.info("\n\n Setting the classpath to launch AppMaster:\n\n");

    // Set the env variables to be set up in the env where the application master will be run
    Map<String, String> env = new HashMap<String, String>(_appMasterConfig.getEnv());
    env.put("CLASSPATH", classPathEnv.toString());
    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master launch command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    int amMemory = 4096;
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(AppMasterLauncher.class.getCanonicalName());
    // Set params for Application Master
    // vargs.add("--num_containers " + String.valueOf(numContainers));

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements.
    // For now, only memory is supported, so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Set up security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = _conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    int amPriority = 0;
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    String amQueue = "default";
    appContext.setQueue(amQueue);

    LOG.info("Submitting application to YARN Resource Manager");
    ApplicationId applicationId = yarnClient.submitApplication(appContext);
    LOG.info("Submitted application with applicationId:" + applicationId);
    return true;
}
From source file:org.apache.helix.provisioning.yarn.YarnProvisioner.java
License:Apache License
private ContainerLaunchContext createLaunchContext(ContainerId containerId, Container container,
        Participant participant) throws Exception {
    ContainerLaunchContext participantContainer = Records.newRecord(ContainerLaunchContext.class);

    // Map<String, String> envs = System.getenv();
    String appName = applicationMasterConfig.getAppName();
    int appId = applicationMasterConfig.getAppId();
    String serviceName = _resourceConfig.getId().stringify();
    String serviceClasspath = applicationMasterConfig.getClassPath(serviceName);
    String mainClass = applicationMasterConfig.getMainClass(serviceName);
    String zkAddress = applicationMasterConfig.getZKAddress();

    // Set the local resources needed to launch the container
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    LocalResource servicePackageResource = Records.newRecord(LocalResource.class);
    YarnConfiguration conf = new YarnConfiguration();
    FileSystem fs;
    fs = FileSystem.get(conf);
    String pathSuffix = appName + "/" + appId + "/" + serviceName + ".tar";
    Path dst = new Path(fs.getHomeDirectory(), pathSuffix);
    FileStatus destStatus = fs.getFileStatus(dst);

    // Set the type of resource - file or archive.
    // Archives are untarred at the destination;
    // we don't need the jar file to be untarred for now
    servicePackageResource.setType(LocalResourceType.ARCHIVE);
    // Set visibility of the resource.
    // Setting to most private option
    servicePackageResource.setVisibility(LocalResourceVisibility.APPLICATION);
    // Set the resource to be copied over
    servicePackageResource.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    // Set timestamp and length of file so that the framework
    // can do basic sanity checks for the local resource
    // after it has been copied over to ensure it is the same
    // resource the client intended to use with the application
    servicePackageResource.setTimestamp(destStatus.getModificationTime());
    servicePackageResource.setSize(destStatus.getLen());
    LOG.info("Setting local resource:" + servicePackageResource + " for service" + serviceName);
    localResources.put(serviceName, servicePackageResource);

    // Set local resource info into app master container launch context
    participantContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    // amContainer.setContainerTokens(containerToken);

    // Set the env variables to be set up in the env where the application master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();
    env.put(serviceName, dst.getName());

    // Add AppMaster.jar location to classpath.
    // At some point we should not be required to add
    // the hadoop-specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*");
    classPathEnv.append(File.pathSeparatorChar);
    classPathEnv.append(serviceClasspath);
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");
    LOG.info("Setting classpath for service:\n" + classPathEnv.toString());
    env.put("CLASSPATH", classPathEnv.toString());

    participantContainer.setEnvironment(env);

    if (applicationMaster.allTokens != null) {
        LOG.info("Setting tokens: " + applicationMaster.allTokens);
        participantContainer.setTokens(applicationMaster.allTokens);
    }

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + 4096 + "m");
    // Set class name
    vargs.add(ParticipantLauncher.class.getCanonicalName());
    // Set params for the container participant
    vargs.add("--zkAddress " + zkAddress);
    vargs.add("--cluster " + appName);
    vargs.add("--participantId " + participant.getId().stringify());
    vargs.add("--participantClass " + mainClass);

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/ContainerParticipant.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/ContainerParticipant.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }
    LOG.info("Completed setting up container launch command " + command.toString() + " with arguments \n"
            + vargs);
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    participantContainer.setCommands(commands);
    return participantContainer;
}
From source file:org.apache.hoya.tools.HoyaUtils.java
License:Apache License
/**
 * Build up the classpath for execution
 * -behaves very differently on a mini test cluster vs a production one.
 *
 * @param hoyaConfDir relative path to the dir containing hoya config options to put on the
 *          classpath -or null
 * @param libdir directory containing the JAR files
 * @param config the configuration
 * @param usingMiniMRCluster flag to indicate the MiniMR cluster is in use
 *          (and hence the current classpath should be used, not anything built up)
 * @return a classpath
 */
public static String buildClasspath(String hoyaConfDir, String libdir, Configuration config,
        boolean usingMiniMRCluster) {

    // Add AppMaster.jar location to classpath.
    // At some point we should not be required to add
    // the hadoop-specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder();
    // Add the runtime classpath needed for tests to work
    if (usingMiniMRCluster) {
        // For the mini cluster we pass down the java CP properties
        // and nothing else
        classPathEnv.append(System.getProperty("java.class.path"));
    } else {
        char col = File.pathSeparatorChar;
        classPathEnv.append(ApplicationConstants.Environment.CLASSPATH.$());
        String[] strs = config.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
                YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH);
        if (strs != null) {
            for (String c : strs) {
                classPathEnv.append(col);
                classPathEnv.append(c.trim());
            }
        }
        classPathEnv.append(col).append("./").append(libdir).append("/*");
        if (hoyaConfDir != null) {
            classPathEnv.append(col).append(hoyaConfDir);
        }
    }
    return classPathEnv.toString();
}
From source file:org.apache.ignite.yarn.IgniteYarnClient.java
License:Apache License
/**
 * @param envs Environment variables.
 * @param conf Yarn configuration.
 */
private static void setupAppMasterEnv(Map<String, String> envs, YarnConfiguration conf) {
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH))
        Apps.addToEnvironment(envs, Environment.CLASSPATH.name(), c.trim(), File.pathSeparator);

    Apps.addToEnvironment(envs, Environment.CLASSPATH.name(), Environment.PWD.$() + File.separator + "*",
            File.pathSeparator);
}
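Note the design choice here: instead of concatenating entries into a StringBuilder as the other examples do, this client delegates to the org.apache.hadoop.yarn.util.Apps.addToEnvironment helper, which appends each entry to an existing environment variable with the given separator (or sets it if absent), so a CLASSPATH value already present in the map is extended rather than overwritten.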
From source file:org.apache.metron.maas.service.Client.java
License:Apache License
/**
 * Main run function for the client
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {
    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got cluster metric info from ASM" + ", numNodeManagers="
            + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress="
                + node.getHttpAddress() + ", nodeRackName=" + node.getRackName() + ", nodeNumContainers="
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL info for queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    if (domainId != null && domainId.length() > 0 && toCreateDomain) {
        prepareTimelineDomain();
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capability of resources in this cluster " + maxVCores);

    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value."
                + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }

    // Set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();

    appContext.setKeepContainersAcrossApplicationAttempts(keepContainers);
    appContext.setApplicationName(appName);

    if (attemptFailuresValidityInterval >= 0) {
        appContext.setAttemptFailuresValidityInterval(attemptFailuresValidityInterval);
    }

    // Set local resources for the application master:
    // local files or archives as needed.
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(conf);
    Path ajPath = addToLocalResources(fs, appMasterJar, appMasterJarPath, appId.toString(), localResources,
            null);

    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        addToLocalResources(fs, log4jPropFile, log4jPath, appId.toString(), localResources, null);
    }

    // Set the necessary security tokens as needed
    // amContainer.setContainerTokens(containerToken);

    // Set the env variables to be set up in the env where the application master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // Put the location of the shell script into the env.
    // Using the env info, the application master will create the correct local resource for the
    // eventual containers that will be launched to execute the shell scripts
    if (domainId != null && domainId.length() > 0) {
        env.put(Constants.TIMELINEDOMAIN, domainId);
    }

    // Add AppMaster.jar location to classpath.
    // At some point we should not be required to add
    // the hadoop-specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$$())
            .append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) {
        classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./log4j.properties");

    // Add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add(ApplicationMaster.AMOptions.toArgs(ApplicationMaster.AMOptions.ZK_QUORUM.of(zkQuorum),
            ApplicationMaster.AMOptions.ZK_ROOT.of(zkRoot),
            ApplicationMaster.AMOptions.APP_JAR_PATH.of(ajPath.toString())));
    if (null != nodeLabelExpression) {
        appContext.setNodeLabelExpression(nodeLabelExpression);
    }
    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(localResources, env, commands,
            null, null, null);

    // Set up resource type requirements.
    // For now, both memory and vcores are supported, so we set memory and
    // vcores requirements
    Resource capability = Resource.newInstance(amMemory, amVCores);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario

    // Set up security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        // Note: the Credentials class is marked as LimitedPrivate for HDFS and MapReduce
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Priority.newInstance(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager.
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on success
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // Monitor the application
    return monitorApplication(appId);
}
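Unlike the earlier examples, this client builds the classpath in cross-platform form: Environment.CLASSPATH.$$() emits the {{CLASSPATH}} token and ApplicationConstants.CLASS_PATH_SEPARATOR emits <CPS>, both of which the NodeManager expands on the target host rather than the client, and the fallback list is DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH. The same submission therefore works whether the containers land on Linux or Windows nodes.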
From source file:org.apache.oozie.util.ClasspathUtils.java
License:Apache License
public static void setupClasspath(Map<String, String> env, Configuration conf) throws IOException {
    // Propagate the system classpath when using the mini cluster
    if (usingMiniYarnCluster) {
        MRApps.addToEnvironment(env, ApplicationConstants.Environment.CLASSPATH.name(),
                System.getProperty("java.class.path"), conf);
    }

    for (String entry : CLASSPATH_ENTRIES) {
        MRApps.addToEnvironment(env, ApplicationConstants.Environment.CLASSPATH.name(), entry, conf);
    }

    // A * in the classpath will only find a .jar, so we need to filter out
    // all .jars and add everything else
    addToClasspathIfNotJar(org.apache.hadoop.mapreduce.filecache.DistributedCache.getFileClassPaths(conf),
            org.apache.hadoop.mapreduce.filecache.DistributedCache.getCacheFiles(conf), conf, env,
            ApplicationConstants.Environment.PWD.$());
    addToClasspathIfNotJar(org.apache.hadoop.mapreduce.filecache.DistributedCache.getArchiveClassPaths(conf),
            org.apache.hadoop.mapreduce.filecache.DistributedCache.getCacheArchives(conf), conf, env,
            ApplicationConstants.Environment.PWD.$());

    boolean crossPlatform = conf.getBoolean(MRConfig.MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM,
            MRConfig.DEFAULT_MAPREDUCE_APP_SUBMISSION_CROSS_PLATFORM);
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            crossPlatform ? YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH
                    : YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        MRApps.addToEnvironment(env, ApplicationConstants.Environment.CLASSPATH.name(), c.trim(), conf);
    }
}
From source file:org.apache.oozie.util.ClasspathUtils.java
License:Apache License
public static Configuration addToClasspathFromLocalShareLib(Configuration conf, Path libPath) {
    if (conf == null) {
        conf = new Configuration(false);
    }

    final String pathStr = normalizedLocalFsPath(libPath);
    String appClassPath = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH);

    if (org.apache.commons.lang3.StringUtils.isEmpty(appClassPath)) {
        addPathToYarnClasspathInConfig(conf, pathStr, StringUtils.join(File.pathSeparator,
                YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH));
    } else {
        addPathToYarnClasspathInConfig(conf, pathStr, appClassPath);
    }

    return conf;
}
From source file:org.apache.oozie.util.ClasspathUtils.java
License:Apache License
private static void addPathToYarnClasspathInConfig(Configuration conf, String pathStr, String appClassPath) {
    conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, appClassPath + File.pathSeparator + pathStr);
}
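The two ClasspathUtils helpers above compose as follows; a minimal usage sketch, where the share-lib directory is a hypothetical path and the printed value depends on how normalizedLocalFsPath rewrites it:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.oozie.util.ClasspathUtils;

public class ShareLibClasspathExample {
    public static void main(String[] args) {
        // Hypothetical share-lib location, purely for illustration.
        Path shareLib = new Path("/opt/oozie/share/lib");

        // Passing null makes the helper allocate a fresh Configuration(false).
        Configuration conf = ClasspathUtils.addToClasspathFromLocalShareLib(null, shareLib);

        // The key now holds the default cross-platform classpath with the
        // normalized local path appended after File.pathSeparator.
        System.out.println(conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH));
    }
}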
From source file:org.apache.pig.test.TezMiniCluster.java
License:Apache License
@Override
public void setupMiniDfsAndMrClusters() {
    try {
        deleteConfFiles();
        CONF_DIR.mkdirs();

        // Build mini DFS cluster
        Configuration hdfsConf = new Configuration(false);
        hdfsConf.addResource("core-default.xml");
        hdfsConf.addResource("hdfs-default.xml");
        m_dfs = new MiniDFSCluster.Builder(hdfsConf).numDataNodes(2).format(true).racks(null).build();
        m_fileSys = m_dfs.getFileSystem();
        m_dfs_conf = m_dfs.getConfiguration(0);

        // Create user home directory
        m_fileSys.mkdirs(m_fileSys.getWorkingDirectory());

        // Write core-site.xml
        Configuration core_site = new Configuration(false);
        core_site.set(FileSystem.FS_DEFAULT_NAME_KEY, m_dfs_conf.get(FileSystem.FS_DEFAULT_NAME_KEY));
        core_site.writeXml(new FileOutputStream(CORE_CONF_FILE));

        Configuration hdfs_site = new Configuration(false);
        for (Entry<String, String> conf : m_dfs_conf) {
            if (ArrayUtils.contains(m_dfs_conf.getPropertySources(conf.getKey()), "programatically")) {
                hdfs_site.set(conf.getKey(), m_dfs_conf.getRaw(conf.getKey()));
            }
        }
        hdfs_site.writeXml(new FileOutputStream(HDFS_CONF_FILE));

        // Build mini YARN cluster
        m_mr = new MiniMRYarnCluster("PigMiniCluster", 2);
        m_mr.init(m_dfs_conf);
        m_mr.start();
        m_mr_conf = m_mr.getConfig();
        m_mr_conf.set(MRConfiguration.FRAMEWORK_NAME, "yarn-tez");
        m_mr_conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, System.getProperty("java.class.path"));
        m_mr_conf.set(MRJobConfig.MAP_JAVA_OPTS, "-Xmx2048m");
        m_mr_conf.set(MRJobConfig.REDUCE_JAVA_OPTS, "-Xmx2048m");

        Configuration mapred_site = new Configuration(false);
        Configuration yarn_site = new Configuration(false);
        for (Entry<String, String> conf : m_mr_conf) {
            if (ArrayUtils.contains(m_mr_conf.getPropertySources(conf.getKey()), "programatically")) {
                if (conf.getKey().contains("yarn")) {
                    yarn_site.set(conf.getKey(), m_mr_conf.getRaw(conf.getKey()));
                } else if (!conf.getKey().startsWith("dfs")) {
                    mapred_site.set(conf.getKey(), m_mr_conf.getRaw(conf.getKey()));
                }
            }
        }
        mapred_site.writeXml(new FileOutputStream(MAPRED_CONF_FILE));
        yarn_site.writeXml(new FileOutputStream(YARN_CONF_FILE));

        // Write tez-site.xml
        Configuration tez_conf = new Configuration(false);
        // TODO PIG-3659 - Remove this once memory management is fixed
        tez_conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_IO_SORT_MB, "20");
        tez_conf.set("tez.lib.uris", "hdfs:///tez,hdfs:///tez/lib");
        // Set to a lower value so that tests don't get stuck for long because of 1 AM running at a time
        tez_conf.set(TezConfiguration.TEZ_SESSION_AM_DAG_SUBMIT_TIMEOUT_SECS, "20");
        // Lower the max task attempts to 2 so that negative tests fail
        // faster. By default, tasks retry 4 times
        tez_conf.set(TezConfiguration.TEZ_AM_TASK_MAX_FAILED_ATTEMPTS, "2");
        tez_conf.writeXml(new FileOutputStream(TEZ_CONF_FILE));

        // Copy tez jars to hdfs
        m_fileSys.mkdirs(new Path("/tez/lib"));
        FileFilter fileFilter = new RegexFileFilter("tez-.+\\.jar$");
        File[] tezJars = TEZ_LIB_DIR.listFiles(fileFilter);
        for (int i = 0; i < tezJars.length; i++) {
            if (tezJars[i].getName().startsWith("tez-api")) {
                m_fileSys.copyFromLocalFile(new Path(tezJars[i].getAbsoluteFile().toString()),
                        new Path("/tez"));
            } else {
                m_fileSys.copyFromLocalFile(new Path(tezJars[i].getAbsoluteFile().toString()),
                        new Path("/tez/lib"));
            }
        }

        m_conf = m_mr_conf;
        // Turn FetchOptimizer off so that we can actually test Tez
        m_conf.set(PigConfiguration.OPT_FETCH, System.getProperty("test.opt.fetch", "false"));

        System.setProperty("junit.hadoop.conf", CONF_DIR.getPath());
        System.setProperty("hadoop.log.dir", "build/test/logs");
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}