List of usage examples for org.apache.hadoop.yarn.api.records LocalResourceType ARCHIVE
LocalResourceType ARCHIVE
ARCHIVE marks a local resource as an archive (for example a .zip, .tar.gz or .jar distribution) that is automatically unarchived by the NodeManager when it is localized for a container. The usage examples below are taken from open-source projects.
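Before the project examples, here is a minimal sketch of the pattern they all follow, assuming the Hadoop 2.x client API used throughout this page (the class and method names below are illustrative and not taken from any of the listed projects): build a LocalResource that points at an archive already uploaded to HDFS, mark it as ARCHIVE, and register it in the container launch context.

import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;

public class ArchiveResourceExample {

    // Builds a LocalResource of type ARCHIVE for an archive that already sits in HDFS.
    // The NodeManager downloads and unpacks it before the container starts.
    public static LocalResource createArchiveResource(FileSystem fs, Path hdfsArchive) throws IOException {
        FileStatus status = fs.getFileStatus(hdfsArchive);
        LocalResource resource = Records.newRecord(LocalResource.class);
        resource.setType(LocalResourceType.ARCHIVE);                  // unpack on localization
        resource.setVisibility(LocalResourceVisibility.APPLICATION);  // visible to this application only
        resource.setResource(ConverterUtils.getYarnUrlFromPath(hdfsArchive));
        resource.setTimestamp(status.getModificationTime());          // must match the file in HDFS
        resource.setSize(status.getLen());
        return resource;
    }
}

The key under which such a resource is later registered, e.g. containerLaunchContext.setLocalResources(Collections.singletonMap("mydist", resource)), becomes the link name in the container's working directory; because the type is ARCHIVE, the NodeManager exposes the unpacked contents of the archive under that link rather than the archive file itself.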
From source file:com.continuuity.weave.yarn.utils.YarnUtils.java
License:Open Source License
private static LocalResource setLocalResourceType(LocalResource localResource, LocalFile localFile) {
    if (localFile.isArchive()) {
        if (localFile.getPattern() == null) {
            localResource.setType(LocalResourceType.ARCHIVE);
        } else {
            localResource.setType(LocalResourceType.PATTERN);
            localResource.setPattern(localFile.getPattern());
        }
    } else {
        localResource.setType(LocalResourceType.FILE);
    }
    return localResource;
}
From source file:com.continuuity.weave.yarn.YarnWeavePreparer.java
License:Open Source License
private void saveKafka(Map<String, LocalResource> localResources) throws IOException {
    LOG.debug("Copy kafka.tgz");
    Location location = copyFromURL(getClass().getClassLoader().getResource(KAFKA_ARCHIVE),
            createTempLocation("kafka", ".tgz"));
    LOG.debug("Done kafka.tgz");
    LocalResource localResource = YarnUtils.createLocalResource(location);
    localResource.setType(LocalResourceType.ARCHIVE);
    localResources.put("kafka.tgz", localResource);
}
From source file:com.datatorrent.stram.LaunchContainerRunnable.java
License:Apache License
/**
 * Connects to CM, sets up container launch context and eventually dispatches the container start request to the CM.
 */
@Override
public void run() {
    LOG.info("Setting up container launch context for containerid={}", container.getId());
    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
    setClasspath(containerEnv);
    try {
        // propagate to replace node managers user name (effective in non-secure mode)
        containerEnv.put("HADOOP_USER_NAME", UserGroupInformation.getLoginUser().getUserName());
    } catch (Exception e) {
        LOG.error("Failed to retrieve principal name", e);
    }
    // Set the environment
    ctx.setEnvironment(containerEnv);
    ctx.setTokens(tokens);

    // Set the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    // add resources for child VM
    try {
        // child VM dependencies
        FileSystem fs = StramClientUtils.newFileSystemInstance(nmClient.getConfig());
        try {
            addFilesToLocalResources(LocalResourceType.FILE, dag.getAttributes().get(LogicalPlan.LIBRARY_JARS),
                    localResources, fs);
            String archives = dag.getAttributes().get(LogicalPlan.ARCHIVES);
            if (archives != null) {
                addFilesToLocalResources(LocalResourceType.ARCHIVE, archives, localResources, fs);
            }
            ctx.setLocalResources(localResources);
        } finally {
            fs.close();
        }
    } catch (IOException e) {
        LOG.error("Failed to prepare local resources.", e);
        return;
    }

    // Set the necessary command to execute on the allocated container
    List<CharSequence> vargs = getChildVMCommand(container.getId().toString());

    // Get final command
    StringBuilder command = new StringBuilder(1024);
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }
    LOG.info("Launching on node: {} command: {}", container.getNodeId(), command);

    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    ctx.setCommands(commands);

    nmClient.startContainerAsync(container, ctx);
}
From source file:com.datatorrent.stram.StramClient.java
License:Apache License
/**
 * Launch application for the dag represented by this client.
 *
 * @throws YarnException
 * @throws IOException
 */
public void startApplication() throws YarnException, IOException {
    Class<?>[] defaultClasses;

    if (applicationType.equals(YARN_APPLICATION_TYPE)) {
        //TODO restrict the security check to only check if security is enabled for webservices.
        if (UserGroupInformation.isSecurityEnabled()) {
            defaultClasses = DATATORRENT_SECURITY_CLASSES;
        } else {
            defaultClasses = DATATORRENT_CLASSES;
        }
    } else {
        throw new IllegalStateException(applicationType + " is not a valid application type.");
    }

    LinkedHashSet<String> localJarFiles = findJars(dag, defaultClasses);

    if (resources != null) {
        localJarFiles.addAll(resources);
    }

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    //GetClusterNodesRequest clusterNodesReq = Records.newRecord(GetClusterNodesRequest.class);
    //GetClusterNodesResponse clusterNodesResp = rmClient.clientRM.getClusterNodes(clusterNodesReq);
    //LOG.info("Got Cluster node info from ASM");
    //for (NodeReport node : clusterNodesResp.getNodeReports()) {
    //  LOG.info("Got node report from ASM for"
    //      + ", nodeId=" + node.getNodeId()
    //      + ", nodeAddress" + node.getHttpAddress()
    //      + ", nodeRackName" + node.getRackName()
    //      + ", nodeNumContainers" + node.getNumContainers()
    //      + ", nodeHealthStatus" + node.getHealthReport());
    //}

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication newApp = yarnClient.createApplication();
    appId = newApp.getNewApplicationResponse().getApplicationId();

    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = newApp.getNewApplicationResponse().getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);
    int amMemory = dag.getMasterMemoryMB();
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    if (dag.getAttributes().get(LogicalPlan.APPLICATION_ID) == null) {
        dag.setAttribute(LogicalPlan.APPLICATION_ID, appId.toString());
    }

    // Create launch context for app master
    LOG.info("Setting up application submission context for ASM");
    ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);

    // set the application id
    appContext.setApplicationId(appId);
    // set the application name
    appContext.setApplicationName(dag.getValue(LogicalPlan.APPLICATION_NAME));
    appContext.setApplicationType(this.applicationType);
    if (YARN_APPLICATION_TYPE.equals(this.applicationType)) {
        //appContext.setMaxAppAttempts(1); // no retries until Stram is HA
    }

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // Setup security tokens
    // If security is enabled get ResourceManager and NameNode delegation tokens.
    // Set these tokens on the container so that they are sent as part of application submission.
    // This also sets them up for renewal by ResourceManager. The NameNode delegation rmToken
    // is also used by ResourceManager to fetch the jars from HDFS and set them up for the
    // application master launch.
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
        try {
            final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
            if (tokens != null) {
                for (Token<?> token : tokens) {
                    LOG.info("Got dt for " + fs.getUri() + "; " + token);
                }
            }
        } finally {
            fs.close();
        }

        addRMDelegationToken(tokenRenewer, credentials);

        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    // copy required jar files to dfs, to be localized for containers
    FileSystem fs = StramClientUtils.newFileSystemInstance(conf);
    try {
        Path appsBasePath = new Path(StramClientUtils.getDTDFSRootDir(fs, conf), StramClientUtils.SUBDIR_APPS);
        Path appPath = new Path(appsBasePath, appId.toString());

        String libJarsCsv = copyFromLocal(fs, appPath, localJarFiles.toArray(new String[] {}));

        LOG.info("libjars: {}", libJarsCsv);
        dag.getAttributes().put(LogicalPlan.LIBRARY_JARS, libJarsCsv);
        LaunchContainerRunnable.addFilesToLocalResources(LocalResourceType.FILE, libJarsCsv, localResources, fs);

        if (archives != null) {
            String[] localFiles = archives.split(",");
            String archivesCsv = copyFromLocal(fs, appPath, localFiles);
            LOG.info("archives: {}", archivesCsv);
            dag.getAttributes().put(LogicalPlan.ARCHIVES, archivesCsv);
            LaunchContainerRunnable.addFilesToLocalResources(LocalResourceType.ARCHIVE, archivesCsv,
                    localResources, fs);
        }

        if (files != null) {
            String[] localFiles = files.split(",");
            String filesCsv = copyFromLocal(fs, appPath, localFiles);
            LOG.info("files: {}", filesCsv);
            dag.getAttributes().put(LogicalPlan.FILES, filesCsv);
            LaunchContainerRunnable.addFilesToLocalResources(LocalResourceType.FILE, filesCsv, localResources, fs);
        }

        dag.getAttributes().put(LogicalPlan.APPLICATION_PATH, appPath.toString());
        if (dag.getAttributes().get(OperatorContext.STORAGE_AGENT) == null) { /* which would be the most likely case */
            Path checkpointPath = new Path(appPath, LogicalPlan.SUBDIR_CHECKPOINTS);
            // use conf client side to pickup any proxy settings from dt-site.xml
            dag.setAttribute(OperatorContext.STORAGE_AGENT, new FSStorageAgent(checkpointPath.toString(), conf));
        }

        if (dag.getAttributes().get(LogicalPlan.CONTAINER_OPTS_CONFIGURATOR) == null) {
            dag.setAttribute(LogicalPlan.CONTAINER_OPTS_CONFIGURATOR, new BasicContainerOptConfigurator());
        }

        // Set the log4j properties if needed
        if (!log4jPropFile.isEmpty()) {
            Path log4jSrc = new Path(log4jPropFile);
            Path log4jDst = new Path(appPath, "log4j.props");
            fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
            FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
            LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
            log4jRsrc.setType(LocalResourceType.FILE);
            log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
            log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
            log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
            log4jRsrc.setSize(log4jFileStatus.getLen());
            localResources.put("log4j.properties", log4jRsrc);
        }

        if (originalAppId != null) {
            Path origAppPath = new Path(appsBasePath, this.originalAppId);
            LOG.info("Restart from {}", origAppPath);
            copyInitialState(origAppPath);
        }

        // push logical plan to DFS location
        Path cfgDst = new Path(appPath, LogicalPlan.SER_FILE_NAME);
        FSDataOutputStream outStream = fs.create(cfgDst, true);
        LogicalPlan.write(this.dag, outStream);
        outStream.close();

        Path launchConfigDst = new Path(appPath, LogicalPlan.LAUNCH_CONFIG_FILE_NAME);
        outStream = fs.create(launchConfigDst, true);
        conf.writeXml(outStream);
        outStream.close();

        FileStatus topologyFileStatus = fs.getFileStatus(cfgDst);
        LocalResource topologyRsrc = Records.newRecord(LocalResource.class);
        topologyRsrc.setType(LocalResourceType.FILE);
        topologyRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        topologyRsrc.setResource(ConverterUtils.getYarnUrlFromURI(cfgDst.toUri()));
        topologyRsrc.setTimestamp(topologyFileStatus.getModificationTime());
        topologyRsrc.setSize(topologyFileStatus.getLen());
        localResources.put(LogicalPlan.SER_FILE_NAME, topologyRsrc);

        // Set local resource info into app master container launch context
        amContainer.setLocalResources(localResources);

        // Set the necessary security tokens as needed
        //amContainer.setContainerTokens(containerToken);

        // Set the env variables to be setup in the env where the application master will be run
        LOG.info("Set the environment for the application master");
        Map<String, String> env = new HashMap<String, String>();

        // Add application jar(s) location to classpath
        // At some point we should not be required to add
        // the hadoop specific classpaths to the env.
        // It should be provided out of the box.
        // For now setting all required classpaths including
        // the classpath to "." for the application jar(s)
        // including ${CLASSPATH} will duplicate the class path in app master, removing it for now
        //StringBuilder classPathEnv = new StringBuilder("${CLASSPATH}:./*");
        StringBuilder classPathEnv = new StringBuilder("./*");
        String classpath = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH);
        for (String c : StringUtils.isBlank(classpath) ? YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH
                : classpath.split(",")) {
            if (c.equals("$HADOOP_CLIENT_CONF_DIR")) {
                // SPOI-2501
                continue;
            }
            classPathEnv.append(':');
            classPathEnv.append(c.trim());
        }
        env.put("CLASSPATH", classPathEnv.toString());
        // propagate to replace node managers user name (effective in non-secure mode)
        env.put("HADOOP_USER_NAME", UserGroupInformation.getLoginUser().getUserName());

        amContainer.setEnvironment(env);

        // Set the necessary command to execute the application master
        ArrayList<CharSequence> vargs = new ArrayList<CharSequence>(30);

        // Set java executable command
        LOG.info("Setting up app master command");
        vargs.add(javaCmd);
        if (dag.isDebug()) {
            vargs.add("-agentlib:jdwp=transport=dt_socket,server=y,suspend=n");
        }
        // Set Xmx based on am memory size
        // default heap size 75% of total memory
        if (dag.getMasterJVMOptions() != null) {
            vargs.add(dag.getMasterJVMOptions());
        }
        vargs.add("-Xmx" + (amMemory * 3 / 4) + "m");
        vargs.add("-XX:+HeapDumpOnOutOfMemoryError");
        vargs.add("-XX:HeapDumpPath=/tmp/dt-heap-" + appId.getId() + ".bin");
        vargs.add("-Dhadoop.root.logger=" + (dag.isDebug() ? "DEBUG" : "INFO") + ",RFA");
        vargs.add("-Dhadoop.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
        vargs.add(String.format("-D%s=%s", StreamingContainer.PROP_APP_PATH, dag.assertAppPath()));
        if (dag.isDebug()) {
            vargs.add("-Dlog4j.debug=true");
        }

        String loggersLevel = conf.get(DTLoggerFactory.DT_LOGGERS_LEVEL);
        if (loggersLevel != null) {
            vargs.add(String.format("-D%s=%s", DTLoggerFactory.DT_LOGGERS_LEVEL, loggersLevel));
        }
        vargs.add(StreamingAppMaster.class.getName());
        vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
        vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

        // Get final command
        StringBuilder command = new StringBuilder(9 * vargs.size());
        for (CharSequence str : vargs) {
            command.append(str).append(" ");
        }
        LOG.info("Completed setting up app master command " + command.toString());
        List<String> commands = new ArrayList<String>();
        commands.add(command.toString());
        amContainer.setCommands(commands);

        // Set up resource type requirements
        // For now, only memory is supported so we set memory requirements
        Resource capability = Records.newRecord(Resource.class);
        capability.setMemory(amMemory);
        appContext.setResource(capability);

        // Service data is a binary blob that can be passed to the application
        // Not needed in this scenario
        // amContainer.setServiceData(serviceData);

        appContext.setAMContainerSpec(amContainer);

        // Set the priority for the application master
        Priority pri = Records.newRecord(Priority.class);
        pri.setPriority(amPriority);
        appContext.setPriority(pri);
        // Set the queue to which this application is to be submitted in the RM
        appContext.setQueue(queueName);

        // Submit the application to the applications manager
        // SubmitApplicationResponse submitResp = rmClient.submitApplication(appRequest);
        // Ignore the response as either a valid response object is returned on success
        // or an exception thrown to denote some form of a failure
        String specStr = Objects.toStringHelper("Submitting application: ")
                .add("name", appContext.getApplicationName()).add("queue", appContext.getQueue())
                .add("user", UserGroupInformation.getLoginUser()).add("resource", appContext.getResource())
                .toString();
        LOG.info(specStr);
        if (dag.isDebug()) {
            //LOG.info("Full submission context: " + appContext);
        }
        yarnClient.submitApplication(appContext);
    } finally {
        fs.close();
    }
}
From source file:com.hazelcast.yarn.ApplicationMaster.java
License:Open Source License
@Override
public synchronized void onContainersAllocated(List<Container> containerList) {
    for (Container container : containerList) {
        if (checkContainer(container)) {
            try {
                ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
                Map<String, String> env = new HashMap<String, String>(System.getenv());
                ctx.setEnvironment(env);

                Map<String, LocalResource> resources = new HashMap<String, LocalResource>();
                resources.put("hazelcast",
                        YarnUtil.createFileResource(this.hazelcastPath, this.hdfs, LocalResourceType.ARCHIVE));

                if (this.properties.customLibs() != null) {
                    resources.put("libs", YarnUtil.createFileResource(new Path(this.properties.customLibs()),
                            this.hdfs, LocalResourceType.FILE));
                }

                if (this.properties.jvmOpts() != null && !this.properties.jvmOpts().isEmpty()) {
                    env.put("JVM_OPTS", this.properties.jvmOpts());
                }

                ctx.setEnvironment(env);

                String command = "cd ./hazelcast/*/bin/ && ./server.sh hazelcast.xml" + " -J-Xmx"
                        + container.getResource().getMemory() + "m" + " -J-Xms"
                        + container.getResource().getMemory() + "m" + YarnUtil.LOGS;

                LOG.log(Level.INFO, command);

                ctx.setLocalResources(resources);
                ctx.setCommands(Collections.singletonList(command));

                LOG.log(Level.INFO, "Launching container: {0}.", container.getId());

                this.nmClient.startContainer(container, ctx);

                this.containers.put(container.getId(), new HazelcastContainer(container.getId(),
                        container.getNodeId(), container.getResource().getVirtualCores(),
                        container.getResource().getMemory()));
            } catch (Exception ex) {
                LOG.log(Level.WARNING, "Error launching container " + container.getId(), ex);
            }
        } else {
            LOG.log(Level.INFO, "Checking failed for container=" + container.toString());
        }
    }
}
From source file:com.scaleoutsoftware.soss.hserver.hadoop.DistributedCacheManager.java
License:Apache License
/**
 * Set up the distributed cache by localizing the resources, and updating
 * the configuration with references to the localized resources.
 * @param conf job configuration
 * @throws IOException
 */
public void setup(Configuration conf) throws IOException {

    //If we are not 0th worker, wait for 0th worker to set up the cache
    if (InvocationWorker.getIgWorkerIndex() > 0 && InvocationWorker.getNumberOfWorkers() > 1) {
        try {
            InvocationWorker.getSynchronizationBarrier().waitForComplete(ACTION_NAME, SYNCHRONIZATION_WAIT_MS,
                    WAIT_GRANULARITY_MS);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        return;
    }

    File workDir = new File(System.getProperty("user.dir"));

    // Generate YARN local resources objects corresponding to the distributed
    // cache configuration
    Map<String, LocalResource> localResources = new LinkedHashMap<String, LocalResource>();

    MRApps.setupDistributedCache(conf, localResources);

    //CODE CHANGE FROM ORIGINAL FILE:
    //We need to clear the resources from jar files, since they are distributed through the IG.
    //
    Iterator<Map.Entry<String, LocalResource>> iterator = localResources.entrySet().iterator();
    while (iterator.hasNext()) {
        Entry<String, LocalResource> entry = iterator.next();
        if (entry.getKey().endsWith(".jar")) {
            iterator.remove();
        }
    }

    // Generating unique numbers for FSDownload.
    AtomicLong uniqueNumberGenerator = new AtomicLong(System.currentTimeMillis());

    // Find which resources are to be put on the local classpath
    Map<String, Path> classpaths = new HashMap<String, Path>();
    Path[] archiveClassPaths = DistributedCache.getArchiveClassPaths(conf);
    if (archiveClassPaths != null) {
        for (Path p : archiveClassPaths) {
            FileSystem remoteFS = p.getFileSystem(conf);
            p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }

    Path[] fileClassPaths = DistributedCache.getFileClassPaths(conf);
    if (fileClassPaths != null) {
        for (Path p : fileClassPaths) {
            FileSystem remoteFS = p.getFileSystem(conf);
            p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()));
            classpaths.put(p.toUri().getPath().toString(), p);
        }
    }

    // Localize the resources
    LocalDirAllocator localDirAllocator = new LocalDirAllocator(MRConfig.LOCAL_DIR);
    FileContext localFSFileContext = FileContext.getLocalFSFileContext();
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

    ExecutorService exec = null;
    try {
        ThreadFactory tf = new ThreadFactoryBuilder()
                .setNameFormat("LocalDistributedCacheManager Downloader #%d").build();
        exec = Executors.newCachedThreadPool(tf);
        Path destPath = localDirAllocator.getLocalPathForWrite(".", conf);
        Map<LocalResource, Future<Path>> resourcesToPaths = Maps.newHashMap();
        for (LocalResource resource : localResources.values()) {
            Callable<Path> download = new FSDownload(localFSFileContext, ugi, conf,
                    new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet())), resource);
            Future<Path> future = exec.submit(download);
            resourcesToPaths.put(resource, future);
        }
        for (Entry<String, LocalResource> entry : localResources.entrySet()) {
            LocalResource resource = entry.getValue();
            Path path;
            try {
                path = resourcesToPaths.get(resource).get();
            } catch (InterruptedException e) {
                throw new IOException(e);
            } catch (ExecutionException e) {
                throw new IOException(e);
            }
            String pathString = path.toUri().toString();
            String link = entry.getKey();
            String target = new File(path.toUri()).getPath();
            symlink(workDir, target, link);

            if (resource.getType() == LocalResourceType.ARCHIVE) {
                localArchives.add(pathString);
            } else if (resource.getType() == LocalResourceType.FILE) {
                localFiles.add(pathString);
            } else if (resource.getType() == LocalResourceType.PATTERN) {
                //PATTERN is not currently used in local mode
                throw new IllegalArgumentException(
                        "Resource type PATTERN is not " + "implemented yet. " + resource.getResource());
            }
            Path resourcePath;
            try {
                resourcePath = ConverterUtils.getPathFromYarnURL(resource.getResource());
            } catch (URISyntaxException e) {
                throw new IOException(e);
            }
            LOG.info(String.format("Localized %s as %s", resourcePath, path));
            String cp = resourcePath.toUri().getPath();
            if (classpaths.keySet().contains(cp)) {
                localClasspaths.add(path.toUri().getPath().toString());
            }
        }
    } finally {
        if (exec != null) {
            exec.shutdown();
        }
    }
    // Update the configuration object with localized data.
    if (!localArchives.isEmpty()) {
        conf.set(MRJobConfig.CACHE_LOCALARCHIVES,
                StringUtils.arrayToString(localArchives.toArray(new String[localArchives.size()])));
    }
    if (!localFiles.isEmpty()) {
        conf.set(MRJobConfig.CACHE_LOCALFILES,
                StringUtils.arrayToString(localFiles.toArray(new String[localArchives.size()])));
    }
    setupCalled = true;

    //If we are 0th worker, signal action complete
    if (InvocationWorker.getIgWorkerIndex() == 0 && InvocationWorker.getNumberOfWorkers() > 1) {
        try {
            InvocationWorker.getSynchronizationBarrier().signalComplete(ACTION_NAME);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }
}
From source file:com.yahoo.storm.yarn.StormAMRMClient.java
License:Open Source License
public void launchSupervisorOnContainer(Container container) throws IOException {
    // create a container launch context
    ContainerLaunchContext launchContext = Records.newRecord(ContainerLaunchContext.class);
    UserGroupInformation user = UserGroupInformation.getCurrentUser();
    try {
        Credentials credentials = user.getCredentials();
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        launchContext.setTokens(securityTokens);
    } catch (IOException e) {
        LOG.warn("Getting current user info failed when trying to launch the container" + e.getMessage());
    }

    // CLC: env
    Map<String, String> env = new HashMap<String, String>();
    env.put("STORM_LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    launchContext.setEnvironment(env);

    // CLC: local resources includes storm, conf
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    String storm_zip_path = (String) storm_conf.get("storm.zip.path");
    Path zip = new Path(storm_zip_path);
    FileSystem fs = FileSystem.get(hadoopConf);
    String vis = (String) storm_conf.get("storm.zip.visibility");
    if (vis.equals("PUBLIC"))
        localResources.put("storm",
                Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE, LocalResourceVisibility.PUBLIC));
    else if (vis.equals("PRIVATE"))
        localResources.put("storm",
                Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE, LocalResourceVisibility.PRIVATE));
    else if (vis.equals("APPLICATION"))
        localResources.put("storm",
                Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE, LocalResourceVisibility.APPLICATION));

    String appHome = Util.getApplicationHomeForId(appAttemptId.toString());
    Path confDst = Util.createConfigurationFileInFs(fs, appHome, this.storm_conf, this.hadoopConf);
    localResources.put("conf", Util.newYarnAppResource(fs, confDst));
    launchContext.setLocalResources(localResources);

    // CLC: command
    List<String> supervisorArgs = Util.buildSupervisorCommands(this.storm_conf);
    launchContext.setCommands(supervisorArgs);

    try {
        LOG.info("Use NMClient to launch supervisors in container. ");
        nmClient.startContainer(container, launchContext);

        String userShortName = user.getShortUserName();
        if (userShortName != null)
            LOG.info("Supervisor log: http://" + container.getNodeHttpAddress() + "/node/containerlogs/"
                    + container.getId().toString() + "/" + userShortName + "/supervisor.log");
    } catch (Exception e) {
        LOG.error("Caught an exception while trying to start a container", e);
        System.exit(-1);
    }
}
From source file:com.yahoo.storm.yarn.StormOnYarn.java
License:Open Source License
private void launchApp(String appName, String queue, int amMB, String storm_zip_location) throws Exception {
    LOG.debug("StormOnYarn:launchApp() ...");

    YarnClientApplication client_app = _yarn.createApplication();
    GetNewApplicationResponse app = client_app.getNewApplicationResponse();
    _appId = app.getApplicationId();
    LOG.debug("_appId:" + _appId);

    if (amMB > app.getMaximumResourceCapability().getMemory()) {
        //TODO need some sanity checks
        amMB = app.getMaximumResourceCapability().getMemory();
    }
    ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class);
    appContext.setApplicationId(app.getApplicationId());
    appContext.setApplicationName(appName);
    appContext.setQueue(queue);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the
    // local resources
    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    String appMasterJar = findContainingJar(MasterServer.class);
    FileSystem fs = FileSystem.get(_hadoopConf);
    Path src = new Path(appMasterJar);
    String appHome = Util.getApplicationHomeForId(_appId.toString());
    Path dst = new Path(fs.getHomeDirectory(), appHome + Path.SEPARATOR + "AppMaster.jar");
    fs.copyFromLocalFile(false, true, src, dst);
    localResources.put("AppMaster.jar", Util.newYarnAppResource(fs, dst));

    String stormVersion = Util.getStormVersion();
    Path zip;
    if (storm_zip_location != null) {
        zip = new Path(storm_zip_location);
    } else {
        zip = new Path("/lib/storm/" + stormVersion + "/storm.zip");
    }
    _stormConf.put("storm.zip.path", zip.makeQualified(fs).toUri().getPath());
    LocalResourceVisibility visibility = LocalResourceVisibility.PUBLIC;
    _stormConf.put("storm.zip.visibility", "PUBLIC");
    if (!Util.isPublic(fs, zip)) {
        visibility = LocalResourceVisibility.APPLICATION;
        _stormConf.put("storm.zip.visibility", "APPLICATION");
    }
    localResources.put("storm", Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE, visibility));

    Path confDst = Util.createConfigurationFileInFs(fs, appHome, _stormConf, _hadoopConf);
    // establish a symbolic link to conf directory
    localResources.put("conf", Util.newYarnAppResource(fs, confDst));

    // Setup security tokens
    Path[] paths = new Path[3];
    paths[0] = dst;
    paths[1] = zip;
    paths[2] = confDst;
    Credentials credentials = new Credentials();
    TokenCache.obtainTokensForNamenodes(credentials, paths, _hadoopConf);
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    //security tokens for HDFS distributed cache
    amContainer.setTokens(securityTokens);

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the env variables to be setup in the env where the application master
    // will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();
    // add the runtime classpath needed for tests to work
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./conf");
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./AppMaster.jar");

    //Make sure that AppMaster has access to all YARN JARs
    List<String> yarn_classpath_cmd = java.util.Arrays.asList("yarn", "classpath");
    ProcessBuilder pb = new ProcessBuilder(yarn_classpath_cmd).redirectError(Redirect.INHERIT);
    LOG.info("YARN CLASSPATH COMMAND = [" + yarn_classpath_cmd + "]");
    pb.environment().putAll(System.getenv());
    Process proc = pb.start();
    BufferedReader reader = new BufferedReader(new InputStreamReader(proc.getInputStream(), "UTF-8"));
    String line = "";
    String yarn_class_path = (String) _stormConf.get("storm.yarn.yarn_classpath");
    if (yarn_class_path == null) {
        StringBuilder yarn_class_path_builder = new StringBuilder();
        while ((line = reader.readLine()) != null) {
            yarn_class_path_builder.append(line);
        }
        yarn_class_path = yarn_class_path_builder.toString();
    }
    LOG.info("YARN CLASSPATH = [" + yarn_class_path + "]");
    proc.waitFor();
    reader.close();
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), yarn_class_path);

    String stormHomeInZip = Util.getStormHomeInZip(fs, zip, stormVersion);
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./storm/" + stormHomeInZip + "/*");
    Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./storm/" + stormHomeInZip + "/lib/*");

    String java_home = (String) _stormConf.get("storm.yarn.java_home");
    if (java_home == null)
        java_home = System.getenv("JAVA_HOME");

    if (java_home != null && !java_home.isEmpty())
        env.put("JAVA_HOME", java_home);
    LOG.info("Using JAVA_HOME = [" + env.get("JAVA_HOME") + "]");

    env.put("appJar", appMasterJar);
    env.put("appName", appName);
    env.put("appId", new Integer(_appId.getId()).toString());
    env.put("STORM_LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<String> vargs = new Vector<String>();
    if (java_home != null && !java_home.isEmpty())
        vargs.add(env.get("JAVA_HOME") + "/bin/java");
    else
        vargs.add("java");
    vargs.add("-Dstorm.home=./storm/" + stormHomeInZip + "/");
    vargs.add("-Dlogfile.name=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/master.log");
    //vargs.add("-verbose:class");
    vargs.add("com.yahoo.storm.yarn.MasterServer");
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout");

    // Set java executable command
    LOG.info("Setting up app master command:" + vargs);

    amContainer.setCommands(vargs);

    // Set up resource type requirements
    // For now, only memory is supported so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMB);
    appContext.setResource(capability);
    appContext.setAMContainerSpec(amContainer);

    _yarn.submitApplication(appContext);
}
From source file:edu.uci.ics.asterix.aoya.AsterixApplicationMaster.java
License:Apache License
/**
 * Here I am just pointing the Containers to the existing HDFS resources given by the Client
 * filesystem of the nodes.
 *
 * @throws IOException
 */
private void localizeDFSResources() throws IOException {
    //if performing an 'offline' task, skip a lot of resource distribution
    if (obliterate || backup || restore) {
        if (appMasterJar == null || ("").equals(appMasterJar)) {
            //this can happen in a jUnit testing environment. we don't need to set it there.
            if (!conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
                throw new IllegalStateException("AM jar not provided in environment.");
            } else {
                return;
            }
        }
        FileSystem fs = FileSystem.get(conf);
        FileStatus appMasterJarStatus = fs.getFileStatus(appMasterJar);
        LocalResource obliteratorJar = Records.newRecord(LocalResource.class);
        obliteratorJar.setType(LocalResourceType.FILE);
        obliteratorJar.setVisibility(LocalResourceVisibility.PRIVATE);
        obliteratorJar.setResource(ConverterUtils.getYarnUrlFromPath(appMasterJar));
        obliteratorJar.setTimestamp(appMasterJarStatus.getModificationTime());
        obliteratorJar.setSize(appMasterJarStatus.getLen());
        localResources.put("asterix-yarn.jar", obliteratorJar);
        LOG.info(localResources.values());
        return;
    }
    //otherwise, distribute everything to start up asterix

    LocalResource asterixZip = Records.newRecord(LocalResource.class);

    //this un-tar's the asterix distribution
    asterixZip.setType(LocalResourceType.ARCHIVE);
    asterixZip.setVisibility(LocalResourceVisibility.PRIVATE);
    try {
        asterixZip.setResource(ConverterUtils.getYarnUrlFromURI(new URI(asterixZipPath)));
    } catch (URISyntaxException e) {
        LOG.error("Error locating Asterix zip" + " in env, path=" + asterixZipPath);
        throw new IOException(e);
    }

    asterixZip.setTimestamp(asterixZipTimestamp);
    asterixZip.setSize(asterixZipLen);
    localResources.put(ASTERIX_ZIP_NAME, asterixZip);

    //now let's do the same for the cluster description XML
    LocalResource asterixConf = Records.newRecord(LocalResource.class);
    asterixConf.setType(LocalResourceType.FILE);
    asterixConf.setVisibility(LocalResourceVisibility.PRIVATE);
    try {
        asterixConf.setResource(ConverterUtils.getYarnUrlFromURI(new URI(asterixConfPath)));
    } catch (URISyntaxException e) {
        LOG.error("Error locating Asterix config" + " in env, path=" + asterixConfPath);
        throw new IOException(e);
    }
    //TODO: I could avoid localizing this everywhere by only calling this block on the metadata node.
    asterixConf.setTimestamp(asterixConfTimestamp);
    asterixConf.setSize(asterixConfLen);
    localResources.put("cluster-config.xml", asterixConf);

    //now add the libraries if there are any
    try {
        FileSystem fs = FileSystem.get(conf);
        Path p = new Path(dfsBasePath, instanceConfPath + File.separator + "library" + Path.SEPARATOR);
        if (fs.exists(p)) {
            FileStatus[] dataverses = fs.listStatus(p);
            for (FileStatus d : dataverses) {
                if (!d.isDirectory())
                    throw new IOException("Library configuration directory structure is incorrect");
                FileStatus[] libraries = fs.listStatus(d.getPath());
                for (FileStatus l : libraries) {
                    if (l.isDirectory())
                        throw new IOException("Library configuration directory structure is incorrect");
                    LocalResource lr = Records.newRecord(LocalResource.class);
                    lr.setResource(ConverterUtils.getYarnUrlFromURI(l.getPath().toUri()));
                    lr.setSize(l.getLen());
                    lr.setTimestamp(l.getModificationTime());
                    lr.setType(LocalResourceType.ARCHIVE);
                    lr.setVisibility(LocalResourceVisibility.PRIVATE);
                    localResources.put("library" + Path.SEPARATOR + d.getPath().getName() + Path.SEPARATOR
                            + l.getPath().getName().split("\\.")[0], lr);
                    LOG.info("Found library: " + l.getPath().toString());
                    LOG.info(l.getPath().getName());
                }
            }
        }
    } catch (FileNotFoundException e) {
        LOG.info("No external libraries present");
        //do nothing, it just means there aren't libraries. that is possible and ok
        // it should be handled by the fs.exists(p) check though.
    }
    LOG.info(localResources.values());
}
From source file:edu.uci.ics.asterix.aoya.AsterixYARNClient.java
License:Apache License
/**
 * Uploads binary resources to HDFS for use by the AM
 * @return
 * @throws IOException
 * @throws YarnException
 */
public List<DFSResourceCoordinate> distributeBinaries() throws IOException, YarnException {

    List<DFSResourceCoordinate> resources = new ArrayList<DFSResourceCoordinate>(2);
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(conf);
    Path src, dst;
    FileStatus destStatus;
    String pathSuffix;

    // adding info so we can add the jar to the App master container path

    // Add the asterix tarfile to HDFS for easy distribution
    // Keep it all archived for now so add it as a file...
    pathSuffix = CONF_DIR_REL + instanceFolder + "asterix-server.zip";
    dst = new Path(fs.getHomeDirectory(), pathSuffix);
    if (refresh) {
        if (fs.exists(dst)) {
            fs.delete(dst, false);
        }
    }
    if (!fs.exists(dst)) {
        src = new Path(asterixZip);
        LOG.info("Copying Asterix distributable to DFS");
        fs.copyFromLocalFile(false, true, src, dst);
    }
    destStatus = fs.getFileStatus(dst);
    LocalResource asterixTarLoc = Records.newRecord(LocalResource.class);
    asterixTarLoc.setType(LocalResourceType.ARCHIVE);
    asterixTarLoc.setVisibility(LocalResourceVisibility.PRIVATE);
    asterixTarLoc.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    asterixTarLoc.setTimestamp(destStatus.getModificationTime());

    // adding info so we can add the tarball to the App master container path
    DFSResourceCoordinate tar = new DFSResourceCoordinate();
    tar.envs.put(dst.toUri().toString(), AConstants.TARLOCATION);
    tar.envs.put(Long.toString(asterixTarLoc.getSize()), AConstants.TARLEN);
    tar.envs.put(Long.toString(asterixTarLoc.getTimestamp()), AConstants.TARTIMESTAMP);
    tar.res = asterixTarLoc;
    tar.name = "asterix-server.zip";
    resources.add(tar);

    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        Path log4jSrc = new Path(log4jPropFile);
        Path log4jDst = new Path(fs.getHomeDirectory(), "log4j.props");
        fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
        FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
        LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
        log4jRsrc.setType(LocalResourceType.FILE);
        log4jRsrc.setVisibility(LocalResourceVisibility.PRIVATE);
        log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
        log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
        log4jRsrc.setSize(log4jFileStatus.getLen());
        DFSResourceCoordinate l4j = new DFSResourceCoordinate();
        tar.res = log4jRsrc;
        tar.name = "log4j.properties";
        resources.add(l4j);
    }

    resources.addAll(installAmLibs());
    return resources;
}