Usage examples for org.apache.hadoop.fs.FileSystem#getHomeDirectory()
public Path getHomeDirectory()
public Path getHomeDirectory()
From source file:com.splunk.shuttl.testutil.TUtilsPath.java
License:Apache License
/** * SafePathCreator is used to get a directory in a file system which is class * unique, readable and writable./* w w w . j a v a2 s . c om*/ * * It returns a path like this: /User/XXX/org.shuttl.HadoopTest/ * */ public static Path getSafeDirectory(FileSystem fileSystem, Class<?> clazz) { return new Path(fileSystem.getHomeDirectory(), clazz.getName()); }
From source file:com.splunk.shuttl.testutil.TUtilsPathTest.java
License:Apache License
/** Safe path = {@code <home dir>/<test class name>}, giving a tidy per-test layout. */
@Test(groups = { "fast-unit" })
public void safePath_should_beSeparated_by_HomeDirectoryAndNameOfTestCase_toAchieve_nicerStructure() {
    FileSystem fileSystem = TUtilsFileSystem.getLocalFileSystem();
    Path actual = TUtilsPath.getSafeDirectory(fileSystem);
    String expectedLocation = fileSystem.getHomeDirectory() + "/" + this.getClass().getName();
    assertEquals(actual, new Path(expectedLocation));
}
From source file:com.srini.hadoopYarn.Client.java
License:Apache License
/**
 * Main run function for the client: queries cluster state, stages the
 * AppMaster jar (and optional log4j/shell-script resources) into the user's
 * HDFS home directory, builds the AM launch context, submits the
 * application, and blocks until it finishes.
 *
 * @return true if application completed successfully
 * @throws IOException on filesystem or token errors
 * @throws YarnException on RM communication errors
 */
public boolean run() throws IOException, YarnException {
    LOG.info("Running Client");
    yarnClient.start();
    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());
    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());
    }
    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());
    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }
    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);
    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified=" + amMemory
                + ", max=" + maxMem);
        amMemory = maxMem;
    }
    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName(appName);
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    FileSystem fs = FileSystem.get(conf);
    Path src = new Path(appMasterJar);
    String pathSuffix = appName + "/" + appId.getId() + "/AppMaster.jar";
    Path dst = new Path(fs.getHomeDirectory(), pathSuffix);
    fs.copyFromLocalFile(false, true, src, dst);
    FileStatus destStatus = fs.getFileStatus(dst);
    LocalResource amJarRsrc = Records.newRecord(LocalResource.class);
    // Set the type of resource - file or archive
    // archives are untarred at destination
    // we don't need the jar file to be untarred for now
    amJarRsrc.setType(LocalResourceType.FILE);
    // Set visibility of the resource
    // Setting to most private option
    amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    // Set the resource to be copied over
    amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    // Set timestamp and length of file so that the framework
    // can do basic sanity checks for the local resource
    // after it has been copied over to ensure it is the same
    // resource the client intended to use with the application
    amJarRsrc.setTimestamp(destStatus.getModificationTime());
    amJarRsrc.setSize(destStatus.getLen());
    localResources.put("AppMaster.jar", amJarRsrc);
    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        Path log4jSrc = new Path(log4jPropFile);
        Path log4jDst = new Path(fs.getHomeDirectory(), "log4j.props");
        fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
        FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
        LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
        log4jRsrc.setType(LocalResourceType.FILE);
        log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
        log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
        log4jRsrc.setSize(log4jFileStatus.getLen());
        localResources.put("log4j.properties", log4jRsrc);
    }
    // The shell script has to be made available on the final container(s)
    // where it will be executed.
    // To do this, we need to first copy into the filesystem that is visible
    // to the yarn framework.
    // We do not need to set this as a local resource for the application
    // master as the application master does not need it.
    String hdfsShellScriptLocation = "";
    long hdfsShellScriptLen = 0;
    long hdfsShellScriptTimestamp = 0;
    if (!shellScriptPath.isEmpty()) {
        Path shellSrc = new Path(shellScriptPath);
        String shellPathSuffix = appName + "/" + appId.getId() + "/ExecShellScript.sh";
        Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
        fs.copyFromLocalFile(false, true, shellSrc, shellDst);
        hdfsShellScriptLocation = shellDst.toUri().toString();
        FileStatus shellFileStatus = fs.getFileStatus(shellDst);
        hdfsShellScriptLen = shellFileStatus.getLen();
        hdfsShellScriptTimestamp = shellFileStatus.getModificationTime();
    }
    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);
    // Set the necessary security tokens as needed
    //amContainer.setContainerTokens(containerToken);
    // Set the env variables to be setup in the env where the application master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();
    // put location of shell script into env
    // using the env info, the application master will create the correct local resource for the
    // eventual containers that will be launched to execute the shell scripts
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION, hdfsShellScriptLocation);
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP, Long.toString(hdfsShellScriptTimestamp));
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN, Long.toString(hdfsShellScriptLen));
    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar)
            .append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");
    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }
    env.put("CLASSPATH", classPathEnv.toString());
    amContainer.setEnvironment(env);
    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);
    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    vargs.add("--priority " + String.valueOf(shellCmdPriority));
    if (!shellCommand.isEmpty()) {
        vargs.add("--shell_command " + shellCommand + "");
    }
    if (!shellArgs.isEmpty()) {
        vargs.add("--shell_args " + shellArgs + "");
    }
    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }
    // Redirect AM stdout/stderr into its YARN log dir
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");
    // Get final commmand
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }
    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);
    // Set up resource type requirements
    // For now, only memory is supported so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    appContext.setResource(capability);
    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);
    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }
        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }
    appContext.setAMContainerSpec(amContainer);
    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);
    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);
    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on success
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");
    yarnClient.submitApplication(appContext);
    // TODO
    // Try submitting the same request again
    // app submission failure?
    // Monitor the application
    return monitorApplication(appId);
}
From source file:com.streamsets.pipeline.spark.SparkStreamingBinding.java
License:Apache License
@Override public void init() throws Exception { for (Object key : properties.keySet()) { logMessage("Property => " + key + " => " + properties.getProperty(key.toString()), isRunningInMesos); }/* w w w . j av a 2 s.c om*/ final SparkConf conf = new SparkConf().setAppName("StreamSets Data Collector - Streaming Mode"); conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer"); final String topic = getProperty(TOPIC); final long duration; String durationAsString = getProperty(MAX_WAIT_TIME); try { duration = Long.parseLong(durationAsString); } catch (NumberFormatException ex) { String msg = "Invalid " + MAX_WAIT_TIME + " '" + durationAsString + "' : " + ex; throw new IllegalArgumentException(msg, ex); } Configuration hadoopConf = new SparkHadoopUtil().newConfiguration(conf); if (isRunningInMesos) { hadoopConf = getHadoopConf(hadoopConf); } else { hadoopConf = new Configuration(); } URI hdfsURI = FileSystem.getDefaultUri(hadoopConf); logMessage("Default FS URI: " + hdfsURI, isRunningInMesos); FileSystem hdfs = (new Path(hdfsURI)).getFileSystem(hadoopConf); Path sdcCheckpointPath = new Path(hdfs.getHomeDirectory(), ".streamsets-spark-streaming/" + getProperty("sdc.id") + "/" + encode(topic)); // encode as remote pipeline name might have colon within it String pipelineName = encode(getProperty("cluster.pipeline.name")); final Path checkPointPath = new Path(sdcCheckpointPath, pipelineName); hdfs.mkdirs(checkPointPath); if (!hdfs.isDirectory(checkPointPath)) { throw new IllegalStateException("Could not create checkpoint path: " + sdcCheckpointPath); } if (isRunningInMesos) { String scheme = hdfsURI.getScheme(); if (scheme.equals("hdfs")) { File mesosBootstrapFile = BootstrapCluster.getMesosBootstrapFile(); Path mesosBootstrapPath = new Path(checkPointPath, mesosBootstrapFile.getName()); // in case of hdfs, copy the jar file from local path to hdfs hdfs.copyFromLocalFile(false, true, new Path(mesosBootstrapFile.toURI()), mesosBootstrapPath); conf.setJars(new 
String[] { mesosBootstrapPath.toString() }); } else if (scheme.equals("s3") || scheme.equals("s3n") || scheme.equals("s3a")) { // we cant upload the jar to s3 as executors wont understand s3 scheme without the aws jar. // So have the jar available on http conf.setJars(new String[] { getProperty("mesos.jar.url") }); } else { throw new IllegalStateException("Unsupported scheme: " + scheme); } } JavaStreamingContextFactory javaStreamingContextFactory = new JavaStreamingContextFactoryImpl(conf, duration, checkPointPath.toString(), getProperty(METADATA_BROKER_LIST), topic, properties.getProperty(AUTO_OFFSET_RESET, "").trim(), isRunningInMesos); ssc = JavaStreamingContext.getOrCreate(checkPointPath.toString(), hadoopConf, javaStreamingContextFactory, true); // mesos tries to stop the context internally, so don't do it here - deadlock bug in spark if (!isRunningInMesos) { final Thread shutdownHookThread = new Thread("Spark.shutdownHook") { @Override public void run() { LOG.debug("Gracefully stopping Spark Streaming Application"); ssc.stop(true, true); LOG.info("Application stopped"); } }; Runtime.getRuntime().addShutdownHook(shutdownHookThread); } logMessage("Making calls through spark context ", isRunningInMesos); ssc.start(); }
From source file:com.toy.Client.java
License:Apache License
/**
 * Start a new Application Master and deploy the web application on 2 Tomcat
 * containers: validates the Tomcat install and WAR, uploads all dependencies
 * and Tomcat jars to HDFS as local resources, builds the AM launch context,
 * and submits the application, blocking until it is RUNNING.
 *
 * @throws Exception on any validation, filesystem, or YARN submission error
 */
void start() throws Exception {
    //Check tomcat dir
    final File tomcatHomeDir = new File(toyConfig.tomcat);
    final File tomcatLibraries = new File(tomcatHomeDir, "lib");
    final File tomcatBinaries = new File(tomcatHomeDir, "bin");
    Preconditions.checkState(tomcatLibraries.isDirectory(),
            tomcatLibraries.getAbsolutePath() + " does not exist");
    //Check war file
    final File warFile = new File(toyConfig.war);
    Preconditions.checkState(warFile.isFile(), warFile.getAbsolutePath() + " does not exist");
    yarn = YarnClient.createYarnClient();
    yarn.init(configuration);
    yarn.start();
    YarnClientApplication yarnApplication = yarn.createApplication();
    GetNewApplicationResponse newApplication = yarnApplication.getNewApplicationResponse();
    appId = newApplication.getApplicationId();
    ApplicationSubmissionContext appContext = yarnApplication.getApplicationSubmissionContext();
    appContext.setApplicationName("Tomcat : " + tomcatHomeDir.getName() + "\n War : " + warFile.getName());
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    // Register required libraries
    Map<String, LocalResource> localResources = new HashMap<>();
    FileSystem fs = FileSystem.get(configuration);
    uploadDepAndRegister(localResources, appId, fs, "lib-ext/curator-client-2.3.0.jar");
    uploadDepAndRegister(localResources, appId, fs, "lib-ext/curator-framework-2.3.0.jar");
    uploadDepAndRegister(localResources, appId, fs, "lib-ext/curator-recipes-2.3.0.jar");
    // Register application master jar
    registerLocalResource(localResources, appId, fs, new Path(appMasterJar));
    // Register the WAR that will be deployed on Tomcat
    registerLocalResource(localResources, appId, fs, new Path(warFile.getAbsolutePath()));
    // Register Tomcat libraries
    // NOTE(review): listFiles() can return null on I/O error even after the
    // isDirectory() precondition above — would NPE here; confirm acceptable.
    for (File lib : tomcatLibraries.listFiles()) {
        registerLocalResource(localResources, appId, fs, new Path(lib.getAbsolutePath()));
    }
    File juli = new File(tomcatBinaries, "tomcat-juli.jar");
    if (juli.exists()) {
        registerLocalResource(localResources, appId, fs, new Path(juli.getAbsolutePath()));
    }
    amContainer.setLocalResources(localResources);
    // Setup master environment
    Map<String, String> env = new HashMap<>();
    final String TOMCAT_LIBS = fs.getHomeDirectory() + "/" + Constants.TOY_PREFIX + appId.toString();
    env.put(Constants.TOMCAT_LIBS, TOMCAT_LIBS);
    if (toyConfig.zookeeper != null) {
        env.put(Constants.ZOOKEEPER_QUORUM, toyConfig.zookeeper);
    } else {
        env.put(Constants.ZOOKEEPER_QUORUM, NetUtils.getHostname());
    }
    // 1. Compute classpath
    StringBuilder classPathEnv = new StringBuilder(ApplicationConstants.Environment.CLASSPATH.$())
            .append(File.pathSeparatorChar).append("./*");
    for (String c : configuration.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");
    // add the runtime classpath needed for tests to work
    if (configuration.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }
    env.put("CLASSPATH", classPathEnv.toString());
    env.put(Constants.WAR, warFile.getName());
    // For unit test with YarnMiniCluster
    env.put(YarnConfiguration.RM_SCHEDULER_ADDRESS, configuration.get(YarnConfiguration.RM_SCHEDULER_ADDRESS));
    amContainer.setEnvironment(env);
    // 1.2 Set constraint for the app master
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(32);
    appContext.setResource(capability);
    // 2. Compute app master cmd line
    Vector<CharSequence> vargs = new Vector<>(10);
    // Set java executable command
    vargs.add(ApplicationConstants.Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx32m");
    // Set class name
    vargs.add(TOYMaster.class.getCanonicalName());
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }
    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<>();
    commands.add(command.toString());
    amContainer.setCommands(commands);
    appContext.setAMContainerSpec(amContainer);
    // 3. Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = configuration.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new Exception("Can't get Master Kerberos principal for the RM to use as renewer");
        }
        // For now, only getting tokens for the default file-system.
        final org.apache.hadoop.security.token.Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer,
                credentials);
        if (tokens != null) {
            for (org.apache.hadoop.security.token.Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }
    appContext.setQueue("default");
    LOG.info("Submitting TOY application {} to ASM", appId.toString());
    yarn.submitApplication(appContext);
    // Monitor the application and exit if it is RUNNING
    monitorApplication(appId);
}
From source file:com.toy.Client.java
License:Apache License
/**
 * Copies {@code src} from the local filesystem into the application's staging
 * directory (under the user's home dir) and registers it in
 * {@code localResources} as an APPLICATION-visible FILE, keyed by file name.
 *
 * @throws IOException if the copy or status lookup fails
 */
private static void registerLocalResource(Map<String, LocalResource> localResources, ApplicationId appId,
        FileSystem fs, Path src) throws IOException {
    String stagingSuffix = Constants.TOY_PREFIX + appId.toString() + "/" + src.getName();
    Path dst = new Path(fs.getHomeDirectory(), stagingSuffix);
    LOG.info("Copy {} from local filesystem to {} and add to local environment", src.getName(), dst.toUri());
    fs.copyFromLocalFile(false, true, src, dst);
    FileStatus uploaded = fs.getFileStatus(dst);
    LocalResource resource = Records.newRecord(LocalResource.class);
    resource.setType(LocalResourceType.FILE);
    resource.setVisibility(LocalResourceVisibility.APPLICATION);
    resource.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    // Timestamp/size let YARN verify the localized copy matches this upload.
    resource.setTimestamp(uploaded.getModificationTime());
    resource.setSize(uploaded.getLen());
    localResources.put(src.getName(), resource);
}
From source file:com.toy.Client.java
License:Apache License
/**
 * Uploads a local dependency jar into the application's staging directory in
 * HDFS and registers it as an APPLICATION-visible FILE local resource.
 *
 * @param localResources map to register the resource into, keyed by file name
 * @param appId application whose staging directory receives the file
 * @param fs destination file system
 * @param depname local path of the dependency to upload
 * @throws IOException if the file is missing or the upload fails
 */
private void uploadDepAndRegister(Map<String, LocalResource> localResources, ApplicationId appId, FileSystem fs,
        String depname) throws IOException {
    File dep = new File(depname);
    if (!dep.exists())
        throw new IOException(dep.getAbsolutePath() + " does not exist");
    Path dst = new Path(fs.getHomeDirectory(), Constants.TOY_PREFIX + appId.toString() + "/" + dep.getName());
    LOG.info("Copy {} from local filesystem to {} and add to local environment", dep.getName(), dst.toUri());
    // FIX: try-with-resources — the original leaked both streams when
    // ByteStreams.copy threw (close() calls were not in a finally block).
    try (FileInputStream input = new FileInputStream(dep);
            FSDataOutputStream outputStream = fs.create(dst, true)) {
        ByteStreams.copy(input, outputStream);
    }
    LocalResource amJarRsrc = Records.newRecord(LocalResource.class);
    amJarRsrc.setType(LocalResourceType.FILE);
    amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    // Timestamp/size let YARN verify the localized copy matches this upload.
    FileStatus destStatus = fs.getFileStatus(dst);
    amJarRsrc.setTimestamp(destStatus.getModificationTime());
    amJarRsrc.setSize(destStatus.getLen());
    localResources.put(dep.getName(), amJarRsrc);
}
From source file:com.yahoo.storm.yarn.StormOnYarn.java
License:Open Source License
private void launchApp(String appName, String queue, int amMB, String storm_zip_location) throws Exception { LOG.debug("StormOnYarn:launchApp() ..."); YarnClientApplication client_app = _yarn.createApplication(); GetNewApplicationResponse app = client_app.getNewApplicationResponse(); _appId = app.getApplicationId();//from w w w . j a v a 2s. c o m LOG.debug("_appId:" + _appId); if (amMB > app.getMaximumResourceCapability().getMemory()) { //TODO need some sanity checks amMB = app.getMaximumResourceCapability().getMemory(); } ApplicationSubmissionContext appContext = Records.newRecord(ApplicationSubmissionContext.class); appContext.setApplicationId(app.getApplicationId()); appContext.setApplicationName(appName); appContext.setQueue(queue); // Set up the container launch context for the application master ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class); Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(); // set local resources for the application master // local files or archives as needed // In this scenario, the jar file for the application master is part of the // local resources LOG.info("Copy App Master jar from local filesystem and add to local environment"); // Copy the application master jar to the filesystem // Create a local resource to point to the destination jar path String appMasterJar = findContainingJar(MasterServer.class); FileSystem fs = FileSystem.get(_hadoopConf); Path src = new Path(appMasterJar); String appHome = Util.getApplicationHomeForId(_appId.toString()); Path dst = new Path(fs.getHomeDirectory(), appHome + Path.SEPARATOR + "AppMaster.jar"); fs.copyFromLocalFile(false, true, src, dst); localResources.put("AppMaster.jar", Util.newYarnAppResource(fs, dst)); String stormVersion = Util.getStormVersion(); Path zip; if (storm_zip_location != null) { zip = new Path(storm_zip_location); } else { zip = new Path("/lib/storm/" + stormVersion + "/storm.zip"); } 
_stormConf.put("storm.zip.path", zip.makeQualified(fs).toUri().getPath()); LocalResourceVisibility visibility = LocalResourceVisibility.PUBLIC; _stormConf.put("storm.zip.visibility", "PUBLIC"); if (!Util.isPublic(fs, zip)) { visibility = LocalResourceVisibility.APPLICATION; _stormConf.put("storm.zip.visibility", "APPLICATION"); } localResources.put("storm", Util.newYarnAppResource(fs, zip, LocalResourceType.ARCHIVE, visibility)); Path confDst = Util.createConfigurationFileInFs(fs, appHome, _stormConf, _hadoopConf); // establish a symbolic link to conf directory localResources.put("conf", Util.newYarnAppResource(fs, confDst)); // Setup security tokens Path[] paths = new Path[3]; paths[0] = dst; paths[1] = zip; paths[2] = confDst; Credentials credentials = new Credentials(); TokenCache.obtainTokensForNamenodes(credentials, paths, _hadoopConf); DataOutputBuffer dob = new DataOutputBuffer(); credentials.writeTokenStorageToStream(dob); ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); //security tokens for HDFS distributed cache amContainer.setTokens(securityTokens); // Set local resource info into app master container launch context amContainer.setLocalResources(localResources); // Set the env variables to be setup in the env where the application master // will be run LOG.info("Set the environment for the application master"); Map<String, String> env = new HashMap<String, String>(); // add the runtime classpath needed for tests to work Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./conf"); Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./AppMaster.jar"); //Make sure that AppMaster has access to all YARN JARs List<String> yarn_classpath_cmd = java.util.Arrays.asList("yarn", "classpath"); ProcessBuilder pb = new ProcessBuilder(yarn_classpath_cmd).redirectError(Redirect.INHERIT); LOG.info("YARN CLASSPATH COMMAND = [" + yarn_classpath_cmd + "]"); pb.environment().putAll(System.getenv()); Process proc = pb.start(); 
BufferedReader reader = new BufferedReader(new InputStreamReader(proc.getInputStream(), "UTF-8")); String line = ""; String yarn_class_path = (String) _stormConf.get("storm.yarn.yarn_classpath"); if (yarn_class_path == null) { StringBuilder yarn_class_path_builder = new StringBuilder(); while ((line = reader.readLine()) != null) { yarn_class_path_builder.append(line); } yarn_class_path = yarn_class_path_builder.toString(); } LOG.info("YARN CLASSPATH = [" + yarn_class_path + "]"); proc.waitFor(); reader.close(); Apps.addToEnvironment(env, Environment.CLASSPATH.name(), yarn_class_path); String stormHomeInZip = Util.getStormHomeInZip(fs, zip, stormVersion); Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./storm/" + stormHomeInZip + "/*"); Apps.addToEnvironment(env, Environment.CLASSPATH.name(), "./storm/" + stormHomeInZip + "/lib/*"); String java_home = (String) _stormConf.get("storm.yarn.java_home"); if (java_home == null) java_home = System.getenv("JAVA_HOME"); if (java_home != null && !java_home.isEmpty()) env.put("JAVA_HOME", java_home); LOG.info("Using JAVA_HOME = [" + env.get("JAVA_HOME") + "]"); env.put("appJar", appMasterJar); env.put("appName", appName); env.put("appId", new Integer(_appId.getId()).toString()); env.put("STORM_LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR); amContainer.setEnvironment(env); // Set the necessary command to execute the application master Vector<String> vargs = new Vector<String>(); if (java_home != null && !java_home.isEmpty()) vargs.add(env.get("JAVA_HOME") + "/bin/java"); else vargs.add("java"); vargs.add("-Dstorm.home=./storm/" + stormHomeInZip + "/"); vargs.add("-Dlogfile.name=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/master.log"); //vargs.add("-verbose:class"); vargs.add("com.yahoo.storm.yarn.MasterServer"); vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"); vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"); // Set java executable command 
LOG.info("Setting up app master command:" + vargs); amContainer.setCommands(vargs); // Set up resource type requirements // For now, only memory is supported so we set memory requirements Resource capability = Records.newRecord(Resource.class); capability.setMemory(amMB); appContext.setResource(capability); appContext.setAMContainerSpec(amContainer); _yarn.submitApplication(appContext); }
From source file:com.yahoo.storm.yarn.Util.java
License:Open Source License
@SuppressWarnings("rawtypes") static Path createConfigurationFileInFs(FileSystem fs, String appHome, Map stormConf, YarnConfiguration yarnConf) throws IOException { // dump stringwriter's content into FS conf/storm.yaml Path confDst = new Path(fs.getHomeDirectory(), appHome + Path.SEPARATOR + STORM_CONF_PATH_STRING); Path dirDst = confDst.getParent(); fs.mkdirs(dirDst);// www .ja v a 2s . c o m //storm.yaml FSDataOutputStream out = fs.create(confDst); Yaml yaml = new Yaml(); OutputStreamWriter writer = new OutputStreamWriter(out); rmNulls(stormConf); yaml.dump(stormConf, writer); writer.close(); out.close(); //yarn-site.xml Path yarn_site_xml = new Path(dirDst, "yarn-site.xml"); out = fs.create(yarn_site_xml); writer = new OutputStreamWriter(out); yarnConf.writeXml(writer); writer.close(); out.close(); //logback.xml Path logback_xml = new Path(dirDst, "logback.xml"); out = fs.create(logback_xml); CreateLogbackXML(out); out.close(); return dirDst; }
From source file:com.yata.core.HDFSManager.java
License:Apache License
/** * * @param hdfsTestDataSourceFile/* w ww . j av a 2 s .c o m*/ * @param hdfsTestDataTargetFile * @throws IOException * * hadoop fs -cp /projects/ddsw/dev/data/backup/dealer_hierarchy/<<DOMAIN_NAME>>/<<FILE_NAME>> /projects/ddsw/dev/data/raw/nas/<<DOMAIN_NAME>> */ public void copyHDFSData(String hdfsTestDataSourceFile, String hdfsTestDataTargetFile) throws OozieClientException { System.out.println("copyHDFSData@" + className + " : Loading Test Data From :-> " + hdfsTestDataSourceFile + " : Into :-> " + hdfsTestDataTargetFile); FileSystem hdfs = null; Path hdfsTestDataSource = null; Path hdfsTestDataTarget = null; try { hdfs = getHdfsFileSytem(); System.out.println("copyHDFSData@" + className + " : HDFS :-> " + hdfs); System.out.println("copyHDFSData@" + className + " : HDFSHomeDirectory :-> " + hdfs.getHomeDirectory()); System.out.println("copyHDFSData@" + className + " : HDFS-URI :-> " + hdfs.getUri()); System.out.println( "copyHDFSData@" + className + " : HDFSWorkingDirectory :-> " + hdfs.getWorkingDirectory()); System.out.println("copyHDFSData@" + className + " : HDFS : " + hdfs + " : Exists :-> " + hdfs.exists(hdfs.getHomeDirectory())); hdfsTestDataSource = new Path(hdfs.getUri().getPath() + hdfsTestDataSourceFile); hdfsTestDataTarget = new Path(hdfs.getUri().getPath() + hdfsTestDataTargetFile); System.out.println("copyHDFSData@" + className + " : HDFS TEST DATA : " + hdfsTestDataSource + " : Exists :-> " + hdfs.exists(hdfsTestDataSource)); System.out.println("copyHDFSData@" + className + " : HDFS DOMAIN DATA : " + hdfsTestDataTarget + " : Exists :-> " + hdfs.exists(hdfsTestDataTarget)); } catch (IOException e) { e.printStackTrace(); throw new OozieClientException("ERR_CODE_1218", "copyHDFSData@" + className + " : IOException while getting HDFS FileSystem - EXITING..."); } FileUtil hdfsUtil = new FileUtil(); try { hdfsUtil.copy(hdfs, hdfsTestDataSource, hdfs, hdfsTestDataTarget, false, true, hdfs.getConf()); System.out.println("copyHDFSData@" + className + " 
: NOW : HDFS TEST DATA : " + hdfsTestDataSource + " : Exists :-> " + hdfs.exists(hdfsTestDataSource)); System.out.println("copyHDFSData@" + className + " : HDFS DOMAIN DATA : " + hdfsTestDataTarget + " : Exists :-> " + hdfs.exists(hdfsTestDataTarget)); } catch (IOException e) { e.printStackTrace(); throw new OozieClientException("ERR_CODE_1218", "copyHDFSData@" + className + " : IOException while Copying HDFS Data - EXITING..."); } /** * IMPORTANT * If the Source Data file on HDFS is not owned by the Hive/Hadoop User, then use the command below to * change the permission for Hive/Hadoop User to move/delete the file once processed... */ try { hdfs.setPermission(hdfsTestDataTarget, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.READ_EXECUTE)); } catch (IOException e) { e.printStackTrace(); throw new OozieClientException("ERR_CODE_1218", "copyHDFSData@" + className + " : IOException while Changing HDFS File Permissions - EXITING..."); } }