List of usage examples for the org.apache.hadoop.yarn.conf.YarnConfiguration constructor
public YarnConfiguration()
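The no-argument constructor builds a Hadoop Configuration that picks up yarn-default.xml and yarn-site.xml from the classpath, which is why the examples below simply call new YarnConfiguration() and hand the result to a client or FileSystem. A minimal, self-contained sketch of that common pattern (the class name YarnConfigurationExample is illustrative only; the calls mirror the snippets below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnConfigurationExample {
    public static void main(String[] args) {
        // Loads yarn-default.xml / yarn-site.xml from the classpath
        Configuration conf = new YarnConfiguration();

        // Typical follow-up seen in the examples below: initialise a YarnClient with it
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(conf);
        yarnClient.start();

        // Settings can be read back through the normal Configuration API,
        // e.g. the ResourceManager address (falls back to the built-in default)
        String rmAddress = conf.get(YarnConfiguration.RM_ADDRESS,
                YarnConfiguration.DEFAULT_RM_ADDRESS);
        System.out.println("ResourceManager address: " + rmAddress);

        yarnClient.stop();
    }
}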
From source file:hadoop.yarn.distributedshell.DshellApplicationMaster.java
License:Apache License
public DshellApplicationMaster() {
    // Set up the configuration
    conf = new YarnConfiguration();
}
From source file:hadoop.yarn.distributedshell.DshellClient.java
License:Apache License
public DshellClient() throws Exception {
    this(new YarnConfiguration());
}
From source file:husky.client.HuskyYarnClient.java
License:Apache License
public HuskyYarnClient() throws IOException {
    // Build the YARN configuration and derive the matching FileSystem handle from it
    mYarnConf = new YarnConfiguration();
    mFileSystem = FileSystem.get(mYarnConf);
    // Create a YarnClient and initialise it with the same configuration
    mYarnClient = YarnClient.createYarnClient();
    mYarnClient.init(mYarnConf);
}
From source file:husky.server.HuskyApplicationMaster.java
License:Apache License
public HuskyApplicationMaster() throws IOException {
    mYarnConf = new YarnConfiguration();
    mFileSystem = FileSystem.get(mYarnConf);
}
From source file:hws.core.JobClient.java
License:Apache License
public void run(String[] args) throws Exception {
    //final String command = args[0];
    //final int n = Integer.valueOf(args[1]);
    //final Path jarPath = new Path(args[2]);
    Options options = new Options();
    /*options.addOption(OptionBuilder.withLongOpt("jar")
            .withDescription("Jar path")
            .hasArg()
            .withArgName("JarPath")
            .create());
    options.addOption(OptionBuilder.withLongOpt("scheduler")
            .withDescription("Scheduler class name")
            .hasArg()
            .withArgName("ClassName")
            .create());
    */
    options.addOption(OptionBuilder.withLongOpt("zk-servers")
            .withDescription("List of the ZooKeeper servers").hasArgs().withArgName("zkAddrs").create("zks"));
    //options.addOption("l", "list", false, "list modules");
    options.addOption(OptionBuilder.withLongOpt("load").withDescription("load new modules").hasArgs()
            .withArgName("XMLFiles").create());
    /*options.addOption(OptionBuilder.withLongOpt("remove")
            .withDescription("remove modules")
            .hasArgs()
            .withArgName("ModuleNames")
            .create("rm"));
    */
    CommandLineParser parser = new BasicParser();
    CommandLine cmd = parser.parse(options, args);
    //Path jarPath = null;
    //String schedulerClassName = null;
    String[] xmlFileNames = null;
    //String[] moduleNames = null;
    String zksArgs = "";
    String[] zkServers = null;
    if (cmd.hasOption("zks")) {
        zksArgs = "-zks";
        zkServers = cmd.getOptionValues("zks");
        for (String zks : zkServers) {
            zksArgs += " " + zks;
        }
    }

    //Logger setup
    //FSDataOutputStream writer = FileSystem.get(conf).create(new Path("hdfs:///hws/apps/"+appIdStr+"/logs/jobClient.log"));
    //Logger.addOutputStream(writer);

    /*if(cmd.hasOption("l")){
        LOG.warn("Argument --list (-l) is not supported yet.");
    }
    if(cmd.hasOption("jar")){
        jarPath = new Path(cmd.getOptionValue("jar"));
    }
    if(cmd.hasOption("scheduler")){
        schedulerClassName = cmd.getOptionValue("scheduler");
    }*/
    if (cmd.hasOption("load")) {
        xmlFileNames = cmd.getOptionValues("load");
    }
    /*else if(cmd.hasOption("rm")){
        moduleNames = cmd.getOptionValues("rm");
    }*/

    //LOG.info("Jar-Path "+jarPath);
    if (xmlFileNames != null) {
        String paths = "";
        for (String path : xmlFileNames) {
            paths += path + "; ";
        }
        LOG.info("Load XMLs: " + paths);
    }
    /*if(moduleNames!=null){
        String modules = "";
        for(String module: moduleNames){
            modules += module+"; ";
        }
        LOG.info("remove: "+modules);
    }*/

    // Create yarnClient
    YarnConfiguration conf = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();
    System.out.println("LOG Path: " + ApplicationConstants.LOG_DIR_EXPANSION_VAR);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();

    ZkClient zk = new ZkClient(zkServers[0]); //TODO select a ZooKeeper server
    if (!zk.exists("/hadoop-watershed")) {
        zk.createPersistent("/hadoop-watershed", "");
    }
    zk.createPersistent("/hadoop-watershed/" + appId.toString(), "");

    FileSystem fs = FileSystem.get(conf);

    LOG.info("Collecting files to upload");
    fs.mkdirs(new Path("hdfs:///hws/apps/" + appId.toString()));
    fs.mkdirs(new Path("hdfs:///hws/apps/" + appId.toString() + "/logs"));

    ModulePipeline modulePipeline = ModulePipeline.fromXMLFiles(xmlFileNames);
    LOG.info("Uploading files to HDFS");
    for (String path : modulePipeline.files()) {
        uploadFile(fs, new File(path), appId);
    }
    LOG.info("Upload finished");

    String modulePipelineJson = Json.dumps(modulePipeline);
    String modulePipelineBase64 = Base64.encodeBase64String(StringUtils.getBytesUtf8(modulePipelineJson))
            .replaceAll("\\s", "");
    LOG.info("ModulePipeline: " + modulePipelineJson);
    //LOG.info("ModulePipeline: "+modulePipelineBase64);

    amContainer.setCommands(Collections.singletonList("$JAVA_HOME/bin/java" + " -Xmx256M"
            + " hws.core.JobMaster" + " -aid " + appId.toString() + " --load " + modulePipelineBase64
            + " " + zksArgs
            + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
            + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

    // Setup jar for ApplicationMaster
    //LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    //setupAppMasterJar(jarPath, appMasterJar);
    //amContainer.setLocalResources(Collections.singletonMap("hws.jar", appMasterJar));

    LOG.info("Listing files for YARN-Watershed");
    RemoteIterator<LocatedFileStatus> filesIterator = fs.listFiles(new Path("hdfs:///hws/bin/"), false);
    Map<String, LocalResource> resources = new HashMap<String, LocalResource>();
    LOG.info("Files setup as resource");
    while (filesIterator.hasNext()) {
        LocatedFileStatus fileStatus = filesIterator.next();
        // Setup jar for ApplicationMaster
        LocalResource containerJar = Records.newRecord(LocalResource.class);
        ContainerUtils.setupContainerJar(fs, fileStatus.getPath(), containerJar);
        resources.put(fileStatus.getPath().getName(), containerJar);
    }
    LOG.info("container resource setup");
    amContainer.setLocalResources(resources);
    fs.close(); //closing FileSystem interface

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    ContainerUtils.setupContainerEnv(appMasterEnv, conf);
    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(256);
    capability.setVirtualCores(1);

    // Finally, set-up ApplicationSubmissionContext for the application
    //ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    appContext.setApplicationName("Hadoop-Watershed"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue("default"); // queue

    // Submit application
    LOG.info("Submitting application " + appId);
    yarnClient.submitApplication(appContext);

    LOG.info("Waiting for containers to finish");
    zk.waitUntilExists("/hadoop-watershed/" + appId.toString() + "/done", TimeUnit.MILLISECONDS, 250);

    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED
            && appState != YarnApplicationState.FAILED) {
        Thread.sleep(100);
        appReport = yarnClient.getApplicationReport(appId);
        appState = appReport.getYarnApplicationState();
    }

    System.out.println("Application " + appId + " finished with" + " state " + appState + " at "
            + appReport.getFinishTime());

    System.out.println("deleting " + appId.toString() + " znode");
    zk.deleteRecursive("/hadoop-watershed/" + appId.toString()); //TODO remove app folder from ZooKeeper
}
From source file:hws.core.JobMaster.java
License:Apache License
public JobMaster(ModulePipeline modulePipeline, String appIdStr, String zksArgs, String[] zkServers) {
    this.numContainersToWaitFor = 0; //TODO remove
    configuration = new YarnConfiguration();
    this.appIdStr = appIdStr;
    this.modulePipeline = modulePipeline;
    nmClient = NMClient.createNMClient();
    nmClient.init(configuration);
    nmClient.start();
    this.instances = this.modulePipeline.instances();
    this.finishListeners = new ConcurrentHashMap<String, IZkChildListener>();
    this.haltedProducers = new ConcurrentHashMap<String, List<String>>();
    this.zksArgs = zksArgs;
    this.zkServers = zkServers;

    //Logger setup
    try {
        FSDataOutputStream writer = FileSystem.get(configuration)
                .create(new Path("hdfs:///hws/apps/" + appIdStr + "/logs/jobMaster.log"));
        Logger.addOutputStream(writer);
    } catch (IOException e) {
        //e.printStackTrace();
    }

    zk = new ZkClient(zkServers[0]); //TODO choose the ZooKeeper server
}
From source file:io.amient.yarn1.YarnClient.java
License:Open Source License
/**
 * This method should be called by the implementing application static main
 * method. It does all the work around creating a yarn application and
 * submitting the request to the yarn resource manager. The class given in
 * the appClass argument will be run inside the yarn-allocated master
 * container.
 */
public static void submitApplicationMaster(Properties appConfig, Class<? extends YarnMaster> masterClass,
        String[] args, Boolean awaitCompletion) throws Exception {
    log.info("Yarn1 App Configuration:");
    for (Object param : appConfig.keySet()) {
        log.info(param.toString() + " = " + appConfig.get(param).toString());
    }
    String yarnConfigPath = appConfig.getProperty("yarn1.site", "/etc/hadoop");
    String masterClassName = masterClass.getName();
    appConfig.setProperty("yarn1.master.class", masterClassName);
    String applicationName = appConfig.getProperty("yarn1.application.name", masterClassName);
    log.info("--------------------------------------------------------------");
    if (Boolean.valueOf(appConfig.getProperty("yarn1.local.mode", "false"))) {
        YarnMaster.run(appConfig, args);
        return;
    }

    int masterPriority = Integer.valueOf(
            appConfig.getProperty("yarn1.master.priority", String.valueOf(YarnMaster.DEFAULT_MASTER_PRIORITY)));
    int masterMemoryMb = Integer.valueOf(appConfig.getProperty("yarn1.master.memory.mb",
            String.valueOf(YarnMaster.DEFAULT_MASTER_MEMORY_MB)));
    int masterNumCores = Integer.valueOf(
            appConfig.getProperty("yarn1.master.num.cores", String.valueOf(YarnMaster.DEFAULT_MASTER_CORES)));
    String queue = appConfig.getProperty("yarn1.queue");

    Configuration yarnConfig = new YarnConfiguration();
    yarnConfig.addResource(new FileInputStream(yarnConfigPath + "/core-site.xml"));
    yarnConfig.addResource(new FileInputStream(yarnConfigPath + "/hdfs-site.xml"));
    yarnConfig.addResource(new FileInputStream(yarnConfigPath + "/yarn-site.xml"));
    for (Map.Entry<Object, Object> entry : appConfig.entrySet()) {
        yarnConfig.set(entry.getKey().toString(), entry.getValue().toString());
    }

    final org.apache.hadoop.yarn.client.api.YarnClient yarnClient = org.apache.hadoop.yarn.client.api.YarnClient
            .createYarnClient();
    yarnClient.init(yarnConfig);
    yarnClient.start();

    for (NodeReport report : yarnClient.getNodeReports(NodeState.RUNNING)) {
        log.debug("Node report:" + report.getNodeId() + " @ " + report.getHttpAddress() + " | "
                + report.getCapability());
    }

    log.info("Submitting application master class " + masterClassName);

    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    final ApplicationId appId = appResponse.getApplicationId();
    if (appId == null) {
        System.exit(111);
    } else {
        appConfig.setProperty("am.timestamp", String.valueOf(appId.getClusterTimestamp()));
        appConfig.setProperty("am.id", String.valueOf(appId.getId()));
    }

    YarnClient.distributeResources(yarnConfig, appConfig, applicationName);

    String masterJvmArgs = appConfig.getProperty("yarn1.master.jvm.args", "");
    YarnContainerContext masterContainer = new YarnContainerContext(yarnConfig, appConfig, masterJvmArgs,
            masterPriority, masterMemoryMb, masterNumCores, applicationName, YarnMaster.class, args);

    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    appContext.setApplicationName(masterClassName);
    appContext.setResource(masterContainer.capability);
    appContext.setPriority(masterContainer.priority);
    appContext.setQueue(queue);
    appContext.setApplicationType(appConfig.getProperty("yarn1.application.type", "YARN"));
    appContext.setAMContainerSpec(masterContainer.createContainerLaunchContext());

    log.info("Master container spec: " + masterContainer.capability);

    yarnClient.submitApplication(appContext);

    ApplicationReport report = yarnClient.getApplicationReport(appId);
    log.info("Tracking URL: " + report.getTrackingUrl());

    if (awaitCompletion) {
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                if (!yarnClient.isInState(Service.STATE.STOPPED)) {
                    log.info("Killing yarn application in shutdown hook");
                    try {
                        yarnClient.killApplication(appId);
                    } catch (Throwable e) {
                        log.error("Failed to kill yarn application - please check YARN Resource Manager", e);
                    }
                }
            }
        });

        float lastProgress = -0.0f;
        while (true) {
            try {
                Thread.sleep(10000);
                report = yarnClient.getApplicationReport(appId);
                if (lastProgress != report.getProgress()) {
                    lastProgress = report.getProgress();
                    log.info(report.getApplicationId() + " " + (report.getProgress() * 100.00) + "% "
                            + (System.currentTimeMillis() - report.getStartTime()) + "(ms) "
                            + report.getDiagnostics());
                }
                if (!report.getFinalApplicationStatus().equals(FinalApplicationStatus.UNDEFINED)) {
                    log.info(report.getApplicationId() + " " + report.getFinalApplicationStatus());
                    log.info("Tracking url: " + report.getTrackingUrl());
                    log.info("Finish time: "
                            + ((System.currentTimeMillis() - report.getStartTime()) / 1000) + "(s)");
                    break;
                }
            } catch (Throwable e) {
                log.error("Master Heart Beat Error - terminating", e);
                yarnClient.killApplication(appId);
                Thread.sleep(2000);
            }
        }
        yarnClient.stop();
        if (!report.getFinalApplicationStatus().equals(FinalApplicationStatus.SUCCEEDED)) {
            System.exit(112);
        }
    }
    yarnClient.stop();
}
From source file:io.amient.yarn1.YarnMaster.java
License:Open Source License
/**
 * Default constructor can be used for local execution
 */
public YarnMaster(Properties appConfig) {
    this.appConfig = appConfig;
    this.masterClassName = this.getClass().getName();
    this.applicationName = appConfig.getProperty("yarn1.application.name", masterClassName);
    yarnConfig = new YarnConfiguration();
    localMode = Boolean.valueOf(appConfig.getProperty("yarn1.local.mode", "false"));
    masterMemoryMb = Integer.valueOf(appConfig.getProperty("yarn1.master.memory.mb",
            String.valueOf(YarnMaster.DEFAULT_MASTER_MEMORY_MB)));
    masterCores = Integer.valueOf(
            appConfig.getProperty("yarn1.master.num.cores", String.valueOf(YarnMaster.DEFAULT_MASTER_CORES)));
    masterPriority = Integer.valueOf(
            appConfig.getProperty("yarn1.master.priority", String.valueOf(YarnMaster.DEFAULT_MASTER_PRIORITY)));
    if (appConfig.containsKey("yarn1.client.tracking.url")) {
        try {
            trackingUrl = new URL(appConfig.getProperty("yarn1.client.tracking.url"));
        } catch (MalformedURLException e) {
            log.warn("Invalid client tracking url", e);
        }
    }
}
From source file:io.fluo.cluster.OracleApp.java
License:Apache License
public static void main(String[] args) throws ConfigurationException, Exception {
    OracleAppOptions options = new OracleAppOptions();
    JCommander jcommand = new JCommander(options, args);
    if (options.displayHelp()) {
        jcommand.usage();
        System.exit(-1);
    }

    Logging.init("oracle", options.getFluoHome() + "/conf", "STDOUT");

    File configFile = new File(options.getFluoHome() + "/conf/fluo.properties");
    FluoConfiguration config = new FluoConfiguration(configFile);
    if (!config.hasRequiredOracleProps()) {
        log.error("fluo.properties is missing required properties for oracle");
        System.exit(-1);
    }
    Environment env = new Environment(config);

    YarnConfiguration yarnConfig = new YarnConfiguration();
    yarnConfig.addResource(new Path(options.getHadoopPrefix() + "/etc/hadoop/core-site.xml"));
    yarnConfig.addResource(new Path(options.getHadoopPrefix() + "/etc/hadoop/yarn-site.xml"));

    TwillRunnerService twillRunner = new YarnTwillRunnerService(yarnConfig, env.getZookeepers());
    twillRunner.startAndWait();

    TwillPreparer preparer = twillRunner.prepare(new OracleApp(options, config));
    TwillController controller = preparer.start();
    controller.start();

    while (controller.isRunning() == false) {
        Thread.sleep(2000);
    }

    env.close();
    System.exit(0);
}
From source file:io.fluo.cluster.runner.YarnAppRunner.java
License:Apache License
private synchronized TwillRunnerService getTwillRunner() {
    if (twillRunner == null) {
        YarnConfiguration yarnConfig = new YarnConfiguration();
        yarnConfig.addResource(new Path(hadoopPrefix + "/etc/hadoop/core-site.xml"));
        yarnConfig.addResource(new Path(hadoopPrefix + "/etc/hadoop/yarn-site.xml"));

        twillRunner = new YarnTwillRunnerService(yarnConfig, config.getAppZookeepers() + ZookeeperPath.TWILL);
        twillRunner.startAndWait();

        // sleep to give twill time to retrieve state from zookeeper
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            throw new IllegalStateException(e);
        }
    }
    return twillRunner;
}