List of usage examples for org.apache.hadoop.yarn.conf.YarnConfiguration#YarnConfiguration()
public YarnConfiguration()
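A minimal sketch before the project examples (not from the listed sources; the class name is invented): the no-arg constructor loads yarn-default.xml and yarn-site.xml from the classpath on top of the core Hadoop resources, so well-known YARN keys resolve to their defaults unless a yarn-site.xml overrides them.

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnConfigurationSketch {
    public static void main(String[] args) {
        // Picks up yarn-default.xml and yarn-site.xml from the classpath.
        YarnConfiguration conf = new YarnConfiguration();
        // RM_ADDRESS is the constant for "yarn.resourcemanager.address";
        // with no override this prints the default 0.0.0.0:8032.
        System.out.println(conf.get(YarnConfiguration.RM_ADDRESS,
                YarnConfiguration.DEFAULT_RM_ADDRESS));
    }
}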
From source file:ApplicationMaster.java
License:Apache License
public ApplicationMaster() {
    // Set up the configuration
    conf = new YarnConfiguration();
}
From source file:AggregatedLogsPurger.java
License:Apache License
public static void main(String[] args) throws Exception {
    ToolRunner.run(new YarnConfiguration(), new AggregatedLogsPurger(), args);
}
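For context, a minimal sketch (assumed, not the actual AggregatedLogsPurger source) of the Tool contract this pattern relies on: ToolRunner.run applies the generic Hadoop options to the passed YarnConfiguration and hands the remaining arguments to run().

import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class PurgerToolSketch extends Configured implements Tool {
    @Override
    public int run(String[] args) throws Exception {
        // getConf() returns the YarnConfiguration passed to ToolRunner.run,
        // with any -D or -conf generic options already applied.
        return 0;
    }

    public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new YarnConfiguration(), new PurgerToolSketch(), args));
    }
}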
From source file:alluxio.yarn.ApplicationMaster.java
License:Apache License
/**
 * @param args Command line arguments to launch application master
 */
public static void main(String[] args) {
    Options options = new Options();
    options.addOption("num_workers", true, "Number of Alluxio workers to launch. Default 1");
    options.addOption("master_address", true, "(Required) Address to run Alluxio master");
    options.addOption("resource_path", true, "(Required) HDFS path containing the Application Master");

    try {
        LOG.info("Starting Application Master with args {}", Arrays.toString(args));
        final CommandLine cliParser = new GnuParser().parse(options, args);
        YarnConfiguration conf = new YarnConfiguration();
        UserGroupInformation.setConfiguration(conf);
        if (UserGroupInformation.isSecurityEnabled()) {
            String user = System.getenv("ALLUXIO_USER");
            UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
            for (Token token : UserGroupInformation.getCurrentUser().getTokens()) {
                ugi.addToken(token);
            }
            LOG.info("UserGroupInformation: " + ugi);
            ugi.doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    runApplicationMaster(cliParser);
                    return null;
                }
            });
        } else {
            runApplicationMaster(cliParser);
        }
    } catch (Exception e) {
        LOG.error("Error running Application Master", e);
        System.exit(1);
    }
}
From source file:alluxio.yarn.ApplicationMaster.java
License:Apache License
private static Map<String, LocalResource> setupLocalResources(String resourcePath) {
    try {
        Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
        for (String resourceName : LOCAL_RESOURCE_NAMES) {
            localResources.put(resourceName, YarnUtils.createLocalResourceOfFile(
                    new YarnConfiguration(), PathUtils.concatPath(resourcePath, resourceName)));
        }
        return localResources;
    } catch (IOException e) {
        throw new RuntimeException("Cannot find resource", e);
    }
}
From source file:azkaban.jobtype.HadoopJobUtils.java
License:Apache License
/**
 * <pre>
 * Uses YarnClient to kill the job on the cluster.
 * Using JobClient only works partially:
 *   If the YARN container has started but the Spark job hasn't, it will kill the job.
 *   If the Spark job has started, the cancel will hang until the Spark job is complete.
 *   If the Spark job is complete, it will return immediately, with a job-not-found on the job tracker.
 * </pre>
 *
 * @param applicationId
 * @throws IOException
 * @throws YarnException
 */
public static void killJobOnCluster(String applicationId, Logger log) throws YarnException, IOException {
    YarnConfiguration yarnConf = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(yarnConf);
    yarnClient.start();

    // An application ID string has the form "application_<clusterTimestamp>_<sequence>".
    String[] split = applicationId.split("_");
    ApplicationId aid = ApplicationId.newInstance(Long.parseLong(split[1]), Integer.parseInt(split[2]));

    log.info("start killing application: " + aid);
    yarnClient.killApplication(aid);
    log.info("successfully killed application: " + aid);
}
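A hedged usage sketch (the application ID below is a placeholder, not from the source): the method expects the standard application_<clusterTimestamp>_<sequence> ID string, which is what the split on "_" relies on.

// Hypothetical call site; the ID is illustrative only.
Logger log = Logger.getLogger(HadoopJobUtils.class);
HadoopJobUtils.killJobOnCluster("application_1449220589084_0001", log);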
From source file:base.test.HelloWorld.java
License:Apache License
public static void main(String[] args) {
    if (args.length < 1) {
        System.err.println("Arguments format: <host:port of zookeeper server>");
        System.exit(1);
    }
    String zkStr = args[0];

    YarnConfiguration yarnConfiguration = new YarnConfiguration();
    yarnConfiguration.setSocketAddr("yarn.resourcemanager.address",
            new InetSocketAddress("192.168.80.103", 8032));

    final TwillRunnerService twillRunner = new YarnTwillRunnerService(yarnConfiguration, zkStr);
    twillRunner.start();

    String yarnClasspath = yarnConfiguration.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            Joiner.on(",").join(YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH));
    List<String> applicationClassPaths = Lists.newArrayList();
    Iterables.addAll(applicationClassPaths, Splitter.on(",").split(yarnClasspath));

    final TwillController controller = twillRunner.prepare(new HelloWorldRunnable())
            .addLogHandler(new PrinterLogHandler(new PrintWriter(System.out, true)))
            .withApplicationClassPaths(applicationClassPaths)
            .withBundlerClassAcceptor(new HadoopClassExcluder())
            .start();

    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                Futures.getUnchecked(controller.terminate());
            } finally {
                twillRunner.stop();
            }
        }
    });

    try {
        controller.awaitTerminated();
    } catch (ExecutionException e) {
        e.printStackTrace();
    }
}
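The example above pins the ResourceManager address to a hard-coded IP with setSocketAddr. A hedged alternative sketch (not part of the source): the same key can be set through the YarnConfiguration.RM_ADDRESS constant rather than the raw string.

// Equivalent setting using the typed constant for "yarn.resourcemanager.address".
YarnConfiguration yarnConfiguration = new YarnConfiguration();
yarnConfiguration.set(YarnConfiguration.RM_ADDRESS, "192.168.80.103:8032");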
From source file:cn.edu.buaa.act.petuumOnYarn.Client.java
License:Apache License
public Client() throws Exception {
    this(new YarnConfiguration());
}
From source file:co.cask.cdap.explore.service.ExploreServiceUtilsTest.java
License:Apache License
@Test
public void hijackConfFileTest() throws Exception {
    Configuration conf = new Configuration(false);
    conf.set("foo", "bar");
    Assert.assertEquals(1, conf.size());

    File tempDir = tmpFolder.newFolder();
    File confFile = tmpFolder.newFile("hive-site.xml");
    try (FileOutputStream os = new FileOutputStream(confFile)) {
        conf.writeXml(os);
    }

    File newConfFile = ExploreServiceUtils.updateConfFileForExplore(confFile, tempDir);

    conf = new Configuration(false);
    conf.addResource(newConfFile.toURI().toURL());

    Assert.assertEquals(3, conf.size());
    Assert.assertEquals("false", conf.get(Job.MAPREDUCE_JOB_USER_CLASSPATH_FIRST));
    Assert.assertEquals("false", conf.get(Job.MAPREDUCE_JOB_CLASSLOADER));
    Assert.assertEquals("bar", conf.get("foo"));

    // check yarn-site changes
    confFile = tmpFolder.newFile("yarn-site.xml");
    conf = new YarnConfiguration();
    try (FileOutputStream os = new FileOutputStream(confFile)) {
        conf.writeXml(os);
    }
    String yarnApplicationClassPath = "$PWD/*," + conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            Joiner.on(",").join(YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH));

    newConfFile = ExploreServiceUtils.updateConfFileForExplore(confFile, tempDir);

    conf = new Configuration(false);
    conf.addResource(newConfFile.toURI().toURL());
    Assert.assertEquals(yarnApplicationClassPath, conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH));

    // check mapred-site changes
    confFile = tmpFolder.newFile("mapred-site.xml");
    conf = new YarnConfiguration();
    try (FileOutputStream os = new FileOutputStream(confFile)) {
        conf.writeXml(os);
    }
    String mapredApplicationClassPath = "$PWD/*," + conf.get(MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH,
            MRJobConfig.DEFAULT_MAPREDUCE_APPLICATION_CLASSPATH);

    newConfFile = ExploreServiceUtils.updateConfFileForExplore(confFile, tempDir);

    conf = new Configuration(false);
    conf.addResource(newConfFile.toURI().toURL());
    Assert.assertEquals(mapredApplicationClassPath, conf.get(MRJobConfig.MAPREDUCE_APPLICATION_CLASSPATH));

    // Ensure conf files that are not hive-site.xml/mapred-site.xml/yarn-site.xml are unchanged
    confFile = tmpFolder.newFile("core-site.xml");
    Assert.assertEquals(confFile, ExploreServiceUtils.updateConfFileForExplore(confFile, tempDir));
}
From source file:com.accumulobook.advanced.mapreduce.MiniMRClusterRunner.java
License:Apache License
public void setup() throws IOException {
    Configuration conf = new YarnConfiguration();
    cluster = MiniMRClientClusterFactory.create(this.getClass(), 2, conf);
}
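A companion teardown sketch (assumed, not from the listed source; the cluster field comes from the class above): MiniMRClientCluster exposes stop() to shut the in-process cluster down after tests.

public void teardown() throws IOException {
    // Shut down the in-process MapReduce cluster started in setup().
    if (cluster != null) {
        cluster.stop();
    }
}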
From source file:com.bigjob.Client.java
License:Apache License
/**
 * Parse command line options.
 *
 * @param args Parsed command line options
 * @return Whether the init was successful to run the client
 * @throws ParseException
 */
public boolean init(String[] args) throws ParseException {
    CommandLine cliParser = new GnuParser().parse(opts, args);

    if (args.length == 0) {
        throw new IllegalArgumentException("No args specified for client to initialize");
    }

    if (cliParser.hasOption("log_properties")) {
        String log4jPath = cliParser.getOptionValue("log_properties");
        try {
            Log4jPropertyHelper.updateLog4jConfiguration(Client.class, log4jPath);
        } catch (Exception e) {
            LOG.warn("Can not set up custom log4j properties. " + e);
        }
    }

    if (cliParser.hasOption("help")) {
        printUsage();
        return false;
    }

    if (cliParser.hasOption("debug")) {
        debugFlag = true;
    }

    if (fileExist("log4j.properties")) {
        try {
            Log4jPropertyHelper.updateLog4jConfiguration(ApplicationMaster.class, "log4j.properties");
        } catch (Exception e) {
            LOG.warn("Can not set up custom log4j properties. " + e);
        }
    } else {
        LOG.warn("No Log4j found");
    }

    yarnClient = YarnClient.createYarnClient();
    // Note: both branches create a default YarnConfiguration, so the "config"
    // option is effectively ignored here.
    String configPath = cliParser.getOptionValue("config", "");
    if (configPath.compareTo("") == 0) {
        conf = new YarnConfiguration();
    } else {
        conf = new YarnConfiguration();
    }
    yarnClient.init(conf);

    appName = cliParser.getOptionValue("appname", appName);
    amPriority = Integer.parseInt(cliParser.getOptionValue("priority", "0"));
    amQueue = cliParser.getOptionValue("queue", "default");
    amMemory = Integer.parseInt(cliParser.getOptionValue("master_memory", "10"));
    amVCores = Integer.parseInt(cliParser.getOptionValue("master_vcores", "1"));
    serviceUrl = cliParser.getOptionValue("service_url", "yarn://localhost?fs=hdfs://localhost:9000");

    try {
        org.apache.commons.httpclient.URI url = new org.apache.commons.httpclient.URI(serviceUrl, false);

        // YARN URL
        String host = url.getHost();
        int port = 8032;
        if (url.getPort() != -1) {
            port = url.getPort();
        }
        String yarnRM = host + ":" + port;
        LOG.info("Connecting to YARN at: " + yarnRM);
        conf.set("yarn.resourcemanager.address", yarnRM);

        // Hadoop FS/HDFS URL
        String query = url.getQuery();
        if (query.startsWith("fs=")) {
            dfsUrl = query.substring(3, query.length());
            LOG.info("Connect to Hadoop FS: " + dfsUrl);
            conf.set("fs.defaultFS", dfsUrl);
        }
    } catch (Exception e) {
        e.printStackTrace();
    }

    if (amMemory < 0) {
        throw new IllegalArgumentException("Invalid memory specified for application master, exiting."
                + " Specified memory=" + amMemory);
    }
    if (amVCores < 0) {
        throw new IllegalArgumentException("Invalid virtual cores specified for application master, exiting."
                + " Specified virtual cores=" + amVCores);
    }

    if (!cliParser.hasOption("jar")) {
        throw new IllegalArgumentException("No jar file specified for application master");
    }
    appMasterJar = cliParser.getOptionValue("jar");

    if (!cliParser.hasOption("shell_command") && !cliParser.hasOption("shell_script")) {
        throw new IllegalArgumentException(
                "No shell command or shell script specified to be executed by application master");
    } else if (cliParser.hasOption("shell_command") && cliParser.hasOption("shell_script")) {
        throw new IllegalArgumentException(
                "Can not specify shell_command option " + "and shell_script option at the same time");
    } else if (cliParser.hasOption("shell_command")) {
        shellCommand = cliParser.getOptionValue("shell_command");
    } else {
        shellScriptPath = cliParser.getOptionValue("shell_script");
    }
    if (cliParser.hasOption("shell_args")) {
        shellArgs = cliParser.getOptionValues("shell_args");
    }
    if (cliParser.hasOption("shell_env")) {
        String[] envs = cliParser.getOptionValues("shell_env");
        for (String env : envs) {
            env = env.trim();
            int index = env.indexOf('=');
            if (index == -1) {
                shellEnv.put(env, "");
                continue;
            }
            String key = env.substring(0, index);
            String val = "";
            if (index < (env.length() - 1)) {
                val = env.substring(index + 1);
            }
            shellEnv.put(key, val);
        }
    }
    shellCmdPriority = Integer.parseInt(cliParser.getOptionValue("shell_cmd_priority", "0"));

    containerMemory = Integer.parseInt(cliParser.getOptionValue("container_memory", "10"));
    containerVirtualCores = Integer.parseInt(cliParser.getOptionValue("container_vcores", "1"));
    numContainers = Integer.parseInt(cliParser.getOptionValue("num_containers", "1"));

    if (containerMemory < 0 || containerVirtualCores < 0 || numContainers < 1) {
        throw new IllegalArgumentException("Invalid no. of containers or container memory/vcores specified,"
                + " exiting." + " Specified containerMemory=" + containerMemory + ", containerVirtualCores="
                + containerVirtualCores + ", numContainer=" + numContainers);
    }

    clientTimeout = Integer.parseInt(cliParser.getOptionValue("timeout", "600000"));
    log4jPropFile = cliParser.getOptionValue("log_properties", "");

    return true;
}