List of usage examples for org.apache.hadoop.yarn.client.api YarnClient createYarnClient
@Public public static YarnClient createYarnClient()
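Before the project-specific examples below, here is a minimal sketch of the lifecycle every example follows: create the client with createYarnClient(), init it with a Configuration, start it, issue requests to the ResourceManager, and stop it. The class name YarnClientExample and the main method are illustrative only; the YarnClient, YarnConfiguration, and getApplications() calls match those used in the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnClientExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new YarnConfiguration();
        // createYarnClient() returns an uninitialized client; init() and start()
        // must be called before any request is made to the ResourceManager.
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(conf);
        yarnClient.start();
        try {
            // List the applications known to the ResourceManager.
            for (ApplicationReport report : yarnClient.getApplications()) {
                System.out.println(report.getApplicationId() + " " + report.getYarnApplicationState());
            }
        } finally {
            // Always stop the client to release its RPC resources.
            yarnClient.stop();
        }
    }
}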
From source file:com.bigjob.Client.java
License:Apache License
/**
 * Parse command line options
 * @param args Parsed command line options
 * @return Whether the init was successful to run the client
 * @throws ParseException
 */
public boolean init(String[] args) throws ParseException {
    CommandLine cliParser = new GnuParser().parse(opts, args);

    if (args.length == 0) {
        throw new IllegalArgumentException("No args specified for client to initialize");
    }

    if (cliParser.hasOption("log_properties")) {
        String log4jPath = cliParser.getOptionValue("log_properties");
        try {
            Log4jPropertyHelper.updateLog4jConfiguration(Client.class, log4jPath);
        } catch (Exception e) {
            LOG.warn("Can not set up custom log4j properties. " + e);
        }
    }

    if (cliParser.hasOption("help")) {
        printUsage();
        return false;
    }

    if (cliParser.hasOption("debug")) {
        debugFlag = true;
    }

    if (fileExist("log4j.properties")) {
        try {
            Log4jPropertyHelper.updateLog4jConfiguration(ApplicationMaster.class, "log4j.properties");
        } catch (Exception e) {
            LOG.warn("Can not set up custom log4j properties. " + e);
        }
    } else {
        LOG.warn("No Log4j found");
    }

    yarnClient = YarnClient.createYarnClient();

    String configPath = cliParser.getOptionValue("config", "");
    if (configPath.compareTo("") == 0) {
        conf = new YarnConfiguration();
    } else {
        conf = new YarnConfiguration();
    }
    yarnClient.init(conf);

    appName = cliParser.getOptionValue("appname", appName);
    amPriority = Integer.parseInt(cliParser.getOptionValue("priority", "0"));
    amQueue = cliParser.getOptionValue("queue", "default");
    amMemory = Integer.parseInt(cliParser.getOptionValue("master_memory", "10"));
    amVCores = Integer.parseInt(cliParser.getOptionValue("master_vcores", "1"));

    serviceUrl = cliParser.getOptionValue("service_url", "yarn://localhost?fs=hdfs://localhost:9000");
    try {
        org.apache.commons.httpclient.URI url = new org.apache.commons.httpclient.URI(serviceUrl, false);

        // YARN URL
        String host = url.getHost();
        int port = 8032;
        if (url.getPort() != -1) {
            port = url.getPort();
        }
        String yarnRM = host + ":" + port;
        LOG.info("Connecting to YARN at: " + yarnRM);
        conf.set("yarn.resourcemanager.address", yarnRM);

        // Hadoop FS/HDFS URL
        String query = url.getQuery();
        if (query.startsWith("fs=")) {
            dfsUrl = query.substring(3, query.length());
            LOG.info("Connect to Hadoop FS: " + dfsUrl);
            conf.set("fs.defaultFS", dfsUrl);
        }
    } catch (Exception e) {
        e.printStackTrace();
    }

    if (amMemory < 0) {
        throw new IllegalArgumentException("Invalid memory specified for application master, exiting."
                + " Specified memory=" + amMemory);
    }
    if (amVCores < 0) {
        throw new IllegalArgumentException("Invalid virtual cores specified for application master, exiting."
                + " Specified virtual cores=" + amVCores);
    }

    if (!cliParser.hasOption("jar")) {
        throw new IllegalArgumentException("No jar file specified for application master");
    }
    appMasterJar = cliParser.getOptionValue("jar");

    if (!cliParser.hasOption("shell_command") && !cliParser.hasOption("shell_script")) {
        throw new IllegalArgumentException(
                "No shell command or shell script specified to be executed by application master");
    } else if (cliParser.hasOption("shell_command") && cliParser.hasOption("shell_script")) {
        throw new IllegalArgumentException(
                "Can not specify shell_command option " + "and shell_script option at the same time");
    } else if (cliParser.hasOption("shell_command")) {
        shellCommand = cliParser.getOptionValue("shell_command");
    } else {
        shellScriptPath = cliParser.getOptionValue("shell_script");
    }

    if (cliParser.hasOption("shell_args")) {
        shellArgs = cliParser.getOptionValues("shell_args");
    }
    if (cliParser.hasOption("shell_env")) {
        String envs[] = cliParser.getOptionValues("shell_env");
        for (String env : envs) {
            env = env.trim();
            int index = env.indexOf('=');
            if (index == -1) {
                shellEnv.put(env, "");
                continue;
            }
            String key = env.substring(0, index);
            String val = "";
            if (index < (env.length() - 1)) {
                val = env.substring(index + 1);
            }
            shellEnv.put(key, val);
        }
    }
    shellCmdPriority = Integer.parseInt(cliParser.getOptionValue("shell_cmd_priority", "0"));

    containerMemory = Integer.parseInt(cliParser.getOptionValue("container_memory", "10"));
    containerVirtualCores = Integer.parseInt(cliParser.getOptionValue("container_vcores", "1"));
    numContainers = Integer.parseInt(cliParser.getOptionValue("num_containers", "1"));

    if (containerMemory < 0 || containerVirtualCores < 0 || numContainers < 1) {
        throw new IllegalArgumentException("Invalid no. of containers or container memory/vcores specified,"
                + " exiting." + " Specified containerMemory=" + containerMemory + ", containerVirtualCores="
                + containerVirtualCores + ", numContainer=" + numContainers);
    }

    clientTimeout = Integer.parseInt(cliParser.getOptionValue("timeout", "600000"));
    log4jPropFile = cliParser.getOptionValue("log_properties", "");

    return true;
}
From source file:com.cfets.door.yarn.jboss.JBossClient.java
License:Apache License
public JBossClient(Configuration conf) throws Exception {
    this.conf = conf;
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    opts = new Options();
    opts.addOption("appname", true, "Application Name. Default value - JBoss on YARN");
    opts.addOption("priority", true, "Application Priority. Default 0");
    opts.addOption("queue", true, "RM Queue in which this application is to be submitted");
    opts.addOption("timeout", true, "Application timeout in milliseconds");
    opts.addOption("master_memory", true, "Amount of memory in MB to be requested to run the application master");
    opts.addOption("jar", true, "JAR file containing the application master");
    opts.addOption("container_memory", true, "Amount of memory in MB to be requested to run the shell command");
    opts.addOption("num_containers", true, "No. of containers on which the shell command needs to be executed");
    opts.addOption("admin_user", true, "User id for initial administrator user");
    opts.addOption("admin_password", true, "Password for initial administrator user");
    opts.addOption("log_properties", true, "log4j.properties file");
    opts.addOption("debug", false, "Dump out debug information");
    opts.addOption("help", false, "Print usage");
}
From source file:com.cloudera.kitten.client.service.YarnClientFactory.java
License:Open Source License
@Override
public YarnClient connect() {
    YarnClient client = YarnClient.createYarnClient();
    client.init(conf);
    client.start();
    return client;
}
From source file:com.cloudera.llama.am.MiniLlama.java
License:Apache License
private Map<String, String> getDataNodeNodeManagerMapping(Configuration conf) throws Exception {
    Map<String, String> map = new HashMap<String, String>();
    DFSClient dfsClient = new DFSClient(new URI(conf.get("fs.defaultFS")), conf);
    DatanodeInfo[] DNs = dfsClient.datanodeReport(HdfsConstants.DatanodeReportType.ALL);
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    List<NodeId> nodeIds = getYarnNodeIds(conf);
    if (nodeIds.size() != DNs.length) {
        throw new RuntimeException("Number of DNs and NMs differ, MiniLlama "
                + "node mapping requires them to be equal at startup");
    }
    LOG.info("HDFS/YARN mapping:");
    for (int i = 0; i < DNs.length; i++) {
        String key = DNs[i].getXferAddr();
        NodeId nodeId = nodeIds.get(i);
        String value = nodeId.getHost() + ":" + nodeId.getPort();
        map.put(key, value);
        LOG.info("  DN/NM: " + key + "/" + value);
    }
    yarnClient.stop();
    nodes = map.size();
    verifySingleHost(map.keySet(), "DataNode");
    verifySingleHost(map.values(), "NodeManager");
    return map;
}
From source file:com.cloudera.llama.am.MiniLlama.java
License:Apache License
private List<NodeId> getYarnNodeIds(Configuration conf) throws Exception {
    List<NodeId> list = new ArrayList<NodeId>();
    if (miniYarn != null) {
        int clusterNodes = getConf().getInt(MINI_CLUSTER_NODES_KEY, 1);
        for (int i = 0; i < clusterNodes; i++) {
            list.add(miniYarn.getNodeManager(i).getNMContext().getNodeId());
        }
    } else {
        YarnClient yarnClient = YarnClient.createYarnClient();
        yarnClient.init(conf);
        yarnClient.start();
        List<NodeReport> nodes = yarnClient.getNodeReports(NodeState.RUNNING);
        for (int i = 0; i < nodes.size(); i++) {
            list.add(nodes.get(i).getNodeId());
        }
        yarnClient.stop();
    }
    return list;
}
From source file:com.cloudera.llama.am.yarn.TestLlamaAMWithYarn.java
License:Apache License
/**
 * Test to verify Llama deletes old reservations on startup.
 */
@Test(timeout = 60000)
public void testLlamaDeletesOldReservationsOnStartup() throws Exception {
    YarnClient client = null;
    LlamaAM llamaAM1 = null, llamaAM2 = null, llamaAM3 = null;
    EnumSet<YarnApplicationState> running = EnumSet.of(YarnApplicationState.RUNNING);
    try {
        startYarn(createMiniYarnConfig(false));
        client = YarnClient.createYarnClient();
        client.init(miniYarn.getConfig());
        client.start();
        Assert.assertEquals("Non-zero YARN apps even before any reservations", 0,
                client.getApplications().size());

        llamaAM1 = LlamaAM.create(getLlamaConfiguration());
        llamaAM1.start();
        Assert.assertEquals("Mismatch between #YARN apps and #Queues", 2,
                client.getApplications(running).size());

        // Start another Llama of the same cluster-id to see if old YARN apps
        // are deleted.
        llamaAM2 = LlamaAM.create(getLlamaConfiguration());
        llamaAM2.start();
        Assert.assertEquals("Mismatch between #YARN apps and #Queues. Only apps"
                + " from the latest started Llama should be running.", 2,
                client.getApplications(running).size());

        // Start Llama of different cluster-id to see old YARN apps are not
        // deleted.
        Configuration confWithDifferentCluserId = getLlamaConfiguration();
        confWithDifferentCluserId.set(LlamaAM.CLUSTER_ID, "new-cluster");
        llamaAM3 = LlamaAM.create(confWithDifferentCluserId);
        llamaAM3.start();
        Assert.assertEquals("Mismatch between #YARN apps and #Queues for "
                + "multiple clusters", 4, client.getApplications(running).size());
    } finally {
        client.stop();
        llamaAM1.stop();
        llamaAM2.stop();
        llamaAM3.stop();
        stopYarn();
    }
}
From source file:com.cloudera.llama.am.yarn.YarnRMConnector.java
License:Apache License
private void _start() throws Exception {
    yarnClient = YarnClient.createYarnClient();
    yarnClient.init(yarnConf);
    yarnClient.start();
}
From source file:com.continuuity.weave.internal.yarn.Hadoop21YarnAppClient.java
License:Apache License
public Hadoop21YarnAppClient(Configuration configuration) {
    this.yarnClient = YarnClient.createYarnClient();
    yarnClient.init(configuration);
}
From source file:com.datatorrent.stram.client.StramAgent.java
License:Apache License
private StramWebServicesInfo retrieveWebServicesInfo(String appId) {
    YarnClient yarnClient = YarnClient.createYarnClient();
    String url;
    try {
        yarnClient.init(conf);
        yarnClient.start();
        ApplicationReport ar = yarnClient.getApplicationReport(ConverterUtils.toApplicationId(appId));
        String trackingUrl = ar.getTrackingUrl();
        if (!trackingUrl.startsWith("http://") && !trackingUrl.startsWith("https://")) {
            url = "http://" + trackingUrl;
        } else {
            url = trackingUrl;
        }
        if (StringUtils.isBlank(url)) {
            LOG.error("Cannot get tracking url from YARN");
            return null;
        }
        if (url.endsWith("/")) {
            url = url.substring(0, url.length() - 1);
        }
        url += WebServices.PATH;
    } catch (Exception ex) {
        //LOG.error("Caught exception when retrieving web services info", ex);
        return null;
    } finally {
        yarnClient.stop();
    }

    WebServicesClient webServicesClient = new WebServicesClient();
    try {
        JSONObject response;
        String secToken = null;
        ClientResponse clientResponse;
        int i = 0;
        while (true) {
            LOG.debug("Accessing url {}", url);
            clientResponse = webServicesClient.process(url, ClientResponse.class,
                    new WebServicesClient.GetWebServicesHandler<ClientResponse>());
            String val = clientResponse.getHeaders().getFirst("Refresh");
            if (val == null) {
                break;
            }
            int index = val.indexOf("url=");
            if (index < 0) {
                break;
            }
            url = val.substring(index + 4);
            if (i++ > MAX_REDIRECTS) {
                LOG.error("Cannot get web service info -- exceeded the max number of redirects");
                return null;
            }
        }

        if (!UserGroupInformation.isSecurityEnabled()) {
            response = new JSONObject(clientResponse.getEntity(String.class));
        } else {
            if (UserGroupInformation.isSecurityEnabled()) {
                for (NewCookie nc : clientResponse.getCookies()) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Cookie " + nc.getName() + " " + nc.getValue());
                    }
                    if (nc.getName().equals(StramWSFilter.CLIENT_COOKIE)) {
                        secToken = nc.getValue();
                    }
                }
            }
            response = new JSONObject(clientResponse.getEntity(String.class));
        }
        String version = response.getString("version");
        response = webServicesClient.process(url + "/" + version + "/stram/info", JSONObject.class,
                new WebServicesClient.GetWebServicesHandler<JSONObject>());
        String appMasterUrl = response.getString("appMasterTrackingUrl");
        String appPath = response.getString("appPath");
        String user = response.getString("user");
        JSONObject permissionsInfo = null;
        FSDataInputStream is = null;
        try {
            is = fileSystem.open(new Path(appPath, "permissions.json"));
            permissionsInfo = new JSONObject(IOUtils.toString(is));
        } catch (JSONException ex) {
            LOG.error("Error reading from the permissions info. Ignoring", ex);
        } catch (IOException ex) {
            // ignore
        } finally {
            IOUtils.closeQuietly(is);
        }
        return new StramWebServicesInfo(appMasterUrl, version, appPath, user, secToken, permissionsInfo);
    } catch (Exception ex) {
        LOG.debug("Caught exception when retrieving web service info for app " + appId, ex);
        return null;
    }
}
From source file:com.datatorrent.stram.security.StramUserLogin.java
License:Apache License
public static long refreshTokens(long tokenLifeTime, String destinationDir, String destinationFile,
        final Configuration conf, String hdfsKeyTabFile, final Credentials credentials,
        final InetSocketAddress rmAddress, final boolean renewRMToken) throws IOException {
    long expiryTime = System.currentTimeMillis() + tokenLifeTime;
    // renew tokens
    final String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
    if (tokenRenewer == null || tokenRenewer.length() == 0) {
        throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
    }
    FileSystem fs = FileSystem.newInstance(conf);
    File keyTabFile;
    try {
        keyTabFile = FSUtil.copyToLocalFileSystem(fs, destinationDir, destinationFile, hdfsKeyTabFile, conf);
    } finally {
        fs.close();
    }
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
            UserGroupInformation.getCurrentUser().getUserName(), keyTabFile.getAbsolutePath());
    try {
        ugi.doAs(new PrivilegedExceptionAction<Object>() {
            @Override
            public Object run() throws Exception {
                FileSystem fs1 = FileSystem.newInstance(conf);
                YarnClient yarnClient = null;
                if (renewRMToken) {
                    yarnClient = YarnClient.createYarnClient();
                    yarnClient.init(conf);
                    yarnClient.start();
                }
                Credentials creds = new Credentials();
                try {
                    fs1.addDelegationTokens(tokenRenewer, creds);
                    if (renewRMToken) {
                        org.apache.hadoop.yarn.api.records.Token rmDelToken = yarnClient
                                .getRMDelegationToken(new Text(tokenRenewer));
                        Token<RMDelegationTokenIdentifier> rmToken = ConverterUtils.convertFromYarn(rmDelToken,
                                rmAddress);
                        creds.addToken(rmToken.getService(), rmToken);
                    }
                } finally {
                    fs1.close();
                    if (renewRMToken) {
                        yarnClient.stop();
                    }
                }
                credentials.addAll(creds);
                return null;
            }
        });
        UserGroupInformation.getCurrentUser().addCredentials(credentials);
    } catch (InterruptedException e) {
        LOG.error("Error while renewing tokens ", e);
        expiryTime = System.currentTimeMillis();
    } catch (IOException e) {
        LOG.error("Error while renewing tokens ", e);
        expiryTime = System.currentTimeMillis();
    }
    LOG.debug("number of tokens: {}", credentials.getAllTokens().size());
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.debug("updated token: {}", token);
    }
    keyTabFile.delete();
    return expiryTime;
}