List of usage examples for org.apache.hadoop.yarn.conf YarnConfiguration RM_WEBAPP_ADDRESS
String RM_WEBAPP_ADDRESS
To view the source code for org.apache.hadoop.yarn.conf.YarnConfiguration.RM_WEBAPP_ADDRESS, click the Source Link.
From source file:co.cask.cdap.operations.yarn.YarnInfo.java
License:Apache License
/**
 * Builds the URL of the (non-HA) YARN ResourceManager web app from the
 * configured webapp address, using HTTPS when it is enabled.
 *
 * @return the ResourceManager web URL with an empty path
 * @throws MalformedURLException if the configured address is missing or has no port separator
 */
private URL getNonHAWebURL() throws MalformedURLException {
    String protocol;
    String hostPort;
    if (isHttpsEnabled()) {
        protocol = "https";
        hostPort = conf.get(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS);
    } else {
        protocol = "http";
        hostPort = conf.get(YarnConfiguration.RM_WEBAPP_ADDRESS);
    }
    // Guard against a missing or malformed address: the original code threw
    // NullPointerException (unset property) or StringIndexOutOfBoundsException
    // (no ':' -> indexOf returns -1) instead of a meaningful exception.
    if (hostPort == null || hostPort.indexOf(':') < 0) {
        throw new MalformedURLException("Invalid ResourceManager webapp address: " + hostPort);
    }
    int separatorIndex = hostPort.indexOf(':');
    String host = hostPort.substring(0, separatorIndex);
    int port = Integer.parseInt(hostPort.substring(separatorIndex + 1));
    return new URL(protocol, host, port, "");
}
From source file:com.datatorrent.stram.client.StramClientUtilsTest.java
License:Apache License
/**
 * Verifies StramClientUtils resolution of the ResourceManager web address:
 * plain HTTP vs HTTPS selection via HADOOP_SSL_ENABLED_KEY, fallback to the
 * local host name when the configured host is unknown or a loopback address,
 * and per-RM-id resolution plus enumeration of both RMs when YARN HA is
 * enabled. The anonymous YarnConfiguration subclass appends the configured
 * RM HA id to the property name before delegating, mimicking HA lookups.
 * NOTE(review): expected values use getHostString(...), presumably a
 * test helper that normalizes host formatting — verify against this class.
 */
@Test public void testRMWebAddress() throws UnknownHostException { Configuration conf = new YarnConfiguration(new Configuration(false)) { @Override//from ww w.jav a 2s . c om public InetSocketAddress getSocketAddr(String name, String defaultAddress, int defaultPort) { String rmId = get(ConfigUtils.RM_HA_ID); if (rmId != null) { name = name + "." + rmId; } return super.getSocketAddr(name, defaultAddress, defaultPort); } }; // basic test conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY, false); conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "192.168.1.1:8032"); conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS, "192.168.1.2:8032"); Assert.assertEquals(getHostString("192.168.1.1") + ":8032", StramClientUtils.getSocketConnectString(StramClientUtils.getRMWebAddress(conf, null))); List<InetSocketAddress> addresses = StramClientUtils.getRMAddresses(conf); Assert.assertEquals(1, addresses.size()); Assert.assertEquals(getHostString("192.168.1.1") + ":8032", StramClientUtils.getSocketConnectString(addresses.get(0))); conf.setBoolean(CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_KEY, true); Assert.assertEquals(getHostString("192.168.1.2") + ":8032", StramClientUtils.getSocketConnectString(StramClientUtils.getRMWebAddress(conf, null))); addresses = StramClientUtils.getRMAddresses(conf); Assert.assertEquals(1, addresses.size()); Assert.assertEquals(getHostString("192.168.1.2") + ":8032", StramClientUtils.getSocketConnectString(addresses.get(0))); // set localhost if host is unknown conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS, "someunknownhost.:8032"); Assert.assertEquals(InetAddress.getLocalHost().getCanonicalHostName() + ":8032", StramClientUtils.getSocketConnectString(StramClientUtils.getRMWebAddress(conf, null))); // set localhost conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS, "127.0.0.1:8032"); Assert.assertEquals(InetAddress.getLocalHost().getCanonicalHostName() + ":8032", 
StramClientUtils.getSocketConnectString(StramClientUtils.getRMWebAddress(conf, null))); // test when HA is enabled conf.setBoolean(ConfigUtils.RM_HA_ENABLED, true); conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm1", "192.168.1.1:8032"); conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS + ".rm2", "192.168.1.2:8032"); conf.set("yarn.resourcemanager.ha.rm-ids", "rm1,rm2"); Assert.assertEquals(getHostString("192.168.1.1") + ":8032", StramClientUtils.getSocketConnectString(StramClientUtils.getRMWebAddress(conf, "rm1"))); Assert.assertEquals(getHostString("192.168.1.2") + ":8032", StramClientUtils.getSocketConnectString(StramClientUtils.getRMWebAddress(conf, "rm2"))); addresses = StramClientUtils.getRMAddresses(conf); Assert.assertEquals(2, addresses.size()); Assert.assertEquals(getHostString("192.168.1.1") + ":8032", StramClientUtils.getSocketConnectString(addresses.get(0))); Assert.assertEquals(getHostString("192.168.1.2") + ":8032", StramClientUtils.getSocketConnectString(addresses.get(1))); }
From source file:com.github.sakserv.minicluster.impl.MRLocalCluster.java
License:Apache License
@Override public void configure() throws Exception { // Handle Windows WindowsLibsUtils.setHadoopHome();//from w w w . j a va2s. c o m configuration.set(YarnConfiguration.RM_ADDRESS, resourceManagerAddress); configuration.set(YarnConfiguration.RM_HOSTNAME, resourceManagerHostname); configuration.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, resourceManagerSchedulerAddress); configuration.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, resourceManagerResourceTrackerAddress); configuration.set(YarnConfiguration.RM_WEBAPP_ADDRESS, resourceManagerWebappAddress); configuration.set(JHAdminConfig.MR_HISTORY_ADDRESS, jobHistoryAddress); configuration.set(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, "true"); configuration.set(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, "true"); if (getUseInJvmContainerExecutor()) { configuration.set(YarnConfiguration.NM_CONTAINER_EXECUTOR, inJvmContainerExecutorClass); configuration.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName()); configuration.set("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class.getName()); } if (null != hdfsDefaultFs) { configuration.set("fs.defaultFS", hdfsDefaultFs); configuration.set("dfs.replication", "1"); } }
From source file:com.github.sakserv.minicluster.impl.YarnLocalCluster.java
License:Apache License
@Override public void configure() throws Exception { // Handle Windows WindowsLibsUtils.setHadoopHome();//from ww w . ja v a2s. c om configuration.set(YarnConfiguration.RM_ADDRESS, resourceManagerAddress); configuration.set(YarnConfiguration.RM_HOSTNAME, resourceManagerHostname); configuration.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, resourceManagerSchedulerAddress); configuration.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, resourceManagerResourceTrackerAddress); configuration.set(YarnConfiguration.RM_WEBAPP_ADDRESS, resourceManagerWebappAddress); configuration.set(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, "true"); if (getUseInJvmContainerExecutor()) { configuration.set(YarnConfiguration.NM_CONTAINER_EXECUTOR, inJvmContainerExecutorClass); configuration.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName()); configuration.set("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class.getName()); } }
From source file:com.splicemachine.test.SpliceTestYarnPlatform.java
License:Apache License
public void start(int nodeCount) throws Exception { if (yarnCluster == null) { LOG.info("Starting up YARN cluster with " + nodeCount + " nodes. Server yarn-site.xml is: " + yarnSiteConfigURL);/*from ww w . j av a 2 s. com*/ conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, "localhost:0"); yarnCluster = new MiniYARNClusterSplice(SpliceTestYarnPlatform.class.getSimpleName(), nodeCount, 1, 1); yarnCluster.init(conf); yarnCluster.start(); NodeManager nm = getNodeManager(); waitForNMToRegister(nm); // save the server config to classpath so yarn clients can read it Configuration yarnClusterConfig = yarnCluster.getConfig(); yarnClusterConfig.set("yarn.application.classpath", new File(yarnSiteConfigURL.getPath()).getParent()); //write the document to a buffer (not directly to the file, as that //can cause the file being written to get read -which will then fail. ByteArrayOutputStream bytesOut = new ByteArrayOutputStream(); yarnClusterConfig.writeXml(bytesOut); bytesOut.close(); //write the bytes to the file in the classpath OutputStream os = new FileOutputStream(new File(yarnSiteConfigURL.getPath())); os.write(bytesOut.toByteArray()); os.close(); } LOG.info("YARN cluster started."); }
From source file:org.apache.kylin.tool.common.HadoopConfExtractor.java
License:Apache License
public static String extractYarnMasterUrl(Configuration conf) { KylinConfig config = KylinConfig.getInstanceFromEnv(); final String yarnStatusCheckUrl = config.getYarnStatusCheckUrl(); Pattern pattern = Pattern.compile("(http(s)?://)([^:]*):([^/])*.*"); if (yarnStatusCheckUrl != null) { Matcher m = pattern.matcher(yarnStatusCheckUrl); if (m.matches()) { return m.group(1) + m.group(2) + ":" + m.group(3); }// w w w . j a v a 2 s .co m } logger.info("kylin.engine.mr.yarn-check-status-url" + " is not set, read from hadoop configuration"); String webappConfKey, defaultAddr; if (YarnConfiguration.useHttps(conf)) { webappConfKey = YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS; defaultAddr = YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_ADDRESS; } else { webappConfKey = YarnConfiguration.RM_WEBAPP_ADDRESS; defaultAddr = YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS; } String rmWebHost; if (HAUtil.isHAEnabled(conf)) { YarnConfiguration yarnConf = new YarnConfiguration(conf); String active = RMHAUtils.findActiveRMHAId(yarnConf); rmWebHost = HAUtil.getConfValueForRMInstance(HAUtil.addSuffix(webappConfKey, active), defaultAddr, yarnConf); } else { rmWebHost = HAUtil.getConfValueForRMInstance(webappConfKey, defaultAddr, conf); } if (StringUtils.isEmpty(rmWebHost)) { return null; } if (!rmWebHost.startsWith("http://") && !rmWebHost.startsWith("https://")) { rmWebHost = (YarnConfiguration.useHttps(conf) ? "https://" : "http://") + rmWebHost; } Matcher m = pattern.matcher(rmWebHost); Preconditions.checkArgument(m.matches(), "Yarn master URL not found."); logger.info("yarn master url: " + rmWebHost); return rmWebHost; }
From source file:org.apache.kylin.tool.JobTaskCounterExtractor.java
License:Apache License
private String getRestCheckUrl() { KylinConfig config = KylinConfig.getInstanceFromEnv(); final String yarnStatusCheckUrl = config.getYarnStatusCheckUrl(); Pattern pattern = Pattern.compile("(http://)(.*):.*"); if (yarnStatusCheckUrl != null) { Matcher m = pattern.matcher(yarnStatusCheckUrl); m.matches();// w ww . j a v a 2 s . co m yarnUrl = m.group(1) + m.group(2) + ":19888"; return yarnUrl; } else { logger.info("kylin.job.yarn.app.rest.check.status.url" + " is not set read from hadoop configuration"); } Configuration conf = HadoopUtil.getCurrentConfiguration(); String rmWebHost = HAUtil.getConfValueForRMInstance(YarnConfiguration.RM_WEBAPP_ADDRESS, YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS, conf); if (HAUtil.isHAEnabled(conf)) { YarnConfiguration yarnConf = new YarnConfiguration(conf); String active = RMHAUtils.findActiveRMHAId(yarnConf); rmWebHost = HAUtil.getConfValueForRMInstance( HAUtil.addSuffix(YarnConfiguration.RM_WEBAPP_ADDRESS, active), YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS, yarnConf); } if (StringUtils.isEmpty(rmWebHost)) { return null; } if (rmWebHost.startsWith("http://") || rmWebHost.startsWith("https://")) { //do nothing } else { rmWebHost = "http://" + rmWebHost; } Matcher m = pattern.matcher(rmWebHost); m.matches(); return m.group(1) + m.group(2) + ":19888"; }
From source file:org.apache.kylin.tool.MrJobInfoExtractor.java
License:Apache License
/**
 * Populates jobHistoryUrlBase and yarnMasterUrlBase. Prefers the explicitly
 * configured Kylin yarn-status-check URL; otherwise derives both from the RM
 * webapp address in the Hadoop configuration (HA-aware).
 */
private void extractRestCheckUrl() {
    KylinConfig config = KylinConfig.getInstanceFromEnv();
    final String yarnStatusCheckUrl = config.getYarnStatusCheckUrl();
    // Groups: 1 = "http://", 2 = host, 3 = port. The original "([^/])*"
    // captured only the LAST port character (e.g. "8088" -> "8"); "([^/]*)"
    // captures the whole port.
    Pattern pattern = Pattern.compile("(http://)([^:]*):([^/]*).*");
    if (yarnStatusCheckUrl != null) {
        Matcher m = pattern.matcher(yarnStatusCheckUrl);
        if (m.matches()) {
            jobHistoryUrlBase = m.group(1) + m.group(2) + ":19888";
            yarnMasterUrlBase = m.group(1) + m.group(2) + ":" + m.group(3);
            // Return here: the original fell through and silently overwrote
            // the explicitly configured URLs with values read from the
            // hadoop configuration.
            return;
        }
    }
    logger.info("kylin.engine.mr.yarn-check-status-url" + " is not set, read from hadoop configuration");
    Configuration conf = HadoopUtil.getCurrentConfiguration();
    String rmWebHost = HAUtil.getConfValueForRMInstance(YarnConfiguration.RM_WEBAPP_ADDRESS,
            YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS, conf);
    if (HAUtil.isHAEnabled(conf)) {
        // In HA mode, resolve the address of the currently active RM instance.
        YarnConfiguration yarnConf = new YarnConfiguration(conf);
        String active = RMHAUtils.findActiveRMHAId(yarnConf);
        rmWebHost = HAUtil.getConfValueForRMInstance(
                HAUtil.addSuffix(YarnConfiguration.RM_WEBAPP_ADDRESS, active),
                YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS, yarnConf);
    }
    if (StringUtils.isEmpty(rmWebHost)) {
        return;
    }
    // Ensure a scheme prefix so the pattern below can match.
    if (!rmWebHost.startsWith("http://") && !rmWebHost.startsWith("https://")) {
        rmWebHost = "http://" + rmWebHost;
    }
    Matcher m = pattern.matcher(rmWebHost);
    Preconditions.checkArgument(m.matches(), "Yarn master URL not found.");
    yarnMasterUrlBase = rmWebHost;
    jobHistoryUrlBase = m.group(1)
            + HAUtil.getConfValueForRMInstance("mapreduce.jobhistory.webapp.address", m.group(2) + ":19888", conf);
}
From source file:org.apache.phoenix.util.PhoenixMRJobUtil.java
License:Apache License
public static int getRMPort(Configuration conf) throws IOException { String rmHostPortStr = conf.get(YarnConfiguration.RM_WEBAPP_ADDRESS); String[] rmHostPort = rmHostPortStr.split(":"); if (rmHostPort == null || rmHostPort.length != 2) { throw new IOException( "Invalid value for property " + YarnConfiguration.RM_WEBAPP_ADDRESS + " = " + rmHostPortStr); }// w w w. j a v a 2s . co m int rmPort = Integer.parseInt(rmHostPort[1]); return rmPort; }
From source file:org.apache.slider.server.appmaster.SliderAppMaster.java
License:Apache License
// NOTE(review): this method performs the entire Slider AM bootstrap sequence
// (load instance definition -> select provider -> register with RM -> build
// state -> launch provider -> block until completion). Statement order is
// significant throughout, and most of the middle section must run under the
// appState lock, so the code is left byte-identical; only review comments
// are added at safe statement boundaries.
/** * Create and run the cluster.//from w w w .ja v a 2 s.com * @return exit code * @throws Throwable on a failure */ private int createAndRunCluster(String clustername) throws Throwable { //load the cluster description from the cd argument String sliderClusterDir = serviceArgs.getSliderClusterURI(); URI sliderClusterURI = new URI(sliderClusterDir); Path clusterDirPath = new Path(sliderClusterURI); log.info("Application defined at {}", sliderClusterURI); SliderFileSystem fs = getClusterFS(); // build up information about the running application -this // will be passed down to the cluster status MapOperations appInformation = new MapOperations(); AggregateConf instanceDefinition = InstanceIO.loadInstanceDefinitionUnresolved(fs, clusterDirPath); instanceDefinition.setName(clustername); log.info("Deploying cluster {}:", instanceDefinition); stateForProviders.setApplicationName(clustername); Configuration serviceConf = getConfig(); SecurityConfiguration securityConfiguration = new SecurityConfiguration(serviceConf, instanceDefinition, clustername); // obtain security state boolean securityEnabled = securityConfiguration.isSecurityEnabled(); // set the global security flag for the instance definition instanceDefinition.getAppConfOperations().set(KEY_SECURITY_ENABLED, securityEnabled); // triggers resolution and snapshotting in agent appState.updateInstanceDefinition(instanceDefinition); File confDir = getLocalConfDir(); if (!confDir.exists() || !confDir.isDirectory()) { log.info("Conf dir {} does not exist.", confDir); File parentFile = confDir.getParentFile(); log.info("Parent dir {}:\n{}", parentFile, SliderUtils.listDir(parentFile)); } // IP filtering serviceConf.set(HADOOP_HTTP_FILTER_INITIALIZERS, AM_FILTER_NAME); //get our provider MapOperations globalInternalOptions = getGlobalInternalOptions(); String providerType = globalInternalOptions.getMandatoryOption(InternalKeys.INTERNAL_PROVIDER_NAME); log.info("Cluster provider type is {}", providerType); 
// NOTE(review): the provider type chosen above selects the factory below;
// the provider service is init'ed but deliberately NOT started yet.
SliderProviderFactory factory = SliderProviderFactory.createSliderProviderFactory(providerType); providerService = factory.createServerProvider(); // init the provider BUT DO NOT START IT YET initAndAddService(providerService); providerRMOperationHandler = new ProviderNotifyingOperationHandler(providerService); // create a slider AM provider sliderAMProvider = new SliderAMProviderService(); initAndAddService(sliderAMProvider); InetSocketAddress address = SliderUtils.getRmSchedulerAddress(serviceConf); log.info("RM is at {}", address); yarnRPC = YarnRPC.create(serviceConf); /* * Extract the container ID. This is then * turned into an (incompete) container */ appMasterContainerID = ConverterUtils.toContainerId( SliderUtils.mandatoryEnvVariable(ApplicationConstants.Environment.CONTAINER_ID.name())); appAttemptID = appMasterContainerID.getApplicationAttemptId(); ApplicationId appid = appAttemptID.getApplicationId(); log.info("AM for ID {}", appid.getId()); appInformation.put(StatusKeys.INFO_AM_CONTAINER_ID, appMasterContainerID.toString()); appInformation.put(StatusKeys.INFO_AM_APP_ID, appid.toString()); appInformation.put(StatusKeys.INFO_AM_ATTEMPT_ID, appAttemptID.toString()); Map<String, String> envVars; List<Container> liveContainers; /** * It is critical this section is synchronized, to stop async AM events * arriving while registering a restarting AM. 
*/ synchronized (appState) { int heartbeatInterval = HEARTBEAT_INTERVAL; //add the RM client -this brings the callbacks in asyncRMClient = AMRMClientAsync.createAMRMClientAsync(heartbeatInterval, this); addService(asyncRMClient); //now bring it up deployChildService(asyncRMClient); //nmclient relays callbacks back to this class nmClientAsync = new NMClientAsyncImpl("nmclient", this); deployChildService(nmClientAsync); // set up secret manager secretManager = new ClientToAMTokenSecretManager(appAttemptID, null); if (securityEnabled) { // fix up the ACLs if they are not set String acls = getConfig().get(SliderXmlConfKeys.KEY_PROTOCOL_ACL); if (acls == null) { getConfig().set(SliderXmlConfKeys.KEY_PROTOCOL_ACL, "*"); } } //bring up the Slider RPC service startSliderRPCServer(instanceDefinition); rpcServiceAddress = rpcService.getConnectAddress(); appMasterHostname = rpcServiceAddress.getHostName(); appMasterRpcPort = rpcServiceAddress.getPort(); appMasterTrackingUrl = null; log.info("AM Server is listening at {}:{}", appMasterHostname, appMasterRpcPort); appInformation.put(StatusKeys.INFO_AM_HOSTNAME, appMasterHostname); appInformation.set(StatusKeys.INFO_AM_RPC_PORT, appMasterRpcPort); log.info("Starting Yarn registry"); registryOperations = startRegistryOperationsService(); log.info(registryOperations.toString()); //build the role map List<ProviderRole> providerRoles = new ArrayList<ProviderRole>(providerService.getRoles()); providerRoles.addAll(SliderAMClientProvider.ROLES); // Start up the WebApp and track the URL for it certificateManager = new CertificateManager(); MapOperations component = instanceDefinition.getAppConfOperations() .getComponent(SliderKeys.COMPONENT_AM); certificateManager.initialize(component); certificateManager.setPassphrase(instanceDefinition.getPassphrase()); if (component.getOptionBool(AgentKeys.KEY_AGENT_TWO_WAY_SSL_ENABLED, false)) { uploadServerCertForLocalization(clustername, fs); } startAgentWebApp(appInformation, serviceConf); int 
port = getPortToRequest(instanceDefinition); webApp = new SliderAMWebApp(registryOperations); WebApps.$for(SliderAMWebApp.BASE_PATH, WebAppApi.class, new WebAppApiImpl(this, stateForProviders, providerService, certificateManager, registryOperations), RestPaths.WS_CONTEXT).withHttpPolicy(serviceConf, HttpConfig.Policy.HTTP_ONLY).at(port) .start(webApp); String scheme = WebAppUtils.HTTP_PREFIX; appMasterTrackingUrl = scheme + appMasterHostname + ":" + webApp.port(); WebAppService<SliderAMWebApp> webAppService = new WebAppService<SliderAMWebApp>("slider", webApp); webAppService.init(serviceConf); webAppService.start(); addService(webAppService); appInformation.put(StatusKeys.INFO_AM_WEB_URL, appMasterTrackingUrl + "/"); appInformation.set(StatusKeys.INFO_AM_WEB_PORT, webApp.port()); // Register self with ResourceManager // This will start heartbeating to the RM // address = SliderUtils.getRmSchedulerAddress(asyncRMClient.getConfig()); log.info("Connecting to RM at {},address tracking URL={}", appMasterRpcPort, appMasterTrackingUrl); amRegistrationData = asyncRMClient.registerApplicationMaster(appMasterHostname, appMasterRpcPort, appMasterTrackingUrl); Resource maxResources = amRegistrationData.getMaximumResourceCapability(); containerMaxMemory = maxResources.getMemory(); containerMaxCores = maxResources.getVirtualCores(); appState.setContainerLimits(maxResources.getMemory(), maxResources.getVirtualCores()); // build the handler for RM request/release operations; this uses // the max value as part of its lookup rmOperationHandler = new AsyncRMOperationHandler(asyncRMClient, maxResources); // set the RM-defined maximum cluster values appInformation.put(ResourceKeys.YARN_CORES, Integer.toString(containerMaxCores)); appInformation.put(ResourceKeys.YARN_MEMORY, Integer.toString(containerMaxMemory)); // process the initial user to obtain the set of user // supplied credentials (tokens were passed in by client). 
Remove AMRM // token and HDFS delegation token, the latter because we will provide an // up to date token for container launches (getContainerCredentials()). UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); Credentials credentials = currentUser.getCredentials(); Iterator<Token<? extends TokenIdentifier>> iter = credentials.getAllTokens().iterator(); while (iter.hasNext()) { Token<? extends TokenIdentifier> token = iter.next(); log.info("Token {}", token.getKind()); if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME) || token.getKind().equals(DelegationTokenIdentifier.HDFS_DELEGATION_KIND)) { iter.remove(); } } // at this point this credentials map is probably clear, but leaving this // code to allow for future tokens... containerCredentials = credentials; if (securityEnabled) { secretManager.setMasterKey(amRegistrationData.getClientToAMTokenMasterKey().array()); applicationACLs = amRegistrationData.getApplicationACLs(); //tell the server what the ACLs are rpcService.getServer().refreshServiceAcl(serviceConf, new SliderAMPolicyProvider()); // perform keytab based login to establish kerberos authenticated // principal. Can do so now since AM registration with RM above required // tokens associated to principal String principal = securityConfiguration.getPrincipal(); File localKeytabFile = securityConfiguration.getKeytabFile(instanceDefinition); // Now log in... 
// NOTE(review): the keytab login below replaces the cached filesystem
// reference with a kerberos-authenticated one.
login(principal, localKeytabFile); // obtain new FS reference that should be kerberos based and different // than the previously cached reference fs = getClusterFS(); } // extract container list liveContainers = amRegistrationData.getContainersFromPreviousAttempts(); //now validate the installation Configuration providerConf = providerService.loadProviderConfigurationInformation(confDir); providerService.initializeApplicationConfiguration(instanceDefinition, fs); providerService.validateApplicationConfiguration(instanceDefinition, confDir, securityEnabled); //determine the location for the role history data Path historyDir = new Path(clusterDirPath, HISTORY_DIR_NAME); //build the instance appState.buildInstance(instanceDefinition, serviceConf, providerConf, providerRoles, fs.getFileSystem(), historyDir, liveContainers, appInformation, new SimpleReleaseSelector()); providerService.rebuildContainerDetails(liveContainers, instanceDefinition.getName(), appState.getRolePriorityMap()); // add the AM to the list of nodes in the cluster appState.buildAppMasterNode(appMasterContainerID, appMasterHostname, webApp.port(), appMasterHostname + ":" + webApp.port()); // build up environment variables that the AM wants set in every container // irrespective of provider and role. 
// NOTE(review): end of the synchronized(appState) section follows shortly;
// everything after the closing brace runs without the appState lock.
envVars = new HashMap<String, String>(); if (hadoop_user_name != null) { envVars.put(HADOOP_USER_NAME, hadoop_user_name); } } String rolesTmpSubdir = appMasterContainerID.toString() + "/roles"; String amTmpDir = globalInternalOptions.getMandatoryOption(InternalKeys.INTERNAL_AM_TMP_DIR); Path tmpDirPath = new Path(amTmpDir); Path launcherTmpDirPath = new Path(tmpDirPath, rolesTmpSubdir); fs.getFileSystem().mkdirs(launcherTmpDirPath); //launcher service launchService = new RoleLaunchService(actionQueues, providerService, fs, new Path(getGeneratedConfDir()), envVars, launcherTmpDirPath); deployChildService(launchService); appState.noteAMLaunched(); //Give the provider access to the state, and AM providerService.bind(stateForProviders, actionQueues, liveContainers); sliderAMProvider.bind(stateForProviders, actionQueues, liveContainers); // chaos monkey maybeStartMonkey(); // setup token renewal and expiry handling for long lived apps // if (SliderUtils.isHadoopClusterSecure(getConfig())) { // fsDelegationTokenManager = new FsDelegationTokenManager(actionQueues); // fsDelegationTokenManager.acquireDelegationToken(getConfig()); // } // if not a secure cluster, extract the username -it will be // propagated to workers if (!UserGroupInformation.isSecurityEnabled()) { hadoop_user_name = System.getenv(HADOOP_USER_NAME); log.info(HADOOP_USER_NAME + "='{}'", hadoop_user_name); } service_user_name = RegistryUtils.currentUser(); log.info("Registry service username ={}", service_user_name); // now do the registration registerServiceInstance(clustername, appid); // log the YARN and web UIs log.info("RM Webapp address {}", serviceConf.get(YarnConfiguration.RM_WEBAPP_ADDRESS)); log.info("slider Webapp address {}", appMasterTrackingUrl); // declare the cluster initialized log.info("Application Master Initialization Completed"); initCompleted.set(true); try { // start handling any scheduled events startQueueProcessing(); // Start the Slider AM provider sliderAMProvider.start(); // 
launch the real provider; this is expected to trigger a callback that // starts the node review process launchProviderService(instanceDefinition, confDir); //now block waiting to be told to exit the process waitForAMCompletionSignal(); } catch (Exception e) { log.error("Exception : {}", e, e); onAMStop(new ActionStopSlider(e)); } //shutdown time return finish(); }