List of usage examples for the org.apache.hadoop.security.Credentials no-argument constructor:
public Credentials()
From source file:UnmanagedAMLauncher.java
License:Apache License
public void launchAM(ApplicationAttemptId attemptId) throws IOException, YarnException { Credentials credentials = new Credentials(); Token<AMRMTokenIdentifier> token = rmClient.getAMRMToken(attemptId.getApplicationId()); // Service will be empty but that's okay, we are just passing down only // AMRMToken down to the real AM which eventually sets the correct // service-address. credentials.addToken(token.getService(), token); File tokenFile = File.createTempFile("unmanagedAMRMToken", "", new File(System.getProperty("user.dir"))); try {/*from ww w.j a va 2 s.c om*/ FileUtil.chmod(tokenFile.getAbsolutePath(), "600"); } catch (InterruptedException ex) { throw new RuntimeException(ex); } tokenFile.deleteOnExit(); DataOutputStream os = new DataOutputStream(new FileOutputStream(tokenFile, true)); credentials.writeTokenStorageToStream(os); os.close(); Map<String, String> env = System.getenv(); ArrayList<String> envAMList = new ArrayList<String>(); boolean setClasspath = false; for (Map.Entry<String, String> entry : env.entrySet()) { String key = entry.getKey(); String value = entry.getValue(); if (key.equals("CLASSPATH")) { setClasspath = true; if (classpath != null) { value = value + File.pathSeparator + classpath; } } envAMList.add(key + "=" + value); } if (!setClasspath && classpath != null) { envAMList.add("CLASSPATH=" + classpath); } ContainerId containerId = ContainerId.newContainerId(attemptId, 0); String hostname = InetAddress.getLocalHost().getHostName(); envAMList.add(Environment.CONTAINER_ID.name() + "=" + containerId); envAMList.add(Environment.NM_HOST.name() + "=" + hostname); envAMList.add(Environment.NM_HTTP_PORT.name() + "=0"); envAMList.add(Environment.NM_PORT.name() + "=0"); envAMList.add(Environment.LOCAL_DIRS.name() + "= /tmp"); envAMList.add(ApplicationConstants.APP_SUBMIT_TIME_ENV + "=" + System.currentTimeMillis()); envAMList.add(ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME + "=" + tokenFile.getAbsolutePath()); String[] envAM = new 
String[envAMList.size()]; Process amProc = Runtime.getRuntime().exec(amCmd, envAMList.toArray(envAM)); final BufferedReader errReader = new BufferedReader(new InputStreamReader(amProc.getErrorStream())); final BufferedReader inReader = new BufferedReader(new InputStreamReader(amProc.getInputStream())); // read error and input streams as this would free up the buffers // free the error stream buffer Thread errThread = new Thread() { @Override public void run() { try { String line = errReader.readLine(); while ((line != null) && !isInterrupted()) { System.err.println(line); line = errReader.readLine(); } } catch (IOException ioe) { LOG.warn("Error reading the error stream", ioe); } } }; Thread outThread = new Thread() { @Override public void run() { try { String line = inReader.readLine(); while ((line != null) && !isInterrupted()) { System.out.println(line); line = inReader.readLine(); } } catch (IOException ioe) { LOG.warn("Error reading the out stream", ioe); } } }; try { errThread.start(); outThread.start(); } catch (IllegalStateException ise) { } // wait for the process to finish and check the exit code try { int exitCode = amProc.waitFor(); LOG.info("AM process exited with value: " + exitCode); } catch (InterruptedException e) { e.printStackTrace(); } finally { amCompleted = true; } try { // make sure that the error thread exits // on Windows these threads sometimes get stuck and hang the execution // timeout and join later after destroying the process. errThread.join(); outThread.join(); errReader.close(); inReader.close(); } catch (InterruptedException ie) { LOG.info("ShellExecutor: Interrupted while reading the error/out stream", ie); } catch (IOException ioe) { LOG.warn("Error while closing the error/out stream", ioe); } amProc.destroy(); }
From source file:alluxio.yarn.Client.java
License:Apache License
private void setupContainerLaunchContext() throws IOException, YarnException { Map<String, String> applicationMasterArgs = ImmutableMap.<String, String>of("-num_workers", Integer.toString(mNumWorkers), "-master_address", mMasterAddress, "-resource_path", mResourcePath); final String amCommand = YarnUtils.buildCommand(YarnContainerType.APPLICATION_MASTER, applicationMasterArgs);//from ww w. j a va2 s .c o m System.out.println("ApplicationMaster command: " + amCommand); mAmContainer.setCommands(Collections.singletonList(amCommand)); // Setup local resources Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(); localResources.put("alluxio.tar.gz", YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio.tar.gz")); localResources.put("alluxio-yarn-setup.sh", YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio-yarn-setup.sh")); localResources.put("alluxio.jar", YarnUtils.createLocalResourceOfFile(mYarnConf, mResourcePath + "/alluxio.jar")); mAmContainer.setLocalResources(localResources); // Setup CLASSPATH for ApplicationMaster Map<String, String> appMasterEnv = new HashMap<String, String>(); setupAppMasterEnv(appMasterEnv); mAmContainer.setEnvironment(appMasterEnv); // Set up security tokens for launching our ApplicationMaster container. if (UserGroupInformation.isSecurityEnabled()) { Credentials credentials = new Credentials(); String tokenRenewer = mYarnConf.get(YarnConfiguration.RM_PRINCIPAL); if (tokenRenewer == null || tokenRenewer.length() == 0) { throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer"); } org.apache.hadoop.fs.FileSystem fs = org.apache.hadoop.fs.FileSystem.get(mYarnConf); // getting tokens for the default file-system. 
final Token<?>[] tokens = fs.addDelegationTokens(tokenRenewer, credentials); if (tokens != null) { for (Token<?> token : tokens) { LOG.info("Got dt for " + fs.getUri() + "; " + token); } } // getting yarn resource manager token org.apache.hadoop.conf.Configuration config = mYarnClient.getConfig(); Token<TokenIdentifier> token = ConverterUtils.convertFromYarn( mYarnClient.getRMDelegationToken(new org.apache.hadoop.io.Text(tokenRenewer)), ClientRMProxy.getRMDelegationTokenService(config)); LOG.info("Added RM delegation token: " + token); credentials.addToken(token.getService(), token); DataOutputBuffer dob = new DataOutputBuffer(); credentials.writeTokenStorageToStream(dob); ByteBuffer buffer = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); mAmContainer.setTokens(buffer); } }
From source file:azkaban.security.HadoopSecurityManager_H_1_0.java
License:Apache License
@Override public synchronized void prefetchToken(final File tokenFile, final Props props, final Logger logger) throws HadoopSecurityManagerException { final String userToProxy = props.getString(USER_TO_PROXY); logger.info("Getting hadoop tokens for " + userToProxy); final Credentials cred = new Credentials(); if (props.getBoolean(OBTAIN_HCAT_TOKEN, false)) { try {/* w ww . ja v a 2s . c o m*/ logger.info("Pre-fetching Hive MetaStore token from hive"); HiveConf hiveConf = new HiveConf(); logger.info("HiveConf.ConfVars.METASTOREURIS.varname " + hiveConf.get(HiveConf.ConfVars.METASTOREURIS.varname)); logger.info("HIVE_METASTORE_SASL_ENABLED " + hiveConf.get(HIVE_METASTORE_SASL_ENABLED)); logger.info("HIVE_METASTORE_KERBEROS_PRINCIPAL " + hiveConf.get(HIVE_METASTORE_KERBEROS_PRINCIPAL)); logger.info("HIVE_METASTORE_LOCAL " + hiveConf.get(HIVE_METASTORE_LOCAL)); HiveMetaStoreClient hiveClient = new HiveMetaStoreClient(hiveConf); String hcatTokenStr = hiveClient.getDelegationToken(userToProxy, UserGroupInformation.getLoginUser().getShortUserName()); Token<DelegationTokenIdentifier> hcatToken = new Token<DelegationTokenIdentifier>(); hcatToken.decodeFromUrlString(hcatTokenStr); logger.info("Created hive metastore token: " + hcatTokenStr); logger.info("Token kind: " + hcatToken.getKind()); logger.info("Token id: " + hcatToken.getIdentifier()); logger.info("Token service: " + hcatToken.getService()); cred.addToken(hcatToken.getService(), hcatToken); } catch (Exception e) { e.printStackTrace(); logger.error("Failed to get hive metastore token." + e.getMessage() + e.getCause()); } catch (Throwable t) { t.printStackTrace(); logger.error("Failed to get hive metastore token." 
+ t.getMessage() + t.getCause()); } } try { getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() { @Override public Void run() throws Exception { getToken(userToProxy); return null; } private void getToken(String userToProxy) throws InterruptedException, IOException, HadoopSecurityManagerException { logger.info("Here is the props for " + OBTAIN_NAMENODE_TOKEN + ": " + props.getBoolean(OBTAIN_NAMENODE_TOKEN)); if (props.getBoolean(OBTAIN_NAMENODE_TOKEN, false)) { FileSystem fs = FileSystem.get(conf); // check if we get the correct FS, and most importantly, the // conf logger.info("Getting DFS token from " + fs.getUri()); Token<?> fsToken = fs.getDelegationToken(userToProxy); if (fsToken == null) { logger.error("Failed to fetch DFS token for "); throw new HadoopSecurityManagerException( "Failed to fetch DFS token for " + userToProxy); } logger.info("Created DFS token: " + fsToken.toString()); logger.info("Token kind: " + fsToken.getKind()); logger.info("Token id: " + fsToken.getIdentifier()); logger.info("Token service: " + fsToken.getService()); cred.addToken(fsToken.getService(), fsToken); } if (props.getBoolean(OBTAIN_JOBTRACKER_TOKEN, false)) { JobClient jobClient = new JobClient(new JobConf()); logger.info("Pre-fetching JT token from JobTracker"); Token<DelegationTokenIdentifier> mrdt = jobClient.getDelegationToken(new Text("mr token")); if (mrdt == null) { logger.error("Failed to fetch JT token"); throw new HadoopSecurityManagerException("Failed to fetch JT token for " + userToProxy); } logger.info("Created JT token: " + mrdt.toString()); logger.info("Token kind: " + mrdt.getKind()); logger.info("Token id: " + mrdt.getIdentifier()); logger.info("Token service: " + mrdt.getService()); cred.addToken(mrdt.getService(), mrdt); } } }); FileOutputStream fos = null; DataOutputStream dos = null; try { fos = new FileOutputStream(tokenFile); dos = new DataOutputStream(fos); cred.writeTokenStorageToStream(dos); } finally { if (dos != null) { dos.close(); 
} if (fos != null) { fos.close(); } } // stash them to cancel after use. logger.info("Tokens loaded in " + tokenFile.getAbsolutePath()); } catch (Exception e) { e.printStackTrace(); throw new HadoopSecurityManagerException( "Failed to get hadoop tokens! " + e.getMessage() + e.getCause()); } catch (Throwable t) { t.printStackTrace(); throw new HadoopSecurityManagerException( "Failed to get hadoop tokens! " + t.getMessage() + t.getCause()); } }
From source file:azkaban.security.HadoopSecurityManager_H_2_0.java
License:Apache License
@Override public synchronized void prefetchToken(final File tokenFile, final Props props, final Logger logger) throws HadoopSecurityManagerException { final String userToProxy = props.getString(USER_TO_PROXY); logger.info("Getting hadoop tokens based on props for " + userToProxy); final Credentials cred = new Credentials(); if (props.getBoolean(OBTAIN_HCAT_TOKEN, false)) { try {/*from w w w .j a va 2 s . co m*/ // first we fetch and save the default hcat token. logger.info("Pre-fetching default Hive MetaStore token from hive"); HiveConf hiveConf = new HiveConf(); Token<DelegationTokenIdentifier> hcatToken = fetchHcatToken(userToProxy, hiveConf, null, logger); cred.addToken(hcatToken.getService(), hcatToken); // check and see if user specified the extra hcat locations we need to // look at and fetch token. final List<String> extraHcatLocations = props.getStringList(EXTRA_HCAT_LOCATION); if (Collections.EMPTY_LIST != extraHcatLocations) { logger.info("Need to pre-fetch extra metaStore tokens from hive."); // start to process the user inputs. for (String thriftUrl : extraHcatLocations) { logger.info("Pre-fetching metaStore token from : " + thriftUrl); hiveConf = new HiveConf(); hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, thriftUrl); hcatToken = fetchHcatToken(userToProxy, hiveConf, thriftUrl, logger); cred.addToken(hcatToken.getService(), hcatToken); } } } catch (Throwable t) { String message = "Failed to get hive metastore token." 
+ t.getMessage() + t.getCause(); logger.error(message, t); throw new HadoopSecurityManagerException(message); } } if (props.getBoolean(OBTAIN_JOBHISTORYSERVER_TOKEN, false)) { YarnRPC rpc = YarnRPC.create(conf); final String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS); logger.debug("Connecting to HistoryServer at: " + serviceAddr); HSClientProtocol hsProxy = (HSClientProtocol) rpc.getProxy(HSClientProtocol.class, NetUtils.createSocketAddr(serviceAddr), conf); logger.info("Pre-fetching JH token from job history server"); Token<?> jhsdt = null; try { jhsdt = getDelegationTokenFromHS(hsProxy); } catch (Exception e) { logger.error("Failed to fetch JH token", e); throw new HadoopSecurityManagerException("Failed to fetch JH token for " + userToProxy); } if (jhsdt == null) { logger.error("getDelegationTokenFromHS() returned null"); throw new HadoopSecurityManagerException("Unable to fetch JH token for " + userToProxy); } logger.info("Created JH token: " + jhsdt.toString()); logger.info("Token kind: " + jhsdt.getKind()); logger.info("Token id: " + jhsdt.getIdentifier()); logger.info("Token service: " + jhsdt.getService()); cred.addToken(jhsdt.getService(), jhsdt); } try { getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() { @Override public Void run() throws Exception { getToken(userToProxy); return null; } private void getToken(String userToProxy) throws InterruptedException, IOException, HadoopSecurityManagerException { logger.info("Here is the props for " + OBTAIN_NAMENODE_TOKEN + ": " + props.getBoolean(OBTAIN_NAMENODE_TOKEN)); if (props.getBoolean(OBTAIN_NAMENODE_TOKEN, false)) { FileSystem fs = FileSystem.get(conf); // check if we get the correct FS, and most importantly, the // conf logger.info("Getting DFS token from " + fs.getUri()); Token<?> fsToken = fs .getDelegationToken(getMRTokenRenewerInternal(new JobConf()).toString()); if (fsToken == null) { logger.error("Failed to fetch DFS token for "); throw new 
HadoopSecurityManagerException( "Failed to fetch DFS token for " + userToProxy); } logger.info("Created DFS token: " + fsToken.toString()); logger.info("Token kind: " + fsToken.getKind()); logger.info("Token id: " + fsToken.getIdentifier()); logger.info("Token service: " + fsToken.getService()); cred.addToken(fsToken.getService(), fsToken); // getting additional name nodes tokens String otherNamenodes = props.get(OTHER_NAMENODES_TO_GET_TOKEN); if ((otherNamenodes != null) && (otherNamenodes.length() > 0)) { logger.info(OTHER_NAMENODES_TO_GET_TOKEN + ": '" + otherNamenodes + "'"); String[] nameNodeArr = otherNamenodes.split(","); Path[] ps = new Path[nameNodeArr.length]; for (int i = 0; i < ps.length; i++) { ps[i] = new Path(nameNodeArr[i].trim()); } TokenCache.obtainTokensForNamenodes(cred, ps, conf); logger.info("Successfully fetched tokens for: " + otherNamenodes); } else { logger.info(OTHER_NAMENODES_TO_GET_TOKEN + " was not configured"); } } if (props.getBoolean(OBTAIN_JOBTRACKER_TOKEN, false)) { JobConf jobConf = new JobConf(); JobClient jobClient = new JobClient(jobConf); logger.info("Pre-fetching JT token from JobTracker"); Token<DelegationTokenIdentifier> mrdt = jobClient .getDelegationToken(getMRTokenRenewerInternal(jobConf)); if (mrdt == null) { logger.error("Failed to fetch JT token"); throw new HadoopSecurityManagerException("Failed to fetch JT token for " + userToProxy); } logger.info("Created JT token: " + mrdt.toString()); logger.info("Token kind: " + mrdt.getKind()); logger.info("Token id: " + mrdt.getIdentifier()); logger.info("Token service: " + mrdt.getService()); cred.addToken(mrdt.getService(), mrdt); } } }); FileOutputStream fos = null; DataOutputStream dos = null; try { fos = new FileOutputStream(tokenFile); dos = new DataOutputStream(fos); cred.writeTokenStorageToStream(dos); } finally { if (dos != null) { try { dos.close(); } catch (Throwable t) { // best effort logger.error("encountered exception while closing DataOutputStream of the 
tokenFile", t); } } if (fos != null) { fos.close(); } } // stash them to cancel after use. logger.info("Tokens loaded in " + tokenFile.getAbsolutePath()); } catch (Exception e) { throw new HadoopSecurityManagerException( "Failed to get hadoop tokens! " + e.getMessage() + e.getCause(), e); } catch (Throwable t) { throw new HadoopSecurityManagerException( "Failed to get hadoop tokens! " + t.getMessage() + t.getCause(), t); } }
From source file:cascading.flow.tez.planner.Hadoop2TezFlowStepJob.java
License:Open Source License
/**
 * Creates (if needed) and returns the fully-qualified staging directory for this step,
 * recording it in {@code TEZ_AM_STAGING_DIR} and obtaining NameNode delegation tokens for it.
 *
 * @param workingConf Tez configuration to record the staging dir in
 * @return the qualified staging directory path
 * @throws IOException if the directory cannot be created or tokens cannot be obtained
 */
private Path prepareEnsureStagingDir(TezConfiguration workingConf) throws IOException {
    String stepStagingPath = createStepStagingPath();

    workingConf.set(TezConfiguration.TEZ_AM_STAGING_DIR, stepStagingPath);

    Path stagingDir = new Path(stepStagingPath);
    FileSystem fileSystem = FileSystem.get(workingConf);

    stagingDir = fileSystem.makeQualified(stagingDir);

    // NOTE(review): tokens are collected into a throw-away Credentials instance, as in the
    // original — presumably only the side effects of obtaining them matter here; confirm.
    TokenCache.obtainTokensForNamenodes(new Credentials(), new Path[] { stagingDir }, workingConf);

    TezClientUtils.ensureStagingDirExists(workingConf, stagingDir);

    // BUG FIX: FileSystem#getScheme() returns the bare scheme ("file"), so the original
    // startsWith("file:/") check never matched and the local directory was never created.
    if ("file".equals(fileSystem.getScheme())) {
        File localDir = new File(stagingDir.toUri());
        if (!localDir.mkdirs() && !localDir.isDirectory()) {
            // mkdirs() result was silently ignored before
            throw new IOException("unable to create staging directory: " + localDir);
        }
    }

    return stagingDir;
}
From source file:cn.edu.buaa.act.petuumOnYarn.Client.java
License:Apache License
/** * Main run function for the client// ww w. j a v a 2s . c o m * * @return true if application completed successfully * @throws IOException * @throws YarnException */ public boolean run() throws IOException, YarnException { LOG.info("Running Client"); yarnClient.start(); String[] s; s = conf.getStrings(YarnConfiguration.RM_ADDRESS); for (String ss : s) LOG.info("RM address: " + ss); YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics(); LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers()); List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING); LOG.info("Got Cluster node info from ASM"); for (NodeReport node : clusterNodeReports) { LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress" + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers" + node.getNumContainers() + ", nodeIdHost" + node.getNodeId().getHost()); } QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue); LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity=" + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity() + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount=" + queueInfo.getChildQueues().size()); List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo(); for (QueueUserACLInfo aclInfo : listAclInfo) { for (QueueACL userAcl : aclInfo.getUserAcls()) { LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl=" + userAcl.name()); } } // Get a new application id YarnClientApplication app = yarnClient.createApplication(); GetNewApplicationResponse appResponse = app.getNewApplicationResponse(); int maxMem = appResponse.getMaximumResourceCapability().getMemory(); LOG.info("Max mem capabililty of resources in this cluster " + maxMem); // A resource ask cannot exceed the max. 
if (amMemory > maxMem) { LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified=" + amMemory + ", max=" + maxMem); amMemory = maxMem; } int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores(); LOG.info("Max virtual cores capabililty of resources in this cluster " + maxVCores); if (amVCores > maxVCores) { LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value." + ", specified=" + amVCores + ", max=" + maxVCores); amVCores = maxVCores; } // set the application name ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext(); ApplicationId appId = appContext.getApplicationId(); appContext.setKeepContainersAcrossApplicationAttempts(keepContainers); appContext.setApplicationName(appName); // set local resources for the application master // local files or archives as needed // In this scenario, the jar file for the application master is part of // the local resources Map<String, LocalResource> localResources = new HashMap<String, LocalResource>(); LOG.info("Copy App Master jar from local filesystem and add to local environment"); // Copy the application master jar to the filesystem // Create a local resource to point to the destination jar path FileSystem fs = FileSystem.get(conf); YarnUtil.copyAndAddToLocalResources(fs, appMasterJar, petuumHDFSPathPrefix, appMasterJarPath, localResources, null); scriptHDFSPath = YarnUtil.copyToHDFS(fs, scriptPath, petuumHDFSPathPrefix, launchPath, null); // Set the log4j properties if needed if (!log4jPropFile.isEmpty()) { YarnUtil.copyAndAddToLocalResources(fs, log4jPropFile, petuumHDFSPathPrefix, log4jPath, localResources, null); } // Set the env variables to be setup in the env where the application // master will be run LOG.info("Set the environment for the application master"); Map<String, String> env = new HashMap<String, String>(); // Add AppMaster.jar location to classpath // At some point we should not be 
required to add // the hadoop specific classpaths to the env. // It should be provided out of the box. // For now setting all required classpaths including // the classpath to "." for the application jar StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$$()) .append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*"); for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH, YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) { classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR); classPathEnv.append(c.trim()); } classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./log4j.properties"); // add the runtime classpath needed for tests to work if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) { classPathEnv.append(':'); classPathEnv.append(System.getProperty("java.class.path")); } env.put("CLASSPATH", classPathEnv.toString()); // Set the necessary command to execute the application master Vector<CharSequence> vargs = new Vector<CharSequence>(30); // Set java executable command LOG.info("Setting up app master command"); vargs.add(Environment.JAVA_HOME.$$() + "/bin/java"); // Set Xmx based on am memory size vargs.add("-Xmx" + amMemory + "m"); // Set class name vargs.add(appMasterMainClass); // Set params for Application Master vargs.add("--container_memory " + String.valueOf(containerMemory)); vargs.add("--container_vcores " + String.valueOf(containerVirtualCores)); vargs.add("--num_nodes " + String.valueOf(numNodes)); vargs.add("--start_port " + String.valueOf(startPort)); vargs.add("--priority " + String.valueOf(workerPriority)); vargs.add("--script_hdfs_path " + scriptHDFSPath); for (Map.Entry<String, String> entry : shellEnv.entrySet()) { vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue()); } if (debugFlag) { vargs.add("--debug"); } vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout"); vargs.add("2>" + 
ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr"); // Get final commmand StringBuilder command = new StringBuilder(); for (CharSequence str : vargs) { command.append(str).append(" "); } LOG.info("Completed setting up app master command " + command.toString()); List<String> commands = new ArrayList<String>(); commands.add(command.toString()); // Set up the container launch context for the application master ContainerLaunchContext amContainer = ContainerLaunchContext.newInstance(localResources, env, commands, null, null, null); // Set up resource type requirements // For now, both memory and vcores are supported, so we set memory and // vcores requirements Resource capability = Resource.newInstance(amMemory, amVCores); appContext.setResource(capability); // Service data is a binary blob that can be passed to the application // Not needed in this scenario // amContainer.setServiceData(serviceData); // Setup security tokens if (UserGroupInformation.isSecurityEnabled()) { // Note: Credentials class is marked as LimitedPrivate for HDFS and // MapReduce Credentials credentials = new Credentials(); String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL); if (tokenRenewer == null || tokenRenewer.length() == 0) { throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer"); } // For now, only getting tokens for the default file-system. 
final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials); if (tokens != null) { for (Token<?> token : tokens) { LOG.info("Got dt for " + fs.getUri() + "; " + token); } } DataOutputBuffer dob = new DataOutputBuffer(); credentials.writeTokenStorageToStream(dob); ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); amContainer.setTokens(fsTokens); } appContext.setAMContainerSpec(amContainer); // Set the priority for the application master Priority pri = Priority.newInstance(amPriority); appContext.setPriority(pri); // Set the queue to which this application is to be submitted in the RM appContext.setQueue(amQueue); // Submit the application to the applications manager // SubmitApplicationResponse submitResp = // applicationsManager.submitApplication(appRequest); // Ignore the response as either a valid response object is returned on // success // or an exception thrown to denote some form of a failure LOG.info("Submitting application to ASM"); yarnClient.submitApplication(appContext); // Monitor the application currentTime = System.currentTimeMillis(); LOG.info("submit AM in " + (currentTime - startTime) + "ms"); return monitorApplication(appId); }
From source file:co.cask.cdap.app.runtime.spark.distributed.SparkExecutionServiceTest.java
License:Apache License
@Test public void testWriteCredentials() throws Exception { ProgramRunId programRunId = new ProgramRunId("ns", "app", ProgramType.SPARK, "test", RunIds.generate().getId());/* w w w . j a v a2 s .co m*/ // Start a service that doesn't support workflow token SparkExecutionService service = new SparkExecutionService(locationFactory, InetAddress.getLoopbackAddress().getCanonicalHostName(), programRunId, null); service.startAndWait(); try { SparkExecutionClient client = new SparkExecutionClient(service.getBaseURI(), programRunId); Location targetLocation = locationFactory.create(UUID.randomUUID().toString()).append("credentials"); client.writeCredentials(targetLocation); FileStatus status = dfsCluster.getFileSystem().getFileStatus(new Path(targetLocation.toURI())); // Verify the file permission is 600 Assert.assertEquals(FsAction.READ_WRITE, status.getPermission().getUserAction()); Assert.assertEquals(FsAction.NONE, status.getPermission().getGroupAction()); Assert.assertEquals(FsAction.NONE, status.getPermission().getOtherAction()); // Should be able to deserialize back to credentials Credentials credentials = new Credentials(); try (DataInputStream is = new DataInputStream(targetLocation.getInputStream())) { credentials.readTokenStorageStream(is); } // Call complete to notify the service it has been stopped client.completed(null); } finally { service.stopAndWait(); } }
From source file:co.cask.cdap.app.runtime.spark.SparkCredentialsUpdaterTest.java
License:Apache License
@Test public void testUpdater() throws Exception { Location credentialsDir = Locations.toLocation(TEMPORARY_FOLDER.newFolder()); // Create a updater that don't do any auto-update within the test time and don't cleanup SparkCredentialsUpdater updater = new SparkCredentialsUpdater(createCredentialsSupplier(), credentialsDir, "credentials", TimeUnit.DAYS.toMillis(1), TimeUnit.DAYS.toMillis(1), Integer.MAX_VALUE) { @Override//from w w w. j a v a 2s.c om long getNextUpdateDelay(Credentials credentials) throws IOException { return TimeUnit.DAYS.toMillis(1); } }; // Before the updater starts, the directory is empty Assert.assertTrue(credentialsDir.list().isEmpty()); UserGroupInformation.getCurrentUser().addToken( new Token<>(Bytes.toBytes("id"), Bytes.toBytes("pass"), new Text("kind"), new Text("service"))); updater.startAndWait(); try { List<Location> expectedFiles = new ArrayList<>(); expectedFiles.add(credentialsDir.append("credentials-1")); for (int i = 1; i <= 10; i++) { Assert.assertEquals(expectedFiles, listAndSort(credentialsDir)); // Read the credentials from the last file Credentials newCredentials = new Credentials(); try (DataInputStream is = new DataInputStream( expectedFiles.get(expectedFiles.size() - 1).getInputStream())) { newCredentials.readTokenStorageStream(is); } // Should contains all tokens of the current user Credentials userCredentials = UserGroupInformation.getCurrentUser().getCredentials(); for (Token<? extends TokenIdentifier> token : userCredentials.getAllTokens()) { Assert.assertEquals(token, newCredentials.getToken(token.getService())); } UserGroupInformation.getCurrentUser().addToken(new Token<>(Bytes.toBytes("id" + i), Bytes.toBytes("pass" + i), new Text("kind" + i), new Text("service" + i))); updater.run(); expectedFiles.add(credentialsDir.append("credentials-" + (i + 1))); } } finally { updater.stopAndWait(); } }
From source file:co.cask.cdap.data.runtime.main.TokenSecureStoreUpdater.java
License:Apache License
/**
 * Guice-injected constructor.
 *
 * <p>Captures the Hadoop configuration and location factory, decides whether secure
 * Explore token handling is needed, starts with an empty credentials set, and computes
 * the token update interval.
 */
@Inject
public TokenSecureStoreUpdater(Configuration hConf, CConfiguration cConf, LocationFactory locationFactory) {
    this.hConf = hConf;
    this.locationFactory = locationFactory;
    // Explore tokens are only needed when Explore is enabled AND the cluster is secure.
    secureExplore = cConf.getBoolean(Constants.Explore.EXPLORE_ENABLED)
            && UserGroupInformation.isSecurityEnabled();
    // Start empty; refreshed on demand.
    credentials = new Credentials();
    // NOTE(review): computed last — presumably calculateUpdateInterval() reads the
    // fields assigned above; keep this ordering.
    updateInterval = calculateUpdateInterval();
}
From source file:co.cask.cdap.data.runtime.main.TokenSecureStoreUpdater.java
License:Apache License
/**
 * Rebuilds the cached credentials from scratch: obtains fresh HBase, Hive, YARN and
 * JobHistoryServer tokens (as configured) plus filesystem delegation tokens, then
 * swaps the fully-populated set into {@code credentials} in a single assignment so
 * readers never observe a partially-refreshed set.
 */
private void refreshCredentials() {
    try {
        // Populate a fresh instance; the field is only updated once everything succeeded.
        Credentials refreshedCredentials = new Credentials();

        if (User.isHBaseSecurityEnabled(hConf)) {
            HBaseTokenUtils.obtainToken(hConf, refreshedCredentials);
        }

        if (secureExplore) {
            HiveTokenUtils.obtainToken(refreshedCredentials);
            YarnTokenUtils.obtainToken(hConf, refreshedCredentials);
            JobHistoryServerTokenUtils.obtainToken(hConf, refreshedCredentials);
        }

        YarnUtils.addDelegationTokens(hConf, locationFactory, refreshedCredentials);

        credentials = refreshedCredentials;
    } catch (IOException ioe) {
        // Callers treat refresh failure as fatal; rethrow unchecked.
        throw Throwables.propagate(ioe);
    }
}