List of usage examples for the org.apache.hadoop.security.Credentials constructor
public Credentials()
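Before the sourced examples below, a minimal, self-contained sketch of what the no-argument constructor gives you: an empty in-memory store for tokens and secret keys. The class name and aliases here are illustrative, not taken from any of the sources below.

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class CredentialsConstructorSketch {
    public static void main(String[] args) {
        // The no-arg constructor creates an empty credential store.
        Credentials credentials = new Credentials();

        // Tokens and secret keys are added under Text aliases (illustrative names).
        credentials.addToken(new Text("example.token.alias"), new Token<TokenIdentifier>());
        credentials.addSecretKey(new Text("example.secret.alias"), "secret".getBytes());

        System.out.println("tokens=" + credentials.numberOfTokens()
                + ", secretKeys=" + credentials.numberOfSecretKeys());
    }
}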
From source file:co.cask.cdap.data.security.HBaseSecureStoreUpdater.java
License:Apache License
@Inject
public HBaseSecureStoreUpdater(Configuration hConf, LocationFactory locationFactory) {
    this.hConf = hConf;
    this.locationFactory = locationFactory;
    this.credentials = new Credentials();
}
From source file:co.cask.cdap.security.impersonation.RemoteUGIProvider.java
License:Apache License
private static Credentials readCredentials(Location location) throws IOException {
    Credentials credentials = new Credentials();
    try (DataInputStream input = new DataInputStream(new BufferedInputStream(location.getInputStream()))) {
        credentials.readTokenStorageStream(input);
    }
    LOG.debug("Read credentials from {}", location);
    return credentials;
}
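The example above reads Hadoop's compact token-storage format. For reference, a minimal sketch of the matching write side; the class name and output path are illustrative assumptions, not part of the CDAP source.

import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import org.apache.hadoop.security.Credentials;

public class WriteCredentialsSketch {
    public static void main(String[] args) throws IOException {
        Credentials credentials = new Credentials();
        // ... add tokens or secret keys here ...

        // Writes the same token-storage stream format that
        // readTokenStorageStream() above consumes.
        try (DataOutputStream out = new DataOutputStream(new FileOutputStream("credentials.bin"))) {
            credentials.writeTokenStorageToStream(out);
        }
    }
}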
From source file:co.cask.cdap.security.TokenSecureStoreUpdater.java
License:Apache License
private Credentials refreshCredentials() {
    try {
        Credentials refreshedCredentials = new Credentials();
        if (User.isSecurityEnabled()) {
            YarnTokenUtils.obtainToken(hConf, refreshedCredentials);
        }
        if (User.isHBaseSecurityEnabled(hConf)) {
            HBaseTokenUtils.obtainToken(hConf, refreshedCredentials);
        }
        if (secureExplore) {
            HiveTokenUtils.obtainToken(refreshedCredentials);
            JobHistoryServerTokenUtils.obtainToken(hConf, refreshedCredentials);
        }
        addDelegationTokens(hConf, locationFactory, refreshedCredentials);
        return refreshedCredentials;
    } catch (IOException ioe) {
        throw Throwables.propagate(ioe);
    }
}
From source file:co.cask.tigon.internal.app.runtime.distributed.AbstractDistributedProgramRunner.java
License:Apache License
@Override
public final ProgramController run(final Program program, final ProgramOptions options) {
    final File hConfFile;
    final File cConfFile;
    final Program copiedProgram;
    final File programDir; // Temp directory for unpacking the program

    try {
        // Copy config files and the program jar to local temp, and ask Twill to localize them to the container.
        // Twill saves those files in HDFS and keeps using them during the lifetime of the application.
        // Twill will manage the cleanup of those files in HDFS.
        hConfFile = saveHConf(hConf, File.createTempFile("hConf", ".xml"));
        cConfFile = saveCConf(cConf, File.createTempFile("cConf", ".xml"));
        programDir = Files.createTempDir();
        copiedProgram = copyProgramJar(program, programDir);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }

    final String runtimeArgs = new Gson().toJson(options.getUserArguments());

    // Obtain and add the HBase delegation token as well (in non-secure mode, it's a no-op).
    // Twill would also ignore it if it is not running in secure mode.
    // The HDFS token should already be obtained by Twill.
    return launch(copiedProgram, options, hConfFile, cConfFile, new ApplicationLauncher() {
        @Override
        public TwillController launch(TwillApplication twillApplication) {
            TwillPreparer twillPreparer = twillRunner.prepare(twillApplication);
            if (options.isDebug()) {
                LOG.info("Starting {} with debugging enabled.", program.getId());
                twillPreparer.enableDebugging();
            }
            TwillController twillController = twillPreparer
                .withDependencies(new HBaseTableUtilFactory().get().getClass())
                .addSecureStore(YarnSecureStore.create(HBaseTokenUtils.obtainToken(hConf, new Credentials())))
                .withApplicationArguments(
                    String.format("--%s", RunnableOptions.JAR), copiedProgram.getJarLocation().getName(),
                    String.format("--%s", RunnableOptions.RUNTIME_ARGS), runtimeArgs)
                .start();
            return addCleanupListener(twillController, hConfFile, cConfFile, copiedProgram, programDir);
        }
    });
}
From source file:com.alibaba.jstorm.hdfs.common.security.AutoHDFS.java
License:Apache License
@SuppressWarnings("unchecked")
protected Credentials getCredentials(Map<String, String> credentials) {
    Credentials credential = null;
    if (credentials != null && credentials.containsKey(getCredentialKey())) {
        try {
            byte[] credBytes = DatatypeConverter.parseBase64Binary(credentials.get(getCredentialKey()));
            ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(credBytes));
            credential = new Credentials();
            credential.readFields(in);
        } catch (Exception e) {
            LOG.error("Could not obtain credentials from credentials map.", e);
        }
    }
    return credential;
}
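For reference, a hedged sketch of how such a base64-encoded credentials entry could be produced on the write side. The class name and the map key "hdfs.credentials" are illustrative assumptions, not taken from the AutoHDFS source; the only fixed points are Credentials.write(), the Writable counterpart of the readFields() call above, and the base64 encoding.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.util.HashMap;
import java.util.Map;
import javax.xml.bind.DatatypeConverter;
import org.apache.hadoop.security.Credentials;

public class CredentialsMapSketch {
    public static void main(String[] args) throws IOException {
        Credentials credentials = new Credentials();
        // ... obtain delegation tokens into 'credentials' here ...

        // Serialize with Writable.write(); ObjectOutputStream implements
        // DataOutput, mirroring the ObjectInputStream used by readFields().
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
            credentials.write(out);
        }

        // Store the base64-encoded bytes under an illustrative key.
        Map<String, String> credentialsMap = new HashMap<>();
        credentialsMap.put("hdfs.credentials", DatatypeConverter.printBase64Binary(bos.toByteArray()));
    }
}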
From source file:com.bigjob.Client.java
License:Apache License
/**
 * Main run function for the client.
 *
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {
    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM (RM)" + ", numNodeManagers="
            + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId()
                + ", nodeAddress" + node.getHttpAddress() + ", nodeRackName" + node.getRackName()
                + ", nodeNumContainers" + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName()
            + ", queueCurrentCapacity=" + queueInfo.getCurrentCapacity()
            + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size()
            + ", queueChildQueueCount=" + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName()
                    + ", userAcl=" + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master.
    // Memory ask has to be a multiple of min and less than max.

    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value."
                + ", specified=" + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capability of resources in this cluster " + maxVCores);

    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. Using max value."
                + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }

    // Set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // Set local resources for the application master:
    // local files or archives as needed.
    // In this scenario, the jar file for the application master is part of the local resources.
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem.
    // Create a local resource to point to the destination jar path.
    // if (dfsUrl != null && dfsUrl.equals("") == false) {
    //     conf.set("fs.defaultFS", dfsUrl);
    // }
    FileSystem fs = FileSystem.get(conf);
    addToLocalResources(fs, appMasterJar, appMasterJarPath, appId.getId(), localResources, null);

    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        addToLocalResources(fs, log4jPropFile, log4jPath, appId.getId(), localResources, null);
    }

    // The shell script has to be made available on the final container(s)
    // where it will be executed.
    // To do this, we need to first copy it into the filesystem that is visible
    // to the yarn framework.
    // We do not need to set this as a local resource for the application
    // master as the application master does not need it.
    String hdfsShellScriptLocation = "";
    long hdfsShellScriptLen = 0;
    long hdfsShellScriptTimestamp = 0;
    if (!shellScriptPath.isEmpty()) {
        Path shellSrc = new Path(shellScriptPath);
        String shellPathSuffix = appName + "/" + appId.getId() + "/"
                + (Shell.WINDOWS ? windowBatPath : linuxShellPath);
        Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
        fs.copyFromLocalFile(false, true, shellSrc, shellDst);
        hdfsShellScriptLocation = shellDst.toUri().toString();
        FileStatus shellFileStatus = fs.getFileStatus(shellDst);
        hdfsShellScriptLen = shellFileStatus.getLen();
        hdfsShellScriptTimestamp = shellFileStatus.getModificationTime();
    }

    if (!shellCommand.isEmpty()) {
        addToLocalResources(fs, null, shellCommandPath, appId.getId(), localResources, shellCommand);
    }

    if (shellArgs.length > 0) {
        addToLocalResources(fs, null, shellArgsPath, appId.getId(), localResources,
                StringUtils.join(shellArgs, " "));
    }

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    // amContainer.setContainerTokens(containerToken);

    // Set the env variables to be set up in the env where the application master will run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // Put the location of the shell script into the env.
    // Using the env info, the application master will create the correct local resource for the
    // eventual containers that will be launched to execute the shell scripts.
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION, hdfsShellScriptLocation);
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP, Long.toString(hdfsShellScriptTimestamp));
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN, Long.toString(hdfsShellScriptLen));

    // Add AppMaster.jar location to classpath.
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar.
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$())
            .append(File.pathSeparatorChar).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // Add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());
    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--container_vcores " + String.valueOf(containerVirtualCores));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    vargs.add("--priority " + String.valueOf(shellCmdPriority));

    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements.
    // For now, both memory and vcores are supported, so we set memory and
    // vcores requirements.
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    capability.setVirtualCores(amVCores);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application.
    // Not needed in this scenario.
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?>[] tokens = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager.
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response, as either a valid response object is returned on success
    // or an exception is thrown to denote some form of failure.
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // TODO
    // Try submitting the same request again
    // app submission failure?

    // Monitor the application
    // return monitorApplication(appId);
    System.out.println("ApplicationId:" + appId);
    return true;
}
From source file:com.cloudera.hue.CredentialsMerger.java
License:Apache License
/**
 * Merge several credentials files into one. Give the desired output file
 * first, followed by all of the input files.
 *
 * <p>File formats are tried in this order: TokenStorageFile, urlEncodedString.</p>
 *
 * @param args <out> <in1> ...
 * @throws IOException in the event of an error reading or writing files.
 */
public static void main(String[] args) throws IOException {
    if (args.length < 2) {
        printUsage();
        System.exit(1);
    }

    Path outputFile = new Path("file://" + new File(args[0]).getAbsolutePath());
    Configuration conf = new Configuration();
    Credentials credentials = new Credentials();
    for (int i = 1; i < args.length; i++) {
        try {
            Credentials singleFileCredentials = Credentials.readTokenStorageFile(
                    new Path("file://" + new File(args[i]).getAbsolutePath()), conf);
            credentials.addAll(singleFileCredentials);
        } catch (IOException e) {
            BufferedReader reader = new BufferedReader(new FileReader(args[i]));
            try {
                // Retry reading the token in the encoded-URL format
                Token<?> token = new Token();
                String encodedToken = reader.readLine();
                token.decodeFromUrlString(encodedToken);
                credentials.addToken(new Text(args[i]), token);
            } finally {
                reader.close();
            }
        }
    }

    credentials.writeTokenStorageFile(outputFile, conf);
}
From source file:com.cloudera.recordservice.hcatalog.common.TestHCatRSUtil.java
License:Apache License
@Test
public void copyCredentialsToJobConfTest() {
    JobConf conf = new JobConf();
    Credentials cred = new Credentials();
    cred.addToken(new Text("Alias"), new Token<TokenIdentifier>());
    HCatRSUtil.copyCredentialsToJobConf(cred, conf);
    assertEquals(1, conf.getCredentials().numberOfTokens());
}
From source file:com.cloudera.recordservice.mapreduce.MapReduceTest.java
License:Apache License
private void verifyInputSplits(int numSplits, int numCols, Configuration config) throws IOException {
    List<InputSplit> splits = PlanUtil.getSplits(config, new Credentials()).splits;
    assertEquals(numSplits, splits.size());
    RecordServiceInputSplit split = (RecordServiceInputSplit) splits.get(0);
    assertEquals(numCols, split.getSchema().getNumColumns());
}
From source file:com.cloudera.recordservice.mapreduce.MapReduceTest.java
License:Apache License
private void verifyException(String msg, String db, String tbl, String... columns) {
    Configuration config = new Configuration();
    boolean exceptionThrown = false;
    try {
        RecordServiceConfig.setInputTable(config, db, tbl, columns);
        PlanUtil.getSplits(config, new Credentials());
    } catch (IOException e) {
        exceptionThrown = true;
        assertTrue(e.getMessage(), e.getMessage().contains(msg));
    } catch (IllegalArgumentException e) {
        exceptionThrown = true;
        assertTrue(e.getMessage(), e.getMessage().contains(msg));
    }
    assertTrue(exceptionThrown);
}