Usage examples for org.apache.hadoop.security.UserGroupInformation#getCurrentUser()

@InterfaceAudience.Public @InterfaceStability.Evolving public static UserGroupInformation getCurrentUser() throws IOException

Returns the current user, including any doAs identity in the current call stack.
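Before the source-file examples, here is a minimal, self-contained sketch of the most common call pattern. The class name CurrentUserExample is ours for illustration; the accessors used are the standard UserGroupInformation API.

    import java.io.IOException;

    import org.apache.hadoop.security.UserGroupInformation;

    public class CurrentUserExample {
        public static void main(String[] args) throws IOException {
            // Resolves the user this JVM runs as: the Kerberos login when
            // security is enabled, otherwise the OS/login user. Any doAs()
            // identity on the current call stack takes precedence.
            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            System.out.println("user name:  " + ugi.getUserName());
            System.out.println("short name: " + ugi.getShortUserName());
            System.out.println("secure:     " + UserGroupInformation.isSecurityEnabled());
        }
    }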
From source file:org.apache.hcatalog.templeton.tool.TempletonControllerJob.java
License:Apache License
    /**
     * Enqueue the job and print out the job id for later collection.
     */
    @Override
    public int run(String[] args) throws IOException, InterruptedException, ClassNotFoundException {
        Configuration conf = getConf();
        conf.set(JAR_ARGS_NAME, TempletonUtils.encodeArray(args));
        conf.set("user.name", UserGroupInformation.getCurrentUser().getShortUserName());

        Job job = new Job(conf);
        job.setJarByClass(TempletonControllerJob.class);
        job.setJobName("TempletonControllerJob");
        job.setMapperClass(LaunchMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setInputFormatClass(SingleInputFormat.class);
        NullOutputFormat<NullWritable, NullWritable> of = new NullOutputFormat<NullWritable, NullWritable>();
        job.setOutputFormatClass(of.getClass());
        job.setNumReduceTasks(0);

        JobClient jc = new JobClient(new JobConf(job.getConfiguration()));
        Token<DelegationTokenIdentifier> mrdt = jc.getDelegationToken(new Text("mr token"));
        job.getCredentials().addToken(new Text("mr token"), mrdt);

        job.submit();
        submittedJobId = job.getJobID();
        return 0;
    }
From source file:org.apache.helix.provisioning.yarn.GenericApplicationMaster.java
License:Apache License
    /**
     * Start the ApplicationMaster: validate the container environment, collect
     * security tokens, and register with the ResourceManager.
     * @return Whether initialization succeeded and run should be invoked
     * @throws ParseException
     * @throws IOException
     * @throws YarnException
     */
    public boolean start() throws ParseException, IOException, YarnException {
        // parseBoolean reads the env var's value directly (Boolean.getBoolean
        // would instead look up a system property named by that value).
        if (Boolean.parseBoolean(System.getenv("debug"))) {
            dumpOutDebugInfo();
        }
        Map<String, String> envs = System.getenv();
        if (!envs.containsKey(ApplicationConstants.APP_SUBMIT_TIME_ENV)) {
            throw new RuntimeException(ApplicationConstants.APP_SUBMIT_TIME_ENV + " not set in the environment");
        }
        if (!envs.containsKey(Environment.NM_HOST.name())) {
            throw new RuntimeException(Environment.NM_HOST.name() + " not set in the environment");
        }
        if (!envs.containsKey(Environment.NM_HTTP_PORT.name())) {
            throw new RuntimeException(Environment.NM_HTTP_PORT + " not set in the environment");
        }
        if (!envs.containsKey(Environment.NM_PORT.name())) {
            throw new RuntimeException(Environment.NM_PORT.name() + " not set in the environment");
        }

        LOG.info("Application master for app" + ", appId=" + appAttemptID.getApplicationId().getId()
            + ", clustertimestamp=" + appAttemptID.getApplicationId().getClusterTimestamp()
            + ", attemptId=" + appAttemptID.getAttemptId());
        LOG.info("Starting ApplicationMaster");

        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        LOG.info("Credentials: " + credentials);
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        // Now remove the AM->RM token so that containers cannot access it.
        Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
        while (iter.hasNext()) {
            Token<?> token = iter.next();
            LOG.info("Processing token: " + token);
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                iter.remove();
            }
        }
        allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

        AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler(this);
        amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
        amRMClient.init(conf);
        amRMClient.start();

        containerListener = createNMCallbackHandler();
        nmClientAsync = new NMClientAsyncImpl(containerListener);
        nmClientAsync.init(conf);
        nmClientAsync.start();

        // Setup local RPC Server to accept status requests directly from clients
        // TODO need to setup a protocol for clients to communicate with the RPC server
        // TODO use the rpc port info to register with the RM for the client to
        // send requests to this app master

        // Register self with the ResourceManager. This starts heartbeating to the RM.
        appMasterHostname = NetUtils.getHostname();
        RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);

        // Dump out information about cluster capability as seen by the resource manager
        int maxMem = response.getMaximumResourceCapability().getMemory();
        LOG.info("Max mem capability of resources in this cluster " + maxMem);
        return true;
    }
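The allTokens buffer assembled above is typically handed to containers at launch time. Below is a hedged sketch of that hand-off; the helper class and parameter names are ours, while ContainerLaunchContext.newInstance is the standard YARN factory method.

    import java.nio.ByteBuffer;
    import java.util.Collections;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
    import org.apache.hadoop.yarn.api.records.LocalResource;

    public class ContainerLaunchSketch {
        /**
         * Builds a launch context that carries the credential buffer prepared by
         * the ApplicationMaster (minus the AM->RM token) into a container.
         */
        static ContainerLaunchContext buildContext(Map<String, LocalResource> localResources,
                Map<String, String> env, List<String> commands, ByteBuffer allTokens) {
            return ContainerLaunchContext.newInstance(localResources, env, commands,
                Collections.<String, ByteBuffer>emptyMap(), // no auxiliary service data
                allTokens.duplicate(), // tokens gathered from UserGroupInformation.getCurrentUser()
                null); // no application ACLs
        }
    }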
From source file:org.apache.hive.common.util.MockFileSystem.java
License:Apache License
    private void checkAccess() throws IOException {
        if (blockedUgi == null) return;
        if (!blockedUgi.equals(UserGroupInformation.getCurrentUser().getShortUserName())) return;
        throw new MockAccessDenied();
    }
From source file:org.apache.hive.hcatalog.mapreduce.Security.java
License:Apache License
    void handleSecurity(Credentials credentials, OutputJobInfo outputJobInfo, IMetaStoreClient client,
            Configuration conf, boolean harRequested) throws IOException, MetaException, TException, Exception {
        if (UserGroupInformation.isSecurityEnabled()) {
            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            // Check if oozie has set up a hcat delegation token - if so use it.
            TokenSelector<? extends TokenIdentifier> hiveTokenSelector = new DelegationTokenSelector();
            // Oozie does not change the service field of the token, hence by default
            // token generation will have a value of new Text(""). HiveClient will look
            // for a token using TokenSelector.selectToken() with the service set to
            // empty Text if the hive.metastore.token.signature property is set to null.
            Token<? extends TokenIdentifier> hiveToken = hiveTokenSelector.selectToken(new Text(), ugi.getTokens());
            if (hiveToken == null) {
                // We did not get a token set up by oozie, let's get one ourselves here.
                // We essentially get a token per unique output HCatTableInfo - this is
                // done because through Pig, the setOutput() method is called multiple times.
                // We want to only get the token once per unique output HCatTableInfo -
                // we cannot just get one token since in the multi-query case (> 1 store in 1 job)
                // or the case when a single pig script results in > 1 jobs, the single
                // token will get cancelled by the output committer and the subsequent
                // stores will fail - by tying the token to the concatenation of
                // dbname, tablename and partition keyvalues of the output TableInfo,
                // we can have as many tokens as there are stores, and the TokenSelector
                // will correctly pick the right tokens which the committer will use and cancel.
                String tokenSignature = getTokenSignature(outputJobInfo);
                // Get delegation tokens from the hcat server and store them into the "job".
                // These will be used to publish partitions to hcat, normally in
                // OutputCommitter.commitJob(). When the JobTracker in Hadoop MapReduce
                // starts supporting renewal of arbitrary tokens, the renewer should be
                // the principal of the JobTracker.
                hiveToken = HCatUtil.extractThriftToken(
                    client.getDelegationToken(ugi.getUserName(), ugi.getUserName()), tokenSignature);

                if (harRequested) {
                    TokenSelector<? extends TokenIdentifier> jtTokenSelector =
                        new org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenSelector();
                    Token jtToken = jtTokenSelector.selectToken(
                        org.apache.hadoop.security.SecurityUtil.buildTokenService(
                            ShimLoader.getHadoopShims().getHCatShim().getResourceManagerAddress(conf)),
                        ugi.getTokens());
                    if (jtToken == null) {
                        // We don't need to cancel this token as the TokenRenewer for JT tokens
                        // takes care of cancelling them.
                        credentials.addToken(new Text("hcat jt token"),
                            HCatUtil.getJobTrackerDelegationToken(conf, ugi.getUserName()));
                    }
                }

                credentials.addToken(new Text(ugi.getUserName() + "_" + tokenSignature), hiveToken);
                // This will be used by the output committer to pass on to the metastore client,
                // which in turn will pass it on to the TokenSelector so that it can select
                // the right token.
                conf.set(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE, tokenSignature);
            }
        }
    }
From source file:org.apache.hive.hcatalog.templeton.LauncherDelegator.java
License:Apache License
    /**
     * Dynamically determine the list of hive shim jars that need to be added
     * to the Templeton launcher job classpath.
     */
    private String getShimLibjars() {
        WebHCatJTShim shim = null;
        try {
            shim = ShimLoader.getHadoopShims().getWebHCatShim(appConf, UserGroupInformation.getCurrentUser());
        } catch (IOException e) {
            throw new RuntimeException("Failed to get WebHCatShim", e);
        }
        // Besides the HiveShims jar, which is Hadoop version dependent, we also
        // always need to include the hive shims common jars.
        Path shimCommonJar = new Path(
            TempletonUtils.findContainingJar(ShimLoader.class, HIVE_SHIMS_FILENAME_PATTERN));
        Path shimCommonSecureJar = new Path(
            TempletonUtils.findContainingJar(HadoopShimsSecure.class, HIVE_SHIMS_FILENAME_PATTERN));
        Path shimJar = new Path(
            TempletonUtils.findContainingJar(shim.getClass(), HIVE_SHIMS_FILENAME_PATTERN));

        return String.format("%s,%s,%s", shimCommonJar.toString(), shimCommonSecureJar.toString(),
            shimJar.toString());
    }
From source file:org.apache.hive.hcatalog.templeton.tool.LaunchMapper.java
License:Apache License
    /**
     * Kills child jobs of this launcher that have been tagged with this job's ID.
     */
    private void killLauncherChildJobs(Configuration conf, String jobId) throws IOException {
        // Extract the launcher job submit/start time and use that to scope down
        // the search interval when we look for child jobs.
        long startTime = getTempletonLaunchTime(conf);
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        WebHCatJTShim tracker = ShimLoader.getHadoopShims().getWebHCatShim(conf, ugi);
        try {
            tracker.killJobs(jobId, startTime);
        } finally {
            tracker.close();
        }
    }
From source file:org.apache.hive.hcatalog.templeton.tool.LaunchMapper.java
License:Apache License
    /**
     * Attempts to reconnect to an already running child job of the templeton launcher. This
     * is used in cases where the templeton launcher task has failed and is retried by the
     * MR framework. If reconnect to the child job is possible, the method will continue
     * tracking its progress until completion.
     * @return true if reconnect was successful, false if not supported or
     *         no child jobs were found.
     */
    private boolean tryReconnectToRunningJob(Configuration conf, Context context,
            LauncherDelegator.JobType jobType, String statusdir) throws IOException, InterruptedException {
        if (!reconnectToRunningJobEnabledAndSupported(conf, jobType)) {
            return false;
        }

        long startTime = getTempletonLaunchTime(conf);
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        WebHCatJTShim tracker = ShimLoader.getHadoopShims().getWebHCatShim(conf, ugi);
        try {
            Set<String> childJobs = tracker.getJobs(context.getJobID().toString(), startTime);
            if (childJobs.size() == 0) {
                LOG.info("No child jobs found to reconnect with");
                return false;
            }

            if (childJobs.size() > 1) {
                LOG.warn(String.format("Found more than one child job to reconnect with: %s, skipping reconnect",
                    Arrays.toString(childJobs.toArray())));
                return false;
            }

            String childJobIdString = childJobs.iterator().next();
            org.apache.hadoop.mapred.JobID childJobId = org.apache.hadoop.mapred.JobID.forName(childJobIdString);
            LOG.info(String.format("Reconnecting to an existing job %s", childJobIdString));

            // Update job state with the childJob id
            updateJobStatePercentAndChildId(conf, context.getJobID().toString(), null, childJobIdString);

            do {
                org.apache.hadoop.mapred.JobStatus jobStatus = tracker.getJobStatus(childJobId);
                if (jobStatus.isJobComplete()) {
                    LOG.info(String.format("Child job %s completed", childJobIdString));
                    int exitCode = 0;
                    if (jobStatus.getRunState() != org.apache.hadoop.mapred.JobStatus.SUCCEEDED) {
                        exitCode = 1;
                    }
                    updateJobStateToDoneAndWriteExitValue(conf, statusdir, context.getJobID().toString(),
                        exitCode);
                    break;
                }

                String percent = String.format("map %s%%, reduce %s%%",
                    jobStatus.mapProgress() * 100, jobStatus.reduceProgress() * 100);
                updateJobStatePercentAndChildId(conf, context.getJobID().toString(), percent, null);

                LOG.info("KeepAlive Heart beat");
                context.progress();
                Thread.sleep(POLL_JOBPROGRESS_MSEC);
            } while (true);

            // Reconnect was successful
            return true;
        } catch (IOException ex) {
            LOG.error("Exception encountered in tryReconnectToRunningJob", ex);
            throw ex;
        } finally {
            tracker.close();
        }
    }
From source file:org.apache.hive.hcatalog.templeton.tool.TempletonControllerJob.java
License:Apache License
    /**
     * Enqueue the job and print out the job id for later collection.
     * @see org.apache.hive.hcatalog.templeton.CompleteDelegator
     */
    @Override
    public int run(String[] args) throws IOException, InterruptedException, ClassNotFoundException, TException {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Preparing to submit job: " + Arrays.toString(args));
        }
        Configuration conf = getConf();
        conf.set(JAR_ARGS_NAME, TempletonUtils.encodeArray(args));
        String memoryMb = appConf.mapperMemoryMb();
        if (memoryMb != null && memoryMb.length() != 0) {
            conf.set(AppConfig.HADOOP_MAP_MEMORY_MB, memoryMb);
        }
        String amMemoryMB = appConf.amMemoryMb();
        if (amMemoryMB != null && !amMemoryMB.isEmpty()) {
            conf.set(AppConfig.HADOOP_MR_AM_MEMORY_MB, amMemoryMB);
        }
        String amJavaOpts = appConf.controllerAMChildOpts();
        if (amJavaOpts != null && !amJavaOpts.isEmpty()) {
            conf.set(AppConfig.HADOOP_MR_AM_JAVA_OPTS, amJavaOpts);
        }

        String user = UserGroupInformation.getCurrentUser().getShortUserName();
        conf.set("user.name", user);
        Job job = new Job(conf);
        job.setJarByClass(LaunchMapper.class);
        job.setJobName(TempletonControllerJob.class.getSimpleName());
        job.setMapperClass(LaunchMapper.class);
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setInputFormatClass(SingleInputFormat.class);
        NullOutputFormat<NullWritable, NullWritable> of = new NullOutputFormat<NullWritable, NullWritable>();
        job.setOutputFormatClass(of.getClass());
        job.setNumReduceTasks(0);

        JobClient jc = new JobClient(new JobConf(job.getConfiguration()));

        if (UserGroupInformation.isSecurityEnabled()) {
            Token<DelegationTokenIdentifier> mrdt = jc.getDelegationToken(new Text("mr token"));
            job.getCredentials().addToken(new Text("mr token"), mrdt);
        }
        String metastoreTokenStrForm = addHMSToken(job, user);

        job.submit();
        submittedJobId = job.getJobID();

        if (metastoreTokenStrForm != null) {
            // So that it can be cancelled later from CompleteDelegator.
            DelegationTokenCache.getStringFormTokenCache().storeDelegationToken(submittedJobId.toString(),
                metastoreTokenStrForm);
            LOG.debug("Added metastore delegation token for jobId=" + submittedJobId.toString()
                + " user=" + user);
        }
        return 0;
    }
From source file:org.apache.hive.service.auth.HiveAuthFactory.java
License:Apache License
    public HiveAuthFactory(HiveConf conf) throws TTransportException, IOException {
        this.conf = conf;
        transportMode = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_TRANSPORT_MODE);
        authTypeStr = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION);

        // In http mode we use NOSASL as the default auth type
        if ("http".equalsIgnoreCase(transportMode)) {
            if (authTypeStr == null) {
                authTypeStr = AuthTypes.NOSASL.getAuthName();
            }
        } else {
            if (authTypeStr == null) {
                authTypeStr = AuthTypes.NONE.getAuthName();
            }
            if (authTypeStr.equalsIgnoreCase(AuthTypes.KERBEROS.getAuthName())) {
                String principal = conf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL);
                String keytab = conf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB);
                if (needUgiLogin(UserGroupInformation.getCurrentUser(),
                        SecurityUtil.getServerPrincipal(principal, "0.0.0.0"), keytab)) {
                    saslServer = ShimLoader.getHadoopThriftAuthBridge().createServer(principal, keytab);
                } else {
                    // Using the default constructor to avoid unnecessary UGI login.
                    saslServer = new HadoopThriftAuthBridge.Server();
                }

                // start delegation token manager
                try {
                    // rawStore is only necessary for DBTokenStore
                    Object rawStore = null;
                    String tokenStoreClass = conf
                        .getVar(HiveConf.ConfVars.METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS);
                    if (tokenStoreClass.equals(DBTokenStore.class.getName())) {
                        HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", conf,
                            true);
                        rawStore = baseHandler.getMS();
                    }
                    saslServer.startDelegationTokenSecretManager(conf, rawStore, ServerMode.HIVESERVER2);
                } catch (MetaException | IOException e) {
                    throw new TTransportException("Failed to start token manager", e);
                }
            }
        }
    }
From source file:org.apache.hive.service.auth.HiveAuthFactory.java
License:Apache License
    private static String getKeytabFromUgi() {
        synchronized (UserGroupInformation.class) {
            try {
                if (keytabFile != null) {
                    return (String) keytabFile.get(null);
                } else if (getKeytab != null) {
                    return (String) getKeytab.invoke(UserGroupInformation.getCurrentUser());
                } else {
                    return null;
                }
            } catch (Exception e) {
                LOG.debug("Fail to get keytabFile path via reflection", e);
                return null;
            }
        }
    }
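Several of the examples above hand the current UGI to a shim or service; the closely related doAs pattern runs an action under that identity. Here is a minimal sketch, assuming only the standard Hadoop FileSystem API; the class name DoAsExample is ours.

    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsExample {
        public static void main(String[] args) throws Exception {
            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            // Run the action with this user's credentials on the call stack;
            // getCurrentUser() invoked inside run() returns this same UGI.
            FileSystem fs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
                @Override
                public FileSystem run() throws Exception {
                    return FileSystem.get(new Configuration());
                }
            });
            System.out.println("Working directory: " + fs.getWorkingDirectory());
        }
    }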