Example usage for org.apache.hadoop.security UserGroupInformation getCurrentUser

Introduction

On this page you can find example usage of org.apache.hadoop.security.UserGroupInformation.getCurrentUser().

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getCurrentUser() throws IOException 

Document

Return the current user, including any doAs in the current stack.
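
For illustration, here is a minimal sketch (not taken from the examples below) of how getCurrentUser() interacts with doAs; the user name "alice" is a placeholder, and java.security.PrivilegedExceptionAction is assumed to be imported:

UserGroupInformation realUser = UserGroupInformation.getCurrentUser();
UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser("alice", realUser);
proxyUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
        // Inside doAs, getCurrentUser() resolves to the proxy user ("alice"),
        // not the process login user.
        System.out.println(UserGroupInformation.getCurrentUser().getUserName());
        return null;
    }
});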

Usage

From source file:org.apache.gobblin.yarn.GobblinYarnAppLauncher.java

License:Apache License

private void setupSecurityTokens(ContainerLaunchContext containerLaunchContext) throws IOException {
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();

    // Pass on the credentials from the hadoop token file if present.
    // The value in the token file takes precedence.
    if (System.getenv(HADOOP_TOKEN_FILE_LOCATION) != null) {
        Credentials tokenFileCredentials = Credentials
                .readTokenStorageFile(new File(System.getenv(HADOOP_TOKEN_FILE_LOCATION)), new Configuration());
        credentials.addAll(tokenFileCredentials);
    }

    String tokenRenewer = this.yarnConfiguration.get(YarnConfiguration.RM_PRINCIPAL);
    if (tokenRenewer == null || tokenRenewer.length() == 0) {
        throw new IOException("Failed to get master Kerberos principal for the RM to use as renewer");
    }

    // For now, only getting tokens for the default file-system.
    Token<?>[] tokens = this.fs.addDelegationTokens(tokenRenewer, credentials);
    if (tokens != null) {
        for (Token<?> token : tokens) {
            LOGGER.info("Got delegation token for " + this.fs.getUri() + "; " + token);
        }
    }

    Closer closer = Closer.create();
    try {
        DataOutputBuffer dataOutputBuffer = closer.register(new DataOutputBuffer());
        credentials.writeTokenStorageToStream(dataOutputBuffer);
        ByteBuffer fsTokens = ByteBuffer.wrap(dataOutputBuffer.getData(), 0, dataOutputBuffer.getLength());
        containerLaunchContext.setTokens(fsTokens);
    } catch (Throwable t) {
        throw closer.rethrow(t);
    } finally {
        closer.close();
    }
}
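
YARN materializes the tokens set on the ContainerLaunchContext above into the launched container's UGI, so the container process can read them back through the same API. A minimal sketch, assuming the code runs inside the launched container:

Credentials creds = UserGroupInformation.getCurrentUser().getCredentials();
for (Token<?> token : creds.getAllTokens()) {
    // Each token shipped via ContainerLaunchContext.setTokens() surfaces here.
    System.out.println("Token kind: " + token.getKind() + ", service: " + token.getService());
}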

From source file:org.apache.gobblin.yarn.YarnServiceTest.java

License:Apache License

private void startApp() throws Exception {
    // submit a dummy app
    ApplicationSubmissionContext appSubmissionContext = yarnClient.createApplication()
            .getApplicationSubmissionContext();
    this.applicationId = appSubmissionContext.getApplicationId();

    ContainerLaunchContext containerLaunchContext = BuilderUtils.newContainerLaunchContext(
            Collections.emptyMap(), Collections.emptyMap(), Arrays.asList("sleep", "100"),
            Collections.emptyMap(), null, Collections.emptyMap());

    // Setup the application submission context
    appSubmissionContext.setApplicationName("TestApp");
    appSubmissionContext.setResource(Resource.newInstance(128, 1));
    appSubmissionContext.setPriority(Priority.newInstance(0));
    appSubmissionContext.setAMContainerSpec(containerLaunchContext);

    this.yarnClient.submitApplication(appSubmissionContext);

    // wait for application to be accepted
    int i;
    RMAppAttempt attempt = null;
    for (i = 0; i < 120; i++) {
        ApplicationReport appReport = yarnClient.getApplicationReport(applicationId);

        if (appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED) {
            this.applicationAttemptId = appReport.getCurrentApplicationAttemptId();
            attempt = yarnCluster.getResourceManager().getRMContext().getRMApps()
                    .get(appReport.getCurrentApplicationAttemptId().getApplicationId()).getCurrentAppAttempt();
            break;
        }
        Thread.sleep(1000);
    }

    Assert.assertTrue(i < 120, "timed out waiting for ACCEPTED state");

    // Set the AM-RM token in the UGI for access during testing
    UserGroupInformation.setLoginUser(
            UserGroupInformation.createRemoteUser(UserGroupInformation.getCurrentUser().getUserName()));
    UserGroupInformation.getCurrentUser().addToken(attempt.getAMRMToken());
}
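
The setLoginUser/createRemoteUser combination above replaces the static login user, so the final getCurrentUser() call (made outside any doAs block) resolves to the fresh remote user and the AM-RM token is attached to it. A quick way to confirm the token is visible, not part of the original test:

for (Token<?> token : UserGroupInformation.getCurrentUser().getTokens()) {
    // The AM-RM token kind should appear among the current user's tokens.
    System.out.println("UGI token kind: " + token.getKind());
}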

From source file:org.apache.hama.bsp.ApplicationMaster.java

License:Apache License

/**
 * Main run function for the application master
 *
 * @throws org.apache.hadoop.yarn.exceptions.YarnException
 * @throws IOException
 */
@SuppressWarnings({ "unchecked" })
public void run() throws YarnException, IOException, InterruptedException {
    LOG.info("Starting ApplicationMaster");

    // Note: the Credentials, Token, UserGroupInformation and DataOutputBuffer
    // classes are marked as LimitedPrivate
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    // Now remove the AM->RM token so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    allTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());

    // Create appSubmitterUgi and add original tokens to it
    String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);

    AMRMClientAsync.CallbackHandler allocListener = new RMCallbackHandler();
    amRMClient = AMRMClientAsync.createAMRMClientAsync(1000, allocListener);
    amRMClient.init(localConf);
    amRMClient.start();

    containerListener = createNMCallbackHandler();
    nmClientAsync = new NMClientAsyncImpl(containerListener);
    nmClientAsync.init(localConf);
    nmClientAsync.start();

    // Setup local RPC Server to accept status requests directly from clients
    // TODO need to setup a protocol for client to be able to communicate to
    // the RPC server
    // TODO use the rpc port info to register with the RM for the client to
    // send requests to this app master

    // Register self with ResourceManager
    // This will start heartbeating to the RM
    appMasterHostname = NetUtils.getHostname();
    RegisterApplicationMasterResponse response = amRMClient.registerApplicationMaster(appMasterHostname,
            appMasterRpcPort, appMasterTrackingUrl);
    // Dump out information about cluster capability as seen by the
    // resource manager
    int maxMem = response.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);

    int maxVCores = response.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max vcores capability of resources in this cluster " + maxVCores);

    // A resource ask cannot exceed the max.
    if (containerMemory > maxMem) {
        LOG.info("Container memory specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerMemory + ", max=" + maxMem);
        containerMemory = maxMem;
    }

    if (containerVirtualCores > maxVCores) {
        LOG.info("Container virtual cores specified above max threshold of cluster." + " Using max value."
                + ", specified=" + containerVirtualCores + ", max=" + maxVCores);
        containerVirtualCores = maxVCores;
    }

    List<Container> previousAMRunningContainers = response.getContainersFromPreviousAttempts();
    LOG.info(appAttemptID + " received " + previousAMRunningContainers.size()
            + " previous attempts' running containers on AM registration.");
    for (Container container : previousAMRunningContainers) {
        launchedContainers.add(container.getId());
    }
    numAllocatedContainers.addAndGet(previousAMRunningContainers.size());

    int numTotalContainersToRequest = numTotalContainers - previousAMRunningContainers.size();
    // Setup ask for containers from RM
    // Send request for containers to RM
    // Until we get our fully allocated quota, we keep on polling RM for
    // containers
    // Keep looping until all the containers are launched and shell script
    // executed on them ( regardless of success/failure).
    for (int i = 0; i < numTotalContainersToRequest; ++i) {
        AMRMClient.ContainerRequest containerAsk = setupContainerAskForRM();
        amRMClient.addContainerRequest(containerAsk);
    }
    numRequestedContainers.set(numTotalContainers);
}

From source file:org.apache.hama.bsp.BSPApplicationMaster.java

License:Apache License

/**
 * Connects to the Resource Manager.
 * 
 * @param yarnConf the YARN configuration used to locate the ResourceManager scheduler address
 * @return a new RPC connection to the Resource Manager.
 */
private ApplicationMasterProtocol getYarnRPCConnection(Configuration yarnConf) throws IOException {
    // Connect to the Scheduler of the ResourceManager.
    UserGroupInformation currentUser = UserGroupInformation.createRemoteUser(appAttemptId.toString());
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();

    final InetSocketAddress rmAddress = NetUtils.createSocketAddr(yarnConf
            .get(YarnConfiguration.RM_SCHEDULER_ADDRESS, YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS));

    Token<? extends TokenIdentifier> amRMToken = setupAndReturnAMRMToken(rmAddress, credentials.getAllTokens());
    currentUser.addToken(amRMToken);

    final Configuration conf = yarnConf;

    ApplicationMasterProtocol client = currentUser.doAs(new PrivilegedAction<ApplicationMasterProtocol>() {
        @Override
        public ApplicationMasterProtocol run() {
            return (ApplicationMasterProtocol) yarnRPC.getProxy(ApplicationMasterProtocol.class, rmAddress,
                    conf);
        }
    });
    LOG.info("Connecting to ResourceManager at " + rmAddress);
    return client;
}

From source file:org.apache.hama.ipc.AsyncRPC.java

License:Apache License

/**
 * Construct a client-side proxy object that implements the named protocol,
 * talking to a server at the named address.
 *
 * @param protocol the protocol interface the proxy should implement
 * @param clientVersion the expected version of the protocol
 * @param addr the address of the server
 * @param conf the configuration to use
 * @param factory the socket factory used to create connections
 * @return the proxy
 * @throws IOException
 */
public static VersionedProtocol getProxy(Class<? extends VersionedProtocol> protocol, long clientVersion,
        InetSocketAddress addr, Configuration conf, SocketFactory factory) throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    return getProxy(protocol, clientVersion, addr, ugi, conf, factory, 0);
}

From source file:org.apache.hama.ipc.RPC.java

License:Apache License

/**
 * Construct a client-side proxy object that implements the named protocol,
 * talking to a server at the named address.
 */
public static VersionedProtocol getProxy(Class<? extends VersionedProtocol> protocol, long clientVersion,
        InetSocketAddress addr, Configuration conf, SocketFactory factory, int rpcTimeout) throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    return getProxy(protocol, clientVersion, addr, ugi, conf, factory, rpcTimeout);
}

From source file:org.apache.hcatalog.hcatmix.HCatMixSetup.java

License:Apache License

public void createTable(HiveTableSchema hiveTableSchema) throws IOException, TException, NoSuchObjectException,
        MetaException, AlreadyExistsException, InvalidObjectException {
    LOG.info("About to create table: " + hiveTableSchema.getName());
    Table table = new Table();
    table.setDbName(hiveTableSchema.getDatabaseName());
    table.setTableName(hiveTableSchema.getName());
    try {
        table.setOwner(UserGroupInformation.getCurrentUser().getUserName());
    } catch (IOException e) {
        throw new IOException("Couldn't get user information. Cannot create table", e);
    }
    table.setOwnerIsSet(true);
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(hiveTableSchema.getColumnFieldSchemas());
    table.setSd(sd);
    sd.setParameters(new HashMap<String, String>());
    sd.setSerdeInfo(new SerDeInfo());
    sd.getSerdeInfo().setName(table.getTableName());
    sd.getSerdeInfo().setParameters(new HashMap<String, String>());

    sd.setInputFormat(org.apache.hadoop.hive.ql.io.RCFileInputFormat.class.getName());
    sd.setOutputFormat(org.apache.hadoop.hive.ql.io.RCFileOutputFormat.class.getName());
    sd.getSerdeInfo().getParameters().put(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT,
            "1");
    sd.getSerdeInfo().setSerializationLib(org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe.class.getName());
    table.setPartitionKeys(hiveTableSchema.getPartitionFieldSchemas());

    hiveClient.createTable(table);
    LOG.info("Successfully created table: " + table.getTableName());
}

From source file:org.apache.hcatalog.hcatmix.load.HadoopLoadGenerator.java

License:Apache License

/**
 * Prepare input directory/jobConf and launch the hadoop job, for load testing
 *
 * @param confFileName The properties file for the task, should be available in the classpath
 * @param conf the Hadoop configuration to use, may be null
 * @return the results of the load test as a map keyed by timestamp
 * @throws IOException
 * @throws MetaException
 * @throws TException
 */
public SortedMap<Long, ReduceResult> runLoadTest(String confFileName, Configuration conf)
        throws Exception, MetaException, TException {
    JobConf jobConf;
    if (conf != null) {
        jobConf = new JobConf(conf);
    } else {
        jobConf = new JobConf(new Configuration());
    }
    InputStream confFileIS;
    try {
        confFileIS = HCatMixUtils.getInputStream(confFileName);
    } catch (Exception e) {
        LOG.error("Couldn't load configuration file " + confFileName);
        throw e;
    }
    Properties props = new Properties();
    try {
        props.load(confFileIS);
    } catch (IOException e) {
        LOG.error("Couldn't load properties file: " + confFileName, e);
        throw e;
    }

    LOG.info("Loading configuration file: " + confFileName);
    addToJobConf(jobConf, props, Conf.MAP_RUN_TIME_MINUTES);
    addToJobConf(jobConf, props, Conf.STAT_COLLECTION_INTERVAL_MINUTE);
    addToJobConf(jobConf, props, Conf.THREAD_INCREMENT_COUNT);
    addToJobConf(jobConf, props, Conf.THREAD_INCREMENT_INTERVAL_MINUTES);
    addToJobConf(jobConf, props, Conf.THREAD_COMPLETION_BUFFER_MINUTES);

    int numMappers = Integer
            .parseInt(props.getProperty(Conf.NUM_MAPPERS.propName, "" + Conf.NUM_MAPPERS.defaultValue));
    Path inputDir = new Path(props.getProperty(Conf.INPUT_DIR.propName, Conf.INPUT_DIR.defaultValueStr));
    Path outputDir = new Path(props.getProperty(Conf.OUTPUT_DIR.propName, Conf.OUTPUT_DIR.defaultValueStr));

    jobConf.setJobName(JOB_NAME);
    jobConf.setNumMapTasks(numMappers);
    jobConf.setMapperClass(HCatMapper.class);
    jobConf.setJarByClass(HCatMapper.class);
    jobConf.setReducerClass(HCatReducer.class);
    jobConf.setMapOutputKeyClass(LongWritable.class);
    jobConf.setMapOutputValueClass(IntervalResult.class);
    jobConf.setOutputKeyClass(LongWritable.class);
    jobConf.setOutputValueClass(ReduceResult.class);
    jobConf.setOutputFormat(SequenceFileOutputFormat.class);
    jobConf.set(Conf.TASK_CLASS_NAMES.getJobConfKey(),
            props.getProperty(Conf.TASK_CLASS_NAMES.propName, Conf.TASK_CLASS_NAMES.defaultValueStr));

    fs = FileSystem.get(jobConf);
    Path jarRoot = new Path("/tmp/hcatmix_jar_" + new Random().nextInt());
    HadoopUtils.uploadClasspathAndAddToJobConf(jobConf, jarRoot);
    fs.deleteOnExit(jarRoot);

    FileInputFormat.setInputPaths(jobConf, createInputFiles(inputDir, numMappers));
    if (fs.exists(outputDir)) {
        fs.delete(outputDir, true);
    }
    FileOutputFormat.setOutputPath(jobConf, outputDir);

    // Set up delegation token required for hiveMetaStoreClient in map task
    HiveConf hiveConf = new HiveConf(HadoopLoadGenerator.class);
    HiveMetaStoreClient hiveClient = new HiveMetaStoreClient(hiveConf);
    String tokenStr = hiveClient.getDelegationToken(UserGroupInformation.getCurrentUser().getUserName(),
            "mapred");
    Token<? extends AbstractDelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
    token.decodeFromUrlString(tokenStr);
    token.setService(new Text(METASTORE_TOKEN_SIGNATURE));
    jobConf.getCredentials().addToken(new Text(METASTORE_TOKEN_KEY), token);

    // Submit the job; runJob() blocks until the job completes
    LOG.info("Submitting hadoop job");
    RunningJob j = JobClient.runJob(jobConf);
    LOG.info("Job ID is: " + j.getID());
    if (!j.isSuccessful()) {
        throw new IOException("Job failed");
    }
    return readResult(outputDir, jobConf);
}

From source file:org.apache.hcatalog.hcatmix.load.tasks.HCatLoadTask.java

License:Apache License

@Override
public void configure(JobConf jobConf) throws Exception {
    Token token = jobConf.getCredentials().getToken(new Text(HadoopLoadGenerator.METASTORE_TOKEN_KEY));
    if (token == null) {
        throw new IllegalArgumentException("Delegation token needs to be set");
    }

    try {
        UserGroupInformation.getCurrentUser().addToken(token);
    } catch (IOException e) {
        LOG.error("Error adding token to user", e);
    }

    hiveConf = new HiveConf(Task.class);
    hiveConf.set(HIVE_CONF_TOKEN_KEY, HadoopLoadGenerator.METASTORE_TOKEN_SIGNATURE);
    hiveClient = new ThreadLocal<HiveMetaStoreClient>() {
        @Override
        protected HiveMetaStoreClient initialValue() {
            try {
                return new HiveMetaStoreClient(hiveConf);
            } catch (MetaException e) {
                throw new RuntimeException("Couldn't create HiveMetaStoreClient", e);
            }
        }
    };

    HiveTableSchema tableSchema = HCatMixUtils.getFirstTableFromConf(LOAD_TEST_HCAT_SPEC_FILE);
    dbName = tableSchema.getDatabaseName();
    tableName = tableSchema.getName();
    LOG.info("Table to do load test on is: " + dbName + "." + tableName);
}

From source file:org.apache.hcatalog.mapreduce.Security.java

License:Apache License

void handleSecurity(Credentials credentials, OutputJobInfo outputJobInfo, HiveMetaStoreClient client,
        Configuration conf, boolean harRequested) throws IOException, MetaException, TException, Exception {
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // check if oozie has set up a hcat deleg. token - if so use it
        TokenSelector<? extends TokenIdentifier> hiveTokenSelector = new DelegationTokenSelector();
        // Oozie does not change the service field of the token, so by default
        // the generated token's service is new Text("").
        // HiveClient will use TokenSelector.selectToken() with the service set
        // to an empty Text if the hive.metastore.token.signature property is null.
        Token<? extends TokenIdentifier> hiveToken = hiveTokenSelector.selectToken(new Text(), ugi.getTokens());
        if (hiveToken == null) {
            // we did not get token set up by oozie, let's get them ourselves here.
            // we essentially get a token per unique Output HCatTableInfo - this is
            // done because through Pig, setOutput() method is called multiple times
            // We want to only get the token once per unique output HCatTableInfo -
            // we cannot just get one token since in multi-query case (> 1 store in 1 job)
            // or the case when a single pig script results in > 1 jobs, the single
            // token will get cancelled by the output committer and the subsequent
            // stores will fail - by tying the token with the concatenation of
            // dbname, tablename and partition keyvalues of the output
            // TableInfo, we can have as many tokens as there are stores and the TokenSelector
            // will correctly pick the right tokens which the committer will use and
            // cancel.
            String tokenSignature = getTokenSignature(outputJobInfo);
            // Get delegation tokens from the hcat server and store them into the "job".
            // These will be used to publish partitions to hcat, normally in
            // OutputCommitter.commitJob().
            // when the JobTracker in Hadoop MapReduce starts supporting renewal of
            // arbitrary tokens, the renewer should be the principal of the JobTracker
            hiveToken = HCatUtil.extractThriftToken(client.getDelegationToken(ugi.getUserName()),
                    tokenSignature);

            if (harRequested) {
                TokenSelector<? extends TokenIdentifier> jtTokenSelector = new org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenSelector();
                Token jtToken = jtTokenSelector.selectToken(
                        org.apache.hadoop.security.SecurityUtil.buildTokenService(
                                HCatHadoopShims.Instance.get().getResourceManagerAddress(conf)),
                        ugi.getTokens());
                if (jtToken == null) {
                    //we don't need to cancel this token as the TokenRenewer for JT tokens
                    //takes care of cancelling them
                    credentials.addToken(new Text("hcat jt token"),
                            HCatUtil.getJobTrackerDelegationToken(conf, ugi.getUserName()));
                }
            }

            credentials.addToken(new Text(ugi.getUserName() + "_" + tokenSignature), hiveToken);
            // this will be used by the outputcommitter to pass on to the metastore client
            // which in turn will pass on to the TokenSelector so that it can select
            // the right token.
            conf.set(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE, tokenSignature);
        }
    }
}