Example usage for org.apache.hadoop.security.UserGroupInformation.getTokens()

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.getTokens().

Prototype

public Collection<Token<? extends TokenIdentifier>> getTokens() 

Document

Obtain the collection of tokens associated with this user.
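The example usages below show getTokens() in realistic contexts. As a minimal, self-contained sketch (assuming only a default client-side Hadoop configuration), the call can be exercised like this:

import java.io.IOException;
import java.util.Collection;

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class ListUserTokens {
    public static void main(String[] args) throws IOException {
        // Obtain the UGI for the current user; any token file referenced by the
        // HADOOP_TOKEN_FILE_LOCATION environment variable is read automatically
        // during UGI initialization.
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

        // getTokens() returns the tokens held in this user's credentials.
        Collection<Token<? extends TokenIdentifier>> tokens = ugi.getTokens();
        for (Token<? extends TokenIdentifier> token : tokens) {
            System.out.println("kind=" + token.getKind() + " service=" + token.getService());
        }
    }
}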

Usage

From source file: azkaban.jobtype.HadoopJavaJobRunnerMain.java

License: Apache License

public HadoopJavaJobRunnerMain() throws Exception {
    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            cancelJob();
        }
    });

    try {
        _jobName = System.getenv(ProcessJob.JOB_NAME_ENV);
        String propsFile = System.getenv(ProcessJob.JOB_PROP_ENV);

        _logger = Logger.getRootLogger();
        _logger.removeAllAppenders();
        ConsoleAppender appender = new ConsoleAppender(DEFAULT_LAYOUT);
        appender.activateOptions();
        _logger.addAppender(appender);
        _logger.setLevel(Level.INFO); //Explicitly setting level to INFO

        Properties props = new Properties();
        props.load(new BufferedReader(new FileReader(propsFile)));

        HadoopConfigurationInjector.injectResources(new Props(null, props));

        final Configuration conf = new Configuration();

        UserGroupInformation.setConfiguration(conf);
        securityEnabled = UserGroupInformation.isSecurityEnabled();

        _logger.info("Running job " + _jobName);
        String className = props.getProperty(JOB_CLASS);
        if (className == null) {
            throw new Exception("Class name is not set.");
        }
        _logger.info("Class name " + className);

        UserGroupInformation loginUser = null;
        UserGroupInformation proxyUser = null;

        if (shouldProxy(props)) {
            String userToProxy = props.getProperty("user.to.proxy");
            if (securityEnabled) {
                String filelocation = System.getenv(HADOOP_TOKEN_FILE_LOCATION);
                _logger.info("Found token file " + filelocation);
                _logger.info("Security enabled is " + UserGroupInformation.isSecurityEnabled());

                _logger.info("Setting mapreduce.job.credentials.binary to " + filelocation);
                System.setProperty("mapreduce.job.credentials.binary", filelocation);

                _logger.info("Proxying enabled.");

                loginUser = UserGroupInformation.getLoginUser();

                _logger.info("Current logged in user is " + loginUser.getUserName());

                proxyUser = UserGroupInformation.createProxyUser(userToProxy, loginUser);
                for (Token<?> token : loginUser.getTokens()) {
                    proxyUser.addToken(token);
                }
            } else {
                proxyUser = UserGroupInformation.createRemoteUser(userToProxy);
            }
            _logger.info("Proxied as user " + userToProxy);
        }

        // Create the object using proxy
        if (shouldProxy(props)) {
            _javaObject = getObjectAsProxyUser(props, _logger, _jobName, className, proxyUser);
        } else {
            _javaObject = getObject(_jobName, className, props, _logger);
        }

        if (_javaObject == null) {
            _logger.info("Could not create java object to run job: " + className);
            throw new Exception("Could not create running object");
        }
        _logger.info("Got object " + _javaObject.toString());

        _cancelMethod = props.getProperty(CANCEL_METHOD_PARAM, DEFAULT_CANCEL_METHOD);

        final String runMethod = props.getProperty(RUN_METHOD_PARAM, DEFAULT_RUN_METHOD);
        _logger.info("Invoking method " + runMethod);

        if (shouldProxy(props)) {
            _logger.info("Proxying enabled.");
            runMethodAsUser(props, _javaObject, runMethod, proxyUser);
        } else {
            _logger.info("Proxy check failed, not proxying run.");
            runMethod(_javaObject, runMethod);
        }

        _isFinished = true;

        // Get the generated properties and store them to disk, to be read
        // by ProcessJob.
        try {
            final Method generatedPropertiesMethod = _javaObject.getClass()
                    .getMethod(GET_GENERATED_PROPERTIES_METHOD, new Class<?>[] {});
            Object outputGendProps = generatedPropertiesMethod.invoke(_javaObject, new Object[] {});

            if (outputGendProps != null) {
                final Method toPropertiesMethod = outputGendProps.getClass().getMethod("toProperties",
                        new Class<?>[] {});
                Properties properties = (Properties) toPropertiesMethod.invoke(outputGendProps,
                        new Object[] {});

                Props outputProps = new Props(null, properties);
                outputGeneratedProperties(outputProps);
            } else {
                _logger.info(GET_GENERATED_PROPERTIES_METHOD
                        + " method returned null.  No properties to pass along");
            }
        } catch (NoSuchMethodException e) {
            _logger.info(String.format(
                    "Apparently there isn't a method[%s] on object[%s], using " + "empty Props object instead.",
                    GET_GENERATED_PROPERTIES_METHOD, _javaObject));
            outputGeneratedProperties(new Props());
        }
    } catch (Exception e) {
        _isFinished = true;
        throw e;
    }
}

From source file: azkaban.jobtype.HadoopSecureWrapperUtils.java

License: Apache License

/**
 * Creates the proxy user required to run as userToProxy on a security-enabled grid.
 *
 * @param userToProxy the name of the user to proxy as
 * @param filelocation the location of the Hadoop delegation token file
 * @param log the logger to report progress to
 * @return a UserGroupInformation object for the specified userToProxy, which will also contain
 *         the logged in user's tokens
 * @throws IOException
 */
private static UserGroupInformation createSecurityEnabledProxyUser(String userToProxy, String filelocation,
        Logger log) throws IOException {

    if (!new File(filelocation).exists()) {
        throw new RuntimeException("hadoop token file doesn't exist.");
    }

    log.info("Found token file.  Setting " + HadoopSecurityManager.MAPREDUCE_JOB_CREDENTIALS_BINARY + " to "
            + filelocation);
    System.setProperty(HadoopSecurityManager.MAPREDUCE_JOB_CREDENTIALS_BINARY, filelocation);

    UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
    log.info("Current logged in user is " + loginUser.getUserName());

    UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(userToProxy, loginUser);

    for (Token<?> token : loginUser.getTokens()) {
        proxyUser.addToken(token);
    }
    return proxyUser;
}

From source file: com.bigstep.datalake.TokenAspect.java

License: Apache License

@VisibleForTesting
Token<DelegationTokenIdentifier> selectDelegationToken(UserGroupInformation ugi) {
    return dtSelector.selectToken(serviceName, ugi.getTokens());
}

From source file: com.datatorrent.stram.LaunchContainerRunnable.java

License: Apache License

public static ByteBuffer getTokens(UserGroupInformation ugi,
        Token<StramDelegationTokenIdentifier> delegationToken) {
    try {
        Collection<Token<? extends TokenIdentifier>> tokens = ugi.getTokens();
        Credentials credentials = new Credentials();
        for (Token<? extends TokenIdentifier> token : tokens) {
            if (!token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                credentials.addToken(token.getService(), token);
                LOG.info("Passing container token {}", token);
            }
        }
        credentials.addToken(delegationToken.getService(), delegationToken);
        DataOutputBuffer dataOutput = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dataOutput);
        byte[] tokenBytes = dataOutput.getData();
        ByteBuffer cTokenBuf = ByteBuffer.wrap(tokenBytes);
        return cTokenBuf.duplicate();
    } catch (IOException e) {
        throw new RuntimeException("Error generating delegation token", e);
    }
}

From source file: eu.stratosphere.yarn.Utils.java

License: Apache License

public static void setTokensFor(ContainerLaunchContext amContainer, Path[] paths, Configuration conf)
        throws IOException {
    Credentials credentials = new Credentials();
    // for HDFS
    TokenCache.obtainTokensForNamenodes(credentials, paths, conf);
    // for user
    UserGroupInformation currUsr = UserGroupInformation.getCurrentUser();

    Collection<Token<? extends TokenIdentifier>> usrTok = currUsr.getTokens();
    for (Token<? extends TokenIdentifier> token : usrTok) {
        final Text id = new Text(token.getIdentifier());
        LOG.info("Adding user token " + id + " with " + token);
        credentials.addToken(id, token);
    }
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    LOG.debug("Wrote tokens. Credentials buffer length: " + dob.getLength());

    ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    amContainer.setTokens(securityTokens);
}

From source file: org.apache.flink.runtime.security.SecurityContext.java

License: Apache License

public static void install(SecurityConfiguration config) throws Exception {

    // perform static initialization of UGI, JAAS
    if (installedContext != null) {
        LOG.warn("overriding previous security context");
    }

    // establish the JAAS config
    JaasConfiguration jaasConfig = new JaasConfiguration(config.keytab, config.principal);
    javax.security.auth.login.Configuration.setConfiguration(jaasConfig);

    populateSystemSecurityProperties(config.flinkConf);

    // establish the UGI login user
    UserGroupInformation.setConfiguration(config.hadoopConf);

    UserGroupInformation loginUser;

    if (UserGroupInformation.isSecurityEnabled() && config.keytab != null
            && !StringUtils.isBlank(config.principal)) {
        String keytabPath = (new File(config.keytab)).getAbsolutePath();

        UserGroupInformation.loginUserFromKeytab(config.principal, keytabPath);

        loginUser = UserGroupInformation.getLoginUser();

        // supplement with any available tokens
        String fileLocation = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
        if (fileLocation != null) {
            /*
             * Use reflection API since the API semantics are not available in Hadoop1 profile. Below APIs are
             * used in the context of reading the stored tokens from UGI.
             * Credentials cred = Credentials.readTokenStorageFile(new File(fileLocation), config.hadoopConf);
             * loginUser.addCredentials(cred);
             */
            try {
                Method readTokenStorageFileMethod = Credentials.class.getMethod("readTokenStorageFile",
                        File.class, org.apache.hadoop.conf.Configuration.class);
                Credentials cred = (Credentials) readTokenStorageFileMethod.invoke(null, new File(fileLocation),
                        config.hadoopConf);
                Method addCredentialsMethod = UserGroupInformation.class.getMethod("addCredentials",
                        Credentials.class);
                addCredentialsMethod.invoke(loginUser, cred);
            } catch (NoSuchMethodException e) {
                LOG.warn("Could not find method implementations in the shaded jar. Exception: {}", e);
            }
        }
    } else {
        // login with current user credentials (e.g. ticket cache)
        try {
            //Use reflection API to get the login user object
            //UserGroupInformation.loginUserFromSubject(null);
            Method loginUserFromSubjectMethod = UserGroupInformation.class.getMethod("loginUserFromSubject",
                    Subject.class);
            Subject subject = null;
            loginUserFromSubjectMethod.invoke(null, subject);
        } catch (NoSuchMethodException e) {
            LOG.warn("Could not find method implementations in the shaded jar. Exception: {}", e);
        }

        loginUser = UserGroupInformation.getLoginUser();
        // note that the stored tokens are read automatically
    }

    boolean delegationToken = false;
    final Text HDFS_DELEGATION_KIND = new Text("HDFS_DELEGATION_TOKEN");
    Collection<Token<? extends TokenIdentifier>> usrTok = loginUser.getTokens();
    for (Token<? extends TokenIdentifier> token : usrTok) {
        final Text id = new Text(token.getIdentifier());
        LOG.debug("Found user token " + id + " with " + token);
        if (token.getKind().equals(HDFS_DELEGATION_KIND)) {
            delegationToken = true;
        }
    }

    if (UserGroupInformation.isSecurityEnabled() && !loginUser.hasKerberosCredentials()) {
        //throw an error in non-yarn deployment if kerberos cache is not available
        if (!delegationToken) {
            LOG.error("Hadoop Security is enabled but current login user does not have Kerberos Credentials");
            throw new RuntimeException(
                    "Hadoop Security is enabled but current login user does not have Kerberos Credentials");
        }
    }

    installedContext = new SecurityContext(loginUser);
}

From source file: org.apache.flink.runtime.util.HadoopUtils.java

License: Apache License

/**
 * Indicates whether the current user has an HDFS delegation token.
 */
public static boolean hasHDFSDelegationToken() throws Exception {
    UserGroupInformation loginUser = UserGroupInformation.getCurrentUser();
    Collection<Token<? extends TokenIdentifier>> usrTok = loginUser.getTokens();
    for (Token<? extends TokenIdentifier> token : usrTok) {
        if (token.getKind().equals(HDFS_DELEGATION_TOKEN_KIND)) {
            return true;
        }
    }
    return false;
}

From source file: org.apache.flink.yarn.YarnApplicationMasterRunner.java

License: Apache License

/**
 * The instance entry point for the YARN application master. Obtains user group
 * information and calls the main work method {@link #runApplicationMaster()} as a
 * privileged action.
 *
 * @param args The command line arguments.
 * @return The process exit code.
 */
protected int run(String[] args) {
    try {
        LOG.debug("All environment variables: {}", ENV);

        final String yarnClientUsername = ENV.get(YarnConfigKeys.ENV_CLIENT_USERNAME);
        require(yarnClientUsername != null, "YARN client user name environment variable {} not set",
                YarnConfigKeys.ENV_CLIENT_USERNAME);

        final UserGroupInformation currentUser;
        try {
            currentUser = UserGroupInformation.getCurrentUser();
        } catch (Throwable t) {
            throw new Exception("Cannot access UserGroupInformation information for current user", t);
        }

        LOG.info("YARN daemon runs as user {}. Running Flink Application Master/JobManager as user {}",
                currentUser.getShortUserName(), yarnClientUsername);

        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(yarnClientUsername);

        // transfer all security tokens, for example for authenticated HDFS and HBase access
        for (Token<?> token : currentUser.getTokens()) {
            ugi.addToken(token);
        }

        // run the actual work in a secured privileged action
        return ugi.doAs(new PrivilegedAction<Integer>() {
            @Override
            public Integer run() {
                return runApplicationMaster();
            }
        });
    } catch (Throwable t) {
        // make sure that whatever happens ends up in the log
        LOG.error("YARN Application Master initialization failed", t);
        return INIT_ERROR_EXIT_CODE;
    }
}

From source file: org.apache.hcatalog.mapreduce.Security.java

License: Apache License

void handleSecurity(Credentials credentials, OutputJobInfo outputJobInfo, HiveMetaStoreClient client,
        Configuration conf, boolean harRequested) throws IOException, MetaException, TException, Exception {
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // check if oozie has set up an hcat delegation token - if so use it
        TokenSelector<? extends TokenIdentifier> hiveTokenSelector = new DelegationTokenSelector();
        // Oozie does not change the service field of the token, so by default the
        // generated token's service is new Text("").
        // HiveClient will call TokenSelector.selectToken() with the service set to
        // empty Text if the hive.metastore.token.signature property is set to null.
        Token<? extends TokenIdentifier> hiveToken = hiveTokenSelector.selectToken(new Text(), ugi.getTokens());
        if (hiveToken == null) {
            // we did not get a token set up by oozie, so let's get one ourselves here.
            // we essentially get a token per unique Output HCatTableInfo - this is
            // done because through Pig, setOutput() method is called multiple times
            // We want to only get the token once per unique output HCatTableInfo -
            // we cannot just get one token since in multi-query case (> 1 store in 1 job)
            // or the case when a single pig script results in > 1 jobs, the single
            // token will get cancelled by the output committer and the subsequent
            // stores will fail - by tying the token with the concatenation of
            // dbname, tablename and partition keyvalues of the output
            // TableInfo, we can have as many tokens as there are stores and the TokenSelector
            // will correctly pick the right tokens which the committer will use and
            // cancel.
            String tokenSignature = getTokenSignature(outputJobInfo);
            // get delegation tokens from the hcat server and store them into the "job".
            // These will be used to publish partitions to hcat, normally in
            // OutputCommitter.commitJob().
            // when the JobTracker in Hadoop MapReduce starts supporting renewal of
            // arbitrary tokens, the renewer should be the principal of the JobTracker
            hiveToken = HCatUtil.extractThriftToken(client.getDelegationToken(ugi.getUserName()),
                    tokenSignature);

            if (harRequested) {
                TokenSelector<? extends TokenIdentifier> jtTokenSelector = new org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenSelector();
                Token jtToken = jtTokenSelector.selectToken(
                        org.apache.hadoop.security.SecurityUtil.buildTokenService(
                                HCatHadoopShims.Instance.get().getResourceManagerAddress(conf)),
                        ugi.getTokens());
                if (jtToken == null) {
                    //we don't need to cancel this token as the TokenRenewer for JT tokens
                    //takes care of cancelling them
                    credentials.addToken(new Text("hcat jt token"),
                            HCatUtil.getJobTrackerDelegationToken(conf, ugi.getUserName()));
                }
            }

            credentials.addToken(new Text(ugi.getUserName() + "_" + tokenSignature), hiveToken);
            // this will be used by the outputcommitter to pass on to the metastore client
            // which in turn will pass on to the TokenSelector so that it can select
            // the right token.
            conf.set(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE, tokenSignature);
        }
    }
}

From source file: org.apache.hive.hcatalog.mapreduce.Security.java

License: Apache License

void handleSecurity(Credentials credentials, OutputJobInfo outputJobInfo, IMetaStoreClient client,
        Configuration conf, boolean harRequested) throws IOException, MetaException, TException, Exception {
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // check if oozie has set up an hcat delegation token - if so use it
        TokenSelector<? extends TokenIdentifier> hiveTokenSelector = new DelegationTokenSelector();
        // Oozie does not change the service field of the token, so by default the
        // generated token's service is new Text("").
        // HiveClient will call TokenSelector.selectToken() with the service set to
        // empty Text if the hive.metastore.token.signature property is set to null.
        Token<? extends TokenIdentifier> hiveToken = hiveTokenSelector.selectToken(new Text(), ugi.getTokens());
        if (hiveToken == null) {
            // we did not get a token set up by oozie, so let's get one ourselves here.
            // we essentially get a token per unique Output HCatTableInfo - this is
            // done because through Pig, setOutput() method is called multiple times
            // We want to only get the token once per unique output HCatTableInfo -
            // we cannot just get one token since in multi-query case (> 1 store in 1 job)
            // or the case when a single pig script results in > 1 jobs, the single
            // token will get cancelled by the output committer and the subsequent
            // stores will fail - by tying the token with the concatenation of
            // dbname, tablename and partition keyvalues of the output
            // TableInfo, we can have as many tokens as there are stores and the TokenSelector
            // will correctly pick the right tokens which the committer will use and
            // cancel.
            String tokenSignature = getTokenSignature(outputJobInfo);
            // get delegation tokens from the hcat server and store them into the "job".
            // These will be used to publish partitions to hcat, normally in
            // OutputCommitter.commitJob().
            // when the JobTracker in Hadoop MapReduce starts supporting renewal of
            // arbitrary tokens, the renewer should be the principal of the JobTracker
            hiveToken = HCatUtil.extractThriftToken(
                    client.getDelegationToken(ugi.getUserName(), ugi.getUserName()), tokenSignature);

            if (harRequested) {
                TokenSelector<? extends TokenIdentifier> jtTokenSelector = new org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenSelector();
                Token jtToken = jtTokenSelector.selectToken(
                        org.apache.hadoop.security.SecurityUtil.buildTokenService(
                                ShimLoader.getHadoopShims().getHCatShim().getResourceManagerAddress(conf)),
                        ugi.getTokens());
                if (jtToken == null) {
                    //we don't need to cancel this token as the TokenRenewer for JT tokens
                    //takes care of cancelling them
                    credentials.addToken(new Text("hcat jt token"),
                            HCatUtil.getJobTrackerDelegationToken(conf, ugi.getUserName()));
                }
            }

            credentials.addToken(new Text(ugi.getUserName() + "_" + tokenSignature), hiveToken);
            // this will be used by the outputcommitter to pass on to the metastore client
            // which in turn will pass on to the TokenSelector so that it can select
            // the right token.
            conf.set(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE, tokenSignature);
        }
    }
}