Example usage for org.apache.hadoop.security UserGroupInformation loginUserFromKeytab

Introduction

On this page you can find example usage for org.apache.hadoop.security UserGroupInformation loginUserFromKeytab.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void loginUserFromKeytab(String user, String path) throws IOException 

Document

Log a user in from a keytab file.
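
Before the example listings, here is a minimal sketch of the call itself; the principal and keytab path are placeholders, and Kerberos authentication must be enabled in the Configuration before logging in.

// Enable Kerberos authentication, then log in from a keytab.
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);

// Placeholder principal and keytab path; substitute your own values.
UserGroupInformation.loginUserFromKeytab("service/host@EXAMPLE.COM",
        "/etc/security/keytabs/service.keytab");
UserGroupInformation loginUser = UserGroupInformation.getLoginUser();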

Usage

From source file:org.apache.flink.runtime.security.modules.HadoopModule.java

License:Apache License

@Override
public void install(SecurityUtils.SecurityConfiguration securityConfig) throws SecurityInstallException {

    UserGroupInformation.setConfiguration(securityConfig.getHadoopConfiguration());

    try {
        if (UserGroupInformation.isSecurityEnabled() && !StringUtils.isBlank(securityConfig.getKeytab())
                && !StringUtils.isBlank(securityConfig.getPrincipal())) {
            String keytabPath = (new File(securityConfig.getKeytab())).getAbsolutePath();

            UserGroupInformation.loginUserFromKeytab(securityConfig.getPrincipal(), keytabPath);

            loginUser = UserGroupInformation.getLoginUser();

            // supplement with any available tokens
            String fileLocation = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
            if (fileLocation != null) {
                /*
                 * Use reflection API since the API semantics are not available in Hadoop1 profile. Below APIs are
                 * used in the context of reading the stored tokens from UGI.
                 * Credentials cred = Credentials.readTokenStorageFile(new File(fileLocation), config.hadoopConf);
                 * loginUser.addCredentials(cred);
                */
                try {
                    Method readTokenStorageFileMethod = Credentials.class.getMethod("readTokenStorageFile",
                            File.class, org.apache.hadoop.conf.Configuration.class);
                    Credentials cred = (Credentials) readTokenStorageFileMethod.invoke(null,
                            new File(fileLocation), securityConfig.getHadoopConfiguration());
                    Method addCredentialsMethod = UserGroupInformation.class.getMethod("addCredentials",
                            Credentials.class);
                    addCredentialsMethod.invoke(loginUser, cred);
                } catch (NoSuchMethodException e) {
                    LOG.warn("Could not find method implementations in the shaded jar. Exception: {}", e);
                } catch (InvocationTargetException e) {
                    throw e.getTargetException();
                }
            }
        } else {
            // login with current user credentials (e.g. ticket cache, OS login)
            // note that the stored tokens are read automatically
            try {
                //Use reflection API to get the login user object
                //UserGroupInformation.loginUserFromSubject(null);
                Method loginUserFromSubjectMethod = UserGroupInformation.class.getMethod("loginUserFromSubject",
                        Subject.class);
                loginUserFromSubjectMethod.invoke(null, (Subject) null);
            } catch (NoSuchMethodException e) {
                LOG.warn("Could not find method implementations in the shaded jar. Exception: {}", e);
            } catch (InvocationTargetException e) {
                throw e.getTargetException();
            }

            loginUser = UserGroupInformation.getLoginUser();
        }

        if (UserGroupInformation.isSecurityEnabled()) {
            // note: UGI::hasKerberosCredentials inaccurately reports false
            // for logins based on a keytab (fixed in Hadoop 2.6.1, see HADOOP-10786),
            // so we check only in ticket cache scenario.
            if (securityConfig.useTicketCache() && !loginUser.hasKerberosCredentials()) {
                // a delegation token is an adequate substitute in most cases
                if (!HadoopUtils.hasHDFSDelegationToken()) {
                    LOG.warn(
                            "Hadoop security is enabled but current login user does not have Kerberos credentials");
                }
            }
        }

        LOG.info("Hadoop user set to {}", loginUser);

    } catch (Throwable ex) {
        throw new SecurityInstallException("Unable to set the Hadoop login user", ex);
    }
}
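
The reflective calls above exist only to stay compatible with the Hadoop 1 profile; on Hadoop 2 and later they reduce to the direct API already named in the source comment. A minimal sketch of the direct form, reusing the surrounding example's names:

// Direct (non-reflective) equivalent of the token loading above; Hadoop 2+ only.
String fileLocation = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
if (fileLocation != null) {
    Credentials cred = Credentials.readTokenStorageFile(new File(fileLocation),
            securityConfig.getHadoopConfiguration());
    loginUser.addCredentials(cred);
}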

From source file:org.apache.flink.runtime.security.SecurityContext.java

License:Apache License

public static void install(SecurityConfiguration config) throws Exception {

    // perform static initialization of UGI, JAAS
    if (installedContext != null) {
        LOG.warn("overriding previous security context");
    }

    // establish the JAAS config
    JaasConfiguration jaasConfig = new JaasConfiguration(config.keytab, config.principal);
    javax.security.auth.login.Configuration.setConfiguration(jaasConfig);

    populateSystemSecurityProperties(config.flinkConf);

    // establish the UGI login user
    UserGroupInformation.setConfiguration(config.hadoopConf);

    UserGroupInformation loginUser;

    if (UserGroupInformation.isSecurityEnabled() && config.keytab != null
            && !StringUtils.isBlank(config.principal)) {
        String keytabPath = (new File(config.keytab)).getAbsolutePath();

        UserGroupInformation.loginUserFromKeytab(config.principal, keytabPath);

        loginUser = UserGroupInformation.getLoginUser();

        // supplement with any available tokens
        String fileLocation = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
        if (fileLocation != null) {
            /*
             * Use reflection API since the API semantics are not available in Hadoop1 profile. Below APIs are
             * used in the context of reading the stored tokens from UGI.
             * Credentials cred = Credentials.readTokenStorageFile(new File(fileLocation), config.hadoopConf);
             * loginUser.addCredentials(cred);
            */
            try {
                Method readTokenStorageFileMethod = Credentials.class.getMethod("readTokenStorageFile",
                        File.class, org.apache.hadoop.conf.Configuration.class);
                Credentials cred = (Credentials) readTokenStorageFileMethod.invoke(null, new File(fileLocation),
                        config.hadoopConf);
                Method addCredentialsMethod = UserGroupInformation.class.getMethod("addCredentials",
                        Credentials.class);
                addCredentialsMethod.invoke(loginUser, cred);
            } catch (NoSuchMethodException e) {
                LOG.warn("Could not find method implementations in the shaded jar. Exception: {}", e);
            }
        }
    } else {
        // login with current user credentials (e.g. ticket cache)
        try {
            //Use reflection API to get the login user object
            //UserGroupInformation.loginUserFromSubject(null);
            Method loginUserFromSubjectMethod = UserGroupInformation.class.getMethod("loginUserFromSubject",
                    Subject.class);
            Subject subject = null;
            loginUserFromSubjectMethod.invoke(null, subject);
        } catch (NoSuchMethodException e) {
            LOG.warn("Could not find method implementations in the shaded jar. Exception: {}", e);
        }

        loginUser = UserGroupInformation.getLoginUser();
        // note that the stored tokens are read automatically
    }

    boolean delegationToken = false;
    final Text HDFS_DELEGATION_KIND = new Text("HDFS_DELEGATION_TOKEN");
    Collection<Token<? extends TokenIdentifier>> usrTok = loginUser.getTokens();
    for (Token<? extends TokenIdentifier> token : usrTok) {
        final Text id = new Text(token.getIdentifier());
        LOG.debug("Found user token " + id + " with " + token);
        if (token.getKind().equals(HDFS_DELEGATION_KIND)) {
            delegationToken = true;
        }
    }

    if (UserGroupInformation.isSecurityEnabled() && !loginUser.hasKerberosCredentials()) {
        //throw an error in non-yarn deployment if kerberos cache is not available
        if (!delegationToken) {
            LOG.error("Hadoop Security is enabled but current login user does not have Kerberos Credentials");
            throw new RuntimeException(
                    "Hadoop Security is enabled but current login user does not have Kerberos Credentials");
        }
    }

    installedContext = new SecurityContext(loginUser);
}
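
Once the login user is established, secured Hadoop calls are typically wrapped in UserGroupInformation#doAs. A minimal sketch, assuming org.apache.hadoop.fs.FileSystem and java.security.PrivilegedExceptionAction are available; the FileSystem access is only an illustrative payload:

// Run an action with the credentials of the logged-in user.
UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
FileSystem fs = loginUser.doAs(new PrivilegedExceptionAction<FileSystem>() {
    @Override
    public FileSystem run() throws Exception {
        return FileSystem.get(new Configuration());
    }
});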

From source file:org.apache.flume.auth.KerberosAuthenticator.java

License:Apache License

/**
 * When a valid principal and keytab are provided and authentication has
 * not yet been done for this object, this method authenticates the
 * credentials and populates the ugi. In case of null or invalid credentials,
 * an IllegalArgumentException is thrown. In case of failure to authenticate,
 * a SecurityException is thrown. If authentication has already happened on
 * this KerberosAuthenticator object, this method checks whether the
 * credentials passed in are the same as the validated credentials. If not, it
 * throws an exception, as this authenticator can represent only one principal.
 *
 * @param principal
 * @param keytab
 */
public synchronized void authenticate(String principal, String keytab) {
    // sanity checking

    Preconditions.checkArgument(principal != null && !principal.isEmpty(),
            "Invalid Kerberos principal: " + String.valueOf(principal));
    Preconditions.checkArgument(keytab != null && !keytab.isEmpty(),
            "Invalid Kerberos keytab: " + String.valueOf(keytab));
    File keytabFile = new File(keytab);
    Preconditions.checkArgument(keytabFile.isFile() && keytabFile.canRead(),
            "Keytab is not a readable file: " + String.valueOf(keytab));

    // resolve the requested principal
    String resolvedPrincipal;
    try {
        // resolves _HOST pattern using standard Hadoop search/replace
        // via DNS lookup when 2nd argument is empty
        resolvedPrincipal = SecurityUtil.getServerPrincipal(principal, "");
    } catch (IOException e) {
        throw new IllegalArgumentException(
                "Host lookup error resolving kerberos principal (" + principal + "). Exception follows.", e);
    }
    Preconditions.checkNotNull(resolvedPrincipal, "Resolved Principal must not be null");

    // be cruel and unusual when user tries to login as multiple principals
    // this isn't really valid with a reconfigure but this should be rare
    // enough to warrant a restart of the agent JVM
    // TODO: find a way to interrogate the entire current config state,
    // since we don't have to be unnecessarily protective if they switch all
    // HDFS sinks to use a different principal all at once.

    KerberosUser newUser = new KerberosUser(resolvedPrincipal, keytab);
    Preconditions.checkState(prevUser == null || prevUser.equals(newUser),
            "Cannot use multiple kerberos principals in the same agent. "
                    + " Must restart agent to use new principal or keytab. " + "Previous = %s, New = %s",
            prevUser, newUser);

    // enable the kerberos mode of UGI, before doing anything else
    if (!UserGroupInformation.isSecurityEnabled()) {
        Configuration conf = new Configuration(false);
        conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);
    }

    // We are interested in currently logged in user with kerberos creds
    UserGroupInformation curUser = null;
    try {
        curUser = UserGroupInformation.getLoginUser();
        if (curUser != null && !curUser.hasKerberosCredentials()) {
            curUser = null;
        }
    } catch (IOException e) {
        LOG.warn("User unexpectedly had no active login. Continuing with " + "authentication", e);
    }

    /*
     *  if ugi is not null,
     *     if ugi matches currently logged in kerberos user, we are good
     *     else we are logged out, so relogin our ugi
     *  else if ugi is null, login and populate state
     */
    try {
        if (ugi != null) {
            if (curUser != null && curUser.getUserName().equals(ugi.getUserName())) {
                LOG.debug("Using existing principal login: {}", ugi);
            } else {
                LOG.info("Attempting kerberos Re-login as principal ({}) ", new Object[] { ugi.getUserName() });
                ugi.reloginFromKeytab();
            }
        } else {
            LOG.info("Attempting kerberos login as principal ({}) from keytab " + "file ({})",
                    new Object[] { resolvedPrincipal, keytab });
            UserGroupInformation.loginUserFromKeytab(resolvedPrincipal, keytab);
            this.ugi = UserGroupInformation.getLoginUser();
            this.prevUser = new KerberosUser(resolvedPrincipal, keytab);
            this.privilegedExecutor = new UGIExecutor(this.ugi);
        }
    } catch (IOException e) {
        throw new SecurityException(
                "Authentication error while attempting to " + "login as kerberos principal ("
                        + resolvedPrincipal + ") using " + "keytab (" + keytab + "). Exception follows.",
                e);
    }

    printUGI(this.ugi);
}
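
The reloginFromKeytab call above forces a fresh login. For long-running processes, UserGroupInformation also offers checkTGTAndReloginFromKeytab, which relogins only when the TGT is close to expiry; a minimal sketch of a periodic renewal check:

// Cheap to call often: relogins from the keytab only when the TGT nears expiry.
UserGroupInformation ugi = UserGroupInformation.getLoginUser();
ugi.checkTGTAndReloginFromKeytab();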

From source file:org.apache.flume.sink.kite.KerberosUtil.java

License:Apache License

/**
 * Static synchronized method for static Kerberos login. <br/>
 * Static synchronized due to a thundering herd problem when multiple Sinks
 * attempt to log in using the same principal at the same time with the
 * intention of impersonating different users (or even the same user).
 * If this is not controlled, MIT Kerberos v5 believes it is seeing a replay
 * attack and it returns:
 * <blockquote>Request is a replay (34) - PROCESS_TGS</blockquote>
 * In addition, since the underlying Hadoop APIs we are using for
 * impersonation are static, we define this method as static as well.
 *
 * @param principal
 *         Fully-qualified principal to use for authentication.
 * @param keytab
 *         Location of keytab file containing credentials for principal.
 * @return Logged-in user
 * @throws org.apache.flume.sink.kite.KerberosUtil.SecurityException
 *         if login fails.
 * @throws IllegalArgumentException
 *         if the principal or the keytab is not usable
 */
public static synchronized UserGroupInformation login(String principal, String keytab) {
    // resolve the requested principal, if it is present
    String finalPrincipal = null;
    if (principal != null && !principal.isEmpty()) {
        try {
            // resolves _HOST pattern using standard Hadoop search/replace
            // via DNS lookup when 2nd argument is empty
            finalPrincipal = SecurityUtil.getServerPrincipal(principal, "");
        } catch (IOException e) {
            throw new SecurityException("Failed to resolve Kerberos principal", e);
        }
    }

    // check if there is a user already logged in
    UserGroupInformation currentUser = null;
    try {
        currentUser = UserGroupInformation.getLoginUser();
    } catch (IOException e) {
        // not a big deal but this shouldn't typically happen because it will
        // generally fall back to the UNIX user
        LOG.debug("Unable to get login user before Kerberos auth attempt", e);
    }

    // if the current user is valid (matches the given principal) then use it
    if (currentUser != null) {
        if (finalPrincipal == null || finalPrincipal.equals(currentUser.getUserName())) {
            LOG.debug("Using existing login for {}: {}", finalPrincipal, currentUser);
            return currentUser;
        } else {
            // be cruel and unusual when user tries to login as multiple principals
            // this isn't really valid with a reconfigure but this should be rare
            // enough to warrant a restart of the agent JVM
            // TODO: find a way to interrogate the entire current config state,
            // since we don't have to be unnecessarily protective if they switch all
            // HDFS sinks to use a different principal all at once.
            throw new SecurityException("Cannot use multiple Kerberos principals: " + finalPrincipal
                    + " would replace " + currentUser.getUserName());
        }
    }

    // prepare for a new login
    Preconditions.checkArgument(principal != null && !principal.isEmpty(),
            "Invalid Kerberos principal: " + String.valueOf(principal));
    Preconditions.checkNotNull(finalPrincipal, "Resolved principal must not be null");
    Preconditions.checkArgument(keytab != null && !keytab.isEmpty(),
            "Invalid Kerberos keytab: " + String.valueOf(keytab));
    File keytabFile = new File(keytab);
    Preconditions.checkArgument(keytabFile.isFile() && keytabFile.canRead(),
            "Keytab is not a readable file: " + String.valueOf(keytab));

    try {
        // attempt static kerberos login
        LOG.debug("Logging in as {} with {}", finalPrincipal, keytab);
        UserGroupInformation.loginUserFromKeytab(principal, keytab);
        return UserGroupInformation.getLoginUser();
    } catch (IOException e) {
        throw new SecurityException("Kerberos login failed", e);
    }
}
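
The javadoc above mentions impersonation. With the keytab principal logged in as the real user, the standard Hadoop proxy-user pattern looks roughly like this; "alice" is a hypothetical user, and the cluster must authorize the real user via the hadoop.proxyuser.* settings:

// Impersonate a hypothetical user on top of the keytab login.
UserGroupInformation realUser = UserGroupInformation.getLoginUser();
UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser("alice", realUser);
proxyUgi.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
        // perform HDFS or job-submission work as "alice" here
        return null;
    }
});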

From source file:org.apache.gobblin.util.hadoop.TokenUtils.java

License:Apache License

/**
 * Get Hadoop tokens (tokens for job history server, job tracker and HDFS) using Kerberos keytab.
 *
 * @param state A {@link State} object that should contain property {@link #USER_TO_PROXY},
 * {@link #KEYTAB_USER} and {@link #KEYTAB_LOCATION}. To obtain tokens for
 * other namenodes, use property {@link #OTHER_NAMENODES} with comma separated HDFS URIs.
 * @param tokenFile If present, the file will store materialized credentials.
 * @param cred An in-memory representation of credentials.
 */
public static void getHadoopTokens(final State state, Optional<File> tokenFile, Credentials cred)
        throws IOException, InterruptedException {

    Preconditions.checkArgument(state.contains(KEYTAB_USER), "Missing required property " + KEYTAB_USER);
    Preconditions.checkArgument(state.contains(KEYTAB_LOCATION),
            "Missing required property " + KEYTAB_LOCATION);

    Configuration configuration = new Configuration();
    configuration.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS);
    UserGroupInformation.setConfiguration(configuration);
    UserGroupInformation.loginUserFromKeytab(obtainKerberosPrincipal(state), state.getProp(KEYTAB_LOCATION));

    final Optional<String> userToProxy = Strings.isNullOrEmpty(state.getProp(USER_TO_PROXY))
            ? Optional.<String>absent()
            : Optional.fromNullable(state.getProp(USER_TO_PROXY));
    final Configuration conf = new Configuration();

    LOG.info("Getting tokens for " + userToProxy);

    getJhToken(conf, cred);
    getFsAndJtTokens(state, conf, userToProxy, cred);

    if (tokenFile.isPresent()) {
        persistTokens(cred, tokenFile.get());
    }
}
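
persistTokens is the author's helper, but Hadoop's Credentials API can write a token file directly. A minimal sketch of what such a helper could look like, assuming only the standard writeTokenStorageFile method:

// Serialize the collected tokens so downstream processes can load them
// via the HADOOP_TOKEN_FILE_LOCATION environment variable.
public static void writeTokenFile(Credentials cred, File tokenFile, Configuration conf)
        throws IOException {
    cred.writeTokenStorageFile(new Path(tokenFile.getAbsolutePath()), conf);
}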

From source file:org.apache.hawq.ranger.authorization.RangerHawqPluginResource.java

License:Apache License

/**
 * Constructor. Creates a new instance of the resource that uses <code>RangerHawqAuthorizer</code>.
 */
public RangerHawqPluginResource() {
    // set UserGroupInformation under kerberos authentication
    if (Utils.getAuth() == Utils.AuthMethod.KERBEROS) {
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);

        String prin = Utils.getPrincipal();
        String keytab = Utils.getKeytab();

        if (!prin.equals("") && !keytab.equals("")) {
            try {
                UserGroupInformation.loginUserFromKeytab(prin, keytab);
            } catch (Exception e) {
                LOG.warn(String.format("loginUserFromKeytab failed, user[%s], keytab[%s]", prin, keytab));
            }
        }
    }

    if (LOG.isDebugEnabled()) {
        try {
            UserGroupInformation user = UserGroupInformation.getLoginUser();
            LOG.debug(String.format("login user: %s", user));
        } catch (Exception e) {
            LOG.warn("get login user failed exception: " + e);
        }
    }

    this.authorizer = RangerHawqAuthorizer.getInstance();

}

From source file:org.apache.hcatalog.templeton.Main.java

License:Apache License

public Server runServer(int port) throws Exception {

    //Authenticate using keytab
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation.loginUserFromKeytab(conf.kerberosPrincipal(), conf.kerberosKeytab());
    }

    // Create the Jetty server
    Server server = new Server(port);
    ServletContextHandler root = new ServletContextHandler(server, "/");

    // Add the Auth filter
    FilterHolder fHolder = makeAuthFilter();

    /* 
     * We add filters for each of the URIs supported by templeton.
     * If we added the entire sub-structure using '/*', the mapreduce 
     * notification cannot give the callback to templeton in secure mode.
     * This is because mapreduce does not use secure credentials for 
     * callbacks. So jetty would fail the request as unauthorized.
     */
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/ddl/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/pig/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/hive/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/queue/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/mapreduce/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/status/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/version/*", FilterMapping.REQUEST);

    // Connect Jersey
    ServletHolder h = new ServletHolder(new ServletContainer(makeJerseyConfig()));
    root.addServlet(h, "/" + SERVLET_PATH + "/*");
    // Add any redirects
    addRedirects(server);

    // Start the server
    server.start();
    this.server = server;
    return server;
}

From source file:org.apache.hive.hcatalog.templeton.Main.java

License:Apache License

public Server runServer(int port) throws Exception {

    //Authenticate using keytab
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation.loginUserFromKeytab(conf.kerberosPrincipal(), conf.kerberosKeytab());
    }

    // Create the Jetty server. If jetty conf file exists, use that to create server
    // to have more control.
    Server server = null;
    if (StringUtils.isEmpty(conf.jettyConfiguration())) {
        server = new Server(port);
    } else {
        FileInputStream jettyConf = new FileInputStream(conf.jettyConfiguration());
        XmlConfiguration configuration = new XmlConfiguration(jettyConf);
        server = (Server) configuration.configure();
    }

    ServletContextHandler root = new ServletContextHandler(server, "/");

    // Add the Auth filter
    FilterHolder fHolder = makeAuthFilter();

    /* 
     * We add filters for each of the URIs supported by templeton.
     * If we added the entire sub-structure using '/*', the mapreduce 
     * notification cannot give the callback to templeton in secure mode.
     * This is because mapreduce does not use secure credentials for 
     * callbacks. So jetty would fail the request as unauthorized.
     */
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/ddl/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/pig/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/hive/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/sqoop/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/queue/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/jobs/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/mapreduce/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/status/*", FilterMapping.REQUEST);
    root.addFilter(fHolder, "/" + SERVLET_PATH + "/v1/version/*", FilterMapping.REQUEST);

    if (conf.getBoolean(AppConfig.XSRF_FILTER_ENABLED, false)) {
        root.addFilter(makeXSRFFilter(), "/" + SERVLET_PATH + "/*", FilterMapping.REQUEST);
        LOG.debug("XSRF filter enabled");
    } else {
        LOG.warn("XSRF filter disabled");
    }

    // Connect Jersey
    ServletHolder h = new ServletHolder(new ServletContainer(makeJerseyConfig()));
    root.addServlet(h, "/" + SERVLET_PATH + "/*");
    // Add any redirects
    addRedirects(server);

    // Start the server
    server.start();
    this.server = server;
    return server;
}

From source file:org.apache.hive.minikdc.MiniHiveKdc.java

License:Apache License

/**
 * Log in the given principal, using the corresponding keytab file from the internal map.
 * @param principal
 * @return the logged-in UserGroupInformation
 * @throws Exception
 */
public UserGroupInformation loginUser(String principal) throws Exception {
    UserGroupInformation.loginUserFromKeytab(principal, getKeyTabFile(principal));
    return Utils.getUGI();
}

From source file:org.apache.hive.service.auth.HiveAuthFactory.java

License:Apache License

public static void loginFromKeytab(HiveConf hiveConf) throws IOException {
    String principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL);
    String keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB);
    if (principal.isEmpty() || keyTabFile.isEmpty()) {
        throw new IOException("HiveServer2 Kerberos principal or keytab is not correctly configured");
    } else {
        UserGroupInformation.loginUserFromKeytab(SecurityUtil.getServerPrincipal(principal, "0.0.0.0"),
                keyTabFile);
    }
}
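
The "0.0.0.0" argument asks SecurityUtil to substitute the local hostname for the _HOST pattern in the principal. A minimal sketch with placeholder values:

// On host "node1.example.com" this resolves to "hive/node1.example.com@EXAMPLE.COM".
String resolved = SecurityUtil.getServerPrincipal("hive/_HOST@EXAMPLE.COM", "0.0.0.0");
UserGroupInformation.loginUserFromKeytab(resolved, "/etc/security/keytabs/hive.keytab");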