Example usage for org.apache.hadoop.security UserGroupInformation getUserName

Introduction

On this page you can find example usage for org.apache.hadoop.security.UserGroupInformation.getUserName().

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public String getUserName() 

Document

Get the user's full principal name.
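
A minimal sketch of calling this method (assuming a Hadoop client classpath and an existing OS or Kerberos login; the printed values are illustrative):

import java.io.IOException;
import org.apache.hadoop.security.UserGroupInformation;

public class GetUserNameDemo {
    public static void main(String[] args) throws IOException {
        // Resolve the current user from the OS login or an existing Kerberos login.
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // Full principal name, e.g. "alice/host.example.com@EXAMPLE.COM" under
        // Kerberos, or the plain OS user name otherwise.
        System.out.println("full name:  " + ugi.getUserName());
        // Short name with host and realm stripped, e.g. "alice".
        System.out.println("short name: " + ugi.getShortUserName());
    }
}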

Usage

From source file:org.apache.flume.sink.kite.KerberosUtil.java

License:Apache License

/**
 * Static synchronized method for static Kerberos login. <br/>
 * Static synchronized due to a thundering herd problem when multiple Sinks
 * attempt to log in using the same principal at the same time with the
 * intention of impersonating different users (or even the same user).
 * If this is not controlled, MIT Kerberos v5 believes it is seeing a replay
 * attack and it returns:
 * <blockquote>Request is a replay (34) - PROCESS_TGS</blockquote>
 * In addition, since the underlying Hadoop APIs we are using for
 * impersonation are static, we define this method as static as well.
 *
 * @param principal
 *         Fully-qualified principal to use for authentication.
 * @param keytab
 *         Location of keytab file containing credentials for principal.
 * @return Logged-in user
 * @throws org.apache.flume.sink.kite.KerberosUtil.SecurityException
 *         if login fails.
 * @throws IllegalArgumentException
 *         if the principal or the keytab is not usable
 */
public static synchronized UserGroupInformation login(String principal, String keytab) {
    // resolve the requested principal, if it is present
    String finalPrincipal = null;
    if (principal != null && !principal.isEmpty()) {
        try {
            // resolves _HOST pattern using standard Hadoop search/replace
            // via DNS lookup when 2nd argument is empty
            finalPrincipal = SecurityUtil.getServerPrincipal(principal, "");
        } catch (IOException e) {
            throw new SecurityException("Failed to resolve Kerberos principal", e);
        }
    }

    // check if there is a user already logged in
    UserGroupInformation currentUser = null;
    try {
        currentUser = UserGroupInformation.getLoginUser();
    } catch (IOException e) {
        // not a big deal but this shouldn't typically happen because it will
        // generally fall back to the UNIX user
        LOG.debug("Unable to get login user before Kerberos auth attempt", e);
    }

    // if the current user is valid (matches the given principal) then use it
    if (currentUser != null) {
        if (finalPrincipal == null || finalPrincipal.equals(currentUser.getUserName())) {
            LOG.debug("Using existing login for {}: {}", finalPrincipal, currentUser);
            return currentUser;
        } else {
            // be cruel and unusual when user tries to login as multiple principals
            // this isn't really valid with a reconfigure but this should be rare
            // enough to warrant a restart of the agent JVM
            // TODO: find a way to interrogate the entire current config state,
            // since we don't have to be unnecessarily protective if they switch all
            // HDFS sinks to use a different principal all at once.
            throw new SecurityException("Cannot use multiple Kerberos principals: " + finalPrincipal
                    + " would replace " + currentUser.getUserName());
        }
    }

    // prepare for a new login
    Preconditions.checkArgument(principal != null && !principal.isEmpty(),
            "Invalid Kerberos principal: " + String.valueOf(principal));
    Preconditions.checkNotNull(finalPrincipal, "Resolved principal must not be null");
    Preconditions.checkArgument(keytab != null && !keytab.isEmpty(),
            "Invalid Kerberos keytab: " + String.valueOf(keytab));
    File keytabFile = new File(keytab);
    Preconditions.checkArgument(keytabFile.isFile() && keytabFile.canRead(),
            "Keytab is not a readable file: " + String.valueOf(keytab));

    try {
        // attempt static kerberos login
        LOG.debug("Logging in as {} with {}", finalPrincipal, keytab);
        UserGroupInformation.loginUserFromKeytab(principal, keytab);
        return UserGroupInformation.getLoginUser();
    } catch (IOException e) {
        throw new SecurityException("Kerberos login failed", e);
    }
}
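
A hypothetical call site for the method above (the principal and keytab path are illustrative, not from the source):

// _HOST is expanded to the local hostname by the standard Hadoop substitution.
UserGroupInformation ugi = KerberosUtil.login(
        "flume/_HOST@EXAMPLE.COM",
        "/etc/security/keytabs/flume.keytab");
LOG.info("Logged in as {}", ugi.getUserName());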

From source file:org.apache.hawq.pxf.service.servlet.SecurityServletFilter.java

License:Apache License

/**
 * If user impersonation is configured, examines the request for the presence of the expected security headers
 * and creates a proxy user to execute the rest of the request chain. Responds with an HTTP error if the header is missing
 * or the chain processing throws an exception.
 *
 * @param request http request
 * @param response http response
 * @param chain filter chain
 */
@Override
public void doFilter(final ServletRequest request, final ServletResponse response, final FilterChain chain)
        throws IOException, ServletException {

    if (SecureLogin.isUserImpersonationEnabled()) {

        // retrieve user header and make sure header is present and is not empty
        final String user = ((HttpServletRequest) request).getHeader(USER_HEADER);
        if (user == null) {
            throw new IllegalArgumentException(MISSING_HEADER_ERROR);
        } else if (user.trim().isEmpty()) {
            throw new IllegalArgumentException(EMPTY_HEADER_ERROR);
        }

        // TODO refresh Kerberos token when security is enabled

        // prepare privileged action to run on behalf of the proxy user
        PrivilegedExceptionAction<Boolean> action = new PrivilegedExceptionAction<Boolean>() {
            @Override
            public Boolean run() throws IOException, ServletException {
                LOG.debug("Performing request chain call for proxy user = " + user);
                chain.doFilter(request, response);
                return true;
            }
        };

        // create proxy user UGI from the UGI of the logged in user and execute the servlet chain as that user
        UserGroupInformation proxyUGI = null;
        try {
            LOG.debug("Creating proxy user = " + user);
            proxyUGI = UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser());
            proxyUGI.doAs(action);
        } catch (UndeclaredThrowableException ute) {
            // unwrap the real exception thrown by the action
            throw new ServletException(ute.getCause());
        } catch (InterruptedException ie) {
            throw new ServletException(ie);
        } finally {
            try {
                if (proxyUGI != null) {
                    LOG.debug("Closing FileSystem for proxy user = " + proxyUGI.getUserName());
                    FileSystem.closeAllForUGI(proxyUGI);
                }
            } catch (Throwable t) {
                LOG.warn("Error closing FileSystem for proxy user = " + proxyUGI.getUserName());
            }
        }
    } else {
        // no user impersonation is configured
        chain.doFilter(request, response);
    }
}
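
The same createProxyUser/doAs pattern works outside a servlet as well; a minimal sketch, assuming the login user is allowed to impersonate "alice" through the hadoop.proxyuser.* settings:

import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

static void runAsAlice() throws IOException, InterruptedException {
    UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser(
            "alice", UserGroupInformation.getLoginUser());
    try {
        proxyUgi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws IOException {
                // Everything here executes with "alice" as the effective user.
                FileSystem fs = FileSystem.get(new Configuration());
                System.out.println(fs.exists(new Path("/user/alice")));
                return null;
            }
        });
    } finally {
        // Release cached FileSystem instances tied to the proxy UGI.
        FileSystem.closeAllForUGI(proxyUgi);
    }
}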

From source file:org.apache.hawq.pxf.service.utilities.SecuredHDFS.java

License:Apache License

/**
 * The function will verify the token with the NameNode if available and will
 * create a UserGroupInformation.
 *
 * Code in this function is copied from JspHelper.getTokenUGI
 *
 * @param identifier Delegation token identifier
 * @param password Delegation token password
 * @param kind the kind of token
 * @param service the service for this token
 * @param servletContext Jetty servlet context which contains the NN address
 *
 * @throws SecurityException Thrown when authentication fails
 */
private static void verifyToken(byte[] identifier, byte[] password, Text kind, Text service,
        ServletContext servletContext) {
    try {
        Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(identifier, password,
                kind, service);

        ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
        DataInputStream in = new DataInputStream(buf);
        DelegationTokenIdentifier id = new DelegationTokenIdentifier();
        id.readFields(in);

        final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(servletContext);
        if (nn != null) {
            nn.getNamesystem().verifyToken(id, token.getPassword());
        }

        UserGroupInformation userGroupInformation = id.getUser();
        userGroupInformation.addToken(token);
        LOG.debug("user " + userGroupInformation.getUserName() + " (" + userGroupInformation.getShortUserName()
                + ") authenticated");

        // re-login if necessary
        userGroupInformation.checkTGTAndReloginFromKeytab();
    } catch (IOException e) {
        throw new SecurityException("Failed to verify delegation token " + e, e);
    }
}
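
A hypothetical caller of verifyToken (a sketch): delegation tokens typically arrive as URL-safe strings, which decode back into the four parts the method expects; tokenString and servletContext are assumed to be in scope:

Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
// Parse the serialized form back into identifier/password/kind/service.
token.decodeFromUrlString(tokenString);
verifyToken(token.getIdentifier(), token.getPassword(),
        token.getKind(), token.getService(), servletContext);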

From source file:org.apache.hcatalog.mapreduce.Security.java

License:Apache License

void handleSecurity(Credentials credentials, OutputJobInfo outputJobInfo, HiveMetaStoreClient client,
        Configuration conf, boolean harRequested) throws IOException, MetaException, TException, Exception {
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // check if oozie has set up a hcat deleg. token - if so use it
        TokenSelector<? extends TokenIdentifier> hiveTokenSelector = new DelegationTokenSelector();
        //Oozie does not change the service field of the token,
        //so by default the generated token has service "new Text("")".
        //The Hive client will use TokenSelector.selectToken() with the service
        //set to an empty "Text" when hive.metastore.token.signature is unset.
        Token<? extends TokenIdentifier> hiveToken = hiveTokenSelector.selectToken(new Text(), ugi.getTokens());
        if (hiveToken == null) {
            // we did not get a token set up by Oozie, so let's get one ourselves here.
            // we essentially get a token per unique Output HCatTableInfo - this is
            // done because through Pig, setOutput() method is called multiple times
            // We want to only get the token once per unique output HCatTableInfo -
            // we cannot just get one token since in multi-query case (> 1 store in 1 job)
            // or the case when a single pig script results in > 1 jobs, the single
            // token will get cancelled by the output committer and the subsequent
            // stores will fail - by tying the token with the concatenation of
            // dbname, tablename and partition keyvalues of the output
            // TableInfo, we can have as many tokens as there are stores and the TokenSelector
            // will correctly pick the right tokens which the committer will use and
            // cancel.
            String tokenSignature = getTokenSignature(outputJobInfo);
            // get delegation tokens from the hcat server and store them into the "job".
            // These will be used to publish partitions to hcat,
            // normally in OutputCommitter.commitJob().
            // when the JobTracker in Hadoop MapReduce starts supporting renewal of
            // arbitrary tokens, the renewer should be the principal of the JobTracker
            hiveToken = HCatUtil.extractThriftToken(client.getDelegationToken(ugi.getUserName()),
                    tokenSignature);

            if (harRequested) {
                TokenSelector<? extends TokenIdentifier> jtTokenSelector = new org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenSelector();
                Token jtToken = jtTokenSelector.selectToken(
                        org.apache.hadoop.security.SecurityUtil.buildTokenService(
                                HCatHadoopShims.Instance.get().getResourceManagerAddress(conf)),
                        ugi.getTokens());
                if (jtToken == null) {
                    //we don't need to cancel this token as the TokenRenewer for JT tokens
                    //takes care of cancelling them
                    credentials.addToken(new Text("hcat jt token"),
                            HCatUtil.getJobTrackerDelegationToken(conf, ugi.getUserName()));
                }
            }

            credentials.addToken(new Text(ugi.getUserName() + "_" + tokenSignature), hiveToken);
            // this will be used by the outputcommitter to pass on to the metastore client
            // which in turn will pass on to the TokenSelector so that it can select
            // the right token.
            conf.set(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE, tokenSignature);
        }
    }
}
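
On the consuming side, a hedged sketch of how the signature stored above lets a selector pick the matching token back out of the UGI (this mirrors, but is not, the actual metastore client code):

String signature = conf.get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE);
// Tokens were added with the signature as their service, so selecting by
// that same Text value retrieves the right one among many stores.
Token<? extends TokenIdentifier> selected = new DelegationTokenSelector()
        .selectToken(new Text(signature), UserGroupInformation.getCurrentUser().getTokens());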

From source file:org.apache.hcatalog.templeton.SecureProxySupport.java

License:Apache License

private String buildHcatDelegationToken(String user)
        throws IOException, InterruptedException, MetaException, TException {
    HiveConf c = new HiveConf();
    final HiveMetaStoreClient client = new HiveMetaStoreClient(c);
    LOG.info("user: " + user + " loginUser: " + UserGroupInformation.getLoginUser().getUserName());
    final TokenWrapper twrapper = new TokenWrapper();
    final UserGroupInformation ugi = UgiFactory.getUgi(user);
    String s = ugi.doAs(new PrivilegedExceptionAction<String>() {
        public String run() throws IOException, MetaException, TException {
            String u = ugi.getUserName();
            return client.getDelegationToken(u);
        }
    });
    return s;
}

From source file:org.apache.hive.hcatalog.mapreduce.Security.java

License:Apache License

void handleSecurity(Credentials credentials, OutputJobInfo outputJobInfo, IMetaStoreClient client,
        Configuration conf, boolean harRequested) throws IOException, MetaException, TException, Exception {
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // check if oozie has set up a hcat deleg. token - if so use it
        TokenSelector<? extends TokenIdentifier> hiveTokenSelector = new DelegationTokenSelector();
        //Oozie does not change the service field of the token,
        //so by default the generated token has service "new Text("")".
        //The Hive client will use TokenSelector.selectToken() with the service
        //set to an empty "Text" when hive.metastore.token.signature is unset.
        Token<? extends TokenIdentifier> hiveToken = hiveTokenSelector.selectToken(new Text(), ugi.getTokens());
        if (hiveToken == null) {
            // we did not get a token set up by Oozie, so let's get one ourselves here.
            // we essentially get a token per unique Output HCatTableInfo - this is
            // done because through Pig, setOutput() method is called multiple times
            // We want to only get the token once per unique output HCatTableInfo -
            // we cannot just get one token since in multi-query case (> 1 store in 1 job)
            // or the case when a single pig script results in > 1 jobs, the single
            // token will get cancelled by the output committer and the subsequent
            // stores will fail - by tying the token with the concatenation of
            // dbname, tablename and partition keyvalues of the output
            // TableInfo, we can have as many tokens as there are stores and the TokenSelector
            // will correctly pick the right tokens which the committer will use and
            // cancel.
            String tokenSignature = getTokenSignature(outputJobInfo);
            // get delegation tokens from the hcat server and store them into the "job".
            // These will be used to publish partitions to hcat,
            // normally in OutputCommitter.commitJob().
            // when the JobTracker in Hadoop MapReduce starts supporting renewal of
            // arbitrary tokens, the renewer should be the principal of the JobTracker
            hiveToken = HCatUtil.extractThriftToken(
                    client.getDelegationToken(ugi.getUserName(), ugi.getUserName()), tokenSignature);

            if (harRequested) {
                TokenSelector<? extends TokenIdentifier> jtTokenSelector = new org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenSelector();
                Token jtToken = jtTokenSelector.selectToken(
                        org.apache.hadoop.security.SecurityUtil.buildTokenService(
                                ShimLoader.getHadoopShims().getHCatShim().getResourceManagerAddress(conf)),
                        ugi.getTokens());
                if (jtToken == null) {
                    //we don't need to cancel this token as the TokenRenewer for JT tokens
                    //takes care of cancelling them
                    credentials.addToken(new Text("hcat jt token"),
                            HCatUtil.getJobTrackerDelegationToken(conf, ugi.getUserName()));
                }
            }

            credentials.addToken(new Text(ugi.getUserName() + "_" + tokenSignature), hiveToken);
            // this will be used by the outputcommitter to pass on to the metastore client
            // which in turn will pass on to the TokenSelector so that it can select
            // the right token.
            conf.set(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE, tokenSignature);
        }
    }
}

From source file:org.apache.hive.hcatalog.templeton.SecureProxySupport.java

License:Apache License

private String buildHcatDelegationToken(String user) throws IOException, InterruptedException, TException {
    final HiveConf c = new HiveConf();
    final IMetaStoreClient client = HCatUtil.getHiveMetastoreClient(c);
    LOG.info("user: " + user + " loginUser: " + UserGroupInformation.getLoginUser().getUserName());
    final UserGroupInformation ugi = UgiFactory.getUgi(user);
    String s = ugi.doAs(new PrivilegedExceptionAction<String>() {
        public String run() throws IOException, MetaException, TException {
            String u = ugi.getUserName();
            return client.getDelegationToken(c.getUser(), u);
        }
    });
    return s;
}

From source file:org.apache.hive.hcatalog.templeton.tool.TempletonControllerJob.java

License:Apache License

private String buildHcatDelegationToken(String user) throws IOException, InterruptedException, TException {
    final HiveConf c = new HiveConf();
    LOG.debug("Creating hive metastore delegation token for user " + user);
    final UserGroupInformation ugi = UgiFactory.getUgi(user);
    UserGroupInformation real = ugi.getRealUser();
    return real.doAs(new PrivilegedExceptionAction<String>() {
        @Override
        public String run() throws IOException, TException, InterruptedException {
            final IMetaStoreClient client = HCatUtil.getHiveMetastoreClient(c);
            return ugi.doAs(new PrivilegedExceptionAction<String>() {
                @Override
                public String run() throws IOException, TException, InterruptedException {
                    String u = ugi.getUserName();
                    return client.getDelegationToken(c.getUser(), u);
                }
            });
        }
    });
}

From source file:org.apache.hive.service.auth.HiveAuthFactory.java

License:Apache License

public static boolean needUgiLogin(UserGroupInformation ugi, String principal, String keytab) {
    return null == ugi || !ugi.hasKerberosCredentials() || !ugi.getUserName().equals(principal)
            || !Objects.equals(keytab, getKeytabFromUgi());
}
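
A typical call site (a sketch; principal and keytab are assumed configuration values): re-login from the keytab only when the current login does not already satisfy the check:

UserGroupInformation ugi = UserGroupInformation.getLoginUser();
// Resolve any _HOST pattern before comparing against the logged-in principal.
if (needUgiLogin(ugi, SecurityUtil.getServerPrincipal(principal, "0.0.0.0"), keytab)) {
    UserGroupInformation.loginUserFromKeytab(principal, keytab);
}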

From source file:org.apache.hoya.yarn.appmaster.rpc.RpcBinder.java

License:Apache License

public static HoyaClusterProtocol getProxy(final Configuration conf, ApplicationReport application,
        final int rpcTimeout) throws IOException, HoyaException, InterruptedException {

    String host = application.getHost();
    int port = application.getRpcPort();
    String address = host + ":" + port;
    if (host == null || 0 == port) {
        throw new HoyaException(HoyaExitCodes.EXIT_CONNECTIVITY_PROBLEM,
                "Hoya YARN instance " + application.getName() + " isn't providing a valid address for the"
                        + " Hoya RPC protocol: " + address);
    }

    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    final UserGroupInformation newUgi = UserGroupInformation.createRemoteUser(currentUser.getUserName());
    final InetSocketAddress serviceAddr = NetUtils.createSocketAddrForHost(application.getHost(),
            application.getRpcPort());
    HoyaClusterProtocol realProxy;

    log.debug("Connecting to {}", serviceAddr);
    if (UserGroupInformation.isSecurityEnabled()) {
        org.apache.hadoop.yarn.api.records.Token clientToAMToken = application.getClientToAMToken();
        Token<ClientToAMTokenIdentifier> token = ConverterUtils.convertFromYarn(clientToAMToken, serviceAddr);
        newUgi.addToken(token);
        realProxy = newUgi.doAs(new PrivilegedExceptionAction<HoyaClusterProtocol>() {
            @Override
            public HoyaClusterProtocol run() throws IOException {
                return connectToServer(serviceAddr, newUgi, conf, rpcTimeout);
            }
        });
    } else {
        return connectToServer(serviceAddr, newUgi, conf, rpcTimeout);
    }
    return realProxy;
}
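
A hypothetical usage of getProxy, assuming an ApplicationReport already obtained from the YARN client and a 15-second RPC timeout:

HoyaClusterProtocol proxy = RpcBinder.getProxy(conf, applicationReport, 15000);
log.debug("Connected to AM as {}", UserGroupInformation.getCurrentUser().getUserName());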