Example usage for org.apache.hadoop.security UserGroupInformation getUserName

List of usage examples for org.apache.hadoop.security UserGroupInformation getUserName

Introduction

On this page you can find example usage of org.apache.hadoop.security.UserGroupInformation.getUserName().

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public String getUserName() 

Document

Get the user's full principal name.
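
Before the project examples below, here is a minimal, self-contained sketch of the call pattern. It is not taken from any of the listed projects; the class name PrintCurrentPrincipal is made up for illustration, and only a Hadoop client classpath is assumed.

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public class PrintCurrentPrincipal {
    public static void main(String[] args) throws IOException {
        // Resolve the UGI for the current JVM context (Kerberos login,
        // OS user, or proxy user, depending on how security is configured).
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

        // getUserName() returns the full principal name, for example a
        // Kerberos principal such as "alice@EXAMPLE.COM" on a secure
        // cluster, or simply the login name on an insecure one.
        System.out.println("Full principal name: " + ugi.getUserName());
    }
}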

Usage

From source file:org.apache.accumulo.core.cli.MapReduceClientOpts.java

License:Apache License

@Override
public AuthenticationToken getToken() {
    AuthenticationToken authToken = super.getToken();
    // For MapReduce, Kerberos credentials don't make it to the Mappers and Reducers,
    // so we need to request a delegation token and use that instead.
    if (authToken instanceof KerberosToken) {
        log.info("Received KerberosToken, fetching DelegationToken for MapReduce");
        final KerberosToken krbToken = (KerberosToken) authToken;

        try {
            UserGroupInformation user = UserGroupInformation.getCurrentUser();
            if (!user.hasKerberosCredentials()) {
                throw new IllegalStateException("Expected current user to have Kerberos credentials");
            }

            String newPrincipal = user.getUserName();
            log.info("Obtaining delegation token for {}", newPrincipal);

            setPrincipal(newPrincipal);
            Connector conn = getInstance().getConnector(newPrincipal, krbToken);

            // Do the explicit check to see if the user has the permission to get a delegation token
            if (!conn.securityOperations().hasSystemPermission(conn.whoami(),
                    SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
                log.error(
                        "{} doesn't have the {} SystemPermission necessary to obtain a delegation token. MapReduce tasks cannot automatically use the client's"
                                + " credentials on remote servers. Delegation tokens provide a means to run MapReduce without distributing the user's credentials.",
                        user.getUserName(), SystemPermission.OBTAIN_DELEGATION_TOKEN.name());
                throw new IllegalStateException(
                        conn.whoami() + " does not have permission to obtain a delegation token");
            }

            // Get the delegation token from Accumulo
            return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
        } catch (Exception e) {
            final String msg = "Failed to acquire DelegationToken for use with MapReduce";
            log.error(msg, e);
            throw new RuntimeException(msg, e);
        }
    }
    return authToken;
}

From source file:org.apache.accumulo.core.client.mapreduce.lib.impl.MapReduceClientOpts.java

License:Apache License

@Override
public AuthenticationToken getToken() {
    AuthenticationToken authToken = super.getToken();
    // For MapReduce, Kerberos credentials don't make it to the Mappers and Reducers,
    // so we need to request a delegation token and use that instead.
    if (authToken instanceof KerberosToken) {
        log.info("Received KerberosToken, fetching DelegationToken for MapReduce");
        final KerberosToken krbToken = (KerberosToken) authToken;

        try {
            UserGroupInformation user = UserGroupInformation.getCurrentUser();
            if (!user.hasKerberosCredentials()) {
                throw new IllegalStateException("Expected current user to have Kerberos credentials");
            }

            String newPrincipal = user.getUserName();
            log.info("Obtaining delegation token for {}", newPrincipal);

            setPrincipal(newPrincipal);
            Connector conn = Connector.builder().usingClientInfo(getClientInfo())
                    .usingToken(newPrincipal, krbToken).build();

            // Do the explicit check to see if the user has the permission to get a delegation token
            if (!conn.securityOperations().hasSystemPermission(conn.whoami(),
                    SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
                log.error(
                        "{} doesn't have the {} SystemPermission necessary to obtain a delegation"
                                + " token. MapReduce tasks cannot automatically use the client's"
                                + " credentials on remote servers. Delegation tokens provide a means to run"
                                + " MapReduce without distributing the user's credentials.",
                        user.getUserName(), SystemPermission.OBTAIN_DELEGATION_TOKEN.name());
                throw new IllegalStateException(
                        conn.whoami() + " does not have permission to obtain a delegation token");
            }

            // Get the delegation token from Accumulo
            return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
        } catch (Exception e) {
            final String msg = "Failed to acquire DelegationToken for use with MapReduce";
            log.error(msg, e);
            throw new RuntimeException(msg, e);
        }
    }
    return authToken;
}

From source file:org.apache.accumulo.core.client.security.tokens.KerberosToken.java

License:Apache License

/**
 * Creates a token using the provided principal and the currently logged-in user via {@link UserGroupInformation}.
 *
 * @param principal
 *          The user that is logged in
 */
public KerberosToken(String principal) throws IOException {
    requireNonNull(principal);
    final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    checkArgument(ugi.hasKerberosCredentials(), "Subject is not logged in via Kerberos");
    checkArgument(principal.equals(ugi.getUserName()),
            "Provided principal does not match currently logged-in user");
    this.principal = ugi.getUserName();
}

From source file:org.apache.accumulo.core.client.security.tokens.KerberosToken.java

License:Apache License

/**
 * Creates a token and logs in via {@link UserGroupInformation} using the provided principal and keytab. A key for the principal must exist in the keytab,
 * otherwise login will fail.
 *
 * @param principal
 *          The Kerberos principal
 * @param keytab
 *          A keytab file
 * @param replaceCurrentUser
 *          Should the current Hadoop user be replaced with this user
 * @deprecated since 1.8.0, @see #KerberosToken(String, File)
 */
@Deprecated
public KerberosToken(String principal, File keytab, boolean replaceCurrentUser) throws IOException {
    requireNonNull(principal, "Principal was null");
    requireNonNull(keytab, "Keytab was null");
    checkArgument(keytab.exists() && keytab.isFile(), "Keytab was not a normal file");
    UserGroupInformation ugi;
    if (replaceCurrentUser) {
        UserGroupInformation.loginUserFromKeytab(principal, keytab.getAbsolutePath());
        ugi = UserGroupInformation.getCurrentUser();
    } else {
        ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab.getAbsolutePath());
    }
    this.principal = ugi.getUserName();
    this.keytab = keytab;
}

From source file:org.apache.accumulo.core.clientImpl.mapreduce.lib.MapReduceClientOpts.java

License:Apache License

@Override
public AuthenticationToken getToken() {
    AuthenticationToken authToken = super.getToken();
    // For MapReduce, Kerberos credentials don't make it to the Mappers and Reducers,
    // so we need to request a delegation token and use that instead.
    if (authToken instanceof KerberosToken) {
        log.info("Received KerberosToken, fetching DelegationToken for MapReduce");
        final KerberosToken krbToken = (KerberosToken) authToken;

        try {
            UserGroupInformation user = UserGroupInformation.getCurrentUser();
            if (!user.hasKerberosCredentials()) {
                throw new IllegalStateException("Expected current user to have Kerberos credentials");
            }

            String newPrincipal = user.getUserName();
            log.info("Obtaining delegation token for {}", newPrincipal);

            setPrincipal(newPrincipal);
            try (AccumuloClient client = Accumulo.newClient().from(getClientProperties())
                    .as(newPrincipal, krbToken).build()) {

                // Do the explicit check to see if the user has the permission to get a delegation token
                if (!client.securityOperations().hasSystemPermission(client.whoami(),
                        SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
                    log.error(
                            "{} doesn't have the {} SystemPermission necessary to obtain a delegation"
                                    + " token. MapReduce tasks cannot automatically use the client's"
                                    + " credentials on remote servers. Delegation tokens provide a means to run"
                                    + " MapReduce without distributing the user's credentials.",
                            user.getUserName(), SystemPermission.OBTAIN_DELEGATION_TOKEN.name());
                    throw new IllegalStateException(
                            client.whoami() + " does not have permission to obtain a delegation token");
                }
                // Get the delegation token from Accumulo
                return client.securityOperations().getDelegationToken(new DelegationTokenConfig());
            }
        } catch (Exception e) {
            final String msg = "Failed to acquire DelegationToken for use with MapReduce";
            log.error(msg, e);
            throw new RuntimeException(msg, e);
        }
    }
    return authToken;
}

From source file:org.apache.accumulo.core.rpc.SaslConnectionParams.java

License:Apache License

protected void updatePrincipalFromUgi() {
    // Ensure we're using Kerberos auth for Hadoop UGI
    if (!UserGroupInformation.isSecurityEnabled()) {
        throw new RuntimeException("Cannot use SASL if Hadoop security is not enabled");
    }

    // Get the current user
    UserGroupInformation currentUser;
    try {
        currentUser = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
        throw new RuntimeException("Failed to get current user", e);
    }

    // The full name is our principal
    this.principal = currentUser.getUserName();
    if (null == this.principal) {
        throw new RuntimeException("Got null username from " + currentUser);
    }

}

From source file:org.apache.accumulo.examples.cli.MapReduceClientOpts.java

License:Apache License

@Override
public AuthenticationToken getToken() {
    AuthenticationToken authToken = super.getToken();
    // For MapReduce, Kerberos credentials don't make it to the Mappers and Reducers,
    // so we need to request a delegation token and use that instead.
    if (authToken instanceof KerberosToken) {
        log.info("Received KerberosToken, fetching DelegationToken for MapReduce");

        try {
            UserGroupInformation user = UserGroupInformation.getCurrentUser();
            if (!user.hasKerberosCredentials()) {
                throw new IllegalStateException("Expected current user to have Kerberos credentials");
            }

            String newPrincipal = user.getUserName();
            log.info("Obtaining delegation token for {}", newPrincipal);

            Connector conn = getConnector();

            // Do the explicit check to see if the user has the permission to get a delegation token
            if (!conn.securityOperations().hasSystemPermission(conn.whoami(),
                    SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
                log.error(
                        "{} doesn't have the {} SystemPermission necessary to obtain a delegation token. MapReduce tasks cannot automatically use the client's"
                                + " credentials on remote servers. Delegation tokens provide a means to run MapReduce without distributing the user's credentials.",
                        user.getUserName(), SystemPermission.OBTAIN_DELEGATION_TOKEN.name());
                throw new IllegalStateException(
                        conn.whoami() + " does not have permission to obtain a delegation token");
            }

            // Get the delegation token from Accumulo
            return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
        } catch (Exception e) {
            final String msg = "Failed to acquire DelegationToken for use with MapReduce";
            log.error(msg, e);
            throw new RuntimeException(msg, e);
        }
    }
    return authToken;
}

From source file:org.apache.accumulo.hadoopImpl.mapreduce.lib.MapReduceClientOpts.java

License:Apache License

public Properties getClientProps() {
    Properties props = super.getClientProps();
    // For MapReduce, Kerberos credentials don't make it to the Mappers and Reducers,
    // so we need to request a delegation token and use that instead.
    AuthenticationToken authToken = ClientProperty.getAuthenticationToken(props);
    if (authToken instanceof KerberosToken) {
        log.info("Received KerberosToken, fetching DelegationToken for MapReduce");
        final KerberosToken krbToken = (KerberosToken) authToken;

        try {
            UserGroupInformation user = UserGroupInformation.getCurrentUser();
            if (!user.hasKerberosCredentials()) {
                throw new IllegalStateException("Expected current user to have Kerberos credentials");
            }

            String newPrincipal = user.getUserName();
            log.info("Obtaining delegation token for {}", newPrincipal);

            try (AccumuloClient client = Accumulo.newClient().from(props).as(newPrincipal, krbToken).build()) {

                // Do the explicit check to see if the user has the permission to get a delegation token
                if (!client.securityOperations().hasSystemPermission(client.whoami(),
                        SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
                    log.error(
                            "{} doesn't have the {} SystemPermission necessary to obtain a delegation"
                                    + " token. MapReduce tasks cannot automatically use the client's"
                                    + " credentials on remote servers. Delegation tokens provide a means to run"
                                    + " MapReduce without distributing the user's credentials.",
                            user.getUserName(), SystemPermission.OBTAIN_DELEGATION_TOKEN.name());
                    throw new IllegalStateException(
                            client.whoami() + " does not have permission to obtain a delegation token");
                }

                // Get the delegation token from Accumulo
                AuthenticationToken token = client.securityOperations()
                        .getDelegationToken(new DelegationTokenConfig());

                props.setProperty(ClientProperty.AUTH_PRINCIPAL.getKey(), newPrincipal);
                ClientProperty.setAuthenticationToken(props, token);
            }
        } catch (IOException | AccumuloException | AccumuloSecurityException e) {
            final String msg = "Failed to acquire DelegationToken for use with MapReduce";
            log.error(msg, e);
            throw new RuntimeException(msg, e);
        }
    }
    return props;
}

From source file:org.apache.accumulo.proxy.Proxy.java

License:Apache License

public static ServerAddress createProxyServer(HostAndPort address, TProtocolFactory protocolFactory,
        Properties properties, ClientConfiguration clientConf) throws Exception {
    final int numThreads = Integer
            .parseInt(properties.getProperty(THRIFT_THREAD_POOL_SIZE_KEY, THRIFT_THREAD_POOL_SIZE_DEFAULT));
    final long maxFrameSize = AccumuloConfiguration
            .getMemoryInBytes(properties.getProperty(THRIFT_MAX_FRAME_SIZE_KEY, THRIFT_MAX_FRAME_SIZE_DEFAULT));
    final int simpleTimerThreadpoolSize = Integer
            .parseInt(Property.GENERAL_SIMPLETIMER_THREADPOOL_SIZE.getDefaultValue());
    // How frequently to try to resize the thread pool
    final long threadpoolResizeInterval = 1000l * 5;
    // No timeout
    final long serverSocketTimeout = 0l;
    // Use the new hadoop metrics2 support
    final MetricsFactory metricsFactory = new MetricsFactory(false);
    final String serverName = "Proxy", threadName = "Accumulo Thrift Proxy";

    // create the implementation of the proxy interface
    ProxyServer impl = new ProxyServer(properties);

    // Wrap the implementation -- translate some exceptions
    AccumuloProxy.Iface wrappedImpl = RpcWrapper.service(impl,
            new AccumuloProxy.Processor<AccumuloProxy.Iface>(impl));

    // Create the processor from the implementation
    TProcessor processor = new AccumuloProxy.Processor<>(wrappedImpl);

    // Get the type of thrift server to instantiate
    final String serverTypeStr = properties.getProperty(THRIFT_SERVER_TYPE, THRIFT_SERVER_TYPE_DEFAULT);
    ThriftServerType serverType = DEFAULT_SERVER_TYPE;
    if (!THRIFT_SERVER_TYPE_DEFAULT.equals(serverTypeStr)) {
        serverType = ThriftServerType.get(serverTypeStr);
    }

    SslConnectionParams sslParams = null;
    SaslServerConnectionParams saslParams = null;
    switch (serverType) {
    case SSL:
        sslParams = SslConnectionParams.forClient(ClientContext.convertClientConfig(clientConf));
        break;
    case SASL:
        if (!clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
            // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j capability
            log.error("FATAL: SASL thrift server was requested but it is disabled in client configuration");
            throw new RuntimeException("SASL is not enabled in configuration");
        }

        // Kerberos needs to be enabled to use it
        if (!UserGroupInformation.isSecurityEnabled()) {
            // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j capability
            log.error("FATAL: Hadoop security is not enabled");
            throw new RuntimeException();
        }

        // Login via principal and keytab
        final String kerberosPrincipal = properties.getProperty(KERBEROS_PRINCIPAL, ""),
                kerberosKeytab = properties.getProperty(KERBEROS_KEYTAB, "");
        if (StringUtils.isBlank(kerberosPrincipal) || StringUtils.isBlank(kerberosKeytab)) {
            // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j capability
            log.error("FATAL: Kerberos principal and keytab must be provided");
            throw new RuntimeException();
        }
        UserGroupInformation.loginUserFromKeytab(kerberosPrincipal, kerberosKeytab);
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        log.info("Logged in as " + ugi.getUserName());

        // The kerberosPrimary set in the SASL server needs to match the principal we're logged in as.
        final String shortName = ugi.getShortUserName();
        log.info("Setting server primary to {}", shortName);
        clientConf.setProperty(ClientProperty.KERBEROS_SERVER_PRIMARY, shortName);

        KerberosToken token = new KerberosToken();
        saslParams = new SaslServerConnectionParams(clientConf, token, null);

        processor = new UGIAssumingProcessor(processor);

        break;
    default:
        // nothing to do -- no extra configuration necessary
        break;
    }

    // Hook up support for tracing for thrift calls
    TimedProcessor timedProcessor = new TimedProcessor(metricsFactory, processor, serverName, threadName);

    // Create the thrift server with our processor and properties
    ServerAddress serverAddr = TServerUtils.startTServer(serverType, timedProcessor, protocolFactory,
            serverName, threadName, numThreads, simpleTimerThreadpoolSize, threadpoolResizeInterval,
            maxFrameSize, sslParams, saslParams, serverSocketTimeout, address);

    return serverAddr;
}
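
The proxy example above calls both getUserName() and getShortUserName(): the full principal name is logged, while the short name is used as the Kerberos server primary that the SASL configuration must match. A minimal sketch of the distinction follows; the principal and the class name FullVsShortName are hypothetical, and the short-name mapping depends on the cluster's hadoop.security.auth_to_local rules.

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public class FullVsShortName {
    public static void main(String[] args) throws IOException {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

        // With a (hypothetical) keytab login as
        // "accumulo/proxy-host.example.com@EXAMPLE.COM", getUserName()
        // returns the full principal name as-is.
        System.out.println("Full principal: " + ugi.getUserName());

        // getShortUserName() applies the configured auth_to_local rules;
        // with a rule that strips the instance and realm, the principal
        // above maps to just "accumulo", which is the value the proxy
        // uses as the Kerberos server primary.
        System.out.println("Short name:     " + ugi.getShortUserName());
    }
}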

From source file:org.apache.accumulo.server.AccumuloServerContext.java

License:Apache License

/**
 * A "client-side" assertion for servers to validate that they are logged in as the expected user, per the configuration, before performing any RPC
 */
// Should be private, but package-protected so EasyMock will work
void enforceKerberosLogin() {
    final AccumuloConfiguration conf = confFactory.getSiteConfiguration();
    // Unwrap _HOST into the FQDN to make the kerberos principal we'll compare against
    final String kerberosPrincipal = SecurityUtil
            .getServerPrincipal(conf.get(Property.GENERAL_KERBEROS_PRINCIPAL));
    UserGroupInformation loginUser;
    try {
        // The system user should be logged in via keytab when the process is started, not the currentUser() like KerberosToken
        loginUser = UserGroupInformation.getLoginUser();
    } catch (IOException e) {
        throw new RuntimeException("Could not get login user", e);
    }

    checkArgument(loginUser.hasKerberosCredentials(), "Server does not have Kerberos credentials");
    checkArgument(kerberosPrincipal.equals(loginUser.getUserName()),
            "Expected login user to be " + kerberosPrincipal + " but was " + loginUser.getUserName());
}