List of usage examples for org.apache.hadoop.security.UserGroupInformation#getCurrentUser()
@InterfaceAudience.Public @InterfaceStability.Evolving public static UserGroupInformation getCurrentUser() throws IOException
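All of the examples below resolve the calling user the same way. As a quick orientation, here is a minimal, self-contained sketch of the call itself; the class name and printed strings are illustrative only and are not taken from any of the sources listed:

import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;

public class CurrentUserSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    // Resolve the Hadoop user associated with the current access control context
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    System.out.println("User: " + ugi.getUserName());
    System.out.println("Has Kerberos credentials: " + ugi.hasKerberosCredentials());

    // Run an action as that user; several of the examples below build on this pattern
    ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
      System.out.println("Running as " + UserGroupInformation.getCurrentUser().getUserName());
      return null;
    });
  }
}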
From source file:org.apache.accumulo.core.cli.MapReduceClientOpts.java
License:Apache License
@Override
public AuthenticationToken getToken() {
  AuthenticationToken authToken = super.getToken();
  // For MapReduce, Kerberos credentials don't make it to the Mappers and Reducers,
  // so we need to request a delegation token and use that instead.
  if (authToken instanceof KerberosToken) {
    log.info("Received KerberosToken, fetching DelegationToken for MapReduce");
    final KerberosToken krbToken = (KerberosToken) authToken;
    try {
      UserGroupInformation user = UserGroupInformation.getCurrentUser();
      if (!user.hasKerberosCredentials()) {
        throw new IllegalStateException("Expected current user to have Kerberos credentials");
      }
      String newPrincipal = user.getUserName();
      log.info("Obtaining delegation token for {}", newPrincipal);
      setPrincipal(newPrincipal);
      Connector conn = getInstance().getConnector(newPrincipal, krbToken);
      // Do the explicit check to see if the user has the permission to get a delegation token
      if (!conn.securityOperations().hasSystemPermission(conn.whoami(),
          SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
        log.error(
            "{} doesn't have the {} SystemPermission necessary to obtain a delegation token."
                + " MapReduce tasks cannot automatically use the client's credentials on remote servers."
                + " Delegation tokens provide a means to run MapReduce without distributing the user's credentials.",
            user.getUserName(), SystemPermission.OBTAIN_DELEGATION_TOKEN.name());
        throw new IllegalStateException(
            conn.whoami() + " does not have permission to obtain a delegation token");
      }
      // Get the delegation token from Accumulo
      return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
    } catch (Exception e) {
      final String msg = "Failed to acquire DelegationToken for use with MapReduce";
      log.error(msg, e);
      throw new RuntimeException(msg, e);
    }
  }
  return authToken;
}
From source file:org.apache.accumulo.core.client.mapreduce.lib.impl.MapReduceClientOpts.java
License:Apache License
@Override
public AuthenticationToken getToken() {
  AuthenticationToken authToken = super.getToken();
  // For MapReduce, Kerberos credentials don't make it to the Mappers and Reducers,
  // so we need to request a delegation token and use that instead.
  if (authToken instanceof KerberosToken) {
    log.info("Received KerberosToken, fetching DelegationToken for MapReduce");
    final KerberosToken krbToken = (KerberosToken) authToken;
    try {
      UserGroupInformation user = UserGroupInformation.getCurrentUser();
      if (!user.hasKerberosCredentials()) {
        throw new IllegalStateException("Expected current user to have Kerberos credentials");
      }
      String newPrincipal = user.getUserName();
      log.info("Obtaining delegation token for {}", newPrincipal);
      setPrincipal(newPrincipal);
      Connector conn = Connector.builder().usingClientInfo(getClientInfo())
          .usingToken(newPrincipal, krbToken).build();
      // Do the explicit check to see if the user has the permission to get a delegation token
      if (!conn.securityOperations().hasSystemPermission(conn.whoami(),
          SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
        log.error(
            "{} doesn't have the {} SystemPermission necessary to obtain a delegation"
                + " token. MapReduce tasks cannot automatically use the client's"
                + " credentials on remote servers. Delegation tokens provide a means to run"
                + " MapReduce without distributing the user's credentials.",
            user.getUserName(), SystemPermission.OBTAIN_DELEGATION_TOKEN.name());
        throw new IllegalStateException(
            conn.whoami() + " does not have permission to obtain a delegation token");
      }
      // Get the delegation token from Accumulo
      return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
    } catch (Exception e) {
      final String msg = "Failed to acquire DelegationToken for use with MapReduce";
      log.error(msg, e);
      throw new RuntimeException(msg, e);
    }
  }
  return authToken;
}
From source file:org.apache.accumulo.core.client.security.tokens.KerberosToken.java
License:Apache License
/**
 * Creates a token using the provided principal and the currently logged-in user via
 * {@link UserGroupInformation}.
 *
 * @param principal
 *          The user that is logged in
 */
public KerberosToken(String principal) throws IOException {
  requireNonNull(principal);
  final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
  checkArgument(ugi.hasKerberosCredentials(), "Subject is not logged in via Kerberos");
  checkArgument(principal.equals(ugi.getUserName()),
      "Provided principal does not match currently logged-in user");
  this.principal = ugi.getUserName();
}
From source file:org.apache.accumulo.core.client.security.tokens.KerberosToken.java
License:Apache License
/**
 * Creates a token and logs in via {@link UserGroupInformation} using the provided principal and
 * keytab. A key for the principal must exist in the keytab, otherwise login will fail.
 *
 * @param principal
 *          The Kerberos principal
 * @param keytab
 *          A keytab file
 * @param replaceCurrentUser
 *          Should the current Hadoop user be replaced with this user
 * @deprecated since 1.8.0, @see #KerberosToken(String, File)
 */
@Deprecated
public KerberosToken(String principal, File keytab, boolean replaceCurrentUser) throws IOException {
  requireNonNull(principal, "Principal was null");
  requireNonNull(keytab, "Keytab was null");
  checkArgument(keytab.exists() && keytab.isFile(), "Keytab was not a normal file");
  UserGroupInformation ugi;
  if (replaceCurrentUser) {
    UserGroupInformation.loginUserFromKeytab(principal, keytab.getAbsolutePath());
    ugi = UserGroupInformation.getCurrentUser();
  } else {
    ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab.getAbsolutePath());
  }
  this.principal = ugi.getUserName();
  this.keytab = keytab;
}
From source file:org.apache.accumulo.core.client.security.tokens.KerberosToken.java
License:Apache License
/**
 * Creates a token using the login user as returned by
 * {@link UserGroupInformation#getCurrentUser()}
 *
 * @throws IOException
 *           If the current logged in user cannot be computed.
 */
public KerberosToken() throws IOException {
  this(UserGroupInformation.getCurrentUser().getUserName());
}
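A hypothetical usage sketch for this no-argument constructor, assuming a prior keytab login; the principal and keytab path below are placeholders, not values from the Accumulo sources:

// Placeholders: "client@EXAMPLE.COM" and "/path/to/client.keytab" are illustrative only
UserGroupInformation.loginUserFromKeytab("client@EXAMPLE.COM", "/path/to/client.keytab");
// The no-arg constructor reads the principal from UserGroupInformation.getCurrentUser()
KerberosToken token = new KerberosToken();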
From source file:org.apache.accumulo.core.clientImpl.mapreduce.lib.MapReduceClientOpts.java
License:Apache License
@Override
public AuthenticationToken getToken() {
  AuthenticationToken authToken = super.getToken();
  // For MapReduce, Kerberos credentials don't make it to the Mappers and Reducers,
  // so we need to request a delegation token and use that instead.
  if (authToken instanceof KerberosToken) {
    log.info("Received KerberosToken, fetching DelegationToken for MapReduce");
    final KerberosToken krbToken = (KerberosToken) authToken;
    try {
      UserGroupInformation user = UserGroupInformation.getCurrentUser();
      if (!user.hasKerberosCredentials()) {
        throw new IllegalStateException("Expected current user to have Kerberos credentials");
      }
      String newPrincipal = user.getUserName();
      log.info("Obtaining delegation token for {}", newPrincipal);
      setPrincipal(newPrincipal);
      try (AccumuloClient client = Accumulo.newClient().from(getClientProperties())
          .as(newPrincipal, krbToken).build()) {
        // Do the explicit check to see if the user has the permission to get a delegation token
        if (!client.securityOperations().hasSystemPermission(client.whoami(),
            SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
          log.error(
              "{} doesn't have the {} SystemPermission necessary to obtain a delegation"
                  + " token. MapReduce tasks cannot automatically use the client's"
                  + " credentials on remote servers. Delegation tokens provide a means to run"
                  + " MapReduce without distributing the user's credentials.",
              user.getUserName(), SystemPermission.OBTAIN_DELEGATION_TOKEN.name());
          throw new IllegalStateException(
              client.whoami() + " does not have permission to obtain a delegation token");
        }
        // Get the delegation token from Accumulo
        return client.securityOperations().getDelegationToken(new DelegationTokenConfig());
      }
    } catch (Exception e) {
      final String msg = "Failed to acquire DelegationToken for use with MapReduce";
      log.error(msg, e);
      throw new RuntimeException(msg, e);
    }
  }
  return authToken;
}
From source file:org.apache.accumulo.core.rpc.SaslConnectionParams.java
License:Apache License
protected void updatePrincipalFromUgi() {
  // Ensure we're using Kerberos auth for Hadoop UGI
  if (!UserGroupInformation.isSecurityEnabled()) {
    throw new RuntimeException("Cannot use SASL if Hadoop security is not enabled");
  }
  // Get the current user
  UserGroupInformation currentUser;
  try {
    currentUser = UserGroupInformation.getCurrentUser();
  } catch (IOException e) {
    throw new RuntimeException("Failed to get current user", e);
  }
  // The full name is our principal
  this.principal = currentUser.getUserName();
  if (null == this.principal) {
    throw new RuntimeException("Got null username from " + currentUser);
  }
}
From source file:org.apache.accumulo.core.rpc.ThriftUtil.java
License:Apache License
/**
 * Create a TTransport for clients to the given address with the provided socket timeout and
 * session-layer configuration
 *
 * @param address
 *          Server address to connect to
 * @param timeout
 *          Client socket timeout
 * @param sslParams
 *          RPC options for SSL servers
 * @param saslParams
 *          RPC options for SASL servers
 * @return An open TTransport which must be closed when finished
 */
public static TTransport createClientTransport(HostAndPort address, int timeout,
    SslConnectionParams sslParams, SaslConnectionParams saslParams) throws TTransportException {
  boolean success = false;
  TTransport transport = null;
  try {
    if (sslParams != null) {
      // The check in AccumuloServerContext ensures that servers are brought up with sane
      // configurations, but we also want to validate clients
      if (null != saslParams) {
        throw new IllegalStateException("Cannot use both SSL and SASL");
      }
      log.trace("Creating SSL client transport");
      // TSSLTransportFactory handles timeout 0 -> forever natively
      if (sslParams.useJsse()) {
        transport = TSSLTransportFactory.getClientSocket(address.getHostText(), address.getPort(), timeout);
      } else {
        // JDK6's factory doesn't appear to pass the protocol onto the Socket properly so we have
        // to do some magic to make sure that happens. Not an issue in JDK7
        // Taken from thrift-0.9.1 to make the SSLContext
        SSLContext sslContext = createSSLContext(sslParams);
        // Create the factory from it
        SSLSocketFactory sslSockFactory = sslContext.getSocketFactory();
        // Wrap the real factory with our own that will set the protocol on the Socket before returning it
        ProtocolOverridingSSLSocketFactory wrappingSslSockFactory = new ProtocolOverridingSSLSocketFactory(
            sslSockFactory, new String[] {sslParams.getClientProtocol()});
        // Create the TSocket from that
        transport = createClient(wrappingSslSockFactory, address.getHostText(), address.getPort(), timeout);
        // TSSLTransportFactory leaves transports open, so no need to open here
      }
      transport = ThriftUtil.transportFactory().getTransport(transport);
    } else if (null != saslParams) {
      if (!UserGroupInformation.isSecurityEnabled()) {
        throw new IllegalStateException("Expected Kerberos security to be enabled if SASL is in use");
      }
      log.trace("Creating SASL connection to {}:{}", address.getHostText(), address.getPort());
      // Make sure a timeout is set
      try {
        transport = TTimeoutTransport.create(address, timeout);
      } catch (IOException e) {
        log.warn("Failed to open transport to {}", address);
        throw new TTransportException(e);
      }
      try {
        // Log in via UGI, ensures we have logged in with our KRB credentials
        final UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
        // Is this pricey enough that we want to cache it?
        final String hostname = InetAddress.getByName(address.getHostText()).getCanonicalHostName();
        final SaslMechanism mechanism = saslParams.getMechanism();
        log.trace("Opening transport to server as {} to {}/{} using {}", currentUser,
            saslParams.getKerberosServerPrimary(), hostname, mechanism);
        // Create the client SASL transport using the information for the server
        // Despite the 'protocol' argument seeming to be useless, it *must* be the primary of the
        // server being connected to
        transport = new TSaslClientTransport(mechanism.getMechanismName(), null,
            saslParams.getKerberosServerPrimary(), hostname, saslParams.getSaslProperties(),
            saslParams.getCallbackHandler(), transport);
        // Wrap it all in a processor which will run with a doAs the current user
        transport = new UGIAssumingTransport(transport, currentUser);
        // Open the transport
        transport.open();
      } catch (TTransportException e) {
        log.warn("Failed to open SASL transport", e);
        // We might have had a valid ticket, but it expired. We'll let the caller retry, but we
        // will attempt to re-login to make the next attempt work. Sadly, we have no way to
        // determine the actual reason we got this TTransportException other than inspecting the
        // exception msg.
        log.debug("Caught TTransportException opening SASL transport,"
            + " checking if re-login is necessary before propagating the exception.");
        attemptClientReLogin();
        throw e;
      } catch (IOException e) {
        log.warn("Failed to open SASL transport", e);
        throw new TTransportException(e);
      }
    } else {
      log.trace("Opening normal transport");
      if (timeout == 0) {
        transport = new TSocket(address.getHostText(), address.getPort());
        transport.open();
      } else {
        try {
          transport = TTimeoutTransport.create(address, timeout);
        } catch (IOException ex) {
          log.warn("Failed to open transport to " + address);
          throw new TTransportException(ex);
        }
        // Open the transport
        transport.open();
      }
      transport = ThriftUtil.transportFactory().getTransport(transport);
    }
    success = true;
  } finally {
    if (!success && transport != null) {
      transport.close();
    }
  }
  return transport;
}
From source file:org.apache.accumulo.core.rpc.ThriftUtil.java
License:Apache License
/**
 * Some wonderful snippets of documentation from HBase on performing the re-login client-side (as
 * well as server-side) in the following paragraph. We want to attempt a re-login to automatically
 * refresh the client's Krb "credentials" (remember, a server might also be a client, master
 * sending RPC to tserver), but we have to take care to avoid Kerberos' replay attack protection.
 * <p>
 * If multiple clients with the same principal try to connect to the same server at the same time,
 * the server assumes a replay attack is in progress. This is a feature of kerberos. In order to
 * work around this, what is done is that the client backs off randomly and tries to initiate the
 * connection again. The other problem is to do with ticket expiry. To handle that, a relogin is
 * attempted.
 */
static void attemptClientReLogin() {
  try {
    UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
    if (null == loginUser || !loginUser.hasKerberosCredentials()) {
      // We should have already checked that we're logged in and have credentials. A
      // precondition-like check.
      throw new RuntimeException("Expected to find Kerberos UGI credentials, but did not");
    }
    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    // A Proxy user is the "effective user" (in name only), riding on top of the "real user"'s
    // Krb credentials.
    UserGroupInformation realUser = currentUser.getRealUser();
    // re-login only in case it is the login user or superuser.
    if (loginUser.equals(currentUser) || loginUser.equals(realUser)) {
      if (UserGroupInformation.isLoginKeytabBased()) {
        log.info("Performing keytab-based Kerberos re-login");
        loginUser.reloginFromKeytab();
      } else {
        log.info("Performing ticket-cache-based Kerberos re-login");
        loginUser.reloginFromTicketCache();
      }
      // Avoid the replay attack protection, sleep 1 to 5000ms
      try {
        Thread.sleep((SASL_BACKOFF_RAND.nextInt(RELOGIN_MAX_BACKOFF) + 1));
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      }
    } else {
      log.debug("Not attempting Kerberos re-login: loginUser={}, currentUser={}, realUser={}",
          loginUser, currentUser, realUser);
    }
  } catch (IOException e) {
    // The inability to check is worrisome and deserves a RuntimeException instead of a
    // propagated IO-like Exception.
    log.warn("Failed to check (and/or perform) Kerberos client re-login", e);
    throw new RuntimeException(e);
  }
}
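Because createClientTransport() above calls attemptClientReLogin() (which re-logs in and sleeps a random backoff) before rethrowing the TTransportException, a caller can reasonably retry once. A hedged sketch of that caller-side pattern; address, timeout, and saslParams are assumed to be in scope and are not part of the Accumulo sources quoted here:

TTransport transport;
try {
  transport = ThriftUtil.createClientTransport(address, timeout, null, saslParams);
} catch (TTransportException e) {
  // A Kerberos re-login plus random backoff was already attempted above, so retry once
  transport = ThriftUtil.createClientTransport(address, timeout, null, saslParams);
}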
From source file:org.apache.accumulo.examples.cli.MapReduceClientOpts.java
License:Apache License
@Override
public AuthenticationToken getToken() {
  AuthenticationToken authToken = super.getToken();
  // For MapReduce, Kerberos credentials don't make it to the Mappers and Reducers,
  // so we need to request a delegation token and use that instead.
  if (authToken instanceof KerberosToken) {
    log.info("Received KerberosToken, fetching DelegationToken for MapReduce");
    try {
      UserGroupInformation user = UserGroupInformation.getCurrentUser();
      if (!user.hasKerberosCredentials()) {
        throw new IllegalStateException("Expected current user to have Kerberos credentials");
      }
      String newPrincipal = user.getUserName();
      log.info("Obtaining delegation token for {}", newPrincipal);
      Connector conn = getConnector();
      // Do the explicit check to see if the user has the permission to get a delegation token
      if (!conn.securityOperations().hasSystemPermission(conn.whoami(),
          SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
        log.error(
            "{} doesn't have the {} SystemPermission necessary to obtain a delegation token."
                + " MapReduce tasks cannot automatically use the client's credentials on remote servers."
                + " Delegation tokens provide a means to run MapReduce without distributing the user's credentials.",
            user.getUserName(), SystemPermission.OBTAIN_DELEGATION_TOKEN.name());
        throw new IllegalStateException(
            conn.whoami() + " does not have permission to obtain a delegation token");
      }
      // Get the delegation token from Accumulo
      return conn.securityOperations().getDelegationToken(new DelegationTokenConfig());
    } catch (Exception e) {
      final String msg = "Failed to acquire DelegationToken for use with MapReduce";
      log.error(msg, e);
      throw new RuntimeException(msg, e);
    }
  }
  return authToken;
}