List of usage examples for org.apache.hadoop.security.UserGroupInformation.getUserName()
@InterfaceAudience.Public
@InterfaceStability.Evolving
public String getUserName()
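Before the project examples, a minimal sketch of the call itself (the class name GetUserNameExample is illustrative, not taken from any of the sources below): getUserName() returns the full name of the user a UGI represents, so the output depends on how the process logged in.

import java.io.IOException;
import org.apache.hadoop.security.UserGroupInformation;

public class GetUserNameExample {
    public static void main(String[] args) throws IOException {
        // Resolve the UGI for the current access-control context
        // (the login user, or the proxy user inside a doAs block).
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

        // getUserName() returns the full principal name, e.g. "alice@EXAMPLE.COM"
        // under Kerberos, or the OS account name under simple authentication.
        System.out.println("Current user: " + ugi.getUserName());
    }
}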
From source file:org.apache.lens.server.auth.DelegationTokenAuthenticationFilter.java
License:Apache License
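This JAX-RS request filter authenticates callers that present an HDFS delegation token in a request header: it decodes the token into a UserGroupInformation, runs a dummy HDFS call under doAs to validate the token, and uses getUserName() both for logging and to build the request's security context.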
@Override
public void filter(ContainerRequestContext requestContext) throws IOException {
    Principal userPrincipal = requestContext.getSecurityContext().getUserPrincipal();
    if (userPrincipal != null) {
        log.info("Authentication already done for principal {}, skipping this filter...",
                userPrincipal.getName());
        return;
    }
    // only authenticate when @Authenticate is present on resource
    if (resourceInfo.getResourceClass() == null || resourceInfo.getResourceMethod() == null) {
        return;
    }
    if (!(resourceInfo.getResourceClass().isAnnotationPresent(Authenticate.class)
            || resourceInfo.getResourceMethod().isAnnotationPresent(Authenticate.class))) {
        return;
    }
    String delegationToken = requestContext.getHeaderString(HDFS_DELEGATION_TKN_HEADER);
    if (StringUtils.isBlank(delegationToken)) {
        return;
    }
    Token<AbstractDelegationTokenIdentifier> dt = new Token<>();
    dt.decodeFromUrlString(delegationToken);
    UserGroupInformation user = dt.decodeIdentifier().getUser();
    user.addToken(dt);
    log.info("Received delegation token for user: {}", user.getUserName());
    try {
        user.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws IOException {
                try (FileSystem fs = FileSystem.get(new Configuration())) {
                    fs.exists(PATH_TO_CHECK); // dummy HDFS call to validate the token
                    requestContext.setSecurityContext(
                            createSecurityContext(user.getUserName(), AUTH_SCHEME));
                    return null;
                }
            }
        });
    } catch (InterruptedException | IOException e) {
        log.error("Error while doing HDFS op: ", e);
        throw new NotAuthorizedException(
                Response.status(401).entity("Invalid HDFS delegation token").build());
    }
}
From source file:org.apache.oozie.service.UserGroupInformationService.java
License:Apache License
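On service shutdown, Oozie closes all filesystems cached for each UserGroupInformation; getUserName() identifies the affected user in the warning logged when a close fails.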
@Override
public void destroy() {
    for (UserGroupInformation ugi : cache.values()) {
        try {
            FileSystem.closeAllForUGI(ugi);
        } catch (IOException ioe) {
            XLog.getLog(this.getClass())
                    .warn("Exception occurred while closing filesystems for " + ugi.getUserName(), ioe);
        }
    }
    cache.clear();
}
From source file:org.apache.phoenix.queryserver.server.Main.java
License:Apache License
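Startup logic for the Phoenix Query Server: on a Kerberos-secured cluster it logs in from a keytab, then passes the login user's name from getUserName() to the Avatica HttpServer builder to configure SPNEGO authentication and impersonation.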
@Override
public int run(String[] args) throws Exception {
    logProcessInfo(getConf());
    try {
        final boolean isKerberos = "kerberos"
                .equalsIgnoreCase(getConf().get(QueryServices.QUERY_SERVER_HBASE_SECURITY_CONF_ATTRIB));

        // handle secure cluster credentials
        if (isKerberos) {
            String hostname = Strings.domainNamePointerToHostName(DNS.getDefaultHost(
                    getConf().get(QueryServices.QUERY_SERVER_DNS_INTERFACE_ATTRIB, "default"),
                    getConf().get(QueryServices.QUERY_SERVER_DNS_NAMESERVER_ATTRIB, "default")));
            if (LOG.isDebugEnabled()) {
                LOG.debug("Login to " + hostname + " using "
                        + getConf().get(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB)
                        + " and principal "
                        + getConf().get(QueryServices.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB) + ".");
            }
            SecurityUtil.login(getConf(), QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB,
                    QueryServices.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB, hostname);
            LOG.info("Login successful.");
        }

        Class<? extends PhoenixMetaFactory> factoryClass = getConf().getClass(
                QueryServices.QUERY_SERVER_META_FACTORY_ATTRIB, PhoenixMetaFactoryImpl.class,
                PhoenixMetaFactory.class);
        int port = getConf().getInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB,
                QueryServicesOptions.DEFAULT_QUERY_SERVER_HTTP_PORT);
        LOG.debug("Listening on port " + port);
        PhoenixMetaFactory factory = factoryClass.getDeclaredConstructor(Configuration.class)
                .newInstance(getConf());
        Meta meta = factory.create(Arrays.asList(args));
        Service service = new LocalService(meta);

        // Start building the Avatica HttpServer
        final HttpServer.Builder builder = new HttpServer.Builder().withPort(port)
                .withHandler(service, getSerialization(getConf()));

        // Enable SPNEGO and impersonation when using Kerberos
        if (isKerberos) {
            UserGroupInformation ugi = UserGroupInformation.getLoginUser();
            // Make sure the proxyuser configuration is up to date
            ProxyUsers.refreshSuperUserGroupsConfiguration(getConf());
            String keytabPath = getConf().get(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB);
            File keytab = new File(keytabPath);
            // Enable SPNEGO and impersonation (through standard Hadoop configuration means)
            builder.withSpnego(ugi.getUserName()).withAutomaticLogin(keytab)
                    .withImpersonation(new PhoenixDoAsCallback(ugi));
        }

        // Build and start the HttpServer
        server = builder.build();
        server.start();
        runningLatch.countDown();
        server.join();
        return 0;
    } catch (Throwable t) {
        LOG.fatal("Unrecoverable service error. Shutting down.", t);
        this.t = t;
        return -1;
    }
}
From source file:org.apache.phoenix.queryserver.server.QueryServer.java
License:Apache License
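An expanded variant of the Query Server startup above: it additionally honors flags for disabling SPNEGO or the Kerberos login, accepts extra allowed realms, and can register with a load balancer; getUserName() again supplies the SPNEGO principal.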
@Override
public int run(String[] args) throws Exception {
    logProcessInfo(getConf());
    final boolean loadBalancerEnabled = getConf().getBoolean(
            QueryServices.PHOENIX_QUERY_SERVER_LOADBALANCER_ENABLED,
            QueryServicesOptions.DEFAULT_PHOENIX_QUERY_SERVER_LOADBALANCER_ENABLED);
    try {
        final boolean isKerberos = "kerberos"
                .equalsIgnoreCase(getConf().get(QueryServices.QUERY_SERVER_HBASE_SECURITY_CONF_ATTRIB));
        final boolean disableSpnego = getConf().getBoolean(
                QueryServices.QUERY_SERVER_SPNEGO_AUTH_DISABLED_ATTRIB,
                QueryServicesOptions.DEFAULT_QUERY_SERVER_SPNEGO_AUTH_DISABLED);
        String hostname;
        final boolean disableLogin = getConf().getBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN,
                QueryServicesOptions.DEFAULT_QUERY_SERVER_DISABLE_KERBEROS_LOGIN);

        // handle secure cluster credentials
        if (isKerberos && !disableSpnego && !disableLogin) {
            hostname = Strings.domainNamePointerToHostName(DNS.getDefaultHost(
                    getConf().get(QueryServices.QUERY_SERVER_DNS_INTERFACE_ATTRIB, "default"),
                    getConf().get(QueryServices.QUERY_SERVER_DNS_NAMESERVER_ATTRIB, "default")));
            if (LOG.isDebugEnabled()) {
                LOG.debug("Login to " + hostname + " using "
                        + getConf().get(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB)
                        + " and principal "
                        + getConf().get(QueryServices.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB) + ".");
            }
            SecurityUtil.login(getConf(), QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB,
                    QueryServices.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB, hostname);
            LOG.info("Login successful.");
        } else {
            hostname = InetAddress.getLocalHost().getHostName();
            LOG.info("Kerberos is off and hostname is: " + hostname);
        }

        Class<? extends PhoenixMetaFactory> factoryClass = getConf().getClass(
                QueryServices.QUERY_SERVER_META_FACTORY_ATTRIB, PhoenixMetaFactoryImpl.class,
                PhoenixMetaFactory.class);
        int port = getConf().getInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB,
                QueryServicesOptions.DEFAULT_QUERY_SERVER_HTTP_PORT);
        LOG.debug("Listening on port " + port);
        PhoenixMetaFactory factory = factoryClass.getDeclaredConstructor(Configuration.class)
                .newInstance(getConf());
        Meta meta = factory.create(Arrays.asList(args));
        Service service = new LocalService(meta);

        // Start building the Avatica HttpServer
        final HttpServer.Builder builder = new HttpServer.Builder().withPort(port)
                .withHandler(service, getSerialization(getConf()));

        // Enable SPNEGO and impersonation when using Kerberos
        if (isKerberos) {
            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            LOG.debug("Current user is " + ugi);
            if (!ugi.hasKerberosCredentials()) {
                ugi = UserGroupInformation.getLoginUser();
                LOG.debug("Current user does not have Kerberos credentials, using instead " + ugi);
            }
            // Make sure the proxyuser configuration is up to date
            ProxyUsers.refreshSuperUserGroupsConfiguration(getConf());
            String keytabPath = getConf().get(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB);
            File keytab = new File(keytabPath);
            String realmsString = getConf().get(QueryServices.QUERY_SERVER_KERBEROS_ALLOWED_REALMS, null);
            String[] additionalAllowedRealms = null;
            if (null != realmsString) {
                additionalAllowedRealms = StringUtils.split(realmsString, ',');
            }
            // Enable SPNEGO and impersonation (through standard Hadoop configuration means)
            builder.withSpnego(ugi.getUserName(), additionalAllowedRealms).withAutomaticLogin(keytab)
                    .withImpersonation(new PhoenixDoAsCallback(ugi, getConf()));
        }
        setRemoteUserExtractorIfNecessary(builder, getConf());

        // Build and start the HttpServer
        server = builder.build();
        server.start();
        if (loadBalancerEnabled) {
            registerToServiceProvider(hostname);
        }
        runningLatch.countDown();
        server.join();
        return 0;
    } catch (Throwable t) {
        LOG.fatal("Unrecoverable service error. Shutting down.", t);
        this.t = t;
        return -1;
    } finally {
        if (loadBalancerEnabled) {
            unRegister();
        }
    }
}
From source file:org.apache.ranger.audit.provider.MiscUtil.java
License:Apache License
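Builds a UserGroupInformation from a JAAS Subject; getUserName() is logged to confirm which principal the resulting UGI represents.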
public static UserGroupInformation createUGIFromSubject(Subject subject) throws IOException {
    logger.info("SUBJECT " + (subject == null ? "not found" : "found"));
    UserGroupInformation ugi = null;
    if (subject != null) {
        logger.info("SUBJECT.PRINCIPALS.size()=" + subject.getPrincipals().size());
        Set<Principal> principals = subject.getPrincipals();
        for (Principal principal : principals) {
            logger.info("SUBJECT.PRINCIPAL.NAME=" + principal.getName());
        }
        try {
            // Do not remove the below statement. The default
            // getLoginUser does some initialization which is needed
            // for getUGIFromSubject() to work.
            UserGroupInformation.getLoginUser();
            logger.info("Default UGI before using new Subject:" + UserGroupInformation.getLoginUser());
        } catch (Throwable t) {
            logger.error(t);
        }
        ugi = UserGroupInformation.getUGIFromSubject(subject);
        logger.info("SUBJECT.UGI.NAME=" + ugi.getUserName() + ", ugi=" + ugi);
    } else {
        logger.info("Server username is not available");
    }
    return ugi;
}
From source file:org.apache.ranger.tagsync.process.TagSynchronizer.java
License:Apache License
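When tagsync is configured for Kerberos, this method logs in from a keytab and records the resulting login identity, obtained via getUserName(), in the tagsync properties.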
private static boolean initializeKerberosIdentity(Properties props) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("==> TagSynchronizer.initializeKerberosIdentity()");
    }

    boolean ret = false;

    String authenticationType = TagSyncConfig.getAuthenticationType(props);
    String principal = TagSyncConfig.getKerberosPrincipal(props);
    String keytab = TagSyncConfig.getKerberosKeytab(props);
    String nameRules = TagSyncConfig.getNameRules(props);

    if (LOG.isDebugEnabled()) {
        LOG.debug("authenticationType=" + authenticationType);
        LOG.debug("principal=" + principal);
        LOG.debug("keytab=" + keytab);
        LOG.debug("nameRules=" + nameRules);
    }

    final boolean isKerberized = !StringUtils.isEmpty(authenticationType)
            && authenticationType.trim().equalsIgnoreCase(AUTH_TYPE_KERBEROS)
            && SecureClientLogin.isKerberosCredentialExists(principal, keytab);

    if (isKerberized) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Trying to get kerberos identity");
        }
        Subject subject = null;
        try {
            subject = SecureClientLogin.loginUserFromKeytab(principal, keytab, nameRules);
        } catch (IOException exception) {
            LOG.error("Could not get Subject from principal:[" + principal + "], keytab:[" + keytab
                    + "], nameRules:[" + nameRules + "]", exception);
        }
        UserGroupInformation kerberosIdentity;
        if (subject != null) {
            try {
                UserGroupInformation.loginUserFromSubject(subject);
                kerberosIdentity = UserGroupInformation.getLoginUser();
                if (kerberosIdentity != null) {
                    props.put(TagSyncConfig.TAGSYNC_KERBEROS_IDENTITY, kerberosIdentity.getUserName());
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Got UGI, user:[" + kerberosIdentity.getUserName() + "]");
                    }
                    ret = true;
                } else {
                    LOG.error("KerberosIdentity is null!");
                }
            } catch (IOException exception) {
                LOG.error("Failed to get UGI from Subject:[" + subject + "]", exception);
            }
        }
    } else {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Not configured for Kerberos Authentication");
        }
        props.remove(TagSyncConfig.TAGSYNC_KERBEROS_IDENTITY);
        ret = true;
    }

    if (!ret) {
        props.remove(TagSyncConfig.TAGSYNC_KERBEROS_IDENTITY);
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("<== TagSynchronizer.initializeKerberosIdentity() : " + ret);
    }

    return ret;
}
From source file:org.apache.slider.server.appmaster.rpc.RpcBinder.java
License:Apache License
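Builds a proxy for the Slider cluster protocol: it creates a remote-user UGI named after the current user via getUserName(), attaches the client-to-AM token on secure clusters, and connects inside a doAs block.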
public static SliderClusterProtocol getProxy(final Configuration conf, ApplicationReport application,
        final int rpcTimeout) throws IOException, SliderException, InterruptedException {
    String host = application.getHost();
    int port = application.getRpcPort();
    String address = host + ":" + port;
    if (host == null || 0 == port) {
        throw new SliderException(SliderExitCodes.EXIT_CONNECTIVITY_PROBLEM,
                "Slider instance " + application.getName()
                        + " isn't providing a valid address for the Slider RPC protocol: " + address);
    }

    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    final UserGroupInformation newUgi = UserGroupInformation.createRemoteUser(currentUser.getUserName());
    final InetSocketAddress serviceAddr = NetUtils.createSocketAddrForHost(application.getHost(),
            application.getRpcPort());
    SliderClusterProtocol realProxy;

    log.debug("Connecting to {}", serviceAddr);
    if (UserGroupInformation.isSecurityEnabled()) {
        org.apache.hadoop.yarn.api.records.Token clientToAMToken = application.getClientToAMToken();
        Token<ClientToAMTokenIdentifier> token = ConverterUtils.convertFromYarn(clientToAMToken, serviceAddr);
        newUgi.addToken(token);
        realProxy = newUgi.doAs(new PrivilegedExceptionAction<SliderClusterProtocol>() {
            @Override
            public SliderClusterProtocol run() throws IOException {
                return connectToServer(serviceAddr, newUgi, conf, rpcTimeout);
            }
        });
    } else {
        return connectToServer(serviceAddr, newUgi, conf, rpcTimeout);
    }
    return realProxy;
}
From source file:org.apache.tajo.cli.tools.TajoDump.java
License:Apache License
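Writes the header of a Tajo dump file; getUserName() supplies the optional "Dump user" field.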
private static void printHeader(PrintWriter writer, UserGroupInformation userInfo,
        boolean includeUserName, boolean includeDate) {
    writer.write("--\n");
    writer.write("-- Tajo database dump\n");
    if (includeUserName) {
        writer.write("--\n-- Dump user: " + userInfo.getUserName() + "\n");
    }
    if (includeDate) {
        writer.write("--\n-- Dump date: " + toDateString() + "\n");
    }
    writer.write("--\n");
    writer.write("\n");
}
From source file:org.apache.twill.filesystem.LocationTestBase.java
License:Apache License
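A test asserting that a LocationFactory created inside ugi.doAs() resolves its home location from the UGI's user name.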
@Test
public void testHomeLocation() throws Exception {
    LocationFactory locationFactory = createLocationFactory("/");
    // Without UGI, the home location should be the same as the user
    Assert.assertEquals(System.getProperty("user.name"), locationFactory.getHomeLocation().getName());

    // With UGI, the home location should be based on the UGI current user
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(System.getProperty("user.name") + "1");
    locationFactory = ugi.doAs(new PrivilegedExceptionAction<LocationFactory>() {
        @Override
        public LocationFactory run() throws Exception {
            return createLocationFactory("/");
        }
    });
    Assert.assertEquals(ugi.getUserName(), locationFactory.getHomeLocation().getName());
}
From source file:org.elasticsearch.repositories.hdfs.HdfsSecurityContext.java
License:Apache License
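Computes the Java security permissions required by the Elasticsearch HDFS repository; under a keytab (Kerberos) login, getUserName() scopes a ServicePermission so that Kerberos connections may only be initiated as the logged-in principal.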
private Permission[] renderPermissions(UserGroupInformation ugi) {
    Permission[] permissions;
    if (ugi.isFromKeytab()) {
        // KERBEROS
        // Leave room to append one extra permission based on the logged in user's info.
        int permlen = KERBEROS_AUTH_PERMISSIONS.length + 1;
        permissions = new Permission[permlen];
        System.arraycopy(KERBEROS_AUTH_PERMISSIONS, 0, permissions, 0, KERBEROS_AUTH_PERMISSIONS.length);
        // Append a kerberos.ServicePermission to only allow initiating kerberos connections
        // as the logged in user.
        permissions[permissions.length - 1] = new ServicePermission(ugi.getUserName(), "initiate");
    } else {
        // SIMPLE
        permissions = Arrays.copyOf(SIMPLE_AUTH_PERMISSIONS, SIMPLE_AUTH_PERMISSIONS.length);
    }
    return permissions;
}