Example usage for org.apache.hadoop.security UserGroupInformation getCurrentUser

Introduction

This page collects example usages of org.apache.hadoop.security.UserGroupInformation.getCurrentUser from open-source projects.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation getCurrentUser() throws IOException 

Document

Return the current user, including any doAs in the current stack.
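
For illustration, here is a minimal, hypothetical sketch of the doAs behavior described above; the class name and the user "alice" are invented for the example:

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class CurrentUserDemo {
    public static void main(String[] args) throws Exception {
        // Outside any doAs block, getCurrentUser() falls back to the login user.
        System.out.println(UserGroupInformation.getCurrentUser().getUserName());

        // Inside doAs, getCurrentUser() resolves to the proxied user instead.
        UserGroupInformation alice = UserGroupInformation.createRemoteUser("alice");
        String name = alice.doAs((PrivilegedExceptionAction<String>) () ->
                UserGroupInformation.getCurrentUser().getUserName());
        System.out.println(name); // prints "alice"
    }
}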

Usage

From source file:org.apache.metron.maas.service.yarn.YarnUtils.java

License:Apache License

public UserGroupInformation createUserGroup(Credentials credentials) throws IOException {
    credentials = credentials == null ? UserGroupInformation.getCurrentUser().getCredentials() : credentials;
    String appSubmitterUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    UserGroupInformation appSubmitterUgi = UserGroupInformation.createRemoteUser(appSubmitterUserName);
    appSubmitterUgi.addCredentials(credentials);
    return appSubmitterUgi;
}

From source file:org.apache.metron.maas.service.yarn.YarnUtils.java

License:Apache License

public ByteBuffer tokensFromCredentials(Credentials credentials) throws IOException {
    // Note: the Credentials, Token, UserGroupInformation and DataOutputBuffer
    // classes are marked as LimitedPrivate
    credentials = credentials == null ? UserGroupInformation.getCurrentUser().getCredentials() : credentials;
    // Remove the AM->RM token first so that containers cannot access it.
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    LOG.info("Executing with tokens:");
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        LOG.info(token);
        if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
            iter.remove();
        }
    }
    // Serialize the remaining tokens only after the AM->RM token has been removed.
    DataOutputBuffer dob = new DataOutputBuffer();
    credentials.writeTokenStorageToStream(dob);
    return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
}
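
A typical consumer of the returned buffer hands it to a container launch context. The following is a hypothetical sketch (the yarnUtils instance is assumed), not part of YarnUtils:

// Serialize the current user's tokens and attach them to a container.
ByteBuffer allTokens = yarnUtils.tokensFromCredentials(null);
ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
ctx.setTokens(allTokens.duplicate()); // duplicate: the buffer may be consumed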

From source file:org.apache.nifi.atlas.security.Kerberos.java

License:Apache License

@Override
public AtlasClientV2 createClient(String[] baseUrls) {
    final Configuration hadoopConf = new Configuration();
    hadoopConf.set("hadoop.security.authentication", "kerberos");
    UserGroupInformation.setConfiguration(hadoopConf);
    final UserGroupInformation ugi;
    try {
        UserGroupInformation.loginUserFromKeytab(principal, keytab);
        ugi = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
        throw new RuntimeException("Failed to login with Kerberos due to: " + e, e);
    }
    return new AtlasClientV2(ugi, null, baseUrls);
}

From source file:org.apache.nifi.hadoop.SecurityUtil.java

License:Apache License

/**
 * Initializes UserGroupInformation with the given Configuration and performs the login for the given principal
 * and keytab. All logins should happen through this class to ensure other threads are not concurrently modifying
 * UserGroupInformation.
 * <p/>
 * As of Apache NiFi 1.5.0, this method uses {@link UserGroupInformation#loginUserFromKeytab(String, String)} to
 * authenticate the given <code>principal</code>, which sets the static variable <code>loginUser</code> in the
 * {@link UserGroupInformation} instance.  Setting <code>loginUser</code> is necessary for
 * {@link org.apache.hadoop.ipc.Client.Connection#handleSaslConnectionFailure(int, int, Exception, Random, UserGroupInformation)}
 * to attempt a relogin during a connection failure.  The <code>handleSaslConnectionFailure</code> method
 * statically calls <code>UserGroupInformation.getLoginUser().reloginFromKeytab()</code>; if <code>loginUser</code>
 * is not set, <code>getLoginUser()</code> can return null and the Hadoop operation fails.
 * <p/>
 * In previous versions of NiFi, {@link UserGroupInformation#loginUserFromKeytabAndReturnUGI(String, String)} was
 * used to authenticate the <code>principal</code>; that method does not set <code>loginUser</code>, making it
 * impossible for
 * {@link org.apache.hadoop.ipc.Client.Connection#handleSaslConnectionFailure(int, int, Exception, Random, UserGroupInformation)}
 * to implicitly relogin the principal.
 *
 * @param config the configuration instance
 * @param principal the principal to authenticate as
 * @param keyTab the keytab to authenticate with
 *
 * @return the UGI for the given principal
 *
 * @throws IOException if login failed
 */
public static synchronized UserGroupInformation loginKerberos(final Configuration config,
        final String principal, final String keyTab) throws IOException {
    Validate.notNull(config);
    Validate.notNull(principal);
    Validate.notNull(keyTab);

    UserGroupInformation.setConfiguration(config);
    UserGroupInformation.loginUserFromKeytab(principal.trim(), keyTab.trim());
    return UserGroupInformation.getCurrentUser();
}
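
A plausible call site for this helper, with an invented principal and keytab path:

Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
// Logging in through the shared helper keeps the static loginUser set, so the
// IPC layer can relogin from the keytab after a SASL connection failure.
UserGroupInformation ugi = SecurityUtil.loginKerberos(conf,
        "nifi/host.example.com@EXAMPLE.COM", "/etc/security/keytabs/nifi.keytab");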

From source file:org.apache.omid.tools.hbase.HBaseLogin.java

License:Apache License

public static UserGroupInformation loginIfNeeded(SecureHBaseConfig config) throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
        LOG.info("Security is enabled, logging in with principal={}, keytab={}", config.getPrincipal(),
                config.getKeytab());
        UserGroupInformation.loginUserFromKeytab(config.getPrincipal(), config.getKeytab());
    }
    return UserGroupInformation.getCurrentUser();
}
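
A hypothetical caller sketch; the SecureHBaseConfig setters are assumed to mirror the getters used above, and the principal and keytab values are invented:

SecureHBaseConfig config = new SecureHBaseConfig();
config.setPrincipal("omid/host.example.com@EXAMPLE.COM"); // assumed setter
config.setKeytab("/etc/security/keytabs/omid.keytab");    // assumed setter
// On a non-secure cluster this is a no-op and simply returns the current user.
UserGroupInformation ugi = HBaseLogin.loginIfNeeded(config);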

From source file:org.apache.oozie.action.hadoop.HCatLauncherURIHandler.java

License:Apache License

private HCatClient getHCatClient(URI uri, Configuration conf) throws LauncherException {
    // Do not use the constructor public HiveConf(Configuration other, Class<?> cls)
    // It overwrites the values in conf with default values
    final HiveConf hiveConf = new HiveConf();
    for (Entry<String, String> entry : conf) {
        hiveConf.set(entry.getKey(), entry.getValue());
    }
    String serverURI = getMetastoreConnectURI(uri);
    if (!serverURI.equals("")) {
        hiveConf.set("hive.metastore.local", "false");
    }
    hiveConf.set(HiveConf.ConfVars.METASTOREURIS.varname, serverURI);
    try {
        System.out.println("Creating HCatClient for user=" + UserGroupInformation.getCurrentUser()
                + " and server=" + serverURI);
        // The delegation token fetched from the metastore has new Text() as its service, and
        // HiveMetastoreClient looks for the same unless overridden by hive.metastore.token.signature.
        // We are good as long as HCatCredentialHelper does not change the service of the token.
        return HCatClient.create(hiveConf);
    } catch (HCatException e) {
        throw new LauncherException("Error trying to connect to " + serverURI, e);
    } catch (IOException e) {
        throw new LauncherException("Error trying to connect to " + serverURI, e);
    }
}
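
The constructor warning at the top of this method is worth spelling out. A minimal sketch of the two approaches, hypothetical and for illustration only:

// Risky, per the comment above: HiveConf(Configuration, Class) re-applies
// Hive's default values, which can overwrite entries carried in conf.
HiveConf fromConstructor = new HiveConf(conf, HCatLauncherURIHandler.class);

// Safer: start from a plain HiveConf and copy every entry from conf on top.
HiveConf copied = new HiveConf();
for (Map.Entry<String, String> entry : conf) {
    copied.set(entry.getKey(), entry.getValue());
}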

From source file:org.apache.oozie.action.hadoop.LauncherAM.java

License:Apache License

private static UserGroupInformation getUserGroupInformation(Configuration launcherConf, Text... kindToFilter)
        throws IOException {
    final String submitterUser = launcherConf.get(OOZIE_SUBMITTER_USER);
    Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
    filterTokensByKind(credentials, kindToFilter);

    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(submitterUser);
    ugi.addCredentials(credentials);
    return ugi;
}
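
filterTokensByKind is not shown in this excerpt. A plausible sketch, offered as an assumption that mirrors the token-filtering loop in the YarnUtils example above:

// Drop every token whose kind matches one of the given kinds (e.g. the
// AM->RM token), so it never reaches the launched action.
private static void filterTokensByKind(Credentials credentials, Text... kindToFilter) {
    Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
    while (iter.hasNext()) {
        Token<?> token = iter.next();
        for (Text kind : kindToFilter) {
            if (token.getKind().equals(kind)) {
                iter.remove();
                break;
            }
        }
    }
}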

From source file:org.apache.phoenix.queryserver.server.PhoenixDoAsCallbackTest.java

License:Apache License

@Test
public void proxyingUsersAreCached() throws Exception {
    Configuration conf = new Configuration(false);
    // The user "server" can impersonate anyone
    conf.set("hadoop.proxyuser.server.groups", "*");
    conf.set("hadoop.proxyuser.server.hosts", "*");
    // Trigger ProxyUsers to refresh itself with the above configuration
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    UserGroupInformation serverUgi = UserGroupInformation.createUserForTesting("server", new String[0]);
    PhoenixDoAsCallback callback = new PhoenixDoAsCallback(serverUgi, conf);

    UserGroupInformation user1 = callback.doAsRemoteUser("user1", "localhost:1234",
            new Callable<UserGroupInformation>() {
                public UserGroupInformation call() throws Exception {
                    return UserGroupInformation.getCurrentUser();
                }
            });

    UserGroupInformation user2 = callback.doAsRemoteUser("user2", "localhost:1235",
            new Callable<UserGroupInformation>() {
                public UserGroupInformation call() throws Exception {
                    return UserGroupInformation.getCurrentUser();
                }
            });

    UserGroupInformation user1Reference = callback.doAsRemoteUser("user1", "localhost:1234",
            new Callable<UserGroupInformation>() {
                public UserGroupInformation call() throws Exception {
                    return UserGroupInformation.getCurrentUser();
                }
            });

    // UserGroupInformation.getCurrentUser() returns a new UGI instance on each call, but the
    // underlying Subject is the same, so equals()/hashCode() verify that the cached instance is reused.
    assertNotEquals(user1.hashCode(), user2.hashCode());
    assertEquals("These should be the same (cached) instance", user1.hashCode(), user1Reference.hashCode());
    assertEquals("These should be the same (cached) instance", user1, user1Reference);
}
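
The comment above hinges on how UGI implements equality. A minimal sketch of what the test relies on (not part of the original file):

// Two lookups of the current user may be distinct UGI wrapper objects,
// but both wrap the same Subject, and UGI defines equals()/hashCode()
// on that Subject, so the two compare equal.
UserGroupInformation first = UserGroupInformation.getCurrentUser();
UserGroupInformation second = UserGroupInformation.getCurrentUser();
assertEquals(first, second);
assertEquals(first.hashCode(), second.hashCode());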

From source file:org.apache.phoenix.queryserver.server.QueryServer.java

License:Apache License

@Override
public int run(String[] args) throws Exception {
    logProcessInfo(getConf());
    final boolean loadBalancerEnabled = getConf().getBoolean(
            QueryServices.PHOENIX_QUERY_SERVER_LOADBALANCER_ENABLED,
            QueryServicesOptions.DEFAULT_PHOENIX_QUERY_SERVER_LOADBALANCER_ENABLED);
    try {
        final boolean isKerberos = "kerberos"
                .equalsIgnoreCase(getConf().get(QueryServices.QUERY_SERVER_HBASE_SECURITY_CONF_ATTRIB));
        final boolean disableSpnego = getConf().getBoolean(
                QueryServices.QUERY_SERVER_SPNEGO_AUTH_DISABLED_ATTRIB,
                QueryServicesOptions.DEFAULT_QUERY_SERVER_SPNEGO_AUTH_DISABLED);
        String hostname;
        final boolean disableLogin = getConf().getBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN,
                QueryServicesOptions.DEFAULT_QUERY_SERVER_DISABLE_KERBEROS_LOGIN);

        // handle secure cluster credentials
        if (isKerberos && !disableSpnego && !disableLogin) {
            hostname = Strings.domainNamePointerToHostName(DNS.getDefaultHost(
                    getConf().get(QueryServices.QUERY_SERVER_DNS_INTERFACE_ATTRIB, "default"),
                    getConf().get(QueryServices.QUERY_SERVER_DNS_NAMESERVER_ATTRIB, "default")));
            if (LOG.isDebugEnabled()) {
                LOG.debug("Login to " + hostname + " using "
                        + getConf().get(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB) + " and principal "
                        + getConf().get(QueryServices.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB) + ".");
            }
            SecurityUtil.login(getConf(), QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB,
                    QueryServices.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB, hostname);
            LOG.info("Login successful.");
        } else {
            hostname = InetAddress.getLocalHost().getHostName();
            LOG.info(" Kerberos is off and hostname is : " + hostname);
        }

        Class<? extends PhoenixMetaFactory> factoryClass = getConf().getClass(
                QueryServices.QUERY_SERVER_META_FACTORY_ATTRIB, PhoenixMetaFactoryImpl.class,
                PhoenixMetaFactory.class);
        int port = getConf().getInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB,
                QueryServicesOptions.DEFAULT_QUERY_SERVER_HTTP_PORT);
        LOG.debug("Listening on port " + port);
        PhoenixMetaFactory factory = factoryClass.getDeclaredConstructor(Configuration.class)
                .newInstance(getConf());
        Meta meta = factory.create(Arrays.asList(args));
        Service service = new LocalService(meta);

        // Start building the Avatica HttpServer
        final HttpServer.Builder builder = new HttpServer.Builder().withPort(port).withHandler(service,
                getSerialization(getConf()));

        // Enable SPNEGO and Impersonation when using Kerberos
        if (isKerberos) {
            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            LOG.debug("Current user is " + ugi);
            if (!ugi.hasKerberosCredentials()) {
                ugi = UserGroupInformation.getLoginUser();
                LOG.debug("Current user does not have Kerberos credentials, using instead " + ugi);
            }

            // Make sure the proxyuser configuration is up to date
            ProxyUsers.refreshSuperUserGroupsConfiguration(getConf());

            String keytabPath = getConf().get(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB);
            File keytab = new File(keytabPath);

            String realmsString = getConf().get(QueryServices.QUERY_SERVER_KERBEROS_ALLOWED_REALMS, null);
            String[] additionalAllowedRealms = null;
            if (null != realmsString) {
                additionalAllowedRealms = StringUtils.split(realmsString, ',');
            }

            // Enable SPNEGO and impersonation (through standard Hadoop configuration means)
            builder.withSpnego(ugi.getUserName(), additionalAllowedRealms).withAutomaticLogin(keytab)
                    .withImpersonation(new PhoenixDoAsCallback(ugi, getConf()));

        }
        setRemoteUserExtractorIfNecessary(builder, getConf());

        // Build and start the HttpServer
        server = builder.build();
        server.start();
        if (loadBalancerEnabled) {
            registerToServiceProvider(hostname);
        }
        runningLatch.countDown();
        server.join();
        return 0;
    } catch (Throwable t) {
        LOG.fatal("Unrecoverable service error. Shutting down.", t);
        this.t = t;
        return -1;
    } finally {
        if (loadBalancerEnabled) {
            unRegister();
        }
    }
}

From source file:org.apache.ratis.hadooprpc.Proxy.java

License:Apache License

public static <PROTOCOL> PROTOCOL getProxy(Class<PROTOCOL> clazz, String addressStr, Configuration conf)
        throws IOException {
    RPC.setProtocolEngine(conf, clazz, ProtobufRpcEngineShaded.class);
    return RPC.getProxy(clazz, RPC.getProtocolVersion(clazz),
            org.apache.ratis.util.NetUtils.createSocketAddr(addressStr), UserGroupInformation.getCurrentUser(),
            conf, NetUtils.getSocketFactory(conf, clazz));
}