Example usage for org.apache.hadoop.fs CommonConfigurationKeysPublic HADOOP_SECURITY_AUTHENTICATION

Introduction

This page collects example usages of org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, the configuration key that selects Hadoop's authentication mechanism ("simple" or "kerberos").

Prototype

public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication";
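
A minimal sketch of the pattern that recurs throughout the examples below: set the key to "kerberos" on a fresh Configuration and hand it to UserGroupInformation before any login or doAs call. The class and method names here are illustrative only, not taken from any of the projects below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.UserGroupInformation;

public class KerberosAuthSetup {
    public static void enableKerberos() {
        // Start from an empty configuration so no default resource files are loaded
        Configuration conf = new Configuration(false);
        // Switch the authentication mechanism from the default "simple" to Kerberos
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        // Must be applied process-wide before any UserGroupInformation login call
        UserGroupInformation.setConfiguration(conf);
    }
}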


Usage

From source file:org.apache.accumulo.test.functional.KerberosProxyIT.java

License:Apache License
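
This @Before method starts a MiniAccumuloCluster with SASL impersonation configured, switches the client JVM's UserGroupInformation over to Kerberos, and then retries a GSSAPI Thrift connection until the proxy is reachable.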

@Before
public void startMac() throws Exception {
    MiniClusterHarness harness = new MiniClusterHarness();
    mac = harness.create(getClass().getName(), testName.getMethodName(), new PasswordToken("unused"),
            new MiniClusterConfigurationCallback() {

                @Override
                public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
                    cfg.setNumTservers(1);
                    Map<String, String> siteCfg = cfg.getSiteConfig();
                    // Allow the proxy to impersonate the client user, but no one else
                    siteCfg.put(Property.INSTANCE_RPC_SASL_ALLOWED_USER_IMPERSONATION.getKey(),
                            proxyPrincipal + ":" + kdc.getRootUser().getPrincipal());
                    siteCfg.put(Property.INSTANCE_RPC_SASL_ALLOWED_HOST_IMPERSONATION.getKey(), "*");
                    cfg.setSiteConfig(siteCfg);
                }

            }, kdc);

    mac.start();
    MiniAccumuloConfigImpl cfg = mac.getConfig();

    // Generate Proxy configuration and start the proxy
    proxyProcess = startProxy(cfg);

    // Enable Kerberos auth
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);

    boolean success = false;
    ClusterUser rootUser = kdc.getRootUser();
    // Rely on the junit timeout rule
    while (!success) {
        UserGroupInformation ugi;
        try {
            ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(),
                    rootUser.getKeytab().getAbsolutePath());
        } catch (IOException ex) {
            log.info("Login as root is failing", ex);
            Thread.sleep(3000);
            continue;
        }

        TSocket socket = new TSocket(hostname, proxyPort);
        log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
        TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname,
                Collections.singletonMap("javax.security.sasl.qop", "auth"), null, socket);

        final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);

        try {
            // UGI transport will perform the doAs for us
            ugiTransport.open();
            success = true;
        } catch (TTransportException e) {
            Throwable cause = e.getCause();
            if (cause instanceof ConnectException) {
                log.info("Proxy not yet up, waiting");
                Thread.sleep(3000);
                proxyProcess = checkProxyAndRestart(proxyProcess, cfg);
                continue;
            }
        } finally {
            ugiTransport.close();
        }
    }

    assertTrue("Failed to connect to the proxy repeatedly", success);
}

From source file:org.apache.accumulo.test.functional.KerberosRenewalIT.java

License:Apache License
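
Here the mini cluster's Kerberos renewal period is shortened so renewal triggers quickly during the test, and the client JVM is switched to Kerberos authentication before the test body runs.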

@Before
public void startMac() throws Exception {
    MiniClusterHarness harness = new MiniClusterHarness();
    mac = harness.create(this, new PasswordToken("unused"), kdc, new MiniClusterConfigurationCallback() {

        @Override
        public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
            Map<String, String> site = cfg.getSiteConfig();
            site.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "15s");
            // Reduce the period just to make sure we trigger renewal fast
            site.put(Property.GENERAL_KERBEROS_RENEWAL_PERIOD.getKey(), "5s");
            cfg.setSiteConfig(site);
        }

    });

    mac.getConfig().setNumTservers(1);
    mac.start();
    // Enable Kerberos auth
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
}

From source file:org.apache.accumulo.test.proxy.SimpleProxyBase.java

License:Apache License
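
This shared setup builds the proxy server's properties for either Kerberos or password authentication; in the Kerberos branch it creates a proxy principal and keytab, enables Kerberos on the UserGroupInformation, and logs the proxy in from the keytab.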

/**
 * Does the actual test setup, invoked by the concrete test class
 */
public static void setUpProxy() throws Exception {
    assertNotNull("Implementations must initialize the TProtocolFactory", factory);

    Connector c = SharedMiniClusterBase.getConnector();
    Instance inst = c.getInstance();
    waitForAccumulo(c);

    hostname = InetAddress.getLocalHost().getCanonicalHostName();

    Properties props = new Properties();
    props.put("instance", inst.getInstanceName());
    props.put("zookeepers", inst.getZooKeepers());

    final String tokenClass;
    if (isKerberosEnabled()) {
        tokenClass = KerberosToken.class.getName();
        TestingKdc kdc = getKdc();

        // Create a principal+keytab for the proxy
        proxyKeytab = new File(kdc.getKeytabDir(), "proxy.keytab");
        hostname = InetAddress.getLocalHost().getCanonicalHostName();
        // Set the primary because the client needs to know it
        proxyPrimary = "proxy";
        // Qualify with an instance
        proxyPrincipal = proxyPrimary + "/" + hostname;
        kdc.createPrincipal(proxyKeytab, proxyPrincipal);
        // Tack on the realm too
        proxyPrincipal = kdc.qualifyUser(proxyPrincipal);

        props.setProperty("kerberosPrincipal", proxyPrincipal);
        props.setProperty("kerberosKeytab", proxyKeytab.getCanonicalPath());
        props.setProperty("thriftServerType", "sasl");

        // Enable Kerberos auth
        Configuration conf = new Configuration(false);
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);

        // Login for the Proxy itself
        UserGroupInformation.loginUserFromKeytab(proxyPrincipal, proxyKeytab.getAbsolutePath());

        // User for tests
        ClusterUser user = kdc.getRootUser();
        clientPrincipal = user.getPrincipal();
        clientKeytab = user.getKeytab();
    } else {
        clientPrincipal = "root";
        tokenClass = PasswordToken.class.getName();
        properties.put("password", SharedMiniClusterBase.getRootPassword());
        hostname = "localhost";
    }

    props.put("tokenClass", tokenClass);

    ClientConfiguration clientConfig = SharedMiniClusterBase.getCluster().getClientConfig();
    String clientConfPath = new File(SharedMiniClusterBase.getCluster().getConfig().getConfDir(), "client.conf")
            .getAbsolutePath();
    props.put("clientConfigurationFile", clientConfPath);
    properties.put("clientConfigurationFile", clientConfPath);

    proxyPort = PortUtils.getRandomFreePort();
    proxyServer = Proxy.createProxyServer(HostAndPort.fromParts(hostname, proxyPort), factory, props,
            clientConfig).server;
    while (!proxyServer.isServing())
        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
}

From source file:org.apache.accumulo.test.replication.KerberosReplicationIT.java

License:Apache License
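
This setup starts a primary and a peer MiniAccumuloCluster against the same KDC and then enables Kerberos authentication for the test JVM.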

@Before
public void setup() throws Exception {
    MiniClusterHarness harness = new MiniClusterHarness();

    // Create a primary and a peer instance, both with the same "root" user
    primary = harness.create(getClass().getName(), testName.getMethodName(), new PasswordToken("unused"),
            getConfigCallback(PRIMARY_NAME), kdc);
    primary.start();

    peer = harness.create(getClass().getName(), testName.getMethodName() + "_peer", new PasswordToken("unused"),
            getConfigCallback(PEER_NAME), kdc);
    peer.start();

    // Enable kerberos auth
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
}

From source file:org.apache.accumulo.test.security.KerberosClientOptsTest.java

License:Apache License
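
The minimal form of the pattern: reset UserGroupInformation to a Kerberos-enabled configuration before each test.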

@Before
public void resetUgiForKrb() {
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
}

From source file:org.apache.atlas.web.listeners.LoginProcessorIT.java

License:Apache License
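
This test feeds Atlas's LoginProcessor a Hadoop configuration with Kerberos authentication, authorization, and an auth-to-local rule enabled, then asserts that the keytab-based login succeeded.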

@Test
public void testKerberosLogin() throws Exception {
    final File keytab = setupKDCAndPrincipals();

    LoginProcessor processor = new LoginProcessor() {
        @Override
        protected org.apache.commons.configuration.Configuration getApplicationConfiguration() {
            PropertiesConfiguration config = new PropertiesConfiguration();
            config.setProperty("atlas.authentication.method", "kerberos");
            config.setProperty("atlas.authentication.principal", "dgi@EXAMPLE.COM");
            config.setProperty("atlas.authentication.keytab", keytab.getAbsolutePath());
            return config;
        }

        @Override
        protected Configuration getHadoopConfiguration() {
            Configuration config = new Configuration(false);
            config.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
            config.setBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, true);
            config.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL, kerberosRule);

            return config;
        }

        @Override
        protected boolean isHadoopCluster() {
            return true;
        }
    };
    processor.login();

    Assert.assertTrue(UserGroupInformation.getLoginUser().getShortUserName().endsWith("dgi"));
    Assert.assertNotNull(UserGroupInformation.getCurrentUser());
    Assert.assertTrue(UserGroupInformation.isLoginKeytabBased());
    Assert.assertTrue(UserGroupInformation.isSecurityEnabled());

    kdc.stop();
}

From source file:org.apache.druid.security.kerberos.DruidKerberosUtil.java

License:Apache License
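
A non-test usage: Druid's internal client logs in from a principal and keytab on first use, and on later calls re-logs in from the keytab or ticket cache in case the TGT has expired.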

public static void authenticateIfRequired(String internalClientPrincipal, String internalClientKeytab) {
    if (!Strings.isNullOrEmpty(internalClientPrincipal) && !Strings.isNullOrEmpty(internalClientKeytab)) {
        Configuration conf = new Configuration();
        conf.setClassLoader(DruidKerberosModule.class.getClassLoader());
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);
        try {
            // Log in for the first time.
            if (!UserGroupInformation.getCurrentUser().hasKerberosCredentials()
                    || !UserGroupInformation.getCurrentUser().getUserName().equals(internalClientPrincipal)) {
                log.info("trying to authenticate user [%s] with keytab [%s]", internalClientPrincipal,
                        internalClientKeytab);
                UserGroupInformation.loginUserFromKeytab(internalClientPrincipal, internalClientKeytab);
                return;
            }
            //try to relogin in case the TGT expired
            if (UserGroupInformation.isLoginKeytabBased()) {
                log.info("Re-Login from key tab [%s] with principal [%s]", internalClientKeytab,
                        internalClientPrincipal);
                UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
                return;
            } else if (UserGroupInformation.isLoginTicketBased()) {
                log.info("Re-Login from Ticket cache");
                UserGroupInformation.getLoginUser().reloginFromTicketCache();
                return;
            }
        } catch (IOException e) {
            throw new ISE(e, "Failed to authenticate user principal [%s] with keytab [%s]",
                    internalClientPrincipal, internalClientKeytab);
        }
    }
}

From source file:org.apache.flink.yarn.AbstractYarnFlinkApplicationMasterRunner.java

License:Apache License
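
Flink's YARN application master enables Kerberos and authorization on a fresh Hadoop configuration only when a krb5.conf shipped into the container is present, then installs the resulting security context before doing its main work.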

/**
 * The instance entry point for the YARN application master. Obtains user group
 * information and calls the main work method {@link #runApplicationMaster(org.apache.flink.configuration.Configuration)} as a
 * privileged action.
 *
 * @param args The command line arguments.
 * @return The process exit code.
 */
protected int run(String[] args) {
    try {
        LOG.debug("All environment variables: {}", ENV);

        final String yarnClientUsername = ENV.get(YarnConfigKeys.ENV_HADOOP_USER_NAME);
        Preconditions.checkArgument(yarnClientUsername != null,
                "YARN client user name environment variable %s not set", YarnConfigKeys.ENV_HADOOP_USER_NAME);

        final String currDir = ENV.get(Environment.PWD.key());
        Preconditions.checkArgument(currDir != null, "Current working directory variable (%s) not set",
                Environment.PWD.key());
        LOG.debug("Current working directory: {}", currDir);

        final String remoteKeytabPath = ENV.get(YarnConfigKeys.KEYTAB_PATH);
        LOG.debug("Remote keytab path obtained {}", remoteKeytabPath);

        final String remoteKeytabPrincipal = ENV.get(YarnConfigKeys.KEYTAB_PRINCIPAL);
        LOG.info("Remote keytab principal obtained {}", remoteKeytabPrincipal);

        String keytabPath = null;
        if (remoteKeytabPath != null) {
            File f = new File(currDir, Utils.KEYTAB_FILE_NAME);
            keytabPath = f.getAbsolutePath();
            LOG.debug("Keytab path: {}", keytabPath);
        }

        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();

        LOG.info("YARN daemon is running as: {} Yarn client user obtainer: {}", currentUser.getShortUserName(),
                yarnClientUsername);

        // Flink configuration
        final Map<String, String> dynamicProperties = FlinkYarnSessionCli
                .getDynamicProperties(ENV.get(YarnConfigKeys.ENV_DYNAMIC_PROPERTIES));
        LOG.debug("YARN dynamic properties: {}", dynamicProperties);

        final Configuration flinkConfig = createConfiguration(currDir, dynamicProperties);
        if (keytabPath != null && remoteKeytabPrincipal != null) {
            flinkConfig.setString(SecurityOptions.KERBEROS_LOGIN_KEYTAB, keytabPath);
            flinkConfig.setString(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL, remoteKeytabPrincipal);
        }

        org.apache.hadoop.conf.Configuration hadoopConfiguration = null;

        //To support Yarn Secure Integration Test Scenario
        File krb5Conf = new File(currDir, Utils.KRB5_FILE_NAME);
        if (krb5Conf.exists() && krb5Conf.canRead()) {
            String krb5Path = krb5Conf.getAbsolutePath();
            LOG.info("KRB5 Conf: {}", krb5Path);
            hadoopConfiguration = new org.apache.hadoop.conf.Configuration();
            hadoopConfiguration.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
            hadoopConfiguration.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "true");
        }

        SecurityUtils.SecurityConfiguration sc;
        if (hadoopConfiguration != null) {
            sc = new SecurityUtils.SecurityConfiguration(flinkConfig, hadoopConfiguration);
        } else {
            sc = new SecurityUtils.SecurityConfiguration(flinkConfig);
        }

        SecurityUtils.install(sc);

        // Note that we use the "appMasterHostname" given by YARN here, to make sure
        // we use the hostnames given by YARN consistently throughout akka.
        // for akka "localhost" and "localhost.localdomain" are different actors.
        this.appMasterHostname = ENV.get(Environment.NM_HOST.key());
        Preconditions.checkArgument(appMasterHostname != null, "ApplicationMaster hostname variable %s not set",
                Environment.NM_HOST.key());
        LOG.info("YARN assigned hostname for application master: {}", appMasterHostname);

        return SecurityUtils.getInstalledContext().runSecured(new Callable<Integer>() {
            @Override
            public Integer call() throws Exception {
                return runApplicationMaster(flinkConfig);
            }
        });

    } catch (Throwable t) {
        // make sure that whatever happens ends up in the log
        LOG.error("YARN Application Master initialization failed", t);
        return INIT_ERROR_EXIT_CODE;
    }
}

From source file:org.apache.flink.yarn.YarnTaskExecutorRunner.java

License:Apache License
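
The task executor runner follows the same pattern as the application master above: detect a shipped krb5.conf, enable Kerberos and authorization on a Hadoop configuration, and run the task executor inside the installed security context.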

/**
 * The instance entry point for the YARN task executor. Obtains user group
 * information and calls the main work method {@link #runTaskExecutor(org.apache.flink.configuration.Configuration)} as a
 * privileged action.
 *
 * @param args The command line arguments.
 * @return The process exit code.
 */
protected int run(String[] args) {
    try {
        LOG.debug("All environment variables: {}", ENV);

        final String yarnClientUsername = ENV.get(YarnConfigKeys.ENV_HADOOP_USER_NAME);
        final String localDirs = ENV.get(Environment.LOCAL_DIRS.key());
        LOG.info("Current working/local Directory: {}", localDirs);

        final String currDir = ENV.get(Environment.PWD.key());
        LOG.info("Current working Directory: {}", currDir);

        final String remoteKeytabPath = ENV.get(YarnConfigKeys.KEYTAB_PATH);
        LOG.info("TM: remote keytab path obtained {}", remoteKeytabPath);

        final String remoteKeytabPrincipal = ENV.get(YarnConfigKeys.KEYTAB_PRINCIPAL);
        LOG.info("TM: remote keytab principal obtained {}", remoteKeytabPrincipal);

        final Configuration configuration = GlobalConfiguration.loadConfiguration(currDir);
        FileSystem.setDefaultScheme(configuration);

        // configure local directory
        String flinkTempDirs = configuration.getString(ConfigConstants.TASK_MANAGER_TMP_DIR_KEY, null);
        if (flinkTempDirs == null) {
            LOG.info("Setting directories for temporary file " + localDirs);
            configuration.setString(ConfigConstants.TASK_MANAGER_TMP_DIR_KEY, localDirs);
        } else {
            LOG.info("Overriding YARN's temporary file directories with those "
                    + "specified in the Flink config: " + flinkTempDirs);
        }

        // tell akka to die in case of an error
        configuration.setBoolean(ConfigConstants.AKKA_JVM_EXIT_ON_FATAL_ERROR, true);

        String keytabPath = null;
        if (remoteKeytabPath != null) {
            File f = new File(currDir, Utils.KEYTAB_FILE_NAME);
            keytabPath = f.getAbsolutePath();
            LOG.info("keytab path: {}", keytabPath);
        }

        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();

        LOG.info("YARN daemon is running as: {} Yarn client user obtainer: {}", currentUser.getShortUserName(),
                yarnClientUsername);

        org.apache.hadoop.conf.Configuration hadoopConfiguration = null;

        //To support Yarn Secure Integration Test Scenario
        File krb5Conf = new File(currDir, Utils.KRB5_FILE_NAME);
        if (krb5Conf.exists() && krb5Conf.canRead()) {
            String krb5Path = krb5Conf.getAbsolutePath();
            LOG.info("KRB5 Conf: {}", krb5Path);
            hadoopConfiguration = new org.apache.hadoop.conf.Configuration();
            hadoopConfiguration.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
            hadoopConfiguration.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "true");
        }

        SecurityUtils.SecurityConfiguration sc;
        if (hadoopConfiguration != null) {
            sc = new SecurityUtils.SecurityConfiguration(configuration, hadoopConfiguration);
        } else {
            sc = new SecurityUtils.SecurityConfiguration(configuration);
        }

        if (keytabPath != null && remoteKeytabPrincipal != null) {
            configuration.setString(SecurityOptions.KERBEROS_LOGIN_KEYTAB, keytabPath);
            configuration.setString(SecurityOptions.KERBEROS_LOGIN_PRINCIPAL, remoteKeytabPrincipal);
        }

        SecurityUtils.install(sc);

        return SecurityUtils.getInstalledContext().runSecured(new Callable<Integer>() {
            @Override
            public Integer call() throws Exception {
                return runTaskExecutor(configuration);
            }
        });

    } catch (Throwable t) {
        // make sure that whatever happens ends up in the log
        LOG.error("YARN TaskExecutor initialization failed", t);
        return INIT_ERROR_EXIT_CODE;
    }
}

From source file:org.apache.hoya.yarn.appmaster.HoyaAppMaster.java

License:Apache License
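
A different twist: in a secure cluster, Hoya's application master overwrites HADOOP_SECURITY_AUTHENTICATION with SaslRpcServer.AuthMethod.TOKEN to force token-based RPC, rather than setting it to "kerberos".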

@Override //AbstractService
public synchronized void serviceInit(Configuration conf) throws Exception {

    // Load in the server configuration - if it is actually on the Classpath
    Configuration serverConf = ConfigHelper.loadFromResource(SERVER_RESOURCE);
    ConfigHelper.mergeConfigurations(conf, serverConf, SERVER_RESOURCE);

    AbstractActionArgs action = serviceArgs.getCoreAction();
    HoyaAMCreateAction createAction = (HoyaAMCreateAction) action;
    //sort out the location of the AM
    serviceArgs.applyDefinitions(conf);
    serviceArgs.applyFileSystemURL(conf);

    String rmAddress = createAction.getRmAddress();
    if (rmAddress != null) {
        log.debug("Setting rm address from the command line: {}", rmAddress);
        HoyaUtils.setRmSchedulerAddress(conf, rmAddress);
    }
    //init security with our conf
    if (HoyaUtils.isClusterSecure(conf)) {
        log.info("Secure mode with kerberos realm {}", HoyaUtils.getKerberosRealm());
        UserGroupInformation.setConfiguration(conf);
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        log.debug("Authenticating as " + ugi.toString());
        HoyaUtils.verifyPrincipalSet(conf, DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY);
        // always enforce protocol to be token-based.
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
                SaslRpcServer.AuthMethod.TOKEN.toString());
    }
    log.info("Login user is {}", UserGroupInformation.getLoginUser());

    //look at settings of Hadoop Auth, to pick up a problem seen once
    checkAndWarnForAuthTokenProblems();

    super.serviceInit(conf);
}