Example usage for org.apache.hadoop.security UserGroupInformation setConfiguration

Introduction

On this page you can find example usage of org.apache.hadoop.security.UserGroupInformation.setConfiguration(Configuration).

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void setConfiguration(Configuration conf) 

Document

Set the static configuration for UGI.
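
Most of the examples below follow the same pattern: build a fresh Hadoop Configuration, set hadoop.security.authentication to "kerberos", and hand that Configuration to UGI before any login call. A minimal standalone sketch of that pattern is shown here; the principal and keytab path are placeholders, not values taken from the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiKerberosSetup {
    public static void main(String[] args) throws Exception {
        // Start from an empty Configuration so no *-site.xml defaults are loaded
        Configuration conf = new Configuration(false);
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        // Install the static configuration for UGI before logging in
        UserGroupInformation.setConfiguration(conf);
        // Placeholder principal and keytab path -- replace with real values
        UserGroupInformation.loginUserFromKeytab("client@EXAMPLE.COM", "/path/to/client.keytab");
        System.out.println("Logged in as " + UserGroupInformation.getCurrentUser());
    }
}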

Usage

From source file:org.apache.accumulo.harness.SharedMiniClusterBase.java

License:Apache License

/**
 * Starts a MiniAccumuloCluster instance with the default configuration but also provides the caller the opportunity to update the configuration before the
 * MiniAccumuloCluster is started.
 *
 * @param miniClusterCallback
 *          A callback to configure the minicluster before it is started.
 */
public static void startMiniClusterWithConfig(MiniClusterConfigurationCallback miniClusterCallback)
        throws Exception {
    File baseDir = new File(System.getProperty("user.dir") + "/target/mini-tests");
    assertTrue(baseDir.mkdirs() || baseDir.isDirectory());

    // Make a shared MAC instance instead of spinning up one per test method
    MiniClusterHarness harness = new MiniClusterHarness();

    if (TRUE.equals(System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION))) {
        krb = new TestingKdc();
        krb.start();
        // Enable krb auth
        Configuration conf = new Configuration(false);
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);
        // Login as the client
        ClusterUser rootUser = krb.getRootUser();
        // Get the krb token
        UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(),
                rootUser.getKeytab().getAbsolutePath());
        token = new KerberosToken();
    } else {
        rootPassword = "rootPasswordShared1";
        token = new PasswordToken(rootPassword);
    }

    cluster = harness.create(SharedMiniClusterBase.class.getName(),
            System.currentTimeMillis() + "_" + new Random().nextInt(Short.MAX_VALUE), token,
            miniClusterCallback, krb);
    cluster.start();

    if (null != krb) {
        final String traceTable = Property.TRACE_TABLE.getDefaultValue();
        final ClusterUser systemUser = krb.getAccumuloServerUser(), rootUser = krb.getRootUser();
        // Login as the trace user
        // Open a connector as the system user (ensures the user will exist for us to assign permissions to)
        UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(),
                systemUser.getKeytab().getAbsolutePath());
        Connector conn = cluster.getConnector(systemUser.getPrincipal(), new KerberosToken());

        // Then, log back in as the "root" user and do the grant
        UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(),
                rootUser.getKeytab().getAbsolutePath());
        conn = cluster.getConnector(principal, token);

        // Create the trace table
        conn.tableOperations().create(traceTable);

        // Trace user (which is the same kerberos principal as the system user, but using a normal KerberosToken) needs
        // to have the ability to read, write and alter the trace table
        conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
                TablePermission.READ);
        conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
                TablePermission.WRITE);
        conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
                TablePermission.ALTER_TABLE);
    }
}
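
The Javadoc above notes that startMiniClusterWithConfig lets the caller adjust the configuration before the cluster starts. A hypothetical caller (not part of the source file above) might pass the callback like this, reusing the configureMiniCluster signature shown in the other examples on this page; the ZooKeeper timeout value is illustrative only.

@BeforeClass
public static void setup() throws Exception {
    SharedMiniClusterBase.startMiniClusterWithConfig(new MiniClusterConfigurationCallback() {
        @Override
        public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
            // Illustrative site-config tweak applied before the cluster starts
            Map<String, String> site = cfg.getSiteConfig();
            site.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "15s");
            cfg.setSiteConfig(site);
        }
    });
}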

From source file:org.apache.accumulo.harness.SharedMiniClusterIT.java

License:Apache License

@BeforeClass
public static void startMiniCluster() throws Exception {
    File baseDir = new File(System.getProperty("user.dir") + "/target/mini-tests");
    baseDir.mkdirs();

    // Make a shared MAC instance instead of spinning up one per test method
    MiniClusterHarness harness = new MiniClusterHarness();

    if (TRUE.equals(System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION))) {
        krb = new TestingKdc();
        krb.start();
        // Enable krb auth
        Configuration conf = new Configuration(false);
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);
        // Login as the client
        UserGroupInformation.loginUserFromKeytab(krb.getClientPrincipal(),
                krb.getClientKeytab().getAbsolutePath());
        // Get the krb token
        principal = krb.getClientPrincipal();
        token = new KerberosToken(principal);
    } else {
        rootPassword = "rootPasswordShared1";
        token = new PasswordToken(rootPassword);
    }

    cluster = harness.create(SharedMiniClusterIT.class.getName(),
            System.currentTimeMillis() + "_" + new Random().nextInt(Short.MAX_VALUE), token, krb);
    cluster.start();
}

From source file:org.apache.accumulo.server.AccumuloServerContextTest.java

License:Apache License

@Before
public void setup() throws Exception {
    System.setProperty("java.security.krb5.realm", "accumulo");
    System.setProperty("java.security.krb5.kdc", "fake");
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    testUser = UserGroupInformation.createUserForTesting("test_user", new String[0]);
    username = testUser.getUserName();
}

From source file:org.apache.accumulo.server.ServerContextTest.java

License:Apache License

@Before
public void setup() {
    System.setProperty("java.security.krb5.realm", "accumulo");
    System.setProperty("java.security.krb5.kdc", "fake");
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    testUser = UserGroupInformation.createUserForTesting("test_user", new String[0]);
    username = testUser.getUserName();
}

From source file:org.apache.accumulo.test.functional.KerberosIT.java

License:Apache License

@AfterClass
public static void stopKdc() throws Exception {
    if (null != kdc) {
        kdc.stop();
    }
    if (null != krbEnabledForITs) {
        System.setProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION, krbEnabledForITs);
    }
    UserGroupInformation.setConfiguration(new Configuration(false));
}

From source file:org.apache.accumulo.test.functional.KerberosIT.java

License:Apache License

@Before
public void startMac() throws Exception {
    MiniClusterHarness harness = new MiniClusterHarness();
    mac = harness.create(this, new PasswordToken("unused"), kdc, new MiniClusterConfigurationCallback() {

        @Override
        public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
            Map<String, String> site = cfg.getSiteConfig();
            site.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "15s");
            cfg.setSiteConfig(site);
        }

    });

    mac.getConfig().setNumTservers(1);
    mac.start();
    // Enable kerberos auth
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
}

From source file:org.apache.accumulo.test.functional.KerberosProxyIT.java

License:Apache License

@Before
public void startMac() throws Exception {
    MiniClusterHarness harness = new MiniClusterHarness();
    mac = harness.create(getClass().getName(), testName.getMethodName(), new PasswordToken("unused"),
            new MiniClusterConfigurationCallback() {

                @Override
                public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
                    cfg.setNumTservers(1);
                    Map<String, String> siteCfg = cfg.getSiteConfig();
                    // Allow the proxy to impersonate the client user, but no one else
                    siteCfg.put(Property.INSTANCE_RPC_SASL_ALLOWED_USER_IMPERSONATION.getKey(),
                            proxyPrincipal + ":" + kdc.getRootUser().getPrincipal());
                    siteCfg.put(Property.INSTANCE_RPC_SASL_ALLOWED_HOST_IMPERSONATION.getKey(), "*");
                    cfg.setSiteConfig(siteCfg);
                }

            }, kdc);

    mac.start();
    MiniAccumuloConfigImpl cfg = mac.getConfig();

    // Generate Proxy configuration and start the proxy
    proxyProcess = startProxy(cfg);

    // Enable kerberos auth
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);

    boolean success = false;
    ClusterUser rootUser = kdc.getRootUser();
    // Rely on the junit timeout rule
    while (!success) {
        UserGroupInformation ugi;
        try {
            ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(),
                    rootUser.getKeytab().getAbsolutePath());
        } catch (IOException ex) {
            log.info("Login as root is failing", ex);
            Thread.sleep(3000);
            continue;
        }

        TSocket socket = new TSocket(hostname, proxyPort);
        log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
        TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname,
                Collections.singletonMap("javax.security.sasl.qop", "auth"), null, socket);

        final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);

        try {
            // UGI transport will perform the doAs for us
            ugiTransport.open();
            success = true;
        } catch (TTransportException e) {
            Throwable cause = e.getCause();
            if (null != cause && cause instanceof ConnectException) {
                log.info("Proxy not yet up, waiting");
                Thread.sleep(3000);
                proxyProcess = checkProxyAndRestart(proxyProcess, cfg);
                continue;
            }
        } finally {
            if (null != ugiTransport) {
                ugiTransport.close();
            }
        }
    }

    assertTrue("Failed to connect to the proxy repeatedly", success);
}

From source file:org.apache.accumulo.test.functional.KerberosRenewalIT.java

License:Apache License

@Before
public void startMac() throws Exception {
    MiniClusterHarness harness = new MiniClusterHarness();
    mac = harness.create(this, new PasswordToken("unused"), kdc, new MiniClusterConfigurationCallback() {

        @Override
        public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
            Map<String, String> site = cfg.getSiteConfig();
            site.put(Property.INSTANCE_ZK_TIMEOUT.getKey(), "15s");
            // Reduce the period just to make sure we trigger renewal fast
            site.put(Property.GENERAL_KERBEROS_RENEWAL_PERIOD.getKey(), "5s");
            cfg.setSiteConfig(site);
        }

    });

    mac.getConfig().setNumTservers(1);
    mac.start();
    // Enable kerberos auth
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
}

From source file:org.apache.accumulo.test.proxy.SimpleProxyBase.java

License:Apache License

/**
 * Does the actual test setup, invoked by the concrete test class
 */
public static void setUpProxy() throws Exception {
    assertNotNull("Implementations must initialize the TProtocolFactory", factory);

    Connector c = SharedMiniClusterBase.getConnector();
    Instance inst = c.getInstance();
    waitForAccumulo(c);

    hostname = InetAddress.getLocalHost().getCanonicalHostName();

    Properties props = new Properties();
    props.put("instance", inst.getInstanceName());
    props.put("zookeepers", inst.getZooKeepers());

    final String tokenClass;
    if (isKerberosEnabled()) {
        tokenClass = KerberosToken.class.getName();
        TestingKdc kdc = getKdc();

        // Create a principal+keytab for the proxy
        proxyKeytab = new File(kdc.getKeytabDir(), "proxy.keytab");
        hostname = InetAddress.getLocalHost().getCanonicalHostName();
        // Set the primary because the client needs to know it
        proxyPrimary = "proxy";
        // Qualify with an instance
        proxyPrincipal = proxyPrimary + "/" + hostname;
        kdc.createPrincipal(proxyKeytab, proxyPrincipal);
        // Tack on the realm too
        proxyPrincipal = kdc.qualifyUser(proxyPrincipal);

        props.setProperty("kerberosPrincipal", proxyPrincipal);
        props.setProperty("kerberosKeytab", proxyKeytab.getCanonicalPath());
        props.setProperty("thriftServerType", "sasl");

        // Enable kerberos auth
        Configuration conf = new Configuration(false);
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);

        // Login for the Proxy itself
        UserGroupInformation.loginUserFromKeytab(proxyPrincipal, proxyKeytab.getAbsolutePath());

        // User for tests
        ClusterUser user = kdc.getRootUser();
        clientPrincipal = user.getPrincipal();
        clientKeytab = user.getKeytab();
    } else {
        clientPrincipal = "root";
        tokenClass = PasswordToken.class.getName();
        properties.put("password", SharedMiniClusterBase.getRootPassword());
        hostname = "localhost";
    }

    props.put("tokenClass", tokenClass);

    ClientConfiguration clientConfig = SharedMiniClusterBase.getCluster().getClientConfig();
    String clientConfPath = new File(SharedMiniClusterBase.getCluster().getConfig().getConfDir(), "client.conf")
            .getAbsolutePath();
    props.put("clientConfigurationFile", clientConfPath);
    properties.put("clientConfigurationFile", clientConfPath);

    proxyPort = PortUtils.getRandomFreePort();
    proxyServer = Proxy.createProxyServer(HostAndPort.fromParts(hostname, proxyPort), factory, props,
            clientConfig).server;
    while (!proxyServer.isServing())
        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
}

From source file:org.apache.accumulo.test.replication.KerberosReplicationIT.java

License:Apache License

@Before
public void setup() throws Exception {
    MiniClusterHarness harness = new MiniClusterHarness();

    // Create a primary and a peer instance, both with the same "root" user
    primary = harness.create(getClass().getName(), testName.getMethodName(), new PasswordToken("unused"),
            getConfigCallback(PRIMARY_NAME), kdc);
    primary.start();

    peer = harness.create(getClass().getName(), testName.getMethodName() + "_peer", new PasswordToken("unused"),
            getConfigCallback(PEER_NAME), kdc);
    peer.start();

    // Enable kerberos auth
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
}