Example usage for org.apache.hadoop.security UserGroupInformation loginUserFromKeytab

Introduction

On this page you can find example usages of the method loginUserFromKeytab from org.apache.hadoop.security.UserGroupInformation.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void loginUserFromKeytab(String user, String path) throws IOException 

Document

Log a user in from a keytab file.
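
Before the project-specific examples below, here is a minimal, self-contained sketch of a keytab login. The class name, principal, and keytab path are placeholders, not taken from any of the examples:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.UserGroupInformation;

public class KeytabLoginExample {
    public static void main(String[] args) throws IOException {
        // Kerberos must be enabled before the login call; loginUserFromKeytab
        // returns without doing anything when Hadoop security is disabled.
        Configuration conf = new Configuration();
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);

        // Placeholder principal and keytab path; substitute your own.
        UserGroupInformation.loginUserFromKeytab("service/host.example.com@EXAMPLE.COM",
                "/etc/security/keytabs/service.keytab");

        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        System.out.println("Logged in as " + ugi.getUserName());
    }
}

A successful call replaces the static Hadoop login user, which is why several of the test examples below invoke the method repeatedly to switch between principals.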

Usage

From source file:org.apache.accumulo.proxy.Proxy.java

License:Apache License

public static ServerAddress createProxyServer(HostAndPort address, TProtocolFactory protocolFactory,
        Properties properties, ClientConfiguration clientConf) throws Exception {
    final int numThreads = Integer
            .parseInt(properties.getProperty(THRIFT_THREAD_POOL_SIZE_KEY, THRIFT_THREAD_POOL_SIZE_DEFAULT));
    final long maxFrameSize = AccumuloConfiguration
            .getMemoryInBytes(properties.getProperty(THRIFT_MAX_FRAME_SIZE_KEY, THRIFT_MAX_FRAME_SIZE_DEFAULT));
    final int simpleTimerThreadpoolSize = Integer
            .parseInt(Property.GENERAL_SIMPLETIMER_THREADPOOL_SIZE.getDefaultValue());
    // How frequently to try to resize the thread pool
    final long threadpoolResizeInterval = 1000l * 5;
    // No timeout
    final long serverSocketTimeout = 0l;
    // Use the new hadoop metrics2 support
    final MetricsFactory metricsFactory = new MetricsFactory(false);
    final String serverName = "Proxy", threadName = "Accumulo Thrift Proxy";

    // create the implementation of the proxy interface
    ProxyServer impl = new ProxyServer(properties);

    // Wrap the implementation -- translate some exceptions
    AccumuloProxy.Iface wrappedImpl = RpcWrapper.service(impl,
            new AccumuloProxy.Processor<AccumuloProxy.Iface>(impl));

    // Create the processor from the implementation
    TProcessor processor = new AccumuloProxy.Processor<>(wrappedImpl);

    // Get the type of thrift server to instantiate
    final String serverTypeStr = properties.getProperty(THRIFT_SERVER_TYPE, THRIFT_SERVER_TYPE_DEFAULT);
    ThriftServerType serverType = DEFAULT_SERVER_TYPE;
    if (!THRIFT_SERVER_TYPE_DEFAULT.equals(serverTypeStr)) {
        serverType = ThriftServerType.get(serverTypeStr);
    }

    SslConnectionParams sslParams = null;
    SaslServerConnectionParams saslParams = null;
    switch (serverType) {
    case SSL:
        sslParams = SslConnectionParams.forClient(ClientContext.convertClientConfig(clientConf));
        break;
    case SASL:
        if (!clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
            // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j capability
            log.error("FATAL: SASL thrift server was requested but it is disabled in client configuration");
            throw new RuntimeException("SASL is not enabled in configuration");
        }

        // Kerberos needs to be enabled to use it
        if (!UserGroupInformation.isSecurityEnabled()) {
            // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j capability
            log.error("FATAL: Hadoop security is not enabled");
            throw new RuntimeException();
        }

        // Login via principal and keytab
        final String kerberosPrincipal = properties.getProperty(KERBEROS_PRINCIPAL, ""),
                kerberosKeytab = properties.getProperty(KERBEROS_KEYTAB, "");
        if (StringUtils.isBlank(kerberosPrincipal) || StringUtils.isBlank(kerberosKeytab)) {
            // ACCUMULO-3651 Changed level to error and added FATAL to message for slf4j capability
            log.error("FATAL: Kerberos principal and keytab must be provided");
            throw new RuntimeException();
        }
        UserGroupInformation.loginUserFromKeytab(kerberosPrincipal, kerberosKeytab);
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        log.info("Logged in as " + ugi.getUserName());

        // The kerberosPrimary set in the SASL server needs to match the principal we're logged in as.
        final String shortName = ugi.getShortUserName();
        log.info("Setting server primary to {}", shortName);
        clientConf.setProperty(ClientProperty.KERBEROS_SERVER_PRIMARY, shortName);

        KerberosToken token = new KerberosToken();
        saslParams = new SaslServerConnectionParams(clientConf, token, null);

        processor = new UGIAssumingProcessor(processor);

        break;
    default:
        // nothing to do -- no extra configuration necessary
        break;
    }

    // Hook up support for tracing for thrift calls
    TimedProcessor timedProcessor = new TimedProcessor(metricsFactory, processor, serverName, threadName);

    // Create the thrift server with our processor and properties
    ServerAddress serverAddr = TServerUtils.startTServer(serverType, timedProcessor, protocolFactory,
            serverName, threadName, numThreads, simpleTimerThreadpoolSize, threadpoolResizeInterval,
            maxFrameSize, sslParams, saslParams, serverSocketTimeout, address);

    return serverAddr;
}

From source file:org.apache.accumulo.server.init.Initialize.java

License:Apache License

private boolean initialize(Opts opts, String instanceNamePath, VolumeManager fs, String rootUser) {

    UUID uuid = UUID.randomUUID();
    // the actual disk locations of the root table and tablets
    String[] configuredVolumes = VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance());
    final String rootTabletDir = new Path(
            fs.choose(Optional.<String>empty(), configuredVolumes) + Path.SEPARATOR + ServerConstants.TABLE_DIR
                    + Path.SEPARATOR + RootTable.ID + RootTable.ROOT_TABLET_LOCATION).toString();

    try {
        initZooKeeper(opts, uuid.toString(), instanceNamePath, rootTabletDir);
    } catch (Exception e) {
        log.error("FATAL: Failed to initialize zookeeper", e);
        return false;
    }

    try {
        initFileSystem(opts, fs, uuid, rootTabletDir);
    } catch (Exception e) {
        log.error("FATAL Failed to initialize filesystem", e);

        if (SiteConfiguration.getInstance().get(Property.INSTANCE_VOLUMES).trim().equals("")) {
            Configuration fsConf = CachedConfiguration.getInstance();

            final String defaultFsUri = "file:///";
            String fsDefaultName = fsConf.get("fs.default.name", defaultFsUri),
                    fsDefaultFS = fsConf.get("fs.defaultFS", defaultFsUri);

            // Try to determine when we couldn't find an appropriate core-site.xml on the classpath
            if (defaultFsUri.equals(fsDefaultName) && defaultFsUri.equals(fsDefaultFS)) {
                log.error("FATAL: Default filesystem value ('fs.defaultFS' or 'fs.default.name') of '"
                        + defaultFsUri + "' was found in the Hadoop configuration");
                log.error(
                        "FATAL: Please ensure that the Hadoop core-site.xml is on the classpath using 'general.classpaths' in accumulo-site.xml");
            }
        }

        return false;
    }

    final ServerConfigurationFactory confFactory = new ServerConfigurationFactory(
            HdfsZooInstance.getInstance());

    // When we're using Kerberos authentication, we need valid credentials to perform initialization. If the user provided some, use them.
    // If they did not, fall back to the credentials present in accumulo-site.xml that the servers will use themselves.
    try {
        final SiteConfiguration siteConf = confFactory.getSiteConfiguration();
        if (siteConf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
            final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            // We don't have any valid creds to talk to HDFS
            if (!ugi.hasKerberosCredentials()) {
                final String accumuloKeytab = siteConf.get(Property.GENERAL_KERBEROS_KEYTAB),
                        accumuloPrincipal = siteConf.get(Property.GENERAL_KERBEROS_PRINCIPAL);

                // Fail if the site configuration doesn't contain appropriate credentials to login as servers
                if (StringUtils.isBlank(accumuloKeytab) || StringUtils.isBlank(accumuloPrincipal)) {
                    log.error(
                            "FATAL: No Kerberos credentials provided, and Accumulo is not properly configured for server login");
                    return false;
                }

                log.info("Logging in as " + accumuloPrincipal + " with " + accumuloKeytab);

                // Login using the keytab as the 'accumulo' user
                UserGroupInformation.loginUserFromKeytab(accumuloPrincipal, accumuloKeytab);
            }
        }
    } catch (IOException e) {
        log.error("FATAL: Failed to get the Kerberos user", e);
        return false;
    }

    try {
        AccumuloServerContext context = new AccumuloServerContext(confFactory);
        initSecurity(context, opts, uuid.toString(), rootUser);
    } catch (Exception e) {
        log.error("FATAL: Failed to initialize security", e);
        return false;
    }
    return true;
}

From source file:org.apache.accumulo.server.security.SecurityUtil.java

License:Apache License

/**
 * This will log in the given user in kerberos.
 *
 * @param principalConfig
 *          This is the principal's name in the format NAME/HOST@REALM. {@link org.apache.hadoop.security.SecurityUtil#HOSTNAME_PATTERN} will automatically be
 *          replaced by the system's host name.
 * @return true if login succeeded, otherwise false
 */
static boolean login(String principalConfig, String keyTabPath) {
    try {
        String principalName = getServerPrincipal(principalConfig);
        if (keyTabPath != null && principalName != null && keyTabPath.length() != 0
                && principalName.length() != 0) {
            log.info("Attempting to login with keytab as " + principalName);
            UserGroupInformation.loginUserFromKeytab(principalName, keyTabPath);
            log.info("Succesfully logged in as user " + principalName);
            return true;
        }
    } catch (IOException io) {
        log.error("Error logging in user " + principalConfig + " using keytab at " + keyTabPath, io);
    }
    return false;
}

From source file:org.apache.accumulo.test.functional.CredentialsIT.java

License:Apache License

@After
public void deleteLocalUser() throws Exception {
    if (saslEnabled) {
        ClusterUser root = getAdminUser();
        UserGroupInformation.loginUserFromKeytab(root.getPrincipal(), root.getKeytab().getAbsolutePath());
    }
    getConnector().securityOperations().dropLocalUser(username);
}

From source file:org.apache.accumulo.test.functional.KerberosIT.java

License:Apache License

@Test(expected = AccumuloSecurityException.class)
public void testRootUserHasIrrevocablePermissions() throws Exception {
    // Login as the client (provided to `accumulo init` as the "root" user)
    UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());

    final Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());

    // The server-side implementation should prevent the revocation of the 'root' user's systems permissions
    // because once they're gone, it's possible that they could never be restored.
    conn.securityOperations().revokeSystemPermission(rootUser.getPrincipal(), SystemPermission.GRANT);
}

From source file:org.apache.accumulo.test.functional.KerberosRenewalIT.java

License:Apache License

@Test(timeout = TEST_DURATION)
public void testReadAndWriteThroughTicketLifetime() throws Exception {
    // Attempt to use Accumulo for a duration of time that exceeds the Kerberos ticket lifetime.
    // This is a functional test to verify that Accumulo services renew their ticket.
    // If the test doesn't finish on its own, this signifies that Accumulo services failed
    // and the test should fail. If Accumulo services renew their ticket, the test case
    // should exit gracefully on its own.

    // Login as the "root" user
    UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(), rootUser.getKeytab().getAbsolutePath());
    log.info("Logged in as {}", rootUser.getPrincipal());

    Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
    log.info("Created connector as {}", rootUser.getPrincipal());
    assertEquals(rootUser.getPrincipal(), conn.whoami());

    long duration = 0;
    long last = System.currentTimeMillis();
    // Make sure we have a couple renewals happen
    while (duration < TICKET_TEST_LIFETIME) {
        // Create a table, write a record, compact, read the record, drop the table.
        createReadWriteDrop(conn);
        // Wait a bit after
        Thread.sleep(5000);

        // Update the duration
        long now = System.currentTimeMillis();
        duration += now - last;
        last = now;
    }
}

From source file:org.apache.accumulo.test.functional.ScanIteratorIT.java

License:Apache License

@After
public void tearDown() throws Exception {
    if (null != user) {
        if (saslEnabled) {
            ClusterUser rootUser = getAdminUser();
            UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(),
                    rootUser.getKeytab().getAbsolutePath());
        }
        connector.securityOperations().dropLocalUser(user);
    }
}

From source file:org.apache.accumulo.test.proxy.SimpleProxyBase.java

License:Apache License

/**
 * Does the actual test setup, invoked by the concrete test class
 */
public static void setUpProxy() throws Exception {
    assertNotNull("Implementations must initialize the TProtocolFactory", factory);

    Connector c = SharedMiniClusterBase.getConnector();
    Instance inst = c.getInstance();
    waitForAccumulo(c);

    hostname = InetAddress.getLocalHost().getCanonicalHostName();

    Properties props = new Properties();
    props.put("instance", inst.getInstanceName());
    props.put("zookeepers", inst.getZooKeepers());

    final String tokenClass;
    if (isKerberosEnabled()) {
        tokenClass = KerberosToken.class.getName();
        TestingKdc kdc = getKdc();

        // Create a principal+keytab for the proxy
        proxyKeytab = new File(kdc.getKeytabDir(), "proxy.keytab");
        hostname = InetAddress.getLocalHost().getCanonicalHostName();
        // Set the primary because the client needs to know it
        proxyPrimary = "proxy";
        // Qualify with an instance
        proxyPrincipal = proxyPrimary + "/" + hostname;
        kdc.createPrincipal(proxyKeytab, proxyPrincipal);
        // Tack on the realm too
        proxyPrincipal = kdc.qualifyUser(proxyPrincipal);

        props.setProperty("kerberosPrincipal", proxyPrincipal);
        props.setProperty("kerberosKeytab", proxyKeytab.getCanonicalPath());
        props.setProperty("thriftServerType", "sasl");

        // Enabled kerberos auth
        Configuration conf = new Configuration(false);
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);

        // Login for the Proxy itself
        UserGroupInformation.loginUserFromKeytab(proxyPrincipal, proxyKeytab.getAbsolutePath());

        // User for tests
        ClusterUser user = kdc.getRootUser();
        clientPrincipal = user.getPrincipal();
        clientKeytab = user.getKeytab();
    } else {
        clientPrincipal = "root";
        tokenClass = PasswordToken.class.getName();
        properties.put("password", SharedMiniClusterBase.getRootPassword());
        hostname = "localhost";
    }

    props.put("tokenClass", tokenClass);

    ClientConfiguration clientConfig = SharedMiniClusterBase.getCluster().getClientConfig();
    String clientConfPath = new File(SharedMiniClusterBase.getCluster().getConfig().getConfDir(), "client.conf")
            .getAbsolutePath();
    props.put("clientConfigurationFile", clientConfPath);
    properties.put("clientConfigurationFile", clientConfPath);

    proxyPort = PortUtils.getRandomFreePort();
    proxyServer = Proxy.createProxyServer(HostAndPort.fromParts(hostname, proxyPort), factory, props,
            clientConfig).server;
    while (!proxyServer.isServing())
        sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
}

From source file:org.apache.accumulo.test.proxy.SimpleProxyBase.java

License:Apache License

@Before
public void setup() throws Exception {
    // Create a new client for each test
    if (isKerberosEnabled()) {
        UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
        proxyClient = new TestProxyClient(hostname, proxyPort, factory, proxyPrimary,
                UserGroupInformation.getCurrentUser());
        client = proxyClient.proxy();
        creds = client.login(clientPrincipal, properties);

        TestingKdc kdc = getKdc();
        final ClusterUser user = kdc.getClientPrincipal(0);
        // Create another user
        client.createLocalUser(creds, user.getPrincipal(), s2bb("unused"));
        // Login in as that user we just created
        UserGroupInformation.loginUserFromKeytab(user.getPrincipal(), user.getKeytab().getAbsolutePath());
        final UserGroupInformation badUgi = UserGroupInformation.getCurrentUser();
        // Get a "Credentials" object for the proxy
        TestProxyClient badClient = new TestProxyClient(hostname, proxyPort, factory, proxyPrimary, badUgi);
        try {
            Client badProxy = badClient.proxy();
            badLogin = badProxy.login(user.getPrincipal(), properties);
        } finally {
            badClient.close();
        }

        // Log back in as the test user
        UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
        // Drop test user, invalidating the credentials (not to mention not having the krb credentials anymore)
        client.dropLocalUser(creds, user.getPrincipal());
    } else {
        proxyClient = new TestProxyClient(hostname, proxyPort, factory);
        client = proxyClient.proxy();
        creds = client.login("root", properties);

        // Create 'user'
        client.createLocalUser(creds, "user", s2bb(SharedMiniClusterBase.getRootPassword()));
        // Log in as 'user'
        badLogin = client.login("user", properties);
        // Drop 'user', invalidating the credentials
        client.dropLocalUser(creds, "user");
    }

    // Create some unique names for tables, namespaces, etc.
    String[] uniqueNames = getUniqueNames(2);

    // Create a general table to be used
    tableName = uniqueNames[0];
    client.createTable(creds, tableName, true, TimeType.MILLIS);

    // Create a general namespace to be used
    namespaceName = uniqueNames[1];
    client.createNamespace(creds, namespaceName);
}

From source file:org.apache.accumulo.test.proxy.SimpleProxyBase.java

License:Apache License

@After
public void teardown() throws Exception {
    if (null != tableName) {
        if (isKerberosEnabled()) {
            UserGroupInformation.loginUserFromKeytab(clientPrincipal, clientKeytab.getAbsolutePath());
        }
        try {
            if (client.tableExists(creds, tableName)) {
                client.deleteTable(creds, tableName);
            }
        } catch (Exception e) {
            log.warn("Failed to delete test table", e);
        }
    }

    if (null != namespaceName) {
        try {
            if (client.namespaceExists(creds, namespaceName)) {
                client.deleteNamespace(creds, namespaceName);
            }
        } catch (Exception e) {
            log.warn("Failed to delete test namespace", e);
        }
    }

    // Close the transport after the test
    if (null != proxyClient) {
        proxyClient.close();
    }
}