Example usage for org.apache.hadoop.security UserGroupInformation loginUserFromKeytab

Introduction

On this page you can find example usage for org.apache.hadoop.security UserGroupInformation loginUserFromKeytab.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void loginUserFromKeytab(String user, String path) throws IOException 

Document

Log a user in from a keytab file.

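Before any of the examples below will actually authenticate, UserGroupInformation must be pointed at a configuration with hadoop.security.authentication set to kerberos; otherwise it stays in SIMPLE mode. A minimal, self-contained sketch of the basic call (the principal and keytab path are placeholders):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class KeytabLoginExample {
    public static void main(String[] args) throws IOException {
        // Switch UGI into Kerberos mode; without this the keytab login is effectively a no-op
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);

        // Placeholder principal and keytab path -- substitute your own
        UserGroupInformation.loginUserFromKeytab("service/host.example.com@EXAMPLE.COM",
                "/etc/security/keytabs/service.keytab");

        // The static login user for the whole JVM is now backed by the keytab
        System.out.println("Logged in as " + UserGroupInformation.getLoginUser().getUserName());
    }
}
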
Usage

From source file: org.apache.accumulo.core.cli.ClientOpts.java

License: Apache License

/**
 * Automatically update the options to use a KerberosToken when SASL is enabled for RPCs. Don't overwrite the options if the user has provided something
 * specifically.
 */
public void updateKerberosCredentials(ClientConfiguration clientConfig) {
    final boolean clientConfSaslEnabled = Boolean
            .parseBoolean(clientConfig.get(ClientProperty.INSTANCE_RPC_SASL_ENABLED));
    if ((saslEnabled || clientConfSaslEnabled) && null == tokenClassName) {
        tokenClassName = KerberosToken.CLASS_NAME;
        // ACCUMULO-3701 We need to ensure we're logged in before parseArgs returns as the MapReduce Job is going to make a copy of the current user (UGI)
        // when it is instantiated.
        if (null != keytabPath) {
            File keytab = new File(keytabPath);
            if (!keytab.exists() || !keytab.isFile()) {
                throw new IllegalArgumentException("Keytab isn't a normal file: " + keytabPath);
            }
            if (null == principal) {
                throw new IllegalArgumentException("Principal must be provided if logging in via Keytab");
            }
            try {
                UserGroupInformation.loginUserFromKeytab(principal, keytab.getAbsolutePath());
            } catch (IOException e) {
                throw new RuntimeException("Failed to log in with keytab", e);
            }
        }
    }
}

From source file: org.apache.accumulo.core.client.security.tokens.KerberosToken.java

License: Apache License

/**
 * Creates a token and logs in via {@link UserGroupInformation} using the provided principal and keytab. A key for the principal must exist in the keytab,
 * otherwise login will fail.
 *
 * @param principal
 *          The Kerberos principal
 * @param keytab
 *          A keytab file
 * @param replaceCurrentUser
 *          Should the current Hadoop user be replaced with this user
 * @deprecated since 1.8.0, @see #KerberosToken(String, File)
 */
@Deprecated
public KerberosToken(String principal, File keytab, boolean replaceCurrentUser) throws IOException {
    requireNonNull(principal, "Principal was null");
    requireNonNull(keytab, "Keytab was null");
    checkArgument(keytab.exists() && keytab.isFile(), "Keytab was not a normal file");
    UserGroupInformation ugi;
    if (replaceCurrentUser) {
        UserGroupInformation.loginUserFromKeytab(principal, keytab.getAbsolutePath());
        ugi = UserGroupInformation.getCurrentUser();
    } else {
        ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab.getAbsolutePath());
    }
    this.principal = ugi.getUserName();
    this.keytab = keytab;
}

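The constructor above highlights the difference between the two login variants: loginUserFromKeytab replaces the JVM-wide login user, while loginUserFromKeytabAndReturnUGI returns a separate UserGroupInformation that leaves the current login user alone and is typically used with doAs. A brief sketch of the isolated variant (principal and keytab path are placeholders):

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class IsolatedKeytabLogin {
    public static void runAsKeytabUser() throws IOException, InterruptedException {
        // Isolated login: UserGroupInformation.getLoginUser() is left untouched
        UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
                "alice@EXAMPLE.COM", "/path/to/alice.keytab");

        // Run work as that user without changing the process-wide login
        ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
            // ... access secured services as "alice" here ...
            return null;
        });
    }
}
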
From source file: org.apache.accumulo.core.security.SecurityUtil.java

License: Apache License

/**
 * This will log in the given user in Kerberos.
 *
 * @param principalConfig
 *          This is the principal's name in the format NAME/HOST@REALM. {@link org.apache.hadoop.security.SecurityUtil#HOSTNAME_PATTERN} will automatically be
 *          replaced by the system's host name.
 * @return true if login succeeded, otherwise false
 */
public static boolean login(String principalConfig, String keyTabPath) {
    try {
        String principalName = org.apache.hadoop.security.SecurityUtil.getServerPrincipal(principalConfig,
                InetAddress.getLocalHost().getCanonicalHostName());
        if (keyTabPath != null && principalName != null && keyTabPath.length() != 0
                && principalName.length() != 0) {
            UserGroupInformation.loginUserFromKeytab(principalName, keyTabPath);
            log.info("Succesfully logged in as user " + principalConfig);
            return true;
        }
    } catch (IOException io) {
        log.error("Error logging in user " + principalConfig + " using keytab at " + keyTabPath, io);
    }
    return false;
}

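The HOSTNAME_PATTERN referenced in the javadoc above is the literal _HOST token: getServerPrincipal substitutes it with the supplied host name before the keytab login. A small sketch of the same idea outside of Accumulo's SecurityUtil (the principal template and keytab path are placeholders):

import java.net.InetAddress;

import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;

public class HostPatternLogin {
    public static void main(String[] args) throws Exception {
        // "_HOST" in the principal template is expanded to the local canonical host name
        String principal = SecurityUtil.getServerPrincipal("accumulo/_HOST@EXAMPLE.COM",
                InetAddress.getLocalHost().getCanonicalHostName());

        // Placeholder keytab path
        UserGroupInformation.loginUserFromKeytab(principal, "/etc/security/keytabs/accumulo.keytab");
    }
}
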
From source file: org.apache.accumulo.harness.AccumuloClusterHarness.java

License: Apache License

@Before
public void setupCluster() throws Exception {
    // Before we try to instantiate the cluster, check to see if the test even wants to run against this type of cluster
    Assume.assumeTrue(canRunTest(type));

    switch (type) {
    case MINI:
        MiniClusterHarness miniClusterHarness = new MiniClusterHarness();
        // Intrinsically performs the callback to let tests alter MiniAccumuloConfig and core-site.xml
        MiniAccumuloClusterImpl impl = miniClusterHarness.create(this, getAdminToken(), krb);
        cluster = impl;
        // MAC makes a ClientConf for us, just set it
        ((AccumuloMiniClusterConfiguration) clusterConf).setClientConf(impl.getClientConfig());
        // Login as the "root" user
        if (null != krb) {
            ClusterUser rootUser = krb.getRootUser();
            // Log in the 'client' user
            UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(),
                    rootUser.getKeytab().getAbsolutePath());
        }
        break;
    case STANDALONE:
        StandaloneAccumuloClusterConfiguration conf = (StandaloneAccumuloClusterConfiguration) clusterConf;
        ClientConfiguration clientConf = conf.getClientConf();
        StandaloneAccumuloCluster standaloneCluster = new StandaloneAccumuloCluster(conf.getInstance(),
                clientConf, conf.getTmpDirectory(), conf.getUsers(), conf.getAccumuloServerUser());
        // If these are provided in the configuration, pass them into the cluster
        standaloneCluster.setAccumuloHome(conf.getAccumuloHome());
        standaloneCluster.setClientAccumuloConfDir(conf.getClientAccumuloConfDir());
        standaloneCluster.setServerAccumuloConfDir(conf.getServerAccumuloConfDir());
        standaloneCluster.setHadoopConfDir(conf.getHadoopConfDir());

        // For SASL, we need to get the Hadoop configuration files as well otherwise UGI will log in as SIMPLE instead of KERBEROS
        Configuration hadoopConfiguration = standaloneCluster.getHadoopConfiguration();
        if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
            UserGroupInformation.setConfiguration(hadoopConfiguration);
            // Login as the admin user to start the tests
            UserGroupInformation.loginUserFromKeytab(conf.getAdminPrincipal(),
                    conf.getAdminKeytab().getAbsolutePath());
        }

        // Set the implementation
        cluster = standaloneCluster;
        break;
    default:
        throw new RuntimeException("Unhandled type");
    }

    if (type.isDynamic()) {
        cluster.start();
    } else {
        log.info("Removing tables which appear to be from a previous test run");
        cleanupTables();
        log.info("Removing users which appear to be from a previous test run");
        cleanupUsers();
    }

    switch (type) {
    case MINI:
        if (null != krb) {
            final String traceTable = Property.TRACE_TABLE.getDefaultValue();
            final ClusterUser systemUser = krb.getAccumuloServerUser(), rootUser = krb.getRootUser();

            // Login as the trace user
            UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(),
                    systemUser.getKeytab().getAbsolutePath());

            // Open a connector as the system user (ensures the user will exist for us to assign permissions to)
            UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(),
                    systemUser.getKeytab().getAbsolutePath());
            Connector conn = cluster.getConnector(systemUser.getPrincipal(), new KerberosToken());

            // Then, log back in as the "root" user and do the grant
            UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(),
                    rootUser.getKeytab().getAbsolutePath());
            conn = getConnector();

            // Create the trace table
            conn.tableOperations().create(traceTable);

            // Trace user (which is the same kerberos principal as the system user, but using a normal KerberosToken) needs
            // to have the ability to read, write and alter the trace table
            conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
                    TablePermission.READ);
            conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
                    TablePermission.WRITE);
            conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
                    TablePermission.ALTER_TABLE);
        }
        break;
    default:
        // do nothing
    }
}

From source file: org.apache.accumulo.harness.AccumuloClusterIT.java

License: Apache License

@Before
public void setupCluster() throws Exception {
    // Before we try to instantiate the cluster, check to see if the test even wants to run against this type of cluster
    Assume.assumeTrue(canRunTest(type));

    switch (type) {
    case MINI:
        MiniClusterHarness miniClusterHarness = new MiniClusterHarness();
        // Intrinsically performs the callback to let tests alter MiniAccumuloConfig and core-site.xml
        cluster = miniClusterHarness.create(this, getToken(), krb);
        break;
    case STANDALONE:
        StandaloneAccumuloClusterConfiguration conf = (StandaloneAccumuloClusterConfiguration) clusterConf;
        StandaloneAccumuloCluster standaloneCluster = new StandaloneAccumuloCluster(conf.getInstance());
        // If these are provided in the configuration, pass them into the cluster
        standaloneCluster.setAccumuloHome(conf.getAccumuloHome());
        standaloneCluster.setAccumuloConfDir(conf.getAccumuloConfDir());
        standaloneCluster.setHadoopConfDir(conf.getHadoopConfDir());
        // Set the implementation
        cluster = standaloneCluster;
        break;
    default:
        throw new RuntimeException("Unhandled type");
    }

    if (type.isDynamic()) {
        cluster.start();
        if (null != krb) {
            // Log in the 'client' user
            UserGroupInformation.loginUserFromKeytab(getClientPrincipal(), getClientKeytab().getAbsolutePath());
        }
    } else {
        log.info("Removing tables which appear to be from a previous test run");
        cleanupTables();
        log.info("Removing users which appear to be from a previous test run");
        cleanupUsers();
    }
}

From source file: org.apache.accumulo.harness.conf.AccumuloMiniClusterConfiguration.java

License: Apache License

@Override
public AuthenticationToken getAdminToken() {
    if (saslEnabled) {
        // Turn on Kerberos authentication so UGI acts properly
        final Configuration conf = new Configuration(false);
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);

        ClusterUser rootUser = AccumuloClusterHarness.getKdc().getRootUser();
        try {
            UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(),
                    rootUser.getKeytab().getAbsolutePath());
            return new KerberosToken();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    } else {
        String password = conf.get(ACCUMULO_MINI_PASSWORD_KEY);
        if (null == password) {
            password = ACCUMULO_MINI_PASSWORD_DEFAULT;
        }

        return new PasswordToken(password);
    }
}

From source file: org.apache.accumulo.harness.conf.StandaloneAccumuloClusterConfiguration.java

License: Apache License

@Override
public AuthenticationToken getAdminToken() {
    if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
        File keytab = getAdminKeytab();
        try {
            UserGroupInformation.loginUserFromKeytab(getAdminPrincipal(), keytab.getAbsolutePath());
            return new KerberosToken();
        } catch (IOException e) {
            // The user isn't logged in
            throw new RuntimeException("Failed to create KerberosToken", e);
        }
    } else {
        return new PasswordToken(getPassword());
    }
}

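The two getAdminToken() implementations above follow the same pattern: perform the keytab login first, then construct a KerberosToken, whose no-argument constructor wraps whichever user UGI is now logged in as. A stripped-down sketch of that pattern (principal and keytab path are placeholders):

import java.io.IOException;

import org.apache.accumulo.core.client.security.tokens.AuthenticationToken;
import org.apache.accumulo.core.client.security.tokens.KerberosToken;
import org.apache.hadoop.security.UserGroupInformation;

public class AdminTokenSketch {
    public static AuthenticationToken loginAndGetToken() {
        try {
            // Placeholder principal and keytab path
            UserGroupInformation.loginUserFromKeytab("admin@EXAMPLE.COM",
                    "/etc/security/keytabs/admin.keytab");
            // Picks up the user that UGI is now logged in as
            return new KerberosToken();
        } catch (IOException e) {
            throw new RuntimeException("Failed to create KerberosToken", e);
        }
    }
}
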
From source file: org.apache.accumulo.harness.SharedMiniClusterBase.java

License: Apache License

/**
 * Starts a MiniAccumuloCluster instance with the default configuration but also provides the caller the opportunity to update the configuration before the
 * MiniAccumuloCluster is started.
 *
 * @param miniClusterCallback
 *          A callback to configure the minicluster before it is started.
 */
public static void startMiniClusterWithConfig(MiniClusterConfigurationCallback miniClusterCallback)
        throws Exception {
    File baseDir = new File(System.getProperty("user.dir") + "/target/mini-tests");
    assertTrue(baseDir.mkdirs() || baseDir.isDirectory());

    // Make a shared MAC instance instead of spinning up one per test method
    MiniClusterHarness harness = new MiniClusterHarness();

    if (TRUE.equals(System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION))) {
        krb = new TestingKdc();
        krb.start();
        // Enable krb auth
        Configuration conf = new Configuration(false);
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);
        // Login as the client
        ClusterUser rootUser = krb.getRootUser();
        // Get the krb token
        UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(),
                rootUser.getKeytab().getAbsolutePath());
        token = new KerberosToken();
    } else {
        rootPassword = "rootPasswordShared1";
        token = new PasswordToken(rootPassword);
    }

    cluster = harness.create(SharedMiniClusterBase.class.getName(),
            System.currentTimeMillis() + "_" + new Random().nextInt(Short.MAX_VALUE), token,
            miniClusterCallback, krb);
    cluster.start();

    if (null != krb) {
        final String traceTable = Property.TRACE_TABLE.getDefaultValue();
        final ClusterUser systemUser = krb.getAccumuloServerUser(), rootUser = krb.getRootUser();
        // Login as the trace user
        // Open a connector as the system user (ensures the user will exist for us to assign permissions to)
        UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(),
                systemUser.getKeytab().getAbsolutePath());
        Connector conn = cluster.getConnector(systemUser.getPrincipal(), new KerberosToken());

        // Then, log back in as the "root" user and do the grant
        UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(),
                rootUser.getKeytab().getAbsolutePath());
        conn = cluster.getConnector(principal, token);

        // Create the trace table
        conn.tableOperations().create(traceTable);

        // Trace user (which is the same kerberos principal as the system user, but using a normal KerberosToken) needs
        // to have the ability to read, write and alter the trace table
        conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
                TablePermission.READ);
        conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
                TablePermission.WRITE);
        conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
                TablePermission.ALTER_TABLE);
    }
}

From source file: org.apache.accumulo.harness.SharedMiniClusterBase.java

License: Apache License

public static AuthenticationToken getToken() {
    if (token instanceof KerberosToken) {
        try {
            UserGroupInformation.loginUserFromKeytab(getPrincipal(),
                    krb.getRootUser().getKeytab().getAbsolutePath());
        } catch (IOException e) {
            throw new RuntimeException("Failed to login", e);
        }
    }
    return token;
}

From source file: org.apache.accumulo.harness.SharedMiniClusterIT.java

License: Apache License

@BeforeClass
public static void startMiniCluster() throws Exception {
    File baseDir = new File(System.getProperty("user.dir") + "/target/mini-tests");
    baseDir.mkdirs();

    // Make a shared MAC instance instead of spinning up one per test method
    MiniClusterHarness harness = new MiniClusterHarness();

    if (TRUE.equals(System.getProperty(MiniClusterHarness.USE_KERBEROS_FOR_IT_OPTION))) {
        krb = new TestingKdc();
        krb.start();
        // Enable krb auth
        Configuration conf = new Configuration(false);
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);
        // Login as the client
        UserGroupInformation.loginUserFromKeytab(krb.getClientPrincipal(),
                krb.getClientKeytab().getAbsolutePath());
        // Get the krb token
        principal = krb.getClientPrincipal();
        token = new KerberosToken(principal);
    } else {
        rootPassword = "rootPasswordShared1";
        token = new PasswordToken(rootPassword);
    }

    cluster = harness.create(SharedMiniClusterIT.class.getName(),
            System.currentTimeMillis() + "_" + new Random().nextInt(Short.MAX_VALUE), token, krb);
    cluster.start();
}