Example usage for org.apache.hadoop.security UserGroupInformation loginUserFromKeytab

Introduction

On this page you can find example usage of org.apache.hadoop.security UserGroupInformation.loginUserFromKeytab, collected from open source projects.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void loginUserFromKeytab(String user, String path) throws IOException 

Document

Log a user in from a keytab file.
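
A minimal, self-contained sketch of how this method is typically called (the principal and keytab path below are hypothetical placeholders, and Kerberos must be enabled in the configuration for the login to take effect):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class KeytabLoginExample {
    public static void main(String[] args) throws IOException {
        // loginUserFromKeytab returns without doing anything unless
        // Kerberos authentication is enabled in the configuration.
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);

        // Hypothetical principal and keytab path; substitute real values.
        UserGroupInformation.loginUserFromKeytab("service/host.example.com@EXAMPLE.COM",
                "/etc/security/keytabs/service.keytab");
        System.out.println("Logged in as " + UserGroupInformation.getLoginUser());
    }
}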

Usage

From source file:gobblin.runtime.instance.plugin.hadoop.HadoopKerberosKeytabAuthenticationPlugin.java

License:Apache License

/** {@inheritDoc} */
@Override
protected void startUp() throws Exception {
    try {
        UserGroupInformation.setConfiguration(_hadoopConf);
        if (UserGroupInformation.isSecurityEnabled()) {
            UserGroupInformation.loginUserFromKeytab(_loginUser, _loginUserKeytabFile);
        }
    } catch (Throwable t) {
        log.error("Failed to start up HadoopKerberosKeytabAuthenticationPlugin", t);
        throw t;
    }

}
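
Note that the login is guarded by UserGroupInformation.isSecurityEnabled(), so this plugin is effectively a no-op on clusters that do not use Kerberos.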

From source file:gobblin.util.ProxiedFileSystemUtils.java

License:Apache License

private static UserGroupInformation loginAndProxyAsUser(@NonNull String userNameToProxyAs,
        @NonNull String superUserName, Path superUserKeytabLocation) throws IOException {

    if (!UserGroupInformation.getLoginUser().getUserName().equals(superUserName)) {
        Preconditions.checkNotNull(superUserKeytabLocation);
        UserGroupInformation.loginUserFromKeytab(superUserName, superUserKeytabLocation.toString());
    }
    return UserGroupInformation.createProxyUser(userNameToProxyAs, UserGroupInformation.getLoginUser());
}
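
Here the super user is logged in from its keytab only if it is not already the current login user, and that login is then used as the real user behind a proxy-user UGI for userNameToProxyAs.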

From source file:gobblin.util.ProxiedFileSystemWrapper.java

License:Apache License

/**
 * Getter for proxiedFs, using the passed parameters to create an instance of a proxiedFs.
 * @param properties State containing the configuration properties, including the proxy user name.
 * @param authType is either TOKEN or KEYTAB.
 * @param authPath is the KEYTAB location if the authType is KEYTAB; otherwise, it is the token file.
 * @param uri File system URI.
 * @throws IOException
 * @throws InterruptedException
 * @throws URISyntaxException
 * @return proxiedFs
 */
public FileSystem getProxiedFileSystem(State properties, AuthType authType, String authPath, String uri,
        final Configuration conf) throws IOException, InterruptedException, URISyntaxException {
    Preconditions.checkArgument(
            StringUtils.isNotBlank(properties.getProp(ConfigurationKeys.FS_PROXY_AS_USER_NAME)),
            "State does not contain a proper proxy user name");
    String proxyUserName = properties.getProp(ConfigurationKeys.FS_PROXY_AS_USER_NAME);
    UserGroupInformation proxyUser;
    switch (authType) {
    case KEYTAB: // If the authentication type is KEYTAB, log in a super user first before creating a proxy user.
        Preconditions.checkArgument(
                StringUtils
                        .isNotBlank(properties.getProp(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS)),
                "State does not contain a proper proxy token file name");
        String superUser = properties.getProp(ConfigurationKeys.SUPER_USER_NAME_TO_PROXY_AS_OTHERS);
        UserGroupInformation.loginUserFromKeytab(superUser, authPath);
        proxyUser = UserGroupInformation.createProxyUser(proxyUserName, UserGroupInformation.getLoginUser());
        break;
    case TOKEN: // If the authentication type is TOKEN, create a proxy user and then add the token to the user.
        proxyUser = UserGroupInformation.createProxyUser(proxyUserName, UserGroupInformation.getLoginUser());
        Optional<Token<?>> proxyToken = getTokenFromSeqFile(authPath, proxyUserName);
        if (proxyToken.isPresent()) {
            proxyUser.addToken(proxyToken.get());
        } else {
            LOG.warn("No delegation token found for the current proxy user.");
        }
        break;
    default:
        LOG.warn(
                "Creating a proxy user without authentication; it may not be able to perform file system operations.");
        proxyUser = UserGroupInformation.createProxyUser(proxyUserName, UserGroupInformation.getLoginUser());
        break;
    }

    final URI fsURI = URI.create(uri);
    proxyUser.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws IOException {
            LOG.debug("Now performing file system operations as :" + UserGroupInformation.getCurrentUser());
            proxiedFs = FileSystem.get(fsURI, conf);
            return null;
        }
    });
    return this.proxiedFs;
}
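
The KEYTAB branch logs in the super user before creating the proxy user, while the TOKEN branch instead attaches a previously persisted delegation token to the proxy user; in both cases the file system is created inside doAs() so that subsequent operations run as the proxied user.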

From source file:gobblin.yarn.YarnAppSecurityManager.java

License:Apache License

/**
 * Login the user from a given keytab file.
 */
private void loginFromKeytab() throws IOException {
    String keyTabFilePath = this.config.getString(GobblinYarnConfigurationKeys.KEYTAB_FILE_PATH);
    if (Strings.isNullOrEmpty(keyTabFilePath)) {
        throw new IOException("Keytab file path is not defined for Kerberos login");
    }

    if (!new File(keyTabFilePath).exists()) {
        throw new IOException("Keytab file not found at: " + keyTabFilePath);
    }

    String principal = this.config.getString(GobblinYarnConfigurationKeys.KEYTAB_PRINCIPAL_NAME);
    if (Strings.isNullOrEmpty(principal)) {
        principal = this.loginUser.getShortUserName() + "/localhost@LOCALHOST";
    }

    Configuration conf = new Configuration();
    conf.set("hadoop.security.authentication",
            UserGroupInformation.AuthenticationMethod.KERBEROS.toString().toLowerCase());
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab(principal, keyTabFilePath);
    LOGGER.info(String.format("Logged in from keytab file %s using principal %s", keyTabFilePath, principal));

    this.loginUser = UserGroupInformation.getLoginUser();

    getNewDelegationTokenForLoginUser();
    writeDelegationTokenToFile();

    if (!this.firstLogin) {
        // Send a message to the controller and all the participants
        sendTokenFileUpdatedMessage(InstanceType.CONTROLLER);
        sendTokenFileUpdatedMessage(InstanceType.PARTICIPANT);
    }
}
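
After each keytab login this method obtains a fresh delegation token for the login user, writes it to a file, and (except on the first login) notifies the controller and all participants so they can pick up the updated token file.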

From source file:io.confluent.connect.hdfs.DataWriter.java

License:Apache License

public DataWriter(HdfsSinkConnectorConfig connectorConfig, SinkTaskContext context, AvroData avroData) {
    try {
        String hadoopHome = connectorConfig.getString(HdfsSinkConnectorConfig.HADOOP_HOME_CONFIG);
        System.setProperty("hadoop.home.dir", hadoopHome);

        this.connectorConfig = connectorConfig;
        this.avroData = avroData;
        this.context = context;

        String hadoopConfDir = connectorConfig.getString(HdfsSinkConnectorConfig.HADOOP_CONF_DIR_CONFIG);
        log.info("Hadoop configuration directory {}", hadoopConfDir);
        conf = new Configuration();
        if (!hadoopConfDir.equals("")) {
            conf.addResource(new Path(hadoopConfDir + "/core-site.xml"));
            conf.addResource(new Path(hadoopConfDir + "/hdfs-site.xml"));
        }

        boolean secureHadoop = connectorConfig
                .getBoolean(HdfsSinkConnectorConfig.HDFS_AUTHENTICATION_KERBEROS_CONFIG);
        if (secureHadoop) {
            SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
            String principalConfig = connectorConfig
                    .getString(HdfsSinkConnectorConfig.CONNECT_HDFS_PRINCIPAL_CONFIG);
            String keytab = connectorConfig.getString(HdfsSinkConnectorConfig.CONNECT_HDFS_KEYTAB_CONFIG);

            if (principalConfig == null || keytab == null) {
                throw new ConfigException(
                        "Hadoop is using Kerboros for authentication, you need to provide both a connect principal and "
                                + "the path to the keytab of the principal.");
            }

            conf.set("hadoop.security.authentication", "kerberos");
            conf.set("hadoop.security.authorization", "true");
            String hostname = InetAddress.getLocalHost().getCanonicalHostName();
            // replace the _HOST in the principal config with the actual hostname
            String principal = SecurityUtil.getServerPrincipal(principalConfig, hostname);
            String namenodePrincipalConfig = connectorConfig
                    .getString(HdfsSinkConnectorConfig.HDFS_NAMENODE_PRINCIPAL_CONFIG);

            String namenodePrincipal = SecurityUtil.getServerPrincipal(namenodePrincipalConfig, hostname);
            // namenode principal is needed for multi-node hadoop cluster
            if (conf.get("dfs.namenode.kerberos.principal") == null) {
                conf.set("dfs.namenode.kerberos.principal", namenodePrincipal);
            }
            log.info("Hadoop namenode principal: " + conf.get("dfs.namenode.kerberos.principal"));

            UserGroupInformation.setConfiguration(conf);
            UserGroupInformation.loginUserFromKeytab(principal, keytab);
            final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
            log.info("Login as: " + ugi.getUserName());

            final long renewPeriod = connectorConfig
                    .getLong(HdfsSinkConnectorConfig.KERBEROS_TICKET_RENEW_PERIOD_MS_CONFIG);

            isRunning = true;
            ticketRenewThread = new Thread(new Runnable() {
                @Override
                public void run() {
                    synchronized (DataWriter.this) {
                        while (isRunning) {
                            try {
                                DataWriter.this.wait(renewPeriod);
                                if (isRunning) {
                                    ugi.reloginFromKeytab();
                                }
                            } catch (IOException e) {
                                // We ignore this exception during relogin as each successful relogin gives
                                // additional 24 hours of authentication in the default config. In normal
                                // situations, the probability of failing relogin 24 times is low and if
                                // that happens, the task will fail eventually.
                                log.error("Error renewing the ticket", e);
                            } catch (InterruptedException e) {
                                // ignored
                            }
                        }
                    }
                }
            });
            log.info("Starting the Kerberos ticket renew thread with period {}ms.", renewPeriod);
            ticketRenewThread.start();
        }

        url = connectorConfig.getString(HdfsSinkConnectorConfig.HDFS_URL_CONFIG);
        topicsDir = connectorConfig.getString(HdfsSinkConnectorConfig.TOPICS_DIR_CONFIG);
        String logsDir = connectorConfig.getString(HdfsSinkConnectorConfig.LOGS_DIR_CONFIG);

        @SuppressWarnings("unchecked")
        Class<? extends Storage> storageClass = (Class<? extends Storage>) Class
                .forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG));
        storage = StorageFactory.createStorage(storageClass, conf, url);

        createDir(topicsDir);
        createDir(topicsDir + HdfsSinkConnectorConstants.TEMPFILE_DIRECTORY);
        createDir(logsDir);

        format = getFormat();
        writerProvider = format.getRecordWriterProvider();
        schemaFileReader = format.getSchemaFileReader(avroData);

        partitioner = createPartitioner(connectorConfig);

        assignment = new HashSet<>(context.assignment());
        offsets = new HashMap<>();

        hiveIntegration = connectorConfig.getBoolean(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG);
        if (hiveIntegration) {
            hiveDatabase = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_DATABASE_CONFIG);
            hiveMetaStore = new HiveMetaStore(conf, connectorConfig);
            hive = format.getHiveUtil(connectorConfig, avroData, hiveMetaStore);
            executorService = Executors.newSingleThreadExecutor();
            hiveUpdateFutures = new LinkedList<>();
        }

        topicPartitionWriters = new HashMap<>();
        for (TopicPartition tp : assignment) {
            TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter(tp, storage, writerProvider,
                    partitioner, connectorConfig, context, avroData, hiveMetaStore, hive, schemaFileReader,
                    executorService, hiveUpdateFutures);
            topicPartitionWriters.put(tp, topicPartitionWriter);
        }
    } catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
        throw new ConnectException("Reflection exception: ", e);
    } catch (IOException e) {
        throw new ConnectException(e);
    }
}
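
Besides the initial login, this connector starts a background thread that periodically calls reloginFromKeytab() on the login UGI so the Kerberos ticket is renewed for as long as the task runs.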

From source file:io.druid.security.kerberos.DruidKerberosUtil.java

License:Apache License

public static void authenticateIfRequired(AuthenticationKerberosConfig config) throws IOException {
    String principal = config.getPrincipal();
    String keytab = config.getKeytab();
    if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(keytab)) {
        Configuration conf = new Configuration();
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);
        try {
            if (!UserGroupInformation.getCurrentUser().hasKerberosCredentials()
                    || !UserGroupInformation.getCurrentUser().getUserName().equals(principal)) {
                log.info("trying to authenticate user [%s] with keytab [%s]", principal, keytab);
                UserGroupInformation.loginUserFromKeytab(principal, keytab);
            }
        } catch (IOException e) {
            throw new ISE(e, "Failed to authenticate user principal [%s] with keytab [%s]", principal, keytab);
        }
    }
}
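
The login is skipped when the current user already holds Kerberos credentials for the same principal, which avoids redundant logins when this method is called repeatedly.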

From source file:io.druid.storage.hdfs.HdfsStorageAuthentication.java

License:Apache License

/**
 * Authenticates against a secured Hadoop cluster.
 * In case of any bug fix, make sure to fix the code in JobHelper#authenticate as well.
 */
@LifecycleStart
public void authenticate() {
    String principal = hdfsKerberosConfig.getPrincipal();
    String keytab = hdfsKerberosConfig.getKeytab();
    if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(keytab)) {
        UserGroupInformation.setConfiguration(hadoopConf);
        if (UserGroupInformation.isSecurityEnabled()) {
            try {
                if (!UserGroupInformation.getCurrentUser().hasKerberosCredentials()
                        || !UserGroupInformation.getCurrentUser().getUserName().equals(principal)) {
                    log.info("Trying to authenticate user [%s] with keytab [%s]..", principal, keytab);
                    UserGroupInformation.loginUserFromKeytab(principal, keytab);
                }
            } catch (IOException e) {
                throw new ISE(e, "Failed to authenticate user principal [%s] with keytab [%s]", principal,
                        keytab);
            }
        }
    }
}

From source file:joshelser.Server.java

License:Apache License

public static void main(String[] args) throws Exception {
    Opts opts = new Opts();

    opts.parseArgs(Server.class, args);

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Parse out the primary/instance@DOMAIN from the principal
    String principal = SecurityUtil.getServerPrincipal(opts.principal,
            InetAddress.getLocalHost().getCanonicalHostName());
    HadoopKerberosName name = new HadoopKerberosName(principal);
    String primary = name.getServiceName();
    String instance = name.getHostName();

    // Log in using the keytab
    UserGroupInformation.loginUserFromKeytab(principal, opts.keytab);

    // Get the info from our login
    UserGroupInformation serverUser = UserGroupInformation.getLoginUser();
    log.info("Current user: {}", serverUser);

    // Open the server using the provided port
    TServerSocket serverTransport = new TServerSocket(opts.port);

    // Wrap our implementation with the interface's processor
    HdfsService.Processor<Iface> processor = new HdfsService.Processor<Iface>(new HdfsServiceImpl(fs));

    // Use authorization and confidentiality
    Map<String, String> saslProperties = new HashMap<String, String>();
    saslProperties.put(Sasl.QOP, "auth-conf");

    // Creating the server definition
    TSaslServerTransport.Factory saslTransportFactory = new TSaslServerTransport.Factory();
    saslTransportFactory.addServerDefinition("GSSAPI", // tell SASL to use GSSAPI, which supports Kerberos
            primary, // kerberos primary for server - "myprincipal" in myprincipal/my.server.com@MY.REALM
            instance, // kerberos instance for server - "my.server.com" in myprincipal/my.server.com@MY.REALM
            saslProperties, // Properties set, above
            new SaslRpcServer.SaslGssCallbackHandler()); // Ensures that authenticated user is the same as the authorized user

    // Make sure the TTransportFactory is performing a UGI.doAs
    TTransportFactory ugiTransportFactory = new TUGIAssumingTransportFactory(saslTransportFactory, serverUser);

    // Processor which takes the UGI for the RPC call, proxy that user on the server login, and then run as the proxied user
    TUGIAssumingProcessor ugiProcessor = new TUGIAssumingProcessor(processor);

    // Make a simple TThreadPoolServer with the processor and transport factory
    TServer server = new TThreadPoolServer(new TThreadPoolServer.Args(serverTransport)
            .transportFactory(ugiTransportFactory).processor(ugiProcessor));

    // Start the thrift server
    server.serve();
}
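
This Thrift server logs in from a keytab once at startup and then wraps its transport factory in a TUGIAssumingTransportFactory, so every accepted connection is handled inside a doAs() for the server's login user while SASL/GSSAPI performs the Kerberos handshake.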

From source file:ms.dew.core.hbase.HBaseAutoConfiguration.java

License:Apache License

/**
 * Init HBase connection.
 *
 * @param hbaseProperties hbase settings properties
 * @param conf            Hadoop configuration used for the connection
 * @return HBase connection
 * @throws IOException IOException
 */
@Bean
public Connection connection(HBaseProperties hbaseProperties, org.apache.hadoop.conf.Configuration conf)
        throws IOException {
    if ("kerberos".equalsIgnoreCase(hbaseProperties.getAuth().getType())) {
        System.setProperty("java.security.krb5.conf", hbaseProperties.getAuth().getKrb5());
        UserGroupInformation.setConfiguration(conf);
        UserGroupInformation.loginUserFromKeytab(hbaseProperties.getAuth().getPrincipal(),
                hbaseProperties.getAuth().getKeytab());
    }
    ThreadPoolExecutor poolExecutor = new ThreadPoolExecutor(200, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS,
            new SynchronousQueue<>());
    poolExecutor.prestartCoreThread();
    return ConnectionFactory.createConnection(conf, poolExecutor);
}
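
The Kerberos login is performed before ConnectionFactory.createConnection(), so the HBase connection is established under the user logged in from the keytab.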

From source file:org.apache.accumulo.cluster.ClusterUser.java

License:Apache License

/**
 * Computes the appropriate {@link AuthenticationToken} for the user represented by this object. The user may not yet exist in Accumulo.
 *
 * @return the correct {@link AuthenticationToken} to use with Accumulo for this user
 * @throws IOException
 *           if performing the necessary login failed
 */
public AuthenticationToken getToken() throws IOException {
    if (null != password) {
        return new PasswordToken(password);
    } else if (null != keytab) {
        UserGroupInformation.loginUserFromKeytab(principal, keytab.getAbsolutePath());
        return new KerberosToken();
    }

    throw new IllegalStateException("One of password and keytab must be non-null");
}
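
When a keytab is configured, the login happens eagerly here because the KerberosToken constructed afterwards is based on the credentials of the now-current login user.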