Example usage for org.apache.hadoop.security UserGroupInformation setConfiguration

Introduction

On this page you can find usage examples of org.apache.hadoop.security.UserGroupInformation.setConfiguration.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void setConfiguration(Configuration conf) 

Document

Set the static configuration for UGI.
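
Before the project examples below, here is a minimal, self-contained sketch of the typical call pattern (not taken from any of the listed projects; the principal and keytab path are placeholders): enable Kerberos in a Configuration, install it as the static UGI configuration, then log in from a keytab.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiSetConfigurationSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Switch the authentication method from the default "simple" to Kerberos.
        conf.set("hadoop.security.authentication", "kerberos");

        // setConfiguration is static: it affects all subsequent UGI calls in this JVM.
        UserGroupInformation.setConfiguration(conf);

        if (UserGroupInformation.isSecurityEnabled()) {
            // Placeholder principal and keytab path; replace with real values.
            UserGroupInformation.loginUserFromKeytab("user/host@EXAMPLE.COM",
                    "/etc/security/keytabs/user.keytab");
        }
        System.out.println("Logged in as: " + UserGroupInformation.getLoginUser().getUserName());
    }
}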

Usage

From source file: common.NameNode.java

License: Apache License

protected NameNode(Configuration conf, NamenodeRole role) throws IOException {
    UserGroupInformation.setConfiguration(conf);
    DFSUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY);

    this.role = role;
    try {
        initialize(conf);
    } catch (IOException e) {
        this.stop();
        throw e;
    }
}

From source file: gobblin.hadoop.token.TokenUtils.java

License: Open Source License

/**
 * Get Hadoop tokens (tokens for job history server, job tracker and HDFS) using Kerberos keytab.
 *
 * @param state A {@link State} object that should contain the properties {@link #USER_TO_PROXY},
 * {@link #KEYTAB_USER} and {@link #KEYTAB_LOCATION}. To obtain tokens for
 * other namenodes, use the property {@link #OTHER_NAMENODES} with comma-separated HDFS URIs.
 * @return A {@link File} containing the negotiated credentials.
 */
public static File getHadoopTokens(final State state) throws IOException, InterruptedException {

    Preconditions.checkArgument(state.contains(KEYTAB_USER), "Missing required property " + KEYTAB_USER);
    Preconditions.checkArgument(state.contains(KEYTAB_LOCATION),
            "Missing required property " + KEYTAB_LOCATION);

    Configuration configuration = new Configuration();
    configuration.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS);
    UserGroupInformation.setConfiguration(configuration);
    UserGroupInformation.loginUserFromKeytab(state.getProp(KEYTAB_USER), state.getProp(KEYTAB_LOCATION));

    final Optional<String> userToProxy = Strings.isNullOrEmpty(state.getProp(USER_TO_PROXY))
            ? Optional.<String>absent()
            : Optional.fromNullable(state.getProp(USER_TO_PROXY));
    final Configuration conf = new Configuration();
    final Credentials cred = new Credentials();

    LOG.info("Getting tokens for " + userToProxy);

    getJhToken(conf, cred);
    getFsAndJtTokens(state, conf, userToProxy, cred);

    File tokenFile = File.createTempFile("mr-azkaban", ".token");
    persistTokens(cred, tokenFile);

    return tokenFile;
}

From source file: gobblin.runtime.instance.plugin.hadoop.HadoopKerberosKeytabAuthenticationPlugin.java

License: Apache License

/** {@inheritDoc} */
@Override
protected void startUp() throws Exception {
    try {
        UserGroupInformation.setConfiguration(_hadoopConf);
        if (UserGroupInformation.isSecurityEnabled()) {
            UserGroupInformation.loginUserFromKeytab(_loginUser, _loginUserKeytabFile);
        }
    } catch (Throwable t) {
        log.error("Failed to start up HadoopKerberosKeytabAuthenticationPlugin", t);
        throw t;
    }

}

From source file: gobblin.yarn.YarnAppSecurityManager.java

License: Apache License

/**
 * Logs in the user from a given keytab file.
 */
private void loginFromKeytab() throws IOException {
    String keyTabFilePath = this.config.getString(GobblinYarnConfigurationKeys.KEYTAB_FILE_PATH);
    if (Strings.isNullOrEmpty(keyTabFilePath)) {
        throw new IOException("Keytab file path is not defined for Kerberos login");
    }

    if (!new File(keyTabFilePath).exists()) {
        throw new IOException("Keytab file not found at: " + keyTabFilePath);
    }

    String principal = this.config.getString(GobblinYarnConfigurationKeys.KEYTAB_PRINCIPAL_NAME);
    if (Strings.isNullOrEmpty(principal)) {
        principal = this.loginUser.getShortUserName() + "/localhost@LOCALHOST";
    }

    Configuration conf = new Configuration();
    conf.set("hadoop.security.authentication",
            UserGroupInformation.AuthenticationMethod.KERBEROS.toString().toLowerCase());
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab(principal, keyTabFilePath);
    LOGGER.info(String.format("Logged in from keytab file %s using principal %s", keyTabFilePath, principal));

    this.loginUser = UserGroupInformation.getLoginUser();

    getNewDelegationTokenForLoginUser();
    writeDelegationTokenToFile();

    if (!this.firstLogin) {
        // Send a message to the controller and all the participants
        sendTokenFileUpdatedMessage(InstanceType.CONTROLLER);
        sendTokenFileUpdatedMessage(InstanceType.PARTICIPANT);
    }
}

From source file: hydrograph.server.utilities.kerberos.KerberosUtilities.java

License: Open Source License

/**
 *
 * @param user
 * @param password
 * @param configuration
 * @throws LoginException
 * @throws IOException
 */
private void getKerberosToken(String user, char[] password, Configuration configuration)
        throws LoginException, IOException {
    LOG.trace("Entering method getKerberosToken() for user: " + user);
    URL url = HydrographService.class.getClassLoader().getResource("jaas.conf");
    System.setProperty("java.security.auth.login.config", url.toExternalForm());

    LOG.info("Generating Kerberos ticket for user: " + user);
    UserGroupInformation.setConfiguration(configuration);

    LoginContext lc = new LoginContext("EntryName", new UserPassCallbackHandler(user, password));
    lc.login();

    Subject subject = lc.getSubject();
    UserGroupInformation.loginUserFromSubject(subject);
    Subject.doAs(subject, this);
    LOG.info("Kerberos ticket successfully generated for user: " + user);
}

From source file: io.confluent.connect.hdfs.DataWriter.java

License: Apache License

public DataWriter(HdfsSinkConnectorConfig connectorConfig, SinkTaskContext context, AvroData avroData) {
    try {
        String hadoopHome = connectorConfig.getString(HdfsSinkConnectorConfig.HADOOP_HOME_CONFIG);
        System.setProperty("hadoop.home.dir", hadoopHome);

        this.connectorConfig = connectorConfig;
        this.avroData = avroData;
        this.context = context;

        String hadoopConfDir = connectorConfig.getString(HdfsSinkConnectorConfig.HADOOP_CONF_DIR_CONFIG);
        log.info("Hadoop configuration directory {}", hadoopConfDir);
        conf = new Configuration();
        if (!hadoopConfDir.equals("")) {
            conf.addResource(new Path(hadoopConfDir + "/core-site.xml"));
            conf.addResource(new Path(hadoopConfDir + "/hdfs-site.xml"));
        }

        boolean secureHadoop = connectorConfig
                .getBoolean(HdfsSinkConnectorConfig.HDFS_AUTHENTICATION_KERBEROS_CONFIG);
        if (secureHadoop) {
            SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
            String principalConfig = connectorConfig
                    .getString(HdfsSinkConnectorConfig.CONNECT_HDFS_PRINCIPAL_CONFIG);
            String keytab = connectorConfig.getString(HdfsSinkConnectorConfig.CONNECT_HDFS_KEYTAB_CONFIG);

            if (principalConfig == null || keytab == null) {
                throw new ConfigException(
                        "Hadoop is using Kerberos for authentication; you need to provide both a connect principal and "
                                + "the path to the keytab of the principal.");
            }

            conf.set("hadoop.security.authentication", "kerberos");
            conf.set("hadoop.security.authorization", "true");
            String hostname = InetAddress.getLocalHost().getCanonicalHostName();
            // replace the _HOST specified in the principal config with the actual host
            String principal = SecurityUtil.getServerPrincipal(principalConfig, hostname);
            String namenodePrincipalConfig = connectorConfig
                    .getString(HdfsSinkConnectorConfig.HDFS_NAMENODE_PRINCIPAL_CONFIG);

            String namenodePrincipal = SecurityUtil.getServerPrincipal(namenodePrincipalConfig, hostname);
            // namenode principal is needed for multi-node hadoop cluster
            if (conf.get("dfs.namenode.kerberos.principal") == null) {
                conf.set("dfs.namenode.kerberos.principal", namenodePrincipal);
            }
            log.info("Hadoop namenode principal: " + conf.get("dfs.namenode.kerberos.principal"));

            UserGroupInformation.setConfiguration(conf);
            UserGroupInformation.loginUserFromKeytab(principal, keytab);
            final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
            log.info("Login as: " + ugi.getUserName());

            final long renewPeriod = connectorConfig
                    .getLong(HdfsSinkConnectorConfig.KERBEROS_TICKET_RENEW_PERIOD_MS_CONFIG);

            isRunning = true;
            ticketRenewThread = new Thread(new Runnable() {
                @Override
                public void run() {
                    synchronized (DataWriter.this) {
                        while (isRunning) {
                            try {
                                DataWriter.this.wait(renewPeriod);
                                if (isRunning) {
                                    ugi.reloginFromKeytab();
                                }
                            } catch (IOException e) {
                                // We ignore this exception during relogin, as each successful relogin gives
                                // an additional 24 hours of authentication with the default config. In normal
                                // situations, the probability of relogin failing 24 times in a row is low, and
                                // if that happens, the task will fail eventually.
                                log.error("Error renewing the ticket", e);
                            } catch (InterruptedException e) {
                                // ignored
                            }
                        }
                    }
                }
            });
            log.info("Starting the Kerberos ticket renew thread with period {}ms.", renewPeriod);
            ticketRenewThread.start();
        }

        url = connectorConfig.getString(HdfsSinkConnectorConfig.HDFS_URL_CONFIG);
        topicsDir = connectorConfig.getString(HdfsSinkConnectorConfig.TOPICS_DIR_CONFIG);
        String logsDir = connectorConfig.getString(HdfsSinkConnectorConfig.LOGS_DIR_CONFIG);

        @SuppressWarnings("unchecked")
        Class<? extends Storage> storageClass = (Class<? extends Storage>) Class
                .forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG));
        storage = StorageFactory.createStorage(storageClass, conf, url);

        createDir(topicsDir);
        createDir(topicsDir + HdfsSinkConnectorConstants.TEMPFILE_DIRECTORY);
        createDir(logsDir);

        format = getFormat();
        writerProvider = format.getRecordWriterProvider();
        schemaFileReader = format.getSchemaFileReader(avroData);

        partitioner = createPartitioner(connectorConfig);

        assignment = new HashSet<>(context.assignment());
        offsets = new HashMap<>();

        hiveIntegration = connectorConfig.getBoolean(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG);
        if (hiveIntegration) {
            hiveDatabase = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_DATABASE_CONFIG);
            hiveMetaStore = new HiveMetaStore(conf, connectorConfig);
            hive = format.getHiveUtil(connectorConfig, avroData, hiveMetaStore);
            executorService = Executors.newSingleThreadExecutor();
            hiveUpdateFutures = new LinkedList<>();
        }

        topicPartitionWriters = new HashMap<>();
        for (TopicPartition tp : assignment) {
            TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter(tp, storage, writerProvider,
                    partitioner, connectorConfig, context, avroData, hiveMetaStore, hive, schemaFileReader,
                    executorService, hiveUpdateFutures);
            topicPartitionWriters.put(tp, topicPartitionWriter);
        }
    } catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
        throw new ConnectException("Reflection exception: ", e);
    } catch (IOException e) {
        throw new ConnectException(e);
    }
}

From source file: io.druid.security.kerberos.DruidKerberosUtil.java

License: Apache License

public static void authenticateIfRequired(AuthenticationKerberosConfig config) throws IOException {
    String principal = config.getPrincipal();
    String keytab = config.getKeytab();
    if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(keytab)) {
        Configuration conf = new Configuration();
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);
        try {
            if (UserGroupInformation.getCurrentUser().hasKerberosCredentials() == false
                    || !UserGroupInformation.getCurrentUser().getUserName().equals(principal)) {
                log.info("trying to authenticate user [%s] with keytab [%s]", principal, keytab);
                UserGroupInformation.loginUserFromKeytab(principal, keytab);
            }
        } catch (IOException e) {
            throw new ISE(e, "Failed to authenticate user principal [%s] with keytab [%s]", principal, keytab);
        }
    }
}

From source file: io.druid.storage.hdfs.HdfsStorageAuthentication.java

License: Apache License

/**
 * Authenticates against a secured Hadoop cluster.
 * In case of any bug fix, make sure to fix the code in JobHelper#authenticate as well.
 */
@LifecycleStart
public void authenticate() {
    String principal = hdfsKerberosConfig.getPrincipal();
    String keytab = hdfsKerberosConfig.getKeytab();
    if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(keytab)) {
        UserGroupInformation.setConfiguration(hadoopConf);
        if (UserGroupInformation.isSecurityEnabled()) {
            try {
                if (UserGroupInformation.getCurrentUser().hasKerberosCredentials() == false
                        || !UserGroupInformation.getCurrentUser().getUserName().equals(principal)) {
                    log.info("Trying to authenticate user [%s] with keytab [%s]..", principal, keytab);
                    UserGroupInformation.loginUserFromKeytab(principal, keytab);
                }
            } catch (IOException e) {
                throw new ISE(e, "Failed to authenticate user principal [%s] with keytab [%s]", principal,
                        keytab);
            }
        }
    }
}

From source file: io.prestosql.plugin.hive.authentication.KerberosHadoopAuthentication.java

License: Apache License

public static KerberosHadoopAuthentication createKerberosHadoopAuthentication(
        KerberosAuthentication kerberosAuthentication, HdfsConfigurationInitializer initializer) {
    Configuration configuration = getInitialConfiguration();
    initializer.initializeConfiguration(configuration);

    // In order to enable KERBEROS authentication method for HDFS
    // UserGroupInformation.authenticationMethod static field must be set to KERBEROS
    // It is further used in many places in DfsClient
    configuration.set("hadoop.security.authentication", "kerberos");

    UserGroupInformation.setConfiguration(configuration);

    return new KerberosHadoopAuthentication(kerberosAuthentication);
}

From source file: ml.shifu.guagua.yarn.GuaguaAppMaster.java

License: Apache License

/**
 * Application entry point
 * 
 * @param args
 *            command-line args (set by GuaguaYarnClient, if any)
 */
public static void main(final String[] args) {
    LOG.info("Starting GuaguaAppMaster. ");
    String containerIdString = System.getenv().get(Environment.CONTAINER_ID.name());
    if (containerIdString == null) {
        // container id should always be set in the env by the framework
        throw new IllegalArgumentException("ContainerId not found in env vars.");
    }
    ContainerId containerId = ConverterUtils.toContainerId(containerIdString);
    ApplicationAttemptId appAttemptId = containerId.getApplicationAttemptId();
    Configuration conf = new YarnConfiguration();
    String jobUserName = System.getenv(ApplicationConstants.Environment.USER.name());
    conf.set(MRJobConfig.USER_NAME, jobUserName);
    try {
        UserGroupInformation.setConfiguration(conf);
        // Security framework already loaded the tokens into current UGI, just use them
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        LOG.info("Executing with tokens:");
        for (Token<?> token : credentials.getAllTokens()) {
            LOG.info(token.toString());
        }

        UserGroupInformation appMasterUgi = UserGroupInformation.createRemoteUser(jobUserName);
        appMasterUgi.addCredentials(credentials);

        // Now remove the AM->RM token so tasks don't have it
        Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
        while (iter.hasNext()) {
            Token<?> token = iter.next();
            if (token.getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
                iter.remove();
            }
        }

        final GuaguaAppMaster appMaster = new GuaguaAppMaster(containerId, appAttemptId, conf);
        appMasterUgi.doAs(new PrivilegedAction<Void>() {
            @Override
            public Void run() {
                boolean result = false;
                try {
                    result = appMaster.run();
                } catch (Throwable t) {
                    LOG.error("GuaguaAppMaster caught a top-level exception in main.", t);
                    System.exit(1);
                }

                if (result) {
                    LOG.info("Guagua Application Master completed successfully. exiting");
                    System.exit(0);
                } else {
                    LOG.info("Guagua Application Master failed. exiting");
                    System.exit(2);
                }
                return null;
            }
        });

    } catch (Throwable t) {
        LOG.error("GuaguaAppMaster caught a top-level exception in main.", t);
        System.exit(1);
    }
}