List of usage examples for org.apache.hadoop.security.UserGroupInformation#reloginFromKeytab
@InterfaceAudience.Public @InterfaceStability.Evolving public void reloginFromKeytab() throws IOException
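Before the full examples, a minimal sketch of the typical call pattern may help: log in once from a keytab, hold on to the resulting UserGroupInformation, and invoke reloginFromKeytab() on it periodically before the TGT expires. The principal and keytab path below are hypothetical placeholders, not values taken from either example.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class ReloginSketch {
    public static void main(String[] args) throws IOException {
        // Hypothetical principal and keytab path, for illustration only.
        String principal = "service/host.example.com@EXAMPLE.COM";
        String keytab = "/etc/security/keytabs/service.keytab";

        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);

        // Initial login; later re-logins reuse the same keytab.
        UserGroupInformation.loginUserFromKeytab(principal, keytab);
        UserGroupInformation ugi = UserGroupInformation.getLoginUser();

        // Called later (e.g. from a timer or before issuing RPCs) to
        // refresh the TGT from the keytab before it expires.
        ugi.reloginFromKeytab();
    }
}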
From source file:io.confluent.connect.hdfs.DataWriter.java
License:Apache License
public DataWriter(HdfsSinkConnectorConfig connectorConfig, SinkTaskContext context, AvroData avroData) {
    try {
        String hadoopHome = connectorConfig.getString(HdfsSinkConnectorConfig.HADOOP_HOME_CONFIG);
        System.setProperty("hadoop.home.dir", hadoopHome);

        this.connectorConfig = connectorConfig;
        this.avroData = avroData;
        this.context = context;

        String hadoopConfDir = connectorConfig.getString(HdfsSinkConnectorConfig.HADOOP_CONF_DIR_CONFIG);
        log.info("Hadoop configuration directory {}", hadoopConfDir);
        conf = new Configuration();
        if (!hadoopConfDir.equals("")) {
            conf.addResource(new Path(hadoopConfDir + "/core-site.xml"));
            conf.addResource(new Path(hadoopConfDir + "/hdfs-site.xml"));
        }

        boolean secureHadoop = connectorConfig
                .getBoolean(HdfsSinkConnectorConfig.HDFS_AUTHENTICATION_KERBEROS_CONFIG);
        if (secureHadoop) {
            SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
            String principalConfig = connectorConfig
                    .getString(HdfsSinkConnectorConfig.CONNECT_HDFS_PRINCIPAL_CONFIG);
            String keytab = connectorConfig.getString(HdfsSinkConnectorConfig.CONNECT_HDFS_KEYTAB_CONFIG);

            if (principalConfig == null || keytab == null) {
                throw new ConfigException(
                        "Hadoop is using Kerberos for authentication, you need to provide both a connect "
                                + "principal and the path to the keytab of the principal.");
            }

            conf.set("hadoop.security.authentication", "kerberos");
            conf.set("hadoop.security.authorization", "true");
            String hostname = InetAddress.getLocalHost().getCanonicalHostName();
            // Replace the _HOST specified in the principal config with the actual host.
            String principal = SecurityUtil.getServerPrincipal(principalConfig, hostname);
            String namenodePrincipalConfig = connectorConfig
                    .getString(HdfsSinkConnectorConfig.HDFS_NAMENODE_PRINCIPAL_CONFIG);
            String namenodePrincipal = SecurityUtil.getServerPrincipal(namenodePrincipalConfig, hostname);

            // The namenode principal is needed for a multi-node Hadoop cluster.
            if (conf.get("dfs.namenode.kerberos.principal") == null) {
                conf.set("dfs.namenode.kerberos.principal", namenodePrincipal);
            }
            log.info("Hadoop namenode principal: " + conf.get("dfs.namenode.kerberos.principal"));

            UserGroupInformation.setConfiguration(conf);
            UserGroupInformation.loginUserFromKeytab(principal, keytab);
            final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
            log.info("Login as: " + ugi.getUserName());

            final long renewPeriod = connectorConfig
                    .getLong(HdfsSinkConnectorConfig.KERBEROS_TICKET_RENEW_PERIOD_MS_CONFIG);
            isRunning = true;
            ticketRenewThread = new Thread(new Runnable() {
                @Override
                public void run() {
                    synchronized (DataWriter.this) {
                        while (isRunning) {
                            try {
                                DataWriter.this.wait(renewPeriod);
                                if (isRunning) {
                                    ugi.reloginFromKeytab();
                                }
                            } catch (IOException e) {
                                // We ignore this exception during relogin, as each successful relogin
                                // gives an additional 24 hours of authentication in the default config.
                                // In normal situations, the probability of failing relogin 24 times is
                                // low, and if that happens, the task will fail eventually.
                                log.error("Error renewing the ticket", e);
                            } catch (InterruptedException e) {
                                // ignored
                            }
                        }
                    }
                }
            });
            log.info("Starting the Kerberos ticket renew thread with period {}ms.", renewPeriod);
            ticketRenewThread.start();
        }

        url = connectorConfig.getString(HdfsSinkConnectorConfig.HDFS_URL_CONFIG);
        topicsDir = connectorConfig.getString(HdfsSinkConnectorConfig.TOPICS_DIR_CONFIG);
        String logsDir = connectorConfig.getString(HdfsSinkConnectorConfig.LOGS_DIR_CONFIG);

        @SuppressWarnings("unchecked")
        Class<? extends Storage> storageClass = (Class<? extends Storage>) Class
                .forName(connectorConfig.getString(HdfsSinkConnectorConfig.STORAGE_CLASS_CONFIG));
        storage = StorageFactory.createStorage(storageClass, conf, url);

        createDir(topicsDir);
        createDir(topicsDir + HdfsSinkConnectorConstants.TEMPFILE_DIRECTORY);
        createDir(logsDir);

        format = getFormat();
        writerProvider = format.getRecordWriterProvider();
        schemaFileReader = format.getSchemaFileReader(avroData);

        partitioner = createPartitioner(connectorConfig);

        assignment = new HashSet<>(context.assignment());
        offsets = new HashMap<>();

        hiveIntegration = connectorConfig.getBoolean(HdfsSinkConnectorConfig.HIVE_INTEGRATION_CONFIG);
        if (hiveIntegration) {
            hiveDatabase = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_DATABASE_CONFIG);
            hiveMetaStore = new HiveMetaStore(conf, connectorConfig);
            hive = format.getHiveUtil(connectorConfig, avroData, hiveMetaStore);
            executorService = Executors.newSingleThreadExecutor();
            hiveUpdateFutures = new LinkedList<>();
        }

        topicPartitionWriters = new HashMap<>();
        for (TopicPartition tp : assignment) {
            TopicPartitionWriter topicPartitionWriter = new TopicPartitionWriter(tp, storage, writerProvider,
                    partitioner, connectorConfig, context, avroData, hiveMetaStore, hive, schemaFileReader,
                    executorService, hiveUpdateFutures);
            topicPartitionWriters.put(tp, topicPartitionWriter);
        }
    } catch (ClassNotFoundException | IllegalAccessException | InstantiationException e) {
        throw new ConnectException("Reflection exception: ", e);
    } catch (IOException e) {
        throw new ConnectException(e);
    }
}
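The DataWriter example hand-rolls its renewal loop with wait/notify so that a shutdown can wake the thread promptly, and, as its comment explains, it only logs a failed relogin because the current ticket usually stays valid long enough for a later attempt to succeed. A sketch of the same idea built on ScheduledExecutorService follows; the TicketRenewer class and its methods are illustrative, not part of the source above.

import java.io.IOException;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.security.UserGroupInformation;

public class TicketRenewer {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    // renewPeriodMs would come from configuration, as in the DataWriter example.
    public void start(final UserGroupInformation ugi, long renewPeriodMs) {
        scheduler.scheduleAtFixedRate(new Runnable() {
            @Override
            public void run() {
                try {
                    ugi.reloginFromKeytab();
                } catch (IOException e) {
                    // Tolerate transient failures: the current TGT typically stays
                    // valid for hours, so a later run can still succeed. Swallowing
                    // the exception also keeps the scheduler from cancelling the task.
                }
            }
        }, renewPeriodMs, renewPeriodMs, TimeUnit.MILLISECONDS);
    }

    public void stop() {
        scheduler.shutdownNow();
    }
}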
From source file:org.apache.accumulo.core.rpc.ThriftUtil.java
License:Apache License
/**
 * Some wonderful snippets of documentation from HBase on performing the re-login client-side (as
 * well as server-side) in the following paragraph. We want to attempt a re-login to automatically
 * refresh the client's Krb "credentials" (remember, a server might also be a client, master
 * sending RPC to tserver), but we have to take care to avoid Kerberos' replay attack protection.
 * <p>
 * If multiple clients with the same principal try to connect to the same server at the same time,
 * the server assumes a replay attack is in progress. This is a feature of Kerberos. To work around
 * this, the client backs off randomly and tries to initiate the connection again. The other
 * problem has to do with ticket expiry; to handle that, a relogin is attempted.
 */
static void attemptClientReLogin() {
    try {
        UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
        if (null == loginUser || !loginUser.hasKerberosCredentials()) {
            // We should have already checked that we're logged in and have credentials. A
            // precondition-like check.
            throw new RuntimeException("Expected to find Kerberos UGI credentials, but did not");
        }
        UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
        // A proxy user is the "effective user" (in name only), riding on top of the
        // "real user"'s Krb credentials.
        UserGroupInformation realUser = currentUser.getRealUser();

        // Re-login only in case it is the login user or superuser.
        if (loginUser.equals(currentUser) || loginUser.equals(realUser)) {
            if (UserGroupInformation.isLoginKeytabBased()) {
                log.info("Performing keytab-based Kerberos re-login");
                loginUser.reloginFromKeytab();
            } else {
                log.info("Performing ticket-cache-based Kerberos re-login");
                loginUser.reloginFromTicketCache();
            }

            // Avoid the replay attack protection: sleep 1 to 5000ms.
            try {
                Thread.sleep((SASL_BACKOFF_RAND.nextInt(RELOGIN_MAX_BACKOFF) + 1));
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                return;
            }
        } else {
            log.debug("Not attempting Kerberos re-login: loginUser={}, currentUser={}, realUser={}",
                    loginUser, currentUser, realUser);
        }
    } catch (IOException e) {
        // The inability to check is worrisome and deserves a RuntimeException instead of a
        // propagated IO-like Exception.
        log.warn("Failed to check (and/or perform) Kerberos client re-login", e);
        throw new RuntimeException(e);
    }
}
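Both examples call reloginFromKeytab() unconditionally, either on a fixed schedule or on every connection attempt. UserGroupInformation also provides checkTGTAndReloginFromKeytab(), which re-logs in only when the TGT is close to expiring, making it cheap to call before each RPC. A minimal sketch of that variant follows; the helper class and method name are hypothetical.

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public class RpcClientHelper {
    // Hypothetical helper: call before issuing an RPC. This is effectively a
    // no-op unless the login is keytab-based and the TGT is near expiry.
    static void ensureFreshTgt() throws IOException {
        UserGroupInformation ugi = UserGroupInformation.getLoginUser();
        if (ugi != null && UserGroupInformation.isLoginKeytabBased()) {
            ugi.checkTGTAndReloginFromKeytab();
        }
    }
}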