List of usage examples for org.apache.hadoop.security.UserGroupInformation.getLoginUser()
@InterfaceAudience.Public @InterfaceStability.Evolving public static UserGroupInformation getLoginUser() throws IOException
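Before the per-project examples below, here is a minimal, illustrative sketch of the pattern they all share: apply the cluster security settings to UGI, obtain the process-wide login user with getLoginUser(), and optionally wrap it in a proxy user for impersonation. The class name, the main method, and the proxy user name "alice" are placeholders for illustration only; only the UserGroupInformation and Configuration calls come from the Hadoop API.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class GetLoginUserSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Apply the security settings (e.g. hadoop.security.authentication) before any login call.
        UserGroupInformation.setConfiguration(conf);

        // The login user is the identity of the current JVM process (Kerberos or simple, per conf).
        UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
        System.out.println("Login user: " + loginUser.getUserName()
                + ", auth: " + loginUser.getAuthenticationMethod()
                + ", from keytab: " + loginUser.isFromKeytab());

        // Impersonation: act on behalf of another user on top of the login user ("alice" is a placeholder).
        UserGroupInformation proxy = UserGroupInformation.createProxyUser("alice", loginUser);
        System.out.println("Proxy user: " + proxy.getUserName());
    }
}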
From source file: com.rim.logdriver.sawmill.Authenticator.java
License: Apache License
private boolean authenticate(String proxyUserName) {
    UserGroupInformation proxyTicket;

    // logic for kerberos login
    boolean useSecurity = UserGroupInformation.isSecurityEnabled();

    LOG.info("Hadoop Security enabled: " + useSecurity);

    if (useSecurity) {
        // sanity checking
        if (kerbConfPrincipal.isEmpty()) {
            LOG.error("Hadoop running in secure mode, but Flume config doesn't "
                    + "specify a principal to use for Kerberos auth.");
            return false;
        }
        if (kerbKeytab.isEmpty()) {
            LOG.error("Hadoop running in secure mode, but Flume config doesn't "
                    + "specify a keytab to use for Kerberos auth.");
            return false;
        }

        String principal;
        try {
            // resolves _HOST pattern using standard Hadoop search/replace
            // via DNS lookup when 2nd argument is empty
            principal = SecurityUtil.getServerPrincipal(kerbConfPrincipal, "");
        } catch (IOException e) {
            LOG.error("Host lookup error resolving kerberos principal (" + kerbConfPrincipal
                    + "). Exception follows.", e);
            return false;
        }

        Preconditions.checkNotNull(principal, "Principal must not be null");
        KerberosUser prevUser = staticLogin.get();
        KerberosUser newUser = new KerberosUser(principal, kerbKeytab);

        // be cruel and unusual when user tries to login as multiple principals
        // this isn't really valid with a reconfigure but this should be rare
        // enough to warrant a restart of the agent JVM
        // TODO: find a way to interrogate the entire current config state,
        // since we don't have to be unnecessarily protective if they switch all
        // HDFS sinks to use a different principal all at once.
        Preconditions.checkState(prevUser == null || prevUser.equals(newUser),
                "Cannot use multiple kerberos principals in the same agent. "
                        + " Must restart agent to use new principal or keytab. "
                        + "Previous = %s, New = %s", prevUser, newUser);

        // attempt to use cached credential if the user is the same
        // this is polite and should avoid flooding the KDC with auth requests
        UserGroupInformation curUser = null;
        if (prevUser != null && prevUser.equals(newUser)) {
            try {
                curUser = UserGroupInformation.getLoginUser();
            } catch (IOException e) {
                LOG.warn("User unexpectedly had no active login. Continuing with "
                        + "authentication", e);
            }
        }

        if (curUser == null || !curUser.getUserName().equals(principal)) {
            try {
                // static login
                kerberosLogin(this, principal, kerbKeytab);
            } catch (IOException e) {
                LOG.error("Authentication or file read error while attempting to "
                        + "login as kerberos principal (" + principal + ") using "
                        + "keytab (" + kerbKeytab + "). Exception follows.", e);
                return false;
            }
        } else {
            LOG.debug("{}: Using existing principal login: {}", this, curUser);
        }

        try {
            if (UserGroupInformation.getLoginUser().isFromKeytab() == false) {
                LOG.error("Not using a keytab for authentication. Shutting down.");
                System.exit(1);
            }
        } catch (IOException e) {
            LOG.error("Failed to get login user.", e);
            System.exit(1);
        }

        // we supposedly got through this unscathed... so store the static user
        staticLogin.set(newUser);
    }

    // hadoop impersonation works with or without kerberos security
    proxyTicket = null;
    if (!proxyUserName.isEmpty()) {
        try {
            proxyTicket = UserGroupInformation.createProxyUser(proxyUserName,
                    UserGroupInformation.getLoginUser());
        } catch (IOException e) {
            LOG.error("Unable to login as proxy user. Exception follows.", e);
            return false;
        }
    }

    UserGroupInformation ugi = null;
    if (proxyTicket != null) {
        ugi = proxyTicket;
    } else if (useSecurity) {
        try {
            ugi = UserGroupInformation.getLoginUser();
        } catch (IOException e) {
            LOG.error("Unexpected error: Unable to get authenticated user after "
                    + "apparent successful login! Exception follows.", e);
            return false;
        }
    }

    if (ugi != null) {
        // dump login information
        AuthenticationMethod authMethod = ugi.getAuthenticationMethod();
        LOG.info("Auth method: {}", authMethod);
        LOG.info(" User name: {}", ugi.getUserName());
        LOG.info(" Using keytab: {}", ugi.isFromKeytab());
        if (authMethod == AuthenticationMethod.PROXY) {
            UserGroupInformation superUser;
            try {
                superUser = UserGroupInformation.getLoginUser();
                LOG.info(" Superuser auth: {}", superUser.getAuthenticationMethod());
                LOG.info(" Superuser name: {}", superUser.getUserName());
                LOG.info(" Superuser using keytab: {}", superUser.isFromKeytab());
            } catch (IOException e) {
                LOG.error("Unexpected error: unknown superuser impersonating proxy.", e);
                return false;
            }
        }

        LOG.info("Logged in as user {}", ugi.getUserName());

        UGIState state = new UGIState();
        state.ugi = proxyTicket;
        state.lastAuthenticated = System.currentTimeMillis();
        proxyUserMap.put(proxyUserName, state);

        return true;
    }

    return true;
}
From source file: com.streamsets.datacollector.security.DefaultLoginUgiProvider.java
License: Apache License
@Override
public UserGroupInformation getLoginUgi(Configuration hdfsConfiguration) throws IOException {
    AccessControlContext accessContext = AccessController.getContext();
    Subject subject = Subject.getSubject(accessContext);
    UserGroupInformation loginUgi;
    //HADOOP-13805
    HadoopConfigurationUtils.configureHadoopTreatSubjectExternal(hdfsConfiguration);
    UserGroupInformation.setConfiguration(hdfsConfiguration);
    if (UserGroupInformation.isSecurityEnabled()) {
        loginUgi = UserGroupInformation.getUGIFromSubject(subject);
    } else {
        UserGroupInformation.loginUserFromSubject(subject);
        loginUgi = UserGroupInformation.getLoginUser();
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
    }
    return loginUgi;
}
From source file: com.streamsets.datacollector.security.HadoopSecurityUtil.java
License: Apache License
public static UserGroupInformation getLoginUser(Configuration hdfsConfiguration) throws IOException {
    UserGroupInformation loginUgi;
    AccessControlContext accessContext = AccessController.getContext();
    Subject subject = Subject.getSubject(accessContext);

    // As per SDC-2917 doing this avoids deadlock
    synchronized (SecurityUtil.getSubjectDomainLock(accessContext)) {
        // call some method to force load static block in KerberosName
        KerberosName.hasRulesBeenSet();
    }
    // This should be always out of sync block
    UserGroupInformation.setConfiguration(hdfsConfiguration);
    synchronized (SecurityUtil.getSubjectDomainLock(accessContext)) {
        if (UserGroupInformation.isSecurityEnabled()) {
            loginUgi = UserGroupInformation.getUGIFromSubject(subject);
        } else {
            UserGroupInformation.loginUserFromSubject(subject);
            loginUgi = UserGroupInformation.getLoginUser();
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Subject = {}, Principals = {}, Login UGI = {}", subject,
                    subject == null ? "null" : subject.getPrincipals(), loginUgi);
        }
    }
    return loginUgi;
}
From source file: com.streamsets.datacollector.security.LoginUgiProviderFactory.java
License: Apache License
private static LoginUgiProviderFactory getDefaultLoginUgiProviderFactory() {
    return new LoginUgiProviderFactory() {
        @Override
        public LoginUgiProvider createLoginUgiProvider() {
            return new LoginUgiProvider() {
                @Override
                public UserGroupInformation getLoginUgi(Subject subject) throws IOException {
                    UserGroupInformation loginUgi;
                    if (UserGroupInformation.isSecurityEnabled()) {
                        loginUgi = UserGroupInformation.getUGIFromSubject(subject);
                    } else {
                        UserGroupInformation.loginUserFromSubject(subject);
                        loginUgi = UserGroupInformation.getLoginUser();
                    }
                    return loginUgi;
                }
            };
        }
    };
}
From source file: com.streamsets.datacollector.security.MapRLoginUgiProvider.java
License: Apache License
@Override
public UserGroupInformation getLoginUgi(Configuration hdfsConfiguration) throws IOException {
    // check system property to see if MapR U/P security is enabled
    String maprLoginEnabled = System.getProperty(MAPR_USERNAME_PASSWORD_SECURITY_ENABLED_KEY,
            MAPR_USERNAME_PASSWORD_SECURITY_ENABLED_DEFAULT);
    boolean isMapRLogin = Boolean.parseBoolean(maprLoginEnabled);
    AccessControlContext accessControlContext = AccessController.getContext();
    Subject subject = Subject.getSubject(accessControlContext);
    //HADOOP-13805
    HadoopConfigurationUtils.configureHadoopTreatSubjectExternal(hdfsConfiguration);
    // SDC-4015 As privateclassloader is false for MapR, UGI is shared and it also needs to be under jvm lock
    UserGroupInformation.setConfiguration(hdfsConfiguration);
    UserGroupInformation loginUgi;

    if (UserGroupInformation.isSecurityEnabled() && !isMapRLogin) {
        // The code in this block must only be executed in case Kerberos is enabled.
        // MapR implementation of UserGroupInformation.isSecurityEnabled() returns true even if Kerberos
        // is not enabled. System property helps to avoid this code path in such a case
        loginUgi = UserGroupInformation.getUGIFromSubject(subject);
    } else {
        UserGroupInformation.loginUserFromSubject(subject);
        loginUgi = UserGroupInformation.getLoginUser();
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
    }
    return loginUgi;
}
From source file: com.streamsets.datacollector.security.TestHadoopSecurityUtil.java
License: Apache License
@Test
public void testGetLoginUser() throws Exception {
    final Configuration conf = new Configuration();
    UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
    UserGroupInformation ugi = HadoopSecurityUtil.getLoginUser(conf);
    Assert.assertEquals(loginUser.getUserName(), ugi.getUserName());
}
From source file: com.streamsets.pipeline.stage.destination.hbase.HBaseTarget.java
License: Apache License
private void validateSecurityConfigs(List<ConfigIssue> issues) {
    try {
        if (kerberosAuth) {
            hbaseConf.set(User.HBASE_SECURITY_CONF_KEY,
                    UserGroupInformation.AuthenticationMethod.KERBEROS.name());
            hbaseConf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                    UserGroupInformation.AuthenticationMethod.KERBEROS.name());
            if (hbaseConf.get(MASTER_KERBEROS_PRINCIPAL) == null) {
                try {
                    hbaseConf.set(MASTER_KERBEROS_PRINCIPAL, "hbase/_HOST@" + KerberosUtil.getDefaultRealm());
                } catch (Exception e) {
                    issues.add(getContext().createConfigIssue(Groups.HBASE.name(), "masterPrincipal",
                            Errors.HBASE_22));
                }
            }
            if (hbaseConf.get(REGIONSERVER_KERBEROS_PRINCIPAL) == null) {
                try {
                    hbaseConf.set(REGIONSERVER_KERBEROS_PRINCIPAL,
                            "hbase/_HOST@" + KerberosUtil.getDefaultRealm());
                } catch (Exception e) {
                    issues.add(getContext().createConfigIssue(Groups.HBASE.name(), "regionServerPrincipal",
                            Errors.HBASE_23));
                }
            }
        }

        UserGroupInformation.setConfiguration(hbaseConf);
        Subject subject = Subject.getSubject(AccessController.getContext());
        if (UserGroupInformation.isSecurityEnabled()) {
            loginUgi = UserGroupInformation.getUGIFromSubject(subject);
        } else {
            UserGroupInformation.loginUserFromSubject(subject);
            loginUgi = UserGroupInformation.getLoginUser();
        }
        LOG.info("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
        StringBuilder logMessage = new StringBuilder();
        if (kerberosAuth) {
            logMessage.append("Using Kerberos");
            if (loginUgi.getAuthenticationMethod() != UserGroupInformation.AuthenticationMethod.KERBEROS) {
                issues.add(getContext().createConfigIssue(Groups.HBASE.name(), "kerberosAuth", Errors.HBASE_16,
                        loginUgi.getAuthenticationMethod()));
            }
        } else {
            logMessage.append("Using Simple");
            hbaseConf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                    UserGroupInformation.AuthenticationMethod.SIMPLE.name());
        }
        LOG.info("Authentication Config: " + logMessage);
    } catch (Exception ex) {
        LOG.info("Error validating security configuration: " + ex, ex);
        issues.add(getContext().createConfigIssue(Groups.HBASE.name(), null, Errors.HBASE_17, ex.toString(), ex));
    }
}
From source file: com.streamsets.pipeline.stage.destination.hdfs.HdfsTarget.java
License: Apache License
private boolean validateHadoopFS(List<ConfigIssue> issues) {
    boolean validHapoopFsUri = true;
    if (hdfsUri.contains("://")) {
        try {
            new URI(hdfsUri);
        } catch (Exception ex) {
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_22, hdfsUri,
                    ex.toString(), ex));
            validHapoopFsUri = false;
        }
    } else {
        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri", Errors.HADOOPFS_18, hdfsUri));
        validHapoopFsUri = false;
    }
    StringBuilder logMessage = new StringBuilder();
    try {
        hdfsConfiguration = getHadoopConfiguration(issues);
        hdfsConfiguration.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, hdfsUri);
        // forcing UGI to initialize with the security settings from the stage
        UserGroupInformation.setConfiguration(hdfsConfiguration);
        Subject subject = Subject.getSubject(AccessController.getContext());
        if (UserGroupInformation.isSecurityEnabled()) {
            loginUgi = UserGroupInformation.getUGIFromSubject(subject);
        } else {
            UserGroupInformation.loginUserFromSubject(subject);
            loginUgi = UserGroupInformation.getLoginUser();
        }
        LOG.info("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
        if (hdfsKerberos) {
            logMessage.append("Using Kerberos");
            if (loginUgi.getAuthenticationMethod() != UserGroupInformation.AuthenticationMethod.KERBEROS) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsKerberos",
                        Errors.HADOOPFS_00, loginUgi.getAuthenticationMethod(),
                        UserGroupInformation.AuthenticationMethod.KERBEROS));
            }
        } else {
            logMessage.append("Using Simple");
            hdfsConfiguration.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                    UserGroupInformation.AuthenticationMethod.SIMPLE.name());
        }
        if (validHapoopFsUri) {
            getUGI().doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    try (FileSystem fs = getFileSystemForInitDestroy()) {
                        //to trigger the close
                    }
                    return null;
                }
            });
        }
    } catch (Exception ex) {
        LOG.info("Validation Error: " + Errors.HADOOPFS_01.getMessage(), hdfsUri, ex.toString(), ex);
        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_01, hdfsUri,
                String.valueOf(ex), ex));
    }
    LOG.info("Authentication Config: " + logMessage);
    return validHapoopFsUri;
}
From source file: com.streamsets.pipeline.stage.destination.hive.HiveTarget.java
License: Apache License
@Override
protected List<ConfigIssue> init() {
    List<ConfigIssue> issues = super.init();
    partitionsToFields = new HashMap<>();
    columnsToFields = new HashMap<>();

    hiveConf = new HiveConf();
    if (null != hiveConfDir && !hiveConfDir.isEmpty()) {
        File hiveConfDir = new File(this.hiveConfDir);
        if (!hiveConfDir.isAbsolute()) {
            hiveConfDir = new File(getContext().getResourcesDirectory(), this.hiveConfDir).getAbsoluteFile();
        }
        if (hiveConfDir.exists()) {
            File coreSite = new File(hiveConfDir.getAbsolutePath(), "core-site.xml");
            File hiveSite = new File(hiveConfDir.getAbsolutePath(), "hive-site.xml");
            File hdfsSite = new File(hiveConfDir.getAbsolutePath(), "hdfs-site.xml");

            if (!coreSite.exists()) {
                issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveConfDir", Errors.HIVE_06,
                        coreSite.getName(), this.hiveConfDir));
            } else {
                hiveConf.addResource(new Path(coreSite.getAbsolutePath()));
            }
            if (!hdfsSite.exists()) {
                issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveConfDir", Errors.HIVE_06,
                        hdfsSite.getName(), this.hiveConfDir));
            } else {
                hiveConf.addResource(new Path(hdfsSite.getAbsolutePath()));
            }
            if (!hiveSite.exists()) {
                issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveConfDir", Errors.HIVE_06,
                        hiveSite.getName(), this.hiveConfDir));
            } else {
                hiveConf.addResource(new Path(hiveSite.getAbsolutePath()));
            }
        } else {
            issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveConfDir", Errors.HIVE_07,
                    this.hiveConfDir));
        }
    } else if (hiveThriftUrl == null || hiveThriftUrl.isEmpty()) {
        issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveThriftUrl", Errors.HIVE_13));
    }

    // Specified URL overrides what's in the Hive Conf
    hiveConf.set(HIVE_METASTORE_URI, hiveThriftUrl);
    // Add any additional hive conf overrides
    for (Map.Entry<String, String> entry : additionalHiveProperties.entrySet()) {
        hiveConf.set(entry.getKey(), entry.getValue());
    }

    try {
        // forcing UGI to initialize with the security settings from the stage
        UserGroupInformation.setConfiguration(hiveConf);
        Subject subject = Subject.getSubject(AccessController.getContext());
        if (UserGroupInformation.isSecurityEnabled()) {
            loginUgi = UserGroupInformation.getUGIFromSubject(subject);
        } else {
            UserGroupInformation.loginUserFromSubject(subject);
            loginUgi = UserGroupInformation.getLoginUser();
        }
        LOG.info("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
        // Proxy users are not currently supported due to: https://issues.apache.org/jira/browse/HIVE-11089
    } catch (IOException e) {
        issues.add(getContext().createConfigIssue(Groups.HIVE.name(), null, Errors.HIVE_11, e.getMessage()));
    }

    try {
        issues.addAll(loginUgi.doAs(new PrivilegedExceptionAction<List<ConfigIssue>>() {
            @Override
            public List<ConfigIssue> run() {
                List<ConfigIssue> issues = new ArrayList<>();
                HiveMetaStoreClient client = null;
                try {
                    client = new HiveMetaStoreClient(hiveConf);

                    List<FieldSchema> columnNames = client.getFields(schema, tableName);
                    for (FieldSchema field : columnNames) {
                        columnsToFields.put(field.getName(), SDC_FIELD_SEP + field.getName());
                    }

                    Table table = client.getTable(schema, tableName);
                    List<FieldSchema> partitionKeys = table.getPartitionKeys();
                    for (FieldSchema field : partitionKeys) {
                        partitionsToFields.put(field.getName(), SDC_FIELD_SEP + field.getName());
                    }
                } catch (UnknownDBException e) {
                    issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "schema", Errors.HIVE_02,
                            schema));
                } catch (UnknownTableException e) {
                    issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "table", Errors.HIVE_03,
                            schema, tableName));
                } catch (MetaException e) {
                    issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveUrl", Errors.HIVE_05,
                            e.getMessage()));
                } catch (TException e) {
                    issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveUrl", Errors.HIVE_04,
                            e.getMessage()));
                } finally {
                    if (null != client) {
                        client.close();
                    }
                }
                return issues;
            }
        }));
    } catch (Error | IOException | InterruptedException e) {
        LOG.error("Received unknown error in validation: {}", e.toString(), e);
        issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "", Errors.HIVE_01, e.toString()));
    } catch (UndeclaredThrowableException e) {
        LOG.error("Received unknown error in validation: {}", e.toString(), e);
        issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "", Errors.HIVE_01,
                e.getUndeclaredThrowable().toString()));
    }

    // Now apply any custom mappings
    if (validColumnMappings(issues)) {
        for (FieldMappingConfig mapping : columnMappings) {
            LOG.debug("Custom mapping field {} to column {}", mapping.field, mapping.columnName);
            if (columnsToFields.containsKey(mapping.columnName)) {
                LOG.debug("Mapping field {} to column {}", mapping.field, mapping.columnName);
                columnsToFields.put(mapping.columnName, mapping.field);
            } else if (partitionsToFields.containsKey(mapping.columnName)) {
                LOG.debug("Mapping field {} to partition {}", mapping.field, mapping.columnName);
                partitionsToFields.put(mapping.columnName, mapping.field);
            }
        }
    }

    dataGeneratorFactory = createDataGeneratorFactory();

    // Note that cleanup is done synchronously by default while servicing .get
    hiveConnectionPool = CacheBuilder.newBuilder().maximumSize(10).expireAfterAccess(10, TimeUnit.MINUTES)
            .removalListener(new HiveConnectionRemovalListener()).build(new HiveConnectionLoader());

    recordWriterPool = CacheBuilder.newBuilder().maximumSize(10).expireAfterAccess(10, TimeUnit.MINUTES)
            .build(new HiveRecordWriterLoader());

    LOG.debug("Total issues: {}", issues.size());
    return issues;
}
From source file: com.streamsets.pipeline.stage.origin.hdfs.cluster.ClusterHdfsSource.java
License: Apache License
private void validateHadoopFS(List<ConfigIssue> issues) {
    boolean validHapoopFsUri = true;
    hadoopConf = getHadoopConfiguration(issues);
    String hdfsUriInConf;
    if (hdfsUri != null && !hdfsUri.isEmpty()) {
        hadoopConf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, hdfsUri);
    } else {
        hdfsUriInConf = hadoopConf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
        if (hdfsUriInConf == null) {
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri", Errors.HADOOPFS_19));
            return;
        } else {
            hdfsUri = hdfsUriInConf;
        }
    }
    if (hdfsUri.contains("://")) {
        try {
            URI uri = new URI(hdfsUri);
            if (!"hdfs".equals(uri.getScheme())) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri", Errors.HADOOPFS_12,
                        hdfsUri, uri.getScheme()));
                validHapoopFsUri = false;
            } else if (uri.getAuthority() == null) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri", Errors.HADOOPFS_13,
                        hdfsUri));
                validHapoopFsUri = false;
            }
        } catch (Exception ex) {
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri", Errors.HADOOPFS_22,
                    hdfsUri, ex.getMessage(), ex));
            validHapoopFsUri = false;
        }
    } else {
        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri", Errors.HADOOPFS_02, hdfsUri));
        validHapoopFsUri = false;
    }
    StringBuilder logMessage = new StringBuilder();
    try {
        // forcing UGI to initialize with the security settings from the stage
        UserGroupInformation.setConfiguration(hadoopConf);
        Subject subject = Subject.getSubject(AccessController.getContext());
        if (UserGroupInformation.isSecurityEnabled()) {
            loginUgi = UserGroupInformation.getUGIFromSubject(subject);
        } else {
            UserGroupInformation.loginUserFromSubject(subject);
            loginUgi = UserGroupInformation.getLoginUser();
        }
        LOG.info("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
        if (hdfsKerberos) {
            logMessage.append("Using Kerberos");
            if (loginUgi.getAuthenticationMethod() != UserGroupInformation.AuthenticationMethod.KERBEROS) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsKerberos",
                        Errors.HADOOPFS_00, loginUgi.getAuthenticationMethod(),
                        UserGroupInformation.AuthenticationMethod.KERBEROS));
            }
        } else {
            logMessage.append("Using Simple");
            hadoopConf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                    UserGroupInformation.AuthenticationMethod.SIMPLE.name());
        }
        if (validHapoopFsUri) {
            getUGI().doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    try (FileSystem fs = getFileSystemForInitDestroy()) {
                        //to trigger the close
                    }
                    return null;
                }
            });
        }
    } catch (Exception ex) {
        LOG.info("Error connecting to FileSystem: " + ex, ex);
        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_11, hdfsUri,
                String.valueOf(ex), ex));
    }
    LOG.info("Authentication Config: " + logMessage);
}