Example usage for org.apache.hadoop.security UserGroupInformation setConfiguration

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.setConfiguration.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void setConfiguration(Configuration conf) 

Document

Set the static configuration for UGI.
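
The snippet below is a minimal, self-contained sketch (not taken from the projects listed under Usage) of the typical call sequence: build a Configuration, choose the authentication mechanism, install it with setConfiguration, and, when Kerberos is enabled, log in from a keytab. The principal name and keytab path are placeholders.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiConfigurationExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // "simple" is the Hadoop default; "kerberos" enables secure authentication.
        conf.set("hadoop.security.authentication", "kerberos");
        // setConfiguration is static: it applies to all subsequent UGI operations in this JVM.
        UserGroupInformation.setConfiguration(conf);
        // A keytab login is only meaningful once Kerberos authentication is configured.
        // The principal and keytab path are hypothetical placeholders.
        UserGroupInformation.loginUserFromKeytab("service/host@EXAMPLE.COM",
                "/etc/security/keytabs/service.keytab");
        System.out.println("Logged in as " + UserGroupInformation.getLoginUser().getUserName());
    }
}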

Usage

From source file: org.apache.nifi.util.hive.HiveConfigurator.java

License: Apache License

public void preload(Configuration configuration) {
    try {
        FileSystem.get(configuration).close();
        UserGroupInformation.setConfiguration(configuration);
    } catch (IOException ioe) {
        // Suppress exception as future uses of this configuration will fail
    }
}

From source file: org.apache.oozie.action.hadoop.LauncherAM.java

License: Apache License

public static void main(String[] args) throws Exception {
    final LocalFsOperations localFsOperations = new LocalFsOperations();
    final Configuration launcherConf = readLauncherConfiguration(localFsOperations);
    UserGroupInformation.setConfiguration(launcherConf);
    // MRAppMaster adds this call as well, but it's included only in Hadoop 2.9+
    // SecurityUtil.setConfiguration(launcherConf);
    UserGroupInformation ugi = getUserGroupInformation(launcherConf);
    printTokens("Executing Oozie Launcher with tokens:", ugi.getTokens());
    // Executing code inside a doAs with an ugi equipped with correct tokens.
    ugi.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
            LauncherAM launcher = new LauncherAM(new AMRMClientAsyncFactory(), new AMRMCallBackHandler(),
                    new HdfsOperations(new SequenceFileWriterFactory()), new LocalFsOperations(),
                    new PrepareActionsHandler(new LauncherURIHandlerFactory(null)),
                    new LauncherAMCallbackNotifierFactory(), new LauncherSecurityManager(),
                    sysenv.getenv(ApplicationConstants.Environment.CONTAINER_ID.name()), launcherConf);
            launcher.run();
            return null;
        }
    });

}

From source file: org.apache.oozie.service.HadoopAccessorService.java

License: Apache License

public void init(Configuration conf) throws ServiceException {
    for (String name : ConfigurationService.getStrings(conf, JOB_TRACKER_WHITELIST)) {
        String tmp = name.toLowerCase().trim();
        if (tmp.length() == 0) {
            continue;
        }
        jobTrackerWhitelist.add(tmp);
    }
    LOG.info("JOB_TRACKER_WHITELIST :" + jobTrackerWhitelist.toString() + ", Total entries :"
            + jobTrackerWhitelist.size());
    for (String name : ConfigurationService.getStrings(conf, NAME_NODE_WHITELIST)) {
        String tmp = name.toLowerCase().trim();
        if (tmp.length() == 0) {
            continue;
        }
        nameNodeWhitelist.add(tmp);
    }
    LOG.info("NAME_NODE_WHITELIST :" + nameNodeWhitelist.toString() + ", Total entries :"
            + nameNodeWhitelist.size());

    boolean kerberosAuthOn = ConfigurationService.getBoolean(conf, KERBEROS_AUTH_ENABLED);
    LOG.info("Oozie Kerberos Authentication [{0}]", (kerberosAuthOn) ? "enabled" : "disabled");
    if (kerberosAuthOn) {
        kerberosInit(conf);
    } else {
        Configuration ugiConf = new Configuration();
        ugiConf.set("hadoop.security.authentication", "simple");
        UserGroupInformation.setConfiguration(ugiConf);
    }

    if (ugiService == null) { //for testing purposes, see XFsTestCase
        this.ugiService = new UserGroupInformationService();
    }

    loadHadoopConfigs(conf);
    preLoadActionConfigs(conf);

    supportedSchemes = new HashSet<String>();
    String[] schemesFromConf = ConfigurationService.getStrings(conf, SUPPORTED_FILESYSTEMS);
    if (schemesFromConf != null) {
        for (String scheme : schemesFromConf) {
            scheme = scheme.trim();
            // If user gives "*", supportedSchemes will be empty, so that checking is not done i.e. all schemes allowed
            if (scheme.equals("*")) {
                if (schemesFromConf.length > 1) {
                    throw new ServiceException(ErrorCode.E0100, getClass().getName(), SUPPORTED_FILESYSTEMS
                            + " should contain either only wildcard or explicit list, not both");
                }
                allSchemesSupported = true;
            }
            supportedSchemes.add(scheme);
        }
    }
}

From source file: org.apache.oozie.service.HadoopAccessorService.java

License: Apache License

private void kerberosInit(Configuration serviceConf) throws ServiceException {
    try {
        String keytabFile = ConfigurationService.get(serviceConf, KERBEROS_KEYTAB).trim();
        if (keytabFile.length() == 0) {
            throw new ServiceException(ErrorCode.E0026, KERBEROS_KEYTAB);
        }
        String principal = SecurityUtil.getServerPrincipal(
                serviceConf.get(KERBEROS_PRINCIPAL, "oozie/localhost@LOCALHOST"),
                InetAddress.getLocalHost().getCanonicalHostName());
        if (principal.length() == 0) {
            throw new ServiceException(ErrorCode.E0026, KERBEROS_PRINCIPAL);
        }
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);
        UserGroupInformation.loginUserFromKeytab(principal, keytabFile);
        LOG.info("Got Kerberos ticket, keytab [{0}], Oozie principal principal [{1}]", keytabFile, principal);
    } catch (ServiceException ex) {
        throw ex;
    } catch (Exception ex) {
        throw new ServiceException(ErrorCode.E0100, getClass().getName(), ex.getMessage(), ex);
    }
}

From source file: org.apache.oozie.service.KerberosHadoopAccessorService.java

License: Open Source License

public void init(Configuration serviceConf) throws ServiceException {
    boolean kerberosAuthOn = serviceConf.getBoolean(KERBEROS_AUTH_ENABLED, true);
    XLog.getLog(getClass()).info("Oozie Kerberos Authentication [{0}]",
            (kerberosAuthOn) ? "enabled" : "disabled");
    if (kerberosAuthOn) {
        try {
            String keytabFile = serviceConf
                    .get(KERBEROS_KEYTAB, System.getProperty("user.home") + "/oozie.keytab").trim();
            if (keytabFile.length() == 0) {
                throw new ServiceException(ErrorCode.E0026, KERBEROS_KEYTAB);
            }
            String principal = serviceConf.get(KERBEROS_PRINCIPAL, "oozie/localhost@LOCALHOST");
            if (principal.length() == 0) {
                throw new ServiceException(ErrorCode.E0026, KERBEROS_PRINCIPAL);
            }
            Configuration conf = new Configuration();
            conf.set("hadoop.security.authentication", "kerberos");
            UserGroupInformation.setConfiguration(conf);
            UserGroupInformation.loginUserFromKeytab(principal, keytabFile);
            XLog.getLog(getClass()).info("Got Kerberos ticket, keytab [{0}], Oozie principal principal [{1}]",
                    keytabFile, principal);
        } catch (ServiceException ex) {
            throw ex;
        } catch (Exception ex) {
            throw new ServiceException(ErrorCode.E0100, getClass().getName(), ex.getMessage(), ex);
        }
    } else {
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "simple");
        UserGroupInformation.setConfiguration(conf);
    }
    localRealm = serviceConf.get("local.realm");

    userUgiMap = new ConcurrentHashMap<String, UserGroupInformation>();
}

From source file: org.apache.phoenix.end2end.HttpParamImpersonationQueryServerIT.java

License: Apache License

/**
 * Setup and start kerberos, hbase
 */
@BeforeClass
public static void setUp() throws Exception {
    final Configuration conf = UTIL.getConfiguration();
    // Ensure the dirs we need are created/empty
    ensureIsEmptyDirectory(TEMP_DIR);
    ensureIsEmptyDirectory(KEYTAB_DIR);
    KEYTAB = new File(KEYTAB_DIR, "test.keytab");
    // Start a MiniKDC
    KDC = UTIL.setupMiniKdc(KEYTAB);
    // Create a service principal and spnego principal in one keytab
    // NB. Due to some apparent limitations between HDFS and HBase in the same JVM, trying to
    //     use separate identities for HBase and HDFS results in a GSS initiate error. The quick
    //     solution is to just use a single "service" principal instead of "hbase" and "hdfs"
    //     (or "dn" and "nn") per usual.
    KDC.createPrincipal(KEYTAB, SPNEGO_PRINCIPAL, SERVICE_PRINCIPAL);
    // Start ZK by hand
    UTIL.startMiniZKCluster();

    // Create a number of unprivileged users
    createUsers(2);

    // Set configuration for HBase
    HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + KDC.getRealm());
    HBaseKerberosUtils.setSecuredConfiguration(conf);
    setHdfsSecuredConfiguration(conf);
    UserGroupInformation.setConfiguration(conf);
    conf.setInt(HConstants.MASTER_PORT, 0);
    conf.setInt(HConstants.MASTER_INFO_PORT, 0);
    conf.setInt(HConstants.REGIONSERVER_PORT, 0);
    conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
    conf.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName(),
            TokenProvider.class.getName());

    // Secure Phoenix setup
    conf.set("phoenix.queryserver.kerberos.principal", SPNEGO_PRINCIPAL);
    conf.set("phoenix.queryserver.keytab.file", KEYTAB.getAbsolutePath());
    conf.setBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN, true);
    conf.setInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
    // Required so that PQS can impersonate the end-users to HBase
    conf.set("hadoop.proxyuser.HTTP.groups", "*");
    conf.set("hadoop.proxyuser.HTTP.hosts", "*");
    // user1 is allowed to impersonate others, user2 is not
    conf.set("hadoop.proxyuser.user1.groups", "*");
    conf.set("hadoop.proxyuser.user1.hosts", "*");
    conf.setBoolean(QueryServices.QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR_ATTRIB, true);

    // Clear the cached singletons so we can inject our own.
    InstanceResolver.clearSingletons();
    // Make sure the ConnectionInfo doesn't try to pull a default Configuration
    InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
        @Override
        public Configuration getConfiguration() {
            return conf;
        }

        @Override
        public Configuration getConfiguration(Configuration confToClone) {
            Configuration copy = new Configuration(conf);
            copy.addResource(confToClone);
            return copy;
        }
    });
    updateDefaultRealm();

    // Start HDFS
    UTIL.startMiniDFSCluster(1);
    // Use LocalHBaseCluster to avoid HBaseTestingUtility from doing something wrong
    // NB. I'm not actually sure what HTU does incorrectly, but this was pulled from some test
    //     classes in HBase itself. I couldn't get HTU to work myself (2017/07/06)
    Path rootdir = UTIL.getDataTestDirOnTestFS(HttpParamImpersonationQueryServerIT.class.getSimpleName());
    FSUtils.setRootDir(conf, rootdir);
    HBASE_CLUSTER = new LocalHBaseCluster(conf, 1);
    HBASE_CLUSTER.startup();

    // Then fork a thread with PQS in it.
    startQueryServer();
}

From source file: org.apache.phoenix.end2end.SecureQueryServerIT.java

License: Apache License

/**
 * Setup and start kerberos, hbase
 */
@BeforeClass
public static void setUp() throws Exception {
    final Configuration conf = UTIL.getConfiguration();
    // Ensure the dirs we need are created/empty
    ensureIsEmptyDirectory(TEMP_DIR);
    ensureIsEmptyDirectory(KEYTAB_DIR);
    KEYTAB = new File(KEYTAB_DIR, "test.keytab");
    // Start a MiniKDC
    KDC = UTIL.setupMiniKdc(KEYTAB);
    // Create a service principal and spnego principal in one keytab
    // NB. Due to some apparent limitations between HDFS and HBase in the same JVM, trying to
    //     use separate identities for HBase and HDFS results in a GSS initiate error. The quick
    //     solution is to just use a single "service" principal instead of "hbase" and "hdfs"
    //     (or "dn" and "nn") per usual.
    KDC.createPrincipal(KEYTAB, SPNEGO_PRINCIPAL, SERVICE_PRINCIPAL);
    // Start ZK by hand
    UTIL.startMiniZKCluster();

    // Create a number of unprivileged users
    createUsers(3);

    // Set configuration for HBase
    HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + KDC.getRealm());
    HBaseKerberosUtils.setSecuredConfiguration(conf);
    setHdfsSecuredConfiguration(conf);
    UserGroupInformation.setConfiguration(conf);
    conf.setInt(HConstants.MASTER_PORT, 0);
    conf.setInt(HConstants.MASTER_INFO_PORT, 0);
    conf.setInt(HConstants.REGIONSERVER_PORT, 0);
    conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, TokenProvider.class.getName());

    // Secure Phoenix setup
    conf.set("phoenix.queryserver.kerberos.principal", SPNEGO_PRINCIPAL);
    conf.set("phoenix.queryserver.keytab.file", KEYTAB.getAbsolutePath());
    conf.setBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN, true);
    conf.setInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
    // Required so that PQS can impersonate the end-users to HBase
    conf.set("hadoop.proxyuser.HTTP.groups", "*");
    conf.set("hadoop.proxyuser.HTTP.hosts", "*");

    // Clear the cached singletons so we can inject our own.
    InstanceResolver.clearSingletons();
    // Make sure the ConnectionInfo doesn't try to pull a default Configuration
    InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
        @Override
        public Configuration getConfiguration() {
            return conf;
        }

        @Override
        public Configuration getConfiguration(Configuration confToClone) {
            Configuration copy = new Configuration(conf);
            copy.addResource(confToClone);
            return copy;
        }
    });
    updateDefaultRealm();

    // Start HDFS
    UTIL.startMiniDFSCluster(1);
    // Use LocalHBaseCluster to avoid HBaseTestingUtility from doing something wrong
    // NB. I'm not actually sure what HTU does incorrectly, but this was pulled from some test
    //     classes in HBase itself. I couldn't get HTU to work myself (2017/07/06)
    Path rootdir = UTIL.getDataTestDirOnTestFS(SecureQueryServerIT.class.getSimpleName());
    FSUtils.setRootDir(conf, rootdir);
    HBASE_CLUSTER = new LocalHBaseCluster(conf, 1);
    HBASE_CLUSTER.startup();

    // Then fork a thread with PQS in it.
    startQueryServer();
}

From source file: org.apache.phoenix.jdbc.SecureUserConnectionsIT.java

License: Apache License

@BeforeClass
public static void setupKdc() throws Exception {
    ensureIsEmptyDirectory(KDC_DIR);
    ensureIsEmptyDirectory(KEYTAB_DIR);
    // Create and start the KDC. MiniKDC appears to have a race condition in how it does
    // port allocation (with apache-ds). See PHOENIX-3287.
    boolean started = false;
    for (int i = 0; !started && i < KDC_START_ATTEMPTS; i++) {
        Properties kdcConf = MiniKdc.createConf();
        kdcConf.put(MiniKdc.DEBUG, true);
        KDC = new MiniKdc(kdcConf, KDC_DIR);
        try {
            KDC.start();
            started = true;
        } catch (Exception e) {
            LOG.warn("PHOENIX-3287: Failed to start KDC, retrying..", e);
        }
    }
    assertTrue("The embedded KDC failed to start successfully after " + KDC_START_ATTEMPTS + " attempts.",
            started);

    createUsers(NUM_USERS);
    createServiceUsers(NUM_USERS);

    final Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    conf.set(User.HBASE_SECURITY_CONF_KEY, "kerberos");
    conf.setBoolean(User.HBASE_SECURITY_AUTHORIZATION_CONF_KEY, true);
    UserGroupInformation.setConfiguration(conf);

    // Clear the cached singletons so we can inject our own.
    InstanceResolver.clearSingletons();
    // Make sure the ConnectionInfo doesn't try to pull a default Configuration
    InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {
        @Override
        public Configuration getConfiguration() {
            return conf;
        }

        @Override
        public Configuration getConfiguration(Configuration confToClone) {
            Configuration copy = new Configuration(conf);
            copy.addResource(confToClone);
            return copy;
        }
    });
    updateDefaultRealm();
}

From source file: org.apache.ranger.hadoop.client.HadoopFS.java

License: Apache License

private List<String> listFilesInternal(String baseDir, String fileMatching) {
    List<String> fileList = new ArrayList<String>();
    ClassLoader prevCl = Thread.currentThread().getContextClassLoader();
    String errMsg = " You can still save the repository and start creating "
            + "policies, but you would not be able to use autocomplete for "
            + "resource names. Check xa_portal.log for more info.";
    try {
        Thread.currentThread().setContextClassLoader(getConfigHolder().getClassLoader());
        String dirPrefix = (baseDir.endsWith("/") ? baseDir : (baseDir + "/"));
        String filterRegEx = null;
        if (fileMatching != null && fileMatching.trim().length() > 0) {
            filterRegEx = fileMatching.trim();
        }

        Configuration conf = new Configuration();
        UserGroupInformation.setConfiguration(conf);

        FileSystem fs = null;
        try {
            fs = FileSystem.get(conf);

            FileStatus[] fileStats = fs.listStatus(new Path(baseDir));
            if (fileStats != null) {
                for (FileStatus stat : fileStats) {
                    Path path = stat.getPath();
                    String pathComponent = path.getName();
                    if (filterRegEx == null) {
                        fileList.add(dirPrefix + pathComponent);
                    } else if (FilenameUtils.wildcardMatch(pathComponent, fileMatching)) {
                        fileList.add(dirPrefix + pathComponent);
                    }
                }
            }
        } catch (UnknownHostException uhe) {
            String msgDesc = "listFilesInternal: Unable to connect using given config parameters"
                    + " of Hadoop environment [" + getDataSource() + "].";
            HadoopException hdpException = new HadoopException(msgDesc, uhe);
            hdpException.generateResponseDataMap(false, getMessage(uhe), msgDesc + errMsg, null, null);
            throw hdpException;
        } catch (FileNotFoundException fne) {
            String msgDesc = "listFilesInternal: Unable to locate files using given config parameters "
                    + "of Hadoop environment [" + getDataSource() + "].";
            HadoopException hdpException = new HadoopException(msgDesc, fne);
            hdpException.generateResponseDataMap(false, getMessage(fne), msgDesc + errMsg, null, null);
            throw hdpException;
        } finally {
        }
    } catch (IOException ioe) {
        String msgDesc = "listFilesInternal: Unable to get listing of files for directory " + baseDir
                + "] from Hadoop environment [" + getDataSource() + "].";
        HadoopException hdpException = new HadoopException(msgDesc, ioe);
        hdpException.generateResponseDataMap(false, getMessage(ioe), msgDesc + errMsg, null, null);
        throw hdpException;

    } catch (IllegalArgumentException iae) {
        String msgDesc = "Unable to get listing of files for directory [" + baseDir
                + "] from Hadoop environment [" + getDataSource() + "].";
        HadoopException hdpException = new HadoopException(msgDesc, iae);
        hdpException.generateResponseDataMap(false, getMessage(iae), msgDesc + errMsg, null, null);
        throw hdpException;
    } finally {
        Thread.currentThread().setContextClassLoader(prevCl);
    }
    return fileList;
}

From source file: org.apache.ranger.services.hdfs.client.HdfsClient.java

License: Apache License

private List<String> listFilesInternal(String baseDir, String fileMatching, final List<String> pathList)
        throws HadoopException {
    List<String> fileList = new ArrayList<String>();
    String errMsg = " You can still save the repository and start creating "
            + "policies, but you would not be able to use autocomplete for "
            + "resource names. Check ranger_admin.log for more info.";
    try {
        String dirPrefix = (baseDir.endsWith("/") ? baseDir : (baseDir + "/"));
        String filterRegEx = null;
        if (fileMatching != null && fileMatching.trim().length() > 0) {
            filterRegEx = fileMatching.trim();
        }

        UserGroupInformation.setConfiguration(conf);

        FileSystem fs = null;
        try {
            fs = FileSystem.get(conf);

            Path basePath = new Path(baseDir);
            FileStatus[] fileStats = fs.listStatus(basePath);

            if (LOG.isDebugEnabled()) {
                LOG.debug("<== HdfsClient fileStatus : " + fileStats.length + " PathList :" + pathList);
            }

            if (fileStats != null) {
                if (fs.exists(basePath) && ArrayUtils.isEmpty(fileStats)) {
                    fileList.add(basePath.toString());
                } else {
                    for (FileStatus stat : fileStats) {
                        Path path = stat.getPath();
                        String pathComponent = path.getName();
                        String prefixedPath = dirPrefix + pathComponent;
                        if (pathList != null && pathList.contains(prefixedPath)) {
                            continue;
                        }
                        if (filterRegEx == null) {
                            fileList.add(prefixedPath);
                        } else if (FilenameUtils.wildcardMatch(pathComponent, fileMatching)) {
                            fileList.add(prefixedPath);
                        }
                    }
                }
            }
        } catch (UnknownHostException uhe) {
            String msgDesc = "listFilesInternal: Unable to connect using given config parameters"
                    + " of Hadoop environment [" + getSerivceName() + "].";
            HadoopException hdpException = new HadoopException(msgDesc, uhe);
            hdpException.generateResponseDataMap(false, getMessage(uhe), msgDesc + errMsg, null, null);
            if (LOG.isDebugEnabled()) {
                LOG.debug("<== HdfsClient listFilesInternal Error : " + uhe);
            }
            throw hdpException;
        } catch (FileNotFoundException fne) {
            String msgDesc = "listFilesInternal: Unable to locate files using given config parameters "
                    + "of Hadoop environment [" + getSerivceName() + "].";
            HadoopException hdpException = new HadoopException(msgDesc, fne);
            hdpException.generateResponseDataMap(false, getMessage(fne), msgDesc + errMsg, null, null);

            if (LOG.isDebugEnabled()) {
                LOG.debug("<== HdfsClient listFilesInternal Error : " + fne);
            }

            throw hdpException;
        }
    } catch (IOException ioe) {
        String msgDesc = "listFilesInternal: Unable to get listing of files for directory " + baseDir
                + fileMatching + "] from Hadoop environment [" + getSerivceName() + "].";
        HadoopException hdpException = new HadoopException(msgDesc, ioe);
        hdpException.generateResponseDataMap(false, getMessage(ioe), msgDesc + errMsg, null, null);
        if (LOG.isDebugEnabled()) {
            LOG.debug("<== HdfsClient listFilesInternal Error : " + ioe);
        }
        throw hdpException;

    } catch (IllegalArgumentException iae) {
        String msgDesc = "Unable to get listing of files for directory [" + baseDir
                + "] from Hadoop environment [" + getSerivceName() + "].";
        HadoopException hdpException = new HadoopException(msgDesc, iae);
        hdpException.generateResponseDataMap(false, getMessage(iae), msgDesc + errMsg, null, null);
        if (LOG.isDebugEnabled()) {
            LOG.debug("<== HdfsClient listFilesInternal Error : " + iae);
        }
        throw hdpException;
    }
    return fileList;
}