Example usage for org.apache.hadoop.hdfs DFSConfigKeys DFS_NAMENODE_USER_NAME_KEY

Introduction

This page collects usage examples for the org.apache.hadoop.hdfs.DFSConfigKeys field DFS_NAMENODE_USER_NAME_KEY, drawn from open-source projects.

Prototype

String DFS_NAMENODE_USER_NAME_KEY

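In Hadoop's DFSConfigKeys this field holds the configuration key dfs.namenode.kerberos.principal, the Kerberos principal of the NameNode; newer releases deprecate it in favor of DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY. A minimal sketch of setting and reading the key (the EXAMPLE.COM realm is a placeholder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class NameNodePrincipalExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // _HOST is expanded by Hadoop's security code to the local hostname.
        conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "hdfs/_HOST@EXAMPLE.COM");
        // Read the principal back, defaulting to the empty string when unset.
        String principal = conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "");
        System.out.println("NameNode principal: " + principal);
    }
}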

Usage

From source file: co.cask.cdap.operations.hdfs.HDFSInfo.java

License: Apache License
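
This snippet locates the active NameNode of an HA pair: it reads the NameNode's Kerberos principal from the configuration, copies it into the common security service key, then polls each NameNode's HA status over RPC and converts the active node's RPC address to an HTTP URL.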

@Nullable
private URL getHAWebURL() throws IOException {
    String activeNamenode = null;
    String nameService = getNameService();
    HdfsConfiguration hdfsConf = new HdfsConfiguration(conf);
    String nameNodePrincipal = conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "");

    hdfsConf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, nameNodePrincipal);

    for (String nnId : DFSUtil.getNameNodeIds(conf, nameService)) {
        HAServiceTarget haServiceTarget = new NNHAServiceTarget(hdfsConf, nameService, nnId);
        HAServiceProtocol proxy = haServiceTarget.getProxy(hdfsConf, 10000);
        HAServiceStatus serviceStatus = proxy.getServiceStatus();
        if (HAServiceProtocol.HAServiceState.ACTIVE != serviceStatus.getState()) {
            continue;
        }
        activeNamenode = DFSUtil.getNamenodeServiceAddr(hdfsConf, nameService, nnId);
    }
    if (activeNamenode == null) {
        throw new IllegalStateException("Could not find an active namenode");
    }
    return rpcToHttpAddress(URI.create(activeNamenode));
}

From source file: com.facebook.presto.hive.HiveClientConfig.java

License: Apache License
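
Here the constant supplies the property name for a @Config setter, binding dfs.namenode.kerberos.principal directly to the Hive client's NameNode principal setting.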

@Config(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
public void setDfsNamenodeKerberosPrincipal(String dfsNamenodeKerberosPrincipal) {
    this.dfsNamenodeKerberosPrincipal = dfsNamenodeKerberosPrincipal;
}

From source file: com.streamsets.pipeline.lib.hdfs.common.HdfsBaseConfigBean.java

License: Apache License
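
When Kerberos is enabled, the bean defaults the NameNode principal to hdfs/_HOST@&lt;default realm&gt; and reports a config issue only if the key was not supplied explicitly; it then validates the Hadoop config directory and layers core-site.xml and hdfs-site.xml onto the Configuration.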

protected Configuration getHadoopConfiguration(Stage.Context context, List<Stage.ConfigIssue> issues) {
    Configuration conf = new Configuration();
    conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);
    // We close the file system ourselves in destroy(); if automatic close were
    // enabled, the shutdown hook run on an app kill could prevent us from
    // renaming files during the destroy call.
    //See https://issues.streamsets.com/browse/SDC-4057
    conf.setBoolean("fs.automatic.close", false);

    // See SDC-5451, we set hadoop.treat.subject.external automatically to take advantage of HADOOP-13805
    HadoopConfigurationUtils.configureHadoopTreatSubjectExternal(conf);

    if (hdfsKerberos) {
        conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                UserGroupInformation.AuthenticationMethod.KERBEROS.name());
        try {
            conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
                    "hdfs/_HOST@" + HadoopSecurityUtil.getDefaultRealm());
        } catch (Exception ex) {
            if (!hdfsConfigs.stream().anyMatch(i -> DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY.equals(i.key))) {
                issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_28,
                        ex.toString()));
            }
        }
    }
    if (hdfsConfDir != null && !hdfsConfDir.isEmpty()) {
        File hadoopConfigDir = new File(hdfsConfDir);
        if ((context.getExecutionMode() == ExecutionMode.CLUSTER_BATCH
                || context.getExecutionMode() == ExecutionMode.CLUSTER_YARN_STREAMING
                || context.getExecutionMode() == ExecutionMode.CLUSTER_MESOS_STREAMING)
                && hadoopConfigDir.isAbsolute()) {
            //Do not allow absolute hadoop config directory in cluster mode
            issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(), getConfigBeanPrefix() + "hdfsConfDir",
                    Errors.HADOOPFS_45, hdfsConfDir));
        } else {
            if (!hadoopConfigDir.isAbsolute()) {
                hadoopConfigDir = new File(context.getResourcesDirectory(), hdfsConfDir).getAbsoluteFile();
            }
            if (!hadoopConfigDir.exists()) {
                issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                        getConfigBeanPrefix() + "hdfsConfDir", Errors.HADOOPFS_25, hadoopConfigDir.getPath()));
            } else if (!hadoopConfigDir.isDirectory()) {
                issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                        getConfigBeanPrefix() + "hdfsConfDir", Errors.HADOOPFS_26, hadoopConfigDir.getPath()));
            } else {
                File coreSite = new File(hadoopConfigDir, "core-site.xml");
                if (coreSite.exists()) {
                    if (!coreSite.isFile()) {
                        issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                                getConfigBeanPrefix() + "hdfsConfDir", Errors.HADOOPFS_27, coreSite.getPath()));
                    }
                    conf.addResource(new Path(coreSite.getAbsolutePath()));
                }
                File hdfsSite = new File(hadoopConfigDir, "hdfs-site.xml");
                if (hdfsSite.exists()) {
                    if (!hdfsSite.isFile()) {
                        issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                                getConfigBeanPrefix() + "hdfsConfDir", Errors.HADOOPFS_27, hdfsSite.getPath()));
                    }
                    conf.addResource(new Path(hdfsSite.getAbsolutePath()));
                }
            }
        }
    } else {
        Optional<HadoopConfigBean> fsDefaultFS = hdfsConfigs.stream()
                .filter(item -> CommonConfigurationKeys.FS_DEFAULT_NAME_KEY.equals(item.key)).findFirst();
        if (StringUtils.isEmpty(hdfsUri) && !fsDefaultFS.isPresent()) {
            // No URI, no config dir, and no fs.defaultFS config param
            // Avoid defaulting to writing to file:/// (SDC-5143)
            issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(), getConfigBeanPrefix() + "hdfsUri",
                    Errors.HADOOPFS_61));
        }
    }

    for (HadoopConfigBean configBean : hdfsConfigs) {
        try {
            conf.set(configBean.key, configBean.value.get());
        } catch (StageException e) {
            issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(), getConfigBeanPrefix() + "hdfsConfigs",
                    Errors.HADOOPFS_62, e.toString()));
        }
    }

    return conf;
}

From source file: com.streamsets.pipeline.stage.destination.hdfs.HdfsTarget.java

License: Apache License
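
A close variant of the previous bean: the same default-principal fallback (here via KerberosUtil), with absolute Hadoop config directories rejected when the target runs in cluster mode.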

Configuration getHadoopConfiguration(List<ConfigIssue> issues) {
    Configuration conf = new Configuration();
    conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);
    if (hdfsKerberos) {
        conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                UserGroupInformation.AuthenticationMethod.KERBEROS.name());
        try {
            conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "hdfs/_HOST@" + KerberosUtil.getDefaultRealm());
        } catch (Exception ex) {
            if (!hdfsConfigs.containsKey(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_28,
                        ex.toString()));
            }
        }
    }
    if (hadoopConfDir != null && !hadoopConfDir.isEmpty()) {
        File hadoopConfigDir = new File(hadoopConfDir);
        if (getContext().isClusterMode() && hadoopConfigDir.isAbsolute()) {
            //Do not allow absolute hadoop config directory in cluster mode
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hadoopConfDir",
                    Errors.HADOOPFS_45, hadoopConfDir));
        } else {
            if (!hadoopConfigDir.isAbsolute()) {
                hadoopConfigDir = new File(getContext().getResourcesDirectory(), hadoopConfDir)
                        .getAbsoluteFile();
            }
            if (!hadoopConfigDir.exists()) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hadoopConfDir",
                        Errors.HADOOPFS_25, hadoopConfigDir.getPath()));
            } else if (!hadoopConfigDir.isDirectory()) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hadoopConfDir",
                        Errors.HADOOPFS_26, hadoopConfigDir.getPath()));
            } else {
                File coreSite = new File(hadoopConfigDir, "core-site.xml");
                if (coreSite.exists()) {
                    if (!coreSite.isFile()) {
                        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hadoopConfDir",
                                Errors.HADOOPFS_27, coreSite.getPath()));
                    }
                    conf.addResource(new Path(coreSite.getAbsolutePath()));
                }
                File hdfsSite = new File(hadoopConfigDir, "hdfs-site.xml");
                if (hdfsSite.exists()) {
                    if (!hdfsSite.isFile()) {
                        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hadoopConfDir",
                                Errors.HADOOPFS_27, hdfsSite.getPath()));
                    }
                    conf.addResource(new Path(hdfsSite.getAbsolutePath()));
                }
            }
        }
    }
    for (Map.Entry<String, String> config : hdfsConfigs.entrySet()) {
        conf.set(config.getKey(), config.getValue());
    }
    return conf;
}

From source file: com.streamsets.pipeline.stage.destination.hdfs.HdfsTargetConfigBean.java

License: Apache License
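
Essentially the same logic once more, here keyed off the pipeline's execution mode and prefixing config names with HDFS_TARGET_CONFIG_BEAN_PREFIX in the reported issues.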

private Configuration getHadoopConfiguration(Stage.Context context, List<Stage.ConfigIssue> issues) {
    Configuration conf = new Configuration();
    conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);
    if (hdfsKerberos) {
        conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                UserGroupInformation.AuthenticationMethod.KERBEROS.name());
        try {
            conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
                    "hdfs/_HOST@" + HadoopSecurityUtil.getDefaultRealm());
        } catch (Exception ex) {
            if (!hdfsConfigs.containsKey(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)) {
                issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_28,
                        ex.toString()));
            }
        }
    }
    if (hdfsConfDir != null && !hdfsConfDir.isEmpty()) {
        File hadoopConfigDir = new File(hdfsConfDir);
        if ((context.getExecutionMode() == ExecutionMode.CLUSTER_BATCH
                || context.getExecutionMode() == ExecutionMode.CLUSTER_YARN_STREAMING
                || context.getExecutionMode() == ExecutionMode.CLUSTER_MESOS_STREAMING)
                && hadoopConfigDir.isAbsolute()) {
            //Do not allow absolute hadoop config directory in cluster mode
            issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                    HDFS_TARGET_CONFIG_BEAN_PREFIX + "hdfsConfDir", Errors.HADOOPFS_45, hdfsConfDir));
        } else {
            if (!hadoopConfigDir.isAbsolute()) {
                hadoopConfigDir = new File(context.getResourcesDirectory(), hdfsConfDir).getAbsoluteFile();
            }
            if (!hadoopConfigDir.exists()) {
                issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                        HDFS_TARGET_CONFIG_BEAN_PREFIX + "hdfsConfDir", Errors.HADOOPFS_25,
                        hadoopConfigDir.getPath()));
            } else if (!hadoopConfigDir.isDirectory()) {
                issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                        HDFS_TARGET_CONFIG_BEAN_PREFIX + "hdfsConfDir", Errors.HADOOPFS_26,
                        hadoopConfigDir.getPath()));
            } else {
                File coreSite = new File(hadoopConfigDir, "core-site.xml");
                if (coreSite.exists()) {
                    if (!coreSite.isFile()) {
                        issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                                HDFS_TARGET_CONFIG_BEAN_PREFIX + "hdfsConfDir", Errors.HADOOPFS_27,
                                coreSite.getPath()));
                    }
                    conf.addResource(new Path(coreSite.getAbsolutePath()));
                }
                File hdfsSite = new File(hadoopConfigDir, "hdfs-site.xml");
                if (hdfsSite.exists()) {
                    if (!hdfsSite.isFile()) {
                        issues.add(context.createConfigIssue(Groups.HADOOP_FS.name(),
                                HDFS_TARGET_CONFIG_BEAN_PREFIX + "hdfsConfDir", Errors.HADOOPFS_27,
                                hdfsSite.getPath()));
                    }
                    conf.addResource(new Path(hdfsSite.getAbsolutePath()));
                }
            }
        }
    }
    for (Map.Entry<String, String> config : hdfsConfigs.entrySet()) {
        conf.set(config.getKey(), config.getValue());
    }
    return conf;
}

From source file: com.streamsets.pipeline.stage.destination.hdfs.metadataexecutor.HdfsConnectionConfig.java

License: Apache License
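
In this connection config, init() additionally falls back to fs.defaultFS when no HDFS URI was supplied, resolves the login and proxy-user UGIs, and creates the FileSystem instance inside a doAs() call.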

public void init(Stage.Context context, String prefix, List<Stage.ConfigIssue> issues) {
    conf = new Configuration();
    conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);

    if (hdfsKerberos) {
        conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                UserGroupInformation.AuthenticationMethod.KERBEROS.name());
        try {
            conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
                    "hdfs/_HOST@" + HadoopSecurityUtil.getDefaultRealm());
        } catch (Exception ex) {
            if (!hdfsConfigs.containsKey(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)) {
                issues.add(context.createConfigIssue(Groups.HDFS.name(), null,
                        HdfsMetadataErrors.HDFS_METADATA_001, ex.toString()));
            }
        }
    }

    if (hdfsConfDir != null && !hdfsConfDir.isEmpty()) {
        File hadoopConfigDir = new File(hdfsConfDir);
        if (!hadoopConfigDir.isAbsolute()) {
            hadoopConfigDir = new File(context.getResourcesDirectory(), hdfsConfDir).getAbsoluteFile();
        }
        if (!hadoopConfigDir.exists()) {
            issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                    HdfsMetadataErrors.HDFS_METADATA_002, hadoopConfigDir.getPath()));
        } else if (!hadoopConfigDir.isDirectory()) {
            issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                    HdfsMetadataErrors.HDFS_METADATA_003, hadoopConfigDir.getPath()));
        } else {
            File coreSite = new File(hadoopConfigDir, "core-site.xml");
            if (coreSite.exists()) {
                if (!coreSite.isFile()) {
                    issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                            HdfsMetadataErrors.HDFS_METADATA_004, coreSite.getPath()));
                }
                conf.addResource(new Path(coreSite.getAbsolutePath()));
            }
            File hdfsSite = new File(hadoopConfigDir, "hdfs-site.xml");
            if (hdfsSite.exists()) {
                if (!hdfsSite.isFile()) {
                    issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                            HdfsMetadataErrors.HDFS_METADATA_004, hdfsSite.getPath()));
                }
                conf.addResource(new Path(hdfsSite.getAbsolutePath()));
            }
        }
    }

    // Unless the user specified a non-empty HDFS URI, fall back to the value of fs.defaultFS.
    if (StringUtils.isEmpty(hdfsUri)) {
        hdfsUri = conf.get("fs.defaultFS");
    }

    for (Map.Entry<String, String> config : hdfsConfigs.entrySet()) {
        conf.set(config.getKey(), config.getValue());
    }

    try {
        loginUgi = HadoopSecurityUtil.getLoginUser(conf);
        userUgi = HadoopSecurityUtil.getProxyUser(hdfsUser, context, loginUgi, issues, Groups.HDFS.name(),
                JOIN.join(prefix, "hdfsUser"));
    } catch (IOException e) {
        LOG.error("Can't create UGI", e);
        issues.add(context.createConfigIssue(Groups.HDFS.name(), null, HdfsMetadataErrors.HDFS_METADATA_005,
                e.getMessage(), e));
    }

    if (!issues.isEmpty()) {
        return;
    }

    try {
        fs = getUGI().doAs(
                (PrivilegedExceptionAction<FileSystem>) () -> FileSystem.newInstance(new URI(hdfsUri), conf));
    } catch (Exception ex) {
        LOG.error("Can't retrieve FileSystem instance", ex);
        issues.add(context.createConfigIssue(Groups.HDFS.name(), null, HdfsMetadataErrors.HDFS_METADATA_005,
                ex.getMessage(), ex));
    }
}

From source file: com.streamsets.pipeline.stage.destination.hdfs.metadataxecutor.HdfsConnectionConfig.java

License: Apache License
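
An earlier revision of the same class (the misspelled metadataxecutor package name is preserved from the source): it resolves only the login UGI and uses an anonymous PrivilegedExceptionAction rather than a lambda.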

public void init(Stage.Context context, String prefix, List<Stage.ConfigIssue> issues) {
    conf = new Configuration();
    conf.setClass("fs.file.impl", RawLocalFileSystem.class, FileSystem.class);

    if (hdfsKerberos) {
        conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                UserGroupInformation.AuthenticationMethod.KERBEROS.name());
        try {
            conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
                    "hdfs/_HOST@" + HadoopSecurityUtil.getDefaultRealm());
        } catch (Exception ex) {
            if (!hdfsConfigs.containsKey(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)) {
                issues.add(context.createConfigIssue(Groups.HDFS.name(), null,
                        HdfsMetadataErrors.HDFS_METADATA_001, ex.toString()));
            }
        }
    }

    if (hdfsConfDir != null && !hdfsConfDir.isEmpty()) {
        File hadoopConfigDir = new File(hdfsConfDir);
        if (!hadoopConfigDir.isAbsolute()) {
            hadoopConfigDir = new File(context.getResourcesDirectory(), hdfsConfDir).getAbsoluteFile();
        }
        if (!hadoopConfigDir.exists()) {
            issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                    HdfsMetadataErrors.HDFS_METADATA_002, hadoopConfigDir.getPath()));
        } else if (!hadoopConfigDir.isDirectory()) {
            issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                    HdfsMetadataErrors.HDFS_METADATA_003, hadoopConfigDir.getPath()));
        } else {
            File coreSite = new File(hadoopConfigDir, "core-site.xml");
            if (coreSite.exists()) {
                if (!coreSite.isFile()) {
                    issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                            HdfsMetadataErrors.HDFS_METADATA_004, coreSite.getPath()));
                }
                conf.addResource(new Path(coreSite.getAbsolutePath()));
            }
            File hdfsSite = new File(hadoopConfigDir, "hdfs-site.xml");
            if (hdfsSite.exists()) {
                if (!hdfsSite.isFile()) {
                    issues.add(context.createConfigIssue(Groups.HDFS.name(), JOIN.join(prefix, "hdfsConfDir"),
                            HdfsMetadataErrors.HDFS_METADATA_004, hdfsSite.getPath()));
                }
                conf.addResource(new Path(hdfsSite.getAbsolutePath()));
            }
        }
    }

    // Unless the user specified a non-empty HDFS URI, fall back to the value of fs.defaultFS.
    if (StringUtils.isEmpty(hdfsUri)) {
        hdfsUri = conf.get("fs.defaultFS");
    }

    for (Map.Entry<String, String> config : hdfsConfigs.entrySet()) {
        conf.set(config.getKey(), config.getValue());
    }

    try {
        loginUgi = HadoopSecurityUtil.getLoginUser(conf);
    } catch (IOException e) {
        LOG.error("Can't create login UGI", e);
        issues.add(context.createConfigIssue(Groups.HDFS.name(), null, HdfsMetadataErrors.HDFS_METADATA_005,
                e.getMessage(), e));
    }

    if (!issues.isEmpty()) {
        return;
    }

    try {
        fs = getUGI().doAs(new PrivilegedExceptionAction<FileSystem>() {
            @Override
            public FileSystem run() throws Exception {
                return FileSystem.newInstance(new URI(hdfsUri), conf);
            }
        });
    } catch (Exception ex) {
        LOG.error("Can't retrieve FileSystem instance", ex);
        issues.add(context.createConfigIssue(Groups.HDFS.name(), null, HdfsMetadataErrors.HDFS_METADATA_005,
                ex.getMessage(), ex));
    }
}

From source file: com.streamsets.pipeline.stage.origin.hdfs.cluster.ClusterHdfsSource.java

License: Apache License
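
This cluster origin applies the same principal fallback and, because the pipeline runs as a cluster job, also loads yarn-site.xml and mapred-site.xml from the config directory.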

Configuration getHadoopConfiguration(List<ConfigIssue> issues) {
    Configuration conf = new Configuration();
    if (hdfsKerberos) {
        conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                UserGroupInformation.AuthenticationMethod.KERBEROS.name());
        try {
            conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "hdfs/_HOST@" + KerberosUtil.getDefaultRealm());
        } catch (Exception ex) {
            if (!hdfsConfigs.containsKey(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_28,
                        ex.getMessage()));
            }
        }
    }
    if (hadoopConfDir != null && !hadoopConfDir.isEmpty()) {
        File hadoopConfigDir = new File(hadoopConfDir);
        if (hadoopConfigDir.isAbsolute()) {
            // Do not allow absolute hadoop config directory in cluster mode
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hadoopConfDir",
                    Errors.HADOOPFS_29, hadoopConfDir));
        } else {
            hadoopConfigDir = new File(getContext().getResourcesDirectory(), hadoopConfDir).getAbsoluteFile();
        }
        if (!hadoopConfigDir.exists()) {
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsConfDir",
                    Errors.HADOOPFS_25, hadoopConfigDir.getPath()));
        } else if (!hadoopConfigDir.isDirectory()) {
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsConfDir",
                    Errors.HADOOPFS_26, hadoopConfigDir.getPath()));
        } else {
            File coreSite = new File(hadoopConfigDir, "core-site.xml");
            if (coreSite.exists()) {
                if (!coreSite.isFile()) {
                    issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsConfDir",
                            Errors.HADOOPFS_27, coreSite.getPath()));
                }
                conf.addResource(new Path(coreSite.getAbsolutePath()));
            }
            File hdfsSite = new File(hadoopConfigDir, "hdfs-site.xml");
            if (hdfsSite.exists()) {
                if (!hdfsSite.isFile()) {
                    issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsConfDir",
                            Errors.HADOOPFS_27, hdfsSite.getPath()));
                }
                conf.addResource(new Path(hdfsSite.getAbsolutePath()));
            }
            File yarnSite = new File(hadoopConfigDir, "yarn-site.xml");
            if (yarnSite.exists()) {
                if (!yarnSite.isFile()) {
                    issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsConfDir",
                            Errors.HADOOPFS_27, yarnSite.getPath()));
                }
                conf.addResource(new Path(yarnSite.getAbsolutePath()));
            }
            File mapredSite = new File(hadoopConfigDir, "mapred-site.xml");
            if (mapredSite.exists()) {
                if (!mapredSite.isFile()) {
                    issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsConfDir",
                            Errors.HADOOPFS_27, mapredSite.getPath()));
                }
                conf.addResource(new Path(mapredSite.getAbsolutePath()));
            }
        }
    }
    for (Map.Entry<String, String> config : hdfsConfigs.entrySet()) {
        conf.set(config.getKey(), config.getValue());
    }
    return conf;
}

From source file: common.NameNode.java

License: Apache License
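
Here the key names the principal the NameNode itself logs in as: DFSUtil.login() reads the keytab path and principal from the two configuration keys before the node is initialized.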

protected NameNode(Configuration conf, NamenodeRole role) throws IOException {
    UserGroupInformation.setConfiguration(conf);
    DFSUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY);

    this.role = role;
    try {
        initialize(conf);
    } catch (IOException e) {
        this.stop();
        throw e;
    }
}

From source file: org.apache.hoya.core.build.InstanceBuilder.java

License: Apache License
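
propagatePrincipals() reads the NameNode principal from the live configuration and, when set, republishes it under the site-XML option prefix so the deployed application sees the same principal.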

/**
 * Propagate any critical principals from the current site config down to the HBase one.
 */
public void propagatePrincipals() {
    String dfsPrincipal = conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY);
    if (dfsPrincipal != null) {
        String siteDfsPrincipal = OptionKeys.SITE_XML_PREFIX + DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY;
        instanceDescription.getAppConfOperations().set(siteDfsPrincipal, dfsPrincipal);
    }
}