Example usage for org.apache.hadoop.hdfs DFSConfigKeys DFS_NAMESERVICES

List of usage examples for org.apache.hadoop.hdfs DFSConfigKeys DFS_NAMESERVICES

Introduction

In this page you can find the example usage for org.apache.hadoop.hdfs DFSConfigKeys DFS_NAMESERVICES.

Prototype

String DFS_NAMESERVICES

To view the source code for org.apache.hadoop.hdfs DFSConfigKeys DFS_NAMESERVICES, click the source link below.

Click Source Link

Usage

From source file:co.cask.cdap.operations.hdfs.HDFSInfo.java

License:Apache License

@Nullable
private String getNameService() {
    // Nameservices configured for HDFS; absent unless HA/federation is set up.
    Collection<String> services = conf.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES);
    switch (services.size()) {
        case 0:
            // Return null when no nameservices are configured, so this can be used
            // in methods like HAUtil.isHAEnabled().
            return null;
        case 1:
            return Iterables.getOnlyElement(services);
        default:
            throw new IllegalStateException(
                    "Found multiple nameservices configured in HDFS. CDAP currently does not support "
                            + "HDFS Federation.");
    }
}

From source file:co.cask.cdap.operations.hdfs.HDFSNodes.java

License:Apache License

@Nullable
private String getNameService() {
    // Nameservices are only present when HDFS HA (or federation) is configured.
    Collection<String> configured = conf.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES);
    if (configured.size() > 1) {
        throw new IllegalStateException(
                "Found multiple nameservices configured in HDFS. CDAP currently does not support "
                        + "HDFS Federation.");
    }
    // Empty -> null (no nameservice configured); otherwise the single entry.
    return configured.isEmpty() ? null : Iterables.getOnlyElement(configured);
}

From source file:org.apache.beam.sdk.io.hdfs.HadoopFileSystemRegistrar.java

License:Apache License

@Override
public Iterable<FileSystem> fromOptions(@Nonnull PipelineOptions options) {
    final List<Configuration> configurations =
            options.as(HadoopFileSystemOptions.class).getHdfsConfiguration();
    if (configurations == null) {
        // Nothing to register.
        return Collections.emptyList();
    }
    checkArgument(configurations.size() == 1,
            String.format("The %s currently only supports at most a single Hadoop configuration.",
                    HadoopFileSystemRegistrar.class.getSimpleName()));

    // Exactly one configuration at this point (checked above).
    final Configuration configuration = Iterables.getOnlyElement(configurations);
    final ImmutableList.Builder<FileSystem> fileSystems = ImmutableList.builder();
    final Set<String> seenSchemes = new HashSet<>();

    // Register a file system for the default FS scheme, when one is configured.
    final String defaultFs = configuration.get(org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY);
    if (defaultFs != null && !defaultFs.isEmpty()) {
        final String scheme = Objects.requireNonNull(URI.create(defaultFs).getScheme(), String
                .format("Empty scheme for %s value.", org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY));
        fileSystems.add(new HadoopFileSystem(scheme, configuration));
        seenSchemes.add(scheme);
    }

    // When HA nameservices are configured, also register the HA-capable schemes
    // that were not already covered by the default FS.
    final String nameServices = configuration.get(DFSConfigKeys.DFS_NAMESERVICES);
    if (nameServices != null && !nameServices.isEmpty()) {
        for (String scheme : HA_SCHEMES) {
            if (!seenSchemes.contains(scheme)) {
                fileSystems.add(new HadoopFileSystem(scheme, configuration));
            }
        }
    }
    return fileSystems.build();
}

From source file:org.apache.kylin.storage.hbase.HBaseConnection.java

License:Apache License

public static void addHBaseClusterNNHAConfiguration(Configuration conf) {
    // Location of the HBase cluster's HDFS config file; nothing to merge when unset.
    String hdfsConfigFile = KylinConfig.getInstanceFromEnv().getHBaseClusterHDFSConfigFile();
    if (hdfsConfigFile == null || hdfsConfigFile.isEmpty()) {
        return;
    }

    Configuration hdfsConf = new Configuration(false);
    hdfsConf.addResource(hdfsConfigFile);

    Collection<String> nameServices = hdfsConf.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES);
    Collection<String> mainNameServices = conf.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES);

    // Copy each HBase-cluster nameservice — its namenode list, failover proxy
    // provider, and per-namenode RPC addresses — into the main configuration.
    for (String serviceId : nameServices) {
        mainNameServices.add(serviceId);

        String serviceConfKey = DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + serviceId;
        String proxyConfKey = DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + serviceId;
        conf.set(serviceConfKey, hdfsConf.get(serviceConfKey, ""));
        conf.set(proxyConfKey, hdfsConf.get(proxyConfKey, ""));

        for (String nameNode : hdfsConf.getTrimmedStringCollection(serviceConfKey)) {
            String rpcConfKey = DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + serviceId + "." + nameNode;
            conf.set(rpcConfKey, hdfsConf.get(rpcConfKey, ""));
        }
    }

    conf.setStrings(DFSConfigKeys.DFS_NAMESERVICES, mainNameServices.toArray(new String[0]));
    // See YARN-3021, instruct RM skip renew token of hbase cluster name services
    conf.setStrings(JOB_NAMENODES_TOKEN_RENEWAL_EXCLUDE, nameServices.toArray(new String[0]));
}