Example usage for org.apache.hadoop.yarn.conf YarnConfiguration RM_HOSTNAME

List of usage examples for org.apache.hadoop.yarn.conf YarnConfiguration RM_HOSTNAME

Introduction

On this page you can find example usages of org.apache.hadoop.yarn.conf YarnConfiguration RM_HOSTNAME.

Prototype

String RM_HOSTNAME

To view the source code for org.apache.hadoop.yarn.conf YarnConfiguration RM_HOSTNAME, click the Source Link below.

Click Source Link

Usage

From source file:com.github.sakserv.minicluster.impl.MRLocalCluster.java

License:Apache License

@Override
public void configure() throws Exception {

    // Make the native Hadoop libraries resolvable when running on Windows.
    WindowsLibsUtils.setHadoopHome();

    // Wire up the ResourceManager and JobHistory endpoints of the mini cluster.
    configuration.set(YarnConfiguration.RM_ADDRESS, resourceManagerAddress);
    configuration.set(YarnConfiguration.RM_HOSTNAME, resourceManagerHostname);
    configuration.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, resourceManagerSchedulerAddress);
    configuration.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, resourceManagerResourceTrackerAddress);
    configuration.set(YarnConfiguration.RM_WEBAPP_ADDRESS, resourceManagerWebappAddress);
    configuration.set(JHAdminConfig.MR_HISTORY_ADDRESS, jobHistoryAddress);

    // Pin the ports so the explicit addresses above are actually honored
    // instead of being replaced with ephemeral ports.
    configuration.set(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, "true");
    configuration.set(JHAdminConfig.MR_HISTORY_MINICLUSTER_FIXED_PORTS, "true");

    // Optionally run containers inside the launching JVM instead of forking
    // separate processes; this requires explicit filesystem implementations.
    if (getUseInJvmContainerExecutor()) {
        configuration.set(YarnConfiguration.NM_CONTAINER_EXECUTOR, inJvmContainerExecutorClass);
        configuration.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
        configuration.set("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class.getName());
    }

    // Point the cluster at an external HDFS when one has been supplied.
    if (hdfsDefaultFs != null) {
        configuration.set("fs.defaultFS", hdfsDefaultFs);
        configuration.set("dfs.replication", "1");
    }
}

From source file:com.github.sakserv.minicluster.impl.YarnLocalCluster.java

License:Apache License

@Override
public void configure() throws Exception {

    // Make the native Hadoop libraries resolvable when running on Windows.
    WindowsLibsUtils.setHadoopHome();

    // Wire up the ResourceManager endpoints of the mini cluster.
    configuration.set(YarnConfiguration.RM_ADDRESS, resourceManagerAddress);
    configuration.set(YarnConfiguration.RM_HOSTNAME, resourceManagerHostname);
    configuration.set(YarnConfiguration.RM_SCHEDULER_ADDRESS, resourceManagerSchedulerAddress);
    configuration.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, resourceManagerResourceTrackerAddress);
    configuration.set(YarnConfiguration.RM_WEBAPP_ADDRESS, resourceManagerWebappAddress);

    // Pin the ports so the explicit addresses above are actually honored.
    configuration.set(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, "true");

    // Optionally run containers inside the launching JVM instead of forking
    // separate processes; this requires explicit filesystem implementations.
    if (getUseInJvmContainerExecutor()) {
        configuration.set(YarnConfiguration.NM_CONTAINER_EXECUTOR, inJvmContainerExecutorClass);
        configuration.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
        configuration.set("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class.getName());
    }
}

From source file:org.apache.phoenix.util.PhoenixMRJobUtil.java

License:Apache License

/**
 * Resolves the hostname of the currently-active YARN ResourceManager by
 * inspecting the leader-election znodes that HA ResourceManagers publish
 * in ZooKeeper.
 *
 * @param config   Hadoop configuration holding the per-RM hostname entries
 *                 ({@code yarn.resourcemanager.hostname.<rm-id>})
 * @param zkQuorum ZooKeeper quorum connect string to query
 * @return the active ResourceManager hostname, or {@code null} if no
 *         active-standby elector lock was found
 * @throws IOException          on ZooKeeper connection failure
 * @throws InterruptedException if the ZooKeeper client is interrupted
 * @throws KeeperException      on ZooKeeper protocol errors
 */
public static String getActiveResourceManagerHost(Configuration config, String zkQuorum)
        throws IOException, InterruptedException, JSONException, KeeperException,
        InvalidProtocolBufferException, ZooKeeperConnectionException {
    ZooKeeperWatcher zkw = null;
    ZooKeeper zk = null;
    String activeRMHost = null;
    try {
        zkw = new ZooKeeperWatcher(config, "get-active-yarnmanager", null);
        zk = new ZooKeeper(zkQuorum, 30000, zkw, false);

        List<String> children = zk.getChildren(YARN_LEADER_ELECTION, zkw);
        for (String subEntry : children) {
            List<String> subChildren = zk.getChildren(YARN_LEADER_ELECTION + "/" + subEntry, zkw);
            for (String eachEntry : subChildren) {
                if (eachEntry.contains(ACTIVE_STANDBY_ELECTOR_LOCK)) {
                    String path = YARN_LEADER_ELECTION + "/" + subEntry + "/" + ACTIVE_STANDBY_ELECTOR_LOCK;
                    // The lock znode's payload identifies which RM currently
                    // holds leadership.
                    byte[] data = zk.getData(path, zkw, new Stat());
                    ActiveRMInfoProto proto = ActiveRMInfoProto.parseFrom(data);
                    LOG.info("Active RmId : " + proto.getRmId());

                    // In an HA setup the per-RM hostname is stored under the
                    // suffixed key "yarn.resourcemanager.hostname.<rm-id>".
                    activeRMHost = config.get(YarnConfiguration.RM_HOSTNAME + "." + proto.getRmId());
                    LOG.info("activeResourceManagerHostname = " + activeRMHost);
                }
            }
        }
    } finally {
        if (zkw != null)
            zkw.close();
        if (zk != null)
            zk.close();
    }

    return activeRMHost;
}

From source file:org.springframework.xd.sqoop.SqoopRunner.java

License:Apache License

/**
 * Builds a Hadoop {@code Configuration} for a Sqoop run from the supplied
 * option map: copies the core filesystem/YARN/MapReduce settings, optionally
 * enables Kerberos security and performs a keytab login, and finally applies
 * any pass-through properties carried under the Spring Hadoop config prefix.
 *
 * NOTE(review): the security branch calls configuration.set(...) with values
 * taken directly from configOptions without null checks — presumably the
 * principals are guaranteed present when kerberos is selected; verify against
 * callers.
 */
protected static Configuration createConfiguration(Map<String, String> configOptions) {

    Configuration configuration = new Configuration();
    // Core cluster endpoints; each is only set if present in configOptions
    // (behavior of setConfigurationProperty — assumed, confirm in its source).
    setConfigurationProperty(configOptions, configuration, CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
    setConfigurationProperty(configOptions, configuration, YarnConfiguration.RM_HOSTNAME);
    setConfigurationProperty(configOptions, configuration, YarnConfiguration.RM_ADDRESS);
    setConfigurationProperty(configOptions, configuration, YarnConfiguration.RM_SCHEDULER_ADDRESS);
    setConfigurationProperty(configOptions, configuration, YarnConfiguration.YARN_APPLICATION_CLASSPATH);
    setConfigurationProperty(configOptions, configuration, "mapreduce.framework.name");
    if (StringUtils.hasText(configOptions.get("mapreduce.jobhistory.address"))) {
        setConfigurationProperty(configOptions, configuration, "mapreduce.jobhistory.address");
    }
    // Kerberos security: only configured when explicitly requested.
    if (configOptions.containsKey(SECURITY_AUTH_METHOD)
            && "kerberos".equals(configOptions.get(SECURITY_AUTH_METHOD))) {
        configuration.setBoolean("hadoop.security.authorization", true);
        configuration.set("hadoop.security.authentication", configOptions.get(SECURITY_AUTH_METHOD));
        configuration.set("dfs.namenode.kerberos.principal", configOptions.get(SECURITY_NAMENODE_PRINCIPAL));
        configuration.set("yarn.resourcemanager.principal", configOptions.get(SECURITY_RM_MANAGER_PRINCIPAL));
        if (StringUtils.hasText(configOptions.get(SECURITY_MAPREDUCE_JOBHISTORY_PRINCIPAL))) {
            configuration.set("mapreduce.jobhistory.principal",
                    configOptions.get(SECURITY_MAPREDUCE_JOBHISTORY_PRINCIPAL));
        }
        String userKeytab = configOptions.get(SECURITY_USER_KEYTAB);
        String userPrincipal = configOptions.get(SECURITY_USER_PRINCIPAL);
        // Must be called before SecurityUtil.login so UGI picks up the
        // kerberos authentication settings above.
        UserGroupInformation.setConfiguration(configuration);
        if (StringUtils.hasText(userKeytab)) {
            configuration.set(ConfigurationFactoryBean.USERKEYTAB, userKeytab.trim());
        }
        if (StringUtils.hasText(userPrincipal)) {
            configuration.set(ConfigurationFactoryBean.USERPRINCIPAL, userPrincipal.trim());
        }
        // Attempt the keytab login only when both keytab and principal were
        // provided; a failed login is logged but does not abort configuration.
        if (StringUtils.hasText(userKeytab) && StringUtils.hasText(userPrincipal)) {
            try {
                SecurityUtil.login(configuration, ConfigurationFactoryBean.USERKEYTAB,
                        ConfigurationFactoryBean.USERPRINCIPAL);
            } catch (Exception e) {
                logger.warn("Cannot login using keytab " + userKeytab + " and principal " + userPrincipal, e);
            }
        }
    }

    // Pass-through: any option key prefixed with the Spring Hadoop config
    // prefix is copied verbatim (prefix stripped) into the configuration,
    // potentially overriding values set above.
    for (Entry<String, String> entry : configOptions.entrySet()) {
        String key = entry.getKey();
        if (key.startsWith(SPRING_HADOOP_CONFIG_PREFIX + ".")) {
            String prop = key.substring(SPRING_HADOOP_CONFIG_PREFIX.length() + 1);
            String value = entry.getValue();
            logger.info("Setting configuration property: " + prop + "=" + value);
            configuration.set(prop, value);
        }
    }
    return configuration;
}