Example usage for org.apache.hadoop.yarn.conf.YarnConfiguration.get

Introduction

This page collects example usages of org.apache.hadoop.yarn.conf.YarnConfiguration.get drawn from open-source projects.

Prototype

public String get(String name) 

Document

Get the value of the name property; returns null if no such property exists.
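
Because get(String name) returns null when a property is unset, callers should either guard against null or use the two-argument overload get(name, defaultValue). A minimal sketch; the key example.app.setting is hypothetical and is not defined in yarn-default.xml:

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class YarnConfGetExample {
    public static void main(String[] args) {
        YarnConfiguration conf = new YarnConfiguration();

        // Hypothetical key: get() returns null unless it is set in a loaded *-site.xml.
        String value = conf.get("example.app.setting");
        System.out.println(value == null ? "example.app.setting is unset" : value);

        // The two-argument overload supplies a default instead of returning null.
        System.out.println(conf.get("example.app.setting", "fallback"));
    }
}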

Usage

From source file:com.datatorrent.stram.util.ConfigUtils.java

License:Apache License

public static String getRawContainerLogsUrl(YarnConfiguration conf, String nodeHttpAddress, String appId,
        String containerId) {
    String logDirs = conf.get(YarnConfiguration.NM_LOG_DIRS);
    if (logDirs == null) {
        // yarn.nodemanager.log-dirs is unset; cannot determine the container log location
        return null;
    }
    if (logDirs.startsWith("${yarn.log.dir}")) {
        return ConfigUtils.getSchemePrefix(conf) + nodeHttpAddress + "/logs"
                + logDirs.substring("${yarn.log.dir}".length()) + "/" + appId + "/" + containerId;
    } else {
        try {
            String logDirsPath = new File(logDirs).getCanonicalPath();
            String yarnLogDirPath = new File(getYarnLogDir()).getCanonicalPath();
            if (logDirsPath.startsWith(yarnLogDirPath)) {
                return ConfigUtils.getSchemePrefix(conf) + nodeHttpAddress + "/logs"
                        + logDirsPath.substring(yarnLogDirPath.length()) + "/" + appId + "/" + containerId;
            } else {
                if (!rawContainerLogWarningPrinted) {
                    LOG.warn(
                            "Cannot determine the location of container logs because of incompatible node manager log location ({}) and yarn log location ({})",
                            logDirsPath, yarnLogDirPath);
                    rawContainerLogWarningPrinted = true;
                }
            }
        } catch (Exception ex) {
            if (!rawContainerLogWarningPrinted) {
                LOG.warn("Cannot determine the location of container logs because of error: ", ex);
                rawContainerLogWarningPrinted = true;
            }
        }
    }
    return null;
}
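
A minimal caller sketch; the node address, application ID, and container ID below are hypothetical placeholders:

YarnConfiguration conf = new YarnConfiguration();
String logsUrl = ConfigUtils.getRawContainerLogsUrl(conf,
        "node1.example.com:8042",                  // hypothetical NM web address
        "application_1700000000000_0001",          // hypothetical application id
        "container_1700000000000_0001_01_000002"); // hypothetical container id
if (logsUrl != null) {
    System.out.println("Raw container logs at: " + logsUrl);
}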

From source file:com.ibm.bi.dml.yarn.ropt.YarnClusterAnalyzer.java

License:Open Source License

/**
 * Analyzes properties of Yarn cluster and Hadoop configurations.
 */
public static void analyzeYarnCluster(YarnClient yarnClient, YarnConfiguration conf, boolean verbose) {
    try {
        List<NodeReport> nodesReport = yarnClient.getNodeReports();
        if (verbose)
            System.out.println("There are " + nodesReport.size() + " nodes in the cluster");
        if (nodesReport.isEmpty())
            throw new YarnException("There are zero available nodes in the yarn cluster");

        nodesMaxPhySorted = new ArrayList<Long>(nodesReport.size());
        clusterTotalMem = 0;
        clusterTotalCores = 0;
        clusterTotalNodes = 0;
        minimumMRContainerPhyMB = -1;
        for (NodeReport node : nodesReport) {
            Resource resource = node.getCapability();
            Resource used = node.getUsed();
            if (used == null)
                used = Resource.newInstance(0, 0);
            int mb = resource.getMemory();
            int cores = resource.getVirtualCores();
            if (mb <= 0)
                throw new YarnException("A node has non-positive memory " + mb);

            int myMinMRPhyMB = mb / cores / CPU_HYPER_FACTOR;
            if (minimumMRContainerPhyMB < myMinMRPhyMB)
                minimumMRContainerPhyMB = myMinMRPhyMB; // minimumMRContainerPhyMB needs to be the largest among the mins

            clusterTotalMem += (long) mb * 1024 * 1024;
            nodesMaxPhySorted.add((long) mb * 1024 * 1024);
            clusterTotalCores += cores;
            clusterTotalNodes++;
            if (verbose)
                System.out.println("\t" + node.getNodeId() + " has " + mb + " MB (" + used.getMemory()
                        + " MB used) memory and " + resource.getVirtualCores() + " (" + used.getVirtualCores()
                        + " used) cores");

        }
        Collections.sort(nodesMaxPhySorted, Collections.reverseOrder());

        nodesMaxBudgetSorted = new ArrayList<Double>(nodesMaxPhySorted.size());
        for (int i = 0; i < nodesMaxPhySorted.size(); i++)
            nodesMaxBudgetSorted.add(ResourceOptimizer.phyToBudget(nodesMaxPhySorted.get(i)));

        _remotePar = nodesReport.size();
        if (_remotePar == 0)
            throw new YarnException("There are no available nodes in the yarn cluster");

        // Now get the default cluster settings
        _remoteMRSortMem = (1024 * 1024) * conf.getLong("io.sort.mb", 100); //100MB

        //handle jvm max mem (map mem budget is relevant for map-side distcache and parfor)
        //(for robustness we probe both: child and map configuration parameters)
        String javaOpts1 = conf.get("mapred.child.java.opts"); //internally mapred/mapreduce synonym
        String javaOpts2 = conf.get("mapreduce.map.java.opts", null); //internally mapred/mapreduce synonym
        String javaOpts3 = conf.get("mapreduce.reduce.java.opts", null); //internally mapred/mapreduce synonym
        if (javaOpts2 != null) //specific value overrides generic
            _remoteJVMMaxMemMap = extractMaxMemoryOpt(javaOpts2);
        else
            _remoteJVMMaxMemMap = extractMaxMemoryOpt(javaOpts1);
        if (javaOpts3 != null) //specific value overrides generic
            _remoteJVMMaxMemReduce = extractMaxMemoryOpt(javaOpts3);
        else
            _remoteJVMMaxMemReduce = extractMaxMemoryOpt(javaOpts1);

        //HDFS blocksize
        String blocksize = conf.get(MRConfigurationNames.DFS_BLOCK_SIZE, "134217728");
        _blocksize = Long.parseLong(blocksize);

        minimalPhyAllocate = (long) 1024 * 1024
                * conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
                        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
        maximumPhyAllocate = (long) 1024 * 1024
                * conf.getInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
                        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
        mrAMPhy = (long) conf.getInt("yarn.app.mapreduce.am.resource.mb", 1536) * 1024 * 1024;

    } catch (Exception e) {
        throw new RuntimeException("Unable to analyze yarn cluster ", e);
    }

    /*
     * This is for the AppMaster to query available resources in the cluster during heartbeat
     * 
    AMRMClient<ContainerRequest> rmClient = AMRMClient.createAMRMClient();
    rmClient.init(conf);
    rmClient.start();
    AllocateResponse response = rmClient.allocate(0);
    int nodeCount = response.getNumClusterNodes();
    Resource resource = response.getAvailableResources();
    List<NodeReport> nodeUpdate = response.getUpdatedNodes();
            
    LOG.info("This is a " + nodeCount + " node cluster with totally " +
    resource.getMemory() + " memory and " + resource.getVirtualCores() + " cores");
    LOG.info(nodeUpdate.size() + " updatedNode reports received");
    for (NodeReport node : nodeUpdate) {
       resource = node.getCapability();
       LOG.info(node.getNodeId() + " updated with " + resource.getMemory() + " memory and " + resource.getVirtualCores() + " cores");
    }*/
}

From source file:com.twitter.hraven.hadoopJobMonitor.ClusterStatusChecker.java

License:Apache License

/**
 * Get the app list from RM and check status of each
 */
@Override
public void run() {
    // 1. get the list of running apps
    LOG.info("Running " + ClusterStatusChecker.class.getName());
    try {
        YarnConfiguration yConf = new YarnConfiguration();
        LOG.info(yConf.get(YarnConfiguration.RM_ADDRESS));
        LOG.info("Getting appList ...");
        // TODO: in future hadoop API we will be able to filter the app list
        EnumSet<YarnApplicationState> states = EnumSet.of(YarnApplicationState.RUNNING);
        List<ApplicationReport> appList = rmDelegate.getApplications(states);
        LOG.info("appList received. size is: " + appList.size());
        for (ApplicationReport appReport : appList)
            checkAppStatus(appReport);
    } catch (YarnRuntimeException e) {
        LOG.error("Error in getting application list from RM", e);
    } catch (YarnException e) {
        LOG.error("Error in getting application list from RM", e);
    } catch (IOException e) {
        LOG.error("Error in getting application list from RM", e);
    }
}

From source file:gobblin.yarn.GobblinYarnAppLauncherTest.java

License:Apache License

@BeforeClass
public void setUp() throws Exception {
    // Set java home in environment since it isn't set on some systems
    String javaHome = System.getProperty("java.home");
    setEnv("JAVA_HOME", javaHome);

    final YarnConfiguration clusterConf = new YarnConfiguration();
    clusterConf.set("yarn.resourcemanager.connect.max-wait.ms", "10000");

    MiniYARNCluster miniYARNCluster = this.closer.register(new MiniYARNCluster("TestCluster", 1, 1, 1));
    miniYARNCluster.init(clusterConf);
    miniYARNCluster.start();

    // YARN client should not be started before the Resource Manager is up
    AssertWithBackoff.create().logger(LOG).timeoutMs(10000).assertTrue(new Predicate<Void>() {
        @Override
        public boolean apply(Void input) {
            return !clusterConf.get(YarnConfiguration.RM_ADDRESS).contains(":0");
        }
    }, "Waiting for RM");

    this.yarnClient = this.closer.register(YarnClient.createYarnClient());
    this.yarnClient.init(clusterConf);
    this.yarnClient.start();

    // Use a random ZK port
    TestingServer testingZKServer = this.closer.register(new TestingServer(-1));
    LOG.info("Testing ZK Server listening on: " + testingZKServer.getConnectString());

    // the zk port is dynamically configured
    try (PrintWriter pw = new PrintWriter("dynamic.conf")) {
        File dir = new File("target/dummydir");

        // dummy directory specified in configuration
        dir.mkdir();

        pw.println("gobblin.cluster.zk.connection.string=\"" + testingZKServer.getConnectString() + "\"");
        pw.println("jobconf.fullyQualifiedPath=\"" + dir.getAbsolutePath() + "\"");
    }

    // YARN config is dynamic and needs to be passed to other processes
    try (OutputStream os = new FileOutputStream(new File("yarn-site.xml"))) {
        clusterConf.writeXml(os);
    }

    this.curatorFramework = TestHelper.createZkClient(testingZKServer, this.closer);

    URL url = GobblinYarnAppLauncherTest.class.getClassLoader()
            .getResource(GobblinYarnAppLauncherTest.class.getSimpleName() + ".conf");
    Assert.assertNotNull(url, "Could not find resource "
            + GobblinYarnAppLauncherTest.class.getSimpleName() + ".conf");

    this.config = ConfigFactory.parseURL(url).withValue("gobblin.cluster.zk.connection.string",
            ConfigValueFactory.fromAnyRef(testingZKServer.getConnectString())).resolve();

    String zkConnectionString = this.config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    this.helixManager = HelixManagerFactory.getZKHelixManager(
            this.config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY),
            TestHelper.TEST_HELIX_INSTANCE_NAME, InstanceType.CONTROLLER, zkConnectionString);

    this.gobblinYarnAppLauncher = new GobblinYarnAppLauncher(this.config, clusterConf);
}

From source file:org.apache.gobblin.yarn.GobblinYarnAppLauncherTest.java

License:Apache License

@BeforeClass
public void setUp() throws Exception {
    // Set java home in environment since it isn't set on some systems
    String javaHome = System.getProperty("java.home");
    setEnv("JAVA_HOME", javaHome);

    final YarnConfiguration clusterConf = new YarnConfiguration();
    clusterConf.set("yarn.resourcemanager.connect.max-wait.ms", "10000");

    MiniYARNCluster miniYARNCluster = this.closer.register(new MiniYARNCluster("TestCluster", 1, 1, 1));
    miniYARNCluster.init(clusterConf);
    miniYARNCluster.start();

    // YARN client should not be started before the Resource Manager is up
    AssertWithBackoff.create().logger(LOG).timeoutMs(10000).assertTrue(new Predicate<Void>() {
        @Override
        public boolean apply(Void input) {
            return !clusterConf.get(YarnConfiguration.RM_ADDRESS).contains(":0");
        }
    }, "Waiting for RM");

    this.yarnClient = this.closer.register(YarnClient.createYarnClient());
    this.yarnClient.init(clusterConf);
    this.yarnClient.start();

    // Use a random ZK port
    TestingServer testingZKServer = this.closer.register(new TestingServer(-1));
    LOG.info("Testing ZK Server listening on: " + testingZKServer.getConnectString());

    // the zk port is dynamically configured
    try (PrintWriter pw = new PrintWriter(DYNAMIC_CONF_PATH)) {
        File dir = new File("target/dummydir");

        // dummy directory specified in configuration
        dir.mkdir();

        pw.println("gobblin.cluster.zk.connection.string=\"" + testingZKServer.getConnectString() + "\"");
        pw.println("jobconf.fullyQualifiedPath=\"" + dir.getAbsolutePath() + "\"");
    }

    // YARN config is dynamic and needs to be passed to other processes
    try (OutputStream os = new FileOutputStream(new File(YARN_SITE_XML_PATH))) {
        clusterConf.writeXml(os);
    }

    this.curatorFramework = TestHelper.createZkClient(testingZKServer, this.closer);

    URL url = GobblinYarnAppLauncherTest.class.getClassLoader()
            .getResource(GobblinYarnAppLauncherTest.class.getSimpleName() + ".conf");
    Assert.assertNotNull(url, "Could not find resource "
            + GobblinYarnAppLauncherTest.class.getSimpleName() + ".conf");

    this.config = ConfigFactory.parseURL(url).withValue("gobblin.cluster.zk.connection.string",
            ConfigValueFactory.fromAnyRef(testingZKServer.getConnectString())).resolve();

    String zkConnectionString = this.config.getString(GobblinClusterConfigurationKeys.ZK_CONNECTION_STRING_KEY);
    this.helixManager = HelixManagerFactory.getZKHelixManager(
            this.config.getString(GobblinClusterConfigurationKeys.HELIX_CLUSTER_NAME_KEY),
            TestHelper.TEST_HELIX_INSTANCE_NAME, InstanceType.CONTROLLER, zkConnectionString);

    this.gobblinYarnAppLauncher = new GobblinYarnAppLauncher(this.config, clusterConf);
}

From source file:org.apache.hoya.yarn.appmaster.state.AppState.java

License:Apache License

/**
 * Handle a completed node in the cluster description: move it from the
 * live server list to the completed server list.
 * @param amConf YarnConfiguration
 * @param status the node that has just completed
 * @return NodeCompletionResult
 */
public synchronized NodeCompletionResult onCompletedNode(YarnConfiguration amConf, ContainerStatus status) {
    ContainerId containerId = status.getContainerId();
    NodeCompletionResult result = new NodeCompletionResult();
    RoleInstance roleInstance;

    if (containersBeingReleased.containsKey(containerId)) {
        log.info("Container was queued for release");
        Container container = containersBeingReleased.remove(containerId);
        RoleStatus roleStatus = lookupRoleStatus(container);
        log.info("decrementing role count for role {}", roleStatus.getName());
        roleStatus.decReleasing();
        roleStatus.decActual();
        roleStatus.incCompleted();
        roleHistory.onReleaseCompleted(container);

    } else if (surplusNodes.remove(containerId)) {
        //it's a surplus one being purged
        result.surplusNode = true;
    } else {
        //a container has failed 
        result.containerFailed = true;
        roleInstance = activeContainers.remove(containerId);
        if (roleInstance != null) {
            //it was active, move it to failed 
            incFailedCountainerCount();
            failedNodes.put(containerId, roleInstance);
        } else {
            // the container may have been noted as failed already, so look
            // it up
            roleInstance = failedNodes.get(containerId);
        }
        if (roleInstance != null) {
            int roleId = roleInstance.roleId;
            log.info("Failed container in role {}", roleId);
            try {
                RoleStatus roleStatus = lookupRoleStatus(roleId);
                roleStatus.decActual();
                boolean shortLived = isShortLived(roleInstance);
                String message;
                if (roleInstance.container != null) {
                    String user = null;
                    try {
                        user = HoyaUtils.getCurrentUser().getShortUserName();
                    } catch (IOException ioe) {
                        // ignore: fall back to a null user in the completed logs URL
                    }
                    String completedLogsUrl = null;
                    Container c = roleInstance.container;
                    String url = null;
                    if (amConf != null) {
                        url = amConf.get(YarnConfiguration.YARN_LOG_SERVER_URL);
                    }
                    if (user != null && url != null) {
                        completedLogsUrl = url + "/" + c.getNodeId() + "/" + roleInstance.getContainerId()
                                + "/ctx/" + user;
                    }
                    message = String.format(
                            "Failure %s on host %s" + (completedLogsUrl != null ? ", see %s" : ""),
                            roleInstance.getContainerId(), c.getNodeId().getHost(), completedLogsUrl);
                } else {
                    message = String.format("Failure %s", containerId.toString());
                }
                roleStatus.noteFailed(message);
                //have a look to see if it was short-lived
                if (shortLived) {
                    roleStatus.incStartFailed();
                }

                if (roleInstance.container != null) {
                    roleHistory.onFailedContainer(roleInstance.container, shortLived);
                }

            } catch (YarnRuntimeException e1) {
                log.error("Failed container of unknown role {}", roleId);
            }
        } else {
            //this isn't a known container.

            log.error("Notified of completed container {} that is not in the list"
                    + " of active or failed containers", containerId);
            completionOfUnknownContainerEvent.incrementAndGet();
        }
    }

    if (result.surplusNode) {
        //a surplus node
        return result;
    }

    //record the completed node's details; this pulls it from the live node set
    //remove the node
    ContainerId id = status.getContainerId();
    RoleInstance node = getLiveNodes().remove(id);
    if (node == null) {
        log.warn("Received notification of completion of unknown node {}", id);
        completionOfNodeNotInLiveListEvent.incrementAndGet();

    } else {
        node.state = ClusterDescription.STATE_DESTROYED;
        node.exitCode = status.getExitStatus();
        node.diagnostics = status.getDiagnostics();
        getCompletedNodes().put(id, node);
        result.roleInstance = node;
    }
    return result;
}

From source file:org.apache.ignite.yarn.IgniteYarnClient.java

License:Apache License

/**
 * The main method has one mandatory parameter and one optional parameter.
 *
 * @param args Path to the application master jar (mandatory) and path to a property file (optional).
 */
public static void main(String[] args) throws Exception {
    checkArguments(args);

    // Set path to app master jar.
    String pathAppMasterJar = args[0];

    ClusterProperties props = ClusterProperties.from(args.length == 2 ? args[1] : null);

    YarnConfiguration conf = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();

    FileSystem fs = FileSystem.get(conf);

    Path ignite;

    // Load ignite and jar
    if (props.ignitePath() == null)
        ignite = getIgnite(props, fs);
    else
        ignite = new Path(props.ignitePath());

    // Upload the jar file to HDFS.
    Path appJar = IgniteYarnUtils.copyLocalToHdfs(fs, pathAppMasterJar,
            props.igniteWorkDir() + File.separator + IgniteYarnUtils.JAR_NAME);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    amContainer.setCommands(Collections
            .singletonList(Environment.JAVA_HOME.$() + "/bin/java -Xmx512m " + ApplicationMaster.class.getName()
                    + IgniteYarnUtils.SPACE + ignite.toUri() + IgniteYarnUtils.YARN_LOG_OUT));

    // Setup jar for ApplicationMaster
    LocalResource appMasterJar = IgniteYarnUtils.setupFile(appJar, fs, LocalResourceType.FILE);

    amContainer.setLocalResources(Collections.singletonMap(IgniteYarnUtils.JAR_NAME, appMasterJar));

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = props.toEnvs();

    setupAppMasterEnv(appMasterEnv, conf);

    amContainer.setEnvironment(appMasterEnv);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials creds = new Credentials();

        String tokRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);

        if (tokRenewer == null || tokRenewer.length() == 0)
            throw new IOException("Master Kerberos principal for the RM is not set.");

        log.info("Found RM principal: " + tokRenewer);

        final Token<?>[] tokens = fs.addDelegationTokens(tokRenewer, creds);

        if (tokens != null)
            log.info("File system delegation tokens: " + Arrays.toString(tokens));

        amContainer.setTokens(IgniteYarnUtils.createTokenBuffer(creds));
    }

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(512);
    capability.setVirtualCores(1);

    // Finally, set-up ApplicationSubmissionContext for the application
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    appContext.setApplicationName("ignition"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue("default"); // queue

    // Submit application
    ApplicationId appId = appContext.getApplicationId();

    yarnClient.submitApplication(appContext);

    log.log(Level.INFO, "Submitted application. Application id: {0}", appId);

    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();

    while (appState == YarnApplicationState.NEW || appState == YarnApplicationState.NEW_SAVING
            || appState == YarnApplicationState.SUBMITTED || appState == YarnApplicationState.ACCEPTED) {
        TimeUnit.SECONDS.sleep(1L);

        appReport = yarnClient.getApplicationReport(appId);

        if (appState != YarnApplicationState.ACCEPTED
                && appReport.getYarnApplicationState() == YarnApplicationState.ACCEPTED)
            log.log(Level.INFO, "Application {0} is ACCEPTED.", appId);

        appState = appReport.getYarnApplicationState();
    }

    log.log(Level.INFO, "Application {0} is {1}.", new Object[] { appId, appState });
}
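
A hedged invocation sketch of this client; both paths are hypothetical placeholders:

// Mandatory first argument: path to the application master jar.
// Optional second argument: path to a cluster property file.
IgniteYarnClient.main(new String[] {
        "/path/to/ignite-yarn.jar",        // hypothetical
        "/path/to/cluster.properties" });  // hypothetical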

From source file:org.apache.myriad.scheduler.NMExecutorCLGenImpl.java

License:Apache License

@Override
public String getConfigurationUrl() {
    YarnConfiguration conf = new YarnConfiguration();
    String httpPolicy = conf.get(TaskFactory.YARN_HTTP_POLICY);
    if (httpPolicy != null && httpPolicy.equals(TaskFactory.YARN_HTTP_POLICY_HTTPS_ONLY)) {
        String address = conf.get(TaskFactory.YARN_RESOURCEMANAGER_WEBAPP_HTTPS_ADDRESS);
        if (address == null || address.isEmpty()) {
            address = conf.get(TaskFactory.YARN_RESOURCEMANAGER_HOSTNAME) + ":8090";
        }
        return "https://" + address + "/conf";
    } else {
        String address = conf.get(TaskFactory.YARN_RESOURCEMANAGER_WEBAPP_ADDRESS);
        if (address == null || address.isEmpty()) {
            address = conf.get(TaskFactory.YARN_RESOURCEMANAGER_HOSTNAME) + ":8088";
        }
        return "http://" + address + "/conf";
    }
}
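
The manual null/empty fallback above can also be written with the two-argument get(name, defaultValue) overload, which covers the unset case (though not the empty-string case). A sketch using the standard YarnConfiguration key; the fallback address is a hypothetical placeholder:

YarnConfiguration conf = new YarnConfiguration();
// yarn.resourcemanager.webapp.address, with a hypothetical fallback
String address = conf.get(YarnConfiguration.RM_WEBAPP_ADDRESS, "localhost:8088");
String confUrl = "http://" + address + "/conf";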

From source file:org.apache.slider.core.launch.CredentialUtils.java

License:Apache License

/**
 * Look up and return the resource manager's principal. This method
 * automatically does the <code>_HOST</code> replacement in the principal and
 * correctly handles HA resource manager configurations.
 *
 * From: YARN-4629
 * @param conf the {@link Configuration} file from which to read the
 * principal
 * @return the resource manager's principal string
 * @throws IOException thrown if there's an error replacing the host name
 */
public static String getRMPrincipal(Configuration conf) throws IOException {
    String principal = conf.get(RM_PRINCIPAL, "");
    String hostname;
    Preconditions.checkState(!principal.isEmpty(), "Not set: " + RM_PRINCIPAL);

    if (HAUtil.isHAEnabled(conf)) {
        YarnConfiguration yarnConf = new YarnConfiguration(conf);
        if (yarnConf.get(RM_HA_ID) == null) {
            // If RM_HA_ID is not configured, use the first of RM_HA_IDS.
            // Any valid RM HA ID should work.
            String[] rmIds = yarnConf.getStrings(RM_HA_IDS);
            Preconditions.checkState((rmIds != null) && (rmIds.length > 0), "Not set " + RM_HA_IDS);
            yarnConf.set(RM_HA_ID, rmIds[0]);
        }

        hostname = yarnConf.getSocketAddr(RM_ADDRESS, DEFAULT_RM_ADDRESS, DEFAULT_RM_PORT).getHostName();
    } else {
        hostname = conf.getSocketAddr(RM_ADDRESS, DEFAULT_RM_ADDRESS, DEFAULT_RM_PORT).getHostName();
    }
    return SecurityUtil.getServerPrincipal(principal, hostname);
}
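
A hedged caller sketch, assuming a Kerberized cluster where yarn.resourcemanager.principal is set (for example rm/_HOST@EXAMPLE.COM):

Configuration conf = new YarnConfiguration();
// Resolves _HOST against the (possibly HA) RM address in the configuration.
String rmPrincipal = CredentialUtils.getRMPrincipal(conf);
System.out.println("RM principal: " + rmPrincipal);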

From source file:org.apache.sysml.yarn.ropt.YarnClusterAnalyzer.java

License:Apache License

/**
 * Analyzes properties of Yarn cluster and Hadoop configurations.
 * 
 * @param yarnClient hadoop yarn client
 * @param conf hadoop yarn configuration
 * @param verbose output info to standard output
 */
public static void analyzeYarnCluster(YarnClient yarnClient, YarnConfiguration conf, boolean verbose) {
    try {
        List<NodeReport> nodesReport = yarnClient.getNodeReports();
        if (verbose)
            System.out.println("There are " + nodesReport.size() + " nodes in the cluster");
        if (nodesReport.isEmpty())
            throw new YarnException("There are zero available nodes in the yarn cluster");

        nodesMaxPhySorted = new ArrayList<>(nodesReport.size());
        clusterTotalMem = 0;
        clusterTotalCores = 0;
        clusterTotalNodes = 0;
        minimumMRContainerPhyMB = -1;
        for (NodeReport node : nodesReport) {
            Resource resource = node.getCapability();
            Resource used = node.getUsed();
            if (used == null)
                used = Resource.newInstance(0, 0);
            int mb = resource.getMemory();
            int cores = resource.getVirtualCores();
            if (mb <= 0)
                throw new YarnException("A node has non-positive memory " + mb);

            int myMinMRPhyMB = mb / cores / CPU_HYPER_FACTOR;
            if (minimumMRContainerPhyMB < myMinMRPhyMB)
                minimumMRContainerPhyMB = myMinMRPhyMB; // minimumMRContainerPhyMB needs to be the largest among the mins

            clusterTotalMem += (long) mb * 1024 * 1024;
            nodesMaxPhySorted.add((long) mb * 1024 * 1024);
            clusterTotalCores += cores;
            clusterTotalNodes++;
            if (verbose)
                System.out.println("\t" + node.getNodeId() + " has " + mb + " MB (" + used.getMemory()
                        + " MB used) memory and " + resource.getVirtualCores() + " (" + used.getVirtualCores()
                        + " used) cores");

        }
        Collections.sort(nodesMaxPhySorted, Collections.reverseOrder());

        nodesMaxBudgetSorted = new ArrayList<>(nodesMaxPhySorted.size());
        for (int i = 0; i < nodesMaxPhySorted.size(); i++)
            nodesMaxBudgetSorted.add(ResourceOptimizer.phyToBudget(nodesMaxPhySorted.get(i)));

        _remotePar = nodesReport.size();
        if (_remotePar == 0)
            throw new YarnException("There are no available nodes in the yarn cluster");

        // Now get the default cluster settings
        _remoteMRSortMem = (1024 * 1024) * conf.getLong(MRConfigurationNames.MR_TASK_IO_SORT_MB, 100); //100MB

        //handle jvm max mem (map mem budget is relevant for map-side distcache and parfor)
        //(for robustness we probe both: child and map configuration parameters)
        String javaOpts1 = conf.get(MRConfigurationNames.MR_CHILD_JAVA_OPTS); //internally mapred/mapreduce synonym
        String javaOpts2 = conf.get(MRConfigurationNames.MR_MAP_JAVA_OPTS, null); //internally mapred/mapreduce synonym
        String javaOpts3 = conf.get(MRConfigurationNames.MR_REDUCE_JAVA_OPTS, null); //internally mapred/mapreduce synonym
        if (javaOpts2 != null) //specific value overrides generic
            _remoteJVMMaxMemMap = extractMaxMemoryOpt(javaOpts2);
        else
            _remoteJVMMaxMemMap = extractMaxMemoryOpt(javaOpts1);
        if (javaOpts3 != null) //specific value overrides generic
            _remoteJVMMaxMemReduce = extractMaxMemoryOpt(javaOpts3);
        else
            _remoteJVMMaxMemReduce = extractMaxMemoryOpt(javaOpts1);

        //HDFS blocksize
        String blocksize = conf.get(MRConfigurationNames.DFS_BLOCKSIZE, "134217728");
        _blocksize = Long.parseLong(blocksize);

        minimalPhyAllocate = (long) 1024 * 1024
                * conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
                        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
        maximumPhyAllocate = (long) 1024 * 1024
                * conf.getInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
                        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
        mrAMPhy = (long) conf.getInt(MRConfigurationNames.YARN_APP_MR_AM_RESOURCE_MB, 1536) * 1024 * 1024;

    } catch (Exception e) {
        throw new RuntimeException("Unable to analyze yarn cluster ", e);
    }

    /*
     * This is for the AppMaster to query available resources in the cluster during heartbeat
     * 
    AMRMClient<ContainerRequest> rmClient = AMRMClient.createAMRMClient();
    rmClient.init(conf);
    rmClient.start();
    AllocateResponse response = rmClient.allocate(0);
    int nodeCount = response.getNumClusterNodes();
    Resource resource = response.getAvailableResources();
    List<NodeReport> nodeUpdate = response.getUpdatedNodes();
            
    LOG.info("This is a " + nodeCount + " node cluster with totally " +
    resource.getMemory() + " memory and " + resource.getVirtualCores() + " cores");
    LOG.info(nodeUpdate.size() + " updatedNode reports received");
    for (NodeReport node : nodeUpdate) {
       resource = node.getCapability();
       LOG.info(node.getNodeId() + " updated with " + resource.getMemory() + " memory and " + resource.getVirtualCores() + " cores");
    }*/
}
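
A minimal invocation sketch; it assumes a reachable YARN cluster whose connection details come from the loaded configuration:

YarnConfiguration conf = new YarnConfiguration();
YarnClient yarnClient = YarnClient.createYarnClient();
yarnClient.init(conf);
yarnClient.start();
YarnClusterAnalyzer.analyzeYarnCluster(yarnClient, conf, true); // verbose output
yarnClient.stop();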