Example usage for org.apache.hadoop.mapred ClusterStatus getReduceTasks

Introduction

In this page you can find the example usage for org.apache.hadoop.mapred ClusterStatus getReduceTasks.

Prototype

public int getReduceTasks() 

Document

Get the number of currently running reduce tasks in the cluster.
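
As a quick orientation, here is a minimal self-contained sketch (not taken from any of the projects listed below) showing the typical call path: build a JobClient from a JobConf, request the ClusterStatus, and read getReduceTasks() alongside getMaxReduceTasks() to gauge reduce-side load.

import java.io.IOException;

import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class ReduceTaskReport {
    public static void main(String[] args) throws IOException {
        JobConf conf = new JobConf();
        JobClient client = new JobClient(conf);
        // Query the Map-Reduce cluster for its current status
        ClusterStatus status = client.getClusterStatus();
        // Number of reduce tasks currently running in the cluster
        int runningReduces = status.getReduceTasks();
        // Maximum reduce task capacity of the cluster
        int maxReduces = status.getMaxReduceTasks();
        System.out.println("Reduce tasks: " + runningReduces + " running of " + maxReduces + " slots");
    }
}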

Usage

From source file:com.ibm.bi.dml.runtime.controlprogram.parfor.stat.InfrastructureAnalyzer.java

License:Open Source License

/**
 * Gets the fraction of running map/reduce tasks to existing
 * map/reduce task slots.
 * 
 * NOTE: on YARN the number of slots is a spurious indicator 
 * because containers are purely scheduled based on memory. 
 * 
 * @param mapOnly if true, consider only map tasks and map slots
 * @return cluster utilization between 0.0 and 1.0
 * @throws IOException if the cluster status cannot be retrieved
 */
public static double getClusterUtilization(boolean mapOnly) throws IOException {
    //in local mode, the cluster utilization is always 0.0 

    JobConf job = ConfigurationManager.getCachedJobConf();
    JobClient client = new JobClient(job);
    ClusterStatus stat = client.getClusterStatus();

    double ret = 0.0;
    if (stat != null) //if in cluster mode
    {
        if (mapOnly) {
            int capacity = stat.getMaxMapTasks();
            int current = stat.getMapTasks();
            ret = ((double) current) / capacity;
        } else {
            int capacity = stat.getMaxMapTasks() + stat.getMaxReduceTasks();
            int current = stat.getMapTasks() + stat.getReduceTasks();
            ret = ((double) current) / capacity;
        }
    }

    return ret;
}
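
For example, with 30 of 60 map slots and 10 of 40 reduce slots in use, getClusterUtilization(false) returns (30 + 10) / (60 + 40) = 0.4, while getClusterUtilization(true) returns 30 / 60 = 0.5.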

From source file:com.impetus.ankush2.hadoop.monitor.JobStatusProvider.java

License:Open Source License

/**
 * Gets the Hadoop job metrics.
 * 
 * @return a map of metric names to values
 * @throws AnkushException if the metrics cannot be fetched
 */
public Map<String, Object> getJobMetrics() throws AnkushException {
    String errMsg = "Unable to fetch Hadoop Metrics, could not connect to Hadoop JobClient.";
    try {
        // Proceed only when a JobClient connection is available
        if (jobClient != null) {
            // Creating an empty map for storing Hadoop Job Metrics information
            LinkedHashMap<String, Object> hadoopJobMetrics = new LinkedHashMap<String, Object>();
            try {
                // Checking for a non-null jobClient
                if (jobClient != null) {
                    LOG.info("Fetching Hadoop Metrics Information.." + jobClient);
                    // Get status information about the Map-Reduce cluster.
                    ClusterStatus clusterStatus = jobClient.getClusterStatus();
                    // Get the current state of the JobTracker,
                    State jobTrackerState = clusterStatus.getJobTrackerState();
                    // Get the number of currently running map tasks in the cluster.
                    int mapTasks = clusterStatus.getMapTasks();
                    // Get the maximum capacity for running map tasks in the
                    // cluster.
                    int maxMapTasks = clusterStatus.getMaxMapTasks();
                    // Get the maximum capacity for running reduce tasks in the
                    // cluster.
                    int maxReduceTasks = clusterStatus.getMaxReduceTasks();
                    // Get the number of currently running reduce tasks in the
                    // cluster.
                    int reduceTasks = clusterStatus.getReduceTasks();
                    // Get the number of active task trackers in the cluster.
                    int taskTrackers = clusterStatus.getTaskTrackers();
                    // Get the number of blacklisted task trackers in the cluster.
                    int blackListedTrackers = clusterStatus.getBlacklistedTrackers();

                    long ttExpiryInterval = clusterStatus.getTTExpiryInterval();

                    int defaultMaps = 0;
                    int defaultReduces = 0;
                    try {
                        defaultMaps = jobClient.getDefaultMaps();
                        defaultReduces = jobClient.getDefaultReduces();
                    } catch (Exception e) {
                        // Optional values; keep the defaults of 0 if they cannot be fetched
                    }

                    // Putting Hadoop Metrics information in a map
                    hadoopJobMetrics.put("jobTrackerState", String.valueOf(jobTrackerState));
                    hadoopJobMetrics.put("defaultMaps", String.valueOf(defaultMaps));
                    hadoopJobMetrics.put("defaultReduces", String.valueOf(defaultReduces));
                    hadoopJobMetrics.put("mapTasks", String.valueOf(mapTasks));
                    hadoopJobMetrics.put("reduceTasks", String.valueOf(reduceTasks));
                    hadoopJobMetrics.put("maxMapTasksCapacity", String.valueOf(maxMapTasks));
                    hadoopJobMetrics.put("maxReduceTasksCapacity", String.valueOf(maxReduceTasks));
                    hadoopJobMetrics.put("taskTrackers", String.valueOf(taskTrackers));
                    hadoopJobMetrics.put("blackListedTrackers", String.valueOf(blackListedTrackers));

                    hadoopJobMetrics.put("taskTrackerExpiryInterval", String.valueOf(ttExpiryInterval));

                    hadoopJobMetrics.put("schedulerType", getSchedulerType());

                    int totalJobSubmission = 0;
                    // Get the jobs that are submitted.
                    JobStatus[] jobStatus = jobClient.getAllJobs();
                    if (jobStatus != null) {
                        totalJobSubmission = jobStatus.length;
                    }

                    List<Map<String, Object>> allJobsList = listAllJobs();
                    int totalJobRunning = getRunningJobList(allJobsList).size();
                    int completedJobs = getCompletedJobs(allJobsList).size();

                    hadoopJobMetrics.put("totalJobSubmission", String.valueOf(totalJobSubmission));
                    hadoopJobMetrics.put("totalJobRunning", String.valueOf(totalJobRunning));
                    hadoopJobMetrics.put("totalJobsCompleted", String.valueOf(completedJobs));
                } else {
                    HadoopUtils.addAndLogError(this.LOG, this.clusterConfig, errMsg,
                            Constant.Component.Name.HADOOP);
                    throw new AnkushException(errMsg);
                }
            } catch (AnkushException e) {
                throw e;
            } catch (Exception e) {
                HadoopUtils.addAndLogError(this.LOG, this.clusterConfig, errMsg, Constant.Component.Name.HADOOP,
                        e);
                throw new AnkushException(errMsg);
            }
            return hadoopJobMetrics;
        } else {
            throw new AnkushException(errMsg);
        }
    } catch (AnkushException e) {
        throw e;
    } catch (Exception e) {
        HadoopUtils.addAndLogError(this.LOG, this.clusterConfig, errMsg, Constant.Component.Name.HADOOP, e);
        throw new AnkushException(errMsg);
    }
}

From source file:hibench.DataOptions.java

License:Apache License

public static int getMaxNumReduce() throws IOException {
    JobConf job = new JobConf(WebDataGen.class);
    JobClient client = new JobClient(job);
    ClusterStatus cluster = client.getClusterStatus();
    int maxReduce = cluster.getMaxReduceTasks();
    int running = cluster.getReduceTasks();
    return maxReduce - running;
}
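
A natural follow-up, sketched here as a hypothetical extension rather than code from the hibench source, is to cap a job's reducer count at the slots currently free:

    JobConf job = new JobConf(WebDataGen.class);
    // Never request fewer than one reducer, even when the cluster is saturated
    job.setNumReduceTasks(Math.max(1, getMaxNumReduce()));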

From source file:org.apache.accumulo.server.test.continuous.ContinuousStatsCollector.java

License:Apache License

private static String getMRStats() throws Exception {
    Configuration conf = CachedConfiguration.getInstance();
    @SuppressWarnings("deprecation")
    JobClient jc = new JobClient(new org.apache.hadoop.mapred.JobConf(conf));

    ClusterStatus cs = jc.getClusterStatus(false);

    return "" + cs.getMapTasks() + " " + cs.getMaxMapTasks() + " " + cs.getReduceTasks() + " "
            + cs.getMaxReduceTasks() + " " + cs.getTaskTrackers() + " " + cs.getBlacklistedTrackers();

}

From source file:org.apache.accumulo.test.continuous.ContinuousStatsCollector.java

License:Apache License

private static String getMRStats() throws Exception {
    Configuration conf = CachedConfiguration.getInstance();
    // No alternatives for hadoop 20
    JobClient jc = new JobClient(new org.apache.hadoop.mapred.JobConf(conf));

    ClusterStatus cs = jc.getClusterStatus(false);

    return "" + cs.getMapTasks() + " " + cs.getMaxMapTasks() + " " + cs.getReduceTasks() + " "
            + cs.getMaxReduceTasks() + " " + cs.getTaskTrackers() + " " + cs.getBlacklistedTrackers();

}

From source file:org.smartfrog.services.hadoop.components.cluster.ClusterStatusCheckerImpl.java

License:Open Source License

/**
 * Check the cluster status.
 *
 * @return a cluster status string
 * @throws SFHadoopException on any problem with the checks
 */
private String checkClusterStatus() throws SmartFrogException {

    try {
        JobClient cluster = createClientOnDemand();
        ClusterStatus status = cluster.getClusterStatus();
        StringBuilder result = new StringBuilder();

        if (supportedFileSystem) {
            Path sysDir = cluster.getSystemDir();
            URI uri = sysDir.toUri();
            sfLog().info("Checking filesystem " + uri);
            ManagedConfiguration conf = (ManagedConfiguration) cluster.getConf();
            String impl = "fs." + uri.getScheme() + ".impl";
            String classname = conf.get(impl);
            if (classname == null) {
                maybeDumpConfiguration(conf);
                throw new SFHadoopException("File system " + uri + " will not load "
                        + " - no configuration mapping for " + impl + " in " + conf.dump(), this, conf);
            }
            try {
                conf.getClassByName(classname);
            } catch (ClassNotFoundException e) {
                throw new SFHadoopException("File system " + uri + " will not load "
                        + " - unable to locate class " + impl + " : " + e, e, this, conf);
            }
            try {
                result.append("Filesystem: ").append(uri).append(" ; ");
                FileSystem fs = cluster.getFs();
            } catch (IOException e) {
                throw new SFHadoopException("File system " + uri + " will not load " + e, e, this, conf);
            } catch (IllegalArgumentException e) {
                throw new SFHadoopException("Bad File system URI" + e, e, this, conf);
            }
        }
        if (jobtrackerLive) {
            sfLog().info("Checking jobTracker ");
            JobTracker.State state = status.getJobTrackerState();
            if (!state.equals(JobTracker.State.RUNNING)) {
                throw new SFHadoopException(
                        "Job Tracker at " + jobtracker + " is not running. It is in the state " + state, this);
            }
            result.append("Job tracker is in state ").append(status);
        }
        checkRange(minActiveMapTasks, maxActiveMapTasks, status.getMapTasks(), "map task");
        checkRange(minActiveReduceTasks, maxActiveReduceTasks, status.getReduceTasks(), "reduce task");
        checkMax(maxSupportedMapTasks, status.getMaxMapTasks(), "supported max map task");
        checkMax(maxSupportedReduceTasks, status.getMaxReduceTasks(), "supported max reduce task");
        result.append(" Map Tasks = ").append(status.getMapTasks());
        result.append(" Reduce Tasks = ").append(status.getReduceTasks());
        return result.toString();
    } catch (IOException e) {
        throw new SFHadoopException("Cannot connect to" + jobtracker, e, this);
    }
}