Example usage for org.apache.hadoop.mapred ClusterStatus getBlacklistedTrackers

Introduction

On this page you can find example usage for org.apache.hadoop.mapred ClusterStatus.getBlacklistedTrackers.

Prototype

public int getBlacklistedTrackers() 

Document

Get the number of blacklisted task trackers in the cluster.
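For orientation, here is a minimal, self-contained sketch of the typical call path: build a JobClient from a JobConf, fetch the ClusterStatus, and read the blacklisted-tracker count. This is an illustrative sketch, assuming the Hadoop configuration files are on the classpath; it is not taken from the examples below.

import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class BlacklistedTrackerCount {
    public static void main(String[] args) throws Exception {
        // JobConf picks up core-site.xml / mapred-site.xml from the classpath.
        JobConf conf = new JobConf();
        JobClient jobClient = new JobClient(conf);

        // Summary status is enough for the counters; passing true instead
        // requests detailed status, which also carries tracker names.
        ClusterStatus status = jobClient.getClusterStatus();
        System.out.println("Blacklisted task trackers: " + status.getBlacklistedTrackers());

        jobClient.close();
    }
}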

Usage

From source file:com.impetus.ankush2.hadoop.monitor.JobStatusProvider.java

License:Open Source License

/**
 * Gets the job metrics.
 *
 * @return a map of Hadoop job metric names to their values
 */
public Map<String, Object> getJobMetrics() throws AnkushException {
    String errMsg = "Unable to fetch Hadoop metrics; could not connect to Hadoop JobClient.";
    try {
        // Proceed only if the JobClient is available
        if (jobClient != null) {
            // Creating an empty map for storing Hadoop Job Metrics information
            LinkedHashMap<String, Object> hadoopJobMetrics = new LinkedHashMap<String, Object>();
            try {
                // Re-check that the JobClient is still available
                if (jobClient != null) {
                    LOG.info("Fetching Hadoop metrics information: " + jobClient);
                    // Get status information about the Map-Reduce cluster.
                    ClusterStatus clusterStatus = jobClient.getClusterStatus();
                    // Get the current state of the JobTracker.
                    State jobTrackerState = clusterStatus.getJobTrackerState();
                    // Get the number of currently running map tasks in the cluster.
                    int mapTasks = clusterStatus.getMapTasks();
                    // Get the maximum capacity for running map tasks in the
                    // cluster.
                    int maxMapTasks = clusterStatus.getMaxMapTasks();
                    // Get the maximum capacity for running reduce tasks in the
                    // cluster.
                    int maxReduceTasks = clusterStatus.getMaxReduceTasks();
                    // Get the number of currently running reduce tasks in the
                    // cluster.
                    int reduceTasks = clusterStatus.getReduceTasks();
                    // Get the number of active task trackers in the cluster.
                    int taskTrackers = clusterStatus.getTaskTrackers();
                    // Get the number of blacklisted task trackers in the cluster.
                    int blackListedTrackers = clusterStatus.getBlacklistedTrackers();

                    long ttExpiryInterval = clusterStatus.getTTExpiryInterval();

                    int defaultMaps = 0;
                    int defaultReduces = 0;
                    try {
                        defaultMaps = jobClient.getDefaultMaps();
                        defaultReduces = jobClient.getDefaultReduces();
                    } catch (Exception e) {
                        // Fall back to the default of 0 if these lookups fail.
                    }

                    // Putting Hadoop Metrics information in a map
                    hadoopJobMetrics.put("jobTrackerState", String.valueOf(jobTrackerState));
                    hadoopJobMetrics.put("defaultMaps", String.valueOf(defaultMaps));
                    hadoopJobMetrics.put("defaultReduces", String.valueOf(defaultReduces));
                    hadoopJobMetrics.put("mapTasks", String.valueOf(mapTasks));
                    hadoopJobMetrics.put("reduceTasks", String.valueOf(reduceTasks));
                    hadoopJobMetrics.put("maxMapTasksCapacity", String.valueOf(maxMapTasks));
                    hadoopJobMetrics.put("maxReduceTasksCapacity", String.valueOf(maxReduceTasks));
                    hadoopJobMetrics.put("taskTrackers", String.valueOf(taskTrackers));
                    hadoopJobMetrics.put("blackListedTrackers", String.valueOf(blackListedTrackers));

                    hadoopJobMetrics.put("taskTrackerExpiryInterval", String.valueOf(ttExpiryInterval));

                    hadoopJobMetrics.put("schedulerType", getSchedulerType());

                    int totalJobSubmission = 0;
                    // Get the jobs that have been submitted.
                    JobStatus[] jobStatus = jobClient.getAllJobs();
                    if (jobStatus != null) {
                        totalJobSubmission = jobStatus.length;
                    }

                    List<Map<String, Object>> allJobsList = listAllJobs();
                    int totalJobRunning = getRunningJobList(allJobsList).size();
                    int completedJobs = getCompletedJobs(allJobsList).size();

                    hadoopJobMetrics.put("totalJobSubmission", String.valueOf(totalJobSubmission));
                    hadoopJobMetrics.put("totalJobRunning", String.valueOf(totalJobRunning));
                    hadoopJobMetrics.put("totalJobsCompleted", String.valueOf(completedJobs));
                } else {
                    HadoopUtils.addAndLogError(this.LOG, this.clusterConfig, errMsg,
                            Constant.Component.Name.HADOOP);
                    throw new AnkushException(errMsg);
                }
            } catch (AnkushException e) {
                throw e;
            } catch (Exception e) {
                HadoopUtils.addAndLogError(this.LOG, this.clusterConfig, errMsg, Constant.Component.Name.HADOOP,
                        e);
                throw new AnkushException(errMsg);
            }
            return hadoopJobMetrics;
        } else {
            throw new AnkushException(errMsg);
        }
    } catch (AnkushException e) {
        throw e;
    } catch (Exception e) {
        HadoopUtils.addAndLogError(this.LOG, this.clusterConfig, errMsg, Constant.Component.Name.HADOOP, e);
        throw new AnkushException(errMsg);
    }
}

From source file:eu.scape_project.tb.hadoopjobtracker.HadoobJobTrackerClient.java

License:Apache License

public HDClusterStatus ClusterStatus() {
    HDClusterStatus clusterStatus = new HDClusterStatus();
    try {
        ClusterStatus currentClusterStatus = myJobClient.getClusterStatus(true);

        String jobTrackerStatus = currentClusterStatus.getJobTrackerStatus().toString();
        int availableTaskTrackers = currentClusterStatus.getTaskTrackers();
        int blackTaskTrackers = currentClusterStatus.getBlacklistedTrackers();
        int maxMapTasks = currentClusterStatus.getMaxMapTasks();
        int maxReduceTasks = currentClusterStatus.getMaxReduceTasks();

        logger.info("JobTrackerState: " + jobTrackerStatus);
        logger.info("Active TaskTrackers: " + availableTaskTrackers);
        logger.info("BlackListed TaskTrackers: " + blackTaskTrackers);
        logger.info("Max MAP Tasks: " + maxMapTasks);
        logger.info("Max REDUCE Tasks: " + maxReduceTasks);

        clusterStatus.setJobTrackerState(jobTrackerStatus);
        clusterStatus.setTaskTrackersAvailable(availableTaskTrackers);
        clusterStatus.setTaskTrackersBlacklisted(blackTaskTrackers);
        clusterStatus.setMaxMapTaskSlots(maxMapTasks);
        clusterStatus.setMaxReduceTaskSlots(maxReduceTasks);

    } catch (IOException ex) {
        logger.error("Error filling HDClusterStatus. ERR: " + ex.getMessage());
    }

    return clusterStatus;
}
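Because this example asks for detailed status (getClusterStatus(true)), the returned ClusterStatus carries tracker names, not just counts. The sketch below pairs the count with getBlacklistedTrackerNames(); it assumes an already-constructed JobClient, and the class and method names here are illustrative.

import java.io.IOException;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;

public final class BlacklistedTrackerNames {
    /** Prints the blacklisted-tracker count and each tracker by name. */
    static void logBlacklisted(JobClient jobClient) throws IOException {
        // Passing true requests detailed status, which populates tracker names.
        ClusterStatus detailed = jobClient.getClusterStatus(true);
        System.out.println("Blacklisted count: " + detailed.getBlacklistedTrackers());
        for (String name : detailed.getBlacklistedTrackerNames()) {
            System.out.println("Blacklisted tracker: " + name);
        }
    }
}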

From source file:org.apache.accumulo.server.test.continuous.ContinuousStatsCollector.java

License:Apache License

private static String getMRStats() throws Exception {
    Configuration conf = CachedConfiguration.getInstance();
    @SuppressWarnings("deprecation")
    JobClient jc = new JobClient(new org.apache.hadoop.mapred.JobConf(conf));

    ClusterStatus cs = jc.getClusterStatus(false);

    return "" + cs.getMapTasks() + " " + cs.getMaxMapTasks() + " " + cs.getReduceTasks() + " "
            + cs.getMaxReduceTasks() + " " + cs.getTaskTrackers() + " " + cs.getBlacklistedTrackers();

}

From source file:org.apache.accumulo.test.continuous.ContinuousStatsCollector.java

License:Apache License

private static String getMRStats() throws Exception {
    Configuration conf = CachedConfiguration.getInstance();
    // No alternatives for hadoop 20
    JobClient jc = new JobClient(new org.apache.hadoop.mapred.JobConf(conf));

    ClusterStatus cs = jc.getClusterStatus(false);

    return "" + cs.getMapTasks() + " " + cs.getMaxMapTasks() + " " + cs.getReduceTasks() + " "
            + cs.getMaxReduceTasks() + " " + cs.getTaskTrackers() + " " + cs.getBlacklistedTrackers();

}
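The comment in the example above points out that Hadoop 0.20 left no alternative to the deprecated JobClient constructor. On Hadoop 2.x and later, the same count is exposed through the new org.apache.hadoop.mapreduce API as ClusterMetrics.getBlackListedTaskTrackerCount(). A minimal sketch, assuming the cluster configuration is on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.ClusterMetrics;

public class BlacklistedViaNewApi {
    public static void main(String[] args) throws Exception {
        Cluster cluster = new Cluster(new Configuration());
        // ClusterMetrics is the new-API counterpart of mapred.ClusterStatus.
        ClusterMetrics metrics = cluster.getClusterStatus();
        System.out.println("Blacklisted trackers: " + metrics.getBlackListedTaskTrackerCount());
        cluster.close();
    }
}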