Example usage for org.apache.hadoop.mapred ClusterStatus getTTExpiryInterval

Introduction

On this page you will find example usage of org.apache.hadoop.mapred.ClusterStatus.getTTExpiryInterval.

Prototype

public long getTTExpiryInterval() 

Document

Get the tasktracker expiry interval for the cluster.
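
For quick reference, here is a minimal, self-contained sketch of calling getTTExpiryInterval on its own, assuming a classic (MR1) cluster reachable through a JobConf; the class name TTExpiryIntervalExample and the JobTracker address are placeholders. The returned value is in milliseconds (on MR1 clusters it typically comes from the mapred.tasktracker.expiry.interval property).

import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class TTExpiryIntervalExample {
    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf();
        // Placeholder JobTracker address; adjust for your cluster.
        conf.set("mapred.job.tracker", "jobtracker.example.com:8021");

        JobClient jobClient = new JobClient(conf);
        try {
            // Ask the JobTracker for a snapshot of the cluster state.
            ClusterStatus status = jobClient.getClusterStatus();
            // Interval (ms) after which a silent tasktracker is declared lost.
            long expiryMs = status.getTTExpiryInterval();
            System.out.println("TaskTracker expiry interval: " + expiryMs + " ms");
        } finally {
            jobClient.close();
        }
    }
}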

Usage

From source file: com.impetus.ankush2.hadoop.monitor.JobStatusProvider.java

License: Open Source License

/**
 * Gets the Hadoop job metrics.
 *
 * @return a map of Hadoop job metric names to their values
 */
public Map<String, Object> getJobMetrics() throws AnkushException {
    String errMsg = "Unable to fetch Hadoop metrics: could not connect to Hadoop JobClient.";
    try {
        // Proceed only if the JobClient has been initialized.
        if (jobClient != null) {
            // Creating an empty map for storing Hadoop Job Metrics information
            LinkedHashMap<String, Object> hadoopJobMetrics = new LinkedHashMap<String, Object>();
            try {
                // Checking for null jobClient
                if (jobClient != null) {
                    LOG.info("Fetching Hadoop Metrics Information... " + jobClient);
                    // Get status information about the Map-Reduce cluster.
                    ClusterStatus clusterStatus = jobClient.getClusterStatus();
                    // Get the current state of the JobTracker.
                    State jobTrackerState = clusterStatus.getJobTrackerState();
                    // Get the number of currently running map tasks in the cluster.
                    int mapTasks = clusterStatus.getMapTasks();
                    // Get the maximum capacity for running map tasks in the
                    // cluster.
                    int maxMapTasks = clusterStatus.getMaxMapTasks();
                    // Get the maximum capacity for running reduce tasks in the
                    // cluster.
                    int maxReduceTasks = clusterStatus.getMaxReduceTasks();
                    // Get the number of currently running reduce tasks in the
                    // cluster.
                    int reduceTasks = clusterStatus.getReduceTasks();
                    // Get the number of active task trackers in the cluster.
                    int taskTrackers = clusterStatus.getTaskTrackers();
                    // Get the number of blacklisted task trackers in the cluster.
                    int blackListedTrackers = clusterStatus.getBlacklistedTrackers();

                    // Interval (in milliseconds) after which a tasktracker that has
                    // stopped sending heartbeats is declared lost by the JobTracker.
                    long ttExpiryInterval = clusterStatus.getTTExpiryInterval();

                    int defaultMaps = 0;
                    int defaultReduces = 0;
                    try {
                        defaultMaps = jobClient.getDefaultMaps();
                        defaultReduces = jobClient.getDefaultReduces();
                    } catch (Exception e) {
                        // Leave the defaults at 0 if they cannot be fetched.
                    }

                    // Putting Hadoop Metrics information in a map
                    hadoopJobMetrics.put("jobTrackerState", String.valueOf(jobTrackerState));
                    hadoopJobMetrics.put("defaultMaps", String.valueOf(defaultMaps));
                    hadoopJobMetrics.put("defaultReduces", String.valueOf(defaultReduces));
                    hadoopJobMetrics.put("mapTasks", String.valueOf(mapTasks));
                    hadoopJobMetrics.put("reduceTasks", String.valueOf(reduceTasks));
                    hadoopJobMetrics.put("maxMapTasksCapacity", String.valueOf(maxMapTasks));
                    hadoopJobMetrics.put("maxReduceTasksCapacity", String.valueOf(maxReduceTasks));
                    hadoopJobMetrics.put("taskTrackers", String.valueOf(taskTrackers));
                    hadoopJobMetrics.put("blackListedTrackers", String.valueOf(blackListedTrackers));

                    hadoopJobMetrics.put("taskTrackerExpiryInterval", String.valueOf(ttExpiryInterval));

                    hadoopJobMetrics.put("schedulerType", getSchedulerType());

                    int totalJobSubmission = 0;
                    // Get the jobs that have been submitted.
                    JobStatus[] jobStatus = jobClient.getAllJobs();
                    if (jobStatus != null) {
                        totalJobSubmission = jobStatus.length;
                    }

                    List<Map<String, Object>> allJobsList = listAllJobs();
                    int totalJobRunning = getRunningJobList(allJobsList).size();
                    int completedJobs = getCompletedJobs(allJobsList).size();

                    hadoopJobMetrics.put("totalJobSubmission", String.valueOf(totalJobSubmission));
                    hadoopJobMetrics.put("totalJobRunning", String.valueOf(totalJobRunning));
                    hadoopJobMetrics.put("totalJobsCompleted", String.valueOf(completedJobs));
                } else {
                    HadoopUtils.addAndLogError(this.LOG, this.clusterConfig, errMsg,
                            Constant.Component.Name.HADOOP);
                    throw new AnkushException(errMsg);
                }
            } catch (AnkushException e) {
                throw e;
            } catch (Exception e) {
                HadoopUtils.addAndLogError(this.LOG, this.clusterConfig, errMsg, Constant.Component.Name.HADOOP,
                        e);
                throw new AnkushException(errMsg);
            }
            return hadoopJobMetrics;
        } else {
            throw new AnkushException(errMsg);
        }
    } catch (AnkushException e) {
        throw e;
    } catch (Exception e) {
        HadoopUtils.addAndLogError(this.LOG, this.clusterConfig, errMsg, Constant.Component.Name.HADOOP, e);
        throw new AnkushException(errMsg);
    }
}