Example usage for org.apache.hadoop.mapreduce TaskCounter MAP_OUTPUT_BYTES

Introduction

On this page you can find example usages of org.apache.hadoop.mapreduce TaskCounter MAP_OUTPUT_BYTES.

Prototype

TaskCounter MAP_OUTPUT_BYTES
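
TaskCounter is Hadoop's enum of built-in per-task counters; MAP_OUTPUT_BYTES tracks the number of bytes emitted by map tasks. As a minimal sketch (not taken from the usage examples below, and assuming you hold a handle to a submitted Job), the counter can be read through the standard Counters API:

import java.io.IOException;
import org.apache.hadoop.mapreduce.Counter;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskCounter;

public class MapOutputBytesExample {
    // Prints the total bytes written by all map tasks of the given job.
    public static void printMapOutputBytes(Job job) throws IOException {
        Counter counter = job.getCounters().findCounter(TaskCounter.MAP_OUTPUT_BYTES);
        System.out.println("Map output bytes: " + counter.getValue());
    }
}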

Usage

From source file: co.cask.cdap.app.mapreduce.LocalMRJobInfoFetcher.java

License: Apache License

/**
 * @param runId for which information will be returned.
 * @return a {@link MRJobInfo} containing information about a particular MapReduce program run.
 */
public MRJobInfo getMRJobInfo(Id.Run runId) {
    Preconditions.checkArgument(ProgramType.MAPREDUCE.equals(runId.getProgram().getType()));

    // baseTags has tag keys: ns.app.mr.runid
    Map<String, String> baseTags = Maps.newHashMap();
    baseTags.put(Constants.Metrics.Tag.NAMESPACE, runId.getNamespace().getId());
    baseTags.put(Constants.Metrics.Tag.APP, runId.getProgram().getApplicationId());
    baseTags.put(Constants.Metrics.Tag.MAPREDUCE, runId.getProgram().getId());
    baseTags.put(Constants.Metrics.Tag.RUN_ID, runId.getId());

    Map<String, String> mapTags = Maps.newHashMap(baseTags);
    mapTags.put(Constants.Metrics.Tag.MR_TASK_TYPE, MapReduceMetrics.TaskType.Mapper.getId());

    Map<String, String> reduceTags = Maps.newHashMap(baseTags);
    reduceTags.put(Constants.Metrics.Tag.MR_TASK_TYPE, MapReduceMetrics.TaskType.Reducer.getId());

    // Table of TaskId -> (CounterName -> CounterValue)
    Table<String, String, Long> mapTaskMetrics = HashBasedTable.create();
    Table<String, String, Long> reduceTaskMetrics = HashBasedTable.create();

    // Populate mapTaskMetrics and reduceTaskMetrics via the MetricStore; used to construct the MRTaskInfos below.
    Map<String, String> metricNamesToCounters = Maps.newHashMap();
    metricNamesToCounters.put(prependSystem(MapReduceMetrics.METRIC_TASK_INPUT_RECORDS),
            TaskCounter.MAP_INPUT_RECORDS.name());
    metricNamesToCounters.put(prependSystem(MapReduceMetrics.METRIC_TASK_OUTPUT_RECORDS),
            TaskCounter.MAP_OUTPUT_RECORDS.name());
    metricNamesToCounters.put(prependSystem(MapReduceMetrics.METRIC_TASK_BYTES),
            TaskCounter.MAP_OUTPUT_BYTES.name());
    metricNamesToCounters.put(prependSystem(MapReduceMetrics.METRIC_TASK_COMPLETION),
            MapReduceMetrics.METRIC_TASK_COMPLETION);

    // get metrics grouped by instance-id for the map tasks
    queryGroupedAggregates(mapTags, mapTaskMetrics, metricNamesToCounters);

    Map<String, Long> mapProgress = Maps.newHashMap();
    if (mapTaskMetrics.columnMap().containsKey(MapReduceMetrics.METRIC_TASK_COMPLETION)) {
        mapProgress = Maps
                .newHashMap(mapTaskMetrics.columnMap().remove(MapReduceMetrics.METRIC_TASK_COMPLETION));
    }

    Map<String, String> reduceMetricsToCounters = Maps.newHashMap();
    reduceMetricsToCounters.put(prependSystem(MapReduceMetrics.METRIC_TASK_INPUT_RECORDS),
            TaskCounter.REDUCE_INPUT_RECORDS.name());
    reduceMetricsToCounters.put(prependSystem(MapReduceMetrics.METRIC_TASK_OUTPUT_RECORDS),
            TaskCounter.REDUCE_OUTPUT_RECORDS.name());
    reduceMetricsToCounters.put(prependSystem(MapReduceMetrics.METRIC_TASK_COMPLETION),
            MapReduceMetrics.METRIC_TASK_COMPLETION);

    // get metrics grouped by instance-id for the reduce tasks
    queryGroupedAggregates(reduceTags, reduceTaskMetrics, reduceMetricsToCounters);

    Map<String, Long> reduceProgress = Maps.newHashMap();
    if (reduceTaskMetrics.columnMap().containsKey(MapReduceMetrics.METRIC_TASK_COMPLETION)) {
        reduceProgress = Maps
                .newHashMap(reduceTaskMetrics.columnMap().remove(MapReduceMetrics.METRIC_TASK_COMPLETION));
    }

    // Construct MRTaskInfos from the information we can get from the metrics system.
    List<MRTaskInfo> mapTaskInfos = Lists.newArrayList();
    for (Map.Entry<String, Map<String, Long>> taskEntry : mapTaskMetrics.rowMap().entrySet()) {
        String mapTaskId = taskEntry.getKey();
        mapTaskInfos.add(new MRTaskInfo(mapTaskId, null, null, null, mapProgress.get(mapTaskId) / 100.0F,
                taskEntry.getValue()));
    }

    List<MRTaskInfo> reduceTaskInfos = Lists.newArrayList();
    for (Map.Entry<String, Map<String, Long>> taskEntry : reduceTaskMetrics.rowMap().entrySet()) {
        String reduceTaskId = taskEntry.getKey();
        reduceTaskInfos.add(new MRTaskInfo(reduceTaskId, null, null, null,
                reduceProgress.get(reduceTaskId) / 100.0F, taskEntry.getValue()));
    }

    return getJobCounters(mapTags, reduceTags, mapTaskInfos, reduceTaskInfos);
}
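
The per-task counters above are accumulated in Guava Tables keyed by task ID (row) and counter name (column). A small standalone illustration of that structure, with hypothetical task IDs and values:

import java.util.Map;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;

public class TaskMetricsTableDemo {
    public static void main(String[] args) {
        // row key = task id, column key = counter name, value = counter value
        Table<String, String, Long> taskMetrics = HashBasedTable.create();
        taskMetrics.put("task_m_000000", "MAP_OUTPUT_BYTES", 2048L);  // hypothetical values
        taskMetrics.put("task_m_000000", "MAP_INPUT_RECORDS", 100L);

        // rowMap(): taskId -> (counterName -> value), as iterated when building the MRTaskInfos
        Map<String, Long> countersForTask = taskMetrics.rowMap().get("task_m_000000");
        System.out.println(countersForTask);

        // columnMap(): counterName -> (taskId -> value), as used to extract the completion metric
        System.out.println(taskMetrics.columnMap().get("MAP_OUTPUT_BYTES"));
    }
}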

From source file: co.cask.cdap.app.mapreduce.LocalMRJobInfoFetcher.java

License: Apache License

private MRJobInfo getJobCounters(Map<String, String> mapTags, Map<String, String> reduceTags,
        List<MRTaskInfo> mapTaskInfos, List<MRTaskInfo> reduceTaskInfos) {
    HashMap<String, Long> metrics = Maps.newHashMap();

    Map<String, String> mapMetricsToCounters = ImmutableMap.of(
            prependSystem(MapReduceMetrics.METRIC_INPUT_RECORDS), TaskCounter.MAP_INPUT_RECORDS.name(),
            prependSystem(MapReduceMetrics.METRIC_OUTPUT_RECORDS), TaskCounter.MAP_OUTPUT_RECORDS.name(),
            prependSystem(MapReduceMetrics.METRIC_BYTES), TaskCounter.MAP_OUTPUT_BYTES.name(),
            prependSystem(MapReduceMetrics.METRIC_COMPLETION), MapReduceMetrics.METRIC_COMPLETION);

    getAggregates(mapTags, mapMetricsToCounters, metrics);
    float mapProgress = metrics.remove(MapReduceMetrics.METRIC_COMPLETION) / 100.0F;

    Map<String, String> reduceMetricsToCounters = ImmutableMap.of(
            prependSystem(MapReduceMetrics.METRIC_INPUT_RECORDS), TaskCounter.REDUCE_INPUT_RECORDS.name(),
            prependSystem(MapReduceMetrics.METRIC_OUTPUT_RECORDS), TaskCounter.REDUCE_OUTPUT_RECORDS.name(),
            prependSystem(MapReduceMetrics.METRIC_COMPLETION), MapReduceMetrics.METRIC_COMPLETION);

    getAggregates(reduceTags, reduceMetricsToCounters, metrics);
    float reduceProgress = metrics.remove(MapReduceMetrics.METRIC_COMPLETION) / 100.0F;
    return new MRJobInfo(mapProgress, reduceProgress, metrics, mapTaskInfos, reduceTaskInfos, false);
}

From source file: co.cask.cdap.internal.app.runtime.batch.MapReduceMetricsWriter.java

License: Apache License

private void reportMapredStats(Counters jobCounters) throws IOException, InterruptedException {
    JobStatus jobStatus = jobConf.getStatus();
    // map stats
    float mapProgress = jobStatus.getMapProgress();
    int runningMappers = 0;
    int runningReducers = 0;
    for (TaskReport tr : jobConf.getTaskReports(TaskType.MAP)) {
        reportMapTaskMetrics(tr);
        runningMappers += tr.getRunningTaskAttemptIds().size();
    }
    for (TaskReport tr : jobConf.getTaskReports(TaskType.REDUCE)) {
        reportReduceTaskMetrics(tr);
        runningReducers += tr.getRunningTaskAttemptIds().size();
    }
    int memoryPerMapper = jobConf.getConfiguration().getInt(Job.MAP_MEMORY_MB, Job.DEFAULT_MAP_MEMORY_MB);
    int memoryPerReducer = jobConf.getConfiguration().getInt(Job.REDUCE_MEMORY_MB,
            Job.DEFAULT_REDUCE_MEMORY_MB);

    long mapInputRecords = getTaskCounter(jobCounters, TaskCounter.MAP_INPUT_RECORDS);
    long mapOutputRecords = getTaskCounter(jobCounters, TaskCounter.MAP_OUTPUT_RECORDS);
    long mapOutputBytes = getTaskCounter(jobCounters, TaskCounter.MAP_OUTPUT_BYTES);

    mapperMetrics.gauge(MapReduceMetrics.METRIC_COMPLETION, (long) (mapProgress * 100));
    mapperMetrics.gauge(MapReduceMetrics.METRIC_INPUT_RECORDS, mapInputRecords);
    mapperMetrics.gauge(MapReduceMetrics.METRIC_OUTPUT_RECORDS, mapOutputRecords);
    mapperMetrics.gauge(MapReduceMetrics.METRIC_BYTES, mapOutputBytes);
    mapperMetrics.gauge(MapReduceMetrics.METRIC_USED_CONTAINERS, runningMappers);
    mapperMetrics.gauge(MapReduceMetrics.METRIC_USED_MEMORY, runningMappers * memoryPerMapper);

    LOG.trace("Reporting mapper stats: (completion, containers, memory) = ({}, {}, {})",
            (int) (mapProgress * 100), runningMappers, runningMappers * memoryPerMapper);

    // reduce stats
    float reduceProgress = jobStatus.getReduceProgress();
    long reduceInputRecords = getTaskCounter(jobCounters, TaskCounter.REDUCE_INPUT_RECORDS);
    long reduceOutputRecords = getTaskCounter(jobCounters, TaskCounter.REDUCE_OUTPUT_RECORDS);

    reducerMetrics.gauge(MapReduceMetrics.METRIC_COMPLETION, (long) (reduceProgress * 100));
    reducerMetrics.gauge(MapReduceMetrics.METRIC_INPUT_RECORDS, reduceInputRecords);
    reducerMetrics.gauge(MapReduceMetrics.METRIC_OUTPUT_RECORDS, reduceOutputRecords);
    reducerMetrics.gauge(MapReduceMetrics.METRIC_USED_CONTAINERS, runningReducers);
    reducerMetrics.gauge(MapReduceMetrics.METRIC_USED_MEMORY, runningReducers * memoryPerReducer);

    LOG.trace("Reporting reducer stats: (completion, containers, memory) = ({}, {}, {})",
            (int) (reduceProgress * 100), runningReducers, runningReducers * memoryPerReducer);
}

From source file: co.cask.cdap.internal.app.runtime.batch.MapReduceMetricsWriter.java

License: Apache License

private void reportMapTaskMetrics(TaskReport taskReport) {
    Counters counters = taskReport.getTaskCounters();
    MetricsContext metricsContext = mapTaskMetricsCollectors.getUnchecked(taskReport.getTaskId());
    metricsContext.gauge(MapReduceMetrics.METRIC_TASK_INPUT_RECORDS,
            getTaskCounter(counters, TaskCounter.MAP_INPUT_RECORDS));
    metricsContext.gauge(MapReduceMetrics.METRIC_TASK_OUTPUT_RECORDS,
            getTaskCounter(counters, TaskCounter.MAP_OUTPUT_RECORDS));
    metricsContext.gauge(MapReduceMetrics.METRIC_TASK_BYTES,
            getTaskCounter(counters, TaskCounter.MAP_OUTPUT_BYTES));
    metricsContext.gauge(MapReduceMetrics.METRIC_TASK_COMPLETION, (long) (taskReport.getProgress() * 100));
}
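
Both MapReduceMetricsWriter examples call a getTaskCounter helper that is not shown on this page. A plausible sketch, assuming it simply resolves the built-in counter by group and name (the actual CDAP implementation may differ):

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskCounter;

public class TaskCounterHelper {
    // Hypothetical helper: looks up the TaskCounter in the given Counters by its
    // enum group and name; findCounter creates the counter (value 0) if absent.
    static long getTaskCounter(Counters counters, TaskCounter taskCounter) {
        return counters.findCounter(TaskCounter.class.getName(), taskCounter.name()).getValue();
    }
}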