Example usage for org.apache.hadoop.mapred JobClient getCleanupTaskReports

Introduction

On this page you can find example usages of org.apache.hadoop.mapred JobClient getCleanupTaskReports.

Prototype

public TaskReport[] getCleanupTaskReports(JobID jobId) throws IOException 

Document

Get the information of the current state of the cleanup tasks of a job.
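
A minimal, self-contained sketch (not taken from the examples below) showing a typical call. It assumes the cluster configuration is available on the classpath; the job ID string is a placeholder for a real one.

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.TaskReport;

public class CleanupTaskReportExample {
    public static void main(String[] args) throws IOException {
        // connects using the cluster settings found on the classpath
        JobClient jobClient = new JobClient(new JobConf());

        // placeholder job ID; substitute the ID of a real job
        JobID jobId = JobID.forName("job_201201010000_0001");

        // one TaskReport per cleanup task, with status, progress, and timing
        for (TaskReport report : jobClient.getCleanupTaskReports(jobId)) {
            System.out.printf("%s state=%s progress=%.2f%n", report.getTaskID(),
                    report.getCurrentStatus(), report.getProgress());
        }
    }
}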

Usage

From source file: cascading.flow.hadoop.HadoopStepStats.java

License: Open Source License

@Override
public void captureDetail() {
    getTaskStats().clear();

    JobClient jobClient = getJobClient();

    try {
        addTaskStats(HadoopTaskStats.TaskType.SETUP, jobClient.getSetupTaskReports(getRunningJob().getID()),
                true);
        addTaskStats(HadoopTaskStats.TaskType.MAPPER, jobClient.getMapTaskReports(getRunningJob().getID()),
                false);
        addTaskStats(HadoopTaskStats.TaskType.REDUCER, jobClient.getReduceTaskReports(getRunningJob().getID()),
                false);
        addTaskStats(HadoopTaskStats.TaskType.CLEANUP, jobClient.getCleanupTaskReports(getRunningJob().getID()),
                true);

        int count = 0;

        // page through the completion events; each RunningJob.getTaskCompletionEvents
        // call returns at most 10 events by default
        while (true) {
            TaskCompletionEvent[] events = getRunningJob().getTaskCompletionEvents(count);

            if (events.length == 0)
                break;

            addTaskStats(events);
            count += 10; // advance by the default batch size
        }
    } catch (IOException exception) {
        LOG.warn("unable to get task stats", exception);
    }
}

From source file: com.netflix.lipstick.pigtolipstick.BasicP2LClient.java

License: Apache License

protected void updatePlanStatusForCompletedJobId(P2jPlanStatus planStatus, String jobId) {
    LOG.info("Updating plan status for completed job " + jobId);
    updatePlanStatusForJobId(planStatus, jobId);
    JobClient jobClient = PigStats.get().getJobClient();
    JobID jobID = JobID.forName(jobId);
    long startTime = Long.MAX_VALUE;
    long finishTime = Long.MIN_VALUE;
    /* The JobClient doesn't expose a way to get the start and finish time
       of the overall job [1], sadly, so we pull out the minimum task start
       time and the maximum task finish time and use these to approximate them.

       [1] - Which is really dumb. The data obviously exists; it gets rendered
       in the job tracker via JobInProgress, but sadly this is internal to
       the remote job tracker, so we don't have access to it. */
    try {
        List<TaskReport> reports = Lists.newArrayList();
        reports.addAll(Arrays.asList(jobClient.getMapTaskReports(jobID)));
        reports.addAll(Arrays.asList(jobClient.getReduceTaskReports(jobID)));
        reports.addAll(Arrays.asList(jobClient.getCleanupTaskReports(jobID)));
        reports.addAll(Arrays.asList(jobClient.getSetupTaskReports(jobID)));
        for (TaskReport rpt : reports) {
            /* rpt.getStartTime() sometimes returns zero, meaning it does
               not know when it started, so we must skip such values
               or we'll lose the actual lowest start time */
            long taskStartTime = rpt.getStartTime();
            if (0 != taskStartTime) {
                startTime = Math.min(startTime, taskStartTime);
            }
            finishTime = Math.max(finishTime, rpt.getFinishTime());
        }
        P2jJobStatus jobStatus = jobIdToJobStatusMap.get(jobId);
        if (startTime < Long.MAX_VALUE) {
            jobStatus.setStartTime(startTime);
        }
        if (finishTime > Long.MIN_VALUE) {
            jobStatus.setFinishTime(finishTime);
        }
        LOG.info("Determined start and finish times for job " + jobId);
    } catch (IOException e) {
        LOG.error("Error getting job info.", e);
    }

}

From source file: datafu.hourglass.jobs.StagedOutputJob.java

License: Apache License

/**
 * Writes Hadoop counters and other task statistics to a file in the file system.
 *
 * @param fs the file system to write the counters file to
 * @throws IOException if the counters file cannot be written
 */
private void writeCounters(final FileSystem fs) throws IOException {
    final Path actualOutputPath = FileOutputFormat.getOutputPath(this);

    SimpleDateFormat timestampFormat = new SimpleDateFormat("yyyyMMddHHmmss");

    String suffix = timestampFormat.format(new Date());

    if (_countersParentPath != null) {
        if (!fs.exists(_countersParentPath)) {
            _log.info("Creating counter parent path " + _countersParentPath);
            fs.mkdirs(_countersParentPath, FsPermission.valueOf("-rwxrwxr-x"));
        }
        // make the name as unique as possible in this case because this may be a directory
        // where other counter files will be dropped
        _countersPath = new Path(_countersParentPath, ".counters." + suffix);
    } else {
        _countersPath = new Path(actualOutputPath, ".counters." + suffix);
    }

    _log.info(String.format("Writing counters to %s", _countersPath));
    FSDataOutputStream counterStream = fs.create(_countersPath);
    BufferedOutputStream buffer = new BufferedOutputStream(counterStream, 256 * 1024);
    OutputStreamWriter writer = new OutputStreamWriter(buffer);
    for (String groupName : getCounters().getGroupNames()) {
        for (Counter counter : getCounters().getGroup(groupName)) {
            writeAndLog(writer, String.format("%s=%d", counter.getName(), counter.getValue()));
        }
    }

    JobID jobID = this.getJobID();

    org.apache.hadoop.mapred.JobID oldJobId = new org.apache.hadoop.mapred.JobID(jobID.getJtIdentifier(),
            jobID.getId());

    long minStart = Long.MAX_VALUE;
    long maxFinish = 0;
    long setupStart = Long.MAX_VALUE;
    long cleanupFinish = 0;
    DescriptiveStatistics mapStats = new DescriptiveStatistics();
    DescriptiveStatistics reduceStats = new DescriptiveStatistics();
    boolean success = true;

    JobClient jobClient = new JobClient(this.conf);

    Map<String, String> taskIdToType = new HashMap<String, String>();

    TaskReport[] setupReports = jobClient.getSetupTaskReports(oldJobId);
    if (setupReports.length > 0) {
        _log.info("Processing setup reports");
        for (TaskReport report : setupReports) { // reuse the reports fetched above instead of a second RPC
            taskIdToType.put(report.getTaskID().toString(), "SETUP");
            if (report.getStartTime() == 0) {
                _log.warn("Skipping report with zero start time");
                continue;
            }
            setupStart = Math.min(setupStart, report.getStartTime());
        }
    } else {
        _log.error("No setup reports");
    }

    TaskReport[] mapReports = jobClient.getMapTaskReports(oldJobId);
    if (mapReports.length > 0) {
        _log.info("Processing map reports");
        for (TaskReport report : mapReports) {
            taskIdToType.put(report.getTaskID().toString(), "MAP");
            if (report.getFinishTime() == 0 || report.getStartTime() == 0) {
                _log.warn("Skipping report with zero start or finish time");
                continue;
            }
            minStart = Math.min(minStart, report.getStartTime());
            mapStats.addValue(report.getFinishTime() - report.getStartTime());
        }
    } else {
        _log.error("No map reports");
    }

    TaskReport[] reduceReports = jobClient.getReduceTaskReports(oldJobId);
    if (reduceReports.length > 0) {
        _log.info("Processing reduce reports");
        for (TaskReport report : reduceReports) {
            taskIdToType.put(report.getTaskID().toString(), "REDUCE");
            if (report.getFinishTime() == 0 || report.getStartTime() == 0) {
                _log.warn("Skipping report with zero start or finish time");
                continue;
            }
            maxFinish = Math.max(maxFinish, report.getFinishTime());
            reduceStats.addValue(report.getFinishTime() - report.getStartTime());
        }
    } else {
        _log.error("No reduce reports");
    }

    TaskReport[] cleanupReports = jobClient.getCleanupTaskReports(oldJobId);
    if (cleanupReports.length > 0) {
        _log.info("Processing cleanup reports");
        for (TaskReport report : cleanupReports) {
            taskIdToType.put(report.getTaskID().toString(), "CLEANUP");
            if (report.getFinishTime() == 0) {
                _log.warn("Skipping report with finish time of zero");
                continue;
            }
            cleanupFinish = Math.max(cleanupFinish, report.getFinishTime());
        }
    } else {
        _log.error("No cleanup reports");
    }

    if (minStart == Long.MAX_VALUE) {
        _log.error("Could not determine map-reduce start time");
        success = false;
    }
    if (maxFinish == 0) {
        _log.error("Could not determine map-reduce finish time");
        success = false;
    }

    if (setupStart == Long.MAX_VALUE) {
        _log.error("Could not determine setup start time");
        success = false;
    }
    if (cleanupFinish == 0) {
        _log.error("Could not determine cleanup finish time");
        success = false;
    }

    // Collect statistics on successful/failed/killed task attempts, categorized by setup/map/reduce/cleanup.
    // Unfortunately the job client doesn't have an easier way to get these statistics.
    Map<String, Integer> attemptStats = new HashMap<String, Integer>();
    _log.info("Processing task attempts");
    // the getTaskCompletionEvents helper is not shown here; a sketch follows this excerpt
    for (TaskCompletionEvent event : getTaskCompletionEvents(jobClient, oldJobId)) {
        String type = taskIdToType.get(event.getTaskAttemptId().getTaskID().toString());
        String status = event.getTaskStatus().toString();

        String key = String.format("%s_%s_ATTEMPTS", status, type);
        if (!attemptStats.containsKey(key)) {
            attemptStats.put(key, 0);
        }
        attemptStats.put(key, attemptStats.get(key) + 1);
    }

    if (success) {
        writeAndLog(writer, String.format("SETUP_START_TIME_MS=%d", setupStart));
        writeAndLog(writer, String.format("CLEANUP_FINISH_TIME_MS=%d", cleanupFinish));
        writeAndLog(writer, String.format("COMPLETE_WALL_CLOCK_TIME_MS=%d", cleanupFinish - setupStart));

        writeAndLog(writer, String.format("MAP_REDUCE_START_TIME_MS=%d", minStart));
        writeAndLog(writer, String.format("MAP_REDUCE_FINISH_TIME_MS=%d", maxFinish));
        writeAndLog(writer, String.format("MAP_REDUCE_WALL_CLOCK_TIME_MS=%d", maxFinish - minStart));

        writeAndLog(writer, String.format("MAP_TOTAL_TASKS=%d", (long) mapStats.getN()));
        writeAndLog(writer, String.format("MAP_MAX_TIME_MS=%d", (long) mapStats.getMax()));
        writeAndLog(writer, String.format("MAP_MIN_TIME_MS=%d", (long) mapStats.getMin()));
        writeAndLog(writer, String.format("MAP_AVG_TIME_MS=%d", (long) mapStats.getMean()));
        writeAndLog(writer, String.format("MAP_STD_TIME_MS=%d", (long) mapStats.getStandardDeviation()));
        writeAndLog(writer, String.format("MAP_SUM_TIME_MS=%d", (long) mapStats.getSum()));

        writeAndLog(writer, String.format("REDUCE_TOTAL_TASKS=%d", (long) reduceStats.getN()));
        writeAndLog(writer, String.format("REDUCE_MAX_TIME_MS=%d", (long) reduceStats.getMax()));
        writeAndLog(writer, String.format("REDUCE_MIN_TIME_MS=%d", (long) reduceStats.getMin()));
        writeAndLog(writer, String.format("REDUCE_AVG_TIME_MS=%d", (long) reduceStats.getMean()));
        writeAndLog(writer, String.format("REDUCE_STD_TIME_MS=%d", (long) reduceStats.getStandardDeviation()));
        writeAndLog(writer, String.format("REDUCE_SUM_TIME_MS=%d", (long) reduceStats.getSum()));

        writeAndLog(writer, String.format("MAP_REDUCE_SUM_TIME_MS=%d",
                (long) mapStats.getSum() + (long) reduceStats.getSum()));

        for (Map.Entry<String, Integer> attemptStat : attemptStats.entrySet()) {
            writeAndLog(writer, String.format("%s=%d", attemptStat.getKey(), attemptStat.getValue()));
        }
    }

    writer.close();
    buffer.close();
    counterStream.close();
}
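
The getTaskCompletionEvents(jobClient, oldJobId) helper called above is not part of this excerpt. A plausible sketch of such a helper, assuming it simply pages through RunningJob.getTaskCompletionEvents until an empty batch comes back:

// requires java.util.{List, ArrayList, Arrays} and
// org.apache.hadoop.mapred.{RunningJob, TaskCompletionEvent}
private static List<TaskCompletionEvent> getTaskCompletionEvents(JobClient jobClient,
        org.apache.hadoop.mapred.JobID jobId) throws IOException {
    List<TaskCompletionEvent> events = new ArrayList<TaskCompletionEvent>();
    RunningJob runningJob = jobClient.getJob(jobId);

    int startFrom = 0;
    while (true) {
        TaskCompletionEvent[] batch = runningJob.getTaskCompletionEvents(startFrom);
        if (batch.length == 0) {
            break;
        }
        events.addAll(Arrays.asList(batch));
        startFrom += batch.length; // continue from the end of the last batch
    }
    return events;
}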

From source file: org.estado.core.JobStatusChecker.java

License: Apache License

public void checkStatus() {
    List<org.estado.spi.JobStatus> jobStatusList = new ArrayList<org.estado.spi.JobStatus>();

    try {
        Configuration conf = new Configuration();
        JobClient client = new JobClient(new JobConf(conf));
        JobStatus[] jobStatuses = client.getAllJobs();
        showFilter();

        int jobCount = 0;
        for (JobStatus jobStatus : jobStatuses) {
            Long lastTaskEndTime = 0L;
            TaskReport[] mapReports = client.getMapTaskReports(jobStatus.getJobID());
            for (TaskReport r : mapReports) {
                if (lastTaskEndTime < r.getFinishTime()) {
                    lastTaskEndTime = r.getFinishTime();
                }
            }
            TaskReport[] reduceReports = client.getReduceTaskReports(jobStatus.getJobID());
            for (TaskReport r : reduceReports) {
                if (lastTaskEndTime < r.getFinishTime()) {
                    lastTaskEndTime = r.getFinishTime();
                }
            }
            // fetched for completeness; the returned setup and cleanup reports are not used below
            client.getSetupTaskReports(jobStatus.getJobID());
            client.getCleanupTaskReports(jobStatus.getJobID());

            String jobId = jobStatus.getJobID().toString();
            String jobName = client.getJob(jobStatus.getJobID()).getJobName();
            Long startTime = jobStatus.getStartTime();
            String user = jobStatus.getUsername();
            int mapProgress = (int) (jobStatus.mapProgress() * 100);
            int reduceProgress = (int) (jobStatus.reduceProgress() * 100);
            org.estado.spi.JobStatus jobStat = null;
            ++jobCount;

            int runState = jobStatus.getRunState();
            switch (runState) {
            case JobStatus.SUCCEEDED:
                if (filter.contains("s")) {
                    Long duration = lastTaskEndTime - jobStatus.getStartTime();
                    jobStat = new org.estado.spi.JobStatus(cluster, jobId, jobName, null, user, startTime,
                            lastTaskEndTime, duration, mapProgress, reduceProgress, "completed");
                    ++sCount;
                }
                break;

            case JobStatus.RUNNING:
                if (filter.contains("r")) {
                    long duration = System.currentTimeMillis() - jobStatus.getStartTime();
                    jobStat = new org.estado.spi.JobStatus(cluster, jobId, jobName, null, user, startTime,
                            lastTaskEndTime, duration, mapProgress, reduceProgress, "running");
                    ++rCount;
                }
                break;

            case JobStatus.FAILED:
                if (filter.contains("f")) {
                    long duration = lastTaskEndTime - jobStatus.getStartTime();
                    jobStat = new org.estado.spi.JobStatus(cluster, jobId, jobName, null, user, startTime,
                            lastTaskEndTime, duration, mapProgress, reduceProgress, "failed");
                    RunningJob job = client.getJob(jobStatus.getJobID());
                    jobStat.setJobTasks(getTaskDetails(job));
                    ++fCount;
                }
                break;

            case JobStatus.PREP:
                if (filter.contains("p")) {
                    jobStat = new org.estado.spi.JobStatus(cluster, jobId, jobName, null, user, null, null,
                            null, 0, 0, "preparing");
                    ++pCount;
                }
                break;

            case JobStatus.KILLED:
                if (filter.contains("k")) {
                    long duration = lastTaskEndTime - jobStatus.getStartTime();

                    jobStat = new org.estado.spi.JobStatus(cluster, jobId, jobName, null, user, startTime,
                            lastTaskEndTime, duration, mapProgress, reduceProgress, "killed");

                    RunningJob job = client.getJob(jobStatus.getJobID());
                    jobStat.setJobTasks(getTaskDetails(job));
                    ++kCount;
                }
                break;
            }

            if (jobStat != null) { // only track jobs that matched the filter, avoiding NPEs below
                jobStatusList.add(jobStat);
            }
        }

        //get counters
        for (org.estado.spi.JobStatus jobStat : jobStatusList) {
            if (!jobStat.getStatus().equals("preparing")) {
                List<JobCounterGroup> counterGroups = getJobCounters(jobStat.getJobId());
                jobStat.setCounterGroups(counterGroups);

                //additional data from counters
                setJobInfo(jobStat);
            }
        }

        //publish to all consumers
        for (JobStatusConsumer consumer : consumers) {
            consumer.handle(jobStatusList);
        }

        showJobCounts();
    } catch (Exception ex) {
        System.out.println("Jobs status checker failed" + ex.getMessage());
    }

}