Example usage for org.apache.hadoop.mapred TaskCompletionEvent toString

Introduction

On this page you can find example usages of org.apache.hadoop.mapred.TaskCompletionEvent.toString().

Prototype

@Override
public String toString()

Usage
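
Before the project examples below, here is a minimal sketch of the call in isolation. It is an illustration, not code from any of the projects listed: it assumes the cluster configuration is on the classpath and that the id of an already-submitted job is passed as the first command-line argument.

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TaskCompletionEvent;

public class TaskEventToStringSketch {
    public static void main(String[] args) throws Exception {
        JobClient client = new JobClient(new JobConf());
        RunningJob job = client.getJob(JobID.forName(args[0]));
        if (job == null) {
            System.err.println("No such job: " + args[0]);
            return;
        }

        // getTaskCompletionEvents(0) fetches events from the beginning;
        // toString() prints each event's task attempt id and status
        for (TaskCompletionEvent event : job.getTaskCompletionEvents(0)) {
            System.out.println(event.toString());
        }
    }
}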

From source file:com.ibm.jaql.lang.expr.hadoop.Util.java

License:Apache License

public static void logAllTaskSyslogs(RunningJob rj, boolean onlySuccessful) throws Exception {
    String fetch = System.getProperty(FETCH_SYSLOG_PROP, "false");
    if (fetch.equals("false"))
        return;
    TaskCompletionEvent[] events = rj.getTaskCompletionEvents(0);
    for (TaskCompletionEvent event : events) {
        // skip unsuccessful attempts when only successful ones were requested
        if (onlySuccessful && event.getTaskStatus() != TaskCompletionEvent.Status.SUCCEEDED) {
            continue;
        }
        // print the syslog into the main log
        STATUS_LOG.info(event.toString());
        logTaskSyslogs(event.getTaskAttemptId(), event.getTaskTrackerHttp());
    }
}

From source file:io.hops.erasure_coding.MapReduceEncoder.java

License:Apache License

/**
 * Checks if the map-reduce job has completed.
 *
 * @return true if the job completed, false otherwise.
 * @throws java.io.IOException
 */
public boolean checkComplete() throws IOException {
    JobID jobID = runningJob.getID();
    if (runningJob.isComplete()) {
        // delete job directory
        final String jobdir = jobconf.get(JOB_DIR_LABEL);
        if (jobdir != null) {
            final Path jobpath = new Path(jobdir);
            jobpath.getFileSystem(jobconf).delete(jobpath, true);
        }
        if (runningJob.isSuccessful()) {
            LOG.info("Job Complete(Succeeded): " + jobID);
        } else {
            LOG.info("Job Complete(Failed): " + jobID);
        }
        cleanUp();
        return true;
    } else {
        String report = (" job " + jobID + " map " + StringUtils.formatPercent(runningJob.mapProgress(), 0)
                + " reduce " + StringUtils.formatPercent(runningJob.reduceProgress(), 0));
        if (!report.equals(lastReport)) {
            LOG.info(report);
            lastReport = report;
        }
        TaskCompletionEvent[] events = runningJob.getTaskCompletionEvents(jobEventCounter);
        jobEventCounter += events.length;
        for (TaskCompletionEvent event : events) {
            if (event.getTaskStatus() == TaskCompletionEvent.Status.FAILED) {
                LOG.info(" Job " + jobID + " " + event.toString());
            }
        }
        return false;
    }
}
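
Because jobEventCounter is carried across calls, each invocation fetches only the events that arrived since the previous poll. A sketch of how a caller might drive this, assuming a MapReduceEncoder instance named encoder (the poll interval is illustrative, and InterruptedException handling is elided):

// poll until the job finishes; failed-task events are logged
// via TaskCompletionEvent.toString() as they arrive
while (!encoder.checkComplete()) {
    Thread.sleep(5000);
}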

From source file:org.smartfrog.services.hadoop.mapreduce.submitter.SubmitterImpl.java

License:Open Source License

/**
 * Process task completions. The base class just logs it
 *
 * @param event event that has just finished
 */
protected void processTaskCompletionEvent(TaskCompletionEvent event) {
    sfLog().info(event.toString());
}

From source file:org.smartfrog.services.hadoop.mapreduce.submitter.SubmitterImpl.java

License:Open Source License

/**
 * Handle the end of the job
 *
 * @throws IOException on any failure
 */
private void processEndOfJob() throws IOException {
    boolean succeeded = runningJob.isSuccessful();
    int taskCount = 0;
    int failures = 0;
    String message = "Job " + runningJob.getJobName() + " ID=" + runningJob.getID().toString() + " has "
            + (succeeded ? " succeeded" : "failed");
    StringBuilder builder = new StringBuilder();

    TaskCompletionEvent[] history = runningJob.getTaskCompletionEvents(0);
    for (TaskCompletionEvent event : history) {
        taskCount++;
        builder.append(event.isMapTask() ? "\nMap: " : "\nReduce: ");
        builder.append(event.toString());
        if (event.getTaskStatus() != TaskCompletionEvent.Status.SUCCEEDED) {
            failures++;
            String[] diagnostics = runningJob.getTaskDiagnostics(event.getTaskAttemptId());
            for (String line : diagnostics) {
                builder.append("\n ");
                builder.append(line);
            }
        }
        builder.append("\n Tasks run :").append(taskCount).append(" failed: ").append(failures);
        if (!succeeded && dumpOnFailure) {
            builder.append("Job configuration used");
            builder.append(jobConf.dump());
        }
        message = message + builder.toString();

    }
    sfLog().info(message);
    if (terminateWhenJobFinishes) {
        TerminationRecord record = succeeded ? TerminationRecord.normal(message, sfCompleteNameSafe())
                : TerminationRecord.abnormal(message, sfCompleteNameSafe());
        new ComponentHelper(this).targetForTermination(record, false, false);
    }
}

From source file:weka.distributed.hadoop.HadoopJob.java

License:Open Source License

/**
 * Output task messages for the currently running job
 *
 * @param job the job to output messages for
 * @param startIndex the index to start outputting messages from
 * @return the number of task completion events output
 * @throws IOException if a problem occurs
 */
protected int logTaskMessages(Job job, int startIndex) throws IOException {
    TaskCompletionEvent[] tcEvents = job.getTaskCompletionEvents(startIndex);

    for (TaskCompletionEvent tcEvent : tcEvents) {
        logMessage(tcEvent.toString());
    }

    return tcEvents.length;
}
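
Since the method returns a count of events rather than an absolute index, the caller is expected to add the result to the previous start index before the next poll. A plausible calling pattern, where the field name numTaskMessages is an assumption and not part of the Weka source shown above:

// accumulate the count so the next poll starts after the
// events that have already been logged
numTaskMessages += logTaskMessages(job, numTaskMessages);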