Example usage for org.apache.hadoop.mapred TaskCompletionEvent isMapTask

Introduction

This page shows example usages of the method org.apache.hadoop.mapred.TaskCompletionEvent.isMapTask().

Prototype

public boolean isMapTask() 
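
A minimal sketch of the usual calling pattern may help before the full examples: completion events are fetched in batches from a RunningJob (assumed here to come from a JobClient lookup, not shown), and isMapTask() tells map attempts apart from reduce attempts.

import java.io.IOException;

import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.TaskCompletionEvent;

public class EventSketch {
    // Count failed map and reduce attempts for a running (or finished) job.
    public static void countFailedAttempts(RunningJob job) throws IOException {
        int mapFailures = 0, reduceFailures = 0;
        int startFrom = 0;
        while (true) {
            // Events arrive in batches; an empty batch means all have been seen.
            TaskCompletionEvent[] events = job.getTaskCompletionEvents(startFrom);
            if (events.length == 0)
                break;
            for (TaskCompletionEvent event : events) {
                if (event.getTaskStatus() != TaskCompletionEvent.Status.SUCCEEDED) {
                    if (event.isMapTask())
                        mapFailures++;
                    else
                        reduceFailures++;
                }
            }
            startFrom += events.length;
        }
        System.out.println("Failed map attempts: " + mapFailures
                + ", failed reduce attempts: " + reduceFailures);
    }
}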

Usage

From source file: edu.stolaf.cs.wmrserver.HadoopEngine.java

License: Apache License

private Pair<ArrayList<TaskLog>, ArrayList<TaskLog>> getLogsFromCompletionEvents(TaskCompletionEvent[] events) {
    ArrayList<TaskLog> mapFailures = new ArrayList<TaskLog>();
    ArrayList<TaskLog> reduceFailures = new ArrayList<TaskLog>();
    for (TaskCompletionEvent event : events) {
        if (event.getTaskStatus() != TaskCompletionEvent.Status.SUCCEEDED) {
            TaskLog log = new TaskLog(event.getTaskTrackerHttp(), event.getTaskAttemptId());
            if (event.isMapTask())
                mapFailures.add(log);
            else
                reduceFailures.add(log);
        }
    }

    return new Pair<ArrayList<TaskLog>, ArrayList<TaskLog>>(mapFailures, reduceFailures);
}
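
TaskLog and Pair above are helper types from the surrounding project, not Hadoop classes; the isMapTask() call is what routes each failed attempt into the map or reduce bucket. To compile the snippet standalone, minimal stand-ins along these lines (hypothetical, for illustration only) would do:

import org.apache.hadoop.mapred.TaskAttemptID;

// Hypothetical stand-ins for the project-local helper types used above.
class TaskLog {
    final String taskTrackerHttp; // HTTP address of the tracker that ran the attempt
    final TaskAttemptID attemptId;

    TaskLog(String taskTrackerHttp, TaskAttemptID attemptId) {
        this.taskTrackerHttp = taskTrackerHttp;
        this.attemptId = attemptId;
    }
}

class Pair<A, B> {
    final A first;
    final B second;

    Pair(A first, B second) {
        this.first = first;
        this.second = second;
    }
}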

From source file: org.estado.core.JobStatusChecker.java

License: Apache License

private List<TaskStatus> getTaskDetails(RunningJob job) {
    TaskCompletionEvent[] tasks = new TaskCompletionEvent[0];
    List<TaskStatus> taskStatusList = new ArrayList<TaskStatus>();
    try {
        tasks = job.getTaskCompletionEvents(0);

        for (TaskCompletionEvent task : tasks) {
            TaskStatus taskStatus = new TaskStatus();
            taskStatus.setTaskId(task.getTaskAttemptId().toString());
            taskStatus.setStatus(task.getTaskStatus().toString());
            taskStatus.setDuration(task.getTaskRunTime() * 1L); // widen the int run time to long
            taskStatus.setTaskType(task.isMapTask() ? "Map" : "Reduce");
            if (!task.getTaskStatus().equals(TaskCompletionEvent.Status.SUCCEEDED)) {
                String url = task.getTaskTrackerHttp() + "/tasklog?attemptid=" + task.getTaskAttemptId()
                        + "&all=true";
                URLConnection connection = new URL(url).openConnection();
                connection.connect();
                // The "\\A" delimiter makes next() return the whole stream as one token.
                Scanner s = new Scanner(connection.getInputStream()).useDelimiter("\\A");
                String log = s.hasNext() ? s.next() : "";
                s.close();
                taskStatus.setLog(log);
            }
            taskStatusList.add(taskStatus);
        }
    } catch (IOException e) {
        e.printStackTrace();
    }

    return taskStatusList;
}
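
Two details are worth noting here: the /tasklog URL is the pre-YARN TaskTracker HTTP endpoint for fetching a task attempt's logs, and the Scanner's "\\A" delimiter causes next() to return the entire stream as a single token, since \\A matches only the beginning of input.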

From source file: org.godhuli.rhipe.FileUtils.java

License: Apache License

public REXP getstatus(String jd, boolean geterrors) throws Exception {
    org.apache.hadoop.mapred.JobID jj = org.apache.hadoop.mapred.JobID.forName(jd);
    if (jj == null)
        throw new IOException("Jobtracker could not find jobID: " + jd);
    org.apache.hadoop.mapred.RunningJob rj = jclient.getJob(jj);
    if (rj == null)
        throw new IOException(
                "No such job: " + jd + " available, wrong job? or try the History Viewer (see the Web UI) ");
    String jobfile = rj.getJobFile();
    String jobname = rj.getJobName();
    // cfg.addResource(new Path(jobfile));
    org.apache.hadoop.mapred.Counters cc = rj.getCounters();
    long startsec = getStart(jclient, jj);
    double dura = ((double) System.currentTimeMillis() - startsec) / 1000;
    REXP ro = FileUtils.buildlistFromOldCounter(cc, dura);
    int jobs = rj.getJobState();
    String jobss = null;
    if (jobs == JobStatus.FAILED)
        jobss = "FAILED";
    else if (jobs == JobStatus.KILLED)
        jobss = "KILLED";
    else if (jobs == JobStatus.PREP)
        jobss = "PREP";
    else if (jobs == JobStatus.RUNNING)
        jobss = "RUNNING";
    else if (jobs == JobStatus.SUCCEEDED)
        jobss = "SUCCEEDED";
    float mapprog = rj.mapProgress(), reduprog = rj.reduceProgress();

    org.apache.hadoop.mapred.TaskReport[] maptr = jclient.getMapTaskReports(jj);
    org.apache.hadoop.mapred.TaskReport[] redtr = jclient.getReduceTaskReports(jj);

    int totalmaps = maptr.length, totalreds = redtr.length;
    int mappending = 0, redpending = 0, maprunning = 0, redrunning = 0, redfailed = 0, redkilled = 0,
            mapkilled = 0, mapfailed = 0, mapcomp = 0, redcomp = 0;
    for (int i = 0; i < maptr.length; i++) {
        TIPStatus t = maptr[i].getCurrentStatus();
        switch (t) {
        case COMPLETE:
            mapcomp++;
            break;
        case FAILED:
            mapfailed++;
            break;
        case PENDING:
            mappending++;
            break;
        case RUNNING:
            maprunning++;
            break;
        case KILLED:
            mapkilled++;
            break;
        }
    }
    for (int i = 0; i < redtr.length; i++) {
        TIPStatus t = redtr[i].getCurrentStatus();
        switch (t) {
        case COMPLETE:
            redcomp++;
            break;
        case FAILED:
            redfailed++;
            break;
        case PENDING:
            redpending++;
            break;
        case RUNNING:
            redrunning++;
            break;
        case KILLED:
            redkilled++;
            break;
        }
    }
    int reduceafails = 0, reduceakilled = 0, mapafails = 0, mapakilled = 0;
    int startfrom = 0;

    REXP.Builder errcontainer = REXP.newBuilder();
    errcontainer.setRclass(REXP.RClass.STRING);
    while (true) {
        org.apache.hadoop.mapred.TaskCompletionEvent[] events = rj.getTaskCompletionEvents(startfrom);
        for (int i = 0; i < events.length; i++) {
            org.apache.hadoop.mapred.TaskCompletionEvent e = events[i];
            switch (e.getTaskStatus()) {
            case KILLED:
                if (e.isMapTask()) {
                    mapakilled++;
                } else {
                    reduceakilled++;
                }
                break;
            case TIPFAILED:
            case FAILED:
                if (e.isMapTask()) {
                    mapafails++;
                } else {
                    reduceafails++;
                }
                if (geterrors) {
                    REXPProtos.STRING.Builder content = REXPProtos.STRING.newBuilder();
                    String[] s = rj.getTaskDiagnostics(e.getTaskAttemptId());
                    if (s != null && s.length > 0) {
                        content.setStrval(s[0]);
                        errcontainer.addStringValue(content.build());
                    }
                }
                break;
            }
        }
        startfrom += events.length;
        if (events.length == 0)
            break;
    }

    REXP.Builder thevals = REXP.newBuilder();
    thevals.setRclass(REXP.RClass.LIST);
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobss }));
    thevals.addRexpValue(RObjects.buildDoubleVector(new double[] { dura }));
    thevals.addRexpValue(RObjects.buildDoubleVector(new double[] { (double) mapprog, (double) reduprog }));
    thevals.addRexpValue(RObjects.buildIntVector(
            new int[] { totalmaps, mappending, maprunning, mapcomp, mapkilled, mapafails, mapakilled }));
    thevals.addRexpValue(RObjects.buildIntVector(
            new int[] { totalreds, redpending, redrunning, redcomp, redkilled, reduceafails, reduceakilled }));
    thevals.addRexpValue(ro);
    thevals.addRexpValue(errcontainer);
    thevals.addRexpValue(RObjects.makeStringVector(rj.getTrackingURL()));
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobname }));
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobfile }));
    return (thevals.build());
}
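
The while (true) loop shows the standard pagination idiom for completion events: each call to getTaskCompletionEvents(startfrom) returns the next batch, and an empty batch terminates the loop. Both FAILED and TIPFAILED events are counted as failures, and when geterrors is set, the first diagnostic line of each failed attempt is collected via getTaskDiagnostics().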

From source file: org.smartfrog.services.hadoop.mapreduce.submitter.SubmitterImpl.java

License: Open Source License

/**
 * Handle the end of the job.
 *
 * @throws IOException on any failure
 */
private void processEndOfJob() throws IOException {
    boolean succeeded = runningJob.isSuccessful();
    int taskCount = 0;
    int failures = 0;
    String message = "Job " + runningJob.getJobName() + " ID=" + runningJob.getID().toString() + " has "
            + (succeeded ? "succeeded" : "failed");
    StringBuilder builder = new StringBuilder();

    TaskCompletionEvent[] history = runningJob.getTaskCompletionEvents(0);
    for (TaskCompletionEvent event : history) {
        taskCount++;
        builder.append(event.isMapTask() ? "\nMap: " : "\nReduce: ");
        builder.append(event.toString());
        if (event.getTaskStatus() != TaskCompletionEvent.Status.SUCCEEDED) {
            failures++;
            String[] diagnostics = runningJob.getTaskDiagnostics(event.getTaskAttemptId());
            for (String line : diagnostics) {
                builder.append("\n ");
                builder.append(line);
            }
        }
    }

    builder.append("\n Tasks run: ").append(taskCount).append(" failed: ").append(failures);
    if (!succeeded && dumpOnFailure) {
        builder.append("\nJob configuration used:");
        builder.append(jobConf.dump());
    }
    message = message + builder.toString();
    sfLog().info(message);
    if (terminateWhenJobFinishes) {
        TerminationRecord record = succeeded ? TerminationRecord.normal(message, sfCompleteNameSafe())
                : TerminationRecord.abnormal(message, sfCompleteNameSafe());
        new ComponentHelper(this).targetForTermination(record, false, false);
    }
}
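
In this last example isMapTask() is used purely for labeling: each event's summary line is prefixed with "Map:" or "Reduce:", and diagnostics are appended for every attempt that did not succeed.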