Example usage for org.apache.hadoop.mapred.jobcontrol Job SUCCESS

Introduction

This page lists usage examples for the SUCCESS state constant of org.apache.hadoop.mapred.jobcontrol.Job.

Prototype

public static final int SUCCESS

SUCCESS is declared as a job-state constant in org.apache.hadoop.mapred.jobcontrol.Job; see the Hadoop source for its declaration.
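
Before the full examples below, here is a minimal sketch of the typical pattern: compare the value returned by Job.getState() against Job.SUCCESS. The class and helper names are illustrative only, not part of the Hadoop API.

import org.apache.hadoop.mapred.jobcontrol.Job;

public class JobSuccessCheck {

    // Hypothetical helper: returns true once the jobcontrol Job has
    // reached the terminal SUCCESS state. getState() returns one of the
    // int constants WAITING, READY, RUNNING, SUCCESS, FAILED, or
    // DEPENDENT_FAILED declared on Job.
    public static boolean hasSucceeded(Job job) {
        return job.getState() == Job.SUCCESS;
    }
}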

Usage

From source file: com.ebay.erl.mobius.core.MobiusJobRunner.java

License: Apache License

private static String jobToString(Job aJob) {
    StringBuffer sb = new StringBuffer();
    sb.append("job mapred id:\t")
            .append(aJob.getAssignedJobID() == null ? "unassigned" : aJob.getAssignedJobID().toString())
            .append("\t");
    sb.append("job name: ").append(aJob.getJobName()).append("\n");
    String state = "Unset";
    switch (aJob.getState()) {
    case Job.DEPENDENT_FAILED:
        state = "DEPENDENT_FAILED";
        break;
    case Job.FAILED:
        state = "FAILED";
        break;
    case Job.READY:
        state = "READY";
        break;
    case Job.RUNNING:
        state = "RUNNING";
        break;
    case Job.SUCCESS:
        state = "SUCCESS";
        break;
    case Job.WAITING:
        state = "WAITING";
        break;
    }

    sb.append("job state:\t").append(state).append("\n");

    sb.append("job id:\t").append(aJob.getJobID()).append("\n");

    sb.append("job message:\t").append(aJob.getMessage()).append("\n");

    //      commented out on March 30, 2012, as an NPE was thrown on Apollo.
    //
    //      if ( aJob.getDependingJobs () == null || aJob.getDependingJobs ().size () == 0 )
    //      {
    //         sb.append ("job has no depending job:\t").append ("\n");
    //      } else
    //      {
    //         sb.append ("job has ").append (aJob.getDependingJobs ().size ()).append (" dependeng jobs:\n");
    //         for ( int i = 0; i < aJob.getDependingJobs ().size (); i++ )
    //         {
    //            sb.append ("\t depending job ").append (i).append (":\t");
    //            sb.append ((aJob.getDependingJobs ().get (i)).getJobName ()).append ("\n");
    //         }
    //      }
    return sb.toString().trim();
}
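
The switch above maps each raw int state constant to a readable label. As a hedged companion sketch, a formatter like this might be driven from a running JobControl roughly as follows; the thread handling and polling interval are assumptions, not taken from MobiusJobRunner:

import org.apache.hadoop.mapred.jobcontrol.Job;
import org.apache.hadoop.mapred.jobcontrol.JobControl;

public class JobControlDriver {

    // Hypothetical driver: run a JobControl on a background thread,
    // wait for all jobs to finish, then report each job's outcome.
    public static void runAndReport(JobControl control) throws InterruptedException {
        Thread runner = new Thread(control, "jobcontrol");
        runner.setDaemon(true);
        runner.start();
        while (!control.allFinished()) {
            Thread.sleep(5000); // polling interval is an arbitrary choice
        }
        for (Job job : control.getSuccessfulJobs()) {
            System.out.println("SUCCESS: " + job.getJobName());
        }
        for (Job job : control.getFailedJobs()) {
            System.out.println("FAILED: " + job.getJobName() + " - " + job.getMessage());
        }
        control.stop();
    }
}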

From source file: org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.Launcher.java

License: Apache License

protected void getStats(Job job, JobClient jobClient, boolean errNotDbg, PigContext pigContext)
        throws Exception {
    JobID MRJobID = job.getAssignedJobID();
    String jobMessage = job.getMessage();
    Exception backendException = null;
    if (MRJobID == null) {
        try {
            LogUtils.writeLog("Backend error message during job submission", jobMessage,
                    pigContext.getProperties().getProperty("pig.logfile"), log);
            backendException = getExceptionFromString(jobMessage);
        } catch (Exception e) {
            int errCode = 2997;
            String msg = "Unable to recreate exception from backend error: " + jobMessage;
            throw new ExecException(msg, errCode, PigException.BUG);
        }
        throw backendException;
    }
    try {
        TaskReport[] mapRep = jobClient.getMapTaskReports(MRJobID);
        getErrorMessages(mapRep, "map", errNotDbg, pigContext);
        totalHadoopTimeSpent += computeTimeSpent(mapRep);
        mapRep = null;
        TaskReport[] redRep = jobClient.getReduceTaskReports(MRJobID);
        getErrorMessages(redRep, "reduce", errNotDbg, pigContext);
        totalHadoopTimeSpent += computeTimeSpent(redRep);
        redRep = null;
    } catch (IOException e) {
        if (job.getState() == Job.SUCCESS) {
            // if the job succeeded, let the user know that
            // we were unable to get statistics
            log.warn("Unable to get job related diagnostics");
        } else {
            throw e;
        }
    }
}
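
The catch block above is the idiom that recurs throughout this page: a failure while fetching task reports is only fatal when the job itself did not reach Job.SUCCESS. A distilled sketch of that pattern follows; fetchReports is a hypothetical stand-in for the JobClient task-report calls, not a real API:

import java.io.IOException;

import org.apache.hadoop.mapred.jobcontrol.Job;

public class DiagnosticsFetcher {

    // Hypothetical: downgrade diagnostic failures to a warning for jobs
    // that already reached Job.SUCCESS; rethrow for jobs that did not.
    public static void fetchDiagnostics(Job job) throws IOException {
        try {
            fetchReports(job);
        } catch (IOException e) {
            if (job.getState() == Job.SUCCESS) {
                System.err.println("Unable to get job related diagnostics");
            } else {
                throw e;
            }
        }
    }

    // Stand-in for jobClient.getMapTaskReports(...) / getReduceTaskReports(...).
    private static void fetchReports(Job job) throws IOException {
    }
}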

From source file: org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.Launcher.java

License: Apache License

/**
 * Returns the progress of a Job j which is part of a submitted
 * JobControl object. The progress is for this Job. So it has to
 * be scaled down by the number of jobs present in the
 * JobControl.
 * @param j - The Job for which progress is required
 * @param jobClient - the JobClient to which it has been submitted
 * @return Returns the percentage progress of this Job
 * @throws IOException
 */
protected double progressOfRunningJob(Job j, JobClient jobClient) throws IOException {
    JobID mrJobID = j.getAssignedJobID();
    RunningJob rj = jobClient.getJob(mrJobID);
    if (rj == null && j.getState() == Job.SUCCESS)
        return 1;
    else if (rj == null)
        return 0;
    else {
        double mapProg = rj.mapProgress();
        double redProg = rj.reduceProgress();
        return (mapProg + redProg) / 2;
    }
}
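
The javadoc notes that each job's progress has to be scaled by the number of jobs in the JobControl. A hedged sketch of that aggregation, averaging the per-job figure over the whole plan (the class, method names, and the inlined copy of the progress logic are assumptions):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapred.jobcontrol.Job;

public class ProgressAggregator {

    // Hypothetical: average per-job progress (each in [0, 1]) over all
    // jobs in the plan, so the overall figure also stays in [0, 1].
    public static double overallProgress(List<Job> jobs, JobClient jobClient) throws IOException {
        if (jobs.isEmpty()) {
            return 0;
        }
        double total = 0;
        for (Job j : jobs) {
            total += jobProgress(j, jobClient);
        }
        return total / jobs.size();
    }

    // Inlined copy of the logic from progressOfRunningJob above: a job
    // with no RunningJob counts as 1 if it succeeded, 0 otherwise.
    private static double jobProgress(Job j, JobClient jobClient) throws IOException {
        RunningJob rj = jobClient.getJob(j.getAssignedJobID());
        if (rj == null) {
            return j.getState() == Job.SUCCESS ? 1 : 0;
        }
        return (rj.mapProgress() + rj.reduceProgress()) / 2;
    }
}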

From source file: org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.MapReduceLauncher.java

License: Apache License

private void getStats(Job job, boolean errNotDbg, PigContext pigContext) throws ExecException {
    JobID MRJobID = job.getAssignedJobID();
    String jobMessage = job.getMessage();
    Exception backendException = null;
    if (MRJobID == null) {
        try {
            LogUtils.writeLog("Backend error message during job submission", jobMessage,
                    pigContext.getProperties().getProperty("pig.logfile"), log);
            backendException = getExceptionFromString(jobMessage);
        } catch (Exception e) {
            int errCode = 2997;
            String msg = "Unable to recreate exception from backend error: " + jobMessage;
            throw new ExecException(msg, errCode, PigException.BUG);
        }
        throw new ExecException(backendException);
    }
    try {
        TaskReport[] mapRep = HadoopShims.getTaskReports(job, TaskType.MAP);
        if (mapRep != null) {
            getErrorMessages(mapRep, "map", errNotDbg, pigContext);
            totalHadoopTimeSpent += computeTimeSpent(mapRep);
            mapRep = null;
        }
        TaskReport[] redRep = HadoopShims.getTaskReports(job, TaskType.REDUCE);
        if (redRep != null) {
            getErrorMessages(redRep, "reduce", errNotDbg, pigContext);
            totalHadoopTimeSpent += computeTimeSpent(redRep);
            redRep = null;
        }
    } catch (IOException e) {
        if (job.getState() == Job.SUCCESS) {
            // if the job succeeded, let the user know that
            // we were unable to get statistics
            log.warn("Unable to get job related diagnostics");
        } else {
            throw new ExecException(e);
        }
    } catch (Exception e) {
        throw new ExecException(e);
    }
}