Example usage for org.apache.hadoop.mapred JobStatus getFailureInfo

Introduction

On this page you can find example usages of org.apache.hadoop.mapred.JobStatus#getFailureInfo.

Prototype

public synchronized String getFailureInfo() 

Document

Gets any available info on the reason of failure of the job.
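
Before the full examples, here is a minimal, self-contained sketch of a typical call site. It is not taken from the sources below; it assumes the classic MR1 JobClient API and a JobConf that can reach the cluster's JobTracker, and the class name FailedJobReporter is illustrative.

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobStatus;

public class FailedJobReporter {
    public static void main(String[] args) throws IOException {
        // Connect to the JobTracker named in the (assumed) JobConf.
        JobClient jc = new JobClient(new JobConf());
        // Scan every job the cluster knows about and report the failed ones.
        for (JobStatus status : jc.getAllJobs()) {
            if (JobStatus.FAILED == status.getRunState()) {
                // getFailureInfo() returns whatever diagnostic text the framework
                // recorded; it may be empty when no information is available.
                System.out.println(status.getJobID() + " failed: " + status.getFailureInfo());
            }
        }
    }
}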

Usage

From source file:com.ikanow.infinit.e.processing.custom.CustomProcessingController.java

License:Open Source License

public boolean checkRunningJobs(CustomMapReduceJobPojo jobOverride) {
    Map<ObjectId, String> incompleteJobsMap = new HashMap<ObjectId, String>();
    //get mongo entries that have jobids?
    try {
        JobClient jc = null;

        CustomMapReduceJobPojo cmr = jobOverride;
        if (null == cmr)
            cmr = CustomScheduleManager.getJobsToMakeComplete(_bHadoopEnabled, incompleteJobsMap);
        else if (null == cmr.jobidS)
            return true;

        while (cmr != null) {
            boolean markedComplete = false;
            //make sure it's an actual ID; we now set jobidS to "" when running the job
            if (!cmr.jobidS.equals("")) // non null by construction
            {
                if (null == jc) {
                    try {
                        jc = new JobClient(InfiniteHadoopUtils.getJobClientConnection(prop_custom),
                                new Configuration());
                    } catch (Exception e) {
                        // Better delete this, no idea what's going on....                  
                        _logger.info(
                                "job_update_status_error_title=" + cmr.jobtitle + " job_update_status_error_id="
                                        + cmr._id.toString() + " job_update_status_error_message=Skipping job: "
                                        + cmr.jobidS + cmr.jobidN + ", this node does not run mapreduce");
                        _statusManager.setJobComplete(cmr, true, true, -1, -1,
                                "Failed to launch job, unknown error (check configuration in  /opt/hadoop-infinite/mapreduce/hadoop/, jobtracker may be localhost?).");
                        incompleteJobsMap.remove(cmr._id);
                        cmr = CustomScheduleManager.getJobsToMakeComplete(_bHadoopEnabled, incompleteJobsMap);
                        continue;
                    }
                }

                //check if job is done, and update if it is               
                JobStatus[] jobs = jc.getAllJobs();
                boolean bFound = false;
                for (JobStatus j : jobs) {
                    if (j.getJobID().getJtIdentifier().equals(cmr.jobidS)
                            && j.getJobID().getId() == cmr.jobidN) {
                        bFound = true;
                        boolean error = false;
                        markedComplete = j.isJobComplete();
                        String errorMessage = null;
                        if (JobStatus.FAILED == j.getRunState()) {
                            markedComplete = true;
                            error = true;
                            errorMessage = "Job failed while running, check for errors in the mapper/reducer or that your key/value classes are set up correctly? "
                                    + j.getFailureInfo();
                        }
                        _statusManager.setJobComplete(cmr, markedComplete, error, j.mapProgress(),
                                j.reduceProgress(), errorMessage);
                        break; // (from mini loop over hadoop jobs, not main loop over infinite tasks)
                    }
                }
                if (!bFound) { // Possible error
                    //check if it's been longer than 5 min and mark the job as complete (it failed to launch)
                    Date currDate = new Date();
                    Date lastDate = cmr.lastRunTime;
                    //if it's been more than 5 min (5m*60s*1000ms)
                    if (currDate.getTime() - lastDate.getTime() > 300000) {
                        markedComplete = true;
                        _statusManager.setJobComplete(cmr, true, true, -1, -1,
                                "Failed to launch job, unknown error #2.");
                    }
                }
            } else // this job hasn't been started yet:
            {
                //check if it's been longer than 5 min and mark the job as complete (it failed to launch)
                Date currDate = new Date();
                Date lastDate = cmr.lastRunTime;
                //if it's been more than 5 min (5m*60s*1000ms)
                if (currDate.getTime() - lastDate.getTime() > 300000) {
                    markedComplete = true;
                    _statusManager.setJobComplete(cmr, true, true, -1, -1,
                            "Failed to launch job, unknown error #1.");
                }
            }
            //job is done, so remove its incomplete flag
            if (markedComplete) {
                incompleteJobsMap.remove(cmr._id);
            }
            if (null == jobOverride)
                cmr = CustomScheduleManager.getJobsToMakeComplete(_bHadoopEnabled, incompleteJobsMap);
            else
                cmr = null;
        }
    } catch (Exception ex) {
        _logger.info("job_error_checking_status_message=" + InfiniteHadoopUtils.createExceptionMessage(ex));
    } catch (Error err) {
        // Really really want to get to the next line of code, and clear the status...
        _logger.info("job_error_checking_status_message=" + InfiniteHadoopUtils.createExceptionMessage(err));
    }

    if (null == jobOverride) {
        //set all incomplete jobs' status back
        for (ObjectId id : incompleteJobsMap.keySet()) {
            BasicDBObject update = new BasicDBObject(CustomMapReduceJobPojo.jobidS_, incompleteJobsMap.get(id));
            DbManager.getCustom().getLookup().update(new BasicDBObject(CustomMapReduceJobPojo._id_, id),
                    new BasicDBObject(MongoDbManager.set_, update));
        }
    }
    return incompleteJobsMap.isEmpty();
}

From source file:com.impetus.ankush2.hadoop.monitor.JobStatusProvider.java

License:Open Source License

/**
 * Builds a status report for a single job.
 *
 * @param jobSts the JobStatus of the job to report on
 * @return a map of job status information
 * @throws IOException
 */
private Map<String, Object> getJobReport(JobStatus jobSts) throws IOException {
    // Creating an empty map for storing job information
    Map<String, Object> jobReport = new HashMap<String, Object>();
    // Returns the jobid of the Job
    org.apache.hadoop.mapred.JobID jobId = jobSts.getJobID();
    // Get a RunningJob handle to track the ongoing Map-Reduce job.
    RunningJob job = jobClient.getJob(jobId);
    String jobName = "";
    if (job != null) {
        // Get the name of the job.
        jobName = job.getJobName();
    }
    // Percentage of progress in maps
    float mapProgress = jobSts.mapProgress() * 100;
    // Percentage of progress in reduce
    float reduceProgress = jobSts.reduceProgress() * 100;

    int mapTotal = 0;
    int reduceTotal = 0;
    int mapComp = 0;
    int reduceComp = 0;

    // Count for Map and Reduce Complete
    try {
        // Get the information of the current state of the map
        // tasks of a job
        TaskReport[] mapTaskReports = jobClient.getMapTaskReports(jobId);
        // Get the total map
        mapTotal = mapTaskReports.length;
        // Iterating over the map tasks
        for (TaskReport taskReport : mapTaskReports) {
            // The current state of a map TaskInProgress as seen
            // by the JobTracker.
            TIPStatus currentStatus = taskReport.getCurrentStatus();
            if (currentStatus == TIPStatus.COMPLETE) {
                mapComp++;
            }
        }

        // Get the information of the current state of the
        // reduce tasks of a job.
        TaskReport[] reduceTaskReport = jobClient.getReduceTaskReports(jobId);
        // Get the total reduce
        reduceTotal = reduceTaskReport.length;
        // Iterating over the reduce tasks
        for (TaskReport taskReport : reduceTaskReport) {
            // The current state of a reduce TaskInProgress as
            // seen by the JobTracker.
            TIPStatus currentStatus = taskReport.getCurrentStatus();
            if (currentStatus == TIPStatus.COMPLETE) {
                reduceComp++;
            }
        }
    } catch (Exception e) {
        LOG.error(e.getMessage(), e);
    }
    // Percentage of progress in setup
    float setupProgress = jobSts.setupProgress() * 100;
    // The progress made on cleanup
    float cleanupProgress = jobSts.cleanupProgress() * 100;
    // Get any available info on the reason for the job's failure,
    // i.e. the diagnostic information on why the job might have failed.
    String failureInfo = jobSts.getFailureInfo();

    // Putting job status information in the map
    jobReport.put("jobId", jobId.toString());
    jobReport.put("jobName", jobName);
    jobReport.put("jobPriority", jobSts.getJobPriority().toString());
    jobReport.put("jobStartTime", jobSts.getStartTime());

    jobReport.put("userName", jobSts.getUsername());
    jobReport.put("jobComplete", jobSts.isJobComplete());

    jobReport.put("mapProgress", mapProgress);
    jobReport.put("reduceProgress", reduceProgress);

    jobReport.put("mapTotal", mapTotal);
    jobReport.put("reduceTotal", reduceTotal);
    jobReport.put("mapCompleted", mapComp);
    jobReport.put("reduceCompleted", reduceComp);

    jobReport.put("setupProgress", setupProgress);
    jobReport.put("cleanupProgress", cleanupProgress);

    jobReport.put("schedulingInfo", jobSts.getSchedulingInfo());
    jobReport.put("jobState", JobStatus.getJobRunState(jobSts.getRunState()));
    jobReport.put("failureInfo", failureInfo);
    jobReport.put("jobFile", job.getJobFile());
    jobReport.put("trackingURL", job.getTrackingURL());

    jobReport.putAll(getDetailedJobReport(jobId));
    return jobReport;
}

From source file:eu.scape_project.tb.hadoopjobtracker.HadoobJobTrackerClient.java

License:Apache License

public HDJobStatus[] UUIDJobs(String UUID) {

    JobStatus[] allHadoopJobs = null;

    try {
        allHadoopJobs = myJobClient.getAllJobs();
    } catch (IOException ex) {
        logger.error("Error retrieving ALL jobs from JobTracker. ERR: " + ex.getMessage());
        // Without a job list there is nothing to search, so return an empty result.
        return new HDJobStatus[0];
    }

    int numberOfJobs = allHadoopJobs.length;
    logger.info("Number of ALL jobs on the cluster: " + numberOfJobs);
    logger.info("Searching for jobs containing the UUID: '" + UUID + "'");

    ArrayList<HDJobStatus> allUUIDJobs = new ArrayList<HDJobStatus>();
    if (numberOfJobs > 0) {
        for (JobStatus singleJobStatus : allHadoopJobs) {

            JobID singleJobID = singleJobStatus.getJobID();
            String singleJobName = "N/A";
            try {
                singleJobName = getJobName(myJobClient, singleJobID);
            } catch (IOException ex) {
                logger.error("Error retrieving jobName for job " + singleJobID + ". ERR: " + ex.getMessage());
            }

            if (UUID.isEmpty() || singleJobName.toLowerCase().contains(UUID.toLowerCase())) {
                HDJobStatus newJobStatus = new HDJobStatus();
                newJobStatus.setJobID(singleJobStatus.getJobID().toString());
                newJobStatus.setJobIsComplete(singleJobStatus.isJobComplete());
                newJobStatus.setJobUserName(singleJobStatus.getUsername());
                newJobStatus.setJobFailureInfo(singleJobStatus.getFailureInfo());
                newJobStatus.setJobName(singleJobName);
                newJobStatus.setJobMapProgress(Math.round(singleJobStatus.mapProgress() * 100));
                newJobStatus.setJobReduceProgress(Math.round(singleJobStatus.reduceProgress() * 100));
                allUUIDJobs.add(newJobStatus);
            }
        }
    }
    return allUUIDJobs.toArray(new HDJobStatus[allUUIDJobs.size()]);
}