Example usage for org.apache.hadoop.mapred RunningJob getTrackingURL

List of usage examples for org.apache.hadoop.mapred RunningJob getTrackingURL

Introduction

On this page you can find example usages of org.apache.hadoop.mapred RunningJob getTrackingURL.

Prototype

public String getTrackingURL();

Document

Get the URL where some job progress information will be displayed.
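
A minimal usage sketch (assuming the org.apache.hadoop.mapred classes JobClient, JobConf, JobID and RunningJob are imported; jobIdString is a placeholder for a real job id, not taken from any of the sources below):

    // Minimal sketch: look up a submitted job by its id and print its tracking URL.
    public static void printTrackingURL(String jobIdString) throws IOException {
        JobConf conf = new JobConf(); // assumes cluster settings are on the classpath
        JobClient client = new JobClient(conf);
        RunningJob job = client.getJob(JobID.forName(jobIdString));
        if (job != null) { // getJob() returns null if the cluster does not know the id
            System.out.println("Tracking URL: " + job.getTrackingURL());
        }
    }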

Usage

From source file: azkaban.jobtype.MapReduceJobState.java

License: Apache License

public MapReduceJobState(RunningJob runningJob, TaskReport[] mapTaskReport, TaskReport[] reduceTaskReport)
        throws IOException {
    jobId = runningJob.getID().toString();
    jobName = runningJob.getJobName();
    trackingURL = runningJob.getTrackingURL();
    isComplete = runningJob.isComplete();
    isSuccessful = runningJob.isSuccessful();
    mapProgress = runningJob.mapProgress();
    reduceProgress = runningJob.reduceProgress();
    failureInfo = runningJob.getFailureInfo();

    totalMappers = mapTaskReport.length;
    totalReducers = reduceTaskReport.length;

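    // The job start time is the earliest map task start; a mapper counts as finished
    // once its status is neither PENDING nor RUNNING.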
    for (TaskReport report : mapTaskReport) {
        if (report.getStartTime() < jobStartTime || jobStartTime == 0L) {
            jobStartTime = report.getStartTime();
        }

        TIPStatus status = report.getCurrentStatus();
        if (status != TIPStatus.PENDING && status != TIPStatus.RUNNING) {
            finishedMappersCount++;
        }
    }

    for (TaskReport report : reduceTaskReport) {
        if (jobLastUpdateTime < report.getFinishTime()) {
            jobLastUpdateTime = report.getFinishTime();
        }

        TIPStatus status = report.getCurrentStatus();
        if (status != TIPStatus.PENDING && status != TIPStatus.RUNNING) {
            finishedReducersCount++;
        }
    }

    // If not all the reducers have finished, use the current time as the last update time.
    if (finishedReducersCount != reduceTaskReport.length || jobLastUpdateTime == 0) {
        jobLastUpdateTime = System.currentTimeMillis();
    }

    counters = runningJob.getCounters();
}

From source file: com.atlantbh.jmeter.plugins.hadooputilities.jobstatistics.JobLayer.java

License: Apache License

public String getJobStatisticsByJobId(String jobTracker, String jobId) throws IOException {
    StringBuilder jobStatistics = new StringBuilder();

    JobClient client = prepareJobClient(jobTracker);
    JobID id = convertToJobId(jobId);

    RunningJob job = client.getJob(id);

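    // mapProgress() and reduceProgress() return fractions in [0, 1]; scale to percentages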
    double mapProgress = job.mapProgress() * 100;
    double reduceProgress = job.reduceProgress() * 100;
    String mapPercentage = Double.toString(mapProgress) + "%";
    String reducePercentage = Double.toString(reduceProgress) + "%";

    jobStatistics.append("<job id='").append(jobId).append("'" + " name='").append(job.getJobName())
            .append("'>\n");
    jobStatistics.append(" <mapProgress>").append(mapPercentage).append("</mapProgress>\n");
    jobStatistics.append(" <reduceProgress>").append(reducePercentage).append("</reduceProgress>\n");
    jobStatistics.append(" <complete>").append(job.isComplete()).append("</complete>\n");
    jobStatistics.append(" <successful>").append(job.isSuccessful()).append("</successful>\n");
    jobStatistics.append(" <url>").append(job.getTrackingURL()).append("</url>\n");
    jobStatistics.append("</job>");

    return jobStatistics.toString();
}

From source file: com.google.mr4c.hadoop.HadoopAlgoRunner.java

License: Open Source License

private void submitJob() throws IOException {
    // most of this method copies JobClient.runJob()
    // the addition here is logging the job tracking URL
    JobClient client = new JobClient(m_jobConf);
    RunningJob job = client.submitJob(m_jobConf);
    m_log.info("Job URL is [{}]", job.getTrackingURL());
    try {
        if (!client.monitorAndPrintJob(m_jobConf, job)) {
            throw new IOException("Job failed!");
        }
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
    }
}

From source file: com.ibm.jaql.lang.expr.hadoop.Util.java

License: Apache License

public static void submitJob(JsonString submitClassName, JobConf conf) throws Exception {
    JobClient jc = new JobClient(conf);
    RunningJob rj = jc.submitJob(conf);
    String sc = JsonUtil.printToString(submitClassName);

    // log to status that a MR job is starting
    mrStatusStart(sc);

    // log to status vital MR job information
    mrStatusInfo(sc, JsonUtil.printToString(new JsonString(rj.getID().toString())),
            JsonUtil.printToString(new JsonString(rj.getJobName())),
            JsonUtil.printToString(new JsonString(rj.getTrackingURL())));
    //STATUS_LOG.info("MAP-REDUCE INFO: " + rj.getID() + "," + rj.getJobName() + "," + rj.getTrackingURL());

    boolean failed = false;
    try {
        if (!jc.monitorAndPrintJob(conf, rj)) {
            LOG.error(new IOException("Job failed!"));
            failed = true;
            //throw new IOException("Job failed!");
        }
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
    }

    try {
        if (rj.isSuccessful()) {
            logAllTaskSyslogs(rj, true);
        } else {
            logAllTaskSyslogs(rj, false);
        }
    } catch (Throwable t) {
        // log it, but do not stop the world for this
        LOG.error(t);
    }

    // log to status that a MR job is stopping
    mrStatusStop(sc);

    // if the job failed, then throw an exception
    if (failed) {
        throw new IOException("Job failed!");
    }
}

From source file: com.impetus.ankush2.hadoop.monitor.JobStatusProvider.java

License: Open Source License

/**
 * @param jobSts the job status to build the report from
 * @return a map of job report attributes (name, progress, state, tracking URL, etc.)
 * @throws IOException
 */
private Map<String, Object> getJobReport(JobStatus jobSts) throws IOException {
    // Creating an empty map for storing job information
    Map<String, Object> jobReport = new HashMap<String, Object>();
    // Returns the jobid of the Job
    org.apache.hadoop.mapred.JobID jobId = jobSts.getJobID();
    // Get a RunningJob object to track the ongoing Map-Reduce job.
    RunningJob job = jobClient.getJob(jobId);
    String jobName = "";
    if (job != null) {
        // Get the name of the job.
        jobName = job.getJobName();
    }
    // Percentage of progress in maps
    float mapProgress = jobSts.mapProgress() * 100;
    // Percentage of progress in reduce
    float reduceProgress = jobSts.reduceProgress() * 100;

    int mapTotal = 0;
    int reduceTotal = 0;
    int mapComp = 0;
    int reduceComp = 0;

    // Count completed map and reduce tasks
    try {
        // Get the information of the current state of the map
        // tasks of a job
        TaskReport[] mapTaskReports = jobClient.getMapTaskReports(jobId);
        // Get the total map
        mapTotal = mapTaskReports.length;
        // Iterating over the map tasks
        for (TaskReport taskReport : mapTaskReports) {
            // The current state of a map TaskInProgress as seen
            // by the JobTracker.
            TIPStatus currentStatus = taskReport.getCurrentStatus();
            if (currentStatus == TIPStatus.COMPLETE) {
                mapComp++;
            }
        }

        // Get the information of the current state of the
        // reduce tasks of a job.
        TaskReport[] reduceTaskReport = jobClient.getReduceTaskReports(jobId);
        // Get the total reduce
        reduceTotal = reduceTaskReport.length;
        // Iterating over the reduce tasks
        for (TaskReport taskReport : reduceTaskReport) {
            // The current state of a reduce TaskInProgress as
            // seen by the JobTracker.
            TIPStatus currentStatus = taskReport.getCurrentStatus();
            if (currentStatus == TIPStatus.COMPLETE) {
                reduceComp++;
            }
        }
    } catch (Exception e) {
        LOG.error(e.getMessage(), e);
    }
    // Percentage of progress in setup
    float setupProgress = jobSts.setupProgress() * 100;
    // The progress made on cleanup
    float cleanupProgress = jobSts.cleanupProgress() * 100;
    // Get any available diagnostic information on why the job might have failed.
    String failureInfo = jobSts.getFailureInfo();

    // Put job status information into the report map
    jobReport.put("jobId", jobId.toString());
    jobReport.put("jobName", jobName);
    jobReport.put("jobPriority", jobSts.getJobPriority().toString());
    jobReport.put("jobStartTime", jobSts.getStartTime());

    jobReport.put("userName", jobSts.getUsername());
    jobReport.put("jobComplete", jobSts.isJobComplete());

    jobReport.put("mapProgress", mapProgress);
    jobReport.put("reduceProgress", reduceProgress);

    jobReport.put("mapTotal", mapTotal);
    jobReport.put("reduceTotal", reduceTotal);
    jobReport.put("mapCompleted", mapComp);
    jobReport.put("reduceCompleted", reduceComp);

    jobReport.put("setupProgress", setupProgress);
    jobReport.put("cleanupProgress", cleanupProgress);

    jobReport.put("schedulingInfo", jobSts.getSchedulingInfo());
    jobReport.put("jobState", JobStatus.getJobRunState(jobSts.getRunState()));
    jobReport.put("failureInfo", failureInfo);
    jobReport.put("jobFile", job.getJobFile());
    jobReport.put("trackingURL", job.getTrackingURL());

    jobReport.putAll(getDetailedJobReport(jobId));
    return jobReport;
}

From source file: com.netflix.lipstick.pigtolipstick.BasicP2LClient.java

License: Apache License

/**
 * Build a P2jJobStatus object for the map/reduce job with id jobId.
 *
 * @param jobId the id of the map/reduce job
 * @return the newly created P2jJobStatus
 */
@SuppressWarnings("deprecation")
protected P2jJobStatus buildJobStatusMap(String jobId) {
    JobClient jobClient = PigStats.get().getJobClient();
    P2jJobStatus js = jobIdToJobStatusMap.get(jobId);

    try {
        RunningJob rj = jobClient.getJob(jobId);
        if (rj == null) {
            LOG.warn("Couldn't find job status for jobId=" + jobId);
            return js;
        }

        JobID jobID = rj.getID();
        Counters counters = rj.getCounters();
        Map<String, P2jCounters> cMap = Maps.newHashMap();
        for (Group g : counters) {
            P2jCounters countersObj = new P2jCounters();
            cMap.put(g.getDisplayName(), countersObj);
            for (Counter c : g) {
                countersObj.getCounters().put(c.getDisplayName(), c.getValue());
            }
        }

        js.setCounters(cMap);
        TaskReport[] mapTaskReport = jobClient.getMapTaskReports(jobID);
        TaskReport[] reduceTaskReport = jobClient.getReduceTaskReports(jobID);
        js.setJobName(rj.getJobName());
        js.setTrackingUrl(rj.getTrackingURL());
        js.setIsComplete(rj.isComplete());
        js.setIsSuccessful(rj.isSuccessful());
        js.setMapProgress(rj.mapProgress());
        js.setReduceProgress(rj.reduceProgress());
        js.setTotalMappers(mapTaskReport.length);
        js.setTotalReducers(reduceTaskReport.length);
        return js;
    } catch (IOException e) {
        LOG.error("Error getting job info.", e);
    }

    return null;
}

From source file: org.apache.hive.hcatalog.templeton.tool.LogRetriever.java

License: Apache License

private void logJob(String logDir, String jobID, PrintWriter listWriter) throws IOException {
    RunningJob rj = jobClient.getJob(JobID.forName(jobID));
    String jobURLString = rj.getTrackingURL();

    Path jobDir = new Path(logDir, jobID);
    fs.mkdirs(jobDir);

    // Log the job configuration (job.xml.html)
    try {
        logJobConf(jobID, jobURLString, jobDir.toString());
    } catch (IOException e) {
        System.err.println("Cannot retrieve job.xml.html for " + jobID);
        e.printStackTrace();
    }

    listWriter.println("job: " + jobID + "(" + "name=" + rj.getJobName() + "," + "status="
            + JobStatus.getJobRunState(rj.getJobState()) + ")");

    // Get completed attempts
    List<AttemptInfo> attempts = new ArrayList<AttemptInfo>();
    for (String type : new String[] { "map", "reduce", "setup", "cleanup" }) {
        try {
            List<AttemptInfo> successAttempts = getCompletedAttempts(jobID, jobURLString, type);
            attempts.addAll(successAttempts);
        } catch (IOException e) {
            System.err.println("Cannot retrieve " + type + " tasks for " + jobID);
            e.printStackTrace();
        }
    }

    // Get failed attempts
    try {
        List<AttemptInfo> failedAttempts = getFailedAttempts(jobID, jobURLString);
        attempts.addAll(failedAttempts);
    } catch (IOException e) {
        System.err.println("Cannot retrieve failed attempts for " + jobID);
        e.printStackTrace();
    }

    // Log each attempt
    for (AttemptInfo attempt : attempts) {
        try {
            logAttempt(jobID, attempt, jobDir.toString());
            listWriter.println("  attempt:" + attempt.id + "(" + "type=" + attempt.type + "," + "status="
                    + attempt.status + "," + "starttime=" + attempt.startTime + "," + "endtime="
                    + attempt.endTime + ")");
        } catch (IOException e) {
            System.err.println("Cannot log attempt " + attempt.id);
            e.printStackTrace();
        }
    }

    listWriter.println();
}

From source file: org.apache.oozie.action.hadoop.JavaActionExecutor.java

License: Apache License

public void submitLauncher(FileSystem actionFs, Context context, WorkflowAction action)
        throws ActionExecutorException {
    JobClient jobClient = null;
    boolean exception = false;
    try {
        Path appPathRoot = new Path(context.getWorkflow().getAppPath());

        // app path could be a file
        if (actionFs.isFile(appPathRoot)) {
            appPathRoot = appPathRoot.getParent();
        }

        Element actionXml = XmlUtils.parseXml(action.getConf());

        // action job configuration
        Configuration actionConf = loadHadoopDefaultResources(context, actionXml);
        setupActionConf(actionConf, context, actionXml, appPathRoot);
        LOG.debug("Setting LibFilesArchives ");
        setLibFilesArchives(context, actionXml, appPathRoot, actionConf);

        String jobName = actionConf.get(HADOOP_JOB_NAME);
        if (jobName == null || jobName.isEmpty()) {
            jobName = XLog.format("oozie:action:T={0}:W={1}:A={2}:ID={3}", getType(),
                    context.getWorkflow().getAppName(), action.getName(), context.getWorkflow().getId());
            actionConf.set(HADOOP_JOB_NAME, jobName);
        }

        injectActionCallback(context, actionConf);

        if (actionConf.get(ACL_MODIFY_JOB) == null || actionConf.get(ACL_MODIFY_JOB).trim().equals("")) {
            // ONLY in the case where user has not given the
            // modify-job ACL specifically
            if (context.getWorkflow().getAcl() != null) {
                // setting the group owning the Oozie job to allow anybody in that
                // group to modify the jobs.
                actionConf.set(ACL_MODIFY_JOB, context.getWorkflow().getAcl());
            }
        }

        // Setting the credential properties in launcher conf
        JobConf credentialsConf = null;
        HashMap<String, CredentialsProperties> credentialsProperties = setCredentialPropertyToActionConf(
                context, action, actionConf);
        if (credentialsProperties != null) {

            // Adding if action need to set more credential tokens
            credentialsConf = new JobConf(false);
            XConfiguration.copy(actionConf, credentialsConf);
            setCredentialTokens(credentialsConf, context, action, credentialsProperties);

            // insert conf to action conf from credentialsConf
            for (Entry<String, String> entry : credentialsConf) {
                if (actionConf.get(entry.getKey()) == null) {
                    actionConf.set(entry.getKey(), entry.getValue());
                }
            }
        }

        JobConf launcherJobConf = createLauncherConf(actionFs, context, action, actionXml, actionConf);

        LOG.debug("Creating Job Client for action " + action.getId());
        jobClient = createJobClient(context, launcherJobConf);
        String launcherId = LauncherMapperHelper.getRecoveryId(launcherJobConf, context.getActionDir(),
                context.getRecoveryId());
        boolean alreadyRunning = launcherId != null;
        RunningJob runningJob;

        // if user-retry is on, always submit new launcher
        boolean isUserRetry = ((WorkflowActionBean) action).isUserRetry();

        if (alreadyRunning && !isUserRetry) {
            runningJob = jobClient.getJob(JobID.forName(launcherId));
            if (runningJob == null) {
                String jobTracker = launcherJobConf.get(HADOOP_JOB_TRACKER);
                throw new ActionExecutorException(ActionExecutorException.ErrorType.ERROR, "JA017",
                        "unknown job [{0}@{1}], cannot recover", launcherId, jobTracker);
            }
        } else {
            LOG.debug("Submitting the job through Job Client for action " + action.getId());

            // setting up propagation of the delegation token.
            HadoopAccessorService has = Services.get().get(HadoopAccessorService.class);
            Token<DelegationTokenIdentifier> mrdt = jobClient
                    .getDelegationToken(has.getMRDelegationTokenRenewer(launcherJobConf));
            launcherJobConf.getCredentials().addToken(HadoopAccessorService.MR_TOKEN_ALIAS, mrdt);

            // insert credentials tokens to launcher job conf if needed
            if (needInjectCredentials() && credentialsConf != null) {
                for (Token<? extends TokenIdentifier> tk : credentialsConf.getCredentials().getAllTokens()) {
                    Text fauxAlias = new Text(tk.getKind() + "_" + tk.getService());
                    LOG.debug("ADDING TOKEN: " + fauxAlias);
                    launcherJobConf.getCredentials().addToken(fauxAlias, tk);
                }
            } else {
                LOG.info("No need to inject credentials.");
            }
            runningJob = jobClient.submitJob(launcherJobConf);
            if (runningJob == null) {
                throw new ActionExecutorException(ActionExecutorException.ErrorType.ERROR, "JA017",
                        "Error submitting launcher for action [{0}]", action.getId());
            }
            launcherId = runningJob.getID().toString();
            LOG.debug("After submission get the launcherId " + launcherId);
        }

        String jobTracker = launcherJobConf.get(HADOOP_JOB_TRACKER);
        String consoleUrl = runningJob.getTrackingURL();
        context.setStartData(launcherId, jobTracker, consoleUrl);
    } catch (Exception ex) {
        exception = true;
        throw convertException(ex);
    } finally {
        if (jobClient != null) {
            try {
                jobClient.close();
            } catch (Exception e) {
                if (exception) {
                    LOG.error("JobClient error: ", e);
                } else {
                    throw convertException(e);
                }
            }
        }
    }
}

From source file: org.apache.sqoop.submission.mapreduce.MapreduceSubmissionEngine.java

License: Apache License

/**
 * {@inheritDoc}
 */
@Override
public String externalLink(String submissionId) {
    try {
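        // getJob() returns null when the cluster no longer knows this submission id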
        RunningJob runningJob = jobClient.getJob(JobID.forName(submissionId));
        if (runningJob == null) {
            return null;
        }

        return runningJob.getTrackingURL();
    } catch (IOException e) {
        throw new SqoopException(MapreduceSubmissionError.MAPREDUCE_0003, e);
    }
}

From source file: org.godhuli.rhipe.FileUtils.java

License: Apache License

public REXP getstatus(String jd, boolean geterrors) throws Exception {
    org.apache.hadoop.mapred.JobID jj = org.apache.hadoop.mapred.JobID.forName(jd);
    if (jj == null)
        throw new IOException("Jobtracker could not find jobID: " + jd);
    org.apache.hadoop.mapred.RunningJob rj = jclient.getJob(jj);
    if (rj == null)
        throw new IOException(
                "No such job: " + jd + " available, wrong job? or try the History Viewer (see the Web UI) ");
    String jobfile = rj.getJobFile();
    String jobname = rj.getJobName();
    // cfg.addResource(new Path(jobfile));
    org.apache.hadoop.mapred.Counters cc = rj.getCounters();
    long startsec = getStart(jclient, jj);
    double dura = ((double) System.currentTimeMillis() - startsec) / 1000;
    REXP ro = FileUtils.buildlistFromOldCounter(cc, dura);
    int jobs = rj.getJobState();
    String jobss = null;
    if (jobs == JobStatus.FAILED)
        jobss = "FAILED";
    else if (jobs == JobStatus.KILLED)
        jobss = "KILLED";
    else if (jobs == JobStatus.PREP)
        jobss = "PREP";
    else if (jobs == JobStatus.RUNNING)
        jobss = "RUNNING";
    else if (jobs == JobStatus.SUCCEEDED)
        jobss = "SUCCEEDED";
    float mapprog = rj.mapProgress(), reduprog = rj.reduceProgress();

    org.apache.hadoop.mapred.TaskReport[] maptr = jclient.getMapTaskReports(jj);
    org.apache.hadoop.mapred.TaskReport[] redtr = jclient.getReduceTaskReports(jj);

    int totalmaps = maptr.length, totalreds = redtr.length;
    int mappending = 0, redpending = 0, maprunning = 0, redrunning = 0, redfailed = 0, redkilled = 0,
            mapkilled = 0, mapfailed = 0, mapcomp = 0, redcomp = 0;
    for (int i = 0; i < maptr.length; i++) {
        TIPStatus t = maptr[i].getCurrentStatus();
        switch (t) {
        case COMPLETE:
            mapcomp++;
            break;
        case FAILED:
            mapfailed++;
            break;
        case PENDING:
            mappending++;
            break;
        case RUNNING:
            maprunning++;
            break;
        case KILLED:
            mapkilled++;
            break;
        }
    }
    for (int i = 0; i < redtr.length; i++) {
        TIPStatus t = redtr[i].getCurrentStatus();
        switch (t) {
        case COMPLETE:
            redcomp++;
            break;
        case FAILED:
            redfailed++;
            break;
        case PENDING:
            redpending++;
            break;
        case RUNNING:
            redrunning++;
            break;
        case KILLED:
            redkilled++;
            break;
        }
    }
    int reduceafails = 0, reduceakilled = 0, mapafails = 0, mapakilled = 0;
    int startfrom = 0;

    REXP.Builder errcontainer = REXP.newBuilder();
    errcontainer.setRclass(REXP.RClass.STRING);
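    // Page through task completion events to count attempt-level failures/kills
    // and, if requested, collect diagnostics from failed attempts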
    while (true) {
        org.apache.hadoop.mapred.TaskCompletionEvent[] events = rj.getTaskCompletionEvents(startfrom);
        for (int i = 0; i < events.length; i++) {
            org.apache.hadoop.mapred.TaskCompletionEvent e = events[i];
            int f = 0, k = 0;
            switch (e.getTaskStatus()) {
            case KILLED:
                if (e.isMapTask()) {
                    mapakilled++;
                } else {
                    reduceakilled++;
                }
                break;
            case TIPFAILED:
            case FAILED:
                if (e.isMapTask()) {
                    mapafails++;
                } else {
                    reduceafails++;
                }
                if (geterrors) {
                    REXPProtos.STRING.Builder content = REXPProtos.STRING.newBuilder();
                    String[] s = rj.getTaskDiagnostics(e.getTaskAttemptId());
                    if (s != null && s.length > 0) {
                        content.setStrval(s[0]);
                        errcontainer.addStringValue(content.build());
                    }
                }
                break;
            }
        }
        startfrom += events.length;
        if (events.length == 0)
            break;
    }

    REXP.Builder thevals = REXP.newBuilder();
    thevals.setRclass(REXP.RClass.LIST);
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobss }));
    thevals.addRexpValue(RObjects.buildDoubleVector(new double[] { dura }));
    thevals.addRexpValue(RObjects.buildDoubleVector(new double[] { (double) mapprog, (double) reduprog }));
    thevals.addRexpValue(RObjects.buildIntVector(
            new int[] { totalmaps, mappending, maprunning, mapcomp, mapkilled, mapafails, mapakilled }));
    thevals.addRexpValue(RObjects.buildIntVector(
            new int[] { totalreds, redpending, redrunning, redcomp, redkilled, reduceafails, reduceakilled }));
    thevals.addRexpValue(ro);
    thevals.addRexpValue(errcontainer);
    thevals.addRexpValue(RObjects.makeStringVector(rj.getTrackingURL()));
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobname }));
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobfile }));
    return (thevals.build());
}