Example usage for org.apache.hadoop.mapred JobStatus PREP

List of usage examples for org.apache.hadoop.mapred JobStatus PREP

Introduction

On this page you can find example usages of org.apache.hadoop.mapred.JobStatus.PREP.

Prototype

public static final int PREP = 4

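Before the project examples below, here is a minimal sketch of the typical use of this constant: looking up a submitted job and testing whether it is still in the PREP (setup) state. The class name PrepCheck and the job-ID string are hypothetical placeholders; the lookup goes through the classic JobClient API that also appears in the examples that follow.

import java.io.IOException;

import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.JobStatus;
import org.apache.hadoop.mapred.RunningJob;

public class PrepCheck {

    /**
     * Returns true while the given job is still in the PREP (setup) state.
     * The job ID string is a placeholder; a real value looks like "job_201201011200_0001".
     */
    public static boolean isPreparing(JobClient jobClient, String jobIdString) throws IOException {
        RunningJob runningJob = jobClient.getJob(JobID.forName(jobIdString));

        if (runningJob == null) {
            throw new IOException("No such job: " + jobIdString);
        }

        // getJobState() returns one of the JobStatus int constants:
        // RUNNING = 1, SUCCEEDED = 2, FAILED = 3, PREP = 4, KILLED = 5
        return runningJob.getJobState() == JobStatus.PREP;
    }
}

The same comparison against JobStatus.RUNNING and JobStatus.PREP is what the Liferay examples below use to decide whether a job needs to be resubmitted.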

Usage

From source file:com.liferay.hadoop.action.HadoopJob.java

License:Open Source License

public String doExecute(HttpServletRequest request, HttpServletResponse response) throws Exception {

    response.setContentType(ContentTypes.TEXT_PLAIN_UTF8);

    PrintWriter writer = response.getWriter();

    FileSystem fileSystem = HadoopManager.getFileSystem();

    JobClient jobClient = HadoopManager.getJobClient();

    writer.println("-- Job Status --");

    Path inputPath = new Path("/index/*/*");
    Path outputPath = new Path("/wordcount/results");

    try {
        if (_runningJob == null) {
            writer.println("Creating job");

            if (fileSystem.exists(_jobPath)) {
                fileSystem.delete(_jobPath, false);
            }

            if (!fileSystem.exists(_jobPath)) {
                writer.println("Deploying the job code to cluster");

                FSDataOutputStream outputStream = null;

                try {
                    outputStream = fileSystem.create(_jobPath);

                    ServletContext servletContext = HadoopManager.getServletContext();

                    InputStream inputStream = servletContext.getResourceAsStream("/WEB-INF/lib/hadoop-job.jar");

                    StreamUtil.transfer(inputStream, outputStream, false);
                } finally {
                    StreamUtil.cleanUp(outputStream);
                }

                writer.println("Job code deployed to cluster");
            }

            if (fileSystem.exists(outputPath)) {
                writer.println("A previous job output was found, backing it up");

                fileSystem.rename(outputPath,
                        outputPath.getParent().suffix("/.results-" + System.currentTimeMillis()));
            }

            _jobConf = HadoopManager.createNewJobConf();

            _jobConf.setJobName("Word Count");

            writer.println("Job '" + _jobConf.getJobName() + "' is being configured");

            _jobConf.setJarByClass(Map.class);
            _jobConf.setOutputKeyClass(Text.class);
            _jobConf.setOutputValueClass(IntWritable.class);
            _jobConf.setMapperClass(Map.class);
            _jobConf.setCombinerClass(Reduce.class);
            _jobConf.setReducerClass(Reduce.class);
            _jobConf.setInputFormat(TextInputFormat.class);
            _jobConf.setOutputFormat(TextOutputFormat.class);

            writer.println("Job code deployed to distributed cache's classpath");

            DistributedCache.addArchiveToClassPath(_jobPath, _jobConf, fileSystem);

            FileInputFormat.setInputPaths(_jobConf, inputPath);
            FileOutputFormat.setOutputPath(_jobConf, outputPath);

            writer.println("Submitting job the first time");

            _runningJob = jobClient.submitJob(_jobConf);

            writer.println("Job submitted");
        }

        int jobState = _runningJob.getJobState();

        writer.println(
                "Job status: " + jobState + " (RUNNING = 1, SUCCEEDED = 2, FAILED = 3, PREP = 4, KILLED = 5)");

        if ((jobState != JobStatus.RUNNING) && (jobState != JobStatus.PREP)) {

            writer.println("Re-issuing the job");

            if (fileSystem.exists(outputPath)) {
                writer.println("A previous job output was found, backing it up");

                fileSystem.rename(outputPath,
                        outputPath.getParent().suffix("/.results-" + System.currentTimeMillis()));
            }

            writer.println("Submitting job the first time");

            _runningJob = jobClient.submitJob(_jobConf);

            writer.println("Job submitted");
        }
    } catch (Exception ioe) {
        writer.println("Job error: ");

        ioe.printStackTrace(writer);
    }

    writer.flush();
    writer.close();

    return null;
}

From source file:com.liferay.hadoop.util.HadoopManager.java

License:Open Source License

public static void runJob(StoreEvent storeEvent) throws IOException {
    FileSystem fileSystem = getFileSystem();

    if (_servletContext == null) {
        return;
    }

    JobClient jobClient = getJobClient();

    Path inputPath = new Path("/index".concat(storeEvent.getRootPath().toString()).concat("/*"));
    Path outputPath = new Path("/wordcount".concat(storeEvent.getRootPath().toString()).concat("/results"));

    try {
        if (_runningJob == null) {
            if (!fileSystem.exists(_jobPath)) {
                FSDataOutputStream outputStream = null;

                try {
                    outputStream = fileSystem.create(_jobPath);

                    InputStream inputStream = _servletContext
                            .getResourceAsStream("/WEB-INF/lib/hadoop-job.jar");

                    StreamUtil.transfer(inputStream, outputStream, false);
                } finally {
                    StreamUtil.cleanUp(outputStream);
                }
            }

            if (fileSystem.exists(outputPath)) {
                fileSystem.rename(outputPath,
                        outputPath.getParent().suffix("/.results-" + System.currentTimeMillis()));
            }

            _jobConf = new JobConf(_sharedJobConf);

            _jobConf.setJobName("Word Count");
            _jobConf.setJarByClass(Map.class);
            _jobConf.setOutputKeyClass(Text.class);
            _jobConf.setOutputValueClass(IntWritable.class);
            _jobConf.setMapperClass(Map.class);
            _jobConf.setCombinerClass(Reduce.class);
            _jobConf.setReducerClass(Reduce.class);
            _jobConf.setInputFormat(TextInputFormat.class);
            _jobConf.setOutputFormat(TextOutputFormat.class);

            DistributedCache.addArchiveToClassPath(_jobPath, _jobConf, fileSystem);

            FileInputFormat.setInputPaths(_jobConf, inputPath);
            FileOutputFormat.setOutputPath(_jobConf, outputPath);

            _runningJob = jobClient.submitJob(_jobConf);
        }

        int jobState = _runningJob.getJobState();

        if ((jobState != JobStatus.RUNNING) && (jobState != JobStatus.PREP)) {

            System.out.println("Re-issuing the word count job.");

            if (fileSystem.exists(outputPath)) {
                fileSystem.rename(outputPath,
                        outputPath.getParent().suffix("/.results-" + System.currentTimeMillis()));
            }

            _runningJob = jobClient.submitJob(_jobConf);
        }
    } catch (Exception ioe) {
        ioe.printStackTrace();
    }
}

From source file:org.apache.sqoop.submission.mapreduce.MapreduceSubmissionEngine.java

License:Apache License

/**
 * Convert map-reduce specific job status constants to Sqoop job status
 * constants.
 *
 * @param status Map-reduce job constant
 * @return Equivalent submission status
 */
protected SubmissionStatus convertMapreduceState(int status) {
    if (status == JobStatus.PREP) {
        return SubmissionStatus.BOOTING;
    } else if (status == JobStatus.RUNNING) {
        return SubmissionStatus.RUNNING;
    } else if (status == JobStatus.FAILED) {
        return SubmissionStatus.FAILED;
    } else if (status == JobStatus.KILLED) {
        return SubmissionStatus.FAILED;
    } else if (status == JobStatus.SUCCEEDED) {
        return SubmissionStatus.SUCCEEDED;
    }

    throw new SqoopException(MapreduceSubmissionError.MAPREDUCE_0004, "Unknown status " + status);
}

From source file:org.estado.core.JobStatusChecker.java

License:Apache License

public void checkStatus() {
    List<org.estado.spi.JobStatus> jobStatusList = new ArrayList<org.estado.spi.JobStatus>();

    try {
        Configuration conf = new Configuration();
        JobClient client = new JobClient(new JobConf(conf));
        JobStatus[] jobStatuses = client.getAllJobs();
        showFilter();

        int jobCount = 0;
        for (JobStatus jobStatus : jobStatuses) {
            Long lastTaskEndTime = 0L;
            TaskReport[] mapReports = client.getMapTaskReports(jobStatus.getJobID());
            for (TaskReport r : mapReports) {
                if (lastTaskEndTime < r.getFinishTime()) {
                    lastTaskEndTime = r.getFinishTime();
                }
            }
            TaskReport[] reduceReports = client.getReduceTaskReports(jobStatus.getJobID());
            for (TaskReport r : reduceReports) {
                if (lastTaskEndTime < r.getFinishTime()) {
                    lastTaskEndTime = r.getFinishTime();
                }
            }
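            // setup and cleanup task reports are fetched but their results are not used here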
            client.getSetupTaskReports(jobStatus.getJobID());
            client.getCleanupTaskReports(jobStatus.getJobID());

            String jobId = jobStatus.getJobID().toString();
            String jobName = client.getJob(jobStatus.getJobID()).getJobName();
            Long startTime = jobStatus.getStartTime();
            String user = jobStatus.getUsername();
            int mapProgress = (int) (jobStatus.mapProgress() * 100);
            int reduceProgress = (int) (jobStatus.reduceProgress() * 100);
            org.estado.spi.JobStatus jobStat = null;
            ++jobCount;

            int runState = jobStatus.getRunState();
            switch (runState) {
            case JobStatus.SUCCEEDED:
                if (filter.contains("s")) {
                    Long duration = lastTaskEndTime - jobStatus.getStartTime();
                    jobStat = new org.estado.spi.JobStatus(cluster, jobId, jobName, null, user, startTime,
                            lastTaskEndTime, duration, mapProgress, reduceProgress, "completed");
                    ++sCount;
                }
                break;

            case JobStatus.RUNNING:
                if (filter.contains("r")) {
                    long duration = System.currentTimeMillis() - jobStatus.getStartTime();
                    jobStat = new org.estado.spi.JobStatus(cluster, jobId, jobName, null, user, startTime,
                            lastTaskEndTime, duration, mapProgress, reduceProgress, "running");
                    ++rCount;
                }
                break;

            case JobStatus.FAILED:
                if (filter.contains("f")) {
                    long duration = lastTaskEndTime - jobStatus.getStartTime();
                    jobStat = new org.estado.spi.JobStatus(cluster, jobId, jobName, null, user, startTime,
                            lastTaskEndTime, duration, mapProgress, reduceProgress, "failed");
                    RunningJob job = client.getJob(jobStatus.getJobID());
                    jobStat.setJobTasks(getTaskDetails(job));
                    ++fCount;
                }
                break;

            case JobStatus.PREP:
                if (filter.contains("p")) {
                    jobStat = new org.estado.spi.JobStatus(cluster, jobId, jobName, null, user, null, null,
                            null, 0, 0, "preparing");
                    ++pCount;
                }
                break;

            case JobStatus.KILLED:
                if (filter.contains("k")) {
                    long duration = lastTaskEndTime - jobStatus.getStartTime();

                    jobStat = new org.estado.spi.JobStatus(cluster, jobId, jobName, null, user, startTime,
                            lastTaskEndTime, duration, mapProgress, reduceProgress, "killed");

                    RunningJob job = client.getJob(jobStatus.getJobID());
                    jobStat.setJobTasks(getTaskDetails(job));
                    ++kCount;
                }
                break;
            }

            if (jobStat != null) {
                jobStatusList.add(jobStat);
            }
        }

        //get counters
        for (org.estado.spi.JobStatus jobStat : jobStatusList) {
            if (!jobStat.getStatus().equals("preparing")) {
                List<JobCounterGroup> counterGroups = getJobCounters(jobStat.getJobId());
                jobStat.setCounterGroups(counterGroups);

                //additional data from counters
                setJobInfo(jobStat);
            }
        }

        //publish to all consumers
        for (JobStatusConsumer consumer : consumers) {
            consumer.handle(jobStatusList);
        }

        showJobCounts();
    } catch (Exception ex) {
        System.out.println("Jobs status checker failed" + ex.getMessage());
    }

}

From source file:org.godhuli.rhipe.FileUtils.java

License:Apache License

public REXP getstatus(String jd, boolean geterrors) throws Exception {
    org.apache.hadoop.mapred.JobID jj = org.apache.hadoop.mapred.JobID.forName(jd);
    if (jj == null)
        throw new IOException("Jobtracker could not find jobID: " + jd);
    org.apache.hadoop.mapred.RunningJob rj = jclient.getJob(jj);
    if (rj == null)
        throw new IOException(
                "No such job: " + jd + " available, wrong job? or try the History Viewer (see the Web UI) ");
    String jobfile = rj.getJobFile();
    String jobname = rj.getJobName();
    // cfg.addResource(new Path(jobfile));
    org.apache.hadoop.mapred.Counters cc = rj.getCounters();
    long startsec = getStart(jclient, jj);
    double dura = ((double) System.currentTimeMillis() - startsec) / 1000;
    REXP ro = FileUtils.buildlistFromOldCounter(cc, dura);
    int jobs = rj.getJobState();
    String jobss = null;
    if (jobs == JobStatus.FAILED)
        jobss = "FAILED";
    else if (jobs == JobStatus.KILLED)
        jobss = "KILLED";
    else if (jobs == JobStatus.PREP)
        jobss = "PREP";
    else if (jobs == JobStatus.RUNNING)
        jobss = "RUNNING";
    else if (jobs == JobStatus.SUCCEEDED)
        jobss = "SUCCEEDED";
    float mapprog = rj.mapProgress(), reduprog = rj.reduceProgress();

    org.apache.hadoop.mapred.TaskReport[] maptr = jclient.getMapTaskReports(jj);
    org.apache.hadoop.mapred.TaskReport[] redtr = jclient.getReduceTaskReports(jj);

    int totalmaps = maptr.length, totalreds = redtr.length;
    int mappending = 0, redpending = 0, maprunning = 0, redrunning = 0, redfailed = 0, redkilled = 0,
            mapkilled = 0, mapfailed = 0, mapcomp = 0, redcomp = 0;
    for (int i = 0; i < maptr.length; i++) {
        TIPStatus t = maptr[i].getCurrentStatus();
        switch (t) {
        case COMPLETE:
            mapcomp++;
            break;
        case FAILED:
            mapfailed++;
            break;
        case PENDING:
            mappending++;
            break;
        case RUNNING:
            maprunning++;
            break;
        case KILLED:
            mapkilled++;
            break;
        }
    }
    for (int i = 0; i < redtr.length; i++) {
        TIPStatus t = redtr[i].getCurrentStatus();
        switch (t) {
        case COMPLETE:
            redcomp++;
            break;
        case FAILED:
            redfailed++;
            break;
        case PENDING:
            redpending++;
            break;
        case RUNNING:
            redrunning++;
            break;
        case KILLED:
            redkilled++;
            break;
        }
    }
    int reduceafails = 0, reduceakilled = 0, mapafails = 0, mapakilled = 0;
    int startfrom = 0;

    REXP.Builder errcontainer = REXP.newBuilder();
    errcontainer.setRclass(REXP.RClass.STRING);
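    // page through all task completion events, counting killed/failed attempts and collecting diagnostics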
    while (true) {
        org.apache.hadoop.mapred.TaskCompletionEvent[] events = rj.getTaskCompletionEvents(startfrom);
        for (int i = 0; i < events.length; i++) {
            org.apache.hadoop.mapred.TaskCompletionEvent e = events[i];
            int f = 0, k = 0;
            switch (e.getTaskStatus()) {
            case KILLED:
                if (e.isMapTask()) {
                    mapakilled++;
                } else {
                    reduceakilled++;
                }
                break;
            case TIPFAILED:
            case FAILED:
                if (e.isMapTask()) {
                    mapafails++;
                } else {
                    reduceafails++;
                }
                if (geterrors) {
                    REXPProtos.STRING.Builder content = REXPProtos.STRING.newBuilder();
                    String[] s = rj.getTaskDiagnostics(e.getTaskAttemptId());
                    if (s != null && s.length > 0) {
                        content.setStrval(s[0]);
                        errcontainer.addStringValue(content.build());
                    }
                }
                break;
            }
        }
        startfrom += events.length;
        if (events.length == 0)
            break;
    }

    REXP.Builder thevals = REXP.newBuilder();
    thevals.setRclass(REXP.RClass.LIST);
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobss }));
    thevals.addRexpValue(RObjects.buildDoubleVector(new double[] { dura }));
    thevals.addRexpValue(RObjects.buildDoubleVector(new double[] { (double) mapprog, (double) reduprog }));
    thevals.addRexpValue(RObjects.buildIntVector(
            new int[] { totalmaps, mappending, maprunning, mapcomp, mapkilled, mapafails, mapakilled }));
    thevals.addRexpValue(RObjects.buildIntVector(
            new int[] { totalreds, redpending, redrunning, redcomp, redkilled, reduceafails, reduceakilled }));
    thevals.addRexpValue(ro);
    thevals.addRexpValue(errcontainer);
    thevals.addRexpValue(RObjects.makeStringVector(rj.getTrackingURL()));
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobname }));
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobfile }));
    return (thevals.build());
}

From source file:org.godhuli.rhipe.FileUtils.java

License:Apache License

public byte[] getDetailedInfoForJob(String jd) throws Exception {
    org.apache.hadoop.mapred.JobID jj = org.apache.hadoop.mapred.JobID.forName(jd);
    if (jj == null)
        throw new IOException("Jobtracker could not find jobID: " + jd);
    org.apache.hadoop.mapred.RunningJob rj = jclient.getJob(jj);
    if (rj == null)
        throw new IOException(
                "No such job: " + jd + " available, wrong job? or try the History Viewer (see the Web UI) ");
    String jobfile = rj.getJobFile();
    String jobname = rj.getJobName();
    org.apache.hadoop.mapred.Counters cc = rj.getCounters();
    long startsec = getStart(jclient, jj);
    REXP allCounters = FileUtils.buildlistFromOldCounter(cc, 0);
    int jobs = rj.getJobState();
    String jobss = null;
    if (jobs == JobStatus.FAILED)
        jobss = "FAILED";
    else if (jobs == JobStatus.KILLED)
        jobss = "KILLED";
    else if (jobs == JobStatus.PREP)
        jobss = "PREP";
    else if (jobs == JobStatus.RUNNING)
        jobss = "RUNNING";
    else if (jobs == JobStatus.SUCCEEDED)
        jobss = "SUCCEEDED";
    float mapprog = rj.mapProgress(), reduprog = rj.reduceProgress();

    REXP.Builder thevals = REXP.newBuilder();
    thevals.setRclass(REXP.RClass.LIST);
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobss }));
    thevals.addRexpValue(RObjects.buildDoubleVector(new double[] { startsec }));
    thevals.addRexpValue(RObjects.buildDoubleVector(new double[] { (double) mapprog, (double) reduprog }));
    thevals.addRexpValue(allCounters);
    thevals.addRexpValue(RObjects.makeStringVector(rj.getTrackingURL()));
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobname }));
    thevals.addRexpValue(RObjects.makeStringVector(new String[] { jobfile }));

    org.apache.hadoop.mapred.TaskReport[] maptr = jclient.getMapTaskReports(jj);
    REXP.Builder thevalsA = REXP.newBuilder();
    thevalsA.setRclass(REXP.RClass.LIST);
    for (TaskReport t : maptr) {
        thevalsA.addRexpValue(TaskReportToRexp(t));
    }
    thevals.addRexpValue(thevalsA.build());

    org.apache.hadoop.mapred.TaskReport[] redtr = jclient.getReduceTaskReports(jj);
    REXP.Builder thevalsB = REXP.newBuilder();
    thevalsB.setRclass(REXP.RClass.LIST);
    for (TaskReport t : redtr) {
        thevalsB.addRexpValue(TaskReportToRexp(t));
    }
    thevals.addRexpValue(thevalsB.build());

    return thevals.build().toByteArray();
}

From source file:org.sleuthkit.web.sampleapp.server.SampleServiceImpl.java

License:Open Source License

/**
 * Get a simple, human-readable status string for a Hadoop job.
 * @param status Hadoop job status constant (one of the JobStatus int values)
 * @return simple job status string
 */
private String getSimpleJobStatus(int status) throws IOException {
    switch (status) {
    case JobStatus.FAILED:
        return "failed";
    case JobStatus.KILLED:
        return "killed";
    case JobStatus.PREP:
        return "prepared";
    case JobStatus.RUNNING:
        return "running";
    case JobStatus.SUCCEEDED:
        return "completed";

    default:
        return "invalid";
    }
}