Example usage for org.apache.hadoop.mapreduce Job getJobState

Introduction

On this page you can find example usage of org.apache.hadoop.mapreduce.Job#getJobState.

Prototype

public JobStatus.State getJobState() throws IOException, InterruptedException 

Document

Returns the current state of the Job.
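
The snippet below is a minimal sketch of the typical call pattern (it is not taken from the projects listed under Usage): obtain a Job handle from a Cluster and branch on the returned JobStatus.State. The class name and the job ID string are placeholders, and both IOException and InterruptedException must be handled because the call contacts the cluster.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus;

public class JobStateExample {
    public static void main(String[] args) throws IOException, InterruptedException {
        Configuration conf = new Configuration();
        Cluster cluster = new Cluster(conf);
        try {
            // Placeholder job ID; substitute the ID of a job known to the cluster.
            Job job = cluster.getJob(JobID.forName("job_1400000000000_0001"));
            if (job == null) {
                System.out.println("Job not found");
                return;
            }
            JobStatus.State state = job.getJobState();
            switch (state) {
            case PREP:
            case RUNNING:
                System.out.println("Job is still in progress: " + state);
                break;
            case SUCCEEDED:
                System.out.println("Job completed successfully");
                break;
            case FAILED:
            case KILLED:
                System.out.println("Job did not complete: " + state);
                break;
            default:
                System.out.println("Unexpected state: " + state);
            }
        } finally {
            cluster.close();
        }
    }
}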

Usage

From source file: com.cloudera.oryx.computation.common.JobStep.java

License: Open Source License

private StepStatus determineStatus() throws IOException, InterruptedException {
    JobContext job = getJob();
    if (job == null) {
        return StepStatus.COMPLETED;
    }
    Cluster cluster = new Cluster(getConf());
    try {
        JobID jobID = job.getJobID();
        if (jobID == null) {
            return StepStatus.PENDING;
        }
        Job runningJob = cluster.getJob(jobID);
        if (runningJob == null) {
            return StepStatus.PENDING;
        }
        JobStatus.State state = runningJob.getJobState();
        switch (state) {
        case PREP:
            return StepStatus.PENDING;
        case RUNNING:
            return StepStatus.RUNNING;
        case SUCCEEDED:
            return StepStatus.COMPLETED;
        case FAILED:
            return StepStatus.FAILED;
        case KILLED:
            return StepStatus.CANCELLED;
        }
        throw new IllegalArgumentException("Unknown Hadoop job state " + state);
    } finally {
        cluster.close();
    }
}
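
In this step, Hadoop's job states are mapped onto the pipeline's own StepStatus values (PREP becomes PENDING, KILLED becomes CANCELLED), PENDING is returned whenever the job or its JobID cannot be resolved yet, and the Cluster handle is closed in a finally block.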

From source file: kogiri.common.report.Report.java

License: Open Source License

private String makeText(Job job) {
    String jobName = job.getJobName();
    String jobID = job.getJobID().toString();
    String jobStatus;
    try {
        jobStatus = job.getJobState().name();
    } catch (IOException ex) {
        jobStatus = "Unknown";
    } catch (InterruptedException ex) {
        jobStatus = "Unknown";
    }

    String startTimeStr;
    try {
        startTimeStr = TimeHelper.getTimeString(job.getStartTime());
    } catch (Exception ex) {
        startTimeStr = "Unknown";
    }

    String finishTimeStr;
    try {
        finishTimeStr = TimeHelper.getTimeString(job.getFinishTime());
    } catch (Exception ex) {
        finishTimeStr = "Unknown";
    }

    String timeTakenStr;
    try {
        timeTakenStr = TimeHelper.getDiffTimeString(job.getStartTime(), job.getFinishTime());
    } catch (Exception ex) {
        timeTakenStr = "Unknown";
    }

    String countersStr;
    try {
        countersStr = job.getCounters().toString();
    } catch (Exception ex) {
        countersStr = "Unknown";
    }

    return "Job : " + jobName + "\n" + "JobID : " + jobID + "\n" + "Status : " + jobStatus + "\n"
            + "StartTime : " + startTimeStr + "\n" + "FinishTime : " + finishTimeStr + "\n" + "TimeTaken : "
            + timeTakenStr + "\n\n" + countersStr;
}

From source file: org.apache.kylin.engine.mr.common.MapReduceExecutable.java

License: Apache License

@Override
protected void onExecuteStart(ExecutableContext executableContext) {
    final Output output = getOutput();
    if (output.getExtra().containsKey(START_TIME)) {
        final String mrJobId = output.getExtra().get(ExecutableConstants.MR_JOB_ID);
        if (mrJobId == null) {
            getManager().updateJobOutput(getId(), ExecutableState.RUNNING, null, null);
            return;
        }
        try {
            Configuration conf = HadoopUtil.getCurrentConfiguration();
            Job job = new Cluster(conf).getJob(JobID.forName(mrJobId));
            if (job == null || job.getJobState() == JobStatus.State.FAILED) {
                //remove previous mr job info
                super.onExecuteStart(executableContext);
            } else {
                getManager().updateJobOutput(getId(), ExecutableState.RUNNING, null, null);
            }
        } catch (IOException e) {
            logger.warn("error get hadoop status");
            super.onExecuteStart(executableContext);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            logger.warn("error get hadoop status");
            super.onExecuteStart(executableContext);
        }
    } else {
        super.onExecuteStart(executableContext);
    }
}

From source file: org.apache.kylin.job.common.MapReduceExecutable.java

License: Apache License

@Override
protected void onExecuteStart(ExecutableContext executableContext) {
    final Output output = executableManager.getOutput(getId());
    if (output.getExtra().containsKey(START_TIME)) {
        final String mrJobId = output.getExtra().get(ExecutableConstants.MR_JOB_ID);
        if (mrJobId == null) {
            executableManager.updateJobOutput(getId(), ExecutableState.RUNNING, null, null);
            return;
        }
        try {
            Job job = new Cluster(new Configuration()).getJob(JobID.forName(mrJobId));
            if (job.getJobState() == JobStatus.State.FAILED) {
                //remove previous mr job info
                super.onExecuteStart(executableContext);
            } else {
                executableManager.updateJobOutput(getId(), ExecutableState.RUNNING, null, null);
            }
        } catch (IOException e) {
            logger.warn("error get hadoop status");
            super.onExecuteStart(executableContext);
        } catch (InterruptedException e) {
            logger.warn("error get hadoop status");
            super.onExecuteStart(executableContext);
        }
    } else {
        super.onExecuteStart(executableContext);
    }
}
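
Unlike the newer org.apache.kylin.engine.mr variant above, this older version calls getJobState() without first checking whether Cluster#getJob returned null, and it does not re-interrupt the thread when InterruptedException is caught.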

From source file: org.apache.tez.mapreduce.TestMRRJobs.java

License: Apache License

@Test(timeout = 60000)
public void testMRRSleepJob() throws IOException, InterruptedException, ClassNotFoundException {
    LOG.info("\n\n\nStarting testMRRSleepJob().");

    if (!(new File(MiniTezCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniTezCluster.APPJAR + " not found. Not running test.");
        return;
    }

    Configuration sleepConf = new Configuration(mrrTezCluster.getConfig());

    MRRSleepJob sleepJob = new MRRSleepJob();
    sleepJob.setConf(sleepConf);

    Job job = sleepJob.createJob(1, 1, 1, 1, 1, 1, 1, 1, 1, 1);

    job.setJarByClass(MRRSleepJob.class);
    job.setMaxMapAttempts(1); // speed up failures
    job.submit();
    String trackingUrl = job.getTrackingURL();
    String jobId = job.getJobID().toString();
    boolean succeeded = job.waitForCompletion(true);
    Assert.assertTrue(succeeded);
    Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
    Assert.assertTrue("Tracking URL was " + trackingUrl + " but didn't Match Job ID " + jobId,
            trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));

    // FIXME once counters and task progress can be obtained properly
    // TODO use dag client to test counters and task progress?
    // what about completed jobs?
}
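
In this test, getJobState() is called only after waitForCompletion(true) has returned, so the assertion checks the job's terminal state (SUCCEEDED) rather than polling a running job.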

From source file: org.apache.tez.mapreduce.TestMRRJobs.java

License: Apache License

@Test(timeout = 60000)
public void testRandomWriter() throws IOException, InterruptedException, ClassNotFoundException {

    LOG.info("\n\n\nStarting testRandomWriter().");
    if (!(new File(MiniTezCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniTezCluster.APPJAR + " not found. Not running test.");
        return;
    }

    RandomTextWriterJob randomWriterJob = new RandomTextWriterJob();
    mrrTezCluster.getConfig().set(RandomTextWriterJob.TOTAL_BYTES, "3072");
    mrrTezCluster.getConfig().set(RandomTextWriterJob.BYTES_PER_MAP, "1024");
    Job job = randomWriterJob.createJob(mrrTezCluster.getConfig());
    Path outputDir = new Path(OUTPUT_ROOT_DIR, "random-output");
    FileOutputFormat.setOutputPath(job, outputDir);
    job.setSpeculativeExecution(false);
    job.setJarByClass(RandomTextWriterJob.class);
    job.setMaxMapAttempts(1); // speed up failures
    job.submit();
    String trackingUrl = job.getTrackingURL();
    String jobId = job.getJobID().toString();
    boolean succeeded = job.waitForCompletion(true);
    Assert.assertTrue(succeeded);
    Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
    Assert.assertTrue("Tracking URL was " + trackingUrl + " but didn't Match Job ID " + jobId,
            trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));

    // Make sure there are three files in the output-dir

    RemoteIterator<FileStatus> iterator = FileContext.getFileContext(mrrTezCluster.getConfig())
            .listStatus(outputDir);
    int count = 0;
    while (iterator.hasNext()) {
        FileStatus file = iterator.next();
        if (!file.getPath().getName().equals(FileOutputCommitter.SUCCEEDED_FILE_NAME)) {
            count++;
        }
    }
    Assert.assertEquals("Number of part files is wrong!", 3, count);

}

From source file: org.apache.tez.mapreduce.TestMRRJobs.java

License: Apache License

@Test(timeout = 60000)
public void testFailingJob() throws IOException, InterruptedException, ClassNotFoundException {

    LOG.info("\n\n\nStarting testFailingJob().");

    if (!(new File(MiniTezCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniTezCluster.APPJAR + " not found. Not running test.");
        return;
    }

    Configuration sleepConf = new Configuration(mrrTezCluster.getConfig());

    MRRSleepJob sleepJob = new MRRSleepJob();
    sleepJob.setConf(sleepConf);

    Job job = sleepJob.createJob(1, 1, 1, 1, 1, 1, 1, 1, 1, 1);

    job.setJarByClass(MRRSleepJob.class);
    job.setMaxMapAttempts(1); // speed up failures
    job.getConfiguration().setBoolean(MRRSleepJob.MAP_FATAL_ERROR, true);
    job.getConfiguration().set(MRRSleepJob.MAP_ERROR_TASK_IDS, "*");

    job.submit();
    boolean succeeded = job.waitForCompletion(true);
    Assert.assertFalse(succeeded);
    Assert.assertEquals(JobStatus.State.FAILED, job.getJobState());

    // FIXME once counters and task progress can be obtained properly
    // TODO verify failed task diagnostics
}

From source file: org.apache.tez.mapreduce.TestMRRJobs.java

License: Apache License

@Test(timeout = 60000)
public void testFailingAttempt() throws IOException, InterruptedException, ClassNotFoundException {

    LOG.info("\n\n\nStarting testFailingAttempt().");

    if (!(new File(MiniTezCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniTezCluster.APPJAR + " not found. Not running test.");
        return;
    }

    Configuration sleepConf = new Configuration(mrrTezCluster.getConfig());

    MRRSleepJob sleepJob = new MRRSleepJob();
    sleepJob.setConf(sleepConf);

    Job job = sleepJob.createJob(1, 1, 1, 1, 1, 1, 1, 1, 1, 1);

    job.setJarByClass(MRRSleepJob.class);
    job.setMaxMapAttempts(3); // speed up failures
    job.getConfiguration().setBoolean(MRRSleepJob.MAP_THROW_ERROR, true);
    job.getConfiguration().set(MRRSleepJob.MAP_ERROR_TASK_IDS, "0");

    job.submit();
    boolean succeeded = job.waitForCompletion(true);
    Assert.assertTrue(succeeded);
    Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());

    // FIXME once counters and task progress can be obtained properly
    // TODO verify failed task diagnostics
}

From source file: org.apache.tez.mapreduce.TestMRRJobs.java

License: Apache License

@Test(timeout = 60000)
public void testMRRSleepJobWithCompression() throws IOException, InterruptedException, ClassNotFoundException {
    LOG.info("\n\n\nStarting testMRRSleepJobWithCompression().");

    if (!(new File(MiniTezCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniTezCluster.APPJAR + " not found. Not running test.");
        return;
    }

    Configuration sleepConf = new Configuration(mrrTezCluster.getConfig());

    MRRSleepJob sleepJob = new MRRSleepJob();
    sleepJob.setConf(sleepConf);

    Job job = sleepJob.createJob(1, 1, 2, 1, 1, 1, 1, 1, 1, 1);

    job.setJarByClass(MRRSleepJob.class);
    job.setMaxMapAttempts(1); // speed up failures

    // enable compression
    job.getConfiguration().setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
    job.getConfiguration().set(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC, DefaultCodec.class.getName());

    job.submit();
    String trackingUrl = job.getTrackingURL();
    String jobId = job.getJobID().toString();
    boolean succeeded = job.waitForCompletion(true);
    Assert.assertTrue(succeeded);
    Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
    Assert.assertTrue("Tracking URL was " + trackingUrl + " but didn't Match Job ID " + jobId,
            trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));

    // FIXME once counters and task progress can be obtained properly
    // TODO use dag client to test counters and task progress?
    // what about completed jobs?

}