Example usage for org.apache.hadoop.mapred RunningJob isSuccessful

Introduction

On this page you can find example usages of org.apache.hadoop.mapred.RunningJob#isSuccessful, drawn from open-source projects.

Prototype

public boolean isSuccessful() throws IOException;

Document

Check if the job completed successfully.
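
Before the individual examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the typical pattern: submit a job with JobClient, poll isComplete() until the job finishes, then read isSuccessful(). The job name and the input/output paths are placeholders.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class IsSuccessfulSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        JobConf conf = new JobConf(IsSuccessfulSketch.class);
        conf.setJobName("is-successful-sketch");           // placeholder job name
        FileInputFormat.setInputPaths(conf, new Path("in"));   // placeholder input
        FileOutputFormat.setOutputPath(conf, new Path("out")); // placeholder output

        JobClient client = new JobClient(conf);
        RunningJob job = client.submitJob(conf);
        while (!job.isComplete()) {
            Thread.sleep(5000); // poll until the job finishes
        }
        // isSuccessful() is only meaningful once the job is complete
        System.exit(job.isSuccessful() ? 0 : 1);
    }
}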

Usage

From source file: DataJoinJob.java

License: Apache License

/**
 * Submit/run a map/reduce job.
 *
 * @param job the job configuration to submit
 * @return true if the job completed successfully
 * @throws IOException if submitting the job or polling its status fails
 */
public static boolean runJob(JobConf job) throws IOException {
    JobClient jc = new JobClient(job);
    boolean success = true;
    RunningJob running = null;
    try {
        running = jc.submitJob(job);
        JobID jobId = running.getID();
        System.out.println("Job " + jobId + " is submitted");
        while (!running.isComplete()) {
            System.out.println("Job " + jobId + " is still running.");
            try {
                Thread.sleep(60000);
            } catch (InterruptedException e) {
                // ignore the interrupt and keep polling the job status
            }
            running = jc.getJob(jobId);
        }
        success = running.isSuccessful();
    } finally {
        if (!success && (running != null)) {
            running.killJob();
        }
        jc.close();
    }
    return success;
}

From source file: azkaban.jobtype.MapReduceJobState.java

License: Apache License

public MapReduceJobState(RunningJob runningJob, TaskReport[] mapTaskReport, TaskReport[] reduceTaskReport)
        throws IOException {
    jobId = runningJob.getID().toString();
    jobName = runningJob.getJobName();
    trackingURL = runningJob.getTrackingURL();
    isComplete = runningJob.isComplete();
    isSuccessful = runningJob.isSuccessful();
    mapProgress = runningJob.mapProgress();
    reduceProgress = runningJob.reduceProgress();
    failureInfo = runningJob.getFailureInfo();

    totalMappers = mapTaskReport.length;
    totalReducers = reduceTaskReport.length;

    for (TaskReport report : mapTaskReport) {
        if (report.getStartTime() < jobStartTime || jobStartTime == 0L) {
            jobStartTime = report.getStartTime();
        }

        TIPStatus status = report.getCurrentStatus();
        if (status != TIPStatus.PENDING && status != TIPStatus.RUNNING) {
            finishedMappersCount++;
        }
    }

    for (TaskReport report : reduceTaskReport) {
        if (jobLastUpdateTime < report.getFinishTime()) {
            jobLastUpdateTime = report.getFinishTime();
        }

        TIPStatus status = report.getCurrentStatus();
        if (status != TIPStatus.PENDING && status != TIPStatus.RUNNING) {
            finishedReducersCount++;
        }
    }

    // If not all reducers have finished, fall back to the current time.
    if (finishedReducersCount != reduceTaskReport.length || jobLastUpdateTime == 0) {
        jobLastUpdateTime = System.currentTimeMillis();
    }

    counters = runningJob.getCounters();
}

From source file: Brush.BrushAssembler.java

License: Apache License

public void end(RunningJob job) throws IOException {
    long endtime = System.currentTimeMillis();
    long diff = (endtime - JOBSTARTTIME) / 1000;

    msg(job.getJobID() + " " + diff + " s");

    if (!job.isSuccessful()) {
        System.out.println("Job was not successful");
        System.exit(1);
    }
}

From source file: ca.etsmtl.lasi.hbasewikipedialoader.HBaseWikipediaLoader.java

License: Apache License

/**
 * Main entry point.
 * 
 * @param args
 *          The command line parameters.
 * @throws Exception
 *           When running the job fails.
 */
public static void main(String[] args) throws Exception {
    HBaseConfiguration conf = new HBaseConfiguration();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length < 1) {
        System.err.println("ERROR: Wrong number of parameters: " + args.length);
        System.err.println("Usage: " + NAME + " <inputdir>");
        System.exit(-1);
    }
    JobConf jobConf = createSubmittableJob(conf, otherArgs);
    RunningJob job = JobClient.runJob(jobConf);
    job.waitForCompletion();
    System.exit(job.isSuccessful() ? 0 : 1);
}

From source file: ca.etsmtl.lasi.hbasewikipedialoader.TestHBaseWikipediaLoader.java

License: Apache License

/**
 * Run the loader on the sample, test that it succeeded, and check that
 * the number of reduced articles matches the number of rows in the table.
 * This test expects HBase to have been started on its default ports on
 * the local machine.
 */
public void testWikipediaLoader() {
    try {
        HBaseConfiguration conf = new HBaseConfiguration();
        String[] args = new String[] { "sample/sample.xml" };
        JobConf jobConf = HBaseWikipediaLoader.createSubmittableJob(conf, args);
        RunningJob job = JobClient.runJob(jobConf);
        job.waitForCompletion();
        assertTrue(job.isSuccessful());
        HTable htable = new HTable(conf, HBaseWikipediaLoader.TABLE);
        Scan scan = new Scan();
        scan.addColumn(Bytes.toBytes("info"), Bytes.toBytes("id"));
        htable.setScannerCaching(100);
        ResultScanner scanner = htable.getScanner(scan);
        Iterator<Result> ite = scanner.iterator();
        int count = 0;
        while (ite.hasNext()) {
            Result res = ite.next();
            if (res.getRow() == null) {
                break;
            }
            count++;
        }
        scanner.close();
        assertTrue(job.getCounters().getCounter(HBaseWikipediaLoader.Counters.MAPPED_WIKI_ARTICLES) == count);
    } catch (IOException ex) {
        ex.printStackTrace();
        fail(ex.getMessage());
    }
}

From source file: com.alexholmes.hadooputils.combine.seqfile.mapred.CombineSequenceFileJob.java

License: Apache License

/**
 * The driver for the MapReduce job.
 *
 * @param conf              configuration
 * @param inputDirAsString  comma-separated list of input directories
 * @param outputDirAsString output directory
 * @return true if the job completed successfully
 * @throws java.io.IOException         if something went wrong
 * @throws java.net.URISyntaxException if a URI wasn't correctly formed
 */
public boolean runJob(final Configuration conf, final String inputDirAsString, final String outputDirAsString)
        throws IOException, URISyntaxException, ClassNotFoundException, InterruptedException {

    JobConf job = new JobConf(conf);

    job.setJarByClass(CombineSequenceFileJob.class);
    job.setJobName("seqfilecombiner");

    job.setNumReduceTasks(0);

    job.setMapperClass(IdentityMapper.class);

    job.setInputFormat(CombineSequenceFileInputFormat.class);

    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Text.class);

    FileInputFormat.setInputPaths(job, inputDirAsString);
    FileOutputFormat.setOutputPath(job, new Path(outputDirAsString));

    Date startTime = new Date();
    System.out.println("Job started: " + startTime);
    RunningJob jobResult = JobClient.runJob(job);

    Date endTime = new Date();
    System.out.println("Job ended: " + endTime);
    System.out.println("The job took "
            + TimeUnit.MILLISECONDS.toSeconds(endTime.getTime() - startTime.getTime()) + " seconds.");

    return jobResult.isSuccessful();
}

From source file: com.atlantbh.jmeter.plugins.hadooputilities.jobstatistics.JobLayer.java

License: Apache License

public String getJobStatisticsByJobId(String jobTracker, String jobId) throws IOException {
    StringBuilder jobStatistics = new StringBuilder();

    JobClient client = prepareJobClient(jobTracker);
    JobID id = convertToJobId(jobId);

    RunningJob job = client.getJob(id);

    double mapProgress = job.mapProgress() * 100;
    double reduceProgress = job.reduceProgress() * 100;
    String mapPercentage = Double.toString(mapProgress) + "%";
    String reducePercentage = Double.toString(reduceProgress) + "%";

    jobStatistics.append("<job id='").append(jobId).append("'" + " name='").append(job.getJobName())
            .append("'>\n");
    jobStatistics.append(" <mapProgress>").append(mapPercentage).append("</mapProgress>\n");
    jobStatistics.append(" <reduceProgress>").append(reducePercentage).append("</reduceProgress>\n");
    jobStatistics.append(" <complete>").append(job.isComplete()).append("</complete>\n");
    jobStatistics.append(" <successful>").append(job.isSuccessful()).append("</successful>\n");
    jobStatistics.append(" <url>").append(job.getTrackingURL()).append("</url>\n");
    jobStatistics.append("</job>");

    return jobStatistics.toString();
}

From source file: com.cloudera.circus.test.TestXTest.java

License: Open Source License

@Test
@TestHadoop
public void testHadoopMapReduce() throws Exception {
    JobConf conf = getHadoopConf();
    FileSystem fs = FileSystem.get(conf);
    JobClient jobClient = new JobClient(conf);
    try {
        Path inputDir = new Path(getHadoopTestDir(), "input");
        Path outputDir = new Path(getHadoopTestDir(), "output");

        fs.mkdirs(inputDir);
        Writer writer = new OutputStreamWriter(fs.create(new Path(inputDir, "data.txt")));
        writer.write("a\n");
        writer.write("b\n");
        writer.write("c\n");
        writer.close();

        JobConf jobConf = getHadoopConf();
        jobConf.setInt("mapred.map.tasks", 1);
        jobConf.setInt("mapred.map.max.attempts", 1);
        jobConf.setInt("mapred.reduce.max.attempts", 1);
        jobConf.set("mapred.input.dir", inputDir.toString());
        jobConf.set("mapred.output.dir", outputDir.toString());
        final RunningJob runningJob = jobClient.submitJob(jobConf);
        waitFor(60 * 1000, true, new Predicate() {
            @Override
            public boolean evaluate() throws Exception {
                return runningJob.isComplete();
            }
        });
        Assert.assertTrue(runningJob.isSuccessful());
        Assert.assertTrue(fs.exists(new Path(outputDir, "part-00000")));
        BufferedReader reader = new BufferedReader(
                new InputStreamReader(fs.open(new Path(outputDir, "part-00000"))));
        Assert.assertTrue(reader.readLine().trim().endsWith("a"));
        Assert.assertTrue(reader.readLine().trim().endsWith("b"));
        Assert.assertTrue(reader.readLine().trim().endsWith("c"));
        Assert.assertNull(reader.readLine());
        reader.close();
    } finally {
        fs.close();
        jobClient.close();
    }
}

From source file: com.cloudera.recordservice.tests.JobQueue.java

License: Apache License

/**
 * Checks the synched job list, returning true if all jobs marked as
 * completed were also marked as successful. Successful jobs are removed
 * from the list. On the first failure found, the method returns false and
 * does not remove that job from the list.
 */
public boolean checkCompleted() {
    List<RunningJob> theFailureList = new LinkedList<RunningJob>();
    synchronized (synchedJobList_) {
        Iterator<Future> it = synchedJobList_.iterator();
        while (it.hasNext()) {
            Future f = it.next();
            try {
                RunningJob job = (RunningJob) f.get();
                if (job.isComplete()) {
                    if (!job.isSuccessful()) {
                        successful_ = false;
                        theFailureList.add(job);
                        return successful_;
                    }
                    it.remove();
                }
            } catch (IOException e) {
                LOGGER.debug("Failed to get job status", e);
                e.printStackTrace();
                successful_ = false;
                return successful_;
            } catch (ExecutionException ee) {
                LOGGER.debug("Job execution failed", ee);
                ee.printStackTrace();
                successful_ = false;
                return successful_;
            } catch (InterruptedException ie) {
                ie.printStackTrace();
                LOGGER.debug("Interrupted while checking job status", ie);
                successful_ = false;
                return successful_;
            }
        }
    }
    successful_ = true;
    Iterator<RunningJob> it = theFailureList.iterator();
    while (it.hasNext()) {
        System.out.println(it.next().getID());
    }
    return successful_;
}

From source file: com.cloudera.recordservice.tests.TestMiniClusterController.java

License: Apache License

/**
 * This method creates a sample MR job and submits that JobConf object to the
 * static MiniClusterController method to be executed.
 */
@Test
public void testRunningJobLocally() throws IOException, InterruptedException {
    JobConf sampleJob = createWordCountMRJobConf();
    RunningJob runningJob = miniCluster_.runJob(sampleJob);
    runningJob.waitForCompletion();
    assertTrue(runningJob.isSuccessful());
}