Example usage for org.apache.hadoop.mapreduce Job isSuccessful

Introduction

This page lists example usages of the org.apache.hadoop.mapreduce Job#isSuccessful method, collected from open-source projects.

Prototype

public boolean isSuccessful() throws IOException 

Document

Check if the job completed successfully.
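
Across the examples below the pattern is consistent: submit the job, wait for it to finish, then consult isSuccessful() and fail fast when the job did not succeed. A minimal sketch of that pattern, with an illustrative class name and error message that are not taken from any of the sources below:

import java.io.IOException;

import org.apache.hadoop.mapreduce.Job;

public class JobRunner {

    /** Submits the job, blocks until it finishes, and throws if it failed. */
    public static void runOrFail(Job job)
            throws IOException, InterruptedException, ClassNotFoundException {
        job.submit();
        // waitForCompletion(false) blocks until the job finishes without
        // printing progress; it does not re-submit an already submitted job
        job.waitForCompletion(false);
        if (!job.isSuccessful()) {
            throw new IOException("Job " + job.getJobName() + " failed");
        }
    }
}

Since waitForCompletion itself returns the value of isSuccessful(), its return value can also be checked directly when a separate submit step is not needed.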

Usage

From source file: org.apache.mahout.math.hadoop.stochasticsvd.QJob.java

License: Apache License

public static void run(Configuration conf, Path[] inputPaths, Path sbPath, Path outputPath, int aBlockRows,
        int minSplitSize, int k, int p, long seed, int numReduceTasks)
        throws ClassNotFoundException, InterruptedException, IOException {

    JobConf oldApiJob = new JobConf(conf);
    MultipleOutputs.addNamedOutput(oldApiJob, OUTPUT_QHAT,
            org.apache.hadoop.mapred.SequenceFileOutputFormat.class, SplitPartitionedWritable.class,
            DenseBlockWritable.class);
    MultipleOutputs.addNamedOutput(oldApiJob, OUTPUT_RHAT,
            org.apache.hadoop.mapred.SequenceFileOutputFormat.class, SplitPartitionedWritable.class,
            VectorWritable.class);

    Job job = new Job(oldApiJob);
    job.setJobName("Q-job");
    job.setJarByClass(QJob.class);

    job.setInputFormatClass(SequenceFileInputFormat.class);
    FileInputFormat.setInputPaths(job, inputPaths);
    if (minSplitSize > 0) {
        FileInputFormat.setMinInputSplitSize(job, minSplitSize);
    }

    FileOutputFormat.setOutputPath(job, outputPath);

    FileOutputFormat.setCompressOutput(job, true);
    FileOutputFormat.setOutputCompressorClass(job, DefaultCodec.class);
    SequenceFileOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);

    job.setMapOutputKeyClass(SplitPartitionedWritable.class);
    job.setMapOutputValueClass(VectorWritable.class);

    job.setOutputKeyClass(SplitPartitionedWritable.class);
    job.setOutputValueClass(VectorWritable.class);

    job.setMapperClass(QMapper.class);

    job.getConfiguration().setInt(PROP_AROWBLOCK_SIZE, aBlockRows);
    job.getConfiguration().setLong(PROP_OMEGA_SEED, seed);
    job.getConfiguration().setInt(PROP_K, k);
    job.getConfiguration().setInt(PROP_P, p);
    if (sbPath != null) {
        job.getConfiguration().set(PROP_SB_PATH, sbPath.toString());
    }

    /*
     * number of reduce tasks doesn't matter. we don't actually send anything to
     * reducers.
     */

    job.setNumReduceTasks(0 /* numReduceTasks */);

    job.submit();
    job.waitForCompletion(false);

    if (!job.isSuccessful()) {
        throw new IOException("Q job unsuccessful.");
    }

}
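
A hypothetical invocation of the run method above; the driver class, paths, and parameter values are illustrative and do not come from the Mahout source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class QJobDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path[] inputs = { new Path("/ssvd/A") }; // hypothetical input location
        QJob.run(conf, inputs,
                null,                        // sbPath is optional and may be null
                new Path("/ssvd/Q"),         // hypothetical output location
                10000,                       // aBlockRows
                -1,                          // minSplitSize <= 0 keeps the default
                100,                         // k: decomposition rank
                15,                          // p: oversampling
                System.currentTimeMillis(),  // seed for the Omega matrix
                0);                          // numReduceTasks (ignored; map-only job)
    }
}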

From source file: org.apache.mahout.math.hadoop.stochasticsvd.YtYJob.java

License: Apache License

public static void run(Configuration conf, Path[] inputPaths, Path outputPath, int k, int p, long seed)
        throws ClassNotFoundException, InterruptedException, IOException {

    Job job = new Job(conf);
    job.setJobName("YtY-job");
    job.setJarByClass(YtYJob.class);

    job.setInputFormatClass(SequenceFileInputFormat.class);
    FileInputFormat.setInputPaths(job, inputPaths);
    FileOutputFormat.setOutputPath(job, outputPath);

    SequenceFileOutputFormat.setOutputCompressionType(job, CompressionType.BLOCK);

    job.setMapOutputKeyClass(IntWritable.class);
    job.setMapOutputValueClass(VectorWritable.class);

    job.setOutputKeyClass(IntWritable.class);
    job.setOutputValueClass(VectorWritable.class);

    job.setMapperClass(YtYMapper.class);

    job.getConfiguration().setLong(PROP_OMEGA_SEED, seed);
    job.getConfiguration().setInt(PROP_K, k);
    job.getConfiguration().setInt(PROP_P, p);

    /*
     * we must reduce to just one matrix which means we need only one reducer.
     * But it's ok since each mapper outputs only one vector (a packed
     * UpperTriangular) so even if there're thousands of mappers, one reducer
     * should cope just fine.
     */
    job.setNumReduceTasks(1);

    job.submit();
    job.waitForCompletion(false);

    if (!job.isSuccessful()) {
        throw new IOException("YtY job unsuccessful.");
    }

}

From source file: org.apache.parquet.avro.TestInputOutputFormat.java

License: Apache License

private void waitForJob(Job job) throws Exception {
    job.submit();
    while (!job.isComplete()) {
        LOG.debug("waiting for job " + job.getJobName());
        sleep(100);
    }
    LOG.info("status for job " + job.getJobName() + ": " + (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
    if (!job.isSuccessful()) {
        throw new RuntimeException("job failed " + job.getJobName());
    }
}
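
For most purposes the submit-and-poll loop above is equivalent to Job#waitForCompletion, which submits the job if necessary, optionally prints progress, and returns the value of isSuccessful(). A condensed sketch of the same helper:

private void waitForJob(Job job) throws Exception {
    // waitForCompletion(true) submits if needed, prints progress,
    // and returns isSuccessful()
    if (!job.waitForCompletion(true)) {
        throw new RuntimeException("job failed " + job.getJobName());
    }
}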

From source file: org.apache.parquet.hadoop.DeprecatedInputFormatTest.java

License: Apache License

private void waitForJob(Job job) throws InterruptedException, IOException {
    while (!job.isComplete()) {
        System.out.println("waiting for job " + job.getJobName());
        sleep(100);
    }
    System.out.println(
            "status for job " + job.getJobName() + ": " + (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
    if (!job.isSuccessful()) {
        throw new RuntimeException("job failed " + job.getJobName());
    }
}

From source file: org.apache.parquet.hadoop.example.TestInputOutputFormat.java

License: Apache License

private void waitForJob(Job job) throws InterruptedException, IOException {
    while (!job.isComplete()) {
        LOG.debug("waiting for job " + job.getJobName());
        sleep(100);
    }
    LOG.info("status for job " + job.getJobName() + ": " + (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
    if (!job.isSuccessful()) {
        throw new RuntimeException("job failed " + job.getJobName());
    }
}

From source file: org.apache.parquet.hadoop.TestInputFormatColumnProjection.java

License: Apache License

private void waitForJob(Job job) throws Exception {
    job.submit();
    while (!job.isComplete()) {
        sleep(100);
    }
    if (!job.isSuccessful()) {
        throw new RuntimeException("job failed " + job.getJobName());
    }
}

From source file: org.apache.parquet.hadoop.thrift.TestInputOutputFormat.java

License: Apache License

public static void waitForJob(Job job) throws Exception {
    job.submit();
    while (!job.isComplete()) {
        LOG.debug("waiting for job " + job.getJobName());
        sleep(100);
    }
    LOG.info("status for job " + job.getJobName() + ": " + (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
    if (!job.isSuccessful()) {
        throw new RuntimeException("job failed " + job.getJobName());
    }
}

From source file: org.apache.parquet.proto.utils.WriteUsingMR.java

License: Apache License

static void waitForJob(Job job) throws Exception {
    job.submit();
    while (!job.isComplete()) {
        LOG.debug("waiting for job " + job.getJobName());
        sleep(50);
    }
    LOG.debug("status for job " + job.getJobName() + ": " + (job.isSuccessful() ? "SUCCESS" : "FAILURE"));
    if (!job.isSuccessful()) {
        throw new RuntimeException("job failed " + job.getJobName());
    }
}

From source file: org.apache.phoenix.end2end.IndexScrutinyToolIT.java

License: Apache License

/**
 * Tests a data table that is correctly indexed. Scrutiny should report all rows as valid.
 */
@Test
public void testValidIndex() throws Exception {
    // insert two rows
    upsertRow(dataTableUpsertStmt, 1, "name-1", 94010);
    upsertRow(dataTableUpsertStmt, 2, "name-2", 95123);
    conn.commit();

    int numDataRows = countRows(dataTableFullName);
    int numIndexRows = countRows(indexTableFullName);

    // scrutiny should report everything as ok
    List<Job> completedJobs = runScrutiny(schemaName, dataTableName, indexTableName);
    Job job = completedJobs.get(0);
    assertTrue(job.isSuccessful());
    Counters counters = job.getCounters();
    assertEquals(2, getCounterValue(counters, VALID_ROW_COUNT));
    assertEquals(0, getCounterValue(counters, INVALID_ROW_COUNT));

    // make sure row counts weren't modified by scrutiny
    assertEquals(numDataRows, countRows(dataTableFullName));
    assertEquals(numIndexRows, countRows(indexTableFullName));
}

From source file: org.apache.phoenix.end2end.IndexScrutinyToolIT.java

License: Apache License

/**
 * Tests an index with the same number of rows as the data table, but one of the index rows is
 * incorrect. Scrutiny should report the invalid rows.
 */
@Test
public void testEqualRowCountIndexIncorrect() throws Exception {
    // insert one valid row
    upsertRow(dataTableUpsertStmt, 1, "name-1", 94010);
    conn.commit();

    // disable the index and insert another row which is not indexed
    disableIndex();
    upsertRow(dataTableUpsertStmt, 2, "name-2", 95123);
    conn.commit();

    // insert a bad row into the index
    upsertIndexRow("badName", 2, 9999);
    conn.commit();

    // scrutiny should report the bad row
    List<Job> completedJobs = runScrutiny(schemaName, dataTableName, indexTableName);
    Job job = completedJobs.get(0);
    assertTrue(job.isSuccessful());
    Counters counters = job.getCounters();
    assertEquals(1, getCounterValue(counters, VALID_ROW_COUNT));
    assertEquals(1, getCounterValue(counters, INVALID_ROW_COUNT));
}
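
As the Phoenix tests above illustrate, a successful status is often combined with counter checks to validate what the job actually did. A minimal sketch of that combination; the helper class and the zero-output check are illustrative, though TaskCounter.MAP_OUTPUT_RECORDS is a built-in Hadoop counter:

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskCounter;

public class JobVerifier {

    static void verifyJob(Job job) throws Exception {
        if (!job.isSuccessful()) {
            throw new IllegalStateException("job failed: " + job.getJobName());
        }
        Counters counters = job.getCounters();
        // hypothetical sanity check: a successful job with zero map output
        // records may still point to a misconfigured input path
        if (counters.findCounter(TaskCounter.MAP_OUTPUT_RECORDS).getValue() == 0) {
            throw new IllegalStateException("job succeeded but produced no map output");
        }
    }
}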