Example usage for org.apache.hadoop.mapreduce Job isSuccessful

List of usage examples for org.apache.hadoop.mapreduce Job isSuccessful

Introduction

On this page you can find example usages of org.apache.hadoop.mapreduce.Job#isSuccessful().

Prototype

public boolean isSuccessful() throws IOException 

Document

Check if the job completed successfully.
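
Before the project-specific examples below, here is a minimal sketch of the usual call pattern, assuming a hypothetical IsSuccessfulExample class and placeholder input/output path arguments: build the Job, block on waitForCompletion(), then use isSuccessful() to choose an exit code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class IsSuccessfulExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "is-successful-example");
        job.setJarByClass(IsSuccessfulExample.class);
        // Mapper/reducer configuration is omitted; the framework defaults (identity map/reduce) suffice for a sketch.
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // waitForCompletion(true) submits the job and blocks until it finishes, printing progress.
        // isSuccessful() is only meaningful once the job has been submitted, so it is queried after completion here.
        job.waitForCompletion(true);
        System.exit(job.isSuccessful() ? 0 : 1);
    }
}

Note that waitForCompletion() itself returns a boolean with the same meaning, so isSuccessful() is most useful when the Job reference outlives the submission call, as in several of the examples below.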

Usage

From source file: org.apache.phoenix.end2end.IndexScrutinyToolIT.java

License: Apache License

/**
 * Tests an index where the index pk is correct (indexed col values are indexed correctly), but
 * a covered index value is incorrect. Scrutiny should report the invalid row.
 */
@Test
public void testCoveredValueIncorrect() throws Exception {
    // insert one valid row
    upsertRow(dataTableUpsertStmt, 1, "name-1", 94010);
    conn.commit();

    // disable index and insert another data row
    disableIndex();
    upsertRow(dataTableUpsertStmt, 2, "name-2", 95123);
    conn.commit();

    // insert a bad index row for the above data row
    upsertIndexRow("name-2", 2, 9999);
    conn.commit();

    // scrutiny should report the bad row
    List<Job> completedJobs = runScrutiny(schemaName, dataTableName, indexTableName);
    Job job = completedJobs.get(0);
    assertTrue(job.isSuccessful());
    Counters counters = job.getCounters();
    assertEquals(1, getCounterValue(counters, VALID_ROW_COUNT));
    assertEquals(1, getCounterValue(counters, INVALID_ROW_COUNT));
    assertEquals(1, getCounterValue(counters, BAD_COVERED_COL_VAL_COUNT));
}

From source file: org.apache.phoenix.end2end.IndexScrutinyToolIT.java

License: Apache License

/**
 * Tests batching of row comparisons. Inserts 1001 rows, with some random bad rows, and runs
 * scrutiny with a batch size of 10.
 */
@Test
public void testBatching() throws Exception {
    // insert 1001 data and index rows
    int numTestRows = 1001;
    for (int i = 0; i < numTestRows; i++) {
        upsertRow(dataTableUpsertStmt, i, "name-" + i, i + 1000);
    }
    conn.commit();

    disableIndex();

    // randomly delete some rows from the index
    Random random = new Random();
    for (int i = 0; i < 100; i++) {
        int idToDelete = random.nextInt(numTestRows);
        deleteRow(indexTableFullName, "WHERE \":ID\"=" + idToDelete);
    }
    conn.commit();
    int numRows = countRows(indexTableFullName);
    int numDeleted = numTestRows - numRows;

    // run scrutiny with batch size of 10
    List<Job> completedJobs = runScrutiny(schemaName, dataTableName, indexTableName, System.currentTimeMillis(),
            10L);
    Job job = completedJobs.get(0);
    assertTrue(job.isSuccessful());
    Counters counters = job.getCounters();
    assertEquals(numTestRows - numDeleted, getCounterValue(counters, VALID_ROW_COUNT));
    assertEquals(numDeleted, getCounterValue(counters, INVALID_ROW_COUNT));
    assertEquals(numTestRows / 10 + numTestRows % 10, getCounterValue(counters, BATCHES_PROCESSED_COUNT));
}

From source file: org.apache.phoenix.end2end.IndexScrutinyToolIT.java

License: Apache License

/**
 * Tests when there are more data table rows than index table rows. Scrutiny should report the
 * number of incorrect rows.
 */
@Test
public void testMoreDataRows() throws Exception {
    upsertRow(dataTableUpsertStmt, 1, "name-1", 95123);
    conn.commit();
    disableIndex();
    // these rows won't have a corresponding index row
    upsertRow(dataTableUpsertStmt, 2, "name-2", 95124);
    upsertRow(dataTableUpsertStmt, 3, "name-3", 95125);
    conn.commit();

    List<Job> completedJobs = runScrutiny(schemaName, dataTableName, indexTableName);
    Job job = completedJobs.get(0);
    assertTrue(job.isSuccessful());
    Counters counters = job.getCounters();
    assertEquals(1, getCounterValue(counters, VALID_ROW_COUNT));
    assertEquals(2, getCounterValue(counters, INVALID_ROW_COUNT));
}

From source file: org.apache.phoenix.end2end.IndexScrutinyToolIT.java

License: Apache License

/**
 * Tests when there are more index table rows than data table rows. Scrutiny should report the
 * number of incorrect rows when run with the index as the source table.
 */
@Test
public void testMoreIndexRows() throws Exception {
    upsertRow(dataTableUpsertStmt, 1, "name-1", 95123);
    conn.commit();
    disableIndex();
    // these index rows won't have a corresponding data row
    upsertIndexRow("name-2", 2, 95124);
    upsertIndexRow("name-3", 3, 95125);
    conn.commit();

    List<Job> completedJobs = runScrutiny(schemaName, dataTableName, indexTableName, System.currentTimeMillis(),
            10L, SourceTable.INDEX_TABLE_SOURCE);
    Job job = completedJobs.get(0);
    assertTrue(job.isSuccessful());
    Counters counters = job.getCounters();
    assertEquals(1, getCounterValue(counters, VALID_ROW_COUNT));
    assertEquals(2, getCounterValue(counters, INVALID_ROW_COUNT));
}

From source file: org.apache.phoenix.end2end.IndexScrutinyToolIT.java

License: Apache License

/**
 * Tests running with both the index and data tables as the source table. If we have an
 * incorrectly indexed row, it should be reported in each direction.
 */
@Test
public void testBothDataAndIndexAsSource() throws Exception {
    // insert one valid row
    upsertRow(dataTableUpsertStmt, 1, "name-1", 94010);
    conn.commit();

    // disable the index and insert another row which is not indexed
    disableIndex();
    upsertRow(dataTableUpsertStmt, 2, "name-2", 95123);
    conn.commit();

    // insert a bad row into the index
    upsertIndexRow("badName", 2, 9999);
    conn.commit();

    List<Job> completedJobs = runScrutiny(schemaName, dataTableName, indexTableName, System.currentTimeMillis(),
            10L, SourceTable.BOTH);
    assertEquals(2, completedJobs.size());
    for (Job job : completedJobs) {
        assertTrue(job.isSuccessful());
        Counters counters = job.getCounters();
        assertEquals(1, getCounterValue(counters, VALID_ROW_COUNT));
        assertEquals(1, getCounterValue(counters, INVALID_ROW_COUNT));
    }
}

From source file: org.apache.rya.joinselect.mr.JoinSelectProspectOutput.java

License: Apache License

@Override
public int run(final String[] args)
        throws AccumuloSecurityException, IOException, ClassNotFoundException, InterruptedException {

    final Configuration conf = getConf();
    final String inTable = conf.get(PROSPECTS_TABLE);
    final String auths = conf.get(AUTHS);
    final String outPath = conf.get(PROSPECTS_OUTPUTPATH);

    assert inTable != null && outPath != null;

    final Job job = new Job(conf, this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
    job.setJarByClass(this.getClass());
    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_USER_CLASSPATH_FIRST, true);

    JoinSelectStatsUtil.initTabToSeqFileJob(job, inTable, outPath, auths);
    job.setMapperClass(CardinalityMapper.class);

    job.setNumReduceTasks(0);

    job.waitForCompletion(true);

    return job.isSuccessful() ? 0 : 1;

}

From source file: org.apache.rya.prospector.mr.Prospector.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    final Configuration conf = getConf();

    truncatedDate = DateUtils.truncate(new Date(NOW), Calendar.MINUTE);

    final Path configurationPath = new Path(args[0]);
    conf.addResource(configurationPath);

    final String inTable = conf.get("prospector.intable");
    final String outTable = conf.get("prospector.outtable");
    final String auths_str = conf.get("prospector.auths");
    assert inTable != null;
    assert outTable != null;
    assert auths_str != null;

    final Job job = new Job(getConf(), this.getClass().getSimpleName() + "_" + System.currentTimeMillis());
    job.setJarByClass(this.getClass());

    final String[] auths = auths_str.split(",");
    ProspectorUtils.initMRJob(job, inTable, outTable, auths);

    job.getConfiguration().setLong("DATE", NOW);

    final String performant = conf.get(PERFORMANT);
    if (Boolean.parseBoolean(performant)) {
        /**
         * Apply some performance tuning
         */
        ProspectorUtils.addMRPerformance(job.getConfiguration());
    }

    job.setMapOutputKeyClass(IntermediateProspect.class);
    job.setMapOutputValueClass(LongWritable.class);

    job.setMapperClass(ProspectorMapper.class);
    job.setCombinerClass(ProspectorCombiner.class);
    job.setReducerClass(ProspectorReducer.class);
    job.waitForCompletion(true);

    final int success = job.isSuccessful() ? 0 : 1;

    if (success == 0) {
        final Mutation m = new Mutation(METADATA);
        m.put(PROSPECT_TIME, getReverseIndexDateTime(truncatedDate), new ColumnVisibility(DEFAULT_VIS),
                truncatedDate.getTime(), new Value(EMPTY));
        writeMutations(connector(instance(conf), conf), outTable, Collections.singleton(m));
    }

    return success;
}

From source file: org.apache.solr.hadoop.MorphlineBasicMiniMRTest.java

License: Apache License

@Test
public void mrRun() throws Exception {
    FileSystem fs = dfsCluster.getFileSystem();
    Path inDir = fs.makeQualified(new Path("/user/testing/testMapperReducer/input"));
    fs.delete(inDir, true);
    String DATADIR = "/user/testing/testMapperReducer/data";
    Path dataDir = fs.makeQualified(new Path(DATADIR));
    fs.delete(dataDir, true);
    Path outDir = fs.makeQualified(new Path("/user/testing/testMapperReducer/output"));
    fs.delete(outDir, true);

    assertTrue(fs.mkdirs(inDir));
    Path INPATH = new Path(inDir, "input.txt");
    OutputStream os = fs.create(INPATH);
    Writer wr = new OutputStreamWriter(os, "UTF-8");
    wr.write(DATADIR + "/" + inputAvroFile);
    wr.close();

    assertTrue(fs.mkdirs(dataDir));
    fs.copyFromLocalFile(new Path(DOCUMENTS_DIR, inputAvroFile), dataDir);

    JobConf jobConf = getJobConf();
    if (ENABLE_LOCAL_JOB_RUNNER) { // enable Hadoop LocalJobRunner; this makes it possible to run in a debugger and set breakpoints
        jobConf.set("mapred.job.tracker", "local");
    }
    jobConf.setMaxMapAttempts(1);
    jobConf.setMaxReduceAttempts(1);
    jobConf.setJar(SEARCH_ARCHIVES_JAR);
    jobConf.setBoolean(ExtractingParams.IGNORE_TIKA_EXCEPTION, false);

    int shards = 2;
    int maxReducers = Integer.MAX_VALUE;
    if (ENABLE_LOCAL_JOB_RUNNER) {
        // local job runner has a couple of limitations: only one reducer is supported and the DistributedCache doesn't work.
        // see http://blog.cloudera.com/blog/2009/07/advice-on-qa-testing-your-mapreduce-jobs/
        maxReducers = 1;
        shards = 1;
    }

    String[] args = new String[] {
            "--morphline-file=" + RESOURCES_DIR + "/test-morphlines/solrCellDocumentTypes.conf",
            "--morphline-id=morphline1", "--solr-home-dir=" + MINIMR_CONF_DIR.getAbsolutePath(),
            "--output-dir=" + outDir.toString(), "--shards=" + shards, "--verbose",
            numRuns % 2 == 0 ? "--input-list=" + INPATH.toString() : dataDir.toString(),
            numRuns % 3 == 0 ? "--reducers=" + shards
                    : (numRuns % 3 == 1 ? "--reducers=-1" : "--reducers=" + Math.min(8, maxReducers)) };
    if (numRuns % 3 == 2) {
        args = concat(args, new String[] { "--fanout=2" });
    }
    if (numRuns == 0) {
        // force (slow) MapReduce based randomization to get coverage for that as well
        args = concat(new String[] { "-D", MapReduceIndexerTool.MAIN_MEMORY_RANDOMIZATION_THRESHOLD + "=-1" },
                args);
    }
    MapReduceIndexerTool tool = createTool();
    int res = ToolRunner.run(jobConf, tool, args);
    assertEquals(0, res);
    Job job = tool.job;
    assertTrue(job.isComplete());
    assertTrue(job.isSuccessful());

    if (numRuns % 3 != 2) {
        // Only run this check if mtree merge is disabled.
        // With mtree merge enabled the BatchWriter counters aren't available anymore because 
        // variable "job" now refers to the merge job rather than the indexing job
        assertEquals(
                "Invalid counter " + SolrRecordWriter.class.getName() + "." + SolrCounters.DOCUMENTS_WRITTEN,
                count,
                job.getCounters()
                        .findCounter(SolrCounters.class.getName(), SolrCounters.DOCUMENTS_WRITTEN.toString())
                        .getValue());
    }

    // Check the output is as expected
    outDir = new Path(outDir, MapReduceIndexerTool.RESULTS_DIR);
    Path[] outputFiles = FileUtil.stat2Paths(fs.listStatus(outDir));

    System.out.println("outputfiles:" + Arrays.toString(outputFiles));

    TestUtils.validateSolrServerDocumentCount(MINIMR_CONF_DIR, fs, outDir, count, shards);

    // run again in --dry-run mode:
    tool = createTool();
    args = concat(args, new String[] { "--dry-run" });
    res = ToolRunner.run(jobConf, tool, args);
    assertEquals(0, res);

    numRuns++;
}

From source file: org.kiji.mapreduce.framework.JobHistoryKijiTable.java

License: Apache License

/**
 * Writes a job into the JobHistoryKijiTable.
 *
 * @param job The job to save.
 * @param startTime The time the job began, in milliseconds.
 * @param endTime The time the job ended, in milliseconds.
 * @throws IOException If there is an error writing to the table.
 */
public void recordJob(Job job, long startTime, long endTime) throws IOException {
    KijiTableWriter writer = mKijiTable.openTableWriter();
    EntityId jobEntity = mKijiTable.getEntityId(job.getJobID().toString());
    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        writer.put(jobEntity, JOB_HISTORY_FAMILY, JOB_HISTORY_ID_QUALIFIER, startTime,
                job.getJobID().toString());
        writer.put(jobEntity, JOB_HISTORY_FAMILY, JOB_HISTORY_NAME_QUALIFIER, startTime, job.getJobName());
        writer.put(jobEntity, JOB_HISTORY_FAMILY, JOB_HISTORY_START_TIME_QUALIFIER, startTime, startTime);
        writer.put(jobEntity, JOB_HISTORY_FAMILY, JOB_HISTORY_END_TIME_QUALIFIER, startTime, endTime);
        writer.put(jobEntity, JOB_HISTORY_FAMILY, JOB_HISTORY_END_STATUS_QUALIFIER, startTime,
                job.isSuccessful() ? "SUCCEEDED" : "FAILED");
        writer.put(jobEntity, JOB_HISTORY_FAMILY, JOB_HISTORY_COUNTERS_QUALIFIER, startTime,
                job.getCounters().toString());
        job.getConfiguration().writeXml(baos);
        writer.put(jobEntity, JOB_HISTORY_FAMILY, JOB_HISTORY_CONFIGURATION_QUALIFIER, startTime,
                baos.toString("UTF-8"));
        writeIndividualCounters(writer, job);
    } finally {
        ResourceUtils.closeOrLog(writer);
    }
}

From source file: org.kiji.mapreduce.TestMapReduceJob.java

License: Apache License

@Test
public void testSubmit() throws ClassNotFoundException, IOException, InterruptedException {
    Job hadoopJob = createMock(Job.class);

    // Expect that the job is submitted and that it is successful.
    hadoopJob.submit();
    expect(hadoopJob.isComplete()).andReturn(false);
    expect(hadoopJob.isComplete()).andReturn(true);
    expect(hadoopJob.isSuccessful()).andReturn(true);

    replay(hadoopJob);

    MapReduceJob job = new ConcreteMapReduceJob(hadoopJob);
    MapReduceJob.Status jobStatus = job.submit();
    assertFalse(jobStatus.isComplete());
    assertTrue(jobStatus.isComplete());
    assertTrue(jobStatus.isSuccessful());

    verify(hadoopJob);
}