Example usage for org.apache.hadoop.mapreduce Job getJobID

List of usage examples for org.apache.hadoop.mapreduce Job getJobID

Introduction

On this page you can find example usage for org.apache.hadoop.mapreduce Job getJobID.

Prototype

public JobID getJobID() 

Document

Get the unique ID for the job.
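
The framework assigns the ID when the job is submitted, so getJobID() typically returns null for a job that has not been submitted yet; the Rya tests below call it on an unsubmitted job and pass the (possibly null) ID into a JobContextImpl, while the remaining examples read it after submit(). The following is a minimal sketch of reading the ID of a submitted job; the job name and the assumption that the mapper, input and output are already configured are illustrative, not taken from the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;

public class GetJobIdSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "get-job-id-sketch");
        // configure mapper, reducer, input/output formats and paths here before submitting

        job.submit();                    // the ID is assigned during submission
        JobID jobId = job.getJobID();    // unique ID of the submitted job
        System.out.println("Submitted " + jobId + ", tracking URL: " + job.getTrackingURL());

        job.waitForCompletion(true);     // optionally block until the job finishes
    }
}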

Usage

From source file:org.apache.rya.accumulo.mr.GraphXEdgeInputFormatTest.java

License:Apache License

@SuppressWarnings("rawtypes")
@Test
public void testInputFormat() throws Exception {
    RyaStatement input = RyaStatement.builder().setSubject(new RyaURI("http://www.google.com"))
            .setPredicate(new RyaURI("http://some_other_uri")).setObject(new RyaURI("http://www.yahoo.com"))
            .setColumnVisibility(new byte[0]).setValue(new byte[0]).build();

    apiImpl.add(input);

    Job jobConf = Job.getInstance();

    GraphXEdgeInputFormat.setMockInstance(jobConf, instance.getInstanceName());
    GraphXEdgeInputFormat.setConnectorInfo(jobConf, username, password);
    GraphXEdgeInputFormat.setTableLayout(jobConf, TABLE_LAYOUT.SPO);
    GraphXEdgeInputFormat.setInputTableName(jobConf, table);

    GraphXEdgeInputFormat.setScanIsolation(jobConf, false);
    GraphXEdgeInputFormat.setLocalIterators(jobConf, false);
    GraphXEdgeInputFormat.setOfflineTableScan(jobConf, false);

    GraphXEdgeInputFormat inputFormat = new GraphXEdgeInputFormat();

    JobContext context = new JobContextImpl(jobConf.getConfiguration(), jobConf.getJobID());
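    // this test never calls submit(), so jobConf.getJobID() above is typically null;
    // the JobContext built from it is only used to obtain input splits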

    List<InputSplit> splits = inputFormat.getSplits(context);

    Assert.assertEquals(1, splits.size());

    TaskAttemptContext taskAttemptContext = new TaskAttemptContextImpl(context.getConfiguration(),
            new TaskAttemptID(new TaskID(), 1));

    RecordReader reader = inputFormat.createRecordReader(splits.get(0), taskAttemptContext);

    RecordReader ryaStatementRecordReader = (RecordReader) reader;
    ryaStatementRecordReader.initialize(splits.get(0), taskAttemptContext);

    List<Edge> results = new ArrayList<Edge>();
    while (ryaStatementRecordReader.nextKeyValue()) {
        Edge writable = (Edge) ryaStatementRecordReader.getCurrentValue();
        long srcId = writable.srcId();
        long destId = writable.dstId();
        RyaTypeWritable rtw = null;
        Object text = ryaStatementRecordReader.getCurrentKey();
        Edge<RyaTypeWritable> edge = new Edge<RyaTypeWritable>(srcId, destId, rtw);
        results.add(edge);

        System.out.println(text);
    }

    System.out.println(results.size());
    System.out.println(results);
    Assert.assertTrue(results.size() == 2);
}

From source file:org.apache.rya.accumulo.mr.GraphXInputFormatTest.java

License:Apache License

@Test
public void testInputFormat() throws Exception {
    RyaStatement input = RyaStatement.builder().setSubject(new RyaURI("http://www.google.com"))
            .setPredicate(new RyaURI("http://some_other_uri")).setObject(new RyaURI("http://www.yahoo.com"))
            .setColumnVisibility(new byte[0]).setValue(new byte[0]).build();

    apiImpl.add(input);

    Job jobConf = Job.getInstance();

    GraphXInputFormat.setMockInstance(jobConf, instance.getInstanceName());
    GraphXInputFormat.setConnectorInfo(jobConf, username, password);
    GraphXInputFormat.setInputTableName(jobConf, table);

    GraphXInputFormat.setScanIsolation(jobConf, false);
    GraphXInputFormat.setLocalIterators(jobConf, false);
    GraphXInputFormat.setOfflineTableScan(jobConf, false);

    GraphXInputFormat inputFormat = new GraphXInputFormat();

    JobContext context = new JobContextImpl(jobConf.getConfiguration(), jobConf.getJobID());

    List<InputSplit> splits = inputFormat.getSplits(context);

    Assert.assertEquals(1, splits.size());

    TaskAttemptContext taskAttemptContext = new TaskAttemptContextImpl(context.getConfiguration(),
            new TaskAttemptID(new TaskID(), 1));

    RecordReader<Object, RyaTypeWritable> reader = inputFormat.createRecordReader(splits.get(0),
            taskAttemptContext);

    RyaStatementRecordReader ryaStatementRecordReader = (RyaStatementRecordReader) reader;
    ryaStatementRecordReader.initialize(splits.get(0), taskAttemptContext);

    List<RyaType> results = new ArrayList<RyaType>();
    System.out.println("before while");
    while (ryaStatementRecordReader.nextKeyValue()) {
        System.out.println("in while");
        RyaTypeWritable writable = ryaStatementRecordReader.getCurrentValue();
        RyaType value = writable.getRyaType();
        Object text = ryaStatementRecordReader.getCurrentKey();
        RyaType type = new RyaType();
        type.setData(value.getData());
        type.setDataType(value.getDataType());
        results.add(type);

        System.out.println(value.getData());
        System.out.println(value.getDataType());
        System.out.println(results);
        System.out.println(type);
        System.out.println(text);
        System.out.println(value);
    }
    System.out.println("after while");

    System.out.println(results.size());
    System.out.println(results);
    //        Assert.assertTrue(results.size() == 2);
    //        Assert.assertTrue(results.contains(input));
}

From source file:org.apache.rya.accumulo.mr.RyaInputFormatTest.java

License:Apache License

@Test
public void testInputFormat() throws Exception {

    RyaStatement input = RyaStatement.builder().setSubject(new RyaURI("http://www.google.com"))
            .setPredicate(new RyaURI("http://some_other_uri")).setObject(new RyaURI("http://www.yahoo.com"))
            .setColumnVisibility(new byte[0]).setValue(new byte[0]).build();

    apiImpl.add(input);

    Job jobConf = Job.getInstance();

    RyaInputFormat.setMockInstance(jobConf, instance.getInstanceName());
    RyaInputFormat.setConnectorInfo(jobConf, username, password);
    RyaInputFormat.setTableLayout(jobConf, TABLE_LAYOUT.SPO);

    AccumuloInputFormat.setInputTableName(jobConf, table);
    AccumuloInputFormat.setScanIsolation(jobConf, false);
    AccumuloInputFormat.setLocalIterators(jobConf, false);
    AccumuloInputFormat.setOfflineTableScan(jobConf, false);

    RyaInputFormat inputFormat = new RyaInputFormat();

    JobContext context = new JobContextImpl(jobConf.getConfiguration(), jobConf.getJobID());

    List<InputSplit> splits = inputFormat.getSplits(context);

    Assert.assertEquals(1, splits.size());

    TaskAttemptContext taskAttemptContext = new TaskAttemptContextImpl(context.getConfiguration(),
            new TaskAttemptID(new TaskID(), 1));

    RecordReader<Text, RyaStatementWritable> reader = inputFormat.createRecordReader(splits.get(0),
            taskAttemptContext);

    RyaStatementRecordReader ryaStatementRecordReader = (RyaStatementRecordReader) reader;
    ryaStatementRecordReader.initialize(splits.get(0), taskAttemptContext);

    List<RyaStatement> results = new ArrayList<RyaStatement>();
    while (ryaStatementRecordReader.nextKeyValue()) {
        RyaStatementWritable writable = ryaStatementRecordReader.getCurrentValue();
        RyaStatement value = writable.getRyaStatement();
        Text text = ryaStatementRecordReader.getCurrentKey();
        RyaStatement stmt = RyaStatement.builder().setSubject(value.getSubject())
                .setPredicate(value.getPredicate()).setObject(value.getObject()).setContext(value.getContext())
                .setQualifier(value.getQualifer()).setColumnVisibility(value.getColumnVisibility())
                .setValue(value.getValue()).build();
        results.add(stmt);

        System.out.println(text);
        System.out.println(value);
    }

    Assert.assertTrue(results.size() == 2);
    Assert.assertTrue(results.contains(input));
}

From source file:org.apache.solr.hadoop.ForkedMapReduceIndexerTool.java

License:Apache License

public static int runIndexingPipeline(Job job, JobProcessCallback callback, Configuration conf, Options options,
        long programStartTime, FileSystem fs, Path fullInputList, long numFiles, int realMappers, int reducers)
        throws IOException, KeeperException, InterruptedException, ClassNotFoundException,
        FileNotFoundException {
    long startTime;
    float secs;

    Path outputResultsDir = new Path(options.outputDir, RESULTS_DIR);
    Path outputReduceDir = new Path(options.outputDir, "reducers");
    Path outputTreeMergeStep = new Path(options.outputDir, "mtree-merge-output");

    FileOutputFormat.setOutputPath(job, outputReduceDir);

    if (job.getConfiguration().get(JobContext.REDUCE_CLASS_ATTR) == null) { // enable customization
        job.setReducerClass(SolrReducer.class);
    }
    if (options.updateConflictResolver == null) {
        throw new IllegalArgumentException("updateConflictResolver must not be null");
    }
    job.getConfiguration().set(SolrReducer.UPDATE_CONFLICT_RESOLVER, options.updateConflictResolver);
    job.getConfiguration().setInt(SolrOutputFormat.SOLR_RECORD_WRITER_MAX_SEGMENTS, options.maxSegments);

    if (options.zkHost != null) {
        assert options.collection != null;
        /*
         * MapReduce partitioner that partitions the Mapper output such that each
         * SolrInputDocument gets sent to the SolrCloud shard that it would have
         * been sent to if the document were ingested via the standard SolrCloud
         * Near Real Time (NRT) API.
         * 
         * In other words, this class implements the same partitioning semantics
         * as the standard SolrCloud NRT API. This makes it possible to mix batch
         * updates from MapReduce ingestion with updates from standard NRT ingestion
         * on the same SolrCloud cluster, using identical unique document keys.
         */
        if (job.getConfiguration().get(JobContext.PARTITIONER_CLASS_ATTR) == null) { // enable customization
            job.setPartitionerClass(ForkedSolrCloudPartitioner.class);
        }
        job.getConfiguration().set(ForkedSolrCloudPartitioner.ZKHOST, options.zkHost);
        job.getConfiguration().set(ForkedSolrCloudPartitioner.COLLECTION, options.collection);
    }
    job.getConfiguration().setInt(ForkedSolrCloudPartitioner.SHARDS, options.shards);

    job.setOutputFormatClass(SolrOutputFormat.class);
    if (options.solrHomeDir != null) {
        SolrOutputFormat.setupSolrHomeCache(options.solrHomeDir, job);
    } else {
        assert options.zkHost != null;
        // use the config that this collection uses for the SolrHomeCache.
        ForkedZooKeeperInspector zki = new ForkedZooKeeperInspector();
        SolrZkClient zkClient = zki.getZkClient(options.zkHost);
        try {
            String configName = zki.readConfigName(zkClient, options.collection);
            File tmpSolrHomeDir = zki.downloadConfigDir(zkClient, configName);
            SolrOutputFormat.setupSolrHomeCache(tmpSolrHomeDir, job);
            LOG.debug("Using " + tmpSolrHomeDir + " as solr home");
            options.solrHomeDir = tmpSolrHomeDir;
        } finally {
            zkClient.close();
        }
    }

    //    MorphlineMapRunner runner = setupMorphline(job, options);
    //    if (options.isDryRun && runner != null) {
    //      LOG.info("Indexing {} files in dryrun mode", numFiles);
    //      startTime = System.currentTimeMillis();
    //      dryRun(job, runner, fs, fullInputList);
    //      secs = (System.currentTimeMillis() - startTime) / 1000.0f;
    //      LOG.info("Done. Indexing {} files in dryrun mode took {} secs", numFiles, secs);
    //      goodbye(null, programStartTime);
    //      return 0;
    //    }
    //    job.getConfiguration().set(MorphlineMapRunner.MORPHLINE_FILE_PARAM, options.morphlineFile.getName());

    job.setNumReduceTasks(reducers);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(SolrInputDocumentWritable.class);
    LOG.info("Indexing data into {} reducers", new Object[] { reducers });
    startTime = System.currentTimeMillis();
    job.submit();
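    // once submit() returns, getJobID() yields the cluster-assigned ID reported to the callback below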
    callback.jobStarted(job.getJobID().toString(), job.getTrackingURL());
    if (!waitForCompletion(job, options.isVerbose)) {
        return -1; // job failed
    }

    secs = (System.currentTimeMillis() - startTime) / 1000.0f;
    LOG.info("Done. Indexing data into {} reducers took {} secs", new Object[] { reducers, secs });

    int mtreeMergeIterations = 0;
    if (reducers > options.shards) {
        mtreeMergeIterations = (int) Math.round(log(options.fanout, reducers / options.shards));
    }
    LOG.debug("MTree merge iterations to do: {}", mtreeMergeIterations);
    int mtreeMergeIteration = 1;
    while (reducers > options.shards) { // run a mtree merge iteration
        job = Job.getInstance(conf);
        job.setJarByClass(ForkedMapReduceIndexerTool.class);
        job.setJobName(ForkedMapReduceIndexerTool.class.getName() + "/"
                + Utils.getShortClassName(ForkedTreeMergeMapper.class));
        job.setMapperClass(ForkedTreeMergeMapper.class);
        job.setOutputFormatClass(ForkedTreeMergeOutputFormat.class);
        job.setNumReduceTasks(0);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        job.setInputFormatClass(NLineInputFormat.class);

        Path inputStepDir = new Path(options.outputDir, "mtree-merge-input-iteration" + mtreeMergeIteration);
        fullInputList = new Path(inputStepDir, FULL_INPUT_LIST);
        LOG.debug("MTree merge iteration {}/{}: Creating input list file for mappers {}",
                new Object[] { mtreeMergeIteration, mtreeMergeIterations, fullInputList });
        numFiles = createTreeMergeInputDirList(job, outputReduceDir, fs, fullInputList);
        if (numFiles != reducers) {
            throw new IllegalStateException("Not same reducers: " + reducers + ", numFiles: " + numFiles);
        }
        NLineInputFormat.addInputPath(job, fullInputList);
        NLineInputFormat.setNumLinesPerSplit(job, options.fanout);
        FileOutputFormat.setOutputPath(job, outputTreeMergeStep);

        LOG.info("MTree merge iteration {}/{}: Merging {} shards into {} shards using fanout {}",
                new Object[] { mtreeMergeIteration, mtreeMergeIterations, reducers, (reducers / options.fanout),
                        options.fanout });
        startTime = System.currentTimeMillis();
        job.submit();
        callback.jobStarted(job.getJobID().toString(), job.getTrackingURL());
        if (!waitForCompletion(job, options.isVerbose)) {
            return -1; // job failed
        }
        if (!renameTreeMergeShardDirs(outputTreeMergeStep, job, fs)) {
            return -1;
        }
        secs = (System.currentTimeMillis() - startTime) / 1000.0f;
        LOG.info(
                "MTree merge iteration {}/{}: Done. Merging {} shards into {} shards using fanout {} took {} secs",
                new Object[] { mtreeMergeIteration, mtreeMergeIterations, reducers, (reducers / options.fanout),
                        options.fanout, secs });

        if (!delete(outputReduceDir, true, fs)) {
            return -1;
        }
        if (!rename(outputTreeMergeStep, outputReduceDir, fs)) {
            return -1;
        }
        assert reducers % options.fanout == 0;
        reducers = reducers / options.fanout;
        mtreeMergeIteration++;
    }
    assert reducers == options.shards;

    // normalize output shard dir prefix, i.e.
    // rename part-r-00000 to part-00000 (stems from zero tree merge iterations)
    // rename part-m-00000 to part-00000 (stems from > 0 tree merge iterations)
    for (FileStatus stats : fs.listStatus(outputReduceDir)) {
        String dirPrefix = SolrOutputFormat.getOutputName(job);
        Path srcPath = stats.getPath();
        if (stats.isDirectory() && srcPath.getName().startsWith(dirPrefix)) {
            String dstName = dirPrefix + srcPath.getName().substring(dirPrefix.length() + "-m".length());
            Path dstPath = new Path(srcPath.getParent(), dstName);
            if (!rename(srcPath, dstPath, fs)) {
                return -1;
            }
        }
    }

    // publish results dir
    if (!rename(outputReduceDir, outputResultsDir, fs)) {
        return -1;
    }

    if (options.goLive && !new GoLive().goLive(options, listSortedOutputShardDirs(job, outputResultsDir, fs))) {
        return -1;
    }

    goodbye(job, programStartTime);
    return 0;
}

From source file:org.apache.solr.hadoop.ForkedMapReduceIndexerTool.java

License:Apache License

private static String getJobInfo(Job job) {
    return "jobName: " + job.getJobName() + ", jobId: " + job.getJobID();
}

From source file:org.apache.sqoop.submission.mapreduce.MapreduceSubmissionEngine.java

License:Apache License

/**
 * {@inheritDoc}
 */
@Override
public boolean submit(JobRequest mrJobRequest) {
    // We're supporting only map reduce jobs
    MRJobRequest request = (MRJobRequest) mrJobRequest;

    // Clone global configuration
    Configuration configuration = new Configuration(globalConfiguration);

    // Serialize driver context into job configuration
    for (Map.Entry<String, String> entry : request.getDriverContext()) {
        if (entry.getValue() == null) {
            LOG.warn("Ignoring null driver context value for key " + entry.getKey());
            continue;
        }
        configuration.set(entry.getKey(), entry.getValue());
    }

    // Serialize connector context as a sub namespace
    for (Map.Entry<String, String> entry : request.getConnectorContext(Direction.FROM)) {
        if (entry.getValue() == null) {
            LOG.warn("Ignoring null connector context value for key " + entry.getKey());
            continue;
        }
        configuration.set(MRJobConstants.PREFIX_CONNECTOR_FROM_CONTEXT + entry.getKey(), entry.getValue());
    }

    for (Map.Entry<String, String> entry : request.getConnectorContext(Direction.TO)) {
        if (entry.getValue() == null) {
            LOG.warn("Ignoring null connector context value for key " + entry.getKey());
            continue;
        }
        configuration.set(MRJobConstants.PREFIX_CONNECTOR_TO_CONTEXT + entry.getKey(), entry.getValue());
    }

    // Set up notification URL if it's available
    if (request.getNotificationUrl() != null) {
        configuration.set("job.end.notification.url", request.getNotificationUrl());
    }

    // Turn off speculative execution
    configuration.setBoolean("mapred.map.tasks.speculative.execution", false);
    configuration.setBoolean("mapred.reduce.tasks.speculative.execution", false);

    // Promote all required jars to the job
    configuration.set("tmpjars", StringUtils.join(request.getJars(), ","));

    try {
        Job job = new Job(configuration);

        // link configs
        MRConfigurationUtils.setConnectorLinkConfig(Direction.FROM, job,
                request.getConnectorLinkConfig(Direction.FROM));
        MRConfigurationUtils.setConnectorLinkConfig(Direction.TO, job,
                request.getConnectorLinkConfig(Direction.TO));

        // from and to configs
        MRConfigurationUtils.setConnectorJobConfig(Direction.FROM, job, request.getJobConfig(Direction.FROM));
        MRConfigurationUtils.setConnectorJobConfig(Direction.TO, job, request.getJobConfig(Direction.TO));

        MRConfigurationUtils.setDriverConfig(job, request.getDriverConfig());
        MRConfigurationUtils.setConnectorSchema(Direction.FROM, job, request.getSummary().getFromSchema());
        MRConfigurationUtils.setConnectorSchema(Direction.TO, job, request.getSummary().getToSchema());

        if (request.getJobName() != null) {
            job.setJobName("Sqoop: " + request.getJobName());
        } else {
            job.setJobName("Sqoop job with id: " + request.getJobId());
        }

        job.setInputFormatClass(request.getInputFormatClass());

        job.setMapperClass(request.getMapperClass());
        job.setMapOutputKeyClass(request.getMapOutputKeyClass());
        job.setMapOutputValueClass(request.getMapOutputValueClass());

        // Set the number of reducers to the number of configured loaders, or
        // suppress the reduce phase entirely if no loaders are configured.
        if (request.getLoaders() != null) {
            job.setNumReduceTasks(request.getLoaders());
        } else {
            job.setNumReduceTasks(0);
        }

        job.setOutputFormatClass(request.getOutputFormatClass());
        job.setOutputKeyClass(request.getOutputKeyClass());
        job.setOutputValueClass(request.getOutputValueClass());

        // If we're in local mode, wait for completion. The local job runner does not
        // seem to expose an API for retrieving a previously submitted job, which makes
        // other methods of the submission engine quite useless.
        if (isLocal()) {
            job.waitForCompletion(true);
        } else {
            job.submit();
        }

        String jobId = job.getJobID().toString();
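        // the MapReduce job ID is recorded as the submission's external ID so the engine can track the job later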
        request.getSummary().setExternalId(jobId);
        request.getSummary().setExternalLink(job.getTrackingURL());

        LOG.debug("Executed new map-reduce job with id " + jobId);
    } catch (Exception e) {
        request.getSummary().setException(e);
        LOG.error("Error in submitting job", e);
        return false;
    }
    return true;
}

From source file:org.apache.tez.mapreduce.examples.OrderedWordCount.java

License:Apache License

public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
    if (otherArgs.length != 2) {
        System.err.println("Usage: wordcount <in> <out>");
        System.exit(2);
    }

    // Configure intermediate reduces
    conf.setInt(MRJobConfig.MRR_INTERMEDIATE_STAGES, 1);

    // Set reducer class for intermediate reduce
    conf.setClass(MultiStageMRConfigUtil.getPropertyNameForIntermediateStage(1, "mapreduce.job.reduce.class"),
            IntSumReducer.class, Reducer.class);
    // Set reducer output key class
    conf.setClass(
            MultiStageMRConfigUtil.getPropertyNameForIntermediateStage(1, "mapreduce.map.output.key.class"),
            IntWritable.class, Object.class);
    // Set reducer output value class
    conf.setClass(
            MultiStageMRConfigUtil.getPropertyNameForIntermediateStage(1, "mapreduce.map.output.value.class"),
            Text.class, Object.class);
    conf.setInt(MultiStageMRConfigUtil.getPropertyNameForIntermediateStage(1, "mapreduce.job.reduces"), 2);

    @SuppressWarnings("deprecation")
    Job job = new Job(conf, "orderedwordcount");
    job.setJarByClass(OrderedWordCount.class);

    // Configure map
    job.setMapperClass(TokenizerMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);

    // Configure reduce
    job.setReducerClass(MyOrderByNoOpReducer.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    FileInputFormat.addInputPath(job, new Path(otherArgs[0]));
    FileOutputFormat.setOutputPath(job, new Path(otherArgs[1]));

    YarnClient yarnClient = new YarnClientImpl();
    yarnClient.init(conf);
    yarnClient.start();

    TezClient tezClient = new TezClient(new TezConfiguration(conf));

    job.submit();
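    // getJobID() returns the MapReduce job ID assigned at submit();
    // it is converted below to the YARN ApplicationId used to locate the DAG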
    JobID jobId = job.getJobID();
    ApplicationId appId = TypeConverter.toYarn(jobId).getAppId();

    DAGClient dagClient = tezClient.getDAGClient(appId);
    DAGStatus dagStatus = null;
    while (true) {
        dagStatus = dagClient.getDAGStatus();
        if (dagStatus.getState() == DAGStatus.State.RUNNING || dagStatus.getState() == DAGStatus.State.SUCCEEDED
                || dagStatus.getState() == DAGStatus.State.FAILED
                || dagStatus.getState() == DAGStatus.State.KILLED
                || dagStatus.getState() == DAGStatus.State.ERROR) {
            break;
        }
        try {
            Thread.sleep(500);
        } catch (InterruptedException e) {
            // continue;
        }
    }

    while (dagStatus.getState() == DAGStatus.State.RUNNING) {
        try {
            ExampleDriver.printMRRDAGStatus(dagStatus);
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                // continue;
            }
            dagStatus = dagClient.getDAGStatus();
        } catch (TezException e) {
            LOG.fatal("Failed to get application progress. Exiting");
            System.exit(-1);
        }
    }

    ExampleDriver.printMRRDAGStatus(dagStatus);
    LOG.info("Application completed. " + "FinalState=" + dagStatus.getState());
    System.exit(dagStatus.getState() == DAGStatus.State.SUCCEEDED ? 0 : 1);
}

From source file:org.apache.tez.mapreduce.TestMRRJobs.java

License:Apache License

@Test(timeout = 60000)
public void testMRRSleepJob() throws IOException, InterruptedException, ClassNotFoundException {
    LOG.info("\n\n\nStarting testMRRSleepJob().");

    if (!(new File(MiniTezCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniTezCluster.APPJAR + " not found. Not running test.");
        return;
    }

    Configuration sleepConf = new Configuration(mrrTezCluster.getConfig());

    MRRSleepJob sleepJob = new MRRSleepJob();
    sleepJob.setConf(sleepConf);

    Job job = sleepJob.createJob(1, 1, 1, 1, 1, 1, 1, 1, 1, 1);

    job.setJarByClass(MRRSleepJob.class);
    job.setMaxMapAttempts(1); // speed up failures
    job.submit();
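    // the tracking URL and job ID read below are only meaningful once submit() has returned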
    String trackingUrl = job.getTrackingURL();
    String jobId = job.getJobID().toString();
    boolean succeeded = job.waitForCompletion(true);
    Assert.assertTrue(succeeded);
    Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
    Assert.assertTrue("Tracking URL was " + trackingUrl + " but didn't Match Job ID " + jobId,
            trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));

    // FIXME once counters and task progress can be obtained properly
    // TODO use dag client to test counters and task progress?
    // what about completed jobs?
}

From source file:org.apache.tez.mapreduce.TestMRRJobs.java

License:Apache License

@Test(timeout = 60000)
public void testRandomWriter() throws IOException, InterruptedException, ClassNotFoundException {

    LOG.info("\n\n\nStarting testRandomWriter().");
    if (!(new File(MiniTezCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniTezCluster.APPJAR + " not found. Not running test.");
        return;
    }

    RandomTextWriterJob randomWriterJob = new RandomTextWriterJob();
    mrrTezCluster.getConfig().set(RandomTextWriterJob.TOTAL_BYTES, "3072");
    mrrTezCluster.getConfig().set(RandomTextWriterJob.BYTES_PER_MAP, "1024");
    Job job = randomWriterJob.createJob(mrrTezCluster.getConfig());
    Path outputDir = new Path(OUTPUT_ROOT_DIR, "random-output");
    FileOutputFormat.setOutputPath(job, outputDir);
    job.setSpeculativeExecution(false);
    job.setJarByClass(RandomTextWriterJob.class);
    job.setMaxMapAttempts(1); // speed up failures
    job.submit();
    String trackingUrl = job.getTrackingURL();
    String jobId = job.getJobID().toString();
    boolean succeeded = job.waitForCompletion(true);
    Assert.assertTrue(succeeded);
    Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
    Assert.assertTrue("Tracking URL was " + trackingUrl + " but didn't Match Job ID " + jobId,
            trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));

    // Make sure there are three files in the output-dir

    RemoteIterator<FileStatus> iterator = FileContext.getFileContext(mrrTezCluster.getConfig())
            .listStatus(outputDir);
    int count = 0;
    while (iterator.hasNext()) {
        FileStatus file = iterator.next();
        if (!file.getPath().getName().equals(FileOutputCommitter.SUCCEEDED_FILE_NAME)) {
            count++;
        }
    }
    Assert.assertEquals("Number of part files is wrong!", 3, count);

}

From source file:org.apache.tez.mapreduce.TestMRRJobs.java

License:Apache License

@Test(timeout = 60000)
public void testMRRSleepJobWithCompression() throws IOException, InterruptedException, ClassNotFoundException {
    LOG.info("\n\n\nStarting testMRRSleepJobWithCompression().");

    if (!(new File(MiniTezCluster.APPJAR)).exists()) {
        LOG.info("MRAppJar " + MiniTezCluster.APPJAR + " not found. Not running test.");
        return;
    }

    Configuration sleepConf = new Configuration(mrrTezCluster.getConfig());

    MRRSleepJob sleepJob = new MRRSleepJob();
    sleepJob.setConf(sleepConf);

    Job job = sleepJob.createJob(1, 1, 2, 1, 1, 1, 1, 1, 1, 1);

    job.setJarByClass(MRRSleepJob.class);
    job.setMaxMapAttempts(1); // speed up failures

    // enable compression
    job.getConfiguration().setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
    job.getConfiguration().set(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC, DefaultCodec.class.getName());

    job.submit();
    String trackingUrl = job.getTrackingURL();
    String jobId = job.getJobID().toString();
    boolean succeeded = job.waitForCompletion(true);
    Assert.assertTrue(succeeded);
    Assert.assertEquals(JobStatus.State.SUCCEEDED, job.getJobState());
    Assert.assertTrue("Tracking URL was " + trackingUrl + " but didn't Match Job ID " + jobId,
            trackingUrl.endsWith(jobId.substring(jobId.lastIndexOf("_")) + "/"));

    // FIXME once counters and task progress can be obtained properly
    // TODO use dag client to test counters and task progress?
    // what about completed jobs?

}