Example usage for org.apache.hadoop.mapreduce Job getJobID

List of usage examples for org.apache.hadoop.mapreduce Job getJobID

Introduction

On this page you can find example usage of org.apache.hadoop.mapreduce Job getJobID.

Prototype

public JobID getJobID() 

Document

Get the unique ID for the job.
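
For orientation, here is a minimal sketch of the typical call pattern (not taken from the examples below): the cluster assigns the ID during submission, so getJobID() is normally read after submit() or waitForCompletion(). The class name GetJobIdSketch, the method submitAndGetId, and the job name "example-job" are placeholders; a real job also needs mapper, reducer, and input/output configuration before submission will succeed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;

/** Minimal sketch: submit a job and read back the ID assigned during submission. */
public class GetJobIdSketch {
    public static JobID submitAndGetId(Configuration conf) throws Exception {
        Job job = Job.getInstance(conf, "example-job");

        // ... configure mapper, reducer, input/output formats and paths here ...

        job.submit();             // the cluster assigns the JobID as part of submission
        return job.getJobID();    // typically null before submit(), populated afterwards
    }
}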

Usage

From source file:org.gridgain.client.hadoop.GridHadoopClientProtocolSelfTest.java

License:Open Source License

/**
 * Test job submission.
 *
 * @param noCombiners Whether there are no combiners.
 * @param noReducers Whether there are no reducers.
 * @throws Exception If failed.
 */
public void checkJobSubmit(boolean noCombiners, boolean noReducers) throws Exception {
    GridGgfs ggfs = grid(0).ggfs(GridHadoopAbstractSelfTest.ggfsName);

    ggfs.mkdirs(new GridGgfsPath(PATH_INPUT));

    try (BufferedWriter bw = new BufferedWriter(
            new OutputStreamWriter(ggfs.create(new GridGgfsPath(PATH_INPUT + "/test.file"), true)))) {

        bw.write("word");
    }

    Configuration conf = config(GridHadoopAbstractSelfTest.REST_PORT);

    final Job job = Job.getInstance(conf);

    job.setJobName(JOB_NAME);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    job.setMapperClass(TestMapper.class);
    job.setReducerClass(TestReducer.class);

    if (!noCombiners)
        job.setCombinerClass(TestCombiner.class);

    if (noReducers)
        job.setNumReduceTasks(0);

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(TestOutputFormat.class);

    FileInputFormat.setInputPaths(job, new Path(PATH_INPUT));
    FileOutputFormat.setOutputPath(job, new Path(PATH_OUTPUT));

    job.submit();

    JobID jobId = job.getJobID();

    // Setup phase.
    JobStatus jobStatus = job.getStatus();
    checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
    assert jobStatus.getSetupProgress() >= 0.0f && jobStatus.getSetupProgress() < 1.0f;
    assert jobStatus.getMapProgress() == 0.0f;
    assert jobStatus.getReduceProgress() == 0.0f;

    U.sleep(2100);

    JobStatus recentJobStatus = job.getStatus();

    assert recentJobStatus.getSetupProgress() > jobStatus.getSetupProgress() : "Old="
            + jobStatus.getSetupProgress() + ", new=" + recentJobStatus.getSetupProgress();

    // Transferring to map phase.
    setupLockFile.delete();

    assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override
        public boolean apply() {
            try {
                return F.eq(1.0f, job.getStatus().getSetupProgress());
            } catch (Exception e) {
                throw new RuntimeException("Unexpected exception.", e);
            }
        }
    }, 5000L);

    // Map phase.
    jobStatus = job.getStatus();
    checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
    assert jobStatus.getSetupProgress() == 1.0f;
    assert jobStatus.getMapProgress() >= 0.0f && jobStatus.getMapProgress() < 1.0f;
    assert jobStatus.getReduceProgress() == 0.0f;

    U.sleep(2100);

    recentJobStatus = job.getStatus();

    assert recentJobStatus.getMapProgress() > jobStatus.getMapProgress() : "Old=" + jobStatus.getMapProgress()
            + ", new=" + recentJobStatus.getMapProgress();

    // Transferring to reduce phase.
    mapLockFile.delete();

    assert GridTestUtils.waitForCondition(new GridAbsPredicate() {
        @Override
        public boolean apply() {
            try {
                return F.eq(1.0f, job.getStatus().getMapProgress());
            } catch (Exception e) {
                throw new RuntimeException("Unexpected exception.", e);
            }
        }
    }, 5000L);

    if (!noReducers) {
        // Reduce phase.
        jobStatus = job.getStatus();
        checkJobStatus(jobStatus, jobId, JOB_NAME, JobStatus.State.RUNNING, 0.0f);
        assert jobStatus.getSetupProgress() == 1.0f;
        assert jobStatus.getMapProgress() == 1.0f;
        assert jobStatus.getReduceProgress() >= 0.0f && jobStatus.getReduceProgress() < 1.0f;

        // Ensure that reduce progress increases.
        U.sleep(2100);

        recentJobStatus = job.getStatus();

        assert recentJobStatus.getReduceProgress() > jobStatus.getReduceProgress() : "Old="
                + jobStatus.getReduceProgress() + ", new=" + recentJobStatus.getReduceProgress();

        reduceLockFile.delete();
    }

    job.waitForCompletion(false);

    jobStatus = job.getStatus();
    checkJobStatus(job.getStatus(), jobId, JOB_NAME, JobStatus.State.SUCCEEDED, 1.0f);
    assert jobStatus.getSetupProgress() == 1.0f;
    assert jobStatus.getMapProgress() == 1.0f;
    assert jobStatus.getReduceProgress() == 1.0f;

    dumpGgfs(ggfs, new GridGgfsPath(PATH_OUTPUT));
}

From source file:org.imageterrier.indexers.hadoop.HadoopIndexer.java

License:Mozilla Public License

/**
 * Process the arguments and start the map-reduce indexing.
 *
 * @param args
 * @throws Exception
 */
@Override
public int run(String[] args) throws Exception {
    final long time = System.currentTimeMillis();

    final HadoopIndexerOptions options = new HadoopIndexerOptions();
    final CmdLineParser parser = new CmdLineParser(options);
    try {
        parser.parseArgument(args);
    } catch (final CmdLineException e) {
        parser.printUsage(System.err);
        logger.fatal(e.getMessage());
        logger.fatal(usage());
        return 1;
    }

    if (Files.exists(options.getOutputPathString())
            && Index.existsIndex(options.getOutputPathString(), ApplicationSetup.TERRIER_INDEX_PREFIX)) {
        logger.fatal("Cannot index while index exists at " + options.getOutputPathString() + ","
                + ApplicationSetup.TERRIER_INDEX_PREFIX);
        return 1;
    }

    // create job
    final Job job = createJob(options);

    // set args string
    job.getConfiguration().setStrings(INDEXER_ARGS_STRING, args);
    options.configureFilterMode(job.getConfiguration());

    // run job
    JobID jobId = null;
    boolean ranOK = true;
    try {
        ranOK = job.waitForCompletion(true);
        jobId = job.getJobID();
    } catch (final Exception e) {
        logger.error("Problem running job", e);
        ranOK = false;
    }

    if (jobId != null) {
        deleteTaskFiles(options.getOutputPathString(), jobId);
    }

    if (ranOK) {
        if (!options.isDocumentPartitionMode()) {
            if (job.getNumReduceTasks() > 1) {
                mergeLexiconInvertedFiles(options.getOutputPathString(), job.getNumReduceTasks());
            }
        }

        finish(options.getOutputPathString(), options.isDocumentPartitionMode() ? job.getNumReduceTasks() : 1,
                job.getConfiguration());
    }

    System.out.println("Time Taken = " + ((System.currentTimeMillis() - time) / 1000) + " seconds");

    return 0;
}

From source file:org.janusgraph.hadoop.scan.HadoopScanRunner.java

License:Apache License

public static ScanMetrics runJob(org.apache.hadoop.conf.Configuration hadoopConf,
        Class<? extends InputFormat> inputFormat, String jobName, Class<? extends Mapper> mapperClass)
        throws IOException, InterruptedException, ClassNotFoundException {

    Job job = Job.getInstance(hadoopConf);

    //job.setJarByClass(HadoopScanMapper.class);
    job.setJarByClass(mapperClass);
    //job.setJobName(HadoopScanMapper.class.getSimpleName() + "[" + scanJob + "]");
    job.setJobName(jobName);
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(NullWritable.class);
    job.setMapOutputKeyClass(NullWritable.class);
    job.setMapOutputValueClass(NullWritable.class);
    job.setNumReduceTasks(0);
    //job.setMapperClass(HadoopScanMapper.class);
    job.setMapperClass(mapperClass);
    job.setOutputFormatClass(NullOutputFormat.class);
    job.setInputFormatClass(inputFormat);

    boolean success = job.waitForCompletion(true);

    if (!success) {
        String f;
        try {
            // Just in case one of Job's methods throws an exception
            f = String.format("MapReduce JobID %s terminated abnormally: %s", job.getJobID().toString(),
                    HadoopCompatLoader.DEFAULT_COMPAT.getJobFailureString(job));
        } catch (RuntimeException e) {
            f = "Job failed (unable to read job status programmatically -- see MapReduce logs for information)";
        }
        throw new IOException(f);
    } else {
        return DEFAULT_COMPAT.getMetrics(job.getCounters());
    }
}

From source file:org.kiji.mapreduce.framework.JobHistoryKijiTable.java

License:Apache License

/**
 * Helper method to write individual counters to job history table's counter family.
 *
 * @param writer The {@link KijiTableWriter} for the job history table.
 * @param job The {@link Job} whose counters we are recording.
 * @throws IOException If there is an error writing to the table.
 */
private void writeIndividualCounters(KijiTableWriter writer, Job job) throws IOException {
    EntityId jobEntity = mKijiTable.getEntityId(job.getJobID().toString());
    Counters counters = job.getCounters();
    for (String grpName : counters.getGroupNames()) {
        Iterator<Counter> counterIterator = counters.getGroup(grpName).iterator();
        while (counterIterator.hasNext()) {
            Counter ctr = counterIterator.next();
            writer.put(jobEntity, JOB_HISTORY_COUNTERS_FAMILY, grpName + ":" + ctr.getName(), ctr.getValue());
        }
    }
}

From source file:org.kiji.mapreduce.framework.JobHistoryKijiTable.java

License:Apache License

/**
 * Writes a job into the JobHistoryKijiTable.
 *
 * @param job The job to save.
 * @param startTime The time the job began, in milliseconds.
 * @param endTime The time the job ended, in milliseconds.
 * @throws IOException If there is an error writing to the table.
 */
public void recordJob(Job job, long startTime, long endTime) throws IOException {
    KijiTableWriter writer = mKijiTable.openTableWriter();
    EntityId jobEntity = mKijiTable.getEntityId(job.getJobID().toString());
    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        writer.put(jobEntity, JOB_HISTORY_FAMILY, JOB_HISTORY_ID_QUALIFIER, startTime,
                job.getJobID().toString());
        writer.put(jobEntity, JOB_HISTORY_FAMILY, JOB_HISTORY_NAME_QUALIFIER, startTime, job.getJobName());
        writer.put(jobEntity, JOB_HISTORY_FAMILY, JOB_HISTORY_START_TIME_QUALIFIER, startTime, startTime);
        writer.put(jobEntity, JOB_HISTORY_FAMILY, JOB_HISTORY_END_TIME_QUALIFIER, startTime, endTime);
        writer.put(jobEntity, JOB_HISTORY_FAMILY, JOB_HISTORY_END_STATUS_QUALIFIER, startTime,
                job.isSuccessful() ? "SUCCEEDED" : "FAILED");
        writer.put(jobEntity, JOB_HISTORY_FAMILY, JOB_HISTORY_COUNTERS_QUALIFIER, startTime,
                job.getCounters().toString());
        job.getConfiguration().writeXml(baos);
        writer.put(jobEntity, JOB_HISTORY_FAMILY, JOB_HISTORY_CONFIGURATION_QUALIFIER, startTime,
                baos.toString("UTF-8"));
        writeIndividualCounters(writer, job);
    } finally {
        ResourceUtils.closeOrLog(writer);
    }
}

From source file:org.kiji.mapreduce.JobHistoryKijiTable.java

License:Apache License

/**
 * Writes a job into the JobHistoryKijiTable.
 *
 * @param job The job to save.
 * @param startTime The time the job began, in milliseconds.
 * @param endTime The time the job ended, in milliseconds.
 * @throws IOException If there is an error writing to the table.
 */
public void recordJob(Job job, long startTime, long endTime) throws IOException {
    KijiTableWriter writer = mKijiTable.openTableWriter();
    EntityId jobEntity = mKijiTable.getEntityId(job.getJobID().toString());
    try {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        writer.put(jobEntity, "info", "jobId", startTime, job.getJobID().toString());
        writer.put(jobEntity, "info", "jobName", startTime, job.getJobName());
        writer.put(jobEntity, "info", "startTime", startTime, startTime);
        writer.put(jobEntity, "info", "endTime", startTime, endTime);
        writer.put(jobEntity, "info", "counters", startTime, job.getCounters().toString());
        job.getConfiguration().writeXml(baos);
        writer.put(jobEntity, "info", "configuration", startTime, baos.toString("UTF-8"));
    } finally {
        IOUtils.closeQuietly(writer);
    }
}

From source file:org.lilyproject.indexer.master.IndexerMaster.java

License:Apache License

private void startFullIndexBuild(String indexName) {
    try {
        String lock = indexerModel.lockIndex(indexName);
        try {
            // Read the current index definition and make sure it is still up to date
            IndexDefinition index = indexerModel.getMutableIndex(indexName);
            if (needsBatchBuildStart(index)) {
                Job job = BatchIndexBuilder.startBatchBuildJob(index, mapReduceJobConf, hbaseConf,
                        zkConnectString, zkSessionTimeout);

                ActiveBatchBuildInfo jobInfo = new ActiveBatchBuildInfo();
                jobInfo.setSubmitTime(System.currentTimeMillis());
                jobInfo.setJobId(job.getJobID().toString());
                jobInfo.setTrackingUrl(job.getTrackingURL());
                index.setActiveBatchBuildInfo(jobInfo);

                index.setBatchBuildState(IndexBatchBuildState.BUILDING);

                indexerModel.updateIndexInternal(index);

                log.info(
                        "Started index build job for index " + indexName + ", job ID =  " + jobInfo.getJobId());
            }
        } finally {
            indexerModel.unlockIndex(lock);
        }
    } catch (Throwable t) {
        log.error("Error trying to start index build job for index " + indexName, t);
    }
}

From source file:org.mrgeo.mapreduce.job.JobListener.java

License:Apache License

public boolean cancelAll() throws JobCancelFailedException {
    boolean success = true;
    setCancelled();
    synchronized (jobsListLock) {
        for (Job job : jobsList) {
            _log.info("User requested cancellation - killing job " + job.getJobName());
            //this is a hadoop job, so kill it.
            try {
                job.killJob();
            } catch (IOException e) {
                //log it, make a note of the fact that the job cancel failed 
                //so you can propagate the exception back
                _log.error("Kill job failed for " + job.getJobID());
                success = false;
            }
        }
        if (!success) {
            throw new JobCancelFailedException(
                    "Cancel failed for some of the hadoop jobs, see log for details.");
        }
    }
    return success;
}

From source file:org.mrgeo.resources.job.JobInfoDetailedResponse.java

License:Apache License

public void addHadoopJob(Job job) {
    _hadoopJobs.add(new Long(job.getJobID().getId()));
}

From source file:org.springframework.data.hadoop.mapreduce.JobUtils.java

License:Apache License

public static JobID getJobId(Job job) {
    if (job == null) {
        return null;
    }
    return job.getJobID();
}