Example usage for org.apache.hadoop.mapred JobID JobID

List of usage examples for org.apache.hadoop.mapred JobID JobID

Introduction

On this page you can find example usage of the org.apache.hadoop.mapred.JobID constructor JobID(String jtIdentifier, int id).

Prototype

public JobID(String jtIdentifier, int id) 

Document

Constructs a JobID object from a JobTracker identifier and a job number.
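
A quick, self-contained sketch of the constructor before the real-world examples below. The identifier value and job number are hypothetical; on an actual cluster the identifier is typically derived from the JobTracker/cluster start time and the number is assigned sequentially.

import org.apache.hadoop.mapred.JobID;

public class JobIDExample {
    public static void main(String[] args) {
        // Hypothetical jobtracker identifier and job number, for illustration only.
        JobID jobId = new JobID("202401011200", 7);

        // toString() yields the familiar "job_<identifier>_<number>" form,
        // with the number zero-padded, e.g. job_202401011200_0007.
        System.out.println(jobId);
        System.out.println(jobId.getJtIdentifier()); // 202401011200
        System.out.println(jobId.getId());           // 7
    }
}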

Usage

From source file: co.cask.cdap.internal.app.runtime.batch.dataset.input.MultiInputFormat.java

License: Apache License

@SuppressWarnings("unchecked")
public List<InputSplit> getSplits(JobContext job) throws IOException, InterruptedException {
    List<InputSplit> splits = new ArrayList<>();
    Map<String, MultipleInputs.MapperInput> mapperInputMap = MultipleInputs.getInputMap(job.getConfiguration());

    for (Map.Entry<String, MultipleInputs.MapperInput> mapperInputEntry : mapperInputMap.entrySet()) {
        String inputName = mapperInputEntry.getKey();
        MultipleInputs.MapperInput mapperInput = mapperInputEntry.getValue();
        String mapperClassName = mapperInput.getMapperClassName();
        Job jobCopy = new Job(job.getConfiguration());
        Configuration confCopy = jobCopy.getConfiguration();

        // set configuration specific for this input onto the jobCopy
        ConfigurationUtil.setAll(mapperInput.getInputFormatConfiguration(), confCopy);

        Class<?> inputFormatClass = confCopy.getClassByNameOrNull(mapperInput.getInputFormatClassName());
        Preconditions.checkNotNull(inputFormatClass, "Class could not be found: ",
                mapperInput.getInputFormatClassName());

        InputFormat<K, V> inputFormat = (InputFormat) ReflectionUtils.newInstance(inputFormatClass, confCopy);
        // some input formats need a jobId to getSplits
        jobCopy.setJobID(new JobID(inputName, inputName.hashCode()));

        // Get splits for each input path and tag with InputFormat
        // and Mapper types by wrapping in a MultiInputTaggedSplit.
        List<InputSplit> formatSplits = inputFormat.getSplits(jobCopy);
        for (InputSplit split : formatSplits) {
            splits.add(new MultiInputTaggedSplit(split, confCopy, inputName,
                    mapperInput.getInputFormatConfiguration(), inputFormat.getClass(), mapperClassName));
        }
    }
    return splits;
}

From source file: com.atlantbh.jmeter.plugins.hadooputilities.jobstatistics.JobLayer.java

License: Apache License

protected JobID convertToJobId(String jobId) {
    String id = jobId.replace("job_", "");
    return new JobID(id.split("_")[0], Integer.valueOf(id.split("_")[1]));
}
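
A note on the example above: when the full job string (including the "job_" prefix) is available, JobID.forName can parse it directly instead of splitting by hand. A small sketch, assuming a well-formed job id string (the value shown is hypothetical):

import org.apache.hadoop.mapred.JobID;

public class JobIDParseExample {
    public static void main(String[] args) {
        // forName throws IllegalArgumentException if the string is not of the
        // form job_<identifier>_<number>.
        JobID parsed = JobID.forName("job_202401011200_0007");
        System.out.println(parsed.getJtIdentifier() + " / " + parsed.getId());
    }
}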

From source file: com.ibm.jaql.io.hadoop.DefaultHadoopOutputAdapter.java

License: Apache License

public void open() throws Exception {
    this.conf = new JobConf();
    this.reporter = Reporter.NULL;

    // Some OutputFormats (like FileOutputFormat) require that the job id/task id be set.
    // So let's set it for all output formats, just in case they need it too.
    JobID jobid = new JobID("sequential", jobCounter.getAndIncrement());
    TaskAttemptID taskid = new TaskAttemptID(new TaskID(jobid, true, 0), 0);
    conf.set("mapred.task.id", taskid.toString());

    setSequential(conf);

    // Create a task so we can use committers.
    sequentialJob = new ExposeJobContext(conf, jobid);
    sequentialTask = new ExposeTaskAttemptContext(conf, taskid);

    // Give the committer a chance to initialize.
    OutputCommitter committer = conf.getOutputCommitter();
    // FIXME: We skip job setup for now because  
    committer.setupJob(sequentialJob);
    committer.setupTask(sequentialTask);

    if (oFormat instanceof JobConfigurable)
        ((JobConfigurable) oFormat).configure(conf);
}

From source file: com.ikanow.infinit.e.processing.custom.CustomProcessingController.java

License: Open Source License

public boolean killRunningJob(CustomMapReduceJobPojo jobToKillInfo) {
    try {
        Configuration conf = new Configuration();
        JobClient jc = new JobClient(InfiniteHadoopUtils.getJobClientConnection(prop_custom), conf);
        jc.setConf(conf); // (doesn't seem to be set by the above call)

        RunningJob jobToKill = jc.getJob(new JobID(jobToKillInfo.jobidS, jobToKillInfo.jobidN));
        if (null == jobToKill) {
            _logger.error("Couldn't find this job: " + jobToKillInfo.jobidS + "_" + jobToKillInfo.jobidN + " / "
                    + new JobID(jobToKillInfo.jobidS, jobToKillInfo.jobidN).toString());
            return false;
        }
        jobToKill.killJob();

        int nRuns = 0;
        while (!checkRunningJobs(jobToKillInfo)) {
            try {
                Thread.sleep(5000);
            } catch (Exception e) {
            }
            if (++nRuns > 24) { // bail out after 2 minutes 
                _logger.error("Killed job: " + jobToKillInfo.jobidS + "_" + jobToKillInfo.jobidN
                        + ", but job failed to stop within time allowed");
                return false;
            }
        }
        if (null != jobToKillInfo.derivedFromSourceKey) { // Update the derived source, if one exists
            BasicDBObject query = new BasicDBObject(SourcePojo.key_, jobToKillInfo.derivedFromSourceKey);
            BasicDBObject setUpdate = new BasicDBObject(SourceHarvestStatusPojo.sourceQuery_harvest_status_,
                    HarvestEnum.error.toString());
            setUpdate.put(SourceHarvestStatusPojo.sourceQuery_harvest_message_, "Manually stopped");
            BasicDBObject srcUpdate = new BasicDBObject(DbManager.set_, setUpdate);
            DbManager.getIngest().getSource().update(query, srcUpdate, false, false);
        } //TESTED (actually a bit pointless usually, because it is then overwritten by the source publish)
        return true;
    } catch (Exception e) {
        _logger.error("Failed to kill job: " + jobToKillInfo.jobidS + "_" + jobToKillInfo.jobidN + " / "
                + e.getMessage(), e);
        return false;
    }
}

From source file: com.mellanox.hadoop.mapred.UdaShuffleHandler.java

License: Apache License

@Override
public void initializeApplication(ApplicationInitializationContext context) {
    LOG.info("starting initializeApplication of UdaShuffleHandler");

    String user = context.getUser();
    ApplicationId appId = context.getApplicationId();

    JobID jobId = new JobID(Long.toString(appId.getClusterTimestamp()), appId.getId());
    //     rdmaChannel = new UdaPluginSH(conf, user, jobId);     
    rdmaChannel.addJob(user, jobId);
    LOG.info("finished initializeApplication of UdaShuffleHandler");
}

From source file: com.mellanox.hadoop.mapred.UdaShuffleHandler.java

License: Apache License

@Override
public void stopApplication(ApplicationTerminationContext context) {
    ApplicationId appId = context.getApplicationId();
    LOG.info("stopApplication of UdaShuffleHandler");
    JobID jobId = new JobID(Long.toString(appId.getClusterTimestamp()), appId.getId());
    rdmaChannel.removeJob(jobId);
    LOG.info("stopApplication of UdaShuffleHandler is done");

}

From source file: edu.uci.ics.hyracks.hadoop.compat.client.HyracksRunningJob.java

License: Apache License

@Override
public JobID getID() {
    return new JobID(this.jobId.toString(), 1);
}

From source file: org.apache.ignite.internal.processors.hadoop.impl.v2.HadoopV2Job.java

License: Apache License

/**
 * Constructor.
 *
 * @param jobId Job ID.
 * @param jobInfo Job info.
 * @param log Logger.
 * @param libNames Optional additional native library names.
 * @param helper Hadoop helper.
 */
public HadoopV2Job(HadoopJobId jobId, final HadoopDefaultJobInfo jobInfo, IgniteLogger log,
        @Nullable String[] libNames, HadoopHelper helper) {
    assert jobId != null;
    assert jobInfo != null;

    this.jobId = jobId;
    this.jobInfo = jobInfo;
    this.libNames = libNames;
    this.helper = helper;
    this.log = log;

    ClassLoader oldLdr = HadoopCommonUtils.setContextClassLoader(getClass().getClassLoader());

    try {
        hadoopJobID = new JobID(jobId.globalId().toString(), jobId.localId());

        jobConf = new JobConf();

        HadoopFileSystemsUtils.setupFileSystems(jobConf);

        for (Map.Entry<String, String> e : jobInfo.properties().entrySet())
            jobConf.set(e.getKey(), e.getValue());

        jobCtx = new JobContextImpl(jobConf, hadoopJobID);

        rsrcMgr = new HadoopV2JobResourceManager(jobId, jobCtx, log, this);
    } finally {
        HadoopCommonUtils.restoreContextClassLoader(oldLdr);
    }
}

From source file: org.apache.ignite.internal.processors.hadoop.impl.v2.HadoopV2TaskContext.java

License: Apache License

/**
 * @param taskInfo Task info.
 * @param job Job.
 * @param jobId Job ID.
 * @param locNodeId Local node ID.
 * @param jobConfDataInput DataInput from which to read the JobConf.
 */
public HadoopV2TaskContext(HadoopTaskInfo taskInfo, HadoopJobEx job, HadoopJobId jobId,
        @Nullable UUID locNodeId, DataInput jobConfDataInput) throws IgniteCheckedException {
    super(taskInfo, job);
    this.locNodeId = locNodeId;

    // Before creating the JobConf instance we should set the new context class loader.
    ClassLoader oldLdr = HadoopCommonUtils.setContextClassLoader(getClass().getClassLoader());

    try {
        JobConf jobConf = new JobConf();

        try {
            jobConf.readFields(jobConfDataInput);
        } catch (IOException e) {
            throw new IgniteCheckedException(e);
        }

        // For map-reduce jobs prefer local writes.
        jobConf.setBooleanIfUnset(PARAM_IGFS_PREFER_LOCAL_WRITES, true);

        initializePartiallyRawComparator(jobConf);

        jobCtx = new JobContextImpl(jobConf, new JobID(jobId.globalId().toString(), jobId.localId()));

        useNewMapper = jobConf.getUseNewMapper();
        useNewReducer = jobConf.getUseNewReducer();
        useNewCombiner = jobConf.getCombinerClass() == null;
    } finally {
        HadoopCommonUtils.restoreContextClassLoader(oldLdr);
    }
}

From source file: org.apache.ignite.internal.processors.hadoop.v2.GridHadoopV2Job.java

License: Apache License

/**
 * @param jobId Job ID.
 * @param jobInfo Job info.
 * @param log Logger.
 */
public GridHadoopV2Job(GridHadoopJobId jobId, final GridHadoopDefaultJobInfo jobInfo, IgniteLogger log) {
    assert jobId != null;
    assert jobInfo != null;

    this.jobId = jobId;
    this.jobInfo = jobInfo;

    hadoopJobID = new JobID(jobId.globalId().toString(), jobId.localId());

    GridHadoopClassLoader clsLdr = (GridHadoopClassLoader) getClass().getClassLoader();

    // Before creating the JobConf instance we should set the new context class loader.
    Thread.currentThread().setContextClassLoader(clsLdr);

    jobConf = new JobConf();

    GridHadoopFileSystemsUtils.setupFileSystems(jobConf);

    Thread.currentThread().setContextClassLoader(null);

    for (Map.Entry<String, String> e : jobInfo.properties().entrySet())
        jobConf.set(e.getKey(), e.getValue());

    jobCtx = new JobContextImpl(jobConf, hadoopJobID);

    rsrcMgr = new GridHadoopV2JobResourceManager(jobId, jobCtx, log);
}