Example usage for the org.apache.hadoop.mapred TaskAttemptID(TaskID, int) constructor

Introduction

This page collects example usages of the org.apache.hadoop.mapred TaskAttemptID(TaskID, int) constructor, drawn from open source projects.

Prototype

public TaskAttemptID(TaskID taskId, int id) 

Document

Constructs a TaskAttemptID object from the given TaskID and an attempt number.
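
As a minimal, self-contained sketch of this constructor (the job identifier, task index, and attempt number below are arbitrary illustrative values, not taken from the examples that follow):

// Minimal sketch: build a TaskAttemptID from a TaskID plus an attempt number.
import org.apache.hadoop.mapred.JobID;
import org.apache.hadoop.mapred.TaskAttemptID;
import org.apache.hadoop.mapred.TaskID;

public class TaskAttemptIDExample {
    public static void main(String[] args) {
        JobID jobId = new JobID("20240101120000", 1);            // jtIdentifier, job number
        TaskID taskId = new TaskID(jobId, true, 0);              // true = map task, task index 0
        TaskAttemptID attemptId = new TaskAttemptID(taskId, 0);  // first attempt (attempt 0) of that task
        System.out.println(attemptId); // prints e.g. attempt_20240101120000_0001_m_000000_0
    }
}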

Usage

From source file:com.ibm.jaql.io.hadoop.DefaultHadoopOutputAdapter.java

License:Apache License

public void open() throws Exception {
    this.conf = new JobConf();
    this.reporter = Reporter.NULL;

    // Some OutputFormats (like FileOutputFormat) require that the job id/task id be set.
    // So let's set it for all output formats, just in case they need it too.
    JobID jobid = new JobID("sequential", jobCounter.getAndIncrement());
    TaskAttemptID taskid = new TaskAttemptID(new TaskID(jobid, true, 0), 0);
    conf.set("mapred.task.id", taskid.toString());

    setSequential(conf);

    // Create a task so we can use committers.
    sequentialJob = new ExposeJobContext(conf, jobid);
    sequentialTask = new ExposeTaskAttemptContext(conf, taskid);

    // Give the committer a chance to initialize.
    OutputCommitter committer = conf.getOutputCommitter();
    // FIXME: We skip job setup for now because  
    committer.setupJob(sequentialJob);
    committer.setupTask(sequentialTask);

    if (oFormat instanceof JobConfigurable)
        ((JobConfigurable) oFormat).configure(conf);
}
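
A note on the design: setting "mapred.task.id" matters here because, as far as I can tell, it is the deprecated old-API key that file-based formats such as FileOutputFormat consult to derive their per-attempt temporary output directory, which is why the adapter fabricates a synthetic job and attempt id even though it runs sequentially outside a real MapReduce cluster.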

From source file:org.apache.falcon.logging.v2.TaskLogRetrieverYarnTest.java

License:Apache License

private TaskCompletionEvent[] getTaskCompletionEvents(int numEvents, JobID jobID) {
    TaskCompletionEvent[] taskCompletionEvents = new TaskCompletionEvent[numEvents];
    for (int i = 0; i < numEvents; i++) {
        TaskAttemptID taskAttemptID = new TaskAttemptID(new TaskID(jobID, true, 0), i);
        TaskCompletionEvent taskCompletionEvent = new TaskCompletionEvent(0, taskAttemptID, 0, true,
                TaskCompletionEvent.Status.SUCCEEDED, "tracker:0");
        taskCompletionEvents[i] = taskCompletionEvent;
    }
    return taskCompletionEvents;
}

From source file:org.apache.ignite.internal.processors.hadoop.impl.v2.HadoopV2TaskContext.java

License:Apache License

/**
 * Creates Hadoop attempt ID.
 *
 * @return Attempt ID.
 */
public TaskAttemptID attemptId() {
    TaskID tid = new TaskID(jobCtx.getJobID(), taskType(taskInfo().type()), taskInfo().taskNumber());

    return new TaskAttemptID(tid, taskInfo().attempt());
}

From source file:org.apache.tez.mapreduce.hadoop.IDConverter.java

License:Apache License

public static TaskAttemptID toMRTaskAttemptId(TezTaskAttemptID taskAttemptId) {
    return new TaskAttemptID(toMRTaskId(taskAttemptId.getTaskID()), taskAttemptId.getId());
}

From source file:org.apache.tez.mapreduce.input.base.MRInputBase.java

License:Apache License

public List<Event> initialize() throws IOException {
    getContext().requestInitialMemory(0l, null); // mandatory call
    MRRuntimeProtos.MRInputUserPayloadProto mrUserPayload = MRInputHelpers
            .parseMRInputPayload(getContext().getUserPayload());
    boolean isGrouped = mrUserPayload.getGroupingEnabled();
    Preconditions.checkArgument(mrUserPayload.hasSplits() == false,
            "Split information not expected in " + this.getClass().getName());
    Configuration conf = TezUtils.createConfFromByteString(mrUserPayload.getConfigurationBytes());
    this.jobConf = new JobConf(conf);
    useNewApi = this.jobConf.getUseNewMapper();
    if (isGrouped) {
        if (useNewApi) {
            jobConf.set(MRJobConfig.INPUT_FORMAT_CLASS_ATTR,
                    org.apache.hadoop.mapreduce.split.TezGroupedSplitsInputFormat.class.getName());
        } else {
            jobConf.set("mapred.input.format.class",
                    org.apache.hadoop.mapred.split.TezGroupedSplitsInputFormat.class.getName());
        }
    }

    // Add tokens to the jobConf - in case they are accessed within the RR / IF
    jobConf.getCredentials().mergeAll(UserGroupInformation.getCurrentUser().getCredentials());

    TaskAttemptID taskAttemptId = new TaskAttemptID(
            new TaskID(Long.toString(getContext().getApplicationId().getClusterTimestamp()),
                    getContext().getApplicationId().getId(), TaskType.MAP, getContext().getTaskIndex()),
            getContext().getTaskAttemptNumber());

    jobConf.set(MRJobConfig.TASK_ATTEMPT_ID, taskAttemptId.toString());
    jobConf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, getContext().getDAGAttemptNumber());

    this.inputRecordCounter = getContext().getCounters().findCounter(TaskCounter.INPUT_RECORDS_PROCESSED);

    return null;
}
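
Note that MRJobConfig.TASK_ATTEMPT_ID is "mapreduce.task.attempt.id", the new-API name for the same setting the first example wrote under its deprecated key "mapred.task.id".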

From source file:org.apache.tez.mapreduce.processor.MRTask.java

License:Apache License

@Override
public void initialize() throws IOException, InterruptedException {

    DeprecatedKeys.init();

    processorContext = getContext();
    counters = processorContext.getCounters();
    this.taskAttemptId = new TaskAttemptID(
            new TaskID(Long.toString(processorContext.getApplicationId().getClusterTimestamp()),
                    processorContext.getApplicationId().getId(), (isMap ? TaskType.MAP : TaskType.REDUCE),
                    processorContext.getTaskIndex()),
            processorContext.getTaskAttemptNumber());

    UserPayload userPayload = processorContext.getUserPayload();
    Configuration conf = TezUtils.createConfFromUserPayload(userPayload);
    if (conf instanceof JobConf) {
        this.jobConf = (JobConf) conf;
    } else {
        this.jobConf = new JobConf(conf);
    }
    jobConf.set(Constants.TEZ_RUNTIME_TASK_ATTEMPT_ID, taskAttemptId.toString());
    jobConf.set(MRJobConfig.TASK_ATTEMPT_ID, taskAttemptId.toString());
    jobConf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, processorContext.getDAGAttemptNumber());

    LOG.info("MRTask.inited: taskAttemptId = " + taskAttemptId.toString());

    // TODO Post MRR
    // A single file per vertex will likely be a better solution. Does not
    // require translation - client can take care of this. Will work independent
    // of whether the configuration is for intermediate tasks or not. Has the
    // overhead of localizing multiple files per job - i.e. the client would
    // need to write these files to hdfs, add them as local resources per
    // vertex. A solution like this may be more practical once it's possible to
    // submit configuration parameters to the AM and effectively tasks via RPC.

    jobConf.set(MRJobConfig.VERTEX_NAME, processorContext.getTaskVertexName());

    if (LOG.isDebugEnabled() && userPayload != null) {
        Iterator<Entry<String, String>> iter = jobConf.iterator();
        String taskIdStr = taskAttemptId.getTaskID().toString();
        while (iter.hasNext()) {
            Entry<String, String> confEntry = iter.next();
            LOG.debug("TaskConf Entry" + ", taskId=" + taskIdStr + ", key=" + confEntry.getKey() + ", value="
                    + confEntry.getValue());
        }
    }

    configureMRTask();
}

From source file:org.pentaho.hadoop.shim.common.mapred.TaskCompletionEventProxyTest.java

License:Apache License

@Test
public void getTaskAttemptId() {
    final TaskAttemptID id = new TaskAttemptID(new TaskID(), 0);
    org.apache.hadoop.mapred.TaskCompletionEvent delegate = new org.apache.hadoop.mapred.TaskCompletionEvent() {
        public org.apache.hadoop.mapred.TaskAttemptID getTaskAttemptId() {
            return id;
        }
    };
    TaskCompletionEventProxy proxy = new TaskCompletionEventProxy(delegate);

    assertEquals(id, proxy.getTaskAttemptId());
}