Example usage for org.apache.hadoop.mapreduce TaskAttemptID toString

List of usage examples for org.apache.hadoop.mapreduce TaskAttemptID toString

Introduction

On this page you can find usage examples for org.apache.hadoop.mapreduce.TaskAttemptID.toString().

Prototype

@Override
public String toString()
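
For orientation, a minimal standalone sketch (the timestamp and job number below are made up) shows the string form that toString() produces and that the examples on this page rely on:

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;

public class TaskAttemptIdToStringDemo {
    public static void main(String[] args) {
        // Hypothetical job "0001" from a cluster identified by timestamp 201408272347
        JobID jobId = new JobID("201408272347", 1);
        // First map task of that job, first attempt
        TaskID taskId = new TaskID(jobId, TaskType.MAP, 0);
        TaskAttemptID attemptId = new TaskAttemptID(taskId, 0);

        // Prints: attempt_201408272347_0001_m_000000_0
        System.out.println(attemptId.toString());

        // forName() parses the same string form back into an equal TaskAttemptID
        System.out.println(TaskAttemptID.forName(attemptId.toString()).equals(attemptId)); // true
    }
}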

Usage

From source file:com.google.appengine.tools.mapreduce.ShardState.java

License:Apache License

/**
 * Gets the ShardState corresponding to the given TaskAttemptID.
 *
 * @param service the datastore to use for persisting the shard state
 * @param taskAttemptId the TaskAttemptID corresponding to this ShardState
 * @return the shard state corresponding to the provided key
 * @throws EntityNotFoundException if the given key can't be found
 */
public static ShardState getShardStateFromTaskAttemptId(DatastoreService service, TaskAttemptID taskAttemptId)
        throws EntityNotFoundException {
    ShardState state = new ShardState(service);
    Key key = KeyFactory.createKey("ShardState", taskAttemptId.toString());
    state.entity = service.get(key);
    return state;
}

From source file:com.google.appengine.tools.mapreduce.ShardState.java

License:Apache License

/**
 * Creates a shard state that's active but hasn't made any progress as of yet.
 *
 * The shard state isn't persisted when returned (so {@link #getKey()} will
 * return {@code null} until {@link #persist()} is called).
 * 
 * @param service the datastore to persist the ShardState to
 * @param taskAttemptId the TaskAttemptID corresponding to the returned 
 * ShardState
 * @return the initialized shard state
 */
public static ShardState generateInitializedShardState(DatastoreService service, TaskAttemptID taskAttemptId) {
    ShardState shardState = new ShardState(service);

    shardState.entity = new Entity("ShardState", taskAttemptId.toString());
    shardState.entity.setProperty(JOB_ID_PROPERTY, taskAttemptId.getJobID().toString());

    Counters counters = new Counters();
    shardState.setCounters(counters);

    shardState.setStatusString("");
    shardState.entity.setProperty(STATUS_PROPERTY, Status.ACTIVE.name());

    return shardState;
}
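
The two ShardState helpers above pair naturally: generateInitializedShardState creates an entity keyed by taskAttemptId.toString(), and getShardStateFromTaskAttemptId later rebuilds the key from the same string. A minimal usage sketch, not part of the original source (persist() and getKey() are the methods referenced in the Javadoc above):

import com.google.appengine.api.datastore.DatastoreService;
import com.google.appengine.api.datastore.DatastoreServiceFactory;
import com.google.appengine.api.datastore.EntityNotFoundException;
import org.apache.hadoop.mapreduce.TaskAttemptID;

public class ShardStateUsageSketch {
    public static void trackShard(TaskAttemptID taskAttemptId) throws EntityNotFoundException {
        DatastoreService datastore = DatastoreServiceFactory.getDatastoreService();

        // Create and store a fresh, active shard state keyed by the attempt's string form
        ShardState fresh = ShardState.generateInitializedShardState(datastore, taskAttemptId);
        fresh.persist();

        // Later, the same TaskAttemptID is enough to look the state up again,
        // because both helpers derive the datastore key from taskAttemptId.toString()
        ShardState reloaded = ShardState.getShardStateFromTaskAttemptId(datastore, taskAttemptId);
        System.out.println(reloaded.getKey());
    }
}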

From source file:com.google.appengine.tools.mapreduce.v2.impl.ShardState.java

License:Apache License

/**
 * Creates a shard state that's active but hasn't made any progress as of yet.
 *
 * The shard state isn't persisted when returned (so {@link #getKey()} will
 * return {@code null} until {@link #persist()} is called).
 *
 * @param service the datastore to persist the ShardState to
 * @param taskAttemptId the TaskAttemptID corresponding to the returned
 * ShardState
 * @return the initialized shard state
 */
public static ShardState generateInitializedShardState(DatastoreService service, TaskAttemptID taskAttemptId) {
    ShardState shardState = new ShardState(service);

    shardState.entity = new Entity("ShardState", taskAttemptId.toString());
    shardState.entity.setProperty(JOB_ID_PROPERTY, taskAttemptId.getJobID().toString());

    Counters counters = new Counters();
    shardState.setCounters(counters);

    shardState.setStatusString("");
    shardState.entity.setProperty(STATUS_PROPERTY, "" + Status.ACTIVE);

    return shardState;
}

From source file:com.inmobi.conduit.distcp.tools.mapred.TestCopyMapper.java

License:Apache License

private static Mapper<Text, FileStatus, NullWritable, Text>.Context getMapperContext(CopyMapper copyMapper,
        final StatusReporter reporter, final InMemoryWriter writer) throws IOException, InterruptedException {
    Mapper.Context ctx = Mockito.mock(Mapper.Context.class);
    Mockito.when(ctx.getConfiguration()).thenReturn(getConfiguration());
    Mockito.doAnswer(new Answer() {
        @Override
        public Object answer(InvocationOnMock invocationOnMock) throws Throwable {
            writer.write((NullWritable) invocationOnMock.getArguments()[0],
                    (Text) invocationOnMock.getArguments()[1]);
            return null;
        }
    }).when(ctx).write(Mockito.any(), Mockito.any());

    Mockito.doAnswer(new Answer<Counter>() {
        @Override
        public Counter answer(InvocationOnMock invocationOnMock) throws Throwable {
            return reporter.getCounter((Enum<?>) invocationOnMock.getArguments()[0]);
        }
    }).when(ctx).getCounter(Mockito.any(CopyMapper.Counter.class));

    Mockito.doAnswer(new Answer<Counter>() {
        @Override
        public Counter answer(InvocationOnMock invocationOnMock) throws Throwable {
            return reporter.getCounter((String) invocationOnMock.getArguments()[0],
                    (String) invocationOnMock.getArguments()[1]);
        }
    }).when(ctx).getCounter(Mockito.any(String.class), Mockito.any(String.class));

    final TaskAttemptID id = Mockito.mock(TaskAttemptID.class);
    Mockito.when(id.toString()).thenReturn("attempt1");
    Mockito.doAnswer(new Answer<TaskAttemptID>() {

        @Override
        public TaskAttemptID answer(InvocationOnMock invocationOnMock) throws Throwable {
            return id;
        }
    }).when(ctx).getTaskAttemptID();
    return ctx;
}

From source file:com.inmobi.conduit.local.CopyMapper.java

License:Apache License

private Path getTaskAttemptTmpDir(Context context) {
    TaskAttemptID attemptId = context.getTaskAttemptID();
    return new Path(getJobTmpDir(context, attemptId.getJobID()), attemptId.toString());
}

From source file:com.juniarto.secondsorter.SsJob.java

public static void killMap(TaskAttemptID taskAttemptID) throws Exception {

    //Job job = new Job(conf,"secondary sort");
    //job.failTask(taskAttemptID);
    //LOG.info("mapred job -kill-task " + taskAttemptID);
    //Process p = Runtime.getRuntime().exec("/home/hduser/hadoop-2.7.1-src/hadoop-dist/target/hadoop-2.7.1/bin/mapred job -fail-task " + taskAttemptID);
    //p.waitFor();
    ZMQ.Context context = ZMQ.context(1);
    ZMQ.Socket requester = context.socket(ZMQ.REQ);
    requester.connect("tcp://localhost:5557");

    String attemptID = taskAttemptID.toString();
    requester.send(attemptID.getBytes(), 0);
    byte[] reply = requester.recv(0);
    //LOG.info("RECEIVED " + new String(reply));
    requester.close();
    context.term();

}
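
The method above is only the requesting side of the exchange: it serializes the attempt as taskAttemptID.toString() and blocks until a reply arrives. A minimal sketch of a matching ZeroMQ REP endpoint, assuming the org.zeromq binding used above (the port matches the snippet, but the rest is an illustration and not part of the original source):

import org.zeromq.ZMQ;

public class KillTaskResponderSketch {
    public static void main(String[] args) {
        ZMQ.Context context = ZMQ.context(1);
        ZMQ.Socket responder = context.socket(ZMQ.REP);
        responder.bind("tcp://*:5557"); // same port the requester connects to

        // Receive the TaskAttemptID string sent by killMap(...)
        byte[] request = responder.recv(0);
        String attemptId = new String(request);
        System.out.println("Received kill request for " + attemptId);

        // ... trigger whatever task-kill mechanism is appropriate here ...

        responder.send("OK".getBytes(), 0); // the requester waits for this reply
        responder.close();
        context.term();
    }
}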

From source file:com.linkedin.drelephant.mapreduce.fetchers.MapReduceFSFetcherHadoop2.java

License:Apache License

protected MapReduceTaskData[] getTaskData(String jobId, List<JobHistoryParser.TaskInfo> infoList) {
    int sampleSize = sampleAndGetSize(jobId, infoList);

    List<MapReduceTaskData> taskList = new ArrayList<MapReduceTaskData>();
    for (int i = 0; i < sampleSize; i++) {
        JobHistoryParser.TaskInfo tInfo = infoList.get(i);
        if (!"SUCCEEDED".equals(tInfo.getTaskStatus())) {
            logger.info(String.format("Skipped a failed task of %s: %s", jobId, tInfo.getTaskId().toString()));
            continue;
        }

        String taskId = tInfo.getTaskId().toString();
        TaskAttemptID attemptId = tInfo.getSuccessfulAttemptId();
        MapReduceTaskData taskData = new MapReduceTaskData(taskId, attemptId.toString());

        MapReduceCounterData taskCounterData = getCounterData(tInfo.getCounters());
        long[] taskExecTime = getTaskExecTime(tInfo.getAllTaskAttempts().get(attemptId));

        taskData.setTimeAndCounter(taskExecTime, taskCounterData);
        taskList.add(taskData);
    }
    return taskList.toArray(new MapReduceTaskData[taskList.size()]);
}

From source file:eu.stratosphere.hadoopcompatibility.mapreduce.HadoopOutputFormat.java

License:Apache License

/**
 * Creates the temporary output file for the Hadoop RecordWriter.
 * @param taskNumber The number of the parallel instance.
 * @param numTasks The number of parallel tasks.
 * @throws IOException
 */
@Override
public void open(int taskNumber, int numTasks) throws IOException {
    if (Integer.toString(taskNumber + 1).length() > 6) {
        throw new IOException("Task id too large.");
    }

    // for hadoop 2.2
    this.configuration.set("mapreduce.output.basename", "tmp");

    TaskAttemptID taskAttemptID = TaskAttemptID.forName("attempt__0000_r_"
            + String.format("%" + (6 - Integer.toString(taskNumber + 1).length()) + "s", " ").replace(" ", "0")
            + Integer.toString(taskNumber + 1) + "_0");

    try {
        this.context = HadoopUtils.instantiateTaskAttemptContext(this.configuration, taskAttemptID);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    this.configuration.set("mapred.task.id", taskAttemptID.toString());
    // for hadoop 2.2
    this.configuration.set("mapreduce.task.attempt.id", taskAttemptID.toString());

    this.fileOutputCommitter = new FileOutputCommitter(new Path(this.configuration.get("mapred.output.dir")),
            context);

    try {
        this.fileOutputCommitter.setupJob(HadoopUtils.instantiateJobContext(this.configuration, new JobID()));
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    // compatible for hadoop 2.2.0, the temporary output directory is different from hadoop 1.2.1
    this.configuration.set("mapreduce.task.output.dir", this.fileOutputCommitter.getWorkPath().toString());

    try {
        this.recordWriter = this.mapreduceOutputFormat.getRecordWriter(this.context);
    } catch (InterruptedException e) {
        throw new IOException("Could not create RecordWriter.", e);
    }
}
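
The open(...) method above never receives a real attempt ID from a cluster; it builds a synthetic one, zero-padding the task number to six digits, and then propagates its toString() form through mapred.task.id and mapreduce.task.attempt.id. A small standalone sketch of the same padding logic (task number 2 is an arbitrary example) shows the resulting identifier:

import org.apache.hadoop.mapreduce.TaskAttemptID;

public class SyntheticAttemptIdSketch {
    public static void main(String[] args) {
        int taskNumber = 2; // zero-based parallel instance number, as in open(...)

        // Same padding expression as in open(...): left-pads (taskNumber + 1) to six digits
        String padded = String.format("%" + (6 - Integer.toString(taskNumber + 1).length()) + "s", " ")
                .replace(" ", "0") + Integer.toString(taskNumber + 1);

        TaskAttemptID taskAttemptID = TaskAttemptID.forName("attempt__0000_r_" + padded + "_0");

        // Prints: attempt__0000_r_000003_0
        System.out.println(taskAttemptID.toString());
    }
}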

From source file:io.druid.indexer.updater.HadoopConverterJob.java

License:Apache License

public static Path getTaskPath(JobID jobID, TaskAttemptID taskAttemptID, Path workingDirectory) {
    return new Path(getJobPath(jobID, workingDirectory), taskAttemptID.toString());
}

From source file:it.crs4.pydoop.mapreduce.pipes.TestPipeApplication.java

License:Apache License

/**
 * test PipesMapRunner: verifies that data is transferred from the reader
 *
 * @throws Exception
 */
@Test
public void testRunner() throws Exception {
    // clean old password files
    File[] psw = cleanTokenPasswordFile();
    try {
        JobID jobId = new JobID("201408272347", 0);
        TaskID taskId = new TaskID(jobId, TaskType.MAP, 0);
        TaskAttemptID taskAttemptid = new TaskAttemptID(taskId, 0);

        Job job = new Job(new Configuration());
        job.setJobID(jobId);
        Configuration conf = job.getConfiguration();
        conf.set(Submitter.IS_JAVA_RR, "true");
        conf.set(MRJobConfig.TASK_ATTEMPT_ID, taskAttemptid.toString());
        job.setInputFormatClass(DummyInputFormat.class);
        FileSystem fs = new RawLocalFileSystem();
        fs.setConf(conf);

        DummyInputFormat input_format = new DummyInputFormat();
        List<InputSplit> isplits = input_format.getSplits(job);

        InputSplit isplit = isplits.get(0);

        TaskAttemptContextImpl tcontext = new TaskAttemptContextImpl(conf, taskAttemptid);

        RecordReader<FloatWritable, NullWritable> rReader = input_format.createRecordReader(isplit, tcontext);

        TestMapContext context = new TestMapContext(conf, taskAttemptid, rReader, null, null, null, isplit);
        // stub for client
        File fCommand = getFileCommand("it.crs4.pydoop.mapreduce.pipes.PipeApplicationRunnableStub");
        conf.set(MRJobConfig.CACHE_LOCALFILES, fCommand.getAbsolutePath());
        // token for authorization
        Token<AMRMTokenIdentifier> token = new Token<AMRMTokenIdentifier>("user".getBytes(),
                "password".getBytes(), new Text("kind"), new Text("service"));
        TokenCache.setJobToken(token, job.getCredentials());
        conf.setBoolean(MRJobConfig.SKIP_RECORDS, true);
        PipesMapper<FloatWritable, NullWritable, IntWritable, Text> mapper = new PipesMapper<FloatWritable, NullWritable, IntWritable, Text>(
                context);

        initStdOut(conf);
        mapper.run(context);
        String stdOut = readStdOut(conf);

        // check part of the translated data (the client and the test share the same stdOut)
        // check version
        assertTrue(stdOut.contains("CURRENT_PROTOCOL_VERSION:0"));
        // check key and value classes
        assertTrue(stdOut.contains("Key class:org.apache.hadoop.io.FloatWritable"));
        assertTrue(stdOut.contains("Value class:org.apache.hadoop.io.NullWritable"));
        // check that all data from the reader was sent
        assertTrue(stdOut.contains("value:0.0"));
        assertTrue(stdOut.contains("value:9.0"));

    } finally {
        if (psw != null) {
            // remove password files
            for (File file : psw) {
                file.deleteOnExit();
            }
        }
    }
}