Example usage for org.apache.hadoop.mapreduce TaskID TaskID

List of usage examples for org.apache.hadoop.mapreduce TaskID TaskID

Introduction

On this page you can find example usage for org.apache.hadoop.mapreduce TaskID TaskID.

Prototype

@Deprecated
public TaskID(JobID jobId, boolean isMap, int id) 

Source Link

Document

Constructs a TaskID object from given JobID .

Usage

From source file:com.asakusafw.runtime.compatibility.hadoop1.JobCompatibilityHadoop1.java

License:Apache License

@Override
public TaskID newMapTaskId(JobID jobId, int id) {
    // Builds a map-side task ID for the given job (Hadoop 1.x compatibility layer).
    if (jobId != null) {
        return new TaskID(jobId, true, id);
    }
    throw new IllegalArgumentException("jobId must not be null"); //$NON-NLS-1$
}

From source file:com.asakusafw.runtime.compatibility.hadoop1.JobCompatibilityHadoop1.java

License:Apache License

@Override
public TaskID newReduceTaskId(JobID jobId, int id) {
    // Builds a reduce-side task ID for the given job (Hadoop 1.x compatibility layer).
    if (jobId != null) {
        return new TaskID(jobId, false, id);
    }
    throw new IllegalArgumentException("jobId must not be null"); //$NON-NLS-1$
}

From source file:com.asakusafw.runtime.compatibility.hadoop2.JobCompatibilityHadoop2.java

License:Apache License

private static TaskID newTaskIdMr1(JobID jobId, boolean isMap, int id) {
    // Hadoop 2.x MR1 still requires the deprecated boolean-based TaskID constructor,
    // so the warning is suppressed on the narrowest possible scope.
    @SuppressWarnings("deprecation")
    TaskID legacyId = new TaskID(jobId, isMap, id);
    return legacyId;
}

From source file:com.asakusafw.testdriver.file.FileDeployer.java

License:Apache License

/**
 * Opens output for the specified {@link OutputFormat}.
 * @param <V> value type
 * @param definition target model definition
 * @param destination output location
 * @param output the output format used to write model objects
 * @return the opened {@link ModelOutput}
 * @throws IOException if failed to open the target output
 * @throws IllegalArgumentException if some parameters were {@code null}
 */
public <V> ModelOutput<V> openOutput(DataModelDefinition<V> definition, final String destination,
        FileOutputFormat<? super NullWritable, ? super V> output) throws IOException {
    assert destination != null;
    assert output != null;
    LOG.debug("Opening {} using {}", destination, output.getClass().getName());
    Job job = Job.getInstance(configuration);
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(definition.getModelClass());
    // Create the staging directory atomically. The previous createTempFile()
    // + delete() + mkdirs() dance had a TOCTOU race and left the directory
    // with default (world-readable) permissions; createTempDirectory throws
    // IOException itself on failure, preserving the declared contract.
    // Fully qualified to avoid clashing with the Hadoop Path import.
    final File temporaryDir = java.nio.file.Files.createTempDirectory("asakusa").toFile();
    LOG.debug("Using staging deploy target: {}", temporaryDir);
    URI uri = temporaryDir.toURI();
    FileOutputFormat.setOutputPath(job, new Path(uri));
    // Synthetic attempt ID: this runs outside a real MapReduce job.
    TaskAttemptContext context = new TaskAttemptContextImpl(job.getConfiguration(),
            new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0));
    // Closing the returned output deploys the staged files to the destination.
    FileOutputFormatDriver<V> result = new FileOutputFormatDriver<V>(context, output, NullWritable.get()) {
        @Override
        public void close() throws IOException {
            super.close();
            deploy(destination, temporaryDir);
        }
    };
    return result;
}

From source file:com.asakusafw.testdriver.file.FileExporterRetriever.java

License:Apache License

/**
 * Creates a {@link DataModelSource} that reads back files exported by the
 * given {@link FileExporterDescription}.
 * NOTE(review): the original log message was mojibake ("??????", a
 * mis-encoded non-ASCII string); it has been replaced with English text.
 */
@Override
public <V> DataModelSource createSource(DataModelDefinition<V> definition, FileExporterDescription description,
        TestContext context) throws IOException {
    LOG.info("Retrieving exported data: {}", description);
    VariableTable variables = createVariables(context);
    checkType(definition, description);
    Configuration conf = configurations.newInstance();
    Job job = Job.getInstance(conf);
    // Resolve embedded variables in the export path before using it as input.
    String resolved = variables.parse(description.getPathPrefix(), false);
    FileInputFormat.setInputPaths(job, new Path(resolved));
    // Synthetic attempt ID: this runs outside a real MapReduce job.
    TaskAttemptContext taskContext = new TaskAttemptContextImpl(job.getConfiguration(),
            new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0));
    // Use the input format that corresponds to the exporter's output format.
    FileInputFormat<?, V> format = getOpposite(conf, description.getOutputFormat());
    FileInputFormatDriver<V> result = new FileInputFormatDriver<>(definition, taskContext, format);
    return result;
}

From source file:com.google.appengine.tools.mapreduce.MapReduceServletTest.java

License:Apache License

/**
 * Test that handleController has reasonable behavior when there are still
 * active workers: counters are aggregated, quota is refilled only for the
 * active shard, and a follow-up controller task is enqueued.
 *
 * @throws EntityNotFoundException
 */
public void testHandleController_withContinue() throws EntityNotFoundException {
    JobID jobId = new JobID("foo", 1);
    HttpServletRequest request = createMockControllerRequest(0, jobId);
    replay(request);

    Configuration sampleConf = getSampleMapReduceConfiguration();

    // Persist the MapReduce job state the controller will load.
    persistMRState(jobId, sampleConf);

    // Shard 1: still active (not done), counter a/z = 1.
    ShardState shardState1 = ShardState.generateInitializedShardState(ds,
            new TaskAttemptID(new TaskID(jobId, true, 1), 1));
    Counters counters1 = new Counters();
    counters1.findCounter("a", "z").increment(1);
    shardState1.setCounters(counters1);
    shardState1.setInputSplit(sampleConf, new StubInputSplit(1));
    shardState1.setRecordReader(sampleConf, new StubRecordReader());
    shardState1.persist();

    // Shard 2: identical setup but marked done, so it must not get quota.
    ShardState shardState2 = ShardState.generateInitializedShardState(ds,
            new TaskAttemptID(new TaskID(jobId, true, 2), 1));
    Counters counters2 = new Counters();
    counters2.findCounter("a", "z").increment(1);
    shardState2.setCounters(counters2);
    shardState2.setInputSplit(sampleConf, new StubInputSplit(2));
    shardState2.setRecordReader(sampleConf, new StubRecordReader());
    shardState2.setDone();
    shardState2.persist();

    // doPost should call handleCallback()
    // resp is never used
    servlet.doPost(request, null);

    MapReduceState mrState = MapReduceState.getMapReduceStateFromJobID(ds, jobId);

    // Check result of aggregateState(): both shards' counters are summed.
    assertEquals(2, mrState.getCounters().findCounter("a", "z").getValue());

    // Check the result of refillQuota()
    // Should fill the active thread but not the done one.
    assertEquals(1000, new QuotaManager(MemcacheServiceFactory.getMemcacheService())
            .get("" + shardState1.getTaskAttemptID()));
    assertEquals(0, new QuotaManager(MemcacheServiceFactory.getMemcacheService())
            .get("" + shardState2.getTaskAttemptID()));

    // Check that the next controller task got enqueued.
    QueueStateInfo defaultQueue = getDefaultQueueInfo();
    assertEquals(1, defaultQueue.getCountTasks());
    TaskStateInfo firstTask = defaultQueue.getTaskInfo().get(0);
    assertEquals("/mapreduce/" + MapReduceServlet.CONTROLLER_PATH, firstTask.getUrl());
    assertTrue(firstTask.getBody(), firstTask.getBody().indexOf("jobID=job_foo_0001") != -1);

    // One shard is still active; two shards exist in total.
    assertEquals(1, mrState.getActiveShardCount());
    assertEquals(2, mrState.getShardCount());

    verify(request);
}

From source file:com.inmobi.conduit.distcp.tools.mapred.lib.DynamicInputFormat.java

License:Apache License

private List<InputSplit> createSplits(JobContext jobContext, List<DynamicInputChunk> chunks)
        throws IOException {
    // One split per map task, capped by the number of available chunks.
    int maxMaps = getNumMapTasks(HadoopCompat.getConfiguration(jobContext));
    final int splitCount = Math.min(maxMaps, chunks.size());
    List<InputSplit> splits = new ArrayList<InputSplit>(splitCount);

    for (int index = 0; index < splitCount; index++) {
        DynamicInputChunk chunk = chunks.get(index);
        // Assign the chunk to a synthetic map-task ID so workers can claim it.
        chunk.assignTo(new TaskID(HadoopCompat.getJobId(jobContext), true, index));
        // Zero-length placeholder split; the real work is driven by the chunk.
        splits.add(new FileSplit(chunk.getPath(), 0, 0, null));
    }
    DistCpUtils.publish(HadoopCompat.getConfiguration(jobContext), CONF_LABEL_NUM_SPLITS, splits.size());
    return splits;
}

From source file:com.netflix.bdp.s3.TestS3MultipartOutputCommitter.java

License:Apache License

private static Set<String> runTasks(JobContext job, int numTasks, int numFiles) throws IOException {
    Set<String> uploads = Sets.newHashSet();

    for (int task = 0; task < numTasks; task += 1) {
        // Attempt number is a deterministic pseudo-random function of the task id.
        int attemptNumber = (task * 37) % numTasks;
        TaskID taskId = new TaskID(JOB_ID, TaskType.REDUCE, task);
        TaskAttemptID attemptID = new TaskAttemptID(taskId, attemptNumber);
        // Give each attempt its own configuration copy so tasks stay isolated.
        Configuration taskConf = new Configuration(job.getConfiguration());
        TaskAttemptContext attempt = new TaskAttemptContextImpl(taskConf, attemptID);
        MockedS3Committer taskCommitter = new MockedS3Committer(S3_OUTPUT_PATH, attempt);
        commitTask(taskCommitter, attempt, numFiles);
        uploads.addAll(taskCommitter.results.getUploads());
    }

    return uploads;
}

From source file:com.scaleoutsoftware.soss.hserver.hadoop.HadoopVersionSpecificCode_1x.java

License:Apache License

@Override
public TaskAttemptID createTaskAttemptId(JobID jobID, boolean isMapper, int hadoopPartition) {
    // Always attempt 0: this compatibility layer never retries attempts.
    TaskID taskId = new TaskID(jobID, isMapper, hadoopPartition);
    return new TaskAttemptID(taskId, 0);
}

From source file:com.twitter.hraven.hadoopJobMonitor.AppStatusCheckerTest.java

License:Apache License

// Test fixture constructor: wires up a YARN application ID, task/attempt IDs,
// progress caches, and mocked REST/RPC clients for AppStatusChecker tests.
public AppStatusCheckerTest() throws ConfigurationAccessException, RestException, SAXException, IOException,
        ParserConfigurationException, YarnException {
    // Derive the application ID from the legacy job ID so the two agree.
    appId = new MyApplicationId();
    appId.setId(oldJobId.getId());
    appId.setClusterTimestamp(Long.parseLong(oldJobId.getJtIdentifier()));

    // Fixed map task 0, attempt 0 used throughout the tests.
    taskId = new TaskID(oldJobId, TaskType.MAP, 0);
    taskAttemptId = new TaskAttemptID(taskId, 0);

    vConf.setFloat(HadoopJobMonitorConfiguration.TASK_PROGRESS_THRESHOLD, 0.2f);
    // NOTE(review): these two getInt calls discard their results; presumably
    // they were meant to be setInt — confirm against the original project.
    vConf.getInt(HadoopJobMonitorConfiguration.MAX_CACHED_TASK_PROGRESSES, 10);
    vConf.getInt(HadoopJobMonitorConfiguration.MAX_CACHED_APP_CONFS, 10);
    // Singletons must be initialized before the checker is constructed.
    AppConfCache.init(vConf);
    ProgressCache.init(vConf);
    HadoopJobMonitorMetrics.initSingleton(vConf);
    taskProgressCache = ProgressCache.getTaskProgressCache();
    attemptProgressCache = ProgressCache.getAttemptProgressCache();

    // Any job ID resolves to the mocked client service.
    when(clientCache.getClient(any(JobID.class))).thenReturn(clientService);
    appReport = mock(ApplicationReport.class);
    when(appReport.getApplicationId()).thenReturn(appId);
    appStatusChecker = new AppStatusChecker(vConf, appReport, clientCache, rm, new AppCheckerProgress() {
        @Override
        public void finished() {
            // No-op: progress completion is irrelevant to these tests.
        }
    });

    // Replace the RestClient singleton with a mock via static mocking.
    mockStatic(RestClient.class);
    restClient = mock(RestClient.class);
    when(RestClient.getInstance()).thenReturn(restClient);
}