Example usage for org.apache.hadoop.mapreduce TaskAttemptID getId

Introduction

On this page you can find example usages for org.apache.hadoop.mapreduce TaskAttemptID.getId().

Prototype

public int getId() 

Document

Returns the int which represents the identifier. For a TaskAttemptID, this is the attempt number of the task attempt (starting from 0), as distinct from the task number returned by getTaskID().getId().
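
As a quick illustration (a minimal sketch; the attempt id string follows the canonical format from the Hadoop javadoc and is not taken from the examples below), getId() returns the attempt number, which is distinct from the task number:

import org.apache.hadoop.mapreduce.TaskAttemptID;

public class GetIdDemo {
    public static void main(String[] args) {
        // Canonical form: attempt_<jtIdentifier>_<jobId>_<m|r>_<taskNum>_<attemptNum>
        TaskAttemptID id = TaskAttemptID.forName("attempt_200707121733_0003_m_000005_0");
        System.out.println(id.getId());             // 0 -> the attempt number
        System.out.println(id.getTaskID().getId()); // 5 -> the task number
    }
}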

Usage

From source file:com.netflix.bdp.s3.TestS3MultipartOutputCommitter.java

License:Apache License

private static Path writeOutputFile(TaskAttemptID id, Path dest, String content, long copies)
        throws IOException {
    String fileName = ((id.getTaskType() == TaskType.REDUCE) ? "r_" : "m_") + id.getTaskID().getId() + "_"
            + id.getId() + "_" + UUID.randomUUID().toString();
    Path outPath = new Path(dest, fileName);
    FileSystem fs = outPath.getFileSystem(getConfiguration());

    try (OutputStream out = fs.create(outPath)) {
        byte[] bytes = content.getBytes(StandardCharsets.UTF_8);
        for (int i = 0; i < copies; i += 1) {
            out.write(bytes);
        }
    }

    return outPath;
}
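
For a hypothetical reduce attempt (this id is illustrative, not taken from the test), the generated name encodes the task type, task number, and attempt number:

TaskAttemptID id = TaskAttemptID.forName("attempt_200707121733_0003_r_000002_1");
// writeOutputFile(id, dest, "data", 1) creates a file named like
// "r_2_1_<random UUID>", so every task attempt writes to a distinct path.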

From source file:io.druid.indexer.JobHelper.java

License:Apache License

public static DataSegment serializeOutIndex(final DataSegment segmentTemplate,
        final Configuration configuration, final Progressable progressable, final TaskAttemptID taskAttemptID,
        final File mergedBase, final Path segmentBasePath) throws IOException {
    final FileSystem outputFS = FileSystem.get(segmentBasePath.toUri(), configuration);
    final Path tmpPath = new Path(segmentBasePath, String.format("index.zip.%d", taskAttemptID.getId()));
    final AtomicLong size = new AtomicLong(0L);
    final DataPusher zipPusher = (DataPusher) RetryProxy.create(DataPusher.class, new DataPusher() {
        @Override
        public long push() throws IOException {
            try (OutputStream outputStream = outputFS.create(tmpPath, true, DEFAULT_FS_BUFFER_SIZE,
                    progressable)) {
                size.set(zipAndCopyDir(mergedBase, outputStream, progressable));
                outputStream.flush();
            } catch (IOException | RuntimeException exception) {
                log.error(exception, "Exception in retry loop");
                throw exception;
            }
            return -1;
        }
    }, RetryPolicies.exponentialBackoffRetry(NUM_RETRIES, SECONDS_BETWEEN_RETRIES, TimeUnit.SECONDS));
    zipPusher.push();
    log.info("Zipped %,d bytes to [%s]", size.get(), tmpPath.toUri());

    final Path finalIndexZipFilePath = new Path(segmentBasePath, "index.zip");
    final URI indexOutURI = finalIndexZipFilePath.toUri();
    final ImmutableMap<String, Object> loadSpec;
    // TODO: Make this a part of Pushers or Pullers
    switch (outputFS.getScheme()) {
    case "hdfs":
        loadSpec = ImmutableMap.<String, Object>of("type", "hdfs", "path", indexOutURI.toString());
        break;
    case "s3":
    case "s3n":
        loadSpec = ImmutableMap.<String, Object>of("type", "s3_zip", "bucket", indexOutURI.getHost(), "key",
                indexOutURI.getPath().substring(1) // remove the leading "/"
        );
        break;
    case "file":
        loadSpec = ImmutableMap.<String, Object>of("type", "local", "path", indexOutURI.getPath());
        break;
    default:
        throw new IAE("Unknown file system scheme [%s]", outputFS.getScheme());
    }
    final DataSegment finalSegment = segmentTemplate.withLoadSpec(loadSpec).withSize(size.get())
            .withBinaryVersion(SegmentUtils.getVersionFromDir(mergedBase));

    if (!renameIndexFiles(outputFS, tmpPath, finalIndexZipFilePath)) {
        throw new IOException(String.format("Unable to rename [%s] to [%s]", tmpPath.toUri().toString(),
                finalIndexZipFilePath.toUri().toString()));
    }
    writeSegmentDescriptor(outputFS, finalSegment, new Path(segmentBasePath, "descriptor.json"), progressable);
    return finalSegment;
}
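
Note the role of taskAttemptID.getId() here: each attempt writes its own index.zip.<attemptNum> under segmentBasePath, so retries and speculative attempts of the same task do not clobber one another, and only the successful attempt's file is renamed to the final index.zip.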

From source file:org.apache.beam.sdk.io.hadoop.format.HDFSSynchronizationTest.java

License:Apache License

@Test
public void testTaskAttemptIdAcquire() {
    int tasksCount = 100;
    int taskId = 25;

    for (int i = 0; i < tasksCount; i++) {
        TaskAttemptID taskAttemptID = tested.acquireTaskAttemptIdLock(configuration, taskId);
        assertTrue(isFileExists(getTaskAttemptIdPath(taskId, taskAttemptID.getId())));
    }
}

From source file:org.apache.druid.indexer.JobHelper.java

License:Apache License

public static Path makeTmpPath(final Path basePath, final FileSystem fs, final DataSegment segmentTemplate,
        final TaskAttemptID taskAttemptID, DataSegmentPusher dataSegmentPusher) {
    return new Path(prependFSIfNullScheme(fs, basePath), StringUtils.format("./%s.%d",
            dataSegmentPusher.makeIndexPathName(segmentTemplate, JobHelper.INDEX_ZIP), taskAttemptID.getId()));
}
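
Assuming makeIndexPathName yields a name ending in index.zip, this produces a sibling temporary path such as ./index.zip.3 for attempt number 3, the same attempt-scoped naming used by serializeOutIndex above.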

From source file:org.apache.hcatalog.pig.TestE2EScenarios.java

License:Apache License

private TaskAttemptContext createTaskAttemptContext(Configuration tconf) {
    Configuration conf = (tconf == null) ? (new Configuration()) : tconf;
    TaskAttemptID taskId = new TaskAttemptID();
    conf.setInt("mapred.task.partition", taskId.getId());
    conf.set("mapred.task.id", "attempt__0000_r_000000_" + taskId.getId());
    TaskAttemptContext rtaskContext = new TaskAttemptContext(conf, taskId);
    return rtaskContext;
}

From source file:org.apache.hive.hcatalog.pig.TestE2EScenarios.java

License:Apache License

private TaskAttemptContext createTaskAttemptContext(Configuration tconf) {
    Configuration conf = (tconf == null) ? (new Configuration()) : tconf;
    TaskAttemptID taskId = HCatMapRedUtil.createTaskAttemptID(new JobID("200908190029", 1), false, 1, 1);
    conf.setInt("mapred.task.partition", taskId.getId());
    conf.set("mapred.task.id", taskId.toString());
    TaskAttemptContext rtaskContext = HCatMapRedUtil.createTaskAttemptContext(conf, taskId);
    return rtaskContext;
}

From source file:org.apache.pig.backend.hadoop.executionengine.shims.HadoopShims.java

License:Apache License

/**
 * Fetch mode needs to set the task id explicitly, which is otherwise done by Hadoop.
 * @param conf the job configuration to update
 * @param taskAttemptID the task attempt whose attempt number is stored in the configuration
 */
public static void setTaskAttemptId(Configuration conf, TaskAttemptID taskAttemptID) {
    conf.setInt(MRConfiguration.JOB_APPLICATION_ATTEMPT_ID, taskAttemptID.getId());
}
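
A minimal sketch of a call site, assuming Pig's fetch mode with a locally constructed attempt id (the id string is hypothetical):

Configuration conf = new Configuration();
TaskAttemptID taskAttemptID = TaskAttemptID.forName("attempt_local_0001_m_000000_0");
HadoopShims.setTaskAttemptId(conf, taskAttemptID);
// conf now carries MRConfiguration.JOB_APPLICATION_ATTEMPT_ID == taskAttemptID.getId(), i.e. 0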