Example usage for the org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl constructor

Introduction

On this page you can find example usages of the org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl constructor, drawn from several open source projects.

Prototype

public TaskAttemptContextImpl(Configuration conf, TaskAttemptID taskId) 
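
As a quick orientation before the project examples, here is a minimal, self-contained sketch of the constructor in use. The class name TaskAttemptContextExample and the "jt" job tracker identifier are illustrative placeholders, not part of the Hadoop API; the two construction patterns mirror the test code collected below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;

public class TaskAttemptContextExample {
    public static void main(String[] args) {
        Configuration conf = new Configuration();

        // A default TaskAttemptID is sufficient for most unit tests.
        TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());

        // Alternatively, build a fully specified attempt id when the test
        // needs a realistic job/task/attempt identity.
        TaskAttemptID attemptId = new TaskAttemptID("jt", 1, TaskType.MAP, 0, 0);
        TaskAttemptContext detailed = new TaskAttemptContextImpl(conf, attemptId);

        System.out.println(context.getTaskAttemptID());
        System.out.println(detailed.getTaskAttemptID());
    }
}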

Usage

From source file:org.apache.rya.accumulo.mr.RdfFileInputFormatTest.java

License:Apache License
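This test helper creates a TaskAttemptContext with a fresh TaskAttemptID so that an RdfFileRecordReader can be created and initialized against a single local file split.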

void init(String filename) throws IOException, InterruptedException {
    conf = job.getConfiguration();
    File inputFile = new File(filename);
    Path inputPath = new Path(inputFile.getAbsoluteFile().toURI());
    InputSplit split = new FileSplit(inputPath, 0, inputFile.length(), null);
    TaskAttemptContext context = new TaskAttemptContextImpl(conf, new TaskAttemptID());
    reader = (RdfFileInputFormat.RdfFileRecordReader) new RdfFileInputFormat().createRecordReader(split,
            context);
    reader.initialize(split, context);
}

From source file:org.apache.rya.accumulo.mr.RyaInputFormatTest.java

License:Apache License
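This test configures a mock Accumulo instance, asks RyaInputFormat for its input splits, and then drives a RyaStatementRecordReader through a TaskAttemptContextImpl to read the stored statements back.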

@Test
public void testInputFormat() throws Exception {

    RyaStatement input = RyaStatement.builder().setSubject(new RyaURI("http://www.google.com"))
            .setPredicate(new RyaURI("http://some_other_uri")).setObject(new RyaURI("http://www.yahoo.com"))
            .setColumnVisibility(new byte[0]).setValue(new byte[0]).build();

    apiImpl.add(input);

    Job jobConf = Job.getInstance();

    RyaInputFormat.setMockInstance(jobConf, instance.getInstanceName());
    RyaInputFormat.setConnectorInfo(jobConf, username, password);
    RyaInputFormat.setTableLayout(jobConf, TABLE_LAYOUT.SPO);

    AccumuloInputFormat.setInputTableName(jobConf, table);
    AccumuloInputFormat.setScanIsolation(jobConf, false);
    AccumuloInputFormat.setLocalIterators(jobConf, false);
    AccumuloInputFormat.setOfflineTableScan(jobConf, false);

    RyaInputFormat inputFormat = new RyaInputFormat();

    JobContext context = new JobContextImpl(jobConf.getConfiguration(), jobConf.getJobID());

    List<InputSplit> splits = inputFormat.getSplits(context);

    Assert.assertEquals(1, splits.size());

    TaskAttemptContext taskAttemptContext = new TaskAttemptContextImpl(context.getConfiguration(),
            new TaskAttemptID(new TaskID(), 1));

    RecordReader<Text, RyaStatementWritable> reader = inputFormat.createRecordReader(splits.get(0),
            taskAttemptContext);

    RyaStatementRecordReader ryaStatementRecordReader = (RyaStatementRecordReader) reader;
    ryaStatementRecordReader.initialize(splits.get(0), taskAttemptContext);

    List<RyaStatement> results = new ArrayList<RyaStatement>();
    while (ryaStatementRecordReader.nextKeyValue()) {
        RyaStatementWritable writable = ryaStatementRecordReader.getCurrentValue();
        RyaStatement value = writable.getRyaStatement();
        Text text = ryaStatementRecordReader.getCurrentKey();
        RyaStatement stmt = RyaStatement.builder().setSubject(value.getSubject())
                .setPredicate(value.getPredicate()).setObject(value.getObject()).setContext(value.getContext())
                .setQualifier(value.getQualifer()).setColumnVisibility(value.getColumnVisibility())
                .setValue(value.getValue()).build();
        results.add(stmt);

        System.out.println(text);
        System.out.println(value);
    }

    Assert.assertEquals(2, results.size());
    Assert.assertTrue(results.contains(input));
}

From source file:org.apache.tajo.storage.hbase.HFileAppender.java

License:Apache License
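Here a TaskAttemptContextImpl is built from a Tajo task attempt id so that HFileOutputFormat2 can hand back a record writer and a FileOutputCommitter rooted at the staging directory.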

@Override
public void init() throws IOException {
    super.init();

    Configuration taskConf = new Configuration();
    Path stagingResultDir = new Path(stagingDir, TajoConstants.RESULT_DIR_NAME);
    taskConf.set(FileOutputFormat.OUTDIR, stagingResultDir.toString());

    ExecutionBlockId ebId = taskAttemptId.getTaskId().getExecutionBlockId();
    writerContext = new TaskAttemptContextImpl(taskConf, new TaskAttemptID(ebId.getQueryId().toString(),
            ebId.getId(), TaskType.MAP, taskAttemptId.getTaskId().getId(), taskAttemptId.getId()));

    HFileOutputFormat2 hFileOutputFormat2 = new HFileOutputFormat2();
    try {
        writer = hFileOutputFormat2.getRecordWriter(writerContext);

        committer = new FileOutputCommitter(FileOutputFormat.getOutputPath(writerContext), writerContext);
        workingFilePath = committer.getWorkPath();
    } catch (InterruptedException e) {
        throw new IOException(e.getMessage(), e);
    }

    LOG.info("Created hbase file writer: " + workingFilePath);
}

From source file:org.apache.tez.mapreduce.committer.MROutputCommitter.java

License:Apache License
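When the job uses the new MapReduce API, this method wraps the job configuration and a synthesized TaskAttemptID in a TaskAttemptContextImpl in order to instantiate the configured OutputFormat and obtain its OutputCommitter.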

@SuppressWarnings("rawtypes")
private org.apache.hadoop.mapreduce.OutputCommitter getOutputCommitter(OutputCommitterContext context) {

    org.apache.hadoop.mapreduce.OutputCommitter committer = null;
    newApiCommitter = false;
    if (jobConf.getBoolean("mapred.reducer.new-api", false)
            || jobConf.getBoolean("mapred.mapper.new-api", false)) {
        newApiCommitter = true;
        LOG.info("Using mapred newApiCommitter.");
    }

    if (newApiCommitter) {
        TaskAttemptID taskAttemptID = new TaskAttemptID(
                Long.toString(context.getApplicationId().getClusterTimestamp()),
                context.getApplicationId().getId(),
                ((jobConf.getBoolean(MRConfig.IS_MAP_PROCESSOR, false) ? TaskType.MAP : TaskType.REDUCE)), 0,
                context.getDAGAttemptNumber());

        TaskAttemptContext taskContext = new TaskAttemptContextImpl(jobConf, taskAttemptID);
        try {
            OutputFormat outputFormat = ReflectionUtils.newInstance(taskContext.getOutputFormatClass(),
                    jobConf);
            committer = outputFormat.getOutputCommitter(taskContext);
        } catch (Exception e) {
            throw new TezUncheckedException(e);
        }
    } else {
        committer = ReflectionUtils.newInstance(jobConf.getClass("mapred.output.committer.class",
                FileOutputCommitter.class, org.apache.hadoop.mapred.OutputCommitter.class), jobConf);
    }
    LOG.info("OutputCommitter for outputName=" + context.getOutputName() + ", vertexName="
            + context.getVertexName() + ", outputCommitterClass=" + committer.getClass().getName());
    return committer;
}

From source file:org.apache.tez.mapreduce.committer.MROutputCommitter.java

License:Apache License
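Task recovery rebuilds a TaskAttemptContextImpl for the attempt being recovered and delegates to the wrapped committer.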

@Override
public void recoverTask(int taskIndex, int attemptId) throws IOException {
    if (!initialized) {
        throw new RuntimeException("Committer not initialized");
    }
    TaskAttemptID taskAttemptID = new TaskAttemptID(
            Long.toString(getContext().getApplicationId().getClusterTimestamp())
                    + String.valueOf(getContext().getVertexIndex()),
            getContext().getApplicationId().getId(),
            ((jobConf.getBoolean(MRConfig.IS_MAP_PROCESSOR, false) ? TaskType.MAP : TaskType.REDUCE)),
            taskIndex, attemptId);
    TaskAttemptContext taskContext = new TaskAttemptContextImpl(jobConf, taskAttemptID);
    committer.recoverTask(taskContext);
}

From source file:org.apache.tinkerpop.gremlin.hadoop.structure.io.AbstractIoRegistryCheck.java

License:Apache License
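After writing StarGraph vertices through the supplied RecordWriter, the writer is closed with a TaskAttemptContextImpl derived from the graph's Hadoop configuration, and the written data is validated via OLAP and OLTP traversals.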

private void validateIoRegistryGraph(final HadoopGraph graph,
        final Class<? extends GraphComputer> graphComputerClass,
        final RecordWriter<NullWritable, VertexWritable> writer) throws Exception {

    for (int i = 0; i < NUMBER_OF_VERTICES; i++) {
        final StarGraph starGraph = StarGraph.open();
        Vertex vertex = starGraph.addVertex(T.label, "place", T.id, i, "point", new ToyPoint(i, i * 10),
                "message", "I'm " + i, "triangle", new ToyTriangle(i, i * 10, i * 100));
        vertex.addEdge("connection", starGraph.addVertex(T.id, i > 0 ? i - 1 : NUMBER_OF_VERTICES - 1));
        writer.write(NullWritable.get(), new VertexWritable(starGraph.getStarVertex()));
    }
    writer.close(new TaskAttemptContextImpl(ConfUtil.makeHadoopConfiguration(graph.configuration()),
            new TaskAttemptID()));

    // OLAP TESTING //
    validatePointTriangles(graph.traversal().withComputer(graphComputerClass).V().project("point", "triangle")
            .by("point").by("triangle").toList());
    validatePointTriangles(graph.traversal().withComputer(graphComputerClass).V().out()
            .project("point", "triangle").by("point").by("triangle").toList());
    validatePointTriangles(graph.traversal().withComputer(graphComputerClass).V().out().out()
            .project("point", "triangle").by("point").by("triangle").toList());
    // OLTP TESTING //
    validatePointTriangles(
            graph.traversal().V().project("point", "triangle").by("point").by("triangle").toList());
    // HDFS TESTING //
    /*validatePointTriangles(IteratorUtils.<Map<String, Object>>asList(IteratorUtils.<Vertex, Map<String, Object>>map(FileSystemStorage.open(ConfUtil.makeHadoopConfiguration(graph.configuration())).head(graph.configuration().getInputLocation(), graph.configuration().getGraphReader()),
        vertex -> {
            return new HashMap<String, Object>() {{
                put("point", vertex.value("point"));
                put("triangle", vertex.value("triangle"));
            }};
        })));*/
}

From source file:org.apache.tinkerpop.gremlin.hadoop.structure.io.HadoopElementIterator.java

License:Apache License
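This iterator creates one RecordReader per input split, constructing a separate TaskAttemptContextImpl for each createRecordReader call.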

public HadoopElementIterator(final HadoopGraph graph) {
    try {
        this.graph = graph;
        final Configuration configuration = ConfUtil.makeHadoopConfiguration(this.graph.configuration());
        final InputFormat<NullWritable, VertexWritable> inputFormat = ConfUtil
                .getReaderAsInputFormat(configuration);
        if (inputFormat instanceof FileInputFormat) {
            final Storage storage = FileSystemStorage.open(configuration);
            if (!this.graph.configuration().containsKey(Constants.GREMLIN_HADOOP_INPUT_LOCATION))
                return; // there is no input location and thus, no data (empty graph)
            if (!Constants.getSearchGraphLocation(this.graph.configuration().getInputLocation(), storage)
                    .isPresent())
                return; // there is no data at the input location (empty graph)
            configuration.set(Constants.MAPREDUCE_INPUT_FILEINPUTFORMAT_INPUTDIR, Constants
                    .getSearchGraphLocation(this.graph.configuration().getInputLocation(), storage).get());
        }
        final List<InputSplit> splits = inputFormat
                .getSplits(new JobContextImpl(configuration, new JobID(UUID.randomUUID().toString(), 1)));
        for (final InputSplit split : splits) {
            this.readers.add(inputFormat.createRecordReader(split,
                    new TaskAttemptContextImpl(configuration, new TaskAttemptID())));
        }
    } catch (final Exception e) {
        throw new IllegalStateException(e.getMessage(), e);
    }
}

From source file:org.apache.tinkerpop.gremlin.hadoop.structure.io.RecordReaderWriterTest.java

License:Apache License
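This test shares one TaskAttemptContextImpl between reading the input splits and, optionally, writing them back out through an OutputFormat; it then re-reads the written output with progressively more splits.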

private static void validateFileSplits(final List<FileSplit> fileSplits, final Configuration configuration,
        final Class<? extends InputFormat<NullWritable, VertexWritable>> inputFormatClass,
        final Optional<Class<? extends OutputFormat<NullWritable, VertexWritable>>> outFormatClass)
        throws Exception {

    final InputFormat inputFormat = ReflectionUtils.newInstance(inputFormatClass, configuration);
    final TaskAttemptContext job = new TaskAttemptContextImpl(configuration,
            new TaskAttemptID(UUID.randomUUID().toString(), 0, TaskType.MAP, 0, 0));

    int vertexCount = 0;
    int outEdgeCount = 0;
    int inEdgeCount = 0;

    final OutputFormat<NullWritable, VertexWritable> outputFormat = outFormatClass.isPresent()
            ? ReflectionUtils.newInstance(outFormatClass.get(), configuration)
            : null;
    final RecordWriter<NullWritable, VertexWritable> writer = null == outputFormat ? null
            : outputFormat.getRecordWriter(job);

    boolean foundKeyValue = false;
    for (final FileSplit split : fileSplits) {
        logger.info("\treading file split {}", split.getPath().getName() + " ({}",
                split.getStart() + "..." + (split.getStart() + split.getLength()), "{} {} bytes)");
        final RecordReader reader = inputFormat.createRecordReader(split, job);

        float lastProgress = -1f;
        while (reader.nextKeyValue()) {
            //System.out.println("" + reader.getProgress() + "> " + reader.getCurrentKey() + ": " + reader.getCurrentValue());
            final float progress = reader.getProgress();
            assertTrue(progress >= lastProgress);
            assertEquals(NullWritable.class, reader.getCurrentKey().getClass());
            final VertexWritable vertexWritable = (VertexWritable) reader.getCurrentValue();
            if (null != writer)
                writer.write(NullWritable.get(), vertexWritable);
            vertexCount++;
            outEdgeCount = outEdgeCount + (int) IteratorUtils.count(vertexWritable.get().edges(Direction.OUT));
            inEdgeCount = inEdgeCount + (int) IteratorUtils.count(vertexWritable.get().edges(Direction.IN));
            //
            final Vertex vertex = vertexWritable.get();
            assertEquals(Integer.class, vertex.id().getClass());
            if (vertex.value("name").equals("SUGAR MAGNOLIA")) {
                foundKeyValue = true;
                assertEquals(92, IteratorUtils.count(vertex.edges(Direction.OUT)));
                assertEquals(77, IteratorUtils.count(vertex.edges(Direction.IN)));
            }
            lastProgress = progress;
        }
    }

    assertEquals(8049, outEdgeCount);
    assertEquals(8049, inEdgeCount);
    assertEquals(outEdgeCount, inEdgeCount);
    assertEquals(808, vertexCount);
    assertTrue(foundKeyValue);

    if (null != writer) {
        writer.close(new TaskAttemptContextImpl(configuration, job.getTaskAttemptID()));
        for (int i = 1; i < 10; i++) {
            final File outputDirectory = new File(
                    new URL(configuration.get("mapreduce.output.fileoutputformat.outputdir")).toURI());
            final List<FileSplit> splits = generateFileSplits(
                    new File(outputDirectory.getAbsoluteFile() + "/_temporary/0/_temporary/"
                            + job.getTaskAttemptID().getTaskID().toString().replace("task", "attempt") + "_0"
                            + "/part-m-00000"),
                    i);
            validateFileSplits(splits, configuration, inputFormatClass, Optional.empty());
        }
    }
}

From source file:org.janusgraph.hadoop.compat.h2.Hadoop2Compat.java

License:Apache License
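A compatibility shim: on Hadoop 2 the factory method simply delegates to the TaskAttemptContextImpl constructor.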

@Override
public TaskAttemptContext newTask(Configuration c, TaskAttemptID t) {
    return new TaskAttemptContextImpl(c, t);
}

From source file:org.kiji.mapreduce.output.TestKijiHFileOutputFormat.java

License:Apache License
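This test builds a fully specified TaskAttemptID and context to verify that entries for the same row are written to a single HFile even when the configured maximum file size is exceeded.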

@Test
public void testMaxHFileSizeSameRow() throws Exception {
    final HFileKeyValue entry1 = entry("row-key", mDefaultLGId, "a", 1L, makeBytes(0, 1024));
    final HFileKeyValue entry2 = entry("row-key", mDefaultLGId, "b", 1L, makeBytes(0, 1024));

    mConf.setInt(KijiHFileOutputFormat.CONF_HREGION_MAX_FILESIZE, entry1.getLength() + 1);

    final TaskAttemptID taskAttemptId = new TaskAttemptID("jobTracker:jtPort", 314, TaskType.MAP, 159, 2);
    final TaskAttemptContext context = new TaskAttemptContextImpl(mConf, taskAttemptId);
    final Path outputDir = mFormat.getDefaultWorkFile(context, KijiHFileOutputFormat.OUTPUT_EXTENSION);
    final FileSystem fs = outputDir.getFileSystem(mConf);

    final RecordWriter<HFileKeyValue, NullWritable> writer = mFormat.getRecordWriter(context);
    writer.write(entry1, NW);
    writer.write(entry2, NW);
    writer.close(context);

    final Path defaultDir = new Path(outputDir, mDefaultLGId.toString());
    assertTrue(fs.exists(defaultDir));

    final Path inMemoryDir = new Path(outputDir, mInMemoryLGId.toString());
    assertTrue(!fs.exists(inMemoryDir));

    assertHFileContent(new Path(defaultDir, "00000"), entry1.getKeyValue(), entry2.getKeyValue());
    assertFalse(fs.exists(new Path(defaultDir, "00001")));

    mFormat.getOutputCommitter(context).commitTask(context);
}