Example usage for org.apache.hadoop.mapreduce TaskAttemptContext getConfiguration

Introduction

This page collects example usages of org.apache.hadoop.mapreduce TaskAttemptContext getConfiguration from open-source projects.

Prototype

public Configuration getConfiguration();

Document

Return the configuration for the job.
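
The method is typically called inside initialize() of a custom RecordReader, createRecordReader() of an InputFormat, or getRecordWriter() of an OutputFormat, as the examples below show. The following is a minimal sketch and is not taken from any of the projects below; the class name and the example.reader.max.records property are invented for illustration. It reads a numeric setting from the configuration returned by getConfiguration():

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Hypothetical reader: emits a configurable number of synthetic records.
public class ConfiguredRecordReader extends RecordReader<LongWritable, Text> {
    private long maxRecords;
    private long emitted;

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException {
        // The task attempt context exposes the job configuration.
        Configuration conf = context.getConfiguration();
        // "example.reader.max.records" is an invented property used only in this sketch.
        maxRecords = conf.getLong("example.reader.max.records", 10L);
    }

    @Override
    public boolean nextKeyValue() {
        return emitted++ < maxRecords;
    }

    @Override
    public LongWritable getCurrentKey() {
        return new LongWritable(emitted);
    }

    @Override
    public Text getCurrentValue() {
        return new Text("record-" + emitted);
    }

    @Override
    public float getProgress() {
        return maxRecords == 0 ? 1f : Math.min(1f, (float) emitted / maxRecords);
    }

    @Override
    public void close() {
    }
}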

Usage

From source file:edu.uci.ics.pregelix.api.io.generated.GeneratedVertexReader.java

License:Apache License

@Override
final public void initialize(InputSplit inputSplit, TaskAttemptContext context) throws IOException {
    configuration = context.getConfiguration();
    totalRecords = configuration.getLong(GeneratedVertexReader.READER_VERTICES,
            GeneratedVertexReader.DEFAULT_READER_VERTICES);
    reverseIdOrder = configuration.getBoolean(GeneratedVertexReader.REVERSE_ID_ORDER,
            GeneratedVertexReader.DEAFULT_REVERSE_ID_ORDER);
    this.inputSplit = (BasicGenInputSplit) inputSplit;
}

From source file:edu.uci.ics.pregelix.runtime.converter.ReadConverterFactory.java

License:Apache License

@SuppressWarnings("rawtypes")
@Override
public IReadConverter getReadConverter(IHyracksTaskContext ctx, int partitionId) throws HyracksDataException {
    final Configuration conf = confFactory.createConfiguration();
    // Set context properly
    ContextFactory ctxFactory = new ContextFactory();
    TaskAttemptContext mapperContext = ctxFactory.createContext(conf, partitionId);
    mapperContext.getConfiguration().setClassLoader(ctx.getJobletContext().getClassLoader());
    conf.setClassLoader(ctx.getJobletContext().getClassLoader());
    IterationUtils.setJobContext(BspUtils.getJobId(conf), ctx, mapperContext);
    Vertex.taskContext = mapperContext;

    final Vertex vertex = BspUtils.createVertex(conf);
    vertex.setVertexContext(IterationUtils.getVertexContext(BspUtils.getJobId(conf), ctx));

    final VertexInputConverter inputConverter = BspUtils.createVertexInputConverter(conf);

    return new IReadConverter() {

        @Override
        public void open(ARecordType recordType) throws HyracksDataException {
            inputConverter.open(recordType);
        }

        @Override
        public void convert(ARecordVisitablePointable recordPointable, ArrayTupleBuilder outputTb)
                throws HyracksDataException {
            try {
                // Converts an input AsterixDB record into a vertex object.
                vertex.reset();
                inputConverter.convert(recordPointable, vertex);

                // Outputs a tuple of <vertexId, vertex>.
                outputTb.reset();
                WritableComparable vertexId = vertex.getVertexId();
                DataOutput dos = outputTb.getDataOutput();
                vertexId.write(dos);
                outputTb.addFieldEndOffset();
                vertex.write(dos);
                outputTb.addFieldEndOffset();
            } catch (Exception e) {
                throw new HyracksDataException(e);
            }
        }

        @Override
        public void close() throws HyracksDataException {
            inputConverter.close();
        }

    };
}

From source file:edu.uci.ics.pregelix.runtime.converter.WriteConverterFactory.java

License:Apache License

@SuppressWarnings("rawtypes")
@Override
public IWriteConverter getFieldWriteConverter(IHyracksTaskContext ctx, int partitionId)
        throws HyracksDataException {
    final Configuration conf = confFactory.createConfiguration();
    // Set context properly
    ContextFactory ctxFactory = new ContextFactory();
    TaskAttemptContext mapperContext = ctxFactory.createContext(conf, partitionId);
    mapperContext.getConfiguration().setClassLoader(ctx.getJobletContext().getClassLoader());
    conf.setClassLoader(ctx.getJobletContext().getClassLoader());
    IterationUtils.setJobContext(BspUtils.getJobId(conf), ctx, mapperContext);
    Vertex.taskContext = mapperContext;

    final Vertex vertex = BspUtils.createVertex(conf);
    vertex.setVertexContext(IterationUtils.getVertexContext(BspUtils.getJobId(conf), ctx));

    final VertexOutputConverter outputConverter = BspUtils.createVertexOutputConverter(conf);
    final ResetableByteArrayInputStream inputStream = new ResetableByteArrayInputStream();
    final DataInput dataInput = new DataInputStream(inputStream);
    final RecordBuilder recordBuilder = new RecordBuilder();

    return new IWriteConverter() {

        @Override
        public void open(ARecordType recordType) throws HyracksDataException {
            recordBuilder.reset(recordType);
            outputConverter.open(recordType);
        }

        @Override
        public void convert(byte[] data, int start, int len, ArrayTupleBuilder outputTb)
                throws HyracksDataException {
            try {
                inputStream.setByteArray(data, start);
                vertex.readFields(dataInput);
                recordBuilder.init();
                outputTb.reset();
                outputConverter.convert(vertex.getVertexId(), outputTb.getDataOutput());
                outputTb.addFieldEndOffset();
                outputConverter.convert(vertex, recordBuilder);
                // By default, the record type tag is stored in AsterixDB.
                recordBuilder.write(outputTb.getDataOutput(), true);
                outputTb.addFieldEndOffset();
            } catch (Exception e) {
                throw new HyracksDataException(e);
            }
        }

        @Override
        public void close() throws HyracksDataException {
            outputConverter.close();
        }
    };
}

From source file:edu.umd.cloud9.collection.clue.ClueWarcInputFormat2.java

License:Open Source License

/**
 * Just return the record reader.
 */
@Override
public RecordReader<LongWritable, WebDocument> createRecordReader(InputSplit split, TaskAttemptContext context)
        throws IOException {
    return new ClueWarcRecordReader(context.getConfiguration(), (FileSplit) split);
}

From source file:edu.umn.cs.spatialHadoop.mapred.TextOutputFormat3.java

License:Open Source License

@Override
public RecordWriter<K, V> getRecordWriter(TaskAttemptContext task) throws IOException, InterruptedException {
    Configuration conf = task.getConfiguration();
    boolean isCompressed = getCompressOutput(task);
    String keyValueSeparator = conf.get("mapred.textoutputformat.separator", "\t");
    if (!isCompressed) {
        Path file = getDefaultWorkFile(task, "");
        FileSystem fs = file.getFileSystem(conf);
        FSDataOutputStream fileOut = fs.create(file, task);
        return new LineRecordWriter<K, V>(fileOut, keyValueSeparator);
    } else {
        Class<? extends CompressionCodec> codecClass = getOutputCompressorClass(task, GzipCodec.class);
        // create the named codec
        CompressionCodec codec = ReflectionUtils.newInstance(codecClass, conf);
        // build the filename including the extension
        Path file = getDefaultWorkFile(task, codec.getDefaultExtension());
        FileSystem fs = file.getFileSystem(conf);
        FSDataOutputStream fileOut = fs.create(file, task);
        return new LineRecordWriter<K, V>(new DataOutputStream(codec.createOutputStream(fileOut)),
                keyValueSeparator);
    }
}
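
The separator and compression settings read above come from the job configuration. Below is a minimal driver-side sketch, not part of the project above; the driver class name is invented, and it only shows how those settings would be supplied before the job runs:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

// Hypothetical driver that configures the output separator and compression.
public class TextOutputDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Read back in getRecordWriter() via conf.get("mapred.textoutputformat.separator", "\t").
        conf.set("mapred.textoutputformat.separator", ",");
        Job job = Job.getInstance(conf, "text-output-example");
        // Compression is detected via getCompressOutput(task) and the configured codec class.
        FileOutputFormat.setCompressOutput(job, true);
        FileOutputFormat.setOutputCompressorClass(job, GzipCodec.class);
        // ... set input/output paths, mapper/reducer classes, and submit the job.
    }
}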

From source file:edu.umn.cs.spatialHadoop.mapreduce.RTreeRecordReader3.java

License:Open Source License

@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    // Fall back to a fresh Configuration when no task context is available (e.g., standalone use).
    Configuration conf = context != null ? context.getConfiguration() : new Configuration();
    initialize(split, conf);
}

From source file:edu.umn.cs.spatialHadoop.mapreduce.SpatialInputFormat3.java

License:Open Source License

@Override
public RecordReader<K, Iterable<V>> createRecordReader(InputSplit split, TaskAttemptContext context)
        throws IOException, InterruptedException {
    Path path;
    String extension;
    if (split instanceof FileSplit) {
        FileSplit fsplit = (FileSplit) split;
        extension = FileUtil.getExtensionWithoutCompression(path = fsplit.getPath());
    } else if (split instanceof CombineFileSplit) {
        CombineFileSplit csplit = (CombineFileSplit) split;
        extension = FileUtil.getExtensionWithoutCompression(path = csplit.getPath(0));
    } else {
        throw new RuntimeException("Cannot process splits of type " + split.getClass());
    }
    // If this extension is for a compression, skip it and take the previous
    // extension
    if (extension.equals("hdf")) {
        // HDF File. Create HDFRecordReader
        return (RecordReader) new HDFRecordReader();
    }
    if (extension.equals("rtree")) {
        // File is locally indexed as RTree
        return (RecordReader) new RTreeRecordReader3<V>();
    }
    // For backward compatibility, check if the file is RTree indexed from
    // its signature
    Configuration conf = context != null ? context.getConfiguration() : new Configuration();
    if (SpatialSite.isRTree(path.getFileSystem(conf), path)) {
        return (RecordReader) new RTreeRecordReader3<V>();
    }
    // Check if a custom record reader is configured with this extension
    Class<?> recordReaderClass = conf.getClass("SpatialInputFormat." + extension + ".recordreader",
            SpatialRecordReader3.class);
    try {
        return (RecordReader<K, Iterable<V>>) recordReaderClass.newInstance();
    } catch (InstantiationException e) {
    } catch (IllegalAccessException e) {
    }
    // Use the default SpatialRecordReader if none of the above worked
    return (RecordReader) new SpatialRecordReader3<V>();
}
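
The lookup near the end of this method means a custom record reader can be bound to a file extension through the job configuration. A minimal sketch of such a binding follows; the "csv" extension is chosen arbitrarily, and LineRecordReader merely stands in for a real spatial reader so the sketch compiles:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;

// Hypothetical binding of a reader class to the "csv" extension; createRecordReader()
// looks up "SpatialInputFormat.<extension>.recordreader" and instantiates the class it finds.
public class SpatialReaderBinding {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "spatial-read");
        job.getConfiguration().setClass("SpatialInputFormat.csv.recordreader",
                LineRecordReader.class, RecordReader.class);
    }
}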

From source file:edu.umn.cs.spatialHadoop.mapreduce.SpatialRecordReader3.java

License:Open Source License

@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    Configuration conf = context != null ? context.getConfiguration() : new Configuration();
    if (context != null && context instanceof MapContext)
        inputRecordsCounter = ((MapContext) context).getCounter(Task.Counter.MAP_INPUT_RECORDS);
    initialize(split, conf);
}

From source file:edu.umn.cs.spatialHadoop.nasa.HDFRecordReader.java

License:Open Source License

@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    Configuration conf = context.getConfiguration();
    initialize(split, conf);
}

From source file:edu.umn.cs.spatialHadoop.visualization.CanvasOutputFormat.java

License:Open Source License

@Override
public RecordWriter<Object, Canvas> getRecordWriter(TaskAttemptContext task)
        throws IOException, InterruptedException {
    Path file = getDefaultWorkFile(task, "");
    FileSystem fs = file.getFileSystem(task.getConfiguration());
    return new CanvasRecordWriter(fs, file, task);
}