Example usage for org.apache.hadoop.mapreduce RecordReader subclass-usage

List of usage examples for subclasses of org.apache.hadoop.mapreduce RecordReader

Introduction

On this page you can find example usages of subclasses of org.apache.hadoop.mapreduce RecordReader.

Usage

From source file org.apache.druid.indexer.hadoop.DatasourceRecordReader.java

public class DatasourceRecordReader extends RecordReader<NullWritable, InputRow> {
    private static final Logger logger = new Logger(DatasourceRecordReader.class);

    private DatasourceIngestionSpec spec;
    private IngestSegmentFirehose firehose;

From source file org.apache.giraph.bsp.BspRecordReader.java

/**
 * Only returns a single key-value pair so that the map() can run.
 */
class BspRecordReader extends RecordReader<Text, Text> {
    /** Singular key object */
    private static final Text ONLY_KEY = new Text("only key");

From source file org.apache.gora.mapreduce.GoraRecordReader.java

/**
 * An adapter for Result to Hadoop RecordReader.
 */
public class GoraRecordReader<K, T extends PersistentBase> extends RecordReader<K, T> {
    public static final Logger LOG = LoggerFactory.getLogger(GoraRecordReader.class);

From source file org.apache.hcatalog.mapreduce.HCatRecordReader.java

/** The HCat wrapper for the underlying RecordReader,
 * this ensures that the initialize on
 * the underlying record reader is done with the underlying split,
 * not with HCatSplit.
 */
class HCatRecordReader extends RecordReader<WritableComparable, HCatRecord> {

From source file org.apache.hcatalog.rcfile.RCFileMapReduceRecordReader.java

public class RCFileMapReduceRecordReader<K extends LongWritable, V extends BytesRefArrayWritable>
        extends RecordReader<LongWritable, BytesRefArrayWritable> {

    private Reader in;
    private long start;
    private long end;

From source file org.apache.hcatalog.templeton.tool.NullRecordReader.java

/**
 * An empty record reader.
 */
public class NullRecordReader extends RecordReader<NullWritable, NullWritable> {
    @Override
    public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException {

From source file org.apache.hive.hcatalog.mapreduce.HCatRecordReader.java

/** The HCat wrapper for the underlying RecordReader,
 * this ensures that the initialize on
 * the underlying record reader is done with the underlying split,
 * not with HCatSplit.
 */
class HCatRecordReader extends RecordReader<WritableComparable, HCatRecord> {

From source file org.apache.hive.hcatalog.rcfile.RCFileMapReduceRecordReader.java

public class RCFileMapReduceRecordReader<K extends LongWritable, V extends BytesRefArrayWritable>
        extends RecordReader<LongWritable, BytesRefArrayWritable> {

    private Reader in;
    private long start;
    private long end;

From source file org.apache.hive.hcatalog.templeton.tool.NullRecordReader.java

/**
 * An empty record reader.
 */
public class NullRecordReader extends RecordReader<NullWritable, NullWritable> {
    @Override
    public void initialize(InputSplit genericSplit, TaskAttemptContext context) throws IOException {

From source file org.apache.jena.grande.mapreduce.io.QuadRecordReader.java

public class QuadRecordReader extends RecordReader<LongWritable, QuadWritable> {

    private static final Logger log = LoggerFactory.getLogger(QuadRecordReader.class);

    public static final String MAX_LINE_LENGTH = "mapreduce.input.linerecordreader.line.maxlength";