Example usage for org.apache.hadoop.mapreduce RecordReader subclass-usage

List of usage examples for org.apache.hadoop.mapreduce RecordReader subclass-usage

Introduction

On this page you can find usage examples for org.apache.hadoop.mapreduce RecordReader, i.e. classes from real projects that subclass RecordReader.

Usage
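
Every class below fills in the same six abstract methods of org.apache.hadoop.mapreduce.RecordReader: initialize, nextKeyValue, getCurrentKey, getCurrentValue, getProgress, and close. As a point of reference before the excerpts, here is a minimal, hedged sketch of a line-oriented subclass (all names are ours, and it skips the boundary-straddling-line handling a production reader such as LineRecordReader needs):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.util.LineReader;

public class SketchLineRecordReader extends RecordReader<LongWritable, Text> {
    private LineReader in;
    private long start, pos, end;
    private final LongWritable key = new LongWritable();
    private final Text value = new Text();

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException {
        FileSplit fileSplit = (FileSplit) split;
        Configuration conf = context.getConfiguration();
        start = fileSplit.getStart();
        end = start + fileSplit.getLength();
        Path file = fileSplit.getPath();
        FSDataInputStream stream = file.getFileSystem(conf).open(file);
        stream.seek(start);                // jump to this task's slice of the file
        in = new LineReader(stream, conf);
        pos = start;
    }

    @Override
    public boolean nextKeyValue() throws IOException {
        if (pos >= end) {
            return false;                  // past the split boundary: no more records
        }
        key.set(pos);                      // key = byte offset of the line
        int consumed = in.readLine(value); // value = the line itself
        pos += consumed;
        return consumed != 0;
    }

    @Override public LongWritable getCurrentKey() { return key; }
    @Override public Text getCurrentValue() { return value; }

    @Override
    public float getProgress() {
        return end == start ? 1.0f
                : Math.min(1.0f, (pos - start) / (float) (end - start));
    }

    @Override
    public void close() throws IOException {
        if (in != null) {
            in.close();                    // also closes the underlying stream
        }
    }
}

The framework instantiates such a reader via its InputFormat's createRecordReader, calls initialize once with the split, then loops on nextKeyValue.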

From source file co.cask.hydrator.plugin.batch.source.XMLRecordReader.java

/**
 * XMLRecordReader reads through a given XML document and outputs XML blocks according to the specified node path.
 */
public class XMLRecordReader extends RecordReader<LongWritable, Map<String, String>> {
    private static final Logger LOG = LoggerFactory.getLogger(XMLRecordReader.class);
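
The excerpt stops at the logger, but the class comment implies a scan for elements on a configured node path. A hypothetical StAX helper for that idea (not from the Hydrator source; class and method names are ours):

import javax.xml.stream.XMLStreamConstants;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;

public final class XmlNodeSeeker {
    // Hypothetical helper: advance a StAX reader to the next element whose
    // local name matches the node we want to emit as one record; returns
    // false once the document is exhausted.
    public static boolean seekToElement(XMLStreamReader reader, String nodeName)
            throws XMLStreamException {
        while (reader.hasNext()) {
            if (reader.next() == XMLStreamConstants.START_ELEMENT
                    && reader.getLocalName().equals(nodeName)) {
                return true;
            }
        }
        return false;
    }
}

A nextKeyValue() built on this would copy the matched element's subtree into the Map<String, String> value before returning true.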

From source file co.cask.hydrator.plugin.batchSource.KafkaRecordReader.java

/**
 * Kafka Record Reader to be used by {@link KafkaInputFormat}.
 */
public class KafkaRecordReader extends RecordReader<KafkaKey, KafkaMessage> {

    private KafkaSplit split;
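
Whatever the key and value types (KafkaKey and KafkaMessage here), the framework drives every reader the same way. A simplified sketch of that per-split driver loop, written as a self-contained generic helper rather than the actual MapReduce internals:

import java.io.IOException;

import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public final class ReaderDriver {
    // Simplified version of the per-split loop the framework runs:
    // initialize once, pull records until nextKeyValue() says false, close.
    public static <K, V> long drive(RecordReader<K, V> reader, InputSplit split,
            TaskAttemptContext context) throws IOException, InterruptedException {
        long records = 0;
        reader.initialize(split, context);
        try {
            while (reader.nextKeyValue()) {
                K key = reader.getCurrentKey();
                V value = reader.getCurrentValue();
                records++;             // a real mapper would consume (key, value) here
            }
        } finally {
            reader.close();
        }
        return records;
    }
}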

From source file co.nubetech.apache.hadoop.DBRecordReader.java

/**
 * A RecordReader that reads records from a SQL table. Emits LongWritables
 * containing the record number as key and DBWritables as value.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
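
The excerpt ends at the annotations, but the comment pins down the pattern: a monotonically increasing LongWritable key plus a DBWritable value read off a JDBC cursor. A hedged sketch of that nextKeyValue logic (field names and the abstract createValue hook are ours, not the nubetech code):

import java.io.IOException;
import java.sql.ResultSet;
import java.sql.SQLException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.lib.db.DBWritable;

public abstract class SketchDBRecordReader<T extends DBWritable>
        extends RecordReader<LongWritable, T> {

    private ResultSet results;          // assumed to be opened in initialize()
    private long pos = 0;
    private final LongWritable key = new LongWritable();
    private T value;

    protected abstract T createValue(); // e.g. ReflectionUtils.newInstance(valueClass, conf)

    protected void setResultSet(ResultSet results) {
        this.results = results;
    }

    @Override
    public boolean nextKeyValue() throws IOException {
        try {
            if (!results.next()) {
                return false;
            }
            if (value == null) {
                value = createValue();
            }
            key.set(pos++);             // record number, not a byte offset
            value.readFields(results);  // the DBWritable populates itself from the row
            return true;
        } catch (SQLException e) {
            throw new IOException(e);
        }
    }

    @Override public LongWritable getCurrentKey() { return key; }
    @Override public T getCurrentValue() { return value; }
}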

From source file co.nubetech.hiho.dedup.DelimitedLineRecordReader.java

public class DelimitedLineRecordReader extends RecordReader<Text, Text> {

    static final Logger logger = Logger.getLogger(DelimitedLineRecordReader.class);

    private String delimiter;
    private int column;
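
The delimiter and column fields suggest the reader keys each line on one delimited column. A tiny hypothetical helper showing that extraction (not taken from the hiho source; column is assumed 0-based):

import java.util.regex.Pattern;

import org.apache.hadoop.io.Text;

public final class DelimitedKeyExtractor {
    // Hypothetical: pick one delimited column of a line as the record key,
    // mirroring the delimiter/column fields in the excerpt above.
    public static Text extractColumn(String line, String delimiter, int column) {
        String[] parts = line.split(Pattern.quote(delimiter), -1);
        return new Text(column < parts.length ? parts[column] : "");
    }
}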

From source file co.nubetech.hiho.mapreduce.lib.db.apache.DBRecordReader.java

/**
 * A RecordReader that reads records from a SQL table.
 * Emits LongWritables containing the record number as 
 * key and DBWritables as value.  
 */
@InterfaceAudience.Public
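
This is the same Apache-derived DBRecordReader pattern as the co.nubetech.apache.hadoop copy above. One detail specific to cursor-backed readers is progress reporting, since there is no byte offset to compare; a hedged sketch of the usual approach (names are ours):

public final class CursorProgress {
    // Records-read over expected row count, clamped to 1.0; a cursor has
    // no byte offset, so this stands in for the usual byte-ratio progress.
    public static float progress(long rowsRead, long totalRows) {
        return totalRows <= 0 ? 1.0f : Math.min(1.0f, rowsRead / (float) totalRows);
    }
}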

From source file co.nubetech.hiho.mapreduce.lib.input.FileStreamRecordReader.java

public class FileStreamRecordReader extends RecordReader<Text, FSDataInputStream> {
    private FileSplit split;
    private TaskAttemptContext context;
    private FSDataInputStream stream;
    private boolean isRead = false;
    private String fileName;
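
The isRead flag above is the classic one-record-per-file idiom: the open stream itself is the value, handed out exactly once. A hedged, self-contained sketch of how such a reader typically looks (our names; the real class may differ in details):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class SketchFileStreamRecordReader extends RecordReader<Text, FSDataInputStream> {
    private FSDataInputStream stream;
    private Text fileName;
    private boolean isRead = false;

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException {
        FileSplit fileSplit = (FileSplit) split;
        Path path = fileSplit.getPath();
        Configuration conf = context.getConfiguration();
        stream = path.getFileSystem(conf).open(path);
        fileName = new Text(path.getName());
    }

    @Override
    public boolean nextKeyValue() {
        if (isRead) {
            return false;               // the single record was already handed out
        }
        isRead = true;
        return true;
    }

    @Override public Text getCurrentKey() { return fileName; }
    @Override public FSDataInputStream getCurrentValue() { return stream; }
    @Override public float getProgress() { return isRead ? 1.0f : 0.0f; }
    @Override public void close() throws IOException { stream.close(); }
}

The consumer (the mapper) is responsible for reading from the stream; the reader only opens and closes it.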

From source file com.aerospike.hadoop.mapreduce.AerospikeRecordReader.java

public class AerospikeRecordReader extends RecordReader<AerospikeKey, AerospikeRecord>
        implements org.apache.hadoop.mapred.RecordReader<AerospikeKey, AerospikeRecord> {

    private class KeyRecPair {
        public AerospikeKey key;
        public AerospikeRecord rec;
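
The declaration is the interesting part: the class extends the new-API abstract RecordReader and implements the old org.apache.hadoop.mapred.RecordReader interface at the same time, so one reader serves jobs written against either API. A minimal compiling sketch of that dual-API trick, using stock Writable types instead of AerospikeKey/AerospikeRecord and emitting a single dummy record:

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;

public class DualApiReader
        extends org.apache.hadoop.mapreduce.RecordReader<LongWritable, Text>
        implements org.apache.hadoop.mapred.RecordReader<LongWritable, Text> {

    private final LongWritable key = new LongWritable(0);
    private final Text value = new Text("record");
    private boolean done = false;

    // --- new API (org.apache.hadoop.mapreduce) ---
    @Override
    public void initialize(org.apache.hadoop.mapreduce.InputSplit split,
            org.apache.hadoop.mapreduce.TaskAttemptContext context) { }

    @Override
    public boolean nextKeyValue() {
        boolean hasNext = !done;
        done = true;
        return hasNext;
    }

    @Override public LongWritable getCurrentKey() { return key; }
    @Override public Text getCurrentValue() { return value; }

    // --- old API (org.apache.hadoop.mapred) ---
    @Override
    public boolean next(LongWritable k, Text v) {
        if (done) {
            return false;
        }
        k.set(key.get());               // old API fills in caller-owned objects
        v.set(value);
        done = true;
        return true;
    }

    @Override public LongWritable createKey() { return new LongWritable(); }
    @Override public Text createValue() { return new Text(); }
    @Override public long getPos() { return done ? 1 : 0; }

    // --- shared: signatures are compatible across both APIs ---
    @Override public float getProgress() { return done ? 1.0f : 0.0f; }
    @Override public void close() throws IOException { }
}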

From source file com.alectenharmsel.research.WholeBlockRecordReader.java

public class WholeBlockRecordReader extends RecordReader<Text, Text> {
    private FileSplit fileSplit;
    private boolean processed = false;
    private Text currKey, currValue;
    private long start, fileLength;
    private int blockSize;
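
The processed flag plays the same single-record role as isRead above, but here the value is the split's content rather than a stream. A hedged helper showing the usual read-the-whole-split step (assumes the split fits in memory; not the author's code):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public final class WholeSplitReadHelper {
    // Read one split's bytes into a single Text value; a nextKeyValue()
    // built on this runs it once, then flips the processed flag.
    public static Text readSplit(FileSplit split, Configuration conf) throws IOException {
        byte[] contents = new byte[(int) split.getLength()];
        Path file = split.getPath();
        FSDataInputStream in = file.getFileSystem(conf).open(file);
        try {
            in.seek(split.getStart());
            IOUtils.readFully(in, contents, 0, contents.length);
        } finally {
            IOUtils.closeStream(in);
        }
        Text value = new Text();
        value.set(contents, 0, contents.length);
        return value;
    }
}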

From source file com.alexholmes.hadooputils.combine.common.mapreduce.CommonCombineFileRecordReader.java

/**
 * A {@link RecordReader} that works with {@link CombineFileSplit}s generated via
 * {@link org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat}.
 * <p/>
 * All this class really does is coordinate creation of
 * {@link RecordReader}s for each split contained within the
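
The coordination the comment describes usually amounts to carving per-file FileSplits out of the CombineFileSplit and standing up a fresh delegate reader for each. A hedged sketch of that slicing step (names are ours):

import java.io.IOException;

import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public final class CombineSplitSlicer {
    // Carve the i-th file out of a CombineFileSplit so a plain per-file
    // reader can be created for it; advancing i when each delegate reader
    // is exhausted is the "coordination" described above.
    public static FileSplit slice(CombineFileSplit combined, int i) throws IOException {
        return new FileSplit(combined.getPath(i), combined.getOffset(i),
                combined.getLength(i), combined.getLocations());
    }
}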

From source file com.aliyun.openservices.tablestore.hadoop.TableStoreRecordReader.java

public class TableStoreRecordReader extends RecordReader<PrimaryKeyWritable, RowWritable> {
    private static final Logger logger = LoggerFactory.getLogger(TableStoreRecordReader.class);

    private SyncClient ots;
    private RangeRowQueryCriteria scan;
    private PrimaryKey currentKey;
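
A scan-style reader like this one typically wraps a paginated range query and surfaces each returned row as a record. A generic, hypothetical sketch of that shape, with a plain java.util.Iterator standing in for the TableStore range query:

import java.util.Iterator;

import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public abstract class IteratorRecordReader<K, V> extends RecordReader<K, V> {
    private Iterator<V> rows;
    private K currentKey;
    private V currentValue;

    // Subclasses open the scan (e.g. a range query) and extract row keys.
    protected abstract Iterator<V> openScan(InputSplit split, TaskAttemptContext ctx);
    protected abstract K keyOf(V row);  // e.g. the row's primary key

    @Override
    public void initialize(InputSplit split, TaskAttemptContext ctx) {
        rows = openScan(split, ctx);
    }

    @Override
    public boolean nextKeyValue() {
        if (!rows.hasNext()) {
            return false;
        }
        currentValue = rows.next();
        currentKey = keyOf(currentValue);
        return true;
    }

    @Override public K getCurrentKey() { return currentKey; }
    @Override public V getCurrentValue() { return currentValue; }
    @Override public float getProgress() { return 0.0f; } // unknown for an open-ended scan
    @Override public void close() { }
}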