Example usage for org.apache.hadoop.mapreduce RecordReader (subclass usage)


Introduction

On this page you can find examples of how the org.apache.hadoop.mapreduce RecordReader class is subclassed in open source projects.
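
Every subclass on this page fills in the same contract: initialize, nextKeyValue, getCurrentKey, getCurrentValue, getProgress, and close. As a point of reference, here is a minimal self-contained sketch; the synthetic record source (the SyntheticRecordReader class and its TOTAL count) is invented for illustration.

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

/** Minimal RecordReader sketch: emits a fixed number of synthetic records. */
public class SyntheticRecordReader extends RecordReader<LongWritable, Text> {
    private static final int TOTAL = 10;          // invented record count
    private final LongWritable key = new LongWritable();
    private final Text value = new Text();
    private int emitted = 0;

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) {
        // A real reader would open the file or range described by the split here.
    }

    @Override
    public boolean nextKeyValue() {
        if (emitted >= TOTAL) {
            return false;                         // no more records
        }
        key.set(emitted);
        value.set("record-" + emitted);
        emitted++;
        return true;
    }

    @Override
    public LongWritable getCurrentKey() { return key; }

    @Override
    public Text getCurrentValue() { return value; }

    @Override
    public float getProgress() { return emitted / (float) TOTAL; }

    @Override
    public void close() throws IOException {
        // Release any streams opened in initialize().
    }
}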

Usage

From source file org.apache.avro.mapreduce.AvroRecordReaderBase.java

/**
 * Abstract base class for <code>RecordReader</code>s that read Avro container files.
 *
 * @param <K> The type of key the record reader should generate.
 * @param <V> The type of value the record reader should generate.
 * @param <T> The type of the entries within the Avro container file being read.
 */
public abstract class AvroRecordReaderBase<K, V, T> extends RecordReader<K, V> {
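
A base class like this ultimately wraps Avro's container-file reader. The following standalone sketch shows that underlying mechanism with the generic Avro API; the AvroScanSketch class is illustrative, and split handling is reduced to a comment.

import java.io.IOException;
import org.apache.avro.file.DataFileReader;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.mapred.FsInput;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

// Sketch: the core of what an Avro container-file record reader does.
public class AvroScanSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path path = new Path(args[0]);            // path to an .avro container file
        try (DataFileReader<GenericRecord> reader =
                 new DataFileReader<>(new FsInput(path, conf),
                                      new GenericDatumReader<GenericRecord>())) {
            // A RecordReader would call sync(start) and check pastSync(end)
            // to honor split boundaries; here we simply scan the whole file.
            while (reader.hasNext()) {
                GenericRecord record = reader.next();
                System.out.println(record);
            }
        }
    }
}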

From source file org.apache.ben.FileCleaningRecordReader.java

/**
 * Treats keys as offsets in the file and values as lines.
 */

public class FileCleaningRecordReader extends RecordReader<LongWritable, Text> {
    private static final Log LOG = LogFactory.getLog(FileCleaningRecordReader.class);
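
The offset-as-key, line-as-value convention above is the classic text-input pattern. Here is a rough standalone sketch of the idea using Hadoop's LineReader; the OffsetLineScan class and its file-opening details are illustrative.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;

// Sketch of the offset/line pattern used by readers like the one above.
public class OffsetLineScan {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path path = new Path(args[0]);
        FSDataInputStream in = FileSystem.get(conf).open(path);
        LineReader reader = new LineReader(in, conf);
        LongWritable key = new LongWritable();
        Text value = new Text();
        long pos = 0;
        int consumed;
        while ((consumed = reader.readLine(value)) > 0) {
            key.set(pos);                 // key = byte offset where the line starts
            pos += consumed;              // advance past the line and its terminator
            System.out.println(key + "\t" + value);
        }
        reader.close();
    }
}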

From source file org.apache.blur.mapreduce.lib.BlurRecordReader.java

public abstract class BlurRecordReader extends RecordReader<Text, BlurRecord> {

    //  private IndexReader reader;
    //  private Directory directory;
    //  private int startingDocId;
    //  private int endingDocId;

From source file org.apache.carbondata.hadoop.AbstractRecordReader.java

/**
 * This class has all the common methods for the vector-based and row-based readers
 */
public abstract class AbstractRecordReader<T> extends RecordReader<Void, T> {

    protected int rowCount = 0;
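
An abstract base like this typically centralizes bookkeeping, such as the rowCount field above, so each concrete reader only supplies the scan itself. A minimal sketch of the pattern follows; the class and helper names are invented, not Carbon's actual API.

import org.apache.hadoop.mapreduce.RecordReader;

// Sketch: a base reader that centralizes row counting for its subclasses.
public abstract class CountingRecordReader<T> extends RecordReader<Void, T> {
    protected int rowCount = 0;

    /** Subclasses call this each time they materialize rows (invented helper). */
    protected void recordRows(int newRows) {
        rowCount += newRows;
    }

    @Override
    public Void getCurrentKey() {
        return null;                      // key is unused, as in the Carbon readers
    }
}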

From source file org.apache.carbondata.hadoop.CarbonRecordReader.java

/**
 * Reads the data from Carbon store.
 */
public class CarbonRecordReader<T> extends RecordReader<Void, T> {

    private QueryModel queryModel;

From source file org.apache.carbondata.hadoop.stream.StreamRecordReader.java

/**
 * Stream row record reader
 */
public class StreamRecordReader extends RecordReader<Void, Object> {

    // metadata

From source file org.apache.carbondata.spark.vectorreader.VectorizedCarbonRecordReader.java

/**
 * A specialized RecordReader that reads into InternalRows or ColumnarBatches directly using the
 * carbondata column APIs and fills the data directly into columns.
 */
class VectorizedCarbonRecordReader extends RecordReader<Void, Object> {
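
Vectorized readers return one batch of rows, rather than one row, per nextKeyValue() call; the Void key signals that only values carry data. Below is a schematic sketch of that shape, with a plain List standing in for a real columnar batch.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Sketch: a reader whose "value" is a whole batch of rows, not a single row.
public class BatchRecordReader extends RecordReader<Void, List<String>> {
    private final List<List<String>> batches = Arrays.asList(   // stand-in data
            Arrays.asList("r1", "r2"), Arrays.asList("r3", "r4"));
    private int index = -1;

    @Override public void initialize(InputSplit split, TaskAttemptContext ctx) { }

    @Override public boolean nextKeyValue() {
        return ++index < batches.size();          // advance one batch at a time
    }

    @Override public Void getCurrentKey() { return null; }

    @Override public List<String> getCurrentValue() { return batches.get(index); }

    @Override public float getProgress() { return (index + 1) / (float) batches.size(); }

    @Override public void close() { }
}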

From source file org.apache.carbondata.streaming.CarbonStreamRecordReader.java

/**
 * Stream record reader
 */
public class CarbonStreamRecordReader extends RecordReader<Void, Object> {
    // vector reader
    private boolean isVectorReader;

From source file org.apache.cassandra.hadoop.ColumnFamilyRecordReader.java

@Deprecated
public class ColumnFamilyRecordReader
        extends RecordReader<ByteBuffer, SortedMap<ByteBuffer, ColumnFamilyRecordReader.Column>> implements
        org.apache.hadoop.mapred.RecordReader<ByteBuffer, SortedMap<ByteBuffer, ColumnFamilyRecordReader.Column>> {
    private static final Logger logger = LoggerFactory.getLogger(ColumnFamilyRecordReader.class);
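
This class is notable for extending the new-API abstract class while also implementing the old org.apache.hadoop.mapred interface, so one reader serves both job APIs. Here is a stripped-down sketch of that bridging pattern (stubbed data source, invented class name).

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Sketch: one reader that satisfies both the mapreduce and mapred APIs.
public class DualApiRecordReader
        extends org.apache.hadoop.mapreduce.RecordReader<LongWritable, Text>
        implements org.apache.hadoop.mapred.RecordReader<LongWritable, Text> {

    private long pos = 0;
    private final LongWritable key = new LongWritable();
    private final Text value = new Text();

    // --- new (mapreduce) API ---
    @Override public void initialize(InputSplit split, TaskAttemptContext ctx) { }
    @Override public boolean nextKeyValue() { return false; }   // stub: no data source
    @Override public LongWritable getCurrentKey() { return key; }
    @Override public Text getCurrentValue() { return value; }

    // --- old (mapred) API, implemented over the same state ---
    @Override public boolean next(LongWritable k, Text v) throws IOException {
        return false;                                           // stub
    }
    @Override public LongWritable createKey() { return new LongWritable(); }
    @Override public Text createValue() { return new Text(); }
    @Override public long getPos() { return pos; }

    // shared by both APIs
    @Override public float getProgress() { return 0f; }
    @Override public void close() { }
}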

From source file org.apache.cassandra.hadoop.cql3.CqlPagingRecordReader.java

/**
 * Hadoop RecordReader that reads the values returned by the CQL query.
 * It uses CQL key range queries to page through the wide rows.
 * <p/>
 * Returns List<IColumn> as the key columns
 * <p/>
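
The paging idea itself is independent of Cassandra: fetch one page of rows, and when it is exhausted, issue the next range query starting just past the last key seen. A schematic sketch follows, with an invented fetchPage() standing in for the CQL query.

import java.util.ArrayList;
import java.util.List;

// Sketch of key-range paging: each page query resumes after the last key seen.
public class PagingScan {
    static final int PAGE_SIZE = 3;

    // Invented stand-in for a CQL range query: returns up to PAGE_SIZE keys > afterKey.
    static List<Integer> fetchPage(int afterKey) {
        List<Integer> page = new ArrayList<>();
        for (int k = afterKey + 1; k <= 10 && page.size() < PAGE_SIZE; k++) {
            page.add(k);
        }
        return page;
    }

    public static void main(String[] args) {
        int lastKey = 0;
        while (true) {
            List<Integer> page = fetchPage(lastKey);
            if (page.isEmpty()) {
                break;                       // no more rows: stop paging
            }
            for (int key : page) {
                System.out.println("row " + key);
                lastKey = key;               // remember the resume point
            }
        }
    }
}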