Example usage for org.apache.hadoop.mapreduce RecordReader subclass-usage

List of usage examples for org.apache.hadoop.mapreduce RecordReader subclass-usage

Introduction

On this page you can find example usages of org.apache.hadoop.mapreduce RecordReader subclasses.

Usage

From source file org.apache.cassandra.hadoop.cql3.CqlRecordReader.java

/**
 * <p>
 * CqlRecordReader reads the rows returned from the CQL query.
 * It uses CQL auto-paging.
 * </p>
 * <p>

From source file org.apache.cassandra.hadoop2.ColumnFamilyRecordReader.java

public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap<ByteBuffer, Column>>
        implements org.apache.hadoop.mapred.RecordReader<ByteBuffer, SortedMap<ByteBuffer, Column>> {

    private static final Logger logger = LoggerFactory.getLogger(ColumnFamilyRecordReader.class);

    public static final int CASSANDRA_HADOOP_MAX_KEY_SIZE_DEFAULT = 8192;

From source file org.apache.cassandra.hadoop2.cql3.CqlPagingRecordReader.java

/**
 * Hadoop RecordReader that reads the values returned from the CQL query. It uses CQL key
 * range queries to page through the wide rows.
 * <p/>
 * Returns List<IColumn> as key columns
 * <p/>

From source file org.apache.cassandra.hadoop2.multiquery.MultiQueryRecordReader.java

/**
 * Hadoop RecordReader to read values returned from a CQL query for processing in Hadoop.
 *
 * This class leverages the DataStax Cassandra Java driver's automatic-paging capabilities to
 * simplify its code.
 *

From source file org.apache.crunch.impl.mr.run.CrunchRecordReader.java

class CrunchRecordReader<K, V> extends RecordReader<K, V> {

    private RecordReader<K, V> curReader;
    private CrunchInputSplit crunchSplit;
    private CombineFileSplit combineFileSplit;
    private TaskAttemptContext context;

From source file org.apache.crunch.io.text.csv.CSVRecordReader.java

/**
 * An extension of {@link RecordReader} used to intelligently read CSV files
 */
public class CSVRecordReader extends RecordReader<LongWritable, Text> {
    private static final Logger LOGGER = LoggerFactory.getLogger(CSVRecordReader.class);
    private long start;

From source file org.apache.crunch.kafka.inputformat.KafkaRecordReader.java

/**
 * A {@link RecordReader} for pulling data from Kafka.
 * @param <K> the key of the records from Kafka
 * @param <V> the value of the records from Kafka
 */
public class KafkaRecordReader<K, V> extends RecordReader<K, V> {

From source file org.apache.crunch.kafka.record.KafkaRecordReader.java

/**
 * A {@link RecordReader} for pulling data from Kafka.
 *
 * @param <K> the key of the records from Kafka
 * @param <V> the value of the records from Kafka
 */

From source file org.apache.crunch.types.avro.AvroRecordReader.java

/** A {@link RecordReader} for Avro data files. */
class AvroRecordReader<T> extends RecordReader<AvroWrapper<T>, NullWritable> {

    private FileReader<T> reader;
    private long start;
    private long end;

From source file org.apache.distributedlog.mapreduce.LogSegmentReader.java

/**
 * Record Reader to read from a log segment split
 */
class LogSegmentReader extends RecordReader<DLSN, LogRecordWithDLSN> {

    final String streamName;