Example usage for the org.apache.hadoop.mapred RecordReader interface

Introduction

This page collects example usages of the org.apache.hadoop.mapred RecordReader interface, drawn from a range of open-source projects.
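
Each class listed below implements (or bridges to) this interface. For reference, its method set in the old mapred API is (Javadoc elided):

import java.io.IOException;

public interface RecordReader<K, V> {
    boolean next(K key, V value) throws IOException; // read the next record into reusable holders; false at end of split
    K createKey();                                   // allocate a reusable key holder
    V createValue();                                 // allocate a reusable value holder
    long getPos() throws IOException;                // current position in the input
    void close() throws IOException;
    float getProgress() throws IOException;          // fraction of the split consumed, 0.0 to 1.0
}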

Usage

From source file com.thinkbiganalytics.inputformat.hadoop.mapred.OmnitureDataFileRecordReader.java

/**
 * EscapedLineReader gets around Omniture's pesky escaped tabs and newlines.
 * For more information about the format, please refer to the Omniture documentation at
 * https://marketing.adobe.com/resources/help/en_US/sc/clickstream/analytics_clickstream.pdf.
 */
public class OmnitureDataFileRecordReader implements RecordReader<LongWritable, Text> {
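
The escaped-line handling it mentions can be sketched in isolation (a hypothetical helper, not the project's actual EscapedLineReader): a backslash marks the following byte, typically a tab or newline, as data rather than a delimiter.

import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.io.Text;

// Sketch: read one logical line, treating backslash-escaped tabs/newlines as
// ordinary content bytes instead of field/record delimiters.
static long readEscapedLine(InputStream in, Text line) throws IOException {
    line.clear();
    long bytesRead = 0;
    boolean escaped = false;
    int b;
    while ((b = in.read()) != -1) {
        bytesRead++;
        if (escaped) {                               // previous byte was '\': keep this one verbatim
            line.append(new byte[] { (byte) b }, 0, 1);
            escaped = false;
        } else if (b == '\\') {
            escaped = true;                          // defer: the next byte is literal data
        } else if (b == '\n') {
            break;                                   // an unescaped newline ends the record
        } else {
            line.append(new byte[] { (byte) b }, 0, 1);
        }
    }
    return bytesRead;                                // 0 means end of stream
}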

From source file com.tikal.fuseday.bigdata.FastaRecordReader.java

/**
 * @author Jared Flatow
 * A FASTA record has a header line that is the key, and data lines that are the value
 * >header...
 * data
 * ...
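
A minimal sketch of next() for this record shape (hypothetical fields, assuming an org.apache.hadoop.util.LineReader positioned over the split): the '>' header line becomes the key, and data lines are concatenated into the value until the next header.

private org.apache.hadoop.util.LineReader lines;   // positioned at the split start
private final Text pending = new Text();           // header read ahead on the previous call

public boolean next(Text key, Text value) throws IOException {
    if (pending.getLength() == 0 && lines.readLine(pending) == 0) {
        return false;                               // end of split
    }
    key.set(pending);                               // the ">header..." line is the key
    pending.clear();
    StringBuilder data = new StringBuilder();
    Text line = new Text();
    while (lines.readLine(line) > 0) {
        if (line.getLength() > 0 && line.charAt(0) == '>') {
            pending.set(line);                      // next record's header: stop here
            break;
        }
        data.append(line.toString());               // accumulate sequence data
    }
    value.set(data.toString());
    return true;
}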

From source file com.tomslabs.grid.avro.AvroTextRecordReader.java

public class AvroTextRecordReader<T> implements RecordReader<Text, Text> {

    private FileReader<T> reader;
    private long start;
    private long end;
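
Given those fields, the split-respecting read loop generally follows the shape below (a sketch using Avro's FileReader API; the conversion to Text is only indicated in a comment):

import java.io.IOException;
import org.apache.avro.file.FileReader;

// Sketch: iterate the records of one Avro container-file split [start, end).
// sync() jumps to the first sync marker at or after the split start, and
// pastSync() reports when the reader has crossed the split end.
static <T> long readSplit(FileReader<T> reader, long start, long end) throws IOException {
    long count = 0;
    reader.sync(start);
    while (reader.hasNext() && !reader.pastSync(end)) {
        T datum = reader.next();                    // for a Text,Text reader: key.set(datum.toString())
        count++;
    }
    return count;
}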

From source file com.toshiba.mwcloud.gs.hadoop.mapred.GSRowRecordReader.java

/**
 * GridDB RecordReader class using the GridDB Row object.
 */

From source file com.tuplejump.calliope.hadoop.ColumnFamilyRecordReader.java

public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap<ByteBuffer, Column>>
        implements org.apache.hadoop.mapred.RecordReader<ByteBuffer, SortedMap<ByteBuffer, Column>> {
    private static final Logger logger = LoggerFactory.getLogger(ColumnFamilyRecordReader.class);

    public static final int CASSANDRA_HADOOP_MAX_KEY_SIZE_DEFAULT = 8192;
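
The class above implements both the new (mapreduce) and old (mapred) reader contracts at once. A minimal sketch of that bridge pattern (a hypothetical class, not Calliope's actual code; Text/Text chosen so the key/value holders can be copied simply):

import java.io.IOException;
import org.apache.hadoop.io.Text;

// Sketch of the dual-API bridge: extend the new (mapreduce) abstract reader
// and implement the old (mapred) interface by delegating to it.
public abstract class DualApiTextReader
        extends org.apache.hadoop.mapreduce.RecordReader<Text, Text>
        implements org.apache.hadoop.mapred.RecordReader<Text, Text> {

    @Override
    public boolean next(Text key, Text value) throws IOException {  // old API entry point
        try {
            if (!nextKeyValue()) {                                  // new API does the real work
                return false;
            }
            key.set(getCurrentKey());                               // copy into the caller's holders
            value.set(getCurrentValue());
            return true;
        } catch (InterruptedException e) {
            throw new IOException(e);
        }
    }

    @Override public Text createKey() { return new Text(); }       // old API
    @Override public Text createValue() { return new Text(); }     // old API
}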

From source file com.tuplejump.calliope.hadoop.cql3.CqlPagingRecordReader.java

/**
 * Hadoop RecordReader that reads the values returned by the CQL query.
 * It uses CQL key range queries to page through wide rows.
 * <p/>
 * Returns a List<IColumn> as the key's columns.
 * <p/>
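
The key-range paging it describes boils down to re-issuing the scan past the last key seen. A standalone sketch of that idea (DataStax Java driver 3.x assumed; keyspace, table, and column names are hypothetical):

import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;

// Sketch: page through a wide table by restarting each scan past the token of
// the last partition key returned on the previous page.
static void pageThroughTable(Session session) {
    final int pageSize = 1000;
    Object lastKey = null;
    while (true) {
        Statement page = (lastKey == null)
                ? new SimpleStatement("SELECT id, col FROM ks.tbl LIMIT " + pageSize)
                : new SimpleStatement(
                        "SELECT id, col FROM ks.tbl WHERE token(id) > token(?) LIMIT " + pageSize,
                        lastKey);
        int rows = 0;
        for (Row row : session.execute(page)) {
            lastKey = row.getObject("id");          // remember where this page ended
            rows++;                                 // ...and hand (id, col) to the consumer here
        }
        if (rows < pageSize) {
            break;                                  // a short page means the scan is complete
        }
    }
}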

From source file com.tuplejump.calliope.hadoop.cql3.CqlRecordReader.java

/**
 * CqlRecordReader reads the rows returned by the CQL query.
 * It uses CQL auto-paging.
 * <p/>
 * Returns a Long as the local CQL row key, starting from 0.
 * <p/>
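
A standalone sketch of the auto-paging idea (DataStax Java driver 3.x assumed; the table name is hypothetical): the driver fetches further pages transparently while the result set is iterated, so the reader only numbers rows from 0.

import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;
import com.datastax.driver.core.Statement;

// Sketch: iterate all rows; the driver pages behind the scenes.
static long readAllRows(Session session) {
    Statement scan = new SimpleStatement("SELECT * FROM ks.tbl").setFetchSize(1000);
    long rowKey = 0;                                // local CQL row key, starting from 0
    for (Row row : session.execute(scan)) {
        rowKey++;                                   // iteration may trigger a background page fetch
    }
    return rowKey;
}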

From source file com.uber.hoodie.hadoop.realtime.HoodieRealtimeRecordReader.java

/**
 * Realtime Record Reader which can do compacted (merge-on-read) record reading or
 * unmerged reading (parquet and log files read in parallel) based on job configuration.
 */
public class HoodieRealtimeRecordReader implements RecordReader<NullWritable, ArrayWritable> {

From source file com.uber.hoodie.hadoop.realtime.RealtimeCompactedRecordReader.java

class RealtimeCompactedRecordReader extends AbstractRealtimeRecordReader
        implements RecordReader<NullWritable, ArrayWritable> {

    protected final RecordReader<NullWritable, ArrayWritable> parquetReader;
    private final Map<String, HoodieRecord<? extends HoodieRecordPayload>> deltaRecordMap;
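
Those two fields suggest the shape of the merge step. A standalone sketch of merge-on-read (hypothetical types, not Hudi's API): base-file rows pass through unchanged unless their record key appears in the delta map, in which case the updated row replaces them; a null entry marks a delete.

import java.util.Iterator;
import java.util.Map;

// Sketch: overlay base-file rows with updates from a delta map keyed by record key.
final class MergeOnReadIterator implements Iterator<String[]> {
    private final Iterator<String[]> base;          // rows from the base (parquet) file
    private final Map<String, String[]> delta;      // record key -> updated row; null value = delete
    private final int keyColumn;                    // index of the record-key column
    private String[] nextRow;

    MergeOnReadIterator(Iterator<String[]> base, Map<String, String[]> delta, int keyColumn) {
        this.base = base;
        this.delta = delta;
        this.keyColumn = keyColumn;
        advance();
    }

    private void advance() {
        nextRow = null;
        while (base.hasNext()) {
            String[] row = base.next();
            String key = row[keyColumn];
            if (!delta.containsKey(key)) {
                nextRow = row;                      // untouched base row
                return;
            }
            String[] updated = delta.get(key);
            if (updated != null) {
                nextRow = updated;                  // overlay the merged log-file version
                return;
            }
            // updated == null: the record was deleted, so skip the base row
        }
    }

    @Override public boolean hasNext() { return nextRow != null; }

    @Override public String[] next() {
        String[] result = nextRow;
        advance();
        return result;
    }
}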

From source file com.uber.hoodie.hadoop.realtime.RealtimeUnmergedRecordReader.java

class RealtimeUnmergedRecordReader extends AbstractRealtimeRecordReader
        implements RecordReader<NullWritable, ArrayWritable> {

    // Scanner over the unmerged log records
    private final HoodieUnMergedLogRecordScanner logRecordScanner;
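
By contrast, the unmerged reader performs no key-based reconciliation; base and log records are surfaced as they arrive. A standalone sketch of that parallel producer/consumer shape (hypothetical types, not Hudi's API):

import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;

// Sketch: produce base-file and log-file rows concurrently and consume them
// in arrival order, with no merging.
final class UnmergedReader implements AutoCloseable {
    private static final String EOF = "\u0000EOF";                    // per-producer end marker
    private final BlockingQueue<String> queue = new LinkedBlockingQueue<>(1024);
    private final ExecutorService pool = Executors.newFixedThreadPool(2);
    private int openProducers = 2;

    UnmergedReader(List<String> baseRows, List<String> logRows) {
        pool.submit(() -> produce(baseRows));                         // base-file producer
        pool.submit(() -> produce(logRows));                          // log-file producer
    }

    private void produce(List<String> rows) {
        try {
            for (String row : rows) {
                queue.put(row);
            }
            queue.put(EOF);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }

    // Returns the next row in arrival order, or null once both producers finish.
    String next() throws InterruptedException {
        while (openProducers > 0) {
            String row = queue.take();
            if (EOF.equals(row)) {
                openProducers--;
                continue;
            }
            return row;
        }
        return null;
    }

    @Override
    public void close() {
        pool.shutdownNow();
    }
}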