List of usage examples for org.apache.hadoop.mapreduce RecordReader subclass-usage
From source file com.tomslabs.grid.avro.AvroRecordReader.java
public class AvroRecordReader<T> extends RecordReader<T, Object> {

    private FsInput in;
    private DataFileReader<T> reader;
    private T datum = null;
    private long start;
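For orientation, a minimal sketch of how a reader like this typically advances through an Avro container file. The end field and the use of GenericDatumReader are assumptions for the sketch, not taken from the source; FsInput and DataFileReader are the Avro classes named above.

    // Sketch only: `end` is an assumed field marking the split boundary.
    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException {
        FileSplit fileSplit = (FileSplit) split;
        in = new FsInput(fileSplit.getPath(), context.getConfiguration());
        reader = new DataFileReader<T>(in, new GenericDatumReader<T>());
        start = fileSplit.getStart();
        end = start + fileSplit.getLength();
        reader.sync(start); // jump to the first sync marker at or after the split start
    }

    @Override
    public boolean nextKeyValue() throws IOException {
        // Stop once the reader crosses the first sync marker past this split's
        // end, so adjacent splits don't emit the same records twice.
        if (!reader.hasNext() || reader.pastSync(end)) {
            return false;
        }
        datum = reader.next(datum); // reuse the datum instance to avoid allocation
        return true;
    }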
From source file com.toshiba.mwcloud.gs.hadoop.mapreduce.GSRowRecordReader.java
/**
 * GridDB RecordReader class using GridDB Row object.
 */
From source file com.tuplejump.calliope.hadoop.ColumnFamilyRecordReader.java
public class ColumnFamilyRecordReader extends RecordReader<ByteBuffer, SortedMap<ByteBuffer, Column>>
        implements org.apache.hadoop.mapred.RecordReader<ByteBuffer, SortedMap<ByteBuffer, Column>> {

    private static final Logger logger = LoggerFactory.getLogger(ColumnFamilyRecordReader.class);

    public static final int CASSANDRA_HADOOP_MAX_KEY_SIZE_DEFAULT = 8192;
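A class that extends the new (mapreduce) RecordReader while also implementing the old (mapred) interface has to bridge the old-style next(key, value) call onto the new-style iteration. A hedged sketch of that bridging pattern, simplified to Text keys and values with a hypothetical rows iterator standing in for the Cassandra-specific plumbing:

import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Sketch: one reader satisfying both Hadoop RecordReader APIs. The internal
// `rows` iterator (hypothetical) feeds both entry points.
public class DualApiReader extends org.apache.hadoop.mapreduce.RecordReader<Text, Text>
        implements org.apache.hadoop.mapred.RecordReader<Text, Text> {

    private Iterator<Map.Entry<String, String>> rows; // hypothetical row source
    private final Text currentKey = new Text();
    private final Text currentValue = new Text();

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) {
        // open the underlying source for this split (omitted in the sketch)
    }

    // --- new (mapreduce) API ---
    @Override
    public boolean nextKeyValue() {
        if (rows == null || !rows.hasNext()) return false;
        Map.Entry<String, String> e = rows.next();
        currentKey.set(e.getKey());
        currentValue.set(e.getValue());
        return true;
    }

    @Override public Text getCurrentKey()   { return currentKey; }
    @Override public Text getCurrentValue() { return currentValue; }

    // --- old (mapred) API: delegate to the same iteration, copying into the
    // caller-supplied objects as the old contract requires ---
    @Override
    public boolean next(Text key, Text value) throws IOException {
        if (!nextKeyValue()) return false;
        key.set(currentKey);
        value.set(currentValue);
        return true;
    }

    @Override public Text createKey()   { return new Text(); }
    @Override public Text createValue() { return new Text(); }
    @Override public long getPos()      { return 0L; } // position not tracked in this sketch

    @Override public float getProgress() { return rows != null && rows.hasNext() ? 0.5f : 1.0f; }
    @Override public void close() throws IOException { /* release resources */ }
}

Note that close() and getProgress() can each be a single method because the two APIs declare compatible signatures for them.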
From source file com.tuplejump.calliope.hadoop.cql3.CqlPagingRecordReader.java
/**
 * A Hadoop RecordReader that reads the values returned from the CQL query.
 * It uses a CQL key range query to page through the wide rows.
 * <p/>
 * Returns a List<IColumn> as the key columns.
 * <p/>
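The key-range paging it describes boils down to repeatedly issuing a token-bounded SELECT and resuming from the last partition key seen. A hedged sketch of that loop using the DataStax Java driver (2.x/3.x API); the keyspace, table, and column names are illustrative, not from the source:

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;

// Sketch: page through a wide-row table by token, restarting each page
// from the last key returned, until a page comes back empty.
public class TokenPaging {
    public static void main(String[] args) {
        try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
             Session session = cluster.connect()) {
            Object lastKey = null;
            while (true) {
                ResultSet rs = (lastKey == null)
                        ? session.execute("SELECT k, v FROM ks.cf LIMIT 1000")
                        : session.execute("SELECT k, v FROM ks.cf WHERE token(k) > token(?) LIMIT 1000", lastKey);
                if (rs.isExhausted()) break;        // no more pages
                for (Row row : rs) {
                    lastKey = row.getString("k");   // remember where this page ended
                    // emit (k, v) to the consumer here
                }
            }
        }
    }
}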
From source file com.tuplejump.calliope.hadoop.cql3.CqlRecordReader.java
/**
 * CqlRecordReader reads the rows returned from the CQL query.
 * It uses CQL auto-paging.
 * <p/>
 * Returns a Long as the local CQL row key, starting from 0.
 * <p/>
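With auto-paging, the driver fetches further pages transparently as the ResultSet is iterated, so the reader only needs a running counter for its Long key. A hedged sketch (DataStax driver 2.x/3.x API; the statement text and page size are illustrative):

import java.util.Iterator;
import com.datastax.driver.core.ResultSet;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;

// Sketch: the driver's auto-paging hides page boundaries behind the
// ResultSet iterator; the reader just counts rows for its Long key.
class AutoPagingScan {
    static void scan(Session session) {
        SimpleStatement stmt = new SimpleStatement("SELECT k, v FROM ks.cf");
        stmt.setFetchSize(1000);          // page size, fetched on demand
        ResultSet rs = session.execute(stmt);
        long rowKey = 0;                  // local CQL row key, starts at 0
        Iterator<Row> it = rs.iterator();
        while (it.hasNext()) {
            Row row = it.next();
            // emit (rowKey, row) to the consumer here
            rowKey++;
        }
    }
}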
From source file com.twitter.distributedlog.mapreduce.LogSegmentReader.java
/**
 * Record Reader to read from a log segment split.
 */
class LogSegmentReader extends RecordReader<DLSN, LogRecordWithDLSN> {

    final String streamName;
From source file com.twitter.elephanttwin.retrieval.FilterRecordReader.java
/**
 * A "wrap" RecordReader that filters out <K, V> pairs which don't pass the
 * filter conditions.
 */
public class FilterRecordReader<K, V> extends RecordReader<K, V> {
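The wrapping pattern is straightforward: delegate every call to an inner reader, but loop in nextKeyValue() until a pair passes the predicate. A hedged sketch, with java.util.function.BiPredicate standing in for the real class's filter conditions:

import java.io.IOException;
import java.util.function.BiPredicate;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Sketch of a filtering wrapper: delegates to an inner RecordReader and
// skips <K, V> pairs that fail the supplied predicate.
public class FilteringReader<K, V> extends RecordReader<K, V> {
    private final RecordReader<K, V> inner;
    private final BiPredicate<K, V> filter;

    public FilteringReader(RecordReader<K, V> inner, BiPredicate<K, V> filter) {
        this.inner = inner;
        this.filter = filter;
    }

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context)
            throws IOException, InterruptedException {
        inner.initialize(split, context);
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        // Advance the inner reader until a pair passes, or input is exhausted.
        while (inner.nextKeyValue()) {
            if (filter.test(inner.getCurrentKey(), inner.getCurrentValue())) {
                return true;
            }
        }
        return false;
    }

    @Override public K getCurrentKey() throws IOException, InterruptedException { return inner.getCurrentKey(); }
    @Override public V getCurrentValue() throws IOException, InterruptedException { return inner.getCurrentValue(); }
    @Override public float getProgress() throws IOException, InterruptedException { return inner.getProgress(); }
    @Override public void close() throws IOException { inner.close(); }
}

Because every other method simply delegates, a wrapper like this composes with any inner RecordReader.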
From source file com.twitter.elephanttwin.retrieval.IndexedFilterRecordReader.java
/**
 * IndexedFilterRecordReader works on a FileSplit or an IndexedFileSplit.
 * In the latter case, it goes through a chain of (merged) subblocks and
 * uses FilterRecordReader to do the actual scan and filtering.
 */
public class IndexedFilterRecordReader<K, V> extends RecordReader<K, V> {
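Chaining through subblocks follows a common delegation pattern: keep a queue of splits, and when the current delegate is exhausted, close it and open a reader over the next one. A hedged sketch of that loop; the splits queue, current delegate, context field, and createReaderFor factory are all assumed names for the sketch:

    // Sketch: `splits` is a Queue<InputSplit>, `current` the active delegate
    // reader, `context` the stored TaskAttemptContext, and createReaderFor(...)
    // a hypothetical factory that opens the inner (filtering) reader.
    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        while (true) {
            if (current != null && current.nextKeyValue()) {
                return true;                      // the delegate still has records
            }
            if (current != null) current.close(); // exhausted: move on
            InputSplit next = splits.poll();
            if (next == null) return false;       // no subblocks left
            current = createReaderFor(next, context);
        }
    }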
From source file com.willetinc.hadoop.mapreduce.dynamodb.DynamoDBRecordReader.java
public abstract class DynamoDBRecordReader<T extends DynamoDBKeyWritable> extends RecordReader<LongWritable, T> {

    private Class<T> valueClass;
    private DynamoDBScanInputFormat.DynamoDBInputSplit split;
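Under the hood, a scan-based DynamoDB reader is a paginated Scan: each call returns one page of items plus a LastEvaluatedKey to resume from. A hedged sketch of that loop using the AWS SDK for Java v1; the table name is illustrative and the client configuration is omitted:

import java.util.Map;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB;
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder;
import com.amazonaws.services.dynamodbv2.model.AttributeValue;
import com.amazonaws.services.dynamodbv2.model.ScanRequest;
import com.amazonaws.services.dynamodbv2.model.ScanResult;

// Sketch: page through a DynamoDB table the way a scan-based reader must,
// resuming each page from LastEvaluatedKey until it comes back null.
public class ScanPages {
    public static void main(String[] args) {
        AmazonDynamoDB client = AmazonDynamoDBClientBuilder.defaultClient();
        Map<String, AttributeValue> startKey = null;
        do {
            ScanResult result = client.scan(
                    new ScanRequest().withTableName("my_table").withExclusiveStartKey(startKey));
            for (Map<String, AttributeValue> item : result.getItems()) {
                // emit (rowNumber, item) to the consumer here
            }
            startKey = result.getLastEvaluatedKey();
        } while (startKey != null);
    }
}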
From source file com.wipro.ats.bdre.datagen.mr.RangeRecordReader.java
/**
 * A record reader that will generate a range of numbers.
 */
public class RangeRecordReader extends RecordReader<LongWritable, NullWritable> {

    long startRow;
    long finishedRows;
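Because it synthesizes its input rather than reading a file, the whole reader fits in a few lines. A hedged sketch built from the fields shown above plus an assumed totalRows; in the real class these values would come from a custom InputSplit rather than being hard-coded:

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Sketch of a generator-style reader: no input file, just a counter that
// emits `totalRows` consecutive LongWritable keys starting at `startRow`.
public class RangeReader extends RecordReader<LongWritable, NullWritable> {
    private long startRow;      // first number in this split's range
    private long totalRows;     // how many numbers this split should emit (assumed field)
    private long finishedRows;  // how many have been emitted so far
    private final LongWritable key = new LongWritable();

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) {
        // Hard-coded to keep the sketch self-contained; the real class
        // would read these from its split.
        startRow = 0;
        totalRows = 1000;
        finishedRows = 0;
    }

    @Override
    public boolean nextKeyValue() {
        if (finishedRows >= totalRows) return false;
        key.set(startRow + finishedRows);
        finishedRows++;
        return true;
    }

    @Override public LongWritable getCurrentKey() { return key; }
    @Override public NullWritable getCurrentValue() { return NullWritable.get(); }
    @Override public float getProgress() { return totalRows == 0 ? 1.0f : (float) finishedRows / totalRows; }
    @Override public void close() throws IOException { }
}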