Example usage for org.apache.hadoop.mapreduce RecordReader subclass-usage

Introduction

On this page you can find example usage of subclassing org.apache.hadoop.mapreduce.RecordReader. A minimal illustrative subclass is sketched below, followed by excerpts from real source files.
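
Every RecordReader subclass implements the same contract: initialize, nextKeyValue, getCurrentKey, getCurrentValue, getProgress, and close. As a baseline, here is a minimal sketch (not taken from any of the files below) that emits a single key/value pair per split:

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Minimal illustrative RecordReader: produces exactly one record per split.
public class SingleRecordReader extends RecordReader<LongWritable, Text> {

    private final LongWritable key = new LongWritable(0);
    private final Text value = new Text("example");
    private boolean emitted = false;

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
        // Real readers open the file, index, or connection described by the split here.
    }

    @Override
    public boolean nextKeyValue() {
        if (emitted) {
            return false; // no more records in this split
        }
        emitted = true;
        return true;
    }

    @Override
    public LongWritable getCurrentKey() {
        return key;
    }

    @Override
    public Text getCurrentValue() {
        return value;
    }

    @Override
    public float getProgress() {
        return emitted ? 1.0f : 0.0f;
    }

    @Override
    public void close() throws IOException {
        // Nothing to release in this sketch.
    }
}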

Usage

From source file diamondmapreduce.NLineRecordReader.java

public class NLineRecordReader extends RecordReader<LongWritable, Text> {

    //    private final int NLINESTOPROCESS = 45000;
    private final int NLINESTOPROCESS = 10000;
    private LineReader in;
    private LongWritable key;
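
The rest of the file is not shown, but a typical nextKeyValue for an N-line reader concatenates up to NLINESTOPROCESS lines into one value. A hedged sketch of that method, assuming the class also keeps a reusable Text value field, a pos byte-offset field, and a NEWLINE constant (none of which appear in the excerpt):

    @Override
    public boolean nextKeyValue() throws IOException {
        if (key == null) {
            key = new LongWritable();
        }
        key.set(pos); // pos: assumed field tracking the current byte offset
        value.clear(); // value: assumed reusable Text field
        Text line = new Text();
        int linesRead = 0;
        while (linesRead < NLINESTOPROCESS) {
            int bytes = in.readLine(line); // LineReader.readLine returns bytes consumed
            if (bytes == 0) {
                break; // end of stream
            }
            pos += bytes;
            value.append(line.getBytes(), 0, line.getLength());
            value.append(NEWLINE, 0, 1); // NEWLINE: assumed byte[] {'\n'} constant
            linesRead++;
        }
        return linesRead > 0;
    }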

From source file dk.aau.cs.cloudetl.io.SequenceIndexFileReader.java

/** A {@link RecordReader} for {@link SequenceFile}s. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class SequenceIndexFileReader<K, V> extends RecordReader<K, V> {
    private SequenceFile.Reader in;
    private long start;
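
This mirrors Hadoop's own SequenceFileRecordReader, which advances by asking the wrapped SequenceFile.Reader for the next key and value. A sketch of that pattern; the key, value, and end fields are assumed continuations of the excerpt:

    @SuppressWarnings("unchecked")
    @Override
    public boolean nextKeyValue() throws IOException {
        long pos = in.getPosition();
        key = (K) in.next(key); // returns null once the file is exhausted
        if (key == null || (pos >= end && in.syncSeen())) {
            return false; // crossed the end of this split
        }
        value = (V) in.getCurrentValue(value);
        return true;
    }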

From source file edu.american.student.redis.hadoop.RedisBigTableRecordReader.java

public class RedisBigTableRecordReader extends RecordReader<RedisBigTableKey, Text> {
    private Iterator<Entry<RedisBigTableKey, byte[]>> keyValueMapIter = null;
    private Entry<RedisBigTableKey, byte[]> currentEntry = null;
    private RedisBigTableKey key = null;
    private Text value = new Text();
    private float totalKVs = 0;
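
With the iterator and counters above, advancing and reporting progress reduce to a few lines. A sketch; the processedKVs counter is an assumed field paired with totalKVs:

    @Override
    public boolean nextKeyValue() {
        if (keyValueMapIter == null || !keyValueMapIter.hasNext()) {
            return false;
        }
        currentEntry = keyValueMapIter.next();
        key = currentEntry.getKey();
        value.set(currentEntry.getValue()); // Text.set(byte[]) copies the raw bytes
        processedKVs++; // assumed float field, incremented per record
        return true;
    }

    @Override
    public float getProgress() {
        return totalKVs == 0 ? 1.0f : processedKVs / totalKVs;
    }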

From source file edu.clu.cs.TaxiTripRecordReader.java

public class TaxiTripRecordReader extends RecordReader<LongWritable, TripDataTuple> {

    private LineRecordReader m_lineReader = new LineRecordReader();
    private TripDataTuple m_lineValue = new TripDataTuple();
    private SimpleDateFormat m_formatter = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
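
A reader like this usually delegates splitting and line handling to the embedded LineRecordReader and only adds parsing. A sketch, where TripDataTuple.parse is a hypothetical method that fills the tuple from a CSV line using the date formatter:

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
        m_lineReader.initialize(split, context);
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        if (!m_lineReader.nextKeyValue()) {
            return false;
        }
        Text line = m_lineReader.getCurrentValue();
        m_lineValue.parse(line.toString(), m_formatter); // parse(...) is hypothetical
        return true;
    }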

From source file edu.gslis.streamcorpus.ThriftRecordReader.java

/**
 * Combinable record reader for thrift files. Since the thrift/chunk
 * files used in the KBA streamcorpus tend to be smaller than the
 * default HDFS block size, we use the CombineFileSplit paradigm
 * to combine multiple compressed thrift files and reduce the number
 * of mappers required for job execution.
 */
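
The input-format side of this pattern is short. A sketch of wiring a per-file reader into CombineFileInputFormat, assuming the reader is declared as RecordReader<Text, Text> and has the (CombineFileSplit, TaskAttemptContext, Integer) constructor the combine machinery requires; the key/value types here are placeholders:

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.CombineFileRecordReader;
import org.apache.hadoop.mapreduce.lib.input.CombineFileSplit;

// Sketch: pack many small thrift files into each split, one reader per file.
public class ThriftFileInputFormat extends CombineFileInputFormat<Text, Text> {

    @Override
    public RecordReader<Text, Text> createRecordReader(InputSplit split, TaskAttemptContext context)
            throws IOException {
        // CombineFileRecordReader instantiates the per-file reader once for each
        // file in the split, via reflection.
        return new CombineFileRecordReader<Text, Text>((CombineFileSplit) split, context,
                ThriftRecordReader.class);
    }

    @Override
    protected boolean isSplitable(JobContext context, Path file) {
        return false; // compressed thrift chunks are read whole
    }
}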

From source file edu.indiana.d2i.htrc.io.dataapi.IDRecorderReader.java

public class IDRecorderReader extends RecordReader<Text, Text> {
    private static final Log logger = LogFactory.getLog(IDRecorderReader.class);

    private Configuration conf = null;
    private int maxIdRetrieved = 0;
    private String delimitor = "";
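
Fields like these are normally populated in initialize from the job configuration. A sketch; the property names are invented for illustration:

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
        conf = context.getConfiguration();
        maxIdRetrieved = conf.getInt("htrc.id.max.retrieved", 100); // property name invented
        delimitor = conf.get("htrc.id.delimiter", "|"); // property name invented
    }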

From source file edu.indiana.d2i.htrc.io.index.lucene.LuceneRecordReader.java

public class LuceneRecordReader extends RecordReader<Text, VectorWritable> {

    private static final Log logger = LogFactory.getLog(LuceneRecordReader.class);

    private IDInputSplit split = null;
    private Iterator<String> iditerator = null;

From source file edu.indiana.d2i.htrc.io.index.solr.SolrRecordReader.java

public class SolrRecordReader extends RecordReader<Text, VectorWritable> {

    private static final Log logger = LogFactory.getLog(SolrRecordReader.class);

    private IDInputSplit split = null;
    private Iterator<String> iditerator = null;
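
This reader and the Lucene one above share the same skeleton: walk the iterator of volume IDs from the IDInputSplit and look each one up in the index. A sketch, with a hypothetical fetchVector helper and assumed key/value fields:

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        if (iditerator == null || !iditerator.hasNext()) {
            return false;
        }
        String id = iditerator.next();
        key = new Text(id); // key: assumed Text field
        value = fetchVector(id); // hypothetical: builds a VectorWritable from the index
        return true;
    }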

From source file edu.iu.common.FileRecordReader.java

public class FileRecordReader extends RecordReader<String, String> {
    private Path path;
    private boolean done = false;

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
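        // Sketch of the body (the excerpt ends at the signature): a whole-file
        // reader only needs to remember which file this split covers. The cast
        // to FileSplit is an assumption about the split type.
        path = ((FileSplit) split).getPath();
    }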

From source file edu.iu.common.MultiFileRecordReader.java

public class MultiFileRecordReader extends RecordReader<String, String> {
    private List<Path> pathList;
    private int progress;

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
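        // Sketch of the body (the excerpt ends at the signature): collect every
        // file path covered by this split. The CombineFileSplit cast is an
        // assumption about the split type.
        CombineFileSplit combineSplit = (CombineFileSplit) split;
        pathList = new ArrayList<>(Arrays.asList(combineSplit.getPaths()));
        progress = 0;
    }

    @Override
    public float getProgress() {
        // Fraction of files already consumed; progress is assumed to count
        // finished files.
        return (pathList == null || pathList.isEmpty()) ? 1.0f : (float) progress / pathList.size();
    }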