Example usage for org.apache.hadoop.mapreduce RecordReader subclass-usage

Introduction

This page lists usage examples of classes that subclass org.apache.hadoop.mapreduce.RecordReader.
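
Every reader shown below implements the same small contract: a RecordReader consumes one InputSplit and turns it into a stream of key/value pairs for the Mapper via initialize, nextKeyValue, getCurrentKey, getCurrentValue, getProgress and close. The following is a minimal, hypothetical sketch of that contract; the class name and the dummy records are illustrative and not taken from any of the projects listed below.

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

/** Hypothetical skeleton of a RecordReader subclass; emits a fixed number of dummy records. */
public class SkeletonRecordReader extends RecordReader<LongWritable, Text> {
    private static final int TOTAL = 3;

    private final LongWritable key = new LongWritable();
    private final Text value = new Text();
    private int emitted = 0;

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context)
            throws IOException, InterruptedException {
        // A real reader would open its input and seek to the start of the split here.
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        if (emitted >= TOTAL)
            return false;                 // no more records in this split
        key.set(emitted);
        value.set("record-" + emitted);
        emitted++;
        return true;
    }

    @Override
    public LongWritable getCurrentKey() { return key; }

    @Override
    public Text getCurrentValue() { return value; }

    @Override
    public float getProgress() { return emitted / (float) TOTAL; }

    @Override
    public void close() throws IOException {
        // A real reader would close whatever initialize() opened.
    }
}

The framework calls initialize() once per split, then alternates nextKeyValue() with getCurrentKey()/getCurrentValue() until nextKeyValue() returns false.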

Usage

From source file org.seqdoop.hadoop_bam.BCFRecordReader.java

/** See {@link VCFRecordReader} for the meaning of the key. */
public class BCFRecordReader extends RecordReader<LongWritable, VariantContextWritable> {
    private final LongWritable key = new LongWritable();
    private final VariantContextWritable vc = new VariantContextWritable();

    private BCF2Codec codec = new BCF2Codec();

From source file org.seqdoop.hadoop_bam.cli.plugins.chipster.Summarize.java

final class SummarizeRecordReader extends RecordReader<LongWritable, Range> {

    private final RecordReader<LongWritable, SAMRecordWritable> baseRR;

    private final LongWritable key = new LongWritable();
    private final List<Range> ranges = new ArrayList<Range>();

From source file org.seqdoop.hadoop_bam.cli.plugins.chipster.SummarySort.java

final class SortRecordReader extends RecordReader<LongWritable, Text> {

    private final LongWritable key = new LongWritable();

    private final BlockCompressedLineRecordReader lineRR = new BlockCompressedLineRecordReader();

From source file org.seqdoop.hadoop_bam.cli.plugins.Sort.java

final class SortRecordReader extends RecordReader<LongWritable, SAMRecordWritable> {
    private final RecordReader<LongWritable, SAMRecordWritable> baseRR;

    private Configuration conf;

    public SortRecordReader(RecordReader<LongWritable, SAMRecordWritable> rr) {

From source file org.seqdoop.hadoop_bam.SAMRecordReader.java

/** See {@link BAMRecordReader} for the meaning of the key. */
public class SAMRecordReader extends RecordReader<LongWritable, SAMRecordWritable> {
    private LongWritable key = new LongWritable();
    private SAMRecordWritable record = new SAMRecordWritable();

    private FSDataInputStream input;

From source file org.seqdoop.hadoop_bam.VCFRecordReader.java

/** The key is the bitwise OR of the chromosome index in the upper 32 bits
 * and the 0-based leftmost coordinate in the lower.
 *
 * The chromosome index is based on the ordering of the contig lines in the VCF
 * header. If a chromosome name that cannot be found in the contig lines is
 * used, that name is instead hashed to form the upper part of the key.
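
As a purely illustrative aside (this is not code from VCFRecordReader), a key packed this way can be built and taken apart with plain shifts and masks:

/** Illustrative only: pack a contig index and a 0-based position into one long key. */
public class VcfKeyDemo {
    public static void main(String[] args) {
        int chromIdx = 7;         // index of the contig line in the VCF header
        int pos      = 123456;    // 0-based leftmost coordinate
        long key = ((long) chromIdx << 32) | (pos & 0xFFFFFFFFL);

        // Unpacking the key again:
        int decodedChrom = (int) (key >>> 32);        // 7
        int decodedPos   = (int) (key & 0xFFFFFFFFL); // 123456
        System.out.println(decodedChrom + ":" + decodedPos);
    }
}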

From source file org.shaf.core.io.hadoop.WholeFileRecordReader.java

/**
 * The {@code RecordReader} for whole files. The file name becomes the record
 * key and the file body becomes the record value.
 * 
 * @author Mykola Galushka
 */
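
A minimal sketch of this whole-file pattern (hypothetical code, not the org.shaf implementation): the reader slurps the entire split in one pass and emits exactly one record.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

/** Hypothetical whole-file reader: one record per split, key = file name, value = file bytes. */
public class SimpleWholeFileRecordReader extends RecordReader<Text, BytesWritable> {
    private final Text key = new Text();
    private final BytesWritable value = new BytesWritable();
    private FileSplit split;
    private Configuration conf;
    private boolean done = false;

    @Override
    public void initialize(InputSplit genericSplit, TaskAttemptContext context) {
        this.split = (FileSplit) genericSplit;
        this.conf = context.getConfiguration();
    }

    @Override
    public boolean nextKeyValue() throws IOException {
        if (done)
            return false;
        Path path = split.getPath();
        byte[] contents = new byte[(int) split.getLength()];
        FileSystem fs = path.getFileSystem(conf);
        FSDataInputStream in = fs.open(path);
        try {
            IOUtils.readFully(in, contents, 0, contents.length);  // read the whole file at once
        } finally {
            IOUtils.closeStream(in);
        }
        key.set(path.getName());
        value.set(contents, 0, contents.length);
        done = true;
        return true;
    }

    @Override public Text getCurrentKey() { return key; }
    @Override public BytesWritable getCurrentValue() { return value; }
    @Override public float getProgress() { return done ? 1.0f : 0.0f; }
    @Override public void close() { /* nothing stays open between calls */ }
}

In practice a reader like this is paired with an InputFormat whose isSplitable() returns false, so every file arrives as a single split.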

From source file org.slc.sli.aggregation.mapreduce.io.MongoIdRecordReader.java

/**
 * MongoIdRecordReader
 *
 */
public class MongoIdRecordReader extends RecordReader<EmittableKey, BSONWritable> {

From source file org.utils.TarballReader.java

/**
 * TarballReader.
 *
 * Outputs, for each file included in a tarball, a key/value pair where the key
 * is the file name appended with date and time (.DYYMMDD.THHMMSS) and the value
 * is the content of the file.
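
A key in that shape can be produced with a plain date formatter. The snippet below only illustrates the naming scheme described above; the class and the entry name are hypothetical, not code from TarballReader:

import java.text.SimpleDateFormat;
import java.util.Date;

/** Illustrative only: build a ".DYYMMDD.THHMMSS"-stamped key for a tar entry name. */
public class TarEntryKeyDemo {
    public static void main(String[] args) {
        SimpleDateFormat stamp = new SimpleDateFormat("'.D'yyMMdd'.T'HHmmss");
        String entryName = "sample/reads.fastq";                  // hypothetical tar entry name
        String recordKey = entryName + stamp.format(new Date());
        System.out.println(recordKey);  // e.g. sample/reads.fastq.D240131.T093045
    }
}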

From source file org.utils.UnsplittableFileReader.java

/**
 * UnsplittableFileReader.
 *
 * Outputs, for each line of a file, a key/value pair where the key is the file
 * name and line number, and the value is the content of the current line.
 *
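
Several readers on this page (SummarizeRecordReader and both SortRecordReaders above) delegate the actual parsing to an inner RecordReader. Below is a hedged sketch combining that delegation pattern with the key/value scheme described for UnsplittableFileReader, wrapping Hadoop's LineRecordReader; the class name and the "name:number" key format are illustrative assumptions, not the org.utils code.

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;

/** Hypothetical delegating reader: key = "fileName:lineNumber", value = the line itself. */
public class FileLineRecordReader extends RecordReader<Text, Text> {
    private final LineRecordReader lineRR = new LineRecordReader();
    private final Text key = new Text();

    private String fileName;
    private long lineNumber = 0;

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context)
            throws IOException, InterruptedException {
        fileName = ((FileSplit) split).getPath().getName();
        lineRR.initialize(split, context);   // the inner reader does the real work
    }

    @Override
    public boolean nextKeyValue() throws IOException, InterruptedException {
        if (!lineRR.nextKeyValue())
            return false;
        lineNumber++;
        key.set(fileName + ":" + lineNumber);
        return true;
    }

    @Override
    public Text getCurrentKey() { return key; }

    @Override
    public Text getCurrentValue() throws IOException, InterruptedException {
        return lineRR.getCurrentValue();
    }

    @Override
    public float getProgress() throws IOException, InterruptedException {
        return lineRR.getProgress();
    }

    @Override
    public void close() throws IOException {
        lineRR.close();
    }
}

The line counter restarts for every split, so the key only carries a true per-file line number when each file comes in as a single split, which is what the class name UnsplittableFileReader suggests.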