Usage examples for subclasses of org.apache.hadoop.mapreduce.RecordReader
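Every class in this list fills in the same abstract contract. A minimal sketch of that contract follows; the field names and key/value types are illustrative and do not come from any of the source files below:

    import java.io.IOException;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.InputSplit;
    import org.apache.hadoop.mapreduce.RecordReader;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;

    public class SketchRecordReader extends RecordReader<LongWritable, Text> {
        private LongWritable key = new LongWritable();
        private Text value = new Text();
        private long pos = 0;   // bytes consumed so far
        private long end = 0;   // length of this split

        @Override
        public void initialize(InputSplit split, TaskAttemptContext context)
                throws IOException, InterruptedException {
            // Open the underlying source described by the split here.
        }

        @Override
        public boolean nextKeyValue() throws IOException, InterruptedException {
            // Advance to the next record; return false at end of split.
            return false;
        }

        @Override
        public LongWritable getCurrentKey() { return key; }

        @Override
        public Text getCurrentValue() { return value; }

        @Override
        public float getProgress() {
            return end == 0 ? 0.0f : Math.min(1.0f, pos / (float) end);
        }

        @Override
        public void close() throws IOException {
            // Release any streams opened in initialize().
        }
    }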
From source file input_format.HFileRecordReaderNoScan.java
public class HFileRecordReaderNoScan extends RecordReader<ImmutableBytesWritable, Text> {
    private ImmutableBytesWritable key = null;
    private Text value = null;
    private TableColumnSplit tsplit = null;
    private Reader reader = null;
From source file input_format.HFileRecordReaderScan.java
public class HFileRecordReaderScan extends RecordReader<ImmutableBytesWritable, Text> {
    private ResultScanner resultScanner = null;
    private Result result = null;
    private Iterator<KeyValue> list = null;
    private ImmutableBytesWritable key = null;
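A hedged sketch of how a ResultScanner-backed reader typically advances; the HBase calls are standard API, but the surrounding method body is assumed rather than taken from HFileRecordReaderScan (building the Text value from the row's KeyValues is omitted):

    public boolean nextKeyValue() throws IOException {
        result = resultScanner.next();                     // next HBase row, or null at end
        if (result == null || result.isEmpty()) {
            return false;
        }
        key = new ImmutableBytesWritable(result.getRow()); // row key becomes the map key
        return true;
    }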
From source file input_format.MyLineRecordReader.java
/**
 * Treats keys as offset in file and value as line.
 */
public class MyLineRecordReader extends RecordReader<ImmutableBytesWritable, Text> {
    private static final Log LOG = LogFactory.getLog(MyLineRecordReader.class);
From source file InvertedIndex.NLineRecordReader.java
/**
 * Treats keys as offset in file and value as line.
 */
public class NLineRecordReader extends RecordReader<LongWritable, Text> {
    private static final Log LOG = LogFactory.getLog(NLineRecordReader.class);
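The Javadoc's "offset as key, line as value" behavior reduces to a loop like the following; a sketch assuming org.apache.hadoop.util.LineReader inside a reader class like the one above, not the exact body of NLineRecordReader:

    private LongWritable key = new LongWritable();
    private Text value = new Text();
    private long pos;      // current byte offset within the file
    private long end;      // end of this split
    private LineReader in; // opened over the split in initialize()

    public boolean nextKeyValue() throws IOException {
        if (pos >= end) {
            return false;              // we have read past our split
        }
        key.set(pos);                  // key = byte offset where the line starts
        int consumed = in.readLine(value);
        if (consumed == 0) {
            return false;              // end of file
        }
        pos += consumed;               // readLine returns bytes read incl. delimiter
        return true;
    }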
From source file io.amient.kafka.hadoop.io.KafkaInputRecordReader.java
public class KafkaInputRecordReader extends RecordReader<MsgMetadataWritable, BytesWritable> {
    private static final Logger log = LoggerFactory.getLogger(KafkaInputRecordReader.class);
    private static final long LATEST_TIME = -1L;
    private static final long EARLIEST_TIME = -2L;
    private static final String CLIENT_ID = "kafka-hadoop-loader";
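The -1L/-2L sentinels mirror Kafka's classic OffsetRequest constants for the latest and earliest offsets. As an illustration of what resolving them means, here is a sketch using the modern KafkaConsumer API rather than whatever consumer this loader itself uses:

    import java.util.Collections;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.TopicPartition;

    // Map the sentinel to a concrete offset for one partition.
    static long resolveOffset(KafkaConsumer<?, ?> consumer, TopicPartition tp, long sentinel) {
        if (sentinel == -1L) {       // LATEST_TIME: start from the end of the log
            return consumer.endOffsets(Collections.singletonList(tp)).get(tp);
        }
        if (sentinel == -2L) {       // EARLIEST_TIME: start from the beginning
            return consumer.beginningOffsets(Collections.singletonList(tp)).get(tp);
        }
        return sentinel;             // already a concrete offset
    }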
From source file io.covert.dns.collection.DnsRequestRecordReader.java
public class DnsRequestRecordReader extends RecordReader<Text, DnsRequest> {
    Pair<Text, DnsRequest> pair = null;
    LineRecordReader lineReader = new LineRecordReader();
    Iterable<String> subdomains;
From source file io.druid.indexer.hadoop.DatasourceRecordReader.java
public class DatasourceRecordReader extends RecordReader<NullWritable, InputRow> {
    private static final Logger logger = new Logger(DatasourceRecordReader.class);
    private DatasourceIngestionSpec spec;
    private IngestSegmentFirehose firehose;
From source file io.fluo.webindex.data.util.WARCFileRecordReader.java
/**
 * The WARC File Record Reader processes a single compressed input. The Record Reader returns a
 * single WARC ArchiveReader that can contain numerous individual documents, each document handled
 * in a single mapper.
 *
 * @author Stephen Merity (Smerity)
 */
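The one-record-per-split behavior described above typically looks like the following; a sketch assuming webarchive-commons' WARCReaderFactory, not the verbatim body of WARCFileRecordReader:

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.mapreduce.InputSplit;
    import org.apache.hadoop.mapreduce.TaskAttemptContext;
    import org.apache.hadoop.mapreduce.lib.input.FileSplit;
    import org.archive.io.ArchiveReader;
    import org.archive.io.warc.WARCReaderFactory;

    private ArchiveReader archiveReader;
    private boolean consumed = false;

    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException {
        FileSplit fileSplit = (FileSplit) split;
        FileSystem fs = fileSplit.getPath().getFileSystem(context.getConfiguration());
        FSDataInputStream in = fs.open(fileSplit.getPath());
        // One compressed WARC file becomes one ArchiveReader.
        archiveReader = WARCReaderFactory.get(fileSplit.getPath().getName(), in, true);
    }

    public boolean nextKeyValue() {
        if (consumed) {
            return false;   // the single ArchiveReader has already been handed out
        }
        consumed = true;    // whole file = one record; the mapper iterates its documents
        return true;
    }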
From source file io.imply.druid.hadoop.DruidRecordReader.java
public class DruidRecordReader extends RecordReader<NullWritable, InputRow> {
    private static final Logger log = new Logger(DruidRecordReader.class);
    private QueryableIndex queryableIndex = null;
    private IngestSegmentFirehose firehose = null;
    private File tmpDir = null;
From source file io.ssc.trackthetrackers.extraction.hadoop.io.ArcRecordReader.java
public class ArcRecordReader extends RecordReader<Text, ArcRecord> {
    private static final Logger log = LoggerFactory.getLogger(ArcRecordReader.class);
    private FSDataInputStream fsin;
    private GzipCompressorInputStream gzip;
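ARC archives are stored as a chain of concatenated GZIP members, one per document, which is why this reader pairs an FSDataInputStream with commons-compress' GzipCompressorInputStream. A sketch of the likely setup; the boolean flag is the real commons-compress parameter, but the method body is assumed rather than copied from ArcRecordReader:

    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException {
        FileSplit fileSplit = (FileSplit) split;
        FileSystem fs = fileSplit.getPath().getFileSystem(context.getConfiguration());
        fsin = fs.open(fileSplit.getPath());
        // true = decompressConcatenated: keep reading across gzip member
        // boundaries instead of stopping after the first document.
        gzip = new GzipCompressorInputStream(fsin, true);
    }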