List of usage examples for the org.apache.hadoop.mapreduce.lib.input.LineRecordReader no-argument constructor LineRecordReader().
public LineRecordReader()
From source file:FastqRecordReaderDouble.java
License:Open Source License
/**
 * Prepares this reader for the given split by setting up and initializing
 * the delegate {@link LineRecordReader} that performs the actual line reads.
 *
 * @param inputSplit         the split to read
 * @param taskAttemptContext context supplying the job configuration
 * @throws IOException          if the delegate fails to open the split
 * @throws InterruptedException if initialization is interrupted
 */
@Override
public void initialize(final InputSplit inputSplit, final TaskAttemptContext taskAttemptContext)
        throws IOException, InterruptedException {
    // Store the delegate first, then let it open the underlying stream.
    this.lrr = new LineRecordReader();
    this.lrr.initialize(inputSplit, taskAttemptContext);
}
From source file:DupleInputFormat.java
License:Apache License
/**
 * Returns a fresh line-oriented reader for the given split.
 *
 * @param split   the input split to consume (unused here; the framework
 *                initializes the returned reader with it later)
 * @param context the task attempt context (unused)
 * @return a new {@link LineRecordReader} producing (offset, line) pairs
 */
@Override
public RecordReader<LongWritable, Text> createRecordReader(InputSplit split, TaskAttemptContext context) {
    final LineRecordReader reader = new LineRecordReader();
    return reader;
}
From source file:be.uantwerpen.adrem.hadoop.util.SplitByKTextInputFormat.java
License:Apache License
/**
 * Creates a line-oriented reader for the given split, first publishing the
 * split description as the task status so it is visible in monitoring UIs.
 *
 * @param genericSplit the split this task will consume
 * @param context      the task attempt context whose status is updated
 * @return a new {@link LineRecordReader} producing (offset, line) pairs
 * @throws IOException declared for interface compatibility
 */
@Override
public RecordReader<LongWritable, Text> createRecordReader(InputSplit genericSplit, TaskAttemptContext context)
        throws IOException {
    final String splitDescription = genericSplit.toString();
    context.setStatus(splitDescription);
    return new LineRecordReader();
}
From source file:bucket_sort.NLineInputFormat.java
License:Apache License
/**
 * Creates a line-oriented reader for the given split, first publishing the
 * split description as the task status so it is visible in monitoring UIs.
 *
 * @param genericSplit the split this task will consume
 * @param context      the task attempt context whose status is updated
 * @return a new {@link LineRecordReader} producing (offset, line) pairs
 * @throws IOException declared for interface compatibility
 */
@Override // added: this implements FileInputFormat.createRecordReader, matching sibling implementations
public RecordReader<LongWritable, Text> createRecordReader(InputSplit genericSplit, TaskAttemptContext context)
        throws IOException {
    context.setStatus(genericSplit.toString());
    return new LineRecordReader();
}
From source file:com.github.jmabuin.blaspark.io.RowPerLineRecordReader.java
License:Open Source License
/**
 * Initializes this reader by delegating all line parsing to a standard
 * {@link LineRecordReader} opened on the supplied split.
 *
 * @param inputSplit         the split to read
 * @param taskAttemptContext context supplying the job configuration
 * @throws IOException          if the delegate fails to open the split
 * @throws InterruptedException if initialization is interrupted
 */
@Override
public void initialize(final InputSplit inputSplit, final TaskAttemptContext taskAttemptContext)
        throws IOException, InterruptedException {
    // Assign before initializing so the field mirrors the delegate's state.
    this.lrr = new LineRecordReader();
    this.lrr.initialize(inputSplit, taskAttemptContext);
}
From source file:com.inmobi.messaging.consumer.databus.mapreduce.DatabusRecordReader.java
License:Apache License
/**
 * Builds the reader; line parsing is delegated to a {@link LineRecordReader},
 * which the framework initializes with the split later.
 */
public DatabusRecordReader() {
    this.lineReader = new LineRecordReader();
}
From source file:com.mapred.EmplRecordReader.java
/**
 * Prepares this reader for the given file split.
 *
 * Records the split length and delegates the actual line reading to a
 * {@link LineRecordReader}, which opens the underlying stream itself.
 *
 * @param is  the split to read; must be a {@link FileSplit}
 * @param tac task context supplying the job configuration
 * @throws IOException          if the delegate fails to open the split
 * @throws InterruptedException if initialization is interrupted
 */
@Override
public void initialize(InputSplit is, TaskAttemptContext tac) throws IOException, InterruptedException {
    FileSplit split = (FileSplit) is;
    position = 0L;
    end = split.getLength();
    // Fix: the previous version also called FileSystem.open() on the split's
    // path but never read from or closed that stream, leaking a file handle
    // per reader. LineRecordReader.initialize() opens its own stream, so the
    // redundant open (and the unused Configuration/Path/FileSystem lookups
    // feeding it) have been removed.
    in = new LineRecordReader();
    in.initialize(is, tac);
}
From source file:com.zinnia.nectar.util.hadoop.inputformat.FirstNLineInputFormat.java
License:Apache License
@Override public RecordReader<LongWritable, Text> createRecordReader(InputSplit inputSplit, TaskAttemptContext context) throws IOException, InterruptedException { // TODO Auto-generated method stub context.setStatus(inputSplit.toString()); return new LineRecordReader(); }
From source file:edu.umn.cs.spatialHadoop.operations.Head.java
License:Open Source License
/** * Reads a maximum of n lines from the given file. * @param fs//ww w . j av a2 s . co m * @param p * @param n * @return * @throws IOException */ public static String[] head(FileSystem fs, Path p, int n) throws IOException { String[] lines = new String[n]; FileStatus fstatus = fs.getFileStatus(p); TaskAttemptContext context = createDummyContext(); LineRecordReader lineReader = new LineRecordReader(); FileSplit split; if (p.getName().endsWith(".rtree")) { // R-tree indexed file FSDataInputStream in = fs.open(p); in.skip(8); // Skip R-tree signature int treeHeaderSize = RTree.skipHeader(in); in.close(); split = new FileSplit(p, treeHeaderSize + 8, fstatus.getLen() - treeHeaderSize - 8, new String[0]); } else { split = new FileSplit(p, 0, fstatus.getLen(), new String[0]); } lineReader.initialize(split, context); int numOfLines = 0; for (numOfLines = 0; numOfLines < lines.length && lineReader.nextKeyValue(); numOfLines++) { lines[numOfLines] = lineReader.getCurrentValue().toString(); } lineReader.close(); return lines; }
From source file:ir.ac.ut.snl.mrcd.MyRecordReader.java
/**
 * Builds the reader. Note that neither delegate reader is initialized here
 * with {@code conf}/{@code split}; both parameters are currently unused.
 */
public MyRecordReader(Configuration conf, FileSplit split) {
    // NOTE(review): two separate LineRecordReaders are constructed but only
    // lineReader is consulted below — confirm lineRecordReader is
    // initialized/used elsewhere in the class, otherwise it looks redundant.
    lineRecordReader = new LineRecordReader();
    lineReader = new LineRecordReader();
    // NOTE(review): getCurrentKey()/getCurrentValue() are invoked before
    // initialize()/nextKeyValue(); on an uninitialized LineRecordReader these
    // presumably return null — verify lineKey/lineValue are reassigned (or
    // null-checked) before any use.
    lineKey = lineReader.getCurrentKey();
    lineValue = lineReader.getCurrentValue();
}