List of usage examples for org.apache.hadoop.mapreduce.InputSplit: subclass usage
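Every entry below follows the same contract: extend org.apache.hadoop.mapreduce.InputSplit and implement org.apache.hadoop.io.Writable so the framework can serialize the split out to the task that processes it. A minimal sketch of that shared pattern (the class name and fields are illustrative, not taken from any project listed):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputSplit;

// Illustrative subclass; name and fields are hypothetical.
public class SimpleFileSplit extends InputSplit implements Writable {
    private Path file;
    private long length;
    private String[] hosts;

    // No-arg constructor: required so the framework can instantiate
    // the split reflectively before calling readFields().
    public SimpleFileSplit() {
    }

    public SimpleFileSplit(Path file, long length, String[] hosts) {
        this.file = file;
        this.length = length;
        this.hosts = hosts;
    }

    @Override
    public long getLength() {
        return length; // used by the scheduler to sort splits by size
    }

    @Override
    public String[] getLocations() {
        return hosts; // data-locality hints for task placement
    }

    @Override
    public void write(DataOutput out) throws IOException {
        Text.writeString(out, file.toString());
        out.writeLong(length);
        // Locations are a scheduling hint only and are typically not serialized.
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        file = new Path(Text.readString(in));
        length = in.readLong();
        hosts = new String[0];
    }
}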
From source file edu.iu.common.MultiFileSplit.java
public class MultiFileSplit extends InputSplit implements Writable {
    private List<Path> files;
    private long length;
    private String[] hosts;
From source file edu.iu.fileformat.MultiFileSplit.java
public class MultiFileSplit extends InputSplit implements Writable {
    private List<Path> files;
    private long length;
    private String[] hosts;
From source file edu.uci.ics.hyracks.dataflow.hadoop.mapreduce.InputFileSplit.java
public class InputFileSplit extends InputSplit implements Writable {
    private Path file;
    private long start;
    private long length;
    private int blockId;
    private String[] hosts;
From source file fi.tkk.ics.hadoop.bam.FileVirtualSplit.java
/**
 * Like a {@link org.apache.hadoop.mapreduce.lib.input.FileSplit}, but uses
 * BGZF virtual offsets to fit with
 * {@link net.sf.samtools.util.BlockCompressedInputStream}.
 */
public class FileVirtualSplit extends InputSplit implements Writable {
    private Path file;
From source file gr.ntua.h2rdf.inputFormat.MyInputSplit.java
/**
 * <code>InputSplit</code> represents the data to be processed by an
 * individual {@link Mapper}.
 *
 * <p>Typically, it presents a byte-oriented view on the input and is the
 * responsibility of {@link RecordReader} of the job to process this and present
 * a record-oriented view.
 */
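The byte-to-record hand-off this comment describes happens on the framework side. A minimal sketch of it, assuming placeholder JobContext/TaskAttemptContext objects and using TextInputFormat to stand in for any InputFormat:

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;

// Sketch of what the framework does per map task (normally MapTask's job, not user code).
static void dumpRecords(JobContext job, TaskAttemptContext context) throws Exception {
    TextInputFormat format = new TextInputFormat();
    for (InputSplit split : format.getSplits(job)) {   // one map task per split
        RecordReader<LongWritable, Text> reader =
                format.createRecordReader(split, context);
        reader.initialize(split, context);
        while (reader.nextKeyValue()) {
            // The RecordReader presents the byte-oriented split as key/value records.
            System.out.println(reader.getCurrentKey() + "\t" + reader.getCurrentValue());
        }
        reader.close();
    }
}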
From source file input_format.MyInputSplit.java
/**
 * <code>InputSplit</code> represents the data to be processed by an
 * individual {@link Mapper}.
 *
 * <p>Typically, it presents a byte-oriented view on the input and is the
 * responsibility of {@link RecordReader} of the job to process this and present
 * a record-oriented view.
 */
From source file io.amient.kafka.hadoop.io.KafkaInputSplit.java
public class KafkaInputSplit extends InputSplit implements Writable {
    private String brokerId;
    private String broker;
    private int partition;
    private String topic;
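A split like this must round-trip all of its fields through the Writable methods so the task that executes it sees the same broker, partition, and topic the planner chose. A plausible sketch of that pair for the four fields shown (these methods sit inside the class body; this is not necessarily the project's actual code):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Text;

@Override
public void write(DataOutput out) throws IOException {
    // Serialize every field the task needs; order must match readFields().
    Text.writeString(out, brokerId);
    Text.writeString(out, broker);
    out.writeInt(partition);
    Text.writeString(out, topic);
}

@Override
public void readFields(DataInput in) throws IOException {
    brokerId = Text.readString(in);
    broker = Text.readString(in);
    partition = in.readInt();
    topic = Text.readString(in);
}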
From source file io.druid.indexer.hadoop.DatasourceInputSplit.java
public class DatasourceInputSplit extends InputSplit implements Writable {
    private List<WindowedDataSegment> segments = null;

    // required for deserialization
    public DatasourceInputSplit() {
    }
From source file io.imply.druid.hadoop.DruidInputSplit.java
public class DruidInputSplit extends InputSplit implements Writable {
    private static final String[] EMPTY_LOCATIONS = new String[] {};
    private WindowedDataSegment segment = null;

    public DruidInputSplit() {
From source file io.vitess.hadoop.VitessInputSplit.java
public class VitessInputSplit extends InputSplit implements Writable {
    private String[] locations;
    private SplitQueryResponse.Part split;

    public VitessInputSplit(SplitQueryResponse.Part split) {
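For completeness, the producing side: the matching InputFormat typically builds one such split per query part in getSplits(). A hypothetical VitessInputFormat-style sketch, where queryParts is an assumed field holding the planner's SplitQueryResponse.Part list (not the project's actual source):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;

@Override
public List<InputSplit> getSplits(JobContext context) throws IOException {
    List<InputSplit> splits = new ArrayList<>();
    // queryParts: assumed field, one Part per shard/row range
    for (SplitQueryResponse.Part part : queryParts) {
        splits.add(new VitessInputSplit(part));
    }
    return splits;
}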