Example usage for org.apache.hadoop.mapreduce.lib.input FileSplit subclass-usage

Introduction

This page collects example usages of org.apache.hadoop.mapreduce.lib.input.FileSplit, showing open-source classes that subclass it.
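
Most of the classes below follow the same pattern: extend FileSplit, add the extra per-split state the format needs, and extend the Writable serialization so that state travels with the split to the task. The following is a minimal, hedged sketch of that pattern; the class and field names (SampleSplit, indexPath) are made up for illustration and do not come from any of the projects listed here.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

/**
 * Illustrative FileSplit subclass carrying one extra Path in addition to
 * the (file, start, length) that FileSplit itself serializes.
 */
public class SampleSplit extends FileSplit implements Writable {

    private Path indexPath; // hypothetical extra per-split state

    // No-argument constructor: the framework creates the split reflectively
    // and then populates it through readFields().
    public SampleSplit() {
        super();
    }

    public SampleSplit(Path file, long start, long length, String[] hosts, Path indexPath) {
        super(file, start, length, hosts);
        this.indexPath = indexPath;
    }

    public Path getIndexPath() {
        return indexPath;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        super.write(out); // file, start, length
        Text.writeString(out, indexPath.toString());
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        super.readFields(in);
        indexPath = new Path(Text.readString(in));
    }
}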

Usage

From source file co.cask.cdap.data.stream.StreamInputSplit.java

/**
 * Represents a mapreduce InputSplit for stream.
 */
public final class StreamInputSplit extends FileSplit implements Writable {

    private Path indexPath;

From source file com.blm.orc.OrcNewSplit.java

/**
 * OrcFileSplit. Holds file meta info
 *
 */
public class OrcNewSplit extends FileSplit {
    private ReaderImpl.FileMetaInfo fileMetaInfo;

From source file com.marklogic.contentpump.utilities.DelimitedSplit.java

/**
 * FileSplit for DelimitedText
 */
public class DelimitedSplit extends FileSplit {
    private TextArrayWritable header;

From source file com.pivotal.hawq.mapreduce.ao.file.HAWQAOSplit.java

/**
 * A section of an input file. Returned by
 * HAWQAOInputFormat.getSplits(JobContext) and passed to
 * HAWQAOInputFormat.createRecordReader(InputSplit,TaskAttemptContext).
 */
public class HAWQAOSplit extends FileSplit {

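The HAWQAOSplit javadoc spells out the lifecycle all of these classes share: the InputFormat's getSplits(JobContext) returns the custom splits, and createRecordReader(InputSplit, TaskAttemptContext) later receives each one back as a plain InputSplit. Below is a hedged sketch of that hand-off, reusing the illustrative SampleSplit from the introduction; the input path and the LineRecordReader stand-in are placeholders, not HAWQ code.

import java.io.IOException;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.LineRecordReader;

public class SampleInputFormat extends FileInputFormat<LongWritable, Text> {

    @Override
    public List<InputSplit> getSplits(JobContext context) throws IOException {
        // Real implementations enumerate the input files; this sketch returns
        // a single hard-coded split purely to show the types involved.
        return Collections.singletonList((InputSplit) new SampleSplit(
                new Path("/data/part-00000"), 0L, 128L * 1024 * 1024,
                new String[0], new Path("/data/part-00000.idx")));
    }

    @Override
    public RecordReader<LongWritable, Text> createRecordReader(InputSplit split, TaskAttemptContext context) {
        // The framework hands the split back as a plain InputSplit; the
        // custom InputFormat downcasts to reach its extra state.
        SampleSplit sampleSplit = (SampleSplit) split;
        System.out.println("index file for this split: " + sampleSplit.getIndexPath());
        return new LineRecordReader(); // stand-in reader for the sketch
    }
}
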
From source file com.splicemachine.mrio.api.core.SMSplit.java

public class SMSplit extends FileSplit {
    protected TableSplit split;

    public SMSplit() throws IOException {
        super(FSUtils.getRootDir(HConfiguration.unwrapDelegate()), 0, 0, null);
        split = new TableSplit();

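SMSplit's constructor above forwards placeholder values to super because Hadoop instantiates split classes reflectively on the task side and only then fills them in via readFields(). The following is a simplified, hedged sketch of that round trip using the illustrative SampleSplit from the introduction; the real framework goes through its serialization machinery rather than calling these methods directly, and the paths are made up.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.ReflectionUtils;

public class SplitRoundTrip {
    public static void main(String[] args) throws IOException {
        SampleSplit original = new SampleSplit(new Path("/data/part-00000"),
                0L, 64L * 1024 * 1024, new String[] { "host1" },
                new Path("/data/part-00000.idx"));

        // Serialize the split's state as it would travel to the task.
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        original.write(new DataOutputStream(buffer));

        // Recreate the split reflectively (hence the need for an accessible
        // no-argument constructor) and restore its state from the bytes.
        SampleSplit rebuilt = ReflectionUtils.newInstance(SampleSplit.class, new Configuration());
        rebuilt.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));

        System.out.println(rebuilt.getPath() + " @ " + rebuilt.getStart()
                + " len=" + rebuilt.getLength() + " index=" + rebuilt.getIndexPath());
    }
}
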
From source file com.twitter.elephanttwin.retrieval.IndexedFileSplit.java

/**
 * IndexedFileSplit contains a list of sub-blocks to be read by a single
 * Mapper. Thus it can reduce the number of mappers to be used for the job.
 */
public class IndexedFileSplit extends FileSplit {
    private Path file;

From source file edu.uci.ics.pregelix.api.io.BasicGenInputSplit.java

/**
 * This InputSplit will not give any ordering or location data. It is used
 * internally by BspInputFormat (which determines how many tasks to run the
 * application on). Users should not use this directly.
 */
public class BasicGenInputSplit extends FileSplit implements Writable, Serializable {

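BasicGenInputSplit's javadoc notes that it exposes no ordering or location data. For a FileSplit subclass, dropping the locality hint can be expressed by overriding getLocations() to return an empty array, as in this hedged sketch (the class name is made up):

import java.io.IOException;

import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class LocationlessSplit extends FileSplit {

    public LocationlessSplit() {
        super();
    }

    @Override
    public String[] getLocations() throws IOException {
        // No preferred hosts: the scheduler treats every node as equally good.
        return new String[0];
    }
}
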
From source file org.apache.carbondata.core.load.BlockDetails.java

/**
 * blocks info
 * TODO Remove this class after removing of kettle.
 */
public class BlockDetails extends FileSplit implements Serializable {

From source file org.apache.carbondata.hadoop.CarbonInputSplit.java

/**
 * Carbon input split to allow distributed read of CarbonInputFormat.
 */
public class CarbonInputSplit extends FileSplit implements Serializable, Writable {

    private static final long serialVersionUID = 3520344046772190207L;

From source file org.apache.carbondata.processing.csvload.BlockDetails.java

/**
 * blocks info
 */
public class BlockDetails extends FileSplit implements Serializable {

    /**