Example usage for org.apache.hadoop.mapreduce InputSplit subclasses


Introduction

On this page you can find usage examples for subclasses of org.apache.hadoop.mapreduce.InputSplit, collected from several open source projects. Each entry names the source file and shows the opening of the class.
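
Every class listed below fills in the same small contract: org.apache.hadoop.mapreduce.InputSplit requires getLength() and getLocations(), and a split that the framework ships to tasks also implements org.apache.hadoop.io.Writable. As a point of reference, here is a minimal, hypothetical sketch of such a subclass; the class name and fields are illustrative only and do not come from any of the projects below.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputSplit;

// Hypothetical split describing a byte range of a file plus the hosts holding it.
public class ExampleRangeSplit extends InputSplit implements Writable {

    private String path;
    private long start;
    private long length;
    private String[] hosts;

    // No-arg constructor so the framework can instantiate the split before readFields().
    public ExampleRangeSplit() {
    }

    public ExampleRangeSplit(String path, long start, long length, String[] hosts) {
        this.path = path;
        this.start = start;
        this.length = length;
        this.hosts = hosts;
    }

    @Override
    public long getLength() {
        // Size of the split, used by the framework to compare splits.
        return length;
    }

    @Override
    public String[] getLocations() {
        // Hosts where the data is local, used as task placement hints.
        return hosts;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(path);
        out.writeLong(start);
        out.writeLong(length);
        out.writeInt(hosts.length);
        for (String host : hosts) {
            out.writeUTF(host);
        }
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        path = in.readUTF();
        start = in.readLong();
        length = in.readLong();
        hosts = new String[in.readInt()];
        for (int i = 0; i < hosts.length; i++) {
            hosts[i] = in.readUTF();
        }
    }
}

getLength() lets the framework compare split sizes, while getLocations() is only a placement hint: empty or inaccurate locations cost data locality, not correctness.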

Usage

From source file org.apache.carbondata.hadoop.internal.CarbonInputSplit.java

/**
 * A Carbon input split can be in different formats; the application should create the
 * record reader based on the format type.
 */
public abstract class CarbonInputSplit extends InputSplit {
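
The excerpt keeps the split abstract and defers record-reader creation to the application based on the split's format. Below is a hedged sketch of what a format-aware base split could look like; the enum and method names are hypothetical, not CarbonData's actual API.

import org.apache.hadoop.mapreduce.InputSplit;

// Hypothetical format tag; the real project defines its own format types.
enum ExampleFormat { COLUMNAR, ROW }

// Hypothetical abstract base split: concrete subclasses declare their format,
// and the InputFormat picks a matching RecordReader when the job runs.
public abstract class ExampleFormatAwareSplit extends InputSplit {

    public abstract ExampleFormat getFormat();
}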

From source file org.apache.cassandra.hadoop.ColumnFamilySplit.java

public class ColumnFamilySplit extends InputSplit implements Writable, org.apache.hadoop.mapred.InputSplit {
    private String startToken;
    private String endToken;
    private long length;
    private String[] dataNodes;
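
This class both extends the new-API org.apache.hadoop.mapreduce.InputSplit and implements the old-API org.apache.hadoop.mapred.InputSplit. Because both APIs declare getLength() and getLocations(), a single pair of methods can serve the two at once (the overriding methods may simply drop the checked exceptions). Below is a hedged sketch of that bridging pattern, mirroring the field names above but otherwise hypothetical.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.mapreduce.InputSplit;

// Hypothetical token-range split usable from both the mapred and mapreduce APIs.
public class ExampleTokenRangeSplit extends InputSplit
        implements Writable, org.apache.hadoop.mapred.InputSplit {

    private String startToken;
    private String endToken;
    private long length;
    private String[] dataNodes;

    public ExampleTokenRangeSplit() {
    }

    public ExampleTokenRangeSplit(String startToken, String endToken, long length, String[] dataNodes) {
        this.startToken = startToken;
        this.endToken = endToken;
        this.length = length;
        this.dataNodes = dataNodes;
    }

    // One implementation satisfies both InputSplit APIs.
    @Override
    public long getLength() {
        return length;
    }

    @Override
    public String[] getLocations() {
        return dataNodes;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        WritableUtils.writeString(out, startToken);
        WritableUtils.writeString(out, endToken);
        out.writeLong(length);
        WritableUtils.writeStringArray(out, dataNodes);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        startToken = WritableUtils.readString(in);
        endToken = WritableUtils.readString(in);
        length = in.readLong();
        dataNodes = WritableUtils.readStringArray(in);
    }
}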

From source file org.apache.cassandra.hadoop2.ColumnFamilySplit.java

public class ColumnFamilySplit extends InputSplit implements Writable, org.apache.hadoop.mapred.InputSplit {

    private String startToken;
    private String endToken;
    private long length;
    private String[] dataNodes;

From source file org.apache.cassandra.hadoop2.multiquery.MultiQueryInputSplit.java

class MultiQueryInputSplit extends InputSplit implements Writable {
    private List<TokenRange> tokenRanges;
    private List<String> hosts;

    // TODO: Is there a better answer here?
    private static final long SPLIT_LENGTH = 1L;
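
When a split's real size cannot be estimated cheaply, as the TODO above hints, a common workaround is to return a constant from getLength() so that every split carries the same weight when the framework orders splits by size, while the host list still drives locality through getLocations(). The following is a hypothetical sketch of that approach, not the actual class body.

import java.util.List;
import org.apache.hadoop.mapreduce.InputSplit;

// Hypothetical split whose true size is unknown; every split reports the same length.
public class ExampleConstantLengthSplit extends InputSplit {

    private static final long SPLIT_LENGTH = 1L;

    private final List<String> hosts;

    public ExampleConstantLengthSplit(List<String> hosts) {
        this.hosts = hosts;
    }

    @Override
    public long getLength() {
        // No cheap size estimate is available, so all splits are weighted equally.
        return SPLIT_LENGTH;
    }

    @Override
    public String[] getLocations() {
        return hosts.toArray(new String[0]);
    }
}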

From source file org.apache.crunch.impl.mr.run.CrunchInputSplit.java

class CrunchInputSplit extends InputSplit implements Writable, Configurable {

    private InputSplit inputSplit;
    private int nodeIndex;
    private FormatBundle<? extends InputFormat<?, ?>> bundle;
    private Configuration conf;
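
A wrapper split like this mostly forwards the InputSplit contract to its delegate; the delicate part is serialization, since the concrete class of the wrapped split has to travel with it so it can be re-created on the task side. Below is a hedged, simplified sketch of the wrapper pattern that assumes the wrapped split is itself Writable, which may differ from how Crunch's actual class handles it.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.util.ReflectionUtils;

// Hypothetical wrapper that carries another InputSplit plus the Configuration
// needed to re-create it on the task side.
public class ExampleDelegatingSplit extends InputSplit implements Writable, Configurable {

    private InputSplit delegate;
    private Configuration conf;

    public ExampleDelegatingSplit() {
    }

    public ExampleDelegatingSplit(InputSplit delegate, Configuration conf) {
        this.delegate = delegate;
        this.conf = conf;
    }

    // The InputSplit contract is simply forwarded to the wrapped split.
    @Override
    public long getLength() throws IOException, InterruptedException {
        return delegate.getLength();
    }

    @Override
    public String[] getLocations() throws IOException, InterruptedException {
        return delegate.getLocations();
    }

    @Override
    public void write(DataOutput out) throws IOException {
        // Record the concrete class, then let the wrapped split write itself.
        out.writeUTF(delegate.getClass().getName());
        ((Writable) delegate).write(out);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        String className = in.readUTF();
        try {
            Class<? extends InputSplit> cls =
                    conf.getClassByName(className).asSubclass(InputSplit.class);
            delegate = ReflectionUtils.newInstance(cls, conf);
        } catch (ClassNotFoundException e) {
            throw new IOException("Cannot locate wrapped split class " + className, e);
        }
        ((Writable) delegate).readFields(in);
    }

    @Override
    public Configuration getConf() {
        return conf;
    }

    @Override
    public void setConf(Configuration conf) {
        this.conf = conf;
    }
}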

From source file org.apache.crunch.kafka.inputformat.KafkaInputSplit.java

/**
 * InputSplit that represents retrieving data from a single {@link TopicPartition} between the specified start
 * and end offsets.
 */
public class KafkaInputSplit extends InputSplit implements Writable {
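
For a Kafka-backed split the natural identity is a TopicPartition plus an offset range; there is no HDFS block to be local to, so getLength() can report the number of offsets covered and getLocations() can stay empty. The following is a hypothetical sketch along those lines, not the actual Crunch implementation.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.kafka.common.TopicPartition;

// Hypothetical split covering [startOffset, endOffset) of one topic partition.
public class ExampleKafkaSplit extends InputSplit implements Writable {

    private TopicPartition topicPartition;
    private long startOffset;
    private long endOffset;

    public ExampleKafkaSplit() {
    }

    public ExampleKafkaSplit(String topic, int partition, long startOffset, long endOffset) {
        this.topicPartition = new TopicPartition(topic, partition);
        this.startOffset = startOffset;
        this.endOffset = endOffset;
    }

    @Override
    public long getLength() {
        // Length in offsets rather than bytes; good enough for relative split sizing.
        return endOffset - startOffset;
    }

    @Override
    public String[] getLocations() {
        // Kafka brokers are not HDFS data nodes, so no locality hints are given.
        return new String[0];
    }

    @Override
    public void write(DataOutput out) throws IOException {
        out.writeUTF(topicPartition.topic());
        out.writeInt(topicPartition.partition());
        out.writeLong(startOffset);
        out.writeLong(endOffset);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        topicPartition = new TopicPartition(in.readUTF(), in.readInt());
        startOffset = in.readLong();
        endOffset = in.readLong();
    }
}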

From source file org.apache.crunch.kafka.record.KafkaInputSplit.java

/**
 * InputSplit that represents retrieving data from a single {@link TopicPartition} between the specified start
 * and end offsets.
 */
public class KafkaInputSplit extends InputSplit implements Writable {

From source file org.apache.distributedlog.mapreduce.LogSegmentSplit.java

/**
 * An input split that reads from a log segment.
 */
public class LogSegmentSplit extends InputSplit implements Writable {

    private LogSegmentMetadata logSegmentMetadata;

From source file org.apache.druid.indexer.hadoop.DatasourceInputSplit.java

public class DatasourceInputSplit extends InputSplit implements Writable {
    private static final String[] EMPTY_STR_ARRAY = new String[0];

    private List<WindowedDataSegment> segments = null;
    private String[] locations = null;
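
A split can also stand for several underlying data segments at once; getLength() then typically aggregates the segment sizes, and getLocations() returns whatever the InputFormat precomputed, guarding against fields that were never set. The following is a hypothetical sketch of that shape, with plain segment sizes standing in for Druid's WindowedDataSegment objects.

import java.util.List;
import org.apache.hadoop.mapreduce.InputSplit;

// Hypothetical split spanning several data segments; serialization omitted here.
public class ExampleMultiSegmentSplit extends InputSplit {

    private static final String[] EMPTY_STR_ARRAY = new String[0];

    private List<Long> segmentSizes;
    private String[] locations;

    public ExampleMultiSegmentSplit(List<Long> segmentSizes, String[] locations) {
        this.segmentSizes = segmentSizes;
        this.locations = locations;
    }

    @Override
    public long getLength() {
        // Report the combined size of every segment in this split.
        long total = 0L;
        for (long size : segmentSizes) {
            total += size;
        }
        return total;
    }

    @Override
    public String[] getLocations() {
        // Locations were precomputed by the InputFormat; fall back to "no hint".
        return locations == null ? EMPTY_STR_ARRAY : locations;
    }
}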

From source file org.apache.giraph.bsp.BspInputSplit.java

/**
 * This InputSplit will not give any ordering or location data.
 * It is used internally by BspInputFormat (which determines
 * how many tasks to run the application on).  Users should not use this
 * directly.
 */