Example usage for the org.apache.hadoop.mapred.InputFormat interface

Introduction

This page collects example usages of the org.apache.hadoop.mapred.InputFormat interface, drawn from open source projects. Each entry shows the opening lines of a source file that implements the interface.
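
All of the classes listed under Usage satisfy the same two-method contract: getSplits(JobConf, int) partitions the input into InputSplits, one per map task, and getRecordReader(InputSplit, JobConf, Reporter) turns a single split into key/value pairs. As a minimal sketch (not taken from any of the projects below), an implementation has this shape:

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.*;

public class SkeletonInputFormat implements InputFormat<LongWritable, Text> {

    // Partition the input into logical splits, one per map task.
    public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
        // Real implementations inspect the JobConf (paths, tables, queries)
        // and return one InputSplit per unit of parallel work.
        return new InputSplit[0];
    }

    // Produce a RecordReader that iterates over one split as key/value pairs.
    public RecordReader<LongWritable, Text> getRecordReader(InputSplit split, JobConf job, Reporter reporter)
            throws IOException {
        throw new UnsupportedOperationException("sketch only");
    }
}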

Usage

From source file cascading.dbmigrate.hadoop.DBInputFormat.java

public class DBInputFormat implements InputFormat<LongWritable, TupleWrapper> {

    private static final Logger LOG = LoggerFactory.getLogger(DBInputFormat.class);

    public static class DBRecordReader implements RecordReader<LongWritable, TupleWrapper> {
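
The excerpt stops at the nested reader class. For orientation, a mapred RecordReader such as DBRecordReader above must supply all six methods shown in this sketch, which uses LongWritable/Text placeholders rather than the project's TupleWrapper:

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.RecordReader;

public class SketchRecordReader implements RecordReader<LongWritable, Text> {

    // Advance to the next record, filling key and value in place;
    // return false once the split is exhausted.
    public boolean next(LongWritable key, Text value) throws IOException {
        return false;
    }

    // Reusable key/value instances handed to next() by the framework.
    public LongWritable createKey() { return new LongWritable(); }
    public Text createValue() { return new Text(); }

    // Position and progress within the split, used for status reporting.
    public long getPos() throws IOException { return 0; }
    public float getProgress() throws IOException { return 1.0f; }

    public void close() throws IOException { }
}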

From source file cascading.hbase.helper.TableInputFormatBase.java

/**
 * A Base for {@link TableInputFormat}s. Receives a {@link HTable}, a
 * byte[] of input columns and optionally a {@link Filter}.
 * Subclasses may use other TableRecordReader implementations.
 * <p>
 * An example of a subclass:
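
The javadoc's subclass example is truncated above. A hypothetical subclass along the lines it describes might look like the following; setHTable and setInputColumns are assumed protected setters matching the javadoc's description (the base receives an HTable and a byte[] of input columns):

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;

public class ExampleTableInputFormat extends TableInputFormatBase implements JobConfigurable {

    // Hand the base class a table and input columns at job setup time.
    // "exampleTable" and "family:columnA" are placeholder names.
    public void configure(JobConf job) {
        try {
            setHTable(new HTable(HBaseConfiguration.create(job), "exampleTable"));
            setInputColumns(new byte[][] { Bytes.toBytes("family:columnA") });
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}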

From source file cascading.jdbc.db.DBInputFormat.java

/**
 * An InputFormat that reads input data from an SQL table.
 * <p/>
 * DBInputFormat emits LongWritables containing the record number as
 * key and DBWritables as value.
 * <p/>

From source file cascading.tap.hadoop.io.MultiInputFormat.java

/**
 * Class MultiInputFormat accepts multiple InputFormat class declarations allowing a single MR job
 * to read data from incompatible file types.
 */
public class MultiInputFormat implements InputFormat {
    /** Field LOG */

From source file cascading.tap.hadoop.MultiInputFormat.java

/**
 * Class MultiInputFormat accepts multiple InputFormat class declarations allowing a single MR job
 * to read data from incompatible file types.
 */
public class MultiInputFormat implements InputFormat {
    /** Field LOG */
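
Both MultiInputFormat variants (this one and the cascading.tap.hadoop.io copy above) hide several child InputFormats behind the single interface. Reduced to an illustrative sketch, and not Cascading's actual code, the delegation idea is: concatenate every child's splits, and have each split remember its originating format so getRecordReader can route back to it.

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.mapred.*;

public class DelegatingInputFormat implements InputFormat {

    // Hypothetical field: one wrapped format per declared input type.
    private List<InputFormat> children = new ArrayList<InputFormat>();

    public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
        List<InputSplit> splits = new ArrayList<InputSplit>();
        for (InputFormat child : children)
            splits.addAll(Arrays.asList(child.getSplits(job, numSplits)));
        return splits.toArray(new InputSplit[splits.size()]);
    }

    public RecordReader getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException {
        // A real implementation looks up the child format recorded in
        // the split and delegates to it.
        throw new UnsupportedOperationException("sketch only");
    }
}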

From source file cascalog.TupleMemoryInputFormat.java

public class TupleMemoryInputFormat implements InputFormat<TupleWrapper, NullWritable> {

    public static final String TUPLES_PROPERTY = "memory.format.tuples";

    public static class TupleInputSplit implements InputSplit {
        public int numTuples;
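
TupleInputSplit above shows the other half of a custom format: a mapred InputSplit is just a Writable that reports a length and preferred host locations. A sketch of a count-only split in the same spirit:

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.apache.hadoop.mapred.InputSplit;

public class CountSplit implements InputSplit {

    public int numTuples;

    // No bytes on disk, so report the tuple count as the "length".
    public long getLength() throws IOException { return numTuples; }

    // In-memory data has no preferred hosts.
    public String[] getLocations() throws IOException { return new String[0]; }

    // Writable methods, so the split can be shipped to task JVMs.
    public void write(DataOutput out) throws IOException { out.writeInt(numTuples); }
    public void readFields(DataInput in) throws IOException { numTuples = in.readInt(); }
}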

From source file co.cask.cdap.hive.datasets.DatasetInputFormat.java

/**
 * Map reduce input format to read from datasets that implement RecordScannable.
 */
public class DatasetInputFormat implements InputFormat<Void, ObjectWritable> {
    private static final Gson GSON = new Gson();

From source file co.cask.cdap.hive.stream.HiveStreamInputFormat.java

/**
 * Stream input format for use in hive queries and only hive queries. Will not work outside of hive.
 */
public class HiveStreamInputFormat implements InputFormat<Void, ObjectWritable> {
    private static final Logger LOG = LoggerFactory.getLogger(HiveStreamInputFormat.class);

From source file com.aerospike.hadoop.mapreduce.AerospikeInputFormat.java

/**
 * An {@link InputFormat} for data stored in an Aerospike database.
 */
public class AerospikeInputFormat extends InputFormat<AerospikeKey, AerospikeRecord>
        implements org.apache.hadoop.mapred.InputFormat<AerospikeKey, AerospikeRecord> {
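
Note the declaration: AerospikeInputFormat extends the new org.apache.hadoop.mapreduce.InputFormat abstract class while also implementing the old org.apache.hadoop.mapred.InputFormat interface. The two APIs live in different packages with distinct InputSplit and RecordReader types, so the method signatures never clash and one class can serve jobs written against either API. A standalone sketch of the pattern, with LongWritable/Text standing in for AerospikeKey/AerospikeRecord:

import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class DualApiInputFormat extends org.apache.hadoop.mapreduce.InputFormat<LongWritable, Text>
        implements org.apache.hadoop.mapred.InputFormat<LongWritable, Text> {

    // New (mapreduce) API: one method for splits, one for readers.
    public List<org.apache.hadoop.mapreduce.InputSplit> getSplits(JobContext context) throws IOException {
        return Collections.emptyList(); // sketch only
    }

    public org.apache.hadoop.mapreduce.RecordReader<LongWritable, Text> createRecordReader(
            org.apache.hadoop.mapreduce.InputSplit split, TaskAttemptContext context) throws IOException {
        throw new UnsupportedOperationException("sketch only");
    }

    // Old (mapred) API: same duties, different types, so no clash.
    public org.apache.hadoop.mapred.InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
        return new org.apache.hadoop.mapred.InputSplit[0];
    }

    public org.apache.hadoop.mapred.RecordReader<LongWritable, Text> getRecordReader(
            org.apache.hadoop.mapred.InputSplit split, JobConf job, Reporter reporter) throws IOException {
        throw new UnsupportedOperationException("sketch only");
    }
}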

From source file com.aliyun.openservices.tablestore.hive.TableStoreInputFormat.java

public class TableStoreInputFormat implements InputFormat<PrimaryKeyWritable, RowWritable> {
    private static Logger logger = LoggerFactory.getLogger(TableStoreInputFormat.class);

    @Override
    public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
        Configuration dest = translateConfig(job);
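
The excerpt ends inside getSplits, right after translating the JobConf. The translation step itself is a common wrapper pattern; a minimal sketch of what a helper like translateConfig might do (the real Tablestore code may differ):

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.JobConf;

public final class ConfigTranslator {

    // Copy the JobConf entries into a fresh Configuration so the
    // underlying client sees a plain Configuration. Key renaming or
    // filtering, if any, would happen in this loop.
    static Configuration translateConfig(JobConf job) {
        Configuration dest = new Configuration(false);
        for (Map.Entry<String, String> entry : job) {
            dest.set(entry.getKey(), entry.getValue());
        }
        return dest;
    }
}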