List of usage examples for org.apache.hadoop.mapred JobConfigurable interface-usage
From source file org.commoncrawl.hadoop.io.deprecated.JetS3tARCSource.java
/**
* An {@link ARCSource} for gzipped ARC files stored on Amazon S3 that uses <a
* href="http://jets3t.s3.amazonaws.com/index.html">JetS3t</a> to interact with
* S3.
*
* @author Albert Chern
From source file org.commoncrawl.hadoop.io.deprecated.LocalARCSource.java
/**
 * An {@link ARCSource} for local files.
 *
 * @author Albert Chern
 */
public class LocalARCSource extends ARCSplitCalculator implements ARCSource, JobConfigurable {
From source file org.commoncrawl.hadoop.io.JetS3tARCSource.java
/**
* An {@link ARCSource} for gzipped ARC files stored on Amazon S3 that uses <a
* href="http://jets3t.s3.amazonaws.com/index.html">JetS3t</a> to interact with
* S3.
*
* @author Albert Chern
From source file org.commoncrawl.hadoop.io.LocalARCSource.java
/**
 * An {@link ARCSource} for local files.
 *
 * @author Albert Chern
 */
public class LocalARCSource extends ARCSplitCalculator implements ARCSource, JobConfigurable {
From source file org.dkpro.bigdata.io.hadoop.Text2CASInputFormat.java
/**
* Input format for generating CAS instances from <Text, Text> key/value pairs
*
* By default, the value of the <Text, Text> key/value lines in the input files
 * is used as CAS document text. See {@link #setDocumentTextExtractorClass} to
* change this behavior.
From source file org.example.mapred.PiInputFormat.java
/**
* NLineInputFormat which splits N lines of input as one split.
*
 * In many "pleasantly" parallel applications, each process/mapper
 * processes the same input file(s), but the computations are
 * controlled by different parameters. (Referred to as "parameter sweeps".)
From source file org.hypertable.hadoop.hive.HiveHTInputFormat.java
/**
 * HiveHTInputFormat implements InputFormat for Hypertable storage handler
 * tables, decorating an underlying Hypertable RowInputFormat with extra Hive
 * logic such as column pruning.
 */
public class HiveHTInputFormat<K extends BytesWritable, V extends Row>
From source file org.hypertable.hadoop.mapred.RowInputFormat.java
public class RowInputFormat implements org.apache.hadoop.mapred.InputFormat<BytesWritable, Row>, JobConfigurable {
    final Log LOG = LogFactory.getLog(InputFormat.class);
    public static final String NAMESPACE = "hypertable.mapreduce.input.namespace";
    public static final String TABLE = "hypertable.mapreduce.input.table";
From source file org.hypertable.hadoop.mapred.TextTableInputFormat.java
public class TextTableInputFormat implements org.apache.hadoop.mapred.InputFormat<Text, Text>, JobConfigurable {
    final Log LOG = LogFactory.getLog(InputFormat.class);
    public static final String NAMESPACE = "hypertable.mapreduce.namespace";
    public static final String INPUT_NAMESPACE = "hypertable.mapreduce.input.namespace";
From source file org.lwes.hadoop.io.JournalInputFormat.java
/**
 * An {@link InputFormat} for plain text files. Files are broken into lines.
 * Either linefeed or carriage-return are used to signal end of line. Keys are
 * the position in the file, and values are the line of text.
 */
public class JournalInputFormat extends FileInputFormat<LongWritable, EventWritable> implements JobConfigurable {
    JobConf conf;