Example usage for org.apache.hadoop.mapreduce OutputFormat: subclass usage


Introduction

On this page you can find example usages of org.apache.hadoop.mapreduce.OutputFormat, collected from projects that subclass it.
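
For orientation, the contract that every class below fulfils is small: a new-API subclass must hand out a RecordWriter, validate the output specification, and supply an OutputCommitter. The following is a minimal sketch, assuming a hypothetical NoOpOutputFormat that simply discards records; it is only meant to frame the excerpts that follow.

import java.io.IOException;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

/** Hypothetical skeleton: discards every record and commits nothing. */
public class NoOpOutputFormat extends OutputFormat<Text, NullWritable> {

    @Override
    public RecordWriter<Text, NullWritable> getRecordWriter(TaskAttemptContext context)
            throws IOException, InterruptedException {
        return new RecordWriter<Text, NullWritable>() {
            @Override
            public void write(Text key, NullWritable value) {
                // A real subclass would send the record to its destination here.
            }

            @Override
            public void close(TaskAttemptContext ctx) {
                // A real subclass would flush buffers and close connections here.
            }
        };
    }

    @Override
    public void checkOutputSpecs(JobContext context) {
        // Validate job configuration (e.g. required connection parameters).
    }

    @Override
    public OutputCommitter getOutputCommitter(TaskAttemptContext context)
            throws IOException, InterruptedException {
        // Borrow the no-op committer from the stock NullOutputFormat.
        return new NullOutputFormat<Text, NullWritable>().getOutputCommitter(context);
    }
}

Most of the classes listed below fill in exactly these three methods, replacing the write() body with JDBC, Solr, or Elasticsearch calls.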

Usage

From source file org.apache.sqoop.job.mr.SqoopNullOutputFormat.java

/**
 * An output format for a MapReduce job.
 */
public class SqoopNullOutputFormat extends OutputFormat<SqoopWritable, NullWritable> {

    public static final Logger LOG = Logger.getLogger(SqoopNullOutputFormat.class);
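
For comparison, stock Hadoop already ships an output format with the same "write nothing" behaviour, org.apache.hadoop.mapreduce.lib.output.NullOutputFormat. A minimal, hypothetical job setup using it might look like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class NullOutputJobSetup {
    public static void main(String[] args) throws Exception {
        // Sketch: a job whose useful work happens as a side effect,
        // so the framework output is simply discarded.
        Job job = Job.getInstance(new Configuration(), "side-effect-only-job");
        job.setOutputFormatClass(NullOutputFormat.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        // Mapper/reducer classes and the input path would be set here as usual.
    }
}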

From source file org.apache.sqoop.job.mr.SqoopOutputFormat.java

/**
 * An output format for a MapReduce job.
 */
public class SqoopOutputFormat extends OutputFormat<Text, NullWritable> {

    public static final Logger LOG = Logger.getLogger(SqoopOutputFormat.class);

From source file org.apache.sqoop.mapreduce.AsyncSqlOutputFormat.java

/**
 * Abstract OutputFormat class that allows the RecordWriter to buffer
 * up SQL commands which should be executed in a separate thread after
 * enough commands are created.
 *
 * This supports a configurable "spill threshold" at which
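
The buffering idea described in that excerpt can be sketched without Sqoop's internals: a RecordWriter collects statements and hands a batch to a background executor once a configurable threshold is reached. Everything below (class name, threshold handling, the use of the key as the SQL fragment) is a hypothetical illustration of the pattern, not the AsyncSqlOutputFormat code itself.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

/** Hypothetical sketch of a spill-threshold writer; not Sqoop's implementation. */
public class BufferingSqlRecordWriter extends RecordWriter<Text, NullWritable> {

    private final int spillThreshold;                 // e.g. read from a job configuration key
    private final List<String> buffer = new ArrayList<>();
    private final BlockingQueue<List<String>> toExecutor = new LinkedBlockingQueue<>();

    public BufferingSqlRecordWriter(int spillThreshold) {
        this.spillThreshold = spillThreshold;
        // A real implementation would start a background thread that drains
        // toExecutor and runs each batch as one SQL statement.
    }

    @Override
    public void write(Text key, NullWritable value) throws IOException {
        buffer.add(key.toString());                   // the key carries the SQL fragment here
        if (buffer.size() >= spillThreshold) {
            spill();
        }
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException {
        spill();                                      // flush whatever is left
    }

    private void spill() throws IOException {
        if (buffer.isEmpty()) {
            return;
        }
        try {
            toExecutor.put(new ArrayList<>(buffer));  // hand the batch to the executor thread
            buffer.clear();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IOException("Interrupted while spilling batch", e);
        }
    }
}

Executing the batches on a separate thread keeps the map or reduce task from blocking on slow database round trips, which is the motivation the comment above hints at.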

From source file org.apache.sqoop.mapreduce.db.DBOutputFormat.java

/**
 * An OutputFormat that sends the reduce output to a SQL table.
 * <p>
 * {@link DBOutputFormat} accepts &lt;key,value&gt; pairs, where
 * key has a type extending DBWritable. Returned {@link RecordWriter}
 * writes <b>only the key</b> to the database with a batch SQL query.
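
Job wiring for this family of classes usually goes through DBConfiguration plus the static setOutput helper. The sketch below uses Hadoop's own org.apache.hadoop.mapreduce.lib.db classes, which the Sqoop copy mirrors; the driver class, JDBC URL, credentials, table and column names are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.db.DBConfiguration;
import org.apache.hadoop.mapreduce.lib.db.DBOutputFormat;

public class DbExportJobSetup {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder driver/URL/credentials; a real job reads these from its configuration.
        DBConfiguration.configureDB(conf, "com.mysql.jdbc.Driver",
                "jdbc:mysql://localhost/exampledb", "user", "password");

        Job job = Job.getInstance(conf, "db-export");
        job.setOutputFormatClass(DBOutputFormat.class);
        // Only the key (a DBWritable) is written; one column name per field.
        DBOutputFormat.setOutput(job, "example_table", "id", "name", "value");
        // job.setOutputKeyClass(ExampleRecord.class);  // a DBWritable implementation (not shown)
        job.setOutputValueClass(NullWritable.class);
    }
}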

From source file org.apache.sqoop.mapreduce.DelegatingOutputFormat.java

/**
 * OutputFormat that produces a RecordWriter which instantiates
 * a FieldMapProcessor which will process FieldMappable
 * output keys.
 *
 * <p>The output value is ignored.</p>

From source file org.apache.sqoop.mapreduce.SQLServerResilientExportOutputFormat.java

/**
 * Insert the emitted keys as records into a database table.
 * Insert failures are handled by the registered Failure Handler class which
 * allows for recovering from certain failures like intermittent connection
 * or database throttling, etc.
 *
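
The recovery idea can be illustrated without SQL Server specifics: wrap each batch insert in a retry loop and ask a pluggable handler whether the failure is transient and worth a reconnect. The class and interface below are hypothetical sketches, not Sqoop's failure-handler API.

import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

/** Hypothetical sketch of failure-handler-driven retries; not the Sqoop class. */
public class ResilientBatchInserter {

    /** Decides whether a failure (lost connection, throttling, ...) is recoverable. */
    public interface FailureHandler {
        boolean canRecover(SQLException e);
        Connection reconnect() throws SQLException;
    }

    private final FailureHandler handler;
    private final int maxRetries;

    public ResilientBatchInserter(FailureHandler handler, int maxRetries) {
        this.handler = handler;
        this.maxRetries = maxRetries;
    }

    /** Runs one batch, retrying with a fresh connection when the handler allows it. */
    public void insertBatch(Connection conn, String insertSql) throws IOException {
        for (int attempt = 0; ; attempt++) {
            try {
                try (Statement stmt = conn.createStatement()) {
                    stmt.execute(insertSql);
                }
                return;                                   // success
            } catch (SQLException e) {
                if (attempt >= maxRetries || !handler.canRecover(e)) {
                    throw new IOException("Batch insert failed permanently", e);
                }
                try {
                    conn = handler.reconnect();           // transient failure: retry on a new connection
                } catch (SQLException re) {
                    throw new IOException("Could not re-establish connection", re);
                }
            }
        }
    }
}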

From source file org.bigsolr.hadoop.SolrOutputFormat.java

public class SolrOutputFormat extends OutputFormat implements org.apache.hadoop.mapred.OutputFormat {

    private static Logger log = Logger.getLogger(SolrOutputFormat.class);

    // New API
    @Override
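
The dual-API pattern used here (and by the elasticsearch-hadoop classes further down) is one class living in both hierarchies: extend the new-API abstract class and implement the old-API interface side by side. A raw-typed, hypothetical sketch of just the scaffolding:

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
import org.apache.hadoop.util.Progressable;

/** Hypothetical sketch of an OutputFormat that serves both MapReduce APIs. */
@SuppressWarnings({ "rawtypes", "unchecked" })
public class DualApiOutputFormat extends org.apache.hadoop.mapreduce.OutputFormat
        implements org.apache.hadoop.mapred.OutputFormat {

    // ---- new (mapreduce) API ----
    @Override
    public RecordWriter getRecordWriter(TaskAttemptContext context)
            throws IOException, InterruptedException {
        return new RecordWriter() {
            @Override public void write(Object key, Object value) { /* send record */ }
            @Override public void close(TaskAttemptContext ctx) { /* flush */ }
        };
    }

    @Override
    public void checkOutputSpecs(JobContext context) { /* validate configuration */ }

    @Override
    public OutputCommitter getOutputCommitter(TaskAttemptContext context)
            throws IOException, InterruptedException {
        return new NullOutputFormat().getOutputCommitter(context);
    }

    // ---- old (mapred) API ----
    @Override
    public org.apache.hadoop.mapred.RecordWriter getRecordWriter(FileSystem ignored,
            JobConf job, String name, Progressable progress) throws IOException {
        return new org.apache.hadoop.mapred.RecordWriter() {
            @Override public void write(Object key, Object value) { /* send record */ }
            @Override public void close(org.apache.hadoop.mapred.Reporter reporter) { /* flush */ }
        };
    }

    @Override
    public void checkOutputSpecs(FileSystem ignored, JobConf job) { /* validate configuration */ }
}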

From source file org.elasticsearch.hadoop.integration.mr.PrintStreamOutputFormat.java

public class PrintStreamOutputFormat extends org.apache.hadoop.mapreduce.OutputFormat implements OutputFormat {

    private Stream stream;

    private class PrintStreamRecordWriter extends org.apache.hadoop.mapreduce.RecordWriter implements RecordWriter {

From source file org.elasticsearch.hadoop.mr.EsOutputFormat.java

/**
 * ElasticSearch {@link OutputFormat} (old and new API) for adding data to an index inside ElasticSearch.
 */
@SuppressWarnings("rawtypes")
// since this class implements two generic interfaces, to avoid dealing with 4 types in every declaration, we force raw types...
public class EsOutputFormat extends OutputFormat implements org.apache.hadoop.mapred.OutputFormat {
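
A typical new-API job wiring for this class sets the connection and the target index through the configuration. The node address and index name below are placeholders, and the exact property set depends on the elasticsearch-hadoop version, so treat this as a sketch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.elasticsearch.hadoop.mr.EsOutputFormat;

public class EsExportJobSetup {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("es.nodes", "localhost:9200");        // placeholder cluster address
        conf.set("es.resource", "example-index/doc");  // placeholder target index/type

        Job job = Job.getInstance(conf, "es-export");
        job.setOutputFormatClass(EsOutputFormat.class);
        // EsOutputFormat ignores the key; the value carries the document.
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(MapWritable.class);
    }
}

Because the class also implements the old-API interface, the same format can be used from a JobConf-based job as well.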

From source file org.elasticsearch.hadoop.mr.MultiOutputFormat.java

public class MultiOutputFormat extends OutputFormat implements org.apache.hadoop.mapred.OutputFormat {

    private static class MultiNewRecordWriter extends RecordWriter {

        private final List<RecordWriter> writers;
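
The excerpt stops at the list of delegates, but the fan-out idea is easy to sketch: every call on the multi-writer is simply forwarded to each wrapped writer. The sketch below is hypothetical and raw-typed like the class above; it is not the elasticsearch-hadoop implementation.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

/** Hypothetical sketch of a fan-out writer that delegates to several writers. */
@SuppressWarnings({ "rawtypes", "unchecked" })
public class FanOutRecordWriter extends RecordWriter {

    private final List<RecordWriter> writers;

    public FanOutRecordWriter(List<RecordWriter> writers) {
        this.writers = writers;
    }

    @Override
    public void write(Object key, Object value) throws IOException, InterruptedException {
        for (RecordWriter writer : writers) {
            writer.write(key, value);      // forward every record to every delegate
        }
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException, InterruptedException {
        for (RecordWriter writer : writers) {
            writer.close(context);         // close each delegate, letting it flush
        }
    }
}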