Example usage for org.apache.hadoop.mapreduce OutputFormat: subclass usage

List of usage examples in which org.apache.hadoop.mapreduce OutputFormat is subclassed

Introduction

On this page you can find usage examples in which org.apache.hadoop.mapreduce.OutputFormat is subclassed, drawn from open source projects. Each entry shows the opening lines of the source file.
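
All of the classes listed below implement the same contract: org.apache.hadoop.mapreduce.OutputFormat declares three abstract methods, getRecordWriter, checkOutputSpecs, and getOutputCommitter. The following minimal sketch is hypothetical (a format that simply discards records, not taken from any of the projects below) and only illustrates the shape every subclass fills in:

import java.io.IOException;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

// Hypothetical example: discards every record, showing the three methods
// any OutputFormat subclass must implement.
public class DiscardingOutputFormat extends OutputFormat<NullWritable, Text> {

    @Override
    public RecordWriter<NullWritable, Text> getRecordWriter(TaskAttemptContext context)
            throws IOException, InterruptedException {
        // A real format would open its sink (file, table, index, ...) here.
        return new RecordWriter<NullWritable, Text>() {
            @Override
            public void write(NullWritable key, Text value) {
                // Drop the record; a real writer would persist it.
            }

            @Override
            public void close(TaskAttemptContext taskContext) {
            }
        };
    }

    @Override
    public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException {
        // Validate the job configuration before the job starts, for example
        // that a target table or output path has been set.
    }

    @Override
    public OutputCommitter getOutputCommitter(TaskAttemptContext context)
            throws IOException, InterruptedException {
        // Reuse the no-op committer from Hadoop's NullOutputFormat; file-based
        // formats usually return a FileOutputCommitter here.
        return new NullOutputFormat<NullWritable, Text>().getOutputCommitter(context);
    }
}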

Usage

From source file org.apache.hive.hcatalog.mapreduce.HCatBaseOutputFormat.java

public abstract class HCatBaseOutputFormat extends OutputFormat<WritableComparable<?>, HCatRecord> {

    /**
     * Gets the table schema for the table specified in the HCatOutputFormat.setOutput call
     * on the specified job context.
     * @param conf the Configuration object
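
The getTableSchema call documented above belongs to a common driver pattern: declare the target table with HCatOutputFormat.setOutput, then read the schema back and attach it to the job. A hedged sketch of that pattern follows; the database, table, and class names are placeholders, and method signatures vary slightly between HCatalog releases.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hive.hcatalog.data.schema.HCatSchema;
import org.apache.hive.hcatalog.mapreduce.HCatOutputFormat;
import org.apache.hive.hcatalog.mapreduce.OutputJobInfo;

public class HCatWriteDriver {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "write-to-hcatalog");
        job.setOutputFormatClass(HCatOutputFormat.class);

        // Declare the target table; the third argument holds partition values
        // and may be null for an unpartitioned table.
        HCatOutputFormat.setOutput(job, OutputJobInfo.create("default", "my_table", null));

        // Read back the table schema recorded by setOutput and attach it to the
        // job, so the HCatRecord values written by the tasks match the table layout.
        HCatSchema schema = HCatOutputFormat.getTableSchema(job.getConfiguration());
        HCatOutputFormat.setSchema(job, schema);

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}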

From source file org.apache.hive.hcatalog.mapreduce.MultiOutputFormat.java

/**
 * The MultiOutputFormat class simplifies writing output data to multiple
 * outputs.
 * <p>
 * Multiple output formats can be defined each with its own
 * <code>OutputFormat</code> class, own key class and own value class. Any
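
A hedged sketch of the configurer pattern this class provides: each named output gets its own format, key class, and value class, and tasks route records to an output by its alias. The helper names follow the class's documented configurer pattern, but exact signatures may differ across versions; the aliases and driver class name are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hive.hcatalog.mapreduce.MultiOutputFormat;
import org.apache.hive.hcatalog.mapreduce.MultiOutputFormat.JobConfigurer;

public class MultiOutputDriver {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "two-outputs");
        job.setOutputFormatClass(MultiOutputFormat.class);

        // Register one logical output per alias, each with its own format,
        // key class and value class.
        JobConfigurer configurer = MultiOutputFormat.createConfigurer(job);
        configurer.addOutputFormat("text", TextOutputFormat.class, LongWritable.class, Text.class);
        configurer.addOutputFormat("seq", SequenceFileOutputFormat.class, Text.class, IntWritable.class);
        // Per-output settings (such as an output path) are applied to the
        // per-alias job, e.g. configurer.getJob("text"), before configure().
        configurer.configure();

        // Inside a mapper or reducer, records are routed by alias:
        //   MultiOutputFormat.write("text", key, value, context);

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}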

From source file org.apache.hive.hcatalog.mapreduce.OutputFormatContainer.java

/**
 *  This container class is used to wrap OutputFormat implementations and augment them with
 *  behavior necessary to work with HCatalog (ie metastore updates, hcatalog delegation tokens, etc).
 *  Containers are also used to provide storage specific implementations of some HCatalog features (ie dynamic partitioning).
 *  Hence users wishing to create storage specific implementations of HCatalog features should implement this class and override
 *  HCatStorageHandler.getOutputFormatContainer(OutputFormat outputFormat) to return the implementation.
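
The container described above is essentially an OutputFormat that wraps another OutputFormat and adds behavior around it. As a generic illustration of that wrapping pattern (this is not HCatalog's actual container API, only the shape of a delegating format):

import java.io.IOException;

import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Illustrative only: forwards the OutputFormat contract to a wrapped format;
// this is where a container-style class would add behavior such as metastore
// updates or dynamic partitioning.
public class DelegatingOutputFormat<K, V> extends OutputFormat<K, V> {
    private final OutputFormat<K, V> wrapped;

    public DelegatingOutputFormat(OutputFormat<K, V> wrapped) {
        this.wrapped = wrapped;
    }

    @Override
    public RecordWriter<K, V> getRecordWriter(TaskAttemptContext context)
            throws IOException, InterruptedException {
        // Extra behavior would wrap the returned writer.
        return wrapped.getRecordWriter(context);
    }

    @Override
    public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException {
        wrapped.checkOutputSpecs(context);
    }

    @Override
    public OutputCommitter getOutputCommitter(TaskAttemptContext context)
            throws IOException, InterruptedException {
        // Containers typically wrap the committer as well.
        return wrapped.getOutputCommitter(context);
    }
}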

From source file org.apache.kudu.mapreduce.KuduTableOutputFormat.java

/**
 * <p>
 * Use {@link
 * KuduTableMapReduceUtil.TableOutputFormatConfigurator}
 * to correctly setup this output format, then {@link
 * KuduTableMapReduceUtil#getTableFromContext(org.apache.hadoop.mapreduce.TaskInputOutputContext)}
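
Putting those two utilities together, the driver configures the format up front and the tasks later recover the KuduTable from their context. A hedged sketch follows; the configurator's constructor arguments shown here (job, table name, master address) and the table name are assumptions, so check KuduTableMapReduceUtil for the exact API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.kudu.mapreduce.KuduTableMapReduceUtil;
import org.apache.kudu.mapreduce.KuduTableOutputFormat;

public class KuduWriteDriver {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "write-to-kudu");
        job.setOutputFormatClass(KuduTableOutputFormat.class);

        // Assumed arguments: the job, the target table name and the Kudu master
        // address.
        new KuduTableMapReduceUtil.TableOutputFormatConfigurator(
                job, "metrics", "kudu-master:7051")
                .configure();

        // Inside a map or reduce task, the configured table is recovered from
        // the context and rows are emitted as Kudu operations, for example:
        //   KuduTable table = KuduTableMapReduceUtil.getTableFromContext(context);
        //   Insert insert = table.newInsert();
        //   insert.getRow().addString("host", "example.org");
        //   context.write(NullWritable.get(), insert);

        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}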

From source file org.apache.nutch.indexer.IndexerOutputFormat.java

public class IndexerOutputFormat extends OutputFormat<String, NutchDocument> {

    @Override
    public RecordWriter<String, NutchDocument> getRecordWriter(TaskAttemptContext job)
            throws IOException, InterruptedException {

From source file org.apache.phoenix.mapreduce.PhoenixOutputFormat.java

/**
 * {@link OutputFormat} implementation for Phoenix.
 *
 */
public class PhoenixOutputFormat<T extends DBWritable> extends OutputFormat<NullWritable, T> {
    private static final Log LOG = LogFactory.getLog(PhoenixOutputFormat.class);
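
As the class declaration shows, the values handed to this format must implement DBWritable while the key is a NullWritable. A sketch of such a value class follows; the class name, columns, and types are hypothetical, and the driver is typically wired up through Phoenix's PhoenixMapReduceUtil helpers, whose exact signatures vary by release.

import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

import org.apache.hadoop.mapreduce.lib.db.DBWritable;

// Hypothetical value class: PhoenixOutputFormat writes values that implement
// DBWritable, binding each record to the upsert statement prepared for the job.
public class StockRecord implements DBWritable {
    private String ticker;
    private double price;

    public StockRecord() {
    }

    public StockRecord(String ticker, double price) {
        this.ticker = ticker;
        this.price = price;
    }

    @Override
    public void write(PreparedStatement statement) throws SQLException {
        // Parameter positions follow the column list configured for the job.
        statement.setString(1, ticker);
        statement.setDouble(2, price);
    }

    @Override
    public void readFields(ResultSet resultSet) throws SQLException {
        ticker = resultSet.getString(1);
        price = resultSet.getDouble(2);
    }
}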

From source file org.apache.phoenix.pig.hadoop.PhoenixOutputFormat.java

/**
 * {@link OutputFormat} implementation for Phoenix
 * 
 * 
 *
 */

From source file org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigOutputFormat.java

/**
 * The better half of PigInputFormat which is responsible
 * for the Store functionality. It is the exact mirror
 * image of PigInputFormat having RecordWriter instead
 * of a RecordReader.
 */

From source file org.apache.rya.accumulo.mr.RyaOutputFormat.java

/**
 * {@link OutputFormat} that uses Rya, the {@link GeoIndexer}, the
 * {@link FreeTextIndexer}, the {@link TemporalIndexer}, and the
 * {@link EntityCentricIndex} as the sink of triple data. This
 * OutputFormat ignores the Keys and only writes the Values to Rya.
 * <p>
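
Because the format persists only the values, a task writing to it needs a real value but only a placeholder key. A hedged fragment (statementWritable stands in for whichever Rya statement writable the job has built; it is not constructed here):

// Driver side: select the format.
job.setOutputFormatClass(RyaOutputFormat.class);

// Task side: the key is ignored, so NullWritable is enough; the value carries
// the triple to Rya and to the configured secondary indexers.
context.write(NullWritable.get(), statementWritable);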

From source file org.apache.sqoop.execution.spark.SqoopNullOutputFormatSpark.java

/**
 * An output format for MapReduce job.
 */
public class SqoopNullOutputFormatSpark extends OutputFormat<IntermediateDataFormat<?>, Integer> {

    public static final Logger LOG = Logger.getLogger(SqoopNullOutputFormatSpark.class);
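
Judging by its name, this class follows the same null-output pattern as Hadoop's built-in org.apache.hadoop.mapreduce.lib.output.NullOutputFormat: records handed to the framework are discarded because the real data movement happens elsewhere. For comparison, a job that wants that behavior without writing its own subclass can use the built-in class directly (driver class and job name below are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class SideEffectOnlyDriver {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "side-effect-only-job");
        // Output records are dropped; the job's useful work happens as a side
        // effect inside its tasks.
        job.setOutputFormatClass(NullOutputFormat.class);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}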