Example usage for org.apache.hadoop.mapreduce.lib.output FileOutputFormat subclass-usage

Introduction

On this page you can find usage examples for org.apache.hadoop.mapreduce.lib.output FileOutputFormat subclass-usage: each entry below is a class that extends FileOutputFormat.
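
All of the entries share the same basic pattern: extend FileOutputFormat and override getRecordWriter() to return a RecordWriter for the target file format. A minimal sketch of that pattern (the class name and record handling here are illustrative, not taken from any of the sources below):

import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class TextLineOutputFormat extends FileOutputFormat<NullWritable, Text> {

    @Override
    public RecordWriter<NullWritable, Text> getRecordWriter(TaskAttemptContext context)
            throws IOException, InterruptedException {
        // getDefaultWorkFile() builds a task-unique path under the job's output directory.
        Path file = getDefaultWorkFile(context, ".txt");
        FileSystem fs = file.getFileSystem(context.getConfiguration());
        DataOutputStream out = fs.create(file, false);
        return new RecordWriter<NullWritable, Text>() {
            @Override
            public void write(NullWritable key, Text value) throws IOException {
                // Write one record per line; the key carries no data here.
                out.write(value.getBytes(), 0, value.getLength());
                out.write('\n');
            }

            @Override
            public void close(TaskAttemptContext ctx) throws IOException {
                out.close();
            }
        };
    }
}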

Usage

From source file org.apache.mahout.hadoop.mapreduce.lib.AvroOutputFormat.java

/** An {@link OutputFormat} that writes Avro files. Only the key is
 * used in the output file. The value must be null. Non-null values trigger
 * a warning and are not written to the output file.
 *
 * The output key metadata must be set (e.g., by
 * SchemaBasedJobData.setOutputKeySchema()) to metadata accepted by an
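
For comparison, the upstream Avro mapreduce API does the equivalent key-schema registration with org.apache.avro.mapreduce.AvroJob; whether the forked SchemaBasedJobData behaves identically is an assumption. A driver-side sketch against the stock API:

import java.io.IOException;
import org.apache.avro.Schema;
import org.apache.avro.mapreduce.AvroJob;
import org.apache.avro.mapreduce.AvroKeyOutputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public static Job configureAvroOutput(Configuration conf) throws IOException {
    Job job = Job.getInstance(conf, "avro-output");
    // Register the key schema up front; the format writes keys only and
    // expects null values, as the javadoc above describes.
    AvroJob.setOutputKeySchema(job, Schema.create(Schema.Type.STRING));
    job.setOutputFormatClass(AvroKeyOutputFormat.class);
    return job;
}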

From source file org.apache.mnemonic.hadoop.mapreduce.MneOutputFormat.java

/**
 * A Mnemonic output format that satisfies the org.apache.hadoop.mapreduce API.
 */
public class MneOutputFormat<MV extends MneDurableOutputValue<?>> extends FileOutputFormat<NullWritable, MV> {

    @Override

From source file org.apache.orc.mapreduce.OrcOutputFormat.java

/**
 * An ORC output format that satisfies the org.apache.hadoop.mapreduce API.
 */
public class OrcOutputFormat<V extends Writable> extends FileOutputFormat<NullWritable, V> {
    private static final String EXTENSION = ".orc";
    // This is useful for unit tests or local runs where you don't need the
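
A driver-side sketch for this format (the schema string and method names below are my reading of orc-mapreduce, so treat OrcConf.MAPRED_OUTPUT_SCHEMA as the configuration hook as an assumption):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.orc.OrcConf;
import org.apache.orc.mapred.OrcStruct;
import org.apache.orc.mapreduce.OrcOutputFormat;

public static Job configureOrcOutput(Configuration conf, Path out) throws IOException {
    // The writer needs a schema; orc-mapreduce reads it from the configuration.
    OrcConf.MAPRED_OUTPUT_SCHEMA.setString(conf, "struct<name:string,count:int>");
    Job job = Job.getInstance(conf, "orc-output");
    job.setOutputFormatClass(OrcOutputFormat.class);
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(OrcStruct.class);
    FileOutputFormat.setOutputPath(job, out);
    return job;
}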

From source file org.apache.parquet.hadoop.ParquetOutputFormat.java

/**
 * OutputFormat to write to a Parquet file
 *
 * It requires a {@link WriteSupport} to convert the actual records to the underlying format.
 * It requires the schema of the incoming records (provided by the write support).
 * It allows storing extra metadata in the footer (for example: for schema compatibility purpose when converting from a different schema language).
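
Wiring up the write support happens on the driver. A minimal sketch, where MyWriteSupport is a hypothetical WriteSupport implementation and not something from the Parquet source:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.parquet.hadoop.ParquetOutputFormat;
import org.apache.parquet.hadoop.metadata.CompressionCodecName;

public static void configureParquetOutput(Job job, Path out) {
    job.setOutputFormatClass(ParquetOutputFormat.class);
    // MyWriteSupport is hypothetical: a WriteSupport that converts our records
    // to the underlying format and supplies the schema the javadoc above requires.
    ParquetOutputFormat.setWriteSupportClass(job, MyWriteSupport.class);
    ParquetOutputFormat.setCompression(job, CompressionCodecName.SNAPPY);
    FileOutputFormat.setOutputPath(job, out);
}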

From source file org.apache.phoenix.mapreduce.MultiHfileOutputFormat.java

/**
 * The MultiHfileOutputFormat class simplifies writing HFiles for multiple tables.
 * It has been adapted from {@link HFileOutputFormat2} but differs in that it creates
 * HFiles for multiple tables.
 */
public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {

From source file org.apache.pig.impl.io.BinStorageOutputFormat.java

/**
 *
 */
public class BinStorageOutputFormat extends FileOutputFormat<org.apache.hadoop.io.WritableComparable, Tuple> {

    /* (non-Javadoc)

From source file org.apache.pig.piggybank.storage.avro.PigAvroOutputFormat.java

/**
 * The OutputFormat for avro data.
 *
 */
public class PigAvroOutputFormat extends FileOutputFormat<NullWritable, Object> {

From source file org.apache.pig.piggybank.storage.hiverc.HiveRCOutputFormat.java

public class HiveRCOutputFormat extends FileOutputFormat<NullWritable, Writable> {

    private static final Logger LOG = LoggerFactory.getLogger(RCFileOutputFormat.class);

    public static String COMPRESSION_CODEC_CONF = "rcfile.output.compression.codec";
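
Judging from the constant above, the codec is presumably selected through the job configuration; a sketch under that assumption (how the format actually consumes the key is not shown in this excerpt):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.GzipCodec;

public static Configuration withRcFileCodec(Configuration conf) {
    // Assumption: the output format reads this key to pick its compression codec.
    conf.set(HiveRCOutputFormat.COMPRESSION_CODEC_CONF, GzipCodec.class.getName());
    return conf;
}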

From source file org.apache.solr.hadoop.ForkedTreeMergeOutputFormat.java

/**
 * See {@link IndexMergeTool}.
 */
public class ForkedTreeMergeOutputFormat extends FileOutputFormat<Text, NullWritable> {

    @Override

From source file org.apache.solr.hadoop.SolrOutputFormat.java

public class SolrOutputFormat<K, V> extends FileOutputFormat<K, V> {

    private static final Logger LOG = LoggerFactory.getLogger(SolrOutputFormat.class);

    /**
     * The parameter used to pass the solr config zip file information. This will