Example usage for org.apache.hadoop.mapreduce.lib.output FileOutputFormat subclass-usage

Introduction

On this page you can find usage examples for org.apache.hadoop.mapreduce.lib.output.FileOutputFormat, collected from source files that subclass it.
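
Most of the examples below follow the same pattern: extend FileOutputFormat and implement getRecordWriter(). The following is a minimal, self-contained sketch of that pattern; the class name and file extension are illustrative and are not taken from any of the listed source files.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

/** A minimal FileOutputFormat subclass that writes one line of text per record. */
public class SimpleTextOutputFormat extends FileOutputFormat<NullWritable, Text> {

    @Override
    public RecordWriter<NullWritable, Text> getRecordWriter(TaskAttemptContext context)
            throws IOException, InterruptedException {
        // Resolve the per-task work file (e.g. part-r-00000.txt) and open a stream to it.
        Configuration conf = context.getConfiguration();
        Path file = getDefaultWorkFile(context, ".txt");
        FileSystem fs = file.getFileSystem(conf);
        final FSDataOutputStream out = fs.create(file, false);

        return new RecordWriter<NullWritable, Text>() {
            @Override
            public void write(NullWritable key, Text value) throws IOException {
                // Write the value followed by a newline; the key is ignored.
                out.write(value.getBytes(), 0, value.getLength());
                out.write('\n');
            }

            @Override
            public void close(TaskAttemptContext ctx) throws IOException {
                out.close();
            }
        };
    }
}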

Usage

From source file com.splout.db.hadoop.TupleSQLite4JavaOutputFormat.java

/**
 * An OutputFormat that accepts Pangool's Tuples and writes to a sqlite4Java SQLite file. The Tuples that are written to
 * it must conform to a particular schema: having a "_partition" integer field (which will then create a file named
 * "partition".db) and be a {@link NullableTuple} so that nulls are accepted as normal SQL values.
 * <p>
 * The different schemas that will be given to this OutputFormat are defined in the constructor by providing a

From source file com.tomslabs.grid.avro.AvroFileOutputFormat.java

public class AvroFileOutputFormat<T> extends FileOutputFormat<T, Object> {

    private static final Logger LOGGER = LoggerFactory.getLogger(AvroFileOutputFormat.class);

    /**
     * When the map/reduce job outputs Avro records, the String representation of

From source file com.topsoft.botspider.avro.mapreduce.output.ExtFileOutputFormat.java

/**
 * A base class extending {@link FileOutputFormat} for output formats that write to
 * {@link FileSystem}s.
 */
public abstract class ExtFileOutputFormat<K, V> extends FileOutputFormat<K, V> {
    protected static final String BASE_OUTPUT_NAME = "mapreduce.output.basename";
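
The BASE_OUTPUT_NAME key above is the standard "mapreduce.output.basename" property that FileOutputFormat consults for the part-file prefix (default "part"). Below is a hedged driver-side sketch of setting it; the driver class and output path are illustrative and not part of the source file above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class BaseNameDriver {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "basename example");
        // Output files become data-r-00000 etc. instead of part-r-00000.
        job.getConfiguration().set("mapreduce.output.basename", "data");
        job.setOutputFormatClass(TextOutputFormat.class);
        FileOutputFormat.setOutputPath(job, new Path(args[0]));
        // ... set mapper, reducer, input path and key/value classes, then submit.
    }
}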

From source file com.x.hadoop.mr.bbs.GBKOutputFormat.java

/** An {@link OutputFormat} that writes plain text files. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class GBKOutputFormat<K, V> extends FileOutputFormat<K, V> {
    public static String SEPERATOR = "mapreduce.output.textoutputformat.separator";
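
Like TextOutputFormat, the class above reads its key/value separator from the "mapreduce.output.textoutputformat.separator" property. Below is a hedged sketch of a driver setting it; only the property key comes from the excerpt, the rest is illustrative.

import com.x.hadoop.mr.bbs.GBKOutputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class SeparatorDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Emit "key,value" lines instead of the default tab-separated output.
        conf.set("mapreduce.output.textoutputformat.separator", ",");
        Job job = Job.getInstance(conf, "separator example");
        job.setOutputFormatClass(GBKOutputFormat.class);
        FileOutputFormat.setOutputPath(job, new Path(args[0]));
        // ... configure mapper, reducer and input, then job.waitForCompletion(true).
    }
}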

From source file com.xiaomi.linden.hadoop.indexing.reduce.IndexUpdateOutputFormat.java

/**
 * The record writer of this output format simply puts a message in an output
 * path when a shard update is done.
 */
public class IndexUpdateOutputFormat extends FileOutputFormat<Shard, Text> {

From source file com.zjy.mongo.BSONFileOutputFormat.java

public class BSONFileOutputFormat<K, V> extends FileOutputFormat<K, V> {

    @Override
    public RecordWriter<K, V> getRecordWriter(final TaskAttemptContext context) throws IOException {
        // Open data output stream

From source file de.gesundkrank.wikipedia.hadoop.util.MapFileOutputFormat.java

public class MapFileOutputFormat extends FileOutputFormat<WritableComparable, Writable> {

    private static final NumberFormat NUMBER_FORMAT = NumberFormat.getInstance();
    static {
        NUMBER_FORMAT.setMinimumIntegerDigits(4);
        NUMBER_FORMAT.setGroupingUsed(false);
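
The static NumberFormat above is the usual idiom for zero-padded partition numbers in output file names. A small illustrative demo of what that configuration produces (the "part-" prefix is only an example):

import java.text.NumberFormat;

public class PartitionNameDemo {
    public static void main(String[] args) {
        NumberFormat fmt = NumberFormat.getInstance();
        fmt.setMinimumIntegerDigits(4);
        fmt.setGroupingUsed(false);
        // Typically prints "part-0007": at least four digits, no grouping separators.
        System.out.println("part-" + fmt.format(7));
    }
}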

From source file de.sec.dns.playground.ARFFOutputFormat.java

/** An {@link OutputFormat} that writes plain text files. */
public class ARFFOutputFormat<K, V> extends FileOutputFormat<K, V> {
    protected static class ARFFLineRecordWriter<K, V> extends RecordWriter<K, V> {

        private static final byte[] newline;

From source file de.tudarmstadt.ukp.dkpro.c4corpus.hadoop.io.WARCOutputFormat.java

/**
 * Hadoop OutputFormat for mapreduce jobs ('new' API) that want to write data to WARC files.
 * <br>
 * Usage:
 * <br>
 * ```java
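
The javadoc excerpt above breaks off at the opening of its usage snippet. A plausible driver-side usage, offered as an assumption rather than the original javadoc's example, is simply to register the format on the job:

import de.tudarmstadt.ukp.dkpro.c4corpus.hadoop.io.WARCOutputFormat;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class WarcJobDriver {
    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance();
        // Reducer output is written as WARC files by the subclass shown above.
        job.setOutputFormatClass(WARCOutputFormat.class);
        FileOutputFormat.setOutputPath(job, new Path(args[0]));
        // ... configure mapper, reducer, input and key/value types, then submit.
    }
}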

From source file dev.geminileft.outputformat.MyTextOutputFormat.java

/** An {@link OutputFormat} that writes plain text files. */
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MyTextOutputFormat<K, V> extends FileOutputFormat<K, V> {
    public static String SEPERATOR = "mapreduce.output.textoutputformat.separator";
    public static String DELIMITER = "mapreduce.output.textoutputformat.delimiter";