Example usage for org.apache.hadoop.mapreduce RecordWriter (subclass usage)

Introduction

This page collects examples of classes that subclass org.apache.hadoop.mapreduce.RecordWriter, excerpted from real-world source files. Each excerpt below is labeled with the file it was taken from.

Usage

From source file org.bigsolr.hadoop.SolrRecordWriter.java

public class SolrRecordWriter extends RecordWriter<NullWritable, Writable>
        implements org.apache.hadoop.mapred.RecordWriter<NullWritable, Writable> {

    private static final String SERVER_URL = "solr.server.url";
    private static final String SERVER_MODE = "solr.server.mode";
    private static final String COLLECTION_NAME = "solr.server.collection";
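
The excerpt stops before the writer logic, but the declaration already shows the interesting part: one class serving both MapReduce APIs. A minimal, hedged sketch of that dual-API shape follows; the in-memory buffer standing in for the Solr client is an assumption for illustration, not the original code.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Sketch: one writer class satisfying both MapReduce APIs. A single
// write(key, value) method serves both interfaces because the signatures
// match; only close() differs between the two contracts.
public class DualApiRecordWriter extends org.apache.hadoop.mapreduce.RecordWriter<NullWritable, Writable>
        implements org.apache.hadoop.mapred.RecordWriter<NullWritable, Writable> {

    // Stand-in for the Solr client (assumption, for illustration only).
    private final List<String> buffered = new ArrayList<String>();

    @Override
    public void write(NullWritable key, Writable value) throws IOException {
        buffered.add(value.toString());
    }

    @Override // old-API close (org.apache.hadoop.mapred)
    public void close(Reporter reporter) throws IOException {
        flush();
    }

    @Override // new-API close (org.apache.hadoop.mapreduce)
    public void close(TaskAttemptContext context) throws IOException {
        flush();
    }

    private void flush() {
        // A real implementation would send the batch to Solr and commit here.
        buffered.clear();
    }
}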

From source file org.broadinstitute.sting.gatk.hadoop.BAMRecordWriter.java

/**
 * A base {@link RecordWriter} for BAM records.
 * 
 * <p>
 * Handles the output stream, writing the header if requested, and provides the
 * {@link #writeAlignment} function for subclasses.</p>
 */
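
The javadoc describes a base class that owns the output stream, optionally writes the header, and exposes a writeAlignment hook for subclasses. Here is a self-contained sketch of that pattern, with Hadoop Text standing in for the BAM/SAM record types that the excerpt does not show.

import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Sketch of the abstract-base pattern: the base handles the stream and the
// optional header, subclasses implement write() and delegate to the
// protected writeAlignment() hook. Types are simplified placeholders.
public abstract class BaseAlignmentWriter<K> extends RecordWriter<K, Text> {

    protected final DataOutputStream out;

    protected BaseAlignmentWriter(DataOutputStream out, Text header, boolean writeHeader) throws IOException {
        this.out = out;
        if (writeHeader) {
            out.writeBytes(header.toString());
            out.writeByte('\n');
        }
    }

    // Subclasses call this from their write() implementation.
    protected void writeAlignment(Text alignment) throws IOException {
        out.writeBytes(alignment.toString());
        out.writeByte('\n');
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException {
        out.close();
    }
}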

From source file org.cloudgraph.mapreduce.GraphXmlRecordWriter.java

/**
 * 
 * @author Scott Cinnamond
 * @since 0.6.0
 */
public class GraphXmlRecordWriter extends RecordWriter<LongWritable, GraphWritable> {
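
Only the declaration survives the cut. As a rough idea of what an XML-emitting RecordWriter of this shape can look like, here is a hedged sketch that writes one pre-serialized XML document per record; Text stands in for GraphWritable, whose serialization is not shown.

import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Sketch: emit one XML document per record to a UTF-8 character stream.
public class XmlRecordWriter extends RecordWriter<LongWritable, Text> {

    private final Writer out;

    public XmlRecordWriter(OutputStream stream) {
        out = new OutputStreamWriter(stream, StandardCharsets.UTF_8);
    }

    @Override
    public void write(LongWritable key, Text xml) throws IOException {
        out.write(xml.toString());
        out.write('\n');
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException {
        out.close();
    }
}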

From source file org.diqube.hadoop.DiqubeRecordWriter.java

/**
 * A {@link RecordWriter} which writes to .diqube files which contain potentially multiple table shards.
 *
 * @author Bastian Gloeckle
 */
public class DiqubeRecordWriter extends RecordWriter<NullWritable, DiqubeRow> {
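
Packing several table shards into one output file suggests a buffer-then-flush pattern. Without the diqube writer API at hand, here is a hedged sketch of that general shape; the shard size and the line-oriented flush format are assumptions.

import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Sketch: rows accumulate in memory and are flushed as one "shard" when a
// threshold is reached, and once more on close.
public class BatchingRecordWriter extends RecordWriter<NullWritable, Text> {

    private static final int ROWS_PER_SHARD = 10000; // assumption

    private final List<String> buffer = new ArrayList<String>();
    private final DataOutputStream out;

    public BatchingRecordWriter(DataOutputStream out) {
        this.out = out;
    }

    @Override
    public void write(NullWritable key, Text row) throws IOException {
        buffer.add(row.toString());
        if (buffer.size() >= ROWS_PER_SHARD) {
            flushShard();
        }
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException {
        if (!buffer.isEmpty()) {
            flushShard();
        }
        out.close();
    }

    private void flushShard() throws IOException {
        for (String row : buffer) {
            out.writeBytes(row);
            out.writeByte('\n');
        }
        buffer.clear();
    }
}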

From source file org.gbif.ocurrence.index.solr.SolrRecordWriter.java

/**
 * Instantiate a record writer that will build a Solr index.
 * 
 * A zip file containing the solr config and additional libraries is expected to
 * be passed via the distributed cache. The incoming written records are
 * converted via the specified document converter, and written to the index in
 * batches.
 */
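
The writer expects its Solr config zip on the distributed cache, which the job driver has to arrange. A hedged sketch of that driver-side setup follows; the archive path and job wiring are placeholders, not the gbif project's actual values.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class SolrIndexDriver {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "solr-indexing");
        job.setJarByClass(SolrIndexDriver.class);
        // Ship the Solr config + extra libraries to every task (path is a placeholder).
        job.addCacheArchive(new URI("hdfs:///apps/solr/solr-config.zip"));
        // ... set mapper, output format, and input/output paths here ...
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}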

From source file org.gpfvic.mahout.common.DummyRecordWriter.java

public final class DummyRecordWriter<K, V> extends RecordWriter<K, V> {

    private final Map<K, List<V>> data = new TreeMap<K, List<V>>();

    @Override
    public void write(K key, V value) {
        // Buffer everything in memory, keyed for later inspection in tests.
        List<V> points = data.get(key);
        if (points == null) {
            points = new ArrayList<V>();
            data.put(key, points);
        }
        points.add(value);
    }
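
A writer like this makes a convenient test double: code under test writes through the RecordWriter interface, and assertions read the captured map back. A usage sketch follows; the getData() accessor is assumed for illustration, since the excerpt is cut before any accessors.

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;

public class DummyRecordWriterDemo {
    public static void main(String[] args) {
        DummyRecordWriter<Text, IntWritable> writer = new DummyRecordWriter<Text, IntWritable>();
        writer.write(new Text("clicks"), new IntWritable(1));
        writer.write(new Text("clicks"), new IntWritable(2));
        // In a test you would now assert on the captured output, e.g.
        // (getData() is a hypothetical accessor for the buffered map):
        // assertEquals(2, writer.getData().get(new Text("clicks")).size());
    }
}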

From source file org.kiji.avro.mapreduce.AvroKeyRecordWriter.java

/**
 * Writes Avro records to an Avro container file output stream.
 *
 * @param <T> The Java type of the Avro data to write.
 */
public class AvroKeyRecordWriter<T> extends RecordWriter<AvroKey<T>, NullWritable> {
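
As a rough idea of what such a writer wraps, here is a hedged sketch built directly on Avro's DataFileWriter. The compression codec and sync-marker handling of the real class are omitted, and the class name is invented.

import java.io.IOException;
import java.io.OutputStream;

import org.apache.avro.Schema;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.mapred.AvroKey;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Sketch: append each key's datum to an Avro container file.
public class SimpleAvroKeyWriter<T> extends RecordWriter<AvroKey<T>, NullWritable> {

    private final DataFileWriter<T> fileWriter;

    public SimpleAvroKeyWriter(Schema schema, OutputStream out) throws IOException {
        fileWriter = new DataFileWriter<T>(new GenericDatumWriter<T>(schema));
        fileWriter.create(schema, out);
    }

    @Override
    public void write(AvroKey<T> key, NullWritable ignored) throws IOException {
        fileWriter.append(key.datum());
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException {
        fileWriter.close();
    }
}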

From source file org.kiji.avro.mapreduce.AvroKeyValueRecordWriter.java

/**
 * Writes key/value pairs to an Avro container file.
 *
 * <p>Each entry in the Avro container file will be a generic record with two fields,
 * named 'key' and 'value'.  The input types may be basic Writable objects like Text or
 * IntWritable, or they may be AvroWrapper subclasses (AvroKey or AvroValue).  Writable
 * objects will be converted to their corresponding Avro types when written to the
 * generic record key/value pair.
 */
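
To make the 'key'/'value' record structure concrete, the following small example builds such a pair schema and one entry with plain Avro APIs; the field types (string key, int value) are illustrative choices, not mandated by the class.

import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;

public class KeyValuePairDemo {
    public static void main(String[] args) {
        // The two-field record shape described above: 'key' and 'value'.
        Schema pairSchema = SchemaBuilder.record("KeyValuePair")
                .fields()
                .requiredString("key")
                .requiredInt("value")
                .endRecord();

        GenericRecord entry = new GenericData.Record(pairSchema);
        entry.put("key", "clicks");
        entry.put("value", 42);
        System.out.println(entry); // {"key": "clicks", "value": 42}
    }
}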

From source file org.msgpack.hadoop.mapreduce.output.MessagePackRecordWriter.java

public class MessagePackRecordWriter extends RecordWriter<NullWritable, MessagePackWritable> {
    protected final DataOutputStream out_;

    public MessagePackRecordWriter(DataOutputStream out) {
        out_ = out;
    }
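
The excerpt ends after the constructor. Since MessagePackWritable is a Hadoop Writable, the remaining methods can plausibly delegate serialization to the value itself; this self-contained, generic stand-in is an assumption, not the project's actual code.

import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Hedged completion of the shape above for any Writable value type.
public class WritableStreamRecordWriter<V extends Writable> extends RecordWriter<NullWritable, V> {

    protected final DataOutputStream out_;

    public WritableStreamRecordWriter(DataOutputStream out) {
        out_ = out;
    }

    @Override
    public void write(NullWritable key, V value) throws IOException {
        value.write(out_); // the Writable serializes itself to the stream
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException {
        out_.close();
    }
}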

From source file org.ojai.json.mapreduce.JSONFileOutputRecordWriter.java

public class JSONFileOutputRecordWriter extends RecordWriter<LongWritable, Document> {

    private JsonDocumentBuilder writer;
    private final OutputStream out;

    public JSONFileOutputRecordWriter(OutputStream fileOut) {
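
The excerpt breaks off inside the constructor, before the JsonDocumentBuilder setup. For the general shape, here is a hedged JSON-lines sketch that relies on each value rendering itself as JSON via toString(); a simplification, not the OJAI implementation.

import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Sketch: one JSON document per line, written to a UTF-8 character stream.
public class JsonLinesRecordWriter<V> extends RecordWriter<LongWritable, V> {

    private final Writer out;

    public JsonLinesRecordWriter(OutputStream fileOut) {
        out = new OutputStreamWriter(fileOut, StandardCharsets.UTF_8);
    }

    @Override
    public void write(LongWritable key, V value) throws IOException {
        out.write(value.toString()); // assumes the value renders itself as JSON
        out.write('\n');
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException {
        out.close();
    }
}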