Example usage for org.apache.hadoop.mapreduce RecordWriter subclasses

Introduction

This page collects examples of subclassing org.apache.hadoop.mapreduce.RecordWriter. Each entry below names the source file and shows the opening lines of the subclass definition.
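
Before the individual examples, it may help to see the pattern they all follow: a subclass implements write(K, V) to emit one key/value pair and close(TaskAttemptContext) to flush and release resources. The sketch below is illustrative only and is not taken from any of the listed projects.

import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Illustrative only: a minimal RecordWriter that writes "key<TAB>value" lines
// to a stream handed over by the owning OutputFormat.
public class TabSeparatedRecordWriter extends RecordWriter<Text, Text> {

    private final DataOutputStream out;

    public TabSeparatedRecordWriter(DataOutputStream out) {
        this.out = out;
    }

    @Override
    public void write(Text key, Text value) throws IOException {
        // Only the valid prefix of each Text's backing array is written.
        out.write(key.getBytes(), 0, key.getLength());
        out.writeByte('\t');
        out.write(value.getBytes(), 0, value.getLength());
        out.writeByte('\n');
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException {
        out.close(); // called once per task by the framework
    }
}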

Usage

From source file com.bigfishgames.biginsights.upsight.mapreduce.MyAvroKeyRecordWriter.java

/**
 * Writes Avro records to an Avro container file output stream.
 *
 * @param <T> The Java type of the Avro data to write.
 */
public class MyAvroKeyRecordWriter<T> extends RecordWriter<AvroKey<T>, NullWritable> implements Syncable {
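
The excerpt above only shows the declaration; such a writer typically wraps Avro's DataFileWriter. The following sketch is an assumption about the general shape, not the project's actual code:

import java.io.IOException;
import java.io.OutputStream;

import org.apache.avro.Schema;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.mapred.AvroKey;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Sketch of an Avro container-file writer; not the BigInsights implementation.
public class AvroContainerRecordWriter<T> extends RecordWriter<AvroKey<T>, NullWritable> {

    private final DataFileWriter<T> fileWriter;

    public AvroContainerRecordWriter(Schema writerSchema, OutputStream out) throws IOException {
        // GenericDatumWriter serializes each datum according to the writer schema.
        this.fileWriter = new DataFileWriter<T>(new GenericDatumWriter<T>(writerSchema))
                .create(writerSchema, out);
    }

    @Override
    public void write(AvroKey<T> key, NullWritable value) throws IOException {
        fileWriter.append(key.datum()); // the NullWritable value carries no data
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException {
        fileWriter.close(); // writes the final block and closes the underlying stream
    }
}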

From source file com.blackberry.logdriver.mapreduce.BinaryRecordWriter.java

public class BinaryRecordWriter extends RecordWriter<BytesWritable, NullWritable> {
    private static final Logger LOG = LoggerFactory.getLogger(BinaryRecordWriter.class);

    private FSDataOutputStream out;

    /**

From source file com.blackberry.logdriver.mapreduce.boom.PigBoomHourlyRecordWriter.java

public class PigBoomHourlyRecordWriter extends RecordWriter<Tuple, NullWritable> {

    private int index = 0;

    private Path path;
    private FSDataOutputStream out;

From source file com.blackberry.logdriver.mapreduce.boom.PigReBoomRecordWriter.java

public class PigReBoomRecordWriter extends RecordWriter<Tuple, NullWritable> {

    private Path path;
    private FSDataOutputStream out;
    private ReBoomWriter writer;

From source file com.buzzinate.dm.cassandra.ColumnFamilyRecordWriter.java

/**
 * The <code>ColumnFamilyRecordWriter</code> maps the output &lt;key, value&gt;
 * pairs to a Cassandra column family. In particular, it applies all mutations
 * in the value, which it associates with the key, and in turn the responsible
 * endpoint.
 *
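
The excerpt is cut off before the class body; the sketch below only restates the flow the javadoc describes, using placeholder types (Mutation and CassandraClient are stand-ins so the sketch compiles, not real Cassandra APIs):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;

import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Placeholder types (not real Cassandra APIs) so the sketch is self-contained.
interface Mutation { }
interface CassandraClient {
    void batchMutate(ByteBuffer key, List<Mutation> mutations) throws IOException;
    void close() throws IOException;
}

// Hypothetical sketch: the key is a row key and the value is the list of
// mutations to apply under that key on the responsible endpoint.
public class ColumnFamilyRecordWriterSketch extends RecordWriter<ByteBuffer, List<Mutation>> {

    private final CassandraClient client;

    public ColumnFamilyRecordWriterSketch(CassandraClient client) {
        this.client = client;
    }

    @Override
    public void write(ByteBuffer key, List<Mutation> mutations) throws IOException {
        // Apply every mutation in the value, associated with the given key.
        client.batchMutate(key, mutations);
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException {
        client.close(); // send any buffered batches, then disconnect
    }
}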

From source file com.datasalt.pangool.solr.SolrRecordWriter.java

/**
 * Instantiate a record writer that will build a Solr index.
 * 
 * A zip file containing the solr config and additional libraries is expected to be passed via the distributed cache.
 * The incoming written records are converted via the specified document converter, and written to the index in batches.
 * When the job is done, the close copies the index to the destination output file system.
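
A hedged sketch of that flow (buffering converted documents and copying the finished index on close) follows. DocumentConverter, the batch size, and the directory handling are assumptions rather than Pangool's real types, and only generic SolrJ calls (add, commit, shutdown) are used:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.common.SolrInputDocument;

// Illustrative sketch of the described flow; not Pangool's actual SolrRecordWriter.
public class SolrIndexRecordWriterSketch extends RecordWriter<Text, Text> {

    // Placeholder for the "specified document converter" mentioned above.
    public interface DocumentConverter {
        SolrInputDocument convert(Text key, Text value);
    }

    private static final int BATCH_SIZE = 1000;      // assumed batch size

    private final SolrServer solr;                   // embedded core building a local index
    private final DocumentConverter converter;
    private final Path localIndexDir;                // where the embedded core writes
    private final Path outputDir;                    // final destination on the job's file system
    private final List<SolrInputDocument> batch = new ArrayList<SolrInputDocument>();

    public SolrIndexRecordWriterSketch(SolrServer solr, DocumentConverter converter,
            Path localIndexDir, Path outputDir) {
        this.solr = solr;
        this.converter = converter;
        this.localIndexDir = localIndexDir;
        this.outputDir = outputDir;
    }

    @Override
    public void write(Text key, Text value) throws IOException {
        batch.add(converter.convert(key, value));
        if (batch.size() >= BATCH_SIZE) {
            flush();
        }
    }

    private void flush() throws IOException {
        if (batch.isEmpty()) {
            return;
        }
        try {
            solr.add(batch);                         // index the buffered documents
            batch.clear();
        } catch (Exception e) {
            throw new IOException(e);
        }
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException {
        try {
            flush();
            solr.commit();                           // make all segments durable
            solr.shutdown();
        } catch (Exception e) {
            throw new IOException(e);
        }
        // Copy the locally built index to the destination output file system.
        FileSystem fs = outputDir.getFileSystem(context.getConfiguration());
        fs.copyFromLocalFile(localIndexDir, outputDir);
    }
}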

From source file com.facebook.hiveio.output.RecordWriterImpl.java

/**
 * RecordWriter for Hive
 */
class RecordWriterImpl extends RecordWriter<WritableComparable, HiveWritableRecord> {
    /** Logger */
    private static final Logger LOG = LoggerFactory.getLogger(RecordWriterImpl.class);

From source file com.ikanow.aleph2.analytics.hadoop.assets.BeFileOutputWriter.java

/** Output Writer specific to batch enrichment
 *  (Doesn't currently do anything, all the outputting occurs via the context)
 * @author jfreydank
 */
public class BeFileOutputWriter extends RecordWriter<String, Tuple2<Long, IBatchRecord>> {
    static final Logger _logger = LogManager.getLogger(BeFileOutputWriter.class);
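
Given that description, the writer body is essentially the no-op contract; a generic sketch of such a pass-through writer (not the Aleph2 class itself) is:

import java.io.IOException;

import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Illustrative only: a RecordWriter that deliberately ignores its input because
// the job emits its real output through another channel (here, the enrichment context).
public class NoOpRecordWriter<K, V> extends RecordWriter<K, V> {

    @Override
    public void write(K key, V value) throws IOException {
        // intentionally empty: output happens outside the RecordWriter
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException {
        // nothing to flush or release
    }
}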

From source file com.ikanow.aleph2.analytics.r.assets.BeFileOutputWriter.java

/** Output Writer specific to batch enrichment
 *  (Doesn't currently do anything, all the outputting occurs via the context)
 * @author jfreydank
 */
public class BeFileOutputWriter extends RecordWriter<String, Tuple2<Long, IBatchRecord>> {
    static final Logger _logger = LogManager.getLogger(BeFileOutputWriter.class);

From source file com.jumptap.h2redis.RedisHMRecordWriter.java

public class RedisHMRecordWriter extends RecordWriter<Text, Text> {
    private JedisPool pool;
    private final String lastUpdateKey;
    private final int ttl;
    private final KeyMaker keyMaker;