Example usage for org.apache.hadoop.mapreduce RecordWriter (subclass usage)

Introduction

On this page you will find usage examples for org.apache.hadoop.mapreduce RecordWriter, collected from open-source projects that subclass it. Each entry shows the opening lines of one such source file.
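
Before the examples, it helps to recall the contract being implemented: a new-API RecordWriter subclass overrides exactly two methods, write(K, V) and close(TaskAttemptContext). A minimal sketch, assuming a tab-separated text format (the class name and format are illustrative, not taken from any project below):

import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

/** Illustrative only: writes each key/value pair as one tab-separated line. */
public class TabSeparatedRecordWriter extends RecordWriter<Text, Text> {
    private final DataOutputStream out;

    public TabSeparatedRecordWriter(DataOutputStream out) {
        this.out = out;
    }

    @Override
    public void write(Text key, Text value) throws IOException {
        // Called once per record emitted by the task.
        out.write(key.getBytes(), 0, key.getLength());
        out.writeByte('\t');
        out.write(value.getBytes(), 0, value.getLength());
        out.writeByte('\n');
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException {
        // Flush and release resources when the task finishes.
        out.close();
    }
}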

Usage

From source file com.tuplejump.calliope.hadoop.BulkRecordWriter.java

final class BulkRecordWriter extends RecordWriter<ByteBuffer, List<Mutation>>
        implements org.apache.hadoop.mapred.RecordWriter<ByteBuffer, List<Mutation>> {
    private final static String OUTPUT_LOCATION = "mapreduce.output.bulkoutputformat.localdir";
    private final static String BUFFER_SIZE_IN_MB = "mapreduce.output.bulkoutputformat.buffersize";
    private final static String STREAM_THROTTLE_MBITS = "mapreduce.output.bulkoutputformat.streamthrottlembits";
    private final static String MAX_FAILED_HOSTS = "mapreduce.output.bulkoutputformat.maxfailedhosts";
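
The constants above name the configuration keys this writer reads back at runtime. A sketch of how such keys are typically consumed from a Hadoop Configuration (the default values here are invented for illustration):

import org.apache.hadoop.conf.Configuration;

class BulkOutputSettings {
    // Defaults are illustrative; the real BulkRecordWriter defines its own.
    static void read(Configuration conf) {
        String localDir = conf.get("mapreduce.output.bulkoutputformat.localdir");
        int bufferSizeMb = conf.getInt("mapreduce.output.bulkoutputformat.buffersize", 64);
        int throttleMbits = conf.getInt("mapreduce.output.bulkoutputformat.streamthrottlembits", 0);
        int maxFailedHosts = conf.getInt("mapreduce.output.bulkoutputformat.maxfailedhosts", 0);
        System.out.printf("dir=%s buffer=%dMB throttle=%dMbit/s maxFailedHosts=%d%n",
                localDir, bufferSizeMb, throttleMbits, maxFailedHosts);
    }
}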

From source file com.vertica.hadoop.VerticaRecordWriter.java

public class VerticaRecordWriter extends RecordWriter<Text, VerticaRecord> {
    private static final Log LOG = LogFactory.getLog("com.vertica.hadoop");

    Relation vTable = null;
    String schemaName = null;
    Connection connection = null;
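
The Connection field above is the giveaway: this is the usual JDBC-backed writer shape. A hedged sketch of that shape, not the actual Vertica implementation (the table layout and column names are invented):

import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

/** Illustrative JDBC-backed writer; not the real VerticaRecordWriter. */
public class JdbcRecordWriter extends RecordWriter<Text, Text> {
    private final Connection connection;
    private final PreparedStatement statement;

    public JdbcRecordWriter(Connection connection, String table) throws SQLException {
        this.connection = connection;
        // One parameterized INSERT reused for every record.
        this.statement = connection.prepareStatement(
                "INSERT INTO " + table + " (k, v) VALUES (?, ?)");
    }

    @Override
    public void write(Text key, Text value) throws IOException {
        try {
            statement.setString(1, key.toString());
            statement.setString(2, value.toString());
            statement.executeUpdate();
        } catch (SQLException e) {
            throw new IOException(e);
        }
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException {
        try {
            statement.close();
            connection.close();
        } catch (SQLException e) {
            throw new IOException(e);
        }
    }
}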

From source file com.yahoo.glimmer.indexing.generator.IndexRecordWriter.java

public class IndexRecordWriter extends RecordWriter<IntWritable, IndexRecordWriterValue> {
    private static final Log LOG = LogFactory.getLog(IndexRecordWriter.class);
    private Map<Integer, IndexWrapper> indices = new HashMap<Integer, IndexWrapper>();

    public IndexRecordWriter(FileSystem fs, Path taskWorkPath, long numberOfDocs,
            RDFDocumentFactory.IndexType indexType, String hashValuePrefix, int indexWriterCacheSize,

From source file com.yahoo.glimmer.indexing.preprocessor.ResourceRecordWriter.java

/**
 * Writes to different output files depending on the contents of the value.
 * 
 * @author tep
 * 
 */
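
The javadoc describes a writer that picks an output file based on the value's contents. A minimal sketch of that routing pattern, assuming two destination streams (the "PREDICATE:" prefix used to split them is invented):

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

/** Illustrative only: picks an output stream by inspecting the value. */
public class RoutingRecordWriter extends RecordWriter<Text, Text> {
    private final FSDataOutputStream matched;
    private final FSDataOutputStream everythingElse;

    public RoutingRecordWriter(FSDataOutputStream matched, FSDataOutputStream everythingElse) {
        this.matched = matched;
        this.everythingElse = everythingElse;
    }

    @Override
    public void write(Text key, Text value) throws IOException {
        // Route on the value's content; the prefix check is made up for illustration.
        FSDataOutputStream out =
                value.toString().startsWith("PREDICATE:") ? matched : everythingElse;
        out.write((key + "\t" + value + "\n").getBytes(StandardCharsets.UTF_8));
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException {
        matched.close();
        everythingElse.close();
    }
}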

From source file com.zjy.mongo.output.BSONFileRecordWriter.java

public class BSONFileRecordWriter<K, V> extends RecordWriter<K, V> {

    private BSONEncoder bsonEnc = new BasicBSONEncoder();
    private FSDataOutputStream outFile = null;
    private FSDataOutputStream splitsFile = null;
    private long bytesWritten = 0L;
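
The fields suggest the write path: encode each document with the BSONEncoder, append the bytes to outFile, and keep a running byte count. A sketch of that step using the org.bson API, not the actual BSONFileRecordWriter code:

import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.bson.BSONObject;
import org.bson.BasicBSONEncoder;

class BsonAppendSketch {
    private final BasicBSONEncoder bsonEnc = new BasicBSONEncoder();
    private long bytesWritten = 0L;

    /** Encode one document and append it to an already-open output file. */
    long append(FSDataOutputStream outFile, BSONObject doc) throws IOException {
        byte[] encoded = bsonEnc.encode(doc); // one whole document in BSON wire format
        outFile.write(encoded);
        bytesWritten += encoded.length;
        return bytesWritten;
    }
}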

From source file com.zjy.mongo.output.MongoRecordWriter.java

public class MongoRecordWriter<K, V> extends RecordWriter<K, V> {

    private static final Log LOG = LogFactory.getLog(MongoRecordWriter.class);
    private final List<DBCollection> collections;
    private final TaskAttemptContext context;
    private final BSONWritable bsonWritable;
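
Given a list of target DBCollection instances, the natural write path saves each record into every collection. A sketch with the legacy MongoDB Java driver, not the real MongoRecordWriter.write (the document layout is invented):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.io.Text;

import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.MongoException;

class MongoWriteSketch {
    /** Persist one key/value pair into each target collection. */
    static void writePair(List<DBCollection> collections, Text key, Text value)
            throws IOException {
        BasicDBObject doc = new BasicDBObject("_id", key.toString())
                .append("value", value.toString());
        try {
            for (DBCollection collection : collections) {
                collection.save(doc); // save() upserts by _id in the legacy driver
            }
        } catch (MongoException e) {
            throw new IOException(e);
        }
    }
}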

From source file datafu.hourglass.avro.AvroKeyValueWithMetadataRecordWriter.java

/**
 * Writes key/value pairs to an Avro container file.
 *
 * <p>Each entry in the Avro container file will be a generic record with two fields,
 * named 'key' and 'value'.  The input types may be basic Writable objects like Text or
 * IntWritable, or they may be AvroWrapper subclasses (AvroKey or AvroValue).  Writable
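
The two-field layout the javadoc describes can be reproduced with plain Avro generic records. A sketch (the schema name and field types are illustrative):

import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericRecord;

class KeyValueRecordSketch {
    /** Build one entry of the kind the container file holds: a record with 'key' and 'value'. */
    static GenericRecord wrap(String key, int value) {
        Schema schema = SchemaBuilder.record("KeyValuePair")
                .fields()
                .requiredString("key")
                .requiredInt("value")
                .endRecord();
        GenericRecord pair = new GenericData.Record(schema);
        pair.put("key", key);
        pair.put("value", value);
        return pair;
    }
}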

From source file datafu.hourglass.avro.AvroKeyWithMetadataRecordWriter.java

/**
 * Writes Avro records to an Avro container file output stream.
 *
 * @param <T> The Java type of the Avro data to write.
 */
public class AvroKeyWithMetadataRecordWriter<T> extends RecordWriter<AvroKey<T>, NullWritable> {
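
Under the hood, an Avro container-file writer of this kind boils down to core Avro APIs. A sketch with DataFileWriter, not the datafu class itself:

import java.io.File;
import java.io.IOException;

import org.apache.avro.Schema;
import org.apache.avro.file.DataFileWriter;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;

class AvroContainerSketch {
    /** Open a container file; records then go in via append(), and close() finalizes it. */
    static DataFileWriter<GenericRecord> open(Schema schema, File file) throws IOException {
        DataFileWriter<GenericRecord> writer =
                new DataFileWriter<>(new GenericDatumWriter<GenericRecord>(schema));
        return writer.create(schema, file); // writes the container header and schema
    }
}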

From source file de.hpi.isg.mdms.hadoop.cassandra.AbstractBulkRecordWriter.java

public abstract class AbstractBulkRecordWriter<K, V> extends org.apache.hadoop.mapreduce.RecordWriter<K, V> {

    public final static String OUTPUT_LOCATION = "mapreduce.output.bulkoutputformat.localdir";
    public final static String BUFFER_SIZE_IN_MB = "mapreduce.output.bulkoutputformat.buffersize";
    public final static String STREAM_THROTTLE_MBITS = "mapreduce.output.bulkoutputformat.streamthrottlembits";
    public final static String MAX_FAILED_HOSTS = "mapreduce.output.bulkoutputformat.maxfailedhosts";
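
These public constants are meant to be set on the job before submission. A sketch of that setup (the directory and all numeric values are invented):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

class BulkJobSetup {
    static Job configure() throws IOException {
        Job job = Job.getInstance(new Configuration(), "bulk-load"); // job name is illustrative
        Configuration conf = job.getConfiguration();
        conf.set("mapreduce.output.bulkoutputformat.localdir", "/tmp/bulk-staging"); // hypothetical dir
        conf.setInt("mapreduce.output.bulkoutputformat.buffersize", 64);             // MB, illustrative
        conf.setInt("mapreduce.output.bulkoutputformat.streamthrottlembits", 200);   // illustrative
        conf.setInt("mapreduce.output.bulkoutputformat.maxfailedhosts", 1);          // illustrative
        return job;
    }
}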

From source file de.hpi.isg.mdms.hadoop.cassandra.CqlFlinkRecordWriter.java

final class CqlFlinkRecordWriter extends RecordWriter<String, ArrayList<Object>>
        implements org.apache.hadoop.mapred.RecordWriter<String, ArrayList<Object>> {

    private CqlBulkRecordWriter bulkRecordWriter;

    public CqlFlinkRecordWriter(org.apache.hadoop.mapred.JobConf job, org.apache.hadoop.util.Progressable progress)
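
This class, like BulkRecordWriter above, uses the dual-API pattern: it extends the new mapreduce.RecordWriter while also implementing the old mapred.RecordWriter interface, so one writer serves both code paths. A minimal sketch of the pattern in isolation (the bodies are placeholder no-ops):

import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

/** Illustrative dual-API writer; satisfies both the old and new Hadoop interfaces. */
class DualApiRecordWriter extends RecordWriter<Text, Text>
        implements org.apache.hadoop.mapred.RecordWriter<Text, Text> {

    @Override
    public void write(Text key, Text value) throws IOException {
        // Single write path shared by both APIs (no-op here for brevity).
    }

    @Override
    public void close(TaskAttemptContext context) throws IOException {
        close((Reporter) null); // new-API close delegates to the old-API close
    }

    @Override
    public void close(Reporter reporter) throws IOException {
        // Release resources once, regardless of which API called us.
    }
}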