Example usage for org.apache.hadoop.mapreduce OutputFormat subclasses


Introduction

On this page you can find examples of classes that subclass org.apache.hadoop.mapreduce.OutputFormat, excerpted from open-source projects.
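
Every format listed below fills in the same three-method contract: getRecordWriter() returns the per-task writer, checkOutputSpecs() validates the job configuration before any task runs, and getOutputCommitter() controls the commit protocol. Here is a minimal sketch of that contract; the class name and method bodies are hypothetical, not taken from any of the projects below.

import java.io.IOException;

import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

/** Hypothetical skeleton; a real subclass would talk to its storage system. */
public class DiscardingOutputFormat<K, V> extends OutputFormat<K, V> {

    @Override
    public RecordWriter<K, V> getRecordWriter(TaskAttemptContext context)
            throws IOException, InterruptedException {
        // One writer per task; a real format would open its client connection here.
        return new RecordWriter<K, V>() {
            @Override
            public void write(K key, V value) {
                // Persist the pair; this sketch simply drops it.
            }

            @Override
            public void close(TaskAttemptContext context) {
                // Flush buffers and release connections here.
            }
        };
    }

    @Override
    public void checkOutputSpecs(JobContext context) throws IOException {
        // Fail fast on bad configuration, before any task is launched.
    }

    @Override
    public OutputCommitter getOutputCommitter(TaskAttemptContext context)
            throws IOException, InterruptedException {
        // Borrow the built-in no-op committer; non-filesystem sinks often do this.
        return new NullOutputFormat<K, V>().getOutputCommitter(context);
    }
}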

Usage

From source file com.tuplejump.calliope.hadoop.AbstractColumnFamilyOutputFormat.java

/**
 * The <code>ColumnFamilyOutputFormat</code> acts as a Hadoop-specific
 * OutputFormat that allows reduce tasks to store keys (and corresponding
 * values) as Cassandra rows (and respective columns) in a given
 * ColumnFamily.
 */
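
Wiring this format into a job means pointing it at a Cassandra keyspace and column family. The sketch below assumes the Calliope fork mirrors the ConfigHelper API of org.apache.cassandra.hadoop; the keyspace, column family, node address, and partitioner values are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

// Assumed to mirror org.apache.cassandra.hadoop; unverified against the fork.
import com.tuplejump.calliope.hadoop.ColumnFamilyOutputFormat;
import com.tuplejump.calliope.hadoop.ConfigHelper;

public class CassandraJobSetup {
    public static Job configure() throws Exception {
        Job job = Job.getInstance(new Configuration(), "cassandra-output");
        job.setOutputFormatClass(ColumnFamilyOutputFormat.class);

        // Placeholder cluster settings.
        ConfigHelper.setOutputColumnFamily(job.getConfiguration(), "my_keyspace", "my_cf");
        ConfigHelper.setOutputInitialAddress(job.getConfiguration(), "127.0.0.1");
        ConfigHelper.setOutputPartitioner(job.getConfiguration(), "Murmur3Partitioner");
        return job;
    }
}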

From source file com.tuplejump.calliope.hadoop.BulkOutputFormat.java

public class BulkOutputFormat extends OutputFormat<ByteBuffer, List<Mutation>>
        implements org.apache.hadoop.mapred.OutputFormat<ByteBuffer, List<Mutation>> {
    @Override
    public void checkOutputSpecs(JobContext context) {
        checkOutputSpecs(HadoopCompat.getConfiguration(context));
    }
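
BulkOutputFormat extends the new (mapreduce) OutputFormat while also implementing the old (mapred) interface, so one class serves either API; both checkOutputSpecs entry points funnel into a shared Configuration-based overload (the excerpt reaches the Configuration through HadoopCompat to stay portable across Hadoop 1 and 2). A generic, hypothetical sketch of that bridging pattern:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobConf;

public abstract class DualApiOutputFormat<K, V>
        extends org.apache.hadoop.mapreduce.OutputFormat<K, V>
        implements org.apache.hadoop.mapred.OutputFormat<K, V> {

    /** Shared validation; subclasses inspect the raw Configuration. */
    protected abstract void checkOutputSpecs(Configuration conf);

    // New-API entry point.
    @Override
    public void checkOutputSpecs(org.apache.hadoop.mapreduce.JobContext context) {
        checkOutputSpecs(context.getConfiguration());
    }

    // Old-API entry point; JobConf extends Configuration, so no conversion is needed.
    @Override
    public void checkOutputSpecs(FileSystem ignored, JobConf job) throws IOException {
        checkOutputSpecs(job);
    }
}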

From source file com.vertica.hadoop.VerticaOutputFormat.java

/**
 * Output format for loading data into Vertica.
 */
public class VerticaOutputFormat extends OutputFormat<Text, VerticaRecord> {
    private static final Log LOG = LogFactory.getLog(VerticaOutputFormat.class);
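
Hooking the format up requires only the standard Job API; the key and value classes follow from the OutputFormat<Text, VerticaRecord> signature above. Connection settings (host, database, credentials, target table) are connector-specific and omitted from this sketch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;

import com.vertica.hadoop.VerticaOutputFormat;
import com.vertica.hadoop.VerticaRecord; // assumed to live alongside the format

public class VerticaJobSetup {
    public static Job configure() throws Exception {
        Job job = Job.getInstance(new Configuration(), "vertica-load");
        job.setOutputFormatClass(VerticaOutputFormat.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(VerticaRecord.class);
        return job;
    }
}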

From source file com.willetinc.hadoop.mapreduce.dynamodb.DynamoDBOutputFormat.java

/**
 * 
 */
public class DynamoDBOutputFormat<K extends DynamoDBKeyWritable, V> extends OutputFormat<K, V> {

    @Override

From source file com.zjy.mongo.MongoOutputFormat.java

public class MongoOutputFormat<K, V> extends OutputFormat<K, V> {
    public void checkOutputSpecs(final JobContext context) throws IOException {
        if (MongoConfigUtil.getOutputURIs(context.getConfiguration()).isEmpty()) {
            throw new IOException("No output URI is specified. You must set mongo.output.uri.");
        }
    }
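
The guard above fails the job unless mongo.output.uri is present in the configuration, so setting that property is the one required piece of job setup; the URI below is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

import com.zjy.mongo.MongoOutputFormat;

public class MongoJobSetup {
    public static Job configure() throws Exception {
        Configuration conf = new Configuration();
        // Placeholder URI; satisfies the checkOutputSpecs guard above.
        conf.set("mongo.output.uri", "mongodb://localhost:27017/mydb.mycollection");

        Job job = Job.getInstance(conf, "mongo-output");
        job.setOutputFormatClass(MongoOutputFormat.class);
        return job;
    }
}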

From source file cz.seznam.euphoria.hadoop.output.DataSinkOutputFormat.java

/**
 * {@code OutputFormat} created from a {@code DataSink}.
 * Because of the Hadoop output format contract, we need to be able to
 * instantiate the format from a {@code Class} object; therefore we
 * serialize the underlying {@code DataSink} to bytes and store it
 * in the configuration.
 */
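
The trick described here (serialize an object, stash the bytes in the job Configuration, rebuild it after reflective instantiation) is generic. A minimal sketch of the pattern, with a hypothetical key name and helper class rather than DataSinkOutputFormat's actual API:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.Base64;

import org.apache.hadoop.conf.Configuration;

public class ConfSerde {
    private static final String KEY = "example.serialized.sink"; // hypothetical key

    public static void store(Configuration conf, Serializable sink) throws Exception {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
            out.writeObject(sink);
        }
        // Configuration values are strings, so Base64-encode the bytes.
        conf.set(KEY, Base64.getEncoder().encodeToString(bytes.toByteArray()));
    }

    public static Object load(Configuration conf) throws Exception {
        byte[] bytes = Base64.getDecoder().decode(conf.get(KEY));
        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
            return in.readObject();
        }
    }
}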

From source file de.hpi.fgis.hdrs.mapreduce.TripleOutputFormat.java

public class TripleOutputFormat extends OutputFormat<NullWritable, Triple> implements Configurable {

    public static final String OUTPUT_INDEXES = "hdrs.mapreduce.outputindexes";

    private Configuration conf;
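
Implementing Configurable lets Hadoop hand the job Configuration to the format right after creating it reflectively (ReflectionUtils calls setConf on newly constructed instances). The usual boilerplate, sketched here rather than quoted from the class:

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;

public abstract class ConfigurableSketch implements Configurable {
    private Configuration conf;

    @Override
    public void setConf(Configuration conf) {
        this.conf = conf; // injected by ReflectionUtils right after instantiation
    }

    @Override
    public Configuration getConf() {
        return conf;
    }
}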

From source file edu.american.student.redis.hadoop.RedisBigTableOutputFormat.java

public class RedisBigTableOutputFormat extends OutputFormat<RedisBigTableKey, Text> {
    private static byte[] table;

    @Override
    public void checkOutputSpecs(JobContext arg0) throws IOException, InterruptedException {
        RedisForeman foreman = new RedisForeman();

From source file edu.arizona.cs.hadoop.fs.irods.output.HirodsFileOutputFormat.java

/**
 * A base class for {@link OutputFormat}s that write to {@link FileSystem}s.
 */
public abstract class HirodsFileOutputFormat<K, V> extends OutputFormat<K, V> {

    private static final Log LOG = LogFactory.getLog(HirodsFileOutputFormat.class);
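
Path-based base classes like this one follow the convention of Hadoop's own FileOutputFormat, where job setup names a concrete subclass and an output path. The sketch below shows that standard pattern with stock Hadoop classes; presumably the iRODS subclasses accept irods:// paths the same way.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class FileOutputSetup {
    public static Job configure() throws Exception {
        Job job = Job.getInstance(new Configuration(), "file-output");
        job.setOutputFormatClass(TextOutputFormat.class);
        // Placeholder path; a FileOutputFormat-style class resolves it per task attempt.
        FileOutputFormat.setOutputPath(job, new Path("/tmp/example-output"));
        return job;
    }
}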

From source file edu.indiana.d2i.htrc.io.mem.MemCachedOutputFormat.java

public class MemCachedOutputFormat<K extends Writable, V extends Writable> extends OutputFormat<K, V> {

    // committer starts
    public static class MemCachedOutputCommitter extends OutputCommitter {

        @Override
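
The excerpt breaks off inside the nested committer, whose job is to fill in OutputCommitter's five abstract methods. For a cache-style sink where the RecordWriter pushes data directly, a no-op committer is the common shape; this sketch shows that shape, not the class's actual body.

import java.io.IOException;

import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class NoOpCommitter extends OutputCommitter {
    @Override
    public void setupJob(JobContext jobContext) throws IOException {
        // Nothing to prepare before tasks run.
    }

    @Override
    public void setupTask(TaskAttemptContext taskContext) throws IOException {
        // No per-task staging area needed.
    }

    @Override
    public boolean needsTaskCommit(TaskAttemptContext taskContext) throws IOException {
        return false; // skip the commit phase entirely
    }

    @Override
    public void commitTask(TaskAttemptContext taskContext) throws IOException {
        // Nothing to promote; writes went straight to the cache.
    }

    @Override
    public void abortTask(TaskAttemptContext taskContext) throws IOException {
        // Nothing to roll back.
    }
}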