Example usage for org.apache.hadoop.mapreduce.lib.output TextOutputFormat subclass-usage

List of usage examples for org.apache.hadoop.mapreduce.lib.output TextOutputFormat subclass-usage

Introduction

On this page you can find example usage for org.apache.hadoop.mapreduce.lib.output TextOutputFormat subclass-usage.

Usage

From source file be.ugent.intec.halvade.uploader.mapreduce.MyFastqOutputFormat.java

/**
 *
 * @author dries
 */
public class MyFastqOutputFormat extends TextOutputFormat<PairedIdWritable, FastqRecord> {

From source file boa.io.BoaOutputFormat.java

/**
 * Uses our custom output committer.
 * 
 * @author rdyer
 *
 * @param <K> the type of keys

From source file co.nubetech.hiho.mapreduce.lib.output.AppendTextOutputFormat.java

public class AppendTextOutputFormat extends TextOutputFormat {
    private String isAppend;
    private static long fileCount;

    private static final NumberFormat NUMBER_FORMAT = NumberFormat.getInstance();

From source file co.nubetech.hiho.mapreduce.lib.output.FTPTextOutputFormat.java

public class FTPTextOutputFormat<K, V> extends TextOutputFormat<K, V> {

    protected static class FTPLineRecordWriter<K, V> extends LineRecordWriter<K, V> {
        private static final String utf8 = "UTF-8";
        private static final byte[] newline;
        static {

From source file com.cloudera.dataflow.spark.TemplatedTextOutputFormat.java

public class TemplatedTextOutputFormat<K, V> extends TextOutputFormat<K, V> implements ShardNameTemplateAware {

    @Override
    public void checkOutputSpecs(JobContext job) {
        // don't fail if the output already exists
    }

From source file com.conversantmedia.mapreduce.output.BloomFilterOutputFormat.java

/**
 * Output format for writing bloom filters. This is a simple wrapper
 * around the Dotomi StringBloomFilter.
 *
 * @param <K> Key
 * @param <V> Value

From source file com.inmobi.conduit.distcp.tools.mapred.CopyOutputFormat.java

public class CopyOutputFormat<K, V> extends TextOutputFormat<K, V> {

    public static void setWorkingDirectory(Job job, Path workingDirectory) {
        job.getConfiguration().set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, workingDirectory.toString());
    }

From source file de.tudarmstadt.ukp.dkpro.c4corpus.hadoop.deduplication.DocumentInfoOutputFormat.java

/**
 * An output writer to write the DocumentInfo data structure to a machine readable file
 * The format is [docID;docLength;simHash;lang] for each document
 *
 * @author Omnia Zayed
 */

From source file edu.rutgers.ess.crs.utility.KeyValueCSVOutputFormat.java

public class KeyValueCSVOutputFormat extends TextOutputFormat<Text, TextArrayWritable> {
    public static String CSV_TOKEN_SEPARATOR_CONFIG;
    public static String CSV_KEYVALUE_SEPARATOR_CONFIG;

    public RecordWriter<Text, TextArrayWritable> getRecordWriter(final TaskAttemptContext context)
            throws IOException, InterruptedException {

From source file fi.tkk.ics.hadoop.bam.cli.plugins.chipster.Summarize.java

final class SummarizeOutputFormat extends TextOutputFormat<NullWritable, RangeCount> {
    @Override
    public RecordWriter<NullWritable, RangeCount> getRecordWriter(TaskAttemptContext ctx) throws IOException {
        Path path = getDefaultWorkFile(ctx, "");
        FileSystem fs = path.getFileSystem(ContextUtil.getConfiguration(ctx));