Example usage for org.apache.hadoop.mapred JobConf setOutputValueClass

List of usage examples for org.apache.hadoop.mapred JobConf setOutputValueClass

Introduction

On this page you can find example usages of org.apache.hadoop.mapred JobConf setOutputValueClass.

Prototype

public void setOutputValueClass(Class<?> theClass) 

Document

Set the value class for job outputs.
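
As a quick orientation before the project examples below, here is a minimal, self-contained sketch of a driver that calls setOutputValueClass with the old mapred API. The class name IdentityJobDriver is a hypothetical placeholder; it uses the library's IdentityMapper and IdentityReducer so that the declared output classes match what the job actually emits.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;
import org.apache.hadoop.mapred.lib.IdentityMapper;
import org.apache.hadoop.mapred.lib.IdentityReducer;

public class IdentityJobDriver {

    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(IdentityJobDriver.class);
        conf.setJobName("identity-copy");

        /* TextInputFormat produces (LongWritable offset, Text line) records and the
           identity mapper/reducer pass them through unchanged, so the job output
           key/value classes are declared accordingly */
        conf.setOutputKeyClass(LongWritable.class);
        conf.setOutputValueClass(Text.class);

        conf.setMapperClass(IdentityMapper.class);
        conf.setReducerClass(IdentityReducer.class);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);
    }
}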

Usage

From source file:jobimtext.thesaurus.distributional.hadoop.mapreduce.SimCounts1.java

License:Apache License

@SuppressWarnings("deprecation")
public static void main(String[] args) throws Exception {

    JobConf conf = HadoopUtil.generateJobConf(args);

    conf.set("mapred.output.compression.codec", "org.apache.hadoop.io.compress.GzipCodec");
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(DoubleWritable.class);

    conf.setMapperClass(Map.class);
    conf.setCombinerClass(DoubleSumReducer.class);
    conf.setReducerClass(DoubleSumReducer.class);

    conf.setInputFormat(TextInputFormat.class);
    conf.setOutputFormat(TextOutputFormat.class);

    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));

    /* number of milliseconds before killing a non-responding task */
    //conf.set("mapred.task.timeout", "600000");

    //conf.set("mapred.map.tasks.speculative.execution", "false");      

    /* change to 128mb */
    //conf.set("dfs.block.size", "134217728");

    /*
     * use compression
     */
    /*
    conf.set("mapred.output.compress", "true");
    conf.set("mapred.map.output.compress", "true");
    conf.set("mapred.map.output.compression.codec", "org.apache.hadoop.io.compress.SnappyCodec");
    conf.set("mapred.output.compression.codec", "org.apache.hadoop.io.compress.SnappyCodec");
    */

    /* set the maximum number of tasks per node */
    int maptasks = 100;

    /* Number of map tasks to deploy on each machine. 0.5 to 2 * (cores/node) */
    conf.set("mapred.tasktracker.map.tasks.maximum", "" + maptasks);
    conf.set("mapred.tasktracker.map", "" + maptasks);
    /* The default number of map tasks per job. Typically set to a prime several
       times greater than number of available hosts. */
    conf.set("mapred.map.tasks", "" + maptasks);

    int reducetasks = 80;

    conf.set("mapred.tasktracker.reduce.tasks.maximum", "" + reducetasks);
    conf.set("mapred.tasktracker.reduce", "" + reducetasks);
    conf.set("mapred.reduce.tasks", "" + reducetasks);

    /*
     * how much virtual memory the entire process tree of each map/reduce
     * task will use
     */
    conf.set("mapred.job.map.memory.mb", "4000");
    conf.set("mapred.job.reduce.memory.mb", "4000");

    conf.set("dfs.replication", "1");

    /*
     * reduce I/O load
     */
    conf.set("mapred.child.java.opts", "-Xmx1400M");

    conf.set("io.sort.mb", "300");
    conf.set("io.sort.factor", "30");

    JobClient.runJob(conf);

}
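
The DoubleSumReducer configured above as both combiner and reducer is not shown on this page. Under the assumption that it simply sums the DoubleWritable weights emitted for each key, a minimal sketch in the old mapred API could look like this (an illustrative reconstruction, not the project's actual class):

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.DoubleWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

public class DoubleSumReducer extends MapReduceBase
        implements Reducer<Text, DoubleWritable, Text, DoubleWritable> {

    @Override
    public void reduce(Text key, Iterator<DoubleWritable> values,
            OutputCollector<Text, DoubleWritable> output, Reporter reporter)
            throws IOException {
        // sum every weight emitted for this key and emit a single total
        double sum = 0.0;
        while (values.hasNext()) {
            sum += values.next().get();
        }
        output.collect(key, new DoubleWritable(sum));
    }
}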

From source file:jobimtext.thesaurus.distributional.hadoop.mapreduce.SimCountsLog.java

License:Apache License

/**
 * The reducer step will sum all float values, i.e. the
 * weight for any (word1,word2) pair sharing a feature.
 */

public static void main(String[] args) throws Exception {

    JobConf conf = HadoopUtil.generateJobConf(args);

    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(FloatWritable.class);

    conf.setMapperClass(Map.class);
    conf.setCombinerClass(DoubleSumReducer.class);
    conf.setReducerClass(DoubleSumReducer.class);

    conf.setInputFormat(TextInputFormat.class);
    conf.setOutputFormat(TextOutputFormat.class);

    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));

    /* number of milliseconds before killing a non-responding task */
    conf.set("mapred.task.timeout", "600000");

    /* change to 128mb */
    conf.set("dfs.block.size", "134217728");

    /* set the maximum number of tasks per node */
    int maptasks = 100;

    /* Number of map tasks to deploy on each machine. 0.5 to 2 * (cores/node) */
    conf.set("mapred.tasktracker.map.tasks.maximum", "" + maptasks);
    conf.set("mapred.tasktracker.map", "" + maptasks);
    /* The default number of map tasks per job. Typically set to a prime several
       times greater than number of available hosts. */
    conf.set("mapred.map.tasks", "" + maptasks);

    int reducetasks = 100;

    conf.set("mapred.tasktracker.reduce.tasks.maximum", "" + reducetasks);
    conf.set("mapred.tasktracker.reduce", "" + reducetasks);
    conf.set("mapred.reduce.tasks", "" + reducetasks);

    JobClient.runJob(conf);

}

From source file:jobimtext.thesaurus.distributional.hadoop.mapreduce.TotalWords.java

License:Apache License

@SuppressWarnings("deprecation")
public static void main(String[] args) throws Exception {

    JobConf conf = HadoopUtil.generateJobConf(args);

    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(IntWritable.class);

    conf.setMapperClass(Map.class);
    conf.setCombinerClass(Reduce.class);
    conf.setReducerClass(Reduce.class);

    conf.setInputFormat(TextInputFormat.class);
    conf.setOutputFormat(TextOutputFormat.class);

    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));

    JobClient.runJob(conf);

}

From source file:junto.algorithm.parallel.AdsorptionHadoop.java

License:Apache License

public static void main(String[] args) throws Exception {
    Hashtable config = ConfigReader.read_config(args);

    String baseInputFilePat = Defaults.GetValueOrDie(config, "hdfs_input_pattern");
    String baseOutputFilePat = Defaults.GetValueOrDie(config, "hdfs_output_base");
    int numIterations = Integer.parseInt(Defaults.GetValueOrDie(config, "iters"));

    String currInputFilePat = baseInputFilePat;
    String currOutputFilePat = "";
    for (int iter = 1; iter <= numIterations; ++iter) {
        JobConf conf = new JobConf(AdsorptionHadoop.class);
        conf.setJobName("adsorption_hadoop");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(Text.class);

        conf.setMapperClass(Map.class);
        // conf.setCombinerClass(Reduce.class);
        conf.setReducerClass(Reduce.class);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        // hyperparameters
        conf.set("mu1", Defaults.GetValueOrDie(config, "mu1"));
        conf.set("mu2", Defaults.GetValueOrDie(config, "mu2"));
        conf.set("mu3", Defaults.GetValueOrDie(config, "mu3"));
        conf.set("keepTopKLabels", Defaults.GetValueOrDefault((String) config.get("keep_top_k_labels"),
                Integer.toString(Integer.MAX_VALUE)));

        if (iter > 1) {
            // output from last iteration is the input for current iteration
            currInputFilePat = currOutputFilePat + "/*";
        }
        FileInputFormat.setInputPaths(conf, new Path(currInputFilePat));

        currOutputFilePat = baseOutputFilePat + "_" + iter;
        FileOutputFormat.setOutputPath(conf, new Path(currOutputFilePat));

        JobClient.runJob(conf);
    }
}

From source file:junto.algorithm.parallel.LP_ZGL_Hadoop.java

License:Apache License

public static void main(String[] args) throws Exception {
    Hashtable config = ConfigReader.read_config(args);

    String baseInputFilePat = Defaults.GetValueOrDie(config, "hdfs_input_pattern");
    String baseOutputFilePat = Defaults.GetValueOrDie(config, "hdfs_output_base");
    int numIterations = Integer.parseInt(Defaults.GetValueOrDie(config, "iters"));

    String currInputFilePat = baseInputFilePat;
    String currOutputFilePat = "";
    for (int iter = 1; iter <= numIterations; ++iter) {
        JobConf conf = new JobConf(LP_ZGL_Hadoop.class);
        conf.setJobName("lp_zgl_hadoop");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(Text.class);

        conf.setMapperClass(LP_ZGL_Map.class);
        // conf.setCombinerClass(LP_ZGL_Reduce.class);
        conf.setReducerClass(LP_ZGL_Reduce.class);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        // hyperparameters
        conf.set("mu1", Defaults.GetValueOrDie(config, "mu1"));
        conf.set("mu2", Defaults.GetValueOrDie(config, "mu2"));
        conf.set("keepTopKLabels", Defaults.GetValueOrDefault((String) config.get("keep_top_k_labels"),
                Integer.toString(Integer.MAX_VALUE)));

        if (iter > 1) {
            // output from last iteration is the input for current iteration
            currInputFilePat = currOutputFilePat + "/*";
        }
        FileInputFormat.setInputPaths(conf, new Path(currInputFilePat));

        currOutputFilePat = baseOutputFilePat + "_" + iter;
        FileOutputFormat.setOutputPath(conf, new Path(currOutputFilePat));

        JobClient.runJob(conf);
    }
}

From source file:junto.algorithm.parallel.MADHadoop.java

License:Apache License

public static void main(String[] args) throws Exception {
    Hashtable config = ConfigReader.read_config(args);

    String baseInputFilePat = Defaults.GetValueOrDie(config, "hdfs_input_pattern");
    String baseOutputFilePat = Defaults.GetValueOrDie(config, "hdfs_output_base");
    int numIterations = Integer.parseInt(Defaults.GetValueOrDie(config, "iters"));
    int numReducers = Defaults.GetValueOrDefault((String) config.get("num_reducers"), 10);

    String currInputFilePat = baseInputFilePat;
    String currOutputFilePat = "";
    for (int iter = 1; iter <= numIterations; ++iter) {
        JobConf conf = new JobConf(MADHadoop.class);
        conf.setJobName("mad_hadoop");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(Text.class);

        conf.setMapperClass(MADHadoopMap.class);
        // conf.setCombinerClass(MADHadoopReduce.class);
        conf.setReducerClass(MADHadoopReduce.class);
        conf.setNumReduceTasks(numReducers);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        // hyperparameters
        conf.set("mu1", Defaults.GetValueOrDie(config, "mu1"));
        conf.set("mu2", Defaults.GetValueOrDie(config, "mu2"));
        conf.set("mu3", Defaults.GetValueOrDie(config, "mu3"));
        conf.set("keepTopKLabels", Defaults.GetValueOrDefault((String) config.get("keep_top_k_labels"),
                Integer.toString(Integer.MAX_VALUE)));

        if (iter > 1) {
            // output from last iteration is the input for current iteration
            currInputFilePat = currOutputFilePat + "/*";
        }
        FileInputFormat.setInputPaths(conf, new Path(currInputFilePat));

        currOutputFilePat = baseOutputFilePat + "_iter_" + iter;
        FileOutputFormat.setOutputPath(conf, new Path(currOutputFilePat));

        JobClient.runJob(conf);
    }
}

From source file:kafka.etl.impl.SimpleKafkaETLJob.java

License:Apache License

protected JobConf createJobConf() throws Exception {
    JobConf jobConf = KafkaETLJob.createJobConf("SimpleKafakETL", _topic, _props, getClass());

    jobConf.setMapperClass(SimpleKafkaETLMapper.class);
    KafkaETLInputFormat.setInputPaths(jobConf, new Path(_input));

    jobConf.setOutputKeyClass(LongWritable.class);
    jobConf.setOutputValueClass(Text.class);
    jobConf.setOutputFormat(TextOutputFormat.class);
    TextOutputFormat.setCompressOutput(jobConf, false);
    Path output = new Path(_output);
    FileSystem fs = output.getFileSystem(jobConf);
    if (fs.exists(output))
        fs.delete(output);
    TextOutputFormat.setOutputPath(jobConf, output);

    jobConf.setNumReduceTasks(0);
    return jobConf;
}
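
Note that this is a map-only job: setNumReduceTasks(0) disables the reduce phase, so the key/value classes declared with setOutputKeyClass and setOutputValueClass describe the records the mapper writes directly through TextOutputFormat.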

From source file:mapreduce.BigramCount.java

License:Apache License

/**
 * Runs this tool.
 */
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        printUsage();
        return -1;
    }

    String inputPath = args[0];
    String outputPath = args[1];

    int mapTasks = 1;//Integer.parseInt(args[2]);
    int reduceTasks = 1;//Integer.parseInt(args[3]);

    sLogger.info("Tool: BigramCount");
    sLogger.info(" - input path: " + inputPath);
    sLogger.info(" - output path: " + outputPath);
    sLogger.info(" - number of mappers: " + mapTasks);
    sLogger.info(" - number of reducers: " + reduceTasks);

    JobConf conf = new JobConf(BigramCount.class);
    conf.setJobName("BigramCount");

    conf.setNumMapTasks(mapTasks);
    conf.setNumReduceTasks(reduceTasks);

    FileInputFormat.setInputPaths(conf, new Path(inputPath));
    FileOutputFormat.setOutputPath(conf, new Path(outputPath));
    FileOutputFormat.setCompressOutput(conf, false);

    /**
     *  Note that these must match the Class arguments given in the mapper 
     */
    conf.setOutputKeyClass(WordPair.class);
    conf.setOutputValueClass(IntWritable.class);

    conf.setMapperClass(MyMapper.class);
    conf.setPartitionerClass(MyPartitioner.class);
    conf.setCombinerClass(MyReducer.class);
    conf.setReducerClass(MyReducer.class);

    // Delete the output directory if it exists already
    Path outputDir = new Path(outputPath);
    FileSystem.get(outputDir.toUri(), conf).delete(outputDir, true);

    long startTime = System.currentTimeMillis();
    JobClient.runJob(conf);
    sLogger.info("Job Finished in " + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");

    return 0;
}
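
The "must match" comment above refers to the generic type parameters of the mapper: since setMapOutputKeyClass/setMapOutputValueClass are not called here, the classes passed to setOutputKeyClass and setOutputValueClass must also match what MyMapper emits. MyMapper and WordPair belong to the original project and are not shown on this page; a signature consistent with this configuration might look like the following sketch:

// assumed shape of the project's mapper; WordPair is the project's custom key type
public static class MyMapper extends MapReduceBase
        implements Mapper<LongWritable, Text, WordPair, IntWritable> {

    private static final IntWritable ONE = new IntWritable(1);

    public void map(LongWritable offset, Text line,
            OutputCollector<WordPair, IntWritable> output, Reporter reporter)
            throws IOException {
        // emit each adjacent pair of tokens with a count of one
        String[] tokens = line.toString().split("\\s+");
        for (int i = 0; i < tokens.length - 1; i++) {
            output.collect(new WordPair(tokens[i], tokens[i + 1]), ONE);
        }
    }
}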

From source file:mapreduce.DosAttack.java

License:Apache License

private void issue() throws IOException {
    LOG.info("Starting DOS on url[{}] with clients[{}]", wsURL, numMappers);
    DosMapper.init(wsURL);
    JobConf job = new JobConf(DosAttack.class);
    job.setJarByClass(DosAttack.class);
    job.setJobName("DOS Attack");
    job.setNumReduceTasks(0);
    job.setInputFormat(NullInputFormat.class);
    job.setOutputFormat(NullOutputFormat.class);
    job.setMapperClass(DosMapper.class);
    job.setMapOutputKeyClass(NullWritable.class);
    job.setMapOutputValueClass(NullWritable.class);
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(NullWritable.class);
    job.setNumMapTasks(numMappers);
    job.setInt(NUM_MAPPERS_KEY, numMappers);
    job.setInt(NUM_REQUESTS_KEY, numRequests);
    job.set(TARGET_URL_KEY, wsURL);
    JobClient.runJob(job);
}
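
In this job the map output classes and the job output classes are set to the same types, but the calls are not redundant in general: when setMapOutputKeyClass/setMapOutputValueClass are omitted, the old mapred API reuses the classes given to setOutputKeyClass/setOutputValueClass for the intermediate map output as well. A configuration fragment where the intermediate and final value types differ therefore needs both pairs of calls, for example:

    // map emits (Text, IntWritable); the reducer emits (Text, DoubleWritable)
    conf.setMapOutputKeyClass(Text.class);
    conf.setMapOutputValueClass(IntWritable.class);
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(DoubleWritable.class);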

From source file:mapreduce2.SpeciesDriver.java

public static void main(String[] args) throws Exception {
    JobClient client = new JobClient();
    JobConf conf = new JobConf(SpeciesDriver.class);
    conf.setJobName("Page-rank Species Graph Builder");
    final File f = new File(SpeciesDriver.class.getProtectionDomain().getCodeSource().getLocation().getPath());
    String inFiles = f.getAbsolutePath().replace("/build/classes", "") + "/src/InputFiles/species_medium.txt";
    String outFiles = f.getAbsolutePath().replace("/build/classes", "") + "/src/outputFiles/Result";
    FileInputFormat.setInputPaths(conf, new Path(inFiles));
    FileOutputFormat.setOutputPath(conf, new Path(outFiles));

    //conf.setOutputKeyClass(Text.class); 
    //conf.setOutputValueClass(Text.class); 
    conf.setMapperClass(SpeciesGraphBuilderMapper.class);
    conf.setMapOutputKeyClass(Text.class);
    conf.setMapOutputValueClass(Text.class);

    //conf.setInputFormat(org.apache.hadoop.mapred.TextInputFormat.class); 
    //conf.setOutputFormat(org.apache.hadoop.mapred.SequenceFileOutputFormat.class); 
    conf.setReducerClass(SpeciesGraphBuilderReducer.class);
    //conf.setCombinerClass(SpeciesGraphBuilderReducer.class); 

    //conf.setInputPath(new Path("graph1")); 
    //conf.setOutputPath(new Path("graph2")); 
    // take the input and output from the command line
    FileInputFormat.setInputPaths(conf, new Path(inFiles));
    FileOutputFormat.setOutputPath(conf, new Path(outFiles));

    client.setConf(conf);
    try {
        JobClient.runJob(conf);
    } catch (Exception e) {
        e.printStackTrace();
    }

    inFiles = f.getAbsolutePath().replace("/build/classes", "") + "/src/outputFiles/Result/part-00000";
    for (int i = 0; i < 15; i++) {
        client = new JobClient();
        conf = new JobConf(SpeciesDriver.class);
        conf.setJobName("Species Iter");

        int count = i + 1;
        outFiles = f.getAbsolutePath().replace("/build/classes", "") + "/src/outputFiles/Result" + count;
        conf.setNumReduceTasks(5);

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(Text.class);

        FileInputFormat.setInputPaths(conf, new Path(inFiles));
        FileOutputFormat.setOutputPath(conf, new Path(outFiles));

        conf.setMapperClass(SpeciesIterMapper2.class);
        conf.setReducerClass(SpeciesIterReducer2.class);
        conf.setCombinerClass(SpeciesIterReducer2.class);

        client.setConf(conf);
        try {
            JobClient.runJob(conf);
        } catch (Exception e) {
            e.printStackTrace();
        }
        inFiles = outFiles;

    }

    //Viewer
    client = new JobClient();
    conf = new JobConf(SpeciesDriver.class);
    conf.setJobName("Species Viewer");

    conf.setOutputKeyClass(FloatWritable.class);
    conf.setOutputValueClass(Text.class);

    inFiles = f.getAbsolutePath().replace("/build/classes", "") + "/src/outputFiles/Result15/part-00000";
    outFiles = f.getAbsolutePath().replace("/build/classes", "") + "/src/outputFiles/ResultFinal";

    FileInputFormat.setInputPaths(conf, new Path(inFiles));
    FileOutputFormat.setOutputPath(conf, new Path(outFiles));

    conf.setMapperClass(SpeciesViewerMapper.class);
    conf.setReducerClass(org.apache.hadoop.mapred.lib.IdentityReducer.class);

    client.setConf(conf);
    try {
        JobClient.runJob(conf);
    } catch (Exception e) {
        e.printStackTrace();
    }

}