Example usage for org.apache.hadoop.mapred JobConf setMapOutputKeyClass

List of usage examples for org.apache.hadoop.mapred JobConf setMapOutputKeyClass

Introduction

On this page you can find example usages of org.apache.hadoop.mapred.JobConf.setMapOutputKeyClass.

Prototype

public void setMapOutputKeyClass(Class<?> theClass) 

Document

Set the key class for the map output data.
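If the map output key class is never set, Hadoop falls back to the job's final output key class, so this call matters when the intermediate (map output) types differ from the types the reducer emits. Below is a minimal sketch of that situation; MyMapper and MyReducer are hypothetical placeholder classes whose generic signatures must match the declared types.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;

public class SetMapOutputKeyClassExample {
    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(SetMapOutputKeyClassExample.class);
        conf.setJobName("setMapOutputKeyClass example");

        // Intermediate types: the map emits (Text, IntWritable) pairs.
        conf.setMapOutputKeyClass(Text.class);
        conf.setMapOutputValueClass(IntWritable.class);

        // Final types: the reduce emits (Text, Text) pairs, which is why the
        // intermediate types above must be declared explicitly.
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(Text.class);

        conf.setMapperClass(MyMapper.class); // hypothetical Mapper<?, ?, Text, IntWritable>
        conf.setReducerClass(MyReducer.class); // hypothetical Reducer<Text, IntWritable, Text, Text>

        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);
    }
}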

Usage

From source file: ivory.preprocess.BuildTermIdMap.java

License: Apache License

@SuppressWarnings("unused")
public int runTool() throws Exception {
    // create a new JobConf, inheriting from the configuration of this
    // PowerTool
    JobConf conf = new JobConf(getConf(), BuildTermIdMap.class);
    FileSystem fs = FileSystem.get(conf);

    String indexPath = conf.get("Ivory.IndexPath");
    String collectionName = conf.get("Ivory.CollectionName");

    int mapTasks = conf.getInt("Ivory.NumMapTasks", 0);
    int reduceTasks = 1;
    int minSplitSize = conf.getInt("Ivory.MinSplitSize", 0);

    sLogger.info("PowerTool: BuildTermIdMap");
    sLogger.info(" - CollectionName: " + collectionName);
    sLogger.info(" - IndexPath: " + indexPath);
    sLogger.info(" - NumMapTasks: " + mapTasks);
    sLogger.info(" - NumReduceTasks: " + reduceTasks);

    RetrievalEnvironment env = new RetrievalEnvironment(indexPath, fs);

    if (!fs.exists(new Path(indexPath))) {
        sLogger.error("index path doesn't existing: skipping!");
        return 0;
    }

    Path termsFilePath = new Path(env.getIndexTermsData());
    Path termIDsFilePath = new Path(env.getIndexTermIdsData());
    Path idToTermFilePath = new Path(env.getIndexTermIdMappingData());
    Path dfByTermFilePath = new Path(env.getDfByTermData());
    Path cfByTermFilePath = new Path(env.getCfByTermData());
    Path dfByIntFilePath = new Path(env.getDfByIntData());
    Path cfByIntFilePath = new Path(env.getCfByIntData());

    if (fs.exists(termsFilePath) || fs.exists(termIDsFilePath) || fs.exists(idToTermFilePath)
            || fs.exists(dfByTermFilePath) || fs.exists(cfByTermFilePath) || fs.exists(dfByIntFilePath)
            || fs.exists(cfByIntFilePath)) {
        sLogger.info("term and term id data exist: skipping!");
        return 0;
    }

    Path tmpPath = new Path(env.getTempDirectory());
    fs.delete(tmpPath, true);

    conf.setJobName("BuildTermIdMap:" + collectionName);

    conf.setNumMapTasks(mapTasks);
    conf.setNumReduceTasks(reduceTasks);

    conf.setInt("Ivory.CollectionTermCount", (int) env.readCollectionTermCount());
    conf.setInt("mapred.min.split.size", minSplitSize);
    conf.set("mapred.child.java.opts", "-Xmx2048m");

    FileInputFormat.setInputPaths(conf, new Path(env.getTermDfCfDirectory()));
    FileOutputFormat.setOutputPath(conf, tmpPath);

    conf.setInputFormat(SequenceFileInputFormat.class);
    conf.setOutputFormat(SequenceFileOutputFormat.class);

    conf.setMapOutputKeyClass(Text.class);
    conf.setMapOutputValueClass(PairOfIntLong.class);
    conf.setOutputKeyClass(Text.class);

    conf.setMapperClass(IdentityMapper.class);
    conf.setReducerClass(MyReducer.class);

    long startTime = System.currentTimeMillis();
    RunningJob job = JobClient.runJob(conf);
    sLogger.info("Job Finished in " + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");

    fs.delete(tmpPath, true);

    return 0;
}

From source file: ivory.preprocess.GetTermCount.java

License: Apache License

public int runTool() throws Exception {
    // create a new JobConf, inheriting from the configuration of this
    // PowerTool
    JobConf conf = new JobConf(getConf(), GetTermCount.class);
    FileSystem fs = FileSystem.get(conf);

    String indexPath = conf.get(Constants.IndexPath);
    RetrievalEnvironment env = new RetrievalEnvironment(indexPath, fs);

    int mapTasks = conf.getInt(Constants.NumMapTasks, 0);
    int reduceTasks = conf.getInt(Constants.NumReduceTasks, 0);

    String collectionName = env.readCollectionName();
    String termDocVectorsPath = env.getTermDocVectorsDirectory();
    String termDfCfPath = env.getTermDfCfDirectory();

    if (!fs.exists(new Path(indexPath))) {
        sLogger.info("index path doesn't existing: skipping!");
        return 0;
    }

    sLogger.info("PowerTool: GetTermCount");
    sLogger.info(" - CollectionName: " + collectionName);
    sLogger.info(" - NumMapTasks: " + mapTasks);
    sLogger.info(" - NumReduceTasks: " + reduceTasks);
    sLogger.info(" - MinDf: " + conf.getInt(Constants.MinDf, 0));
    sLogger.info(" - MaxDf: " + conf.getInt(Constants.MaxDf, Integer.MAX_VALUE));

    Path outputPath = new Path(termDfCfPath);
    if (fs.exists(outputPath)) {
        sLogger.error("TermDfCf directory exist: skipping!");
        return 0;
    }

    conf.setJobName("GetTermCount:" + collectionName);

    conf.setNumMapTasks(mapTasks);
    conf.setNumReduceTasks(reduceTasks);
    conf.set("mapred.child.java.opts", "-Xmx2048m");

    FileInputFormat.setInputPaths(conf, new Path(termDocVectorsPath));
    FileOutputFormat.setOutputPath(conf, outputPath);

    conf.setInputFormat(SequenceFileInputFormat.class);
    conf.setOutputFormat(SequenceFileOutputFormat.class);

    conf.setMapOutputKeyClass(Text.class);
    conf.setMapOutputValueClass(PairOfIntLong.class);
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(PairOfIntLong.class);

    conf.setMapperClass(MyMapper.class);
    conf.setCombinerClass(MyCombiner.class);
    conf.setReducerClass(MyReducer.class);

    long startTime = System.currentTimeMillis();
    RunningJob job = JobClient.runJob(conf);
    sLogger.info("Job Finished in " + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");

    Counters counters = job.getCounters();
    // write out number of postings
    int collectionTermCount = (int) counters.findCounter(Statistics.Terms).getCounter();
    env.writeCollectionTermCount(collectionTermCount);
    // NOTE: this value is not the same as number of postings, because
    // postings for non-English terms are discarded, or as a result of the df cut

    long collectionLength = counters.findCounter(Statistics.SumOfDocLengths).getCounter();
    env.writeCollectionLength(collectionLength);
    return 0;
}

From source file: ivory.ptc.AnchorTextInvertedIndex.java

License: Apache License

@Override
public int runTool() throws Exception {
    JobConf conf = new JobConf(getConf(), AnchorTextInvertedIndex.class);
    FileSystem fs = FileSystem.get(conf);
    String inPath = conf.get("Ivory.InputPath");
    String outPath = conf.get("Ivory.OutputPath");
    Path inputPath = new Path(inPath);
    Path outputPath = new Path(outPath);
    int mapTasks = conf.getInt("Ivory.NumMapTasks", 1);
    int reduceTasks = conf.getInt("Ivory.NumReduceTasks", 100);
    String weightingSchemeParameters = conf.get("Ivory.WeightingSchemeParameters");

    LOG.info("BuildAnchorTextInvertedIndex");
    LOG.info(" - input path: " + inPath);
    LOG.info(" - output path: " + outPath);
    LOG.info(" - number of reducers: " + reduceTasks);
    LOG.info(" - weighting scheme: " + conf.get("Ivory.WeightingScheme"));
    LOG.info(" - weighting scheme parameters: " + weightingSchemeParameters);

    String[] params = weightingSchemeParameters.split(PARAMETER_SEPARATER);
    for (String param : params) {
        DistributedCache.addCacheFile(new URI(param), conf);
    }

    conf.setJobName("BuildAnchorTextInvertedIndex");
    conf.setNumMapTasks(mapTasks);
    conf.setNumReduceTasks(reduceTasks);
    conf.set("mapred.child.java.opts", "-Xmx4096m");
    conf.setInt("mapred.task.timeout", 60000000);

    FileInputFormat.setInputPaths(conf, inputPath);
    FileOutputFormat.setOutputPath(conf, outputPath);

    conf.setInputFormat(SequenceFileInputFormat.class);
    conf.setOutputFormat(SequenceFileOutputFormat.class);
    conf.setMapOutputKeyClass(Text.class);
    conf.setMapOutputValueClass(AnchorTextTarget.class);
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(ArrayListWritable.class);
    conf.setMapperClass(MyMapper.class);
    conf.setReducerClass(MyReducer.class);

    fs.delete(outputPath, true);
    JobClient.runJob(conf);
    return 0;
}

From source file: ivory.ptc.driver.XMLFormatJudgments.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    if (args.length != 3) {
        printUsage();
        return -1;
    }
    JobConf conf = new JobConf(getConf(), XMLFormatJudgments.class);
    // Command line arguments
    String inPath = args[0];
    String outPath = args[1];
    String docnoMapping = args[2];
    Path inputPath = new Path(inPath);
    Path outputPath = new Path(outPath);
    int mapTasks = 1;
    int reduceTasks = 1;

    conf.setJobName("FormatPseudoJudgments");
    conf.setNumMapTasks(mapTasks);
    conf.setNumReduceTasks(reduceTasks);
    conf.set("mapred.child.java.opts", "-Xmx2048m");
    DistributedCache.addCacheFile(new URI(docnoMapping), conf);
    FileSystem.get(conf).delete(outputPath, true);
    FileInputFormat.setInputPaths(conf, inputPath);
    FileOutputFormat.setOutputPath(conf, outputPath);
    conf.setInputFormat(SequenceFileInputFormat.class);
    conf.setOutputFormat(TextOutputFormat.class);
    conf.setMapOutputKeyClass(PseudoQuery.class);
    conf.setMapOutputValueClass(PseudoJudgments.class);
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(Text.class);
    conf.setMapperClass(IdentityMapper.class);
    conf.setReducerClass(MyReducer.class);

    JobClient.runJob(conf);
    return 0;
}

From source file: ivory.ptc.driver.XMLFormatQueries.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    if (args.length != 2) {
        printUsage();
        return -1;
    }

    JobConf conf = new JobConf(getConf(), XMLFormatQueries.class);
    // Command line arguments
    String inPath = args[0];
    String outPath = args[1];
    Path inputPath = new Path(inPath);
    Path outputPath = new Path(outPath);
    int mapTasks = 1;
    int reduceTasks = 1;

    conf.setJobName("FormatPseudoQueries");
    conf.setNumMapTasks(mapTasks);
    conf.setNumReduceTasks(reduceTasks);
    conf.set("mapred.child.java.opts", "-Xmx2048m");
    FileSystem.get(conf).delete(outputPath, true);
    FileInputFormat.setInputPaths(conf, inputPath);
    FileOutputFormat.setOutputPath(conf, outputPath);
    conf.setInputFormat(SequenceFileInputFormat.class);
    conf.setOutputFormat(TextOutputFormat.class);
    conf.setMapOutputKeyClass(PseudoQuery.class);
    conf.setMapOutputValueClass(PseudoJudgments.class);
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(Text.class);
    conf.setMapperClass(IdentityMapper.class);
    conf.setReducerClass(MyReducer.class);
    JobClient.runJob(conf);
    return 0;
}

From source file: ivory.ptc.SortedPseudoTestCollection.java

License: Apache License

public int runTool() throws Exception {
    JobConf conf = new JobConf(getConf(), SortedPseudoTestCollection.class);
    FileSystem fs = FileSystem.get(conf);
    String inPath = conf.get("Ivory.InputPath");
    String outPath = conf.get("Ivory.OutputPath");
    Path inputPath = new Path(inPath);
    Path outputPath = new Path(outPath);
    int mapTasks = 1;
    int reduceTasks = 1;

    LOG.info("SortedPseudoTestCollection");
    LOG.info(" - Input path: " + conf.get("Ivory.InputPath"));
    LOG.info(" - Output path: " + conf.get("Ivory.OutputPath"));
    LOG.info(" - JudgmentExtractor: " + conf.get("Ivory.JudgmentExtractor"));
    LOG.info(" - JudgmentExtractorParameters: " + conf.get("Ivory.JudgmentExtractorParameters"));
    LOG.info(" - SamplingCriterion: " + conf.get("Ivory.SamplingCriterion"));
    LOG.info(" - SamplingCriterionParameters: " + conf.get("Ivory.SamplingCriterionParameters"));
    LOG.info(" - QueryScorer: " + conf.get("Ivory.QueryScorer"));

    conf.setJobName("SortedPTC");
    conf.setNumMapTasks(mapTasks);
    conf.setNumReduceTasks(reduceTasks);
    conf.set("mapred.child.java.opts", "-Xmx4096m");

    FileInputFormat.setInputPaths(conf, inputPath);
    FileOutputFormat.setOutputPath(conf, outputPath);

    conf.setInputFormat(SequenceFileInputFormat.class);
    conf.setOutputFormat(SequenceFileOutputFormat.class);
    conf.setMapOutputKeyClass(PseudoQuery.class);
    conf.setMapOutputValueClass(PseudoJudgments.class);
    conf.setOutputKeyClass(PseudoQuery.class);
    conf.setOutputValueClass(PseudoJudgments.class);
    conf.setMapperClass(MyMapper.class);
    conf.setReducerClass(MyReducer.class);

    fs.delete(outputPath, true);
    JobClient.runJob(conf);
    return 0;
}

From source file: job.uncombine.compressed.BigBuildInvertedIndex.java

License: Apache License

/**
 * Runs this tool.
 */
public int run(String[] args) throws Exception {

    //long GB = 1024 * 1024 * 1024;
    //long totalDataSize = 1 * GB;

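    // Parameter sweep: every combination of reducer count, split size, heap
    // settings (-Xmx/-Xms), and io.sort.mb below is run as a separate job.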
    int[] reduceNumArray = { 9, 18 };
    int[] splitSizeMBArray = { 64, 128, 256 };
    int[] xmxArray = { 1000, 2000, 3000, 4000 };
    int[] xmsArray = { 0, 1 };
    int[] ismbArray = { 200, 400, 600, 800 };

    for (int splitIndex = 0; splitIndex < splitSizeMBArray.length; splitIndex++) {
        for (int reduceNumIndex = 0; reduceNumIndex < reduceNumArray.length; reduceNumIndex++) {
            for (int xmxIndex = 0; xmxIndex < xmxArray.length; xmxIndex++) {
                for (int xmsIndex = 0; xmsIndex < xmsArray.length; xmsIndex++) {
                    for (int ismbIndex = 0; ismbIndex < ismbArray.length; ismbIndex++) {

                        int reduceNum = reduceNumArray[reduceNumIndex];
                        int splitMB = splitSizeMBArray[splitIndex];
                        int xmx = xmxArray[xmxIndex];
                        int xms = xmsArray[xmsIndex] * xmx;
                        int ismb = ismbArray[ismbIndex];

                        JobConf conf = new JobConf(getConf(), BigBuildInvertedIndex.class);

                        conf.setLong("mapred.min.split.size", SplitTable.getMapred_min_split_size(splitMB));
                        conf.setLong("mapred.max.split.size", SplitTable.getMapred_max_split_size(splitMB));

                        //conf.setInt("my.sample.split.num", (int) (totalDataSize / (splitMB * 1024 * 1024)));

                        conf.setInt("mapred.reduce.tasks", reduceNum);
                        conf.setInt("io.sort.mb", ismb);

                        if (xms == 0)
                            conf.set("mapred.child.java.opts", "-Xmx" + xmx + "m");
                        else
                            conf.set("mapred.child.java.opts", "-Xmx" + xmx + "m -Xms" + xms + "m");

                        conf.setInt("child.monitor.metrics.seconds", 2);
                        conf.setInt("child.monitor.jvm.seconds", 2);
                        conf.setInt("child.monitor.jstat.seconds", 2);

                        conf.setJobName("BigBuildInvertedIndex " + splitMB + "MB "
                                + conf.get("mapred.child.java.opts") + " ismb=" + ismb + " RN=" + reduceNum);

                        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();
                        if (otherArgs.length != 2) {
                            System.err.println("Usage: BigBuildInvertedIndex <in> <out>");
                            System.exit(2);
                        }

                        conf.setMapOutputKeyClass(Text.class);
                        conf.setMapOutputValueClass(PairOfInts.class);
                        conf.setOutputKeyClass(Text.class);
                        conf.setOutputValueClass(PairOfWritables.class);
                        SequenceFileOutputFormat.setOutputCompressionType(conf, CompressionType.BLOCK);
                        conf.setOutputFormat(MapFileOutputFormat.class);

                        conf.setMapperClass(MyMapper.class);
                        // conf.setCombinerClass(IdentityReducer.class);
                        conf.setReducerClass(MyReducer.class);
                        FileInputFormat.setInputPaths(conf, new Path(otherArgs[0]));
                        FileOutputFormat.setOutputPath(conf, new Path(otherArgs[1]));

                        FileSystem.get(conf).delete(new Path(otherArgs[1]), true);

                        try {
                            JobClient.runJob(conf);
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                        Thread.sleep(15000);

                    }
                }
            }
        }
    }
    return 0;
}

From source file: jobimtext.thesaurus.distributional.hadoop.mapreduce.SimCounts1WithFeatures.java

License: Apache License

@SuppressWarnings("deprecation")
public static void main(String[] args) throws Exception {

    JobConf conf = HadoopUtil.generateJobConf(args);

    /* set the map output key and value types */
    conf.setMapOutputKeyClass(Text.class);
    conf.setMapOutputValueClass(Text.class);

    conf.setMapperClass(Map.class);
    conf.setCombinerClass(Reduce.class);
    conf.setReducerClass(Reduce.class);

    conf.setInputFormat(TextInputFormat.class);
    conf.setOutputFormat(TextOutputFormat.class);

    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));

    if (args.length > 3) {
        conf.setInt("threshold", Integer.parseInt(args[3]));
    }
    /* number of milliseconds before killing a non-responding task */
    conf.set("mapred.task.timeout", "600000");

    /* set the DFS block size to 128 MB */
    conf.set("dfs.block.size", "134217728");

    /* set the maximum number of tasks per node */
    int maptasks = 200;

    /*
     * Number of map tasks to deploy on each machine. 0.5 to 2 *
     * (cores/node)
     */
    conf.set("mapred.tasktracker.map.tasks.maximum", "" + maptasks);
    conf.set("mapred.tasktracker.map", "" + maptasks);
    /*
     * The default number of map tasks per job. Typically set to a prime
     * several times greater than number of available hosts.
     */
    conf.set("mapred.map.tasks", "" + maptasks);

    int reducetasks = 20;

    conf.set("mapred.tasktracker.reduce.tasks.maximum", "" + reducetasks);
    conf.set("mapred.tasktracker.reduce", "" + reducetasks);
    conf.set("mapred.reduce.tasks", "" + reducetasks);

    /*
     * how much virtual memory the entire process tree of each map/reduce
     * task will use
     */
    conf.set("mapred.job.map.memory.mb", "4000");
    conf.set("mapred.job.reduce.memory.mb", "4000");

    conf.set("dfs.replication", "1");

    /*
     * reduce I/O load
     */
    conf.set("mapred.child.java.opts", "-Xmx1400M");

    conf.set("io.sort.mb", "300");
    conf.set("io.sort.factor", "30");

    JobClient.runJob(conf);

}

From source file: mahout.analysis.MahoutOA.java

public static void main(String[] args) throws Exception {

    Path outPath = new Path(OUTPUT_PATH);
    JobConf conf = new JobConf(MahoutOA.class);
    conf.setJobName("Recommended books count");

    conf.setMapperClass(MahoutOAMapper.class);
    conf.setMapOutputKeyClass(Text.class);
    conf.setMapOutputValueClass(IntWritable.class);

    conf.setReducerClass(MahoutOAReducer.class);

    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, outPath);

    try {
        FileSystem dfs = FileSystem.get(outPath.toUri(), conf);
        if (dfs.exists(outPath)) {
            dfs.delete(outPath, true);
        }
        JobClient.runJob(conf);
    } catch (Exception e) {
        e.printStackTrace();
    }

    JobConf conf1 = new JobConf(MahoutOA.class);
    Path outPathFinal = new Path(args[1]);

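    // Second job: the per-book counts become the map output key (IntWritable)
    // so that MahoutOAComparator controls the sort order.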
    conf1.setJobName("Recommended books count ordered");

    conf1.setMapperClass(MahoutOASortMapper.class);

    conf1.setMapOutputKeyClass(IntWritable.class);
    conf1.setMapOutputValueClass(Text.class);
    conf1.setOutputKeyComparatorClass(MahoutOAComparator.class);

    conf1.setReducerClass(MahoutOASortReducer.class);

    FileInputFormat.setInputPaths(conf1, new Path(OUTPUT_PATH));
    FileOutputFormat.setOutputPath(conf1, outPathFinal);

    try {
        FileSystem dfs = FileSystem.get(outPathFinal.toUri(), conf1);
        if (dfs.exists(outPathFinal)) {
            dfs.delete(outPathFinal, true);
        }
        JobClient.runJob(conf1);
    } catch (Exception e) {
        e.printStackTrace();
    }
}

From source file: mapreduce.DosAttack.java

License: Apache License

private void issue() throws IOException {
    LOG.info("Starting DOS on url[{}] with clients[{}]", wsURL, numMappers);
    DosMapper.init(wsURL);
    JobConf job = new JobConf(DosAttack.class);
    job.setJarByClass(DosAttack.class);
    job.setJobName("DOS Attack");
    job.setNumReduceTasks(0);
    job.setInputFormat(NullInputFormat.class);
    job.setOutputFormat(NullOutputFormat.class);
    job.setMapperClass(DosMapper.class);
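    // Map-only job (zero reduce tasks) that emits no records, so every
    // key/value type is NullWritable.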
    job.setMapOutputKeyClass(NullWritable.class);
    job.setMapOutputValueClass(NullWritable.class);
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(NullWritable.class);
    job.setNumMapTasks(numMappers);
    job.setInt(NUM_MAPPERS_KEY, numMappers);
    job.setInt(NUM_REQUESTS_KEY, numRequests);
    job.set(TARGET_URL_KEY, wsURL);
    JobClient.runJob(job);
}