Example usage for org.apache.hadoop.mapred FileOutputFormat getOutputPath

List of usage examples for org.apache.hadoop.mapred FileOutputFormat getOutputPath

Introduction

On this page you can find an example of usage for org.apache.hadoop.mapred FileOutputFormat getOutputPath.

Prototype

public static Path getOutputPath(JobConf conf) 

Source Link

Document

Get the Path to the output directory for the map-reduce job.

Usage

From source file:azkaban.jobtype.examples.java.WordCount.java

License:Apache License

public void run() throws Exception {
    logger.info(String.format("Starting %s", getClass().getSimpleName()));

    // The Hadoop configuration is expected to be available on the classpath.
    JobConf jobconf = getJobConf();
    jobconf.setJarByClass(WordCount.class);

    // Job output is word -> count.
    jobconf.setOutputKeyClass(Text.class);
    jobconf.setOutputValueClass(IntWritable.class);

    // Wire up the map and reduce implementations.
    jobconf.setMapperClass(Map.class);
    jobconf.setReducerClass(Reduce.class);

    // Plain-text line input and plain-text output.
    jobconf.setInputFormat(TextInputFormat.class);
    jobconf.setOutputFormat(TextOutputFormat.class);

    FileInputFormat.addInputPath(jobconf, new Path(inputPath));
    FileOutputFormat.setOutputPath(jobconf, new Path(outputPath));

    // Optionally remove a pre-existing output directory so the job can be rerun.
    if (forceOutputOverrite) {
        Path output = FileOutputFormat.getOutputPath(jobconf);
        FileSystem fs = output.getFileSystem(jobconf);
        fs.delete(output, true);
    }

    super.run();
}

From source file:azkaban.jobtype.javautils.AbstractHadoopJob.java

License:Apache License

@SuppressWarnings("rawtypes")
/**
 * Builds a {@link JobConf} for this job: wires the mapper/reducer, applies
 * local-vs-cluster settings, input/output paths, distributed-cache entries,
 * any {@code hadoop.}-prefixed properties, and job credentials.
 *
 * @param mapperClass  mapper implementation to run
 * @param reducerClass reducer implementation, or {@code null} for a map-only job
 * @return the fully configured job configuration
 * @throws IOException        on filesystem access failures
 * @throws URISyntaxException if a configured cache file/archive URI is malformed
 */
@SuppressWarnings("rawtypes")
public JobConf createJobConf(Class<? extends Mapper> mapperClass, Class<? extends Reducer> reducerClass)
        throws IOException, URISyntaxException {
    JobConf conf = new JobConf();
    // set custom class loader with custom find resource strategy.

    conf.setJobName(getJobName());
    conf.setMapperClass(mapperClass);
    if (reducerClass != null) {
        conf.setReducerClass(reducerClass);
    }

    if (props.getBoolean("is.local", false)) {
        // Local mode: run against the local filesystem with no job tracker.
        conf.set("mapred.job.tracker", "local");
        conf.set("fs.default.name", "file:///");
        conf.set("mapred.local.dir", "/tmp/map-red");

        logger.info("Running locally, no hadoop jar set.");
    } else {
        HadoopUtils.setClassLoaderAndJar(conf, getClass());
        logger.info("Setting hadoop jar file for class:" + getClass() + "  to " + conf.getJar());
        logger.info("*************************************************************************");
        logger.info(
                "          Running on Real Hadoop Cluster(" + conf.get("mapred.job.tracker") + ")           ");
        logger.info("*************************************************************************");
    }

    // Set task JVM options if present.
    if (props.containsKey("mapred.child.java.opts")) {
        conf.set("mapred.child.java.opts", props.getString("mapred.child.java.opts"));
        logger.info("mapred.child.java.opts set to " + props.getString("mapred.child.java.opts"));
    }

    // Set input and output paths if they are present.
    if (props.containsKey("input.paths")) {
        List<String> inputPaths = props.getStringList("input.paths");
        if (inputPaths.isEmpty())
            throw new IllegalArgumentException("Must specify at least one value for property 'input.paths'");
        for (String path : inputPaths) {
            HadoopUtils.addAllSubPaths(conf, new Path(path));
        }
    }

    if (props.containsKey("output.path")) {
        String location = props.get("output.path");
        FileOutputFormat.setOutputPath(conf, new Path(location));

        // For testing purpose only remove output file if exists
        if (props.getBoolean("force.output.overwrite", false)) {
            FileSystem fs = FileOutputFormat.getOutputPath(conf).getFileSystem(conf);
            fs.delete(FileOutputFormat.getOutputPath(conf), true);
        }
    }

    // Adds external jars to the hadoop classpath via the distributed cache.
    String externalJarList = props.getString("hadoop.external.jarFiles", null);
    if (externalJarList != null) {
        FileSystem fs = FileSystem.get(conf);
        String[] jarFiles = externalJarList.split(",");
        for (String jarFile : jarFiles) {
            logger.info("Adding external jar File:" + jarFile);
            DistributedCache.addFileToClassPath(new Path(jarFile), conf, fs);
        }
    }

    // Adds distributed cache files.
    String cacheFileList = props.getString("hadoop.cache.files", null);
    if (cacheFileList != null) {
        String[] cacheFiles = cacheFileList.split(",");
        for (String cacheFile : cacheFiles) {
            logger.info("Adding Distributed Cache File:" + cacheFile);
            DistributedCache.addCacheFile(new URI(cacheFile), conf);
        }
    }

    // Adds distributed cache archives.
    String archiveFileList = props.getString("hadoop.cache.archives", null);
    if (archiveFileList != null) {
        String[] archiveFiles = archiveFileList.split(",");
        for (String archiveFile : archiveFiles) {
            logger.info("Adding Distributed Cache Archive File:" + archiveFile);
            DistributedCache.addCacheArchive(new URI(archiveFile), conf);
        }
    }

    // Pushes every plain file found in the shared HDFS classpath dir onto the job classpath.
    String hadoopCacheJarDir = props.getString("hdfs.default.classpath.dir", null);
    if (hadoopCacheJarDir != null) {
        FileSystem fs = FileSystem.get(conf);
        if (fs != null) {
            FileStatus[] status = fs.listStatus(new Path(hadoopCacheJarDir));

            if (status != null) {
                for (FileStatus entry : status) {
                    if (!entry.isDir()) {
                        Path path = new Path(hadoopCacheJarDir, entry.getPath().getName());
                        logger.info("Adding Jar to Distributed Cache Archive File:" + path);

                        DistributedCache.addFileToClassPath(path, conf, fs);
                    }
                }
            } else {
                logger.info("hdfs.default.classpath.dir " + hadoopCacheJarDir + " is empty.");
            }
        } else {
            logger.info("hdfs.default.classpath.dir " + hadoopCacheJarDir + " filesystem doesn't exist");
        }
    }

    // Copy any "hadoop."-prefixed job property straight into the conf, prefix stripped.
    for (String key : getProps().getKeySet()) {
        String lowerCase = key.toLowerCase();
        if (lowerCase.startsWith(HADOOP_PREFIX)) {
            String newKey = key.substring(HADOOP_PREFIX.length());
            conf.set(newKey, getProps().get(key));
        }
    }

    HadoopUtils.setPropsInJob(conf, getProps());

    // Put in delegation tokens when running under a token file (secure cluster).
    if (System.getenv(HADOOP_TOKEN_FILE_LOCATION) != null) {
        conf.set(MAPREDUCE_JOB_CREDENTIALS_BINARY, System.getenv(HADOOP_TOKEN_FILE_LOCATION));
    }

    return conf;
}

From source file:babel.prep.corpus.CorpusGenerator.java

License:Apache License

/**
 * Command-line entry point for the corpus generation step.
 * Usage: {@code CorpusGenerator <arg0> <arg1> [xml]} — the optional third
 * argument enables XML output when it equals {@code PARAM_XML}.
 */
public static void main(String[] args) throws Exception {
    if (args.length < 2 || args.length > 3) {
        usage();
        return;
    }

    CorpusGenerator gen = new CorpusGenerator();
    JobConf job = gen.createJobConf(args[0], args[1], (args.length == 3) && PARAM_XML.equals(args[2]));

    if (LOG.isInfoEnabled()) {
        // Fixed copy-paste bug: labels previously said "DatedCorpusGenerator".
        LOG.info("CorpusGenerator: " + job.getJobName());
    }

    gen.runPrepStep(job);

    if (LOG.isInfoEnabled()) {
        LOG.info(Stats.dumpStats() + "\n");
        LOG.info("Output: " + FileOutputFormat.getOutputPath(job));
        LOG.info("CorpusGenerator: done");
    }
}

From source file:babel.prep.corpus.MultipleXMLLangFileOutputFormat.java

License:Apache License

/**
 * Creates a record writer that serializes {@link Page} values as XML into a
 * per-name dump file under the job's output directory, replacing any previous
 * copy of that file.
 *
 * @throws IOException on filesystem failures while preparing the dump file
 */
public RecordWriter<Text, Page> getBaseRecordWriter(final FileSystem fs, JobConf job, String name,
        final Progressable progress) throws IOException {
    final Path dumpFile = new Path(FileOutputFormat.getOutputPath(job), name);

    // Get the old copy out of the way
    if (fs.exists(dumpFile))
        fs.delete(dumpFile, true);

    final XMLObjectWriter xmlWriter;

    try {
        xmlWriter = new XMLObjectWriter(fs.create(dumpFile), false);
    } catch (Exception e) {
        // Preserve the original failure as the cause instead of discarding it.
        throw new RuntimeException("Failed to instantiate XMLObjectWriter.", e);
    }

    return new RecordWriter<Text, Page>() {
        public synchronized void write(Text key, Page page) throws IOException {
            try {
                xmlWriter.write(page);
            } catch (XMLStreamException e) {
                throw new RuntimeException("Error writing page XML.", e);
            }
        }

        public synchronized void close(Reporter reporter) throws IOException {
            try {
                xmlWriter.close();
            } catch (XMLStreamException e) {
                throw new RuntimeException("Error closing XMLObjectWriter.", e);
            }
        }
    };
}

From source file:babel.prep.datedcorpus.DatedCorpusGenerator.java

License:Apache License

/** Command-line entry point for the dated-corpus generation step. */
public static void main(String[] args) throws Exception {
    // Require two or three arguments; otherwise print usage and bail out.
    boolean badArgCount = args.length < 2 || args.length > 3;
    if (badArgCount) {
        usage();
        return;
    }

    DatedCorpusGenerator generator = new DatedCorpusGenerator();
    JobConf job = generator.createJobConf(args[0], args[1]);

    if (LOG.isInfoEnabled()) {
        LOG.info("DatedCorpusGenerator: " + job.getJobName());
    }

    generator.runPrepStep(job);

    if (LOG.isInfoEnabled()) {
        LOG.info(Stats.dumpStats() + "\n");
        LOG.info("Output: " + FileOutputFormat.getOutputPath(job));
        LOG.info("DatedCorpusGenerator: done");
    }
}

From source file:babel.prep.datedcorpus.DatedLangFilesOutputFormat.java

License:Apache License

/**
 * Creates a record writer that appends each value's text to a per-name dump
 * file under the job's output directory, replacing any previous copy.
 *
 * NOTE(review): the writer appends via {@code new File(dumpFile.toUri())},
 * which assumes the output path is on the local filesystem — confirm before
 * reusing with HDFS output paths.
 *
 * @throws IOException on filesystem failures while preparing the dump file
 */
public RecordWriter<Text, Text> getBaseRecordWriter(final FileSystem fs, JobConf job, String name,
        final Progressable progress) throws IOException {
    final Path dumpFile = new Path(FileOutputFormat.getOutputPath(job), name);

    // Get the old copy out of the way
    if (fs.exists(dumpFile)) {
        fs.delete(dumpFile, true);
    } else {
        fs.mkdirs(dumpFile.getParent());
    }

    return new RecordWriter<Text, Text>() {
        public synchronized void write(Text key, Text versText) throws IOException {
            try {
                // Opens the file in append mode for every record; correctness is
                // preserved by the synchronized method, at the cost of reopening.
                BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(
                        new FileOutputStream(new File(dumpFile.toUri()), true), DEFAULT_CHARSET));

                writer.write(versText.toString());
                writer.close();
            } catch (Exception e) {
                // Preserve the original failure as the cause instead of discarding it.
                throw new RuntimeException("Error writing page versions: " + e.toString(), e);
            }
        }

        public synchronized void close(Reporter reporter) throws IOException {
        }
    };
}

From source file:babel.prep.extract.NutchPageExtractor.java

License:Apache License

/** Command-line entry point for the Nutch page extraction step. */
public static void main(String[] args) throws Exception {
    // Exactly one argument is required; otherwise print usage and bail out.
    if (args.length != 1) {
        usage();
        return;
    }

    NutchPageExtractor pageExtractor = new NutchPageExtractor();
    JobConf job = pageExtractor.createJobConf(args[0]);

    if (LOG.isInfoEnabled()) {
        String jobName = job.getJobName();
        LOG.info("NutchPageExtractor: " + jobName);
    }

    pageExtractor.runPrepStep(job);

    if (LOG.isInfoEnabled()) {
        LOG.info(Stats.dumpStats() + "\n");
        LOG.info("Output: " + FileOutputFormat.getOutputPath(job));
        LOG.info("NutchPageExtractor: done");
    }
}

From source file:babel.prep.langid.LangIdentifier.java

License:Apache License

/** Command-line entry point for the language identification step. */
public static void main(String[] args) throws Exception {
    // Exactly three arguments are required; otherwise print usage and bail out.
    if (args.length != 3) {
        usage();
        return;
    }

    LangIdentifier langIdentifier = new LangIdentifier();
    JobConf job = langIdentifier.createJobConf(args[0], args[1], args[2]);

    if (LOG.isInfoEnabled()) {
        String jobName = job.getJobName();
        LOG.info("LangIdentifier: " + jobName);
    }

    langIdentifier.runPrepStep(job);

    if (LOG.isInfoEnabled()) {
        LOG.info(Stats.dumpStats() + "\n");
        LOG.info("Output: " + FileOutputFormat.getOutputPath(job));
        LOG.info("LangIdentifier: done");
    }
}

From source file:babel.prep.langidtime.LangAndTimeExtractor.java

License:Apache License

/** Command-line entry point for the language-and-time extraction step. */
public static void main(String[] args) throws Exception {
    // Exactly three arguments are required; otherwise print usage and bail out.
    if (args.length != 3) {
        usage();
        return;
    }

    LangAndTimeExtractor extractor = new LangAndTimeExtractor();
    JobConf job = extractor.createJobConf(args[0], args[1], args[2]);

    if (LOG.isInfoEnabled()) {
        String jobName = job.getJobName();
        LOG.info("LangAndTimeExtractor: " + jobName);
    }

    extractor.runPrepStep(job);

    if (LOG.isInfoEnabled()) {
        LOG.info(Stats.dumpStats() + "\n");
        LOG.info("Output: " + FileOutputFormat.getOutputPath(job));
        LOG.info("LangAndTimeExtractor: done");
    }
}

From source file:babel.prep.merge.PageMerger.java

License:Apache License

/** Command-line entry point for the page merge step. */
public static void main(String[] args) throws Exception {
    // Exactly three arguments are required; otherwise print usage and bail out.
    if (args.length != 3) {
        usage();
        return;
    }

    PageMerger pageMerger = new PageMerger();
    JobConf job = pageMerger.createJobConf(args[0], args[1], args[2]);

    if (LOG.isInfoEnabled()) {
        String jobName = job.getJobName();
        LOG.info("PageMerger: " + jobName);
    }

    pageMerger.runPrepStep(job);

    if (LOG.isInfoEnabled()) {
        LOG.info(Stats.dumpStats() + "\n");
        LOG.info("Output: " + FileOutputFormat.getOutputPath(job));
        LOG.info("PageMerger: done");
    }
}