Example usage for org.apache.hadoop.fs Path Path

Introduction

This page lists usage examples for the org.apache.hadoop.fs.Path(URI) constructor.

Prototype

public Path(URI aUri) 

Document

Construct a path from a URI.
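
The prototype above takes a java.net.URI directly, whereas most of the examples below use the equivalent String overload. For orientation, here is a minimal sketch of the URI constructor itself; the namenode address and file name are illustrative assumptions, not taken from any example on this page:

import java.net.URI;

import org.apache.hadoop.fs.Path;

public class PathFromUriExample {
    public static void main(String[] args) {
        // The URI supplies the scheme, authority (an assumed HDFS namenode), and path.
        URI uri = URI.create("hdfs://namenode:8020/user/data/input.txt");
        Path path = new Path(uri);

        System.out.println(path);            // hdfs://namenode:8020/user/data/input.txt
        System.out.println(path.getName());  // input.txt
    }
}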

Usage

From source file:HoopRemoteTask.java

License:Open Source License

public static void main(String args[]) throws Exception {
    // run the HoopLink constructor; We need this to have a global settings registry       
    @SuppressWarnings("unused")
    HoopLink link = new HoopLink();

    dbg("main ()");

    showTimeStamp();

    /**
     * I've taken out the statistics portion since it relies on code that isn't distributed
     * The next version will have this solved. I might try the solution in:
     * http://stackoverflow.com/questions/7443074/initialize-public-static-variable-in-hadoop-through-arguments
     * Although chances are I will switch to using Hoop to collect much better performance and distribution 
     * statistics. See Hoop.java for more information
     */

    HoopPerformanceMeasure metrics = new HoopPerformanceMeasure();
    metrics.setMarker("main");
    HoopLink.metrics.getDataSet().add(metrics);

    if (!parseArgs(args)) {
        usage();
        return;
    }

    if (HoopLink.postonly) {
        postOnly();
        return;
    }

    if (HoopLink.task.equals("none") == true) {
        dbg("No task defined, please use the commandline option -task <task>");
        return;
    }

    dbg("Starting system ...");

    HoopRemoteTask driver = new HoopRemoteTask();

    if (!HoopLink.useHadoop) {
        dbg("Starting built-in mapper ...");

        driver.indexDocuments();
    } else {
        dbg("Starting hadoop job ...");

        Configuration conf = new Configuration();

        // Transfer settings from HoopLink to the Configuration

        transferConf(conf);

        // Now we're feeling much better

        HoopRemoteTask.hdfs = FileSystem.get(conf);

        if (HoopLink.dbglocal) {
            dbg("Enabling local debugging ...");
            conf.set("mapred.job.tracker", "local");
        } else {
            dbg("Disabling local debugging");
        }

        JobConf job = new JobConf(conf, HoopRemoteTask.class);

        job.setJobName(driver.getClassName());

        driver.setJob(job);

        @SuppressWarnings("unused")
        String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs();

        job.setJarByClass(HoopRemoteTask.class);

        if (HoopLink.task.equals("invert") == true) {
            dbg("Configuring job for invert task ...");

            job.setReducerClass(HoopInvertedListReducer.class);
            job.setMapperClass(HoopInvertedListMapper.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(Text.class);
        }

        if (HoopLink.task.equals("wordcount") == true) {
            dbg("Configuring job for wordcount task ...");

            job.setReducerClass(HoopWordCountReducer.class);
            job.setMapperClass(HoopWordCountMapper.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(IntWritable.class);
        }

        dbg("Using input path: " + HoopLink.datapath);
        dbg("Using output path: " + HoopLink.outputpath);

        FileInputFormat.addInputPath(job, new Path(HoopLink.datapath));
        FileOutputFormat.setOutputPath(job, new Path(HoopLink.outputpath));

        job.setInputFormat(HoopWholeFileInputFormat.class);

        if (HoopLink.shardcreate.equals("mos") && (HoopLink.nrshards > 1)) {
            dbg("Setting output to sharded output streams class ...");

            job.setOutputFormat(HoopShardedOutputFormat.class);
        } else {
            job.setOutputFormat(TextOutputFormat.class);
        }

        /**
         * Temporarily commented out for testing purposes
         */

        //job.setPartitionerClass (HoopPartitioner.class);                      

        driver.register("Main");

        JobClient.runJob(job);

        postProcess(conf);
    }

    showTimeStamp();

    metrics.closeMarker();
    long timeTaken = metrics.getYValue();
    //long timeTaken=metrics.getMarkerRaw ();
    metrics.printMetrics(timeTaken);

    driver.unregister();

    /**
     * I've taken out the statistics portion since it relies on code that isn't distributed
     * The next version will have this solved. I might try the solution in:
     * http://stackoverflow.com/questions/7443074/initialize-public-static-variable-in-hadoop-through-arguments
     * Although chances are I will switch to using Hoop to collect much better performance and distribution 
     * statistics. See Hoop.java for more information
     */
    //stats.calcStatistics();
    //dbg (stats.printStatistics());
}
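
Note that this example drives Hadoop through the legacy org.apache.hadoop.mapred API (JobConf, JobClient.runJob), while several later examples on this page use the newer org.apache.hadoop.mapreduce API (Job, waitForCompletion); Path construction is identical under both.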

From source file:ReadSeqFile.java

License:Open Source License

public static void main(String[] args) throws IOException {
    String filename = "/tmp/output/part-00000";
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, new Path(filename), conf);
    IndexKey key = new IndexKey();
    IndexValue value = new IndexValue();
    try {
        // Read at most the first 100 records; next() returns false at end of file.
        for (int i = 0; i < 100 && reader.next(key, value); i++) {
            // key and value are populated in place on each call
        }
    } finally {
        reader.close();
    }
}
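
A side note: in Hadoop 2.x and later, the (FileSystem, Path, Configuration) constructor used above is deprecated in favor of reader options. A minimal equivalent, assuming the same conf and filename variables:

    SequenceFile.Reader reader = new SequenceFile.Reader(conf,
            SequenceFile.Reader.file(new Path(filename)));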

From source file:ExtractTopPersonalizedPageRankNodes.java

License:Apache License

/**
 * Runs this tool.
 */
@SuppressWarnings({ "static-access" })
public int run(String[] args) throws Exception {
    Options options = new Options();

    options.addOption(OptionBuilder.withArgName("path").hasArg().withDescription("input path").create(INPUT));
    options.addOption(OptionBuilder.withArgName("path").hasArg().withDescription("output path").create(OUTPUT));
    options.addOption(OptionBuilder.withArgName("num").hasArg().withDescription("top n").create(TOP));
    options.addOption(OptionBuilder.withArgName("src").hasArg().withDescription("source node").create(SRC));

    CommandLine cmdline;
    CommandLineParser parser = new GnuParser();

    try {
        cmdline = parser.parse(options, args);
    } catch (ParseException exp) {
        System.err.println("Error parsing command line: " + exp.getMessage());
        return -1;
    }

    if (!cmdline.hasOption(INPUT) || !cmdline.hasOption(TOP)) {
        System.out.println("args: " + Arrays.toString(args));
        HelpFormatter formatter = new HelpFormatter();
        formatter.setWidth(120);
        formatter.printHelp(this.getClass().getName(), options);
        ToolRunner.printGenericCommandUsage(System.out);
        return -1;
    }

    String inputPath = cmdline.getOptionValue(INPUT);
    String outputPath = "abc"; // hard-coded for testing instead of cmdline.getOptionValue(OUTPUT)
    int n = Integer.parseInt(cmdline.getOptionValue(TOP));

    //LOG.info("Tool name: " + ExtractTopPersonalizedPageRankNodes.class.getSimpleName());
    //LOG.info(" - input: " + inputPath);
    //LOG.info(" - output: " + outputPath);
    //LOG.info(" - top: " + n);

    Configuration conf = getConf();
    conf.setInt("mapred.min.split.size", 1024 * 1024 * 1024);
    conf.setInt(TOP_PG, n);

    Job job = Job.getInstance(conf);
    job.setJobName(ExtractTopPersonalizedPageRankNodes.class.getName() + ":" + inputPath);
    job.setJarByClass(ExtractTopPersonalizedPageRankNodes.class);

    job.setNumReduceTasks(1);

    FileInputFormat.addInputPath(job, new Path(inputPath));
    FileOutputFormat.setOutputPath(job, new Path(outputPath));

    job.setInputFormatClass(SequenceFileInputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);

    job.setMapOutputKeyClass(PairOfIntFloat.class);
    job.setMapOutputValueClass(IntWritable.class);

    job.setOutputKeyClass(FloatWritable.class);
    job.setOutputValueClass(IntWritable.class);

    job.setMapperClass(MyMapper.class);
    job.setPartitionerClass(MyPartitioner.class);
    job.setReducerClass(MyReducer.class);

    // Delete the output directory if it exists already.
    FileSystem.get(conf).delete(new Path(outputPath), true);

    job.waitForCompletion(true);

    FileSystem fileSystem = FileSystem.get(conf);
    Path path = new Path(outputPath + "/part-r-00000");
    //MapFile.Reader reader = new MapFile.Reader(new Path(outputPath+ "/part-r-00000"),conf);

    // InputStream fis=new FileInputStream(outputPath+"/part-r-00000");
    BufferedReader br = new BufferedReader(new InputStreamReader(fileSystem.open(path)));
    String s;
    float key;//=new FloatWritable();
    int value;//=new IntWritable();
    while ((s = br.readLine()) != null) {
        String[] sources = s.split("\\s+");
        key = Float.parseFloat(sources[0]);
        value = Integer.parseInt(sources[1]);
        if (key == 0.0f) {
            System.out.print("\n" + "Source: " + value + "\n");
        } else {
            System.out.print(String.format("%.5f %d", key, value) + "\n");
        }
    }
    //reader.close();
    br.close();

    //while(!SysOut.isEmpty())
    //{
    //   System.out.print(SysOut.poll());
    //}

    return 0;
}

From source file:NaivePageRank.java

License:Apache License

public static void main(String[] args) throws Exception {
    int iteration = -1;
    String inputPath = args[0];
    String outputPath = args[1];
    int specIteration = 0;
    if (args.length > 2) {
        specIteration = Integer.parseInt(args[2]);
    }
    int numNodes = 100000;
    if (args.length > 3) {
        numNodes = Integer.parseInt(args[3]);
    }
    int numReducers = 32;
    if (args.length > 4) {
        numReducers = Integer.parseInt(args[4]);
    }
    System.out.println("specified iteration: " + specIteration);
    long start = System.currentTimeMillis();

    /**
     * job to count out-going links for each url
     */
    JobConf conf = new JobConf(NaivePageRank.class);
    conf.setJobName("PageRank-Count");
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(Text.class);
    conf.setMapperClass(CountMapper.class);
    conf.setReducerClass(CountReducer.class);
    conf.setInputFormat(TextInputFormat.class);
    conf.setOutputFormat(TextOutputFormat.class);
    FileInputFormat.setInputPaths(conf, new Path(inputPath));
    FileOutputFormat.setOutputPath(conf, new Path(outputPath + "/count"));
    conf.setNumReduceTasks(numReducers);
    JobClient.runJob(conf);

    /******************** Initial Rank Assignment Job ***********************/
    conf = new JobConf(NaivePageRank.class);
    conf.setJobName("PageRank-Initialize");
    conf.setOutputKeyClass(Text.class);
    conf.setOutputValueClass(Text.class);
    conf.setMapperClass(InitialRankAssignmentMapper.class);
    conf.setReducerClass(InitialRankAssignmentReducer.class);
    conf.setInputFormat(TextInputFormat.class);
    conf.setOutputFormat(TextOutputFormat.class);
    FileInputFormat.setInputPaths(conf, new Path(inputPath));
    FileOutputFormat.setOutputPath(conf, new Path(outputPath + "/i" + iteration));
    conf.setNumReduceTasks(numReducers);
    // conf.setIterative(false);
    JobClient.runJob(conf);
    iteration++;

    do {
        /****************** Join Job ********************************/
        conf = new JobConf(NaivePageRank.class);
        conf.setJobName("PageRank-Join");
        conf.setOutputKeyClass(Text.class);
        // conf.setOutputValueClass(Text.class);
        conf.setMapperClass(ComputeRankMap.class);
        conf.setReducerClass(ComputeRankReduce.class);
        conf.setMapOutputKeyClass(TextPair.class);
        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);
        conf.setPartitionerClass(FirstPartitioner.class);
        conf.setOutputKeyComparatorClass(KeyComparator.class);
        conf.setOutputValueGroupingComparator(GroupComparator.class);

        // relation table
        FileInputFormat.setInputPaths(conf, new Path(inputPath));
        // rank table
        FileInputFormat.addInputPath(conf, new Path(outputPath + "/i" + (iteration - 1)));
        // count table
        FileInputFormat.addInputPath(conf, new Path(outputPath + "/count"));
        FileOutputFormat.setOutputPath(conf, new Path(outputPath + "/i" + iteration));
        conf.setNumReduceTasks(numReducers);
        JobClient.runJob(conf);
        iteration++;

        /******************** Rank Aggregate Job ***********************/
        conf = new JobConf(NaivePageRank.class);
        conf.setJobName("PageRank-Aggregate");
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(Text.class);
        conf.setMapOutputKeyClass(Text.class);
        conf.setMapperClass(RankAggregateMapper.class);
        conf.setReducerClass(RankAggregateReducer.class);
        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);
        FileInputFormat.setInputPaths(conf, new Path(outputPath + "/i" + (iteration - 1)));
        FileOutputFormat.setOutputPath(conf, new Path(outputPath + "/i" + iteration));
        conf.setNumReduceTasks(numReducers);
        conf.setInt("haloop.num.nodes", numNodes);
        JobClient.runJob(conf);
        iteration++;
    } while (iteration < 2 * specIteration);

    long end = System.currentTimeMillis();
    System.out.println("running time " + (end - start) / 1000 + "s");
}
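
Path arithmetic is what chains the loop together here: each pass writes to new Path(outputPath + "/i" + iteration), and the following job reads the previous iteration's directory, so jobs are wired to one another purely through HDFS paths.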

From source file:WordCount.java

License:Apache License

public int run(String[] args) throws Exception {

    // start timing
    final long startTime = System.currentTimeMillis();
    String outputReducerType = "filesystem";
    if (args != null && args.length > 0 && args[0].startsWith(OUTPUT_REDUCER_VAR)) {
        String[] s = args[0].split("=");
        if (s.length == 2)
            outputReducerType = s[1];
    }
    logger.info("output reducer type: " + outputReducerType);

    // use a smaller page size that doesn't divide the row count evenly to exercise the paging logic better
    ConfigHelper.setRangeBatchSize(getConf(), 99);

    for (int i = 0; i < WordCountSetup.TEST_COUNT; i++) {
        String columnName = "userId";
        Job job = new Job(getConf(), "wordcount");
        job.setJarByClass(WordCount.class);
        job.setMapperClass(TokenizerMapper.class);
        //System.out.println("test");

        if (outputReducerType.equalsIgnoreCase("filesystem")) {
            job.setReducerClass(ReducerToFilesystem.class);
            job.setOutputKeyClass(Text.class);
            job.setOutputValueClass(Text.class);
            FileOutputFormat.setOutputPath(job, new Path(OUTPUT_PATH_PREFIX + i));
        } else {
            job.setReducerClass(ReducerToCassandra.class);
            job.setMapOutputKeyClass(Text.class);
            job.setMapOutputValueClass(Text.class);
            job.setOutputKeyClass(ByteBuffer.class);
            job.setOutputValueClass(List.class);
            job.setOutputFormatClass(ColumnFamilyOutputFormat.class);
            ConfigHelper.setOutputColumnFamily(job.getConfiguration(), KEYSPACE, OUTPUT_COLUMN_FAMILY);
            job.getConfiguration().set(CONF_COLUMN_NAME, "sum");
        }

        job.setInputFormatClass(ColumnFamilyInputFormat.class);

        ConfigHelper.setInputRpcPort(job.getConfiguration(), "9160");
        ConfigHelper.setInputInitialAddress(job.getConfiguration(), "localhost");
        ConfigHelper.setInputPartitioner(job.getConfiguration(), "RandomPartitioner");
        ConfigHelper.setInputColumnFamily(job.getConfiguration(), KEYSPACE, COLUMN_FAMILY);
        SlicePredicate predicate = new SlicePredicate()
                .setColumn_names(Arrays.asList(ByteBufferUtil.bytes(columnName)));
        ConfigHelper.setInputSlicePredicate(job.getConfiguration(), predicate);

        // this will cause the predicate to be ignored in favor of scanning everything as a wide row

        // Last change
        // ConfigHelper.setInputColumnFamily(job.getConfiguration(), KEYSPACE, COLUMN_FAMILY, true);
        ConfigHelper.setOutputInitialAddress(job.getConfiguration(), "localhost");
        ConfigHelper.setOutputPartitioner(job.getConfiguration(), "RandomPartitioner");
        job.waitForCompletion(true);
    }
    // print elapsed time
    final double duration = (System.currentTimeMillis() - startTime) / 1000.0;
    System.out.println();
    System.out.println("Job Finished in " + duration + " seconds");
    System.out.println();

    return 0;
}
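
Path plays only a minor role in this example: input comes from Cassandra via ColumnFamilyInputFormat, and new Path(OUTPUT_PATH_PREFIX + i) is needed only when results are written to the filesystem rather than back to Cassandra.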

From source file:DisplayKMeans.java

License:Apache License

public static void main(String[] args) throws Exception {
    DistanceMeasure measure = new ManhattanDistanceMeasure();
    Path samples = new Path("samples");
    Path output = new Path("output");
    Configuration conf = new Configuration();
    HadoopUtil.delete(conf, samples);
    HadoopUtil.delete(conf, output);

    RandomUtils.useTestSeed();
    generateSamples();
    writeSampleData(samples);
    boolean runClusterer = true;
    double convergenceDelta = 0.001;
    int numClusters = 2;
    int maxIterations = 10;
    if (runClusterer) {
        runSequentialKMeansClusterer(conf, samples, output, measure, numClusters, maxIterations,
                convergenceDelta);
    } else {
        runSequentialKMeansClassifier(conf, samples, output, measure, numClusters, maxIterations,
                convergenceDelta);
    }
    new DisplayKMeans();
}
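
Unlike the fully qualified paths elsewhere on this page, new Path("samples") is relative and schemeless; Hadoop resolves it against the default filesystem (fs.defaultFS, which falls back to the local filesystem) and the current working directory.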

From source file:HDFSFileFinder.java

License:Apache License

private static void getBlockLocationsFromHdfs() {
    StringBuilder sb = new StringBuilder();
    Configuration conf = new Configuration();
    boolean first = true;

    // make connection to hdfs
    try {
        if (verbose) {
            writer.println("DEBUG: Trying to connect to " + fsName);
        }
        FileSystem fs = FileSystem.get(conf);
        Path file = new Path(fileName);
        FileStatus fStatus = fs.getFileStatus(file);
        status = fStatus;
        bLocations = fs.getFileBlockLocations(status, 0, status.getLen());
        //print out all block locations
        for (BlockLocation aLocation : bLocations) {
            String[] names = aLocation.getHosts();
            for (String name : names) {
                InetAddress addr = InetAddress.getByName(name);
                String host = addr.getHostName();
                int idx = host.indexOf('.');
                String hostname;
                if (0 < idx) {
                    hostname = host.substring(0, idx);
                } else {
                    hostname = host;
                }
                if (first) {
                    sb.append(hostname);
                    first = false;
                } else {
                    sb.append(",").append(hostname);
                }
            }
        }
        sb.append(NEWLINE);
    } catch (IOException e) {
        writer.println("Error getting block location data from namenode");
        e.printStackTrace();
    }
    writer.print(sb.toString());
    writer.flush();
}
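
Here Path is used for metadata lookup rather than job configuration: fs.getFileStatus(file) retrieves the file's status (including its length), and fs.getFileBlockLocations(status, 0, status.getLen()) maps each block of the file to the hosts that store it.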

From source file:GraphCompare.java

License:Apache License

public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job1 = Job.getInstance(conf, "graph compare 1");

    job1.setJarByClass(GraphCompare.class);
    job1.setMapperClass(Map1.class);
    job1.setReducerClass(Reduce1.class);
    job1.setMapOutputKeyClass(Text.class);
    job1.setMapOutputValueClass(IntWritable.class);
    job1.setOutputKeyClass(IntWritable.class);
    job1.setOutputValueClass(IntWritable.class);

    job1.setNumReduceTasks(30);
    job1.getConfiguration().set("mapreduce.input.fileinputformat.split.maxsize", "25000");

    FileInputFormat.addInputPath(job1, new Path(args[0]));
    FileInputFormat.addInputPath(job1, new Path(args[1]));
    FileOutputFormat.setOutputPath(job1, new Path(args[2]));

    job1.waitForCompletion(true);

    Job job2 = Job.getInstance(conf, "graph compare 2");
    job2.setJarByClass(GraphCompare.class);
    job2.setMapperClass(Map2.class);
    job2.setReducerClass(Reduce2.class);
    job2.setMapOutputKeyClass(IntWritable.class);
    job2.setMapOutputValueClass(IntWritable.class);
    job2.setOutputKeyClass(IntWritable.class);
    job2.setOutputValueClass(IntWritable.class);

    job2.setNumReduceTasks(30);

    FileInputFormat.addInputPath(job2, new Path(args[2]));
    FileOutputFormat.setOutputPath(job2, new Path(args[3]));

    job2.waitForCompletion(true);
}

From source file:BigramRelativeFrequencyJson.java

License:Apache License

/**
 * Runs this tool.
 */
public int run(String[] args) throws Exception {
    if (args.length != 3) {
        printUsage();
        return -1;
    }

    String inputPath = args[0];
    String outputPath = args[1];
    int reduceTasks = Integer.parseInt(args[2]);

    LOG.info("Tool name: " + BigramRelativeFrequencyJson.class.getSimpleName());
    LOG.info(" - input path: " + inputPath);
    LOG.info(" - output path: " + outputPath);
    LOG.info(" - num reducers: " + reduceTasks);

    Job job = Job.getInstance(getConf());
    job.setJobName(BigramRelativeFrequencyJson.class.getSimpleName());
    job.setJarByClass(BigramRelativeFrequencyJson.class);

    job.setNumReduceTasks(reduceTasks);

    FileInputFormat.setInputPaths(job, new Path(inputPath));
    FileOutputFormat.setOutputPath(job, new Path(outputPath));

    job.setMapOutputKeyClass(MyTuple.class);
    job.setMapOutputValueClass(FloatWritable.class);
    job.setOutputKeyClass(MyTuple.class);
    job.setOutputValueClass(FloatWritable.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);

    job.setMapperClass(MyMapper.class);
    job.setCombinerClass(MyCombiner.class);
    job.setReducerClass(MyReducer.class);
    job.setPartitionerClass(MyPartitioner.class);

    // Delete the output directory if it exists already.
    Path outputDir = new Path(outputPath);
    FileSystem.get(getConf()).delete(outputDir, true);

    long startTime = System.currentTimeMillis();
    job.waitForCompletion(true);
    System.out.println("Job Finished in " + (System.currentTimeMillis() - startTime) / 1000.0 + " seconds");

    return 0;
}
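
The up-front delete matters because FileOutputFormat fails a job with FileAlreadyExistsException when its output directory already exists; removing the directory first (recursively, via the true flag) is the standard workaround, and the same pattern appears in the ExtractTopPersonalizedPageRankNodes example above.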

From source file:TestBAM.java

License:Open Source License

public int run(String[] args) throws Exception {
    final Configuration conf = getConf();

    conf.set(MyOutputFormat.HEADER_FROM_FILE, args[0]);
    DistributedCache.addFileToClassPath(new Path("hdfs:///libjars/hadoop-bam-7.0.0-jar-with-dependencies.jar"),
            conf);

    final Job job = new Job(conf);

    job.setJarByClass(TestBAM.class);
    job.setMapperClass(TestBAMMapper.class);
    job.setReducerClass(TestBAMReducer.class);

    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(SAMRecordWritable.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(SAMRecordWritable.class);

    job.setInputFormatClass(AnySAMInputFormat.class);
    job.setOutputFormatClass(TestBAM.MyOutputFormat.class);

    org.apache.hadoop.mapreduce.lib.input.FileInputFormat.addInputPath(job, new Path(args[0]));

    org.apache.hadoop.mapreduce.lib.output.FileOutputFormat.setOutputPath(job, new Path(args[1]));
    job.submit();

    if (!job.waitForCompletion(true)) {
        System.err.println("sort :: Job failed.");
        return 1;
    }

    return 0;
}
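
One caveat: DistributedCache is deprecated in Hadoop 2.x; on the newer API the equivalent call is job.addFileToClassPath(new Path(...)) on the Job instance after it is created.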