Example usage for org.apache.hadoop.fs FileSystem copyToLocalFile

List of usage examples for org.apache.hadoop.fs FileSystem copyToLocalFile

Introduction

On this page you can find example usage for org.apache.hadoop.fs FileSystem copyToLocalFile.

Prototype

public void copyToLocalFile(Path src, Path dst) throws IOException 

Document

Copy a file from the remote filesystem to the local one.
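
As a quick orientation before the project examples below, here is a minimal sketch of a typical call, assuming a default Configuration; the paths are hypothetical placeholders, not taken from any of the sources.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CopyToLocalExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        try {
            // Hypothetical paths, for illustration only.
            Path src = new Path("/data/report.csv"); // file on the filesystem backing fs
            Path dst = new Path("/tmp/report.csv");  // destination on the local filesystem
            fs.copyToLocalFile(src, dst);            // the source file is left in place
        } finally {
            fs.close();
        }
    }
}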

Usage

From source file:it.isislab.sof.core.engine.hadoop.mapreduce.netlogo.SOFReducerNetLogo.java

License:Apache License

public void reduce(Text key, Iterator<Text> values, OutputCollector<Text, Text> output, Reporter reporter)
        throws IOException {

    String EVALUATION_PROGRAM_THREAD = "evaluation" + Thread.currentThread().getId();
    FileSystem fs = FileSystem.get(conf);

    if (ISLOOP) {
        Path eprogram = new Path(EVALUATION_PROGRAM_THREAD);
        fs.copyToLocalFile(new Path(RATING_PROGRAM), eprogram);
        try {
            fs.mkdirs(new Path(RATING_PATH));
        } catch (Exception e) {
            // swallow mkdirs failures (e.g. the rating path already exists)
        }

    }

    if (ISLOOP) {
        Random r = new Random(System.currentTimeMillis());
        String id = MD5(key.toString() + r.nextDouble());
        String tmpEvalXml = "tmpEval" + id + ".xml";
        Path ptemp = new Path(tmpEvalXml);
        Path file_output = new Path(key.toString());
        fs.copyToLocalFile(file_output, ptemp);
        //generateEvaluation(tmpEvalXml,id,EVALUATION_PROGRAM_THREAD);
        String xmlOutput = key.toString().substring(key.toString().lastIndexOf("/") + 1);
        generateEvaluation(tmpEvalXml, xmlOutput, EVALUATION_PROGRAM_THREAD);

        File f = new File(System.getProperty("user.dir") + "/" + EVALUATION_PROGRAM_THREAD);
        f.delete();
    }

}

From source file:ldbc.socialnet.dbgen.generator.MRGenerateUsers.java

License:Open Source License

public int runGenerateJob(Configuration conf) throws Exception {
    FileSystem fs = FileSystem.get(conf);
    String hadoopDir = new String(conf.get("outputDir") + "/hadoop");
    String socialNetDir = new String(conf.get("outputDir") + "/social_network");
    int numThreads = Integer.parseInt(conf.get("numThreads"));
    System.out.println("NUMBER OF THREADS " + numThreads);

    /// --------- Execute Jobs ------
    long start = System.currentTimeMillis();

    /// --------------- First job Generating users----------------
    printProgress("Starting: Person generation");
    conf.set("pass", Integer.toString(0));
    Job job = new Job(conf, "SIB Generate Users & 1st Dimension");
    job.setMapOutputKeyClass(TupleKey.class);
    job.setMapOutputValueClass(ReducedUserProfile.class);
    job.setOutputKeyClass(TupleKey.class);
    job.setOutputValueClass(ReducedUserProfile.class);
    job.setJarByClass(GenerateUsersMapper.class);
    job.setMapperClass(GenerateUsersMapper.class);
    job.setNumReduceTasks(numThreads);
    job.setInputFormatClass(NLineInputFormat.class);
    conf.setInt("mapred.line.input.format.linespermap", 1);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    FileInputFormat.setInputPaths(job, new Path(hadoopDir) + "/mrInputFile");
    FileOutputFormat.setOutputPath(job, new Path(hadoopDir + "/sib"));
    job.waitForCompletion(true);

    /// --------------- Sorting by first dimension  ----------------
    printProgress("Starting: Sorting by first dimension");
    HadoopFileRanker fileRanker = new HadoopFileRanker(conf, TupleKey.class, ReducedUserProfile.class);
    fileRanker.run(hadoopDir + "/sib", hadoopDir + "/sibSorting");
    fs.delete(new Path(hadoopDir + "/sib"), true);

    /// --------------- job Generating First dimension Friendships  ----------------
    printProgress("Starting: Friendship generation 1.");
    conf.set("pass", Integer.toString(0));
    conf.set("dimension", Integer.toString(1));
    job = new Job(conf, "SIB Generate Friendship - Interest");
    job.setMapOutputKeyClass(ComposedKey.class);
    job.setMapOutputValueClass(ReducedUserProfile.class);
    job.setOutputKeyClass(TupleKey.class);
    job.setOutputValueClass(ReducedUserProfile.class);
    job.setJarByClass(HadoopBlockMapper.class);
    job.setMapperClass(HadoopBlockMapper.class);
    job.setReducerClass(DimensionReducer.class);
    job.setNumReduceTasks(numThreads);
    job.setInputFormatClass(SequenceFileInputFormat.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    job.setPartitionerClass(HadoopBlockPartitioner.class);
    job.setSortComparatorClass(ComposedKeyComparator.class);
    job.setGroupingComparatorClass(ComposedKeyGroupComparator.class);

    FileInputFormat.setInputPaths(job, new Path(hadoopDir + "/sibSorting"));
    FileOutputFormat.setOutputPath(job, new Path(hadoopDir + "/sib2"));
    job.waitForCompletion(true);
    fs.delete(new Path(hadoopDir + "/sibSorting"), true);

    /// --------------- Sorting phase 2  ----------------
    printProgress("Starting: Sorting by second dimension");
    fileRanker = new HadoopFileRanker(conf, TupleKey.class, ReducedUserProfile.class);
    fileRanker.run(hadoopDir + "/sib2", hadoopDir + "/sibSorting2");
    fs.delete(new Path(hadoopDir + "/sib2"), true);

    /// --------------- Second job Generating Friendships  ----------------
    printProgress("Starting: Friendship generation 2.");
    conf.set("pass", Integer.toString(1));
    conf.set("dimension", Integer.toString(2));
    job = new Job(conf, "SIB Generate Friendship - Interest");
    job.setMapOutputKeyClass(ComposedKey.class);
    job.setMapOutputValueClass(ReducedUserProfile.class);
    job.setOutputKeyClass(TupleKey.class);
    job.setOutputValueClass(ReducedUserProfile.class);
    job.setJarByClass(HadoopBlockMapper.class);
    job.setMapperClass(HadoopBlockMapper.class);
    job.setReducerClass(DimensionReducer.class);
    job.setNumReduceTasks(numThreads);
    job.setInputFormatClass(SequenceFileInputFormat.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    job.setPartitionerClass(HadoopBlockPartitioner.class);
    job.setSortComparatorClass(ComposedKeyComparator.class);
    job.setGroupingComparatorClass(ComposedKeyGroupComparator.class);
    FileInputFormat.setInputPaths(job, new Path(hadoopDir + "/sibSorting2"));
    FileOutputFormat.setOutputPath(job, new Path(hadoopDir + "/sib3"));
    job.waitForCompletion(true);
    fs.delete(new Path(hadoopDir + "/sibSorting2"), true);

    /// --------------- Sorting phase 3--------------
    printProgress("Starting: Sorting by third dimension");
    fileRanker = new HadoopFileRanker(conf, TupleKey.class, ReducedUserProfile.class);
    fileRanker.run(hadoopDir + "/sib3", hadoopDir + "/sibSorting3");
    fs.delete(new Path(hadoopDir + "/sib3"), true);

    /// --------------- Third job Generating Friendships----------------
    printProgress("Starting: Friendship generation 3.");
    conf.set("pass", Integer.toString(2));
    conf.set("dimension", Integer.toString(2));
    job = new Job(conf, "SIB Generate Friendship - Random");
    job.setMapOutputKeyClass(ComposedKey.class);
    job.setMapOutputValueClass(ReducedUserProfile.class);
    job.setOutputKeyClass(TupleKey.class);
    job.setOutputValueClass(ReducedUserProfile.class);
    job.setJarByClass(HadoopBlockMapper.class);
    job.setMapperClass(HadoopBlockMapper.class);
    job.setReducerClass(DimensionReducer.class);
    job.setNumReduceTasks(numThreads);
    job.setInputFormatClass(SequenceFileInputFormat.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    job.setPartitionerClass(HadoopBlockPartitioner.class);
    job.setSortComparatorClass(ComposedKeyComparator.class);
    job.setGroupingComparatorClass(ComposedKeyGroupComparator.class);
    FileInputFormat.setInputPaths(job, new Path(hadoopDir + "/sibSorting3"));
    FileOutputFormat.setOutputPath(job, new Path(hadoopDir + "/sib4"));
    job.waitForCompletion(true);
    fs.delete(new Path(hadoopDir + "/sibSorting3"), true);

    /// --------------- Sorting phase 3--------------

    printProgress("Starting: Sorting by third dimension (for activity generation)");
    fileRanker = new HadoopFileRanker(conf, TupleKey.class, ReducedUserProfile.class);
    fileRanker.run(hadoopDir + "/sib4", hadoopDir + "/sibSorting4");
    fs.delete(new Path(hadoopDir + "/sib4"), true);

    /// --------------- Fourth job: Serialize static network ----------------

    printProgress("Starting: Generating person activity");
    job = new Job(conf, "Generate user activity");
    job.setMapOutputKeyClass(ComposedKey.class);
    job.setMapOutputValueClass(ReducedUserProfile.class);
    job.setOutputKeyClass(TupleKey.class);
    job.setOutputValueClass(ReducedUserProfile.class);
    job.setJarByClass(HadoopBlockMapper.class);
    job.setMapperClass(HadoopBlockMapper.class);
    job.setReducerClass(UserActivityReducer.class);
    job.setNumReduceTasks(numThreads);
    job.setInputFormatClass(SequenceFileInputFormat.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    job.setPartitionerClass(HadoopBlockPartitioner.class);
    job.setSortComparatorClass(ComposedKeyComparator.class);
    job.setGroupingComparatorClass(ComposedKeyGroupComparator.class);
    FileInputFormat.setInputPaths(job, new Path(hadoopDir + "/sibSorting4"));
    FileOutputFormat.setOutputPath(job, new Path(hadoopDir + "/sib5"));
    job.waitForCompletion(true);
    fs.delete(new Path(hadoopDir + "/sib5"), true);

    int numEvents = 0;
    long min = Long.MAX_VALUE;
    long max = Long.MIN_VALUE;

    if (conf.getBoolean("updateStreams", false)) {
        for (int i = 0; i < numThreads; ++i) {
            int numPartitions = conf.getInt("numUpdatePartitions", 1);
            for (int j = 0; j < numPartitions; ++j) {
                /// --------------- Fifth job: Sort update streams ----------------
                conf.setInt("mapred.line.input.format.linespermap", 1000000);
                conf.setInt("reducerId", i);
                conf.setInt("partitionId", j);
                conf.set("streamType", "forum");
                Job jobForum = new Job(conf, "Soring update streams " + j + " of reducer " + i);
                jobForum.setMapOutputKeyClass(LongWritable.class);
                jobForum.setMapOutputValueClass(Text.class);
                jobForum.setOutputKeyClass(LongWritable.class);
                jobForum.setOutputValueClass(Text.class);
                jobForum.setJarByClass(UpdateEventMapper.class);
                jobForum.setMapperClass(UpdateEventMapper.class);
                jobForum.setReducerClass(UpdateEventReducer.class);
                jobForum.setNumReduceTasks(1);
                jobForum.setInputFormatClass(SequenceFileInputFormat.class);
                jobForum.setOutputFormatClass(SequenceFileOutputFormat.class);
                jobForum.setPartitionerClass(UpdateEventPartitioner.class);
                FileInputFormat.addInputPath(jobForum,
                        new Path(socialNetDir + "/temp_updateStream_" + i + "_" + j + "_forum"));
                FileOutputFormat.setOutputPath(jobForum, new Path(hadoopDir + "/sibEnd"));
                printProgress("Starting: Sorting update streams");
                jobForum.waitForCompletion(true);
                fs.delete(new Path(socialNetDir + "/temp_updateStream_" + i + "_" + j + "_forum"), false);
                fs.delete(new Path(hadoopDir + "/sibEnd"), true);

                conf.setInt("mapred.line.input.format.linespermap", 1000000);
                conf.setInt("reducerId", i);
                conf.setInt("partitionId", j);
                conf.set("streamType", "person");
                Job jobPerson = new Job(conf, "Soring update streams " + j + " of reducer " + i);
                jobPerson.setMapOutputKeyClass(LongWritable.class);
                jobPerson.setMapOutputValueClass(Text.class);
                jobPerson.setOutputKeyClass(LongWritable.class);
                jobPerson.setOutputValueClass(Text.class);
                jobPerson.setJarByClass(UpdateEventMapper.class);
                jobPerson.setMapperClass(UpdateEventMapper.class);
                jobPerson.setReducerClass(UpdateEventReducer.class);
                jobPerson.setNumReduceTasks(1);
                jobPerson.setInputFormatClass(SequenceFileInputFormat.class);
                jobPerson.setOutputFormatClass(SequenceFileOutputFormat.class);
                jobPerson.setPartitionerClass(UpdateEventPartitioner.class);
                FileInputFormat.addInputPath(jobPerson,
                        new Path(socialNetDir + "/temp_updateStream_" + i + "_" + j + "_person"));
                FileOutputFormat.setOutputPath(jobPerson, new Path(hadoopDir + "/sibEnd"));
                printProgress("Starting: Sorting update streams");
                jobPerson.waitForCompletion(true);
                fs.delete(new Path(socialNetDir + "/temp_updateStream_" + i + "_" + j + "_person"), false);
                fs.delete(new Path(hadoopDir + "/sibEnd"), true);

                if (conf.getBoolean("updateStreams", false)) {
                    Properties properties = new Properties();
                    FSDataInputStream file = fs.open(new Path(conf.get("outputDir")
                            + "/social_network/updateStream_" + i + "_" + j + "_person.properties"));
                    properties.load(file);
                    if (properties.getProperty("min_write_event_start_time") != null) {
                        Long auxMin = Long.parseLong(properties.getProperty("min_write_event_start_time"));
                        min = auxMin < min ? auxMin : min;
                        Long auxMax = Long.parseLong(properties.getProperty("max_write_event_start_time"));
                        max = auxMax > max ? auxMax : max;
                        numEvents += Long.parseLong(properties.getProperty("num_events"));
                    }
                    file.close();
                    file = fs.open(new Path(conf.get("outputDir") + "/social_network/updateStream_" + i + "_"
                            + j + "_forum.properties"));
                    properties.load(file);
                    if (properties.getProperty("min_write_event_start_time") != null) {
                        Long auxMin = Long.parseLong(properties.getProperty("min_write_event_start_time"));
                        min = auxMin < min ? auxMin : min;
                        Long auxMax = Long.parseLong(properties.getProperty("max_write_event_start_time"));
                        max = auxMax > max ? auxMax : max;
                        numEvents += Long.parseLong(properties.getProperty("num_events"));
                    }
                    file.close();
                    fs.delete(new Path(conf.get("outputDir") + "/social_network/updateStream_" + i + "_" + j
                            + "_person.properties"), true);
                    fs.delete(new Path(conf.get("outputDir") + "/social_network/updateStream_" + i + "_" + j
                            + "_forum.properties"), true);
                }
            }
        }

        if (conf.getBoolean("updateStreams", false)) {
            OutputStream output = fs
                    .create(new Path(conf.get("outputDir") + "/social_network/updateStream.properties"));
            output.write(new String("ldbc.snb.interactive.gct_delta_duration:" + conf.get("deltaTime") + "\n")
                    .getBytes());
            output.write(
                    new String("ldbc.snb.interactive.min_write_event_start_time:" + min + "\n").getBytes());
            output.write(
                    new String("ldbc.snb.interactive.max_write_event_start_time:" + max + "\n").getBytes());
            output.write(new String("ldbc.snb.interactive.update_interleave:" + (max - min) / numEvents + "\n")
                    .getBytes());
            output.write(new String("ldbc.snb.interactive.num_events:" + numEvents).getBytes());
            output.close();
        }
    }

    /// --------------- Sixth job: Materialize the friends lists ----------------
    /*        Job job6 = new Job(conf,"Dump the friends lists");
            job6.setMapOutputKeyClass(ComposedKey.class);
            job6.setMapOutputValueClass(ReducedUserProfile.class);
            job6.setOutputKeyClass(ComposedKey.class);
            job6.setOutputValueClass(ReducedUserProfile.class);
            job6.setJarByClass(HadoopBlockMapper.class);
            job6.setMapperClass(HadoopBlockMapper.class);
            job6.setReducerClass(FriendListOutputReducer.class);
            job6.setNumReduceTasks(numThreads);
            job6.setInputFormatClass(SequenceFileInputFormat.class);
            job6.setOutputFormatClass(SequenceFileOutputFormat.class);
            job6.setPartitionerClass(HadoopBlockPartitioner.class);
            job6.setSortComparatorClass(ComposedKeyComparator.class);
            job6.setGroupingComparatorClass(ComposedKeyGroupComparator.class);
            FileInputFormat.setInputPaths(job6, new Path(hadoopDir + "/sibSorting4"));
            FileOutputFormat.setOutputPath(job6, new Path(hadoopDir + "/job6") );
            
            
            printProgress("Starting: Materialize friends for substitution parameters");
            int resMaterializeFriends = job6.waitForCompletion(true) ? 0 : 1;
            fs.delete(new Path(hadoopDir + "/sibSorting3"),true);
            */

    long end = System.currentTimeMillis();
    System.out.println(((end - start) / 1000) + " total seconds");
    for (int i = 0; i < numThreads; ++i) {
        fs.copyToLocalFile(new Path(socialNetDir + "/m" + i + "factors.txt"), new Path("./"));
        fs.copyToLocalFile(new Path(socialNetDir + "/m0friendList" + i + ".csv"), new Path("./"));
    }
    return 0;
}

From source file:ml.shifu.dtrain.DTrainRequestProcessor.java

License:Apache License

private void copyModelsToLocal(String fromHdfs, String toLocal) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());

    Path hdfsModels = new Path(fromHdfs);

    File localModels = new File(toLocal);
    // delete recursively
    FileUtils.deleteQuietly(localModels);

    if (fs.exists(hdfsModels)) {
        fs.copyToLocalFile(hdfsModels, new Path(toLocal));
        LOG.info("Copying models to local: " + toLocal);
    } else {
        LOG.error("Models not found on HDFS: " + fromHdfs);
    }
}

From source file:net.sf.katta.node.ShardManager.java

License:Apache License

private void installShard(String shardName, String shardPath, File localShardFolder) throws KattaException {
    LOG.info("install shard '" + shardName + "' from " + shardPath);
    // TODO sg: to fix HADOOP-4422 we try to download the shard 5 times
    int maxTries = 5;
    for (int i = 0; i < maxTries; i++) {
        URI uri;
        try {
            uri = new URI(shardPath);
            FileSystem fileSystem = FileSystem.get(uri, new Configuration());
            if (_throttleSemaphore != null) {
                fileSystem = new ThrottledFileSystem(fileSystem, _throttleSemaphore);
            }
            final Path path = new Path(shardPath);
            boolean isZip = fileSystem.isFile(path) && shardPath.endsWith(".zip");

            File shardTmpFolder = new File(localShardFolder.getAbsolutePath() + "_tmp");
            try {
                FileUtil.deleteFolder(localShardFolder);
                FileUtil.deleteFolder(shardTmpFolder);

                if (isZip) {
                    FileUtil.unzip(path, shardTmpFolder, fileSystem,
                            System.getProperty("katta.spool.zip.shards", "false").equalsIgnoreCase("true"));
                } else {
                    fileSystem.copyToLocalFile(path, new Path(shardTmpFolder.getAbsolutePath()));
                }
                shardTmpFolder.renameTo(localShardFolder);
            } finally {
                // Ensure that the tmp folder is deleted on an error
                FileUtil.deleteFolder(shardTmpFolder);
            }
            // Looks like we were successful.
            if (i > 0) {
                LOG.error("Loaded shard:" + shardPath);
            }
            return;
        } catch (final URISyntaxException e) {
            throw new KattaException("Can not parse uri for path: " + shardPath, e);
        } catch (final Exception e) {
            LOG.error(String.format("Error loading shard: %s (try %d of %d)", shardPath, i, maxTries), e);
            if (i >= maxTries - 1) {
                throw new KattaException("Can not load shard: " + shardPath, e);
            }
        }
    }
}

From source file:nl.gridline.zieook.tasks.TaalServerTest.java

License:Apache License

@Ignore
private static void copyToLocal(Path remote, Path local) throws IOException {
    LOG.info("copy {} to {}", remote, local);
    FileSystem hdfs = FileSystem.get(hdfsConfig);
    hdfs.copyToLocalFile(remote, local);
}

From source file:nl.tudelft.graphalytics.giraph.GiraphPlatform.java

License:Apache License

@Override
public PlatformBenchmarkResult executeAlgorithmOnGraph(Benchmark benchmark) throws PlatformExecutionException {
    Algorithm algorithm = benchmark.getAlgorithm();
    Graph graph = benchmark.getGraph();
    Object parameters = benchmark.getAlgorithmParameters();

    LOG.info("Executing algorithm \"{}\" on graph \"{}\".", algorithm.getName(), graph.getName());

    int result;
    try {
        // Prepare the appropriate job for the given algorithm type
        GiraphJob job;
        switch (algorithm) {
        case BFS:
            job = new BreadthFirstSearchJob(parameters, graph.getGraphFormat());
            break;
        case CDLP:
            job = new CommunityDetectionLPJob(parameters, graph.getGraphFormat());
            break;
        case WCC:
            job = new WeaklyConnectedComponentsJob(graph.getGraphFormat());
            break;
        case FFM:
            job = new ForestFireModelJob(parameters, graph.getGraphFormat());
            break;
        case LCC:
            job = new LocalClusteringCoefficientJob(graph.getGraphFormat());
            break;
        case PR:
            job = new PageRankJob(parameters, graph.getGraphFormat());
            break;
        case SSSP:
            job = new SingleSourceShortestPathJob(parameters, graph);
            break;
        default:
            throw new IllegalArgumentException("Unsupported algorithm: " + algorithm);
        }

        // Create the job configuration using the Giraph properties file
        String hdfsOutputPath = Paths.get(hdfsDirectory, getName(), "output", algorithm + "-" + graph.getName())
                .toString();
        Configuration jobConf = new Configuration();

        GiraphJob.INPUT_PATH.set(jobConf, pathsOfGraphs.get(graph.getName()));
        GiraphJob.OUTPUT_PATH.set(jobConf, hdfsOutputPath);
        GiraphJob.ZOOKEEPER_ADDRESS.set(jobConf, ConfigurationUtil.getString(giraphConfig, ZOOKEEPERADDRESS));

        transferIfSet(giraphConfig, JOB_WORKERCOUNT, jobConf, GiraphJob.WORKER_COUNT);
        transferIfSet(giraphConfig, JOB_HEAPSIZE, jobConf, GiraphJob.HEAP_SIZE_MB);
        transferIfSet(giraphConfig, JOB_MEMORYSIZE, jobConf, GiraphJob.WORKER_MEMORY_MB);

        transferGiraphOptions(giraphConfig, jobConf);

        // Execute the Giraph job
        result = ToolRunner.run(jobConf, job, new String[0]);
        // TODO: Clean up intermediate and output data, depending on some configuration.

        if (benchmark.isOutputRequired()) {
            FileSystem fs = FileSystem.get(new Configuration());
            fs.copyToLocalFile(new Path(hdfsOutputPath), new Path(benchmark.getOutputPath()));
            fs.close();
        }

    } catch (Exception e) {
        throw new PlatformExecutionException("Giraph job failed with exception: ", e);
    }

    if (result != 0) {
        throw new PlatformExecutionException("Giraph job completed with exit code = " + result);
    }

    return new PlatformBenchmarkResult(NestedConfiguration.empty());
}

From source file:org.apache.accumulo.server.test.scalability.Run.java

License:Apache License

public static void main(String[] args) throws Exception {

    final String sitePath = "/tmp/scale-site.conf";
    final String testPath = "/tmp/scale-test.conf";

    // parse command line
    if (args.length != 3) {
        throw new IllegalArgumentException("usage : Run <testId> <action> <numTabletServers>");
    }
    String testId = args[0];
    String action = args[1];
    int numTabletServers = Integer.parseInt(args[2]);

    Configuration conf = CachedConfiguration.getInstance();
    FileSystem fs;
    fs = FileSystem.get(conf);

    fs.copyToLocalFile(new Path("/accumulo-scale/conf/site.conf"), new Path(sitePath));
    fs.copyToLocalFile(new Path(String.format("/accumulo-scale/conf/%s.conf", testId)), new Path(testPath));

    // load configuration file properties
    Properties scaleProps = new Properties();
    Properties testProps = new Properties();
    try {
        scaleProps.load(new FileInputStream(sitePath));
        testProps.load(new FileInputStream(testPath));
    } catch (Exception e) {
        System.out.println("Problem loading config file");
        e.printStackTrace();
    }

    ScaleTest test = (ScaleTest) Class.forName(String.format("accumulo.server.test.scalability.%s", testId))
            .newInstance();

    test.init(scaleProps, testProps, numTabletServers);

    if (action.equalsIgnoreCase("setup")) {
        test.setup();
    } else if (action.equalsIgnoreCase("client")) {
        InetAddress addr = InetAddress.getLocalHost();
        String host = addr.getHostName();
        fs.createNewFile(new Path("/accumulo-scale/clients/" + host));
        test.client();
        fs.copyFromLocalFile(new Path("/tmp/scale.out"), new Path("/accumulo-scale/results/" + host));
    } else if (action.equalsIgnoreCase("teardown")) {
        test.teardown();
    }
}

From source file:org.apache.accumulo.test.scalability.Run.java

License:Apache License

public static void main(String[] args) throws Exception {

    final String sitePath = "/tmp/scale-site.conf";
    final String testPath = "/tmp/scale-test.conf";
    Opts opts = new Opts();
    opts.parseArgs(Run.class.getName(), args);

    Configuration conf = CachedConfiguration.getInstance();
    FileSystem fs;
    fs = FileSystem.get(conf);

    fs.copyToLocalFile(new Path("/accumulo-scale/conf/site.conf"), new Path(sitePath));
    fs.copyToLocalFile(new Path(String.format("/accumulo-scale/conf/%s.conf", opts.testId)),
            new Path(testPath));

    // load configuration file properties
    Properties scaleProps = new Properties();
    Properties testProps = new Properties();
    try {
        FileInputStream fis = new FileInputStream(sitePath);
        try {
            scaleProps.load(fis);
        } finally {
            fis.close();
        }
        fis = new FileInputStream(testPath);
        try {
            testProps.load(fis);
        } finally {
            fis.close();
        }
    } catch (Exception e) {
        log.error("Error loading config file.", e);
    }

    ScaleTest test = (ScaleTest) Class
            .forName(String.format("org.apache.accumulo.test.scalability.%s", opts.testId)).newInstance();

    test.init(scaleProps, testProps, opts.numTabletServers);

    if (opts.action.equalsIgnoreCase("setup")) {
        test.setup();
    } else if (opts.action.equalsIgnoreCase("client")) {
        InetAddress addr = InetAddress.getLocalHost();
        String host = addr.getHostName();
        fs.createNewFile(new Path("/accumulo-scale/clients/" + host));
        test.client();
        fs.copyFromLocalFile(new Path("/tmp/scale.out"), new Path("/accumulo-scale/results/" + host));
    } else if (opts.action.equalsIgnoreCase("teardown")) {
        test.teardown();
    }
}

From source file:org.apache.accumulo.test.ShellServerIT.java

License:Apache License

@Test
public void exporttableImporttable() throws Exception {
    final String table = name.getMethodName(), table2 = table + "2";

    // exporttable / importtable
    ts.exec("createtable " + table + " -evc", true);
    make10();
    ts.exec("addsplits row5", true);
    ts.exec("config -t " + table + " -s table.split.threshold=345M", true);
    ts.exec("offline " + table, true);
    File exportDir = new File(rootPath, "ShellServerIT.export");
    String exportUri = "file://" + exportDir.toString();
    String localTmp = "file://" + new File(rootPath, "ShellServerIT.tmp").toString();
    ts.exec("exporttable -t " + table + " " + exportUri, true);
    DistCp cp = newDistCp(new Configuration(false));
    String import_ = "file://" + new File(rootPath, "ShellServerIT.import").toString();
    if (getCluster().getClientConfig().getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
        // DistCp bugs out trying to get a fs delegation token to perform the cp. Just copy it ourselves by hand.
        FileSystem fs = getCluster().getFileSystem();
        FileSystem localFs = FileSystem.getLocal(new Configuration(false));

        // Path on local fs to cp into
        Path localTmpPath = new Path(localTmp);
        localFs.mkdirs(localTmpPath);

        // Path in remote fs to importtable from
        Path importDir = new Path(import_);
        fs.mkdirs(importDir);

        // Implement a poor-man's DistCp
        try (BufferedReader reader = new BufferedReader(new FileReader(new File(exportDir, "distcp.txt")))) {
            for (String line; (line = reader.readLine()) != null;) {
                Path exportedFile = new Path(line);
                // There isn't a cp on FileSystem??
                log.info("Copying " + line + " to " + localTmpPath);
                fs.copyToLocalFile(exportedFile, localTmpPath);
                Path tmpFile = new Path(localTmpPath, exportedFile.getName());
                log.info("Moving " + tmpFile + " to the import directory " + importDir);
                fs.moveFromLocalFile(tmpFile, importDir);
            }
        }
    } else {
        String[] distCpArgs = new String[] { "-f", exportUri + "/distcp.txt", import_ };
        assertEquals("Failed to run distcp: " + Arrays.toString(distCpArgs), 0, cp.run(distCpArgs));
    }
    ts.exec("importtable " + table2 + " " + import_, true);
    ts.exec("config -t " + table2 + " -np", true, "345M", true);
    ts.exec("getsplits -t " + table2, true, "row5", true);
    ts.exec("constraint --list -t " + table2, true, "VisibilityConstraint=2", true);
    ts.exec("online " + table, true);
    ts.exec("deletetable -f " + table, true);
    ts.exec("deletetable -f " + table2, true);
}

From source file:org.apache.drill.exec.expr.fn.FunctionImplementationRegistry.java

License:Apache License

/**
 * Copies jar from remote udf area to local udf area.
 *
 * @param jarName jar name to be copied
 * @param remoteFunctionRegistry remote function registry
 * @return local path to jar that was copied
 * @throws IOException in case of problems during the jar copying process
 */
@SuppressWarnings("resource")
private Path copyJarToLocal(String jarName, RemoteFunctionRegistry remoteFunctionRegistry) throws IOException {
    Path registryArea = remoteFunctionRegistry.getRegistryArea();
    FileSystem fs = remoteFunctionRegistry.getFs();
    Path remoteJar = new Path(registryArea, jarName);
    Path localJar = new Path(localUdfDir, jarName);
    try {
        fs.copyToLocalFile(remoteJar, localJar);
    } catch (IOException e) {
        String message = String.format("Error during jar [%s] copying from [%s] to [%s]", jarName,
                registryArea.toUri().getPath(), localUdfDir.toUri().getPath());
        throw new IOException(message, e);
    }
    return localJar;
}