Example usage for org.apache.hadoop.fs FileSystem copyFromLocalFile

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem#copyFromLocalFile.

Prototype

public void copyFromLocalFile(Path src, Path dst) throws IOException 

Document

The src file is on the local disk; it is copied to the given dst path on this FileSystem, and the local source is kept intact afterwards.
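
Before the real-world excerpts below, here is a minimal sketch of the call. The fs.defaultFS address and both paths are illustrative assumptions, not taken from the examples on this page:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CopyFromLocalExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000"); // assumed cluster address

        FileSystem fs = FileSystem.get(conf);
        try {
            // Copy the local file to HDFS; the local source is kept intact.
            fs.copyFromLocalFile(new Path("/tmp/data.txt"), new Path("/user/hadoop/data.txt"));
        } finally {
            fs.close();
        }
    }
}

FileSystem also offers overloads such as copyFromLocalFile(boolean delSrc, Path src, Path dst), which deletes the local source after a successful copy.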

Usage

From source file: nl.gridline.zieook.tasks.DataImportExportTest.java

License: Apache License

@Test
@Ignore
public void dataPrepareTool() throws Exception {

    // copy input data to HDFS first:
    try {

        LOG.info("copy data from local to HDFS");
        FileSystem hdfs = FileSystem.get(hdfsConfig);
        // import:
        hdfs.copyFromLocalFile(new Path("test-data-big/movies.dat"), new Path(input, "movies.dat"));
        hdfs.copyFromLocalFile(new Path("test-data-big/ratings.dat"), new Path(input, "ratings.dat"));
        hdfs.copyFromLocalFile(new Path("test-data-big/users.dat"), new Path(input, "users.dat"));
    } catch (IOException e) {
        LOG.error("", e);
        fail(e.getMessage());
    }

    // import data into HBase:
    DataFileImportTask imp = new DataFileImportTask();
    imp.setConfig(inxConfig);
    LOG.info("Create data import tasks");

    inxConfig.setProperty(TaskConfig.DATA_PART, TaskConfig.DATA_PART_COLLECTION);
    inxConfig.setProperty(TaskConfig.INPUT_PATH, new Path(input, "movies.dat").toString());
    imp.call();
    LOG.info("COLLECTION data imported");
    inxConfig.setProperty(TaskConfig.DATA_PART, TaskConfig.DATA_PART_RATINGS);
    inxConfig.setProperty(TaskConfig.INPUT_PATH, new Path(input, "ratings.dat").toString());
    imp.call();
    LOG.info("RATINGS data imported");
    inxConfig.setProperty(TaskConfig.DATA_PART, TaskConfig.DATA_PART_USERS);
    inxConfig.setProperty(TaskConfig.INPUT_PATH, new Path(input, "users.dat").toString());
    imp.call();
    LOG.info("USER data imported");

}

From source file: nl.gridline.zieook.tasks.ItemBasedRecommenderTaskTest.java

License: Apache License

/**
 * Copy a file to HDFS.
 * @param local local Path
 * @param remote remote (hdfs) path
 * @throws IOException
 */
@Ignore
private static void copy(Path local, Path remote) throws IOException {
    LOG.info("copy {} to {}", local, remote);
    FileSystem hdfs = FileSystem.get(hdfsConfig);
    hdfs.copyFromLocalFile(local, remote);
}

From source file: nl.gridline.zieook.tasks.TinyRecommenderTest.java

License: Apache License

@Test
@Ignore
public void tinyItemRecommenderTask() throws Exception {
    // first clean-up old data:
    FileSystem hdfs = FileSystem.get(hdfsConfig);
    if (hdfs.exists(output)) {
        hdfs.delete(output, true);
    }
    if (hdfs.exists(input)) {
        hdfs.delete(input, true);
    }
    if (hdfs.exists(tmp)) {
        hdfs.delete(tmp, true);
    }

    // put data on HDFS:
    hdfs.copyFromLocalFile(new Path("test-data-small/tiny_ratings.dat"), new Path(input, "tiny_ratings.dat"));

    // run the recommender (without the prepare step)
    ItemBasedRecommenderTask task = new ItemBasedRecommenderTask();
    task.setConfig(rtaskConfig);
    task.call();
}

From source file: nl.gridline.zieook.tasks.TinyRecommenderTest.java

License: Apache License

@Test
@Ignore
public void tinyUserRecommenderTask() throws Exception {
    // first clean-up old data:
    FileSystem hdfs = FileSystem.get(hdfsConfig);
    if (hdfs.exists(output)) {
        hdfs.delete(output, true);
    }
    if (hdfs.exists(input)) {
        hdfs.delete(input, true);
    }
    if (hdfs.exists(tmp)) {
        hdfs.delete(tmp, true);
    }

    // put data on HDFS:
    hdfs.copyFromLocalFile(new Path("test-data-small/tiny_ratings.dat"), new Path(input, "tiny_ratings.dat"));
    // execute the user-based recommender:
    UserBasedRecommenderTask task = new UserBasedRecommenderTask();
    task.setConfig(rtaskConfig);
    task.call();

}

From source file: nl.tudelft.graphalytics.giraph.GiraphPlatform.java

License: Apache License

@Override
public void uploadGraph(Graph graph) throws Exception {
    LOG.info("Uploading graph \"{}\" to HDFS", graph.getName());

    String uploadPath = Paths.get(hdfsDirectory, getName(), "input", graph.getName()).toString();

    // Upload the graph to HDFS
    FileSystem fs = FileSystem.get(new Configuration());

    LOG.debug("- Uploading vertex list");
    fs.copyFromLocalFile(new Path(graph.getVertexFilePath()), new Path(uploadPath + ".v"));

    LOG.debug("- Uploading edge list");
    fs.copyFromLocalFile(new Path(graph.getEdgeFilePath()), new Path(uploadPath + ".e"));

    fs.close();

    // Track available datasets in a map
    pathsOfGraphs.put(graph.getName(), uploadPath);
}

From source file: nl.tudelft.graphalytics.graphlab.GraphLabPlatform.java

License: Apache License

@Override
public void uploadGraph(Graph graph, String graphFilePath) throws Exception {
    LOG.entry(graph, graphFilePath);

    if (USE_HADOOP) {
        String uploadPath = Paths.get(hdfsDirectory, getName(), "input", graph.getName()).toString();

        // Upload the graph to HDFS
        FileSystem fs = FileSystem.get(new Configuration());
        fs.copyFromLocalFile(new Path(graphFilePath), new Path(uploadPath));

        // Track available datasets in a map (resolve the home directory before closing the FileSystem)
        pathsOfGraphs.put(graph.getName(), fs.getHomeDirectory().toUri() + "/" + uploadPath);
        fs.close();
    } else {
        // Use local files, so just put the local file path in the map
        pathsOfGraphs.put(graph.getName(), graphFilePath);
    }

    LOG.exit();
}

From source file: nl.tudelft.graphalytics.mapreducev2.MapReduceV2Platform.java

License: Apache License

public void uploadGraph(Graph graph, String graphFilePath) throws IOException {
    log.entry(graph, graphFilePath);

    String hdfsPathRaw = hdfsDirectory + "/mapreducev2/input/raw-" + graph.getName();
    String hdfsPath = hdfsDirectory + "/mapreducev2/input/" + graph.getName();

    // Establish a connection with HDFS and upload the graph
    Configuration conf = new Configuration();
    FileSystem dfs = FileSystem.get(conf);
    dfs.copyFromLocalFile(new Path(graphFilePath), new Path(hdfsPathRaw));

    // If the graph needs to be preprocessed, do so, otherwise rename it
    if (graph.getGraphFormat().isEdgeBased()) {
        try {
            EdgesToAdjacencyListConversion job = new EdgesToAdjacencyListConversion(hdfsPathRaw, hdfsPath,
                    graph.getGraphFormat().isDirected());
            if (mrConfig.containsKey("mapreducev2.reducer-count"))
                job.withNumberOfReducers(ConfigurationUtil.getInteger(mrConfig, "mapreducev2.reducer-count"));
            job.run();
        } catch (Exception e) {
            throw new IOException("Failed to preprocess graph", e);
        }
    } else if (graph.getGraphFormat().isDirected()) {
        try {
            DirectedVertexToAdjacencyListConversion job = new DirectedVertexToAdjacencyListConversion(
                    hdfsPathRaw, hdfsPath);
            if (mrConfig.containsKey("mapreducev2.reducer-count"))
                job.withNumberOfReducers(ConfigurationUtil.getInteger(mrConfig, "mapreducev2.reducer-count"));
            job.run();
        } catch (Exception e) {
            throw new IOException("Failed to preprocess graph", e);
        }
    } else {
        // Rename the graph
        dfs.rename(new Path(hdfsPathRaw), new Path(hdfsPath));
    }

    hdfsPathForGraphName.put(graph.getName(), hdfsPath);
    log.exit();
}

From source file: org.apache.accumulo.core.conf.CredentialProviderFactoryShimTest.java

License: Apache License

@Test
public void extractFromHdfs() throws Exception {
    File target = new File(System.getProperty("user.dir"), "target");
    String prevValue = System.setProperty("test.build.data",
            new File(target, this.getClass().getName() + "_minidfs").toString());
    MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(new Configuration()).build();
    try {
        if (null != prevValue) {
            System.setProperty("test.build.data", prevValue);
        } else {
            System.clearProperty("test.build.data");
        }

        // One namenode, One configuration
        Configuration dfsConfiguration = dfsCluster.getConfiguration(0);
        Path destPath = new Path("/accumulo.jceks");
        FileSystem dfs = dfsCluster.getFileSystem();
        // Put the populated keystore in hdfs
        dfs.copyFromLocalFile(new Path(populatedKeyStore.toURI()), destPath);

        Configuration cpConf = CredentialProviderFactoryShim.getConfiguration(dfsConfiguration,
                "jceks://hdfs/accumulo.jceks");

        // The values in the keystore
        Map<String, String> expectations = new HashMap<>();
        expectations.put("key1", "value1");
        expectations.put("key2", "value2");

        checkCredentialProviders(cpConf, expectations);
    } finally {
        dfsCluster.shutdown();
    }
}

From source file: org.apache.accumulo.server.test.scalability.Run.java

License: Apache License

public static void main(String[] args) throws Exception {

    final String sitePath = "/tmp/scale-site.conf";
    final String testPath = "/tmp/scale-test.conf";

    // parse command line
    if (args.length != 3) {
        throw new IllegalArgumentException("usage : Run <testId> <action> <numTabletServers>");
    }
    String testId = args[0];
    String action = args[1];
    int numTabletServers = Integer.parseInt(args[2]);

    Configuration conf = CachedConfiguration.getInstance();
    FileSystem fs;
    fs = FileSystem.get(conf);

    fs.copyToLocalFile(new Path("/accumulo-scale/conf/site.conf"), new Path(sitePath));
    fs.copyToLocalFile(new Path(String.format("/accumulo-scale/conf/%s.conf", testId)), new Path(testPath));

    // load configuration file properties
    Properties scaleProps = new Properties();
    Properties testProps = new Properties();
    try (FileInputStream siteIn = new FileInputStream(sitePath);
            FileInputStream testIn = new FileInputStream(testPath)) {
        scaleProps.load(siteIn);
        testProps.load(testIn);
    } catch (Exception e) {
        System.out.println("Problem loading config file");
        e.printStackTrace();
    }

    ScaleTest test = (ScaleTest) Class.forName(String.format("accumulo.server.test.scalability.%s", testId))
            .newInstance();

    test.init(scaleProps, testProps, numTabletServers);

    if (action.equalsIgnoreCase("setup")) {
        test.setup();
    } else if (action.equalsIgnoreCase("client")) {
        InetAddress addr = InetAddress.getLocalHost();
        String host = addr.getHostName();
        fs.createNewFile(new Path("/accumulo-scale/clients/" + host));
        test.client();
        fs.copyFromLocalFile(new Path("/tmp/scale.out"), new Path("/accumulo-scale/results/" + host));
    } else if (action.equalsIgnoreCase("teardown")) {
        test.teardown();
    }
}

From source file: org.apache.accumulo.test.scalability.Run.java

License: Apache License

public static void main(String[] args) throws Exception {

    final String sitePath = "/tmp/scale-site.conf";
    final String testPath = "/tmp/scale-test.conf";
    Opts opts = new Opts();
    opts.parseArgs(Run.class.getName(), args);

    Configuration conf = CachedConfiguration.getInstance();
    FileSystem fs;
    fs = FileSystem.get(conf);

    fs.copyToLocalFile(new Path("/accumulo-scale/conf/site.conf"), new Path(sitePath));
    fs.copyToLocalFile(new Path(String.format("/accumulo-scale/conf/%s.conf", opts.testId)),
            new Path(testPath));

    // load configuration file properties
    Properties scaleProps = new Properties();
    Properties testProps = new Properties();
    try {
        FileInputStream fis = new FileInputStream(sitePath);
        try {
            scaleProps.load(fis);
        } finally {
            fis.close();
        }
        fis = new FileInputStream(testPath);
        try {
            testProps.load(fis);
        } finally {
            fis.close();
        }
    } catch (Exception e) {
        log.error("Error loading config file.", e);
    }

    ScaleTest test = (ScaleTest) Class
            .forName(String.format("org.apache.accumulo.test.scalability.%s", opts.testId)).newInstance();

    test.init(scaleProps, testProps, opts.numTabletServers);

    if (opts.action.equalsIgnoreCase("setup")) {
        test.setup();
    } else if (opts.action.equalsIgnoreCase("client")) {
        InetAddress addr = InetAddress.getLocalHost();
        String host = addr.getHostName();
        fs.createNewFile(new Path("/accumulo-scale/clients/" + host));
        test.client();
        fs.copyFromLocalFile(new Path("/tmp/scale.out"), new Path("/accumulo-scale/results/" + host));
    } else if (opts.action.equalsIgnoreCase("teardown")) {
        test.teardown();
    }
}