Example usage for org.apache.hadoop.fs FileSystem mkdirs

List of usage examples for org.apache.hadoop.fs FileSystem mkdirs

Introduction

This page collects example usages of org.apache.hadoop.fs.FileSystem#mkdirs from open-source projects.

Prototype

public boolean mkdirs(Path f) throws IOException 

Document

Calls #mkdirs(Path, FsPermission) with the default permission.
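
Before the project examples below, here is a minimal, self-contained sketch of the call (the /tmp paths and class name are hypothetical; the explicit-permission overload shown for comparison is the one this method delegates to):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class MkdirsExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // mkdirs(Path) behaves like `mkdir -p`: it creates any missing parent
        // directories and returns true if the directory exists on return.
        Path dir = new Path("/tmp/example"); // hypothetical path
        if (!fs.mkdirs(dir)) {
            throw new java.io.IOException("Mkdirs failed to create " + dir);
        }

        // The overload this method delegates to, with an explicit permission:
        fs.mkdirs(new Path("/tmp/example-750"), new FsPermission((short) 0750));
    }
}

Note that mkdirs is idempotent: it returns true when the directory already exists, so a false return indicates a genuine failure.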

Usage

From source file:hdfs.FileUtil.java

License:Apache License

/** Copy files between FileSystems. */
public static boolean copy(FileSystem srcFS, Path src, FileSystem dstFS, Path dst, boolean deleteSource,
        boolean overwrite, Configuration conf) throws IOException {
    dst = checkDest(src.getName(), dstFS, dst, overwrite);

    if (srcFS.getFileStatus(src).isDir()) {
        checkDependencies(srcFS, src, dstFS, dst);
        if (!dstFS.mkdirs(dst)) {
            return false;
        }
        FileStatus contents[] = srcFS.listStatus(src);
        for (int i = 0; i < contents.length; i++) {
            copy(srcFS, contents[i].getPath(), dstFS, new Path(dst, contents[i].getPath().getName()),
                    deleteSource, overwrite, conf);
        }
    } else if (srcFS.isFile(src)) {
        InputStream in = null;
        OutputStream out = null;
        try {
            in = srcFS.open(src);
            out = dstFS.create(dst, overwrite);
            IOUtils.copyBytes(in, out, conf, true);
        } catch (IOException e) {
            IOUtils.closeStream(out);
            IOUtils.closeStream(in);
            throw e;
        }
    } else {
        throw new IOException(src.toString() + ": No such file or directory");
    }
    if (deleteSource) {
        return srcFS.delete(src, true);
    } else {
        return true;
    }

}

From source file:hdfs.FileUtil.java

License:Apache License

/** Copy local files to a FileSystem. */
public static boolean copy(File src, FileSystem dstFS, Path dst, boolean deleteSource, Configuration conf)
        throws IOException {
    dst = checkDest(src.getName(), dstFS, dst, false);

    if (src.isDirectory()) {
        if (!dstFS.mkdirs(dst)) {
            return false;
        }
        File contents[] = listFiles(src);
        for (int i = 0; i < contents.length; i++) {
            copy(contents[i], dstFS, new Path(dst, contents[i].getName()), deleteSource, conf);
        }
    } else if (src.isFile()) {
        InputStream in = null;
        OutputStream out = null;
        try {
            in = new FileInputStream(src);
            out = dstFS.create(dst);
            IOUtils.copyBytes(in, out, conf);
        } catch (IOException e) {
            IOUtils.closeStream(out);
            IOUtils.closeStream(in);
            throw e;
        }
    } else {
        throw new IOException(src.toString() + ": No such file or directory");
    }
    if (deleteSource) {
        return FileUtil.fullyDelete(src);
    } else {
        return true;
    }
}

From source file:Hdfs_Operations.HdfsCreateDirectory.java

@Override
public int run(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(getConf());
    Path path = new Path(args[0]);
    fs.mkdirs(path);
    fs.close();

    return 0;
}

From source file:hibench.DataPaths.java

License:Apache License

public static void checkHdfsFile(Path path, boolean mkdir) throws IOException {

    FileSystem fs = path.getFileSystem(new Configuration());

    if (fs.exists(path)) {
        fs.delete(path, true);
    }

    if (mkdir) {
        fs.mkdirs(path);
    }
    fs.close();
}

From source file:hitune.analysis.mapreduce.processor.AnalysisProcessor.java

License:Apache License

private void GenReportHome() {
    try {
        FileSystem fs = FileSystem.get(this.conf);
        Path reportfolder = new Path(this.conf.get(AnalysisProcessorConfiguration.reportfolder));
        if (!fs.exists(reportfolder))
            fs.mkdirs(reportfolder);
    } catch (IOException e) {
        e.printStackTrace();
        log.error("Cannot create report folder");
    }
}
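
A note on the design choice above: the exists() guard is redundant, since mkdirs already succeeds if the directory exists. Calling fs.mkdirs(reportfolder) unconditionally would behave the same with one fewer NameNode call.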

From source file:hitune.analysis.mapreduce.processor.AnalysisProcessor.java

License:Apache License

/**
 * Move the temporary output folder to the final (user-defined) location.
 * If a job's output folder contains multiple files, merge them into one
 * file, then rename the folder to its final name.
 * @param job
 * @param output
 * @param result
 */
protected void moveResults(JobConf job, String output, String result) {
    MOVE_DONE = true;
    try {
        FileSystem fs = FileSystem.get(job);
        log.debug("move results: " + result);
        Path src = new Path(result + "/" + "*.csv*");
        Path dst = new Path(output);
        if (!fs.exists(dst)) {
            fs.mkdirs(dst);
        }
        FileStatus[] matches = fs.globStatus(src, new PathFilter() {
            @Override
            public boolean accept(Path path) {
                return true; // accept every match
            }
        });
        if (matches != null && matches.length != 0) {
            if (matches.length > 1) {
                //multiple output files
                String[] args = new String[2];
                args[0] = result;
                args[1] = "_" + result;
                fs.delete(new Path("_" + result));
                //merge multiple output files into one file
                ToolRunner.run(new MergeOutput(this.conf), args);
                fs.delete(new Path(result));
                fs.rename(new Path("_" + result), new Path(result));
            }

            matches = fs.globStatus(src, new PathFilter() {
                @Override
                public boolean accept(Path path) {
                    return true; // accept every match
                }
            });

            for (FileStatus file : matches) {
                String filename = file.getPath().getName();
                filename = filename.substring(0, filename.indexOf("-"));
                log.debug("move file:" + filename);
                Path toFile = new Path(output + "/" + filename);
                if (fs.exists(toFile)) {
                    fs.delete(toFile);
                }
                fs.rename(file.getPath(), toFile);
                fs.delete(file.getPath().getParent(), true);
                FileStatus[] tmpDirs = fs.listStatus(file.getPath().getParent().getParent());
                if (tmpDirs == null || tmpDirs.length == 0) {
                    fs.delete(file.getPath().getParent().getParent(), true);
                }
                break;
            }
        } else {
            MOVE_DONE = false;
        }
    } catch (IOException e) {
        e.printStackTrace();
        MOVE_DONE = false;
    } catch (Exception e) {
        e.printStackTrace();
        MOVE_DONE = false;
    }
}

From source file:hu.sztaki.ilab.bigdata.common.tools.hbase.PerformanceEvaluation.java

License:Apache License

private Path writeInputFile(final Configuration c) throws IOException {
    FileSystem fs = FileSystem.get(c);
    if (!fs.exists(PERF_EVAL_DIR)) {
        fs.mkdirs(PERF_EVAL_DIR);
    }
    SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMddHHmmss");
    Path subdir = new Path(PERF_EVAL_DIR, formatter.format(new Date()));
    fs.mkdirs(subdir);
    Path inputFile = new Path(subdir, "input.txt");
    PrintStream out = new PrintStream(fs.create(inputFile));
    // Make input random.
    Map<Integer, String> m = new TreeMap<Integer, String>();
    Hash h = MurmurHash.getInstance();
    int perClientRows = (R / N);
    try {
        for (int i = 0; i < 10; i++) {
            for (int j = 0; j < N; j++) {
                String s = "startRow=" + ((j * perClientRows) + (i * (perClientRows / 10)))
                        + ", perClientRunRows=" + (perClientRows / 10) + ", totalRows=" + R + ", clients=" + N
                        + ", rowsPerPut=" + B;
                int hash = h.hash(Bytes.toBytes(s));
                m.put(hash, s);
            }
        }
        for (Map.Entry<Integer, String> e : m.entrySet()) {
            out.println(e.getValue());
        }
    } finally {
        out.close();
    }
    return subdir;
}

From source file:hws.core.JobClient.java

License:Apache License

public void run(String[] args) throws Exception {
    //final String command = args[0];
    //final int n = Integer.valueOf(args[1]);
    //final Path jarPath = new Path(args[2]);
    Options options = new Options();
    /*options.addOption(OptionBuilder.withLongOpt("jar")
                           .withDescription( "Jar path" )
                           .hasArg()
                           .withArgName("JarPath")
                           .create());
    options.addOption(OptionBuilder.withLongOpt("scheduler")
                           .withDescription( "Scheduler class name" )
                           .hasArg()
                           .withArgName("ClassName")
                           .create());
    */
    options.addOption(OptionBuilder.withLongOpt("zk-servers")
            .withDescription("List of the ZooKeeper servers").hasArgs().withArgName("zkAddrs").create("zks"));
    //options.addOption("l", "list", false, "list modules");
    options.addOption(OptionBuilder.withLongOpt("load").withDescription("load new modules").hasArgs()
            .withArgName("XMLFiles").create());
    /*options.addOption(OptionBuilder.withLongOpt( "remove" )
                           .withDescription( "remove modules" )
                           .hasArgs()
                           .withArgName("ModuleNames")
                           .create("rm"));
    */
    CommandLineParser parser = new BasicParser();
    CommandLine cmd = parser.parse(options, args);

    //Path jarPath = null;
    //String schedulerClassName = null;
    String[] xmlFileNames = null;
    //String []moduleNames = null;
    String zksArgs = "";
    String[] zkServers = null;
    if (cmd.hasOption("zks")) {
        zksArgs = "-zks";
        zkServers = cmd.getOptionValues("zks");
        for (String zks : zkServers) {
            zksArgs += " " + zks;
        }
    }

    //Logger setup
    //FSDataOutputStream writer = FileSystem.get(conf).create(new Path("hdfs:///hws/apps/"+appIdStr+"/logs/jobClient.log"));
    //Logger.addOutputStream(writer);

    /*if(cmd.hasOption("l")){
       LOG.warn("Argument --list (-l) is not supported yet.");
    }
    if(cmd.hasOption("jar")){
       jarPath = new Path(cmd.getOptionValue("jar")); 
    }
    if(cmd.hasOption("scheduler")){
       schedulerClassName = cmd.getOptionValue("scheduler");
    }*/
    if (cmd.hasOption("load")) {
        xmlFileNames = cmd.getOptionValues("load");
    } /*else if(cmd.hasOption("rm")){
        moduleNames = cmd.getOptionValues("rm");
      }*/

    //LOG.info("Jar-Path "+jarPath);
    if (xmlFileNames != null) {
        String paths = "";
        for (String path : xmlFileNames) {
            paths += path + "; ";
        }
        LOG.info("Load XMLs: " + paths);
    }
    /*if(moduleNames!=null){
       String modules = "";
       for(String module: moduleNames){
          modules += module+"; ";
       }
       LOG.info("remove: "+modules);
    }*/
    // Create yarnClient
    YarnConfiguration conf = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();

    // Create application via yarnClient
    YarnClientApplication app = yarnClient.createApplication();

    System.out.println("LOG Path: " + ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();

    ZkClient zk = new ZkClient(zkServers[0]); //TODO select a ZooKeeper server
    if (!zk.exists("/hadoop-watershed")) {
        zk.createPersistent("/hadoop-watershed", "");
    }
    zk.createPersistent("/hadoop-watershed/" + appId.toString(), "");

    FileSystem fs = FileSystem.get(conf);

    LOG.info("Collecting files to upload");
    fs.mkdirs(new Path("hdfs:///hws/apps/" + appId.toString()));
    fs.mkdirs(new Path("hdfs:///hws/apps/" + appId.toString() + "/logs"));

    ModulePipeline modulePipeline = ModulePipeline.fromXMLFiles(xmlFileNames);
    LOG.info("Uploading files to HDFS");
    for (String path : modulePipeline.files()) {
        uploadFile(fs, new File(path), appId);
    }
    LOG.info("Upload finished");

    String modulePipelineJson = Json.dumps(modulePipeline);
    String modulePipelineBase64 = Base64.encodeBase64String(StringUtils.getBytesUtf8(modulePipelineJson))
            .replaceAll("\\s", "");
    LOG.info("ModulePipeline: " + modulePipelineJson);
    //LOG.info("ModulePipeline: "+modulePipelineBase64);
    amContainer.setCommands(Collections.singletonList("$JAVA_HOME/bin/java" + " -Xmx256M"
            + " hws.core.JobMaster" + " -aid " + appId.toString() + " --load " + modulePipelineBase64 + " "
            + zksArgs + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout" + " 2>"
            + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"));

    // Setup jar for ApplicationMaster
    //LocalResource appMasterJar = Records.newRecord(LocalResource.class);
    //setupAppMasterJar(jarPath, appMasterJar);
    //amContainer.setLocalResources(Collections.singletonMap("hws.jar", appMasterJar));

    LOG.info("Listing files for YARN-Watershed");
    RemoteIterator<LocatedFileStatus> filesIterator = fs.listFiles(new Path("hdfs:///hws/bin/"), false);
    Map<String, LocalResource> resources = new HashMap<String, LocalResource>();
    LOG.info("Files setup as resource");
    while (filesIterator.hasNext()) {
        LocatedFileStatus fileStatus = filesIterator.next();
        // Setup jar for ApplicationMaster
        LocalResource containerJar = Records.newRecord(LocalResource.class);
        ContainerUtils.setupContainerJar(fs, fileStatus.getPath(), containerJar);
        resources.put(fileStatus.getPath().getName(), containerJar);
    }
    LOG.info("container resource setup");
    amContainer.setLocalResources(resources);

    fs.close(); //closing FileSystem interface

    // Setup CLASSPATH for ApplicationMaster
    Map<String, String> appMasterEnv = new HashMap<String, String>();
    ContainerUtils.setupContainerEnv(appMasterEnv, conf);
    amContainer.setEnvironment(appMasterEnv);

    // Set up resource type requirements for ApplicationMaster
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(256);
    capability.setVirtualCores(1);

    // Finally, set-up ApplicationSubmissionContext for the application
    //ApplicationSubmissionContext appContext = 
    //app.getApplicationSubmissionContext();
    appContext.setApplicationName("Hadoop-Watershed"); // application name
    appContext.setAMContainerSpec(amContainer);
    appContext.setResource(capability);
    appContext.setQueue("default"); // queue 

    // Submit application
    LOG.info("Submitting application " + appId);
    yarnClient.submitApplication(appContext);

    LOG.info("Waiting for containers to finish");
    zk.waitUntilExists("/hadoop-watershed/" + appId.toString() + "/done", TimeUnit.MILLISECONDS, 250);
    ApplicationReport appReport = yarnClient.getApplicationReport(appId);
    YarnApplicationState appState = appReport.getYarnApplicationState();
    while (appState != YarnApplicationState.FINISHED && appState != YarnApplicationState.KILLED
            && appState != YarnApplicationState.FAILED) {
        Thread.sleep(100);
        appReport = yarnClient.getApplicationReport(appId);
        appState = appReport.getYarnApplicationState();
    }

    System.out.println("Application " + appId + " finished with" + " state " + appState + " at "
            + appReport.getFinishTime());

    System.out.println("deleting " + appId.toString() + " znode");
    zk.deleteRecursive("/hadoop-watershed/" + appId.toString()); //TODO remove app folder from ZooKeeper
}

From source file:io.aos.mapreduce.count.WordCountToolTest.java

License:Apache License

@BeforeClass
public static void setup() throws IOException {

    Properties props = new Properties();
    InputStream is = ClassLoader.getSystemResourceAsStream("hdfs-conf.properties");
    props.load(is);
    for (Entry<Object, Object> entry : props.entrySet()) {
        System.setProperty((String) entry.getKey(), (String) entry.getValue());
    }

    Map<String, String> envMap = new HashMap<String, String>();
    envMap.put("JAVA_HOME", System.getProperty("java.home"));
    setEnv(envMap);

    final Configuration conf = new Configuration();
    final Path TEST_ROOT_DIR = new Path(System.getProperty("test.build.data", "/tmp"));
    testdir = new Path(TEST_ROOT_DIR, "TestMiniMRClientCluster");
    inDir = new Path(testdir, "in");
    outDir = new Path(testdir, "out");

    FileSystem fs = FileSystem.getLocal(conf);
    if (fs.exists(testdir) && !fs.delete(testdir, true)) {
        throw new IOException("Could not delete " + testdir);
    }
    if (!fs.mkdirs(inDir)) {
        throw new IOException("Mkdirs failed to create " + inDir);
    }

    for (int i = 0; i < inFiles.length; i++) {
        inFiles[i] = new Path(inDir, "part_" + i);
        createFile(inFiles[i], conf);
    }

    // create the mini cluster to be used for the tests
    mrCluster = MiniMRClientClusterFactory.create(WordCountToolTest.class, 1, new Configuration());

}

From source file:io.aos.mapreduce.grep.GrepToolTest.java

License:Apache License

@BeforeClass
public static void setup() throws IOException {

    Properties props = new Properties();
    InputStream is = ClassLoader.getSystemResourceAsStream("hdfs-conf.properties");
    props.load(is);
    for (Entry<Object, Object> entry : props.entrySet()) {
        System.setProperty((String) entry.getKey(), (String) entry.getValue());
    }

    Map<String, String> envMap = new HashMap<String, String>();
    envMap.put("JAVA_HOME", System.getProperty("java.home"));
    setEnv(envMap);

    final Configuration conf = new Configuration();
    final Path TEST_ROOT_DIR = new Path(System.getProperty("test.build.data", "/tmp"));
    testdir = new Path(TEST_ROOT_DIR, "TestMiniMRClientCluster");
    inDir = new Path(testdir, "in");
    outDir = new Path(testdir, "out");

    FileSystem fs = FileSystem.getLocal(conf);
    if (fs.exists(testdir) && !fs.delete(testdir, true)) {
        throw new IOException("Could not delete " + testdir);
    }
    if (!fs.mkdirs(inDir)) {
        throw new IOException("Mkdirs failed to create " + inDir);
    }

    for (int i = 0; i < inFiles.length; i++) {
        inFiles[i] = new Path(inDir, "part_" + i);
        createFile(inFiles[i], conf);
    }

    // create the mini cluster to be used for the tests
    mrCluster = MiniMRClientClusterFactory.create(GrepToolTest.class, 1, new Configuration());

}