Example usage for org.apache.hadoop.fs FileUtil fullyDelete

List of usage examples for org.apache.hadoop.fs FileUtil fullyDelete

Introduction

On this page you can find example usage of org.apache.hadoop.fs FileUtil fullyDelete.

Prototype

public static boolean fullyDelete(final File dir) 

Document

Delete a directory and all its contents.
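
The snippet below is a minimal, hypothetical sketch of calling fullyDelete directly; the scratch directory name is an assumption for illustration only, not taken from the examples that follow.

import java.io.File;
import org.apache.hadoop.fs.FileUtil;

public class FullyDeleteExample {
    public static void main(String[] args) {
        // Hypothetical scratch directory; substitute the directory you actually want to remove.
        File scratchDir = new File("./target/scratch").getAbsoluteFile();

        // Removes the directory and everything under it; returns true on success.
        boolean deleted = FileUtil.fullyDelete(scratchDir);
        System.out.println("Deleted " + scratchDir + ": " + deleted);
    }
}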

Usage

From source file:org.apache.flink.hdfstests.ContinuousFileMonitoringTest.java

License:Apache License

@BeforeClass
public static void createHDFS() {
    try {
        baseDir = new File("./target/hdfs/hdfsTesting").getAbsoluteFile();
        FileUtil.fullyDelete(baseDir);

        org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
        hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
        hdConf.set("dfs.block.size", String.valueOf(1048576)); // this is the minimum we can set.

        MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
        hdfsCluster = builder.build();

        hdfsURI = "hdfs://" + hdfsCluster.getURI().getHost() + ":" + hdfsCluster.getNameNodePort() + "/";
        hdfs = new org.apache.hadoop.fs.Path(hdfsURI).getFileSystem(hdConf);

    } catch (Throwable e) {
        e.printStackTrace();
        Assert.fail("Test failed " + e.getMessage());
    }
}

From source file:org.apache.flink.hdfstests.ContinuousFileMonitoringTest.java

License:Apache License

@AfterClass
public static void destroyHDFS() {
    try {
        FileUtil.fullyDelete(baseDir);
        hdfsCluster.shutdown();
    } catch (Throwable t) {
        throw new RuntimeException(t);
    }
}

From source file:org.apache.flink.hdfstests.ContinuousFileProcessingFrom11MigrationTest.java

License:Apache License

@BeforeClass
public static void createHDFS() {
    try {
        baseDir = tempFolder.newFolder().getAbsoluteFile();
        FileUtil.fullyDelete(baseDir);

        Configuration hdConf = new Configuration();
        hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
        hdConf.set("dfs.block.size", String.valueOf(1048576)); // this is the minimum we can set.

        MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
        hdfsCluster = builder.build();

        hdfsURI = "hdfs://" + hdfsCluster.getURI().getHost() + ":" + hdfsCluster.getNameNodePort() + "/";
        hdfs = new org.apache.hadoop.fs.Path(hdfsURI).getFileSystem(hdConf);

    } catch (Throwable e) {
        e.printStackTrace();
        Assert.fail("Test failed " + e.getMessage());
    }
}

From source file:org.apache.flink.hdfstests.HDFSTest.java

License:Apache License

@Before
public void createHDFS() {
    try {
        Configuration hdConf = new Configuration();

        File baseDir = new File("./target/hdfs/hdfsTest").getAbsoluteFile();
        FileUtil.fullyDelete(baseDir);
        hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
        MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
        hdfsCluster = builder.build();

        hdfsURI = "hdfs://" + hdfsCluster.getURI().getHost() + ":" + hdfsCluster.getNameNodePort() + "/";

        hdPath = new org.apache.hadoop.fs.Path("/test");
        hdfs = hdPath.getFileSystem(hdConf);
        FSDataOutputStream stream = hdfs.create(hdPath);
        for (int i = 0; i < 10; i++) {
            stream.write("Hello HDFS\n".getBytes());
        }
        stream.close();

    } catch (Throwable e) {
        e.printStackTrace();
        Assert.fail("Test failed " + e.getMessage());
    }
}

From source file:org.apache.flink.tachyon.FileStateHandleTest.java

License:Apache License

@Before
public void createHDFS() {
    try {
        Configuration hdConf = new Configuration();

        File baseDir = new File("./target/hdfs/filestatehandletest").getAbsoluteFile();
        FileUtil.fullyDelete(baseDir);
        hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
        MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
        hdfsCluster = builder.build();

        hdfsURI = "hdfs://" + hdfsCluster.getURI().getHost() + ":" + hdfsCluster.getNameNodePort() + "/";

        hdPath = new org.apache.hadoop.fs.Path("/StateHandleTest");
        hdfs = hdPath.getFileSystem(hdConf);
        hdfs.mkdirs(hdPath);

    } catch (Throwable e) {
        e.printStackTrace();
        Assert.fail("Test failed " + e.getMessage());
    }
}

From source file:org.apache.flink.test.checkpointing.ContinuousFileProcessingCheckpointITCase.java

License:Apache License

@BeforeClass
public static void createHDFS() {
    try {
        baseDir = new File("./target/localfs/fs_tests").getAbsoluteFile();
        FileUtil.fullyDelete(baseDir);

        org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();

        localFsURI = "file:///" + baseDir + "/";
        localFs = new org.apache.hadoop.fs.Path(localFsURI).getFileSystem(hdConf);

    } catch (Throwable e) {
        e.printStackTrace();
        Assert.fail("Test failed " + e.getMessage());
    }
}

From source file:org.apache.flink.test.checkpointing.ContinuousFileProcessingCheckpointITCase.java

License:Apache License

@AfterClass
public static void destroyHDFS() {
    try {
        FileUtil.fullyDelete(baseDir);
    } catch (Throwable t) {
        throw new RuntimeException(t);
    }
}

From source file:org.apache.hadoop.fs.FileUtil.java

License:Apache License

/** Copy local files to a FileSystem. */
public static boolean copy(File src, FileSystem dstFS, Path dst, boolean deleteSource, Configuration conf)
        throws IOException {
    dst = checkDest(src.getName(), dstFS, dst, false);

    if (src.isDirectory()) {
        if (!dstFS.mkdirs(dst)) {
            return false;
        }
        File contents[] = listFiles(src);
        for (int i = 0; i < contents.length; i++) {
            copy(contents[i], dstFS, new Path(dst, contents[i].getName()), deleteSource, conf);
        }
    } else if (src.isFile()) {
        InputStream in = null;
        OutputStream out = null;
        try {
            in = new FileInputStream(src);
            out = dstFS.create(dst);
            IOUtils.copyBytes(in, out, conf);
        } catch (IOException e) {
            IOUtils.closeStream(out);
            IOUtils.closeStream(in);
            throw e;
        }
    } else {
        throw new IOException(src.toString() + ": No such file or directory");
    }
    if (deleteSource) {
        return FileUtil.fullyDelete(src);
    } else {
        return true;
    }
}
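
As a follow-up, here is a minimal, hypothetical sketch of invoking this copy method so that the deleteSource branch (and therefore FileUtil.fullyDelete) is exercised; the local path, HDFS URI, and target path are assumptions for illustration.

import java.io.File;
import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

public class CopyAndDeleteSourceExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Assumed namenode address; replace with your cluster's URI.
        FileSystem dstFS = FileSystem.get(URI.create("hdfs://namenode:8020"), conf);

        File localStaging = new File("/tmp/staging");   // assumed local source directory
        Path dst = new Path("/data/staging");           // assumed HDFS destination

        // deleteSource = true makes copy() finish by calling FileUtil.fullyDelete on the source.
        boolean ok = FileUtil.copy(localStaging, dstFS, dst, true, conf);
        System.out.println("Copied and deleted source: " + ok);
    }
}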

From source file:org.apache.hama.bsp.TaskLog.java

License:Apache License

/**
 * Purge old user logs.
 * 
 * @throws IOException
 */
public static synchronized void cleanup(int logsRetainHours) throws IOException {
    // Purge logs of tasks on this tasktracker if their
    // mtime has exceeded "bsp.task.log.retain" hours
    long purgeTimeStamp = System.currentTimeMillis() - (logsRetainHours * 60L * 60 * 1000);
    File[] oldTaskLogs = LOG_DIR.listFiles(new TaskLogsPurgeFilter(purgeTimeStamp));
    if (oldTaskLogs != null) {
        for (File oldTaskLog : oldTaskLogs) {
            FileUtil.fullyDelete(oldTaskLog);
        }
    }
}

From source file:org.apache.hama.MiniZooKeeperCluster.java

License:Apache License

private static void recreateDir(File dir) throws IOException {
    if (dir.exists()) {
        FileUtil.fullyDelete(dir);
    }
    try {
        dir.mkdirs();
    } catch (SecurityException e) {
        throw new IOException("creating dir: " + dir, e);
    }
}