Example usage for org.aspectj.util FileUtil deleteContents

Introduction

This page shows example usage of org.aspectj.util FileUtil deleteContents.

Prototype

public static int deleteContents(File dir) 

Document

Recursively delete the contents of dir, but not the dir itself.
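
As a minimal sketch (not taken from the examples below): the call empties a scratch directory while leaving the directory itself in place. The directory and file names here are hypothetical, and the exact meaning of the returned int should be confirmed against the AspectJ javadoc.

import java.io.File;
import java.io.IOException;

import org.aspectj.util.FileUtil;

public class DeleteContentsSketch {
    public static void main(String[] args) throws IOException {
        // Hypothetical scratch directory for this sketch
        File dir = new File(System.getProperty("java.io.tmpdir"), "deleteContentsSketch");
        if (!dir.exists() && !dir.mkdirs()) {
            throw new IOException("could not create " + dir);
        }
        new File(dir, "a.txt").createNewFile();
        new File(dir, "sub").mkdirs();

        // Recursively removes everything under dir, but not dir itself
        int result = FileUtil.deleteContents(dir);

        System.out.println("deleteContents returned: " + result);
        System.out.println("dir still exists: " + dir.exists()); // expected: true
    }
}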

Usage

From source file: org.apache.hadoop.hdfs.server.namenode.TestEditLog.java

License: Apache License

/**
 * Do a test to make sure the edit log can recover edits even after
 * a non-clean shutdown. This does a simulated crash by copying over
 * the edits directory while the NN is still running, then shutting it
 * down, and restoring that edits directory.
 */
private void testCrashRecovery(int numTransactions) throws Exception {
    MiniDFSCluster cluster = null;
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, CHECKPOINT_ON_STARTUP_MIN_TXNS);

    try {
        LOG.info("\n===========================================\n" + "Starting empty cluster");

        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(true).build();
        cluster.waitActive();

        FileSystem fs = cluster.getFileSystem();
        for (int i = 0; i < numTransactions; i++) {
            fs.mkdirs(new Path("/test" + i));
        }

        // Directory layout looks like:
        // test/data/dfs/nameN/current/{fsimage_N,edits_...}
        File nameDir = new File(cluster.getNameDirs(0).iterator().next().getPath());
        File dfsDir = nameDir.getParentFile();
        assertEquals("dfs", dfsDir.getName()); // make sure we got the right dir

        LOG.info("Copying data directory aside to a hot backup");
        File backupDir = new File(dfsDir.getParentFile(), "dfs.backup-while-running");
        FileUtil.copyDir(dfsDir, backupDir);

        LOG.info("Shutting down cluster #1");
        cluster.shutdown();
        cluster = null;

        // Now restore the backup
        FileUtil.deleteContents(dfsDir);
        backupDir.renameTo(dfsDir);

        // Directory layout looks like:
        // test/data/dfs/nameN/current/{fsimage_N,edits_...}
        File currentDir = new File(nameDir, "current");

        // We should see the file as in-progress
        File editsFile = new File(currentDir, NNStorage.getInProgressEditsFileName(1));
        assertTrue("Edits file " + editsFile + " should exist", editsFile.exists());

        File imageFile = FSImageTestUtil.findNewestImageFile(currentDir.getAbsolutePath());
        assertNotNull("No image found in " + nameDir, imageFile);
        assertEquals(NNStorage.getImageFileName(0), imageFile.getName());

        // Try to start a new cluster
        LOG.info("\n===========================================\n"
                + "Starting same cluster after simulated crash");
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
        cluster.waitActive();

        // We should still have the files we wrote prior to the simulated crash
        fs = cluster.getFileSystem();
        for (int i = 0; i < numTransactions; i++) {
            assertTrue(fs.exists(new Path("/test" + i)));
        }

        long expectedTxId;
        if (numTransactions > CHECKPOINT_ON_STARTUP_MIN_TXNS) {
            // It should have saved a checkpoint on startup since there
            // were more unfinalized edits than configured
            expectedTxId = numTransactions + 1;
        } else {
            // otherwise, it shouldn't have made a checkpoint
            expectedTxId = 0;
        }
        imageFile = FSImageTestUtil.findNewestImageFile(currentDir.getAbsolutePath());
        assertNotNull("No image found in " + nameDir, imageFile);
        assertEquals(NNStorage.getImageFileName(expectedTxId), imageFile.getName());

        // Started successfully. Shut it down and make sure it can restart.
        cluster.shutdown();
        cluster = null;

        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
        cluster.waitActive();
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

From source file: org.apache.hadoop.hdfs.util.TestAtomicFileOutputStream.java

License: Apache License

@Before
public void cleanupTestDir() throws IOException {
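    // Ensure the test directory exists, then empty it before each test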
    assertTrue(TEST_DIR.exists() || TEST_DIR.mkdirs());
    FileUtil.deleteContents(TEST_DIR);
}