Example usage for org.apache.commons.io FileUtils forceDeleteOnExit

List of usage examples for org.apache.commons.io FileUtils forceDeleteOnExit

Introduction

In this page you can find the example usage for org.apache.commons.io FileUtils forceDeleteOnExit.

Prototype

public static void forceDeleteOnExit(File file) throws IOException 

Source Link

Document

Schedules a file to be deleted when the JVM exits.

Usage

From source file:org.apache.beam.sdk.io.hcatalog.test.EmbeddedMetastoreService.java

/**
 * Builds an embedded Hive metastore rooted at {@code baseDirPath}.
 * The whole base directory is scheduled for deletion when the JVM exits,
 * so nothing created under it survives the test run.
 *
 * @param baseDirPath root directory for all metastore/warehouse state
 * @throws IOException if the base directory cannot be scheduled for deletion
 */
public EmbeddedMetastoreService(String baseDirPath) throws IOException {
    // Defer cleanup of the entire base directory tree to JVM shutdown.
    FileUtils.forceDeleteOnExit(new File(baseDirPath));

    String hiveDirPath = makePathASafeFileName(baseDirPath + "/hive");
    // Timestamp keeps parallel/repeated runs from colliding on the same data dir.
    String testDataDirPath = makePathASafeFileName(hiveDirPath + "/data/"
            + EmbeddedMetastoreService.class.getCanonicalName() + System.currentTimeMillis());
    String testWarehouseDirPath = makePathASafeFileName(testDataDirPath + "/warehouse");

    hiveConf = new HiveConf(getClass());
    // Disable pre/post execution hooks and lock management for a lighter test setup.
    hiveConf.setVar(HiveConf.ConfVars.PREEXECHOOKS, "");
    hiveConf.setVar(HiveConf.ConfVars.POSTEXECHOOKS, "");
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
    hiveConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, testWarehouseDirPath);
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, true);
    hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
            "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd." + "SQLStdHiveAuthorizerFactory");
    hiveConf.set("test.tmp.dir", hiveDirPath);

    // Silence Derby's error log output.
    System.setProperty("derby.stream.error.file", "/dev/null");
    driver = new Driver(hiveConf);
    sessionState = SessionState.start(new SessionState(hiveConf));
}

From source file:org.apache.distributedlog.LocalDLMEmulator.java

/**
 * Stops the bookkeeper startup thread (if running) and schedules every
 * temporary directory for deletion when the JVM exits.
 */
public void teardown() throws Exception {
    final Thread starter = bkStartupThread;
    if (starter != null) {
        starter.interrupt(); // ask the startup thread to stop
        starter.join();      // wait until it has actually finished
    }
    // Cleanup is deferred to JVM shutdown rather than deleted eagerly.
    for (final File tmpDir : tmpDirs) {
        FileUtils.forceDeleteOnExit(tmpDir);
    }
}

From source file:org.apache.distributedlog.LocalDLMEmulator.java

/**
 * Entry point: runs a sandbox DistributedLog emulator against the given
 * ZooKeeper port. The ZooKeeper data directory is removed at JVM exit.
 */
public static void main(String[] args) throws Exception {
    try {
        if (args.length < 1) {
            System.out.println("Usage: LocalDLEmulator <zk_port>");
            System.exit(-1);
        }

        final int zkPort = Integer.parseInt(args[0]);
        final File zkDir = IOUtils.createTempDir("distrlog", "zookeeper");
        final LocalDLMEmulator localDlm = LocalDLMEmulator.newBuilder().zkPort(zkPort).build();

        // On shutdown: tear down the emulator and defer zk-dir cleanup to JVM exit.
        Runtime.getRuntime().addShutdownHook(new Thread() {
            @Override
            public void run() {
                try {
                    localDlm.teardown();
                    FileUtils.forceDeleteOnExit(zkDir);
                    System.out.println("ByeBye!");
                } catch (Exception ignored) {
                    // best-effort cleanup at shutdown; nothing useful to do on failure
                }
            }
        });
        localDlm.start();

        System.out.println(
                String.format("DistributedLog Sandbox is running now. You could access distributedlog://%s:%s",
                        DEFAULT_ZK_HOST, zkPort));
    } catch (Exception ex) {
        System.out.println("Exception occurred running emulator " + ex);
    }
}

From source file:org.apache.distributedlog.TestDistributedLogBase.java

@AfterClass
public static void teardownCluster() throws Exception {
    bkutil.teardown();/*from  ww  w .ja  va2 s  .c o m*/
    zks.stop();
    for (File dir : tmpDirs) {
        FileUtils.forceDeleteOnExit(dir);
    }
}

From source file:org.apache.distributedlog.ZooKeeperClusterTestCase.java

/** Stops the ZooKeeper server and schedules its data directory for deletion at JVM exit. */
@AfterClass
public static void shutdownZooKeeper() throws Exception {
    log.info("--- Shutdown zookeeper at {} ---", zkServers);
    zks.stop();
    if (zkDir != null) {
        // Directory removal is deferred until the JVM exits.
        FileUtils.forceDeleteOnExit(zkDir);
    }
}

From source file:org.apache.flume.channel.file.TestCheckpoint.java

/**
 * Creates the checkpoint and tracking temp files for each test, scheduling
 * every one of them for deletion when the JVM exits.
 *
 * @throws IOException if a temp file cannot be created or scheduled for deletion
 */
@Before
public void setup() throws IOException {
    file = File.createTempFile("Checkpoint", "");
    // Fix: the checkpoint file itself was the only temp file not scheduled
    // for deletion, so it leaked past the test run.
    FileUtils.forceDeleteOnExit(file);
    inflightPuts = File.createTempFile("inflightPuts", "");
    FileUtils.forceDeleteOnExit(inflightPuts);
    inflightTakes = File.createTempFile("inflightTakes", "");
    FileUtils.forceDeleteOnExit(inflightTakes);
    queueSet = File.createTempFile("queueset", "");
    FileUtils.forceDeleteOnExit(queueSet);
    queueSet2 = File.createTempFile("queueset2", "");
    FileUtils.forceDeleteOnExit(queueSet2);
    queueSet3 = File.createTempFile("queueset3", "");
    FileUtils.forceDeleteOnExit(queueSet3);

    // Sanity-check that the checkpoint file exists and is writable.
    Assert.assertTrue(file.isFile());
    Assert.assertTrue(file.canWrite());
}

From source file:org.apache.flume.channel.file.TestCheckpointRebuilder.java

@Test
public void testFastReplay() throws Exception {
    // Build a small file channel (capacity 50) and fill it with events.
    Map<String, String> overrides = Maps.newHashMap();
    overrides.put(FileChannelConfiguration.CAPACITY, String.valueOf(50));
    overrides.put(FileChannelConfiguration.TRANSACTION_CAPACITY, String.valueOf(50));
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> in = fillChannel(channel, "checkpointBulder");
    channel.stop();
    // Point at a fresh checkpoint directory; schedule any checkpoint artifacts
    // for deletion at JVM exit so they never survive the test run. NOTE: the
    // deletes are registered BEFORE mkdirs() on purpose — they only take
    // effect at shutdown, after the rebuild below has used the files.
    checkpointDir = new File(baseDir, "chkpt2");
    File checkpointFile = new File(checkpointDir, "checkpoint");
    File metaDataFile = Serialization.getMetaDataFile(checkpointFile);
    File inflightTakesFile = new File(checkpointDir, "inflighttakes");
    File inflightPutsFile = new File(checkpointDir, "inflightputs");
    File queueSetDir = new File(checkpointDir, "queueset");
    FileUtils.forceDeleteOnExit(checkpointFile);
    FileUtils.forceDeleteOnExit(metaDataFile);
    FileUtils.forceDeleteOnExit(inflightPutsFile);
    FileUtils.forceDeleteOnExit(inflightTakesFile);
    checkpointDir.mkdirs();
    FileUtils.forceDeleteOnExit(checkpointDir);
    // Rebuild the checkpoint from the data logs (fast replay path).
    EventQueueBackingStore backingStore = EventQueueBackingStoreFactory.get(checkpointFile, 50, "test");
    FlumeEventQueue queue = new FlumeEventQueue(backingStore, inflightTakesFile, inflightPutsFile, queueSetDir);
    CheckpointRebuilder checkpointRebuilder = new CheckpointRebuilder(getAllLogs(dataDirs), queue, true);
    Assert.assertTrue(checkpointRebuilder.rebuild());
    queue.close();
    // Reopen the channel and verify every event written is read back.
    channel = createFileChannel(overrides);
    channel.start();
    Assert.assertTrue(channel.isOpen());
    Set<String> out = consumeChannel(channel);
    compareInputAndOut(in, out);
}

From source file:org.apache.hadoop.hive.metastore.TestMetastoreVersion.java

/** Schedules the test metastore DB directory for deletion at JVM exit, if present. */
@Override
protected void tearDown() throws Exception {
    final File metastoreDir = new File(testMetastoreDB);
    if (metastoreDir.exists()) {
        FileUtils.forceDeleteOnExit(metastoreDir);
    }
}

From source file:org.apache.hadoop.hive.metastore.tools.TestSchemaToolCatalogOps.java

/**
 * Schedules the test metastore DB directory for deletion at JVM exit and
 * restores the original stdout/stderr streams replaced during the tests.
 */
@AfterClass
public static void removeDb() throws Exception {
    final File dbDir = new File(testMetastoreDB);
    if (dbDir.exists()) {
        FileUtils.forceDeleteOnExit(dbDir);
    }
    // Put the captured streams back so later output goes to the real console.
    System.setOut(outStream);
    System.setErr(errStream);
}

From source file:org.apache.hadoop.hive.metastore.tools.TestSchemaToolForMetastore.java

/**
 * Schedules the test metastore DB directory for deletion at JVM exit,
 * restores stdout/stderr, and closes the JDBC connection.
 *
 * @throws IOException if scheduling the directory for deletion fails
 * @throws SQLException if closing the connection fails
 */
@After
public void tearDown() throws IOException, SQLException {
    try {
        File metaStoreDir = new File(testMetastoreDB);
        if (metaStoreDir.exists()) {
            FileUtils.forceDeleteOnExit(metaStoreDir);
        }
        // Restore the streams replaced during the test.
        System.setOut(outStream);
        System.setErr(errStream);
    } finally {
        // Fix: close the connection even if cleanup above throws,
        // so an IOException no longer leaks the JDBC connection.
        if (conn != null) {
            conn.close();
        }
    }
}