Example usage for org.apache.commons.io FileUtils cleanDirectory

Introduction

This page collects example usages of org.apache.commons.io FileUtils cleanDirectory.

Prototype

public static void cleanDirectory(File directory) throws IOException 

Document

Cleans a directory without deleting it.
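
A minimal, self-contained sketch of the call (the directory name below is hypothetical): cleanDirectory deletes everything inside the directory but leaves the directory itself in place. Depending on the commons-io version, a missing or non-directory argument fails with an IllegalArgumentException or an IOException.

import java.io.File;
import java.io.IOException;

import org.apache.commons.io.FileUtils;

public class CleanDirectoryExample {
    public static void main(String[] args) throws IOException {
        // Hypothetical demo directory under the system temp dir
        File dir = new File(System.getProperty("java.io.tmpdir"), "clean-directory-demo");
        dir.mkdirs();
        new File(dir, "stale.tmp").createNewFile();

        FileUtils.cleanDirectory(dir);

        // The directory itself survives; only its contents are gone
        System.out.println(dir.exists() + ", entries: " + dir.list().length);
    }
}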

Usage

From source file:org.apache.nifi.processors.standard.TestGetSFTP.java

private void emptyTestDirectory() throws IOException {
    // Empty the virtual file system folder (contents only; the folder itself remains)
    Path dir = Paths.get(sshTestServer.getVirtualFileSystemPath());
    FileUtils.cleanDirectory(dir.toFile());
}

From source file:org.apache.openjpa.eclipse.util.PCEnhancerHelperTest.java

private boolean checkEnhance(PCEnhancerHelper eh, String classNameToCheck) throws Exception {
    String classFileName = classPackage.replace('.', '/') + classNameToCheck + ".class";

    FileUtils.forceMkdir(targetDir);
    FileUtils.cleanDirectory(targetDir);
    FileUtils.copyFileToDirectory(new File(srcDir, classFileName),
            new File(targetDir, classPackage.replace('.', '/')));
    File classFile = new File(targetDir, classFileName);
    assertTrue(classFile.exists());

    return eh.enhance(classFile);
}
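
Several of the snippets on this page pair forceMkdir with cleanDirectory, as above, to guarantee an empty directory whether or not it existed beforehand. A minimal sketch of just that idiom (the path is hypothetical):

import java.io.File;
import java.io.IOException;

import org.apache.commons.io.FileUtils;

public class ResetDirectoryExample {
    public static void main(String[] args) throws IOException {
        File workDir = new File("/tmp/build-output"); // hypothetical path
        FileUtils.forceMkdir(workDir);     // create it (and any missing parents) if absent
        FileUtils.cleanDirectory(workDir); // then empty it if it already had contents
        // workDir now exists and is empty
    }
}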

From source file:org.apache.servicecomb.it.deploy.ServiceCenterDeploy.java

protected void initServiceCenterCmd() throws IOException {
    // Where is service center?
    // 1. Find it from the env, for local dev environments
    LOGGER.info("try to find serviceCenter by env {}.", "serviceCenterHome");
    String dir = System.getenv("serviceCenterHome");
    if (dir != null) {
        LOGGER.info("serviceCenterHome={}.", dir);
        File file = new File(dir, deployDefinition.getCmd());
        if (file.exists()) {
            FileUtils.cleanDirectory(new File(dir, "data"));
            deployDefinition.setWorkDir(dir);
            deployDefinition.setCmd(file.getAbsolutePath());
            return;
        }

        LOGGER.info("{} does not exist.", file.getAbsolutePath());
    }

    // 2. Docker, for CI environments
    LOGGER.info("cannot find serviceCenter by env {}, trying to run it by docker.", "serviceCenterHome");
    deployDefinition.setCmd("docker");
    deployDefinition
            .setArgs(new String[] { "run", "-p", "127.0.0.1:30100:30100", "servicecomb/service-center" });
}

From source file:org.apache.storm.daemon.nimbus.NimbusUtils.java

@ClojureClass(className = "backtype.storm.daemon.nimbus#setup-storm-code")
public static void setupStormCode(Map<Object, Object> conf, String topologyId, String tmpJarLocation,
        Map<Object, Object> stormConf, StormTopology topology) throws IOException {
    String stormroot = ConfigUtil.masterStormdistRoot(conf, topologyId);
    FileUtils.forceMkdir(new File(stormroot));
    FileUtils.cleanDirectory(new File(stormroot));
    setupJar(conf, tmpJarLocation, stormroot);
    FileUtils.writeByteArrayToFile(new File(ConfigUtil.masterStormcodePath(stormroot)),
            Utils.serialize(topology));
    FileUtils.writeByteArrayToFile(new File(ConfigUtil.masterStormconfPath(stormroot)),
            Utils.serialize(stormConf));
}

From source file:org.apache.storm.daemon.supervisor.Supervisor.java

/**
 * Launch the supervisor.
 */
public void launch() throws Exception {
    LOG.info("Starting Supervisor with conf {}", conf);
    String path = ServerConfigUtils.supervisorTmpDir(conf);
    FileUtils.cleanDirectory(new File(path));

    SupervisorHeartbeat hb = new SupervisorHeartbeat(conf, this);
    hb.run();
    // should synchronize supervisor so it doesn't launch anything after being down (optimization)
    Integer heartbeatFrequency = ObjectReader
            .getInt(conf.get(DaemonConfig.SUPERVISOR_HEARTBEAT_FREQUENCY_SECS));
    heartbeatTimer.scheduleRecurring(0, heartbeatFrequency, hb);

    this.eventManager = new EventManagerImp(false);
    this.readState = new ReadClusterState(this);

    asyncLocalizer.start();

    if ((Boolean) conf.get(DaemonConfig.SUPERVISOR_ENABLE)) {
        // This isn't strictly necessary, but it doesn't hurt and ensures that the machine stays up
        // to date even if callbacks don't all work exactly right
        eventTimer.scheduleRecurring(0, 10, new EventManagerPushCallback(readState, eventManager));

        // supervisor health check
        eventTimer.scheduleRecurring(300, 300, new SupervisorHealthCheck(this));
    }
    LOG.info("Starting supervisor with id {} at host {}.", getId(), getHostName());
}

From source file:org.apache.synapse.samples.framework.SynapseTestCase.java

/**
 * Shut down the servers and clean up temporary files.
 */
private void doCleanup() {
    if (synapseController != null) {
        log.debug("Stopping Synapse");
        synapseController.stopProcess();
    }

    List<ProcessController> removed = new ArrayList<ProcessController>();
    for (ProcessController bsc : backendServerControllers) {
        if (bsc instanceof Axis2BackEndServerController) {
            log.info("Stopping Server: " + bsc.getServerName());
            bsc.stopProcess();
            removed.add(bsc);
        }
    }

    for (ProcessController bsc : removed) {
        backendServerControllers.remove(bsc);
    }

    for (ProcessController bsc : backendServerControllers) {
        log.info("Stopping Server: " + bsc.getServerName());
        bsc.stopProcess();
    }

    //cleaning up temp dir
    try {
        FileUtils.cleanDirectory(new File(System.getProperty("java.io.tmpdir")));
    } catch (IOException e) {
        log.warn("Error while cleaning temp directory", e);
    }
}

From source file:org.apache.zeppelin.dep.DependencyResolverTest.java

@Test
public void testLoad() throws Exception {
    // basic load
    resolver.load("com.databricks:spark-csv_2.10:1.3.0", testCopyPath);
    assertEquals(4, testCopyPath.list().length);
    FileUtils.cleanDirectory(testCopyPath);

    // load with exclusions parameter
    resolver.load("com.databricks:spark-csv_2.10:1.3.0",
            Collections.singletonList("org.scala-lang:scala-library"), testCopyPath);
    assertEquals(3, testCopyPath.list().length);
    FileUtils.cleanDirectory(testCopyPath);

    // load from added repository
    resolver.addRepo("sonatype", "https://oss.sonatype.org/content/repositories/agimatec-releases/", false);
    resolver.load("com.agimatec:agimatec-validation:0.9.3", testCopyPath);
    assertEquals(8, testCopyPath.list().length);

    // load invalid artifact
    resolver.delRepo("sonatype");
    exception.expect(RepositoryException.class);
    resolver.load("com.agimatec:agimatec-validation:0.9.3", testCopyPath);
}

From source file:org.apache.zeppelin.interpreter.InterpreterSettingManager.java

private void loadInterpreterDependencies(final InterpreterSetting setting) {
    setting.setStatus(InterpreterSetting.Status.DOWNLOADING_DEPENDENCIES);
    setting.setErrorReason(null);
    interpreterSettings.put(setting.getId(), setting);
    synchronized (interpreterSettings) {
        final Thread t = new Thread() {
            public void run() {
                try {
                    // dependencies to prevent library conflict
                    File localRepoDir = new File(
                            zeppelinConfiguration.getInterpreterLocalRepoPath() + "/" + setting.getId());
                    if (localRepoDir.exists()) {
                        try {
                            FileUtils.cleanDirectory(localRepoDir);
                        } catch (FileNotFoundException e) {
                            logger.info("A file that does not exist cannot be deleted, nothing to worry", e);
                        }
                    }

                    // load dependencies
                    List<Dependency> deps = setting.getDependencies();
                    if (deps != null) {
                        for (Dependency d : deps) {
                            File destDir = new File(
                                    zeppelinConfiguration.getRelativeDir(ConfVars.ZEPPELIN_DEP_LOCALREPO));

                            if (d.getExclusions() != null) {
                                dependencyResolver.load(d.getGroupArtifactVersion(), d.getExclusions(),
                                        new File(destDir, setting.getId()));
                            } else {
                                dependencyResolver.load(d.getGroupArtifactVersion(),
                                        new File(destDir, setting.getId()));
                            }
                        }
                    }

                    setting.setStatus(InterpreterSetting.Status.READY);
                    setting.setErrorReason(null);
                } catch (Exception e) {
                    logger.error(String.format(
                            "Error while downloading repos for interpreter group : %s,"
                                    + " go to interpreter setting page click on edit and save it again to make "
                                    + "this interpreter work properly. : %s",
                            setting.getGroup(), e.getLocalizedMessage()), e);
                    setting.setErrorReason(e.getLocalizedMessage());
                    setting.setStatus(InterpreterSetting.Status.ERROR);
                } finally {
                    interpreterSettings.put(setting.getId(), setting);
                }
            }
        };
        t.start();
    }
}

From source file:org.argrr.extractor.gdrive.downloader.ChartsDownloader.java

public static void initChartsFolder(String outputFolder) {
    ChartsDownloader.rootOutputPathCharts = outputFolder;
    File f = new File(outputFolder);
    try {
        FileUtils.cleanDirectory(f); // clean out the directory (optional, but good to know)
        FileUtils.forceDelete(f); // then delete the directory itself
    } catch (Exception ex) {
        // Ignored: the folder may not exist yet
    }
    try {
        FileUtils.forceMkdir(f); //create directory
    } catch (IOException ex) {
        Logger.getLogger(ChartsDownloader.class.getName()).log(Level.WARNING,
                "can't create the gdoc charts folder", ex);
    }
}

From source file:org.artifactory.common.ArtifactoryHome.java

private void create() {
    try {
        // Create or find all the needed sub folders
        etcDir = getOrCreateSubDir("etc");
        dataDir = getOrCreateSubDir("data");
        logDir = getOrCreateSubDir("logs");
        backupDir = getOrCreateSubDir("backup");
        supportDir = getOrCreateSubDir("support");

        File tempRootDir = getOrCreateSubDir(dataDir, "tmp");
        tempWorkDir = getOrCreateSubDir(tempRootDir, "work");
        tempUploadDir = getOrCreateSubDir(tempRootDir, "artifactory-uploads");

        //Manage the artifactory.system.properties file under etc dir
        initAndLoadSystemPropertyFile();

        //Check the write access to all directories that need it
        checkWritableDirectory(dataDir);
        checkWritableDirectory(logDir);
        checkWritableDirectory(backupDir);
        checkWritableDirectory(supportDir);
        checkWritableDirectory(tempRootDir);
        checkWritableDirectory(tempWorkDir);
        checkWritableDirectory(tempUploadDir);

        //If ha props exist, load the storage from cluster_home/ha-etc
        File haPropertiesFile = getArtifactoryHaPropertiesFile();
        if (haPropertiesFile.exists()) {
            //load ha properties
            haNodeProperties = new HaNodeProperties();
            haNodeProperties.load(haPropertiesFile);

            File haArtifactoryHome = haNodeProperties.getClusterHome();
            if (!haArtifactoryHome.exists()) {
                throw new RuntimeException(
                        "Artifactory HA home does not exist: " + haArtifactoryHome.getAbsolutePath());
            }

            //create directory structure
            haEtcDir = getOrCreateSubDir(haArtifactoryHome, "ha-etc");
            haDataDir = getOrCreateSubDir(haArtifactoryHome, "ha-data");
            haBackupDir = getOrCreateSubDir(haArtifactoryHome, "ha-backup");

            checkWritableDirectory(haEtcDir);
            checkWritableDirectory(haDataDir);
            checkWritableDirectory(haBackupDir);

            //load cluster properties
            File clusterPropertiesFile = getArtifactoryClusterPropertiesFile();
            clusterProperties = new ClusterProperties();
            clusterProperties.load(clusterPropertiesFile);
        }

        pluginsDir = getOrCreateSubDir(getHaAwareEtcDir(), "plugins");
        logoDir = getOrCreateSubDir(getHaAwareEtcDir(), "ui");

        checkWritableDirectory(pluginsDir);

        try {
            //noinspection ConstantConditions
            for (File rootTmpDirChild : tempRootDir.listFiles()) {
                if (rootTmpDirChild.isDirectory()) {
                    FileUtils.cleanDirectory(rootTmpDirChild);
                } else {
                    FileUtils.deleteQuietly(rootTmpDirChild);
                }
            }
        } catch (Exception e) {
            System.out.println(ArtifactoryHome.class.getName()
                    + " - Warning: unable to clean temporary directories. Cause: " + e.getMessage());
        }

    } catch (Exception e) {
        throw new IllegalArgumentException(
                "Could not initialize artifactory home directory due to: " + e.getMessage(), e);
    }
}