Example usage for org.apache.commons.io FileUtils deleteDirectory

List of usage examples for org.apache.commons.io FileUtils deleteDirectory

Introduction

On this page you can find example usages of org.apache.commons.io FileUtils deleteDirectory.

Prototype

public static void deleteDirectory(File directory) throws IOException 

Document

Deletes a directory recursively.
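A minimal, self-contained sketch of the call itself, assuming only that commons-io is on the classpath (the path is hypothetical): deleteDirectory removes the directory and everything beneath it, returns silently if the directory does not exist, and throws IOException when something cannot be deleted.

import java.io.File;
import java.io.IOException;

import org.apache.commons.io.FileUtils;

public class DeleteDirectoryExample {
    public static void main(String[] args) throws IOException {
        // Hypothetical output directory; after this call the directory and
        // all of its contents are gone. A nonexistent directory is a no-op;
        // an undeletable file causes an IOException.
        FileUtils.deleteDirectory(new File("build/tmp-output"));
    }
}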

Usage

From source file:de.uniwue.dmir.heatmap.EntryPointIncremental.java

@SuppressWarnings({ "rawtypes", "unchecked" })
public static void main(String[] args) throws IOException, ParseException {

    DateFormat df = new SimpleDateFormat(DATE_FORMAT);
    SimpleDateFormat backupDf = new SimpleDateFormat(BACKUP_DATE_FORMAT);

    String workDir = System.getProperty("workDir", ".");
    LOGGER.debug("Work dir: {}", workDir);
    String configDir = System.getProperty("configDir", ".");
    LOGGER.debug("Config dir: {}", configDir);

    File seedDir = new File(workDir, SEED_DIR);
    LOGGER.debug("Seed dir: {}", seedDir);
    File currentDir = new File(workDir, CURRENT_DIR);
    LOGGER.debug("Current dir: {}", currentDir);
    File backupDir = new File(workDir, BACKUP_DIR);
    LOGGER.debug("Backup dir: {}", backupDir);

    String initialMinTimeString = System.getProperty("minTime");
    LOGGER.debug("Initial minimal time parameter: {}", initialMinTimeString);
    Date initialMinTime = initialMinTimeString == null ? new Date(0) : df.parse(initialMinTimeString);
    LOGGER.debug("Initial minimal time: {}", df.format(initialMinTime));

    String absoluteMaxTimeString = System.getProperty("maxTime");
    LOGGER.debug("Absolute maximal time parameter: {}", absoluteMaxTimeString);
    Date absoluteMaxTime = absoluteMaxTimeString == null ? new Date()
            : new SimpleDateFormat(DATE_FORMAT).parse(absoluteMaxTimeString);
    LOGGER.debug("Absolute maximal time: {}", df.format(absoluteMaxTime));

    // the "file:" prefix makes FileSystemXmlApplicationContext resolve the
    // locations as file: URLs rather than working-directory-relative paths
    String incrementalFile = new File("file:" + configDir, INCREMENTAL_FILE).getPath();
    String settingsFile = new File("file:" + configDir, HEATMAP_PROCESSOR__FILE).getPath();

    LOGGER.debug("Initializing incremental control file: {}", incrementalFile);
    FileSystemXmlApplicationContext incrementalContext = new FileSystemXmlApplicationContext(incrementalFile);

    // get point limit
    int pointLimit = Integer
            .parseInt(incrementalContext.getBeanFactory().resolveEmbeddedValue("${point.limit}"));
    LOGGER.debug("Print limit: {}", pointLimit);

    // get backups to keep
    int backupsToKeep = Integer
            .parseInt(incrementalContext.getBeanFactory().resolveEmbeddedValue("${backups.to.keep}"));
    LOGGER.debug("Backups to keep: {}", pointLimit);

    LOGGER.debug("Initializing process components (manager and limiter).");
    IProcessManager processManager = incrementalContext.getBean(IProcessManager.class);
    IProcessLimiter processLimiter = incrementalContext.getBean(IProcessLimiter.class);

    LOGGER.debug("Starting incremental loop.");
    while (true) { // break as soon as no new points are available

        // cleanup --- just in case
        LOGGER.debug("Deleting \"current\" dir.");
        FileUtils.deleteDirectory(currentDir);

        // copy from seed to current
        LOGGER.debug("Copying seed.");
        seedDir.mkdirs();
        FileUtils.copyDirectory(seedDir, currentDir);

        // get min time
        LOGGER.debug("Getting minimal time ...");
        Date minTime = initialMinTime;
        ProcessManagerEntry entry = processManager.getEntry();
        if (entry != null && entry.getMaxTime() != null) {
            minTime = entry.getMaxTime();
        }
        LOGGER.debug("Minimal time: {}", new SimpleDateFormat(DATE_FORMAT).format(minTime));

        // break if we processed all available points (minTime is greater than or equal to absoluteMaxTime)
        if (minTime.getTime() >= absoluteMaxTime.getTime()) {
            LOGGER.debug("Processed all points.");
            break;
        }

        // get the maximal time
        LOGGER.debug("Get maximal time.");

        // get the time from the newest point in our point range (pointMaxTime) ...
        Date pointMaxTime = processLimiter.getMaxTime(minTime, pointLimit);

        // ... and possibly break the loop if no new points are available
        if (pointMaxTime == null)
            break;

        // set the max time and make sure we are not taking too many points
        // (set max time to the minimum of pointMaxTime and absoluteMaxTime)
        Date maxTime = pointMaxTime.getTime() > absoluteMaxTime.getTime() ? absoluteMaxTime : pointMaxTime;

        LOGGER.debug("Maximal time: {}", new SimpleDateFormat(DATE_FORMAT).format(maxTime));

        // start process
        processManager.start(minTime);

        System.setProperty("minTimestamp", new SimpleDateFormat(DATE_FORMAT).format(minTime));

        System.setProperty("maxTimestamp", new SimpleDateFormat(DATE_FORMAT).format(maxTime));

        FileSystemXmlApplicationContext heatmapContext = new FileSystemXmlApplicationContext(settingsFile);

        IHeatmap heatmap = heatmapContext.getBean(HEATMAP_BEAN, IHeatmap.class);

        ITileProcessor tileProcessor = heatmapContext.getBean(WRITER_BEAN, ITileProcessor.class);

        heatmap.processTiles(tileProcessor);

        tileProcessor.close();
        heatmapContext.close();

        // finish process
        processManager.finish(maxTime);

        // move old seed
        if (backupsToKeep > 0) {
            FileUtils.moveDirectory(seedDir, new File(backupDir, backupDf.format(minTime))); // minTime is the maxTime of the seed

            // cleanup backups
            String[] backups = backupDir.list(DirectoryFileFilter.DIRECTORY);
            File oldestBackup = null;
            if (backups.length > backupsToKeep) {
                for (String bs : backups) {
                    File b = new File(backupDir, bs);
                    if (oldestBackup == null || oldestBackup.lastModified() > b.lastModified()) {
                        oldestBackup = b;
                    }
                }
                FileUtils.deleteDirectory(oldestBackup);
            }

        } else {
            FileUtils.deleteDirectory(seedDir);
        }

        // move new seed
        FileUtils.moveDirectory(currentDir, seedDir);

    }

    incrementalContext.close();

}
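
The next three examples clear their Spark output directories up front and deliberately swallow the IOException from the cleanup. When failures really can be ignored, commons-io also offers FileUtils.deleteQuietly, which never throws and reports success as a boolean. A minimal sketch, assuming only that commons-io is on the classpath (the paths mirror the examples below):

import java.io.File;

import org.apache.commons.io.FileUtils;

public class QuietCleanup {
    public static void main(String[] args) {
        // deleteQuietly deletes a file or directory (recursively) and never
        // throws: it returns true on success and false otherwise, including
        // when the directory did not exist in the first place.
        boolean clustersGone = FileUtils.deleteQuietly(new File("output/clusters"));
        boolean centroidsGone = FileUtils.deleteQuietly(new File("output/centroids"));
        System.out.println("deleted clusters: " + clustersGone + ", centroids: " + centroidsGone);
    }
}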

From source file:com.oculusinfo.ml.spark.unsupervised.TestDPMeans.java

/**
 * @param args
 */
public static void main(String[] args) {
    int k = 5;

    try {
        FileUtils.deleteDirectory(new File("output/clusters"));
        FileUtils.deleteDirectory(new File("output/centroids"));
    } catch (IOException e1) {
        /* ignore */
    }

    genTestData(k);

    JavaSparkContext sc = new JavaSparkContext("local", "OculusML");
    SparkDataSet ds = new SparkDataSet(sc);
    ds.load("test.txt", new InstanceParser());

    DPMeansClusterer clusterer = new DPMeansClusterer(80, 10, 0.001);
    clusterer.setOutputPaths("output/centroids", "output/clusters");

    clusterer.registerFeatureType("point", MeanNumericVectorCentroid.class, new EuclideanDistance(1.0));

    clusterer.doCluster(ds);

    try {
        final List<double[]> instances = readInstances();

        final Color[] colors = { Color.red, Color.blue, Color.green, Color.magenta, Color.yellow, Color.black,
                Color.orange, Color.cyan, Color.darkGray, Color.white };

        TestDPMeans t = new TestDPMeans();
        t.add(new JComponent() {
            private static final long serialVersionUID = 7920802321066846416L;

            public void paintComponent(Graphics g) {
                Graphics2D g2 = (Graphics2D) g;
                g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);

                for (double[] inst : instances) {
                    int color = (int) inst[0];
                    g.setColor(colors[color]);

                    Ellipse2D l = new Ellipse2D.Double(inst[1], inst[2], 5, 5);
                    g2.draw(l);
                }
            }
        });

        t.setDefaultCloseOperation(EXIT_ON_CLOSE);
        t.setSize(400, 400);
        t.setVisible(true);
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

}

From source file:com.oculusinfo.ml.spark.unsupervised.TestThresholdClusterer.java

/**
 * @param args
 */
public static void main(String[] args) {
    int k = 5;

    try {
        FileUtils.deleteDirectory(new File("output/clusters"));
        FileUtils.deleteDirectory(new File("output/centroids"));
    } catch (IOException e1) {
        /* ignore */
    }

    genTestData(k);

    JavaSparkContext sc = new JavaSparkContext("local", "OculusML");
    SparkDataSet ds = new SparkDataSet(sc);
    ds.load("test.txt", new InstanceParser());

    ThresholdClusterer clusterer = new ThresholdClusterer(80);
    clusterer.setOutputPaths("output/centroids", "output/clusters");

    clusterer.registerFeatureType("point", MeanNumericVectorCentroid.class, new EuclideanDistance(1.0));

    clusterer.doCluster(ds);

    try {
        final List<double[]> instances = readInstances();

        final Color[] colors = { Color.red, Color.blue, Color.green, Color.magenta, Color.yellow, Color.black,
                Color.orange, Color.cyan, Color.darkGray, Color.white };

        TestThresholdClusterer t = new TestThresholdClusterer();
        t.add(new JComponent() {
            private static final long serialVersionUID = -5597119848880912541L;

            public void paintComponent(Graphics g) {
                Graphics2D g2 = (Graphics2D) g;
                g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);

                for (double[] inst : instances) {
                    int color = (int) inst[0];
                    g.setColor(colors[color]);

                    Ellipse2D l = new Ellipse2D.Double(inst[1], inst[2], 5, 5);
                    g2.draw(l);
                }
            }
        });

        t.setDefaultCloseOperation(EXIT_ON_CLOSE);
        t.setSize(400, 400);
        t.setVisible(true);
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

}

From source file:com.oculusinfo.ml.spark.unsupervised.TestKMeans.java

/**
 * @param args
 */
public static void main(String[] args) {
    int k = 5;

    try {
        FileUtils.deleteDirectory(new File("output/clusters"));
        FileUtils.deleteDirectory(new File("output/centroids"));
    } catch (IOException e1) {
        /* ignore */
    }

    genTestData(k);

    JavaSparkContext sc = new JavaSparkContext("local", "OculusML");
    SparkDataSet ds = new SparkDataSet(sc);
    ds.load("test.txt", new SparkInstanceParser() {
        private static final long serialVersionUID = 1L;

        @Override
        public Tuple2<String, Instance> call(String line) throws Exception {
            Instance inst = new Instance();

            String tokens[] = line.split(",");

            NumericVectorFeature v = new NumericVectorFeature("point");

            double x = Double.parseDouble(tokens[0]);
            double y = Double.parseDouble(tokens[1]);
            v.setValue(new double[] { x, y });

            inst.addFeature(v);

            return new Tuple2<String, Instance>(inst.getId(), inst);
        }
    });

    KMeansClusterer clusterer = new KMeansClusterer(k, 10, 0.001, "output/centroids", "output/clusters");

    clusterer.registerFeatureType("point", MeanNumericVectorCentroid.class, new EuclideanDistance(1.0));

    clusterer.doCluster(ds);

    try {
        final List<double[]> instances = readInstances();

        final Color[] colors = { Color.red, Color.blue, Color.green, Color.magenta, Color.yellow, Color.black,
                Color.orange, Color.cyan, Color.darkGray, Color.white };

        TestKMeans t = new TestKMeans();
        t.add(new JComponent() {
            private static final long serialVersionUID = 2059497051387104848L;

            public void paintComponent(Graphics g) {
                Graphics2D g2 = (Graphics2D) g;
                g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);

                for (double[] inst : instances) {
                    int color = (int) inst[0];
                    g.setColor(colors[color]);

                    Ellipse2D l = new Ellipse2D.Double(inst[1], inst[2], 5, 5);
                    g2.draw(l);
                }
            }
        });

        t.setDefaultCloseOperation(EXIT_ON_CLOSE);
        t.setSize(400, 400);
        t.setVisible(true);
    } catch (Exception e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

}

From source file:de.huberlin.cuneiform.main.Main.java

public static void main(String[] args)
        throws ParseException, IOException, NotDerivableException, InterruptedException, JSONException {

    GnuParser gnuParser;
    CommandLine cmdline;
    Options opt;
    String value;
    int platform;
    File outputDir;
    String[] fileList;
    StringBuffer buf;
    String line;
    String dagid;
    File logFile;

    opt = new Options();

    opt.addOption("p", "platform", true, "The platform to perform the Cuneiform script's interpretation. "
            + "Possible platforms are: 'dot', 'local', and 'debug'. Default is 'local'.");

    opt.addOption("d", "directory", true,
            "The output directory, to put the interpretation intermediate and output result as well as the default location to store the log.");

    opt.addOption("c", "clean", false,
            "If set, the execution engine ignores all cached results and starts a clean workflow run.");

    opt.addOption("r", "runid", true,
            "If set, a custom id is set for this workflow run. By default a UUID string is used.");

    opt.addOption("f", "file", true,
            "Override the default location of the log file and use the specified filename instead. If the platform is 'dot', this option sets the name of the output dot-file.");

    opt.addOption("h", "help", false, "Print help text.");

    gnuParser = new GnuParser();
    cmdline = gnuParser.parse(opt, args);

    if (cmdline.hasOption("help")) {

        System.out.println("CUNEIFORM - A Functional Workflow Language\n" + LABEL_VERSION);
        new HelpFormatter().printHelp("java -jar cuneiform.jar [OPTION]*", opt);

        return;
    }

    if (cmdline.hasOption("platform")) {

        value = cmdline.getOptionValue("platform");

        if (value.equals("dot"))
            platform = PLATFORM_DOT;
        else if (value.equals("local"))
            platform = PLATFORM_LOCAL;
        else if (value.equals("debug"))
            platform = PLATFORM_DEBUG;
        else
            throw new RuntimeException("Specified platform '" + value + "' not recognized.");

    } else
        platform = PLATFORM_LOCAL;

    if (cmdline.hasOption('d')) {

        value = cmdline.getOptionValue('d');
    } else
        value = "build";

    outputDir = new File(value);

    if (outputDir.exists()) {

        if (!outputDir.isDirectory())
            throw new IOException(
                    "Output directory '" + outputDir.getAbsolutePath() + "' exists but is not a directory.");

        else if (cmdline.hasOption('c')) {

            FileUtils.deleteDirectory(outputDir);

            if (!outputDir.mkdirs())
                throw new IOException(
                        "Could not create output directory '" + outputDir.getAbsolutePath() + "'");
        }
    } else if (!outputDir.mkdirs())
        throw new IOException("Could not create output directory '" + outputDir.getAbsolutePath() + "'");

    if (cmdline.hasOption('r'))
        dagid = cmdline.getOptionValue('r');
    else
        dagid = UUID.randomUUID().toString();

    if (cmdline.hasOption('f'))
        logFile = new File(cmdline.getOptionValue('f'));
    else
        logFile = null;

    fileList = cmdline.getArgs();
    buf = new StringBuffer();
    if (fileList.length == 0) {

        try (BufferedReader reader = new BufferedReader(new InputStreamReader(System.in))) {

            while ((line = reader.readLine()) != null)
                buf.append(line).append('\n');
        }

        switch (platform) {

        case PLATFORM_DOT:
            createDot(buf.toString(), outputDir, logFile);
            break;
        case PLATFORM_LOCAL:
            runLocal(buf.toString(), outputDir, logFile, dagid);
            break;
        case PLATFORM_DEBUG:
            runDebug(buf.toString(), outputDir, logFile, dagid);
            break;
        default:
            throw new RuntimeException("Platform not recognized.");
        }
    } else

        switch (platform) {

        case PLATFORM_DOT:
            createDot(fileList, outputDir, logFile);
            break;
        case PLATFORM_LOCAL:
            runLocal(fileList, outputDir, logFile, dagid);
            break;
        case PLATFORM_DEBUG:
            runDebug(fileList, outputDir, logFile, dagid);
            break;
        default:
            throw new RuntimeException("Platform not recognized.");
        }

}
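
The -c branch above shows a common reset idiom around deleteDirectory: delete the tree, then recreate the empty directory. A minimal sketch of that idiom as a helper, assuming nothing beyond commons-io (forceMkdir, like deleteDirectory, throws IOException instead of returning false):

import java.io.File;
import java.io.IOException;

import org.apache.commons.io.FileUtils;

public final class DirectoryReset {

    private DirectoryReset() {
    }

    // Deletes the directory tree if it exists, then recreates the empty
    // directory; either step failing surfaces as an IOException.
    public static void reset(File dir) throws IOException {
        FileUtils.deleteDirectory(dir); // no-op when dir does not exist
        FileUtils.forceMkdir(dir);
    }
}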

From source file:ee.ria.xroad.asyncdb.AsyncDBIntegrationTest.java

/**
 *
 *
 * @param args
 *            - use 'preservedb' to retain directory structure for further
 *            investigation
 * @throws Exception - when running integration test fails.
 */
public static void main(String[] args) throws Exception {
    File provider = new File(AsyncDBTestUtil.getProviderDirPath());
    File logFile = new File(AsyncDBTestUtil.getAsyncLogFilePath());
    if (provider.exists() || logFile.exists()) {
        throw new IntegrationTestFailedException(
                "Directory '" + AsyncDBTestUtil.DB_FILEPATH + "' and file '" + AsyncDBTestUtil.LOG_FILEPATH
                        + "' must not be present at the beginning of the integration test; delete them!");
    }

    File logDir = logFile.getParentFile();
    logDir.mkdirs();

    boolean preserveDB = false;
    if (args.length > 0 && "preservedb".equalsIgnoreCase(args[0])) {
        preserveDB = true;
        LOG.warn("Preserving DB file tree after test, be sure to remove it later yourself!");
    }

    long freeFileDescriptorsAtBeginning = SystemMetrics.getFreeFileDescriptorCount();

    try {

        addRequestToNonExistentProvider();
        addRequestToExistentProvider();
        markSecondRequestAsRemoved();
        restoreSecondRequest();
        getAllMessageQueues();
        sendFirstRequestSuccessfully();
        sendSecondRequestUnsuccessfully();
        resetSendCountOfSecondRequest();
        skipNotSendingRequest();
        revertWritingFailure();

        // Test cases from real life
        removeCorruptRequestAndSendNext();
    } finally {
        validateFileDescriptors(freeFileDescriptorsAtBeginning);

        if (!preserveDB) {
            FileUtils.deleteDirectory(new File(AsyncDBTestUtil.getProviderDirPath()));
            FileUtils.deleteDirectory(logDir);
        }
    }

    LOG.info("Integration test of ASYNC-DB accomplished successfully.");
}

From source file:hdfs.MiniHDFS.java

public static void main(String[] args) throws Exception {
    if (args.length != 1 && args.length != 3) {
        throw new IllegalArgumentException(
                "Expected: MiniHDFS <baseDirectory> [<kerberosPrincipal> <kerberosKeytab>], " + "got: "
                        + Arrays.toString(args));
    }
    boolean secure = args.length == 3;

    // configure Paths
    Path baseDir = Paths.get(args[0]);
    // hadoop-home/, so logs will not complain
    if (System.getenv("HADOOP_HOME") == null) {
        Path hadoopHome = baseDir.resolve("hadoop-home");
        Files.createDirectories(hadoopHome);
        System.setProperty("hadoop.home.dir", hadoopHome.toAbsolutePath().toString());
    }
    // hdfs-data/, where any data is going
    Path hdfsHome = baseDir.resolve("hdfs-data");

    // configure cluster
    Configuration cfg = new Configuration();
    cfg.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsHome.toAbsolutePath().toString());
    // lower default permission: TODO: needed?
    cfg.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "766");

    // optionally configure security
    if (secure) {
        String kerberosPrincipal = args[1];
        String keytabFile = args[2];

        cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "true");
        cfg.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
        cfg.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
        cfg.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
        cfg.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, keytabFile);
        cfg.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, keytabFile);
        cfg.set(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, "true");
        cfg.set(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, "true");
        cfg.set(DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY, "true");
    }

    UserGroupInformation.setConfiguration(cfg);

    // TODO: remove hardcoded port!
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(cfg);
    if (secure) {
        builder.nameNodePort(9998);
    } else {
        builder.nameNodePort(9999);
    }
    MiniDFSCluster dfs = builder.build();

    // Configure contents of the filesystem
    org.apache.hadoop.fs.Path esUserPath = new org.apache.hadoop.fs.Path("/user/elasticsearch");
    try (FileSystem fs = dfs.getFileSystem()) {

        // Set the elasticsearch user directory up
        fs.mkdirs(esUserPath);
        if (UserGroupInformation.isSecurityEnabled()) {
            List<AclEntry> acls = new ArrayList<>();
            acls.add(new AclEntry.Builder().setType(AclEntryType.USER).setName("elasticsearch")
                    .setPermission(FsAction.ALL).build());
            fs.modifyAclEntries(esUserPath, acls);
        }

        // Install a pre-existing repository into HDFS
        String directoryName = "readonly-repository";
        String archiveName = directoryName + ".tar.gz";
        URL readOnlyRepositoryArchiveURL = MiniHDFS.class.getClassLoader().getResource(archiveName);
        if (readOnlyRepositoryArchiveURL != null) {
            Path tempDirectory = Files.createTempDirectory(MiniHDFS.class.getName());
            File readOnlyRepositoryArchive = tempDirectory.resolve(archiveName).toFile();
            FileUtils.copyURLToFile(readOnlyRepositoryArchiveURL, readOnlyRepositoryArchive);
            FileUtil.unTar(readOnlyRepositoryArchive, tempDirectory.toFile());

            fs.copyFromLocalFile(true, true,
                    new org.apache.hadoop.fs.Path(
                            tempDirectory.resolve(directoryName).toAbsolutePath().toUri()),
                    esUserPath.suffix("/existing/" + directoryName));

            FileUtils.deleteDirectory(tempDirectory.toFile());
        }
    }

    // write our PID file
    Path tmp = Files.createTempFile(baseDir, null, null);
    String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0];
    Files.write(tmp, pid.getBytes(StandardCharsets.UTF_8));
    Files.move(tmp, baseDir.resolve(PID_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);

    // write our port file
    tmp = Files.createTempFile(baseDir, null, null);
    Files.write(tmp, Integer.toString(dfs.getNameNodePort()).getBytes(StandardCharsets.UTF_8));
    Files.move(tmp, baseDir.resolve(PORT_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);
}

From source file:fr.ippon.tatami.test.support.LdapTestServer.java

/**
 * Main class. We just do a lookup on the server to check that it's available.
 *
 * @param args
 *            Not used.
 * @throws Exception
 */
public static void main(String[] args) throws Exception // throws Exception
{
    FileUtils.deleteDirectory(workingDir);

    LdapTestServer ads = null;
    try {
        // Create the server
        ads = new LdapTestServer();
        ads.start();

        // Read an entry
        Entry result = ads.service.getAdminSession().lookup(new LdapDN("dc=ippon,dc=fr"));

        // And print it if available
        System.out.println("Found entry : " + result);

    } catch (Exception e) {
        // Ok, we have something wrong going on ...
        e.printStackTrace();
    }
    System.out.println("Press enter");
    new BufferedReader(new InputStreamReader(System.in)).readLine();
    if (ads != null) {
        ads.stop();
    }
}

From source file:fr.ippon.pamelaChu.test.support.LdapTestServer.java

/**
 * Main class. We just do a lookup on the server to check that it's available.
 * <p/>/*from www. jav  a 2  s. c  om*/
 * FIXME : in Eclipse : when running this classes as "Java Application", target/test-classes is not added in the classpath
 * resulting in a java.lang.ClassNotFoundException ...
 *
 * @param args Not used.
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
    FileUtils.deleteDirectory(workingDir);

    LdapTestServer ads = null;
    try {
        // Create the server
        ads = new LdapTestServer();
        ads.start();

        // Read an entry
        Entry result = ads.service.getAdminSession().lookup(new LdapDN("dc=ippon,dc=fr"));

        // And print it if available
        System.out.println("Found entry : " + result);

    } catch (Exception e) {
        // Ok, we have something wrong going on ...
        e.printStackTrace();
    }
    System.out.println("Press enter");
    new BufferedReader(new InputStreamReader(System.in)).readLine();
    if (ads != null) {
        ads.stop();
    }
}

From source file:MiniCluster.java

/**
 * Runs the {@link MiniAccumuloCluster} given a -p argument with a property file. Establishes a shutdown port for asynchronous operation.
 *
 * @param args
 *          An optional -p argument can be specified with the path to a valid properties file.
 */
public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs(MiniCluster.class.getName(), args);

    if (opts.printProps) {
        printProperties();
        System.exit(0);
    }

    int shutdownPort = 4445;

    final File miniDir;

    if (opts.prop.containsKey(DIRECTORY_PROP))
        miniDir = new File(opts.prop.getProperty(DIRECTORY_PROP));
    else
        miniDir = Files.createTempDir();

    String rootPass = opts.prop.containsKey(ROOT_PASSWORD_PROP) ? opts.prop.getProperty(ROOT_PASSWORD_PROP)
            : "secret";
    String instanceName = opts.prop.containsKey(INSTANCE_NAME_PROP) ? opts.prop.getProperty(INSTANCE_NAME_PROP)
            : "accumulo";
    MiniAccumuloConfig config = new MiniAccumuloConfig(miniDir, instanceName, rootPass);

    if (opts.prop.containsKey(NUM_T_SERVERS_PROP))
        config.setNumTservers(Integer.parseInt(opts.prop.getProperty(NUM_T_SERVERS_PROP)));
    if (opts.prop.containsKey(ZOO_KEEPER_PORT_PROP))
        config.setZooKeeperPort(Integer.parseInt(opts.prop.getProperty(ZOO_KEEPER_PORT_PROP)));
    //    if (opts.prop.containsKey(JDWP_ENABLED_PROP))
    //      config.setJDWPEnabled(Boolean.parseBoolean(opts.prop.getProperty(JDWP_ENABLED_PROP)));
    //    if (opts.prop.containsKey(ZOO_KEEPER_MEMORY_PROP))
    //      setMemoryOnConfig(config, opts.prop.getProperty(ZOO_KEEPER_MEMORY_PROP), ServerType.ZOOKEEPER);
    //    if (opts.prop.containsKey(TSERVER_MEMORY_PROP))
    //      setMemoryOnConfig(config, opts.prop.getProperty(TSERVER_MEMORY_PROP), ServerType.TABLET_SERVER);
    //    if (opts.prop.containsKey(MASTER_MEMORY_PROP))
    //      setMemoryOnConfig(config, opts.prop.getProperty(MASTER_MEMORY_PROP), ServerType.MASTER);
    //    if (opts.prop.containsKey(DEFAULT_MEMORY_PROP))
    //      setMemoryOnConfig(config, opts.prop.getProperty(DEFAULT_MEMORY_PROP));
    //    if (opts.prop.containsKey(SHUTDOWN_PORT_PROP))
    //      shutdownPort = Integer.parseInt(opts.prop.getProperty(SHUTDOWN_PORT_PROP));

    Map<String, String> siteConfig = new HashMap<String, String>();
    for (Map.Entry<Object, Object> entry : opts.prop.entrySet()) {
        String key = (String) entry.getKey();
        if (key.startsWith("site."))
            siteConfig.put(key.replaceFirst("site.", ""), (String) entry.getValue());
    }

    config.setSiteConfig(siteConfig);

    final MiniAccumuloCluster accumulo = new MiniAccumuloCluster(config);

    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                accumulo.stop();
                FileUtils.deleteDirectory(miniDir);
                System.out.println("\nShut down gracefully on " + new Date());
            } catch (IOException e) {
                e.printStackTrace();
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    });

    accumulo.start();

    printInfo(accumulo, shutdownPort);

    // start a socket on the shutdown port and block; any connection to this port will activate the shutdown
    ServerSocket shutdownServer = new ServerSocket(shutdownPort);
    shutdownServer.accept();

    System.exit(0);
}
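
The javadoc above mentions the shutdown port: the cluster blocks in ServerSocket.accept(), so any client that merely connects triggers the exit path and the shutdown hook that calls FileUtils.deleteDirectory on the working directory. A minimal client sketch (the host is assumed local; port 4445 is the example's default):

import java.io.IOException;
import java.net.Socket;

public class MiniClusterShutdownClient {
    public static void main(String[] args) throws IOException {
        // Connecting and closing is enough; the server side only waits for
        // accept() before exiting, so no payload is required.
        try (Socket socket = new Socket("localhost", 4445)) {
            // intentionally empty
        }
    }
}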