Example usage for org.apache.hadoop.fs FileUtil fullyDelete

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileUtil#fullyDelete.

Prototype

public static boolean fullyDelete(final File dir) 

Document

Delete a directory and all its contents. Returns true if the deletion succeeded; if it returns false, the directory may have been only partially deleted.
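As a minimal illustrative sketch (not taken from any of the source files below; the ./tmp-scratch path is a placeholder), fullyDelete can be used to clear a local scratch directory before recreating it:

import java.io.File;

import org.apache.hadoop.fs.FileUtil;

public class FullyDeleteExample {
    public static void main(String[] args) {
        // Placeholder directory used only for illustration.
        File scratchDir = new File("./tmp-scratch");
        if (scratchDir.exists()) {
            // Recursively removes the directory and everything beneath it.
            if (!FileUtil.fullyDelete(scratchDir)) {
                System.err.println("Could not fully delete " + scratchDir.getAbsolutePath());
            }
        }
        // Recreate an empty directory for the next run.
        scratchDir.mkdirs();
    }
}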

Usage

From source file:CSVUtils.CSVUtils.java

public static void createCSVForSentimentTimezone(ArrayList<KeyValueTuple> data, String headers, String outPut) {
    File outputFile = new File(outPut);
    if (outputFile.exists()) {
        FileUtil.fullyDelete(outputFile);
    }
    try {
        outputFile.getParentFile().mkdirs();
        outputFile.createNewFile();
    } catch (Exception ex) {
        System.out.println("File creation failed");
    }
    try {
        PrintWriter writer = new PrintWriter(outputFile);
        writer.println(headers);

        for (KeyValueTuple kvt : data) {
            String timezone = (kvt.getKey().split("-")[0]).trim();
            String sentiment = (kvt.getKey().split("-")[1]).trim();
            SentimentAndTimezone st = findTimezonePresent(timezone);
            if (st == null) {
                st = new SentimentAndTimezone();
                st.setTimeZone(timezone);
                listOfTimezones.add(st);
            }
            if (sentiment.equals("NEUTRAL")) {
                st.setNumberOfNeutral(kvt.getValue());
            } else if (sentiment.equals("NEGATIVE")) {
                st.setNumberOfNegative(kvt.getValue());
            } else if (sentiment.equals("POSITIVE")) {
                st.setNumberOfPositive(kvt.getValue());
            }
        }

        for (SentimentAndTimezone st : listOfTimezones) {
            st.formString(writer);
        }
        writer.close();
    } catch (Exception ex) {
        System.out.println("Error Reading Data");
    }
}

From source file:CSVUtils.CSVUtils.java

public static void createCSVForSentimentTimezoneDashboard(ArrayList<KeyValueTuple> data, String outPut) {
    File outputFile = new File(outPut);
    if (outputFile.exists()) {
        FileUtil.fullyDelete(outputFile);
    }
    try {
        outputFile.getParentFile().mkdirs();
        outputFile.createNewFile();
    } catch (Exception ex) {
        System.out.println("File creation failed");
    }
    try {
        PrintWriter writer1 = new PrintWriter(outputFile);

        for (KeyValueTuple kvt : data) {
            String timezone = (kvt.getKey().split("-")[0]).trim();
            String sentiment = (kvt.getKey().split("-")[1]).trim();
            SentimentAndTimezone st = findTimezonePresent(timezone);
            if (st == null) {
                st = new SentimentAndTimezone();
                st.setTimeZone(timezone);
                listOfTimezones.add(st);
            }
            if (sentiment.equals("NEUTRAL")) {
                st.setNumberOfNeutral(kvt.getValue());
            } else if (sentiment.equals("NEGATIVE")) {
                st.setNumberOfNegative(kvt.getValue());
            } else if (sentiment.equals("POSITIVE")) {
                st.setNumberOfPositive(kvt.getValue());
            }
        }
        String finalDashboardString = "";
        for (SentimentAndTimezone st : listOfTimezones) {
            finalDashboardString += st.formStringForDashboard1();
        }
        finalDashboardString = "var freqData=[ \n" + finalDashboardString;
        String finalDashboardString1 = finalDashboardString.substring(0, finalDashboardString.length() - 2);
        String finalDashboardString2 = finalDashboardString1 + "];";
        writer1.write(finalDashboardString2);
        writer1.println();
        writer1.println();
        writer1.println("dashboard('#graph',freqData);");
        writer1.close();
    } catch (Exception ex) {
        System.out.println("Error Reading Data");
    }
}

From source file:CSVUtils.CSVUtils.java

public static void createTopUserMentionsCSV(ArrayList<KeyValueTuple> data, String outPut) {
    File outputFile = new File(outPut);
    if (outputFile.exists()) {
        FileUtil.fullyDelete(outputFile);
    }
    try {
        outputFile.getParentFile().mkdirs();
        outputFile.createNewFile();
    } catch (Exception ex) {
        System.out.println("File creation failed");
    }
    try {
        PrintWriter writer = new PrintWriter(outputFile);
        writer.println("Top User Mentions,");
        writer.println("Source: D3JS,");
        writer.println("Metadata Notes: XYZ,");
        writer.println(",");
        writer.println("Mention,Count");
        for (KeyValueTuple kvt : data) {
            writer.println(kvt.getKey() + "," + kvt.getValue());
        }
        writer.close();
    } catch (Exception ex) {
        System.out.println("Error Reading Data");
    }
}

From source file:de.tiqsolutions.hdfs.HadoopTestBase.java

License:Apache License

@BeforeClass
public static void setUp() throws Exception {
    String key = "java.protocol.handler.pkgs";
    String newValue = "de.tiqsolutions";
    if (System.getProperty(key) != null) {
        String previousValue = System.getProperty(key);
        newValue = newValue + "|" + previousValue;
    }
    System.setProperty(key, newValue);
    File baseDir = new File("./target/hdfs/" + HadoopTestBase.class.getName()).getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    Configuration conf = new Configuration();
    conf.set("hdfs.minidfs.basedir", baseDir.getAbsolutePath());
    conf.setBoolean("dfs.webhdfs.enabled", true);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();
    hdfsCluster.waitActive();
    HDFS_BASE_URI = hdfsCluster.getURI();
    WEBHDFS_BASE_URI = new URI("webhdfs://" + conf.get("dfs.namenode.http-address"));

    try (FileSystem fs = FileSystems.newFileSystem(HDFS_BASE_URI, System.getenv())) {

        Files.copy(Paths.get(HadoopTestBase.class.getResource("/test.csv").toURI()),
                Paths.get(HDFS_BASE_URI.resolve("/test.csv")), StandardCopyOption.REPLACE_EXISTING);

    }

}

From source file:de.tudarmstadt.ukp.dkpro.core.fs.hdfs.HdfsResourceLoaderLocatorTest.java

License:Apache License

@Before
public void startCluster() throws Exception {
    // Start dummy HDFS
    File target = folder.newFolder("hdfs");
    hadoopTmp = folder.newFolder("hadoop");

    File baseDir = new File(target, "hdfs").getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    conf.set("hadoop.tmp.dir", hadoopTmp.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();
}

From source file:edu.uci.ics.asterix.aoya.test.AsterixYARNInstanceUtil.java

License:Apache License

public YarnConfiguration setUp() throws Exception {
    File asterixProjectDir = new File(System.getProperty("user.dir"));

    File installerTargetDir = new File(asterixProjectDir, "target");

    String[] dirsInTarget = installerTargetDir.list(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return new File(dir, name).isDirectory() && name.startsWith("asterix-yarn")
                    && name.endsWith("binary-assembly");
        }

    });
    if (dirsInTarget.length != 1) {
        throw new IllegalStateException("Could not find binary to run YARN integration test with");
    }
    aoyaHome = installerTargetDir.getAbsolutePath() + File.separator + dirsInTarget[0];
    File asterixServerInstallerDir = new File(aoyaHome, "asterix");
    String[] zipsInFolder = asterixServerInstallerDir.list(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return name.startsWith("asterix-server") && name.endsWith("binary-assembly.zip");
        }
    });
    if (zipsInFolder.length != 1) {
        throw new IllegalStateException("Could not find server binary to run YARN integration test with");
    }
    aoyaServerPath = asterixServerInstallerDir.getAbsolutePath() + File.separator + zipsInFolder[0];
    configPath = aoyaHome + File.separator + "configs" + File.separator + "local.xml";
    parameterPath = aoyaHome + File.separator + "conf" + File.separator + "base-asterix-configuration.xml";
    YARNCluster.getInstance().setup();
    appConf = new YarnConfiguration();
    File baseDir = new File("./target/hdfs/").getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    appConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(appConf);
    MiniDFSCluster hdfsCluster = builder.build();
    miniCluster = YARNCluster.getInstance().getCluster();
    appConf.set("fs.defaultFS", "hdfs://localhost:" + hdfsCluster.getNameNodePort());
    miniCluster.init(appConf);
    Cluster defaultConfig = Utils.parseYarnClusterConfig(configPath);
    for (Node n : defaultConfig.getNode()) {
        n.setClusterIp(MiniYARNCluster.getHostname());
    }
    defaultConfig.getMasterNode().setClusterIp(MiniYARNCluster.getHostname());
    configPath = "target" + File.separator + "localized-aoya-config.xml";
    Utils.writeYarnClusterConfig(configPath, defaultConfig);
    miniCluster.start();
    appConf = new YarnConfiguration(miniCluster.getConfig());
    appConf.set("fs.defaultFS", "hdfs://localhost:" + hdfsCluster.getNameNodePort());
    //TODO:why must I do this!? what is not being passed properly via environment variables???
    appConf.writeXml(new FileOutputStream("target" + File.separator + "yarn-site.xml"));

    //once the cluster is created, you can get its configuration
    //with the binding details to the cluster added from the minicluster
    FileSystem fs = FileSystem.get(appConf);
    Path instanceState = new Path(fs.getHomeDirectory(), AsterixYARNClient.CONF_DIR_REL + INSTANCE_NAME + "/");
    fs.delete(instanceState, true);
    Assert.assertFalse(fs.exists(instanceState));

    File outdir = new File(PATH_ACTUAL);
    outdir.mkdirs();
    return appConf;
}

From source file:gobblin.writer.AvroHdfsDataWriterTest.java

License:Apache License

@AfterClass
public void tearDown() throws IOException {
    // Clean up the staging and/or output directories if necessary
    File testRootDir = new File(TestConstants.TEST_ROOT_DIR);
    if (testRootDir.exists()) {
        FileUtil.fullyDelete(testRootDir);
    }
}

From source file:gobblin.writer.AvroToParquetHdfsDataWriterTest.java

License:Open Source License

@AfterClass
public static void tearDown() throws IOException {
    // Clean up the staging and/or output directories if necessary
    File testRootDir = new File(TestConstants.TEST_ROOT_DIR);
    if (testRootDir.exists()) {
        FileUtil.fullyDelete(testRootDir);
    }
}

From source file:gobblin.writer.SimpleDataWriterTest.java

License:Apache License

@AfterMethod
public void tearDown() throws IOException {
    // Clean up the staging and/or output directories if necessary
    File testRootDir = new File(TestConstants.TEST_ROOT_DIR);
    if (testRootDir.exists()) {
        FileUtil.fullyDelete(testRootDir);
    }
}

From source file:hdfs.jsr203.TestAttributes.java

License:Apache License

private static MiniDFSCluster startMini(String testName) throws IOException {
    File baseDir = new File("./target/hdfs/" + testName).getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    MiniDFSCluster hdfsCluster = builder.clusterId(testName).build();
    hdfsCluster.waitActive();
    return hdfsCluster;
}