Example usage for org.apache.hadoop.fs FileUtil fullyDelete

Introduction

This page collects example usages of org.apache.hadoop.fs.FileUtil.fullyDelete, drawn from open-source projects.

Prototype

public static boolean fullyDelete(final File dir) 

Document

Delete a directory and all its contents.
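
Before the project examples below, here is a minimal self-contained sketch of the call, assuming hadoop-common is on the classpath. The class name and scratch-directory path are illustrative, not taken from any of the listed sources.

import java.io.File;

import org.apache.hadoop.fs.FileUtil;

public class FullyDeleteExample {
    public static void main(String[] args) {
        // Build a throwaway directory tree to delete.
        File scratchDir = new File(System.getProperty("java.io.tmpdir"), "fully-delete-example");
        new File(scratchDir, "nested").mkdirs();

        // fullyDelete removes the directory and everything under it.
        // It returns true only if the whole tree was deleted; on false,
        // the directory may be left partially deleted.
        boolean deleted = FileUtil.fullyDelete(scratchDir);
        System.out.println("Deleted: " + deleted);
    }
}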

Usage

From source file: org.apache.hama.myhama.util.TaskLog.java

License: Apache License

/**
 * Purge old user logs.
 * 
 * @throws IOException
 */
public static synchronized void cleanup(int logsRetainHours) throws IOException {
    // Purge logs of tasks on this tasktracker if their
    // mtime has exceeded "mapred.task.log.retain" hours
    long purgeTimeStamp = System.currentTimeMillis() - (logsRetainHours * 60L * 60 * 1000);
    File[] oldTaskLogs = LOG_DIR.listFiles(new TaskLogsPurgeFilter(purgeTimeStamp));
    if (oldTaskLogs != null) {
        for (File oldTaskLog : oldTaskLogs) {
            FileUtil.fullyDelete(oldTaskLog);
        }
    }
}

From source file: org.apache.hama.util.RunJar.java

License: Apache License

/**
 * Run a Hama job jar. If the main class is not in the jar's manifest, then it
 * must be provided on the command line.
 */
public static void main(String[] args) throws Throwable {
    String usage = "Usage: hama jar <jar> [mainClass] args...";

    if (args.length < 1) {
        System.err.println(usage);
        System.exit(-1);
    }

    int firstArg = 0;
    String fileName = args[firstArg++];
    File file = new File(fileName);
    String mainClassName = null;

    JarFile jarFile = new JarFile(fileName);
    Manifest manifest = jarFile.getManifest();
    if (manifest != null) {
        mainClassName = manifest.getMainAttributes().getValue("Main-Class");
    }
    jarFile.close();

    if (mainClassName == null) {
        if (args.length < 2) {
            System.err.println(usage);
            System.exit(-1);
        }
        mainClassName = args[firstArg++];
    }
    mainClassName = mainClassName.replaceAll("/", ".");

    // Create a unique working directory: make a temp file, remove it, and
    // recreate the same path as a directory.
    final File workDir = File.createTempFile("hama-unjar", "");
    workDir.delete();
    workDir.mkdirs();

    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            try {
                FileUtil.fullyDelete(workDir);
            } catch (Exception e) {
                // Best-effort cleanup on JVM shutdown; ignore failures.
            }
        }
    });

    unJar(file, workDir);

    List<URL> classPath = new ArrayList<URL>();
    classPath.add(new File(workDir + "/").toURI().toURL());
    classPath.add(file.toURI().toURL());
    classPath.add(new File(workDir, "classes/").toURI().toURL());
    File[] libs = new File(workDir, "lib").listFiles();
    if (libs != null) {
        for (File lib : libs) {
            classPath.add(lib.toURI().toURL());
        }
    }
    ClassLoader loader = new URLClassLoader(classPath.toArray(new URL[classPath.size()]));

    Thread.currentThread().setContextClassLoader(loader);
    Class<?> mainClass = loader.loadClass(mainClassName);
    Method main = mainClass.getMethod("main", String[].class);
    List<String> var = Arrays.asList(args).subList(firstArg, args.length);
    String[] newArgs = var.toArray(new String[var.size()]);
    try {
        main.invoke(null, new Object[] { newArgs });
    } catch (InvocationTargetException e) {
        throw e.getTargetException();
    }
}

From source file: org.apache.hcatalog.hbase.ManyMiniCluster.java

License: Apache License

protected synchronized void start() {
    try {
        if (!started) {
            FileUtil.fullyDelete(workDir);
            if (miniMRClusterEnabled) {
                setupMRCluster();
            }
            if (miniZookeeperClusterEnabled || miniHBaseClusterEnabled) {
                miniZookeeperClusterEnabled = true;
                setupZookeeper();
            }
            if (miniHBaseClusterEnabled) {
                setupHBaseCluster();
            }
            if (miniHiveMetastoreEnabled) {
                setUpMetastore();
            }
        }
    } catch (Exception e) {
        throw new IllegalStateException("Failed to setup cluster", e);
    }
}

From source file: org.apache.hcatalog.mapreduce.HCatBaseTest.java

License: Apache License

@BeforeClass
public static void setUpTestDataDir() throws Exception {
    LOG.info("Using warehouse directory " + TEST_WAREHOUSE_DIR);
    File f = new File(TEST_WAREHOUSE_DIR);
    if (f.exists()) {
        FileUtil.fullyDelete(f);
    }
    Assert.assertTrue(new File(TEST_WAREHOUSE_DIR).mkdirs());
}

From source file: org.apache.hcatalog.mapreduce.TestHCatMultiOutputFormat.java

License: Apache License

@BeforeClass
public static void setup() throws Exception {
    String testDir = System.getProperty("test.data.dir", "./");
    testDir = testDir + "/test_multitable_" + Math.abs(new Random().nextLong()) + "/";
    workDir = new File(new File(testDir).getCanonicalPath());
    FileUtil.fullyDelete(workDir);
    workDir.mkdirs();

    warehousedir = new Path(workDir + "/warehouse");

    // Run hive metastore server
    t = new Thread(new RunMS());
    t.start();

    // LocalJobRunner does not work with mapreduce OutputCommitter. So need
    // to use MiniMRCluster. MAPREDUCE-2350
    Configuration conf = new Configuration(true);
    conf.set("yarn.scheduler.capacity.root.queues", "default");
    conf.set("yarn.scheduler.capacity.root.default.capacity", "100");

    FileSystem fs = FileSystem.get(conf);
    System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
    mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf));
    mrConf = mrCluster.createJobConf();
    fs.mkdirs(warehousedir);

    initializeSetup();
}

From source file: org.apache.hcatalog.mapreduce.TestHCatMultiOutputFormat.java

License: Apache License

@AfterClass
public static void tearDown() throws IOException {
    FileUtil.fullyDelete(workDir);
    FileSystem fs = FileSystem.get(mrConf);
    if (fs.exists(warehousedir)) {
        fs.delete(warehousedir, true);
    }
    if (mrCluster != null) {
        mrCluster.shutdown();
    }
}

From source file: org.apache.hcatalog.mapreduce.TestMultiOutputFormat.java

License: Apache License

private static void createWorkDir() throws IOException {
    String testDir = System.getProperty("test.data.dir", "./");
    testDir = testDir + "/test_multiout_" + Math.abs(new Random().nextLong()) + "/";
    workDir = new File(new File(testDir).getCanonicalPath());
    FileUtil.fullyDelete(workDir);
    workDir.mkdirs();
}

From source file: org.apache.hcatalog.mapreduce.TestMultiOutputFormat.java

License: Apache License

@AfterClass
public static void tearDown() throws IOException {
    if (mrCluster != null) {
        mrCluster.shutdown();
    }
    FileUtil.fullyDelete(workDir);
}

From source file: org.apache.hcatalog.pig.TestE2EScenarios.java

License: Apache License

@Override
protected void setUp() throws Exception {

    File f = new File(TEST_WAREHOUSE_DIR);
    if (f.exists()) {
        FileUtil.fullyDelete(f);
    }
    new File(TEST_WAREHOUSE_DIR).mkdirs();

    HiveConf hiveConf = new HiveConf(this.getClass());
    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
    driver = new Driver(hiveConf);
    SessionState.start(new CliSessionState(hiveConf));

}

From source file: org.apache.hcatalog.pig.TestHCatLoader.java

License: Apache License

protected void guardedSetUpBeforeClass() throws Exception {
    if (setupHasRun) {
        return;
    }
    setupHasRun = true;

    File f = new File(TEST_WAREHOUSE_DIR);
    if (f.exists()) {
        FileUtil.fullyDelete(f);
    }
    new File(TEST_WAREHOUSE_DIR).mkdirs();

    HiveConf hiveConf = new HiveConf(this.getClass());
    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR);
    driver = new Driver(hiveConf);
    SessionState.start(new CliSessionState(hiveConf));

    cleanup();

    createTable(BASIC_TABLE, "a int, b string");
    createTable(COMPLEX_TABLE,
            "name string, studentid int, " + "contact struct<phno:string,email:string>, "
                    + "currently_registered_courses array<string>, " + "current_grades map<string,string>, "
                    + "phnos array<struct<phno:string,type:string>>");

    createTable(PARTITIONED_TABLE, "a int, b string", "bkt string");
    createTable(SPECIFIC_SIZE_TABLE, "a int, b string");

    int LOOP_SIZE = 3;
    String[] input = new String[LOOP_SIZE * LOOP_SIZE];
    basicInputData = new HashMap<Integer, Pair<Integer, String>>();
    int k = 0;
    for (int i = 1; i <= LOOP_SIZE; i++) {
        String si = i + "";
        for (int j = 1; j <= LOOP_SIZE; j++) {
            String sj = "S" + j + "S";
            input[k] = si + "\t" + sj;
            basicInputData.put(k, new Pair<Integer, String>(i, sj));
            k++;
        }
    }
    HcatTestUtils.createTestDataFile(BASIC_FILE_NAME, input);
    HcatTestUtils.createTestDataFile(COMPLEX_FILE_NAME, new String[] {
            //"Henry Jekyll\t42\t(415-253-6367,hjekyll@contemporary.edu.uk)\t{(PHARMACOLOGY),(PSYCHIATRY)},[PHARMACOLOGY#A-,PSYCHIATRY#B+],{(415-253-6367,cell),(408-253-6367,landline)}",
            //"Edward Hyde\t1337\t(415-253-6367,anonymous@b44chan.org)\t{(CREATIVE_WRITING),(COPYRIGHT_LAW)},[CREATIVE_WRITING#A+,COPYRIGHT_LAW#D],{(415-253-6367,cell),(408-253-6367,landline)}",
    });

    PigServer server = new PigServer(ExecType.LOCAL);
    server.setBatchOn();
    server.registerQuery("A = load '" + BASIC_FILE_NAME + "' as (a:int, b:chararray);");

    server.registerQuery("store A into '" + BASIC_TABLE + "' using org.apache.hcatalog.pig.HCatStorer();");
    server.registerQuery(
            "store A into '" + SPECIFIC_SIZE_TABLE + "' using org.apache.hcatalog.pig.HCatStorer();");
    server.registerQuery("B = foreach A generate a,b;");
    server.registerQuery("B2 = filter B by a < 2;");
    server.registerQuery(
            "store B2 into '" + PARTITIONED_TABLE + "' using org.apache.hcatalog.pig.HCatStorer('bkt=0');");

    server.registerQuery("C = foreach A generate a,b;");
    server.registerQuery("C2 = filter C by a >= 2;");
    server.registerQuery(
            "store C2 into '" + PARTITIONED_TABLE + "' using org.apache.hcatalog.pig.HCatStorer('bkt=1');");

    server.registerQuery("D = load '" + COMPLEX_FILE_NAME
            + "' as (name:chararray, studentid:int, contact:tuple(phno:chararray,email:chararray), currently_registered_courses:bag{innertup:tuple(course:chararray)}, current_grades:map[ ] , phnos :bag{innertup:tuple(phno:chararray,type:chararray)});");
    server.registerQuery("store D into '" + COMPLEX_TABLE + "' using org.apache.hcatalog.pig.HCatStorer();");
    server.executeBatch();

}