Example usage for org.apache.hadoop.hdfs MiniDFSCluster shutdown

Introduction

This page collects example usages of org.apache.hadoop.hdfs.MiniDFSCluster#shutdown() from open-source projects.

Prototype

public void shutdown() 

Document

Shutdown all the nodes in the cluster.
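
The examples below share a common shape: build the cluster, exercise the file system, and call shutdown() in a finally block so the cluster is torn down even if the test fails. A minimal sketch of that lifecycle (the base directory path is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsLifecycle {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Keep the cluster's data under a known directory (path is illustrative)
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-example");
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            FileSystem fs = cluster.getFileSystem();
            fs.mkdirs(new Path("/example"));
            System.out.println("created /example: " + fs.exists(new Path("/example")));
        } finally {
            // Stops all NameNodes and DataNodes started by the builder
            cluster.shutdown();
        }
    }
}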

Usage

From source file: org.apache.flume.sink.kite.TestDatasetSink.java

License: Apache License

@Test
public void testMiniClusterStore() throws EventDeliveryException, IOException {
    // setup a minicluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration()).build();
    DatasetRepository hdfsRepo = null;
    try {
        FileSystem dfs = cluster.getFileSystem();
        Configuration conf = dfs.getConf();
        String repoURI = "repo:" + conf.get("fs.defaultFS") + "/tmp/repo";

        // create a repository and dataset in HDFS
        hdfsRepo = DatasetRepositories.open(repoURI);
        hdfsRepo.create(DATASET_NAME, DESCRIPTOR);

        // update the config to use the HDFS repository
        config.put(DatasetSinkConstants.CONFIG_KITE_REPO_URI, repoURI);

        DatasetSink sink = sink(in, config);

        // run the sink
        sink.start();
        sink.process();
        sink.stop();

        Assert.assertEquals(Sets.newHashSet(expected), read(hdfsRepo.<GenericData.Record>load(DATASET_NAME)));
        Assert.assertEquals("Should have committed", 0, remaining(in));

    } finally {
        if (hdfsRepo != null && hdfsRepo.exists(DATASET_NAME)) {
            hdfsRepo.delete(DATASET_NAME);
        }
        cluster.shutdown();
    }
}

From source file: org.apache.hama.HamaTestCase.java

License: Apache License

/**
 * Common method to close down a MiniDFSCluster and the associated file system.
 *
 * @param cluster the cluster to shut down; may be null
 */
public static void shutdownDfs(MiniDFSCluster cluster) {
    if (cluster != null) {
        LOG.info("Shutting down Mini DFS ");
        try {
            cluster.shutdown();
        } catch (Exception e) {
            // A java.lang.reflect.UndeclaredThrowableException can be thrown
            // here because of an InterruptedException. Don't let exceptions
            // here cause a test failure.
        }
        try {
            FileSystem fs = cluster.getFileSystem();
            if (fs != null) {
                LOG.info("Shutting down FileSystem");
                fs.close();
            }
            FileSystem.closeAll();
        } catch (IOException e) {
            LOG.error("error closing file system", e);
        }
    }
}
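
A helper like shutdownDfs is typically wired into a JUnit class-level teardown. A hypothetical harness, assuming the helper above is reachable (the test class and method names here are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class ExampleDfsTest {
    private static MiniDFSCluster cluster;

    @BeforeClass
    public static void startCluster() throws Exception {
        cluster = new MiniDFSCluster.Builder(new Configuration()).build();
    }

    @AfterClass
    public static void stopCluster() {
        // Delegates to the helper above: swallows shutdown exceptions and
        // closes any cached FileSystem instances.
        HamaTestCase.shutdownDfs(cluster);
    }
}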

From source file: org.apache.sentry.binding.solr.HdfsTestUtil.java

License: Apache License

public static void teardownClass(MiniDFSCluster dfsCluster) throws Exception {
    System.clearProperty("solr.lock.type");
    System.clearProperty("test.build.data");
    System.clearProperty("test.cache.data");
    if (dfsCluster != null) {
        dfsCluster.shutdown();
    }

    // TODO: we HACK around HADOOP-9643
    if (savedLocale != null) {
        Locale.setDefault(savedLocale);
    }
}

From source file: org.apache.sentry.tests.e2e.solr.HdfsTestUtil.java

License: Apache License

public static void teardownClass(MiniDFSCluster dfsCluster) throws Exception {
    SolrTestCaseJ4.resetFactory();
    System.clearProperty("solr.lock.type");
    System.clearProperty("test.build.data");
    System.clearProperty("test.cache.data");
    if (dfsCluster != null) {
        dfsCluster.shutdown();
    }

    // TODO: we HACK around HADOOP-9643
    if (savedLocale != null) {
        Locale.setDefault(savedLocale);
    }
}
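
Both teardownClass variants end by restoring a savedLocale field, the workaround for HADOOP-9643 that the TODO comments mention. A self-contained sketch of how the capture and restore sides pair up (the class name and the choice of Locale.ENGLISH are illustrative assumptions):

import java.util.Locale;

public class LocaleGuard {
    private static Locale savedLocale;

    // Capture the JVM default locale before the suite changes it
    public static void setupClass() {
        savedLocale = Locale.getDefault();
        Locale.setDefault(Locale.ENGLISH); // illustrative test locale
    }

    // Mirrors the restore logic in the teardownClass methods above
    public static void teardownClass() {
        if (savedLocale != null) {
            Locale.setDefault(savedLocale);
        }
    }
}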

From source file: org.apache.tajo.storage.TestStorageManager.java

License: Apache License

@Test
public void testGetSplit() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

    int testCount = 10;
    Path tablePath = new Path("/testGetSplit");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test partitions
        List<Path> partitions = Lists.newArrayList();
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, String.valueOf(i));
            DFSTestUtil.createFile(fs, new Path(tmpFile, "tmpfile.dat"), 10, (short) 2, 0xDEADDEADL);
            partitions.add(tmpFile);
        }

        assertTrue(fs.exists(tablePath));
        StorageManager sm = StorageManager.getStorageManager(new TajoConf(conf), tablePath);

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

        List<FileFragment> splits = Lists.newArrayList();
        // Get FileFragments in partition batch
        splits.addAll(sm.getSplits("data", meta, schema, partitions.toArray(new Path[partitions.size()])));
        assertEquals(testCount, splits.size());
        // -1 is unknown volumeId
        assertEquals(-1, splits.get(0).getDiskIds()[0]);

        splits.clear();
        splits.addAll(sm.getSplits("data", meta, schema,
                partitions.subList(0, partitions.size() / 2).toArray(new Path[partitions.size() / 2])));
        assertEquals(testCount / 2, splits.size());
        assertEquals(1, splits.get(0).getHosts().length);
        assertEquals(-1, splits.get(0).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown();

        File dir = new File(testDataPath);
        dir.delete();
    }
}

From source file: org.apache.tajo.storage.TestStorageManager.java

License: Apache License

@Test
public void testGetSplitWithBlockStorageLocationsBatching() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();

    int testCount = 10;
    Path tablePath = new Path("/testGetSplitWithBlockStorageLocationsBatching");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test files
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, "tmpfile" + i + ".dat");
            DFSTestUtil.createFile(fs, tmpFile, 10, (short) 2, 0xDEADDEADL);
        }
        assertTrue(fs.exists(tablePath));
        StorageManager sm = StorageManager.getStorageManager(new TajoConf(conf), tablePath);

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

        List<FileFragment> splits = Lists.newArrayList();
        splits.addAll(sm.getSplits("data", meta, schema, tablePath));

        assertEquals(testCount, splits.size());
        assertEquals(2, splits.get(0).getHosts().length);
        assertEquals(2, splits.get(0).getDiskIds().length);
        assertNotEquals(-1, splits.get(0).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown();

        File dir = new File(testDataPath);
        dir.delete();
    }
}
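
Taken together, the two Tajo tests show how the DFS_HDFS_BLOCKS_METADATA_ENABLED flag changes what getDiskIds() reports: with the flag off, every split carries the unknown volume id -1, while with the flag on (and two datanodes) the splits carry real disk ids, which the assertNotEquals(-1, ...) check verifies. In both tests the cluster is shut down in the finally block regardless of the assertion outcome.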

From source file: pack.block.blockstore.hdfs.HdfsMiniClusterUtil.java

License: Apache License

public static void shutdownDfs(MiniDFSCluster cluster) {
    if (cluster != null) {
        LOGGER.info("Shutting down Mini DFS");
        try {
            cluster.shutdown();
        } catch (Exception e) {
            // A java.lang.reflect.UndeclaredThrowableException can be thrown
            // here because of an InterruptedException. Don't let exceptions
            // here cause a test failure.
        }
        try {
            FileSystem fs = cluster.getFileSystem();
            if (fs != null) {
                LOGGER.info("Shutting down FileSystem");
                fs.close();
            }
            FileSystem.closeAll();
        } catch (IOException e) {
            LOGGER.error("error closing file system", e);
        }

        // This has got to be one of the worst hacks I have ever had to do.
        // It is needed to shut down two thread pools that do not shut down
        // by themselves.
        ThreadGroup threadGroup = Thread.currentThread().getThreadGroup();
        Thread[] threads = new Thread[100];
        int enumerate = threadGroup.enumerate(threads);
        for (int i = 0; i < enumerate; i++) {
            Thread thread = threads[i];
            if (thread.getName().startsWith("pool")) {
                if (thread.isAlive()) {
                    thread.interrupt();
                    LOGGER.info("Stopping ThreadPoolExecutor {}", thread.getName());
                    Object target = getField(Thread.class, thread, "target");
                    if (target != null) {
                        ThreadPoolExecutor e = (ThreadPoolExecutor) getField(ThreadPoolExecutor.class, target,
                                "this$0");
                        if (e != null) {
                            e.shutdownNow();
                        }
                    }
                    try {
                        LOGGER.info("Waiting for thread pool to exit {}", thread.getName());
                        thread.join();
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        }
    }
}