Example usage for org.apache.hadoop.hdfs MiniDFSCluster waitClusterUp

Introduction

On this page you can find example usage for org.apache.hadoop.hdfs.MiniDFSCluster#waitClusterUp.

Prototype

public void waitClusterUp() throws IOException 

Document

Waits for the cluster to get out of safemode.
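
Before the harvested examples below, here is a minimal, self-contained sketch of the typical pattern (a sketch assuming the hadoop-hdfs test artifact is on the classpath; the class name and base directory are illustrative):

import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class WaitClusterUpExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Keep the cluster's storage under a throwaway directory.
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-" + UUID.randomUUID());

        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            // Block until the NameNode has left safemode and the cluster is usable.
            cluster.waitClusterUp();
            DistributedFileSystem fs = cluster.getFileSystem();
            fs.mkdirs(new Path("/example"));
        } finally {
            cluster.shutdown();
        }
    }
}

MiniDFSCluster also offers waitActive(), which additionally waits for the DataNodes to register; in many Hadoop versions build() already waits for safemode exit, so the explicit waitClusterUp() call is a cheap safeguard.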

Usage

From source file: co.cask.cdap.common.logging.LogCollectorTest.java

License: Apache License

@Test
public void testCollectionDFS() throws IOException {

    MiniDFSCluster dfsCluster = null;

    try {
        File dfsPath = tempFolder.newFolder();
        System.setProperty("test.build.data", dfsPath.toString());
        System.setProperty("test.cache.data", dfsPath.toString());

        System.err.println("Starting up Mini HDFS cluster...");
        Configuration hConf = new Configuration();
        CConfiguration conf = CConfiguration.create();
        //conf.setInt("dfs.block.size", 1024*1024);
        dfsCluster = new MiniDFSCluster.Builder(hConf).nameNodePort(0).numDataNodes(1).format(true)
                .manageDataDfsDirs(true).manageNameDfsDirs(true).build();
        dfsCluster.waitClusterUp();
        System.err.println("Mini HDFS is started.");

        // Add HDFS info to conf
        hConf.set("fs.defaultFS", dfsCluster.getFileSystem().getUri().toString());
        // set a root directory for log collection
        conf.set(Constants.CFG_LOG_COLLECTION_ROOT, "/logtemp");

        testCollection(conf, hConf);
    } finally {
        if (dfsCluster != null) {
            System.err.println("Shutting down Mini HDFS cluster...");
            dfsCluster.shutdown();
            System.err.println("Mini HDFS is shut down.");
        }
    }
}
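
Note: nameNodePort(0) in the builder asks the NameNode to bind a free ephemeral port, which avoids port collisions when tests run concurrently.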

From source file: org.apache.tajo.storage.TestByteBufLineReader.java

License: Apache License

@Test
public void testReaderWithDFS() throws Exception {
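    // Note: TEST_PATH and LINE are constants defined on the enclosing test class.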
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitClusterUp();

    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    Path tablePath = new Path("/testReaderWithDFS");
    Path filePath = new Path(tablePath, "data.dat");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();
        FSDataOutputStream out = fs.create(filePath, true);
        out.write(LINE.getBytes(Charset.defaultCharset()));
        out.write('\n');
        out.close();

        assertTrue(fs.exists(filePath));
        FSDataInputStream inputStream = fs.open(filePath);
        assertTrue(inputStream.getWrappedStream() instanceof ByteBufferReadable);

        ByteBufLineReader lineReader = new ByteBufLineReader(new FSDataInputChannel(inputStream));
        assertEquals(LINE, lineReader.readLine());
        lineReader.seek(0);
        assertEquals(LINE, lineReader.readLine());
        assertNull(lineReader.readLine());

        lineReader.close();
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}

From source file: org.apache.tajo.storage.TestFileStorageManager.java

License: Apache License

@Test
public void testGetSplit() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitClusterUp();
    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    int testCount = 10;
    Path tablePath = new Path("/testGetSplit");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test partitions
        List<Path> partitions = Lists.newArrayList();
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, String.valueOf(i));
            DFSTestUtil.createFile(fs, new Path(tmpFile, "tmpfile.dat"), 10, (short) 2, 0xDEADDEADL);
            partitions.add(tmpFile);
        }

        assertTrue(fs.exists(tablePath));
        FileStorageManager sm = (FileStorageManager) StorageManager.getFileStorageManager(tajoConf);
        assertEquals(fs.getUri(), sm.getFileSystem().getUri());

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

        List<Fragment> splits = Lists.newArrayList();
        // Get FileFragments in partition batch
        splits.addAll(sm.getSplits("data", meta, schema, partitions.toArray(new Path[partitions.size()])));
        assertEquals(testCount, splits.size());
        // -1 is unknown volumeId
        assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);

        splits.clear();
        splits.addAll(sm.getSplits("data", meta, schema,
                partitions.subList(0, partitions.size() / 2).toArray(new Path[partitions.size() / 2])));
        assertEquals(testCount / 2, splits.size());
        assertEquals(1, splits.get(0).getHosts().length);
        assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}
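
Note: with DFS_HDFS_BLOCKS_METADATA_ENABLED set to false (as above), volume ids cannot be resolved, so getDiskIds() yields -1; the next example enables the flag and asserts real disk ids.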

From source file: org.apache.tajo.storage.TestFileStorageManager.java

License: Apache License

@Test
public void testGetSplitWithBlockStorageLocationsBatching() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitClusterUp();

    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    int testCount = 10;
    Path tablePath = new Path("/testGetSplitWithBlockStorageLocationsBatching");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test files
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, "tmpfile" + i + ".dat");
            DFSTestUtil.createFile(fs, tmpFile, 10, (short) 2, 0xDEADDEADL);
        }
        assertTrue(fs.exists(tablePath));
        FileStorageManager sm = (FileStorageManager) StorageManager.getFileStorageManager(tajoConf);
        assertEquals(fs.getUri(), sm.getFileSystem().getUri());

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

        List<Fragment> splits = Lists.newArrayList();
        splits.addAll(sm.getSplits("data", meta, schema, tablePath));

        assertEquals(testCount, splits.size());
        assertEquals(2, splits.get(0).getHosts().length);
        assertEquals(2, ((FileFragment) splits.get(0)).getDiskIds().length);
        assertNotEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}

From source file: org.apache.tajo.storage.TestFileStorageManager.java

License: Apache License

@Test
public void testStoreType() throws Exception {
    final Configuration hdfsConf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    hdfsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    hdfsConf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    hdfsConf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(hdfsConf).numDataNodes(2).build();
    cluster.waitClusterUp();

    TajoConf tajoConf = new TajoConf(hdfsConf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    try {
        /* Local FileSystem */
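        // Note: conf and fs here are fields of the enclosing test class
        // (presumably a local TajoConf and the local FileSystem); they are
        // not defined within this snippet.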
        FileStorageManager sm = (FileStorageManager) StorageManager.getStorageManager(conf, StoreType.CSV);
        assertEquals(fs.getUri(), sm.getFileSystem().getUri());

        /* Distributed FileSystem */
        sm = (FileStorageManager) StorageManager.getStorageManager(tajoConf, StoreType.CSV);
        assertNotEquals(fs.getUri(), sm.getFileSystem().getUri());
        assertEquals(cluster.getFileSystem().getUri(), sm.getFileSystem().getUri());
    } finally {
        cluster.shutdown(true);
    }
}

From source file: org.apache.tajo.storage.TestFileTablespace.java

License: Apache License

@Test
public void testGetSplit() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitClusterUp();
    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    int testCount = 10;
    Path tablePath = new Path("/testGetSplit");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test partitions
        List<Path> partitions = Lists.newArrayList();
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, String.valueOf(i));
            DFSTestUtil.createFile(fs, new Path(tmpFile, "tmpfile.dat"), 10, (short) 2, 0xDEADDEADL);
            partitions.add(tmpFile);
        }

        assertTrue(fs.exists(tablePath));
        FileTablespace space = new FileTablespace("testGetSplit", fs.getUri());
        space.init(new TajoConf(conf));
        assertEquals(fs.getUri(), space.getUri());

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta("TEXT");

        List<Fragment> splits = Lists.newArrayList();
        // Get FileFragments in partition batch
        splits.addAll(space.getSplits("data", meta, schema, partitions.toArray(new Path[partitions.size()])));
        assertEquals(testCount, splits.size());
        // -1 is unknown volumeId
        assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);

        splits.clear();
        splits.addAll(space.getSplits("data", meta, schema,
                partitions.subList(0, partitions.size() / 2).toArray(new Path[partitions.size() / 2])));
        assertEquals(testCount / 2, splits.size());
        assertEquals(1, splits.get(0).getHosts().length);
        assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}

From source file: org.apache.tajo.storage.TestFileTablespace.java

License: Apache License

@Test
public void testGetSplitWithBlockStorageLocationsBatching() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitClusterUp();

    TajoConf tajoConf = new TajoConf(conf);
    tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

    int testCount = 10;
    Path tablePath = new Path("/testGetSplitWithBlockStorageLocationsBatching");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test files
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, "tmpfile" + i + ".dat");
            DFSTestUtil.createFile(fs, tmpFile, 10, (short) 2, 0xDEADDEADL);
        }
        assertTrue(fs.exists(tablePath));

        FileTablespace sm = new FileTablespace("testGetSplitWithBlockStorageLocationsBatching", fs.getUri());
        sm.init(new TajoConf(conf));

        assertEquals(fs.getUri(), sm.getUri());

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta("TEXT");

        List<Fragment> splits = Lists.newArrayList();
        splits.addAll(sm.getSplits("data", meta, schema, tablePath));

        assertEquals(testCount, splits.size());
        assertEquals(2, splits.get(0).getHosts().length);
        assertEquals(2, ((FileFragment) splits.get(0)).getDiskIds().length);
        assertNotEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown(true);
    }
}

From source file: org.apache.tajo.storage.TestFileTablespace.java

License: Apache License

@Test
public void testGetFileTablespace() throws Exception {
    final Configuration hdfsConf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    hdfsConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    hdfsConf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    hdfsConf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(hdfsConf).numDataNodes(2).build();
    cluster.waitClusterUp();
    URI uri = URI.create(cluster.getFileSystem().getUri() + "/tajo");

    Optional<Tablespace> existingTs = Optional.absent();
    try {
        /* Local FileSystem */
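        // Note: localFs and conf are fields of the enclosing test class; they
        // are not defined within this snippet.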
        FileTablespace space = TablespaceManager.getLocalFs();
        assertEquals(localFs.getUri(), space.getFileSystem().getUri());

        FileTablespace distTablespace = new FileTablespace("testGetFileTablespace", uri);
        distTablespace.init(conf);
        existingTs = TablespaceManager.addTableSpaceForTest(distTablespace);

        /* Distributed FileSystem */
        space = (FileTablespace) TablespaceManager.get(uri).get();
        assertEquals(cluster.getFileSystem().getUri(), space.getFileSystem().getUri());

        space = (FileTablespace) TablespaceManager.getByName("testGetFileTablespace").get();
        assertEquals(cluster.getFileSystem().getUri(), space.getFileSystem().getUri());

    } finally {

        if (existingTs.isPresent()) {
            TablespaceManager.addTableSpaceForTest(existingTs.get());
        }

        cluster.shutdown(true);
    }
}

From source file: org.kiji.bento.BentoHBaseTestingUtility.java

License: Apache License

/**
 * Start a mini DFS cluster. We override this method in our child class so we can
 * disable formatting the filesystem between runs and so we can pass configuration
 * options for the namenode port and namenode UI address.
 *
 * @param servers How many DataNodes to start.
 * @param hosts Hostnames for the DataNodes to run on.
 * @return The mini DFS cluster created.
 * @throws Exception If an error occurs when starting up the cluster.
 * @see #shutdownMiniDFSCluster()
 */
@Override
public MiniDFSCluster startMiniDFSCluster(int servers, final String[] hosts) throws Exception {
    // Check that there is not already a cluster running
    isRunningCluster();

    // We have to set this property as it is used by MiniCluster
    System.setProperty("test.build.data", mClusterTestDir.toString());

    // Some tests also do this:
    //  System.getProperty("test.cache.data", "build/test/cache");
    // It's also deprecated
    System.setProperty("test.cache.data", mClusterTestDir.toString());

    // Use configuration provided values for the namenode port and namenode ui port, or use
    // accepted defaults.
    Configuration conf = getConfiguration();
    int nameNodePort = FileSystem.get(conf).getUri().getPort();
    int nameNodeUiPort = getPortFromConfiguration("dfs.http.address", 50070);
    MiniDFSCluster dfsCluster = null;
    MiniDFSCluster.Builder options = new MiniDFSCluster.Builder(conf).nameNodePort(nameNodePort)
            .nameNodeHttpPort(nameNodeUiPort).numDataNodes(servers).manageNameDfsDirs(true)
            .manageDataDfsDirs(true).hosts(hosts);

    // Ok, now we can start. First try it without reformatting.
    try {
        LOG.debug("Attempting to use existing cluster storage.");
        dfsCluster = options.format(false).build();
    } catch (InconsistentFSStateException e) {
        LOG.debug("Couldn't use existing storage. Attempting to format and try again.");
        dfsCluster = options.format(true).build();
    }

    // Set this just-started cluster as our filesystem.
    FileSystem fs = dfsCluster.getFileSystem();
    conf.set("fs.defaultFS", fs.getUri().toString());
    // Do old style too just to be safe.
    conf.set("fs.default.name", fs.getUri().toString());

    // Wait for the cluster to be totally up
    dfsCluster.waitClusterUp();

    // Save the dfsCluster in the private field of the parent class.
    setField(HBaseTestingUtility.class, this, "dfsCluster", dfsCluster);

    return dfsCluster;
}