Example usage for org.apache.hadoop.hdfs.MiniDFSCluster HDFS_MINIDFS_BASEDIR

List of usage examples for org.apache.hadoop.hdfs.MiniDFSCluster HDFS_MINIDFS_BASEDIR

Introduction

On this page you can find example usages of the HDFS_MINIDFS_BASEDIR field of org.apache.hadoop.hdfs.MiniDFSCluster.

Prototype

public static final String HDFS_MINIDFS_BASEDIR = "hdfs.minidfs.basedir";

Document

Configuration option to set the base directory under which the MiniDFSCluster stores its data.
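
A minimal sketch of typical usage (assuming the hadoop-hdfs test artifacts are on the classpath; the base directory path below is illustrative):

import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsBaseDirSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Illustrative base directory; any writable local path works.
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-" + UUID.randomUUID());
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            FileSystem fs = cluster.getFileSystem();
            System.out.println("Mini DFS running at " + fs.getUri());
        } finally {
            cluster.shutdown(); // stop the NameNode and DataNodes
        }
    }
}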

Usage

From source file: org.apache.tajo.storage.TestStorageManager.java

License: Apache License

@Test
public void testGetSplit() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

    int testCount = 10;
    Path tablePath = new Path("/testGetSplit");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test partitions
        List<Path> partitions = Lists.newArrayList();
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, String.valueOf(i));
            DFSTestUtil.createFile(fs, new Path(tmpFile, "tmpfile.dat"), 10, (short) 2, 0xDEADDEADL);
            partitions.add(tmpFile);
        }

        assertTrue(fs.exists(tablePath));
        StorageManager sm = StorageManager.getStorageManager(new TajoConf(conf), tablePath);

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

        List<FileFragment> splits = Lists.newArrayList();
        // Get FileFragments in partition batch
        splits.addAll(sm.getSplits("data", meta, schema, partitions.toArray(new Path[partitions.size()])));
        assertEquals(testCount, splits.size());
        // -1 is unknown volumeId
        assertEquals(-1, splits.get(0).getDiskIds()[0]);

        splits.clear();
        splits.addAll(sm.getSplits("data", meta, schema,
                partitions.subList(0, partitions.size() / 2).toArray(new Path[partitions.size() / 2])));
        assertEquals(testCount / 2, splits.size());
        assertEquals(1, splits.get(0).getHosts().length);
        assertEquals(-1, splits.get(0).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown();

        File dir = new File(testDataPath);
        FileUtil.fullyDelete(dir); // File.delete() cannot remove a non-empty directory
    }
}

From source file: org.apache.tajo.storage.TestStorageManager.java

License: Apache License

@Test
public void testGetSplitWithBlockStorageLocationsBatching() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();

    int testCount = 10;
    Path tablePath = new Path("/testGetSplitWithBlockStorageLocationsBatching");
    try {
        DistributedFileSystem fs = cluster.getFileSystem();

        // Create test files
        for (int i = 0; i < testCount; i++) {
            Path tmpFile = new Path(tablePath, "tmpfile" + i + ".dat");
            DFSTestUtil.createFile(fs, tmpFile, 10, (short) 2, 0xDEADDEADL);
        }
        assertTrue(fs.exists(tablePath));
        StorageManager sm = StorageManager.getStorageManager(new TajoConf(conf), tablePath);

        Schema schema = new Schema();
        schema.addColumn("id", Type.INT4);
        schema.addColumn("age", Type.INT4);
        schema.addColumn("name", Type.TEXT);
        TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

        List<FileFragment> splits = Lists.newArrayList();
        splits.addAll(sm.getSplits("data", meta, schema, tablePath));

        assertEquals(testCount, splits.size());
        assertEquals(2, splits.get(0).getHosts().length);
        assertEquals(2, splits.get(0).getDiskIds().length);
        assertNotEquals(-1, splits.get(0).getDiskIds()[0]);
        fs.close();
    } finally {
        cluster.shutdown();

        File dir = new File(testDataPath);
        FileUtil.fullyDelete(dir); // File.delete() cannot remove a non-empty directory
    }
}

From source file: org.apache.tajo.TajoTestingCluster.java

License: Apache License

/**
 * Start a MiniDFSCluster.
 * Can only create one.
 * @param servers How many DNs to start.
 * @param dir Where to home your DFS cluster.
 * @param hosts Hostnames for the DNs to run on.
 * @return The MiniDFSCluster created.
 * @throws java.io.IOException
 * @see #shutdownMiniDFSCluster()
 */
public MiniDFSCluster startMiniDFSCluster(int servers, File dir, final String[] hosts) throws IOException {

    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dir.toString());
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, false);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(new HdfsConfiguration(conf));
    builder.hosts(hosts);
    builder.numDataNodes(servers);
    builder.format(true);
    builder.manageNameDfsDirs(true);
    builder.manageDataDfsDirs(true);
    builder.waitSafeMode(true);
    this.dfsCluster = builder.build();

    // Set this just-started cluster as our filesystem.
    this.defaultFS = this.dfsCluster.getFileSystem();
    this.conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, defaultFS.getUri().toString());
    this.conf.setVar(TajoConf.ConfVars.ROOT_DIR, defaultFS.getUri() + "/tajo");
    isDFSRunning = true;
    return this.dfsCluster;
}
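
A hypothetical call site for this helper (the directory is illustrative, and a no-argument TajoTestingCluster constructor is assumed):

TajoTestingCluster util = new TajoTestingCluster();
// Start one DataNode, homed under an illustrative local directory.
MiniDFSCluster cluster = util.startMiniDFSCluster(1, new File("/tmp/tajo-minidfs"), null);
try {
    // fs.defaultFS in util's conf now points at the mini cluster
} finally {
    util.shutdownMiniDFSCluster();
}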

From source file: org.apache.tephra.hbase.coprocessor.TransactionProcessorTest.java

License: Apache License

@BeforeClass
public static void setupBeforeClass() throws Exception {
    Configuration hConf = new Configuration();
    String rootDir = tmpFolder.newFolder().getAbsolutePath();
    hConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, rootDir);
    hConf.set(HConstants.HBASE_DIR, rootDir + "/hbase");

    dfsCluster = new MiniDFSCluster.Builder(hConf).numDataNodes(1).build();
    dfsCluster.waitActive();
    conf = HBaseConfiguration.create(dfsCluster.getFileSystem().getConf());

    conf.unset(TxConstants.Manager.CFG_TX_HDFS_USER);
    conf.unset(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES);
    String localTestDir = tmpFolder.newFolder().getAbsolutePath();
    conf.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, localTestDir);
    conf.set(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES, DefaultSnapshotCodec.class.getName());

    // write an initial transaction snapshot
    InvalidTxList invalidTxList = new InvalidTxList();
    invalidTxList.addAll(invalidSet);
    TransactionSnapshot txSnapshot = TransactionSnapshot.copyFrom(System.currentTimeMillis(), V[6] - 1, V[7],
            invalidTxList,
            // this will set visibility upper bound to V[6]
            Maps.newTreeMap(ImmutableSortedMap.of(V[6],
                    new TransactionManager.InProgressTx(V[6] - 1, Long.MAX_VALUE,
                            TransactionManager.InProgressType.SHORT))),
            new HashMap<Long, TransactionManager.ChangeSet>(),
            new TreeMap<Long, TransactionManager.ChangeSet>());
    txVisibilityState = new TransactionSnapshot(txSnapshot.getTimestamp(), txSnapshot.getReadPointer(),
            txSnapshot.getWritePointer(), txSnapshot.getInvalid(), txSnapshot.getInProgress());
    HDFSTransactionStateStorage tmpStorage = new HDFSTransactionStateStorage(conf,
            new SnapshotCodecProvider(conf), new TxMetricsCollector());
    tmpStorage.startAndWait();
    tmpStorage.writeSnapshot(txSnapshot);
    tmpStorage.stopAndWait();
}

From source file: org.apache.tephra.hbase.TransactionAwareHTableTest.java

License: Apache License

@BeforeClass
public static void setupBeforeClass() throws Exception {
    testUtil = new HBaseTestingUtility();
    conf = testUtil.getConfiguration();

    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpFolder.newFolder().getAbsolutePath());
    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

    conf.unset(TxConstants.Manager.CFG_TX_HDFS_USER);
    conf.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, tmpFolder.newFolder().getAbsolutePath());

    conf.setLong(TxConstants.Manager.CFG_TX_SNAPSHOT_INTERVAL, 5);

    // Tune down the connection thread pool size
    conf.setInt("hbase.hconnection.threads.core", 5);
    conf.setInt("hbase.hconnection.threads.max", 10);
    // Tune down handler threads in the regionserver
    conf.setInt("hbase.regionserver.handler.count", 10);

    // Set to random port
    conf.setInt("hbase.master.port", 0);
    conf.setInt("hbase.master.info.port", 0);
    conf.setInt("hbase.regionserver.port", 0);
    conf.setInt("hbase.regionserver.info.port", 0);

    testUtil.startMiniCluster();
    hBaseAdmin = testUtil.getHBaseAdmin();
    txStateStorage = new HDFSTransactionStateStorage(conf, new SnapshotCodecProvider(conf),
            new TxMetricsCollector());
    txManager = new TransactionManager(conf, txStateStorage, new TxMetricsCollector());
    txManager.startAndWait();
}

From source file: org.apache.tez.analyzer.TestAnalyzer.java

License: Apache License

@BeforeClass
public static void setupClass() throws Exception {
    conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, false);
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
    fs = dfsCluster.getFileSystem();
    conf.set("fs.defaultFS", fs.getUri().toString());

    setupTezCluster();
}

From source file: org.apache.tez.common.TestTezCommonUtils.java

License: Apache License

@BeforeClass
public static void setup() throws Exception {
    conf.set(TezConfiguration.TEZ_AM_STAGING_DIR, STAGE_DIR);
    LOG.info("Starting mini clusters");
    try {
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
        dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).format(true).racks(null).build();
        remoteFs = dfsCluster.getFileSystem();
        RESOLVED_STAGE_DIR = remoteFs.getUri() + STAGE_DIR;
        conf.set("fs.defaultFS", remoteFs.getUri().toString()); // use HDFS
    } catch (IOException io) {
        throw new RuntimeException("problem starting mini dfs cluster", io);
    }
}

From source file: org.apache.tez.dag.history.ats.acls.TestATSHistoryV15.java

License: Apache License

@BeforeClass
public static void setup() throws IOException {
    try {
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
        dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).racks(null).build();
        remoteFs = dfsCluster.getFileSystem();
    } catch (IOException io) {
        throw new RuntimeException("problem starting mini dfs cluster", io);
    }

    if (mrrTezCluster == null) {
        try {
            mrrTezCluster = new MiniTezClusterWithTimeline(TestATSHistoryV15.class.getName(), 1, 1, 1, true);
            Configuration conf = new Configuration();
            conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
            conf.set("fs.defaultFS", remoteFs.getUri().toString()); // use HDFS
            conf.setInt("yarn.nodemanager.delete.debug-delay-sec", 20000);
            atsActivePath = new Path("/tmp/ats/active/" + random.nextInt(100000));
            Path atsDonePath = new Path("/tmp/ats/done/" + random.nextInt(100000));
            conf.setDouble(YarnConfiguration.TIMELINE_SERVICE_VERSION, 1.5);

            remoteFs.mkdirs(atsActivePath);
            remoteFs.mkdirs(atsDonePath);

            conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
            conf.set(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR,
                    remoteFs.resolvePath(atsActivePath).toString());
            conf.set(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_DONE_DIR,
                    remoteFs.resolvePath(atsDonePath).toString());

            mrrTezCluster.init(conf);
            mrrTezCluster.start();
        } catch (Throwable e) {
            LOG.info("Failed to start Mini Tez Cluster", e);
        }
    }
    user = UserGroupInformation.getCurrentUser().getShortUserName();
    timelineAddress = mrrTezCluster.getConfig().get(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS);
    if (timelineAddress != null) {
        // Hack to handle bug in MiniYARNCluster handling of webapp address
        timelineAddress = timelineAddress.replace("0.0.0.0", "localhost");
    }
}

From source file: org.apache.tez.dag.history.ats.acls.TestATSHistoryWithACLs.java

License: Apache License

@BeforeClass
public static void setup() throws IOException {
    try {
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
        dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).racks(null).build();
        remoteFs = dfsCluster.getFileSystem();
    } catch (IOException io) {
        throw new RuntimeException("problem starting mini dfs cluster", io);
    }

    if (mrrTezCluster == null) {
        try {
            mrrTezCluster = new MiniTezClusterWithTimeline(TestATSHistoryWithACLs.class.getName(), 1, 1, 1,
                    true);
            Configuration conf = new Configuration();
            conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
            conf.set("fs.defaultFS", remoteFs.getUri().toString()); // use HDFS
            conf.setInt("yarn.nodemanager.delete.debug-delay-sec", 20000);
            mrrTezCluster.init(conf);
            mrrTezCluster.start();
        } catch (Throwable e) {
            LOG.info("Failed to start Mini Tez Cluster", e);
        }
    }
    user = UserGroupInformation.getCurrentUser().getShortUserName();
    timelineAddress = mrrTezCluster.getConfig().get(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS);
    if (timelineAddress != null) {
        // Hack to handle bug in MiniYARNCluster handling of webapp address
        timelineAddress = timelineAddress.replace("0.0.0.0", "localhost");
    }
}

From source file: org.apache.tez.dag.history.logging.ats.TestATSHistoryWithMiniCluster.java

License: Apache License

@BeforeClass
public static void setup() throws IOException {
    try {
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
        dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).format(true).racks(null).build();
        remoteFs = dfsCluster.getFileSystem();
    } catch (IOException io) {
        throw new RuntimeException("problem starting mini dfs cluster", io);
    }

    if (mrrTezCluster == null) {
        try {
            mrrTezCluster = new MiniTezClusterWithTimeline(TestATSHistoryWithMiniCluster.class.getName(), 1, 1,
                    1, true);
            Configuration conf = new Configuration();
            conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
            conf.set("fs.defaultFS", remoteFs.getUri().toString()); // use HDFS
            conf.setInt("yarn.nodemanager.delete.debug-delay-sec", 20000);
            mrrTezCluster.init(conf);
            mrrTezCluster.start();
        } catch (Throwable e) {
            LOG.info("Failed to start Mini Tez Cluster", e);
        }
    }
    timelineAddress = mrrTezCluster.getConfig().get(YarnConfiguration.TIMELINE_SERVICE_WEBAPP_ADDRESS);
    if (timelineAddress != null) {
        // Hack to handle bug in MiniYARNCluster handling of webapp address
        timelineAddress = timelineAddress.replace("0.0.0.0", "localhost");
    }
}