Example usage for org.apache.hadoop.hdfs MiniDFSCluster HDFS_MINIDFS_BASEDIR

Introduction

On this page you can find example usages of org.apache.hadoop.hdfs MiniDFSCluster HDFS_MINIDFS_BASEDIR.

Prototype

String HDFS_MINIDFS_BASEDIR

Document

Configuration option to set the data dir.
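
All of the examples below follow the same basic pattern: set MiniDFSCluster.HDFS_MINIDFS_BASEDIR on a Configuration before building the cluster, so the mini cluster keeps its NameNode and DataNode files under a directory the test controls. Below is a minimal, self-contained sketch of that pattern; the class name and directory path are illustrative, not taken from any of the examples.

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsBaseDirExample {
    public static void main(String[] args) throws Exception {
        // Keep all mini-cluster state under one isolated directory
        // (this path is illustrative).
        File baseDir = new File("target/hdfs-example").getAbsoluteFile();

        Configuration conf = new Configuration();
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
            cluster.waitActive();
            FileSystem fs = cluster.getFileSystem();
            System.out.println("Mini HDFS running at " + fs.getUri());
        } finally {
            // Release ports and file handles when done.
            cluster.shutdown();
        }
    }
}

Pointing the base directory at a per-test temporary folder, as most of the examples below do, keeps parallel test runs from clobbering each other's data.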

Usage

From source file: co.cask.hydrator.plugin.HDFSSinkTest.java

License: Apache License

@Before
public void beforeTest() throws Exception {
    // Set up a Hadoop minicluster
    File baseDir = temporaryFolder.newFolder();
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    dfsCluster = builder.build();
    dfsCluster.waitActive();
    fileSystem = FileSystem.get(conf);
}
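
A setup like this is normally paired with a teardown that releases the cluster; a minimal sketch, assuming the dfsCluster field initialized above:

@After
public void afterTest() {
    if (dfsCluster != null) {
        dfsCluster.shutdown();
    }
}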

From source file: co.cask.tephra.hbase10.coprocessor.TransactionProcessorTest.java

License: Apache License

@BeforeClass
public static void setupBeforeClass() throws Exception {
    Configuration hConf = new Configuration();
    String rootDir = tmpFolder.newFolder().getAbsolutePath();
    hConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, rootDir);
    hConf.set(HConstants.HBASE_DIR, rootDir + "/hbase");

    dfsCluster = new MiniDFSCluster.Builder(hConf).numDataNodes(1).build();
    dfsCluster.waitActive();
    conf = HBaseConfiguration.create(dfsCluster.getFileSystem().getConf());

    conf.unset(TxConstants.Manager.CFG_TX_HDFS_USER);
    conf.unset(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES);
    String localTestDir = tmpFolder.newFolder().getAbsolutePath();
    conf.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, localTestDir);
    conf.set(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES, DefaultSnapshotCodec.class.getName());

    // write an initial transaction snapshot
    TransactionSnapshot txSnapshot = TransactionSnapshot.copyFrom(System.currentTimeMillis(), V[6] - 1, V[7],
            invalidSet,
            // this will set visibility upper bound to V[6]
            Maps.newTreeMap(ImmutableSortedMap.of(V[6],
                    new TransactionManager.InProgressTx(V[6] - 1, Long.MAX_VALUE, TransactionType.SHORT))),
            new HashMap<Long, Set<ChangeId>>(), new TreeMap<Long, Set<ChangeId>>());
    txVisibilityState = new TransactionSnapshot(txSnapshot.getTimestamp(), txSnapshot.getReadPointer(),
            txSnapshot.getWritePointer(), txSnapshot.getInvalid(), txSnapshot.getInProgress());
    HDFSTransactionStateStorage tmpStorage = new HDFSTransactionStateStorage(conf,
            new SnapshotCodecProvider(conf), new TxMetricsCollector());
    tmpStorage.startAndWait();
    tmpStorage.writeSnapshot(txSnapshot);
    tmpStorage.stopAndWait();
}
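
Note that this setup points both MiniDFSCluster.HDFS_MINIDFS_BASEDIR and HConstants.HBASE_DIR under the same temporary root, so the HBase root directory lands in the per-test scratch area alongside the mini DFS data.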

From source file: co.cask.tephra.hbase94.coprocessor.TransactionProcessorTest.java

License: Apache License

@BeforeClass
public static void setupBeforeClass() throws Exception {
    Configuration hConf = new Configuration();
    hConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpFolder.newFolder().getAbsolutePath());

    dfsCluster = new MiniDFSCluster.Builder(hConf).numDataNodes(1).build();
    dfsCluster.waitActive();
    conf = HBaseConfiguration.create(dfsCluster.getFileSystem().getConf());

    conf.unset(TxConstants.Manager.CFG_TX_HDFS_USER);
    conf.unset(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES);
    String localTestDir = "/tmp/transactionDataJanitorTest";
    conf.set(TxConstants.Manager.CFG_TX_SNAPSHOT_DIR, localTestDir);
    conf.set(TxConstants.Persist.CFG_TX_SNAPHOT_CODEC_CLASSES, DefaultSnapshotCodec.class.getName());

    // write an initial transaction snapshot
    txSnapshot = TransactionSnapshot.copyFrom(System.currentTimeMillis(), V[6] - 1, V[7], invalidSet,
            // this will set visibility upper bound to V[6]
            Maps.newTreeMap(ImmutableSortedMap.of(V[6],
                    new TransactionManager.InProgressTx(V[6] - 1, Long.MAX_VALUE, TransactionType.SHORT))),
            new HashMap<Long, Set<ChangeId>>(), new TreeMap<Long, Set<ChangeId>>());
    HDFSTransactionStateStorage tmpStorage = new HDFSTransactionStateStorage(conf,
            new SnapshotCodecProvider(conf));
    tmpStorage.startAndWait();
    tmpStorage.writeSnapshot(txSnapshot);
    tmpStorage.stopAndWait();
}

From source file: co.cask.tephra.persist.CommitMarkerCodecTest.java

License: Apache License

@BeforeClass
public static void setupBeforeClass() throws Exception {
    Configuration hConf = new Configuration();
    hConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TMP_FOLDER.newFolder().getAbsolutePath());

    dfsCluster = new MiniDFSCluster.Builder(hConf).numDataNodes(1).build();
    conf = new Configuration(dfsCluster.getFileSystem().getConf());
    fs = FileSystem.newInstance(FileSystem.getDefaultUri(conf), conf);
}
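
Unlike the earlier examples, this test obtains its FileSystem via FileSystem.newInstance(...) rather than FileSystem.get(...); newInstance bypasses the JVM-wide FileSystem cache, giving the test a private instance it can close without affecting other holders of the cached one.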

From source file: co.cask.tephra.persist.HDFSTransactionLogTest.java

License: Apache License

@BeforeClass
public static void setupBeforeClass() throws Exception {
    Configuration hConf = new Configuration();
    hConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TMP_FOLDER.newFolder().getAbsolutePath());

    dfsCluster = new MiniDFSCluster.Builder(hConf).numDataNodes(1).build();
    conf = new Configuration(dfsCluster.getFileSystem().getConf());
    metricsCollector = new TxMetricsCollector();
}

From source file: co.cask.tephra.persist.HDFSTransactionStateStorageTest.java

License: Apache License

@BeforeClass
public static void setupBeforeClass() throws Exception {
    Configuration hConf = new Configuration();
    hConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpFolder.newFolder().getAbsolutePath());

    dfsCluster = new MiniDFSCluster.Builder(hConf).numDataNodes(1).build();
    conf = new Configuration(dfsCluster.getFileSystem().getConf());
}

From source file: com.hortonworks.minicluster.MiniHadoopCluster.java

License: Apache License

/**
 *
 * @param clusterName
 * @param numNodeManagers
 */
public MiniHadoopCluster(String clusterName, int numNodeManagers) {
    super(clusterName);
    this.testWorkDir = new File("target/MINI_YARN_CLUSTER");
    this.resourceManager = new UnsecureResourceManager();
    this.numLocalDirs = 1;
    this.numLogDirs = 1;
    this.nodeManagers = new NodeManager[numNodeManagers];
    this.configuration = new YarnConfiguration(new Configuration());
    this.configuration.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    this.configuration.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "target/MINI_DFS_CLUSTER/data");
    try {
        FileUtils.deleteDirectory(MiniHadoopCluster.this.testWorkDir);
    } catch (Exception e) {
        logger.warn("Failed to remove 'target' directory", e);
    }
}
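
Here the DFS base directory is a hard-coded relative path under target/ rather than a per-test temporary folder, so repeated runs reuse the same location; the constructor likewise deletes the previous YARN work directory up front instead of relying on a fresh directory per run.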

From source file: com.uber.hoodie.common.table.log.HoodieLogFormatAppendFailureTest.java

License: Apache License

@BeforeClass
public static void setUpClass() throws IOException {
    // NOTE : The MiniDFSCluster leaves behind the directory under which the cluster was created
    baseDir = new File("/tmp/" + UUID.randomUUID().toString());
    FileUtil.fullyDelete(baseDir);
    // Append is not supported by LocalFileSystem, so HDFS needs to be set up.
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    // lower the heartbeat and recheck intervals so a dead DataNode is recognized quickly
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
    cluster = new MiniDFSCluster.Builder(conf).checkExitOnShutdown(true).numDataNodes(4).build();
}
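
The interval settings are what make this test practical: a one-second DataNode heartbeat, a one-second NameNode recheck interval, and a three-second client socket timeout let the NameNode notice a stopped DataNode within seconds, while the four DataNodes give the cluster replicas to fall back on when one is killed.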

From source file: de.tudarmstadt.ukp.dkpro.core.fs.hdfs.HdfsResourceLoaderLocatorTest.java

License: Apache License

@Before
public void startCluster() throws Exception {
    // Start dummy HDFS
    File target = folder.newFolder("hdfs");
    hadoopTmp = folder.newFolder("hadoop");

    File baseDir = new File(target, "hdfs").getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    conf.set("hadoop.tmp.dir", hadoopTmp.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();
}

From source file: edu.uci.ics.asterix.aoya.test.AsterixYARNInstanceUtil.java

License: Apache License

public YarnConfiguration setUp() throws Exception {
    File asterixProjectDir = new File(System.getProperty("user.dir"));

    File installerTargetDir = new File(asterixProjectDir, "target");

    String[] dirsInTarget = installerTargetDir.list(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return new File(dir, name).isDirectory() && name.startsWith("asterix-yarn")
                    && name.endsWith("binary-assembly");
        }

    });
    if (dirsInTarget.length != 1) {
        throw new IllegalStateException("Could not find binary to run YARN integration test with");
    }
    aoyaHome = installerTargetDir.getAbsolutePath() + File.separator + dirsInTarget[0];
    File asterixServerInstallerDir = new File(aoyaHome, "asterix");
    String[] zipsInFolder = asterixServerInstallerDir.list(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return name.startsWith("asterix-server") && name.endsWith("binary-assembly.zip");
        }
    });
    if (zipsInFolder.length != 1) {
        throw new IllegalStateException("Could not find server binary to run YARN integration test with");
    }
    aoyaServerPath = asterixServerInstallerDir.getAbsolutePath() + File.separator + zipsInFolder[0];
    configPath = aoyaHome + File.separator + "configs" + File.separator + "local.xml";
    parameterPath = aoyaHome + File.separator + "conf" + File.separator + "base-asterix-configuration.xml";
    YARNCluster.getInstance().setup();
    appConf = new YarnConfiguration();
    File baseDir = new File("./target/hdfs/").getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    appConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(appConf);
    MiniDFSCluster hdfsCluster = builder.build();
    miniCluster = YARNCluster.getInstance().getCluster();
    appConf.set("fs.defaultFS", "hdfs://localhost:" + hdfsCluster.getNameNodePort());
    miniCluster.init(appConf);
    Cluster defaultConfig = Utils.parseYarnClusterConfig(configPath);
    for (Node n : defaultConfig.getNode()) {
        n.setClusterIp(MiniYARNCluster.getHostname());
    }
    defaultConfig.getMasterNode().setClusterIp(MiniYARNCluster.getHostname());
    configPath = "target" + File.separator + "localized-aoya-config.xml";
    Utils.writeYarnClusterConfig(configPath, defaultConfig);
    miniCluster.start();
    appConf = new YarnConfiguration(miniCluster.getConfig());
    appConf.set("fs.defaultFS", "hdfs://localhost:" + hdfsCluster.getNameNodePort());
    //TODO:why must I do this!? what is not being passed properly via environment variables???
    appConf.writeXml(new FileOutputStream("target" + File.separator + "yarn-site.xml"));

    //once the cluster is created, you can get its configuration
    //with the binding details to the cluster added from the minicluster
    FileSystem fs = FileSystem.get(appConf);
    Path instanceState = new Path(fs.getHomeDirectory(), AsterixYARNClient.CONF_DIR_REL + INSTANCE_NAME + "/");
    fs.delete(instanceState, true);
    Assert.assertFalse(fs.exists(instanceState));

    File outdir = new File(PATH_ACTUAL);
    outdir.mkdirs();
    return appConf;
}