Example usage for org.apache.hadoop.hdfs MiniDFSCluster HDFS_MINIDFS_BASEDIR

List of usage examples for org.apache.hadoop.hdfs MiniDFSCluster HDFS_MINIDFS_BASEDIR

Introduction

On this page you can find example usages of org.apache.hadoop.hdfs MiniDFSCluster HDFS_MINIDFS_BASEDIR.

Prototype

String HDFS_MINIDFS_BASEDIR

To view the source code for org.apache.hadoop.hdfs MiniDFSCluster HDFS_MINIDFS_BASEDIR, click the Source Link.

Document

Configuration option to set the data dir used by the mini-cluster.
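
A minimal, self-contained sketch of the typical pattern, assuming the hadoop-hdfs test artifacts are on the classpath (the class name MiniDfsBaseDirExample and the paths used here are illustrative, not taken from the examples below): set the property on a Configuration, build the cluster, and shut it down when done.

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsBaseDirExample {
    public static void main(String[] args) throws Exception {
        // Keep the mini-cluster's NameNode and DataNode storage under a scratch directory.
        File baseDir = new File("target/hdfs/minidfs-example").getAbsoluteFile();
        Configuration conf = new Configuration();
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
            cluster.waitActive();
            FileSystem fs = cluster.getFileSystem();
            fs.mkdirs(new Path("/example"));
            System.out.println("mini DFS is up at " + fs.getUri());
        } finally {
            cluster.shutdown(); // stops the daemons and releases ports
        }
    }
}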

Usage

From source file:edu.uci.ics.asterix.aoya.test.YARNCluster.java

License:Apache License

/**
 * Instantiates the (Mini) DFS Cluster with the configured number of datanodes.
 * Post instantiation, data is loaded to HDFS.
 * Called prior to running the Runtime test suite.
 */
public void setup() throws Exception {
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/core-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/mapred-site.xml"));
    conf.addResource(new Path(PATH_TO_HADOOP_CONF + "/hdfs-site.xml"));
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "target/integrationts/data");
    cleanupLocal();
    //this constructor is deprecated in hadoop 2x 
    //dfsCluster = new MiniDFSCluster(nameNodePort, conf, numDataNodes, true, true, StartupOption.REGULAR, null);
    miniCluster = new MiniYARNCluster("Asterix_testing", numDataNodes, 1, 1);
    miniCluster.init(conf);
    dfs = FileSystem.get(conf);
}

From source file:hdfs.jsr203.TestAttributes.java

License:Apache License

private static MiniDFSCluster startMini(String testName) throws IOException {
    File baseDir = new File("./target/hdfs/" + testName).getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    MiniDFSCluster hdfsCluster = builder.clusterId(testName).build();
    hdfsCluster.waitActive();
    return hdfsCluster;
}

From source file:hdfs.MiniHDFS.java

License:Apache License

public static void main(String[] args) throws Exception {
    if (args.length != 1) {
        throw new IllegalArgumentException("MiniHDFS <baseDirectory>");
    }
    // configure Paths
    Path baseDir = Paths.get(args[0]);
    // hadoop-home/, so logs will not complain
    if (System.getenv("HADOOP_HOME") == null) {
        Path hadoopHome = baseDir.resolve("hadoop-home");
        Files.createDirectories(hadoopHome);
        System.setProperty("hadoop.home.dir", hadoopHome.toAbsolutePath().toString());
    }
    // hdfs-data/, where any data is going
    Path hdfsHome = baseDir.resolve("hdfs-data");

    // start cluster
    Configuration cfg = new Configuration();
    cfg.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsHome.toAbsolutePath().toString());
    // lower default permission: TODO: needed?
    cfg.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "766");
    // TODO: remove hardcoded port!
    MiniDFSCluster dfs = new MiniDFSCluster.Builder(cfg).nameNodePort(9999).build();

    // write our PID file
    Path tmp = Files.createTempFile(baseDir, null, null);
    String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0];
    Files.write(tmp, pid.getBytes(StandardCharsets.UTF_8));
    Files.move(tmp, baseDir.resolve(PID_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);

    // write our port file
    tmp = Files.createTempFile(baseDir, null, null);
    Files.write(tmp, Integer.toString(dfs.getNameNodePort()).getBytes(StandardCharsets.UTF_8));
    Files.move(tmp, baseDir.resolve(PORT_FILE_NAME), StandardCopyOption.ATOMIC_MOVE);
}

From source file:io.amient.kafka.hadoop.testutils.SystemTestBase.java

License:Apache License

@Before
public void setUp() throws IOException, InterruptedException {
    dfsBaseDir = new File(TimestampExtractorSystemTest.class.getResource("/systemtest").getPath());

    //setup hadoop node
    embeddedClusterPath = new File(dfsBaseDir, "local-cluster");
    System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
    conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, embeddedClusterPath.getAbsolutePath());
    cluster = new MiniDFSCluster.Builder(conf).build();
    fs = FileSystem.get(conf);
    localFileSystem = FileSystem.getLocal(conf);

    //setup zookeeper
    embeddedZkPath = new File(dfsBaseDir, "local-zookeeper");
    // smaller testDir footprint, default zookeeper file blocks are 65535Kb
    System.getProperties().setProperty("zookeeper.preAllocSize", "64");
    zookeeper = new ZooKeeperServer(new File(embeddedZkPath, "snapshots"), new File(embeddedZkPath, "logs"),
            3000);
    zkFactory = new NIOServerCnxnFactory();
    zkFactory.configure(new InetSocketAddress(0), 10);
    zkConnect = "localhost:" + zkFactory.getLocalPort();
    System.out.println("starting local zookeeper at " + zkConnect);
    zkFactory.startup(zookeeper);

    //setup kafka
    System.out.println("starting local kafka broker...");

    embeddedKafkaPath = new File(dfsBaseDir, "local-kafka-logs");
    KafkaConfig kafkaConfig = new KafkaConfig(new Properties() {
        {
            put("broker.id", "1");
            put("host.name", "localhost");
            put("port", "0");
            put("log.dir", embeddedKafkaPath.toString());
            put("num.partitions", "2");
            put("auto.create.topics.enable", "true");
            put("zookeeper.connect", zkConnect);
        }
    });
    kafka = new KafkaServerStartable(kafkaConfig);
    kafka.startup();

    //dynamic kafka port allocation
    try (KafkaZkUtils tmpZkClient = new KafkaZkUtils(zkConnect, 30000, 6000)) {
        Broker broker = Broker.createBroker(1, tmpZkClient.getBrokerInfo(1));
        kafkaBootstrap = broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT).connectionString();
    }

    System.out.println("preparing simpleProducer..");
    simpleProducer = new Producer<>(new ProducerConfig(new Properties() {
        {
            put("metadata.broker.list", kafkaBootstrap);
            put("serializer.class", "kafka.serializer.StringEncoder");
            put("request.required.acks", "1");
        }
    }));

    System.out.println("system test setup complete");

}

From source file:io.druid.indexer.HdfsClasspathSetupTest.java

License:Apache License

@BeforeClass
public static void setupStatic() throws IOException, ClassNotFoundException {
    hdfsTmpDir = File.createTempFile("hdfsClasspathSetupTest", "dir");
    hdfsTmpDir.deleteOnExit();
    if (!hdfsTmpDir.delete()) {
        throw new IOException(String.format("Unable to delete hdfsTmpDir [%s]", hdfsTmpDir.getAbsolutePath()));
    }
    conf = new Configuration(true);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsTmpDir.getAbsolutePath());
    miniCluster = new MiniDFSCluster.Builder(conf).build();
}

From source file:io.druid.segment.loading.HdfsDataSegmentFinderTest.java

License:Apache License

@BeforeClass
public static void setupStatic() throws IOException {
    mapper.registerSubtypes(new NamedType(NumberedShardSpec.class, "numbered"));

    hdfsTmpDir = File.createTempFile("hdfsDataSource", "dir");
    if (!hdfsTmpDir.delete()) {
        throw new IOE("Unable to delete hdfsTmpDir [%s]", hdfsTmpDir.getAbsolutePath());
    }
    conf = new Configuration(true);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsTmpDir.getAbsolutePath());
    miniCluster = new MiniDFSCluster.Builder(conf).build();
    uriBase = miniCluster.getURI();
    fs = miniCluster.getFileSystem();
}

From source file:io.druid.segment.loading.HdfsDataSegmentPullerTest.java

License:Apache License

@BeforeClass
public static void setupStatic() throws IOException, ClassNotFoundException {
    hdfsTmpDir = File.createTempFile("hdfsHandlerTest", "dir");
    hdfsTmpDir.deleteOnExit();
    if (!hdfsTmpDir.delete()) {
        throw new IOException(String.format("Unable to delete hdfsTmpDir [%s]", hdfsTmpDir.getAbsolutePath()));
    }
    conf = new Configuration(true);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsTmpDir.getAbsolutePath());
    miniCluster = new MiniDFSCluster.Builder(conf).build();
    uriBase = miniCluster.getURI(0);

    final File tmpFile = File.createTempFile("hdfsHandlerTest", ".data");
    tmpFile.delete();
    try {
        tmpFile.deleteOnExit();
        Files.copy(new ByteArrayInputStream(pathByteContents), tmpFile.toPath());
        try (OutputStream stream = miniCluster.getFileSystem().create(filePath)) {
            Files.copy(tmpFile.toPath(), stream);
        }
    } finally {
        tmpFile.delete();
    }
}

From source file:io.druid.storage.hdfs.HdfsDataSegmentPullerTest.java

License:Apache License

@BeforeClass
public static void setupStatic() throws IOException {
    hdfsTmpDir = File.createTempFile("hdfsHandlerTest", "dir");
    if (!hdfsTmpDir.delete()) {
        throw new IOE("Unable to delete hdfsTmpDir [%s]", hdfsTmpDir.getAbsolutePath());
    }
    conf = new Configuration(true);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsTmpDir.getAbsolutePath());
    miniCluster = new MiniDFSCluster.Builder(conf).build();
    uriBase = miniCluster.getURI(0);

    final File tmpFile = File.createTempFile("hdfsHandlerTest", ".data");
    tmpFile.delete();
    try {
        Files.copy(new ByteArrayInputStream(pathByteContents), tmpFile.toPath());
        try (OutputStream stream = miniCluster.getFileSystem().create(filePath)) {
            Files.copy(tmpFile.toPath(), stream);
        }
    } finally {
        tmpFile.delete();
    }
}

From source file:io.pravega.local.LocalHDFSEmulator.java

License:Open Source License

public void start() throws IOException {
    baseDir = Files.createTempDirectory(baseDirName).toFile().getAbsoluteFile();
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    conf.setBoolean("dfs.permissions.enabled", true);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();
}

From source file:io.pravega.segmentstore.storage.impl.hdfs.HDFSClusterHelpers.java

License:Open Source License

/**
 * Creates a MiniDFSCluster at the given Path.
 *
 * @param path The path to create at.
 * @return A MiniDFSCluster.
 * @throws IOException If an Exception occurred.
 */
public static MiniDFSCluster createMiniDFSCluster(String path) throws IOException {
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, path);
    conf.setBoolean("dfs.permissions.enabled", true);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    return builder.build();
}