Example usage for org.apache.hadoop.hdfs MiniDFSCluster HDFS_MINIDFS_BASEDIR

List of usage examples for org.apache.hadoop.hdfs MiniDFSCluster HDFS_MINIDFS_BASEDIR

Introduction

On this page you can find example usages of org.apache.hadoop.hdfs MiniDFSCluster HDFS_MINIDFS_BASEDIR.

Prototype

String HDFS_MINIDFS_BASEDIR

Document

Configuration option to set the data dir used by the MiniDFSCluster.
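
Before the project excerpts below, here is a minimal, self-contained sketch of the pattern most of them share: set HDFS_MINIDFS_BASEDIR to a test-specific directory before building the cluster, so the mini cluster's storage is created there instead of the default location under build/test/data. The class name MiniDfsBaseDirExample and the path target/minidfs-example are illustrative choices, not taken from any of the projects below.

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsBaseDirExample {

    public static void main(String[] args) throws Exception {
        // Illustrative base directory; any writable path works.
        File baseDir = new File("target/minidfs-example").getAbsoluteFile();
        FileUtil.fullyDelete(baseDir);

        Configuration conf = new Configuration();
        // The mini cluster places its NameNode and DataNode storage under this directory.
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            cluster.waitActive();
            FileSystem fs = cluster.getFileSystem();
            fs.mkdirs(new Path("/example"));
            System.out.println("Mini cluster is up at " + fs.getUri());
        } finally {
            cluster.shutdown();
        }
    }
}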

Usage

From source file:org.springframework.yarn.test.support.StandaloneYarnCluster.java

License:Apache License

@Override
public void start() throws IOException {
    log.info("Checking if cluster=" + clusterName + " needs to be started");
    synchronized (this.startupShutdownMonitor) {
        if (started) {
            return;
        }
        log.info("Starting cluster=" + clusterName);
        configuration = new YarnConfiguration();
        configuration.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "target/" + clusterName + "-dfs");

        dfsCluster = new MiniDFSCluster.Builder(configuration).numDataNodes(nodes).build();

        yarnCluster = new MiniYARNCluster(clusterName, nodes, 1, 1);
        yarnCluster.init(configuration);
        yarnCluster.start();

        log.info("Started cluster=" + clusterName);
        started = true;
    }
}

From source file:org.talend.components.test.MiniDfsResource.java

License:Open Source License

/**
 * @return The hadoop FileSystem pointing to the simulated cluster.
 */
public FileSystem getFs() throws IOException {
    // Lazily create the MiniDFSCluster on first use.
    if (miniHdfs == null) {
        System.setProperty(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, newFolder("base").getAbsolutePath());
        System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, newFolder("build").getAbsolutePath());
        miniHdfs = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).format(true).racks(null)
                .build();
        miniHdfs.waitActive();
        fs = miniHdfs.getFileSystem();
    }
    return fs;
}

From source file:org.testifyproject.resource.hdfs.MiniDFSResource.java

License:Apache License

@Override
public HdfsConfiguration configure(TestContext testContext, LocalResource localResource,
        PropertiesReader configReader) {
    String testName = testContext.getName();
    String hdfsDirectory = fileSystemUtil.createPath("target", "hdfs", testName);
    HdfsConfiguration configuration = new HdfsConfiguration();
    configuration.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsDirectory);

    return configuration;
}

From source file:org.testifyproject.resource.hdfs.MiniDFSResource.java

License:Apache License

@Override
public LocalResourceInstance<MiniDFSCluster, DistributedFileSystem> start(TestContext testContext,
        LocalResource localResource, HdfsConfiguration config) throws Exception {
    String hdfsDirectory = config.get(MiniDFSCluster.HDFS_MINIDFS_BASEDIR);
    fileSystemUtil.recreateDirectory(hdfsDirectory);
    config.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsDirectory);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(config);
    hdfsCluster = builder.build();
    fileSystem = hdfsCluster.getFileSystem();

    return LocalResourceInstanceBuilder.builder().resource(hdfsCluster).client(fileSystem).build("hdfs",
            localResource);

}

From source file:org.trustedanalytics.auth.gateway.hdfs.integration.config.LocalConfiguration.java

License:Apache License

@Bean
@Qualifier(Qualifiers.CONFIGURATION)
public org.apache.hadoop.conf.Configuration initializeHdfsCluster()
        throws IOException, InterruptedException, URISyntaxException {
    File baseDir = new File("./target/hdfs/" + "testName").getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration(false);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    MiniDFSCluster cluster = builder.build();

    UserGroupInformation.createUserForTesting("cf", new String[] { "cf" });
    UserGroupInformation.createUserForTesting("super", new String[] { "supergroup" });

    return cluster.getConfiguration(0);
}

From source file:org.trustedanalytics.cfbroker.store.hdfs.service.SimpleHdfsClientTest.java

License:Apache License

@BeforeClass
public static void initialize() throws IOException {
    File baseDir = new File("./target/hdfs/" + "testName").getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    Configuration conf = new Configuration(false);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    cluster = builder.build();
    cluster.waitClusterUp();
}

From source file:org.trustedanalytics.utils.hdfs.TestHdfsConfigFactory.java

License:Apache License

@Bean
@Profile("embedded")
public HdfsConfig configEmbedded() throws IOException, LoginException {
    String tmpDir = createTmpDir();
    Configuration config = new Configuration(false);
    config.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tmpDir);
    FileSystem fileSystem = new MiniDFSCluster.Builder(config).build().getFileSystem();

    return createConfig(fileSystem, tmpDir, "hdfs", config);
}

From source file:org.zuinnote.hadoop.office.example.MapReduceExcelInputIntegrationTest.java

License:Apache License

@BeforeAll
public static void oneTimeSetUp() throws IOException {
    // Create a temporary directory to serve as the HDFS base dir
    tmpPath = Files.createTempDirectory(tmpPrefix);
    // Register a shutdown hook that removes the temp files (the HDFS MiniCluster data) after shutdown;
    // this may need rethinking to avoid creating many threads
    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                Files.walkFileTree(tmpPath, new SimpleFileVisitor<java.nio.file.Path>() {

                    @Override
                    public FileVisitResult visitFile(java.nio.file.Path file, BasicFileAttributes attrs)
                            throws IOException {
                        Files.delete(file);
                        return FileVisitResult.CONTINUE;
                    }

                    @Override
                    public FileVisitResult postVisitDirectory(java.nio.file.Path dir, IOException e)
                            throws IOException {
                        if (e == null) {
                            Files.delete(dir);
                            return FileVisitResult.CONTINUE;
                        }
                        throw e;
                    }
                });
            } catch (IOException e) {
                throw new RuntimeException(
                        "Error temporary files in following path could not be deleted " + tmpPath, e);
            }
        }
    }));
    // Create Configuration
    Configuration conf = new Configuration();
    // create HDFS cluster
    File baseDir = new File(tmpPath.toString()).getAbsoluteFile();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    dfsCluster = builder.numDataNodes(NOOFDATANODES).build();
    // create Yarn cluster
    YarnConfiguration clusterConf = new YarnConfiguration(conf);
    conf.set("fs.defaultFS", dfsCluster.getFileSystem().getUri().toString());
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 64);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, FifoScheduler.class, ResourceScheduler.class);
    miniCluster = new MiniMRYarnCluster(CLUSTERNAME, NOOFNODEMANAGERS, STARTTIMELINESERVER);
    miniCluster.init(conf);
    miniCluster.start();
}