Example usage for org.apache.hadoop.hdfs MiniDFSCluster HDFS_MINIDFS_BASEDIR

Introduction

On this page you can find example usages of the org.apache.hadoop.hdfs MiniDFSCluster field HDFS_MINIDFS_BASEDIR.

Prototype

String HDFS_MINIDFS_BASEDIR

Document

Configuration option to set the data dir used by the mini cluster.

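In practice the option is set on the Configuration that is passed to the MiniDFSCluster.Builder. A minimal, self-contained sketch of that pattern (the class name and temporary-directory prefix are illustrative, not taken from any of the sources below):

import java.io.File;
import java.nio.file.Files;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsBaseDirExample {
    public static void main(String[] args) throws Exception {
        // Any writable temporary directory can serve as the base dir.
        File baseDir = Files.createTempDirectory("minidfs").toFile();

        Configuration conf = new Configuration();
        // Point the mini cluster at the chosen directory instead of the
        // default location derived from the "test.build.data" system property.
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
            System.out.println("NameNode URI: " + cluster.getURI());
        } finally {
            cluster.shutdown();
        }
    }
}
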
Usage

From source file: org.apache.flink.yarn.highavailability.YarnIntraNonHaMasterServicesTest.java

License: Apache License

@BeforeClass
public static void createHDFS() throws Exception {
    final File tempDir = TEMP_DIR.newFolder();

    org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
    hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tempDir.getAbsolutePath());

    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
    HDFS_CLUSTER = builder.build();
    HDFS_ROOT_PATH = new Path(HDFS_CLUSTER.getURI());
}
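
None of the @BeforeClass examples on this page show their matching teardown; a minimal sketch, assuming HDFS_CLUSTER is the field initialized above:

@AfterClass
public static void destroyHDFS() {
    if (HDFS_CLUSTER != null) {
        HDFS_CLUSTER.shutdown();
    }
}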

From source file: org.apache.flink.yarn.YarnFileStageTest.java

License: Apache License

@BeforeClass
public static void createHDFS() throws Exception {
    Assume.assumeTrue(!OperatingSystem.isWindows());

    final File tempDir = CLASS_TEMP_DIR.newFolder();

    org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
    hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, tempDir.getAbsolutePath());

    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
    hdfsCluster = builder.build();
    hdfsRootPath = new Path(hdfsCluster.getURI());
}

From source file: org.apache.metron.integration.components.MRComponent.java

License: Apache License

@Override
public void start() {
    configuration = new Configuration();
    System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
    configuration.set(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, "true");
    if (basePath == null) {
        throw new RuntimeException("Unable to start cluster: You must specify the basepath");
    }
    configuration.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, basePath.toString());
    try {
        cluster = new MiniDFSCluster.Builder(configuration).build();
    } catch (IOException e) {
        throw new RuntimeException("Unable to start cluster", e);
    }
}
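
This example also clears the "test.build.data" system property (MiniDFSCluster.PROP_TEST_BUILD_DATA), so only the configuration key controls where the cluster writes its data: the builder reads HDFS_MINIDFS_BASEDIR from the Configuration first and falls back to the system property. A side-by-side sketch of the two knobs (the paths are illustrative):

// Per-Configuration knob: consulted first when building the cluster.
conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-example");

// JVM-wide fallback: the "test.build.data" system property.
System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");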

From source file: org.apache.metron.management.FileSystemFunctionsTest.java

License: Apache License

@Before
public void setup() throws IOException {
    if (type == FileSystemFunctions.FS_TYPE.HDFS) {
        baseDir = Files.createTempDirectory("test_hdfs").toFile().getAbsoluteFile();
        Configuration conf = new Configuration();
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
        MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
        hdfsCluster = builder.build();

        fsGetter = () -> hdfsCluster.getFileSystem();
        prefix = "/";
    } else {
        fsGetter = FileSystemFunctions.FS_TYPE.LOCAL;
        prefix = "target/fsTest/";
        if (new File(prefix).exists()) {
            new File(prefix).delete();
        }
        new File(prefix).mkdirs();
    }

    get = new FileSystemFunctions.FileSystemGet(fsGetter);
    get.initialize(null);
    getList = new FileSystemFunctions.FileSystemGetList(fsGetter);
    getList.initialize(null);
    ls = new FileSystemFunctions.FileSystemLs(fsGetter);
    ls.initialize(null);
    put = new FileSystemFunctions.FileSystemPut(fsGetter);
    put.initialize(null);
    rm = new FileSystemFunctions.FileSystemRm(fsGetter);
    rm.initialize(null);
}

From source file: org.apache.sentry.binding.hive.TestSentryIniPolicyFileFormatter.java

License: Apache License

@BeforeClass
public static void setup() throws IOException {
    baseDir = Files.createTempDir();
    Assert.assertNotNull(baseDir);
    File dfsDir = new File(baseDir, "dfs");
    assertTrue(dfsDir.isDirectory() || dfsDir.mkdirs());
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath());
    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    fileSystem = dfsCluster.getFileSystem();
    root = new Path(fileSystem.getUri().toString());
    etc = new Path(root, "/etc");
    fileSystem.mkdirs(etc);
    prepareTestData();
}

From source file: org.apache.sentry.policy.db.TestSimpleDBPolicyEngineDFS.java

License: Apache License

@BeforeClass
public static void setupLocalClazz() throws IOException {
    File baseDir = getBaseDir();
    Assert.assertNotNull(baseDir);
    File dfsDir = new File(baseDir, "dfs");
    Assert.assertTrue(dfsDir.isDirectory() || dfsDir.mkdirs());
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath());
    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    fileSystem = dfsCluster.getFileSystem();
    root = new Path(fileSystem.getUri().toString());
    etc = new Path(root, "/etc");
    fileSystem.mkdirs(etc);
}

From source file: org.apache.sentry.tests.e2e.AbstractTestWithStaticDFS.java

License: Apache License

@BeforeClass
public static void setupTestWithStaticDFS() throws Exception {
    Configuration conf = new Configuration();
    File dfsDir = assertCreateDir(new File(baseDir, "dfs"));
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath());
    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    fileSystem = dfsCluster.getFileSystem();
    dfsBaseDir = assertCreateDfsDir(new Path(new Path(fileSystem.getUri()), "/base"));
    hiveServer = HiveServerFactory.create(properties, baseDir, confDir, logDir, policyFile, fileSystem);
    hiveServer.start();
}

From source file: org.apache.sentry.tests.e2e.hdfs.TestHDFSIntegration.java

License: Apache License

private static void startDFSandYARN() throws IOException, InterruptedException {
    adminUgi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
            hadoopConf = new HdfsConfiguration();
            hadoopConf.set(DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY,
                    SentryINodeAttributesProvider.class.getName());
            hadoopConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
            hadoopConf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
            File dfsDir = assertCreateDir(new File(baseDir, "dfs"));
            hadoopConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath());
            hadoopConf.set("hadoop.security.group.mapping", MiniDFS.PseudoGroupMappingService.class.getName());
            Configuration.addDefaultResource("test.xml");

            hadoopConf.set("sentry.authorization-provider.hdfs-path-prefixes", MANAGED_PREFIXES);
            hadoopConf.set("sentry.authorization-provider.cache-refresh-retry-wait.ms", "5000");
            hadoopConf.set("sentry.authorization-provider.cache-refresh-interval.ms",
                    String.valueOf(CACHE_REFRESH));

            hadoopConf.set("sentry.authorization-provider.cache-stale-threshold.ms",
                    String.valueOf(STALE_THRESHOLD));

            hadoopConf.set("sentry.hdfs.service.security.mode", "none");
            hadoopConf.set("sentry.hdfs.service.client.server.rpc-addresses", "localhost");
            hadoopConf.set("sentry.hdfs.service.client.server.rpc-port", String.valueOf(sentryPort));
            EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
            miniDFS = new MiniDFSCluster.Builder(hadoopConf).build();
            Path tmpPath = new Path("/tmp");
            Path hivePath = new Path("/user/hive");
            Path warehousePath = new Path(hivePath, "warehouse");
            miniDFS.getFileSystem().mkdirs(warehousePath);
            boolean directory = miniDFS.getFileSystem().isDirectory(warehousePath);
            LOGGER.info("\n\n Is dir :" + directory + "\n\n");
            LOGGER.info("\n\n DefaultFS :" + miniDFS.getFileSystem().getUri() + "\n\n");
            fsURI = miniDFS.getFileSystem().getUri().toString();
            hadoopConf.set("fs.defaultFS", fsURI);

            // Create Yarn cluster
            // miniMR = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);

            miniDFS.getFileSystem().mkdirs(tmpPath);
            miniDFS.getFileSystem().setPermission(tmpPath, FsPermission.valueOf("drwxrwxrwx"));
            miniDFS.getFileSystem().setOwner(hivePath, "hive", "hive");
            miniDFS.getFileSystem().setOwner(warehousePath, "hive", "hive");
            LOGGER.info("\n\n Owner :" + miniDFS.getFileSystem().getFileStatus(warehousePath).getOwner() + ", "
                    + miniDFS.getFileSystem().getFileStatus(warehousePath).getGroup() + "\n\n");
            LOGGER.info("\n\n Owner tmp :" + miniDFS.getFileSystem().getFileStatus(tmpPath).getOwner() + ", "
                    + miniDFS.getFileSystem().getFileStatus(tmpPath).getGroup() + ", "
                    + miniDFS.getFileSystem().getFileStatus(tmpPath).getPermission() + ", " + "\n\n");

            int dfsSafeCheckRetry = 30;
            boolean hasStarted = false;
            for (int i = dfsSafeCheckRetry; i > 0; i--) {
                if (!miniDFS.getFileSystem().isInSafeMode()) {
                    hasStarted = true;
                    LOGGER.info("HDFS safemode check num times : " + (31 - i));
                    break;
                }
            }
            if (!hasStarted) {
                throw new RuntimeException("HDFS hasnt exited safe mode yet..");
            }

            return null;
        }
    });
}
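
Note that the safe-mode wait above performs its 30 checks back to back, with no pause between retries. A hedged variant with a delay between checks (the one-second interval is an arbitrary choice, not from the original source):

for (int i = 0; i < 30 && miniDFS.getFileSystem().isInSafeMode(); i++) {
    Thread.sleep(1000); // pause between safe-mode checks
}
if (miniDFS.getFileSystem().isInSafeMode()) {
    throw new RuntimeException("HDFS has not exited safe mode yet");
}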

From source file: org.apache.sentry.tests.e2e.hive.fs.MiniDFS.java

License: Apache License

private void createMiniDFSCluster(File baseDir, String serverType, boolean enableHDFSAcls) throws Exception {
    Configuration conf = new Configuration();
    if (HiveServer2Type.InternalMetastore.name().equalsIgnoreCase(serverType)) {
        // set the test group mapping that maps user to a group of same name
        conf.set("hadoop.security.group.mapping",
                "org.apache.sentry.tests.e2e.hive.fs.MiniDFS$PseudoGroupMappingService");
        // set umask for metastore test client can create tables in the warehouse dir
        conf.set("fs.permissions.umask-mode", "000");
        Groups.getUserToGroupsMappingServiceWithLoadedConfiguration(conf);
    }
    File dfsDir = assertCreateDir(new File(baseDir, "dfs"));
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath());
    conf.set("hadoop.security.group.mapping", MiniDFS.PseudoGroupMappingService.class.getName());
    if (enableHDFSAcls) {
        conf.set("dfs.namenode.acls.enabled", "true");
    }
    Configuration.addDefaultResource("test.xml");
    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    fileSystem = dfsCluster.getFileSystem();
    String policyDir = System.getProperty("sentry.e2etest.hive.policy.location", "/user/hive/sentry");
    sentryDir = super.assertCreateDfsDir(new Path(fileSystem.getUri() + policyDir));
    dfsBaseDir = assertCreateDfsDir(new Path(new Path(fileSystem.getUri()), "/base"));
}

From source file: org.apache.storm.hdfs.bolt.AvroGenericRecordBoltTest.java

License: Apache License

@Before
public void setup() throws Exception {
    MockitoAnnotations.initMocks(this);
    Configuration conf = new Configuration();
    conf.set("fs.trash.interval", "10");
    conf.setBoolean("dfs.permissions", true);
    File baseDir = new File("./target/hdfs/").getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());

    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    hdfsCluster = builder.build();
    fs = hdfsCluster.getFileSystem();
    hdfsURI = fs.getUri() + "/";
}