Example usage for org.apache.hadoop.hdfs MiniDFSCluster PROP_TEST_BUILD_DATA

List of usage examples for org.apache.hadoop.hdfs MiniDFSCluster PROP_TEST_BUILD_DATA

Introduction

On this page you can find example usage for org.apache.hadoop.hdfs MiniDFSCluster PROP_TEST_BUILD_DATA.

Prototype

String PROP_TEST_BUILD_DATA

To view the source code for org.apache.hadoop.hdfs MiniDFSCluster PROP_TEST_BUILD_DATA, click the Source Link.

Document

System property to set the data dir:
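
Below is a minimal, self-contained sketch showing how this property is typically set to a writable build directory before a MiniDFSCluster is constructed. The class name, directory path, and the try/finally shutdown are illustrative assumptions, not taken from the examples on this page:

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsDataDirExample {
    public static void main(String[] args) throws Exception {
        // Illustrative path: point the mini cluster's data dir at a throwaway build folder.
        File dataDir = new File("target/minidfs-example").getAbsoluteFile();
        dataDir.mkdirs();
        System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, dataDir.getPath());

        // Build a single-node cluster; NameNode and DataNode data land under dataDir.
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
            FileSystem fs = cluster.getFileSystem();
            System.out.println("Mini cluster running at " + fs.getUri());
        } finally {
            cluster.shutdown();
        }
    }
}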

Usage

From source file:com.streamsets.pipeline.stage.destination.hdfs.TestBaseHdfsTarget.java

License:Apache License

@BeforeClass
public static void setUpClass() throws Exception {
    //setting some dummy kerberos settings to be able to test a mis-setting
    System.setProperty("java.security.krb5.realm", "foo");
    System.setProperty("java.security.krb5.kdc", "localhost:0");

    File minidfsDir = new File("target/minidfs").getAbsoluteFile();
    if (!minidfsDir.exists()) {
        Assert.assertTrue(minidfsDir.mkdirs());
    }
    Set<PosixFilePermission> set = new HashSet<PosixFilePermission>();
    set.add(PosixFilePermission.OWNER_EXECUTE);
    set.add(PosixFilePermission.OWNER_READ);
    set.add(PosixFilePermission.OWNER_WRITE);
    set.add(PosixFilePermission.OTHERS_READ);
    java.nio.file.Files.setPosixFilePermissions(minidfsDir.toPath(), set);
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".hosts", "*");
    conf.set("hadoop.proxyuser." + System.getProperty("user.name") + ".groups", "*");
    fooUgi = UserGroupInformation.createUserForTesting("foo", new String[] { "all" });
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(conf).build();
    miniDFS.getFileSystem().setPermission(new Path("/"), FsPermission.createImmutable((short) 0777));
}

From source file:com.streamsets.pipeline.stage.origin.hdfs.cluster.ClusterHDFSSourceIT.java

License:Apache License

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    minidfsDir = new File("target/minidfs-" + UUID.randomUUID()).getAbsoluteFile();
    minidfsDir.mkdirs();
    Assert.assertTrue(minidfsDir.exists());
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.namenode.fs-limits.min-block-size", String.valueOf(32));
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    dir = new Path(miniDFS.getURI() + "/dir");
    FileSystem fs = miniDFS.getFileSystem();
    fs.mkdirs(dir);
    writeFile(fs, new Path(dir + "/forAllTests/" + "path"), 1000);
    dummyEtc = new File(minidfsDir, "dummy-etc");
    dummyEtc.mkdirs();
    Assert.assertTrue(dummyEtc.exists());
    Configuration dummyConf = new Configuration(false);
    for (String file : new String[] { "core", "hdfs", "mapred", "yarn" }) {
        File siteXml = new File(dummyEtc, file + "-site.xml");
        FileOutputStream out = new FileOutputStream(siteXml);
        dummyConf.writeXml(out);
        out.close();
    }
    resourcesDir = minidfsDir.getAbsolutePath();
    hadoopConfDir = dummyEtc.getName();
    System.setProperty("sdc.resources.dir", resourcesDir);
}

From source file:com.streamsets.pipeline.stage.origin.hdfs.cluster.TestClusterHDFSSource.java

License:Apache License

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    File minidfsDir = new File("target/minidfs-" + UUID.randomUUID()).getAbsoluteFile();
    minidfsDir.mkdirs();
    Assert.assertTrue(minidfsDir.exists());
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.namenode.fs-limits.min-block-size", String.valueOf(32));
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    dir = new Path(miniDFS.getURI() + "/dir");
    FileSystem fs = miniDFS.getFileSystem();
    fs.mkdirs(dir);
    writeFile(fs, new Path(dir + "/forAllTests/" + "path"), 1000);
    dummyEtc = new File(minidfsDir, "dummy-etc");
    dummyEtc.mkdirs();
    Assert.assertTrue(dummyEtc.exists());
    Configuration dummyConf = new Configuration(false);
    for (String file : new String[] { "core", "hdfs", "mapred", "yarn" }) {
        File siteXml = new File(dummyEtc, file + "-site.xml");
        FileOutputStream out = new FileOutputStream(siteXml);
        dummyConf.writeXml(out);
        out.close();
    }
    resourcesDir = minidfsDir.getAbsolutePath();
    hadoopConfDir = dummyEtc.getName();
    System.setProperty("sdc.resources.dir", resourcesDir);
}

From source file:de.zib.sfs.StatisticsFileSystemContractTest.java

License:BSD License

protected StatisticsFileSystemContractTest(String fileSystemUri) {
    this.fileSystemPath = new Path(fileSystemUri);

    Configuration.addDefaultResource("hadoop/core-site.xml");
    this.conf = new HdfsConfiguration();
    this.conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, FileSystemContractBaseTest.TEST_UMASK);

    // most tests use tiny block sizes, so disable the minimum block size
    this.conf.set(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, "0");

    // set NameNode and DataNode directories
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
}

From source file:io.amient.kafka.hadoop.testutils.SystemTestBase.java

License:Apache License

@Before
public void setUp() throws IOException, InterruptedException {
    dfsBaseDir = new File(TimestampExtractorSystemTest.class.getResource("/systemtest").getPath());

    //setup hadoop node
    embeddedClusterPath = new File(dfsBaseDir, "local-cluster");
    System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
    conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, embeddedClusterPath.getAbsolutePath());
    cluster = new MiniDFSCluster.Builder(conf).build();
    fs = FileSystem.get(conf);
    localFileSystem = FileSystem.getLocal(conf);

    //setup zookeeper
    embeddedZkPath = new File(dfsBaseDir, "local-zookeeper");
    // smaller testDir footprint, default zookeeper file blocks are 65535Kb
    System.getProperties().setProperty("zookeeper.preAllocSize", "64");
    zookeeper = new ZooKeeperServer(new File(embeddedZkPath, "snapshots"), new File(embeddedZkPath, "logs"),
            3000);
    zkFactory = new NIOServerCnxnFactory();
    zkFactory.configure(new InetSocketAddress(0), 10);
    zkConnect = "localhost:" + zkFactory.getLocalPort();
    System.out.println("starting local zookeeper at " + zkConnect);
    zkFactory.startup(zookeeper);

    //setup kafka
    System.out.println("starting local kafka broker...");

    embeddedKafkaPath = new File(dfsBaseDir, "local-kafka-logs");
    KafkaConfig kafkaConfig = new KafkaConfig(new Properties() {
        {
            put("broker.id", "1");
            put("host.name", "localhost");
            put("port", "0");
            put("log.dir", embeddedKafkaPath.toString());
            put("num.partitions", "2");
            put("auto.create.topics.enable", "true");
            put("zookeeper.connect", zkConnect);
        }
    });
    kafka = new KafkaServerStartable(kafkaConfig);
    kafka.startup();

    //dynamic kafka port allocation
    try (KafkaZkUtils tmpZkClient = new KafkaZkUtils(zkConnect, 30000, 6000)) {
        Broker broker = Broker.createBroker(1, tmpZkClient.getBrokerInfo(1));
        kafkaBootstrap = broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT).connectionString();
    }

    System.out.println("preparing simpleProducer..");
    simpleProducer = new Producer<>(new ProducerConfig(new Properties() {
        {
            put("metadata.broker.list", kafkaBootstrap);
            put("serializer.class", "kafka.serializer.StringEncoder");
            put("request.required.acks", "1");
        }
    }));

    System.out.println("system test setup complete");

}

From source file:org.apache.metron.integration.components.MRComponent.java

License:Apache License

@Override
public void start() {
    configuration = new Configuration();
    System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
    configuration.set(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, "true");
    if (basePath == null) {
        throw new RuntimeException("Unable to start cluster: You must specify the basepath");
    }
    configuration.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, basePath.toString());
    try {
        cluster = new MiniDFSCluster.Builder(configuration).build();
    } catch (IOException e) {
        throw new RuntimeException("Unable to start cluster", e);
    }
}

From source file:org.apache.sentry.hdfs.TestSentryAuthorizationProvider.java

License:Apache License

@Before
public void setUp() throws Exception {
    admin = UserGroupInformation.createUserForTesting(System.getProperty("user.name"),
            new String[] { "supergroup" });
    admin.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
            Configuration conf = new HdfsConfiguration();
            conf.setBoolean("sentry.authorization-provider.include-hdfs-authz-as-acl", true);
            conf.set(DFSConfigKeys.DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY,
                    MockSentryAuthorizationProvider.class.getName());
            conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
            EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
            miniDFS = new MiniDFSCluster.Builder(conf).build();
            return null;
        }
    });
}

From source file:org.apache.sentry.hdfs.TestSentryINodeAttributesProvider.java

License:Apache License

@Before
public void setUp() throws Exception {
    admin = UserGroupInformation.createUserForTesting(System.getProperty("user.name"),
            new String[] { "supergroup" });
    admin.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
            Configuration conf = new HdfsConfiguration();
            conf.setBoolean("sentry.authorization-provider.include-hdfs-authz-as-acl", true);
            conf.set(DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY,
                    MockSentryINodeAttributesProvider.class.getName());
            conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
            EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
            miniDFS = new MiniDFSCluster.Builder(conf).build();
            return null;
        }
    });
}

From source file:org.apache.sentry.tests.e2e.hdfs.TestHDFSIntegration.java

License:Apache License

private static void startDFSandYARN() throws IOException, InterruptedException {
    adminUgi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
            hadoopConf = new HdfsConfiguration();
            hadoopConf.set(DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY,
                    SentryINodeAttributesProvider.class.getName());
            hadoopConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
            hadoopConf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
            File dfsDir = assertCreateDir(new File(baseDir, "dfs"));
            hadoopConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath());
            hadoopConf.set("hadoop.security.group.mapping", MiniDFS.PseudoGroupMappingService.class.getName());
            Configuration.addDefaultResource("test.xml");

            hadoopConf.set("sentry.authorization-provider.hdfs-path-prefixes", MANAGED_PREFIXES);
            hadoopConf.set("sentry.authorization-provider.cache-refresh-retry-wait.ms", "5000");
            hadoopConf.set("sentry.authorization-provider.cache-refresh-interval.ms",
                    String.valueOf(CACHE_REFRESH));

            hadoopConf.set("sentry.authorization-provider.cache-stale-threshold.ms",
                    String.valueOf(STALE_THRESHOLD));

            hadoopConf.set("sentry.hdfs.service.security.mode", "none");
            hadoopConf.set("sentry.hdfs.service.client.server.rpc-addresses", "localhost");
            hadoopConf.set("sentry.hdfs.service.client.server.rpc-port", String.valueOf(sentryPort));
            EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
            miniDFS = new MiniDFSCluster.Builder(hadoopConf).build();
            Path tmpPath = new Path("/tmp");
            Path hivePath = new Path("/user/hive");
            Path warehousePath = new Path(hivePath, "warehouse");
            miniDFS.getFileSystem().mkdirs(warehousePath);
            boolean directory = miniDFS.getFileSystem().isDirectory(warehousePath);
            LOGGER.info("\n\n Is dir :" + directory + "\n\n");
            LOGGER.info("\n\n DefaultFS :" + miniDFS.getFileSystem().getUri() + "\n\n");
            fsURI = miniDFS.getFileSystem().getUri().toString();
            hadoopConf.set("fs.defaultFS", fsURI);

            // Create Yarn cluster
            // miniMR = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);

            miniDFS.getFileSystem().mkdirs(tmpPath);
            miniDFS.getFileSystem().setPermission(tmpPath, FsPermission.valueOf("drwxrwxrwx"));
            miniDFS.getFileSystem().setOwner(hivePath, "hive", "hive");
            miniDFS.getFileSystem().setOwner(warehousePath, "hive", "hive");
            LOGGER.info("\n\n Owner :" + miniDFS.getFileSystem().getFileStatus(warehousePath).getOwner() + ", "
                    + miniDFS.getFileSystem().getFileStatus(warehousePath).getGroup() + "\n\n");
            LOGGER.info("\n\n Owner tmp :" + miniDFS.getFileSystem().getFileStatus(tmpPath).getOwner() + ", "
                    + miniDFS.getFileSystem().getFileStatus(tmpPath).getGroup() + ", "
                    + miniDFS.getFileSystem().getFileStatus(tmpPath).getPermission() + ", " + "\n\n");

            int dfsSafeCheckRetry = 30;
            boolean hasStarted = false;
            for (int i = dfsSafeCheckRetry; i > 0; i--) {
                if (!miniDFS.getFileSystem().isInSafeMode()) {
                    hasStarted = true;
                    LOGGER.info("HDFS safemode check num times : " + (31 - i));
                    break;
                }
            }
            if (!hasStarted) {
                throw new RuntimeException("HDFS hasnt exited safe mode yet..");
            }

            return null;
        }
    });
}

From source file:org.talend.components.test.MiniDfsResource.java

License:Open Source License

/**
 * @return The hadoop FileSystem pointing to the simulated cluster.
 */
public FileSystem getFs() throws IOException {
    // Lazily create the MiniDFSCluster on first use.
    if (miniHdfs == null) {
        System.setProperty(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, newFolder("base").getAbsolutePath());
        System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, newFolder("build").getAbsolutePath());
        miniHdfs = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).format(true).racks(null)
                .build();
        miniHdfs.waitActive();
        fs = miniHdfs.getFileSystem();
    }
    return fs;
}