Example usage for org.apache.hadoop.hdfs.server.namenode EditLogFileOutputStream setShouldSkipFsyncForTesting

Introduction

On this page you can find example usages of org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream#setShouldSkipFsyncForTesting.

Prototype

@VisibleForTesting
public static void setShouldSkipFsyncForTesting(boolean skip) 

Document

For the purposes of unit tests, we don't need to actually write durably to disk.
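
A minimal, self-contained sketch of the common pattern (the class and field names here are illustrative, not taken from any of the examples below): disable fsync before the MiniDFSCluster is built, so the NameNode never syncs its edit log to disk during the test.

import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.junit.BeforeClass;

public class MiniDfsTestBase {
    private static MiniDFSCluster miniDFS;

    @BeforeClass
    public static void startCluster() throws Exception {
        // Skip fsync on every edit-log flush; durability is irrelevant in a
        // throwaway test cluster, and skipping it speeds up startup.
        EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
        miniDFS = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
        miniDFS.waitActive();
    }
}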

Usage

From source file:com.streamsets.pipeline.stage.origin.hdfs.cluster.TestClusterHDFSSource.java

License:Apache License

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    File minidfsDir = new File("target/minidfs-" + UUID.randomUUID()).getAbsoluteFile();
    minidfsDir.mkdirs();
    Assert.assertTrue(minidfsDir.exists());
    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, minidfsDir.getPath());
    Configuration conf = new HdfsConfiguration();
    conf.set("dfs.namenode.fs-limits.min-block-size", String.valueOf(32));
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    dir = new Path(miniDFS.getURI() + "/dir");
    FileSystem fs = miniDFS.getFileSystem();
    fs.mkdirs(dir);
    writeFile(fs, new Path(dir + "/forAllTests/" + "path"), 1000);
    dummyEtc = new File(minidfsDir, "dummy-etc");
    dummyEtc.mkdirs();
    Assert.assertTrue(dummyEtc.exists());
    Configuration dummyConf = new Configuration(false);
    for (String file : new String[] { "core", "hdfs", "mapred", "yarn" }) {
        File siteXml = new File(dummyEtc, file + "-site.xml");
        FileOutputStream out = new FileOutputStream(siteXml);
        dummyConf.writeXml(out);
        out.close();
    }
    resourcesDir = minidfsDir.getAbsolutePath();
    hadoopConfDir = dummyEtc.getName();
    System.setProperty("sdc.resources.dir", resourcesDir);
}

From source file:org.apache.sentry.hdfs.TestSentryAuthorizationProvider.java

License:Apache License

@Before
public void setUp() throws Exception {
    admin = UserGroupInformation.createUserForTesting(System.getProperty("user.name"),
            new String[] { "supergroup" });
    admin.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
            Configuration conf = new HdfsConfiguration();
            conf.setBoolean("sentry.authorization-provider.include-hdfs-authz-as-acl", true);
            conf.set(DFSConfigKeys.DFS_NAMENODE_AUTHORIZATION_PROVIDER_KEY,
                    MockSentryAuthorizationProvider.class.getName());
            conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
            EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
            miniDFS = new MiniDFSCluster.Builder(conf).build();
            return null;
        }
    });
}

From source file:org.apache.sentry.hdfs.TestSentryINodeAttributesProvider.java

License:Apache License

@Before
public void setUp() throws Exception {
    admin = UserGroupInformation.createUserForTesting(System.getProperty("user.name"),
            new String[] { "supergroup" });
    admin.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
            Configuration conf = new HdfsConfiguration();
            conf.setBoolean("sentry.authorization-provider.include-hdfs-authz-as-acl", true);
            conf.set(DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY,
                    MockSentryINodeAttributesProvider.class.getName());
            conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
            EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
            miniDFS = new MiniDFSCluster.Builder(conf).build();
            return null;
        }
    });
}

From source file:org.apache.sentry.tests.e2e.hdfs.TestHDFSIntegration.java

License:Apache License

private static void startDFSandYARN() throws IOException, InterruptedException {
    adminUgi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, "target/test/data");
            hadoopConf = new HdfsConfiguration();
            hadoopConf.set(DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY,
                    SentryINodeAttributesProvider.class.getName());
            hadoopConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
            hadoopConf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
            File dfsDir = assertCreateDir(new File(baseDir, "dfs"));
            hadoopConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath());
            hadoopConf.set("hadoop.security.group.mapping", MiniDFS.PseudoGroupMappingService.class.getName());
            Configuration.addDefaultResource("test.xml");

            hadoopConf.set("sentry.authorization-provider.hdfs-path-prefixes", MANAGED_PREFIXES);
            hadoopConf.set("sentry.authorization-provider.cache-refresh-retry-wait.ms", "5000");
            hadoopConf.set("sentry.authorization-provider.cache-refresh-interval.ms",
                    String.valueOf(CACHE_REFRESH));

            hadoopConf.set("sentry.authorization-provider.cache-stale-threshold.ms",
                    String.valueOf(STALE_THRESHOLD));

            hadoopConf.set("sentry.hdfs.service.security.mode", "none");
            hadoopConf.set("sentry.hdfs.service.client.server.rpc-addresses", "localhost");
            hadoopConf.set("sentry.hdfs.service.client.server.rpc-port", String.valueOf(sentryPort));
            EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
            miniDFS = new MiniDFSCluster.Builder(hadoopConf).build();
            Path tmpPath = new Path("/tmp");
            Path hivePath = new Path("/user/hive");
            Path warehousePath = new Path(hivePath, "warehouse");
            miniDFS.getFileSystem().mkdirs(warehousePath);
            boolean directory = miniDFS.getFileSystem().isDirectory(warehousePath);
            LOGGER.info("\n\n Is dir :" + directory + "\n\n");
            LOGGER.info("\n\n DefaultFS :" + miniDFS.getFileSystem().getUri() + "\n\n");
            fsURI = miniDFS.getFileSystem().getUri().toString();
            hadoopConf.set("fs.defaultFS", fsURI);

            // Create Yarn cluster
            // miniMR = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);

            miniDFS.getFileSystem().mkdirs(tmpPath);
            miniDFS.getFileSystem().setPermission(tmpPath, FsPermission.valueOf("drwxrwxrwx"));
            miniDFS.getFileSystem().setOwner(hivePath, "hive", "hive");
            miniDFS.getFileSystem().setOwner(warehousePath, "hive", "hive");
            LOGGER.info("\n\n Owner :" + miniDFS.getFileSystem().getFileStatus(warehousePath).getOwner() + ", "
                    + miniDFS.getFileSystem().getFileStatus(warehousePath).getGroup() + "\n\n");
            LOGGER.info("\n\n Owner tmp :" + miniDFS.getFileSystem().getFileStatus(tmpPath).getOwner() + ", "
                    + miniDFS.getFileSystem().getFileStatus(tmpPath).getGroup() + ", "
                    + miniDFS.getFileSystem().getFileStatus(tmpPath).getPermission() + ", " + "\n\n");

            int dfsSafeCheckRetry = 30;
            boolean hasStarted = false;
            for (int i = dfsSafeCheckRetry; i > 0; i--) {
                if (!miniDFS.getFileSystem().isInSafeMode()) {
                    hasStarted = true;
                    LOGGER.info("HDFS safemode check num times : " + (31 - i));
                    break;
                }
                Thread.sleep(1000); // wait a second before re-checking safe mode
            }
            if (!hasStarted) {
                throw new RuntimeException("HDFS hasn't exited safe mode yet.");
            }

            return null;
        }
    });
}

From source file:org.apache.tez.analyzer.TestAnalyzer.java

License:Apache License

@BeforeClass
public static void setupClass() throws Exception {
    conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, false);
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
    fs = dfsCluster.getFileSystem();
    conf.set("fs.defaultFS", fs.getUri().toString());

    setupTezCluster();
}

From source file:org.apache.tez.history.TestATSFileParser.java

License:Apache License

@BeforeClass
public static void setupCluster() throws Exception {
    conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, false);
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
    miniDFSCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
    fs = miniDFSCluster.getFileSystem();
    conf.set("fs.defaultFS", fs.getUri().toString());

    setupTezCluster();
}

From source file:org.apache.tez.test.TestPipelinedShuffle.java

License:Apache License

@BeforeClass
public static void setupDFSCluster() throws Exception {
    conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, false);
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
    miniDFSCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
    fs = miniDFSCluster.getFileSystem();
    conf.set("fs.defaultFS", fs.getUri().toString());
    conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_OPTIMIZE_LOCAL_FETCH, false);
}

From source file:org.apache.tez.test.TestSecureShuffle.java

License:Apache License

@BeforeClass
public static void setupDFSCluster() throws Exception {
    conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, false);
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR);
    miniDFSCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
    fs = miniDFSCluster.getFileSystem();
    conf.set("fs.defaultFS", fs.getUri().toString());
}