Usage examples for org.apache.hadoop.hdfs.MiniDFSCluster#getConfiguration

public Configuration getConfiguration(int nnIndex)

Returns the Configuration of the namenode at the given index (0 for a single-namenode cluster).
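A minimal sketch of typical usage, assuming the Hadoop minicluster test dependencies are on the classpath (the base directory path is a placeholder; adjust for your environment):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class GetConfigurationSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical base directory for the mini cluster's data; adjust as needed.
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/minidfs-sketch");
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            cluster.waitActive();
            // The configuration for namenode 0 carries the cluster's fs.defaultFS,
            // so it can bootstrap clients against the in-process HDFS.
            Configuration nnConf = cluster.getConfiguration(0);
            System.out.println("fs.defaultFS = " + nnConf.get("fs.defaultFS"));
            FileSystem fs = FileSystem.get(nnConf);
            System.out.println("Connected to " + fs.getUri());
        } finally {
            cluster.shutdown();
        }
    }
}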
From source file: org.apache.accumulo.core.conf.CredentialProviderFactoryShimTest.java
License: Apache License
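This Accumulo test uses getConfiguration(0) to obtain the single namenode's configuration, copies a JCEKS keystore into the mini cluster's HDFS, and verifies that the credential provider resolves the expected entries from it: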
@Test
public void extractFromHdfs() throws Exception {
    File target = new File(System.getProperty("user.dir"), "target");
    String prevValue = System.setProperty("test.build.data",
            new File(target, this.getClass().getName() + "_minidfs").toString());
    MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(new Configuration()).build();
    try {
        // Restore the original system property now that the cluster is built
        if (null != prevValue) {
            System.setProperty("test.build.data", prevValue);
        } else {
            System.clearProperty("test.build.data");
        }
        // One namenode, one configuration
        Configuration dfsConfiguration = dfsCluster.getConfiguration(0);
        Path destPath = new Path("/accumulo.jceks");
        FileSystem dfs = dfsCluster.getFileSystem();
        // Put the populated keystore in HDFS
        dfs.copyFromLocalFile(new Path(populatedKeyStore.toURI()), destPath);
        Configuration cpConf = CredentialProviderFactoryShim.getConfiguration(dfsConfiguration,
                "jceks://hdfs/accumulo.jceks");
        // The values in the keystore
        Map<String, String> expectations = new HashMap<>();
        expectations.put("key1", "value1");
        expectations.put("key2", "value2");
        checkCredentialProviders(cpConf, expectations);
    } finally {
        dfsCluster.shutdown();
    }
}
From source file: org.apache.hive.service.server.TestHS2ClearDanglingScratchDir.java
License: Apache License
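This Hive test merges the mini cluster's namenode configuration into a HiveConf via addResource, then verifies that HiveServer2's dangling-scratch-dir cleaner removes a faked dead session directory while leaving the live sessions untouched: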
@Test
public void testScratchDirCleared() throws Exception {
    MiniDFSCluster m_dfs = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).format(true).build();
    HiveConf conf = new HiveConf();
    conf.addResource(m_dfs.getConfiguration(0));
    if (Shell.WINDOWS) {
        WindowsPathUtil.convertPathsFromWindowsToHdfs(conf);
    }
    conf.set(HiveConf.ConfVars.HIVE_SCRATCH_DIR_LOCK.toString(), "true");
    conf.set(HiveConf.ConfVars.HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR.toString(), "true");

    Path scratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR));
    m_dfs.getFileSystem().mkdirs(scratchDir);
    m_dfs.getFileSystem().setPermission(scratchDir, new FsPermission("777"));

    // Fake two live sessions
    SessionState.start(conf);
    conf.setVar(HiveConf.ConfVars.HIVESESSIONID, UUID.randomUUID().toString());
    SessionState.start(conf);

    // Fake a dead session: a session directory whose inuse.lck holder is gone
    Path fakeSessionPath = new Path(new Path(scratchDir, Utils.getUGI().getShortUserName()),
            UUID.randomUUID().toString());
    m_dfs.getFileSystem().mkdirs(fakeSessionPath);
    m_dfs.getFileSystem().create(new Path(fakeSessionPath, "inuse.lck")).close();

    FileStatus[] scratchDirs = m_dfs.getFileSystem()
            .listStatus(new Path(scratchDir, Utils.getUGI().getShortUserName()));
    Assert.assertEquals(3, scratchDirs.length);

    HiveServer2.scheduleClearDanglingScratchDir(conf, 0);

    // Check that the dead session gets cleared
    long start = System.currentTimeMillis();
    long end;
    do {
        Thread.sleep(200);
        end = System.currentTimeMillis();
        if (end - start > 5000) {
            Assert.fail("timeout, scratch dir has not been cleared");
        }
        scratchDirs = m_dfs.getFileSystem()
                .listStatus(new Path(scratchDir, Utils.getUGI().getShortUserName()));
    } while (scratchDirs.length != 2);
}
From source file: org.trustedanalytics.auth.gateway.hdfs.integration.config.LocalConfiguration.java
License: Apache License
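This Spring @Bean method builds a MiniDFSCluster with permissions and ACLs enabled, creates two test users, and exposes the namenode configuration returned by getConfiguration(0) to the rest of the integration-test context: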
@Bean
@Qualifier(Qualifiers.CONFIGURATION)
public org.apache.hadoop.conf.Configuration initializeHdfsCluster()
        throws IOException, InterruptedException, URISyntaxException {
    File baseDir = new File("./target/hdfs/" + "testName").getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration(false);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
    MiniDFSCluster cluster = builder.build();
    UserGroupInformation.createUserForTesting("cf", new String[] { "cf" });
    UserGroupInformation.createUserForTesting("super", new String[] { "supergroup" });
    return cluster.getConfiguration(0);
}