List of usage examples for org.apache.hadoop.hdfs.MiniDFSCluster.shutdown()
public void shutdown()
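All of the examples below follow the same basic pattern: build a MiniDFSCluster with MiniDFSCluster.Builder, exercise the in-process HDFS instance, and call shutdown() in a finally block so the embedded NameNode and DataNodes are torn down even if the test body throws. A minimal, self-contained sketch of that pattern, assuming a single-DataNode cluster and an illustrative path /demo/hello (not taken from any of the source files below):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDFSClusterShutdownExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Start an in-process HDFS cluster with one DataNode.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            cluster.waitActive();
            FileSystem fs = cluster.getFileSystem();
            // Write a small file just to exercise the cluster.
            try (FSDataOutputStream out = fs.create(new Path("/demo/hello"))) {
                out.writeBytes("hello");
            }
        } finally {
            // Always shut down the cluster so its NameNode/DataNode threads and ports are released.
            cluster.shutdown();
        }
    }
}

Several of the examples below additionally wrap shutdown() in a null check or a try/catch (see the DFSUtils and HdfsMiniClusterUtil helpers), which is a reasonable defensive choice when cluster startup itself can fail.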
From source file: com.mellanox.r4h.TestHFlush.java
License: Apache License
/**
 * Test hsync (with updating block length in NameNode) while no data is
 * actually written yet.
 */
@Test
public void hSyncUpdateLength_00() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    DistributedFileSystem fileSystem = (DistributedFileSystem) cluster.getFileSystem();
    try {
        Path path = new Path(fName);
        FSDataOutputStream stm = fileSystem.create(path, true, 4096, (short) 2,
                MiniDFSClusterBridge.getAppendTestUtil_BLOCK_SIZE());
        System.out.println("Created file " + path.toString());
        ((DFSOutputStream) stm.getWrappedStream()).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
        long currentFileLength = fileSystem.getFileStatus(path).getLen();
        assertEquals(0L, currentFileLength);
        stm.close();
    } finally {
        fileSystem.close();
        cluster.shutdown();
    }
}
From source file: com.mellanox.r4h.TestHFlush.java
License: Apache License
/**
 * Starts a new cluster with the given Configuration; creates a file
 * with the specified block_size and writes 10 equal sections into it; it also calls
 * hflush/hsync after each write and throws an IOException in case of an error.
 *
 * @param conf cluster configuration
 * @param fileName name of the file to be created and processed as required
 * @param block_size value to be used for the file's creation
 * @param replicas the number of replicas
 * @param isSync whether to use hsync (true) or hflush (false)
 * @param syncFlags specify the semantics of the sync/flush
 * @throws IOException in case of any errors
 */
public static void doTheJob(Configuration conf, final String fileName, long block_size, short replicas,
        boolean isSync, EnumSet<SyncFlag> syncFlags) throws IOException {
    byte[] fileContent;
    final int SECTIONS = 10;

    fileContent = AppendTestUtil.initBuffer(MiniDFSClusterBridge.getAppendTestUtils_FILE_SIZE());
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(replicas).build();
    // Make sure we work with DFS in order to utilize all its functionality
    DistributedFileSystem fileSystem = (DistributedFileSystem) cluster.getFileSystem();

    FSDataInputStream is;
    try {
        Path path = new Path(fileName);
        FSDataOutputStream stm = fileSystem.create(path, false, 4096, replicas, block_size);
        System.out.println("Created file " + fileName);

        int tenth = MiniDFSClusterBridge.getAppendTestUtils_FILE_SIZE() / SECTIONS;
        int rounding = MiniDFSClusterBridge.getAppendTestUtils_FILE_SIZE() - tenth * SECTIONS;
        for (int i = 0; i < SECTIONS; i++) {
            System.out.println(
                    "Writing " + (tenth * i) + " to " + (tenth * (i + 1)) + " section to file " + fileName);
            // write to the file
            stm.write(fileContent, tenth * i, tenth);

            // Wait while hflush/hsync pushes all packets through built pipeline
            if (isSync) {
                ((DFSOutputStream) stm.getWrappedStream()).hsync(syncFlags);
            } else {
                ((DFSOutputStream) stm.getWrappedStream()).hflush();
            }

            // Check file length if updatelength is required
            if (isSync && syncFlags.contains(SyncFlag.UPDATE_LENGTH)) {
                long currentFileLength = fileSystem.getFileStatus(path).getLen();
                assertEquals("File size doesn't match for hsync/hflush with updating the length",
                        tenth * (i + 1), currentFileLength);
            }
            byte[] toRead = new byte[tenth];
            byte[] expected = new byte[tenth];
            System.arraycopy(fileContent, tenth * i, expected, 0, tenth);
            // Open the same file for read. Need to create new reader after every write operation(!)
            is = fileSystem.open(path);
            is.seek(tenth * i);
            int readBytes = is.read(toRead, 0, tenth);
            System.out.println("Has read " + readBytes);
            assertTrue("Should've get more bytes", (readBytes > 0) && (readBytes <= tenth));
            is.close();
            checkData(toRead, 0, readBytes, expected, "Partial verification");
        }
        System.out.println("Writing " + (tenth * SECTIONS) + " to " + (tenth * SECTIONS + rounding)
                + " section to file " + fileName);
        stm.write(fileContent, tenth * SECTIONS, rounding);
        stm.close();

        assertEquals("File size doesn't match ", MiniDFSClusterBridge.getAppendTestUtils_FILE_SIZE(),
                fileSystem.getFileStatus(path).getLen());
        AppendTestUtil.checkFullFile(fileSystem, path, fileContent.length, fileContent, "hflush()");
    } finally {
        fileSystem.close();
        cluster.shutdown();
    }
}
From source file: com.mellanox.r4h.TestHFlush.java
License: Apache License
/**
 * This creates a slow writer and checks whether
 * pipeline heartbeats work fine.
 */
@Test
public void testPipelineHeartbeat() throws Exception {
    final int DATANODE_NUM = 2;
    final int fileLen = 6;
    Configuration conf = new HdfsConfiguration();
    final int timeout = 2000;
    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);

    final Path p = new Path("/pipelineHeartbeat/foo");
    System.out.println("p=" + p);

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
    try {
        DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();

        byte[] fileContents = AppendTestUtil.initBuffer(fileLen);

        // create a new file.
        FSDataOutputStream stm = AppendTestUtil.createFile(fs, p, DATANODE_NUM);

        stm.write(fileContents, 0, 1);
        Thread.sleep(timeout);
        stm.hflush();
        System.out.println("Wrote 1 byte and hflush " + p);

        // write another byte
        Thread.sleep(timeout);
        stm.write(fileContents, 1, 1);
        stm.hflush();

        stm.write(fileContents, 2, 1);
        Thread.sleep(timeout);
        stm.hflush();

        stm.write(fileContents, 3, 1);
        Thread.sleep(timeout);
        stm.write(fileContents, 4, 1);
        stm.hflush();

        stm.write(fileContents, 5, 1);
        Thread.sleep(timeout);
        stm.close();

        // verify that entire file is good
        AppendTestUtil.checkFullFile(fs, p, fileLen, fileContents, "Failed to slowly write to a file");
    } finally {
        cluster.shutdown();
    }
}
From source file: com.mellanox.r4h.TestHFlush.java
License: Apache License
@Test
public void testHFlushInterrupted() throws Exception {
    final int DATANODE_NUM = 2;
    final int fileLen = 6;
    byte[] fileContents = AppendTestUtil.initBuffer(fileLen);
    Configuration conf = new HdfsConfiguration();
    final Path p = new Path("/hflush-interrupted");

    System.out.println("p=" + p);

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
    try {
        DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();

        // create a new file.
        FSDataOutputStream stm = AppendTestUtil.createFile(fs, p, DATANODE_NUM);

        stm.write(fileContents, 0, 2);
        Thread.currentThread().interrupt();
        try {
            stm.hflush();
            // If we made it past the hflush(), then that means that the ack made it back
            // from the pipeline before we got to the wait() call. In that case we should
            // still have interrupted status.
            assertTrue(Thread.currentThread().interrupted());
        } catch (InterruptedIOException ie) {
            System.out.println("Got expected exception during flush");
        }
        assertFalse(Thread.currentThread().interrupted());

        // Try again to flush should succeed since we no longer have interrupt status
        stm.hflush();

        // Write some more data and flush
        stm.write(fileContents, 2, 2);
        stm.hflush();

        // Write some data and close while interrupted
        stm.write(fileContents, 4, 2);
        Thread.currentThread().interrupt();
        try {
            stm.close();
            // If we made it past the close(), then that means that the ack made it back
            // from the pipeline before we got to the wait() call. In that case we should
            // still have interrupted status.
            assertTrue(Thread.currentThread().interrupted());
        } catch (InterruptedIOException ioe) {
            System.out.println("Got expected exception during close");
            // If we got the exception, we shouldn't have interrupted status anymore.
            assertFalse(Thread.currentThread().interrupted());

            // Now do a successful close.
            stm.close();
        }

        // verify that entire file is good
        AppendTestUtil.checkFullFile(fs, p, fileLen, fileContents, "Failed to deal with thread interruptions");
    } finally {
        cluster.shutdown();
    }
}
From source file: com.mellanox.r4h.TestReadWhileWriting.java
License: Apache License
/** Test reading while writing. */
@Test
public void pipeline_02_03() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

    // create cluster
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    try {
        // change the lease limits.
        cluster.setLeasePeriod(SOFT_LEASE_LIMIT, HARD_LEASE_LIMIT);

        // wait for the cluster
        cluster.waitActive();
        final FileSystem fs = cluster.getFileSystem();
        final Path p = new Path(DIR, "file1");
        final int half = BLOCK_SIZE / 2;

        // a. On machine M1, create a file and write half a block of data.
        //    Invoke DFSOutputStream.hflush() on the dfs file handle.
        //    Do not close the file yet.
        {
            final FSDataOutputStream out = fs.create(p, true,
                    fs.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
                    (short) 3, BLOCK_SIZE);
            write(out, 0, half);

            // hflush
            ((DFSOutputStream) out.getWrappedStream()).hflush();
        }

        // b. On another machine M2, open the file and verify that the half-block
        //    of data can be read successfully.
        checkFile(p, half, conf);
        MiniDFSClusterBridge.getAppendTestUtilLOG().info("leasechecker.interruptAndJoin()");
        ((DistributedFileSystem) fs).dfs.getLeaseRenewer().interruptAndJoin();

        // c. On M1, append another half block of data. Close the file on M1.
        {
            // sleep to let the lease expire.
            Thread.sleep(2 * SOFT_LEASE_LIMIT);

            final UserGroupInformation current = UserGroupInformation.getCurrentUser();
            final UserGroupInformation ugi = UserGroupInformation
                    .createUserForTesting(current.getShortUserName() + "x", new String[] { "supergroup" });
            final DistributedFileSystem dfs = ugi.doAs(new PrivilegedExceptionAction<DistributedFileSystem>() {
                @Override
                public DistributedFileSystem run() throws Exception {
                    return (DistributedFileSystem) FileSystem.newInstance(conf);
                }
            });
            final FSDataOutputStream out = append(dfs, p);
            write(out, 0, half);
            out.close();
        }

        // d. On M2, open the file and read 1 block of data from it. Close the file.
        checkFile(p, 2 * half, conf);
    } finally {
        cluster.shutdown();
    }
}
From source file: io.hops.transaction.lock.TestInodeLock.java
License: Apache License
@Test
public void testInodeLockWithWrongPath() throws IOException {
    final Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        final MiniDFSCluster clusterFinal = cluster;
        final DistributedFileSystem hdfs = cluster.getFileSystem();

        hdfs.mkdirs(new Path("/tmp"));
        DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);

        new HopsTransactionalRequestHandler(HDFSOperationType.TEST) {
            @Override
            public void acquireLock(TransactionLocks locks) throws IOException {
                LockFactory lf = LockFactory.getInstance();
                INodeLock il = lf.getINodeLock(TransactionLockTypes.INodeLockType.READ_COMMITTED,
                        TransactionLockTypes.INodeResolveType.PATH, new String[] { "/tmp/f1", "/tmp/f2" })
                        .setNameNodeID(clusterFinal.getNameNode().getId())
                        .setActiveNameNodes(clusterFinal.getNameNode().getActiveNameNodes().getActiveNodes())
                        .skipReadingQuotaAttr(true);
                locks.add(il);
            }

            @Override
            public Object performTask() throws IOException {
                return null;
            }
        }.handle();
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
From source file: org.apache.accumulo.core.conf.CredentialProviderFactoryShimTest.java
License: Apache License
@Test
public void extractFromHdfs() throws Exception {
    File target = new File(System.getProperty("user.dir"), "target");
    String prevValue = System.setProperty("test.build.data",
            new File(target, this.getClass().getName() + "_minidfs").toString());
    MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(new Configuration()).build();
    try {
        if (null != prevValue) {
            System.setProperty("test.build.data", prevValue);
        } else {
            System.clearProperty("test.build.data");
        }

        // One namenode, one configuration
        Configuration dfsConfiguration = dfsCluster.getConfiguration(0);
        Path destPath = new Path("/accumulo.jceks");
        FileSystem dfs = dfsCluster.getFileSystem();
        // Put the populated keystore in hdfs
        dfs.copyFromLocalFile(new Path(populatedKeyStore.toURI()), destPath);

        Configuration cpConf = CredentialProviderFactoryShim.getConfiguration(dfsConfiguration,
                "jceks://hdfs/accumulo.jceks");

        // The values in the keystore
        Map<String, String> expectations = new HashMap<>();
        expectations.put("key1", "value1");
        expectations.put("key2", "value2");

        checkCredentialProviders(cpConf, expectations);
    } finally {
        dfsCluster.shutdown();
    }
}
From source file: org.apache.ambari.servicemonitor.utils.DFSUtils.java
License: Apache License
public static MiniDFSCluster destroyCluster(MiniDFSCluster dfsCluster) {
    if (dfsCluster != null) {
        try {
            dfsCluster.shutdown();
        } catch (Exception e) {
            LOG.warn("Exception when destroying cluster: " + e, e);
        }
    }
    return null;
}
From source file: org.apache.blur.HdfsMiniClusterUtil.java
License: Apache License
public static void shutdownDfs(MiniDFSCluster cluster) {
    if (cluster != null) {
        LOG.info("Shutting down Mini DFS ");
        try {
            cluster.shutdown();
        } catch (Exception e) {
            // Can get a java.lang.reflect.UndeclaredThrowableException thrown
            // here because of an InterruptedException. Don't let exceptions in
            // here be cause of test failure.
        }
        try {
            FileSystem fs = cluster.getFileSystem();
            if (fs != null) {
                LOG.info("Shutting down FileSystem");
                fs.close();
            }
            FileSystem.closeAll();
        } catch (IOException e) {
            LOG.error("error closing file system", e);
        }

        // This has got to be one of the worst hacks I have ever had to do.
        // This is needed to shutdown 2 thread pools that are not shutdown by
        // themselves.
        ThreadGroup threadGroup = Thread.currentThread().getThreadGroup();
        Thread[] threads = new Thread[100];
        int enumerate = threadGroup.enumerate(threads);
        for (int i = 0; i < enumerate; i++) {
            Thread thread = threads[i];
            if (thread.getName().startsWith("pool")) {
                if (thread.isAlive()) {
                    thread.interrupt();
                    LOG.info("Stopping ThreadPoolExecutor [" + thread.getName() + "]");
                    Object target = getField(Thread.class, thread, "target");
                    if (target != null) {
                        ThreadPoolExecutor e = (ThreadPoolExecutor) getField(ThreadPoolExecutor.class, target,
                                "this$0");
                        if (e != null) {
                            e.shutdownNow();
                        }
                    }
                    try {
                        LOG.info("Waiting for thread pool to exit [" + thread.getName() + "]");
                        thread.join();
                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        }
    }
}
From source file: org.apache.falcon.cluster.util.MiniHdfsClusterUtil.java
License: Apache License
public static void cleanupDfs(MiniDFSCluster miniDFSCluster, File baseDir) throws Exception {
    miniDFSCluster.shutdown();
    FileUtil.fullyDelete(baseDir);
}