List of usage examples for org.apache.hadoop.hdfs MiniDFSCluster stopDataNode
public synchronized DataNodeProperties stopDataNode(String dnName)
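Before the full integration tests below, a minimal sketch of the stop/restart lifecycle may help. This is an illustrative example, not taken from the source file below; it assumes the hadoop-hdfs test artifact (which provides MiniDFSCluster) is on the classpath, and it uses the index-based stopDataNode(int) overload together with restartDataNode(DataNodeProperties).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;

public class StopDataNodeSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Bring up a two-node mini cluster so one DataNode can be stopped
        // while the other keeps the cluster serviceable.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        try {
            cluster.waitActive();

            // Stop the first DataNode by index; the returned DataNodeProperties
            // can later be handed back to restartDataNode(...) to revive it.
            DataNodeProperties dnProps = cluster.stopDataNode(0);

            // ... exercise the code under test while the node is down ...

            cluster.restartDataNode(dnProps);
            cluster.waitActive();
        } finally {
            cluster.shutdown();
        }
    }
}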
From source file: backup.integration.MiniClusterTestBase.java
License: Apache License
@Test
public void testIntegrationBasic() throws Exception {
    File hdfsDir = setupHdfsLocalDir();
    Configuration conf = setupConfig(hdfsDir);
    MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(conf).build();
    Thread thread = null;
    try {
        DistributedFileSystem fileSystem = hdfsCluster.getFileSystem();
        Path path = new Path("/testing.txt");
        writeFile(fileSystem, path);
        Thread.sleep(TimeUnit.SECONDS.toMillis(5));
        AtomicBoolean success = new AtomicBoolean(false);
        thread = new Thread(new Runnable() {
            @Override
            public void run() {
                boolean beginTest = true;
                while (true) {
                    try {
                        try (ByteArrayOutputStream output = new ByteArrayOutputStream()) {
                            try (FSDataInputStream inputStream = fileSystem.open(path)) {
                                IOUtils.copy(inputStream, output);
                            }
                            if (beginTest) {
                                hdfsCluster.startDataNodes(conf, 1, true, null, null);
                                hdfsCluster.stopDataNode(0);
                                beginTest = false;
                            } else {
                                LOG.info("Missing block restored.");
                                success.set(true);
                                return;
                            }
                        }
                    } catch (IOException e) {
                        LOG.error(e.getMessage());
                    }
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e) {
                        return;
                    }
                }
            }
        });
        thread.start();
        thread.join(TimeUnit.MINUTES.toMillis(2));
        if (!success.get()) {
            fail();
        }
    } finally {
        if (thread != null) {
            thread.interrupt();
        }
        hdfsCluster.shutdown();
        destroyBackupStore(conf);
    }
}
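Both tests on this page stop the node by index, while the signature listed at the top takes a DataNode name. A hedged variant for stopping by name, assuming the String overload matches on the transfer address reported by DatanodeID#getXferAddr() in the Hadoop version in use:

// Illustrative variant (not from MiniClusterTestBase): stop a specific
// DataNode by name instead of by index. Assumes stopDataNode(String)
// matches the transfer address from DatanodeID#getXferAddr().
String dnName = hdfsCluster.getDataNodes().get(0).getDatanodeId().getXferAddr();
MiniDFSCluster.DataNodeProperties stopped = hdfsCluster.stopDataNode(dnName);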
From source file: backup.integration.MiniClusterTestBase.java
License: Apache License
@Test
public void testIntegrationBasicFullRestoreFromShutdown() throws Exception {
    File hdfsDir = setupHdfsLocalDir();
    Configuration conf = setupConfig(hdfsDir);
    {
        MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(conf).build();
        try {
            DistributedFileSystem fileSystem = hdfsCluster.getFileSystem();
            for (int i = 0; i < 5; i++) {
                Path path = new Path("/testing." + i + ".txt");
                System.out.println("Adding path " + path);
                writeFile(fileSystem, path);
            }
            Thread.sleep(TimeUnit.SECONDS.toMillis(3));
            hdfsCluster.stopDataNode(0);

            // Remove data
            FileUtils.deleteDirectory(new File(hdfsDir, "data"));

            hdfsCluster.startDataNodes(conf, 1, true, null, null);
            NameNode nameNode = hdfsCluster.getNameNode();
            for (int i = 0; i < 90; i++) {
                if (!nameNode.isInSafeMode()) {
                    return;
                }
                System.out.println(nameNode.getState() + " " + nameNode.isInSafeMode());
                Thread.sleep(1000);
            }
            fail();
        } finally {
            hdfsCluster.shutdown();
            destroyBackupStore(conf);
        }
    }
}
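The second test polls NameNode#isInSafeMode() directly. As a sketch of an alternative, assuming DistributedFileSystem#isInSafeMode() is available (Hadoop 2.x and later), the same wait can be expressed against the client-side API:

// Illustrative alternative (not from MiniClusterTestBase): wait for the
// NameNode to leave safe mode through the client API instead of polling
// the NameNode object. Assumes DistributedFileSystem#isInSafeMode()
// is available (Hadoop 2.x+).
DistributedFileSystem dfs = hdfsCluster.getFileSystem();
long deadline = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(90);
while (dfs.isInSafeMode()) {
    if (System.currentTimeMillis() > deadline) {
        fail("NameNode did not leave safe mode after the DataNode restore");
    }
    Thread.sleep(1000);
}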