Example usage for org.apache.hadoop.hdfs MiniDFSCluster startDataNodes

Introduction

On this page you can find example usage for org.apache.hadoop.hdfs MiniDFSCluster startDataNodes.

Prototype


public void startDataNodes(Configuration conf, int numDataNodes, boolean manageDfsDirs, StartupOption operation,
        String[] racks) throws IOException 

Document

Modify the config and start up the DataNodes.
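
The javadoc above is terse, so here is a minimal sketch of the common call pattern before the full examples: pass manageDfsDirs as true so the cluster creates and manages the new DataNode's storage directories, and null for operation and racks to accept the defaults. The configuration and variable names below are illustrative assumptions, not taken from the examples on this page.

Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
    cluster.waitActive();
    // Add one more DataNode. manageDfsDirs=true lets the cluster create and
    // manage its storage directories; null operation and racks use the defaults.
    cluster.startDataNodes(conf, 1, true, null, null);
    cluster.waitActive(); // block until the new DataNode has registered
} finally {
    cluster.shutdown();
}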

Usage

From source file: backup.integration.MiniClusterTestBase.java

License: Apache License

@Test
public void testIntegrationBasic() throws Exception {
    File hdfsDir = setupHdfsLocalDir();
    Configuration conf = setupConfig(hdfsDir);

    MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(conf).build();
    Thread thread = null;
    try {
        DistributedFileSystem fileSystem = hdfsCluster.getFileSystem();
        Path path = new Path("/testing.txt");
        writeFile(fileSystem, path);
        Thread.sleep(TimeUnit.SECONDS.toMillis(5));
        AtomicBoolean success = new AtomicBoolean(false);
        thread = new Thread(new Runnable() {
            @Override
            public void run() {
                boolean beginTest = true;
                while (true) {
                    try {
                        try (ByteArrayOutputStream output = new ByteArrayOutputStream()) {
                            try (FSDataInputStream inputStream = fileSystem.open(path)) {
                                IOUtils.copy(inputStream, output);
                            }
                            if (beginTest) {
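                                // Swap DataNodes once: start a replacement, then stop
                                // the original so its blocks go missing and must be
                                // restored from the backup store on a later read.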
                                hdfsCluster.startDataNodes(conf, 1, true, null, null);
                                hdfsCluster.stopDataNode(0);
                                beginTest = false;
                            } else {
                                LOG.info("Missing block restored.");
                                success.set(true);
                                return;
                            }
                        }
                    } catch (IOException e) {
                        LOG.error(e.getMessage());
                    }
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e) {
                        return;
                    }
                }
            }
        });
        thread.start();
        thread.join(TimeUnit.MINUTES.toMillis(2));
        if (!success.get()) {
            fail();
        }
    } finally {
        if (thread != null) {
            thread.interrupt();
        }
        hdfsCluster.shutdown();
        destroyBackupStore(conf);
    }
}
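
Note that both tests lean on helpers defined elsewhere in MiniClusterTestBase (setupHdfsLocalDir, setupConfig, writeFile, destroyBackupStore) that this page does not show. As a rough orientation, a minimal writeFile might look like the sketch below; this is an assumption about its intent, not the actual implementation.

// Hypothetical sketch of the writeFile helper used above; the real
// implementation in MiniClusterTestBase may differ.
private void writeFile(FileSystem fileSystem, Path path) throws IOException {
    try (FSDataOutputStream out = fileSystem.create(path)) {
        out.writeBytes("test data for " + path);
    }
}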

From source file: backup.integration.MiniClusterTestBase.java

License: Apache License

@Test
public void testIntegrationBasicFullRestoreFromShutdown() throws Exception {
    File hdfsDir = setupHdfsLocalDir();
    Configuration conf = setupConfig(hdfsDir);
    {
        MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(conf).build();
        try {
            DistributedFileSystem fileSystem = hdfsCluster.getFileSystem();
            for (int i = 0; i < 5; i++) {
                Path path = new Path("/testing." + i + ".txt");
                System.out.println("Adding path " + path);
                writeFile(fileSystem, path);
            }

            Thread.sleep(TimeUnit.SECONDS.toMillis(3));

            hdfsCluster.stopDataNode(0);

            // Remove the DataNode's on-disk storage to simulate total local
            // data loss; every block must then be restored from the backup store.
            FileUtils.deleteDirectory(new File(hdfsDir, "data"));

            hdfsCluster.startDataNodes(conf, 1, true, null, null);

            NameNode nameNode = hdfsCluster.getNameNode();
            for (int i = 0; i < 90; i++) {
                if (!nameNode.isInSafeMode()) {
                    return;
                }
                System.out.println(nameNode.getState() + " " + nameNode.isInSafeMode());
                Thread.sleep(1000);
            }
            fail();
        } finally {
            hdfsCluster.shutdown();
            destroyBackupStore(conf);
        }
    }
}
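
This second test polls the NameNode directly via isInSafeMode() until the restored blocks let it leave safe mode, failing after roughly 90 seconds. A client-side variant of the same check, sketched here as an assumption rather than taken from the test, uses DistributedFileSystem.setSafeMode with SAFEMODE_GET, which only reports the current state and never toggles it:

// Sketch: poll safe mode from the client side instead of the NameNode object.
DistributedFileSystem dfs = hdfsCluster.getFileSystem();
while (dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET)) {
    Thread.sleep(1000); // still in safe mode; wait and re-check
}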