Example usage for org.apache.hadoop.hdfs MiniDFSCluster shutdown

List of usage examples for org.apache.hadoop.hdfs MiniDFSCluster shutdown

Introduction

On this page you can find example usage of org.apache.hadoop.hdfs MiniDFSCluster shutdown.

Prototype

public void shutdown() 

Document

Shutdown all the nodes in the cluster.
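
Before the individual examples, the snippet below is a minimal sketch of the typical lifecycle: build the cluster, wait for it to become active, do the work, and call shutdown() in a finally block so the NameNode and DataNode threads are torn down even when the test body throws. The class name, configuration values, and paths here are illustrative assumptions and are not taken from any of the example sources.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsShutdownSketch {
    public static void main(String[] args) throws Exception {
        MiniDFSCluster cluster = null;
        try {
            // Start a single-DataNode, in-process HDFS cluster.
            cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(1).build();
            cluster.waitActive();

            // Use the cluster like any other HDFS file system.
            FileSystem fs = cluster.getFileSystem();
            fs.mkdirs(new Path("/example"));
        } finally {
            // Shutdown all the nodes in the cluster, even if the body above failed.
            if (cluster != null) {
                cluster.shutdown();
            }
        }
    }
}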

Usage

From source file:a.TestConcatExample.java

License:Apache License

@Test
public void concatIsPermissive() throws IOException, URISyntaxException {
    MiniDFSCluster cluster = null;
    final Configuration conf = WebHdfsTestUtil.createConf();
    conf.set("dfs.namenode.fs-limits.min-block-size", "1000"); // Allow tiny blocks for the test
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        cluster.waitActive();
        final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
        final FileSystem dfs = cluster.getFileSystem();

        final FileSystem fs = dfs; // WebHDFS has a bug in getLocatedBlocks

        Path root = new Path("/dir");
        fs.mkdirs(root);

        short origRep = 3;
        short secondRep = (short) (origRep - 1);
        Path f1 = new Path("/dir/f1");
        long size1 = writeFile(fs, f1, /* blocksize */ 4096, origRep, 5);
        long f1NumBlocks = fs.getFileBlockLocations(f1, 0, size1).length;
        assertEquals(5, f1NumBlocks);

        Path f2 = new Path("/dir/f2");
        long size2 = writeFile(fs, f2, /* block size must be a multiple of 512 for checksums */ 4096 - 512, secondRep, 4);
        long f2NumBlocks = fs.getFileBlockLocations(f2, 0, size2).length;
        assertEquals(5, f2NumBlocks);

        fs.concat(f1, new Path[] { f2 });
        FileStatus[] fileStatuses = fs.listStatus(root);

        // Only one file should remain
        assertEquals(1, fileStatuses.length);
        FileStatus fileStatus = fileStatuses[0];

        // And it should be named after the first file
        assertEquals("f1", fileStatus.getPath().getName());

        // The entire file takes the replication of the first argument
        assertEquals(origRep, fileStatus.getReplication());

        // As expected, the new concatenated file has the combined length of the two previous files
        assertEquals(size1 + size2, fileStatus.getLen());

        // And we should have the same number of blocks
        assertEquals(f1NumBlocks + f2NumBlocks,
                fs.getFileBlockLocations(fileStatus.getPath(), 0, size1 + size2).length);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }

    }
}

From source file:backup.integration.MiniClusterTestBase.java

License:Apache License

@Test
public void testIntegrationBasic() throws Exception {
    File hdfsDir = setupHdfsLocalDir();
    Configuration conf = setupConfig(hdfsDir);

    MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(conf).build();
    Thread thread = null;
    try {
        DistributedFileSystem fileSystem = hdfsCluster.getFileSystem();
        Path path = new Path("/testing.txt");
        writeFile(fileSystem, path);
        Thread.sleep(TimeUnit.SECONDS.toMillis(5));
        AtomicBoolean success = new AtomicBoolean(false);
        thread = new Thread(new Runnable() {
            @Override
            public void run() {
                boolean beginTest = true;
                while (true) {
                    try {
                        try (ByteArrayOutputStream output = new ByteArrayOutputStream()) {
                            try (FSDataInputStream inputStream = fileSystem.open(path)) {
                                IOUtils.copy(inputStream, output);
                            }
                            if (beginTest) {
                                hdfsCluster.startDataNodes(conf, 1, true, null, null);
                                hdfsCluster.stopDataNode(0);
                                beginTest = false;
                            } else {
                                LOG.info("Missing block restored.");
                                success.set(true);
                                return;
                            }
                        }
                    } catch (IOException e) {
                        LOG.error(e.getMessage());
                    }
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e) {
                        return;
                    }
                }
            }
        });
        thread.start();
        thread.join(TimeUnit.MINUTES.toMillis(2));
        if (!success.get()) {
            fail();
        }
    } finally {
        if (thread != null) {
            thread.interrupt();
        }
        hdfsCluster.shutdown();
        destroyBackupStore(conf);
    }
}

From source file:backup.integration.MiniClusterTestBase.java

License:Apache License

@Test
public void testIntegrationBasicFullRestoreFromShutdown() throws Exception {
    File hdfsDir = setupHdfsLocalDir();
    Configuration conf = setupConfig(hdfsDir);
    {
        MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(conf).build();
        try {
            DistributedFileSystem fileSystem = hdfsCluster.getFileSystem();
            for (int i = 0; i < 5; i++) {
                Path path = new Path("/testing." + i + ".txt");
                System.out.println("Adding path " + path);
                writeFile(fileSystem, path);
            }

            Thread.sleep(TimeUnit.SECONDS.toMillis(3));

            hdfsCluster.stopDataNode(0);

            // Remove data
            FileUtils.deleteDirectory(new File(hdfsDir, "data"));

            hdfsCluster.startDataNodes(conf, 1, true, null, null);

            NameNode nameNode = hdfsCluster.getNameNode();
            for (int i = 0; i < 90; i++) {
                if (!nameNode.isInSafeMode()) {
                    return;
                }
                System.out.println(nameNode.getState() + " " + nameNode.isInSafeMode());
                Thread.sleep(1000);
            }
            fail();
        } finally {
            hdfsCluster.shutdown();
            destroyBackupStore(conf);
        }
    }
}

From source file:backup.integration.MiniClusterTestBase.java

License:Apache License

@Test
public void testIntegrationBlockCheckWhenAllBackupStoreBlocksMissing() throws Exception {
    File hdfsDir = setupHdfsLocalDir();
    Configuration conf = setupConfig(hdfsDir);

    MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(conf).build();
    Thread thread = null;
    try (BackupStore backupStore = BackupStore.create(BackupUtil.convert(conf))) {
        DistributedFileSystem fileSystem = hdfsCluster.getFileSystem();
        Path path = new Path("/testing.txt");
        writeFile(fileSystem, path);
        Thread.sleep(TimeUnit.SECONDS.toMillis(10));

        Set<ExtendedBlock> original = getLastGeneration(toSet(backupStore.getExtendedBlocks()));
        destroyBackupStoreBlocks(backupStore);

        NameNode nameNode = hdfsCluster.getNameNode();
        NameNodeRestoreProcessor processor = SingletonManager.getManager(NameNodeRestoreProcessor.class)
                .getInstance(nameNode);
        processor.runBlockCheck();

        Thread.sleep(TimeUnit.SECONDS.toMillis(5));

        Set<ExtendedBlock> current = toSet(backupStore.getExtendedBlocks());

        assertEquals(original, current);

    } finally {
        if (thread != null) {
            thread.interrupt();
        }
        hdfsCluster.shutdown();
        destroyBackupStore(conf);
    }
}

From source file:backup.integration.MiniClusterTestBase.java

License:Apache License

@Test
public void testIntegrationBlockCheckWhenSomeBackupStoreBlocksMissing() throws Exception {
    File hdfsDir = setupHdfsLocalDir();
    Configuration conf = setupConfig(hdfsDir);

    MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(conf).build();
    Thread thread = null;
    try (BackupStore backupStore = BackupStore.create(BackupUtil.convert(conf))) {
        DistributedFileSystem fileSystem = hdfsCluster.getFileSystem();
        writeFile(fileSystem, new Path("/testing1.txt"));
        writeFile(fileSystem, new Path("/testing2.txt"));
        writeFile(fileSystem, new Path("/testing3.txt"));
        Thread.sleep(TimeUnit.SECONDS.toMillis(10));

        Set<ExtendedBlock> original = getLastGeneration(toSet(backupStore.getExtendedBlocks()));
        destroyOneBackupStoreBlock(backupStore);

        NameNode nameNode = hdfsCluster.getNameNode();

        NameNodeRestoreProcessor processor = SingletonManager.getManager(NameNodeRestoreProcessor.class)
                .getInstance(nameNode);
        processor.runBlockCheck();

        Thread.sleep(TimeUnit.SECONDS.toMillis(5));

        Set<ExtendedBlock> current = toSet(backupStore.getExtendedBlocks());

        for (ExtendedBlock eb : original) {
            System.out.println("ORIGINAL=" + eb);
        }

        for (ExtendedBlock eb : current) {
            System.out.println("CURRENT=" + eb);
        }

        assertEquals(original, current);

    } finally {
        if (thread != null) {
            thread.interrupt();
        }
        hdfsCluster.shutdown();
        destroyBackupStore(conf);
    }
}

From source file:co.cask.cdap.common.logging.LogCollectorTest.java

License:Apache License

@Test
public void testCollectionDFS() throws IOException {

    MiniDFSCluster dfsCluster = null;

    try {
        File dfsPath = tempFolder.newFolder();
        System.setProperty("test.build.data", dfsPath.toString());
        System.setProperty("test.cache.data", dfsPath.toString());

        System.err.println("Starting up Mini HDFS cluster...");
        Configuration hConf = new Configuration();
        CConfiguration conf = CConfiguration.create();
        //conf.setInt("dfs.block.size", 1024*1024);
        dfsCluster = new MiniDFSCluster.Builder(hConf).nameNodePort(0).numDataNodes(1).format(true)
                .manageDataDfsDirs(true).manageNameDfsDirs(true).build();
        dfsCluster.waitClusterUp();
        System.err.println("Mini HDFS is started.");

        // Add HDFS info to conf
        hConf.set("fs.defaultFS", dfsCluster.getFileSystem().getUri().toString());
        // set a root directory for log collection
        conf.set(Constants.CFG_LOG_COLLECTION_ROOT, "/logtemp");

        testCollection(conf, hConf);
    } finally {
        if (dfsCluster != null) {
            System.err.println("Shutting down Mini HDFS cluster...");
            dfsCluster.shutdown();
            System.err.println("Mini HDFS is shut down.");
        }
    }
}

From source file:com.github.stephenc.javaisotools.loopfs.iso9660.Iso9660FileSystemTest.java

License:Open Source License

@Test
public void hdfsSmokes() throws Exception {
    assumeTrue(isNotWindows());
    //Creating a Mini DFS Cluster as the default File System does not return a Seekable Stream
    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(new Configuration());
    MiniDFSCluster hdfsCluster = builder.build();
    String hdfsTestFile = "hdfs://127.0.0.1:" + hdfsCluster.getNameNodePort() + "/test/" + filePath;
    hdfsCluster.getFileSystem().copyFromLocalFile(new Path(filePath), new Path(hdfsTestFile));
    InputStream is = hdfsCluster.getFileSystem().open(new Path(hdfsTestFile));
    Iso9660FileSystem image = new Iso9660FileSystem(new SeekableInputFileHadoop(is), true);
    this.runCheck(image);
    hdfsCluster.shutdown();
}

From source file:com.linkedin.haivvreo.TestHaivvreoUtils.java

License:Apache License

@Test
public void determineSchemaCanReadSchemaFromHDFS() throws IOException, HaivvreoException {
    // TODO: Make this an integration test, mock out hdfs for the actual unit test.
    String schemaString = TestAvroObjectInspectorGenerator.RECORD_SCHEMA;
    MiniDFSCluster miniDfs = null;
    try {
        // MiniDFSCluster litters files and folders all over the place.
        System.setProperty("test.build.data", "target/test-intermediate-stuff-data/");
        miniDfs = new MiniDFSCluster(new Configuration(), 1, true, null);

        miniDfs.getFileSystem().mkdirs(new Path("/path/to/schema"));
        FSDataOutputStream out = miniDfs.getFileSystem().create(new Path("/path/to/schema/schema.avsc"));
        out.writeBytes(schemaString);
        out.close();
        String onHDFS = miniDfs.getFileSystem().getUri() + "/path/to/schema/schema.avsc";

        Schema schemaFromHDFS = HaivvreoUtils.getSchemaFromHDFS(onHDFS, miniDfs.getFileSystem().getConf());
        Schema expectedSchema = Schema.parse(schemaString);
        assertEquals(expectedSchema, schemaFromHDFS);
    } finally {
        if (miniDfs != null)
            miniDfs.shutdown();
    }
}

From source file:com.mellanox.r4h.TestFSOutputSummer.java

License:Apache License

private void doTestFSOutputSummer(String checksumType) throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, checksumType);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
    fileSys = cluster.getFileSystem();
    try {
        Path file = new Path("try.dat");
        Random rand = new Random(seed);
        rand.nextBytes(expected);
        writeFile1(file);
        writeFile2(file);
        writeFile3(file);
    } finally {
        fileSys.close();
        cluster.shutdown();
    }
}

From source file:com.mellanox.r4h.TestFSOutputSummer.java

License:Apache License

@Test
public void TestDFSCheckSumType() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
    conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
    fileSys = cluster.getFileSystem();
    try {
        Path file = new Path("try.dat");
        Random rand = new Random(seed);
        rand.nextBytes(expected);
        writeFile1(file);
    } finally {
        fileSys.close();
        cluster.shutdown();
    }
}