Example usage for org.apache.hadoop.hdfs MiniDFSCluster getNameNode

Introduction

On this page you can find examples of how org.apache.hadoop.hdfs.MiniDFSCluster.getNameNode() is used in real projects.

Prototype

public NameNode getNameNode() 

Document

Gets the started NameNode.
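
Before the project examples below, here is a minimal self-contained sketch of the call (assuming JUnit and a default single-node cluster; the test name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

@Test
public void testGetNameNode() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        // getNameNode() returns the started NameNode backing this cluster
        NameNode nameNode = cluster.getNameNode();
        System.out.println("NameNode RPC address: " + nameNode.getNameNodeAddress());
    } finally {
        cluster.shutdown();
    }
}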

Usage

From source file: backup.integration.MiniClusterTestBase.java

License: Apache License

@Test
public void testIntegrationBasicFullRestoreFromShutdown() throws Exception {
    File hdfsDir = setupHdfsLocalDir();
    Configuration conf = setupConfig(hdfsDir);
    {
        MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(conf).build();
        try {
            DistributedFileSystem fileSystem = hdfsCluster.getFileSystem();
            for (int i = 0; i < 5; i++) {
                Path path = new Path("/testing." + i + ".txt");
                System.out.println("Adding path " + path);
                writeFile(fileSystem, path);
            }

            Thread.sleep(TimeUnit.SECONDS.toMillis(3));

            hdfsCluster.stopDataNode(0);

            // Remove data
            FileUtils.deleteDirectory(new File(hdfsDir, "data"));

            hdfsCluster.startDataNodes(conf, 1, true, null, null);

            NameNode nameNode = hdfsCluster.getNameNode();
            for (int i = 0; i < 90; i++) {
                if (!nameNode.isInSafeMode()) {
                    return;
                }
                System.out.println(nameNode.getState() + " " + nameNode.isInSafeMode());
                Thread.sleep(1000);
            }
            fail();
        } finally {
            hdfsCluster.shutdown();
            destroyBackupStore(conf);
        }
    }
}
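
The safe-mode polling above can also be done from the client side instead of through the NameNode handle; a sketch, assuming the test's fileSystem is still open (SAFEMODE_GET only queries the state, it does not change it):

import org.apache.hadoop.hdfs.protocol.HdfsConstants;

// Drop-in alternative for the polling loop in the test above
for (int i = 0; i < 90; i++) {
    if (!fileSystem.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET)) {
        return; // NameNode has left safe mode
    }
    Thread.sleep(1000);
}
fail("NameNode did not leave safe mode");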

From source file: backup.integration.MiniClusterTestBase.java

License: Apache License

@Test
public void testIntegrationBlockCheckWhenAllBackupStoreBlocksMissing() throws Exception {
    File hdfsDir = setupHdfsLocalDir();
    Configuration conf = setupConfig(hdfsDir);

    MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(conf).build();
    try (BackupStore backupStore = BackupStore.create(BackupUtil.convert(conf))) {
        DistributedFileSystem fileSystem = hdfsCluster.getFileSystem();
        Path path = new Path("/testing.txt");
        writeFile(fileSystem, path);
        Thread.sleep(TimeUnit.SECONDS.toMillis(10));

        Set<ExtendedBlock> original = getLastGeneration(toSet(backupStore.getExtendedBlocks()));
        destroyBackupStoreBlocks(backupStore);

        NameNode nameNode = hdfsCluster.getNameNode();
        NameNodeRestoreProcessor processor = SingletonManager.getManager(NameNodeRestoreProcessor.class)
                .getInstance(nameNode);
        processor.runBlockCheck();

        Thread.sleep(TimeUnit.SECONDS.toMillis(5));

        Set<ExtendedBlock> current = toSet(backupStore.getExtendedBlocks());

        assertEquals(original, current);

    } finally {
        hdfsCluster.shutdown();
        destroyBackupStore(conf);
    }
}

From source file: backup.integration.MiniClusterTestBase.java

License: Apache License

@Test
public void testIntegrationBlockCheckWhenSomeBackupStoreBlocksMissing() throws Exception {
    File hdfsDir = setupHdfsLocalDir();
    Configuration conf = setupConfig(hdfsDir);

    MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(conf).build();
    try (BackupStore backupStore = BackupStore.create(BackupUtil.convert(conf))) {
        DistributedFileSystem fileSystem = hdfsCluster.getFileSystem();
        writeFile(fileSystem, new Path("/testing1.txt"));
        writeFile(fileSystem, new Path("/testing2.txt"));
        writeFile(fileSystem, new Path("/testing3.txt"));
        Thread.sleep(TimeUnit.SECONDS.toMillis(10));

        Set<ExtendedBlock> original = getLastGeneration(toSet(backupStore.getExtendedBlocks()));
        destroyOneBackupStoreBlock(backupStore);

        NameNode nameNode = hdfsCluster.getNameNode();

        NameNodeRestoreProcessor processor = SingletonManager.getManager(NameNodeRestoreProcessor.class)
                .getInstance(nameNode);
        processor.runBlockCheck();

        Thread.sleep(TimeUnit.SECONDS.toMillis(5));

        Set<ExtendedBlock> current = toSet(backupStore.getExtendedBlocks());

        for (ExtendedBlock eb : original) {
            System.out.println("ORIGINAL=" + eb);
        }

        for (ExtendedBlock eb : current) {
            System.out.println("CURRENT=" + eb);
        }

        assertEquals(original, current);

    } finally {
        hdfsCluster.shutdown();
        destroyBackupStore(conf);
    }
}

From source file: com.trace.hadoop.TestDFSRename.java

License: Apache License

static int countLease(MiniDFSCluster cluster) {
    return cluster.getNameNode().namesystem.leaseManager.countLease();
}
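
The namesystem and leaseManager fields are reached by direct field access here, which compiles only where those fields are visible to the test. On Hadoop versions that expose public accessors, a hedged equivalent is:

static int countLease(MiniDFSCluster cluster) {
    // getNamesystem() and getLeaseManager() are public accessors for
    // the same objects the field access above reaches.
    return cluster.getNamesystem().getLeaseManager().countLease();
}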

From source file: io.hops.security.TestUsersGroups.java

License: Apache License

@Test
public void testGroupMappingsRefresh() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();

    cluster.getNameNode().getRpcServer().refreshUserToGroupsMappings();

    UsersGroups.addUserToGroupsTx("user", new String[] { "group1", "group2" });

    int userId = UsersGroups.getUserID("user");
    assertNotSame(0, userId);
    assertEquals(UsersGroups.getUser(userId), "user");

    int groupId = UsersGroups.getGroupID("group1");
    assertNotSame(0, groupId);
    assertEquals(UsersGroups.getGroup(groupId), "group1");

    assertEquals(UsersGroups.getGroups("user"), Arrays.asList("group1", "group2"));

    removeUser(userId);

    userId = UsersGroups.getUserID("user");
    assertNotSame(0, userId);

    cluster.getNameNode().getRpcServer().refreshUserToGroupsMappings();

    userId = UsersGroups.getUserID("user");
    assertEquals(0, userId);
    assertNull(UsersGroups.getGroups("user"));

    UsersGroups.addUserToGroupsTx("user", new String[] { "group1", "group2" });

    userId = UsersGroups.getUserID("user");
    assertNotSame(0, userId);

    assertEquals(Arrays.asList("group1", "group2"), UsersGroups.getGroups("user"));

    removeUser(userId);

    UsersGroups.addUserToGroupsTx("user", new String[] { "group3" });

    int newUserId = UsersGroups.getUserID("user");
    assertNotSame(0, newUserId);
    assertEquals(userId, newUserId);

    UsersGroups.addUserToGroupsTx("user", new String[] { "group1", "group2" });

    assertEquals(Arrays.asList("group3", "group1", "group2"), UsersGroups.getGroups("user"));
}
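
The refreshUserToGroupsMappings() call above goes through the NameNode's RPC server directly. Outside a test, the refresh is normally issued with dfsadmin; a rough in-code equivalent, assuming stock Hadoop's DFSAdmin tool applies in this HopsFS setup:

import org.apache.hadoop.hdfs.tools.DFSAdmin;

// Equivalent of running `hdfs dfsadmin -refreshUserToGroupsMappings`
DFSAdmin admin = new DFSAdmin(conf);
admin.run(new String[] { "-refreshUserToGroupsMappings" });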

From source file: io.hops.transaction.lock.TestInodeLock.java

License: Apache License

@Test
public void testInodeLockWithWrongPath() throws IOException {
    final Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        final MiniDFSCluster clusterFinal = cluster;
        final DistributedFileSystem hdfs = cluster.getFileSystem();

        hdfs.mkdirs(new Path("/tmp"));
        DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);

        new HopsTransactionalRequestHandler(HDFSOperationType.TEST) {
            @Override
            public void acquireLock(TransactionLocks locks) throws IOException {
                LockFactory lf = LockFactory.getInstance();
                INodeLock il = lf.getINodeLock(TransactionLockTypes.INodeLockType.READ_COMMITTED,
                        TransactionLockTypes.INodeResolveType.PATH, new String[] { "/tmp/f1", "/tmp/f2" })
                        .setNameNodeID(clusterFinal.getNameNode().getId())
                        .setActiveNameNodes(clusterFinal.getNameNode().getActiveNameNodes().getActiveNodes())
                        .skipReadingQuotaAttr(true);
                locks.add(il);
            }

            @Override
            public Object performTask() throws IOException {
                return null;
            }
        }.handle();

    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

From source file: org.apache.flume.sink.hdfs.TestHDFSEventSinkOnMiniCluster.java

License: Apache License

private static String getNameNodeURL(MiniDFSCluster cluster) {
    int nnPort = cluster.getNameNode().getNameNodeAddress().getPort();
    return "hdfs://localhost:" + nnPort;
}
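
If only the URI is needed, MiniDFSCluster can supply it directly; a brief sketch of that alternative for single-NameNode clusters:

private static String getNameNodeURL(MiniDFSCluster cluster) {
    // getURI() returns the hdfs://host:port URI of the cluster's NameNode,
    // avoiding manual string assembly from the port number.
    return cluster.getURI().toString();
}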