Example usage for org.apache.hadoop.hdfs.server.datanode DataNode shutdown

Introduction

On this page you can find example usage for org.apache.hadoop.hdfs.server.datanode.DataNode#shutdown().

Prototype

public void shutdown() 

Document

Shut down this instance of the datanode.
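
For orientation, here is a minimal, hedged sketch of calling shutdown() on the DataNodes of a test cluster. It assumes the stock org.apache.hadoop.hdfs.MiniDFSCluster test harness and is not taken from the sources listed below; the class name and node count are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class DataNodeShutdownSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Start a small in-process HDFS cluster (two DataNodes) for illustration only.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
        try {
            cluster.waitActive();
            // Shut down each DataNode instance directly via DataNode#shutdown().
            for (DataNode dn : cluster.getDataNodes()) {
                dn.shutdown();
            }
        } finally {
            cluster.shutdown(); // stops the NameNode (and any remaining DataNodes)
        }
    }
}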

Usage

From source file: com.mellanox.r4h.MiniDFSCluster.java

License: Apache License

/**
 * Shutdown all DataNodes started by this class. The NameNode
 * is left running so that new DataNodes may be started.
 */
public void shutdownDataNodes() {
    for (int i = dataNodes.size() - 1; i >= 0; i--) {
        LOG.info("Shutting down DataNode " + i);
        DataNode dn = dataNodes.remove(i).datanode;
        dn.shutdown();
        numDataNodes--;
    }
}
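
As a hedged follow-up (not part of the r4h sources): a test would typically pair shutdownDataNodes() with startDataNodes(), since the NameNode is left running and can accept fresh DataNodes. The method names follow the stock org.apache.hadoop.hdfs.MiniDFSCluster API, which this fork is assumed to mirror; the node count is illustrative.

// Hypothetical fragment: recycle the DataNode layer while the NameNode keeps running.
// "cluster" and "conf" are assumed to come from a setup like the sketch above.
static void recycleDataNodes(MiniDFSCluster cluster, Configuration conf) throws IOException {
    cluster.shutdownDataNodes();                       // calls DataNode#shutdown() on every node
    cluster.startDataNodes(conf, 3, true, null, null); // start three fresh DataNodes
    cluster.waitActive();                              // wait until they register with the NameNode
}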

From source file: com.mellanox.r4h.MiniDFSCluster.java

License: Apache License

public synchronized DataNodeProperties stopDataNode(int i) {
    if (i < 0 || i >= dataNodes.size()) {
        return null;
    }
    DataNodeProperties dnprop = dataNodes.remove(i);
    DataNode dn = dnprop.datanode;
    LOG.info("MiniDFSCluster Stopping DataNode " + dn.getDisplayName() + " from a total of "
            + (dataNodes.size() + 1) + " datanodes.");
    dn.shutdown();
    numDataNodes--;
    return dnprop;
}
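
A hedged usage sketch (not from the listed sources): stopDataNode() returns the node's DataNodeProperties, which restartDataNode() can use to bring the same node back with its previous storage. The visibility of DataNodeProperties differs across Hadoop versions, so this assumes a version or package where it is accessible.

// Hypothetical fragment: take one DataNode offline, then revive it.
static void stopAndRestartFirstDataNode(MiniDFSCluster cluster) throws IOException {
    MiniDFSCluster.DataNodeProperties dnprop = cluster.stopDataNode(0); // invokes DataNode#shutdown()
    // ... exercise the cluster with that DataNode missing ...
    cluster.restartDataNode(dnprop); // restart the stopped node
    cluster.waitActive();
}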

From source file: com.uber.hoodie.common.table.log.HoodieLogFormatAppendFailureTest.java

License: Apache License

@Test(timeout = 60000)
public void testFailedToGetAppendStreamFromHDFSNameNode()
        throws IOException, URISyntaxException, InterruptedException, TimeoutException {

    // Use some fs like LocalFileSystem, that does not support appends
    String uuid = UUID.randomUUID().toString();
    Path localPartitionPath = new Path("/tmp/");
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path(localPartitionPath, uuid);
    fs.mkdirs(testPath);

    // Some data & append.
    List<IndexedRecord> records = SchemaTestUtil.generateTestRecords(0, 10);
    Map<HoodieLogBlock.HeaderMetadataType, String> header = Maps.newHashMap();
    header.put(HoodieLogBlock.HeaderMetadataType.INSTANT_TIME, "100");
    header.put(HoodieLogBlock.HeaderMetadataType.SCHEMA, getSimpleSchema().toString());
    HoodieAvroDataBlock dataBlock = new HoodieAvroDataBlock(records, header);

    Writer writer = HoodieLogFormat.newWriterBuilder().onParentPath(testPath)
            .withFileExtension(HoodieArchivedLogFile.ARCHIVE_EXTENSION).withFileId("commits" + ".archive")
            .overBaseCommit("").withFs(fs).build();

    writer = writer.appendBlock(dataBlock);
    // get the current log file version to compare later
    int logFileVersion = writer.getLogFile().getLogVersion();
    Path logFilePath = writer.getLogFile().getPath();
    writer.close();

    // Wait for 3 times replication of file
    DFSTestUtil.waitReplication(fs, logFilePath, (short) 3);
    // Shut down all DNs that have the last block location for the file
    LocatedBlocks lbs = cluster.getFileSystem().getClient().getNamenode()
            .getBlockLocations("/tmp/" + uuid + "/" + logFilePath.getName(), 0, Long.MAX_VALUE);
    List<DataNode> dnsOfCluster = cluster.getDataNodes();
    DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
    for (DataNode dn : dnsOfCluster) {
        for (DatanodeInfo loc : dnsWithLocations) {
            if (dn.getDatanodeId().equals(loc)) {
                dn.shutdown();
                cluster.stopDataNode(dn.getDisplayName());
                DFSTestUtil.waitForDatanodeDeath(dn);
            }
        }
    }
    // Wait for the replication of this file to go down to 0
    DFSTestUtil.waitReplication(fs, logFilePath, (short) 0);

    // Opening a new Writer right now will throw IOException. The code should handle this, rollover the logfile and
    // return a new writer with a bumped up logVersion
    writer = HoodieLogFormat.newWriterBuilder().onParentPath(testPath)
            .withFileExtension(HoodieArchivedLogFile.ARCHIVE_EXTENSION).withFileId("commits" + ".archive")
            .overBaseCommit("").withFs(fs).build();
    // The log version should be different for this new writer
    Assert.assertFalse(writer.getLogFile().getLogVersion() == logFileVersion);
}
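
In this test, DataNode#shutdown() is what takes the replicas of the last block offline: once every DataNode holding that block is down and the file's replication has dropped to zero, the next attempt to obtain an append stream fails, and the writer is expected to roll the log over to a new file version instead of surfacing the IOException.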

From source file: io.fabric8.hadoop.hdfs.DataNodeFactory.java

License: Apache License

@Override
protected void doDelete(DataNode service) throws Exception {
    service.shutdown();
}