Example usage for org.apache.hadoop.hdfs.server.datanode DataNode getDatanodeId

Introduction

On this page you can find example usage for org.apache.hadoop.hdfs.server.datanode DataNode getDatanodeId.

Prototype

@VisibleForTesting
public DatanodeID getDatanodeId()
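
For reference, here is a minimal sketch of how the returned DatanodeID is typically inspected in a MiniDFSCluster-based test. The class name and printing logic are illustrative assumptions, not taken from the examples below:

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

public class DatanodeIdSketch {
    // Print the identity of every DataNode in a running mini cluster.
    static void printDatanodeIds(MiniDFSCluster cluster) {
        for (DataNode dn : cluster.getDataNodes()) {
            DatanodeID id = dn.getDatanodeId();
            // getXferAddr() is the host:port used for data transfer; the
            // usage examples below match specific DataNodes by this address.
            System.out.println(id.getXferAddr() + " uuid=" + id.getDatanodeUuid());
        }
    }
}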

Usage

From source file: com.mellanox.r4h.MiniDFSCluster.java

License: Apache License

public synchronized DataNodeProperties stopDataNode(String dnName) {
    int i;
    for (i = 0; i < dataNodes.size(); i++) {
        DataNode dn = dataNodes.get(i).datanode;
        LOG.info("DN name=" + dnName + " found DN=" + dn + " with name=" + dn.getDisplayName());
        if (dnName.equals(dn.getDatanodeId().getXferAddr())) {
            break;
        }
    }
    return stopDataNode(i);
}
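
A note on the design: stopDataNode(String) identifies the target DataNode by its transfer address, comparing the supplied name against DatanodeID.getXferAddr() for each node before delegating to the index-based stopDataNode(int) overload.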

From source file: com.uber.hoodie.common.table.log.HoodieLogFormatAppendFailureTest.java

License: Apache License

@Test(timeout = 60000)
public void testFailedToGetAppendStreamFromHDFSNameNode()
        throws IOException, URISyntaxException, InterruptedException, TimeoutException {

    // Create a unique test path on the mini cluster's file system
    String uuid = UUID.randomUUID().toString();
    Path localPartitionPath = new Path("/tmp/");
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path(localPartitionPath, uuid);
    fs.mkdirs(testPath);

    // Generate some test records and append them as an Avro data block.
    List<IndexedRecord> records = SchemaTestUtil.generateTestRecords(0, 10);
    Map<HoodieLogBlock.HeaderMetadataType, String> header = Maps.newHashMap();
    header.put(HoodieLogBlock.HeaderMetadataType.INSTANT_TIME, "100");
    header.put(HoodieLogBlock.HeaderMetadataType.SCHEMA, getSimpleSchema().toString());
    HoodieAvroDataBlock dataBlock = new HoodieAvroDataBlock(records, header);

    Writer writer = HoodieLogFormat.newWriterBuilder().onParentPath(testPath)
            .withFileExtension(HoodieArchivedLogFile.ARCHIVE_EXTENSION).withFileId("commits" + ".archive")
            .overBaseCommit("").withFs(fs).build();

    writer = writer.appendBlock(dataBlock);
    // get the current log file version to compare later
    int logFileVersion = writer.getLogFile().getLogVersion();
    Path logFilePath = writer.getLogFile().getPath();
    writer.close();

    // Wait until the file is replicated to 3 DataNodes
    DFSTestUtil.waitReplication(fs, logFilePath, (short) 3);
    // Shut down all DNs that have the last block location for the file
    LocatedBlocks lbs = cluster.getFileSystem().getClient().getNamenode()
            .getBlockLocations("/tmp/" + uuid + "/" + logFilePath.getName(), 0, Long.MAX_VALUE);
    List<DataNode> dnsOfCluster = cluster.getDataNodes();
    DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
    for (DataNode dn : dnsOfCluster) {
        for (DatanodeInfo loc : dnsWithLocations) {
            if (dn.getDatanodeId().equals(loc)) {
                dn.shutdown();
                cluster.stopDataNode(dn.getDisplayName());
                DFSTestUtil.waitForDatanodeDeath(dn);
            }
        }
    }
    // Wait for the replication of this file to go down to 0
    DFSTestUtil.waitReplication(fs, logFilePath, (short) 0);

    // Opening a new Writer right now will throw an IOException. The code should handle this, roll over the
    // log file, and return a new writer with a bumped-up log version
    writer = HoodieLogFormat.newWriterBuilder().onParentPath(testPath)
            .withFileExtension(HoodieArchivedLogFile.ARCHIVE_EXTENSION).withFileId("commits" + ".archive")
            .overBaseCommit("").withFs(fs).build();
    // The log version should be different for this new writer
    Assert.assertNotEquals(logFileVersion, writer.getLogFile().getLogVersion());
}
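
A note on the equality check above: DatanodeInfo extends DatanodeID, and DatanodeID.equals() compares the transfer address and datanode UUID, so dn.getDatanodeId().equals(loc) matches a live DataNode in the cluster against a replica location reported for the file's last block.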