Example usage for org.apache.hadoop.hdfs.protocol ClientDatanodeProtocol getBlockLocalPathInfo

Introduction

This page collects example usages of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol#getBlockLocalPathInfo.

Prototype

BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block, Token<BlockTokenIdentifier> token)
        throws IOException;

Document

Retrieves the path names of the block file and metadata file stored on the local file system.
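
A minimal call sketch, assuming a hypothetical datanodeProxy (a ClientDatanodeProtocol already connected to the datanode that stores locatedBlock); the two getters correspond to the block file and metadata file mentioned above:

// Sketch only: 'datanodeProxy' and 'locatedBlock' are assumed to already exist.
BlockLocalPathInfo info = datanodeProxy.getBlockLocalPathInfo(locatedBlock.getBlock(),
        locatedBlock.getBlockToken());
String blockFile = info.getBlockPath(); // local path of the block data file
String metaFile = info.getMetaPath();   // local path of the block's checksum/metadata file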

Usage

From source file: com.splunk.shuttl.prototype.symlink.BucketBlockSymlinkPrototypeTest.java

License: Apache License

private void doSymlinkPathInDir(File fileInDir, LocatedBlocks blockLocations, List<LocatedBlock> locatedBlocks)
        throws IOException {
    // The test data is small enough that the file consists of a single block on a single datanode.
    assertEquals(1, locatedBlocks.size());
    LocatedBlock locatedBlock = blockLocations.get(0);
    assertEquals(1, locatedBlock.getLocations().length);

    // Open a ClientDatanodeProtocol proxy to the datanode that stores the block.
    DatanodeInfo datanodeInfo = locatedBlock.getLocations()[0];
    ClientDatanodeProtocol createClientDatanodeProtocolProxy = HadoopFileLocationPrototypeTest
            .createClientDatanodeProtocolProxy(datanodeInfo, hadoopFileSystem.getConf(), 1000);

    // Ask the datanode for the block's local path and symlink it into the target directory.
    BlockLocalPathInfo blockLocalPathInfo = createClientDatanodeProtocolProxy
            .getBlockLocalPathInfo(locatedBlock.getBlock(), locatedBlock.getBlockToken());
    String absolutePathToBlock = blockLocalPathInfo.getBlockPath();
    assertTrue(new File(absolutePathToBlock).exists());
    FileUtil.symLink(absolutePathToBlock, fileInDir.getAbsolutePath());
}
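
In this example the single block backing the bucket file is symlinked into fileInDir with FileUtil.symLink, so the block's contents can be read from the local file system without going through the datanode. The createClientDatanodeProtocolProxy helper it relies on is defined in HadoopFileLocationPrototypeTest, shown in the next example.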

From source file: com.splunk.shuttl.prototype.symlink.HadoopFileLocationPrototypeTest.java

License: Apache License

/**
 * Before running the test: <br/>
 * <br/>
 * 1. run `ant hadoop-setup`<br/>
 * 2. run the following command in build-cache/hadoop: bin/hadoop fs -put
 * ../../test/resources/splunk-buckets/SPLUNK_BUCKET/db_1336330530_1336330530_0 / <br/>
 * <br/>
 * Note: This will be automated soon!
 */
@Test(groups = { "prototype" })
public void printPathToABlockOnHadoop() throws IOException {
    // Connect to hdfs. Needs to be HDFS because we're casting to
    // org.apache.hadoop.hdfs.DistributedFileSystem
    URI uri = URI.create("hdfs://localhost:9000");
    fileSystem = (DistributedFileSystem) FileSystem.get(uri, new Configuration());
    namenode = fileSystem.getClient().namenode;

    // Get the path to the bucket that's been put to hadoop.
    Path bucketPath = new Path("/db_1336330530_1336330530_0");
    assertTrue(fileSystem.exists(bucketPath));

    // path to any file in the bucket. Chose .csv because it's
    // readable/verifiable.
    String filePath = "/db_1336330530_1336330530_0/bucket_info.csv";

    // Get location of the blocks for the file.
    LocatedBlocks blockLocations = namenode.getBlockLocations(filePath, 0, Long.MAX_VALUE);
    // There exists only one block because of how everything is set up.
    LocatedBlock locatedBlock = blockLocations.getLocatedBlocks().get(0);
    Block block = locatedBlock.getBlock();
    // There exists only one node.
    DatanodeInfo datanodeInfo = locatedBlock.getLocations()[0];

    // Get a proxy to the Datanode containing the block. (This took a while to
    // figure out)
    ClientDatanodeProtocol createClientDatanodeProtocolProxy = createClientDatanodeProtocolProxy(datanodeInfo,
            fileSystem.getConf(), 1000);

    // Get the local block path. Requires two settings on the server side of
    // hadoop.
    // 1. dfs.client.read.shortcircuit : 'true'
    // 2. dfs.block.local-path-access.user : '<user running the tests (ie.
    // periksson)>'
    BlockLocalPathInfo blockLocalPathInfo = createClientDatanodeProtocolProxy.getBlockLocalPathInfo(block,
            locatedBlock.getBlockToken());
    // Printing the local path to the block, so we can access it!!
    System.out.println("BLOCK PATH: " + blockLocalPathInfo.getBlockPath() + " !!!!!!!!!!!!!!!!!!");
}
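
For reference, the two server-side settings named in the comments above would normally be placed in the datanode's hdfs-site.xml. This is only a sketch; the user value is the placeholder from the comment and must be replaced with the account actually running the tests:

<!-- Sketch of the hdfs-site.xml entries referenced in the comments above. -->
<property>
    <name>dfs.client.read.shortcircuit</name>
    <value>true</value>
</property>
<property>
    <name>dfs.block.local-path-access.user</name>
    <value>periksson</value> <!-- user running the tests -->
</property>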