Example usage for org.apache.hadoop.hdfs.protocol BlockLocalPathInfo getBlockPath

List of usage examples for org.apache.hadoop.hdfs.protocol BlockLocalPathInfo getBlockPath

Introduction

On this page you can find example usage for org.apache.hadoop.hdfs.protocol BlockLocalPathInfo getBlockPath.

Prototype

public String getBlockPath() 

Document

Get the Block data file.
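
A minimal sketch of what the call provides, assuming a BlockLocalPathInfo instance named info has already been obtained (for example from FsDatasetSpi.getBlockLocalPathInfo or ClientDatanodeProtocol.getBlockLocalPathInfo, as in the examples below):

// Sketch only: info is an already obtained BlockLocalPathInfo (see the examples below).
String blockPath = info.getBlockPath(); // absolute local path of the block data file
String metaPath = info.getMetaPath();   // path of the matching checksum (.meta) file
File blockFile = new File(blockPath);   // the block can now be read directly from the local filesystem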

Usage

From source file: backup.datanode.DataNodeBackupProcessor.java

License: Apache License

@Override
protected long doBackup(ExtendedBlock extendedBlock, boolean force) throws Exception {
    if (!force) {
        long blockId = extendedBlock.getBlockId();
        DatanodeUuids result;
        try {
            result = _nameNodeClient.getDatanodeUuids(blockId);
        } catch (Exception e) {
            if (LOG.isDebugEnabled()) {
                LOG.error("datanode UUID lookup failed " + e.getCause(), e);
            } else {
                LOG.error("datanode UUID lookup failed {}", e.getCause());
            }
            result = new DatanodeUuids();
        }
        List<String> datanodeUuids = result.getDatanodeUuids();
        if (!datanodeUuids.isEmpty()) {
            LOG.debug("datanode UUIDs for block id {} {}", blockId, datanodeUuids);
            List<String> orderedDataNodeUuids = getOrderDataNodeUuids(datanodeUuids, blockId);
            String datanodeUuid = _datanode.getDatanodeUuid();
            int binarySearch = Collections.binarySearch(orderedDataNodeUuids, datanodeUuid);
            if (binarySearch == 0) {
                LOG.debug("datanode UUID {} first in list {} for block id {}, performing backup", datanodeUuid,
                        datanodeUuids, blockId);
            } else if (binarySearch > 0) {
                return binarySearch * _retryDelay;
            } else {
                LOG.debug("datanode UUID {} not found in list {} for block id {}, forcing backup", datanodeUuid,
                        datanodeUuids, blockId);
            }
        } else {
            LOG.debug("datanode UUIDs for block id {} empty, forcing backup", blockId);
        }
    }

    if (_backupStore.hasBlock(extendedBlock)) {
        LOG.debug("block {} already backed up", extendedBlock);
        return 0;
    }

    FsDatasetSpi<?> fsDataset = _datanode.getFSDataset();

    org.apache.hadoop.hdfs.protocol.ExtendedBlock heb = BackupUtil.toHadoop(extendedBlock);
    if (!fsDataset.isValidBlock(heb)) {
        return 0;
    }
    BlockLocalPathInfo info = fsDataset.getBlockLocalPathInfo(heb);
    String blockPath = info.getBlockPath();
    if (Files.isSymbolicLink(new File(blockPath).toPath())) {
        LOG.debug("block {} is symbolic link, not backing up", extendedBlock);
        return 0;
    }

    LOG.info("performing block {}", extendedBlock);
    long numBytes = heb.getNumBytes();
    try (LengthInputStream data = new LengthInputStream(trackThroughPut(fsDataset.getBlockInputStream(heb, 0)),
            numBytes)) {
        org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream tmeta;
        tmeta = fsDataset.getMetaDataInputStream(heb);
        try (LengthInputStream meta = new LengthInputStream(trackThroughPut(tmeta), tmeta.getLength())) {
            _backupStore.backupBlock(extendedBlock, data, meta);
            return 0;
        }
    } catch (IOException e) {
        if (LOG.isDebugEnabled() || !e.getMessage().contains("is not valid")) {
            LOG.error(e.getMessage(), e);
        } else {
            LOG.debug("block {} has been removed {}", extendedBlock);
        }
        return 0;
    }
}
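
The example above uses getBlockPath() only to detect symlinked block files; the actual copy goes through the dataset's input streams. Where direct file access is acceptable, the block and its checksum file could also be copied straight from the paths in BlockLocalPathInfo. A minimal sketch, reusing the fsDataset and heb variables from the example; backupDir is an assumed local target directory, not part of the original code:

// Sketch only: copy the block data file and its .meta file by local path.
// fsDataset and heb come from the example above; backupDir is an assumed
// java.nio.file.Path pointing at a local backup directory.
BlockLocalPathInfo info = fsDataset.getBlockLocalPathInfo(heb);
java.nio.file.Path blockFile = java.nio.file.Paths.get(info.getBlockPath());
java.nio.file.Path metaFile = java.nio.file.Paths.get(info.getMetaPath());
java.nio.file.Files.copy(blockFile, backupDir.resolve(blockFile.getFileName()));
java.nio.file.Files.copy(metaFile, backupDir.resolve(metaFile.getFileName()));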

From source file: com.splunk.shuttl.prototype.symlink.BucketBlockSymlinkPrototypeTest.java

License: Apache License

private void doSymlinkPathInDir(File fileInDir, LocatedBlocks blockLocations, List<LocatedBlock> locatedBlocks)
        throws IOException {
    assertEquals(1, locatedBlocks.size());
    LocatedBlock locatedBlock = blockLocations.get(0);
    assertEquals(1, locatedBlock.getLocations().length);

    DatanodeInfo datanodeInfo = locatedBlock.getLocations()[0];
    ClientDatanodeProtocol createClientDatanodeProtocolProxy = HadoopFileLocationPrototypeTest
            .createClientDatanodeProtocolProxy(datanodeInfo, hadoopFileSystem.getConf(), 1000);

    BlockLocalPathInfo blockLocalPathInfo = createClientDatanodeProtocolProxy
            .getBlockLocalPathInfo(locatedBlock.getBlock(), locatedBlock.getBlockToken());
    String absolutePathToBlock = blockLocalPathInfo.getBlockPath();
    assertTrue(new File(absolutePathToBlock).exists());
    FileUtil.symLink(absolutePathToBlock, fileInDir.getAbsolutePath());
}
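
A possible follow-up assertion on the link created above (a sketch, not part of the original test), reusing the fileInDir and absolutePathToBlock variables:

// Sketch only: verify that fileInDir is now a symlink resolving to the block file.
java.nio.file.Path link = fileInDir.toPath();
assertTrue(java.nio.file.Files.isSymbolicLink(link));
assertEquals(new File(absolutePathToBlock).toPath(), java.nio.file.Files.readSymbolicLink(link));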

From source file: com.splunk.shuttl.prototype.symlink.HadoopFileLocationPrototypeTest.java

License: Apache License

/**
 * Before running the test: <br/>
 * <br/>
 * 1. run `ant hadoop-setup`<br/>
 * 2. run the following command in build-cache/hadoop: bin/hadoop fs -put
 * ../../test/resources/splunk-buckets/SPLUNK_BUCKET/
 * db_1336330530_1336330530_0 / <br/>
 * <br/>
 * Note: This will be automated soon!
 */
@Test(groups = { "prototype" })
public void printPathToABlockOnHadoop() throws IOException {
    // Connect to hdfs. Needs to be HDFS because we're casting to
    // org.apache.hadoop.hdfs.DistributedFileSystem
    URI uri = URI.create("hdfs://localhost:9000");
    fileSystem = (DistributedFileSystem) FileSystem.get(uri, new Configuration());
    namenode = fileSystem.getClient().namenode;

    // Get the path to the bucket that's been put to hadoop.
    Path bucketPath = new Path("/db_1336330530_1336330530_0");
    assertTrue(fileSystem.exists(bucketPath));

    // path to any file in the bucket. Chose .csv because it's
    // readable/verifiable.
    String filePath = "/db_1336330530_1336330530_0/bucket_info.csv";

    // Get location of the blocks for the file.
    LocatedBlocks blockLocations = namenode.getBlockLocations(filePath, 0, Long.MAX_VALUE);
    // There exists only one block because of how everything is set up.
    LocatedBlock locatedBlock = blockLocations.getLocatedBlocks().get(0);
    Block block = locatedBlock.getBlock();
    // There exists only one node.
    DatanodeInfo datanodeInfo = locatedBlock.getLocations()[0];

    // Get a proxy to the Datanode containing the block. (This took a while to
    // figure out)
    ClientDatanodeProtocol createClientDatanodeProtocolProxy = createClientDatanodeProtocolProxy(datanodeInfo,
            fileSystem.getConf(), 1000);

    // Get the local block path. Requires two settings on the server side of
    // hadoop (see the configuration sketch after this test):
    // 1. dfs.client.read.shortcircuit : 'true'
    // 2. dfs.block.local-path-access.user : '<user running the tests, e.g.
    // periksson>'
    BlockLocalPathInfo blockLocalPathInfo = createClientDatanodeProtocolProxy.getBlockLocalPathInfo(block,
            locatedBlock.getBlockToken());
    // Printing the local path to the block, so we can access it!!
    System.out.println("BLOCK PATH: " + blockLocalPathInfo.getBlockPath() + " !!!!!!!!!!!!!!!!!!");
}
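
The two datanode-side settings named in the comments could, for example, be applied to the Configuration used to bring up an in-process cluster such as MiniDFSCluster (an assumption here; the test above expects them to already be set on the running datanode):

// Sketch only: the two settings required before getBlockLocalPathInfo succeeds,
// applied to a Configuration for an assumed in-process datanode/cluster.
Configuration conf = new Configuration();
conf.setBoolean("dfs.client.read.shortcircuit", true);
// user allowed to request local block paths; mirrors '<user running the tests>' above
conf.set("dfs.block.local-path-access.user", System.getProperty("user.name"));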