Example usage for org.apache.hadoop.hdfs.server.datanode.fsdataset FsDatasetSpi getBlockLocalPathInfo

Introduction

This page shows example usage of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi#getBlockLocalPathInfo.

Prototype

BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock b) throws IOException;

Document

Get BlockLocalPathInfo for the given block.
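
Below is a minimal sketch of calling this method against a datanode's dataset. It assumes an FsDatasetSpi instance obtained from a running DataNode; the class name BlockPathLookup and the helper blockPathOf are hypothetical, added here only for illustration.

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

public class BlockPathLookup {
    // Resolves the local data-file path of a block, or returns null if this
    // datanode has no valid replica of it.
    static String blockPathOf(FsDatasetSpi<?> fsDataset, ExtendedBlock block) throws IOException {
        if (!fsDataset.isValidBlock(block)) {
            return null;
        }
        BlockLocalPathInfo info = fsDataset.getBlockLocalPathInfo(block);
        // info.getMetaPath() gives the path of the matching checksum (.meta) file.
        return info.getBlockPath();
    }
}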

Usage

From source file: backup.datanode.DataNodeBackupProcessor.java

License: Apache License

// Returns 0 when the block has been handled (backed up or skipped), or a
// positive delay after which the backup should be retried.
@Override
protected long doBackup(ExtendedBlock extendedBlock, boolean force) throws Exception {
    if (!force) {
        long blockId = extendedBlock.getBlockId();
        DatanodeUuids result;
        try {
            result = _nameNodeClient.getDatanodeUuids(blockId);
        } catch (Exception e) {
            // Log the full stack trace only when debug logging is enabled.
            if (LOG.isDebugEnabled()) {
                LOG.error("datanode UUID lookup failed " + e.getCause(), e);
            } else {
                LOG.error("datanode UUID lookup failed {}", e.getCause());
            }
            // Fall back to an empty result so the backup logic below still runs.
            result = new DatanodeUuids();
        }
        List<String> datanodeUuids = result.getDatanodeUuids();
        if (!datanodeUuids.isEmpty()) {
            LOG.debug("datanode UUIDs for block id {} {}", blockId, datanodeUuids);
            List<String> orderedDataNodeUuids = getOrderDataNodeUuids(datanodeUuids, blockId);
            String datanodeUuid = _datanode.getDatanodeUuid();
            int binarySearch = Collections.binarySearch(orderedDataNodeUuids, datanodeUuid);
            if (binarySearch == 0) {
                // This datanode is first in the ordered list, so it performs the backup.
                LOG.debug("datanode UUID {} first in list {} for block id {}, performing backup", datanodeUuid,
                        datanodeUuids, blockId);
            } else if (binarySearch > 0) {
                // Another datanode comes earlier in the list; retry later, with the
                // delay growing with this datanode's position.
                return binarySearch * _retryDelay;
            } else {
                LOG.debug("datanode UUID {} not found in list {} for block id {}, forcing backup", datanodeUuid,
                        datanodeUuids, blockId);
            }
        } else {
            LOG.debug("datanode UUIDs for block id {} empty, forcing backup", blockId);
        }
    }

    if (_backupStore.hasBlock(extendedBlock)) {
        LOG.debug("block {} already backed up", extendedBlock);
        return 0;
    }

    FsDatasetSpi<?> fsDataset = _datanode.getFSDataset();

    // Convert to the Hadoop block type and confirm a valid replica is still present locally.
    org.apache.hadoop.hdfs.protocol.ExtendedBlock heb = BackupUtil.toHadoop(extendedBlock);
    if (!fsDataset.isValidBlock(heb)) {
        return 0;
    }
    BlockLocalPathInfo info = fsDataset.getBlockLocalPathInfo(heb);
    String blockPath = info.getBlockPath();
    // Skip block files that are symbolic links rather than regular files.
    if (Files.isSymbolicLink(new File(blockPath).toPath())) {
        LOG.debug("block {} is symbolic link, not backing up", extendedBlock);
        return 0;
    }

    LOG.info("performing backup of block {}", extendedBlock);
    long numBytes = heb.getNumBytes();
    // Stream both the block data and its checksum metadata to the backup store.
    try (LengthInputStream data = new LengthInputStream(trackThroughPut(fsDataset.getBlockInputStream(heb, 0)),
            numBytes)) {
        // Fully qualified to avoid clashing with the LengthInputStream type used above.
        org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream tmeta = fsDataset.getMetaDataInputStream(heb);
        try (LengthInputStream meta = new LengthInputStream(trackThroughPut(tmeta), tmeta.getLength())) {
            _backupStore.backupBlock(extendedBlock, data, meta);
            return 0;
        }
    } catch (IOException e) {
        String message = e.getMessage();
        if (LOG.isDebugEnabled() || message == null || !message.contains("is not valid")) {
            LOG.error(message, e);
        } else {
            // The block was removed between the validity check and the read.
            LOG.debug("block {} has been removed", extendedBlock);
        }
        return 0;
    }
}
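
In the example above, getBlockLocalPathInfo supplies the on-disk path of the block's data file, which the processor uses to skip blocks whose files are symbolic links before streaming the block data and its checksum metadata to the backup store.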