Example usage for org.apache.hadoop.hdfs BlockMissingException BlockMissingException

List of usage examples for org.apache.hadoop.hdfs BlockMissingException BlockMissingException

Introduction

In this page you can find the example usage for org.apache.hadoop.hdfs BlockMissingException BlockMissingException.

Prototype

public BlockMissingException(String filename, String description, long offset) 

Source Link

Document

An exception that indicates that a file was corrupted.

Usage

From source file: com.mellanox.r4h.DFSInputStream.java

License:Apache License

/**
 * Chooses a datanode to read the given block from, retrying until a usable
 * node is found or the retry budget is exhausted.
 *
 * <p>On each failed attempt it refreshes the block locations from the
 * namenode ({@code openInfo()} + {@code getBlockAt(...)}), clears the dead
 * node set, and sleeps for a randomized, growing backoff window before
 * retrying.
 *
 * @param block the located block to read
 * @param ignoredNodes datanodes to skip when choosing (may be null per callers)
 * @return the chosen datanode/address pair
 * @throws BlockMissingException if no node could be obtained after
 *         {@code dfsClient.getMaxBlockAcquireFailures()} failures
 * @throws java.io.InterruptedIOException if the thread is interrupted while
 *         waiting between retries (interrupt status is restored)
 */
private DNAddrPair chooseDataNode(LocatedBlock block, Collection<DatanodeInfo> ignoredNodes)
        throws IOException {
    while (true) {
        try {
            return getBestNodeDNAddrPair(block, ignoredNodes);
        } catch (IOException ie) {
            String errMsg = getBestNodeDNAddrPairErrorString(block.getLocations(), deadNodes, ignoredNodes);
            String blockInfo = block.getBlock() + " file=" + src;
            if (failures >= dfsClient.getMaxBlockAcquireFailures()) {
                // Retry budget exhausted: give up and surface the failure to the caller.
                String description = "Could not obtain block: " + blockInfo;
                DFSClient.LOG.warn(description + errMsg + ". Throwing a BlockMissingException");
                throw new BlockMissingException(src, description, block.getStartOffset());
            }

            DatanodeInfo[] nodes = block.getLocations();
            if (nodes == null || nodes.length == 0) {
                DFSClient.LOG.info("No node available for " + blockInfo);
            }
            DFSClient.LOG.info("Could not obtain " + block.getBlock() + " from any node: " + ie + errMsg
                    + ". Will get new block locations from namenode and retry...");
            try {
                // Introducing a random factor to the wait time before another retry.
                // The wait time is dependent on # of failures and a random factor.
                // At the first time of getting a BlockMissingException, the wait time
                // is a random number between 0..3000 ms. If the first retry
                // still fails, we will wait 3000 ms grace period before the 2nd retry.
                // Also at the second retry, the waiting window is expanded to 6000 ms
                // alleviating the request rate from the server. Similarly the 3rd retry
                // will wait 6000ms grace period before retry and the waiting window is
                // expanded to 9000ms.
                final int timeWindow = dfsClient.getConf().getTimeWindow();
                double waitTime = timeWindow * failures + // grace period for the last round of attempt
                        timeWindow * (failures + 1) * DFSUtil.getRandom().nextDouble(); // expanding time window for each failure
                DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1)
                        + " IOException, will wait for " + waitTime + " msec.");
                Thread.sleep((long) waitTime);
            } catch (InterruptedException iex) {
                // Don't swallow the interrupt: restore the thread's interrupt status
                // and abort the read, so callers can cancel a stuck retry loop.
                // InterruptedIOException is an IOException, so the method's
                // throws-contract is unchanged.
                Thread.currentThread().interrupt();
                throw new java.io.InterruptedIOException(
                        "Interrupted while choosing DataNode for read.");
            }
            deadNodes.clear(); // 2nd option is to remove only nodes[blockId]
            openInfo();
            block = getBlockAt(block.getStartOffset());
            failures++;
            continue;
        }
    }
}