List of usage examples for org.apache.hadoop.hdfs.DFSClientFaultInjector#get()
public static DFSClientFaultInjector get()
From source file: com.mellanox.r4h.DFSInputStream.java
License:Apache License
private void actualGetFromOneDataNode(final DNAddrPair datanode, LocatedBlock block, final long start, final long end, byte[] buf, int offset, Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap) throws IOException { DFSClientFaultInjector.get().startFetchFromDatanode(); int refetchToken = 1; // only need to get a new access token once int refetchEncryptionKey = 1; // only need to get a new encryption key once while (true) { // cached block locations may have been updated by chooseDataNode() // or fetchBlockAt(). Always get the latest list of locations at the // start of the loop. CachingStrategy curCachingStrategy; boolean allowShortCircuitLocalReads; block = getBlockAt(block.getStartOffset()); synchronized (infoLock) { curCachingStrategy = cachingStrategy; allowShortCircuitLocalReads = !shortCircuitForbidden(); }/*from w w w. j a v a 2s.co m*/ DatanodeInfo chosenNode = datanode.info; InetSocketAddress targetAddr = datanode.addr; StorageType storageType = datanode.storageType; BlockReader reader = null; try { DFSClientFaultInjector.get().fetchFromDatanodeException(); Token<BlockTokenIdentifier> blockToken = block.getBlockToken(); int len = (int) (end - start + 1); reader = new BlockReaderFactory(dfsClient.getConf()).setInetSocketAddress(targetAddr) .setRemotePeerFactory(dfsClient).setDatanodeInfo(chosenNode).setStorageType(storageType) .setFileName(src).setBlock(block.getBlock()).setBlockToken(blockToken).setStartOffset(start) .setVerifyChecksum(verifyChecksum).setClientName(dfsClient.clientName).setLength(len) .setCachingStrategy(curCachingStrategy) .setAllowShortCircuitLocalReads(allowShortCircuitLocalReads) .setClientCacheContext(dfsClient.getClientContext()).setUserGroupInformation(dfsClient.ugi) .setConfiguration(dfsClient.getConfiguration()).build(); int nread = reader.readAll(buf, offset, len); updateReadStatistics(readStatistics, nread, reader); if (nread != len) { throw new IOException( "truncated return from reader.read(): " + "excpected " + len + ", 
got " + nread); } DFSClientFaultInjector.get().readFromDatanodeDelay(); return; } catch (ChecksumException e) { String msg = "fetchBlockByteRange(). Got a checksum exception for " + src + " at " + block.getBlock() + ":" + e.getPos() + " from " + chosenNode; DFSClient.LOG.warn(msg); // we want to remember what we have tried addIntoCorruptedBlockMap(block.getBlock(), chosenNode, corruptedBlockMap); addToDeadNodes(chosenNode); throw new IOException(msg); } catch (IOException e) { if (e instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) { DFSClient.LOG.info("Will fetch a new encryption key and retry, " + "encryption key was invalid when connecting to " + targetAddr + " : " + e); // The encryption key used is invalid. refetchEncryptionKey--; dfsClient.clearDataEncryptionKey(); continue; } else if (refetchToken > 0 && tokenRefetchNeeded(e, targetAddr)) { refetchToken--; try { fetchBlockAt(block.getStartOffset()); } catch (IOException fbae) { // ignore IOE, since we can retry it later in a loop } continue; } else { String msg = "Failed to connect to " + targetAddr + " for file " + src + " for block " + block.getBlock() + ":" + e; DFSClient.LOG.warn("Connection failure: " + msg, e); addToDeadNodes(chosenNode); throw new IOException(msg); } } finally { if (reader != null) { reader.close(); } } } }