List of usage examples for org.apache.hadoop.hdfs.protocol.Block#getNumBytes()
public long getNumBytes()
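getNumBytes() returns the length in bytes recorded for a block. Before the full examples, a minimal standalone sketch of the call; the block ID, length, and generation-stamp values here are hypothetical, chosen purely for illustration:

import org.apache.hadoop.hdfs.protocol.Block;

public class BlockLengthExample {
    public static void main(String[] args) {
        // Hypothetical values for illustration only.
        long blockId = 1073741825L;
        long numBytes = 134217728L; // 128 MB
        long generationStamp = 1001L;

        Block block = new Block(blockId, numBytes, generationStamp);

        // getNumBytes() reports the byte length recorded for the block.
        System.out.println("Block " + block + " has length " + block.getNumBytes() + " bytes");
    }
}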
From source file: backup.namenode.NameNodeRestoreProcessor.java
License: Apache License
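In this NameNode backup processor, getNumBytes() supplies the recorded block length when rebuilding an ExtendedBlock for each corrupt replica so that a restore can be requested: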
private boolean checkForBlocksToRestore() throws Exception {
    String blockPoolId = namesystem.getBlockPoolId();
    Iterator<? extends Block> blockIterator = blockManager.getCorruptReplicaBlockIterator();
    boolean atLeastOneRestoreRequest = false;
    while (blockIterator.hasNext()) {
        Block block = blockIterator.next();
        long blockId = block.getBlockId();
        long length = block.getNumBytes();
        long generationStamp = block.getGenerationStamp();
        ExtendedBlock extendedBlock = new ExtendedBlock(blockPoolId, blockId, length, generationStamp);
        if (!hasRestoreBeenRequested(extendedBlock)) {
            LOG.info("Need to restore block {}", extendedBlock);
            requestRestoreInternal(extendedBlock);
            atLeastOneRestoreRequest = true;
        }
    }
    return atLeastOneRestoreRequest;
}
From source file: common.DataNode.java
License: Apache License
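Here a DataNode compares the NameNode-recorded length (getNumBytes()) against the on-disk replica length before replicating; a shorter on-disk length is treated as corruption and reported back to the NameNode: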
private void transferBlock(Block block, DatanodeInfo xferTargets[]) throws IOException {
    if (!data.isValidBlock(block)) {
        // block does not exist or is under-construction
        String errStr = "Can't send invalid block " + block;
        LOG.info(errStr);
        namenode.errorReport(dnRegistration, DatanodeProtocol.INVALID_BLOCK, errStr);
        return;
    }

    // Check if NN recorded length matches on-disk length
    long onDiskLength = data.getLength(block);
    if (block.getNumBytes() > onDiskLength) {
        // Shorter on-disk len indicates corruption so report NN the corrupt block
        namenode.reportBadBlocks(new LocatedBlock[] { new LocatedBlock(block,
                new DatanodeInfo[] { new DatanodeInfo(dnRegistration) }) });
        LOG.info("Can't replicate block " + block + " because on-disk length " + onDiskLength
                + " is shorter than NameNode recorded length " + block.getNumBytes());
        return;
    }

    int numTargets = xferTargets.length;
    if (numTargets > 0) {
        if (LOG.isInfoEnabled()) {
            StringBuilder xfersBuilder = new StringBuilder();
            for (int i = 0; i < numTargets; i++) {
                xfersBuilder.append(xferTargets[i].getName());
                xfersBuilder.append(" ");
            }
            LOG.info(dnRegistration + " Starting thread to transfer block " + block + " to " + xfersBuilder);
        }
        new Daemon(new DataTransfer(xferTargets, block, this)).start();
    }
}
From source file: common.DataNode.java
License: Apache License
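During block recovery, replica lengths from getNumBytes() drive the synchronization decision: finalized replicas must all agree on one length, while RBW/RWR replicas are truncated to the minimum length among the participating replicas: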
/** Block synchronization */
void syncBlock(RecoveringBlock rBlock, List<BlockRecord> syncList) throws IOException {
    Block block = rBlock.getBlock();
    long recoveryId = rBlock.getNewGenerationStamp();
    if (LOG.isDebugEnabled()) {
        LOG.debug("block=" + block + ", (length=" + block.getNumBytes() + "), syncList=" + syncList);
    }

    // syncList.isEmpty() means that all data-nodes do not have the block
    // or their replicas have 0 length.
    // The block can be deleted.
    if (syncList.isEmpty()) {
        namenode.commitBlockSynchronization(block, recoveryId, 0, true, true, DatanodeID.EMPTY_ARRAY);
        return;
    }

    // Calculate the best available replica state.
    ReplicaState bestState = ReplicaState.RWR;
    long finalizedLength = -1;
    for (BlockRecord r : syncList) {
        assert r.rInfo.getNumBytes() > 0 : "zero length replica";
        ReplicaState rState = r.rInfo.getOriginalReplicaState();
        if (rState.getValue() < bestState.getValue())
            bestState = rState;
        if (rState == ReplicaState.FINALIZED) {
            if (finalizedLength > 0 && finalizedLength != r.rInfo.getNumBytes())
                throw new IOException("Inconsistent size of finalized replicas. " + "Replica " + r.rInfo
                        + " expected size: " + finalizedLength);
            finalizedLength = r.rInfo.getNumBytes();
        }
    }

    // Calculate list of nodes that will participate in the recovery
    // and the new block size
    List<BlockRecord> participatingList = new ArrayList<BlockRecord>();
    Block newBlock = new Block(block.getBlockId(), -1, recoveryId);
    switch (bestState) {
    case FINALIZED:
        assert finalizedLength > 0 : "finalizedLength is not positive";
        for (BlockRecord r : syncList) {
            ReplicaState rState = r.rInfo.getOriginalReplicaState();
            if (rState == ReplicaState.FINALIZED
                    || rState == ReplicaState.RBW && r.rInfo.getNumBytes() == finalizedLength)
                participatingList.add(r);
        }
        newBlock.setNumBytes(finalizedLength);
        break;
    case RBW:
    case RWR:
        long minLength = Long.MAX_VALUE;
        for (BlockRecord r : syncList) {
            ReplicaState rState = r.rInfo.getOriginalReplicaState();
            if (rState == bestState) {
                minLength = Math.min(minLength, r.rInfo.getNumBytes());
                participatingList.add(r);
            }
        }
        newBlock.setNumBytes(minLength);
        break;
    case RUR:
    case TEMPORARY:
        assert false : "bad replica state: " + bestState;
    }

    List<DatanodeID> failedList = new ArrayList<DatanodeID>();
    List<DatanodeID> successList = new ArrayList<DatanodeID>();
    for (BlockRecord r : participatingList) {
        try {
            Block reply = r.datanode.updateReplicaUnderRecovery(r.rInfo, recoveryId, newBlock.getNumBytes());
            assert reply.equals(newBlock) && reply.getNumBytes() == newBlock.getNumBytes()
                    : "Updated replica must be the same as the new block.";
            successList.add(r.id);
        } catch (IOException e) {
            InterDatanodeProtocol.LOG.warn(
                    "Failed to updateBlock (newblock=" + newBlock + ", datanode=" + r.id + ")", e);
            failedList.add(r.id);
        }
    }

    // If any of the data-nodes failed, the recovery fails, because
    // we never know the actual state of the replica on failed data-nodes.
    // The recovery should be started over.
    if (!failedList.isEmpty()) {
        StringBuilder b = new StringBuilder();
        for (DatanodeID id : failedList) {
            b.append("\n  " + id);
        }
        throw new IOException("Cannot recover " + block + ", the following " + failedList.size()
                + " data-nodes failed {" + b + "\n}");
    }

    // Notify the name-node about successfully recovered replicas.
    DatanodeID[] nlist = successList.toArray(new DatanodeID[successList.size()]);
    namenode.commitBlockSynchronization(block, newBlock.getGenerationStamp(), newBlock.getNumBytes(), true,
            false, nlist);
}
From source file: io.hops.experiments.benchmarks.blockreporting.TinyDatanodesHelper.java
License: Apache License
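This benchmark helper persists each datanode's block inventory, writing the block ID, length (getNumBytes()), and generation stamp as one comma-separated line per block: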
public void writeDataNodesStateToDisk(TinyDatanode[] datanodes) throws IOException {
    BufferedWriter writer = new BufferedWriter(new FileWriter(DATANODES_STATE));
    for (int dn = 0; dn < datanodes.length; dn++) {
        for (Block block : datanodes[dn].blocks) {
            if (block != null) {
                writer.write(Joiner.on(",").join(dn, block.getBlockId(), block.getNumBytes(),
                        block.getGenerationStamp()));
                writer.newLine();
            }
        }
    }
    writer.close();
}
From source file: org.openflamingo.remote.thrift.thriftfs.ThriftUtils.java
License: Apache License
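Finally, a Thrift bridge copies getNumBytes() into its own Block representation when translating an HDFS LocatedBlock for Thrift clients: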
public static Block toThrift(LocatedBlock block, String path, Map<DatanodeID, Integer> thriftPorts)
        throws java.io.IOException {
    if (block == null) {
        return new Block();
    }

    List<DatanodeInfo> nodes = new ArrayList<DatanodeInfo>();
    for (org.apache.hadoop.hdfs.protocol.DatanodeInfo n : block.getLocations()) {
        DatanodeInfo node = toThrift(n, thriftPorts);
        if (node.getThriftPort() != Constants.UNKNOWN_THRIFT_PORT) {
            nodes.add(node);
        }
    }

    org.apache.hadoop.hdfs.protocol.Block b = block.getBlock();
    return new Block(b.getBlockId(), path, b.getNumBytes(), b.getGenerationStamp(), nodes,
            block.getStartOffset(), block.getBlockToken().encodeToUrlString());
}