Usage examples for org.apache.hadoop.hdfs.protocol.Block#getGenerationStamp
public long getGenerationStamp()
From source file:backup.namenode.NameNodeRestoreProcessor.java
License:Apache License
private boolean checkForBlocksToRestore() throws Exception { String blockPoolId = namesystem.getBlockPoolId(); Iterator<? extends Block> blockIterator = blockManager.getCorruptReplicaBlockIterator(); boolean atLeastOneRestoreRequest = false; while (blockIterator.hasNext()) { Block block = blockIterator.next(); long blockId = block.getBlockId(); long length = block.getNumBytes(); long generationStamp = block.getGenerationStamp(); ExtendedBlock extendedBlock = new ExtendedBlock(blockPoolId, blockId, length, generationStamp); if (!hasRestoreBeenRequested(extendedBlock)) { LOG.info("Need to restore block {}", extendedBlock); requestRestoreInternal(extendedBlock); atLeastOneRestoreRequest = true; }//from www.j a v a2s . co m } return atLeastOneRestoreRequest; }
From source file:common.DataNode.java
License:Apache License
/** Recover a block */ private void recoverBlock(RecoveringBlock rBlock) throws IOException { Block block = rBlock.getBlock(); DatanodeInfo[] targets = rBlock.getLocations(); DatanodeID[] datanodeids = (DatanodeID[]) targets; List<BlockRecord> syncList = new ArrayList<BlockRecord>(datanodeids.length); int errorCount = 0; //check generation stamps for (DatanodeID id : datanodeids) { try {//from ww w .ja v a 2 s . c om InterDatanodeProtocol datanode = dnRegistration.equals(id) ? this : DataNode.createInterDataNodeProtocolProxy(id, getConf()); ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock); if (info != null && info.getGenerationStamp() >= block.getGenerationStamp() && info.getNumBytes() > 0) { syncList.add(new BlockRecord(id, datanode, info)); } } catch (RecoveryInProgressException ripE) { InterDatanodeProtocol.LOG.warn("Recovery for replica " + block + " on data-node " + id + " is already in progress. Recovery id = " + rBlock.getNewGenerationStamp() + " is aborted.", ripE); return; } catch (IOException e) { ++errorCount; InterDatanodeProtocol.LOG.warn( "Failed to obtain replica info for block (=" + block + ") from datanode (=" + id + ")", e); } } if (errorCount == datanodeids.length) { throw new IOException( "All datanodes failed: block=" + block + ", datanodeids=" + Arrays.asList(datanodeids)); } syncBlock(rBlock, syncList); }
From source file:common.DataNode.java
License:Apache License
/** Block synchronization */ void syncBlock(RecoveringBlock rBlock, List<BlockRecord> syncList) throws IOException { Block block = rBlock.getBlock();// w w w . j a va 2s. co m long recoveryId = rBlock.getNewGenerationStamp(); if (LOG.isDebugEnabled()) { LOG.debug("block=" + block + ", (length=" + block.getNumBytes() + "), syncList=" + syncList); } // syncList.isEmpty() means that all data-nodes do not have the block // or their replicas have 0 length. // The block can be deleted. if (syncList.isEmpty()) { namenode.commitBlockSynchronization(block, recoveryId, 0, true, true, DatanodeID.EMPTY_ARRAY); return; } // Calculate the best available replica state. ReplicaState bestState = ReplicaState.RWR; long finalizedLength = -1; for (BlockRecord r : syncList) { assert r.rInfo.getNumBytes() > 0 : "zero length replica"; ReplicaState rState = r.rInfo.getOriginalReplicaState(); if (rState.getValue() < bestState.getValue()) bestState = rState; if (rState == ReplicaState.FINALIZED) { if (finalizedLength > 0 && finalizedLength != r.rInfo.getNumBytes()) throw new IOException("Inconsistent size of finalized replicas. 
" + "Replica " + r.rInfo + " expected size: " + finalizedLength); finalizedLength = r.rInfo.getNumBytes(); } } // Calculate list of nodes that will participate in the recovery // and the new block size List<BlockRecord> participatingList = new ArrayList<BlockRecord>(); Block newBlock = new Block(block.getBlockId(), -1, recoveryId); switch (bestState) { case FINALIZED: assert finalizedLength > 0 : "finalizedLength is not positive"; for (BlockRecord r : syncList) { ReplicaState rState = r.rInfo.getOriginalReplicaState(); if (rState == ReplicaState.FINALIZED || rState == ReplicaState.RBW && r.rInfo.getNumBytes() == finalizedLength) participatingList.add(r); } newBlock.setNumBytes(finalizedLength); break; case RBW: case RWR: long minLength = Long.MAX_VALUE; for (BlockRecord r : syncList) { ReplicaState rState = r.rInfo.getOriginalReplicaState(); if (rState == bestState) { minLength = Math.min(minLength, r.rInfo.getNumBytes()); participatingList.add(r); } } newBlock.setNumBytes(minLength); break; case RUR: case TEMPORARY: assert false : "bad replica state: " + bestState; } List<DatanodeID> failedList = new ArrayList<DatanodeID>(); List<DatanodeID> successList = new ArrayList<DatanodeID>(); for (BlockRecord r : participatingList) { try { Block reply = r.datanode.updateReplicaUnderRecovery(r.rInfo, recoveryId, newBlock.getNumBytes()); assert reply.equals(newBlock) && reply.getNumBytes() == newBlock .getNumBytes() : "Updated replica must be the same as the new block."; successList.add(r.id); } catch (IOException e) { InterDatanodeProtocol.LOG .warn("Failed to updateBlock (newblock=" + newBlock + ", datanode=" + r.id + ")", e); failedList.add(r.id); } } // If any of the data-nodes failed, the recovery fails, because // we never know the actual state of the replica on failed data-nodes. // The recovery should be started over. 
if (!failedList.isEmpty()) { StringBuilder b = new StringBuilder(); for (DatanodeID id : failedList) { b.append("\n " + id); } throw new IOException("Cannot recover " + block + ", the following " + failedList.size() + " data-nodes failed {" + b + "\n}"); } // Notify the name-node about successfully recovered replicas. DatanodeID[] nlist = successList.toArray(new DatanodeID[successList.size()]); namenode.commitBlockSynchronization(block, newBlock.getGenerationStamp(), newBlock.getNumBytes(), true, false, nlist); }
From source file:io.hops.experiments.benchmarks.blockreporting.TinyDatanodesHelper.java
License:Apache License
/**
 * Persists the block inventory of every datanode to {@code DATANODES_STATE}
 * as CSV lines of the form {@code dnIndex,blockId,numBytes,generationStamp}.
 *
 * <p>Fix: the original leaked the {@link BufferedWriter} if any write threw
 * (close() was only reached on the success path); try-with-resources now
 * guarantees the writer is closed and its buffer flushed on all paths.
 *
 * @param datanodes the simulated datanodes whose block lists are dumped
 * @throws IOException if the state file cannot be created or written
 */
public void writeDataNodesStateToDisk(TinyDatanode[] datanodes) throws IOException {
    try (BufferedWriter writer = new BufferedWriter(new FileWriter(DATANODES_STATE))) {
        for (int dn = 0; dn < datanodes.length; dn++) {
            for (Block block : datanodes[dn].blocks) {
                if (block != null) { // block slots may be unfilled
                    writer.write(Joiner.on(",").join(dn, block.getBlockId(), block.getNumBytes(),
                            block.getGenerationStamp()));
                    writer.newLine();
                }
            }
        }
    }
}
From source file:org.openflamingo.remote.thrift.thriftfs.ThriftUtils.java
License:Apache License
public static Block toThrift(LocatedBlock block, String path, Map<DatanodeID, Integer> thriftPorts) throws java.io.IOException { if (block == null) { return new Block(); }//from ww w .j a v a2 s .com List<DatanodeInfo> nodes = new ArrayList<DatanodeInfo>(); for (org.apache.hadoop.hdfs.protocol.DatanodeInfo n : block.getLocations()) { DatanodeInfo node = toThrift(n, thriftPorts); if (node.getThriftPort() != Constants.UNKNOWN_THRIFT_PORT) { nodes.add(node); } } org.apache.hadoop.hdfs.protocol.Block b = block.getBlock(); return new Block(b.getBlockId(), path, b.getNumBytes(), b.getGenerationStamp(), nodes, block.getStartOffset(), block.getBlockToken().encodeToUrlString()); }