Example usage for org.apache.hadoop.hdfs.protocol.Block.getBlockId()

Introduction

This page collects example usages of org.apache.hadoop.hdfs.protocol.Block.getBlockId(), drawn from open-source projects.

Prototype

public long getBlockId() 
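
Before the project examples below, here is a minimal self-contained sketch. The class name and all constructor arguments are illustrative values, not taken from any of the projects; the three-argument Block constructor matches the one used in the recovery example further down:

import org.apache.hadoop.hdfs.protocol.Block;

public class GetBlockIdExample {
    public static void main(String[] args) {
        // Hypothetical values: block id, length in bytes, generation stamp.
        Block block = new Block(1073741825L, 134217728L, 1001L);

        // getBlockId() returns the numeric id identifying this block.
        long blockId = block.getBlockId();
        System.out.println("blockId=" + blockId);
    }
}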

Usage

From source file: backup.namenode.NameNodeRestoreProcessor.java

License: Apache License
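
This snippet walks the NameNode's corrupt-replica iterator and reads each block's id, length, and generation stamp to build an ExtendedBlock, for which a restore is requested if one is not already pending: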

private boolean checkForBlocksToRestore() throws Exception {
    String blockPoolId = namesystem.getBlockPoolId();
    Iterator<? extends Block> blockIterator = blockManager.getCorruptReplicaBlockIterator();
    boolean atLeastOneRestoreRequest = false;
    while (blockIterator.hasNext()) {
        Block block = blockIterator.next();
        long blockId = block.getBlockId();
        long length = block.getNumBytes();
        long generationStamp = block.getGenerationStamp();

        ExtendedBlock extendedBlock = new ExtendedBlock(blockPoolId, blockId, length, generationStamp);
        if (!hasRestoreBeenRequested(extendedBlock)) {
            LOG.info("Need to restore block {}", extendedBlock);
            requestRestoreInternal(extendedBlock);
            atLeastOneRestoreRequest = true;
        }
    }
    return atLeastOneRestoreRequest;
}

From source file: common.DataNode.java

License: Apache License
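
Here getBlockId() seeds the new Block created during block recovery: the recovered block keeps the original id, takes the new generation stamp from the recovery id, and gets its length from the best available replica state before the result is committed back to the NameNode: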

/** Block synchronization */
void syncBlock(RecoveringBlock rBlock, List<BlockRecord> syncList) throws IOException {
    Block block = rBlock.getBlock();
    long recoveryId = rBlock.getNewGenerationStamp();
    if (LOG.isDebugEnabled()) {
        LOG.debug("block=" + block + ", (length=" + block.getNumBytes() + "), syncList=" + syncList);
    }

    // An empty syncList means that none of the data-nodes has the block,
    // or that all replicas have length 0; the block can be deleted.
    if (syncList.isEmpty()) {
        namenode.commitBlockSynchronization(block, recoveryId, 0, true, true, DatanodeID.EMPTY_ARRAY);
        return;
    }

    // Calculate the best available replica state.
    ReplicaState bestState = ReplicaState.RWR;
    long finalizedLength = -1;
    for (BlockRecord r : syncList) {
        assert r.rInfo.getNumBytes() > 0 : "zero length replica";
        ReplicaState rState = r.rInfo.getOriginalReplicaState();
        if (rState.getValue() < bestState.getValue())
            bestState = rState;
        if (rState == ReplicaState.FINALIZED) {
            if (finalizedLength > 0 && finalizedLength != r.rInfo.getNumBytes())
                throw new IOException("Inconsistent size of finalized replicas. " + "Replica " + r.rInfo
                        + " expected size: " + finalizedLength);
            finalizedLength = r.rInfo.getNumBytes();
        }
    }

    // Calculate list of nodes that will participate in the recovery
    // and the new block size
    List<BlockRecord> participatingList = new ArrayList<BlockRecord>();
    Block newBlock = new Block(block.getBlockId(), -1, recoveryId);
    switch (bestState) {
    case FINALIZED:
        assert finalizedLength > 0 : "finalizedLength is not positive";
        for (BlockRecord r : syncList) {
            ReplicaState rState = r.rInfo.getOriginalReplicaState();
            if (rState == ReplicaState.FINALIZED
                    || rState == ReplicaState.RBW && r.rInfo.getNumBytes() == finalizedLength)
                participatingList.add(r);
        }
        newBlock.setNumBytes(finalizedLength);
        break;
    case RBW:
    case RWR:
        long minLength = Long.MAX_VALUE;
        for (BlockRecord r : syncList) {
            ReplicaState rState = r.rInfo.getOriginalReplicaState();
            if (rState == bestState) {
                minLength = Math.min(minLength, r.rInfo.getNumBytes());
                participatingList.add(r);
            }
        }
        newBlock.setNumBytes(minLength);
        break;
    case RUR:
    case TEMPORARY:
        assert false : "bad replica state: " + bestState;
    }

    List<DatanodeID> failedList = new ArrayList<DatanodeID>();
    List<DatanodeID> successList = new ArrayList<DatanodeID>();
    for (BlockRecord r : participatingList) {
        try {
            Block reply = r.datanode.updateReplicaUnderRecovery(r.rInfo, recoveryId, newBlock.getNumBytes());
            assert reply.equals(newBlock) && reply.getNumBytes() == newBlock.getNumBytes()
                    : "Updated replica must be the same as the new block.";
            successList.add(r.id);
        } catch (IOException e) {
            InterDatanodeProtocol.LOG
                    .warn("Failed to updateBlock (newblock=" + newBlock + ", datanode=" + r.id + ")", e);
            failedList.add(r.id);
        }
    }

    // If any of the data-nodes failed, the recovery fails, because
    // we never know the actual state of the replica on failed data-nodes.
    // The recovery should be started over.
    if (!failedList.isEmpty()) {
        StringBuilder b = new StringBuilder();
        for (DatanodeID id : failedList) {
            b.append("\n  " + id);
        }
        throw new IOException("Cannot recover " + block + ", the following " + failedList.size()
                + " data-nodes failed {" + b + "\n}");
    }

    // Notify the name-node about successfully recovered replicas.
    DatanodeID[] nlist = successList.toArray(new DatanodeID[successList.size()]);
    namenode.commitBlockSynchronization(block, newBlock.getGenerationStamp(), newBlock.getNumBytes(), true,
            false, nlist);
}

From source file: io.hops.common.INodeUtil.java

License: Apache License
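
When the block is a plain Block rather than a BlockInfo (or a BlockInfoUnderConstruction), its id is the only available handle, so the owning INode is resolved from the block id directly: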

public static INodeIdentifier resolveINodeFromBlock(final Block b) throws StorageException {
    if (b instanceof BlockInfo || b instanceof BlockInfoUnderConstruction) {
        INodeIdentifier inodeIden = new INodeIdentifier(((BlockInfo) b).getInodeId());
        INodeDALAdaptor ida = (INodeDALAdaptor) HdfsStorageFactory.getDataAccess(INodeDataAccess.class);
        INode inode = ida.indexScanfindInodeById(((BlockInfo) b).getInodeId());
        if (inode != null) {
            inodeIden.setName(inode.getLocalName());
            inodeIden.setPid(inode.getParentId());
        }
        return inodeIden;
    } else {
        return resolveINodeFromBlockID(b.getBlockId());
    }
}

From source file: io.hops.experiments.benchmarks.blockreporting.TinyDatanodesHelper.java

License: Apache License
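
This helper persists datanode state by writing one comma-separated line per block, joining the datanode index with the block's id, length, and generation stamp: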

public void writeDataNodesStateToDisk(TinyDatanode[] datanodes) throws IOException {
    BufferedWriter writer = new BufferedWriter(new FileWriter(DATANODES_STATE));
    for (int dn = 0; dn < datanodes.length; dn++) {
        for (Block block : datanodes[dn].blocks) {
            if (block != null) {
                writer.write(Joiner.on(",").join(dn, block.getBlockId(), block.getNumBytes(),
                        block.getGenerationStamp()));
                writer.newLine();
            }
        }
    }
    writer.close();
}

From source file: io.hops.transaction.lock.LastBlockReplicasHashBucketLock.java

License: Apache License
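
The block id of each file's last block is combined with the inode id to look up that block's replicas, whose hash buckets are then locked in bucket-id order: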

@Override
protected void acquire(TransactionLocks locks) throws IOException {
    BlockLock blockLock = (BlockLock) locks.getLock(Type.Block);
    for (INodeFile iNodeFile : blockLock.getFiles()) {
        Block lastBlock = iNodeFile.getLastBlock();
        if (lastBlock != null) {
            List<Replica> replicas = (List<Replica>) EntityManager.findList(Replica.Finder.ByBlockIdAndINodeId,
                    lastBlock.getBlockId(), iNodeFile.getId());
            if (replicas != null) {
                Collections.sort(replicas, new Comparator<Replica>() {
                    @Override
                    public int compare(Replica o1, Replica o2) {
                        return Integer.compare(o1.getBucketId(), o2.getBucketId());
                    }
                });

                // FIXME-BR: why lock buckets for all the replicas? Only the
                // last replica should be locked. Am I missing something?
                for (Replica replica : replicas) {
                    EntityManager.find(HashBucket.Finder.ByStorageIdAndBucketId, replica.getStorageId(),
                            replica.getBucketId());
                }
            }
        }
    }
}

From source file: org.openflamingo.remote.thrift.thriftfs.ThriftUtils.java

License: Apache License
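
Finally, the block id is copied into a Thrift Block object along with the path, length, generation stamp, datanode locations, start offset, and access token: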

public static Block toThrift(LocatedBlock block, String path, Map<DatanodeID, Integer> thriftPorts)
        throws java.io.IOException {
    if (block == null) {
        return new Block();
    }

    List<DatanodeInfo> nodes = new ArrayList<DatanodeInfo>();
    for (org.apache.hadoop.hdfs.protocol.DatanodeInfo n : block.getLocations()) {
        DatanodeInfo node = toThrift(n, thriftPorts);
        if (node.getThriftPort() != Constants.UNKNOWN_THRIFT_PORT) {
            nodes.add(node);
        }
    }

    org.apache.hadoop.hdfs.protocol.Block b = block.getBlock();
    return new Block(b.getBlockId(), path, b.getNumBytes(), b.getGenerationStamp(), nodes,
            block.getStartOffset(), block.getBlockToken().encodeToUrlString());
}