Example usage for org.apache.hadoop.hdfs.server.datanode ReplicaHandler getReplica

Introduction

On this page you can find example usage of org.apache.hadoop.hdfs.server.datanode.ReplicaHandler#getReplica().

Prototype

public ReplicaInPipeline getReplica() 
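
A ReplicaHandler is returned by dataset operations such as FsDatasetSpi#createRbw; it wraps the replica being written together with a reference to the volume hosting it. getReplica() exposes that replica, and closing the handler releases the volume reference. Below is a minimal sketch of the call pattern, assuming a Hadoop 2.7-era FsDatasetSpi where getReplica() returns ReplicaInPipelineInterface (renamed ReplicaInPipeline in later releases, matching the prototype above); fsDataset and block are placeholders supplied by the caller.

import java.io.IOException;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.ReplicaHandler;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

public class ReplicaHandlerSketch {
    // Sketch only, not a drop-in implementation.
    static void createAndUseReplica(FsDatasetSpi<?> fsDataset, ExtendedBlock block) throws IOException {
        // createRbw allocates a replica in RBW (replica being written) state;
        // the returned handler holds the replica plus a volume reference.
        try (ReplicaHandler handler = fsDataset.createRbw(StorageType.DEFAULT, block, false)) {
            ReplicaInPipelineInterface replica = handler.getReplica();
            // ... write block data and checksums via replica.createStreams(...) ...
            replica.setNumBytes(block.getNumBytes());
        } // close() releases the FsVolumeReference held by the handler
    }
}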

Usage

From source file: backup.datanode.DataNodeRestoreProcessor.java

License: Apache License

public void restoreBlock(ExtendedBlock extendedBlock) throws Exception {
    if (!_backupStore.hasBlock(extendedBlock)) {
        LOG.error("Can not restore block, not in block store {}", extendedBlock);
        return;
    }
    FsDatasetSpi<?> fsDataset = _datanode.getFSDataset();
    org.apache.hadoop.hdfs.protocol.ExtendedBlock heb = BackupUtil.toHadoop(extendedBlock);
    if (fsDataset.isValidBlock(heb)) {
        LOG.info("Block already restored {}", extendedBlock);
        return;
    }
    try {
        _restoreInProgress.incrementAndGet();
        LOG.info("Restoring block {}", extendedBlock);
        boolean allowLazyPersist = true;
        // StorageType moved from org.apache.hadoop.hdfs to org.apache.hadoop.fs
        // in Hadoop 2.6; this code uses the older location.
        org.apache.hadoop.hdfs.StorageType storageType = org.apache.hadoop.hdfs.StorageType.DEFAULT;
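        // createRbw allocates the replacement replica in RBW (replica being
        // written) state; the handler wraps the replica and a volume reference.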
        ReplicaHandler replicaHandler = fsDataset.createRbw(storageType, heb, allowLazyPersist);
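        // getReplica() returns the in-flight replica that the restore writes to.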
        ReplicaInPipelineInterface pipelineInterface = replicaHandler.getReplica();
        boolean isCreate = true;
        DataChecksum requestedChecksum = DataChecksum.newDataChecksum(_checksumType, _bytesPerChecksum);
        int bytesCopied = 0;
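        // createStreams opens the checksum and data output streams backing the replica.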
        try (ReplicaOutputStreams streams = pipelineInterface.createStreams(isCreate, requestedChecksum)) {
            try (OutputStream checksumOut = streams.getChecksumOut()) {
                try (InputStream metaData = _backupStore.getMetaDataInputStream(extendedBlock)) {
                    LOG.info("Restoring meta data for block {}", extendedBlock);
                    IOUtils.copy(trackThroughPut(metaData), checksumOut);
                }
            }
            try (OutputStream dataOut = streams.getDataOut()) {
                try (InputStream data = _backupStore.getDataInputStream(extendedBlock)) {
                    LOG.info("Restoring data for block {}", extendedBlock);
                    bytesCopied = IOUtils.copy(trackThroughPut(data), dataOut);
                }
            }
        }
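        // Record the restored byte count on the replica before finalizing.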
        pipelineInterface.setNumBytes(bytesCopied);
        LOG.info("Finalizing restored block {}", extendedBlock);
        fsDataset.finalizeBlock(heb);

        _datanode.notifyNamenodeReceivedBlock(heb, "", pipelineInterface.getStorageUuid(),
                pipelineInterface.isOnTransientStorage());
    } catch (ReplicaAlreadyExistsException e) {
        LOG.info("Restoring block already exists {}", extendedBlock);
    } finally {
        _restoreInProgress.decrementAndGet();
    }
}
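
Note that restoreBlock never closes the ReplicaHandler returned by createRbw. On releases where ReplicaHandler implements Closeable and holds an FsVolumeReference, callers would normally close the handler once the block is finalized, as in the sketch under Prototype above.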