Example usage for org.apache.hadoop.hdfs DistributedFileSystem getDelegationToken

List of usage examples for org.apache.hadoop.hdfs DistributedFileSystem getDelegationToken

Introduction

On this page you can find example usage for org.apache.hadoop.hdfs DistributedFileSystem getDelegationToken.

Prototype

@Override
public Token<DelegationTokenIdentifier> getDelegationToken(String renewer) throws IOException
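
Before the full source example below, here is a minimal sketch of calling this method directly. The namenode URI and the renewer principal ("yarn") are hypothetical placeholders, not values taken from the source below.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class GetDelegationTokenSketch {
    public static void main(String[] args) throws IOException {
        final Configuration conf = new Configuration();
        // Hypothetical HDFS path; an hdfs:// URI resolves to a DistributedFileSystem.
        final Path path = new Path("hdfs://namenode:8020/tmp");

        final FileSystem fs = path.getFileSystem(conf);
        if (fs instanceof DistributedFileSystem) {
            final DistributedFileSystem dfs = (DistributedFileSystem) fs;

            // Request a delegation token; the renewer names the principal
            // allowed to renew it ("yarn" is a placeholder).
            final Token<DelegationTokenIdentifier> token = dfs.getDelegationToken("yarn");

            // Attach the token to the current user's credentials so that
            // later RPCs issued as this user can authenticate with it.
            UserGroupInformation.getCurrentUser().addToken(token);
        }
    }
}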

Usage

From source file: com.moz.fiji.schema.impl.hbase.HBaseFijiTable.java

License: Apache License

/**
 * Loads partitioned HFiles directly into the regions of this Fiji table.
 *
 * @param hfilePath Path of the HFiles to load.
 * @throws IOException on I/O error.
 */
public void bulkLoad(Path hfilePath) throws IOException {
    final LoadIncrementalHFiles loader = createHFileLoader(mConf);

    final String hFileScheme = hfilePath.toUri().getScheme();
    Token<DelegationTokenIdentifier> hdfsDelegationToken = null;

    // If we're bulk loading from a secure HDFS, we should request and forward a delegation token.
    // LoadIncrementalHfiles will actually do this if none is provided, but because we call it
    // repeatedly in a short amount of time, this seems to trigger a possible race condition
    // where we ask to load the next HFile while there is a pending token cancellation request.
    // By requesting the token ourselves, it is re-used for each bulk load call.
    // Once we're done with the bulk loader we cancel the token.
    // Compare against HDFS_SCHEME first: getScheme() may return null for relative paths.
    if (UserGroupInformation.isSecurityEnabled() && HDFS_SCHEME.equals(hFileScheme)) {
        final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        final DistributedFileSystem fileSystem = (DistributedFileSystem) hfilePath.getFileSystem(mConf);
        hdfsDelegationToken = fileSystem.getDelegationToken(RENEWER);
        ugi.addToken(hdfsDelegationToken);
    }

    try {
        // LoadIncrementalHFiles.doBulkLoad() requires an HTable instance, not an HTableInterface:
        final HTable htable = (HTable) mHTableFactory.create(mConf, mHBaseTableName);
        try {
            final List<Path> hfilePaths = Lists.newArrayList();

            // Try to find any hfiles for partitions within the passed in path
            final FileStatus[] hfiles = hfilePath.getFileSystem(mConf).globStatus(new Path(hfilePath, "*"));
            for (FileStatus hfile : hfiles) {
                String partName = hfile.getPath().getName();
                if (!partName.startsWith("_") && partName.endsWith(".hfile")) {
                    Path partHFile = new Path(hfilePath, partName);
                    hfilePaths.add(partHFile);
                }
            }
            if (hfilePaths.isEmpty()) {
                // If we didn't find any parts, add in the passed in parameter
                hfilePaths.add(hfilePath);
            }
            for (Path path : hfilePaths) {
                loader.doBulkLoad(path, htable);
                LOG.info("Successfully loaded: " + path.toString());
            }
        } finally {
            htable.close();
        }
    } catch (TableNotFoundException tnfe) {
        throw new InternalFijiError(tnfe);
    }

    // Cancel the HDFS delegation token if we requested one.
    if (null != hdfsDelegationToken) {
        try {
            hdfsDelegationToken.cancel(mConf);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can observe the interruption.
            Thread.currentThread().interrupt();
            LOG.warn("Failed to cancel HDFS delegation token.", e);
        }
    }
}
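
Note that in the source above the token is cancelled only after the try/catch completes, so if doBulkLoad() throws, the cancellation is skipped. A minimal sketch of an alternative try/finally lifecycle, reusing the identifiers from the example above (fileSystem, RENEWER, mConf, LOG) and a hypothetical runBulkLoad() standing in for the loading loop:

Token<DelegationTokenIdentifier> token = null;
if (UserGroupInformation.isSecurityEnabled()) {
    token = fileSystem.getDelegationToken(RENEWER);
    UserGroupInformation.getCurrentUser().addToken(token);
}
try {
    runBulkLoad(); // hypothetical stand-in for the doBulkLoad() loop above
} finally {
    // Cancel on every exit path, not just on success.
    if (null != token) {
        try {
            token.cancel(mConf);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            LOG.warn("Interrupted while cancelling HDFS delegation token.", e);
        }
    }
}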