Example usage for org.apache.hadoop.security.token Token cancel

Introduction

This page lists example usages of the cancel method of org.apache.hadoop.security.token.Token.

Prototype

public void cancel(Configuration conf) throws IOException, InterruptedException 

Document

Cancel this delegation token.
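
For orientation, here is a minimal sketch of the typical lifecycle around cancel: request a delegation token from a FileSystem, use it, then cancel it when it is no longer needed. The renewer name ("yarn") and the default Configuration are illustrative assumptions, not part of the API contract.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.token.Token;

public class CancelTokenSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Request a delegation token; "yarn" as the renewer is an illustrative choice.
        // On a cluster without security enabled this may return null.
        Token<?> token = fs.getDelegationToken("yarn");
        if (token != null) {
            try {
                // ... hand the token to jobs or services that need it ...
            } finally {
                // cancel(conf) contacts the service that issued the token and
                // invalidates it server-side; it may throw IOException or
                // InterruptedException.
                token.cancel(conf);
            }
        }
    }
}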

Usage

From source file:azkaban.security.HadoopSecurityManager_H_1_0.java

License:Apache License

private void cancelNameNodeToken(final Token<? extends TokenIdentifier> t, String userToProxy)
        throws HadoopSecurityManagerException {
    try {
        getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                cancelToken(t);
                return null;
            }

            private void cancelToken(Token<?> nt) throws IOException, InterruptedException {
                nt.cancel(conf);
            }
        });
    } catch (Exception e) {
        e.printStackTrace();
        throw new HadoopSecurityManagerException("Failed to cancel Token. " + e.getMessage() + e.getCause());
    }
}

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

private void cancelNameNodeToken(final Token<? extends TokenIdentifier> t, String userToProxy)
        throws HadoopSecurityManagerException {
    try {
        getProxiedUser(userToProxy).doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                cancelToken(t);
                return null;
            }

            private void cancelToken(Token<?> nt) throws IOException, InterruptedException {
                nt.cancel(conf);
            }
        });
    } catch (Exception e) {
        throw new HadoopSecurityManagerException("Failed to cancel token. " + e.getMessage() + e.getCause(), e);
    }
}
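
Note on the pattern above: both Azkaban variants wrap the cancel call in doAs so the cancellation RPC runs as the proxied user that owns the token, and both convert the checked IOException or InterruptedException thrown by cancel(conf) into a HadoopSecurityManagerException for the caller.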

From source file:com.mellanox.r4h.DFSClient.java

License:Apache License

/**
 * Cancel a delegation token
 * 
 * @param token
 *            the token to cancel
 * @throws InvalidToken
 * @throws IOException
 * @deprecated Use Token.cancel instead.
 */
@Deprecated
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token) throws InvalidToken, IOException {
    LOG.info("Cancelling " + DelegationTokenIdentifier.stringifyToken(token));
    try {
        token.cancel(conf);
    } catch (InterruptedException ie) {
        throw new RuntimeException("caught interrupted", ie);
    } catch (RemoteException re) {
        throw re.unwrapRemoteException(InvalidToken.class, AccessControlException.class);
    }
}
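
Two details of this deprecated wrapper are worth noting: InterruptedException from cancel is rethrown as an unchecked RuntimeException, and RemoteException is unwrapped so callers see the original InvalidToken or AccessControlException rather than the RPC wrapper.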

From source file:com.moz.fiji.schema.impl.hbase.HBaseFijiTable.java

License:Apache License

/**
 * Loads partitioned HFiles directly into the regions of this Fiji table.
 *
 * @param hfilePath Path of the HFiles to load.
 * @throws IOException on I/O error.
 */
public void bulkLoad(Path hfilePath) throws IOException {
    final LoadIncrementalHFiles loader = createHFileLoader(mConf);

    final String hFileScheme = hfilePath.toUri().getScheme();
    Token<DelegationTokenIdentifier> hdfsDelegationToken = null;

    // If we're bulk loading from a secure HDFS, we should request and forward a delegation token.
    // LoadIncrementalHfiles will actually do this if none is provided, but because we call it
    // repeatedly in a short amount of time, this seems to trigger a possible race condition
    // where we ask to load the next HFile while there is a pending token cancellation request.
    // By requesting the token ourselves, it is re-used for each bulk load call.
    // Once we're done with the bulk loader we cancel the token.
    if (UserGroupInformation.isSecurityEnabled() && hFileScheme.equals(HDFS_SCHEME)) {
        final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        final DistributedFileSystem fileSystem = (DistributedFileSystem) hfilePath.getFileSystem(mConf);
        hdfsDelegationToken = fileSystem.getDelegationToken(RENEWER);
        ugi.addToken(hdfsDelegationToken);
    }

    try {
        // LoadIncrementalHFiles.doBulkLoad() requires an HTable instance, not an HTableInterface:
        final HTable htable = (HTable) mHTableFactory.create(mConf, mHBaseTableName);
        try {
            final List<Path> hfilePaths = Lists.newArrayList();

            // Try to find any hfiles for partitions within the passed in path
            final FileStatus[] hfiles = hfilePath.getFileSystem(mConf).globStatus(new Path(hfilePath, "*"));
            for (FileStatus hfile : hfiles) {
                String partName = hfile.getPath().getName();
                if (!partName.startsWith("_") && partName.endsWith(".hfile")) {
                    Path partHFile = new Path(hfilePath, partName);
                    hfilePaths.add(partHFile);
                }
            }
            if (hfilePaths.isEmpty()) {
                // If we didn't find any parts, add in the passed in parameter
                hfilePaths.add(hfilePath);
            }
            for (Path path : hfilePaths) {
                loader.doBulkLoad(path, htable);
                LOG.info("Successfully loaded: " + path.toString());
            }
        } finally {
            htable.close();
        }
    } catch (TableNotFoundException tnfe) {
        throw new InternalFijiError(tnfe);
    }

    // Cancel the HDFS delegation token if we requested one.
    if (null != hdfsDelegationToken) {
        try {
            hdfsDelegationToken.cancel(mConf);
        } catch (InterruptedException e) {
            LOG.warn("Failed to cancel HDFS delegation token.", e);
        }
    }
}
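
The pattern described in the comments above, requesting the token explicitly, attaching it to the current UGI so repeated bulk loads re-use it, and cancelling it when done, can be distilled as follows. This is a sketch under stated assumptions: the renewer string is illustrative, and cancellation is moved into a finally block so it also runs when the work fails.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public final class DelegationTokenScope {
    /** Requests an HDFS delegation token, runs the work, then cancels the token. */
    static void runWithToken(Path path, Configuration conf, Runnable work) throws IOException {
        Token<DelegationTokenIdentifier> token = null;
        if (UserGroupInformation.isSecurityEnabled()) {
            // Assumes the path lives on HDFS, as the scheme check above ensures.
            DistributedFileSystem dfs = (DistributedFileSystem) path.getFileSystem(conf);
            token = dfs.getDelegationToken("renewer"); // illustrative renewer name
            UserGroupInformation.getCurrentUser().addToken(token);
        }
        try {
            work.run(); // e.g. several bulk-load calls re-using the same token
        } finally {
            if (token != null) {
                try {
                    token.cancel(conf);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt(); // restore interrupt status
                }
            }
        }
    }
}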