Example usage for org.apache.hadoop.security UserGroupInformation doAs

List of usage examples for org.apache.hadoop.security UserGroupInformation doAs

Introduction

On this page you can find example usage of org.apache.hadoop.security.UserGroupInformation.doAs.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public <T> T doAs(PrivilegedExceptionAction<T> action) throws IOException, InterruptedException 

Document

Run the given action as the user, potentially throwing an exception.
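
As a minimal sketch of the pattern (not taken from the examples below; the user name "alice", the path, and the surrounding class are hypothetical, and proxy-user impersonation must be allowed by the cluster's hadoop.proxyuser.* settings), doAs runs the supplied action with the credentials of the UserGroupInformation instance it is called on, and checked failures thrown inside run() surface from doAs as IOException or InterruptedException:

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsExample {
    public static void main(String[] args) throws Exception {
        final Configuration conf = new Configuration();

        // Impersonate "alice" on top of the process (login) user's credentials.
        final UserGroupInformation proxyUgi =
                UserGroupInformation.createProxyUser("alice", UserGroupInformation.getLoginUser());

        // The filesystem call below executes as the proxy user, not as the process user.
        final FileStatus[] statuses = proxyUgi.doAs(new PrivilegedExceptionAction<FileStatus[]>() {
            @Override
            public FileStatus[] run() throws Exception {
                return FileSystem.get(conf).listStatus(new Path("/user/alice"));
            }
        });

        for (FileStatus status : statuses) {
            System.out.println(status.getPath());
        }
    }
}

Several of the Drill examples below follow the same pattern with ImpersonationUtil.createProxyUgi(...), and additionally catch UndeclaredThrowableException, since doAs wraps checked exceptions other than IOException and InterruptedException that escape run().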

Usage

From source file:org.apache.drill.exec.physical.impl.ImplCreator.java

License:Apache License

/** Create a RecordBatch and its children for given PhysicalOperator */
@VisibleForTesting
public RecordBatch getRecordBatch(final PhysicalOperator op, final FragmentContext context)
        throws ExecutionSetupException {
    Preconditions.checkNotNull(op);

    final List<RecordBatch> childRecordBatches = getChildren(op, context);

    if (context.isImpersonationEnabled()) {
        final UserGroupInformation proxyUgi = ImpersonationUtil.createProxyUgi(op.getUserName(),
                context.getQueryUserName());
        try {
            return proxyUgi.doAs(new PrivilegedExceptionAction<RecordBatch>() {
                @Override
                public RecordBatch run() throws Exception {
                    final CloseableRecordBatch batch = ((BatchCreator<PhysicalOperator>) getOpCreator(op,
                            context)).getBatch(context, op, childRecordBatches);
                    operators.addFirst(batch);
                    return batch;
                }
            });
        } catch (InterruptedException | IOException e) {
            final String errMsg = String.format("Failed to create RecordBatch for operator with id '%d'",
                    op.getOperatorId());
            logger.error(errMsg, e);
            throw new ExecutionSetupException(errMsg, e);
        }
    } else {
        final CloseableRecordBatch batch = ((BatchCreator<PhysicalOperator>) getOpCreator(op, context))
                .getBatch(context, op, childRecordBatches);
        operators.addFirst(batch);
        return batch;
    }
}

From source file:org.apache.drill.exec.planner.index.MapRDBIndexDiscover.java

License:Apache License

@SuppressWarnings("deprecation")
private Admin admin() {
    assert getOriginalScan() instanceof MapRDBGroupScan;

    final MapRDBGroupScan dbGroupScan = (MapRDBGroupScan) getOriginalScan();
    final UserGroupInformation currentUser = ImpersonationUtil.createProxyUgi(dbGroupScan.getUserName());
    final Configuration conf = dbGroupScan.getFormatPlugin().getFsConf();

    final Admin admin;
    try {
        admin = currentUser.doAs((PrivilegedExceptionAction<Admin>) () -> MapRDB.getAdmin(conf));
    } catch (Exception e) {
        throw new DrillRuntimeException("Failed to get Admin instance for user: " + currentUser.getUserName(),
                e);
    }
    return admin;
}

From source file:org.apache.drill.exec.rpc.security.AuthenticationOutcomeListener.java

License:Apache License

private static byte[] evaluateChallenge(final UserGroupInformation ugi, final SaslClient saslClient,
        final byte[] challengeBytes) throws SaslException {
    try {
        return ugi.doAs(new PrivilegedExceptionAction<byte[]>() {
            @Override
            public byte[] run() throws Exception {
                return saslClient.evaluateChallenge(challengeBytes);
            }
        });
    } catch (final UndeclaredThrowableException e) {
        throw new SaslException(String.format("Unexpected failure (%s)", saslClient.getMechanismName()),
                e.getCause());
    } catch (final IOException | InterruptedException e) {
        if (e instanceof SaslException) {
            throw (SaslException) e;
        } else {
            throw new SaslException(String.format("Unexpected failure (%s)", saslClient.getMechanismName()), e);
        }
    }
}

From source file:org.apache.drill.exec.rpc.security.kerberos.KerberosFactory.java

License:Apache License

@Override
public SaslServer createSaslServer(final UserGroupInformation ugi, final Map<String, ?> properties)
        throws SaslException {
    try {
        final String primaryName = ugi.getShortUserName();
        final String instanceName = new HadoopKerberosName(ugi.getUserName()).getHostName();

        final SaslServer saslServer = ugi.doAs(new PrivilegedExceptionAction<SaslServer>() {
            @Override
            public SaslServer run() throws Exception {
                return FastSaslServerFactory.getInstance().createSaslServer(KerberosUtil.KERBEROS_SASL_NAME,
                        primaryName, instanceName, properties, new KerberosServerCallbackHandler());
            }
        });
        logger.trace("GSSAPI SaslServer created.");
        return saslServer;
    } catch (final UndeclaredThrowableException e) {
        final Throwable cause = e.getCause();
        logger.debug("Authentication failed.", cause);
        if (cause instanceof SaslException) {
            throw (SaslException) cause;
        } else {
            throw new SaslException("Unexpected failure trying to authenticate using Kerberos", cause);
        }
    } catch (final IOException | InterruptedException e) {
        logger.debug("Authentication failed.", e);
        throw new SaslException("Unexpected failure trying to authenticate using Kerberos", e);
    }
}

From source file:org.apache.drill.exec.rpc.security.kerberos.KerberosFactory.java

License:Apache License

@Override
public SaslClient createSaslClient(final UserGroupInformation ugi, final Map<String, ?> properties)
        throws SaslException {
    final String servicePrincipal = getServicePrincipal(properties);

    final String[] parts = KerberosUtil.splitPrincipalIntoParts(servicePrincipal);
    final String serviceName = parts[0];
    final String serviceHostName = parts[1];
    // ignore parts[2]; GSSAPI gets the realm info from the ticket
    try {
        final SaslClient saslClient = ugi.doAs(new PrivilegedExceptionAction<SaslClient>() {

            @Override
            public SaslClient run() throws Exception {
                return FastSaslClientFactory.getInstance().createSaslClient(
                        new String[] { KerberosUtil.KERBEROS_SASL_NAME }, null /* authorization ID */,
                        serviceName, serviceHostName, properties, new CallbackHandler() {
                            @Override
                            public void handle(final Callback[] callbacks)
                                    throws IOException, UnsupportedCallbackException {
                                throw new UnsupportedCallbackException(callbacks[0]);
                            }
                        });
            }
        });
        logger.debug("GSSAPI SaslClient created to authenticate to {} running on {}", serviceName,
                serviceHostName);
        return saslClient;
    } catch (final UndeclaredThrowableException e) {
        logger.debug("Authentication failed.", e);
        throw new SaslException(
                String.format("Unexpected failure trying to authenticate to %s using GSSAPI", serviceHostName),
                e.getCause());
    } catch (final IOException | InterruptedException e) {
        logger.debug("Authentication failed.", e);
        if (e instanceof SaslException) {
            throw (SaslException) e;
        }
        throw new SaslException(
                String.format("Unexpected failure trying to authenticate to %s using GSSAPI", serviceHostName),
                e);
    }
}

From source file:org.apache.drill.exec.store.avro.AvroRecordReader.java

License:Apache License

private DataFileReader<GenericContainer> getReader(final Path hadoop, final FileSystem fs)
        throws ExecutionSetupException {
    try {
        final UserGroupInformation ugi = ImpersonationUtil.createProxyUgi(this.opUserName, this.queryUserName);
        return ugi.doAs(new PrivilegedExceptionAction<DataFileReader<GenericContainer>>() {
            @Override
            public DataFileReader<GenericContainer> run() throws Exception {
                return new DataFileReader<>(new FsInput(hadoop, fs.getConf()),
                        new GenericDatumReader<GenericContainer>());
            }
        });
    } catch (IOException | InterruptedException e) {
        throw new ExecutionSetupException(String.format("Error in creating avro reader for file: %s", hadoop),
                e);
    }
}

From source file:org.apache.drill.exec.store.easy.sequencefile.SequenceFileRecordReader.java

License:Apache License

private org.apache.hadoop.mapred.RecordReader<BytesWritable, BytesWritable> getRecordReader(
        final InputFormat<BytesWritable, BytesWritable> inputFormat, final JobConf jobConf)
        throws ExecutionSetupException {
    try {
        final UserGroupInformation ugi = ImpersonationUtil.createProxyUgi(this.opUserName, this.queryUserName);
        return ugi.doAs(
                new PrivilegedExceptionAction<org.apache.hadoop.mapred.RecordReader<BytesWritable, BytesWritable>>() {
                    @Override
                    public org.apache.hadoop.mapred.RecordReader<BytesWritable, BytesWritable> run()
                            throws Exception {
                        return inputFormat.getRecordReader(split, jobConf, Reporter.NULL);
                    }
                });
    } catch (IOException | InterruptedException e) {
        throw new ExecutionSetupException(
                String.format("Error in creating sequencefile reader for file: %s, start: %d, length: %d",
                        split.getPath(), split.getStart(), split.getLength()),
                e);
    }
}

From source file:org.apache.drill.exec.store.hive.DrillHiveMetaStoreClient.java

License:Apache License

/**
 * Create a DrillHiveMetaStoreClient for cases where:
 *   1. Drill impersonation is enabled, and
 *   2. either storage-based authorization (in the remote Hive metastore server) or SQL-standard-based
 *      authorization (in the Hive storage plugin) is enabled
 * @param processUserMetaStoreClient MetaStoreClient of process user. Useful for generating the delegation tokens when
 *                                   SASL (KERBEROS or custom SASL implementations) is enabled.
 * @param hiveConf Conf including authorization configuration
 * @param userName User who is trying to access the Hive metadata
 * @return DrillHiveMetaStoreClient that accesses the Hive metastore as the given user
 * @throws MetaException
 */
public static DrillHiveMetaStoreClient createClientWithAuthz(
        final DrillHiveMetaStoreClient processUserMetaStoreClient, final HiveConf hiveConf,
        final String userName) throws MetaException {
    try {
        boolean delegationTokenGenerated = false;

        final UserGroupInformation ugiForRpc; // UGI credentials to use for RPC communication with Hive MetaStore server
        if (!hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS)) {
            // If the user impersonation is disabled in Hive storage plugin (not Drill impersonation), use the process
            // user UGI credentials.
            ugiForRpc = ImpersonationUtil.getProcessUserUGI();
        } else {
            ugiForRpc = ImpersonationUtil.createProxyUgi(userName);
            if (hiveConf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL)) {
                // When SASL is enabled, create a delegation token for the proxy user. Currently HiveMetaStoreClient can
                // create a client transport for proxy users only when the authentication mechanism is DIGEST (through the
                // use of delegation tokens).
                String delegationToken = processUserMetaStoreClient.getDelegationToken(userName, userName);
                try {
                    Utils.setTokenStr(ugiForRpc, delegationToken,
                            HiveClientWithAuthzWithCaching.DRILL2HMS_TOKEN);
                } catch (IOException e) {
                    throw new DrillRuntimeException(
                            "Couldn't setup delegation token in the UGI for Hive MetaStoreClient", e);
                }
                delegationTokenGenerated = true;
            }
        }

        final HiveConf hiveConfForClient;
        if (delegationTokenGenerated) {
            hiveConfForClient = new HiveConf(hiveConf);
            hiveConfForClient.set("hive.metastore.token.signature",
                    HiveClientWithAuthzWithCaching.DRILL2HMS_TOKEN);
        } else {
            hiveConfForClient = hiveConf;
        }

        return ugiForRpc.doAs(new PrivilegedExceptionAction<DrillHiveMetaStoreClient>() {
            @Override
            public DrillHiveMetaStoreClient run() throws Exception {
                return new HiveClientWithAuthzWithCaching(hiveConfForClient, ugiForRpc, userName);
            }
        });
    } catch (final Exception e) {
        throw new DrillRuntimeException("Failure setting up HiveMetaStore client.", e);
    }
}

From source file:org.apache.drill.exec.store.mapr.db.MapRDBTableCache.java

License:Apache License

/**
 * Returns a Table handle for the given primary table path and index descriptor:
 * the corresponding index table if indexDesc is not null, otherwise the primary table.
 *
 * @param tablePath primary table path
 * @param indexDesc index table descriptor
 */
public Table getTable(final Path tablePath, final IndexDesc indexDesc, final String userName)
        throws DrillRuntimeException {

    final Table dbTableHandle;
    final UserGroupInformation proxyUserUgi = ImpersonationUtil.createProxyUgi(userName);

    try {
        dbTableHandle = proxyUserUgi.doAs(new PrivilegedExceptionAction<Table>() {
            public Table run() throws Exception {

                if (logger.isTraceEnabled()) {
                    logger.trace("Getting MaprDB Table handle for proxy user: "
                            + UserGroupInformation.getCurrentUser());
                }

                if (tableCachingEnabled) {
                    Table table = tableCache.get(new MapRDBTableCache.Key(tablePath, indexDesc));
                    logger.trace(
                            "time {} get the tablePath {} tableHandle {} index {} userName {} currentUser {}",
                            System.nanoTime(), tablePath == null ? "null" : tablePath,
                            table == null ? "null" : table,
                            indexDesc == null ? "null" : indexDesc.getIndexName(),
                            userName == null ? "null" : userName,
                            UserGroupInformation.getCurrentUser() == null ? "null"
                                    : UserGroupInformation.getCurrentUser());
                    return table;
                } else {
                    return indexDesc == null ? MapRDBImpl.getTable(tablePath)
                            : MapRDBImpl.getIndexTable(indexDesc);
                }
            }
        });
    } catch (Exception e) {
        throw new DrillRuntimeException("Error getting table: " + tablePath.toString()
                + (indexDesc == null ? "" : (", " + "IndexDesc: " + indexDesc.toString())), e);
    }

    return dbTableHandle;
}

From source file:org.apache.drill.exec.store.parquet.metadata.Metadata.java

License:Apache License

/**
 * Get the metadata for a single file
 */
private ParquetFileMetadata_v3 getParquetFileMetadata_v3(ParquetTableMetadata_v3 parquetTableMetadata,
        final FileStatus file, final FileSystem fs, boolean allColumns, Set<String> columnSet)
        throws IOException, InterruptedException {
    final ParquetMetadata metadata;
    final UserGroupInformation processUserUgi = ImpersonationUtil.getProcessUserUGI();
    final Configuration conf = new Configuration(fs.getConf());
    try {
        metadata = processUserUgi.doAs((PrivilegedExceptionAction<ParquetMetadata>) () -> {
            try (ParquetFileReader parquetFileReader = ParquetFileReader
                    .open(HadoopInputFile.fromStatus(file, conf), readerConfig.toReadOptions())) {
                return parquetFileReader.getFooter();
            }
        });
    } catch (Exception e) {
        logger.error(
                "Exception while reading footer of parquet file [Details - path: {}, owner: {}] as process user {}",
                file.getPath(), file.getOwner(), processUserUgi.getShortUserName(), e);
        throw e;
    }

    MessageType schema = metadata.getFileMetaData().getSchema();

    Map<SchemaPath, ColTypeInfo> colTypeInfoMap = new HashMap<>();
    schema.getPaths();
    for (String[] path : schema.getPaths()) {
        colTypeInfoMap.put(SchemaPath.getCompoundPath(path), getColTypeInfo(schema, schema, path, 0));
    }

    List<RowGroupMetadata_v3> rowGroupMetadataList = Lists.newArrayList();

    ArrayList<SchemaPath> ALL_COLS = new ArrayList<>();
    ALL_COLS.add(SchemaPath.STAR_COLUMN);
    ParquetReaderUtility.DateCorruptionStatus containsCorruptDates = ParquetReaderUtility
            .detectCorruptDates(metadata, ALL_COLS, readerConfig.autoCorrectCorruptedDates());
    logger.debug("Contains corrupt dates: {}.", containsCorruptDates);

    for (BlockMetaData rowGroup : metadata.getBlocks()) {
        List<ColumnMetadata_v3> columnMetadataList = new ArrayList<>();
        long length = 0;
        for (ColumnChunkMetaData col : rowGroup.getColumns()) {
            String[] columnName = col.getPath().toArray();
            SchemaPath columnSchemaName = SchemaPath.getCompoundPath(columnName);
            ColTypeInfo colTypeInfo = colTypeInfoMap.get(columnSchemaName);

            ColumnTypeMetadata_v3 columnTypeMetadata = new ColumnTypeMetadata_v3(columnName,
                    col.getPrimitiveType().getPrimitiveTypeName(), colTypeInfo.originalType,
                    colTypeInfo.precision, colTypeInfo.scale, colTypeInfo.repetitionLevel,
                    colTypeInfo.definitionLevel);

            if (parquetTableMetadata.columnTypeInfo == null) {
                parquetTableMetadata.columnTypeInfo = new ConcurrentHashMap<>();
            }
            parquetTableMetadata.columnTypeInfo.put(new ColumnTypeMetadata_v3.Key(columnTypeMetadata.name),
                    columnTypeMetadata);
            // Store column metadata only if allColumns is set to true or if the column belongs to the subset of columns specified in the refresh command
            if (allColumns || columnSet == null || !allColumns && columnSet != null && columnSet.size() > 0
                    && columnSet.contains(columnSchemaName.getRootSegmentPath())) {
                Statistics<?> stats = col.getStatistics();
                // Save the column schema info. We'll merge it into one list
                Object minValue = null;
                Object maxValue = null;
                long numNulls = -1;
                boolean statsAvailable = stats != null && !stats.isEmpty();
                if (statsAvailable) {
                    if (stats.hasNonNullValue()) {
                        minValue = stats.genericGetMin();
                        maxValue = stats.genericGetMax();
                        if (containsCorruptDates == ParquetReaderUtility.DateCorruptionStatus.META_SHOWS_CORRUPTION
                                && columnTypeMetadata.originalType == OriginalType.DATE) {
                            minValue = ParquetReaderUtility.autoCorrectCorruptedDate((Integer) minValue);
                            maxValue = ParquetReaderUtility.autoCorrectCorruptedDate((Integer) maxValue);
                        }
                    }
                    numNulls = stats.getNumNulls();
                }
                ColumnMetadata_v3 columnMetadata = new ColumnMetadata_v3(columnTypeMetadata.name,
                        col.getPrimitiveType().getPrimitiveTypeName(), minValue, maxValue, numNulls);
                columnMetadataList.add(columnMetadata);
            }
            length += col.getTotalSize();
        }

        // DRILL-5009: Skip the RowGroup if it is empty
        // Note we still read the schema even if there are no values in the RowGroup
        if (rowGroup.getRowCount() == 0) {
            continue;
        }
        RowGroupMetadata_v3 rowGroupMeta = new RowGroupMetadata_v3(rowGroup.getStartingPos(), length,
                rowGroup.getRowCount(), getHostAffinity(file, fs, rowGroup.getStartingPos(), length),
                columnMetadataList);

        rowGroupMetadataList.add(rowGroupMeta);
    }
    Path path = Path.getPathWithoutSchemeAndAuthority(file.getPath());

    return new ParquetFileMetadata_v3(path, file.getLen(), rowGroupMetadataList);
}