Example usage for org.apache.hadoop.security UserGroupInformation doAs

Introduction

On this page you can find example usage of org.apache.hadoop.security.UserGroupInformation.doAs.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public <T> T doAs(PrivilegedExceptionAction<T> action) throws IOException, InterruptedException 

Document

Run the given action as the user, potentially throwing an exception.
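
Before the examples, here is a minimal self-contained sketch of the call pattern they all share: obtain a UserGroupInformation, wrap the privileged work in a PrivilegedExceptionAction, and handle the checked IOException and InterruptedException. The user name "alice" and the action body are placeholders, not taken from any example below.

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder user; createRemoteUser builds a UGI without credentials.
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");

        // doAs runs the action with the UGI's subject and passes the action's
        // return value through; failures surface as IOException or
        // InterruptedException, per the prototype above.
        String runAsUser = ugi.doAs(new PrivilegedExceptionAction<String>() {
            @Override
            public String run() throws IOException {
                return UserGroupInformation.getCurrentUser().getUserName();
            }
        });
        System.out.println("Action ran as: " + runAsUser);
    }
}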

Usage

From source file:org.apache.drill.exec.util.ImpersonationUtil.java

License:Apache License

/** Helper method to create DrillFileSystem */
private static DrillFileSystem createFileSystem(UserGroupInformation proxyUserUgi, final Configuration fsConf,
        final OperatorStats stats) {
    DrillFileSystem fs;
    try {
        fs = proxyUserUgi.doAs(new PrivilegedExceptionAction<DrillFileSystem>() {
            public DrillFileSystem run() throws Exception {
                logger.trace(
                        "Creating DrillFileSystem for proxy user: " + UserGroupInformation.getCurrentUser());
                return new DrillFileSystem(fsConf, stats);
            }
        });
    } catch (InterruptedException | IOException e) {
        final String errMsg = "Failed to create DrillFileSystem for proxy user: " + e.getMessage();
        logger.error(errMsg, e);
        throw new DrillRuntimeException(errMsg, e);
    }

    return fs;
}
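
The ImpersonationUtil.createProxyUgi helper used here (and again in the FragmentExecutor example below) presumably wraps Hadoop's own proxy-user factory. A hedged sketch of that underlying call, assuming the cluster's hadoop.proxyuser.* rules allow the login user to impersonate the target ("queryUser" is a placeholder):

import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUgiSketch {
    public static void main(String[] args) throws Exception {
        // The real (login) user must be authorized to impersonate "queryUser"
        // via the cluster's hadoop.proxyuser.* configuration.
        UserGroupInformation realUser = UserGroupInformation.getLoginUser();
        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser("queryUser", realUser);
        System.out.println("Proxy UGI: " + proxyUgi);
    }
}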

From source file:org.apache.drill.exec.work.fragment.FragmentExecutor.java

License:Apache License

@Override
public void run() {
    // if a cancel thread has already entered this executor, we have no reason to continue.
    if (!hasCloseoutThread.compareAndSet(false, true)) {
        return;
    }

    final Thread myThread = Thread.currentThread();
    myThreadRef.set(myThread);
    final String originalThreadName = myThread.getName();
    final FragmentHandle fragmentHandle = fragmentContext.getHandle();
    final DrillbitContext drillbitContext = fragmentContext.getDrillbitContext();
    final ClusterCoordinator clusterCoordinator = drillbitContext.getClusterCoordinator();
    final DrillbitStatusListener drillbitStatusListener = new FragmentDrillbitStatusListener();
    final String newThreadName = QueryIdHelper.getExecutorThreadName(fragmentHandle);

    try {

        myThread.setName(newThreadName);

        // if we didn't get the root operator when the executor was created, create it now.
        final FragmentRoot rootOperator = this.rootOperator != null ? this.rootOperator
                : drillbitContext.getPlanReader().readFragmentOperator(fragment.getFragmentJson());

        root = ImplCreator.getExec(fragmentContext, rootOperator);
        if (root == null) {
            return;
        }

        clusterCoordinator.addDrillbitStatusListener(drillbitStatusListener);
        updateState(FragmentState.RUNNING);

        eventProcessor.start();
        injector.injectPause(fragmentContext.getExecutionControls(), "fragment-running", logger);

        final DrillbitEndpoint endpoint = drillbitContext.getEndpoint();
        logger.debug("Starting fragment {}:{} on {}:{}", fragmentHandle.getMajorFragmentId(),
                fragmentHandle.getMinorFragmentId(), endpoint.getAddress(), endpoint.getUserPort());

        final UserGroupInformation queryUserUgi = fragmentContext.isImpersonationEnabled()
                ? ImpersonationUtil.createProxyUgi(fragmentContext.getQueryUserName())
                : ImpersonationUtil.getProcessUserUGI();

        queryUserUgi.doAs(new PrivilegedExceptionAction<Void>() {
            public Void run() throws Exception {
                injector.injectChecked(fragmentContext.getExecutionControls(), "fragment-execution",
                        IOException.class);
                /*
                 * Run the query until root.next returns false OR we no longer need to continue.
                 */
                while (shouldContinue() && root.next()) {
                    // loop
                }

                return null;
            }
        });

    } catch (OutOfMemoryError | OutOfMemoryException e) {
        if (!(e instanceof OutOfMemoryError) || "Direct buffer memory".equals(e.getMessage())) {
            fail(UserException.memoryError(e).build(logger));
        } else {
            // we have a heap out-of-memory error. The JVM is unstable; exit.
            CatastrophicFailure.exit(e, "Unable to handle out of memory condition in FragmentExecutor.", -2);
        }
    } catch (AssertionError | Exception e) {
        fail(e);
    } finally {

        // no longer allow this thread to be interrupted. We synchronize here to make sure that cancel can't set an
        // interruption after we have moved beyond this block.
        synchronized (myThreadRef) {
            myThreadRef.set(null);
            Thread.interrupted();
        }

        // Make sure the event processor is started at least once
        eventProcessor.start();

        // here we could be in FAILED, RUNNING, or CANCELLATION_REQUESTED
        cleanup(FragmentState.FINISHED);

        clusterCoordinator.removeDrillbitStatusListener(drillbitStatusListener);

        myThread.setName(originalThreadName);

    }
}

From source file:org.apache.druid.security.kerberos.KerberosHttpClient.java

License:Apache License

private <Intermediate, Final> void inner_go(final Request request,
        final HttpResponseHandler<Intermediate, Final> httpResponseHandler, final Duration duration,
        final SettableFuture<Final> future) {
    try {
        final String host = request.getUrl().getHost();
        final URI uri = request.getUrl().toURI();

        Map<String, List<String>> cookieMap = cookieManager.get(uri, Collections.emptyMap());
        for (Map.Entry<String, List<String>> entry : cookieMap.entrySet()) {
            request.addHeaderValues(entry.getKey(), entry.getValue());
        }
        final boolean should_retry_on_unauthorized_response;

        if (DruidKerberosUtil.needToSendCredentials(cookieManager.getCookieStore(), uri)) {
            // No Cookies for requested URI, authenticate user and add authentication header
            log.debug("No Auth Cookie found for URI[%s]. Existing Cookies[%s] Authenticating... ", uri,
                    cookieManager.getCookieStore().getCookies());
            DruidKerberosUtil.authenticateIfRequired(internalClientPrincipal, internalClientKeytab);
            UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
            String challenge = currentUser.doAs(new PrivilegedExceptionAction<String>() {
                @Override
                public String run() throws Exception {
                    return DruidKerberosUtil.kerberosChallenge(host);
                }
            });
            request.setHeader(HttpHeaders.Names.AUTHORIZATION, "Negotiate " + challenge);
            should_retry_on_unauthorized_response = false;
        } else {
            should_retry_on_unauthorized_response = true;
            log.debug("Found Auth Cookie found for URI[%s].", uri);
        }

        ListenableFuture<RetryResponseHolder<Final>> internalFuture = delegate.go(request,
                new RetryIfUnauthorizedResponseHandler<Intermediate, Final>(new ResponseCookieHandler(
                        request.getUrl().toURI(), cookieManager, httpResponseHandler)),
                duration);

        Futures.addCallback(internalFuture, new FutureCallback<RetryResponseHolder<Final>>() {
            @Override
            public void onSuccess(RetryResponseHolder<Final> result) {
                if (should_retry_on_unauthorized_response && result.shouldRetry()) {
                    log.info("Preparing for Retry");
                    // remove Auth cookie
                    DruidKerberosUtil.removeAuthCookie(cookieManager.getCookieStore(), uri);
                    // clear existing cookie
                    request.setHeader("Cookie", "");
                    inner_go(request.copy(), httpResponseHandler, duration, future);
                } else {
                    log.debug("Not retrying and returning future response");
                    future.set(result.getObj());
                }
            }

            @Override
            public void onFailure(Throwable t) {
                future.setException(t);
            }
        }, exec);
    } catch (Throwable e) {
        throw new RuntimeException(e);
    }
}

From source file:org.apache.falcon.catalog.HiveCatalogService.java

License:Apache License

/**
 * This is used from within the falcon namespace.
 *
 * @param conf                      conf
 * @param catalogUrl                metastore uri
 * @return hive metastore client handle
 * @throws FalconException
 */
private static HiveMetaStoreClient createProxiedClient(Configuration conf, String catalogUrl)
        throws FalconException {

    try {
        final HiveConf hcatConf = createHiveConf(conf, catalogUrl);
        UserGroupInformation proxyUGI = CurrentUser.getProxyUGI();
        addSecureCredentialsAndToken(conf, hcatConf, proxyUGI);

        LOG.info("Creating HCatalog client object for {}", catalogUrl);
        return proxyUGI.doAs(new PrivilegedExceptionAction<HiveMetaStoreClient>() {
            public HiveMetaStoreClient run() throws Exception {
                return new HiveMetaStoreClient(hcatConf);
            }
        });
    } catch (Exception e) {
        throw new FalconException("Exception creating Proxied HiveMetaStoreClient: " + e.getMessage(), e);
    }
}

From source file:org.apache.falcon.cluster.util.EmbeddedCluster.java

License:Apache License

public static EmbeddedCluster newCluster(final String name, final String user) throws Exception {
    UserGroupInformation hdfsUser = UserGroupInformation.createRemoteUser(user);
    return hdfsUser.doAs(new PrivilegedExceptionAction<EmbeddedCluster>() {
        @Override
        public EmbeddedCluster run() throws Exception {
            return createClusterAsUser(name, false, null, null);
        }
    });
}

From source file:org.apache.falcon.entity.DatasourceHelper.java

License:Apache License

/**
 * Fetch the password from the corresponding credential store.
 * @param c the password alias and its provider path
 * @return actual password
 * @throws FalconException
 */
private static String fetchPasswordInfoFromCredentialStore(final PasswordAliasType c) throws FalconException {
    try {
        final String credPath = c.getProviderPath();
        final URI credURI = new URI(credPath);
        if (StringUtils.isBlank(credURI.getScheme()) || StringUtils.isBlank(credURI.getHost())
                || StringUtils.isBlank(credURI.getPath())) {
            throw new FalconException("Password alias jceks provider HDFS path is incorrect.");
        }
        final String alias = c.getAlias();
        if (StringUtils.isBlank(alias)) {
            throw new FalconException("Password alias is empty.");
        }

        final String credProviderPath = buildJceksProviderPath(credURI);
        LOG.info("Credential provider HDFS path : " + credProviderPath);

        if (CredentialProviderHelper.isProviderAvailable()) {
            UserGroupInformation ugi = CurrentUser.getProxyUGI();
            String password = ugi.doAs(new PrivilegedExceptionAction<String>() {
                public String run() throws Exception {
                    final Configuration conf = new Configuration();
                    conf.set(HadoopClientFactory.FS_DEFAULT_NAME_KEY, credPath);
                    conf.set(CredentialProviderHelper.CREDENTIAL_PROVIDER_PATH, credProviderPath);
                    FileSystem fs = FileSystem.get(credURI, conf);
                    if (!fs.exists(new Path(credPath))) {
                        String msg = String.format(
                                "Credential provider hdfs path [%s] does not " + "exist or access denied!",
                                credPath);
                        LOG.error(msg);
                        throw new FalconException(msg);
                    }
                    return CredentialProviderHelper.resolveAlias(conf, alias);
                }
            });
            return password;
        } else {
            throw new FalconException("Credential Provider is not initialized");
        }
    } catch (Exception ioe) {
        String msg = "Exception while trying to fetch credential alias";
        LOG.error(msg, ioe);
        throw new FalconException(msg, ioe);
    }
}
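
CredentialProviderHelper above is Falcon's own wrapper; with plain Hadoop, the same alias lookup is roughly a Configuration.getPassword call once the provider path is set. A sketch under that assumption (the jceks path and alias are placeholders):

import org.apache.hadoop.conf.Configuration;

public class CredentialLookupSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder provider path; points getPassword at the jceks keystore.
        conf.set("hadoop.security.credential.provider.path",
                "jceks://hdfs@namenode:8020/user/falcon/credentials.jceks");
        // getPassword resolves the alias against the configured providers,
        // falling back to a plain config property if no provider has it.
        char[] password = conf.getPassword("my.password.alias");
        System.out.println(password != null ? "alias resolved" : "alias not found");
    }
}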

From source file:org.apache.falcon.hadoop.HadoopClientFactory.java

License:Apache License

/**
 * Return a FileSystem created with the provided user for the specified URI.
 *
 * @param ugi user group information
 * @param uri  file system URI.
 * @param conf Configuration with all necessary information to create the FileSystem.
 * @return FileSystem created with the provided user/group.
 * @throws org.apache.falcon.FalconException
 *          if the filesystem could not be created.
 */
@SuppressWarnings("ResultOfMethodCallIgnored")
public FileSystem createFileSystem(UserGroupInformation ugi, final URI uri, final Configuration conf)
        throws FalconException {
    validateInputs(ugi, uri, conf);

    try {
        // prevent falcon impersonating falcon; no need to use doAs
        final String proxyUserName = ugi.getShortUserName();
        if (proxyUserName.equals(UserGroupInformation.getLoginUser().getShortUserName())) {
            LOG.trace("Creating FS for the login user {}, impersonation not required", proxyUserName);
            return FileSystem.get(uri, conf);
        }

        LOG.trace("Creating FS impersonating user {}", proxyUserName);
        return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
            public FileSystem run() throws Exception {
                return FileSystem.get(uri, conf);
            }
        });
    } catch (InterruptedException | IOException ex) {
        throw new FalconException("Exception creating FileSystem:" + ex.getMessage(), ex);
    }
}

From source file:org.apache.falcon.hadoop.HadoopClientFactory.java

License:Apache License

/**
 * Return a DistributedFileSystem created with the provided user for the specified URI.
 *
 * @param ugi user group information
 * @param uri  file system URI.
 * @param conf Configuration with all necessary information to create the FileSystem.
 * @return DistributedFileSystem created with the provided user/group.
 * @throws org.apache.falcon.FalconException
 *          if the filesystem could not be created.
 */
@SuppressWarnings("ResultOfMethodCallIgnored")
public DistributedFileSystem createDistributedFileSystem(UserGroupInformation ugi, final URI uri,
        final Configuration conf) throws FalconException {
    validateInputs(ugi, uri, conf);
    FileSystem returnFs;
    try {
        // prevent falcon impersonating falcon; no need to use doAs
        final String proxyUserName = ugi.getShortUserName();
        if (proxyUserName.equals(UserGroupInformation.getLoginUser().getShortUserName())) {
            LOG.info("Creating Distributed FS for the login user {}, impersonation not required",
                    proxyUserName);
            returnFs = DistributedFileSystem.get(uri, conf);
        } else {
            LOG.info("Creating FS impersonating user {}", proxyUserName);
            returnFs = ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
                public FileSystem run() throws Exception {
                    return DistributedFileSystem.get(uri, conf);
                }
            });
        }

        return (DistributedFileSystem) returnFs;
    } catch (InterruptedException | IOException ex) {
        throw new FalconException("Exception creating FileSystem:" + ex.getMessage(), ex);
    }
}

From source file:org.apache.falcon.hadoop.HadoopClientFactory.java

License:Apache License

/**
 * This method validates if the execute url is able to reach the MR endpoint.
 *
 * @param executeUrl JobTracker or ResourceManager URL
 * @throws IOException
 */
public void validateJobClient(String executeUrl, String rmPrincipal) throws IOException {
    final JobConf jobConf = new JobConf();
    jobConf.set(MR_JT_ADDRESS_KEY, executeUrl);
    jobConf.set(YARN_RM_ADDRESS_KEY, executeUrl);
    /**
     * It is possible that the RM/JT principal can be different between clusters,
     * for example, the cluster is using a different KDC with cross-domain trust
     * with the Falcon KDC. In that case, we want to allow the user to provide
     * the RM principal similar to NN principal.
     */
    if (UserGroupInformation.isSecurityEnabled() && StringUtils.isNotEmpty(rmPrincipal)) {
        jobConf.set(SecurityUtil.RM_PRINCIPAL, rmPrincipal);
    }
    UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
    try {
        JobClient jobClient = loginUser.doAs(new PrivilegedExceptionAction<JobClient>() {
            public JobClient run() throws Exception {
                return new JobClient(jobConf);
            }
        });

        jobClient.getClusterStatus().getMapTasks();
    } catch (InterruptedException e) {
        throw new IOException("Exception creating job client:" + e.getMessage(), e);
    }
}

From source file:org.apache.falcon.recipe.RecipeTool.java

License:Apache License

private FileSystem createFileSystem(UserGroupInformation ugi, final URI uri, final Configuration conf)
        throws Exception {
    try {
        final String proxyUserName = ugi.getShortUserName();
        if (proxyUserName.equals(UserGroupInformation.getLoginUser().getShortUserName())) {
            return FileSystem.get(uri, conf);
        }

        return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() {
            public FileSystem run() throws Exception {
                return FileSystem.get(uri, conf);
            }
        });
    } catch (InterruptedException ex) {
        throw new IOException("Exception creating FileSystem:" + ex.getMessage(), ex);
    }
}