Example usage for org.apache.hadoop.security UserGroupInformation createProxyUser

Introduction

On this page you can find example usage for org.apache.hadoop.security UserGroupInformation createProxyUser.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation createProxyUser(String user, UserGroupInformation realUser) 

Document

Create a proxy user using the username of the effective user and the UGI of the real user.
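
Before the real-world examples below, here is a minimal, self-contained sketch of the usual pattern: obtain the real (login) user, wrap it in a proxy UGI for the effective user, and run the privileged work inside doAs. The user name "alice" and the path are hypothetical, and on a secured cluster the real user must additionally be authorized to impersonate others via the hadoop.proxyuser.* properties in core-site.xml.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUserSketch {
    public static void main(String[] args) throws Exception {
        final Configuration conf = new Configuration();

        // The real user is whoever is currently logged in (e.g. from a keytab);
        // "alice" is the hypothetical effective user to impersonate.
        UserGroupInformation realUser = UserGroupInformation.getLoginUser();
        UserGroupInformation proxyUgi = UserGroupInformation.createProxyUser("alice", realUser);

        // Work submitted through doAs executes with alice's identity.
        boolean exists = proxyUgi.doAs(new PrivilegedExceptionAction<Boolean>() {
            @Override
            public Boolean run() throws Exception {
                FileSystem fs = FileSystem.get(conf);
                return fs.exists(new Path("/user/alice"));
            }
        });
        System.out.println("/user/alice exists: " + exists);
    }
}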

Usage

From source file: org.apache.storm.hdfs.security.AutoHDFSNimbus.java

License: Apache License

@SuppressWarnings("unchecked")
private byte[] getHadoopCredentials(Map<String, Object> conf, final Configuration configuration,
        final String topologySubmitterUser) {
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            login(configuration);

            final URI nameNodeURI = conf.containsKey(TOPOLOGY_HDFS_URI)
                    ? new URI(conf.get(TOPOLOGY_HDFS_URI).toString())
                    : FileSystem.getDefaultUri(configuration);

            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();

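            // Impersonate the topology submitter so the HDFS delegation tokens
            // fetched below are issued in that user's name.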
            final UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(topologySubmitterUser,
                    ugi);

            Credentials creds = proxyUser.doAs(new PrivilegedAction<Credentials>() {
                @Override
                public Credentials run() {
                    try {
                        FileSystem fileSystem = FileSystem.get(nameNodeURI, configuration);
                        Credentials credential = proxyUser.getCredentials();

                        if (configuration.get(STORM_USER_NAME_KEY) == null) {
                            configuration.set(STORM_USER_NAME_KEY, hdfsPrincipal);
                        }

                        fileSystem.addDelegationTokens(configuration.get(STORM_USER_NAME_KEY), credential);
                        LOG.info("Delegation tokens acquired for user {}", topologySubmitterUser);
                        return credential;
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                }
            });

            ByteArrayOutputStream bao = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bao);

            creds.write(out);
            out.flush();
            out.close();

            return bao.toByteArray();
        } else {
            throw new RuntimeException("Security is not enabled for HDFS");
        }
    } catch (Exception ex) {
        throw new RuntimeException("Failed to get delegation tokens.", ex);
    }
}

From source file: org.apache.storm.hive.security.AutoHive.java

License: Apache License

@SuppressWarnings("unchecked")
protected byte[] getHadoopCredentials(Map<String, Object> conf, final Configuration configuration) {
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            String topologySubmitterUser = (String) conf.get(Config.TOPOLOGY_SUBMITTER_PRINCIPAL);
            String hiveMetaStoreURI = getMetaStoreURI(configuration);
            String hiveMetaStorePrincipal = getMetaStorePrincipal(configuration);
            HiveConf hcatConf = createHiveConf(hiveMetaStoreURI, hiveMetaStorePrincipal);
            login(configuration);

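            // Impersonate the topology submitter; the Hive delegation token fetched
            // below is attached to this proxy user's credentials.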
            UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
            UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(topologySubmitterUser,
                    currentUser);
            try {
                Token<DelegationTokenIdentifier> delegationTokenId = getDelegationToken(hcatConf,
                        hiveMetaStorePrincipal, topologySubmitterUser);
                proxyUser.addToken(delegationTokenId);
                LOG.info("Obtained Hive tokens, adding to user credentials.");

                Credentials credential = proxyUser.getCredentials();
                ByteArrayOutputStream bao = new ByteArrayOutputStream();
                ObjectOutputStream out = new ObjectOutputStream(bao);
                credential.write(out);
                out.flush();
                out.close();
                return bao.toByteArray();
            } catch (Exception ex) {
                LOG.debug(" Exception" + ex.getMessage());
                throw ex;
            }
        } else {
            throw new RuntimeException("Security is not enabled for Hadoop");
        }
    } catch (Exception ex) {
        throw new RuntimeException("Failed to get delegation tokens.", ex);
    }
}

From source file: org.apache.storm.hive.security.AutoHiveNimbus.java

License: Apache License

@SuppressWarnings("unchecked")
protected byte[] getHadoopCredentials(Map<String, Object> conf, final Configuration configuration,
        final String topologySubmitterUser) {
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            String hiveMetaStoreURI = getMetaStoreURI(configuration);
            String hiveMetaStorePrincipal = getMetaStorePrincipal(configuration);
            HiveConf hcatConf = createHiveConf(hiveMetaStoreURI, hiveMetaStorePrincipal);
            login(configuration);

            UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
            UserGroupInformation proxyUser = UserGroupInformation.createProxyUser(topologySubmitterUser,
                    currentUser);
            try {
                Token<DelegationTokenIdentifier> delegationTokenId = getDelegationToken(hcatConf,
                        hiveMetaStorePrincipal, topologySubmitterUser);
                proxyUser.addToken(delegationTokenId);
                LOG.info("Obtained Hive tokens, adding to user credentials.");

                Credentials credential = proxyUser.getCredentials();
                ByteArrayOutputStream bao = new ByteArrayOutputStream();
                ObjectOutputStream out = new ObjectOutputStream(bao);
                credential.write(out);
                out.flush();
                out.close();
                return bao.toByteArray();
            } catch (Exception ex) {
                LOG.debug(" Exception" + ex.getMessage());
                throw ex;
            }
        } else {
            throw new RuntimeException("Security is not enabled for Hadoop");
        }
    } catch (Exception ex) {
        throw new RuntimeException("Failed to get delegation tokens.", ex);
    }
}

From source file: org.apache.zeppelin.jdbc.JDBCInterpreter.java

License: Apache License

public Connection getConnection(String propertyKey, InterpreterContext interpreterContext)
        throws ClassNotFoundException, SQLException, InterpreterException, IOException {
    final String user = interpreterContext.getAuthenticationInfo().getUser();
    Connection connection;
    if (propertyKey == null || basePropretiesMap.get(propertyKey) == null) {
        return null;
    }

    JDBCUserConfigurations jdbcUserConfigurations = getJDBCConfiguration(user);
    setUserProperty(propertyKey, interpreterContext);

    final Properties properties = jdbcUserConfigurations.getPropertyMap(propertyKey);
    final String url = properties.getProperty(URL_KEY);

    if (isEmpty(getProperty("zeppelin.jdbc.auth.type"))) {
        connection = getConnectionFromPool(url, user, propertyKey, properties);
    } else {
        UserGroupInformation.AuthenticationMethod authType = JDBCSecurityImpl.getAuthtype(getProperties());

        final String connectionUrl = appendProxyUserToURL(url, user, propertyKey);

        JDBCSecurityImpl.createSecureConfiguration(getProperties(), authType);
        switch (authType) {
        case KERBEROS:
            if (user == null
                    || "false".equalsIgnoreCase(getProperty("zeppelin.jdbc.auth.kerberos.proxy.enable"))) {
                connection = getConnectionFromPool(connectionUrl, user, propertyKey, properties);
            } else {
                if (basePropretiesMap.get(propertyKey).containsKey("proxy.user.property")) {
                    connection = getConnectionFromPool(connectionUrl, user, propertyKey, properties);
                } else {
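                    // No proxy.user.property is configured, so impersonate the web
                    // user through a Hadoop proxy UGI and connect inside doAs.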
                    UserGroupInformation ugi = null;
                    try {
                        ugi = UserGroupInformation.createProxyUser(user, UserGroupInformation.getCurrentUser());
                    } catch (Exception e) {
                        logger.error("Error in getCurrentUser", e);
                        throw new InterpreterException("Error in getCurrentUser", e);
                    }

                    final String poolKey = propertyKey;
                    try {
                        connection = ugi.doAs(new PrivilegedExceptionAction<Connection>() {
                            @Override
                            public Connection run() throws Exception {
                                return getConnectionFromPool(connectionUrl, user, poolKey, properties);
                            }
                        });
                    } catch (Exception e) {
                        logger.error("Error in doAs", e);
                        throw new InterpreterException("Error in doAs", e);
                    }
                }
            }
            break;

        default:
            connection = getConnectionFromPool(connectionUrl, user, propertyKey, properties);
        }
    }

    return connection;
}

From source file: org.apache.zeppelin.scalding.ScaldingInterpreter.java

License: Apache License

@Override
public InterpreterResult interpret(String cmd, InterpreterContext contextInterpreter) {
    String user = contextInterpreter.getAuthenticationInfo().getUser();
    logger.info("Running Scalding command: user: {} cmd: '{}'", user, cmd);

    if (interpreter == null) {
        logger.error("interpreter == null, open may not have been called because max.open.instances reached");
        return new InterpreterResult(Code.ERROR,
                "interpreter == null\n" + "open may not have been called because max.open.instances reached");
    }
    if (cmd == null || cmd.trim().length() == 0) {
        return new InterpreterResult(Code.SUCCESS);
    }
    InterpreterResult interpreterResult = new InterpreterResult(Code.ERROR);
    if (getProperty(ARGS_STRING).contains("hdfs")) {
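        // The job touches HDFS, so run it as the Zeppelin end user by proxying
        // through the interpreter's login (keytab) identity.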
        UserGroupInformation ugi = null;
        try {
            ugi = UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser());
        } catch (IOException e) {
            logger.error("Error creating UserGroupInformation", e);
            return new InterpreterResult(Code.ERROR, e.getMessage());
        }
        try {
            // Make variables final to avoid the "local variable is accessed from
            // within inner class; needs to be declared final" compile error on JDK 7
            final String cmd1 = cmd;
            final InterpreterContext contextInterpreter1 = contextInterpreter;
            PrivilegedExceptionAction<InterpreterResult> action = new PrivilegedExceptionAction<InterpreterResult>() {
                public InterpreterResult run() throws Exception {
                    return interpret(cmd1.split("\n"), contextInterpreter1);
                }
            };
            interpreterResult = ugi.doAs(action);
        } catch (Exception e) {
            logger.error("Error running command with ugi.doAs", e);
            return new InterpreterResult(Code.ERROR, e.getMessage());
        }
    } else {
        interpreterResult = interpret(cmd.split("\n"), contextInterpreter);
    }
    return interpreterResult;
}

From source file: org.kaaproject.kaa.server.flume.sink.hdfs.KaaHdfsSink.java

License: Apache License

private boolean authenticate() {

    // logic for kerberos login
    boolean useSecurity = UserGroupInformation.isSecurityEnabled();

    LOG.info("Hadoop Security enabled: " + useSecurity);

    if (useSecurity) {

        // sanity checking
        if (kerbConfPrincipal.isEmpty()) {
            LOG.error("Hadoop running in secure mode, but Flume config doesn't "
                    + "specify a principal to use for Kerberos auth.");
            return false;
        }
        if (kerbKeytab.isEmpty()) {
            LOG.error("Hadoop running in secure mode, but Flume config doesn't "
                    + "specify a keytab to use for Kerberos auth.");
            return false;
        } else {
            // If a keytab is specified, the user expects it to take effect.
            // HDFSSink will halt if the keytab file is missing or unreadable.
            File kfile = new File(kerbKeytab);
            if (!(kfile.isFile() && kfile.canRead())) {
                throw new IllegalArgumentException(
                        "The keyTab file: " + kerbKeytab + " is nonexistent or can't read. "
                                + "Please specify a readable keytab file for Kerberos auth.");
            }
        }

        String principal;
        try {
            // resolves _HOST pattern using standard Hadoop search/replace
            // via DNS lookup when 2nd argument is empty
            principal = SecurityUtil.getServerPrincipal(kerbConfPrincipal, "");
        } catch (IOException ex) {
            LOG.error("Host lookup error resolving kerberos principal (" + kerbConfPrincipal
                    + "). Exception follows.", ex);
            return false;
        }

        Preconditions.checkNotNull(principal, "Principal must not be null");
        KerberosUser prevUser = staticLogin.get();
        KerberosUser newUser = new KerberosUser(principal, kerbKeytab);

        // be cruel and unusual when user tries to login as multiple principals
        // this isn't really valid with a reconfigure but this should be rare
        // enough to warrant a restart of the agent JVM
        // TODO: find a way to interrogate the entire current config state,
        // since we don't have to be unnecessarily protective if they switch all
        // HDFS sinks to use a different principal all at once.
        Preconditions.checkState(prevUser == null || prevUser.equals(newUser),
                "Cannot use multiple kerberos principals in the same agent. "
                        + " Must restart agent to use new principal or keytab. " + "Previous = %s, New = %s",
                prevUser, newUser);

        // attempt to use cached credential if the user is the same
        // this is polite and should avoid flooding the KDC with auth requests
        UserGroupInformation curUser = null;
        if (prevUser != null && prevUser.equals(newUser)) {
            try {
                curUser = UserGroupInformation.getLoginUser();
            } catch (IOException ex) {
                LOG.warn("User unexpectedly had no active login. Continuing with " + "authentication", ex);
            }
        }

        if (curUser == null || !curUser.getUserName().equals(principal)) {
            try {
                // static login
                kerberosLogin(this, principal, kerbKeytab);
            } catch (IOException ex) {
                LOG.error("Authentication or file read error while attempting to "
                        + "login as kerberos principal (" + principal + ") using " + "keytab (" + kerbKeytab
                        + "). Exception follows.", ex);
                return false;
            }
        } else {
            LOG.debug("{}: Using existing principal login: {}", this, curUser);
        }

        // we supposedly got through this unscathed... so store the static user
        staticLogin.set(newUser);
    }

    // hadoop impersonation works with or without kerberos security
    proxyTicket = null;
    if (!proxyUserName.isEmpty()) {
        try {
            proxyTicket = UserGroupInformation.createProxyUser(proxyUserName,
                    UserGroupInformation.getLoginUser());
        } catch (IOException ex) {
            LOG.error("Unable to login as proxy user. Exception follows.", ex);
            return false;
        }
    }

    UserGroupInformation ugi = null;
    if (proxyTicket != null) {
        ugi = proxyTicket;
    } else if (useSecurity) {
        try {
            ugi = UserGroupInformation.getLoginUser();
        } catch (IOException ex) {
            LOG.error("Unexpected error: Unable to get authenticated user after "
                    + "apparent successful login! Exception follows.", ex);
            return false;
        }
    }

    if (ugi != null) {
        // dump login information
        AuthenticationMethod authMethod = ugi.getAuthenticationMethod();
        LOG.info("Auth method: {}", authMethod);
        LOG.info(" User name: {}", ugi.getUserName());
        LOG.info(" Using keytab: {}", ugi.isFromKeytab());
        if (authMethod == AuthenticationMethod.PROXY) {
            UserGroupInformation superUser;
            try {
                superUser = UserGroupInformation.getLoginUser();
                LOG.info(" Superuser auth: {}", superUser.getAuthenticationMethod());
                LOG.info(" Superuser name: {}", superUser.getUserName());
                LOG.info(" Superuser using keytab: {}", superUser.isFromKeytab());
            } catch (IOException ex) {
                LOG.error("Unexpected error: unknown superuser impersonating proxy.", ex);
                return false;
            }
        }

        LOG.info("Logged in as user {}", ugi.getUserName());

        return true;
    }

    return true;
}

From source file: org.kitesdk.spring.hbase.example.service.WebPageSnapshotService.java

License: Apache License

/**
 * Take a snapshot of a URL. This WebPageSnapshot is stored in HBase. Returns
 * the WebPageSnapshotMeta.
 *
 * If the URL is a redirect, the snapshot is stored under the final URL
 * destination. A WebPageRedirectModel is stored in the redirect table so when
 * fetching snapshots, we can follow the proper redirect path.
 *
 * @param url The URL to take a snapshot of
 * @param contentKey The key used to store the content
 * @param user The user taking a snapshot
 * @return The WebPageSnapshotMeta for the page that we snapshotted.
 * @throws IOException
 */
public WebPageSnapshotMeta takeSnapshot(final String url, final String contentKey, final String user)
        throws IOException {
    WebPageSnapshotMeta meta = null;
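    // Perform the snapshot as the requesting user by proxying through the
    // service's login identity.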
    UserGroupInformation ugi = UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser());
    try {
        meta = ugi.doAs(new PrivilegedExceptionAction<WebPageSnapshotMeta>() {

            @Override
            public WebPageSnapshotMeta run() throws Exception {
                WebPageSnapshotModel webPageSnapshotModel = fetchWebPage(url, contentKey);
                if (!webPageSnapshotModel.getUrl().equals(url)) {
                    // Url is different, so must have redirected. Store the redirect model
                    WebPageRedirectModel redirectModel = WebPageRedirectModel.newBuilder().setUrl(url)
                            .setDestinationUrl(webPageSnapshotModel.getUrl()).build();
                    webPageRedirectModels(user).put(redirectModel);
                } else {
                    // If redirect exists, remove it since this URL no longer redirects
                    Key key = new Key.Builder(webPageRedirectModels(user)).add("url", url).build();
                    WebPageRedirectModel redirectModel = webPageRedirectModels(user).get(key);
                    if (redirectModel != null) {
                        webPageRedirectModels(user).delete(key);
                    }
                }
                webPageSnapshotModels(user).put(webPageSnapshotModel);
                return conversionService.convert(webPageSnapshotModel, WebPageSnapshotMeta.class);
            }
        });
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        if (meta == null) {
            throw new IOException("Interrupted trying to save the snapshot", ex);
        }
    }

    return meta;
}

From source file: org.kitesdk.spring.hbase.example.service.WebPageSnapshotService.java

License: Apache License

/**
 * Get the epoch timestamps for every snapshot time of a URL in HBase.
 *
 * @param url The URL of the page to get snapshot timestamps for
 * @param user The user on whose behalf the snapshots are read
 * @return The list of timestamps
 */
public List<Long> getSnapshotTimestamps(String url, final String user) throws IOException {
    List<Long> snapshotTimestamps = null;
    final String normalizedUrl = normalizeUrl(url, user);
    LOG.error("Getting snapshot timestamps: url = {}, user = {}, normalized url = {}",
            new Object[] { url, user, normalizedUrl });
    UserGroupInformation ugi = UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser());

    snapshotTimestamps = ugi.doAs(new PrivilegedAction<List<Long>>() {

        @Override
        public List<Long> run() {
            List<Long> snapshotTimestamps = new ArrayList<Long>();
            DatasetReader<WebPageSnapshotModel> reader = null;
            try {
                reader = webPageSnapshotModels(user).from("url", normalizedUrl).from("fetchedAtRevTs", 0L)
                        .to("url", normalizedUrl).to("fetchedAtRevTs", Long.MAX_VALUE).newReader();
                while (reader.hasNext()) {
                    snapshotTimestamps.add(reader.next().getFetchedAt());
                }
            } finally {
                if (reader != null) {
                    reader.close();
                }
            }
            return snapshotTimestamps;
        }
    });

    return snapshotTimestamps;
}

From source file: org.kitesdk.spring.hbase.example.service.WebPageSnapshotService.java

License: Apache License

/**
 * Get the most recent WebPageSnapshotModel from HBase
 *
 * @param url The URL to get the snapshotted page from HBase
 * @return The WebPageSnapshotModel, or null if there are no fetches for this
 * URL.
 */
private WebPageSnapshotModel getMostRecentWebPageSnapshot(String url, final String user) throws IOException {
    WebPageSnapshotModel snapshot = null;
    final String normalizedUrl = normalizeUrl(url, user);

    UserGroupInformation ugi = UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser());

    LOG.error("Created proxy user " + ugi.getShortUserName() + " ugi: " + ugi);

    snapshot = ugi.doAs(new PrivilegedAction<WebPageSnapshotModel>() {

        @Override
        public WebPageSnapshotModel run() {
            DatasetReader<WebPageSnapshotModel> reader = null;
            try {
                // We don't know the exact timestamp in the key, but since keys
                // are in timestamp-descending order, the first row for a URL is
                // the most recent.
                reader = webPageSnapshotModels(user).from("url", normalizedUrl).from("fetchedAtRevTs", 0L)
                        .to("url", normalizedUrl).to("fetchedAtRevTs", Long.MAX_VALUE).newReader();
                if (reader.hasNext()) {
                    return reader.next();
                } else {
                    return null;
                }
            } finally {
                if (reader != null) {
                    reader.close();
                }
            }
        }

    });

    return snapshot;
}

From source file: org.kitesdk.spring.hbase.example.service.WebPageSnapshotService.java

License: Apache License

/**
 * Get the WebPageSnapshotModel from HBase
 *
 * @param url The URL of the WebPageSnapshotModel
 * @param ts The snapshot timestamp of the WebPageSnapshotModel
 * @return The WebPageSnapshotModel, or null if there is no snapshot for the
 * URL at this timestamp.
 */
private WebPageSnapshotModel getWebPageSnapshot(String url, final long ts, final String user)
        throws IOException {
    WebPageSnapshotModel snapshot = null;
    final String normalizedUrl = normalizeUrl(url, user);

    UserGroupInformation ugi = UserGroupInformation.createProxyUser(user, UserGroupInformation.getLoginUser());
    snapshot = ugi.doAs(new PrivilegedAction<WebPageSnapshotModel>() {

        @Override
        public WebPageSnapshotModel run() {
            Key key = new Key.Builder(webPageSnapshotModels(user)).add("url", normalizedUrl)
                    .add("fetchedAtRevTs", Long.MAX_VALUE - ts).build();
            return webPageSnapshotModels(user).get(key);
        }
    });

    return snapshot;
}