Example usage for org.apache.hadoop.security UserGroupInformation setConfiguration

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.setConfiguration.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void setConfiguration(Configuration conf) 

Document

Set the static configuration for UGI.
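
A minimal sketch of the typical call order (assuming a Kerberos-secured cluster; the principal and keytab path below are hypothetical placeholders): set the static configuration first, then perform the login.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiSetConfigurationSketch {
    public static UserGroupInformation login() throws IOException {
        Configuration conf = new Configuration();
        // Enable Kerberos so UGI resolves the secure authentication method.
        conf.set("hadoop.security.authentication", "kerberos");

        // The static configuration must be set before any UGI login call,
        // because UGI reads the authentication method from it.
        UserGroupInformation.setConfiguration(conf);

        // Hypothetical principal and keytab location, for illustration only.
        return UserGroupInformation.loginUserFromKeytabAndReturnUGI(
                "user/host@EXAMPLE.COM", "/etc/security/keytabs/user.keytab");
    }
}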

Usage

From source file:com.streamsets.pipeline.stage.origin.hdfs.cluster.ClusterHdfsSource.java

License:Apache License

private void validateHadoopFS(List<ConfigIssue> issues) {
    boolean validHadoopFsUri = true;
    hadoopConf = getHadoopConfiguration(issues);
    String hdfsUriInConf;
    if (hdfsUri != null && !hdfsUri.isEmpty()) {
        hadoopConf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, hdfsUri);
    } else {
        hdfsUriInConf = hadoopConf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
        if (hdfsUriInConf == null) {
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri", Errors.HADOOPFS_19));
            return;
        } else {
            hdfsUri = hdfsUriInConf;
        }
    }
    if (hdfsUri.contains("://")) {
        try {
            URI uri = new URI(hdfsUri);
            if (!"hdfs".equals(uri.getScheme())) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri",
                        Errors.HADOOPFS_12, hdfsUri, uri.getScheme()));
                validHadoopFsUri = false;
            } else if (uri.getAuthority() == null) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri",
                        Errors.HADOOPFS_13, hdfsUri));
                validHadoopFsUri = false;
            }
        } catch (Exception ex) {
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri", Errors.HADOOPFS_22,
                    hdfsUri, ex.getMessage(), ex));
            validHadoopFsUri = false;
        }
    } else {
        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri", Errors.HADOOPFS_02,
                hdfsUri));
        validHadoopFsUri = false;
    }

    StringBuilder logMessage = new StringBuilder();
    try {
        // forcing UGI to initialize with the security settings from the stage
        UserGroupInformation.setConfiguration(hadoopConf);
        Subject subject = Subject.getSubject(AccessController.getContext());
        if (UserGroupInformation.isSecurityEnabled()) {
            loginUgi = UserGroupInformation.getUGIFromSubject(subject);
        } else {
            UserGroupInformation.loginUserFromSubject(subject);
            loginUgi = UserGroupInformation.getLoginUser();
        }
        LOG.info("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
        if (hdfsKerberos) {
            logMessage.append("Using Kerberos");
            if (loginUgi.getAuthenticationMethod() != UserGroupInformation.AuthenticationMethod.KERBEROS) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsKerberos",
                        Errors.HADOOPFS_00, loginUgi.getAuthenticationMethod(),
                        UserGroupInformation.AuthenticationMethod.KERBEROS));
            }
        } else {
            logMessage.append("Using Simple");
            hadoopConf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                    UserGroupInformation.AuthenticationMethod.SIMPLE.name());
        }
        if (validHadoopFsUri) {
            getUGI().doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    try (FileSystem fs = getFileSystemForInitDestroy()) { // to trigger the close
                    }
                    return null;
                }
            });
        }
    } catch (Exception ex) {
        LOG.info("Error connecting to FileSystem: " + ex, ex);
        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_11, hdfsUri,
                String.valueOf(ex), ex));
    }
    LOG.info("Authentication Config: " + logMessage);
}

From source file:com.thinkbiganalytics.kerberos.KerberosTicketGenerator.java

License:Apache License

public UserGroupInformation generateKerberosTicket(KerberosTicketConfiguration kerberosTicketConfiguration)
        throws IOException {
    Configuration config = new Configuration();

    String[] resources = kerberosTicketConfiguration.getHadoopConfigurationResources().split(",");
    for (String resource : resources) {
        config.addResource(new Path(resource));
    }

    config.set("hadoop.security.authentication", "Kerberos");

    UserGroupInformation.setConfiguration(config);

    log.debug("Generating Kerberos ticket for principal: " + kerberosTicketConfiguration.getKerberosPrincipal()
            + " at key tab location: " + kerberosTicketConfiguration.getKeytabLocation());
    return UserGroupInformation.loginUserFromKeytabAndReturnUGI(
            kerberosTicketConfiguration.getKerberosPrincipal(),
            kerberosTicketConfiguration.getKeytabLocation());
}

From source file:com.thinkbiganalytics.kerberos.TestKerberosKinit.java

License:Apache License

private static UserGroupInformation generateKerberosTicket(Configuration configuration, String keytabLocation,
        String principal) throws IOException {
    System.setProperty("sun.security.krb5.debug", "false");
    configuration.set("hadoop.security.authentication", "Kerberos");
    UserGroupInformation.setConfiguration(configuration);

    System.out.println("Generating Kerberos ticket for principal: " + principal + " at key tab location: "
            + keytabLocation);
    return UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytabLocation);
}

From source file:com.thinkbiganalytics.nifi.security.ApplySecurityPolicy.java

License:Apache License

public boolean validateUserWithKerberos(ComponentLog loggerInstance, String HadoopConfigurationResources,
        String Principal, String KeyTab) throws Exception {

    ClassLoader savedClassLoader = Thread.currentThread().getContextClassLoader();
    Thread.currentThread().setContextClassLoader(this.getClass().getClassLoader());
    try {

        loggerInstance.info("Start of hadoop configuration read");
        Configuration config = getConfigurationFromResources(HadoopConfigurationResources);
        config.set("hadoop.security.authentication", "Kerberos");

        loggerInstance.info("End of hadoop configuration read");

        // first check for timeout on HDFS connection, because FileSystem has a hard coded 15 minute timeout
        loggerInstance.info("Start of HDFS timeout check");
        checkHdfsUriForTimeout(config);
        loggerInstance.info("End of HDFS timeout check");

        // disable caching of Configuration and FileSystem objects, else we cannot reconfigure the processor without a complete
        // restart
        String disableCacheName = String.format("fs.%s.impl.disable.cache",
                FileSystem.getDefaultUri(config).getScheme());

        // If kerberos is enabled, create the file system as the kerberos principal
        // -- use RESOURCES_LOCK to guarantee UserGroupInformation is accessed by only a single thread at a time

        FileSystem fs;
        UserGroupInformation ugi;

        synchronized (RESOURCES_LOCK) {

            if (SecurityUtil.isSecurityEnabled(config)) {
                loggerInstance.info("Start of Kerberos Security Check");
                UserGroupInformation.setConfiguration(config);
                UserGroupInformation.loginUserFromKeytab(Principal, KeyTab);
                loggerInstance.info("End of Kerberos Security Check");
            } else {
                config.set("ipc.client.fallback-to-simple-auth-allowed", "true");
                config.set("hadoop.security.authentication", "simple");
                ugi = SecurityUtil.loginSimple(config);
                fs = getFileSystemAsUser(config, ugi);
            }
        }
        config.set(disableCacheName, "true");
        return true;
    } catch (Exception e) {
        loggerInstance.error("Unable to validate user : " + e.getMessage());
        return false;

    } finally {
        Thread.currentThread().setContextClassLoader(savedClassLoader);
    }

}

From source file:com.thinkbiganalytics.nifi.security.SecurityUtil.java

License:Apache License

/**
 * Initializes UserGroupInformation with the given Configuration and performs the login for the given principal
 * and keytab. All logins should happen through this class to ensure other threads are not concurrently modifying
 * UserGroupInformation.
 *
 * @param config    the configuration instance
 * @param principal the principal to authenticate as
 * @param keyTab    the keytab to authenticate with
 * @return the UGI for the given principal
 * @throws IOException if login failed
 */
public static synchronized UserGroupInformation loginKerberos(final Configuration config,
        final String principal, final String keyTab) throws IOException {
    Validate.notNull(config);
    Validate.notNull(principal);
    Validate.notNull(keyTab);

    config.set("hadoop.security.authentication", "Kerberos");
    UserGroupInformation.setConfiguration(config);
    return UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal.trim(), keyTab.trim());
}

From source file:com.thinkbiganalytics.nifi.security.SecurityUtil.java

License:Apache License

/**
 * Initializes UserGroupInformation with the given Configuration and returns UserGroupInformation.getLoginUser().
 * All logins should happen through this class to ensure other threads are not concurrently modifying
 * UserGroupInformation.
 *
 * @param config the configuration instance
 * @return the UGI for the given principal
 * @throws IOException if login failed
 */
public static synchronized UserGroupInformation loginSimple(final Configuration config) throws IOException {
    Validate.notNull(config);
    UserGroupInformation.setConfiguration(config);
    return UserGroupInformation.getLoginUser();
}

From source file:com.thinkbiganalytics.nifi.v2.hdfs.AbstractHadoopProcessor.java

License:Apache License

/**
 * Reset Hadoop Configuration and FileSystem based on the supplied configuration resources.
 *
 * @param configResources for configuration
 * @param dir             the target directory
 * @param context         for context, which gives access to the principal
 * @return An HdfsResources object
 * @throws IOException if unable to access HDFS
 */
HdfsResources resetHDFSResources(String configResources, String dir, ProcessContext context)
        throws IOException {
    // org.apache.hadoop.conf.Configuration saves its current thread context class loader to use for threads that it creates
    // later to do I/O. We need this class loader to be the NarClassLoader instead of the magical
    // NarThreadContextClassLoader.
    ClassLoader savedClassLoader = Thread.currentThread().getContextClassLoader();
    Thread.currentThread().setContextClassLoader(this.getClass().getClassLoader());

    try {
        Configuration config = getConfigurationFromResources(configResources);

        // first check for timeout on HDFS connection, because FileSystem has a hard coded 15 minute timeout
        checkHdfsUriForTimeout(config);

        // disable caching of Configuration and FileSystem objects, else we cannot reconfigure the processor without a complete
        // restart
        String disableCacheName = String.format("fs.%s.impl.disable.cache",
                FileSystem.getDefaultUri(config).getScheme());
        config.set(disableCacheName, "true");

        // If kerberos is enabled, create the file system as the kerberos principal
        // -- use RESOURCES_LOCK to guarantee UserGroupInformation is accessed by only a single thread at a time
        FileSystem fs = null;
        UserGroupInformation ugi = null;
        synchronized (RESOURCES_LOCK) {
            if (config.get("hadoop.security.authentication").equalsIgnoreCase("kerberos")) {
                String principal = context.getProperty(kerberosPrincipal).getValue();
                String keyTab = context.getProperty(kerberosKeytab).getValue();
                UserGroupInformation.setConfiguration(config);
                ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keyTab);
                modifyConfig(context, config);
                fs = getFileSystemAsUser(config, ugi);
                lastKerberosReloginTime = System.currentTimeMillis() / 1000;
            } else {
                config.set("ipc.client.fallback-to-simple-auth-allowed", "true");
                config.set("hadoop.security.authentication", "simple");
                modifyConfig(context, config);
                fs = getFileSystem(config);
            }
        }
        getLog().info(
                "Initialized a new HDFS File System with working dir: {} default block size: {} default replication: {} config: {}",
                new Object[] { fs.getWorkingDirectory(), fs.getDefaultBlockSize(new Path(dir)),
                        fs.getDefaultReplication(new Path(dir)), config.toString() });
        return new HdfsResources(config, fs, ugi);
    } finally {
        Thread.currentThread().setContextClassLoader(savedClassLoader);
    }
}

From source file:com.trendmicro.hdfs.webdav.Main.java

License:Apache License

public static void main(String[] args) {

    HDFSWebDAVServlet servlet = HDFSWebDAVServlet.getServlet();
    Configuration conf = servlet.getConfiguration();

    // Process command line 

    Options options = new Options();
    options.addOption("d", "debug", false, "Enable debug logging");
    options.addOption("p", "port", true, "Port to bind to [default: 8080]");
    options.addOption("b", "bind-address", true, "Address or hostname to bind to [default: 0.0.0.0]");
    options.addOption("g", "ganglia", true, "Send Ganglia metrics to host:port [default: none]");

    CommandLine cmd = null;
    try {
        cmd = new PosixParser().parse(options, args);
    } catch (ParseException e) {
        printUsageAndExit(options, -1);
    }

    if (cmd.hasOption('d')) {
        Logger rootLogger = Logger.getLogger("com.trendmicro");
        rootLogger.setLevel(Level.DEBUG);
    }

    if (cmd.hasOption('b')) {
        conf.set("hadoop.webdav.bind.address", cmd.getOptionValue('b'));
    }

    if (cmd.hasOption('p')) {
        conf.setInt("hadoop.webdav.port", Integer.valueOf(cmd.getOptionValue('p')));
    }

    String gangliaHost = null;
    int gangliaPort = 8649;
    if (cmd.hasOption('g')) {
        String val = cmd.getOptionValue('g');
        if (val.indexOf(':') != -1) {
            String[] split = val.split(":");
            gangliaHost = split[0];
            gangliaPort = Integer.valueOf(split[1]);
        } else {
            gangliaHost = val;
        }
    }

    InetSocketAddress addr = getAddress(conf);

    // Log in the server principal from keytab

    UserGroupInformation.setConfiguration(conf);
    if (UserGroupInformation.isSecurityEnabled())
        try {
            SecurityUtil.login(conf, "hadoop.webdav.server.kerberos.keytab",
                    "hadoop.webdav.server.kerberos.principal", addr.getHostName());
        } catch (IOException e) {
            LOG.fatal("Could not log in", e);
            System.err.println("Could not log in");
            System.exit(-1);
        }

    // Set up embedded Jetty

    Server server = new Server();

    server.setSendServerVersion(false);
    server.setSendDateHeader(false);
    server.setStopAtShutdown(true);

    // Set up connector
    Connector connector = new SelectChannelConnector();
    connector.setPort(addr.getPort());
    connector.setHost(addr.getHostName());
    server.addConnector(connector);
    LOG.info("Listening on " + addr);

    // Set up context
    Context context = new Context(server, "/", Context.SESSIONS);
    // WebDAV servlet
    ServletHolder servletHolder = new ServletHolder(servlet);
    servletHolder.setInitParameter("authenticate-header", "Basic realm=\"Hadoop WebDAV Server\"");
    context.addServlet(servletHolder, "/*");
    // metrics instrumentation filter
    context.addFilter(new FilterHolder(new DefaultWebappMetricsFilter()), "/*", 0);
    // auth filter
    context.addFilter(new FilterHolder(new AuthFilter(conf)), "/*", 0);
    server.setHandler(context);

    // Set up Ganglia metrics reporting
    if (gangliaHost != null) {
        GangliaReporter.enable(1, TimeUnit.MINUTES, gangliaHost, gangliaPort);
    }

    // Start and join the server thread    
    try {
        server.start();
        server.join();
    } catch (Exception e) {
        LOG.fatal("Failed to start Jetty", e);
        System.err.println("Failed to start Jetty");
        System.exit(-1);
    }
}

From source file:com.yahoo.ycsb.db.HBaseClient10.java

License:Apache License

/**
 * Initialize any state for this DB. Called once per DB instance; there is one
 * DB instance per client thread.
 */
@Override
public void init() throws DBException {
    if ("true".equals(getProperties().getProperty("clientbuffering", "false"))) {
        this.clientSideBuffering = true;
    }
    if (getProperties().containsKey("writebuffersize")) {
        writeBufferSize = Long.parseLong(getProperties().getProperty("writebuffersize"));
    }

    if (getProperties().getProperty("durability") != null) {
        this.durability = Durability.valueOf(getProperties().getProperty("durability"));
    }

    if ("kerberos".equalsIgnoreCase(config.get("hbase.security.authentication"))) {
        config.set("hadoop.security.authentication", "Kerberos");
        UserGroupInformation.setConfiguration(config);
    }

    if ((getProperties().getProperty("principal") != null) && (getProperties().getProperty("keytab") != null)) {
        try {
            UserGroupInformation.loginUserFromKeytab(getProperties().getProperty("principal"),
                    getProperties().getProperty("keytab"));
        } catch (IOException e) {
            System.err.println("Keytab file is not readable or not found");
            throw new DBException(e);
        }
    }

    try {
        threadCount.getAndIncrement();
        synchronized (CONNECTION_LOCK) {
            if (connection == null) {
                // Initialize if not set up already.
                connection = ConnectionFactory.createConnection(config);
            }
        }
    } catch (java.io.IOException e) {
        throw new DBException(e);
    }

    if ((getProperties().getProperty("debug") != null)
            && (getProperties().getProperty("debug").compareTo("true") == 0)) {
        debug = true;
    }

    if ("false".equals(getProperties().getProperty("hbase.usepagefilter", "true"))) {
        usePageFilter = false;
    }

    columnFamily = getProperties().getProperty("columnfamily");
    if (columnFamily == null) {
        System.err.println("Error, must specify a columnfamily for HBase table");
        throw new DBException("No columnfamily specified");
    }
    columnFamilyBytes = Bytes.toBytes(columnFamily);

    // Terminate right now if table does not exist, since the client
    // will not propagate this error upstream once the workload
    // starts.
    String table = com.yahoo.ycsb.workloads.CoreWorkload.table;
    try {
        final TableName tName = TableName.valueOf(table);
        synchronized (CONNECTION_LOCK) {
            connection.getTable(tName).getTableDescriptor();
        }
    } catch (IOException e) {
        throw new DBException(e);
    }
}

From source file:common.DataNode.java

License:Apache License

/**
 * Create the DataNode given a configuration, an array of dataDirs,
 * and a namenode proxy
 */
DataNode(final Configuration conf, final AbstractList<File> dataDirs, final DatanodeProtocol namenode)
        throws IOException {
    super(conf);

    UserGroupInformation.setConfiguration(conf);
    DFSUtil.login(conf, DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY);

    DataNode.setDataNode(this);

    try {
        startDataNode(conf, dataDirs, namenode);
    } catch (IOException ie) {
        shutdown();
        throw ie;
    }
}