Example usage for org.apache.hadoop.security UserGroupInformation setConfiguration

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.setConfiguration.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void setConfiguration(Configuration conf) 

Document

Set the static configuration for UGI.
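
A minimal sketch of typical usage, distilled from the examples below. The class name, the Kerberos principal, and the keytab path are illustrative placeholders, not values taken from any of the source files shown here.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiConfigurationExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Switch Hadoop security from SIMPLE to Kerberos authentication
        conf.set("hadoop.security.authentication", "kerberos");
        // Make this configuration the static configuration used by all UGI operations
        UserGroupInformation.setConfiguration(conf);
        // Log in from a keytab; principal and keytab path are placeholders
        UserGroupInformation.loginUserFromKeytab("service/host@EXAMPLE.COM",
                "/etc/security/keytabs/service.keytab");
        System.out.println("Logged in as: " + UserGroupInformation.getLoginUser().getUserName());
    }
}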

Usage

From source file: ml.shifu.guagua.yarn.GuaguaYarnTask.java

License: Apache License

public static void main(String[] args) {
    LOG.info("args:{}", Arrays.toString(args));
    if (args.length != 7) {
        throw new IllegalStateException(String.format(
                "GuaguaYarnTask could not construct a TaskAttemptID for the Guagua job from args: %s",
                Arrays.toString(args)));
    }

    String containerIdString = System.getenv().get(Environment.CONTAINER_ID.name());
    if (containerIdString == null) {
        // container id should always be set in the env by the framework
        throw new IllegalArgumentException("ContainerId not found in env vars.");
    }
    ContainerId containerId = ConverterUtils.toContainerId(containerIdString);
    ApplicationAttemptId appAttemptId = containerId.getApplicationAttemptId();

    try {
        Configuration conf = new YarnConfiguration();
        String jobUserName = System.getenv(ApplicationConstants.Environment.USER.name());
        conf.set(MRJobConfig.USER_NAME, jobUserName);
        UserGroupInformation.setConfiguration(conf);
        // Security framework already loaded the tokens into current UGI, just use them
        Credentials credentials = UserGroupInformation.getCurrentUser().getCredentials();
        LOG.info("Executing with tokens:");
        for (Token<?> token : credentials.getAllTokens()) {
            LOG.info(token.toString());
        }

        UserGroupInformation appTaskUGI = UserGroupInformation.createRemoteUser(jobUserName);
        appTaskUGI.addCredentials(credentials);
        @SuppressWarnings("rawtypes")
        final GuaguaYarnTask<?, ?> guaguaYarnTask = new GuaguaYarnTask(appAttemptId, containerId,
                Integer.parseInt(args[args.length - 3]), args[args.length - 2], args[args.length - 1], conf);
        appTaskUGI.doAs(new PrivilegedAction<Void>() {
            @Override
            public Void run() {
                guaguaYarnTask.run();
                return null;
            }
        });
    } catch (Throwable t) {
        LOG.error("GuaguaYarnTask threw a top-level exception, failing task", t);
        System.exit(2);
    }
    System.exit(0);
}

From source file: ms.dew.core.hbase.HBaseAutoConfiguration.java

License: Apache License

/**
 * Init HBase connection.
 *
 * @param hbaseProperties hbase settings properties
 * @param conf            hadoop configuration
 * @return HBase connection
 * @throws IOException IOException
 */
@Bean
public Connection connection(HBaseProperties hbaseProperties, org.apache.hadoop.conf.Configuration conf)
        throws IOException {
    if ("kerberos".equalsIgnoreCase(hbaseProperties.getAuth().getType())) {
        System.setProperty("java.security.krb5.conf", hbaseProperties.getAuth().getKrb5());
        UserGroupInformation.setConfiguration(conf);
        UserGroupInformation.loginUserFromKeytab(hbaseProperties.getAuth().getPrincipal(),
                hbaseProperties.getAuth().getKeytab());
    }
    ThreadPoolExecutor poolExecutor = new ThreadPoolExecutor(200, Integer.MAX_VALUE, 60L, TimeUnit.SECONDS,
            new SynchronousQueue<>());
    poolExecutor.prestartCoreThread();
    return ConnectionFactory.createConnection(conf, poolExecutor);
}

From source file: net.iridiant.hdfs.webdav.Main.java

License: Apache License

public static void main(String[] args) {

    HDFSWebDAVServlet servlet = HDFSWebDAVServlet.getServlet();
    Configuration conf = servlet.getConfiguration();

    // Process command line 

    Options options = new Options();
    options.addOption("d", "debug", false, "Enable debug logging");
    options.addOption("p", "port", true, "Port to bind to [default: 8080]");
    options.addOption("b", "bind-address", true, "Address or hostname to bind to [default: 0.0.0.0]");
    options.addOption("g", "ganglia", true, "Send Ganglia metrics to host:port [default: none]");

    CommandLine cmd = null;
    try {
        cmd = new PosixParser().parse(options, args);
    } catch (ParseException e) {
        printUsageAndExit(options, -1);
    }

    if (cmd.hasOption('d')) {
        Logger rootLogger = Logger.getLogger("net.iridiant");
        rootLogger.setLevel(Level.DEBUG);
    }

    if (cmd.hasOption('b')) {
        conf.set("hadoop.webdav.bind.address", cmd.getOptionValue('b'));
    }

    if (cmd.hasOption('p')) {
        conf.setInt("hadoop.webdav.port", Integer.valueOf(cmd.getOptionValue('p')));
    }

    String gangliaHost = null;
    int gangliaPort = 8649;
    if (cmd.hasOption('g')) {
        String val = cmd.getOptionValue('g');
        if (val.indexOf(':') != -1) {
            String[] split = val.split(":");
            gangliaHost = split[0];
            gangliaPort = Integer.valueOf(split[1]);
        } else {
            gangliaHost = val;
        }
    }

    InetSocketAddress addr = getAddress(conf);

    // Log in the server principal from keytab

    UserGroupInformation.setConfiguration(conf);
    if (UserGroupInformation.isSecurityEnabled())
        try {
            SecurityUtil.login(conf, "hadoop.webdav.server.kerberos.keytab",
                    "hadoop.webdav.server.kerberos.principal", addr.getHostName());
        } catch (IOException e) {
            LOG.fatal("Could not log in", e);
            System.err.println("Could not log in");
            System.exit(-1);
        }

    // Set up embedded Jetty

    Server server = new Server();

    server.setSendServerVersion(false);
    server.setSendDateHeader(false);
    server.setStopAtShutdown(true);

    // Set up connector
    Connector connector = new SelectChannelConnector();
    connector.setPort(addr.getPort());
    connector.setHost(addr.getHostName());
    server.addConnector(connector);
    LOG.info("Listening on " + addr);

    // Set up context
    Context context = new Context(server, "/", Context.SESSIONS);
    // WebDAV servlet
    ServletHolder servletHolder = new ServletHolder(servlet);
    servletHolder.setInitParameter("authenticate-header", "Basic realm=\"Hadoop WebDAV Server\"");
    context.addServlet(servletHolder, "/*");
    // metrics instrumentation filter
    context.addFilter(new FilterHolder(new DefaultWebappMetricsFilter()), "/*", 0);
    // auth filter
    context.addFilter(new FilterHolder(new AuthFilter(conf)), "/*", 0);
    server.setHandler(context);

    // Set up Ganglia metrics reporting
    if (gangliaHost != null) {
        GangliaReporter.enable(1, TimeUnit.MINUTES, gangliaHost, gangliaPort);
    }

    // Start and join the server thread    
    try {
        server.start();
        server.join();
    } catch (Exception e) {
        LOG.fatal("Failed to start Jetty", e);
        System.err.println("Failed to start Jetty");
        System.exit(-1);
    }
}

From source file: nl.surfsara.newsreader.loader.Loader.java

License: Apache License

private void init() throws IOException {
    conf = new Configuration();
    conf.addResource(new Path("core-site.xml"));
    conf.addResource(new Path("hdfs-site.xml"));

    conf.set("hadoop.security.authentication", "kerberos");
    conf.set("hadoop.security.authorization", "true");

    System.setProperty("java.security.krb5.realm", "CUA.SURFSARA.NL");
    System.setProperty("java.security.krb5.kdc", "kerberos1.osd.surfsara.nl");

    UserGroupInformation.setConfiguration(conf);

    loginUser = UserGroupInformation.getLoginUser();
    logger.info("Logged in as: " + loginUser.getUserName());
}

From source file: nl.surfsara.warcexamples.hdfs.Headers.java

License: Apache License

@Override
public void run() {
    // PropertyConfigurator.configure("log4jconfig.properties");
    final Configuration conf = new Configuration();
    // The core-site.xml and hdfs-site.xml are cluster specific. If you wish to use this on other clusters adapt the files as needed.
    conf.addResource(
            Headers.class.getResourceAsStream("/nl/surfsara/warcexamples/hdfs/resources/core-site.xml"));
    conf.addResource(
            Headers.class.getResourceAsStream("/nl/surfsara/warcexamples/hdfs/resources/hdfs-site.xml"));

    conf.set("hadoop.security.authentication", "kerberos");
    conf.set("hadoop.security.authorization", "true");

    System.setProperty("java.security.krb5.realm", "CUA.SURFSARA.NL");
    System.setProperty("java.security.krb5.kdc", "kdc.hathi.surfsara.nl");

    UserGroupInformation.setConfiguration(conf);

    UserGroupInformation loginUser;
    try {
        loginUser = UserGroupInformation.getLoginUser();
        System.out.println("Logged in as: " + loginUser.getUserName());
        PrintHeaders printHeaders = new PrintHeaders(conf, path);
        loginUser.doAs(printHeaders);
    } catch (IOException e) {
        // Just dump the error..
        e.printStackTrace();
    }
}

From source file: org.apache.accumulo.core.client.impl.ThriftTransportKeyTest.java

License: Apache License

@Before
public void setup() throws Exception {
    System.setProperty("java.security.krb5.realm", "accumulo");
    System.setProperty("java.security.krb5.kdc", "fake");
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
}

From source file: org.apache.accumulo.core.clientImpl.ThriftTransportKeyTest.java

License: Apache License

@Before
public void setup() {
    System.setProperty("java.security.krb5.realm", "accumulo");
    System.setProperty("java.security.krb5.kdc", "fake");
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
}

From source file: org.apache.accumulo.core.rpc.SaslConnectionParamsTest.java

License: Apache License

@Before
public void setup() throws Exception {
    System.setProperty("java.security.krb5.realm", "accumulo");
    System.setProperty("java.security.krb5.kdc", "fake");
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    testUser = UserGroupInformation.createUserForTesting("test_user", new String[0]);
    username = testUser.getUserName();
}

From source file: org.apache.accumulo.harness.AccumuloClusterHarness.java

License: Apache License

@Before
public void setupCluster() throws Exception {
    // Before we try to instantiate the cluster, check to see if the test even wants to run against this type of cluster
    Assume.assumeTrue(canRunTest(type));

    switch (type) {
    case MINI:
        MiniClusterHarness miniClusterHarness = new MiniClusterHarness();
        // Intrinsically performs the callback to let tests alter MiniAccumuloConfig and core-site.xml
        MiniAccumuloClusterImpl impl = miniClusterHarness.create(this, getAdminToken(), krb);
        cluster = impl;
        // MAC makes a ClientConf for us, just set it
        ((AccumuloMiniClusterConfiguration) clusterConf).setClientConf(impl.getClientConfig());
        // Login as the "root" user
        if (null != krb) {
            ClusterUser rootUser = krb.getRootUser();
            // Log in the 'client' user
            UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(),
                    rootUser.getKeytab().getAbsolutePath());
        }
        break;
    case STANDALONE:
        StandaloneAccumuloClusterConfiguration conf = (StandaloneAccumuloClusterConfiguration) clusterConf;
        ClientConfiguration clientConf = conf.getClientConf();
        StandaloneAccumuloCluster standaloneCluster = new StandaloneAccumuloCluster(conf.getInstance(),
                clientConf, conf.getTmpDirectory(), conf.getUsers(), conf.getAccumuloServerUser());
        // If these are provided in the configuration, pass them into the cluster
        standaloneCluster.setAccumuloHome(conf.getAccumuloHome());
        standaloneCluster.setClientAccumuloConfDir(conf.getClientAccumuloConfDir());
        standaloneCluster.setServerAccumuloConfDir(conf.getServerAccumuloConfDir());
        standaloneCluster.setHadoopConfDir(conf.getHadoopConfDir());

        // For SASL, we need to get the Hadoop configuration files as well otherwise UGI will log in as SIMPLE instead of KERBEROS
        Configuration hadoopConfiguration = standaloneCluster.getHadoopConfiguration();
        if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
            UserGroupInformation.setConfiguration(hadoopConfiguration);
            // Login as the admin user to start the tests
            UserGroupInformation.loginUserFromKeytab(conf.getAdminPrincipal(),
                    conf.getAdminKeytab().getAbsolutePath());
        }

        // Set the implementation
        cluster = standaloneCluster;
        break;
    default:
        throw new RuntimeException("Unhandled type");
    }

    if (type.isDynamic()) {
        cluster.start();
    } else {
        log.info("Removing tables which appear to be from a previous test run");
        cleanupTables();
        log.info("Removing users which appear to be from a previous test run");
        cleanupUsers();
    }

    switch (type) {
    case MINI:
        if (null != krb) {
            final String traceTable = Property.TRACE_TABLE.getDefaultValue();
            final ClusterUser systemUser = krb.getAccumuloServerUser(), rootUser = krb.getRootUser();

            // Login as the trace user
            UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(),
                    systemUser.getKeytab().getAbsolutePath());

            // Open a connector as the system user (ensures the user will exist for us to assign permissions to)
            UserGroupInformation.loginUserFromKeytab(systemUser.getPrincipal(),
                    systemUser.getKeytab().getAbsolutePath());
            Connector conn = cluster.getConnector(systemUser.getPrincipal(), new KerberosToken());

            // Then, log back in as the "root" user and do the grant
            UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(),
                    rootUser.getKeytab().getAbsolutePath());
            conn = getConnector();

            // Create the trace table
            conn.tableOperations().create(traceTable);

            // Trace user (which is the same kerberos principal as the system user, but using a normal KerberosToken) needs
            // to have the ability to read, write and alter the trace table
            conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
                    TablePermission.READ);
            conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
                    TablePermission.WRITE);
            conn.securityOperations().grantTablePermission(systemUser.getPrincipal(), traceTable,
                    TablePermission.ALTER_TABLE);
        }
        break;
    default:
        // do nothing
    }
}

From source file: org.apache.accumulo.harness.conf.AccumuloMiniClusterConfiguration.java

License: Apache License

@Override
public AuthenticationToken getAdminToken() {
    if (saslEnabled) {
        // Turn on Kerberos authentication so UGI acts properly
        final Configuration conf = new Configuration(false);
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);

        ClusterUser rootUser = AccumuloClusterHarness.getKdc().getRootUser();
        try {
            UserGroupInformation.loginUserFromKeytab(rootUser.getPrincipal(),
                    rootUser.getKeytab().getAbsolutePath());
            return new KerberosToken();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    } else {
        String password = conf.get(ACCUMULO_MINI_PASSWORD_KEY);
        if (null == password) {
            password = ACCUMULO_MINI_PASSWORD_DEFAULT;
        }

        return new PasswordToken(password);
    }
}