Example usage for org.apache.hadoop.security.UserGroupInformation.setConfiguration

Introduction

On this page you can find example usage for org.apache.hadoop.security.UserGroupInformation.setConfiguration, collected from open source projects.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void setConfiguration(Configuration conf) 

Document

Set the static configuration for UGI.
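
Because the configuration is static, setConfiguration affects every subsequent UGI operation in the JVM, so it is typically called once, before the first login or file system access. A minimal sketch of that ordering is shown below; the principal and keytab path are placeholders for illustration, not values taken from the examples that follow.

Configuration conf = new Configuration();
// Switch the JVM-wide authentication mechanism to Kerberos.
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
// Placeholder principal and keytab path; substitute real values.
UserGroupInformation.loginUserFromKeytab("user@EXAMPLE.COM", "/etc/security/keytabs/user.keytab");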

Usage

From source file: org.shadowmask.framework.datacenter.hive.KerberizedHiveDc.java

License: Apache License

public void loginKdc() throws ClassNotFoundException, IOException {
    System.setProperty("java.security.krb5.realm", realm);
    System.setProperty("java.security.krb5.kdc", kdc);
    Configuration conf = new Configuration();
    conf.setBoolean("hadoop.security.authorization", true);
    conf.set("hadoop.security.authentication", "kerberos");
    UserGroupInformation.setConfiguration(conf);

    Class.forName(getJdbcDriver());
    UserGroupInformation.loginUserFromKeytab(krbUser, keyTab);

}

From source file: org.springframework.data.hadoop.configuration.ConfigurationFactoryBean.java

License: Apache License

public void afterPropertiesSet() throws Exception {
    internalConfig = createConfiguration(configuration);

    internalConfig.setClassLoader(beanClassLoader);
    if (resources != null) {
        for (Resource resource : resources) {
            internalConfig.addResource(resource.getURL());
        }
    }

    ConfigurationUtils.addProperties(internalConfig, properties);

    // set hdfs / fs URI last to override all other properties
    if (StringUtils.hasText(fsUri)) {
        internalConfig.set("fs.default.name", fsUri.trim());
    }

    if (StringUtils.hasText(jtUri)) {
        internalConfig.set("mapred.job.tracker", jtUri.trim());
    }

    if (initialize) {
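        // Calling size() forces the configuration resources to be loaded and merged.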
        internalConfig.size();
    }

    postProcessConfiguration(internalConfig);

    if (registerJvmUrl) {
        try {
            // force UGI init to prevent infinite loop - see SHDP-92
            UserGroupInformation.setConfiguration(internalConfig);
            URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory(getObject()));
            log.info("Registered HDFS URL stream handler");
        } catch (Error err) {
            log.warn("Cannot register Hadoop URL stream handler - one is already registered");
        }
    }
}

From source file: org.springframework.xd.sqoop.SqoopRunner.java

License: Apache License

protected static Configuration createConfiguration(Map<String, String> configOptions) {

    Configuration configuration = new Configuration();
    setConfigurationProperty(configOptions, configuration, CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
    setConfigurationProperty(configOptions, configuration, YarnConfiguration.RM_HOSTNAME);
    setConfigurationProperty(configOptions, configuration, YarnConfiguration.RM_ADDRESS);
    setConfigurationProperty(configOptions, configuration, YarnConfiguration.RM_SCHEDULER_ADDRESS);
    setConfigurationProperty(configOptions, configuration, YarnConfiguration.YARN_APPLICATION_CLASSPATH);
    setConfigurationProperty(configOptions, configuration, "mapreduce.framework.name");
    if (StringUtils.hasText(configOptions.get("mapreduce.jobhistory.address"))) {
        setConfigurationProperty(configOptions, configuration, "mapreduce.jobhistory.address");
    }
    if (configOptions.containsKey(SECURITY_AUTH_METHOD)
            && "kerberos".equals(configOptions.get(SECURITY_AUTH_METHOD))) {
        configuration.setBoolean("hadoop.security.authorization", true);
        configuration.set("hadoop.security.authentication", configOptions.get(SECURITY_AUTH_METHOD));
        configuration.set("dfs.namenode.kerberos.principal", configOptions.get(SECURITY_NAMENODE_PRINCIPAL));
        configuration.set("yarn.resourcemanager.principal", configOptions.get(SECURITY_RM_MANAGER_PRINCIPAL));
        if (StringUtils.hasText(configOptions.get(SECURITY_MAPREDUCE_JOBHISTORY_PRINCIPAL))) {
            configuration.set("mapreduce.jobhistory.principal",
                    configOptions.get(SECURITY_MAPREDUCE_JOBHISTORY_PRINCIPAL));
        }
        String userKeytab = configOptions.get(SECURITY_USER_KEYTAB);
        String userPrincipal = configOptions.get(SECURITY_USER_PRINCIPAL);
        UserGroupInformation.setConfiguration(configuration);
        if (StringUtils.hasText(userKeytab)) {
            configuration.set(ConfigurationFactoryBean.USERKEYTAB, userKeytab.trim());
        }
        if (StringUtils.hasText(userPrincipal)) {
            configuration.set(ConfigurationFactoryBean.USERPRINCIPAL, userPrincipal.trim());
        }
        if (StringUtils.hasText(userKeytab) && StringUtils.hasText(userPrincipal)) {
            try {
                SecurityUtil.login(configuration, ConfigurationFactoryBean.USERKEYTAB,
                        ConfigurationFactoryBean.USERPRINCIPAL);
            } catch (Exception e) {
                logger.warn("Cannot login using keytab " + userKeytab + " and principal " + userPrincipal, e);
            }
        }
    }

    for (Entry<String, String> entry : configOptions.entrySet()) {
        String key = entry.getKey();
        if (key.startsWith(SPRING_HADOOP_CONFIG_PREFIX + ".")) {
            String prop = key.substring(SPRING_HADOOP_CONFIG_PREFIX.length() + 1);
            String value = entry.getValue();
            logger.info("Setting configuration property: " + prop + "=" + value);
            configuration.set(prop, value);
        }
    }
    return configuration;
}

From source file: org.springframework.yarn.configuration.ConfigurationFactoryBean.java

License: Apache License

public void afterPropertiesSet() throws Exception {
    internalConfig = createConfiguration(configuration);

    internalConfig.setClassLoader(beanClassLoader);
    if (resources != null) {
        for (Resource resource : resources) {
            internalConfig.addResource(resource.getURL());
        }
    }

    ConfigurationUtils.addProperties(internalConfig, properties);

    // set hdfs / fs URI last to override all other properties
    if (StringUtils.hasText(fsUri)) {
        internalConfig.set("fs.default.name", fsUri.trim());
    }

    if (StringUtils.hasText(rmAddress)) {
        internalConfig.set("yarn.resourcemanager.address", rmAddress.trim());
    }

    if (initialize) {
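        // Calling size() forces the configuration resources to be loaded and merged.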
        internalConfig.size();
    }

    postProcessConfiguration(internalConfig);

    if (registerJvmUrl) {
        try {
            // force UGI init to prevent infinite loop - see SHDP-92
            UserGroupInformation.setConfiguration(internalConfig);
            URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory(getObject()));
            log.info("Registered HDFS URL stream handler");
        } catch (Error err) {
            log.warn("Cannot register Hadoop URL stream handler - one is already registered");
        }
    }
}

From source file: org.springframework.yarn.rpc.YarnRpcAccessor.java

License: Apache License

@Override
public void afterPropertiesSet() throws Exception {
    Assert.notNull(configuration, "Yarn configuration must be set");
    Assert.notNull(protocolClazz, "Rpc protocol class must be set");
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation.setConfiguration(configuration);
    }
    address = getRpcAddress(configuration);
    proxy = createProxy();
}

From source file: org.trpr.dataaccess.hbase.auth.kerberos.KerberosAuthenticationProvider.java

License: Apache License

/**
 * Interface method implementation. Initializes the specified HBase configuration with Kerberos authentication properties
 * @see org.trpr.dataaccess.hbase.auth.AuthenticationProvider#authenticatePrincipal(org.apache.hadoop.conf.Configuration)
 */
public void authenticatePrincipal(Configuration configuration) throws SecurityException {
    for (Object key : this.kerberosAuthProperties.keySet()) {
        configuration.set(key.toString(), this.kerberosAuthProperties.getProperty(key.toString()));
    }
    System.setProperty(KerberosAuthenticationProvider.KERBEROS_CONFIG_SYSTEM_VARIABLE,
            this.kerberosConfigLocation);
    try {
        UserGroupInformation.setConfiguration(configuration);
        UserGroupInformation.loginUserFromKeytab(this.kerberosPrincipal, this.kerberosKeytabLocation);
        UserGroupInformation loggedInUser = UserGroupInformation.getLoginUser();
        LOGGER.info("Currently logged in Kerberos principal: " + loggedInUser);
        new TGTRenewalThread(configuration, loggedInUser);
    } catch (Exception e) {
        throw new SecurityException("Error authenticating Kerberos principal: " + this.kerberosPrincipal
                + ". Error message: " + e.getMessage(), e);
    }
}

From source file: org.wso2.carbon.hdfs.sample.FSClient.java

License: Open Source License

public static void main(String[] args) throws IOException {

    Configuration conf = new Configuration(false);
    /**
     * Create an HDFS client configuration to use the name node hosted on the master host.
     * The client is configured to connect to a remote distributed file system.
     */
    conf.set("fs.default.name", "hdfs://localhost:54310");
    conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
    conf.set("hadoop.security.authentication", "kerberos");
    conf.set("dfs.namenode.kerberos.principal", "hdfs/node0@WSO2.ORG");

    UserGroupInformation.setConfiguration(conf);
    /**
     * Get a connection to the remote file system.
     */
    FileSystem fs = FileSystem.get(conf);

    /**
     * Create file path object
     */
    Path tenantFileName = new Path(
            File.separator + USER_HOME + File.separator + TENANT + File.separator + FILE_NAME);
    /**
     * Do read / write operation with HDFS
     */
    try {
        if (fs.exists(tenantFileName)) {
            // remove the file first
            fs.delete(tenantFileName, true);
        }

        //create and put content to the file
        FSDataOutputStream out = fs.create(tenantFileName);
        out.writeUTF(message);
        out.close();

        FSDataInputStream in = fs.open(tenantFileName);
        String messageIn = in.readUTF();
        System.out.print(messageIn);
        in.close();

    } catch (IOException ioe) {
        System.err.println("IOException during operation: " + ioe.toString());
        System.exit(1);
    }
}

From source file: rpc.TestRPC.java

License: Apache License

@Before
public void setupConf() {
    conf = new Configuration();
    conf.setClass("rpc.engine." + StoppedProtocol.class.getName(), StoppedRpcEngine.class, RpcEngine.class);
    UserGroupInformation.setConfiguration(conf);
}

From source file: rpc.TestRPC.java

License: Apache License

@Test
public void testErrorMsgForInsecureClient() throws IOException {
    Configuration serverConf = new Configuration(conf);
    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, serverConf);
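    // Build the server under the Kerberos UGI configuration; the client below reverts to the insecure conf.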
    UserGroupInformation.setConfiguration(serverConf);

    final Server server = new RPC.Builder(serverConf).setProtocol(TestProtocol.class)
            .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
            .build();
    server.start();

    UserGroupInformation.setConfiguration(conf);
    boolean succeeded = false;
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    TestProtocol proxy = null;
    try {
        proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);
        proxy.echo("");
    } catch (RemoteException e) {
        LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
        assertTrue(e.unwrapRemoteException() instanceof AccessControlException);
        succeeded = true;
    } finally {
        server.stop();
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
    }
    assertTrue(succeeded);

    conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_THREADS_KEY, 2);

    UserGroupInformation.setConfiguration(serverConf);
    final Server multiServer = new RPC.Builder(serverConf).setProtocol(TestProtocol.class)
            .setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
            .build();
    multiServer.start();
    succeeded = false;
    final InetSocketAddress multiServerAddr = NetUtils.getConnectAddress(multiServer);
    proxy = null;
    try {
        UserGroupInformation.setConfiguration(conf);
        proxy = RPC.getProxy(TestProtocol.class, TestProtocol.versionID, multiServerAddr, conf);
        proxy.echo("");
    } catch (RemoteException e) {
        LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
        assertTrue(e.unwrapRemoteException() instanceof AccessControlException);
        succeeded = true;
    } finally {
        multiServer.stop();
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
    }
    assertTrue(succeeded);
}

From source file: skewtune.mapreduce.STJobTracker.java

License: Apache License

@SuppressWarnings("unchecked")
STJobTracker(final JobConf conf, String jobtrackerIdentifier) throws IOException, InterruptedException {
    // find the owner of the process
    // get the desired principal to load
    String keytabFilename = conf.get(JTConfig.JT_KEYTAB_FILE);
    UserGroupInformation.setConfiguration(conf);
    if (keytabFilename != null) {
        String desiredUser = conf.get(JTConfig.JT_USER_NAME, System.getProperty("user.name"));
        UserGroupInformation.loginUserFromKeytab(desiredUser, keytabFilename);
        mrOwner = UserGroupInformation.getLoginUser();
    } else {
        mrOwner = UserGroupInformation.getCurrentUser();
    }

    supergroup = conf.get(MR_SUPERGROUP, "supergroup");
    LOG.info("Starting jobtracker with owner as " + mrOwner.getShortUserName() + " and supergroup as "
            + supergroup);

    long secretKeyInterval = conf.getLong(MRConfig.DELEGATION_KEY_UPDATE_INTERVAL_KEY,
            MRConfig.DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT);
    long tokenMaxLifetime = conf.getLong(MRConfig.DELEGATION_TOKEN_MAX_LIFETIME_KEY,
            MRConfig.DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT);
    long tokenRenewInterval = conf.getLong(MRConfig.DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
            MRConfig.DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
    secretManager = new DelegationTokenSecretManager(secretKeyInterval, tokenMaxLifetime, tokenRenewInterval,
            DELEGATION_TOKEN_GC_INTERVAL);
    secretManager.startThreads();

    //
    // Grab some static constants
    //

    NUM_HEARTBEATS_IN_SECOND = conf.getInt(JT_HEARTBEATS_IN_SECOND, DEFAULT_NUM_HEARTBEATS_IN_SECOND);
    if (NUM_HEARTBEATS_IN_SECOND < MIN_NUM_HEARTBEATS_IN_SECOND) {
        NUM_HEARTBEATS_IN_SECOND = DEFAULT_NUM_HEARTBEATS_IN_SECOND;
    }

    HEARTBEATS_SCALING_FACTOR = conf.getFloat(JT_HEARTBEATS_SCALING_FACTOR, DEFAULT_HEARTBEATS_SCALING_FACTOR);
    if (HEARTBEATS_SCALING_FACTOR < MIN_HEARTBEATS_SCALING_FACTOR) {
        HEARTBEATS_SCALING_FACTOR = DEFAULT_HEARTBEATS_SCALING_FACTOR;
    }

    // whether to dump or not every heartbeat message even when DEBUG is enabled
    dumpHeartbeat = conf.getBoolean(JT_HEARTBEATS_DUMP, false);

    // This is a directory of temporary submission files. We delete it
    // on startup, and can delete any files that we're done with
    this.conf = conf;
    JobConf jobConf = new JobConf(conf);

    // Set ports, start RPC servers, setup security policy etc.
    InetSocketAddress addr = getAddress(conf);
    this.localMachine = addr.getHostName();
    this.port = addr.getPort();

    int handlerCount = conf.getInt(JT_IPC_HANDLER_COUNT, 10);
    this.interTrackerServer = RPC.getServer(SkewTuneClientProtocol.class, this, addr.getHostName(),
            addr.getPort(), handlerCount, false, conf, secretManager);
    if (LOG.isDebugEnabled()) {
        Properties p = System.getProperties();
        for (Iterator it = p.keySet().iterator(); it.hasNext();) {
            String key = (String) it.next();
            String val = p.getProperty(key);
            LOG.debug("Property '" + key + "' is " + val);
        }
    }

    InetSocketAddress infoSocAddr = NetUtils
            .createSocketAddr(conf.get(JT_HTTP_ADDRESS, String.format("%s:0", this.localMachine)));
    String infoBindAddress = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.startTime = System.currentTimeMillis();
    infoServer = new HttpServer("job", infoBindAddress, tmpInfoPort, tmpInfoPort == 0, conf);
    infoServer.setAttribute("job.tracker", this);
    infoServer.addServlet("jobcompletion", "/completion", JobCompletionServlet.class);
    infoServer.addServlet("taskspeculation", "/speculation", SpeculationEventServlet.class);
    infoServer.addServlet("skewreport", "/skew", SkewReportServlet.class);
    infoServer.addServlet("tasksplit", "/split/*", SplitTaskServlet.class);
    infoServer.addServlet("tasksplitV2", "/splitV2/*", SplitTaskV2Servlet.class);
    infoServer.start();

    this.trackerIdentifier = jobtrackerIdentifier;

    // The rpc/web-server ports can be ephemeral ports...
    // ... ensure we have the correct info
    this.port = interTrackerServer.getListenerAddress().getPort();
    this.conf.set(JT_IPC_ADDRESS, (this.localMachine + ":" + this.port));
    LOG.info("JobTracker up at: " + this.port);
    this.infoPort = this.infoServer.getPort();
    this.conf.set(JT_HTTP_ADDRESS, infoBindAddress + ":" + this.infoPort);
    LOG.info("JobTracker webserver: " + this.infoServer.getPort());
    this.defaultNotificationUrl = String.format("http://%s:%d/completion?jobid=$jobId&status=$jobStatus",
            infoBindAddress, this.infoPort);
    LOG.info("JobTracker completion URI: " + defaultNotificationUrl);
    //        this.defaultSpeculationEventUrl = String.format("http://%s:%d/speculation?taskid=$taskId&remainTime=$taskRemainTime",infoBindAddress,this.infoPort);
    this.defaultSpeculationEventUrl = String.format("http://%s:%d/speculation?jobid=$jobId", infoBindAddress,
            this.infoPort);
    LOG.info("JobTracker speculation event URI: " + defaultSpeculationEventUrl);
    this.defaultSkewReportUrl = String.format("http://%s:%d/skew", infoBindAddress, this.infoPort);
    LOG.info("JobTracker skew report event URI: " + defaultSkewReportUrl);
    this.trackerHttp = String.format("http://%s:%d", infoBindAddress, this.infoPort);

    while (!Thread.currentThread().isInterrupted()) {
        try {
            // if we haven't contacted the namenode go ahead and do it
            if (fs == null) {
                fs = mrOwner.doAs(new PrivilegedExceptionAction<FileSystem>() {
                    @Override
                    public FileSystem run() throws IOException {
                        return FileSystem.get(conf);
                    }
                });
            }

            // clean up the system dir, which will only work if hdfs is out
            // of safe mode
            if (systemDir == null) {
                systemDir = new Path(getSystemDir());
            }
            try {
                FileStatus systemDirStatus = fs.getFileStatus(systemDir);
                if (!systemDirStatus.getOwner().equals(mrOwner.getShortUserName())) {
                    throw new AccessControlException(
                            "The systemdir " + systemDir + " is not owned by " + mrOwner.getShortUserName());
                }
                if (!systemDirStatus.getPermission().equals(SYSTEM_DIR_PERMISSION)) {
                    LOG.warn("Incorrect permissions on " + systemDir + ". Setting it to "
                            + SYSTEM_DIR_PERMISSION);
                    fs.setPermission(systemDir, new FsPermission(SYSTEM_DIR_PERMISSION));
                } else {
                    break;
                }
            } catch (FileNotFoundException fnf) {
                // ignore
            }
        } catch (AccessControlException ace) {
            LOG.warn("Failed to operate on " + JTConfig.JT_SYSTEM_DIR + "(" + systemDir
                    + ") because of permissions.");
            LOG.warn("Manually delete the " + JTConfig.JT_SYSTEM_DIR + "(" + systemDir
                    + ") and then start the JobTracker.");
            LOG.warn("Bailing out ... ");
            throw ace;
        } catch (IOException ie) {
            LOG.info("problem cleaning system directory: " + systemDir, ie);
        }
        Thread.sleep(FS_ACCESS_RETRY_PERIOD);
    }

    if (Thread.currentThread().isInterrupted()) {
        throw new InterruptedException();
    }

    // initialize cluster variable
    cluster = new Cluster(this.conf);

    // now create a job client proxy
    jtClient = (ClientProtocol) RPC.getProxy(ClientProtocol.class, ClientProtocol.versionID,
            JobTracker.getAddress(conf), mrOwner, this.conf,
            NetUtils.getSocketFactory(conf, ClientProtocol.class));

    new SpeculativeScheduler().start();

    // initialize task event fetcher
    new TaskCompletionEventFetcher().start();

    // Same with 'localDir' except it's always on the local disk.
    asyncDiskService = new MRAsyncDiskService(FileSystem.getLocal(conf), conf.getLocalDirs());
    asyncDiskService.moveAndDeleteFromEachVolume(SUBDIR);

    // keep at least one asynchronous worker per CPU core
    int numProcs = Runtime.getRuntime().availableProcessors();
    LOG.info("# of available processors = " + numProcs);
    int maxFactor = conf.getInt(JT_MAX_ASYNC_WORKER_FACTOR, 2);
    asyncWorkers = new ThreadPoolExecutor(numProcs, numProcs * maxFactor, 30, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>(true), new ThreadPoolExecutor.CallerRunsPolicy());

    speculativeSplit = conf.getBoolean(JT_SPECULATIVE_SPLIT, false);
}