Example usage for org.apache.hadoop.security UserGroupInformation setConfiguration

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.setConfiguration.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void setConfiguration(Configuration conf) 

Document

Set the static configuration for UGI.
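
Before the individual examples, here is a minimal sketch (not taken from any of the sources below) of the typical call order: build a Configuration, apply it with setConfiguration, then perform the login. The class name, principal, and keytab path are placeholders, assuming a Kerberos-secured cluster.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiSetConfigurationSketch {
    public static void main(String[] args) throws IOException {
        // Client-side Hadoop configuration; the authentication mode set here
        // is what setConfiguration() makes visible to the static UGI state.
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos");

        // Must be called before any login or getCurrentUser() call.
        UserGroupInformation.setConfiguration(conf);

        // Placeholder principal and keytab path; replace with real values.
        UserGroupInformation.loginUserFromKeytab("service/host@EXAMPLE.COM",
                "/etc/security/keytabs/service.keytab");

        System.out.println("Logged in as " + UserGroupInformation.getCurrentUser());
    }
}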

Usage

From source file: org.elasticsearch.hadoop.yarn.client.ClientRpc.java

License: Apache License

public void start() {
    if (client != null) {
        return;
    }

    UserGroupInformation.setConfiguration(cfg);

    client = YarnClient.createYarnClient();
    client.init(cfg);
    client.start();
}

From source file: org.elasticsearch.hadoop.yarn.rpc.YarnRpc.java

License: Apache License

public void start() {
    // handle security
    if (UserGroupInformation.isSecurityEnabled()) {
        UserGroupInformation.setConfiguration(cfg);
    }

    try {
        endpoint = resolveEndpoint(cfg);
    } catch (IOException ex) {
        throw new EsYarnException("Cannot resolve endpoint", ex);
    }

    UserGroupInformation ugi = null;
    try {
        ugi = UserGroupInformation.getCurrentUser();
    } catch (IOException ex) {
        throw new EsYarnException("Cannot get current user", ex);
    }

    // create proxy
    proxy = ugi.doAs(new PrivilegedAction<P>() {
        @SuppressWarnings("unchecked")
        @Override
        public P run() {
            return (P) YarnRPC.create(cfg).getProxy(protocolType, endpoint, cfg);
        }
    });

}

From source file: org.elasticsearch.repositories.hdfs.HaHdfsFailoverTestSuiteIT.java

License: Apache License

public void testHAFailoverWithRepository() throws Exception {
    RestClient client = client();
    Map<String, String> emptyParams = Collections.emptyMap();
    Header contentHeader = new BasicHeader("Content-Type", "application/json");

    String esKerberosPrincipal = System.getProperty("test.krb5.principal.es");
    String hdfsKerberosPrincipal = System.getProperty("test.krb5.principal.hdfs");
    String kerberosKeytabLocation = System.getProperty("test.krb5.keytab.hdfs");
    boolean securityEnabled = hdfsKerberosPrincipal != null;

    Configuration hdfsConfiguration = new Configuration();
    hdfsConfiguration.set("dfs.nameservices", "ha-hdfs");
    hdfsConfiguration.set("dfs.ha.namenodes.ha-hdfs", "nn1,nn2");
    hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn1", "localhost:10001");
    hdfsConfiguration.set("dfs.namenode.rpc-address.ha-hdfs.nn2", "localhost:10002");
    hdfsConfiguration.set("dfs.client.failover.proxy.provider.ha-hdfs",
            "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");

    AccessController.doPrivileged((PrivilegedExceptionAction<Void>) () -> {
        if (securityEnabled) {
            // ensure that keytab exists
            Path kt = PathUtils.get(kerberosKeytabLocation);
            if (Files.exists(kt) == false) {
                throw new IllegalStateException("Could not locate keytab at " + kerberosKeytabLocation);
            }
            if (Files.isReadable(kt) != true) {
                throw new IllegalStateException("Could not read keytab at " + kerberosKeytabLocation);
            }
            logger.info("Keytab Length: " + Files.readAllBytes(kt).length);

            // set principal names
            hdfsConfiguration.set("dfs.namenode.kerberos.principal", hdfsKerberosPrincipal);
            hdfsConfiguration.set("dfs.datanode.kerberos.principal", hdfsKerberosPrincipal);
            hdfsConfiguration.set("dfs.data.transfer.protection", "authentication");

            SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS,
                    hdfsConfiguration);
            UserGroupInformation.setConfiguration(hdfsConfiguration);
            UserGroupInformation.loginUserFromKeytab(hdfsKerberosPrincipal, kerberosKeytabLocation);
        } else {
            SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.SIMPLE,
                    hdfsConfiguration);
            UserGroupInformation.setConfiguration(hdfsConfiguration);
            UserGroupInformation.getCurrentUser();
        }
        return null;
    });

    // Create repository
    {
        Response response = client.performRequest("PUT", "/_snapshot/hdfs_ha_repo_read", emptyParams,
                new NStringEntity("{" + "\"type\":\"hdfs\"," + "\"settings\":{"
                        + "\"uri\": \"hdfs://ha-hdfs/\",\n"
                        + "\"path\": \"/user/elasticsearch/existing/readonly-repository\","
                        + "\"readonly\": \"true\"," + securityCredentials(securityEnabled, esKerberosPrincipal)
                        + "\"conf.dfs.nameservices\": \"ha-hdfs\","
                        + "\"conf.dfs.ha.namenodes.ha-hdfs\": \"nn1,nn2\","
                        + "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn1\": \"localhost:10001\","
                        + "\"conf.dfs.namenode.rpc-address.ha-hdfs.nn2\": \"localhost:10002\","
                        + "\"conf.dfs.client.failover.proxy.provider.ha-hdfs\": "
                        + "\"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider\"" + "}"
                        + "}", Charset.defaultCharset()),
                contentHeader);

        Assert.assertEquals(200, response.getStatusLine().getStatusCode());
    }

    // Get repository
    {
        Response response = client.performRequest("GET", "/_snapshot/hdfs_ha_repo_read/_all", emptyParams);
        Assert.assertEquals(200, response.getStatusLine().getStatusCode());
    }

    // Failover the namenode to the second.
    failoverHDFS("nn1", "nn2", hdfsConfiguration);

    // Get repository again
    {
        Response response = client.performRequest("GET", "/_snapshot/hdfs_ha_repo_read/_all", emptyParams);
        Assert.assertEquals(200, response.getStatusLine().getStatusCode());
    }
}

From source file: org.elasticsearch.repositories.hdfs.HdfsRepository.java

License: Apache License

private FileSystem initFileSystem(RepositorySettings repositorySettings) throws IOException {
    Configuration cfg = new Configuration(repositorySettings.settings().getAsBoolean("load_defaults",
            componentSettings.getAsBoolean("load_defaults", true)));

    String confLocation = repositorySettings.settings().get("conf_location",
            componentSettings.get("conf_location"));
    if (Strings.hasText(confLocation)) {
        for (String entry : Strings.commaDelimitedListToStringArray(confLocation)) {
            addConfigLocation(cfg, entry.trim());
        }
    }

    Map<String, String> map = componentSettings.getByPrefix("conf.").getAsMap();
    for (Entry<String, String> entry : map.entrySet()) {
        cfg.set(entry.getKey(), entry.getValue());
    }

    UserGroupInformation.setConfiguration(cfg);

    String uri = repositorySettings.settings().get("uri", componentSettings.get("uri"));
    URI actualUri = (uri != null ? URI.create(uri) : FileSystem.getDefaultUri(cfg));
    String user = repositorySettings.settings().get("user", componentSettings.get("user"));

    try {
        // disable FS cache
        String disableFsCache = String.format("fs.%s.impl.disable.cache", actualUri.getScheme());
        cfg.setBoolean(disableFsCache, true);
        return (user != null ? FileSystem.get(actualUri, cfg, user) : FileSystem.get(actualUri, cfg));
    } catch (Exception ex) {
        throw new ElasticsearchGenerationException(
                String.format("Cannot create Hdfs file-system for uri [%s]", actualUri), ex);
    }
}

From source file: org.mule.modules.hdfs.connection.config.Kerberos.java

License: Open Source License

/**
 * Establish the connection to the Hadoop Distributed File System.
 *
 * @param nameNodeUri
 *            The name of the file system to connect to. It is passed to the HDFS client as the {FileSystem#FS_DEFAULT_NAME_KEY} configuration entry. It can be overridden by values
 *            in configurationResources and configurationEntries.
 * @throws org.mule.api.ConnectionException
 *             Holding information regarding the reason for failure while trying to connect to the system.
 */
@Connect
@TestConnectivity
public void connect(@ConnectionKey @FriendlyName("NameNode URI") final String nameNodeUri)
        throws ConnectionException {
    hadoopClientConfigurationProvider = new HadoopClientConfigurationProvider();
    final Configuration configuration = hadoopClientConfigurationProvider.forKerberosAuth(nameNodeUri,
            getUsername(), getConfigurationResources(), getConfigurationEntries());
    UserGroupInformation.setConfiguration(configuration);
    if (isKeytabProvided()) {
        loginUserUsingKeytab();
    }
    fileSystem(configuration);
}

From source file: org.mule.modules.hdfs.connection.config.Simple.java

License: Open Source License

/**
 * Establish the connection to the Hadoop Distributed File System.
 *
 * @param nameNodeUri
 *            The name of the file system to connect to. It is passed to the HDFS client as the {FileSystem#FS_DEFAULT_NAME_KEY} configuration entry. It can be overridden by values
 *            in configurationResources and configurationEntries.
 * @throws org.mule.api.ConnectionException
 *             Holding information regarding the reason for failure while trying to connect to the system.
 */
@Connect
@TestConnectivity
public void connect(@ConnectionKey @FriendlyName("NameNode URI") final String nameNodeUri)
        throws ConnectionException {
    hadoopClientConfigurationProvider = new HadoopClientConfigurationProvider();
    final Configuration configuration = hadoopClientConfigurationProvider.forSimpleAuth(nameNodeUri,
            getUsername(), getConfigurationResources(), getConfigurationEntries());
    UserGroupInformation.setConfiguration(configuration);
    fileSystem(configuration);
}

From source file: org.notmysock.tez.BroadcastTest.java

License: Apache License

public boolean run(Configuration conf, boolean doLocalityCheck) throws Exception {
    System.out.println("Running BroadcastTest");
    // conf and UGI
    TezConfiguration tezConf;
    if (conf != null) {
        tezConf = new TezConfiguration(conf);
    } else {
        tezConf = new TezConfiguration();
    }
    tezConf.setBoolean(TezConfiguration.TEZ_AM_CONTAINER_REUSE_ENABLED, true);
    UserGroupInformation.setConfiguration(tezConf);
    String user = UserGroupInformation.getCurrentUser().getShortUserName();

    // staging dir
    FileSystem fs = FileSystem.get(tezConf);
    String stagingDirStr = Path.SEPARATOR + "user" + Path.SEPARATOR + user + Path.SEPARATOR + ".staging"
            + Path.SEPARATOR + Path.SEPARATOR + Long.toString(System.currentTimeMillis());
    Path stagingDir = new Path(stagingDirStr);
    tezConf.set(TezConfiguration.TEZ_AM_STAGING_DIR, stagingDirStr);
    stagingDir = fs.makeQualified(stagingDir);

    Path jobJar = new Path(stagingDir, "job.jar");
    fs.copyFromLocalFile(getCurrentJarURL(), jobJar);

    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    localResources.put("job.jar", createLocalResource(fs, jobJar));

    TezClient tezSession = null;
    // needs session or else TaskScheduler does not hold onto containers
    tezSession = TezClient.create("BroadcastTest", tezConf);
    tezSession.addAppMasterLocalFiles(localResources);
    tezSession.start();

    DAGClient dagClient = null;

    try {
        DAG dag = createDAG(fs, tezConf, stagingDir, localResources);

        dag.addTaskLocalFiles(localResources);

        tezSession.waitTillReady();
        dagClient = tezSession.submitDAG(dag);

        // monitoring
        DAGStatus dagStatus = dagClient.waitForCompletionWithStatusUpdates(null);
        if (dagStatus.getState() != DAGStatus.State.SUCCEEDED) {
            System.out.println("DAG diagnostics: " + dagStatus.getDiagnostics());
            return false;
        }
        return true;
    } finally {
        fs.delete(stagingDir, true);
        tezSession.stop();
    }
}

From source file: org.schedoscope.metascope.task.MetastoreTask.java

License: Apache License

@Override
@Transactional(propagation = Propagation.REQUIRES_NEW)
public boolean run(long start) {
    LOG.info("Sync repository with metastore");
    HiveConf conf = new HiveConf();
    conf.set("hive.metastore.local", "false");
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, config.getMetastoreThriftUri());
    String principal = config.getKerberosPrincipal();
    if (principal != null && !principal.isEmpty()) {
        conf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, true);
        conf.setVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL, principal);
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);
    }

    HiveMetaStoreClient client = null;
    try {
        client = new HiveMetaStoreClient(conf);
    } catch (Exception e) {
        LOG.info("[MetastoreSyncTask] FAILED: Could not connect to hive metastore", e);
        return false;
    }

    FileSystem fs;
    try {
        Configuration hadoopConfig = new Configuration();
        hadoopConfig.set("fs.defaultFS", config.getHdfs());
        fs = FileSystem.get(hadoopConfig);
    } catch (IOException e) {
        LOG.info("[MetastoreSyncTask] FAILED: Could not connect to HDFS", e);
        client.close();
        return false;
    }

    LOG.info("Connected to metastore (" + config.getMetastoreThriftUri() + ")");

    List<String> allTables = metascopeTableRepository.getAllTablesNames();

    for (String fqdn : allTables) {
        //load table
        MetascopeTable table = metascopeTableRepository.findOne(fqdn);
        LOG.info("Get metastore information for table " + table.getFqdn());

        try {
            Table mTable = client.getTable(table.getDatabaseName(), table.getTableName());
            List<Partition> partitions = client.listPartitions(table.getDatabaseName(), table.getTableName(),
                    Short.MAX_VALUE);

            table.setTableOwner(mTable.getOwner());
            table.setCreatedAt(mTable.getCreateTime() * 1000L);
            table.setInputFormat(mTable.getSd().getInputFormat());
            table.setOutputFormat(mTable.getSd().getOutputFormat());
            table.setDataPath(mTable.getSd().getLocation());
            try {
                table.setDataSize(getDirectorySize(fs, table.getDataPath()));
                table.setPermissions(getPermission(fs, table.getDataPath()));
            } catch (IllegalArgumentException e) {
                LOG.warn("Could not retrieve dir size: " + e.getMessage());
                LOG.debug("ERROR: Could not read HDFS metadata", e);
            }

            long maxLastTransformation = -1;

            Hibernate.initialize(table.getViews());
            table.setViewsSize(table.getViews().size());

            for (Partition partition : partitions) {
                MetascopeView view = getView(table.getViews(), partition);
                if (view == null) {
                    // a view which is not registered as a partition in the Hive metastore should not exist ...
                    continue;
                }
                String numRows = partition.getParameters().get("numRows");
                if (numRows != null) {
                    view.setNumRows(Long.parseLong(numRows));
                }
                String totalSize = partition.getParameters().get("totalSize");
                if (totalSize != null) {
                    view.setTotalSize(Long.parseLong(totalSize));
                }
                String lastTransformation = partition.getParameters().get(SCHEDOSCOPE_TRANSFORMATION_TIMESTAMP);
                if (lastTransformation != null) {
                    long ts = Long.parseLong(lastTransformation);
                    view.setLastTransformation(ts);
                    if (ts > maxLastTransformation) {
                        maxLastTransformation = ts;
                    }
                }
                solrFacade.updateViewEntity(view, false);
            }

            if (maxLastTransformation != -1) {
                table.setLastTransformation(maxLastTransformation);
            } else {
                String ts = mTable.getParameters().get(SCHEDOSCOPE_TRANSFORMATION_TIMESTAMP);
                if (ts != null) {
                    long lastTransformationTs = Long.parseLong(ts);
                    table.setLastTransformation(lastTransformationTs);
                    MetascopeView rootView = table.getViews().get(0);
                    rootView.setLastTransformation(lastTransformationTs);
                    solrFacade.updateViewEntity(rootView, false);
                }
            }

            metascopeTableRepository.save(table);
            solrFacade.updateTablePartial(table, true);
        } catch (Exception e) {
            LOG.warn("Could not retrieve table from metastore", e);
            continue;
        }

    }

    /* commit to index */
    solrFacade.commit();

    client.close();
    try {
        fs.close();
    } catch (IOException e) {
        LOG.warn("Could not close connection to HDFS", e);
    }

    LOG.info("Sync with metastore finished");
    return true;
}

From source file: org.schedoscope.metascope.tasks.MetastoreSyncTask.java

License: Apache License

@Override
public boolean run(long start) {
    LOG.info("Sync repository with metastore");
    HiveConf conf = new HiveConf();
    conf.set("hive.metastore.local", "false");
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, config.getMetastoreThriftUri());
    String principal = config.getKerberosPrincipal();
    if (principal != null && !principal.isEmpty()) {
        conf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, true);
        conf.setVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL, principal);
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);
    }

    HiveMetaStoreClient client = null;
    try {
        client = new HiveMetaStoreClient(conf);
    } catch (Exception e) {
        LOG.info("[MetastoreSyncTask] FAILED: Could not connect to hive metastore", e);
        return false;
    }

    FileSystem fs;
    try {
        fs = FileSystem.get(hadoopConfig);
    } catch (IOException e) {
        LOG.info("[MetastoreSyncTask] FAILED: Could not connect to HDFS", e);
        return false;
    }

    Connection connection;
    try {
        connection = dataSource.getConnection();
    } catch (SQLException e) {
        LOG.info("[MetastoreSyncTask] FAILED: Could not connect to repository", e);
        return false;
    }

    LOG.info("Connected to metastore (" + config.getMetastoreThriftUri() + ")");
    List<TableEntity> allTables = repo.getTables(connection);
    List<ViewEntity> allViews = repo.getViews(connection);
    int tableSize = allTables.size();
    int counter = 1;
    for (TableEntity tableEntity : allTables) {
        Table mTable;
        try {
            mTable = client.getTable(tableEntity.getDatabaseName(), tableEntity.getTableName());
        } catch (Exception e) {
            LOG.warn("Could not retrieve table from metastore", e);
            continue;
        }
        LOG.info("[" + counter++ + "/" + tableSize + "] Get metastore information for table "
                + tableEntity.getFqdn());
        tableEntity.setTableOwner(mTable.getOwner());
        tableEntity.setCreatedAt(mTable.getCreateTime() * 1000L);
        tableEntity.setInputFormat(mTable.getSd().getInputFormat());
        tableEntity.setOutputFormat(mTable.getSd().getOutputFormat());
        tableEntity.setDataPath(mTable.getSd().getLocation());
        tableEntity.setDataSize(getDirectorySize(fs, tableEntity.getDataPath()));
        tableEntity.setPermissions(getPermission(fs, tableEntity.getDataPath()));
        List<ViewEntity> views = getViews(tableEntity.getFqdn(), allViews);
        if (views.size() == 1) {
            ViewEntity viewEntity = views.get(0);
            String schedoscopeTimestamp = mTable.getParameters().get(SCHEDOSCOPE_TRANSFORMATION_TIMESTAMP);
            setTransformationTimestamp(schedoscopeTimestamp, tableEntity, viewEntity,
                    mTable.getCreateTime() * 1000L, connection);
        } else {
            for (ViewEntity viewEntity : views) {
                Partition mPartition;
                try {
                    mPartition = client.getPartition(tableEntity.getDatabaseName(), tableEntity.getTableName(),
                            viewEntity.getParameterString());
                } catch (Exception e) {
                    LOG.warn("Could not retrieve partition from metastore", e);
                    continue;
                }
                if (mPartition != null) {
                    String schedoscopeTimestamp = mPartition.getParameters()
                            .get(SCHEDOSCOPE_TRANSFORMATION_TIMESTAMP);
                    setTransformationTimestamp(schedoscopeTimestamp, tableEntity, viewEntity,
                            mPartition.getCreateTime() * 1000L, connection);
                }
            }
        }
    }

    LOG.info("Updating tables ...");
    for (TableEntity tableEntity : allTables) {
        repo.insertOrUpdate(connection, tableEntity);
        solr.updateTablePartial(tableEntity, false);
    }

    try {
        connection.close();
    } catch (SQLException e) {
        LOG.error("Could not close connection to repository", e);
    }

    solr.commit();
    client.close();
    LOG.info("Sync with metastore finished");
    return true;
}

From source file: org.schedoscope.metascope.util.HiveQueryExecutor.java

License: Apache License

@PostConstruct
private void init() {
    Configuration conf = new Configuration();
    String principal = config.getKerberosPrincipal();
    if (principal != null && !principal.isEmpty()) {
        conf.set("hive.metastore.sasl.enabled", "true");
        conf.set("hive.metastore.kerberos.principal", principal);
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);
    }
    try {
        Class.forName(config.getHiveJdbcDriver());
    } catch (ClassNotFoundException e) {
        LOG.error("Hive JDBC driver not found", e);
    }
}