Example usage for org.apache.hadoop.security UserGroupInformation isSecurityEnabled

List of usage examples for org.apache.hadoop.security UserGroupInformation isSecurityEnabled

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.isSecurityEnabled().

Prototype

public static boolean isSecurityEnabled() 

Document

Determines whether UserGroupInformation is using Kerberos to determine user identities or is relying on simple authentication.
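
A minimal sketch of the typical check-then-login pattern, assuming a Kerberos principal and keytab path (both are hypothetical placeholders, not taken from the examples below). The configuration must be applied to UserGroupInformation first, since isSecurityEnabled() reads the statically configured authentication method.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

public class SecurityCheckExample {

    // Hypothetical principal and keytab path, for illustration only.
    private static final String PRINCIPAL = "service/host@EXAMPLE.COM";
    private static final String KEYTAB = "/etc/security/keytabs/service.keytab";

    public static UserGroupInformation login(Configuration conf) throws IOException {
        // isSecurityEnabled() consults the static UGI configuration, so set it first.
        UserGroupInformation.setConfiguration(conf);
        if (UserGroupInformation.isSecurityEnabled()) {
            // Kerberos is configured (hadoop.security.authentication=kerberos):
            // authenticate from the keytab before touching HDFS/HBase/Hive.
            UserGroupInformation.loginUserFromKeytab(PRINCIPAL, KEYTAB);
        }
        // With simple authentication this resolves to the current OS user.
        return UserGroupInformation.getLoginUser();
    }
}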

Usage

From source file: com.streamsets.datacollector.security.MapRLoginUgiProvider.java

License: Apache License

@Override
public UserGroupInformation getLoginUgi(Configuration hdfsConfiguration) throws IOException {
    // check system property to see if MapR U/P security is enabled
    String maprLoginEnabled = System.getProperty(MAPR_USERNAME_PASSWORD_SECURITY_ENABLED_KEY,
            MAPR_USERNAME_PASSWORD_SECURITY_ENABLED_DEFAULT);
    boolean isMapRLogin = Boolean.parseBoolean(maprLoginEnabled);
    AccessControlContext accessControlContext = AccessController.getContext();
    Subject subject = Subject.getSubject(accessControlContext);
    //HADOOP-13805
    HadoopConfigurationUtils.configureHadoopTreatSubjectExternal(hdfsConfiguration);
    // SDC-4015 As privateclassloader is false for MapR, UGI is shared and it also needs to be under jvm lock
    UserGroupInformation.setConfiguration(hdfsConfiguration);
    UserGroupInformation loginUgi;

    if (UserGroupInformation.isSecurityEnabled() && !isMapRLogin) {
        // The code in this block must only be executed in case Kerberos is enabled.
        // MapR implementation of UserGroupInformation.isSecurityEnabled() returns true even if Kerberos is not enabled.
        // System property helps to avoid this code path in such a case
        loginUgi = UserGroupInformation.getUGIFromSubject(subject);
    } else {
        UserGroupInformation.loginUserFromSubject(subject);
        loginUgi = UserGroupInformation.getLoginUser();
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
    }
    return loginUgi;

}

From source file: com.streamsets.pipeline.stage.destination.hbase.HBaseTarget.java

License: Apache License

private void validateSecurityConfigs(List<ConfigIssue> issues) {
    try {
        if (kerberosAuth) {
            hbaseConf.set(User.HBASE_SECURITY_CONF_KEY,
                    UserGroupInformation.AuthenticationMethod.KERBEROS.name());
            hbaseConf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                    UserGroupInformation.AuthenticationMethod.KERBEROS.name());
            if (hbaseConf.get(MASTER_KERBEROS_PRINCIPAL) == null) {
                try {
                    hbaseConf.set(MASTER_KERBEROS_PRINCIPAL, "hbase/_HOST@" + KerberosUtil.getDefaultRealm());
                } catch (Exception e) {
                    issues.add(getContext().createConfigIssue(Groups.HBASE.name(), "masterPrincipal",
                            Errors.HBASE_22));
                }
            }
            if (hbaseConf.get(REGIONSERVER_KERBEROS_PRINCIPAL) == null) {
                try {
                    hbaseConf.set(REGIONSERVER_KERBEROS_PRINCIPAL,
                            "hbase/_HOST@" + KerberosUtil.getDefaultRealm());
                } catch (Exception e) {
                    issues.add(getContext().createConfigIssue(Groups.HBASE.name(), "regionServerPrincipal",
                            Errors.HBASE_23));
                }
            }
        }

        UserGroupInformation.setConfiguration(hbaseConf);
        Subject subject = Subject.getSubject(AccessController.getContext());
        if (UserGroupInformation.isSecurityEnabled()) {
            loginUgi = UserGroupInformation.getUGIFromSubject(subject);
        } else {
            UserGroupInformation.loginUserFromSubject(subject);
            loginUgi = UserGroupInformation.getLoginUser();
        }
        LOG.info("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
        StringBuilder logMessage = new StringBuilder();
        if (kerberosAuth) {
            logMessage.append("Using Kerberos");
            if (loginUgi.getAuthenticationMethod() != UserGroupInformation.AuthenticationMethod.KERBEROS) {
                issues.add(getContext().createConfigIssue(Groups.HBASE.name(), "kerberosAuth", Errors.HBASE_16,
                        loginUgi.getAuthenticationMethod()));
            }
        } else {
            logMessage.append("Using Simple");
            hbaseConf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                    UserGroupInformation.AuthenticationMethod.SIMPLE.name());
        }
        LOG.info("Authentication Config: " + logMessage);
    } catch (Exception ex) {
        LOG.info("Error validating security configuration: " + ex, ex);
        issues.add(
                getContext().createConfigIssue(Groups.HBASE.name(), null, Errors.HBASE_17, ex.toString(), ex));
    }
}

From source file: com.streamsets.pipeline.stage.destination.hdfs.HdfsTarget.java

License: Apache License

private boolean validateHadoopFS(List<ConfigIssue> issues) {
    boolean validHapoopFsUri = true;
    if (hdfsUri.contains("://")) {
        try {
            new URI(hdfsUri);
        } catch (Exception ex) {
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_22,
                    hdfsUri, ex.toString(), ex));
            validHapoopFsUri = false;
        }
    } else {
        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri", Errors.HADOOPFS_18,
                hdfsUri));
        validHapoopFsUri = false;
    }

    StringBuilder logMessage = new StringBuilder();
    try {
        hdfsConfiguration = getHadoopConfiguration(issues);

        hdfsConfiguration.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, hdfsUri);

        // forcing UGI to initialize with the security settings from the stage
        UserGroupInformation.setConfiguration(hdfsConfiguration);
        Subject subject = Subject.getSubject(AccessController.getContext());
        if (UserGroupInformation.isSecurityEnabled()) {
            loginUgi = UserGroupInformation.getUGIFromSubject(subject);
        } else {
            UserGroupInformation.loginUserFromSubject(subject);
            loginUgi = UserGroupInformation.getLoginUser();
        }
        LOG.info("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
        if (hdfsKerberos) {
            logMessage.append("Using Kerberos");
            if (loginUgi.getAuthenticationMethod() != UserGroupInformation.AuthenticationMethod.KERBEROS) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsKerberos",
                        Errors.HADOOPFS_00, loginUgi.getAuthenticationMethod(),
                        UserGroupInformation.AuthenticationMethod.KERBEROS));
            }
        } else {
            logMessage.append("Using Simple");
            hdfsConfiguration.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                    UserGroupInformation.AuthenticationMethod.SIMPLE.name());
        }
        if (validHapoopFsUri) {
            getUGI().doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    try (FileSystem fs = getFileSystemForInitDestroy()) { //to trigger the close
                    }
                    return null;
                }
            });
        }
    } catch (Exception ex) {
        LOG.info("Validation Error: " + Errors.HADOOPFS_01.getMessage(), hdfsUri, ex.toString(), ex);
        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_01, hdfsUri,
                String.valueOf(ex), ex));
    }
    LOG.info("Authentication Config: " + logMessage);
    return validHapoopFsUri;
}

From source file: com.streamsets.pipeline.stage.destination.hive.HiveTarget.java

License: Apache License

@Override
protected List<ConfigIssue> init() {
    List<ConfigIssue> issues = super.init();

    partitionsToFields = new HashMap<>();
    columnsToFields = new HashMap<>();

    hiveConf = new HiveConf();
    if (null != hiveConfDir && !hiveConfDir.isEmpty()) {
        File hiveConfDir = new File(this.hiveConfDir);

        if (!hiveConfDir.isAbsolute()) {
            hiveConfDir = new File(getContext().getResourcesDirectory(), this.hiveConfDir).getAbsoluteFile();
        }

        if (hiveConfDir.exists()) {
            File coreSite = new File(hiveConfDir.getAbsolutePath(), "core-site.xml");
            File hiveSite = new File(hiveConfDir.getAbsolutePath(), "hive-site.xml");
            File hdfsSite = new File(hiveConfDir.getAbsolutePath(), "hdfs-site.xml");

            if (!coreSite.exists()) {
                issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveConfDir", Errors.HIVE_06,
                        coreSite.getName(), this.hiveConfDir));
            } else {
                hiveConf.addResource(new Path(coreSite.getAbsolutePath()));
            }

            if (!hdfsSite.exists()) {
                issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveConfDir", Errors.HIVE_06,
                        hdfsSite.getName(), this.hiveConfDir));
            } else {
                hiveConf.addResource(new Path(hdfsSite.getAbsolutePath()));
            }

            if (!hiveSite.exists()) {
                issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveConfDir", Errors.HIVE_06,
                        hiveSite.getName(), this.hiveConfDir));
            } else {
                hiveConf.addResource(new Path(hiveSite.getAbsolutePath()));
            }
        } else {
            issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveConfDir", Errors.HIVE_07,
                    this.hiveConfDir));
        }
    } else if (hiveThriftUrl == null || hiveThriftUrl.isEmpty()) {
        issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveThriftUrl", Errors.HIVE_13));
    }

    // Specified URL overrides what's in the Hive Conf
    hiveConf.set(HIVE_METASTORE_URI, hiveThriftUrl);
    // Add any additional hive conf overrides
    for (Map.Entry<String, String> entry : additionalHiveProperties.entrySet()) {
        hiveConf.set(entry.getKey(), entry.getValue());
    }

    try {
        // forcing UGI to initialize with the security settings from the stage
        UserGroupInformation.setConfiguration(hiveConf);
        Subject subject = Subject.getSubject(AccessController.getContext());
        if (UserGroupInformation.isSecurityEnabled()) {
            loginUgi = UserGroupInformation.getUGIFromSubject(subject);
        } else {
            UserGroupInformation.loginUserFromSubject(subject);
            loginUgi = UserGroupInformation.getLoginUser();
        }
        LOG.info("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
        // Proxy users are not currently supported due to: https://issues.apache.org/jira/browse/HIVE-11089
    } catch (IOException e) {
        issues.add(getContext().createConfigIssue(Groups.HIVE.name(), null, Errors.HIVE_11, e.getMessage()));
    }

    try {
        issues.addAll(loginUgi.doAs(new PrivilegedExceptionAction<List<ConfigIssue>>() {
            @Override
            public List<ConfigIssue> run() {
                List<ConfigIssue> issues = new ArrayList<>();
                HiveMetaStoreClient client = null;
                try {
                    client = new HiveMetaStoreClient(hiveConf);

                    List<FieldSchema> columnNames = client.getFields(schema, tableName);
                    for (FieldSchema field : columnNames) {
                        columnsToFields.put(field.getName(), SDC_FIELD_SEP + field.getName());
                    }

                    Table table = client.getTable(schema, tableName);
                    List<FieldSchema> partitionKeys = table.getPartitionKeys();
                    for (FieldSchema field : partitionKeys) {
                        partitionsToFields.put(field.getName(), SDC_FIELD_SEP + field.getName());
                    }
                } catch (UnknownDBException e) {
                    issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "schema", Errors.HIVE_02,
                            schema));
                } catch (UnknownTableException e) {
                    issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "table", Errors.HIVE_03,
                            schema, tableName));
                } catch (MetaException e) {
                    issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveUrl", Errors.HIVE_05,
                            e.getMessage()));
                } catch (TException e) {
                    issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "hiveUrl", Errors.HIVE_04,
                            e.getMessage()));
                } finally {
                    if (null != client) {
                        client.close();
                    }
                }
                return issues;
            }
        }));
    } catch (Error | IOException | InterruptedException e) {
        LOG.error("Received unknown error in validation: {}", e.toString(), e);
        issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "", Errors.HIVE_01, e.toString()));
    } catch (UndeclaredThrowableException e) {
        LOG.error("Received unknown error in validation: {}", e.toString(), e);
        issues.add(getContext().createConfigIssue(Groups.HIVE.name(), "", Errors.HIVE_01,
                e.getUndeclaredThrowable().toString()));
    }

    // Now apply any custom mappings
    if (validColumnMappings(issues)) {
        for (FieldMappingConfig mapping : columnMappings) {
            LOG.debug("Custom mapping field {} to column {}", mapping.field, mapping.columnName);
            if (columnsToFields.containsKey(mapping.columnName)) {
                LOG.debug("Mapping field {} to column {}", mapping.field, mapping.columnName);
                columnsToFields.put(mapping.columnName, mapping.field);
            } else if (partitionsToFields.containsKey(mapping.columnName)) {
                LOG.debug("Mapping field {} to partition {}", mapping.field, mapping.columnName);
                partitionsToFields.put(mapping.columnName, mapping.field);
            }
        }
    }

    dataGeneratorFactory = createDataGeneratorFactory();

    // Note that cleanup is done synchronously by default while servicing .get
    hiveConnectionPool = CacheBuilder.newBuilder().maximumSize(10).expireAfterAccess(10, TimeUnit.MINUTES)
            .removalListener(new HiveConnectionRemovalListener()).build(new HiveConnectionLoader());

    recordWriterPool = CacheBuilder.newBuilder().maximumSize(10).expireAfterAccess(10, TimeUnit.MINUTES)
            .build(new HiveRecordWriterLoader());

    LOG.debug("Total issues: {}", issues.size());
    return issues;
}

From source file: com.streamsets.pipeline.stage.origin.hdfs.cluster.ClusterHdfsSource.java

License: Apache License

private void validateHadoopFS(List<ConfigIssue> issues) {
    boolean validHapoopFsUri = true;
    hadoopConf = getHadoopConfiguration(issues);
    String hdfsUriInConf;
    if (hdfsUri != null && !hdfsUri.isEmpty()) {
        hadoopConf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, hdfsUri);
    } else {
        hdfsUriInConf = hadoopConf.get(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY);
        if (hdfsUriInConf == null) {
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri", Errors.HADOOPFS_19));
            return;
        } else {
            hdfsUri = hdfsUriInConf;
        }
    }
    if (hdfsUri.contains("://")) {
        try {
            URI uri = new URI(hdfsUri);
            if (!"hdfs".equals(uri.getScheme())) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri",
                        Errors.HADOOPFS_12, hdfsUri, uri.getScheme()));
                validHapoopFsUri = false;
            } else if (uri.getAuthority() == null) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri",
                        Errors.HADOOPFS_13, hdfsUri));
                validHapoopFsUri = false;
            }
        } catch (Exception ex) {
            issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri", Errors.HADOOPFS_22,
                    hdfsUri, ex.getMessage(), ex));
            validHapoopFsUri = false;
        }
    } else {
        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsUri", Errors.HADOOPFS_02,
                hdfsUri));
        validHapoopFsUri = false;
    }

    StringBuilder logMessage = new StringBuilder();
    try {
        // forcing UGI to initialize with the security settings from the stage
        UserGroupInformation.setConfiguration(hadoopConf);
        Subject subject = Subject.getSubject(AccessController.getContext());
        if (UserGroupInformation.isSecurityEnabled()) {
            loginUgi = UserGroupInformation.getUGIFromSubject(subject);
        } else {
            UserGroupInformation.loginUserFromSubject(subject);
            loginUgi = UserGroupInformation.getLoginUser();
        }
        LOG.info("Subject = {}, Principals = {}, Login UGI = {}", subject,
                subject == null ? "null" : subject.getPrincipals(), loginUgi);
        if (hdfsKerberos) {
            logMessage.append("Using Kerberos");
            if (loginUgi.getAuthenticationMethod() != UserGroupInformation.AuthenticationMethod.KERBEROS) {
                issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), "hdfsKerberos",
                        Errors.HADOOPFS_00, loginUgi.getAuthenticationMethod(),
                        UserGroupInformation.AuthenticationMethod.KERBEROS));
            }
        } else {
            logMessage.append("Using Simple");
            hadoopConf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
                    UserGroupInformation.AuthenticationMethod.SIMPLE.name());
        }
        if (validHapoopFsUri) {
            getUGI().doAs(new PrivilegedExceptionAction<Void>() {
                @Override
                public Void run() throws Exception {
                    try (FileSystem fs = getFileSystemForInitDestroy()) { //to trigger the close
                    }
                    return null;
                }
            });
        }
    } catch (Exception ex) {
        LOG.info("Error connecting to FileSystem: " + ex, ex);
        issues.add(getContext().createConfigIssue(Groups.HADOOP_FS.name(), null, Errors.HADOOPFS_11, hdfsUri,
                String.valueOf(ex), ex));
    }
    LOG.info("Authentication Config: " + logMessage);
}

From source file: com.sxp.hbase.security.HBaseSecurityUtil.java

License: Apache License

public static UserProvider login(Map conf, Configuration hbaseConfig) throws IOException {
    UserProvider provider = UserProvider.instantiate(hbaseConfig);
    if (UserGroupInformation.isSecurityEnabled()) {
        String keytab = (String) conf.get(STORM_KEYTAB_FILE_KEY);
        if (keytab != null) {
            hbaseConfig.set(STORM_KEYTAB_FILE_KEY, keytab);
        }
        String userName = (String) conf.get(STORM_USER_NAME_KEY);
        if (userName != null) {
            hbaseConfig.set(STORM_USER_NAME_KEY, userName);
        }
        provider.login(STORM_KEYTAB_FILE_KEY, STORM_USER_NAME_KEY,
                InetAddress.getLocalHost().getCanonicalHostName());
    }
    return provider;
}

From source file: com.toy.Client.java

License: Apache License

/**
 * Start a new Application Master and deploy the web application on 2 Tomcat containers
 *
 * @throws Exception
 */
void start() throws Exception {

    //Check tomcat dir
    final File tomcatHomeDir = new File(toyConfig.tomcat);
    final File tomcatLibraries = new File(tomcatHomeDir, "lib");
    final File tomcatBinaries = new File(tomcatHomeDir, "bin");
    Preconditions.checkState(tomcatLibraries.isDirectory(),
            tomcatLibraries.getAbsolutePath() + " does not exist");

    //Check war file
    final File warFile = new File(toyConfig.war);
    Preconditions.checkState(warFile.isFile(), warFile.getAbsolutePath() + " does not exist");

    yarn = YarnClient.createYarnClient();
    yarn.init(configuration);
    yarn.start();

    YarnClientApplication yarnApplication = yarn.createApplication();
    GetNewApplicationResponse newApplication = yarnApplication.getNewApplicationResponse();
    appId = newApplication.getApplicationId();
    ApplicationSubmissionContext appContext = yarnApplication.getApplicationSubmissionContext();
    appContext.setApplicationName("Tomcat : " + tomcatHomeDir.getName() + "\n War : " + warFile.getName());
    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // Register required libraries
    Map<String, LocalResource> localResources = new HashMap<>();
    FileSystem fs = FileSystem.get(configuration);
    uploadDepAndRegister(localResources, appId, fs, "lib-ext/curator-client-2.3.0.jar");
    uploadDepAndRegister(localResources, appId, fs, "lib-ext/curator-framework-2.3.0.jar");
    uploadDepAndRegister(localResources, appId, fs, "lib-ext/curator-recipes-2.3.0.jar");

    // Register application master jar
    registerLocalResource(localResources, appId, fs, new Path(appMasterJar));

    // Register the WAR that will be deployed on Tomcat
    registerLocalResource(localResources, appId, fs, new Path(warFile.getAbsolutePath()));

    // Register Tomcat libraries
    for (File lib : tomcatLibraries.listFiles()) {
        registerLocalResource(localResources, appId, fs, new Path(lib.getAbsolutePath()));
    }

    File juli = new File(tomcatBinaries, "tomcat-juli.jar");
    if (juli.exists()) {
        registerLocalResource(localResources, appId, fs, new Path(juli.getAbsolutePath()));
    }

    amContainer.setLocalResources(localResources);

    // Setup master environment
    Map<String, String> env = new HashMap<>();
    final String TOMCAT_LIBS = fs.getHomeDirectory() + "/" + Constants.TOY_PREFIX + appId.toString();
    env.put(Constants.TOMCAT_LIBS, TOMCAT_LIBS);

    if (toyConfig.zookeeper != null) {
        env.put(Constants.ZOOKEEPER_QUORUM, toyConfig.zookeeper);
    } else {
        env.put(Constants.ZOOKEEPER_QUORUM, NetUtils.getHostname());
    }

    // 1. Compute classpath
    StringBuilder classPathEnv = new StringBuilder(ApplicationConstants.Environment.CLASSPATH.$())
            .append(File.pathSeparatorChar).append("./*");
    for (String c : configuration.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (configuration.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }
    env.put("CLASSPATH", classPathEnv.toString());
    env.put(Constants.WAR, warFile.getName());
    // For unit test with YarnMiniCluster
    env.put(YarnConfiguration.RM_SCHEDULER_ADDRESS, configuration.get(YarnConfiguration.RM_SCHEDULER_ADDRESS));
    amContainer.setEnvironment(env);

    // 1.2 Set constraint for the app master
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(32);
    appContext.setResource(capability);

    // 2. Compute app master cmd line
    Vector<CharSequence> vargs = new Vector<>(10);
    // Set java executable command
    vargs.add(ApplicationConstants.Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx32m");
    // Set class name
    vargs.add(TOYMaster.class.getCanonicalName());
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<>();
    commands.add(command.toString());
    amContainer.setCommands(commands);
    appContext.setAMContainerSpec(amContainer);

    // 3. Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = configuration.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new Exception("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final org.apache.hadoop.security.token.Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer,
                credentials);
        if (tokens != null) {
            for (org.apache.hadoop.security.token.Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setQueue("default");
    LOG.info("Submitting TOY application {} to ASM", appId.toString());
    yarn.submitApplication(appContext);

    // Monitor the application and exit if it is RUNNING
    monitorApplication(appId);
}

From source file: com.trendmicro.hdfs.webdav.Main.java

License: Apache License

public static void main(String[] args) {

    HDFSWebDAVServlet servlet = HDFSWebDAVServlet.getServlet();
    Configuration conf = servlet.getConfiguration();

    // Process command line 

    Options options = new Options();
    options.addOption("d", "debug", false, "Enable debug logging");
    options.addOption("p", "port", true, "Port to bind to [default: 8080]");
    options.addOption("b", "bind-address", true, "Address or hostname to bind to [default: 0.0.0.0]");
    options.addOption("g", "ganglia", true, "Send Ganglia metrics to host:port [default: none]");

    CommandLine cmd = null;
    try {
        cmd = new PosixParser().parse(options, args);
    } catch (ParseException e) {
        printUsageAndExit(options, -1);
    }

    if (cmd.hasOption('d')) {
        Logger rootLogger = Logger.getLogger("com.trendmicro");
        rootLogger.setLevel(Level.DEBUG);
    }

    if (cmd.hasOption('b')) {
        conf.set("hadoop.webdav.bind.address", cmd.getOptionValue('b'));
    }

    if (cmd.hasOption('p')) {
        conf.setInt("hadoop.webdav.port", Integer.valueOf(cmd.getOptionValue('p')));
    }

    String gangliaHost = null;
    int gangliaPort = 8649;
    if (cmd.hasOption('g')) {
        String val = cmd.getOptionValue('g');
        if (val.indexOf(':') != -1) {
            String[] split = val.split(":");
            gangliaHost = split[0];
            gangliaPort = Integer.valueOf(split[1]);
        } else {
            gangliaHost = val;
        }
    }

    InetSocketAddress addr = getAddress(conf);

    // Log in the server principal from keytab

    UserGroupInformation.setConfiguration(conf);
    if (UserGroupInformation.isSecurityEnabled())
        try {
            SecurityUtil.login(conf, "hadoop.webdav.server.kerberos.keytab",
                    "hadoop.webdav.server.kerberos.principal", addr.getHostName());
        } catch (IOException e) {
            LOG.fatal("Could not log in", e);
            System.err.println("Could not log in");
            System.exit(-1);
        }

    // Set up embedded Jetty

    Server server = new Server();

    server.setSendServerVersion(false);
    server.setSendDateHeader(false);
    server.setStopAtShutdown(true);

    // Set up connector
    Connector connector = new SelectChannelConnector();
    connector.setPort(addr.getPort());
    connector.setHost(addr.getHostName());
    server.addConnector(connector);
    LOG.info("Listening on " + addr);

    // Set up context
    Context context = new Context(server, "/", Context.SESSIONS);
    // WebDAV servlet
    ServletHolder servletHolder = new ServletHolder(servlet);
    servletHolder.setInitParameter("authenticate-header", "Basic realm=\"Hadoop WebDAV Server\"");
    context.addServlet(servletHolder, "/*");
    // metrics instrumentation filter
    context.addFilter(new FilterHolder(new DefaultWebappMetricsFilter()), "/*", 0);
    // auth filter
    context.addFilter(new FilterHolder(new AuthFilter(conf)), "/*", 0);
    server.setHandler(context);

    // Set up Ganglia metrics reporting
    if (gangliaHost != null) {
        GangliaReporter.enable(1, TimeUnit.MINUTES, gangliaHost, gangliaPort);
    }

    // Start and join the server thread    
    try {
        server.start();
        server.join();
    } catch (Exception e) {
        LOG.fatal("Failed to start Jetty", e);
        System.err.println("Failed to start Jetty");
        System.exit(-1);
    }
}

From source file: com.yahoo.omid.committable.hbase.HBaseLogin.java

License: Apache License

public static UserGroupInformation loginIfNeeded(Config config) throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
        LOG.info("Security is enabled, logging in with principal={}, keytab={}", config.getPrincipal(),
                config.getKeytab());
        UserGroupInformation.loginUserFromKeytab(config.getPrincipal(), config.getKeytab());
    }
    return UserGroupInformation.getCurrentUser();
}

From source file: common.NameNode.java

License: Apache License

/**
 * Activate name-node servers and threads.
 */
void activate(Configuration conf) throws IOException {
    if ((isRole(NamenodeRole.ACTIVE)) && (UserGroupInformation.isSecurityEnabled())) {
        namesystem.activateSecretManager();
    }
    namesystem.activate(conf);
    startHttpServer(conf);
    server.start(); //start RPC server
    startTrashEmptier(conf);

    plugins = conf.getInstances("dfs.namenode.plugins", ServicePlugin.class);
    for (ServicePlugin p : plugins) {
        try {
            p.start(this);
        } catch (Throwable t) {
            LOG.warn("ServicePlugin " + p + " could not be started", t);
        }
    }
}