Example usage for org.apache.hadoop.security UserGroupInformation setConfiguration

List of usage examples for org.apache.hadoop.security UserGroupInformation setConfiguration

Introduction

On this page you can find example usages of org.apache.hadoop.security.UserGroupInformation.setConfiguration.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void setConfiguration(Configuration conf) 

Document

Set the static configuration for UGI.
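
The common pattern in the examples below is to build a Configuration carrying the cluster's security settings, pass it to UserGroupInformation.setConfiguration, and then log in from a keytab before touching HDFS. The following is a minimal sketch of that pattern, not code from any of the projects below; the resource paths, principal, and keytab location are placeholders.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class UgiSetConfigurationSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Pull in the cluster's security settings (hadoop.security.authentication, etc.).
        // Placeholder paths -- adjust to the actual Hadoop config directory.
        conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"));
        conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"));

        // Make UGI use this configuration instead of lazily loading a default one.
        UserGroupInformation.setConfiguration(conf);

        // On a Kerberized cluster, authenticate from a keytab before using HDFS.
        // Principal and keytab path are placeholders.
        if (UserGroupInformation.isSecurityEnabled()) {
            UserGroupInformation.loginUserFromKeytab("service/host@EXAMPLE.COM",
                    "/etc/security/keytabs/service.keytab");
        }

        FileSystem fs = FileSystem.get(conf);
        System.out.println("Current user: " + UserGroupInformation.getCurrentUser().getUserName());
        fs.close();
    }
}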

Usage

From source file: uk.ac.gla.terrier.probos.controller.ControllerServer.java

License: Open Source License

public ControllerServer(Configuration _hconf) throws IOException {
    this.yConf = new YarnConfiguration(_hconf);
    yConf.addResource("yarn-site.xml");
    UserGroupInformation.setConfiguration(yConf);

    this.pConf = new PConfiguration(_hconf);

    //do the Kerberos authentication
    if (UserGroupInformation.isSecurityEnabled()) {
        final String principal = pConf.get(PConfiguration.KEY_CONTROLLER_PRINCIPAL);
        String keytab = pConf.get(PConfiguration.KEY_CONTROLLER_KEYTAB);
        File fKeytab = new File(keytab);
        if (!fKeytab.exists()) {
            if (!fKeytab.isAbsolute()) {
                keytab = System.getProperty("probos.conf") + '/' + keytab;
                fKeytab = new File(keytab);
                pConf.set(PConfiguration.KEY_CONTROLLER_KEYTAB, keytab);
            }
            if (!fKeytab.exists())
                throw new FileNotFoundException("Could not find keytab file " + keytab);
        }

        LOG.debug("Starting login for " + principal + " using keytab " + keytab);
        SecurityUtil.login(pConf, PConfiguration.KEY_CONTROLLER_KEYTAB, PConfiguration.KEY_CONTROLLER_PRINCIPAL,
                Utils.getHostname());
        LOG.info("Switched principal to " + UserGroupInformation.getCurrentUser().getUserName());
    }

    this.mClient = MailClient.getMailClient(this.pConf);
    final String bindAddress = pConf.get(PConfiguration.KEY_CONTROLLER_BIND_ADDRESS);
    if (bindAddress == null)
        throw new IllegalArgumentException(PConfiguration.KEY_CONTROLLER_BIND_ADDRESS + " cannot be null");

    secretManager = new ControllerAPISecretManager(
            //delegationKeyUpdateInterval
            //how often the master key used to sign delegation tokens is rolled
            7 * 24 * 3600 * 1000, //Yarn default is 7 days

            //delegationTokenMaxLifetime -- maximum lifetime for which a delegation token is valid
            //i.e. how long can we keep renewing the token for?
            14 * 24 * 3600 * 1000, //Yarn default is 14 days

            //delegationTokenRenewInterval -- how long a token remains valid before it must be renewed
            7 * 24 * 3600 * 1000, //Yarn default is 7 days

            //delegationTokenRemoverScanInterval -- how often expired tokens and keys are removed
            3600 * 1000); //Yarn default is 1 hour

    //build the client rpc server: 8027
    int port = pConf.getInt(PConfiguration.KEY_CONTROLLER_PORT, 8027);
    LOG.info("Starting RPC server for " + PBSClient.class.getSimpleName() + " on port " + port);
    clientRpcserver = new RPC.Builder(yConf).setInstance(this).setBindAddress(bindAddress)
            .setProtocol(PBSClient.class).setPort(port).setSecretManager(secretManager).
            //setVerbose(true).
            build();
    System.setProperty("hadoop.policy.file", Constants.PRODUCT_NAME + "-policy.xml");
    clientRpcserver.refreshServiceAclWithLoadedConfiguration(yConf, new ControllerPolicyProvider());

    //build the master rpc server: 8028
    port = Constants.CONTROLLER_MASTER_PORT_OFFSET + pConf.getInt(PConfiguration.KEY_CONTROLLER_PORT, 8027);
    LOG.info("Starting RPC server for " + PBSMasterClient.class.getSimpleName() + " on port " + port);
    masterRpcserver = new RPC.Builder(yConf).setInstance(new ApplicationMasterAPI()).setBindAddress(bindAddress)
            .setProtocol(PBSMasterClient.class).setPort(port).setSecretManager(secretManager).
            //setVerbose(true).
            build();
    masterRpcserver.refreshServiceAclWithLoadedConfiguration(yConf, new ControllerPolicyProvider());

    port = Constants.CONTROLLER_INTERACTIVE_PORT_OFFSET
            + pConf.getInt(PConfiguration.KEY_CONTROLLER_PORT, 8027);
    LOG.info("Starting RPC server for " + PBSInteractiveClient.class.getSimpleName() + " on port " + port);
    //build the interactive rpc server: 8026
    interactiveRpcserver = new RPC.Builder(yConf).setInstance(new InteractiveTaskAPI())
            .setBindAddress(bindAddress).setProtocol(PBSInteractiveClient.class).setPort(port)
            .setSecretManager(secretManager).
            //setVerbose(true).
            build();
    interactiveRpcserver.refreshServiceAclWithLoadedConfiguration(yConf, new ControllerPolicyProvider());

    //build the webapp UI server
    final List<Entry<String, HttpServlet>> controllerServlets = new ArrayList<>();
    controllerServlets
            .add(new MapEntry<String, HttpServlet>("/", new QstatServlet("/", controllerServlets, this)));
    controllerServlets.add(
            new MapEntry<String, HttpServlet>("/pbsnodes", new PbsnodesServlet("/", controllerServlets, this)));
    //metrics is the Servlet from metrics.dropwizard for accessing metrics
    controllerServlets.add(new MapEntry<String, HttpServlet>("/metrics", new MetricsServlet(metrics)));
    //this is the hadoop servlet for accessing anything defined in JMX
    controllerServlets.add(new MapEntry<String, HttpServlet>("/jmx", new JMXJsonServlet()));
    final int httpport = pConf.getInt(PConfiguration.KEY_CONTROLLER_HTTP_PORT,
            Constants.DEFAULT_CONTROLLER_PORT + Constants.CONTROLLER_HTTP_PORT_OFFSET);
    LOG.info("Starting Jetty ProbosControllerHttp on port " + httpport);
    webServer = new WebServer("ProbosControllerHttp", controllerServlets, httpport);
    webServer.init(pConf);

    //this thread detects yarn jobs that have ended
    watcherThread = new Thread(new ControllerWatcher());
    watcherThread.setName(ControllerWatcher.class.getSimpleName());

    //ensure we have the directory
    Path _probosFolder = new Path(pConf.get(PConfiguration.KEY_CONTROLLER_JOBDIR));
    FileSystem controllerFS = FileSystem.get(yConf);
    if (!_probosFolder.isUriPathAbsolute()) {
        _probosFolder = _probosFolder.makeQualified(controllerFS.getUri(), controllerFS.getWorkingDirectory());
        assert _probosFolder.isUriPathAbsolute();
    }
    probosFolder = _probosFolder;
    if (!controllerFS.exists(probosFolder)) {
        throw new IllegalArgumentException(probosFolder.toString() + " does not exist");
    }

    //now initialise the metrics

    //jobs.queued.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "jobs", "queued.size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            int sum = 0;
            for (int i : user2QueuedCount.values())
                sum += i;
            return sum;
        }
    });
    //jobs.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "jobs", "size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            return jobArray.size();
        }
    });
    //jobs.held.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "jobs", "held.size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            return jobHolds.size();
        }
    });

    //nodes.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "nodes", "size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            try {
                return getNodesStatus().length;
            } catch (Exception e) {
                return 0;
            }
        }
    });

    //nodes.free.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "nodes", "free.size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            try {
                PBSNodeStatus[] nodes = getNodesStatus();
                int count = 0;
                for (PBSNodeStatus n : nodes)
                    if ("free".equals(n.getState()))
                        count++;
                return count;
            } catch (Exception e) {
                return 0;
            }
        }
    });

    runningJobs = metrics.counter(MetricRegistry.name(ControllerServer.class, "jobs", "running.counter"));
    rejectedJobs = metrics.counter(MetricRegistry.name(ControllerServer.class, "jobs", "rejected.counter"));
    killedJobs = metrics.counter(MetricRegistry.name(ControllerServer.class, "jobs", "killed.counter"));
    mailEvents = metrics.counter(MetricRegistry.name(ControllerServer.class, "mails", "counter"));
    mailFailures = metrics.counter(MetricRegistry.name(ControllerServer.class, "mails", "failure.counter"));

}

From source file: voldemort.store.readonly.fetcher.HdfsFetcher.java

License: Apache License

public File fetch(String sourceFileUrl, String destinationFile, String hadoopConfigPath) throws IOException {
    if (this.globalThrottleLimit != null) {
        if (this.globalThrottleLimit.getSpeculativeRate() < this.minBytesPerSecond)
            throw new VoldemortException("Too many push jobs.");
        this.globalThrottleLimit.incrementNumJobs();
    }

    ObjectName jmxName = null;
    try {

        final Configuration config = new Configuration();
        FileSystem fs = null;
        config.setInt("io.socket.receive.buffer", bufferSize);
        config.set("hadoop.rpc.socket.factory.class.ClientProtocol", ConfigurableSocketFactory.class.getName());
        config.set("hadoop.security.group.mapping", "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");

        final Path path = new Path(sourceFileUrl);

        boolean isHftpBasedFetch = sourceFileUrl.length() > 4 && sourceFileUrl.substring(0, 4).equals("hftp");
        logger.info("URL : " + sourceFileUrl + " and hftp protocol enabled = " + isHftpBasedFetch);
        logger.info("Hadoop path = " + hadoopConfigPath + " , keytab path = " + HdfsFetcher.keytabPath
                + " , kerberos principal = " + HdfsFetcher.kerberosPrincipal);

        if (hadoopConfigPath.length() > 0 && !isHftpBasedFetch) {

            config.addResource(new Path(hadoopConfigPath + "/core-site.xml"));
            config.addResource(new Path(hadoopConfigPath + "/hdfs-site.xml"));

            String security = config.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION);

            if (security == null || !security.equals("kerberos")) {
                logger.error("Security isn't turned on in the conf: "
                        + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION + " = "
                        + config.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION));
                logger.error("Please make sure that the Hadoop config directory path is valid.");
                return null;
            } else {
                logger.info("Security is turned on in the conf. Trying to authenticate ...");

            }
        }

        try {

            if (HdfsFetcher.keytabPath.length() > 0 && !isHftpBasedFetch) {

                if (!new File(HdfsFetcher.keytabPath).exists()) {
                    logger.error("Invalid keytab file path. Please provide a valid keytab path");
                    return null;
                }

                // First login using the specified principal and keytab file
                UserGroupInformation.setConfiguration(config);
                UserGroupInformation.loginUserFromKeytab(HdfsFetcher.kerberosPrincipal, HdfsFetcher.keytabPath);

                /*
                 * If login is successful, get the filesystem object. NOTE:
                 * Ideally we do not need a doAs block for this. Consider
                 * removing it in the future once the Hadoop jars have the
                 * corresponding patch (tracked in the Hadoop Apache
                 * project: HDFS-3367)
                 */
                try {
                    logger.info("I've logged in and am now Doasing as "
                            + UserGroupInformation.getCurrentUser().getUserName());
                    fs = UserGroupInformation.getCurrentUser()
                            .doAs(new PrivilegedExceptionAction<FileSystem>() {

                                public FileSystem run() throws Exception {
                                    FileSystem fs = path.getFileSystem(config);
                                    return fs;
                                }
                            });
                } catch (InterruptedException e) {
                    logger.error(e.getMessage());
                } catch (Exception e) {
                    logger.error("Got an exception while getting the filesystem object: ");
                    logger.error("Exception class : " + e.getClass());
                    e.printStackTrace();
                    for (StackTraceElement et : e.getStackTrace()) {
                        logger.error(et.toString());
                    }
                }
            } else {
                fs = path.getFileSystem(config);
            }

        } catch (IOException e) {
            e.printStackTrace();
            logger.error("Error in authenticating or getting the Filesystem object !!!");
            return null;
        }

        CopyStats stats = new CopyStats(sourceFileUrl, sizeOfPath(fs, path));
        jmxName = JmxUtils.registerMbean("hdfs-copy-" + copyCount.getAndIncrement(), stats);
        File destination = new File(destinationFile);

        if (destination.exists()) {
            throw new VoldemortException(
                    "Version directory " + destination.getAbsolutePath() + " already exists");
        }

        logger.info("Starting fetch for : " + sourceFileUrl);
        boolean result = fetch(fs, path, destination, stats);
        logger.info("Completed fetch : " + sourceFileUrl);

        // Close the filesystem
        fs.close();

        if (result) {
            return destination;
        } else {
            return null;
        }
    } catch (IOException e) {
        logger.error("Error while getting Hadoop filesystem : " + e);
        return null;
    } finally {
        if (this.globalThrottleLimit != null) {
            this.globalThrottleLimit.decrementNumJobs();
        }
        if (jmxName != null)
            JmxUtils.unregisterMbean(jmxName);
    }
}

From source file: voldemort.store.readonly.fetcher.HdfsFetcher.java

License: Apache License

public static void main(String[] args) throws Exception {
    if (args.length < 1)
        Utils.croak("USAGE: java " + HdfsFetcher.class.getName()
                + " url [keytab location] [kerberos username] [hadoop-config-path]");
    String url = args[0];

    String keytabLocation = "";
    String kerberosUser = "";
    String hadoopPath = "";
    if (args.length == 4) {
        keytabLocation = args[1];
        kerberosUser = args[2];
        hadoopPath = args[3];
    }

    long maxBytesPerSec = 1024 * 1024 * 1024;
    Path p = new Path(url);

    final Configuration config = new Configuration();
    final URI uri = new URI(url);
    config.setInt("io.file.buffer.size", VoldemortConfig.DEFAULT_BUFFER_SIZE);
    config.set("hadoop.rpc.socket.factory.class.ClientProtocol", ConfigurableSocketFactory.class.getName());
    config.setInt("io.socket.receive.buffer", 1 * 1024 * 1024 - 10000);

    FileSystem fs = null;
    p = new Path(url);
    HdfsFetcher.keytabPath = keytabLocation;
    HdfsFetcher.kerberosPrincipal = kerberosUser;

    boolean isHftpBasedFetch = url.length() > 4 && url.substring(0, 4).equals("hftp");
    logger.info("URL : " + url + " and hftp protocol enabled = " + isHftpBasedFetch);

    if (hadoopPath.length() > 0 && !isHftpBasedFetch) {
        config.set("hadoop.security.group.mapping", "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");

        config.addResource(new Path(hadoopPath + "/core-site.xml"));
        config.addResource(new Path(hadoopPath + "/hdfs-site.xml"));

        String security = config.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION);

        if (security == null || !security.equals("kerberos")) {
            logger.info("Security isn't turned on in the conf: "
                    + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION + " = "
                    + config.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION));
            logger.info("Fix that.  Exiting.");
            return;
        } else {
            logger.info("Security is turned on in the conf. Trying to authenticate ...");
        }
    }

    try {

        // Get the filesystem object
        if (keytabLocation.length() > 0 && !isHftpBasedFetch) {
            UserGroupInformation.setConfiguration(config);
            UserGroupInformation.loginUserFromKeytab(kerberosUser, keytabLocation);

            final Path path = p;
            try {
                logger.debug("I've logged in and am now Doasing as "
                        + UserGroupInformation.getCurrentUser().getUserName());
                fs = UserGroupInformation.getCurrentUser().doAs(new PrivilegedExceptionAction<FileSystem>() {

                    public FileSystem run() throws Exception {
                        FileSystem fs = path.getFileSystem(config);
                        return fs;
                    }
                });
            } catch (InterruptedException e) {
                logger.error(e.getMessage());
            } catch (Exception e) {
                logger.error("Got an exception while getting the filesystem object: ");
                logger.error("Exception class : " + e.getClass());
                e.printStackTrace();
                for (StackTraceElement et : e.getStackTrace()) {
                    logger.error(et.toString());
                }
            }
        } else {
            fs = p.getFileSystem(config);
        }

    } catch (IOException e) {
        e.printStackTrace();
        System.err.println("Error in getting Hadoop filesystem object !!! Exiting !!!");
        System.exit(-1);
    }

    FileStatus status = fs.listStatus(p)[0];
    long size = status.getLen();
    HdfsFetcher fetcher = new HdfsFetcher(null, maxBytesPerSec, VoldemortConfig.REPORTING_INTERVAL_BYTES,
            VoldemortConfig.DEFAULT_BUFFER_SIZE, 0, keytabLocation, kerberosUser);
    long start = System.currentTimeMillis();

    File location = fetcher.fetch(url, System.getProperty("java.io.tmpdir") + File.separator + start,
            hadoopPath);

    double rate = size * Time.MS_PER_SECOND / (double) (System.currentTimeMillis() - start);
    NumberFormat nf = NumberFormat.getInstance();
    nf.setMaximumFractionDigits(2);
    System.out.println(
            "Fetch to " + location + " completed: " + nf.format(rate / (1024.0 * 1024.0)) + " MB/sec.");
    fs.close();
}

From source file: wherehows.SchemaFetch.java

License: Open Source License

public SchemaFetch(Configuration conf) throws IOException, InterruptedException {
    logger = LoggerFactory.getLogger(getClass());
    this.conf = conf;

    schemaFileWriter = new FileWriter(this.conf.get(Constant.HDFS_SCHEMA_REMOTE_PATH_KEY));
    sampleFileWriter = new FileWriter(this.conf.get(Constant.HDFS_SAMPLE_REMOTE_PATH_KEY));

    // login from kerberos, get the file system
    String principal = this.conf.get(Constant.HDFS_REMOTE_USER_KEY);
    String keyLocation = this.conf.get(Constant.HDFS_REMOTE_KEYTAB_LOCATION_KEY, null);

    if (keyLocation == null) {
        System.out.println("No keytab file location specified, will ignore the kerberos login process");
        fs = FileSystem.get(new Configuration());
    } else {
        try {
            Configuration hdfs_conf = new Configuration();
            hdfs_conf.set("hadoop.security.authentication", "Kerberos");
            hdfs_conf.set("dfs.namenode.kerberos.principal.pattern", "*");
            UserGroupInformation.setConfiguration(hdfs_conf);
            UserGroupInformation.loginUserFromKeytab(principal, keyLocation);
            fs = FileSystem.get(hdfs_conf);
        } catch (IOException e) {
            System.out.println("Failed, Try to login through kerberos. Priciple: " + principal
                    + " keytab location : " + keyLocation);
            e.printStackTrace();
            System.out.println("Use default, assume no kerbero needed");
            fs = FileSystem.get(new Configuration());
        }
    }

    // TODO Write to hdfs
    // String sampleDataFolder = "/projects/wherehows/hdfs/sample_data";
    // String cluster = this.conf.get("hdfs.cluster");
    // sampleDataAvroWriter = new AvroWriter(this.fs, sampleDataFolder + "/" + cluster, SampleDataRecord.class);
    // String schemaFolder = this.conf.get("hdfs.schema_location");

    fileAnalyzerFactory = new FileAnalyzerFactory(this.fs);
}