Example usage for org.apache.hadoop.security SecurityUtil login

Introduction

On this page you can find example usage for org.apache.hadoop.security.SecurityUtil.login.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void login(final Configuration conf, final String keytabFileKey, final String userNameKey,
        String hostname) throws IOException 

Document

Login as a principal specified in config.
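
Before the per-project examples, here is a minimal, self-contained sketch of a direct call. The configuration key names, keytab path, principal, and hostname below are illustrative assumptions, not values mandated by Hadoop: login() simply resolves the keytab path and principal from the Configuration under whatever keys you pass, expands a _HOST token in the principal to the supplied hostname, and performs a keytab login through UserGroupInformation.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;

public class SecurityUtilLoginSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Keytab logins are only attempted when security is enabled.
        conf.set("hadoop.security.authentication", "kerberos");
        // Hypothetical key names: login() looks the values up by key,
        // so any key names work as long as they match the call below.
        conf.set("my.service.keytab.file", "/etc/security/keytabs/service.keytab");
        // The _HOST token is replaced with the hostname passed to login().
        conf.set("my.service.kerberos.principal", "service/_HOST@EXAMPLE.COM");

        UserGroupInformation.setConfiguration(conf);
        SecurityUtil.login(conf, "my.service.keytab.file",
                "my.service.kerberos.principal", "node1.example.com");
        System.out.println("Logged in as " + UserGroupInformation.getLoginUser());
    }
}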

Usage

From source file: alluxio.underfs.hdfs.HdfsUnderFileSystem.java

License: Apache License

private void login(PropertyKey keytabFileKey, String keytabFile, PropertyKey principalKey, String principal,
        String hostname) throws IOException {
    org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
    conf.set(keytabFileKey.toString(), keytabFile);
    conf.set(principalKey.toString(), principal);
    SecurityUtil.login(conf, keytabFileKey.toString(), principalKey.toString(), hostname);
}
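
Note the indirection: SecurityUtil.login does not take the keytab path or principal directly, it reads both out of a Hadoop Configuration by key. Since Alluxio keeps these settings in its own PropertyKey registry, the helper first copies them into a throwaway Hadoop Configuration and then hands the key names to login().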

From source file: com.trendmicro.hdfs.webdav.Main.java

License: Apache License

public static void main(String[] args) {

    HDFSWebDAVServlet servlet = HDFSWebDAVServlet.getServlet();
    Configuration conf = servlet.getConfiguration();

    // Process command line 

    Options options = new Options();
    options.addOption("d", "debug", false, "Enable debug logging");
    options.addOption("p", "port", true, "Port to bind to [default: 8080]");
    options.addOption("b", "bind-address", true, "Address or hostname to bind to [default: 0.0.0.0]");
    options.addOption("g", "ganglia", true, "Send Ganglia metrics to host:port [default: none]");

    CommandLine cmd = null;
    try {
        cmd = new PosixParser().parse(options, args);
    } catch (ParseException e) {
        printUsageAndExit(options, -1);
    }

    if (cmd.hasOption('d')) {
        Logger rootLogger = Logger.getLogger("com.trendmicro");
        rootLogger.setLevel(Level.DEBUG);
    }

    if (cmd.hasOption('b')) {
        conf.set("hadoop.webdav.bind.address", cmd.getOptionValue('b'));
    }

    if (cmd.hasOption('p')) {
        conf.setInt("hadoop.webdav.port", Integer.valueOf(cmd.getOptionValue('p')));
    }

    String gangliaHost = null;
    int gangliaPort = 8649;
    if (cmd.hasOption('g')) {
        String val = cmd.getOptionValue('g');
        if (val.indexOf(':') != -1) {
            String[] split = val.split(":");
            gangliaHost = split[0];
            gangliaPort = Integer.valueOf(split[1]);
        } else {
            gangliaHost = val;
        }
    }

    InetSocketAddress addr = getAddress(conf);

    // Log in the server principal from keytab

    UserGroupInformation.setConfiguration(conf);
    if (UserGroupInformation.isSecurityEnabled())
        try {
            SecurityUtil.login(conf, "hadoop.webdav.server.kerberos.keytab",
                    "hadoop.webdav.server.kerberos.principal", addr.getHostName());
        } catch (IOException e) {
            LOG.fatal("Could not log in", e);
            System.err.println("Could not log in");
            System.exit(-1);
        }

    // Set up embedded Jetty

    Server server = new Server();

    server.setSendServerVersion(false);
    server.setSendDateHeader(false);
    server.setStopAtShutdown(true);

    // Set up connector
    Connector connector = new SelectChannelConnector();
    connector.setPort(addr.getPort());
    connector.setHost(addr.getHostName());
    server.addConnector(connector);
    LOG.info("Listening on " + addr);

    // Set up context
    Context context = new Context(server, "/", Context.SESSIONS);
    // WebDAV servlet
    ServletHolder servletHolder = new ServletHolder(servlet);
    servletHolder.setInitParameter("authenticate-header", "Basic realm=\"Hadoop WebDAV Server\"");
    context.addServlet(servletHolder, "/*");
    // metrics instrumentation filter
    context.addFilter(new FilterHolder(new DefaultWebappMetricsFilter()), "/*", 0);
    // auth filter
    context.addFilter(new FilterHolder(new AuthFilter(conf)), "/*", 0);
    server.setHandler(context);

    // Set up Ganglia metrics reporting
    if (gangliaHost != null) {
        GangliaReporter.enable(1, TimeUnit.MINUTES, gangliaHost, gangliaPort);
    }

    // Start and join the server thread    
    try {
        server.start();
        server.join();
    } catch (Exception e) {
        LOG.fatal("Failed to start Jetty", e);
        System.err.println("Failed to start Jetty");
        System.exit(-1);
    }
}
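
Two details of this example are worth calling out. First, the login is guarded by UserGroupInformation.isSecurityEnabled(), so on a cluster where hadoop.security.authentication is not set to kerberos the keytab login is skipped entirely and the server runs with simple authentication. Second, the hostname passed to login() comes from the resolved bind address, so a principal configured under hadoop.webdav.server.kerberos.principal as, say, HTTP/_HOST@REALM would have _HOST expanded to the address the server actually listens on.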

From source file: net.iridiant.hdfs.webdav.Main.java

License: Apache License

public static void main(String[] args) {

    HDFSWebDAVServlet servlet = HDFSWebDAVServlet.getServlet();
    Configuration conf = servlet.getConfiguration();

    // Process command line 

    Options options = new Options();
    options.addOption("d", "debug", false, "Enable debug logging");
    options.addOption("p", "port", true, "Port to bind to [default: 8080]");
    options.addOption("b", "bind-address", true, "Address or hostname to bind to [default: 0.0.0.0]");
    options.addOption("g", "ganglia", true, "Send Ganglia metrics to host:port [default: none]");

    CommandLine cmd = null;
    try {
        cmd = new PosixParser().parse(options, args);
    } catch (ParseException e) {
        printUsageAndExit(options, -1);
    }

    if (cmd.hasOption('d')) {
        Logger rootLogger = Logger.getLogger("net.iridiant");
        rootLogger.setLevel(Level.DEBUG);
    }

    if (cmd.hasOption('b')) {
        conf.set("hadoop.webdav.bind.address", cmd.getOptionValue('b'));
    }

    if (cmd.hasOption('p')) {
        conf.setInt("hadoop.webdav.port", Integer.valueOf(cmd.getOptionValue('p')));
    }

    String gangliaHost = null;
    int gangliaPort = 8649;
    if (cmd.hasOption('g')) {
        String val = cmd.getOptionValue('g');
        if (val.indexOf(':') != -1) {
            String[] split = val.split(":");
            gangliaHost = split[0];
            gangliaPort = Integer.valueOf(split[1]);
        } else {
            gangliaHost = val;
        }
    }

    InetSocketAddress addr = getAddress(conf);

    // Log in the server principal from keytab

    UserGroupInformation.setConfiguration(conf);
    if (UserGroupInformation.isSecurityEnabled())
        try {
            SecurityUtil.login(conf, "hadoop.webdav.server.kerberos.keytab",
                    "hadoop.webdav.server.kerberos.principal", addr.getHostName());
        } catch (IOException e) {
            LOG.fatal("Could not log in", e);
            System.err.println("Could not log in");
            System.exit(-1);
        }

    // Set up embedded Jetty

    Server server = new Server();

    server.setSendServerVersion(false);
    server.setSendDateHeader(false);
    server.setStopAtShutdown(true);

    // Set up connector
    Connector connector = new SelectChannelConnector();
    connector.setPort(addr.getPort());
    connector.setHost(addr.getHostName());
    server.addConnector(connector);
    LOG.info("Listening on " + addr);

    // Set up context
    Context context = new Context(server, "/", Context.SESSIONS);
    // WebDAV servlet
    ServletHolder servletHolder = new ServletHolder(servlet);
    servletHolder.setInitParameter("authenticate-header", "Basic realm=\"Hadoop WebDAV Server\"");
    context.addServlet(servletHolder, "/*");
    // metrics instrumentation filter
    context.addFilter(new FilterHolder(new DefaultWebappMetricsFilter()), "/*", 0);
    // auth filter
    context.addFilter(new FilterHolder(new AuthFilter(conf)), "/*", 0);
    server.setHandler(context);

    // Set up Ganglia metrics reporting
    if (gangliaHost != null) {
        GangliaReporter.enable(1, TimeUnit.MINUTES, gangliaHost, gangliaPort);
    }

    // Start and join the server thread    
    try {
        server.start();
        server.join();
    } catch (Exception e) {
        LOG.fatal("Failed to start Jetty", e);
        System.err.println("Failed to start Jetty");
        System.exit(-1);
    }
}

From source file: org.apache.phoenix.queryserver.server.Main.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    logProcessInfo(getConf());
    try {
        final boolean isKerberos = "kerberos"
                .equalsIgnoreCase(getConf().get(QueryServices.QUERY_SERVER_HBASE_SECURITY_CONF_ATTRIB));

        // handle secure cluster credentials
        if (isKerberos) {
            String hostname = Strings.domainNamePointerToHostName(DNS.getDefaultHost(
                    getConf().get(QueryServices.QUERY_SERVER_DNS_INTERFACE_ATTRIB, "default"),
                    getConf().get(QueryServices.QUERY_SERVER_DNS_NAMESERVER_ATTRIB, "default")));
            if (LOG.isDebugEnabled()) {
                LOG.debug("Login to " + hostname + " using "
                        + getConf().get(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB) + " and principal "
                        + getConf().get(QueryServices.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB) + ".");
            }
            SecurityUtil.login(getConf(), QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB,
                    QueryServices.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB, hostname);
            LOG.info("Login successful.");
        }

        Class<? extends PhoenixMetaFactory> factoryClass = getConf().getClass(
                QueryServices.QUERY_SERVER_META_FACTORY_ATTRIB, PhoenixMetaFactoryImpl.class,
                PhoenixMetaFactory.class);
        int port = getConf().getInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB,
                QueryServicesOptions.DEFAULT_QUERY_SERVER_HTTP_PORT);
        LOG.debug("Listening on port " + port);
        PhoenixMetaFactory factory = factoryClass.getDeclaredConstructor(Configuration.class)
                .newInstance(getConf());
        Meta meta = factory.create(Arrays.asList(args));
        Service service = new LocalService(meta);

        // Start building the Avatica HttpServer
        final HttpServer.Builder builder = new HttpServer.Builder().withPort(port).withHandler(service,
                getSerialization(getConf()));

        // Enable SPNEGO and Impersonation when using Kerberos
        if (isKerberos) {
            UserGroupInformation ugi = UserGroupInformation.getLoginUser();

            // Make sure the proxyuser configuration is up to date
            ProxyUsers.refreshSuperUserGroupsConfiguration(getConf());

            String keytabPath = getConf().get(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB);
            File keytab = new File(keytabPath);

            // Enable SPNEGO and impersonation (through standard Hadoop configuration means)
            builder.withSpnego(ugi.getUserName()).withAutomaticLogin(keytab)
                    .withImpersonation(new PhoenixDoAsCallback(ugi));
        }

        // Build and start the HttpServer
        server = builder.build();
        server.start();
        runningLatch.countDown();
        server.join();
        return 0;
    } catch (Throwable t) {
        LOG.fatal("Unrecoverable service error. Shutting down.", t);
        this.t = t;
        return -1;
    }
}
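
Here the one-time SecurityUtil.login establishes the process-wide login user, which the SPNEGO setup then retrieves with UserGroupInformation.getLoginUser(). A hedged sketch of that post-login handoff, with the log message as an illustrative placeholder:

    // After SecurityUtil.login(...) succeeds, the static login user
    // carries the keytab-backed Kerberos identity.
    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
    if (!ugi.hasKerberosCredentials()) {
        throw new IOException("expected a Kerberos login, got " + ugi.getUserName());
    }
    LOG.info("SPNEGO will authenticate as " + ugi.getUserName());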

From source file: org.apache.phoenix.queryserver.server.QueryServer.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    logProcessInfo(getConf());
    final boolean loadBalancerEnabled = getConf().getBoolean(
            QueryServices.PHOENIX_QUERY_SERVER_LOADBALANCER_ENABLED,
            QueryServicesOptions.DEFAULT_PHOENIX_QUERY_SERVER_LOADBALANCER_ENABLED);
    try {
        final boolean isKerberos = "kerberos"
                .equalsIgnoreCase(getConf().get(QueryServices.QUERY_SERVER_HBASE_SECURITY_CONF_ATTRIB));
        final boolean disableSpnego = getConf().getBoolean(
                QueryServices.QUERY_SERVER_SPNEGO_AUTH_DISABLED_ATTRIB,
                QueryServicesOptions.DEFAULT_QUERY_SERVER_SPNEGO_AUTH_DISABLED);
        String hostname;
        final boolean disableLogin = getConf().getBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN,
                QueryServicesOptions.DEFAULT_QUERY_SERVER_DISABLE_KERBEROS_LOGIN);

        // handle secure cluster credentials
        if (isKerberos && !disableSpnego && !disableLogin) {
            hostname = Strings.domainNamePointerToHostName(DNS.getDefaultHost(
                    getConf().get(QueryServices.QUERY_SERVER_DNS_INTERFACE_ATTRIB, "default"),
                    getConf().get(QueryServices.QUERY_SERVER_DNS_NAMESERVER_ATTRIB, "default")));
            if (LOG.isDebugEnabled()) {
                LOG.debug("Login to " + hostname + " using "
                        + getConf().get(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB) + " and principal "
                        + getConf().get(QueryServices.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB) + ".");
            }
            SecurityUtil.login(getConf(), QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB,
                    QueryServices.QUERY_SERVER_KERBEROS_PRINCIPAL_ATTRIB, hostname);
            LOG.info("Login successful.");
        } else {
            hostname = InetAddress.getLocalHost().getHostName();
            LOG.info(" Kerberos is off and hostname is : " + hostname);
        }

        Class<? extends PhoenixMetaFactory> factoryClass = getConf().getClass(
                QueryServices.QUERY_SERVER_META_FACTORY_ATTRIB, PhoenixMetaFactoryImpl.class,
                PhoenixMetaFactory.class);
        int port = getConf().getInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB,
                QueryServicesOptions.DEFAULT_QUERY_SERVER_HTTP_PORT);
        LOG.debug("Listening on port " + port);
        PhoenixMetaFactory factory = factoryClass.getDeclaredConstructor(Configuration.class)
                .newInstance(getConf());
        Meta meta = factory.create(Arrays.asList(args));
        Service service = new LocalService(meta);

        // Start building the Avatica HttpServer
        final HttpServer.Builder builder = new HttpServer.Builder().withPort(port).withHandler(service,
                getSerialization(getConf()));

        // Enable SPNEGO and Impersonation when using Kerberos
        if (isKerberos) {
            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            LOG.debug("Current user is " + ugi);
            if (!ugi.hasKerberosCredentials()) {
                ugi = UserGroupInformation.getLoginUser();
                LOG.debug("Current user does not have Kerberos credentials, using instead " + ugi);
            }

            // Make sure the proxyuser configuration is up to date
            ProxyUsers.refreshSuperUserGroupsConfiguration(getConf());

            String keytabPath = getConf().get(QueryServices.QUERY_SERVER_KEYTAB_FILENAME_ATTRIB);
            File keytab = new File(keytabPath);

            String realmsString = getConf().get(QueryServices.QUERY_SERVER_KERBEROS_ALLOWED_REALMS, null);
            String[] additionalAllowedRealms = null;
            if (null != realmsString) {
                additionalAllowedRealms = StringUtils.split(realmsString, ',');
            }

            // Enable SPNEGO and impersonation (through standard Hadoop configuration means)
            builder.withSpnego(ugi.getUserName(), additionalAllowedRealms).withAutomaticLogin(keytab)
                    .withImpersonation(new PhoenixDoAsCallback(ugi, getConf()));

        }
        setRemoteUserExtractorIfNecessary(builder, getConf());

        // Build and start the HttpServer
        server = builder.build();
        server.start();
        if (loadBalancerEnabled) {
            registerToServiceProvider(hostname);
        }
        runningLatch.countDown();
        server.join();
        return 0;
    } catch (Throwable t) {
        LOG.fatal("Unrecoverable service error. Shutting down.", t);
        this.t = t;
        return -1;
    } finally {
        if (loadBalancerEnabled) {
            unRegister();
        }
    }
}

From source file: tachyon.UnderFileSystemHdfs.java

License: Apache License

public void login(String keytabFileKey, String keytabFile, String principalKey, String principal,
        String hostname) throws IOException {
    Configuration conf = new Configuration();
    conf.set(keytabFileKey, keytabFile);
    conf.set(principalKey, principal);
    SecurityUtil.login(conf, keytabFileKey, principalKey, hostname);
}

From source file: tachyon.underfs.hdfs.HdfsUnderFileSystem.java

License: Apache License

private void login(String keytabFileKey, String keytabFile, String principalKey, String principal,
        String hostname) throws IOException {
    Configuration conf = new Configuration();
    conf.set(keytabFileKey, keytabFile);
    conf.set(principalKey, principal);
    SecurityUtil.login(conf, keytabFileKey, principalKey, hostname);
}

From source file: uk.ac.gla.terrier.probos.controller.ControllerServer.java

License: Open Source License

public ControllerServer(Configuration _hconf) throws IOException {
    this.yConf = new YarnConfiguration(_hconf);
    yConf.addResource("yarn-site.xml");
    UserGroupInformation.setConfiguration(yConf);

    this.pConf = new PConfiguration(_hconf);

    //do the Kerberos authentication
    if (UserGroupInformation.isSecurityEnabled()) {
        final String principal = pConf.get(PConfiguration.KEY_CONTROLLER_PRINCIPAL);
        String keytab = pConf.get(PConfiguration.KEY_CONTROLLER_KEYTAB);
        File fKeytab = new File(keytab);
        if (!fKeytab.exists()) {
            if (!fKeytab.isAbsolute()) {
                keytab = System.getProperty("probos.conf") + '/' + keytab;
                fKeytab = new File(keytab);
                pConf.set(PConfiguration.KEY_CONTROLLER_KEYTAB, keytab);
            }
            if (!fKeytab.exists())
                throw new FileNotFoundException("Could not find keytab file " + keytab);
        }

        LOG.debug("Starting login for " + principal + " using keytab " + keytab);
        SecurityUtil.login(pConf, PConfiguration.KEY_CONTROLLER_KEYTAB, PConfiguration.KEY_CONTROLLER_PRINCIPAL,
                Utils.getHostname());
        LOG.info("Switched principal to " + UserGroupInformation.getCurrentUser().getUserName());
    }

    this.mClient = MailClient.getMailClient(this.pConf);
    final String bindAddress = pConf.get(PConfiguration.KEY_CONTROLLER_BIND_ADDRESS);
    if (bindAddress == null)
        throw new IllegalArgumentException(PConfiguration.KEY_CONTROLLER_BIND_ADDRESS + " cannot be null");

    secretManager = new ControllerAPISecretManager(
            //delegationKeyUpdateInterval -- how often the master key used to sign tokens is rolled
            7 * 24 * 3600 * 1000, //YARN default is 7 days

            //delegationTokenMaxLifetime -- maximum lifetime for which a delegation token is valid,
            //i.e. how long we can keep renewing the token for
            14 * 24 * 3600 * 1000, //YARN default is 14 days

            //delegationTokenRenewInterval -- how long a token lasts between renewals
            7 * 24 * 3600 * 1000, //YARN default is 7 days

            //delegationTokenRemoverScanInterval -- how often expired tokens are removed
            3600 * 1000); //YARN default is 1 hour

    //build the client rpc server: 8027
    int port = pConf.getInt(PConfiguration.KEY_CONTROLLER_PORT, 8027);
    LOG.info("Starting RPC server for " + PBSClient.class.getSimpleName() + " on port " + port);
    clientRpcserver = new RPC.Builder(yConf).setInstance(this).setBindAddress(bindAddress)
            .setProtocol(PBSClient.class).setPort(port).setSecretManager(secretManager).
            //setVerbose(true).
            build();
    System.setProperty("hadoop.policy.file", Constants.PRODUCT_NAME + "-policy.xml");
    clientRpcserver.refreshServiceAclWithLoadedConfiguration(yConf, new ControllerPolicyProvider());

    //build the master rpc server: 8028
    port = Constants.CONTROLLER_MASTER_PORT_OFFSET + pConf.getInt(PConfiguration.KEY_CONTROLLER_PORT, 8027);
    LOG.info("Starting RPC server for " + PBSMasterClient.class.getSimpleName() + " on port " + port);
    masterRpcserver = new RPC.Builder(yConf).setInstance(new ApplicationMasterAPI()).setBindAddress(bindAddress)
            .setProtocol(PBSMasterClient.class).setPort(port).setSecretManager(secretManager).
            //setVerbose(true).
            build();
    masterRpcserver.refreshServiceAclWithLoadedConfiguration(yConf, new ControllerPolicyProvider());

    port = Constants.CONTROLLER_INTERACTIVE_PORT_OFFSET
            + pConf.getInt(PConfiguration.KEY_CONTROLLER_PORT, 8027);
    LOG.info("Starting RPC server for " + PBSInteractiveClient.class.getSimpleName() + " on port " + port);
    //build the interactive rpc server: 8026
    interactiveRpcserver = new RPC.Builder(yConf).setInstance(new InteractiveTaskAPI())
            .setBindAddress(bindAddress).setProtocol(PBSInteractiveClient.class).setPort(port)
            .setSecretManager(secretManager).
            //setVerbose(true).
            build();
    interactiveRpcserver.refreshServiceAclWithLoadedConfiguration(yConf, new ControllerPolicyProvider());

    //build the webapp UI server
    final List<Entry<String, HttpServlet>> controllerServlets = new ArrayList<>();
    controllerServlets
            .add(new MapEntry<String, HttpServlet>("/", new QstatServlet("/", controllerServlets, this)));
    controllerServlets.add(
            new MapEntry<String, HttpServlet>("/pbsnodes", new PbsnodesServlet("/", controllerServlets, this)));
    //metrics is the Servlet from metrics.dropwizard for accessing metrics
    controllerServlets.add(new MapEntry<String, HttpServlet>("/metrics", new MetricsServlet(metrics)));
    //this is the hadoop servlet for accessing anything defined in JMX
    controllerServlets.add(new MapEntry<String, HttpServlet>("/jmx", new JMXJsonServlet()));
    final int httpport = pConf.getInt(PConfiguration.KEY_CONTROLLER_HTTP_PORT,
            Constants.DEFAULT_CONTROLLER_PORT + Constants.CONTROLLER_HTTP_PORT_OFFSET);
    LOG.info("Starting Jetty ProbosControllerHttp on port " + httpport);
    webServer = new WebServer("ProbosControllerHttp", controllerServlets, httpport);
    webServer.init(pConf);

    //this thread detects yarn jobs that have ended
    watcherThread = new Thread(new ControllerWatcher());
    watcherThread.setName(ControllerWatcher.class.getSimpleName());

    //ensure we have the directory
    Path _probosFolder = new Path(pConf.get(PConfiguration.KEY_CONTROLLER_JOBDIR));
    FileSystem controllerFS = FileSystem.get(yConf);
    if (!_probosFolder.isUriPathAbsolute()) {
        _probosFolder = _probosFolder.makeQualified(controllerFS.getUri(), controllerFS.getWorkingDirectory());
        assert _probosFolder.isUriPathAbsolute();
    }
    probosFolder = _probosFolder;
    if (!controllerFS.exists(probosFolder)) {
        throw new IllegalArgumentException(probosFolder.toString() + " does not exist");
    }

    //now initialise the metrics

    //jobs.queued.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "jobs", "queued.size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            int sum = 0;
            for (int i : user2QueuedCount.values())
                sum += i;
            return sum;
        }
    });
    //jobs.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "jobs", "size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            return jobArray.size();
        }
    });
    //jobs.held.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "jobs", "held.size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            return jobHolds.size();
        }
    });

    //nodes.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "nodes", "size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            try {
                return getNodesStatus().length;
            } catch (Exception e) {
                return 0;
            }
        }
    });

    //nodes.free.size
    metrics.register(MetricRegistry.name(ControllerServer.class, "nodes", "free.size"), new Gauge<Integer>() {
        @Override
        public Integer getValue() {
            try {
                PBSNodeStatus[] nodes = getNodesStatus();
                int count = 0;
                for (PBSNodeStatus n : nodes)
                    if ("free".equals(n.getState()))
                        count++;
                return count;
            } catch (Exception e) {
                return 0;
            }
        }
    });

    runningJobs = metrics.counter(MetricRegistry.name(ControllerServer.class, "jobs", "running.counter"));
    rejectedJobs = metrics.counter(MetricRegistry.name(ControllerServer.class, "jobs", "rejected.counter"));
    killedJobs = metrics.counter(MetricRegistry.name(ControllerServer.class, "jobs", "killed.counter"));
    mailEvents = metrics.counter(MetricRegistry.name(ControllerServer.class, "mails", "counter"));
    mailFailures = metrics.counter(MetricRegistry.name(ControllerServer.class, "mails", "failure.counter"));

}
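
The distinctive part of this example is the keytab handling before the login: because SecurityUtil.login reads the keytab path from the configuration by key rather than taking it as an argument, the constructor resolves a relative keytab path against the probos.conf directory and writes the resolved path back into pConf before calling login(). The hostname argument comes from a project utility (Utils.getHostname()) rather than the DNS lookup used in the Phoenix examples above.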