Example usage for java.lang.management ManagementFactory getPlatformMXBean

List of usage examples for java.lang.management ManagementFactory getPlatformMXBean

Introduction

In this page you can find the example usage for java.lang.management ManagementFactory getPlatformMXBean.

Prototype

public static <T extends PlatformManagedObject> T getPlatformMXBean(Class<T> mxbeanInterface) 

Source Link

Document

Returns the platform MXBean implementing the given mxbeanInterface which is specified to have one single instance in the Java virtual machine.

Usage

From source file:Test.java

/**
 * Demonstrates {@code ManagementFactory.getPlatformMXBean} (singleton lookup)
 * and {@code ManagementFactory.getPlatformMXBeans} (list lookup) by printing
 * JVM runtime and operating-system details to stdout.
 *
 * @param args unused
 */
public static void main(String[] args) {
    // Singleton lookup: RuntimeMXBean is specified to have exactly one
    // instance per JVM, so getPlatformMXBean returns it directly.
    RuntimeMXBean mxBean = ManagementFactory.getPlatformMXBean(RuntimeMXBean.class);

    System.out.println("JVM Name: " + mxBean.getName());
    System.out.println("JVM Specification Name: " + mxBean.getSpecName());
    System.out.println("JVM Specification Version: " + mxBean.getSpecVersion());
    System.out.println("JVM Implementation Name: " + mxBean.getVmName());
    System.out.println("JVM Implementation Vendor: " + mxBean.getVmVendor());
    System.out.println("JVM Implementation Version: " + mxBean.getVmVersion());

    // Using the getPlatformMXBeans method: returns a List because some
    // platform MXBean types may have more than one instance in a JVM.
    List<OperatingSystemMXBean> list = ManagementFactory.getPlatformMXBeans(OperatingSystemMXBean.class);
    System.out.println("size: " + list.size());
    for (OperatingSystemMXBean bean : list) {
        System.out.println("Operating System Name: " + bean.getName());
        System.out.println("Operating System Architecture: " + bean.getArch());
        System.out.println("Operating System Version: " + bean.getVersion());
    }
}

From source file:org.epics.archiverappliance.config.DefaultConfigService.java

// Post-startup hook: joins this webapp into the Hazelcast cluster (the MGMT
// webapp becomes a full cluster member; every other webapp connects as a
// native client), obtains the shared distributed maps/topic, registers
// membership and typeinfo listeners, and triggers per-webapp startup work.
@Override
public void postStartup() throws ConfigException {
    if (this.startupState != STARTUP_SEQUENCE.READY_TO_JOIN_APPLIANCE) {
        configlogger.info("Webapp is not in correct state for postStartup " + this.getWarFile().toString()
                + ". It is in " + this.startupState.toString());
        return; // Wrong lifecycle state; skip post-startup entirely.
    }

    this.startupState = STARTUP_SEQUENCE.POST_STARTUP_RUNNING;
    configlogger.info("Post startup for " + this.getWarFile().toString());

    // Inherit logging from log4j configuration.
    // Mirror the log4j cluster log level onto the platform (JUL) logger that
    // Hazelcast may use, so both logging stacks agree.
    try {
        PlatformLoggingMXBean logging = ManagementFactory.getPlatformMXBean(PlatformLoggingMXBean.class);
        if (logging != null) {
            // Presumably creates/registers the JUL logger so the
            // setLoggerLevel calls below can find it -- TODO confirm;
            // the return value is discarded.
            java.util.logging.Logger.getLogger("com.hazelcast");
            if (clusterLogger.isDebugEnabled()) {
                logging.setLoggerLevel("com.hazelcast", java.util.logging.Level.FINE.toString());
            } else if (clusterLogger.isInfoEnabled()) {
                logging.setLoggerLevel("com.hazelcast", java.util.logging.Level.INFO.toString());
            } else {
                logger.info(
                        "Setting clustering logging based on log levels for cluster." + getClass().getName());
                logging.setLoggerLevel("com.hazelcast", java.util.logging.Level.SEVERE.toString());
            }
        }

        // Same level mapping, applied to the log4j logger hierarchy.
        Logger hzMain = Logger.getLogger("com.hazelcast");
        if (clusterLogger.isDebugEnabled()) {
            hzMain.setLevel(Level.DEBUG);
        } else if (clusterLogger.isInfoEnabled()) {
            hzMain.setLevel(Level.INFO);
        } else {
            logger.info("Setting clustering logging based on log levels for cluster." + getClass().getName());
            hzMain.setLevel(Level.FATAL);
        }
    } catch (Exception ex) {
        logger.error("Exception setting logging JVM levels ", ex);
    }

    // Add this to the system props before doing anything with Hz
    System.getProperties().put("hazelcast.logging.type", "log4j");

    HazelcastInstance hzinstance = null;

    // Set the thread count to control how may threads this library spawns.
    Properties hzThreadCounts = new Properties();
    if (System.getenv().containsKey("ARCHAPPL_ALL_APPS_ON_ONE_JVM")) {
        logger.info("Reducing the generic clustering thread counts.");
        hzThreadCounts.put("hazelcast.clientengine.thread.count", "2");
        hzThreadCounts.put("hazelcast.operation.generic.thread.count", "2");
        hzThreadCounts.put("hazelcast.operation.thread.count", "2");
    }

    if (this.warFile == WAR_FILE.MGMT) {
        // The management webapps are the head honchos in the cluster. We set them up differently

        configlogger.debug("Initializing the MGMT webapp's clustering");
        // If we have a hazelcast.xml in the servlet classpath, the XmlConfigBuilder picks that up.
        // If not we use the default config found in hazelcast.jar
        // We then alter this config to suit our purposes.
        Config config = new XmlConfigBuilder().build();
        try {
            if (this.getClass().getResource("hazelcast.xml") == null) {
                logger.info("We override the default cluster config by disabling multicast discovery etc.");
                // We do not use multicast as it is not supported on all networks.
                config.getNetworkConfig().getJoin().getMulticastConfig().setEnabled(false);
                // We use TCPIP to discover the members in the cluster.
                // This is part of the config that comes from appliance.xml
                config.getNetworkConfig().getJoin().getTcpIpConfig().setEnabled(true);
                // Clear any tcpip config that comes from the default config
                // This gets rid of the localhost in the default that prevents clusters from forming..
                // If we need localhost, we'll add it back later.
                config.getNetworkConfig().getJoin().getTcpIpConfig().clear();
                // Enable interfaces; we seem to need this after 2.4 for clients to work correctly in a multi-homed environment.
                // We'll add the actual interface later below
                config.getNetworkConfig().getInterfaces().setEnabled(true);
                config.getNetworkConfig().getInterfaces().clear();

                // We don't really use the authentication provided by the tool; however, we set it to some default
                config.getGroupConfig().setName("archappl");
                config.getGroupConfig().setPassword("archappl");

                // Backup count is 1 by default; we set it explicitly however...
                config.getMapConfig("default").setBackupCount(1);

                config.setProperty("hazelcast.logging.type", "log4j");
            } else {
                logger.debug(
                        "There is a hazelcast.xml in the classpath; skipping default configuration in the code.");
            }
        } catch (Exception ex) {
            throw new ConfigException("Exception configuring cluster", ex);
        }

        config.setInstanceName(myIdentity);

        if (!hzThreadCounts.isEmpty()) {
            logger.info("Reducing the generic clustering thread counts.");
            config.getProperties().putAll(hzThreadCounts);
        }

        try {
            // Cluster address for this appliance comes from appliances.xml as "host:port".
            String[] myAddrParts = myApplianceInfo.getClusterInetPort().split(":");
            String myHostName = myAddrParts[0];
            InetAddress myInetAddr = InetAddress.getByName(myHostName);
            if (!myHostName.equals("localhost") && myInetAddr.isLoopbackAddress()) {
                logger.info("Address for this appliance -- " + myInetAddr.toString()
                        + " is a loopback address. Changing this to 127.0.0.1 to clustering happy");
                myInetAddr = InetAddress.getByName("127.0.0.1");
            }
            int myClusterPort = Integer.parseInt(myAddrParts[1]);

            logger.debug("We do not let the port auto increment for the MGMT webap");
            config.getNetworkConfig().setPortAutoIncrement(false);

            config.getNetworkConfig().setPort(myClusterPort);
            config.getNetworkConfig().getInterfaces().addInterface(myInetAddr.getHostAddress());
            configlogger.info("Setting my cluster port base to " + myClusterPort + " and using interface "
                    + myInetAddr.getHostAddress());

            // Seed TCP/IP discovery with every other appliance listed in appliances.xml.
            for (ApplianceInfo applInfo : appliances.values()) {
                if (applInfo.getIdentity().equals(myIdentity) && this.warFile == WAR_FILE.MGMT) {
                    logger.debug("Not adding myself to the discovery process when I am the mgmt webapp");
                } else {
                    String[] addressparts = applInfo.getClusterInetPort().split(":");
                    String inetaddrpart = addressparts[0];
                    try {
                        InetAddress inetaddr = InetAddress.getByName(inetaddrpart);
                        if (!inetaddrpart.equals("localhost") && inetaddr.isLoopbackAddress()) {
                            logger.info("Address for appliance " + applInfo.getIdentity() + " -  "
                                    + inetaddr.toString()
                                    + " is a loopback address. Changing this to 127.0.0.1 to clustering happy");
                            inetaddr = InetAddress.getByName("127.0.0.1");
                        }
                        int clusterPort = Integer.parseInt(addressparts[1]);
                        logger.info("Adding " + applInfo.getIdentity()
                                + " from appliances.xml to the cluster discovery using cluster inetport "
                                + inetaddr.toString() + ":" + clusterPort);
                        config.getNetworkConfig().getJoin().getTcpIpConfig()
                                .addMember(inetaddr.getHostAddress() + ":" + clusterPort);
                    } catch (UnknownHostException ex) {
                        // Unresolvable appliances are skipped rather than failing the whole startup.
                        configlogger.info("Cannnot resolve the IP address for appliance " + inetaddrpart
                                + ". Skipping adding this appliance to the cliuster.");
                    }
                }
            }
            hzinstance = Hazelcast.newHazelcastInstance(config);
        } catch (Exception ex) {
            throw new ConfigException("Exception adding member to cluster", ex);
        }
    } else {
        // All other webapps are "native" clients.
        try {
            configlogger.debug("Initializing a non-mgmt webapp's clustering");
            ClientConfig clientConfig = new ClientConfig();
            clientConfig.getGroupConfig().setName("archappl");
            clientConfig.getGroupConfig().setPassword("archappl");
            clientConfig.setExecutorPoolSize(4);
            // Non mgmt client can only connect to their MGMT webapp.
            String[] myAddrParts = myApplianceInfo.getClusterInetPort().split(":");
            String myHostName = myAddrParts[0];
            InetAddress myInetAddr = InetAddress.getByName(myHostName);
            if (!myHostName.equals("localhost") && myInetAddr.isLoopbackAddress()) {
                logger.info("Address for this appliance -- " + myInetAddr.toString()
                        + " is a loopback address. Changing this to 127.0.0.1 to clustering happy");
                myInetAddr = InetAddress.getByName("127.0.0.1");
            }
            int myClusterPort = Integer.parseInt(myAddrParts[1]);

            configlogger.debug(this.warFile + " connecting as a native client to " + myInetAddr.getHostAddress()
                    + ":" + myClusterPort);
            clientConfig.getNetworkConfig().addAddress(myInetAddr.getHostAddress() + ":" + myClusterPort);
            clientConfig.setProperty("hazelcast.logging.type", "log4j");

            if (!hzThreadCounts.isEmpty()) {
                logger.info("Reducing the generic clustering thread counts.");
                clientConfig.getProperties().putAll(hzThreadCounts);
            }

            if (!clusterLogger.isDebugEnabled()) {
                // The client code logs some SEVERE exceptions on shutdown when deploying on the same Tomcat container.
                // These exceptions are confusing; ideally, we would not have to set the log levels like so.
                Logger.getLogger("com.hazelcast.client.spi.impl.ClusterListenerThread").setLevel(Level.OFF);
                Logger.getLogger("com.hazelcast.client.spi.ClientPartitionService").setLevel(Level.OFF);
            }
            hzinstance = HazelcastClient.newHazelcastClient(clientConfig);
        } catch (Exception ex) {
            throw new ConfigException("Exception adding client to cluster", ex);
        }
    }

    // Obtain handles to the cluster-wide distributed maps/topic shared by all webapps.
    pv2appliancemapping = hzinstance.getMap("pv2appliancemapping");
    namedFlags = hzinstance.getMap("namedflags");
    typeInfos = hzinstance.getMap("typeinfo");
    archivePVRequests = hzinstance.getMap("archivePVRequests");
    channelArchiverDataServers = hzinstance.getMap("channelArchiverDataServers");
    clusterInet2ApplianceIdentity = hzinstance.getMap("clusterInet2ApplianceIdentity");
    aliasNamesToRealNames = hzinstance.getMap("aliasNamesToRealNames");
    pv2ChannelArchiverDataServer = hzinstance.getMap("pv2ChannelArchiverDataServer");
    pubSub = hzinstance.getTopic("pubSub");

    // Registered at index 0 -- presumably so clustering shuts down before the
    // other hooks run; confirm how shutdownHooks is executed elsewhere.
    final HazelcastInstance shutdownHzInstance = hzinstance;
    shutdownHooks.add(0, new Runnable() {
        @Override
        public void run() {
            logger.debug("Shutting down clustering instance in webapp " + warFile.toString());
            shutdownHzInstance.shutdown();
        }
    });

    if (this.warFile == WAR_FILE.MGMT) {
        // MGMT maintains the authoritative map of cluster address -> appliance identity
        // and keeps appliancesInCluster in sync via entry and membership listeners.
        Cluster cluster = hzinstance.getCluster();
        String localInetPort = getMemberKey(cluster.getLocalMember());
        clusterInet2ApplianceIdentity.put(localInetPort, myIdentity);
        logger.debug("Adding myself " + myIdentity + " as having inetport " + localInetPort);
        hzinstance.getMap("clusterInet2ApplianceIdentity")
                .addEntryListener(new EntryAddedListener<Object, Object>() {
                    @Override
                    public void entryAdded(EntryEvent<Object, Object> event) {
                        String appliden = (String) event.getValue();
                        appliancesInCluster.add(appliden);
                        logger.info("Adding appliance " + appliden
                                + " to the list of active appliances as inetport " + ((String) event.getKey()));
                    }
                }, true);
        hzinstance.getMap("clusterInet2ApplianceIdentity")
                .addEntryListener(new EntryRemovedListener<Object, Object>() {
                    @Override
                    public void entryRemoved(EntryEvent<Object, Object> event) {
                        String appliden = (String) event.getValue();
                        appliancesInCluster.remove(appliden);
                        logger.info("Removing appliance " + appliden
                                + " from the list of active appliancesas inetport "
                                + ((String) event.getKey()));
                    }
                }, true);

        logger.debug(
                "Establishing a cluster membership listener to detect when appliances drop off the cluster");
        cluster.addMembershipListener(new MembershipListener() {
            public void memberAdded(MembershipEvent membersipEvent) {
                Member member = membersipEvent.getMember();
                String inetPort = getMemberKey(member);
                if (clusterInet2ApplianceIdentity.containsKey(inetPort)) {
                    String appliden = clusterInet2ApplianceIdentity.get(inetPort);
                    appliancesInCluster.add(appliden);
                    configlogger.info("Adding newly started appliance " + appliden
                            + " to the list of active appliances for inetport " + inetPort);
                } else {
                    logger.debug("Skipping adding appliance using inetport " + inetPort
                            + " to the list of active instances as we do not have a mapping to its identity");
                }
            }

            public void memberRemoved(MembershipEvent membersipEvent) {
                Member member = membersipEvent.getMember();
                String inetPort = getMemberKey(member);
                if (clusterInet2ApplianceIdentity.containsKey(inetPort)) {
                    String appliden = clusterInet2ApplianceIdentity.get(inetPort);
                    appliancesInCluster.remove(appliden);
                    configlogger.info("Removing appliance " + appliden + " from the list of active appliances");
                } else {
                    configlogger.debug("Received member removed event for " + inetPort);
                }
            }

            @Override
            public void memberAttributeChanged(MemberAttributeEvent membersipEvent) {
                Member member = membersipEvent.getMember();
                String inetPort = getMemberKey(member);
                configlogger.debug("Received membership attribute changed event for " + inetPort);
            }
        });

        logger.debug(
                "Adding the current members in the cluster after establishing the cluster membership listener");
        for (Member member : cluster.getMembers()) {
            String mbrInetPort = getMemberKey(member);
            logger.debug("Found member " + mbrInetPort);
            if (clusterInet2ApplianceIdentity.containsKey(mbrInetPort)) {
                String appliden = clusterInet2ApplianceIdentity.get(mbrInetPort);
                appliancesInCluster.add(appliden);
                logger.info("Adding appliance " + appliden + " to the list of active appliances for inetport "
                        + mbrInetPort);
            } else {
                logger.debug("Skipping adding appliance using inetport " + mbrInetPort
                        + " to the list of active instances as we do not have a mapping to its identity");
            }
        }
        logger.info("Established subscription(s) for appliance availability");

        // Optionally seed the distributed namedFlags map from a properties file.
        if (this.getInstallationProperties().containsKey(ARCHAPPL_NAMEDFLAGS_PROPERTIES_FILE_PROPERTY)) {
            String namedFlagsFileName = (String) this.getInstallationProperties()
                    .get(ARCHAPPL_NAMEDFLAGS_PROPERTIES_FILE_PROPERTY);
            configlogger.info("Loading named flags from file " + namedFlagsFileName);
            File namedFlagsFile = new File(namedFlagsFileName);
            if (!namedFlagsFile.exists()) {
                configlogger.error(
                        "File containing named flags " + namedFlagsFileName + " specified but not present");
            } else {
                Properties namedFlagsFromFile = new Properties();
                try (FileInputStream is = new FileInputStream(namedFlagsFile)) {
                    namedFlagsFromFile.load(is);
                    for (Object namedFlagFromFile : namedFlagsFromFile.keySet()) {
                        try {
                            String namedFlagFromFileStr = (String) namedFlagFromFile;
                            Boolean namedFlagFromFileValue = Boolean
                                    .parseBoolean((String) namedFlagsFromFile.get(namedFlagFromFileStr));
                            logger.debug("Setting named flag " + namedFlagFromFileStr + " to "
                                    + namedFlagFromFileValue);
                            this.namedFlags.put(namedFlagFromFileStr, namedFlagFromFileValue);
                        } catch (Exception ex) {
                            logger.error("Exception loading named flag from file" + namedFlagsFileName, ex);
                        }
                    }
                } catch (Exception ex) {
                    configlogger.error("Exception loading named flags from " + namedFlagsFileName, ex);
                }
            }
        }
    }

    if (this.warFile == WAR_FILE.ENGINE) {
        // It can take a while for the engine to start up.
        // We probably want to do this in the background so that the appliance as a whole starts up quickly and we get retrieval up and running quickly.
        this.startupExecutor.schedule(new Runnable() {
            @Override
            public void run() {
                try {
                    logger.debug("Starting up the engine's channels on startup.");
                    archivePVSonStartup();
                    logger.debug("Done starting up the engine's channels in startup.");
                } catch (Throwable t) {
                    configlogger.fatal("Exception starting up the engine channels on startup", t);
                }
            }
        }, 1, TimeUnit.SECONDS);
    } else if (this.warFile == WAR_FILE.ETL) {
        this.etlPVLookup.postStartup();
    } else if (this.warFile == WAR_FILE.MGMT) {
        pvsForThisAppliance = new ConcurrentSkipListSet<String>();
        pausedPVsForThisAppliance = new ConcurrentSkipListSet<String>();

        initializePersistenceLayer();

        loadTypeInfosFromPersistence();

        loadAliasesFromPersistence();

        loadArchiveRequestsFromPersistence();

        loadExternalServersFromPersistence();

        registerForNewExternalServers(hzinstance.getMap("channelArchiverDataServers"));

        // Cache the aggregate of all the PVs that are registered to this appliance.
        logger.debug("Building a local aggregate of PV infos that are registered to this appliance");
        for (String pvName : getPVsForThisAppliance()) {
            if (!pvsForThisAppliance.contains(pvName)) {
                applianceAggregateInfo.addInfoForPV(pvName, this.getTypeInfoForPV(pvName), this);
            }
        }
    }

    // Register for changes to the typeinfo map.
    // Each listener re-publishes the change on the local event bus and keeps
    // the persistence layer in sync (add/update persist; remove deletes).
    logger.info("Registering for changes to typeinfos");
    hzinstance.getMap("typeinfo").addEntryListener(new EntryAddedListener<Object, Object>() {
        @Override
        public void entryAdded(EntryEvent<Object, Object> entryEvent) {
            logger.debug("Received entryAdded for pvTypeInfo");
            PVTypeInfo typeInfo = (PVTypeInfo) entryEvent.getValue();
            String pvName = typeInfo.getPvName();
            eventBus.post(new PVTypeInfoEvent(pvName, typeInfo, ChangeType.TYPEINFO_ADDED));
            if (persistanceLayer != null) {
                try {
                    persistanceLayer.putTypeInfo(pvName, typeInfo);
                } catch (Exception ex) {
                    logger.error("Exception persisting pvTypeInfo for pv " + pvName, ex);
                }
            }
        }
    }, true);
    hzinstance.getMap("typeinfo").addEntryListener(new EntryRemovedListener<Object, Object>() {
        @Override
        public void entryRemoved(EntryEvent<Object, Object> entryEvent) {
            PVTypeInfo typeInfo = (PVTypeInfo) entryEvent.getOldValue();
            String pvName = typeInfo.getPvName();
            logger.info("Received entryRemoved for pvTypeInfo " + pvName);
            eventBus.post(new PVTypeInfoEvent(pvName, typeInfo, ChangeType.TYPEINFO_DELETED));
            if (persistanceLayer != null) {
                try {
                    persistanceLayer.deleteTypeInfo(pvName);
                } catch (Exception ex) {
                    logger.error("Exception deleting pvTypeInfo for pv " + pvName, ex);
                }
            }
        }
    }, true);
    hzinstance.getMap("typeinfo").addEntryListener(new EntryUpdatedListener<Object, Object>() {
        @Override
        public void entryUpdated(EntryEvent<Object, Object> entryEvent) {
            PVTypeInfo typeInfo = (PVTypeInfo) entryEvent.getValue();
            String pvName = typeInfo.getPvName();
            eventBus.post(new PVTypeInfoEvent(pvName, typeInfo, ChangeType.TYPEINFO_MODIFIED));
            logger.debug("Received entryUpdated for pvTypeInfo");
            if (persistanceLayer != null) {
                try {
                    persistanceLayer.putTypeInfo(pvName, typeInfo);
                } catch (Exception ex) {
                    logger.error("Exception persisting pvTypeInfo for pv " + pvName, ex);
                }
            }
        }
    }, true);

    eventBus.register(this);

    // Bridge cluster-wide pubSub messages into this JVM's event bus, but only
    // when the message is addressed to ALL or to this identity + war file.
    pubSub.addMessageListener(new MessageListener<PubSubEvent>() {
        @Override
        public void onMessage(Message<PubSubEvent> pubSubEventMsg) {
            PubSubEvent pubSubEvent = pubSubEventMsg.getMessageObject();
            if (pubSubEvent.getDestination() != null) {
                if (pubSubEvent.getDestination().equals("ALL")
                        || (pubSubEvent.getDestination().startsWith(myIdentity) && pubSubEvent.getDestination()
                                .endsWith(DefaultConfigService.this.warFile.toString()))) {
                    // We publish messages from hazelcast into this VM only if the intened WAR file is us.
                    logger.debug("Publishing event into this JVM " + pubSubEvent.generateEventDescription());
                    // In this case, we set the source as being the cluster to prevent republishing back into the cluster.
                    pubSubEvent.markSourceAsCluster();
                    eventBus.post(pubSubEvent);
                } else {
                    logger.debug("Skipping publishing event into this JVM "
                            + pubSubEvent.generateEventDescription() + " as destination is not me "
                            + DefaultConfigService.this.warFile.toString());
                }
            } else {
                logger.debug("Skipping publishing event with null destination");
            }
        }
    });

    logger.info("Done registering for changes to typeinfos");

    this.startupState = STARTUP_SEQUENCE.STARTUP_COMPLETE;
    configlogger.info("Start complete for webapp " + this.warFile);
}

From source file:org.jwebsocket.util.Tools.java

/**
 * Gets the global CPU usage percent.
 *
 * <p>On non-Windows platforms with a Java 1.6+ VM, the value is derived from
 * the JVM's reported system load average; otherwise Sigar is used to compute
 * it from the idle-CPU fraction.
 *
 * @return the estimated CPU usage as a percentage
 * @throws java.lang.Exception
 */
public static double getCpuUsage() throws Exception {
    double javaVersion = Double.parseDouble(System.getProperty("java.vm.specification.version"));
    boolean isWindows = getOperatingSystem().startsWith("Windows");

    if (isWindows || javaVersion < 1.6) {
        // Fallback path: Sigar reports the idle fraction; usage is the remainder.
        Sigar sigar = new Sigar();
        CpuPerc cpuPerc = sigar.getCpuPerc();
        return 100 - (cpuPerc.getIdle() * 100);
    }

    // getSystemLoadAverage() gives the 1-minute load average; scale it by 10
    // as a rough percentage estimate.
    OperatingSystemMXBean osBean = ManagementFactory.getPlatformMXBean(OperatingSystemMXBean.class);
    return osBean.getSystemLoadAverage() * 10;
}