Example usage for java.lang Runtime freeMemory

List of usage examples for java.lang Runtime freeMemory

Introduction

On this page you can find example usages of java.lang.Runtime.freeMemory().

Prototype

public native long freeMemory();

Source Link

Document

Returns the amount of free memory in the Java Virtual Machine.

Usage

From source file:info.magnolia.cms.exchange.simple.SimpleExchangeServlet.java

/**
 * Receives a piece of activated content and reports the outcome to the
 * caller through activation response headers.
 *
 * @param request incoming activation request
 * @param response response that carries the activation status headers
 * @throws javax.servlet.ServletException
 * @throws java.io.IOException
 */
public void doGet(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    String message = "";
    String activationStatus = "";
    try {
        validateRequest(request);
        initializeContext(request);
        applyLock(request);
        receive(request);
        // activation succeeded: drop any stale cached files
        this.cacheManager.flushAll();
        activationStatus = SimpleSyndicator.ACTIVATION_SUCCESSFUL;
    } catch (OutOfMemoryError e) {
        // log heap figures to help diagnose the memory exhaustion
        Runtime rt = Runtime.getRuntime();
        StringBuilder oom = new StringBuilder(
                "---------\nOutOfMemoryError caught during activation. Total memory = ");
        oom.append(rt.totalMemory()).append(", free memory = ").append(rt.freeMemory()).append("\n---------");
        log.error(oom.toString());
        message = e.getMessage();
        activationStatus = SimpleSyndicator.ACTIVATION_FAILED;
    } catch (PathNotFoundException e) {
        // the parent node has not been activated yet
        log.error(e.getMessage(), e);
        message = "Parent not found (not yet activated): " + e.getMessage();
        activationStatus = SimpleSyndicator.ACTIVATION_FAILED;
    } catch (Throwable e) {
        // deliberately broad: any failure must still be reported to the caller
        log.error(e.getMessage(), e);
        message = e.getMessage();
        activationStatus = SimpleSyndicator.ACTIVATION_FAILED;
    } finally {
        cleanUp(request);
        response.setHeader(SimpleSyndicator.ACTIVATION_ATTRIBUTE_STATUS, activationStatus);
        response.setHeader(SimpleSyndicator.ACTIVATION_ATTRIBUTE_MESSAGE, message);
    }

}

From source file:at.salzburgresearch.vgi.vgianalyticsframework.activityanalysis.pipeline.consumer.impl.QuadtreeBuilderConsumer.java

/**
 * Checks current JVM heap usage and, when it exceeds {@code usedMemoryLimit},
 * throttles the feature limit and flushes features to a PBF file.
 * <p>
 * Repeated checks within 10 seconds tighten {@code minNumFeaturesLimiter}
 * (capped at 4); otherwise the limiter and previous limit are reset.
 */
@Override
public void checkRuntimeMemory() {
    Runtime runtime = Runtime.getRuntime();
    // used memory = heap currently allocated minus the free part of it
    long usedMemory = runtime.totalMemory() - runtime.freeMemory();

    if (usedMemory > usedMemoryLimit) {

        // If the last memory check was performed less than 10 seconds ago,
        // increase the limiter (capped at 4); otherwise reset to defaults.
        if (System.currentTimeMillis() - timeLastMemoryCheck < 1000 * 10) {
            minNumFeaturesLimiter++;
            if (minNumFeaturesLimiter > 4) {
                minNumFeaturesLimiter = 4;
            }
        } else {
            // default values
            minNumFeaturesLimiter = 1;
            previousLimit = Integer.MAX_VALUE;
            timeLastMemoryCheck = System.currentTimeMillis();
        }

        // Scale the feature limit down as memory pressure increases
        // (thresholds: limit + 1GB, + 512MB, + 256MB). The final branch is
        // unconditional because usedMemory > usedMemoryLimit already holds here.
        int limit;
        if (usedMemory > usedMemoryLimit + 1024L * 1024 * 1024) {
            limit = 0;
        } else if (usedMemory > usedMemoryLimit + 1024L * 1024 * 512) {
            limit = 36 / minNumFeaturesLimiter;
        } else if (usedMemory > usedMemoryLimit + 1024L * 1024 * 256) {
            limit = 360 / minNumFeaturesLimiter;
        } else {
            limit = 1800 / minNumFeaturesLimiter;
        }

        // Only write a file if the current limit is lower than the previous one
        if (limit < previousLimit) {
            writePbfFile(limit);
            previousLimit = limit;
        }
    }
}

From source file:org.mrgeo.services.wms.WmsGenerator.java

/**
 * Entry point for WMS GET requests (GetCapabilities, GetMap, GetMosaic,
 * GetTile, DescribeTiles). Concurrency is throttled by a semaphore; cache
 * headers are set from the optional "cache" parameter.
 *
 * @see HttpServlet#doGet(HttpServletRequest request, HttpServletResponse response)
 */
@Override
protected void doGet(final HttpServletRequest request, final HttpServletResponse response)
        throws ServletException, IOException {
    final long start = System.currentTimeMillis();
    // Track whether the permit was actually obtained so the finally block
    // never releases a permit it does not own. The original released
    // unconditionally, which inflated the semaphore's permit count whenever
    // acquire() was interrupted before acquiring.
    boolean acquired = false;
    try {
        log.debug("Semaphores available: {}", semaphore.availablePermits());
        semaphore.acquire();
        acquired = true;
        log.debug("Semaphore acquired.  Semaphores available: {}", semaphore.availablePermits());

        ServletUtils.printRequestURL(request);
        ServletUtils.printRequestAttributes(request);
        ServletUtils.printRequestParams(request);

        final String cache = ServletUtils.getParamValue(request, "cache");
        if (!StringUtils.isEmpty(cache) && cache.equalsIgnoreCase("off")) {
            response.setHeader("Cache-Control", "no-store");
            response.setHeader("Pragma", "no-cache");
            response.setDateHeader("Expires", 0);
        } else {
            response.setHeader("Cache-Control", "max-age=3600");
            response.setHeader("Pragma", "");
            // setDateHeader expects an absolute time in ms since the epoch;
            // the original passed 3600, i.e. a date in 1970 (always expired).
            response.setDateHeader("Expires", System.currentTimeMillis() + 3600L * 1000L);
        }

        // default missing/empty request parameter to GetCapabilities
        String requestParam = ServletUtils.getParamValue(request, "request");
        if (requestParam == null || requestParam.isEmpty()) {
            requestParam = "GetCapabilities";
        }
        requestParam = requestParam.toLowerCase();

        String serviceParam = ServletUtils.getParamValue(request, "service");
        if (serviceParam == null || serviceParam.isEmpty()) {
            serviceParam = "wms";
        }
        if (!serviceParam.equalsIgnoreCase("wms")) {
            throw new Exception(
                    "Invalid service type was requested. (only WMS is supported '" + serviceParam + "')");
        }

        if (requestParam.equals("getmap") || requestParam.equals("getmosaic")
                || requestParam.equals("gettile")) {
            // gettile takes a single "layer"; the others take "layers"
            if (!requestParam.equals("gettile")) {
                ServletUtils.validateParam(request, "layers", "string");
            } else {
                ServletUtils.validateParam(request, "layer", "string");
            }
            ServletUtils.validateParam(request, "format", "string");
            final String cs = ServletUtils.getParamValue(request, "crs");
            if (!StringUtils.isEmpty(cs)) {
                if (!cs.equalsIgnoreCase("CRS:84")) {
                    throw new Exception("InvalidCRS: Invalid coordinate system \"" + cs
                            + "\".  Only coordinate system CRS:84 is supported.");
                }
            }

            OpImageRegistrar.registerMrGeoOps();
        }

        // TODO: Need to construct provider properties from the WebRequest using
        // a new security layer and pass those properties to MapAlgebraJob.
        Properties providerProperties = SecurityUtils.getProviderProperties();
        if (requestParam.equals("getcapabilities")) {
            getCapabilities(request, response, providerProperties);
        } else if (requestParam.equals("getmap")) {
            getMap(request, response, providerProperties);
        } else if (requestParam.equals("getmosaic")) {
            getMosaic(request, response, providerProperties);
        } else if (requestParam.equals("gettile")) {
            getTile(request, response, providerProperties);
        } else if (requestParam.equals("describetiles")) {
            describeTiles(request, response, providerProperties);
        } else {
            throw new Exception("Invalid request type made.");
        }
    } catch (final Exception e) {
        if (e instanceof InterruptedException) {
            // restore the interrupt status swallowed by this broad catch
            Thread.currentThread().interrupt();
        }
        // log through the logger instead of printStackTrace()
        log.error("Error handling WMS request", e);
        try {
            response.setContentType("text/xml");
            writeError(e, response);
        }
        // we already started writing out to HTTP, instead return an error.
        catch (final Exception exception) {
            log.warn("Exception writing error: {}", exception);
            throw new IOException("Exception while writing XML exception (ah, the irony). "
                    + "Original Exception is below. " + exception.getLocalizedMessage(), e);
        }
    } finally {
        if (acquired) {
            semaphore.release();
        }

        if (log.isDebugEnabled()) {
            log.debug("Semaphore released.  Semaphores available: {}", semaphore.availablePermits());
            log.debug("WMS request time: {}ms", (System.currentTimeMillis() - start));
            // this can be resource intensive.
            System.gc();
            final Runtime rt = Runtime.getRuntime();
            log.debug(String.format("WMS request memory: %.1fMB / %.1fMB\n",
                    (rt.totalMemory() - rt.freeMemory()) / 1e6, rt.maxMemory() / 1e6));
        }
    }
}

From source file:no.uio.medicine.virsurveillance.parsers.CSVsGBDdata.java

/**
 * Parses every GBD death-data ZIP archive in the given folder and stores the
 * all-ages records (age_group_id == 22) in the database via {@code sqlM}.
 *
 * @param deathFolder directory containing the .zip files to parse
 * @throws IOException if an archive or one of its entries cannot be read
 */
public void parse(String deathFolder) throws IOException {
    File f = new File(deathFolder);
    Runtime runtime = Runtime.getRuntime();
    if (!f.isDirectory()) {
        System.out.println("Not a directory");
        return;
    }
    String[] filesInDir = f.list();
    if (filesInDir == null) {
        // list() can return null on an I/O error even after isDirectory()
        return;
    }

    for (String fil : filesInDir) {
        if (!fil.endsWith(".zip")) {
            continue;
        }
        // try-with-resources closes the archive, the entry stream and the
        // parser even when a record fails — the original leaked all of them
        // on any exception.
        try (ZipFile zipFile = new ZipFile(deathFolder + "/" + fil)) {
            Enumeration<? extends ZipEntry> entries = zipFile.entries();

            while (entries.hasMoreElements()) {
                System.out.println(
                        "Used memory: " + (runtime.totalMemory() - runtime.freeMemory()) / (1024 * 1024)
                                + " Free memory: " + (runtime.freeMemory()) / (1024 * 1024));

                ZipEntry entry = entries.nextElement();
                try (InputStream stream = zipFile.getInputStream(entry);
                        BufferedReader br = new BufferedReader(new InputStreamReader(stream, "UTF-8"));
                        CSVParser parser = CSVFormat.RFC4180.withDelimiter(',').withIgnoreEmptyLines()
                                .withHeader().parse(br)) {

                    List<CSVRecord> records = parser.getRecords();
                    System.out.println("Reading records: " + zipFile.getName() + "/" + entry);
                    for (CSVRecord csvRecord : records) {
                        processRecord(csvRecord);
                    }
                }
            }
        }
    }
}

/**
 * Stores a single all-ages record (age_group_id == 22) in the database;
 * records for other age groups or with missing fields are ignored.
 */
private void processRecord(CSVRecord csvRecord) {
    // age group 22 corresponds to all ages
    if (!csvRecord.isMapped("age_group_id") || !csvRecord.get("age_group_id").equalsIgnoreCase("22")) {
        return;
    }
    String location = null;
    String year = null;
    String sex = null;
    String cause = null;
    String number = null;
    String metric = null;

    if (csvRecord.isMapped("location_code")) {
        location = csvRecord.get("location_code");
    }
    if (csvRecord.isMapped("year")) {
        year = csvRecord.get("year");
    }
    if (csvRecord.isMapped("sex_id")) { // 1 = male, 2 = female
        if (csvRecord.get("sex_id").equalsIgnoreCase(("1"))) {
            sex = "m";
        } else if (csvRecord.get("sex_id").equalsIgnoreCase("2")) {
            sex = "f";
        }
    }
    if (csvRecord.isMapped("cause_name")) {
        cause = csvRecord.get("cause_name");
    }
    if (csvRecord.isMapped("mean")) {
        number = csvRecord.get("mean");
    }
    if (csvRecord.isMapped("metric") && csvRecord.isMapped("unit")) {
        metric = csvRecord.get("metric") + "-" + csvRecord.get("unit");
    }

    // only persist complete records
    if (location != null && year != null && sex != null && cause != null
            && number != null && metric != null) {
        try {
            sqlM.addSanitaryIssueToCountry(location, year, sex, cause, metric, number);
        } catch (SQLException ex) {
            Logger.getLogger(CSVsGBDdata.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
}

From source file:org.fcrepo.server.security.xacml.pdp.data.FedoraPolicyStore.java

/**
 * Initializes the policy store: logs JVM memory when debugging, resolves the
 * default pid namespace, validates the configuration and — when schema
 * validation is enabled — installs a policy parser built from the configured
 * XACML 2.0 schema.
 *
 * @throws PolicyStoreException if configuration is incomplete or the schema
 *         cannot be read/parsed
 * @throws FileNotFoundException if the schema file does not exist
 */
@Override
public void init() throws PolicyStoreException, FileNotFoundException {
    if (log.isDebugEnabled()) {
        // heap figures reported in KB
        Runtime runtime = Runtime.getRuntime();
        log.debug("Total memory: " + runtime.totalMemory() / 1024);
        log.debug("Free memory: " + runtime.freeMemory() / 1024);
        log.debug("Max memory: " + runtime.maxMemory() / 1024);
    }
    super.init();
    // if no pid namespace was specified, use the default specified in fedora.fcfg
    if (pidNamespace.equals("")) {
        pidNamespace = fedoraServer.getModule("org.fcrepo.server.storage.DOManager")
                .getParameter("pidNamespace");
    }

    // check control group was supplied
    if (datastreamControlGroup.equals("")) {
        throw new PolicyStoreException(
                "No control group for policy datastreams was specified in FedoraPolicyStore configuration");
    }
    if (validateSchema) {
        String schemaLocation = schemaLocations.get(XACML20_POLICY_NS);
        if (schemaLocation == null) {
            throw new PolicyStoreException("Configuration error - no policy schema specified");
        }
        try {
            String serverHome = fedoraServer.getHomeDir().getCanonicalPath() + File.separator;

            // relative schema locations are resolved against the server home
            String schemaPath = ((schemaLocation).startsWith(File.separator) ? "" : serverHome)
                    + schemaLocation;
            // try-with-resources: the original never closed this stream (leak)
            try (FileInputStream in = new FileInputStream(schemaPath)) {
                PolicyParser policyParser = new PolicyParser(in);
                ValidationUtility.setFeslPolicyParser(policyParser);
            }
        } catch (IOException ioe) {
            throw new PolicyStoreException(ioe.getMessage(), ioe);
        } catch (SAXException se) {
            throw new PolicyStoreException(se.getMessage(), se);
        }
    }
}

From source file:org.apache.ignite.testframework.junits.IgniteConfigVariationsAbstractTest.java

/**
 * Prints heap utilization statistics (used/free/total/max) in megabytes.
 */
private void memoryUsage() {
    final int mb = 1024 * 1024;
    final Runtime rt = Runtime.getRuntime();
    final long total = rt.totalMemory();
    final long free = rt.freeMemory();

    info("##### Heap utilization statistics [MB] #####");
    info("Used Memory  (mb): " + (total - free) / mb);
    info("Free Memory  (mb): " + free / mb);
    info("Total Memory (mb): " + total / mb);
    info("Max Memory   (mb): " + rt.maxMemory() / mb);
}

From source file:com.atlassian.jira.startup.JiraSystemInfo.java

/**
 * Logs the most basic environment information (JIRA build, application
 * server, JVM, memory pools and applied patches) during startup.
 * <p/>
 * This only gets the most basic environment information to avoid bringing up
 * the JIRA world before the raw database checks are done. It MUST BE CAREFUL
 * not to access any JIRA code that will bring up the world.
 *
 * @param context - a ServletContext that the app is running in.  This may be null
 */
public void obtainBasicInfo(final ServletContext context) {
    final SystemInfoUtils systemInfoUtils = new SystemInfoUtilsImpl();
    final ReleaseInfo releaseInfo = ReleaseInfo.getReleaseInfo(ReleaseInfo.class);

    logMsg.outputHeader("Environment");

    logMsg.outputProperty("JIRA Build", buildUtilsInfo.getBuildInformation());
    logMsg.outputProperty("Build Date", String.valueOf(buildUtilsInfo.getCurrentBuildDate()));
    logMsg.outputProperty("JIRA Installation Type", releaseInfo.getInfo());

    // context may be null (see javadoc), so only log server info when present
    if (context != null) {
        logMsg.outputProperty("Application Server", context.getServerInfo() + " - Servlet API "
                + context.getMajorVersion() + "." + context.getMinorVersion());
    }
    logMsg.outputProperty("Java Version", jiraSystemProperties.getProperty("java.version", STRANGELY_UNKNOWN)
            + " - " + jiraSystemProperties.getProperty("java.vendor", STRANGELY_UNKNOWN));
    logMsg.outputProperty("Current Working Directory",
            jiraSystemProperties.getProperty("user.dir", STRANGELY_UNKNOWN));

    // JVM heap figures, converted to megabytes
    final Runtime rt = Runtime.getRuntime();
    final long maxMemory = rt.maxMemory() / MEGABYTE;
    final long totalMemory = rt.totalMemory() / MEGABYTE;
    final long freeMemory = rt.freeMemory() / MEGABYTE;
    final long usedMemory = totalMemory - freeMemory;

    logMsg.outputProperty("Maximum Allowable Memory", maxMemory + "MB");
    logMsg.outputProperty("Total Memory", totalMemory + "MB");
    logMsg.outputProperty("Free Memory", freeMemory + "MB");
    logMsg.outputProperty("Used Memory", usedMemory + "MB");

    for (final MemoryInformation memory : systemInfoUtils.getMemoryPoolInformation()) {
        logMsg.outputProperty("Memory Pool: " + memory.getName(), memory.toString());
    }
    logMsg.outputProperty("JVM Input Arguments", systemInfoUtils.getJvmInputArguments());

    // do we have any patches
    Set<AppliedPatchInfo> appliedPatches = AppliedPatches.getAppliedPatches();
    if (appliedPatches.size() > 0) {
        logMsg.outputHeader("Applied Patches");
        for (AppliedPatchInfo appliedPatch : appliedPatches) {
            logMsg.outputProperty(appliedPatch.getIssueKey(), appliedPatch.getDescription());
        }
    }
    logMsg.outputProperty("Java Compatibility Information", "JIRA version = " + buildUtilsInfo.getVersion()
            + ", Java Version = " + jiraSystemProperties.getProperty("java.version", STRANGELY_UNKNOWN));
}

From source file:org.apache.hadoop.hdfs.server.namenode.Namenode2AgentServiceImpl.java

/**
 * Collects a snapshot of NameNode status: block counts, datanode states,
 * filesystem totals, DFS capacity/usage and JVM heap figures.
 *
 * @return a map of metric name to value (mixed value types)
 */
@Override
public Map getNamenodeInfo() {
    Map<String, Object> map = new HashMap<>();
    NameNode namenode = Namenode2Agent.namenode;
    Configuration configuration = Namenode2Agent.configuration;

    map.put("hostName", namenode.getAddress(configuration).getHostName());
    map.put("port", namenode.getAddress(configuration).getPort());

    // Block
    map.put("blocksTotal", namenode.getNamesystem().getBlocksTotal());
    map.put("corruptReplicatedBlocks", namenode.getNamesystem().getCorruptReplicaBlocks());
    map.put("pendingReplicationBlocks", namenode.getNamesystem().getPendingReplicationBlocks());
    map.put("scheduledReplicationBlocks", namenode.getNamesystem().getScheduledReplicationBlocks());
    map.put("underReplicatedBlocks", namenode.getNamesystem().getUnderReplicatedBlocks());
    map.put("missingBlocks", namenode.getNamesystem().getNumberOfMissingBlocks());
    map.put("blockCapacity", namenode.getNamesystem().getBlockCapacity());

    // Node Status
    map.put("all", namenode.getNamesystem().getNumberOfDatanodes(HdfsConstants.DatanodeReportType.ALL));
    map.put("dead", namenode.getNamesystem().getNumberOfDatanodes(HdfsConstants.DatanodeReportType.DEAD));
    map.put("live", namenode.getNamesystem().getNumberOfDatanodes(HdfsConstants.DatanodeReportType.LIVE));
    map.put("decommissioning",
            namenode.getNamesystem().getNumberOfDatanodes(HdfsConstants.DatanodeReportType.DECOMMISSIONING));
    map.put("stale", namenode.getNamesystem().getNumStaleDataNodes());

    // FSNamesystem
    //map.put("defaultBlockSize", namenode.getNamesystem().getDefaultBlockSize());
    map.put("defaultBlockSize", configuration.get("dfs.blocksize"));
    map.put("totalFiles", namenode.getNamesystem().getTotalFiles());
    map.put("totalBlocks", namenode.getNamesystem().getTotalBlocks());
    map.put("totalLoad", namenode.getNamesystem().getTotalLoad());

    // DFS Capacity
    // Percentages must be computed in floating point: the original
    // "100 / capacityTotal * x" divides first, which for an integral
    // capacityTotal (a long in stock HDFS — confirm for this fork) yields 0
    // whenever the total capacity exceeds 100 bytes.
    final double capacityTotal = namenode.getNamesystem().getCapacityTotal();
    map.put("capacityRemaining", namenode.getNamesystem().getCapacityRemainingGB());
    map.put("capacityRemainingPercent", capacityTotal == 0 ? 0L
            : Math.round(100.0 * namenode.getNamesystem().getCapacityRemaining() / capacityTotal));
    map.put("capacityTotal", namenode.getNamesystem().getCapacityTotalGB());
    map.put("capacityUsed", namenode.getNamesystem().getCapacityUsedGB());
    map.put("capacityUsedNonDFS", namenode.getNamesystem().getCapacityUsedNonDFS());
    // NOTE(review): "capacityUsedPercent" is computed from the non-DFS usage,
    // matching the original code — confirm whether getCapacityUsed() was intended.
    map.put("capacityUsedPercent", capacityTotal == 0 ? 0L
            : Math.round(100.0 * namenode.getNamesystem().getCapacityUsedNonDFS() / capacityTotal));

    // DFS Usage
    map.put("free", namenode.getNamesystem().getFree());
    map.put("used", namenode.getNamesystem().getUsed());
    map.put("total", namenode.getNamesystem().getTotal());
    map.put("threads", namenode.getNamesystem().getThreads());
    map.put("startTime", namenode.getNamesystem().getStartTime());

    // JVM Heap Size (all figures in MB)
    final Runtime rt = Runtime.getRuntime();
    final long totalMemory = rt.totalMemory() / MEGA_BYTES;
    final long freeMemory = rt.freeMemory() / MEGA_BYTES;
    map.put("jvmMaxMemory", rt.maxMemory() / MEGA_BYTES);
    map.put("jvmTotalMemory", totalMemory);
    map.put("jvmFreeMemory", freeMemory);
    map.put("jvmUsedMemory", totalMemory - freeMemory);
    return map;
}

From source file:org.apache.jackrabbit.oak.plugins.segment.PartialCompactionMapTest.java

/**
 * Benchmarks memory use and insertion time of a very large compaction map:
 * 1M compacted segments with 10 records each, inserted in 1000 batches.
 * Only runs when -Dbenchmark.benchLargeMap=true is set.
 */
@Test
public void benchLargeMap() {
    assumeTrue(Boolean.getBoolean("benchmark.benchLargeMap"));
    // this benchmark needs a ~4GB heap
    assertHeapSize(4000000000L);

    map = createCompactionMap();

    Runtime runtime = Runtime.getRuntime();
    for (int iteration = 1; iteration <= 1000; iteration++) {
        Map<RecordId, RecordId> batch = randomRecordIdMap(rnd, getTracker(), 10000, 100);
        long t0 = System.nanoTime();
        for (Entry<RecordId, RecordId> mapping : batch.entrySet()) {
            map.put(mapping.getKey(), mapping.getValue());
        }
        long usedMb = (runtime.totalMemory() - runtime.freeMemory()) / (1024 * 1024);
        long elapsedMs = (System.nanoTime() - t0) / 1000000;
        log.info("Bench Large Map #" + iteration + ": " + usedMb + "MB, " + elapsedMs + "ms");
    }
}

From source file:org.apache.hadoop.hdfs.server.namenode.Namenode2AgentServiceImpl.java

/**
 * Reports the JVM heap figures (max, total, free and used) in megabytes.
 *
 * @return map of heap metric name to its value in MB
 */
@Override
public Map<String, Long> getJVMHeap() {
    final Runtime runtime = Runtime.getRuntime();
    final long total = runtime.totalMemory() / MEGA_BYTES;
    final long free = runtime.freeMemory() / MEGA_BYTES;

    Map<String, Long> heap = new HashMap<>();
    heap.put("Max Memory", runtime.maxMemory() / MEGA_BYTES);
    heap.put("Total Memory", total);
    heap.put("Free Memory", free);
    heap.put("Used Memory", total - free);
    return heap;
}