Example usage for java.lang Runtime totalMemory

List of usage examples for java.lang Runtime totalMemory

Introduction

On this page you can find example usages of java.lang.Runtime.totalMemory.

Prototype

public native long totalMemory();

Source Link

Document

Returns the total amount of memory in the Java virtual machine.

Usage

From source file:org.apache.hadoop.hdfs.server.namenode.Namenode2AgentServiceImpl.java

/**
 * Collects a status snapshot of the NameNode: address, block counters, datanode
 * counts by state, filesystem totals, DFS capacity figures (with percentages),
 * and the JVM heap sizes of this process in megabytes.
 *
 * @return a map of status-metric names to their current values
 */
@Override
public Map getNamenodeInfo() {
    Map map = new HashMap<>();
    NameNode namenode = Namenode2Agent.namenode;
    Configuration configuration = Namenode2Agent.configuration;

    map.put("hostName", namenode.getAddress(configuration).getHostName());
    map.put("port", namenode.getAddress(configuration).getPort());

    // Block counters
    map.put("blocksTotal", namenode.getNamesystem().getBlocksTotal());
    map.put("corruptReplicatedBlocks", namenode.getNamesystem().getCorruptReplicaBlocks());
    map.put("pendingReplicationBlocks", namenode.getNamesystem().getPendingReplicationBlocks());
    map.put("scheduledReplicationBlocks", namenode.getNamesystem().getScheduledReplicationBlocks());
    map.put("underReplicatedBlocks", namenode.getNamesystem().getUnderReplicatedBlocks());
    map.put("missingBlocks", namenode.getNamesystem().getNumberOfMissingBlocks());
    map.put("blockCapacity", namenode.getNamesystem().getBlockCapacity());

    // Node Status
    map.put("all", namenode.getNamesystem().getNumberOfDatanodes(HdfsConstants.DatanodeReportType.ALL));
    map.put("dead", namenode.getNamesystem().getNumberOfDatanodes(HdfsConstants.DatanodeReportType.DEAD));
    map.put("live", namenode.getNamesystem().getNumberOfDatanodes(HdfsConstants.DatanodeReportType.LIVE));
    map.put("decommissioning",
            namenode.getNamesystem().getNumberOfDatanodes(HdfsConstants.DatanodeReportType.DECOMMISSIONING));
    map.put("stale", namenode.getNamesystem().getNumStaleDataNodes());

    // FSNamesystem
    map.put("defaultBlockSize", configuration.get("dfs.blocksize"));
    map.put("totalFiles", namenode.getNamesystem().getTotalFiles());
    map.put("totalBlocks", namenode.getNamesystem().getTotalBlocks());
    map.put("totalLoad", namenode.getNamesystem().getTotalLoad());

    // DFS Capacity
    // FIX: the percentages previously started with the integer expression
    // 100 / capacityTotal, which truncates to 0 whenever total capacity exceeds
    // 100 bytes (i.e. always), so both percentages were reported as 0.
    // Compute in floating point before rounding.
    map.put("capacityRemaining", namenode.getNamesystem().getCapacityRemainingGB());
    map.put("capacityRemainingPercent", Math.round(100.0 * namenode.getNamesystem().getCapacityRemaining()
            / namenode.getNamesystem().getCapacityTotal()));
    map.put("capacityTotal", namenode.getNamesystem().getCapacityTotalGB());
    map.put("capacityUsed", namenode.getNamesystem().getCapacityUsedGB());
    map.put("capacityUsedNonDFS", namenode.getNamesystem().getCapacityUsedNonDFS());
    // FIX: "capacityUsedPercent" was computed from the non-DFS usage even though
    // that figure is reported separately above; use the DFS usage so the key
    // matches the value.
    map.put("capacityUsedPercent", Math.round(100.0 * namenode.getNamesystem().getCapacityUsed()
            / namenode.getNamesystem().getCapacityTotal()));

    // DFS Usage
    map.put("free", namenode.getNamesystem().getFree());
    map.put("used", namenode.getNamesystem().getUsed());
    map.put("total", namenode.getNamesystem().getTotal());
    map.put("threads", namenode.getNamesystem().getThreads());
    map.put("startTime", namenode.getNamesystem().getStartTime());

    // JVM Heap Size, converted to megabytes (reuse the locals instead of
    // re-reading the Runtime, so total/free/used are mutually consistent)
    final Runtime rt = Runtime.getRuntime();
    final long totalMemory = rt.totalMemory() / MEGA_BYTES;
    final long freeMemory = rt.freeMemory() / MEGA_BYTES;
    map.put("jvmMaxMemory", rt.maxMemory() / MEGA_BYTES);
    map.put("jvmTotalMemory", totalMemory);
    map.put("jvmFreeMemory", freeMemory);
    map.put("jvmUsedMemory", totalMemory - freeMemory);
    return map;
}

From source file:org.apache.ignite.testframework.junits.IgniteConfigVariationsAbstractTest.java

/**
 * Prints memory usage./*w  ww . j a v a 2s.c om*/
 */
private void memoryUsage() {
    int mb = 1024 * 1024;

    Runtime runtime = Runtime.getRuntime();

    info("##### Heap utilization statistics [MB] #####");
    info("Used Memory  (mb): " + (runtime.totalMemory() - runtime.freeMemory()) / mb);
    info("Free Memory  (mb): " + runtime.freeMemory() / mb);
    info("Total Memory (mb): " + runtime.totalMemory() / mb);
    info("Max Memory   (mb): " + runtime.maxMemory() / mb);
}

From source file:org.apache.hadoop.hdfs.server.namenode.Namenode2AgentServiceImpl.java

/**
 * Reports the JVM heap figures of this process, in megabytes.
 *
 * @return a map with "Max Memory", "Total Memory", "Free Memory" and
 *         "Used Memory" entries
 */
@Override
public Map<String, Long> getJVMHeap() {
    Runtime jvm = Runtime.getRuntime();
    long total = jvm.totalMemory() / MEGA_BYTES;
    long free = jvm.freeMemory() / MEGA_BYTES;

    Map<String, Long> heap = new HashMap<>();
    heap.put("Max Memory", jvm.maxMemory() / MEGA_BYTES);
    heap.put("Total Memory", total);
    heap.put("Free Memory", free);
    heap.put("Used Memory", total - free);
    return heap;
}

From source file:org.klco.email2html.OutputWriter.java

/**
 * Writes the attachment contained in the body part to a file in a per-sent-date
 * folder under {@code outputDir}/{@code imagesSubDir}, computing a CRC32
 * checksum along the way so duplicate attachments can be skipped, and — for
 * image attachments — creating the configured renditions of the saved file.
 *
 * @param containingMessage
 *            the message this body part is contained within
 * @param part
 *            the part containing the attachment
 * @return true if the attachment was written (and any renditions attempted);
 *         false if it was skipped as a duplicate (excludeDuplicates enabled
 *         and this checksum was seen before)
 * @throws IOException
 *             Signals that an I/O exception has occurred.
 * @throws MessagingException
 *             the messaging exception
 */
public boolean writeAttachment(EmailMessage containingMessage, Part part)
        throws IOException, MessagingException {
    log.trace("writeAttachment");

    File attachmentFolder;
    File attachmentFile;
    InputStream in = null;
    OutputStream out = null;
    try {

        // Attachments are grouped into one folder per message sent date.
        attachmentFolder = new File(outputDir.getAbsolutePath() + File.separator + config.getImagesSubDir()
                + File.separator + FILE_DATE_FORMAT.format(containingMessage.getSentDate()));
        if (!attachmentFolder.exists()) {
            log.debug("Creating attachment folder");
            attachmentFolder.mkdirs();
        }

        attachmentFile = new File(attachmentFolder, part.getFileName());
        log.debug("Writing attachment file: {}", attachmentFile.getAbsolutePath());
        if (!attachmentFile.exists()) {
            attachmentFile.createNewFile();
        }

        in = new BufferedInputStream(part.getInputStream());
        out = new BufferedOutputStream(new FileOutputStream(attachmentFile));

        // Copy byte-by-byte, folding each byte into the checksum as it is written.
        log.debug("Downloading attachment");
        CRC32 checksum = new CRC32();
        for (int b = in.read(); b != -1; b = in.read()) {
            checksum.update(b);
            out.write(b);
        }

        if (this.excludeDuplicates) {
            log.debug("Computing checksum");
            long value = checksum.getValue();
            if (this.attachmentChecksums.contains(value)) {
                // Identical content was already saved earlier; discard the copy
                // we just wrote and report that nothing new was stored.
                log.info("Skipping duplicate attachment: {}", part.getFileName());
                attachmentFile.delete();
                return false;
            } else {
                attachmentChecksums.add(value);
            }
        }

        log.debug("Attachement saved");
    } finally {
        // Close both streams whether the copy succeeded, failed, or the
        // duplicate early-return above was taken.
        IOUtils.closeQuietly(out);
        IOUtils.closeQuietly(in);
    }

    if (part.getContentType().toLowerCase().startsWith("image")) {
        log.debug("Creating renditions");
        // Content-Type looks like "image/jpeg; name=..." — keep only the MIME type.
        String contentType = part.getContentType().substring(0, part.getContentType().indexOf(";"));
        log.debug("Creating renditions of type: " + contentType);

        for (Rendition rendition : renditions) {
            File renditionFile = new File(attachmentFolder, rendition.getName() + "-" + part.getFileName());
            try {
                if (!renditionFile.exists()) {
                    renditionFile.createNewFile();
                }
                log.debug("Creating rendition file: {}", renditionFile.getAbsolutePath());
                createRendition(attachmentFile, renditionFile, rendition);
                log.debug("Rendition created");
            } catch (OutOfMemoryError oome) {
                // In-JVM resize ran out of heap: log the memory state and fall
                // back to an external ImageMagick "convert" process instead.
                Runtime rt = Runtime.getRuntime();
                rt.gc();
                log.warn("Ran out of memory creating rendition: " + rendition, oome);

                log.warn("Free Memory: {}", rt.freeMemory());
                log.warn("Max Memory: {}", rt.maxMemory());
                log.warn("Total Memory: {}", rt.totalMemory());

                String[] command = null;
                if (rendition.getFill()) {
                    // Oversample to 2x height, shrink, then center-crop to the
                    // target box (fill behavior).
                    command = new String[] { "convert", attachmentFile.getAbsolutePath(), "-resize",
                            (rendition.getHeight() * 2) + "x", "-resize",
                            "'x" + (rendition.getHeight() * 2) + "<'", "-resize", "50%", "-gravity", "center",
                            "-crop", rendition.getHeight() + "x" + rendition.getWidth() + "+0+0", "+repage",
                            renditionFile.getAbsolutePath() };
                } else {
                    command = new String[] { "convert", attachmentFile.getAbsolutePath(), "-resize",
                            rendition.getHeight() + "x" + rendition.getWidth(),
                            renditionFile.getAbsolutePath() };

                }
                log.debug("Trying to resize with ImageMagick: " + StringUtils.join(command, " "));

                // NOTE(review): the Process returned by exec() is ignored — its
                // exit code and output streams are never consumed, so a failed
                // convert goes unnoticed; verify this best-effort is intended.
                rt.exec(command);
            } catch (Exception t) {
                // A failed rendition is logged but does not fail the attachment write.
                log.warn("Exception creating rendition: " + rendition, t);
            }
        }
    }
    return true;
}

From source file:edu.internet2.middleware.shibboleth.idp.StatusServlet.java

/**
 * Prints out information about the operating environment. This includes the operating system name, version
 * and architecture, the JDK version, available CPU cores, memory currently used by the JVM process, the
 * maximum amount of memory that may be used by the JVM, and the current time in UTC.
 *
 * @param out output writer to which information will be written
 */
protected void printOperatingEnvironmentInformation(PrintWriter out) {
    final long mb = 1048576;
    final Runtime jvm = Runtime.getRuntime();
    final DateTime currentTime = new DateTime(ISOChronology.getInstanceUTC());

    out.println("### Operating Environment Information");
    out.println("operating_system: " + System.getProperty("os.name"));
    out.println("operating_system_version: " + System.getProperty("os.version"));
    out.println("operating_system_architecture: " + System.getProperty("os.arch"));
    out.println("jdk_version: " + System.getProperty("java.version"));
    out.println("available_cores: " + jvm.availableProcessors());
    out.println("used_memory: " + jvm.totalMemory() / mb + "MB");
    out.println("maximum_memory: " + jvm.maxMemory() / mb + "MB");
    out.println("start_time: " + startTime.toString(dateFormat));
    out.println("current_time: " + currentTime.toString(dateFormat));
    out.println("uptime: " + (currentTime.getMillis() - startTime.getMillis()) + "ms");
}

From source file:org.apache.jackrabbit.oak.plugins.segment.PartialCompactionMapTest.java

/**
 * Benchmark: memory use of really large mappings — 1M compacted segments with
 * 10 records each, inserted in 1000 batches. Only runs when the system
 * property benchmark.benchLargeMap is set to true.
 */
@Test
public void benchLargeMap() {
    assumeTrue(Boolean.getBoolean("benchmark.benchLargeMap"));
    assertHeapSize(4000000000L);

    map = createCompactionMap();

    Runtime runtime = Runtime.getRuntime();
    for (int batch = 0; batch < 1000; batch++) {
        Map<RecordId, RecordId> ids = randomRecordIdMap(rnd, getTracker(), 10000, 100);
        long begin = System.nanoTime();
        for (Entry<RecordId, RecordId> mapping : ids.entrySet()) {
            map.put(mapping.getKey(), mapping.getValue());
        }
        // Report current heap occupancy and per-batch insertion time.
        long usedMb = (runtime.totalMemory() - runtime.freeMemory()) / (1024 * 1024);
        long elapsedMs = (System.nanoTime() - begin) / 1000000;
        log.info("Bench Large Map #" + (batch + 1) + ": " + usedMb + "MB, " + elapsedMs + "ms");
    }
}

From source file:edu.upenn.ircs.lignos.morsel.MorphLearner.java

/**
 * Builds a human-readable summary of the current JVM heap state.
 *
 * @return a line of the form "Memory status: NMB Used, MMB Remaining"
 */
private String memoryStatus() {
    final long mb = 1048576L;
    final Runtime jvm = Runtime.getRuntime();

    long used = jvm.totalMemory() - jvm.freeMemory();
    long remaining = jvm.maxMemory() - used;

    // Convert to megabytes before reporting.
    return "Memory status: " + (used / mb) + "MB Used, " + (remaining / mb) + "MB Remaining";
}

From source file:org.janusgraph.diskstorage.Backend.java

/**
 * Initializes this backend with the given configuration. Must be called before this Backend can be used.
 *
 * @param config the backend configuration used to set up the ID authority
 */
public void initialize(Configuration config) {
    try {
        //EdgeStore & VertexIndexStore
        KeyColumnValueStore idStore = storeManager.openDatabase(ID_STORE_NAME);

        // ID allocation requires key-consistent store semantics; refuse to start otherwise.
        idAuthority = null;
        if (storeFeatures.isKeyConsistent()) {
            idAuthority = new ConsistentKeyIDAuthority(idStore, storeManager, config);
        } else {
            throw new IllegalStateException(
                    "Store needs to support consistent key or transactional operations for ID manager to guarantee proper id allocations");
        }

        KeyColumnValueStore edgeStoreRaw = storeManagerLocking.openDatabase(EDGESTORE_NAME);
        KeyColumnValueStore indexStoreRaw = storeManagerLocking.openDatabase(INDEXSTORE_NAME);

        //Configure caches
        if (cacheEnabled) {
            // A configured expiration of 0 means "cache forever".
            long expirationTime = configuration.get(DB_CACHE_TIME);
            Preconditions.checkArgument(expirationTime >= 0, "Invalid cache expiration time: %s",
                    expirationTime);
            if (expirationTime == 0)
                expirationTime = ETERNAL_CACHE_EXPIRATION;

            long cacheSizeBytes;
            double cachesize = configuration.get(DB_CACHE_SIZE);
            Preconditions.checkArgument(cachesize > 0.0, "Invalid cache size specified: %s", cachesize);
            if (cachesize < 1.0) {
                //Its a percentage: take that fraction of the heap still
                // available (max heap minus currently-used memory).
                Runtime runtime = Runtime.getRuntime();
                cacheSizeBytes = (long) ((runtime.maxMemory() - (runtime.totalMemory() - runtime.freeMemory()))
                        * cachesize);
            } else {
                // Values >= 1.0 are absolute byte counts; reject implausibly small ones.
                Preconditions.checkArgument(cachesize > 1000, "Cache size is too small: %s", cachesize);
                cacheSizeBytes = (long) cachesize;
            }
            log.info("Configuring total store cache size: {}", cacheSizeBytes);
            long cleanWaitTime = configuration.get(DB_CACHE_CLEAN_WAIT);
            // The total budget is split between the edge and index store caches.
            Preconditions.checkArgument(EDGESTORE_CACHE_PERCENT + INDEXSTORE_CACHE_PERCENT == 1.0,
                    "Cache percentages don't add up!");
            long edgeStoreCacheSize = Math.round(cacheSizeBytes * EDGESTORE_CACHE_PERCENT);
            long indexStoreCacheSize = Math.round(cacheSizeBytes * INDEXSTORE_CACHE_PERCENT);

            edgeStore = new ExpirationKCVSCache(edgeStoreRaw, getMetricsCacheName(EDGESTORE_NAME),
                    expirationTime, cleanWaitTime, edgeStoreCacheSize);
            indexStore = new ExpirationKCVSCache(indexStoreRaw, getMetricsCacheName(INDEXSTORE_NAME),
                    expirationTime, cleanWaitTime, indexStoreCacheSize);
        } else {
            // Caching disabled: wrap the raw stores in pass-through caches.
            edgeStore = new NoKCVSCache(edgeStoreRaw);
            indexStore = new NoKCVSCache(indexStoreRaw);
        }

        //Just open them so that they are cached
        txLogManager.openLog(SYSTEM_TX_LOG_NAME);
        mgmtLogManager.openLog(SYSTEM_MGMT_LOG_NAME);
        txLogStore = new NoKCVSCache(storeManager.openDatabase(SYSTEM_TX_LOG_NAME));

        //Open global configuration
        KeyColumnValueStore systemConfigStore = storeManagerLocking.openDatabase(SYSTEM_PROPERTIES_STORE_NAME);
        // Global (system) configuration transactions additionally carry the
        // store's key-consistent transaction config.
        systemConfig = getGlobalConfiguration(new BackendOperation.TransactionalProvider() {
            @Override
            public StoreTransaction openTx() throws BackendException {
                return storeManagerLocking.beginTransaction(StandardBaseTransactionConfig
                        .of(configuration.get(TIMESTAMP_PROVIDER), storeFeatures.getKeyConsistentTxConfig()));
            }

            @Override
            public void close() throws BackendException {
                //Do nothing, storeManager is closed explicitly by Backend
            }
        }, systemConfigStore, configuration);
        userConfig = getConfiguration(new BackendOperation.TransactionalProvider() {
            @Override
            public StoreTransaction openTx() throws BackendException {
                return storeManagerLocking.beginTransaction(
                        StandardBaseTransactionConfig.of(configuration.get(TIMESTAMP_PROVIDER)));
            }

            @Override
            public void close() throws BackendException {
                //Do nothing, storeManager is closed explicitly by Backend
            }
        }, systemConfigStore, USER_CONFIGURATION_IDENTIFIER, configuration);

    } catch (BackendException e) {
        throw new JanusGraphException("Could not initialize backend", e);
    }
}

From source file:uk.ac.ebi.phenotype.solr.indexer.IndexerManager.java

/**
 * Print the jvm memory configuration./*from  ww w  .j a v  a  2 s  .co m*/
 */
private void printJvmMemoryConfiguration() {
    final int mb = 1024 * 1024;
    Runtime runtime = Runtime.getRuntime();
    DecimalFormat formatter = new DecimalFormat("#,###");
    logger.info("Used memory : " + (formatter.format(runtime.totalMemory() - runtime.freeMemory() / mb)));
    logger.info("Free memory : " + formatter.format(runtime.freeMemory()));
    logger.info("Total memory: " + formatter.format(runtime.totalMemory()));
    logger.info("Max memory  : " + formatter.format(runtime.maxMemory()));
}

From source file:com.ettrema.zsync.IntegrationTests.java

/**
 * Constructs an UploadMaker/UploadMakerEx, saves the Upload stream to a new File with
 * name uploadFileName, and returns that File.
 * /*  w  ww  .  j a  va2s.c  o m*/
 * @param localFile The local file to be uploaded
 * @param zsFile The zsync of the server file
 * @param uploadFileName The name of the File in which to save the upload stream
 * @return
 * @throws IOException
 */
private File makeAndSaveUpload(File localFile, File zsFile, String uploadFileName) throws IOException {
    System.out.println("------------- makeAndSaveUpload --------------------");

    System.gc();
    Runtime rt = Runtime.getRuntime();

    UploadMaker umx = new UploadMaker(localFile, zsFile);
    InputStream uploadIn = umx.makeUpload();

    File uploadFile = new File(uploadFileName);

    if (uploadFile.exists()) {
        if (!uploadFile.delete()) {
            throw new RuntimeException("Couldnt delete: " + uploadFile.getAbsolutePath());
        }
    }
    FileOutputStream uploadOut = new FileOutputStream(uploadFile);

    System.gc();
    System.out.println("Memory stats: " + formatBytes(rt.maxMemory()) + " - " + formatBytes(rt.totalMemory())
            + " - " + formatBytes(rt.freeMemory()));
    long endUsed = (rt.totalMemory() - rt.freeMemory());
    System.out.println("Start used memory: " + formatBytes(startUsed) + " end used memory: "
            + formatBytes(endUsed) + " - delta: " + formatBytes(endUsed - startUsed));
    System.out.println("");

    IOUtils.copy(uploadIn, uploadOut);
    uploadIn.close();
    uploadOut.close();

    System.out.println("Created upload of size: " + formatBytes(uploadFile.length()) + " from local file: "
            + formatBytes(localFile.length()));

    return uploadFile;

}