Example usage for com.google.common.cache CacheStats hitRate

Introduction

On this page you can find usage examples for com.google.common.cache.CacheStats.hitRate(), collected from open-source projects.

Prototype

public double hitRate() 

Document

Returns the ratio of cache requests which were hits. This is defined as hitCount / requestCount, or 1.0 when requestCount == 0.
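
For quick reference, here is a minimal, self-contained sketch of reading hitRate() (class and key names are illustrative, not taken from the examples below). Note that statistics collection must be enabled with recordStats() when the cache is built; otherwise stats() returns all-zero counters.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.CacheStats;
import com.google.common.cache.LoadingCache;

public class HitRateExample {
    public static void main(String[] args) {
        // recordStats() is required for stats() to report real numbers.
        LoadingCache<String, Integer> cache = CacheBuilder.newBuilder()
                .maximumSize(100)
                .recordStats()
                .build(new CacheLoader<String, Integer>() {
                    @Override
                    public Integer load(String key) {
                        return key.length();
                    }
                });

        cache.getUnchecked("guava"); // miss: triggers a load
        cache.getUnchecked("guava"); // hit: served from memory

        CacheStats stats = cache.stats();
        // One hit out of two requests, so hitRate() is 0.5.
        System.out.println("hit rate: " + stats.hitRate());
    }
}

Because hitRate() is 1.0 when no requests have been recorded, code that treats an empty cache differently should guard on requestCount() > 0, as the Spring Boot example below does.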

Usage

From source file:com.google.devtools.build.lib.runtime.CacheFileDigestsModule.java

/**
 * Adds a line to the log with cache statistics.
 *
 * @param message message to prefix to the written line
 * @param stats the cache statistics to be logged
 */
private static void logStats(String message, CacheStats stats) {
    log.info(message + ": hit count=" + stats.hitCount() + ", miss count=" + stats.missCount() + ", hit rate="
            + stats.hitRate() + ", eviction count=" + stats.evictionCount());
}

From source file:org.mapsforge.map.writer.CB_MapFileWriter.java

/**
 * Writes the map file according to the given configuration using the given data processor.
 *
 * @param configuration
 *            the configuration
 * @param dataProcessor
 *            the data processor
 * @throws IOException
 *             thrown if any IO error occurs
 */
public static void writeFile(CB_MapWriterConfiguration configuration, CB_TileBasedDataProcessor dataProcessor)
        throws IOException {
    RandomAccessFile randomAccessFile = new RandomAccessFile(configuration.getOutputFile(), "rw");

    int amountOfZoomIntervals = dataProcessor.getZoomIntervalConfiguration().getNumberOfZoomIntervals();
    ByteBuffer containerHeaderBuffer = ByteBuffer.allocate(HEADER_BUFFER_SIZE);
    // CONTAINER HEADER
    int totalHeaderSize = writeHeaderBuffer(configuration, dataProcessor, containerHeaderBuffer);

    // reset the buffer position to the mark where the zoom-interval configuration starts
    containerHeaderBuffer.reset();

    final LoadingCache<CB_TDWay, Geometry> jtsGeometryCache = CacheBuilder.newBuilder()
            .maximumSize(JTS_GEOMETRY_CACHE_SIZE)
            .concurrencyLevel(Runtime.getRuntime().availableProcessors() * 2)
            .build(new JTSGeometryCacheLoader(dataProcessor));

    // SUB FILES
    // for each zoom interval write a sub file
    long currentFileSize = totalHeaderSize;
    for (int i = 0; i < amountOfZoomIntervals; i++) {
        // SUB FILE INDEX AND DATA
        long subfileSize = writeSubfile(currentFileSize, i, dataProcessor, jtsGeometryCache, randomAccessFile,
                configuration);
        // SUB FILE META DATA IN CONTAINER HEADER
        writeSubfileMetaDataToContainerHeader(dataProcessor.getZoomIntervalConfiguration(), i, currentFileSize,
                subfileSize, containerHeaderBuffer);
        currentFileSize += subfileSize;
    }

    randomAccessFile.seek(0);
    randomAccessFile.write(containerHeaderBuffer.array(), 0, totalHeaderSize);

    // WRITE FILE SIZE TO HEADER
    long fileSize = randomAccessFile.length();
    randomAccessFile.seek(OFFSET_FILE_SIZE);
    randomAccessFile.writeLong(fileSize);

    randomAccessFile.close();

    CacheStats stats = jtsGeometryCache.stats();
    LOGGER.info("JTS Geometry cache hit rate: " + stats.hitRate());
    LOGGER.info("JTS Geometry total load time: " + stats.totalLoadTime() / 1000);

    LOGGER.info("Finished writing file.");
}

From source file:org.springframework.boot.actuate.cache.GuavaCacheStatisticsProvider.java

@Override
public CacheStatistics getCacheStatistics(CacheManager cacheManager, GuavaCache cache) {
    DefaultCacheStatistics statistics = new DefaultCacheStatistics();
    statistics.setSize(cache.getNativeCache().size());
    CacheStats guavaStats = cache.getNativeCache().stats();
    if (guavaStats.requestCount() > 0) {
        statistics.setHitRatio(guavaStats.hitRate());
        statistics.setMissRatio(guavaStats.missRate());
    }
    return statistics;
}

From source file:com.google.gerrit.server.cache.CacheMetrics.java

@Inject
public CacheMetrics(MetricMaker metrics, DynamicMap<Cache<?, ?>> cacheMap) {
    Field<String> F_NAME = Field.ofString("cache_name");

    CallbackMetric1<String, Long> memEnt = metrics.newCallbackMetric("caches/memory_cached", Long.class,
            new Description("Memory entries").setGauge().setUnit("entries"), F_NAME);
    CallbackMetric1<String, Double> memHit = metrics.newCallbackMetric("caches/memory_hit_ratio", Double.class,
            new Description("Memory hit ratio").setGauge().setUnit("percent"), F_NAME);
    CallbackMetric1<String, Long> memEvict = metrics.newCallbackMetric("caches/memory_eviction_count",
            Long.class, new Description("Memory eviction count").setGauge().setUnit("evicted entries"), F_NAME);
    CallbackMetric1<String, Long> perDiskEnt = metrics.newCallbackMetric("caches/disk_cached", Long.class,
            new Description("Disk entries used by persistent cache").setGauge().setUnit("entries"), F_NAME);
    CallbackMetric1<String, Double> perDiskHit = metrics.newCallbackMetric("caches/disk_hit_ratio",
            Double.class, new Description("Disk hit ratio for persistent cache").setGauge().setUnit("percent"),
            F_NAME);

    Set<CallbackMetric<?>> cacheMetrics = ImmutableSet.<CallbackMetric<?>>of(memEnt, memHit, memEvict,
            perDiskEnt, perDiskHit);

    metrics.newTrigger(cacheMetrics, () -> {
        for (DynamicMap.Entry<Cache<?, ?>> e : cacheMap) {
            Cache<?, ?> c = e.getProvider().get();
            String name = metricNameOf(e);
            CacheStats cstats = c.stats();
            memEnt.set(name, c.size());
            memHit.set(name, cstats.hitRate() * 100);
            memEvict.set(name, cstats.evictionCount());
            if (c instanceof PersistentCache) {
                PersistentCache.DiskStats d = ((PersistentCache) c).diskStats();
                perDiskEnt.set(name, d.size());
                perDiskHit.set(name, hitRatio(d));
            }
        }
        cacheMetrics.forEach(CallbackMetric::prune);
    });
}

From source file:com.facebook.buck.rules.keys.EventPostingRuleKeyCacheScope.java

@Override
public final void close() {
    try (SimplePerfEvent.Scope scope = SimplePerfEvent.scope(buckEventBus,
            PerfEventId.of("rule_key_cache_cleanup"))) {

        // Log stats.
        CacheStats stats = cache.getStats().minus(startStats);
        buckEventBus.post(RuleKeyCacheStatsEvent.create(stats));
        scope.update("hitRate", stats.hitRate());
        scope.update("hits", stats.hitCount());
        scope.update("misses", stats.missCount());
        scope.update("requests", stats.requestCount());
        scope.update("load_time_ns", stats.totalLoadTime());

        // Run additional cleanup.
        cleanup(scope);
    }
}

From source file:org.mapsforge.map.writer.MapFileWriter.java

/**
 * Writes the map file according to the given configuration using the given data processor.
 *
 * @param configuration the configuration
 * @param dataProcessor the data processor
 * @throws IOException thrown if any IO error occurs
 */
public static void writeFile(MapWriterConfiguration configuration, TileBasedDataProcessor dataProcessor)
        throws IOException {
    EXECUTOR_SERVICE = Executors.newFixedThreadPool(configuration.getThreads());
    RandomAccessFile randomAccessFile = new RandomAccessFile(configuration.getOutputFile(), "rw");

    int amountOfZoomIntervals = dataProcessor.getZoomIntervalConfiguration().getNumberOfZoomIntervals();
    ByteBuffer containerHeaderBuffer = ByteBuffer.allocate(HEADER_BUFFER_SIZE);
    // CONTAINER HEADER
    int totalHeaderSize = writeHeaderBuffer(configuration, dataProcessor, containerHeaderBuffer);

    // reset the buffer position to the mark where the zoom-interval configuration starts
    containerHeaderBuffer.reset();

    final LoadingCache<TDWay, Geometry> jtsGeometryCache = CacheBuilder.newBuilder()
            .maximumSize(JTS_GEOMETRY_CACHE_SIZE)
            .concurrencyLevel(Runtime.getRuntime().availableProcessors() * 2)
            .build(new JTSGeometryCacheLoader(dataProcessor));

    // SUB FILES
    // for each zoom interval write a sub file
    long currentFileSize = totalHeaderSize;
    for (int i = 0; i < amountOfZoomIntervals; i++) {
        // SUB FILE INDEX AND DATA
        long subfileSize = writeSubfile(currentFileSize, i, dataProcessor, jtsGeometryCache, randomAccessFile,
                configuration);
        // SUB FILE META DATA IN CONTAINER HEADER
        writeSubfileMetaDataToContainerHeader(dataProcessor.getZoomIntervalConfiguration(), i, currentFileSize,
                subfileSize, containerHeaderBuffer);
        currentFileSize += subfileSize;
    }

    randomAccessFile.seek(0);
    randomAccessFile.write(containerHeaderBuffer.array(), 0, totalHeaderSize);

    // WRITE FILE SIZE TO HEADER
    long fileSize = randomAccessFile.length();
    randomAccessFile.seek(OFFSET_FILE_SIZE);
    randomAccessFile.writeLong(fileSize);

    randomAccessFile.close();

    CacheStats stats = jtsGeometryCache.stats();
    LOGGER.fine("Tag values stats:\n" + OSMUtils.logValueTypeCount());
    LOGGER.info("JTS Geometry cache hit rate: " + stats.hitRate());
    LOGGER.info("JTS Geometry total load time: " + stats.totalLoadTime() / 1000);

    LOGGER.info("Finished writing file.");
}

From source file:com.ebay.pulsar.analytics.datasource.DataSourceMetaRepo.java

public Map<String, Object> getStats() {
    // Get Cache Stats: requestCount, hitRate
    Map<String, Object> map = Maps.newHashMap();
    CacheStats stats = cache.stats();

    Long reqCount = stats.requestCount();
    Double hitRate = stats.hitRate();
    map.put("requestCount", reqCount);
    map.put("hitRate", hitRate);
    return map;
}

From source file:org.auraframework.impl.cache.CacheEvictionListenerImpl.java

public void onRemoval(boolean isSize) {
    long current = System.currentTimeMillis();
    boolean haveLogging = (loggingAdapter != null && loggingAdapter.isEstablished());
    boolean emitForPressure = false;
    boolean maxTimeHasPassed;
    boolean minTimeHasPassed;

    synchronized (this) {
        maxTimeHasPassed = (current >= lastFull + maxTime);
        minTimeHasPassed = (current >= lastFull + minTime);
        if (isSize) {
            evictions++;
            if (evictions >= nextLogThreshold) {
                nextLogThreshold += interval;
                emitForPressure = true;
            }
        }
        emitForPressure = (emitForPressure || pressureMemory);
        if (emitForPressure) {
            boolean suppress = (!minTimeHasPassed || !haveLogging);

            pressureMemory = (emitForPressure && suppress);
            emitForPressure = !suppress;
        }
        if (haveLogging && (emitForPressure || maxTimeHasPassed)) {
            lastFull = current;
        }
    }

    if (haveLogging) {
        if (emitForPressure) {
            LoggingContext loggingCtx = loggingAdapter.getLoggingContext();
            CacheStats stats = cache.stats();
            loggingCtx.logCacheInfo(name, String.format("evicted %d entries for size pressure, hit rate=%.3f",
                    evictions, stats.hitRate()), cache.size(), stats);
        } else if (maxTimeHasPassed) {
            // Even without size pressure, we want to log occasionally
            LoggingContext loggingCtx = loggingAdapter.getLoggingContext();
            CacheStats stats = cache.stats();
            loggingCtx.logCacheInfo(name,
                    String.format("cache has little size pressure, hit rate=%.3f", stats.hitRate()),
                    cache.size(), stats);
        }
    }
}

From source file:it.units.malelab.ege.core.listener.collector.CacheStatistics.java

@Override
public Map<String, Object> collect(GenerationEvent generationEvent) {
    CacheStats mappingStats = (CacheStats) generationEvent.getData().get(StandardEvolver.MAPPING_CACHE_NAME);
    CacheStats fitnessStats = (CacheStats) generationEvent.getData().get(StandardEvolver.FITNESS_CACHE_NAME);
    Map<String, Object> map = new LinkedHashMap<>();
    map.put("cache.mapping.miss.count", mappingStats.missCount());
    map.put("cache.mapping.hit.rate", mappingStats.hitRate());
    map.put("cache.mapping.avg.load.penalty", mappingStats.averageLoadPenalty() / 1000);
    map.put("cache.fitness.miss.count", fitnessStats.missCount());
    map.put("cache.fitness.hit.rate", fitnessStats.hitRate());
    map.put("cache.fitness.avg.load.penalty", fitnessStats.averageLoadPenalty() / 1000);
    return map;
}

From source file:info.archinnov.achilles.internals.cache.StatementsCache.java

private void displayCacheStatistics() {

    long cacheSize = dynamicCache.size();
    CacheStats cacheStats = dynamicCache.stats();

    LOGGER.info("Total LRU cache size {}", cacheSize);
    if (cacheSize > (maxLRUCacheSize * 0.8)) {
        LOGGER.warn("Warning, the LRU prepared statements cache is over 80% full");
    }

    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("Cache statistics :");
        LOGGER.debug("\t\t- hits count : {}", cacheStats.hitCount());
        LOGGER.debug("\t\t- hits rate : {}", cacheStats.hitRate());
        LOGGER.debug("\t\t- miss count : {}", cacheStats.missCount());
        LOGGER.debug("\t\t- miss rate : {}", cacheStats.missRate());
        LOGGER.debug("\t\t- eviction count : {}", cacheStats.evictionCount());
        LOGGER.debug("\t\t- load count : {}", cacheStats.loadCount());
        LOGGER.debug("\t\t- load success count : {}", cacheStats.loadSuccessCount());
        LOGGER.debug("\t\t- load exception count : {}", cacheStats.loadExceptionCount());
        LOGGER.debug("\t\t- total load time : {}", cacheStats.totalLoadTime());
        LOGGER.debug("\t\t- average load penalty : {}", cacheStats.averageLoadPenalty());
        LOGGER.debug("");
        LOGGER.debug("");
    }
}