Example usage for com.google.common.cache LoadingCache asMap

List of usage examples for com.google.common.cache LoadingCache asMap

Introduction

On this page you can find an example usage for com.google.common.cache LoadingCache asMap.

Prototype

@Override
ConcurrentMap<K, V> asMap();

Source Link

Document

Note that although the view is modifiable, no method on the returned map will ever cause entries to be automatically loaded.

Usage

From source file:com.facebook.buck.distributed.DistributedBuildFileHashes.java

/**
 * Chains a computation onto {@code ruleKeyComputationForSideEffect} that, once the
 * rule-key computation has completed (populating the cache as a side effect),
 * snapshots the cached file hashes into an immutable list.
 *
 * @param ruleKeyComputationForSideEffect future whose completion signals that
 *        {@code remoteFileHashes} has been populated
 * @param remoteFileHashes cache whose current values are copied into the result
 * @param executorService executor on which the snapshot is taken
 * @return future yielding an immutable snapshot of the cached file hashes
 */
private static ListenableFuture<ImmutableList<BuildJobStateFileHashes>> fileHashesComputation(
        ListenableFuture<Void> ruleKeyComputationForSideEffect,
        final LoadingCache<ProjectFilesystem, BuildJobStateFileHashes> remoteFileHashes,
        ListeningExecutorService executorService) {
    return Futures.transform(ruleKeyComputationForSideEffect,
            new Function<Void, ImmutableList<BuildJobStateFileHashes>>() {
                @Override
                public ImmutableList<BuildJobStateFileHashes> apply(Void input) {
                    // asMap() is a live view; copyOf takes a consistent snapshot at call time.
                    return ImmutableList.copyOf(remoteFileHashes.asMap().values());
                }
            }, executorService);
}

From source file:com.comcast.cdn.traffic_control.traffic_router.core.dns.ZoneManager.java

/**
 * Builds the periodic maintenance task for a zone cache: evicts stale entries via
 * {@code cleanUp()}, then asks the signature manager whether each cached zone is due
 * for renewal and triggers an asynchronous {@code refresh} for those that are.
 * A RuntimeException from one zone is logged and does not abort the sweep.
 */
private static Runnable getMaintenanceRunnable(final LoadingCache<ZoneKey, Zone> cache,
        final ZoneCacheType type, final int refreshInterval) {
    return () -> {
        cache.cleanUp();

        for (final ZoneKey key : cache.asMap().keySet()) {
            try {
                if (signatureManager.needsRefresh(type, key, refreshInterval)) {
                    cache.refresh(key);
                }
            } catch (RuntimeException e) {
                LOGGER.fatal("RuntimeException caught on " + key.getClass().getSimpleName() + " for "
                        + key.getName(), e);
            }
        }
    };
}

From source file:org.waveprotocol.box.server.persistence.memory.MemorySnapshotStore.java

@Override
public ImmutableSet<WaveletId> lookup(WaveId waveId) {
    // Per-wave snapshot cache, if one has been created for this wave.
    final LoadingCache<WaveletId, MemorySnapshotAccess> perWave = access.getIfPresent(waveId);
    if (perWave == null) {
        return ImmutableSet.of();
    }

    final ImmutableSet.Builder<WaveletId> ids = ImmutableSet.builder();
    for (MemorySnapshotAccess snapshot : perWave.asMap().values()) {
        ids.add(snapshot.getWaveletName().waveletId);
    }
    return ids.build();
}

From source file:org.apereo.portal.utils.cache.TagTrackingCacheEventListener.java

/**
 * Remove all cache entries with keys that have the specified tag.
 *
 * @param tag tag identifying the cache entries to purge
 * @return total number of tagged keys removed across all caches
 */
@Override
public int purgeCacheEntries(CacheEntryTag tag) {
    final String tagType = tag.getTagType();
    final Set<Ehcache> caches = taggedCaches.getIfPresent(tagType);

    //No caches are tracked for this tag type; nothing to purge
    if (caches == null || caches.isEmpty()) {
        return 0;
    }

    int purgeCount = 0;

    //Iterate over each cache to remove the tagged entries
    for (final Ehcache cache : caches) {
        final String cacheName = cache.getName();

        //See if there are any tagged cache keys for the cache
        final LoadingCache<CacheEntryTag, Set<Object>> cacheKeys = taggedCacheKeys.getIfPresent(cacheName);
        if (cacheKeys != null) {

            //Remove the key set for this tag; asMap().remove works on the live view
            //without triggering a cache load on miss
            final Set<Object> taggedKeys = cacheKeys.asMap().remove(tag);
            if (taggedKeys != null) {
                final int keyCount = taggedKeys.size();
                purgeCount += keyCount;
                logger.debug("Removing {} keys from {} for tag {}", keyCount, cacheName, tag);

                cache.removeAll(taggedKeys);
            }
        }
    }

    return purgeCount;
}

From source file:org.apache.rocketmq.console.task.DashboardCollectTask.java

/**
 * Merges the cache's current contents with previously persisted data and writes the
 * combined map to {@code file} as JSON.
 *
 * <p>When {@code fileMap} is empty the cache contents are written as-is. Otherwise,
 * persisted lists are appended to the freshly collected ones, and any entry whose
 * fresh list is missing or empty is written back into the cache so its history
 * survives the next collection round.
 *
 * @param map cache holding the freshly collected data
 * @param fileMap data previously read from {@code file} (may be empty)
 * @param file destination file, overwritten with the merged JSON
 * @throws IOException if writing the file fails
 */
private void writeFile(LoadingCache<String, List<String>> map, Map<String, List<String>> fileMap, File file)
        throws IOException {
    Map<String, List<String>> newMap = map.asMap();
    Map<String, List<String>> resultMap = Maps.newHashMap();
    if (fileMap.isEmpty()) {
        // Snapshot rather than alias the cache's live ConcurrentMap view: serializing
        // a live view could observe concurrent mutations mid-write.
        resultMap.putAll(newMap);
    } else {
        for (Map.Entry<String, List<String>> entry : fileMap.entrySet()) {
            List<String> oldList = entry.getValue();
            List<String> newList = newMap.get(entry.getKey());
            resultMap.put(entry.getKey(), appendData(newList, oldList));
            if (newList == null || newList.isEmpty()) {
                // Seed the cache with the merged history so it is not lost next round.
                map.put(entry.getKey(), appendData(newList, oldList));
            }
        }

        // Carry over fresh entries that had no persisted counterpart.
        for (Map.Entry<String, List<String>> entry : newMap.entrySet()) {
            List<String> oldList = fileMap.get(entry.getKey());
            if (oldList == null || oldList.isEmpty()) {
                resultMap.put(entry.getKey(), entry.getValue());
            }
        }
    }
    // NOTE(review): getBytes() uses the platform default charset — confirm UTF-8 is
    // guaranteed here, or pass StandardCharsets.UTF_8 explicitly.
    Files.write(JsonUtil.obj2String(resultMap).getBytes(), file);
}

From source file:org.apache.aurora.scheduler.stats.ResourceCounter.java

/**
 * Computes arbitrary resource aggregates based on a query, a filter, and a grouping function.
 *
 * @param query Query to select tasks for aggregation.
 * @param filter Filter to apply on query result tasks.
 * @param keyFunction Function to define aggregation groupings.
 * @param <K> Key type./*from w  w  w .  jav a  2  s .c  o m*/
 * @return A map from the keys to their aggregates based on the tasks fetched.
 * @throws StorageException if there was a problem fetching tasks from storage.
 */
public <K> Map<K, Metric> computeAggregates(Query.Builder query, Predicate<ITaskConfig> filter,
        Function<ITaskConfig, K> keyFunction) throws StorageException {

    LoadingCache<K, Metric> metrics = CacheBuilder.newBuilder().build(new CacheLoader<K, Metric>() {
        @Override
        public Metric load(K key) {
            return new Metric();
        }
    });
    for (ITaskConfig task : Iterables.filter(getTasks(query), filter)) {
        metrics.getUnchecked(keyFunction.apply(task)).accumulate(task);
    }
    return metrics.asMap();
}

From source file:org.apache.aurora.scheduler.http.SchedulerzHome.java

/**
 * Fetches the scheduler landing page, accumulating tasks and cron jobs per role
 * for display.
 *
 * @return HTTP response.
 */
@GET
@Produces(MediaType.TEXT_HTML)
public Response get() {
    return fillTemplate(new Closure<StringTemplate>() {
        @Override
        public void execute(StringTemplate template) {
            template.setAttribute("cluster_name", clusterName);

            // Cache acts as a default-initializing map keyed by role name:
            // a miss creates the Role via CREATE_ROLE.
            LoadingCache<String, Role> owners = CacheBuilder.newBuilder().build(CacheLoader.from(CREATE_ROLE));

            // TODO(William Farner): Render this page without an expensive query.
            Set<IScheduledTask> tasks = Storage.Util.weaklyConsistentFetchTasks(storage, Query.unscoped());
            for (ITaskConfig task : Iterables.transform(tasks, Tasks.SCHEDULED_TO_INFO)) {
                owners.getUnchecked(task.getOwner().getRole()).accumulate(task);
            }

            // Add cron job counts for each role.
            for (IJobConfiguration job : cronScheduler.getJobs()) {
                owners.getUnchecked(job.getOwner().getRole()).accumulate(job);
            }

            template.setAttribute("owners", DisplayUtils.ROLE_ORDERING.sortedCopy(owners.asMap().values()));
        }
    });
}

From source file:org.apache.beam.runners.core.metrics.MetricsTranslation.java

/**
 * Converts accumulated {@link MetricUpdates} into Fn API user-metric protos,
 * grouped by the step name each counter, gauge, or distribution update belongs to.
 *
 * @param metricUpdates the counter, gauge, and distribution updates to convert
 * @return map from step name to the protos built for that step's updates
 */
public static Map<String, Collection<BeamFnApi.Metrics.User>> metricUpdatesToProto(
        MetricUpdates metricUpdates) {
    // Cache acts as a multimap: loading a missing step name creates its list.
    LoadingCache<String, Collection<BeamFnApi.Metrics.User>> fnMetrics = CacheBuilder.newBuilder()
            .build(new CacheLoader<String, Collection<BeamFnApi.Metrics.User>>() {
                @Override
                public Collection<BeamFnApi.Metrics.User> load(String ptransformName) {
                    return new ArrayList<>();
                }
            });

    // Counters: value only.
    for (MetricUpdates.MetricUpdate<Long> counterUpdate : metricUpdates.counterUpdates()) {
        fnMetrics.getUnchecked(counterUpdate.getKey().stepName()).add(BeamFnApi.Metrics.User.newBuilder()
                .setMetricName(metricNameToProto(counterUpdate.getKey().metricName()))
                .setCounterData(
                        BeamFnApi.Metrics.User.CounterData.newBuilder().setValue(counterUpdate.getUpdate()))
                .build());
    }

    // Gauges: latest value.
    for (MetricUpdates.MetricUpdate<GaugeData> gaugeUpdate : metricUpdates.gaugeUpdates()) {
        fnMetrics.getUnchecked(gaugeUpdate.getKey().stepName()).add(BeamFnApi.Metrics.User.newBuilder()
                .setMetricName(metricNameToProto(gaugeUpdate.getKey().metricName()))
                .setGaugeData(
                        BeamFnApi.Metrics.User.GaugeData.newBuilder().setValue(gaugeUpdate.getUpdate().value()))
                .build());
    }

    // Distributions: count/max/min/sum summary.
    for (MetricUpdates.MetricUpdate<DistributionData> distributionUpdate : metricUpdates
            .distributionUpdates()) {
        fnMetrics.getUnchecked(distributionUpdate.getKey().stepName())
                .add(BeamFnApi.Metrics.User.newBuilder()
                        .setMetricName(metricNameToProto(distributionUpdate.getKey().metricName()))
                        .setDistributionData(BeamFnApi.Metrics.User.DistributionData.newBuilder()
                                .setCount(distributionUpdate.getUpdate().count())
                                .setMax(distributionUpdate.getUpdate().max())
                                .setMin(distributionUpdate.getUpdate().min())
                                .setSum(distributionUpdate.getUpdate().sum()))
                        .build());
    }

    // Note: asMap() returns a live view of the cache, not a defensive copy.
    return fnMetrics.asMap();
}

From source file:playground.acmarmol.matsim2030.microcensus2010.MZPopulationUtils.java

/**
 * Collects duration statistics per activity type over each person's first plan and
 * logs the result formatted as MATSim config {@code <param>} lines.
 *
 * <p>Activities with an undefined end time are skipped; an undefined start time is
 * treated as the start of the day (0).
 *
 * @param population population whose first plans are analyzed
 * @throws ExecutionException if the statistics cache fails to load an entry
 */
public static void analyzeActivityTypesAndLengths(Population population) throws ExecutionException {
    // Cache doubles as a default-initializing map: an unseen activity type
    // gets a fresh SummaryStatistics instance.
    LoadingCache<String, SummaryStatistics> activityDuration = CacheBuilder.newBuilder()
            .build(CacheLoader.from(new Supplier<SummaryStatistics>() {
                @Override
                public SummaryStatistics get() {
                    return new SummaryStatistics();
                }
            }));

    for (Person p : population.getPersons().values()) {
        if (p.getPlans().isEmpty()) {
            continue;
        }

        Plan plan = p.getPlans().get(0);

        for (PlanElement pe : plan.getPlanElements()) {
            if (!(pe instanceof Activity)) {
                continue;
            }
            Activity activity = (Activity) pe;

            double startTime = activity.getStartTime();
            double endTime = activity.getEndTime();

            SummaryStatistics typeStats = activityDuration.get(activity.getType());
            if (endTime != Time.UNDEFINED_TIME) {
                if (startTime == Time.UNDEFINED_TIME) {
                    startTime = 0;
                }
                typeStats.addValue(endTime - startTime);
            }
        }
    }

    // Iterate entries directly (avoids a second lookup per key) and build with
    // StringBuilder — no concurrent access here, so StringBuffer's
    // synchronization is unnecessary.
    int i = 0;
    final StringBuilder s = new StringBuilder();
    for (final java.util.Map.Entry<String, SummaryStatistics> entry : activityDuration.asMap().entrySet()) {
        s.append(String.format("<param name=\"activityType_%d\" value=\"%s\" />\n", i, entry.getKey()));
        s.append(String.format("<param name=\"activityPriority_%d\" value=\"1\" />\n", i));
        s.append(String.format("<param name=\"activityTypicalDuration_%d\" value=\"%s\" />\n", i,
                Time.writeTime(entry.getValue().getMean())));
        s.append("\n");
        i++;
    }
    log.info("All activities:\n" + s.toString());
}

From source file:org.apache.cassandra.auth.AuthCache.java

/**
 * (Re)builds the underlying Guava cache from the current validity, update-interval,
 * and max-entries settings, carrying over entries from {@code existing} if present.
 *
 * @param existing previous cache instance, or null; its current entries are copied
 *        into the new cache so a settings change does not drop warm data
 * @return the new cache, or null when caching is disabled or the validity period
 *         is not positive
 */
private LoadingCache<K, V> initCache(LoadingCache<K, V> existing) {
    if (!enableCache.get())
        return null;

    if (getValidity() <= 0)
        return null;

    logger.info("(Re)initializing {} (validity period/update interval/max entries) ({}/{}/{})", name,
            getValidity(), getUpdateInterval(), getMaxEntries());

    LoadingCache<K, V> newcache = CacheBuilder.newBuilder()
            .refreshAfterWrite(getUpdateInterval(), TimeUnit.MILLISECONDS)
            .expireAfterWrite(getValidity(), TimeUnit.MILLISECONDS).maximumSize(getMaxEntries())
            .build(new CacheLoader<K, V>() {
                // Synchronous load on a cache miss.
                public V load(K k) {
                    return loadFunction.apply(k);
                }

                // Asynchronous refresh triggered by refreshAfterWrite: the task runs on
                // cacheRefreshExecutor; failures are logged and rethrown into the future.
                public ListenableFuture<V> reload(final K k, final V oldV) {
                    ListenableFutureTask<V> task = ListenableFutureTask.create(() -> {
                        try {
                            return loadFunction.apply(k);
                        } catch (Exception e) {
                            logger.trace("Error performing async refresh of auth data in {}", name, e);
                            throw e;
                        }
                    });
                    cacheRefreshExecutor.execute(task);
                    return task;
                }
            });
    // Migrate a snapshot of the old cache's entries into the replacement.
    if (existing != null)
        newcache.putAll(existing.asMap());
    return newcache;
}