List of usage examples for com.google.common.cache CacheBuilder build
public <K1 extends K, V1 extends V> Cache<K1, V1> build()
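For orientation, here is a minimal, self-contained sketch of calling build() on a manually populated cache. The String key/value types, the size bound, and the TTL below are illustrative choices, not taken from any of the examples that follow.

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import java.util.concurrent.TimeUnit;

public class BuildExample {
    public static void main(String[] args) {
        // build() without a CacheLoader returns a Cache that is populated
        // manually via put(); contrast with build(CacheLoader), which
        // returns a self-populating LoadingCache.
        Cache<String, String> cache = CacheBuilder.newBuilder()
                .maximumSize(100)                       // illustrative size bound
                .expireAfterWrite(10, TimeUnit.MINUTES) // illustrative TTL
                .build();

        cache.put("key", "value");
        // getIfPresent returns null if the key is absent or already evicted.
        System.out.println(cache.getIfPresent("key"));
    }
}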
From source file: org.apache.apex.malhar.lib.db.cache.CacheStore.java

@Override
public void connect() throws IOException {
    open = true;
    if (numInitCacheLines > maxCacheSize) {
        logger.warn("numInitCacheLines = {} is greater than maxCacheSize = {}, maxCacheSize was set to {}",
                numInitCacheLines, maxCacheSize, numInitCacheLines);
        maxCacheSize = numInitCacheLines;
    }
    CacheBuilder<Object, Object> cacheBuilder = CacheBuilder.newBuilder();
    cacheBuilder.maximumSize(maxCacheSize);
    if (entryExpiryStrategy == ExpiryType.EXPIRE_AFTER_ACCESS) {
        cacheBuilder.expireAfterAccess(entryExpiryDurationInMillis, TimeUnit.MILLISECONDS);
    } else if (entryExpiryStrategy == ExpiryType.EXPIRE_AFTER_WRITE) {
        cacheBuilder.expireAfterWrite(entryExpiryDurationInMillis, TimeUnit.MILLISECONDS);
    }
    cache = cacheBuilder.build();

    if (entryExpiryStrategy == ExpiryType.NO_EVICTION) {
        return;
    }
    this.cleanupScheduler = Executors.newScheduledThreadPool(1);
    cleanupScheduler.scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            cache.cleanUp();
        }
    }, cacheCleanupIntervalInMillis, cacheCleanupIntervalInMillis, TimeUnit.MILLISECONDS);
}
From source file: org.apache.samza.table.caching.CachingTableProvider.java

private ReadWriteTable createDefaultCacheTable(String tableId, JavaTableConfig tableConfig) {
    long readTtlMs = Long.parseLong(tableConfig.getForTable(tableId, CachingTableDescriptor.READ_TTL_MS, "-1"));
    long writeTtlMs = Long.parseLong(tableConfig.getForTable(tableId, CachingTableDescriptor.WRITE_TTL_MS, "-1"));
    long cacheSize = Long.parseLong(tableConfig.getForTable(tableId, CachingTableDescriptor.CACHE_SIZE, "-1"));

    CacheBuilder cacheBuilder = CacheBuilder.newBuilder();
    if (readTtlMs != -1) {
        cacheBuilder.expireAfterAccess(readTtlMs, TimeUnit.MILLISECONDS);
    }
    if (writeTtlMs != -1) {
        cacheBuilder.expireAfterWrite(writeTtlMs, TimeUnit.MILLISECONDS);
    }
    if (cacheSize != -1) {
        cacheBuilder.maximumSize(cacheSize);
    }

    logger.info(String.format("Creating default cache with: readTtl=%d, writeTtl=%d, maxSize=%d",
            readTtlMs, writeTtlMs, cacheSize));

    GuavaCacheTable cacheTable = new GuavaCacheTable(tableId + "-def-cache", cacheBuilder.build());
    cacheTable.init(this.context);
    return cacheTable;
}
From source file: org.apache.samza.table.caching.descriptors.CachingTableProvider.java

private ReadWriteTable createDefaultCacheTable(String tableId) {
    long readTtlMs = Long.parseLong(tableSpec.getConfig().getOrDefault(READ_TTL_MS, "-1"));
    long writeTtlMs = Long.parseLong(tableSpec.getConfig().getOrDefault(WRITE_TTL_MS, "-1"));
    long cacheSize = Long.parseLong(tableSpec.getConfig().getOrDefault(CACHE_SIZE, "-1"));

    CacheBuilder cacheBuilder = CacheBuilder.newBuilder();
    if (readTtlMs != -1) {
        cacheBuilder.expireAfterAccess(readTtlMs, TimeUnit.MILLISECONDS);
    }
    if (writeTtlMs != -1) {
        cacheBuilder.expireAfterWrite(writeTtlMs, TimeUnit.MILLISECONDS);
    }
    if (cacheSize != -1) {
        cacheBuilder.maximumSize(cacheSize);
    }

    logger.info(String.format("Creating default cache with: readTtl=%d, writeTtl=%d, maxSize=%d",
            readTtlMs, writeTtlMs, cacheSize));

    GuavaCacheTable cacheTable = new GuavaCacheTable(tableId + "-def-cache", cacheBuilder.build());
    cacheTable.init(this.context);
    return cacheTable;
}
From source file: org.elasticsearch.indices.cache.filter.IndicesFilterCache.java

private void buildCache() {
    CacheBuilder<WeightedFilterCache.FilterCacheKey, DocIdSet> cacheBuilder = CacheBuilder.newBuilder()
            .removalListener(this)
            .maximumWeight(sizeInBytes)
            .weigher(new WeightedFilterCache.FilterCacheValueWeigher());

    // defaults to 4, but this is a busy map for all indices, increase it a bit
    cacheBuilder.concurrencyLevel(16);

    if (expire != null) {
        cacheBuilder.expireAfterAccess(expire.millis(), TimeUnit.MILLISECONDS);
    }

    cache = cacheBuilder.build();
}
From source file: fr.inria.corese.rdftograph.driver.GdbDriver.java

/**
 * Default constructor. Sets up a cache for Value -> Vertex.
 */
public GdbDriver() {
    CacheBuilder<Value, Vertex> cachebuilder = CacheBuilder.newBuilder()
            .maximumWeight(maximumByteSize)
            .concurrencyLevel(concurrencyLevel)
            .initialCapacity(1000)
            .expireAfterWrite(cacheTimeMS, TimeUnit.MILLISECONDS)
            .weigher(new Weigher<Value, Vertex>() {
                @Override
                public int weigh(Value value, Vertex vertex) {
                    // every entry weighs 1, so maximumWeight acts as an entry count bound
                    return 1;
                }
            });
    cache = cachebuilder.build();
}
From source file: org.apache.druid.server.lookup.cache.loading.OnHeapLoadingCache.java

/**
 * @param concurrencyLevel  defaults to {@code DEFAULT_CONCURRENCY_LEVEL}
 * @param initialCapacity   defaults to {@code DEFAULT_INITIAL_CAPACITY}
 * @param maximumSize       max number of entries the cache can hold. When set to zero, elements are
 *                          evicted immediately after being loaded into the cache; when set to null,
 *                          the cache size is unbounded.
 * @param expireAfterAccess specifies that each entry should be automatically removed from the cache
 *                          once a fixed duration has elapsed after the entry's creation, the most
 *                          recent replacement of its value, or its last access. Access time is reset
 *                          by all cache read and write operations. No read-time-based eviction when
 *                          set to null.
 * @param expireAfterWrite  specifies that each entry should be automatically removed from the cache
 *                          once a fixed duration has elapsed after the entry's creation or the most
 *                          recent replacement of its value. No write-time-based eviction when set to
 *                          null.
 */
@JsonCreator
public OnHeapLoadingCache(
        @JsonProperty("concurrencyLevel") int concurrencyLevel,
        @JsonProperty("initialCapacity") int initialCapacity,
        @JsonProperty("maximumSize") Long maximumSize,
        @JsonProperty("expireAfterAccess") Long expireAfterAccess,
        @JsonProperty("expireAfterWrite") Long expireAfterWrite) {
    this.concurrencyLevel = concurrencyLevel <= 0 ? DEFAULT_CONCURRENCY_LEVEL : concurrencyLevel;
    this.initialCapacity = initialCapacity <= 0 ? DEFAULT_INITIAL_CAPACITY : initialCapacity;
    this.maximumSize = maximumSize;
    this.expireAfterAccess = expireAfterAccess;
    this.expireAfterWrite = expireAfterWrite;

    CacheBuilder builder = CacheBuilder.newBuilder()
            .concurrencyLevel(this.concurrencyLevel)
            .initialCapacity(this.initialCapacity)
            .recordStats();
    if (this.expireAfterAccess != null) {
        builder.expireAfterAccess(expireAfterAccess, TimeUnit.MILLISECONDS);
    }
    if (this.expireAfterWrite != null) {
        builder.expireAfterWrite(this.expireAfterWrite, TimeUnit.MILLISECONDS);
    }
    if (this.maximumSize != null) {
        builder.maximumSize(this.maximumSize);
    }
    this.cache = builder.build();

    if (isClosed.getAndSet(false)) {
        log.info("Guava Based OnHeapCache started with spec [%s]", cache.toString());
    }
}
From source file: org.auraframework.impl.cache.CacheImpl.java

public CacheImpl(Builder<K, T> builder) {
    // if builder.useSecondaryStorage is true, we should try to use a
    // non-guava secondary-storage cache with streaming ability
    com.google.common.cache.CacheBuilder<Object, Object> cb = com.google.common.cache.CacheBuilder.newBuilder()
            .initialCapacity(builder.initialCapacity)
            .maximumSize(builder.maximumSize)
            .concurrencyLevel(builder.concurrencyLevel);
    if (builder.recordStats) {
        cb = cb.recordStats();
    }
    if (builder.softValues) {
        cb = cb.softValues();
    }
    EvictionListener<K, T> listener = new EvictionListener<K, T>(builder.name);
    cb.removalListener(listener);
    cache = cb.build();
    listener.setCache(cache);
}
From source file: org.akraievoy.couch.CouchDao.java

public void start() {
    final CacheBuilder<Object, Object> caches = CacheBuilder.newBuilder()
            .concurrencyLevel(concurrencyLevel)
            .expireAfterWrite(cacheValidityMinutes, TimeUnit.MINUTES);
    // build() does not alter the builder, so each call below yields a new,
    // independent cache with the same configuration
    cachePaths = caches.build();
    cacheAxes = caches.build();
    cacheSquabs = caches.build();
    cacheStamped = caches.build();
}
From source file: org.opendaylight.controller.cluster.messaging.MessageSlicer.java

MessageSlicer(final Builder builder) {
    this.fileBackedStreamFactory = builder.fileBackedStreamFactory;
    this.messageSliceSize = builder.messageSliceSize;
    this.maxSlicingTries = builder.maxSlicingTries;

    id = SLICER_ID_COUNTER.getAndIncrement();
    this.logContext = builder.logContext + "_slicer-id-" + id;

    CacheBuilder<Identifier, SlicedMessageState<ActorRef>> cacheBuilder = CacheBuilder.newBuilder()
            .removalListener(notification -> stateRemoved(notification));
    if (builder.expireStateAfterInactivityDuration > 0) {
        cacheBuilder = cacheBuilder.expireAfterAccess(builder.expireStateAfterInactivityDuration,
                builder.expireStateAfterInactivityUnit);
    }
    stateCache = cacheBuilder.build();
}
From source file: org.janusgraph.diskstorage.keycolumnvalue.cache.ExpirationKCVSCache.java

public ExpirationKCVSCache(final KeyColumnValueStore store, String metricsName, final long cacheTimeMS,
        final long invalidationGracePeriodMS, final long maximumByteSize) {
    super(store, metricsName);
    Preconditions.checkArgument(cacheTimeMS > 0, "Cache expiration must be positive: %s", cacheTimeMS);
    Preconditions.checkArgument(System.currentTimeMillis() + 1000L * 3600 * 24 * 365 * 100 + cacheTimeMS > 0,
            "Cache expiration time too large, overflow may occur: %s", cacheTimeMS);
    this.cacheTimeMS = cacheTimeMS;
    int concurrencyLevel = Runtime.getRuntime().availableProcessors();
    Preconditions.checkArgument(invalidationGracePeriodMS >= 0, "Invalid expiration grace period: %s",
            invalidationGracePeriodMS);
    this.invalidationGracePeriodMS = invalidationGracePeriodMS;

    CacheBuilder<KeySliceQuery, EntryList> cachebuilder = CacheBuilder.newBuilder()
            .maximumWeight(maximumByteSize)
            .concurrencyLevel(concurrencyLevel)
            .initialCapacity(1000)
            .expireAfterWrite(cacheTimeMS, TimeUnit.MILLISECONDS)
            .weigher(new Weigher<KeySliceQuery, EntryList>() {
                @Override
                public int weigh(KeySliceQuery keySliceQuery, EntryList entries) {
                    return GUAVA_CACHE_ENTRY_SIZE + KEY_QUERY_SIZE + entries.getByteSize();
                }
            });
    cache = cachebuilder.build();

    expiredKeys = new ConcurrentHashMap<StaticBuffer, Long>(50, 0.75f, concurrencyLevel);
    penaltyCountdown = new CountDownLatch(PENALTY_THRESHOLD);

    cleanupThread = new CleanupThread();
    cleanupThread.start();
}