List of usage examples for com.google.common.cache CacheBuilder maximumSize
Method signature: CacheBuilder<K, V> maximumSize(long maximumSize) — specifies the maximum number of entries the cache may contain.
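Before the project-specific methods below, here is a minimal standalone sketch of the call. The class name, cached values, and loader are illustrative only and are not taken from any of the listed projects.

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

public class MaximumSizeExample {
    public static void main(String[] args) throws Exception {
        // Cap the cache at 100 entries; as the limit is approached,
        // entries that have not been used recently are evicted.
        LoadingCache<String, Integer> lengths = CacheBuilder.newBuilder()
                .maximumSize(100)
                .build(new CacheLoader<String, Integer>() {
                    @Override
                    public Integer load(String key) {
                        return key.length();
                    }
                });

        System.out.println(lengths.get("guava")); // prints 5
    }
}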
From source file:com.streamsets.pipeline.stage.processor.lookup.ForceLookupProcessor.java
@SuppressWarnings("unchecked") private Cache<String, Optional<List<Map<String, Field>>>> buildCache() { CacheBuilder cacheBuilder = CacheBuilder.newBuilder(); if (!conf.cacheConfig.enabled) { return (conf.lookupMode == QUERY) ? cacheBuilder.maximumSize(0).build(new ForceLookupLoader(this)) : cacheBuilder.maximumSize(0).build(); }/* w w w.java2 s.co m*/ if (conf.cacheConfig.maxSize == -1) { conf.cacheConfig.maxSize = Long.MAX_VALUE; } if (LOG.isDebugEnabled()) { cacheBuilder.recordStats(); } // CacheBuilder doesn't support specifying type thus suffers from erasure, so // we build it with this if / else logic. if (conf.cacheConfig.evictionPolicyType == EvictionPolicyType.EXPIRE_AFTER_ACCESS) { cacheBuilder.maximumSize(conf.cacheConfig.maxSize).expireAfterAccess(conf.cacheConfig.expirationTime, conf.cacheConfig.timeUnit); } else if (conf.cacheConfig.evictionPolicyType == EvictionPolicyType.EXPIRE_AFTER_WRITE) { cacheBuilder.maximumSize(conf.cacheConfig.maxSize).expireAfterWrite(conf.cacheConfig.expirationTime, conf.cacheConfig.timeUnit); } else { throw new IllegalArgumentException( Utils.format("Unrecognized EvictionPolicyType: '{}'", conf.cacheConfig.evictionPolicyType)); } return (conf.lookupMode == QUERY) ? cacheBuilder.build(new ForceLookupLoader(this)) : cacheBuilder.build(); }
From source file:org.apache.druid.server.lookup.cache.loading.OnHeapLoadingCache.java
/**
 * @param concurrencyLevel  defaults to {@code DEFAULT_CONCURRENCY_LEVEL}
 * @param initialCapacity   defaults to {@code DEFAULT_INITIAL_CAPACITY}
 * @param maximumSize       maximum number of entries the cache can hold. When set to zero, elements
 *                          are evicted immediately after being loaded into the cache. When set to
 *                          null, the cache maximum size is unbounded.
 * @param expireAfterAccess specifies that each entry should be automatically removed from the cache
 *                          once a fixed duration has elapsed after the entry's creation, the most
 *                          recent replacement of its value, or its last access. Access time is reset
 *                          by all cache read and write operations. No read-time-based eviction when
 *                          set to null.
 * @param expireAfterWrite  specifies that each entry should be automatically removed from the cache
 *                          once a fixed duration has elapsed after the entry's creation or the most
 *                          recent replacement of its value. No write-time-based eviction when set to
 *                          null.
 */
@JsonCreator
public OnHeapLoadingCache(@JsonProperty("concurrencyLevel") int concurrencyLevel,
        @JsonProperty("initialCapacity") int initialCapacity,
        @JsonProperty("maximumSize") Long maximumSize,
        @JsonProperty("expireAfterAccess") Long expireAfterAccess,
        @JsonProperty("expireAfterWrite") Long expireAfterWrite) {
    this.concurrencyLevel = concurrencyLevel <= 0 ? DEFAULT_CONCURRENCY_LEVEL : concurrencyLevel;
    this.initialCapacity = initialCapacity <= 0 ? DEFAULT_INITIAL_CAPACITY : initialCapacity;
    this.maximumSize = maximumSize;
    this.expireAfterAccess = expireAfterAccess;
    this.expireAfterWrite = expireAfterWrite;

    CacheBuilder builder = CacheBuilder.newBuilder()
            .concurrencyLevel(this.concurrencyLevel)
            .initialCapacity(this.initialCapacity)
            .recordStats();
    if (this.expireAfterAccess != null) {
        builder.expireAfterAccess(expireAfterAccess, TimeUnit.MILLISECONDS);
    }
    if (this.expireAfterWrite != null) {
        builder.expireAfterWrite(this.expireAfterWrite, TimeUnit.MILLISECONDS);
    }
    if (this.maximumSize != null) {
        builder.maximumSize(this.maximumSize);
    }
    this.cache = builder.build();
    if (isClosed.getAndSet(false)) {
        log.info("Guava Based OnHeapCache started with spec [%s]", cache.toString());
    }
}
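The javadoc above notes that a zero maximumSize evicts entries as soon as they are loaded. The following standalone sketch (not part of the Druid class) illustrates that behavior:

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

public class ZeroSizeExample {
    public static void main(String[] args) throws Exception {
        // With maximumSize(0) every entry is evicted right after it is loaded,
        // so the loader runs again on each lookup and the cache stays empty.
        LoadingCache<String, String> noCache = CacheBuilder.newBuilder()
                .maximumSize(0)
                .build(CacheLoader.from(key -> key.toUpperCase()));

        System.out.println(noCache.get("druid")); // DRUID, loaded fresh
        System.out.println(noCache.size());       // 0
    }
}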
From source file:com.github.benmanes.multiway.TransferPool.java
/** Creates the denormalized cache of resources based on the builder configuration. */
Cache<ResourceKey<K>, R> makeCache(MultiwayPoolBuilder<? super K, ? super R> builder) {
    CacheBuilder<Object, Object> cacheBuilder = CacheBuilder.newBuilder();
    if (builder.maximumSize != MultiwayPoolBuilder.UNSET_INT) {
        cacheBuilder.maximumSize(builder.maximumSize);
    }
    if (builder.maximumWeight != MultiwayPoolBuilder.UNSET_INT) {
        cacheBuilder.maximumWeight(builder.maximumWeight);
    }
    if (builder.weigher != null) {
        final Weigher<? super K, ? super R> weigher = builder.weigher;
        cacheBuilder.weigher(new Weigher<ResourceKey<K>, R>() {
            @Override
            public int weigh(ResourceKey<K> resourceKey, R resource) {
                return weigher.weigh(resourceKey.getKey(), resource);
            }
        });
    }
    if (builder.expireAfterWriteNanos != MultiwayPoolBuilder.UNSET_INT) {
        cacheBuilder.expireAfterWrite(builder.expireAfterWriteNanos, TimeUnit.NANOSECONDS);
    }
    if (builder.ticker != null) {
        cacheBuilder.ticker(builder.ticker);
    }
    if (builder.recordStats) {
        cacheBuilder.recordStats();
    }
    cacheBuilder.concurrencyLevel(builder.getConcurrencyLevel());
    cacheBuilder.removalListener(new CacheRemovalListener());
    return cacheBuilder.build();
}
From source file:org.nuxeo.ecm.core.storage.dbs.DBSCachingRepository.java
protected <T> Cache<String, T> newCache(DBSRepositoryDescriptor descriptor) {
    CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder();
    builder = builder.expireAfterWrite(descriptor.cacheTTL.longValue(), TimeUnit.MINUTES);
    if (descriptor.cacheConcurrencyLevel != null) {
        builder = builder.concurrencyLevel(descriptor.cacheConcurrencyLevel.intValue());
    }
    if (descriptor.cacheMaxSize != null) {
        builder = builder.maximumSize(descriptor.cacheMaxSize.longValue());
    }
    return builder.build();
}
From source file:com.streamsets.pipeline.lib.jdbc.multithread.JdbcBaseRunnable.java
/**
 * Builds the Read Context Cache {@link #tableReadContextCache}
 */
@SuppressWarnings("unchecked")
private LoadingCache<TableRuntimeContext, TableReadContext> buildReadContextCache(
        CacheLoader<TableRuntimeContext, TableReadContext> tableCacheLoader) {
    CacheBuilder resultSetCacheBuilder = CacheBuilder.newBuilder()
            .removalListener(new JdbcTableReadContextInvalidationListener());

    if (tableJdbcConfigBean.batchTableStrategy == BatchTableStrategy.SWITCH_TABLES) {
        if (tableJdbcConfigBean.resultCacheSize > 0) {
            resultSetCacheBuilder = resultSetCacheBuilder.maximumSize(tableJdbcConfigBean.resultCacheSize);
        }
    } else {
        resultSetCacheBuilder = resultSetCacheBuilder.maximumSize(1);
    }

    if (tableCacheLoader != null) {
        return resultSetCacheBuilder.build(tableCacheLoader);
    } else {
        return resultSetCacheBuilder.build(new JdbcTableReadContextLoader(connectionManager, offsets,
                tableJdbcConfigBean.fetchSize, tableJdbcConfigBean.quoteChar.getQuoteCharacter(),
                tableJdbcELEvalContext, isReconnect));
    }
}
From source file:net.voxton.mafiacraft.core.city.CityManager.java
/**
 * Gets the section cache of a world.
 *
 * @param world The world of the cache.
 * @return The section cache.
 */
private Cache<Long, Section> getSectionCache(final MWorld world) {
    Cache<Long, Section> cache = sections.get(world.getName());
    if (cache == null) {
        CacheBuilder builder = CacheBuilder.newBuilder();
        builder.maximumSize(10000).expireAfterWrite(10, TimeUnit.MINUTES);
        cache = builder.build(new CacheLoader<Long, Section>() {
            @Override
            public Section load(Long key) throws Exception {
                int x = getXFromKey(key);
                int y = getYFromKey(key);
                int z = getZFromKey(key);
                return createSection(world, x, y, z);
            }
        });
    }
    sections.put(world.getName(), cache);
    return cache;
}
From source file:org.apache.hadoop.hive.llap.AsyncPbRpcProxy.java
public AsyncPbRpcProxy(String name, int numThreads, Configuration conf, Token<TokenType> token,
        long connectionTimeoutMs, long retrySleepMs, int expectedNodes, int maxPerNode) {
    super(name);
    // Note: we may make size/etc. configurable later.
    CacheBuilder<String, ProtocolType> cb = CacheBuilder.newBuilder()
            .expireAfterAccess(1, TimeUnit.HOURS)
            .removalListener(new RemovalListener<String, ProtocolType>() {
                @Override
                public void onRemoval(RemovalNotification<String, ProtocolType> arg) {
                    if (arg == null) return;
                    shutdownProtocolImpl(arg.getValue());
                }
            });
    if (expectedNodes > 0) {
        cb.maximumSize(expectedNodes * 2);
    }
    this.hostProxies = cb.build();
    this.socketFactory = NetUtils.getDefaultSocketFactory(conf);
    this.token = token;
    if (token != null) {
        String tokenUser = getTokenUser(token);
        if (tokenUser == null) {
            try {
                tokenUser = UserGroupInformation.getCurrentUser().getShortUserName();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
            LOG.warn("Cannot determine token user from the token; using {}", tokenUser);
        }
        this.tokenUser = tokenUser;
    } else {
        this.tokenUser = null;
    }
    this.retryPolicy = RetryPolicies.retryUpToMaximumTimeWithFixedSleep(connectionTimeoutMs, retrySleepMs,
            TimeUnit.MILLISECONDS);
    this.requestManager = new RequestManager(numThreads, maxPerNode);
    ExecutorService localExecutor = Executors.newFixedThreadPool(1,
            new ThreadFactoryBuilder().setNameFormat("RequestManagerExecutor").build());
    this.requestManagerExecutor = MoreExecutors.listeningDecorator(localExecutor);
    LOG.info("Setting up AsyncPbRpcProxy with" + "numThreads=" + numThreads + "retryTime(millis)="
            + connectionTimeoutMs + "retrySleep(millis)=" + retrySleepMs);
}
From source file:com.gitblit.tickets.ITicketService.java
/**
 * Creates a ticket service.
 */
public ITicketService(IRuntimeManager runtimeManager, IPluginManager pluginManager,
        INotificationManager notificationManager, IUserManager userManager,
        IRepositoryManager repositoryManager) {
    this.log = LoggerFactory.getLogger(getClass());
    this.settings = runtimeManager.getSettings();
    this.runtimeManager = runtimeManager;
    this.pluginManager = pluginManager;
    this.notificationManager = notificationManager;
    this.userManager = userManager;
    this.repositoryManager = repositoryManager;

    this.indexer = new TicketIndexer(runtimeManager);

    CacheBuilder<Object, Object> cb = CacheBuilder.newBuilder();
    this.ticketsCache = cb.maximumSize(1000).expireAfterAccess(30, TimeUnit.MINUTES).build();

    this.labelsCache = new ConcurrentHashMap<String, List<TicketLabel>>();
    this.milestonesCache = new ConcurrentHashMap<String, List<TicketMilestone>>();

    this.updateDiffstats = settings.getBoolean(SETTING_UPDATE_DIFFSTATS, true);
}
From source file:software.coolstuff.springframework.owncloud.service.impl.rest.OwncloudRestResourceServiceImpl.java
protected LoadingCache<String, Sardine> buildSardineCache() {
    CacheProperties cacheProperties = properties.getResourceService().getSardineCache();
    CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder();
    if (cacheProperties.getConcurrencyLevel() != null) {
        builder.concurrencyLevel(cacheProperties.getConcurrencyLevel());
    }
    if (cacheProperties.getExpireAfterAccess() != null && cacheProperties.getExpireAfterAccessTimeUnit() != null) {
        builder.expireAfterAccess(cacheProperties.getExpireAfterAccess(),
                cacheProperties.getExpireAfterAccessTimeUnit());
    }
    if (cacheProperties.getExpireAfterWrite() != null && cacheProperties.getExpireAfterWriteTimeUnit() != null) {
        builder.expireAfterWrite(cacheProperties.getExpireAfterWrite(),
                cacheProperties.getExpireAfterWriteTimeUnit());
    }
    if (cacheProperties.getInitialCapacity() != null) {
        builder.initialCapacity(cacheProperties.getInitialCapacity());
    }
    if (cacheProperties.getMaximumSize() != null) {
        builder.maximumSize(cacheProperties.getMaximumSize());
    }
    if (cacheProperties.getMaximumWeight() != null) {
        builder.maximumWeight(cacheProperties.getMaximumWeight());
    }
    if (cacheProperties.getRefreshAfterWrite() != null && cacheProperties.getRefreshAfterWriteTimeUnit() != null) {
        builder.refreshAfterWrite(cacheProperties.getRefreshAfterWrite(),
                cacheProperties.getRefreshAfterWriteTimeUnit());
    }
    return builder.build(sardineCacheLoader);
}
From source file:com.addthis.hydra.data.tree.ReadTree.java
public ReadTree(File root, boolean metrics) throws Exception {
    this.metrics = metrics;
    if (!root.isDirectory()) {
        throw new IOException("Unable to open root directory '" + root + "'");
    }
    this.root = root;
    this.advanced = TreeConfig.readFromDataDirectory(root.toPath());
    source = initSource();
    try {
        CacheBuilder<? super CacheKey, ? super ReadTreeNode> cacheBuilder = CacheBuilder.newBuilder();
        if (nodeCacheWeight != 0) {
            // limit by weight
            cacheBuilder = cacheBuilder.maximumWeight((long) (nodeCacheWeight * advanced.cacheWeight))
                    .weigher((key, value) -> {
                        /* A lean node goes from 24 to 24 + its string name and + cacheKey. the 24 becomes
                           a small percentage. Dangerous, fat nodes typically have lots of serialized
                           strings in their value payload. The inflation ratio there is actually probably
                           less than for lean nodes since the various pointers for the string objects may
                           not be nearly as large as the strings themselves. Therefore, holding them to the
                           lean node's expansion standard is probably conservative enough. */
                        return value.getWeight();
                    });
        } else {
            // Limit by the number of nodes
            cacheBuilder = cacheBuilder.maximumSize((long) (nodeCacheSize * advanced.cacheWeight));
        }
        loadingNodeCache = cacheBuilder.build(new CacheLoader<CacheKey, ReadTreeNode>() {
            @Override
            public ReadTreeNode load(CacheKey key) throws Exception {
                ReadTreeNode node = sourceGet(key.dbkey());
                if (node != null) {
                    node.init(ReadTree.this, key.name);
                    return node;
                } else {
                    return MISSING;
                }
            }
        });
        rootNode = getNode(1, "root");
        if (rootNode == null) {
            throw new IllegalStateException("missing root in readonly tree");
        }
    } catch (Exception e) {
        source.close();
        throw e;
    }
}
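The example above switches between a weight-based and a count-based bound depending on configuration. As a rough standalone sketch (names and sizes are illustrative, not from the Hydra code), the two bounds look like this side by side; a builder may use one or the other, not both:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class WeightVsSizeExample {
    public static void main(String[] args) {
        // Bound by entry count: at most 1,000 entries regardless of their size.
        Cache<String, byte[]> byCount = CacheBuilder.newBuilder()
                .maximumSize(1_000)
                .build();

        // Bound by total weight: weight here is the payload length in bytes,
        // so the cache holds roughly 1 MiB of values in aggregate.
        Cache<String, byte[]> byWeight = CacheBuilder.newBuilder()
                .maximumWeight(1_024 * 1_024)
                .<String, byte[]>weigher((key, value) -> value.length)
                .build();

        byCount.put("a", new byte[10]);
        byWeight.put("b", new byte[10]);
        System.out.println(byCount.size() + " " + byWeight.size());
    }
}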