Example usage for com.google.common.cache CacheBuilder maximumSize

List of usage examples for com.google.common.cache CacheBuilder maximumSize

Introduction

On this page you can find example usages of the com.google.common.cache CacheBuilder.maximumSize method.

Prototype

public CacheBuilder<K, V> maximumSize(long maximumSize)

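The method caps the number of entries the cache may contain; as the limit is approached, entries that are less likely to be used again are evicted. Below is a minimal, self-contained sketch (the String key/value types and the limit of 100 are illustrative and not taken from the examples that follow):

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class MaximumSizeExample {
    public static void main(String[] args) {
        // Keep at most 100 entries; less recently used entries are
        // evicted as the limit is approached.
        Cache<String, String> cache = CacheBuilder.newBuilder()
                .maximumSize(100)
                .build();

        cache.put("key", "value");
        System.out.println(cache.getIfPresent("key")); // prints "value"
    }
}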

Usage

From source file:com.streamsets.pipeline.stage.processor.kv.redis.RedisLookupProcessor.java

@SuppressWarnings("unchecked")
private LoadingCache<Pair<String, DataType>, LookupValue> buildCache() {
    CacheBuilder cacheBuilder = CacheBuilder.newBuilder();
    if (!conf.cache.enabled) {
        return cacheBuilder.maximumSize(0).build(store);
    }

    if (conf.cache.maxSize == -1) {
        conf.cache.maxSize = Long.MAX_VALUE;
    }

    // CacheBuilder doesn't support specifying type thus suffers from erasure, so
    // we build it with this if / else logic.
    if (conf.cache.evictionPolicyType == EvictionPolicyType.EXPIRE_AFTER_ACCESS) {
        cacheBuilder.maximumSize(conf.cache.maxSize).expireAfterAccess(conf.cache.expirationTime,
                conf.cache.timeUnit);
    } else if (conf.cache.evictionPolicyType == EvictionPolicyType.EXPIRE_AFTER_WRITE) {
        cacheBuilder.maximumSize(conf.cache.maxSize).expireAfterWrite(conf.cache.expirationTime,
                conf.cache.timeUnit);
    } else {
        throw new IllegalArgumentException(
                Utils.format("Unrecognized EvictionPolicyType: '{}'", conf.cache.evictionPolicyType));
    }
    return cacheBuilder.build(store);
}
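The comment above notes that holding a raw CacheBuilder local (so the eviction policy can be chosen in an if/else) loses the generic types. As a rough, hypothetical sketch (String key/value types stand in for the real lookup types), the same maximumSize and expiration settings stay fully typed when the builder calls are chained into a single expression:

private LoadingCache<String, String> buildTypedCache() {
    return CacheBuilder.newBuilder()
            .maximumSize(1000)                       // cap the entry count
            .expireAfterAccess(5, TimeUnit.MINUTES)  // or expireAfterWrite(...)
            .build(new CacheLoader<String, String>() {
                @Override
                public String load(String key) {
                    return key.toUpperCase();        // stand-in for the real lookup
                }
            });
}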

From source file:org.apache.samza.table.caching.CachingTableProvider.java

private ReadWriteTable createDefaultCacheTable(String tableId, JavaTableConfig tableConfig) {
    long readTtlMs = Long.parseLong(tableConfig.getForTable(tableId, CachingTableDescriptor.READ_TTL_MS, "-1"));
    long writeTtlMs = Long
            .parseLong(tableConfig.getForTable(tableId, CachingTableDescriptor.WRITE_TTL_MS, "-1"));
    long cacheSize = Long.parseLong(tableConfig.getForTable(tableId, CachingTableDescriptor.CACHE_SIZE, "-1"));

    CacheBuilder cacheBuilder = CacheBuilder.newBuilder();
    if (readTtlMs != -1) {
        cacheBuilder.expireAfterAccess(readTtlMs, TimeUnit.MILLISECONDS);
    }
    if (writeTtlMs != -1) {
        cacheBuilder.expireAfterWrite(writeTtlMs, TimeUnit.MILLISECONDS);
    }
    if (cacheSize != -1) {
        cacheBuilder.maximumSize(cacheSize);
    }

    logger.info(String.format("Creating default cache with: readTtl=%d, writeTtl=%d, maxSize=%d", readTtlMs,
            writeTtlMs, cacheSize));

    GuavaCacheTable cacheTable = new GuavaCacheTable(tableId + "-def-cache", cacheBuilder.build());
    cacheTable.init(this.context);

    return cacheTable;
}

From source file:org.apache.samza.table.caching.descriptors.CachingTableProvider.java

private ReadWriteTable createDefaultCacheTable(String tableId) {
    long readTtlMs = Long.parseLong(tableSpec.getConfig().getOrDefault(READ_TTL_MS, "-1"));
    long writeTtlMs = Long.parseLong(tableSpec.getConfig().getOrDefault(WRITE_TTL_MS, "-1"));
    long cacheSize = Long.parseLong(tableSpec.getConfig().getOrDefault(CACHE_SIZE, "-1"));

    CacheBuilder cacheBuilder = CacheBuilder.newBuilder();
    if (readTtlMs != -1) {
        cacheBuilder.expireAfterAccess(readTtlMs, TimeUnit.MILLISECONDS);
    }
    if (writeTtlMs != -1) {
        cacheBuilder.expireAfterWrite(writeTtlMs, TimeUnit.MILLISECONDS);
    }
    if (cacheSize != -1) {
        cacheBuilder.maximumSize(cacheSize);
    }

    logger.info(String.format("Creating default cache with: readTtl=%d, writeTtl=%d, maxSize=%d", readTtlMs,
            writeTtlMs, cacheSize));

    GuavaCacheTable cacheTable = new GuavaCacheTable(tableId + "-def-cache", cacheBuilder.build());
    cacheTable.init(this.context);

    return cacheTable;
}

From source file:com.streamsets.pipeline.stage.bigquery.destination.BigQueryTarget.java

@Override
@SuppressWarnings("unchecked")
public List<ConfigIssue> init() {
    List<ConfigIssue> issues = super.init();

    conf.credentials.getCredentialsProvider(getContext(), issues).ifPresent(provider -> {
        if (issues.isEmpty()) {
            try {
                Optional.ofNullable(provider.getCredentials())
                        .ifPresent(c -> bigQuery = BigQueryDelegate.getBigquery(c, conf.credentials.projectId));
            } catch (IOException e) {
                LOG.error(Errors.BIGQUERY_05.getMessage(), e);
                issues.add(getContext().createConfigIssue(Groups.CREDENTIALS.name(),
                        "conf.credentials.credentialsProvider", Errors.BIGQUERY_05));
            }
        }
    });

    dataSetEval = getContext().createELEval("datasetEL");
    tableNameELEval = getContext().createELEval("tableNameEL");
    rowIdELEval = getContext().createELEval("rowIdExpression");

    CacheBuilder tableIdExistsCacheBuilder = CacheBuilder.newBuilder();
    if (conf.maxCacheSize != -1) {
        tableIdExistsCacheBuilder.maximumSize(conf.maxCacheSize);
    }

    tableIdExistsCache = tableIdExistsCacheBuilder.build(new CacheLoader<TableId, Boolean>() {
        @Override
        public Boolean load(TableId key) throws Exception {
            return bigQuery.getTable(key) != null;
        }
    });

    return issues;
}

From source file:org.nuxeo.ecm.core.cache.InMemoryCacheImpl.java

public InMemoryCacheImpl(CacheDescriptor desc) {
    super(desc);
    CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder();
    builder = builder.expireAfterWrite(desc.ttl, TimeUnit.MINUTES);
    if (desc.options.containsKey("concurrencyLevel")) {
        builder = builder.concurrencyLevel(Integer.valueOf(desc.options.get("concurrencyLevel")).intValue());
    }
    if (desc.options.containsKey("maxSize")) {
        builder = builder.maximumSize(Integer.valueOf(desc.options.get("maxSize")).intValue());
    }
    cache = builder.build();
}

From source file:uk.q3c.krail.core.user.opt.GuavaCacheConfiguration.java

public CacheBuilder<Object, Object> builder() {
    CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder();
    if (initialCapacity != null) {
        builder.initialCapacity(initialCapacity);
    }
    if (maximumSize != null) {
        builder.maximumSize(maximumSize);
    }
    if (maximumWeight != null) {
        builder.maximumWeight(maximumWeight);
    }
    if (concurrencyLevel != null) {
        builder.concurrencyLevel(concurrencyLevel);
    }
    if (weakKeys) {
        builder.weakKeys();
    }

    if (weakValues) {
        builder.weakValues();
    }
    if (softValues) {
        builder.softValues();
    }

    if (expireAfterWriteDuration != null) {
        builder.expireAfterWrite(expireAfterWriteDuration, expireAfterWriteTimeUnit);
    }
    if (expireAfterAccessDuration != null) {
        builder.expireAfterAccess(expireAfterAccessDuration, expireAfterAccessTimeUnit);
    }
    if (refreshAfterWriteDuration != null) {
        builder.refreshAfterWrite(refreshAfterWriteDuration, refreshAfterWriteTimeUnit);
    }

    if (ticker != null) {
        builder.ticker(ticker);
    }

    if (removalListener != null) {
        builder.removalListener(removalListener);
    }

    if (recordStats) {
        builder.recordStats();
    }
    return builder;
}

From source file:com.sri.ai.util.cache.DefaultCacheMap.java

private void initStorage() {
    CacheBuilder<Object, Object> cb = CacheBuilder.newBuilder();

    if (weakKeys) {
        cb.weakKeys();
    }
    // Note: a maximumSize of 
    // < 0 means no size restrictions
    // = 0 means no cache
    // > 0 means maximum size of cache
    if (maximumSize >= 0L) {
        cb.maximumSize(maximumSize);
    }
    if (AICUtilConfiguration.isRecordCacheStatistics()) {
        cb.recordStats();
    }

    storage = cb.build();
    delegate = storage.asMap();
}
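As the inline comment notes, a maximumSize of 0 yields a cache that retains nothing, which is a convenient way to turn caching off without changing call sites (the first example on this page uses the same trick). A small illustrative check, with String types standing in for the real ones:

Cache<String, String> noCache = CacheBuilder.newBuilder()
        .maximumSize(0)   // zero capacity: entries are discarded immediately
        .build();
noCache.put("k", "v");
// noCache.getIfPresent("k") returns null because nothing is ever retained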

From source file:com.addthis.hydra.job.store.AvailableCache.java

/**
 * Make a cache using specified cache parameters
 *
 * @param refreshMillis How frequently values should be refreshed in milliseconds (if <= 0, no refresh)
 * @param expireMillis  How old values should have to be before they are expired (if <= 0, they never expire)
 * @param maxSize       How many values should be stored in the cache (if <= 0, no explicit limit)
 * @param fetchThreads  How many threads to use to fetch values in the background (if <=0, use two threads)
 */
public AvailableCache(long refreshMillis, long expireMillis, int maxSize, int fetchThreads) {
    CacheBuilder<Object, Object> cacheBuilder = CacheBuilder.newBuilder();
    // Configure the cache for any parameters that are > 0
    if (expireMillis > 0) {
        cacheBuilder.expireAfterWrite(expireMillis, TimeUnit.MILLISECONDS);
    }
    if (refreshMillis > 0) {
        cacheBuilder.refreshAfterWrite(refreshMillis, TimeUnit.MILLISECONDS);
    }
    if (maxSize > 0) {
        cacheBuilder.maximumSize(maxSize);
    }
    if (fetchThreads <= 0) {
        fetchThreads = 2;
    }
    executor = new ThreadPoolExecutor(fetchThreads, fetchThreads, 1000L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<>(),
            new ThreadFactoryBuilder().setNameFormat("avail-cache-%d").setDaemon(true).build());
    //noinspection unchecked
    this.loadingCache = cacheBuilder.build(new CacheLoader<String, Optional<T>>() {
        /**
         * If refreshAfterWrite is enabled, this method is called after returning the old value.
         * The new value will be inserted into the cache when the load() operation completes.
         */
        @Override
        public ListenableFuture<Optional<T>> reload(final String key, Optional<T> oldValue) {
            ListenableFutureTask<Optional<T>> task = ListenableFutureTask.create(() -> load(key));
            executor.execute(task);
            return task;
        }

        @Override
        public Optional<T> load(String key) throws Exception {
            return Optional.fromNullable(fetchValue(key));
        }
    });
}

From source file:com.tesora.dve.sql.schema.cache.SchemaCache.java

private Cache<SchemaCacheKey<?>, Object> buildCache(int size) {
    CacheBuilder<SchemaCacheKey<?>, Object> boo = CacheBuilder.newBuilder().removalListener(this).recordStats();
    if (size > -1)
        boo = boo.maximumSize(size);
    return boo.build();
}

From source file:org.graylog2.lookup.caches.GuavaLookupCache.java

@Inject
public GuavaLookupCache(@Assisted("id") String id, @Assisted("name") String name,
        @Assisted LookupCacheConfiguration c, @Named("processbuffer_processors") int processorCount,
        MetricRegistry metricRegistry) {
    super(id, name, c, metricRegistry);
    Config config = (Config) c;
    CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder();

    // the theory is that typically only processors will affect the cache concurrency, whereas decorator usage is less critical
    builder.concurrencyLevel(processorCount).recordStats();

    builder.maximumSize(config.maxSize());
    if (config.expireAfterAccess() > 0 && config.expireAfterAccessUnit() != null) {
        //noinspection ConstantConditions
        builder.expireAfterAccess(config.expireAfterAccess(), config.expireAfterAccessUnit());
    }
    if (config.expireAfterWrite() > 0 && config.expireAfterWriteUnit() != null) {
        //noinspection ConstantConditions
        builder.expireAfterWrite(config.expireAfterWrite(), config.expireAfterWriteUnit());
    }

    cache = new InstrumentedCache<>(builder.build(), this);
}