List of usage examples for the org.apache.commons.collections.map.LRUMap constructor
public LRUMap(int maxSize)
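All of the snippets below bound an in-memory cache by passing a maximum size to this constructor. As a quick orientation, here is a minimal, self-contained sketch of that usage and of the least-recently-used eviction it provides; the LruMapDemo class name is made up for illustration and does not appear in any of the source files listed here.

import org.apache.commons.collections.map.LRUMap;

public class LruMapDemo {
    @SuppressWarnings("unchecked")
    public static void main(String[] args) {
        // Cache bounded to two entries; the least recently used entry is evicted when full.
        LRUMap cache = new LRUMap(2);
        cache.put("a", 1);
        cache.put("b", 2);
        cache.get("a");    // touching "a" marks it as most recently used
        cache.put("c", 3); // the map is full, so the least recently used entry ("b") is dropped
        System.out.println(cache.containsKey("b")); // expected: false
        System.out.println(cache.keySet());         // expected: [a, c]
    }
}

The pre-generics commons-collections 3.x LRUMap is a raw Map, which is why the real-world examples below carry @SuppressWarnings("unchecked") on the enclosing constructor or method.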
From source file: org.apache.directory.server.core.authn.SimpleAuthenticator.java

/**
 * Creates a new instance, with an initial cache size
 *
 * @param cacheSize the size of the credential cache
 */
public SimpleAuthenticator(int cacheSize) {
    super(AuthenticationLevel.SIMPLE, Dn.ROOT_DSE);
    credentialCache = new LRUMap(cacheSize > 0 ? cacheSize : DEFAULT_CACHE_SIZE);
}
From source file: org.apache.directory.server.core.authn.SimpleAuthenticator.java

/**
 * Creates a new instance, with an initial cache size
 *
 * @param cacheSize the size of the credential cache
 * @param baseDn The base Dn
 */
public SimpleAuthenticator(int cacheSize, Dn baseDn) {
    super(AuthenticationLevel.SIMPLE, baseDn);
    credentialCache = new LRUMap(cacheSize > 0 ? cacheSize : DEFAULT_CACHE_SIZE);
}
From source file: org.apache.hadoop.yarn.server.applicationhistoryservice.timeline.LeveldbTimelineStore.java

@Override
@SuppressWarnings("unchecked")
protected void serviceInit(Configuration conf) throws Exception {
    Options options = new Options();
    options.createIfMissing(true);
    options.cacheSize(conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE,
            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE));
    JniDBFactory factory = new JniDBFactory();
    String path = conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH);
    File p = new File(path);
    if (!p.exists()) {
        if (!p.mkdirs()) {
            throw new IOException("Couldn't create directory for leveldb " + "timeline store " + path);
        }
    }
    LOG.info("Using leveldb path " + path);
    db = factory.open(new File(path, FILENAME), options);
    startTimeWriteCache = Collections.synchronizedMap(new LRUMap(getStartTimeWriteCacheSize(conf)));
    startTimeReadCache = Collections.synchronizedMap(new LRUMap(getStartTimeReadCacheSize(conf)));
    if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_TTL_ENABLE, true)) {
        deletionThread = new EntityDeletionThread(conf);
        deletionThread.start();
    }
    super.serviceInit(conf);
}
From source file: org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore.java

@Override
@SuppressWarnings("unchecked")
protected void serviceInit(Configuration conf) throws Exception {
    Preconditions.checkArgument(
            conf.getLong(YarnConfiguration.TIMELINE_SERVICE_TTL_MS,
                    YarnConfiguration.DEFAULT_TIMELINE_SERVICE_TTL_MS) > 0,
            "%s property value should be greater than zero",
            YarnConfiguration.TIMELINE_SERVICE_TTL_MS);
    Preconditions.checkArgument(
            conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS,
                    YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS) > 0,
            "%s property value should be greater than zero",
            YarnConfiguration.TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS);
    Preconditions.checkArgument(
            conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE,
                    YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE) >= 0,
            "%s property value should be greater than or equal to zero",
            YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE);
    Preconditions.checkArgument(
            conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
                    YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE) > 0,
            " %s property value should be greater than zero",
            YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE);
    Preconditions.checkArgument(
            conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
                    YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE) > 0,
            "%s property value should be greater than zero",
            YarnConfiguration.TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE);
    Options options = new Options();
    options.createIfMissing(true);
    options.cacheSize(conf.getLong(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE,
            YarnConfiguration.DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE));
    JniDBFactory factory = new JniDBFactory();
    Path dbPath = new Path(conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH), FILENAME);
    FileSystem localFS = null;
    try {
        localFS = FileSystem.getLocal(conf);
        if (!localFS.exists(dbPath)) {
            if (!localFS.mkdirs(dbPath)) {
                throw new IOException("Couldn't create directory for leveldb " + "timeline store " + dbPath);
            }
            localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
        }
    } finally {
        IOUtils.cleanup(LOG, localFS);
    }
    LOG.info("Using leveldb path " + dbPath);
    db = factory.open(new File(dbPath.toString()), options);
    checkVersion();
    startTimeWriteCache = Collections.synchronizedMap(new LRUMap(getStartTimeWriteCacheSize(conf)));
    startTimeReadCache = Collections.synchronizedMap(new LRUMap(getStartTimeReadCacheSize(conf)));
    if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_TTL_ENABLE, true)) {
        deletionThread = new EntityDeletionThread(conf);
        deletionThread.start();
    }
    super.serviceInit(conf);
}
From source file: org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore.java

@Override
@SuppressWarnings("unchecked")
protected void serviceInit(Configuration conf) throws Exception {
    Preconditions.checkArgument(conf.getLong(TIMELINE_SERVICE_TTL_MS, DEFAULT_TIMELINE_SERVICE_TTL_MS) > 0,
            "%s property value should be greater than zero", TIMELINE_SERVICE_TTL_MS);
    Preconditions.checkArgument(
            conf.getLong(TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS,
                    DEFAULT_TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS) > 0,
            "%s property value should be greater than zero", TIMELINE_SERVICE_LEVELDB_TTL_INTERVAL_MS);
    Preconditions.checkArgument(
            conf.getLong(TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE,
                    DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE) >= 0,
            "%s property value should be greater than or equal to zero",
            TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE);
    Preconditions.checkArgument(
            conf.getLong(TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE,
                    DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE) > 0,
            " %s property value should be greater than zero",
            TIMELINE_SERVICE_LEVELDB_START_TIME_READ_CACHE_SIZE);
    Preconditions.checkArgument(
            conf.getLong(TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE,
                    DEFAULT_TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE) > 0,
            "%s property value should be greater than zero",
            TIMELINE_SERVICE_LEVELDB_START_TIME_WRITE_CACHE_SIZE);
    Preconditions.checkArgument(
            conf.getLong(TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES,
                    DEFAULT_TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES) > 0,
            "%s property value should be greater than zero", TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES);
    Preconditions.checkArgument(
            conf.getLong(TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE,
                    DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE) > 0,
            "%s property value should be greater than zero", TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE);
    Options options = new Options();
    options.createIfMissing(true);
    options.cacheSize(conf.getLong(TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE,
            DEFAULT_TIMELINE_SERVICE_LEVELDB_READ_CACHE_SIZE));
    JniDBFactory factory = new JniDBFactory();
    Path dbPath = new Path(conf.get(TIMELINE_SERVICE_LEVELDB_PATH), FILENAME);
    Path domainDBPath = new Path(dbPath, DOMAIN);
    Path starttimeDBPath = new Path(dbPath, STARTTIME);
    Path ownerDBPath = new Path(dbPath, OWNER);
    try (FileSystem localFS = FileSystem.getLocal(conf)) {
        if (!localFS.exists(dbPath)) {
            if (!localFS.mkdirs(dbPath)) {
                throw new IOException("Couldn't create directory for leveldb " + "timeline store " + dbPath);
            }
            localFS.setPermission(dbPath, LEVELDB_DIR_UMASK);
        }
        if (!localFS.exists(domainDBPath)) {
            if (!localFS.mkdirs(domainDBPath)) {
                throw new IOException(
                        "Couldn't create directory for leveldb " + "timeline store " + domainDBPath);
            }
            localFS.setPermission(domainDBPath, LEVELDB_DIR_UMASK);
        }
        if (!localFS.exists(starttimeDBPath)) {
            if (!localFS.mkdirs(starttimeDBPath)) {
                throw new IOException(
                        "Couldn't create directory for leveldb " + "timeline store " + starttimeDBPath);
            }
            localFS.setPermission(starttimeDBPath, LEVELDB_DIR_UMASK);
        }
        if (!localFS.exists(ownerDBPath)) {
            if (!localFS.mkdirs(ownerDBPath)) {
                throw new IOException(
                        "Couldn't create directory for leveldb " + "timeline store " + ownerDBPath);
            }
            localFS.setPermission(ownerDBPath, LEVELDB_DIR_UMASK);
        }
    }
    options.maxOpenFiles(conf.getInt(TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES,
            DEFAULT_TIMELINE_SERVICE_LEVELDB_MAX_OPEN_FILES));
    options.writeBufferSize(conf.getInt(TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE,
            DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BUFFER_SIZE));
    LOG.info("Using leveldb path " + dbPath);
    domaindb = factory.open(new File(domainDBPath.toString()), options);
    entitydb = new RollingLevelDB(ENTITY);
    entitydb.init(conf);
    indexdb = new RollingLevelDB(INDEX);
    indexdb.init(conf);
    starttimedb = factory.open(new File(starttimeDBPath.toString()), options);
    ownerdb = factory.open(new File(ownerDBPath.toString()), options);
    checkVersion();
    startTimeWriteCache = Collections.synchronizedMap(new LRUMap(getStartTimeWriteCacheSize(conf)));
    startTimeReadCache = Collections.synchronizedMap(new LRUMap(getStartTimeReadCacheSize(conf)));
    writeBatchSize = conf.getInt(TIMELINE_SERVICE_LEVELDB_WRITE_BATCH_SIZE,
            DEFAULT_TIMELINE_SERVICE_LEVELDB_WRITE_BATCH_SIZE);
    super.serviceInit(conf);
}
From source file: org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager.java

@SuppressWarnings("unchecked")
public TimelineACLsManager(Configuration conf) {
    this.adminAclsManager = new AdminACLsManager(conf);
    aclExts = Collections.synchronizedMap(new LRUMap(DOMAIN_ACCESS_ENTRY_CACHE_SIZE));
}
From source file: org.apache.jackrabbit.core.query.lucene.CachingIndexReader.java

/**
 * Creates a new <code>CachingIndexReader</code> based on
 * <code>delegatee</code>
 *
 * @param delegatee the base <code>IndexReader</code>.
 * @param cache a document number cache, or <code>null</code> if not
 *              available to this reader.
 * @param initCache if the parent caches should be initialized
 *                  when this index reader is constructed.
 * @throws IOException if an error occurs while reading from the index.
 */
@SuppressWarnings("unchecked")
CachingIndexReader(IndexReader delegatee, DocNumberCache cache, boolean initCache) throws IOException {
    super(delegatee);
    this.cache = cache;
    this.inSegmentParents = new int[delegatee.maxDoc()];
    Arrays.fill(this.inSegmentParents, -1);
    this.shareableNodes = initShareableNodes(delegatee);
    this.cacheInitializer = new CacheInitializer(delegatee);
    if (initCache) {
        cacheInitializer.run();
    }
    // limit cache to 1% of maxDoc(), but at least 10.
    this.docNumber2id = Collections.synchronizedMap(new LRUMap(Math.max(10, delegatee.maxDoc() / 100)));
    this.termDocsCache = new TermDocsCache(delegatee, FieldNames.PROPERTIES);
}
From source file: org.apache.jackrabbit.core.query.lucene.DocNumberCache.java

/**
 * Creates a new <code>DocNumberCache</code> with a limiting
 * <code>size</code>.
 *
 * @param size the cache limit.
 */
DocNumberCache(int size) {
    size = size / CACHE_SEGMENTS;
    if (size < 0x40) {
        // minimum size is 0x40 * 0x10 = 1024
        size = 0x40;
    }
    for (int i = 0; i < docNumbers.length; i++) {
        docNumbers[i] = new LRUMap(size);
    }
}
From source file: org.apache.jackrabbit.core.security.authorization.AbstractCompiledPermissions.java

@SuppressWarnings("unchecked")
protected AbstractCompiledPermissions() {
    cache = new LRUMap(1000);
}
From source file: org.apache.jackrabbit.core.security.authorization.principalbased.EntriesCache.java

/**
 * @param systemSession
 * @param systemEditor
 * @param accessControlRootPath
 * @throws javax.jcr.RepositoryException
 */
EntriesCache(SessionImpl systemSession, ACLEditor systemEditor, String accessControlRootPath)
        throws RepositoryException {
    this.systemSession = systemSession;
    this.systemEditor = systemEditor;
    repPolicyName = systemSession.getJCRName(N_POLICY);
    cache = new LRUMap(1000);
    ObservationManager observationMgr = systemSession.getWorkspace().getObservationManager();
    /*
     Make sure the collector and all subscribed listeners are informed upon
     ACL modifications. Interesting events are:
     - new ACL (NODE_ADDED)
     - new ACE (NODE_ADDED)
     - changing ACE (PROPERTY_CHANGED)
     - removed ACL (NODE_REMOVED)
     - removed ACE (NODE_REMOVED)
     */
    int events = Event.PROPERTY_CHANGED | Event.NODE_ADDED | Event.NODE_REMOVED;
    String[] ntNames = new String[] { systemSession.getJCRName(NT_REP_ACCESS_CONTROLLABLE),
            systemSession.getJCRName(NT_REP_ACL), systemSession.getJCRName(NT_REP_ACE) };
    observationMgr.addEventListener(this, events, accessControlRootPath, true, null, ntNames, false);
}