List of usage examples for java.util.concurrent.locks.Lock.lock()
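All of the examples below follow the same core idiom: acquire the Lock, do the guarded work inside a try block, and release the lock in a finally block so it is freed even if the work throws. A minimal, self-contained sketch of that idiom (SharedCounter and its fields are illustrative names, not taken from any of the projects below):

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Minimal sketch of the lock()/unlock() idiom shared by the examples below.
// SharedCounter is a hypothetical class used only for illustration.
public class SharedCounter {
    private final Lock lock = new ReentrantLock();
    private long count;

    public void increment() {
        lock.lock(); // blocks until the lock is available
        try {
            count++; // guarded mutation
        } finally {
            lock.unlock(); // always release, even if the guarded code throws
        }
    }
}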
From source file:org.apache.rave.synchronization.SynchronizingAspect.java
@Around("synchronizePointcut()") public Object synchronizeInvocation(ProceedingJoinPoint proceedingJoinPoint) throws Throwable { MethodSignature methodSignature = (MethodSignature) proceedingJoinPoint.getSignature(); Method method = methodSignature.getMethod(); Object target = proceedingJoinPoint.getTarget(); Object[] args = proceedingJoinPoint.getArgs(); Class<?> targetClass = AopProxyUtils.ultimateTargetClass(target); Synchronized annotation = getAnnotation(targetClass, method); Validate.notNull(annotation, "Could not find @Synchronized annotation!"); Lock lock = getLock(targetClass, method, args, annotation); if (lock == null) { logger.debug(//from w w w. j a va 2 s . co m "No lock obtained for call [{}] on targetClass [{}] - proceeding without synchronization on " + "thread {}", new Object[] { method.getName(), targetClass.getName(), Thread.currentThread().getId() }); return proceedingJoinPoint.proceed(); } else { try { logger.debug( "Lock obtained for call [{}] on targetClass [{}] - proceeding with synchronization on thread {}", new Object[] { method.getName(), targetClass.getName(), Thread.currentThread().getId() }); lock.lock(); return proceedingJoinPoint.proceed(); } finally { lock.unlock(); lockService.returnLock(lock); } } }
From source file:org.opendedup.collections.ProgressiveFileBasedCSMap.java
private long getPos(byte[] hash) throws IOException {
    long pos = -1;
    Lock l = gcLock.readLock();
    l.lock();
    try {
        if (!runningGC && !lbf.mightContain(hash)) {
            return pos;
        }
        for (AbstractShard m : this.maps.getAL()) {
            try {
                pos = m.get(hash);
            } catch (MapClosedException e) {
                SDFSLogger.getLog().warn("", e);
            }
            if (pos != -1) {
                if (runningGC)
                    this.lbf.put(hash);
                // m.cache();
                return pos;
            }
        }
        return pos;
    } finally {
        l.unlock();
    }
}
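This example, and the other ProgressiveFileBasedCSMap examples below, take the read side of a ReentrantReadWriteLock (gcLock.readLock()), which lets many lookups run concurrently while a garbage-collection pass holds the write side exclusively. A small sketch of that pattern, assuming a hypothetical cache guarded the same way (GuardedCache is not from the opendedup source):

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Hypothetical cache illustrating the read/write split used by the gcLock examples.
public class GuardedCache {
    private final ReadWriteLock rwLock = new ReentrantReadWriteLock();
    private final Map<String, String> map = new HashMap<>();

    public String get(String key) {
        Lock l = rwLock.readLock(); // many readers may hold this at once
        l.lock();
        try {
            return map.get(key);
        } finally {
            l.unlock();
        }
    }

    public void put(String key, String value) {
        Lock l = rwLock.writeLock(); // exclusive: blocks readers and other writers
        l.lock();
        try {
            map.put(key, value);
        } finally {
            l.unlock();
        }
    }
}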
From source file:org.opendedup.collections.ProgressiveFileBasedCSMap.java
@Override
public boolean remove(ChunkData cm) throws IOException {
    if (this.isClosed()) {
        throw new IOException("hashtable [" + this.fileName + "] is close");
    }
    Lock l = gcLock.readLock();
    l.lock();
    try {
        if (!this.runningGC && !lbf.mightContain(cm.getHash()))
            return false;
        try {
            if (cm.getHash().length == 0)
                return true;
            AbstractShard m = this.getReadMap(cm.getHash());
            if (m == null)
                return false;
            if (!m.remove(cm.getHash())) {
                return false;
            } else {
                cm.setmDelete(true);
                if (this.isClosed()) {
                    throw new IOException("hashtable [" + this.fileName + "] is close");
                }
                try {
                    this.kSz.decrementAndGet();
                } catch (Exception e) {
                }
                return true;
            }
        } catch (Exception e) {
            SDFSLogger.getLog().fatal("error getting record", e);
            return false;
        }
    } finally {
        l.unlock();
    }
}
From source file:org.opendedup.collections.ProgressiveFileBasedCSMap.java
private AbstractShard getReadMap(byte[] hash) throws IOException {
    Lock l = gcLock.readLock();
    l.lock();
    // ct.incrementAndGet();
    try {
        if (!runningGC && !lbf.mightContain(hash)) {
            // SDFSLogger.getLog().info("not in bloom filter");
            return null;
        }
        /*
         * Iterator<ProgressiveFileByteArrayLongMap> iter = activeReadMaps.iterator();
         * while (iter.hasNext()) {
         *     ProgressiveFileByteArrayLongMap _m = iter.next();
         *     if (_m.containsKey(hash))
         *         return _m;
         * }
         */
        // zmt.incrementAndGet();
        /*
         * synchronized (ct) {
         *     if (ct.get() > 10000) {
         *         SDFSLogger.getLog().info("misses=" + mt.get() + " attempts=" + ct.get()
         *                 + " lookups=" + amt.get());
         *         ct.set(0); amt.set(0); mt.set(0);
         *     }
         * }
         */
        for (AbstractShard _m : this.maps.getAL()) {
            // amt.incrementAndGet();
            try {
                if (_m.containsKey(hash)) {
                    if (runningGC)
                        this.lbf.put(hash);
                    return _m;
                }
            } catch (MapClosedException e) {
                this.getReadMap(hash);
            }
        }
        // mt.incrementAndGet();
        return null;
    } finally {
        l.unlock();
    }
}
From source file:org.jactr.core.module.declarative.basic.DefaultDeclarativeModule.java
public DefaultDeclarativeModule() {
    super("declarative");
    _allChunks = new TreeMap<String, IChunk>();
    _allChunkTypes = new TreeMap<String, IChunkType>();
    _activationSorter = new ChunkActivationComparator();
    _chunksToDispose = FastList.newInstance();
    _searchSystem = new DefaultSearchSystem(this);
    _chunkLock = new ReentrantReadWriteLock();
    _chunkTypeLock = new ReentrantReadWriteLock();
    _chunksToEncode = FastList.newInstance();

    _encodeChunksOnRemove = new IActivationBufferListener() {

        public void chunkMatched(ActivationBufferEvent abe) {
            // noop
        }

        public void requestAccepted(ActivationBufferEvent abe) {
            // noop
        }

        public void sourceChunkAdded(ActivationBufferEvent abe) {
            // noop
        }

        public void sourceChunkRemoved(ActivationBufferEvent abe) {
            /*
             * queue up the encoding. we dont encode it here so that any inline
             * listeners after this one will get the actual instance of the removed
             * chunk and not the merged version after encoding (if a merge occurs)
             */
            try {
                _chunkLock.writeLock().lock();
                if (!abe.getSource().handlesEncoding())
                    _chunksToEncode.addAll(abe.getSourceChunks());
            } finally {
                _chunkLock.writeLock().unlock();
            }
        }

        public void sourceChunksCleared(ActivationBufferEvent abe) {
            sourceChunkRemoved(abe);
        }

        public void statusSlotChanged(ActivationBufferEvent abe) {
            // noop
        }

        @SuppressWarnings("unchecked")
        public void parameterChanged(IParameterEvent pe) {
            // noop
        }
    };

    _chunkEncoder = new ModelListenerAdaptor() {

        @Override
        public void cycleStopped(ModelEvent event) {
            /*
             * encode those that need encoding and dispose of those that need disposing
             */
            FastList<IChunk> chunkList = FastList.newInstance();

            // assignment, check thread safety
            try {
                _chunkLock.writeLock().lock();
                chunkList.addAll(_chunksToEncode);
                _chunksToEncode.clear();
            } finally {
                _chunkLock.writeLock().unlock();
            }

            FastList<IActivationBuffer> containingBuffers = FastList.newInstance();

            // fast, destructive iterator where processing order does not matter
            for (IChunk chunk = null; !chunkList.isEmpty() && (chunk = chunkList.removeLast()) != null;) {
                /*
                 * because this chunk might get merged, effectively changing the lock
                 * instance, we do grab a reference to the lock temporarily
                 */
                Lock lock = chunk.getWriteLock();
                try {
                    lock.lock();
                    if (chunk.hasBeenDisposed())
                        continue;
                    if (chunk.isEncoded())
                        chunk.getSubsymbolicChunk().accessed(event.getSimulationTime());
                    else {
                        BufferUtilities.getContainingBuffers(chunk, true, containingBuffers);
                        if (containingBuffers.size() == 0)
                            addChunk(chunk);
                    }
                } finally {
                    lock.unlock();
                    containingBuffers.clear();
                }
            }

            /*
             * now for the disposal
             */
            // assignment, check thread safety
            try {
                _chunkLock.writeLock().lock();
                chunkList.addAll(_chunksToDispose);
                _chunksToDispose.clear();
            } finally {
                _chunkLock.writeLock().unlock();
            }

            // fast, destructive iterator where processing order does not matter
            for (IChunk chunk = null; !chunkList.isEmpty() && (chunk = chunkList.removeLast()) != null;)
                try {
                    chunk.getWriteLock().lock();
                    if (chunk.isEncoded())
                        continue;
                    if (chunk.hasBeenDisposed())
                        continue;
                    // requeue
                    if (isDisposalSuspended(chunk))
                        dispose(chunk);
                    else {
                        BufferUtilities.getContainingBuffers(chunk, true, containingBuffers);
                        if (containingBuffers.size() == 0)
                            disposeInternal(chunk);
                    }
                } finally {
                    containingBuffers.clear();
                    chunk.getWriteLock().unlock();
                }

            FastList.recycle(containingBuffers);
            FastList.recycle(chunkList);
        }
    };
}
From source file:org.opendedup.collections.ProgressiveFileBasedCSMap.java
@Override
public InsertRecord put(ChunkData cm, boolean persist) throws IOException, HashtableFullException {
    // persist = false;
    if (this.isClosed())
        throw new HashtableFullException("Hashtable " + this.fileName + " is close");
    if (kSz.get() >= this.maxSz)
        throw new HashtableFullException("maximum sized reached");
    InsertRecord rec = null;
    // if (persist)
    //     this.flushFullBuffer();
    Lock l = gcLock.readLock();
    l.lock();
    ProgressiveFileByteArrayLongMap bm = null;
    try {
        // long tm = System.currentTimeMillis();
        AbstractShard rm = this.getReadMap(cm.getHash());
        if (rm == null) {
            // this.misses.incrementAndGet();
            // tm = System.currentTimeMillis() - tm;
            while (rec == null) {
                try {
                    if (persist && !cm.recoverd) {
                        try {
                            cm.persistData(true);
                        } catch (HashExistsException e) {
                            return new InsertRecord(false, e.getPos());
                        }
                    }
                    bm = this.getWriteMap();
                    rec = bm.put(cm.getHash(), cm.getcPos());
                    this.lbf.put(cm.getHash());
                } catch (HashtableFullException e) {
                    rec = null;
                } catch (Exception e) {
                    throw e;
                }
            }
        } else {
            try {
                rec = new InsertRecord(false, rm.get(cm.getHash()));
            } catch (MapClosedException e) {
                SDFSLogger.getLog().error("unable to remove", e);
            }
        }
        // this.msTr.addAndGet(tm);
    } finally {
        try {
            if (bm != null) {
                bm.activate();
            }
        } catch (Exception e) {
        } finally {
            l.unlock();
        }
    }
    /*
     * this.trs.incrementAndGet();
     * if (this.trs.get() == 10000) {
     *     long tpm = 0;
     *     if (this.misses.get() > 0)
     *         tpm = this.msTr.get() / this.misses.get();
     *     SDFSLogger.getLog().info("trs=" + this.trs.get() + " misses=" + this.misses.get()
     *             + " mtm=" + this.msTr.get() + " tpm=" + tpm);
     *     this.trs.set(0); this.misses.set(0); this.msTr.set(0);
     * }
     */
    if (rec.getInserted())
        this.kSz.incrementAndGet();
    return rec;
}
From source file:org.apache.bookkeeper.bookie.EntryLogManagerForEntryLogPerLedger.java
private void onCacheEntryRemoval(RemovalNotification<Long, EntryLogAndLockTuple> removedLedgerEntryLogMapEntry) {
    Long ledgerId = removedLedgerEntryLogMapEntry.getKey();
    log.debug("LedgerId {} is being evicted from the cache map because of {}", ledgerId,
            removedLedgerEntryLogMapEntry.getCause());
    EntryLogAndLockTuple entryLogAndLockTuple = removedLedgerEntryLogMapEntry.getValue();
    if (entryLogAndLockTuple == null) {
        log.error("entryLogAndLockTuple is not supposed to be null in entry removal listener for ledger : {}",
                ledgerId);
        return;
    }
    Lock lock = entryLogAndLockTuple.ledgerLock;
    BufferedLogChannelWithDirInfo logChannelWithDirInfo = entryLogAndLockTuple.getEntryLogWithDirInfo();
    if (logChannelWithDirInfo == null) {
        log.error("logChannel for ledger: {} is not supposed to be null in entry removal listener", ledgerId);
        return;
    }
    lock.lock();
    try {
        BufferedLogChannel logChannel = logChannelWithDirInfo.getLogChannel();
        // Append ledgers map at the end of entry log
        try {
            logChannel.appendLedgersMap();
        } catch (Exception e) {
            log.error("Got IOException while trying to appendLedgersMap in cacheEntryRemoval callback", e);
        }
        replicaOfCurrentLogChannels.remove(logChannel.getLogId());
        rotatedLogChannels.add(logChannel);
        entryLogsPerLedgerCounter.removedLedgerFromEntryLogMapCache(ledgerId,
                removedLedgerEntryLogMapEntry.getCause());
    } finally {
        lock.unlock();
    }
}
From source file:net.sf.xfd.provider.ProviderBase.java
private boolean isPossiblySpecial(Stat s) {
    if (s == null)
        return true;

    final Lock lock = mounts.getLock();
    lock.lock();
    try {
        final MountInfo.Mount mount = mounts.mountMap.get(s.st_dev);
        if (mount == null || mounts.isVolatile(mount)) {
            return true;
        }
    } finally {
        lock.unlock();
    }
    return false;
}
From source file:com.cloudera.oryx.als.serving.ServerRecommender.java
@Override
public List<IDValue> recommendToAnonymous(String[] itemIDs, float[] values, int howMany, Rescorer rescorer)
        throws NotReadyException, NoSuchItemException {
    Preconditions.checkArgument(howMany > 0, "howMany must be positive");
    float[] anonymousUserFeatures = buildAnonymousUserFeatures(itemIDs, values);
    LongSet userKnownItemIDs = new LongSet(itemIDs.length);
    for (String itemID : itemIDs) {
        userKnownItemIDs.add(StringLongMapping.toLong(itemID));
    }
    float[][] anonymousFeaturesAsArray = { anonymousUserFeatures };
    Generation generation = getCurrentGeneration();
    Lock yLock = generation.getYLock().readLock();
    yLock.lock();
    try {
        return multithreadedTopN(anonymousFeaturesAsArray, userKnownItemIDs, rescorer, howMany,
                generation.getCandidateFilter());
    } finally {
        yLock.unlock();
    }
}
From source file:com.ning.maven.plugins.dependencyversionscheck.AbstractDependencyVersionsMojo.java
/**
 * Convenience method for a multi map add. Also makes sure that all the actual versions line up.
 */
private void addToResolutionMap(final Map resolutionMap, final VersionResolution resolution) {
    Lock lock = (Lock) resolutionMapLocks.get(resolution.getDependencyName());

    // lock to protect mutation on the list per dependency as this can potentially run in multiple threads
    lock.lock();
    try {
        List resolutions = (List) resolutionMap.get(resolution.getDependencyName());
        if (resolutions == null) {
            resolutions = new ArrayList();
            resolutionMap.put(resolution.getDependencyName(), resolutions);
        }
        for (Iterator it = resolutions.iterator(); it.hasNext();) {
            final VersionResolution existingResolution = (VersionResolution) it.next();
            // TODO: It might be reasonable to fail the build in this case. However, I have yet to see
            // this message... :-)
            if (!existingResolution.getActualVersion().equals(resolution.getActualVersion())) {
                LOG.warn("Dependency '{}' expects version '{}' but '{}' already resolved to '{}'!",
                        new Object[] { resolution.getDependencyName(), resolution.getActualVersion(),
                                existingResolution.getDependencyName(), existingResolution.getActualVersion() });
            }
        }
        LOG.debug("Adding resolution: {}", resolution);
        resolutions.add(resolution);
    } finally {
        lock.unlock();
    }
}
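The example above looks up a per-dependency lock from resolutionMapLocks, so two threads contend only when they touch the same dependency. One way to maintain such a lock-per-key map (a sketch under assumed names, not the plugin's actual implementation; KeyedLocks and lockFor are made up here) is ConcurrentHashMap.computeIfAbsent:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

// Sketch of a lock-per-key registry; lockFor() is a hypothetical helper.
public class KeyedLocks {
    private final ConcurrentMap<String, Lock> locks = new ConcurrentHashMap<>();

    public Lock lockFor(String key) {
        // computeIfAbsent guarantees a single Lock instance per key,
        // even when several threads ask for the same key at once.
        return locks.computeIfAbsent(key, k -> new ReentrantLock());
    }
}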