Example usage for the java.util.concurrent.locks.ReentrantReadWriteLock(boolean) constructor

Introduction

This page collects example usages of the java.util.concurrent.locks.ReentrantReadWriteLock(boolean fair) constructor, drawn from open-source projects.

Prototype

public ReentrantReadWriteLock(boolean fair) 

Document

Creates a new ReentrantReadWriteLock with the given fairness policy.
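
Before the project examples, here is a minimal, self-contained sketch of the typical pattern (the class and field names are illustrative, not taken from any project below): passing true requests a fair ordering policy, while false, the default, allows barging; reads are guarded with the shared read lock and writes with the exclusive write lock.

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class FairLockSketch {
    // true = fair ordering policy; false = default non-fair (barging) policy
    private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock(true);
    private final Lock readLock = rwLock.readLock();
    private final Lock writeLock = rwLock.writeLock();
    private int value;

    public int read() {
        readLock.lock();      // shared: many readers may hold it concurrently
        try {
            return value;
        } finally {
            readLock.unlock();
        }
    }

    public void write(int newValue) {
        writeLock.lock();     // exclusive: blocks readers and other writers
        try {
            value = newValue;
        } finally {
            writeLock.unlock();
        }
    }
}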

Usage

From source file:edu.clemson.cs.nestbed.server.management.configuration.MoteDeploymentConfigurationManagerImpl.java

private MoteDeploymentConfigurationManagerImpl() throws RemoteException {
    super();

    try {
        managerLock = new ReentrantReadWriteLock(true);
        readLock = managerLock.readLock();
        writeLock = managerLock.writeLock();
        moteDepConfigAdapter = AdapterFactory.createMoteDeploymentConfigurationAdapter(AdapterType.SQL);
        moteDepConfigs = moteDepConfigAdapter.readMoteDeploymentConfigurations();

        log.debug("MoteDeploymentConfigurations read:\n" + moteDepConfigs);
    } catch (AdaptationException ex) {
        throw new RemoteException("AdaptationException", ex);
    } catch (Exception ex) {
        String msg = "Exception in MoteDeploymentConfigurationManagerImpl";
        log.error(msg, ex);
        throw new RemoteException(msg, ex);
    }
}

From source file:edu.clemson.cs.nestbed.server.management.configuration.ProgramProfilingSymbolManagerImpl.java

private ProgramProfilingSymbolManagerImpl() throws RemoteException {
    super();

    try {
        this.managerLock = new ReentrantReadWriteLock(true);
        this.readLock = managerLock.readLock();
        this.writeLock = managerLock.writeLock();
        progProfSymbolAdapter = AdapterFactory.createProgramProfilingSymbolAdapter(AdapterType.SQL);
        progProfSymbols = progProfSymbolAdapter.readProgramProfilingSymbols();

        log.debug("ProgramProfilingSymbols read:\n" + progProfSymbols);
    } catch (AdaptationException ex) {
        throw new RemoteException("AdaptationException", ex);
    } catch (Exception ex) {
        String msg = "Exception in ProgramProfilingSymbolManagerImpl";
        log.error(msg, ex);
        throw new RemoteException(msg, ex);
    }
}

From source file:edu.clemson.cs.nestbed.server.management.configuration.ProgramProfilingMessageSymbolManagerImpl.java

private ProgramProfilingMessageSymbolManagerImpl() throws RemoteException {
    super();

    try {
        this.managerLock = new ReentrantReadWriteLock(true);
        this.readLock = managerLock.readLock();
        this.writeLock = managerLock.writeLock();
        ppmsAdapter = AdapterFactory.createProgramProfilingMessageSymbolAdapter(AdapterType.SQL);

        ppmSymbols = ppmsAdapter.readProgramProfilingMessageSymbols();

        log.debug("ProgramProfilingMessageSymbols read:\n" + ppmSymbols);
    } catch (AdaptationException ex) {
        throw new RemoteException("AdaptationException", ex);
    } catch (Exception ex) {
        String msg = "Exception in " + "ProgramProfilingMessageSymbolManagerImpl";
        log.error(msg, ex);
        throw new RemoteException(msg, ex);
    }
}

From source file:io.hops.hopsworks.common.security.CertificateMaterializer.java

/**
 * Do NOT use this method directly. Use {@see CertificateMaterializer#getReadLockForKey}
 * and {@see CertificateMaterializer#getWriteLockForKey} instead.
 *
 * @param key Key to take the lock for
 * @param createIfMissing Create a lock if the key is not already associated with one
 * @return The lock for that key, or null
 */
private ReentrantReadWriteLock getLockForKey(MaterialKey key, boolean createIfMissing) {
    if (createIfMissing) {
        materialKeyLocks.putIfAbsent(key, new ReentrantReadWriteLock(true));
    }
    return materialKeyLocks.get(key);
}
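
This example creates a fair per-key lock on demand with putIfAbsent on a concurrent map, and the CmsJspLoader example further down does the same inside a synchronized block. As a hedged aside (the PerKeyLocks class below is illustrative, not part of the Hopsworks source), the same pattern is often written on Java 8+ with ConcurrentHashMap.computeIfAbsent, which allocates a new lock only when the key is actually missing:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class PerKeyLocks<K> {
    private final ConcurrentMap<K, ReentrantReadWriteLock> lockByKey = new ConcurrentHashMap<>();

    // Creates a fair lock for the key on first use, then reuses it on later calls.
    public ReentrantReadWriteLock lockFor(K key) {
        return lockByKey.computeIfAbsent(key, k -> new ReentrantReadWriteLock(true));
    }
}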

From source file:org.alfresco.repo.search.impl.lucene.index.IndexInfo.java

/**
 * Construct an index in the given directory.
 *
 * @param indexDirectory File
 * @param config LuceneConfig
 */
private IndexInfo(File indexDirectory, LuceneConfig config) {
    super();
    initialiseTransitions();
    this.config = config;

    if (config != null) {
        this.readWriteLock = new ReentrantReadWriteLock(config.getFairLocking());
        this.maxFieldLength = config.getIndexerMaxFieldLength();
        this.threadPoolExecutor = config.getThreadPoolExecutor();
        IndexInfo.useNIOMemoryMapping = config.getUseNioMemoryMapping();
        this.maxDocsForInMemoryMerge = config.getMaxDocsForInMemoryMerge();
        this.maxRamInMbForInMemoryMerge = config.getMaxRamInMbForInMemoryMerge();
        this.maxDocsForInMemoryIndex = config.getMaxDocsForInMemoryIndex();
        this.maxRamInMbForInMemoryIndex = config.getMaxRamInMbForInMemoryIndex();
        this.writerMaxBufferedDocs = config.getWriterMaxBufferedDocs();
        this.writerRamBufferSizeMb = config.getWriterRamBufferSizeMb();
        this.writerMergeFactor = config.getWriterMergeFactor();
        this.writerMaxMergeDocs = config.getWriterMaxMergeDocs();
        this.mergerMaxBufferedDocs = config.getMergerMaxBufferedDocs();
        this.mergerRamBufferSizeMb = config.getMergerRamBufferSizeMb();
        this.mergerMergeFactor = config.getMergerMergeFactor();
        this.mergerMaxMergeDocs = config.getMergerMaxMergeDocs();
        this.termIndexInterval = config.getTermIndexInterval();
        this.mergerTargetOverlays = config.getMergerTargetOverlayCount();
        this.mergerTargetIndexes = config.getMergerTargetIndexCount();
        this.mergerTargetOverlaysBlockingFactor = config.getMergerTargetOverlaysBlockingFactor();
        // Work out the relative path of the index
        try {
            String indexRoot = new File(config.getIndexRootLocation()).getCanonicalPath();
            this.relativePath = indexDirectory.getCanonicalPath().substring(indexRoot.length() + 1);
        } catch (IOException e) {
            throw new AlfrescoRuntimeException("Failed to determine index relative path", e);
        }
    } else {
        this.readWriteLock = new ReentrantReadWriteLock(false);

        // need a default thread pool ....
        TraceableThreadFactory threadFactory = new TraceableThreadFactory();
        threadFactory.setThreadDaemon(true);
        threadFactory.setThreadPriority(5);

        threadPoolExecutor = new ThreadPoolExecutor(10, 10, 90, TimeUnit.SECONDS,
                new LinkedBlockingQueue<Runnable>(), threadFactory, new ThreadPoolExecutor.CallerRunsPolicy());

        // Create a 'fake' relative path
        try {
            this.relativePath = indexDirectory.getCanonicalPath();
            int sepIndex = this.relativePath.indexOf(File.separator);
            if (sepIndex != -1) {
                if (this.relativePath.length() > sepIndex + 1) {
                    this.relativePath = this.relativePath.substring(sepIndex + 1);
                } else {
                    this.relativePath = "";
                }
            }
        } catch (IOException e) {
            throw new AlfrescoRuntimeException("Failed to determine index relative path", e);
        }

    }

    // Create an empty in memory index
    IndexWriter writer;
    try {
        writer = new IndexWriter(emptyIndex, new AlfrescoStandardAnalyser(), true, MaxFieldLength.LIMITED);
        writer.setUseCompoundFile(writerUseCompoundFile);
        writer.setMaxBufferedDocs(writerMaxBufferedDocs);
        writer.setRAMBufferSizeMB(writerRamBufferSizeMb);
        writer.setMergeFactor(writerMergeFactor);
        writer.setMaxMergeDocs(writerMaxMergeDocs);
        writer.setWriteLockTimeout(writeLockTimeout);
        writer.setMaxFieldLength(maxFieldLength);
        writer.setTermIndexInterval(termIndexInterval);
        writer.setMergeScheduler(new SerialMergeScheduler());
        writer.setMergePolicy(new LogDocMergePolicy());
        writer.close();
    } catch (IOException e) {
        throw new IndexerException("Failed to create an empty in memory index!");
    }

    this.indexDirectory = indexDirectory;

    // Make sure the directory exists
    if (!this.indexDirectory.exists()) {
        if (!this.indexDirectory.mkdirs()) {
            throw new AlfrescoRuntimeException("Failed to create index directory");
        }
    }
    if (!this.indexDirectory.isDirectory()) {
        throw new AlfrescoRuntimeException("The index must be held in a directory");
    }

    // Create the info files.
    File indexInfoFile = new File(this.indexDirectory, INDEX_INFO);
    File indexInfoBackupFile = new File(this.indexDirectory, INDEX_INFO_BACKUP);
    if (createFile(indexInfoFile) && createFile(indexInfoBackupFile)) {
        // If both files required creation this is a new index
        version = 0;
    }

    // Open the files and channels for the index info file and the backup
    this.indexInfoRAF = openFile(indexInfoFile);
    this.indexInfoChannel = this.indexInfoRAF.getChannel();

    this.indexInfoBackupRAF = openFile(indexInfoBackupFile);
    this.indexInfoBackupChannel = this.indexInfoBackupRAF.getChannel();

    // If the index found no info files (i.e. it is new), check if there is
    // an old style index and convert it.
    if (version == 0) {
        // Check if an old style index exists

        final File oldIndex = new File(this.indexDirectory, OLD_INDEX);
        if (IndexReader.indexExists(oldIndex)) {
            getWriteLock();
            try {
                doWithFileLock(new LockWork<Object>() {
                    public Object doWork() throws Exception {
                        IndexWriter writer;
                        try {
                            writer = new IndexWriter(oldIndex, new AlfrescoStandardAnalyser(), false,
                                    MaxFieldLength.LIMITED);
                            writer.setUseCompoundFile(writerUseCompoundFile);
                            writer.setMaxBufferedDocs(writerMaxBufferedDocs);
                            writer.setRAMBufferSizeMB(writerRamBufferSizeMb);
                            writer.setMergeFactor(writerMergeFactor);
                            writer.setMaxMergeDocs(writerMaxMergeDocs);
                            writer.setWriteLockTimeout(writeLockTimeout);
                            writer.setMaxFieldLength(maxFieldLength);
                            writer.setTermIndexInterval(termIndexInterval);
                            writer.setMergeScheduler(new SerialMergeScheduler());
                            writer.setMergePolicy(new LogDocMergePolicy());
                            writer.optimize();
                            long docs = writer.numDocs();
                            writer.close();

                            IndexEntry entry = new IndexEntry(IndexType.INDEX, OLD_INDEX, "",
                                    TransactionStatus.COMMITTED, "", docs, 0, false);
                            indexEntries.put(OLD_INDEX, entry);

                            writeStatus();

                            // The index exists and we should initialise the single reader
                            registerReferenceCountingIndexReader(entry.getName(),
                                    buildReferenceCountingIndexReader(entry.getName(),
                                            entry.getDocumentCount()));
                        } catch (IOException e) {
                            throw new IndexerException("Failed to optimise old index");
                        }
                        return null;
                    }

                    public boolean canRetry() {
                        return false;
                    }
                });
            } finally {
                releaseWriteLock();
            }

        }
    }

    // The index exists
    else if (version == -1) {
        getWriteLock();
        try {
            doWithFileLock(new LockWork<Object>() {
                public Object doWork() throws Exception {
                    setStatusFromFile();

                    // If the index is not shared we can do some easy clean
                    // up
                    if (!indexIsShared) {
                        HashSet<String> deletable = new HashSet<String>();
                        // clean up
                        for (IndexEntry entry : indexEntries.values()) {
                            switch (entry.getStatus()) {
                            // states which can be deleted
                            // We could check prepared states can be
                            // committed.
                            case ACTIVE:
                            case MARKED_ROLLBACK:
                            case NO_TRANSACTION:
                            case PREPARING:
                            case ROLLEDBACK:
                            case ROLLINGBACK:
                            case MERGE_TARGET:
                            case UNKNOWN:
                            case PREPARED:
                            case DELETABLE:
                                if (s_logger.isInfoEnabled()) {
                                    s_logger.info("Deleting index entry " + entry);
                                }
                                entry.setStatus(TransactionStatus.DELETABLE);
                                deletable.add(entry.getName());
                                break;
                            // States which are in mid-transition which we
                            // can roll back to the committed state
                            case COMMITTED_DELETING:
                            case MERGE:
                                if (s_logger.isInfoEnabled()) {
                                    s_logger.info("Resetting merge to committed " + entry);
                                }
                                entry.setStatus(TransactionStatus.COMMITTED);
                                registerReferenceCountingIndexReader(entry.getName(),
                                        buildReferenceCountingIndexReader(entry.getName(),
                                                entry.getDocumentCount()));
                                break;
                            // Complete committing (which is post database
                            // commit)
                            case COMMITTING:
                                // do the commit
                                if (s_logger.isInfoEnabled()) {
                                    s_logger.info("Committing " + entry);
                                }
                                entry.setStatus(TransactionStatus.COMMITTED);
                                registerReferenceCountingIndexReader(entry.getName(),
                                        buildReferenceCountingIndexReader(entry.getName(),
                                                entry.getDocumentCount()));
                                break;
                            // States that require no action
                            case COMMITTED:
                                registerReferenceCountingIndexReader(entry.getName(),
                                        buildReferenceCountingIndexReader(entry.getName(),
                                                entry.getDocumentCount()));
                                break;
                            default:
                                // nothing to do
                                break;
                            }
                        }
                        // Delete entries that are not required
                        invalidateMainReadersFromFirst(deletable);
                        for (String id : deletable) {
                            indexEntries.remove(id);
                        }
                        clearOldReaders();

                        cleaner.schedule();

                        merger.schedule();

                        // persist the new state
                        writeStatus();
                    }
                    return null;
                }

                public boolean canRetry() {
                    return false;
                }

            });
        } finally {
            releaseWriteLock();
        }
    }
    // Need to do with file lock - must share info about other readers to support this with shared indexer
    // implementation

    getWriteLock();
    try {
        LockWork<Object> work = new DeleteUnknownGuidDirectories();
        doWithFileLock(work);
    } finally {
        releaseWriteLock();
    }

    // Run the cleaner around every 20 seconds - this just makes the request to the thread pool
    timer.schedule(new TimerTask() {
        @Override
        public void run() {
            cleaner.schedule();
        }
    }, 0, 20000);

    publishDiscoveryEvent();
}

From source file:org.opencms.loader.CmsJspLoader.java

/**
 * Returns the read-write-lock for the given jsp vfs name.<p>
 *
 * @param jspVfsName the jsp vfs name
 * 
 * @return the read-write-lock
 */
private ReentrantReadWriteLock getFileLock(String jspVfsName) {

    synchronized (m_fileLocks) {
        if (!m_fileLocks.containsKey(jspVfsName)) {
            m_fileLocks.put(jspVfsName, new ReentrantReadWriteLock(true));
        }
        return m_fileLocks.get(jspVfsName);
    }
}

From source file:com.microsoft.tooling.msservices.helpers.azure.AzureManagerImpl.java

private void loadSubscriptions() {
    String json = DefaultLoader.getIdeHelper().getProperty(AppSettingsNames.AZURE_SUBSCRIPTIONS);

    if (!StringHelper.isNullOrWhiteSpace(json)) {
        try {
            Type subscriptionsType = new TypeToken<HashMap<String, Subscription>>() {
            }.getType();
            subscriptions = gson.fromJson(json, subscriptionsType);
        } catch (JsonSyntaxException ignored) {
            DefaultLoader.getIdeHelper().unsetProperty(AppSettingsNames.AZURE_SUBSCRIPTIONS);
        }
    } else {
        subscriptions = new HashMap<String, Subscription>();
    }

    for (String subscriptionId : subscriptions.keySet()) {
        lockBySubscriptionId.put(subscriptionId, new ReentrantReadWriteLock(false));
    }
}

From source file:com.microsoft.tooling.msservices.helpers.azure.AzureManagerImpl.java

@NotNull
private ReentrantReadWriteLock getSubscriptionLock(@NotNull String subscriptionId, boolean createOnMissing)
        throws AzureCmdException {
    Lock lock = createOnMissing ? subscriptionMapLock.writeLock() : subscriptionMapLock.readLock();
    lock.lock();

    try {
        if (!lockBySubscriptionId.containsKey(subscriptionId)) {
            if (createOnMissing) {
                lockBySubscriptionId.put(subscriptionId, new ReentrantReadWriteLock(false));
            } else {
                throw new AzureCmdException("No authentication information for the specified Subscription Id");
            }
        }

        return lockBySubscriptionId.get(subscriptionId);
    } finally {
        lock.unlock();
    }
}

From source file:com.microsoft.tooling.msservices.helpers.azure.AzureManagerImpl.java

@NotNull
private ReentrantReadWriteLock getUserLock(@NotNull UserInfo userInfo, boolean createOnMissing)
        throws AzureCmdException {
    Lock lock = createOnMissing ? userMapLock.writeLock() : userMapLock.readLock();
    lock.lock();

    try {
        if (!lockByUser.containsKey(userInfo)) {
            if (createOnMissing) {
                lockByUser.put(userInfo, new ReentrantReadWriteLock(false));
            } else {
                throw new AzureCmdException("No access token for the specified User Information");
            }
        }

        return lockByUser.get(userInfo);
    } finally {
        lock.unlock();
    }
}