Example usage for java.util.concurrent.locks.Lock.lock()

Introduction

On this page you can find example usages of java.util.concurrent.locks.Lock.lock().

Prototype

void lock();
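
Lock.lock() acquires the lock, blocking the calling thread until the lock becomes available. The standard usage pattern pairs it with unlock() in a finally block so the lock is always released. A minimal sketch:

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class LockExample {

    private final Lock lock = new ReentrantLock();
    private int counter;

    public void increment() {
        lock.lock();          // blocks until the lock is acquired
        try {
            counter++;        // critical section
        } finally {
            lock.unlock();    // always released, even if the body throws
        }
    }
}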

Usage

From source file:com.lonepulse.zombielink.processor.AsyncEndpointTest.java

/**
 * <p>See {@link #testAsyncSuccess()}.</p>
 */
private void successScenario() throws InterruptedException {

    String subpath = "/asyncsuccess", body = "hello";

    stubFor(get(urlEqualTo(subpath)).willReturn(aResponse().withStatus(200).withBody(body)));

    final Object[] content = new Object[2];

    final Lock lock = new ReentrantLock();
    final Condition condition = lock.newCondition();

    String result = asyncEndpoint.asyncSuccess(new AsyncHandler<String>() {

        @Override
        public void onSuccess(HttpResponse httpResponse, String deserializedContent) {

            lock.lock();

            content[0] = httpResponse;
            content[1] = deserializedContent;

            condition.signal();
            lock.unlock();
        }
    });

    lock.lock();
    condition.await();
    lock.unlock();

    verify(getRequestedFor(urlEqualTo(subpath)));

    assertTrue(content[0] != null);
    assertTrue(content[1] != null);
    assertTrue(content[1].equals(body));

    assertNull(result);
}
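
The handshake above works only if the callback signals after the main thread has started waiting in condition.await(), and await() can also return spuriously. A more defensive variant tracks completion with a flag guarded by the same lock; this is a sketch of the general pattern, not part of the original test:

import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class CompletionSignal {

    private final Lock lock = new ReentrantLock();
    private final Condition done = lock.newCondition();
    private boolean completed;        // guarded by lock

    // Called from the async callback once the response has been handled.
    public void markDone() {
        lock.lock();
        try {
            completed = true;
            done.signal();
        } finally {
            lock.unlock();
        }
    }

    // Called from the waiting test thread.
    public void awaitDone() throws InterruptedException {
        lock.lock();
        try {
            while (!completed) {      // loop guards against spurious wake-ups
                done.await();         // and against a signal sent before await()
            }
        } finally {
            lock.unlock();
        }
    }
}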

From source file:org.apache.bookkeeper.bookie.EntryLogManagerForEntryLogPerLedger.java

@Override
public void setCurrentLogForLedgerAndAddToRotate(long ledgerId, BufferedLogChannel logChannel)
        throws IOException {
    Lock lock = getLock(ledgerId);
    lock.lock();
    try {
        BufferedLogChannel hasToRotateLogChannel = getCurrentLogForLedger(ledgerId);
        boolean newLedgerInEntryLogMapCache = (hasToRotateLogChannel == null);
        logChannel.setLedgerIdAssigned(ledgerId);
        BufferedLogChannelWithDirInfo logChannelWithDirInfo = new BufferedLogChannelWithDirInfo(logChannel);
        ledgerIdEntryLogMap.get(ledgerId).setEntryLogWithDirInfo(logChannelWithDirInfo);
        entryLogsPerLedgerCounter.openNewEntryLogForLedger(ledgerId, newLedgerInEntryLogMapCache);
        replicaOfCurrentLogChannels.put(logChannel.getLogId(), logChannelWithDirInfo);
        if (hasToRotateLogChannel != null) {
            replicaOfCurrentLogChannels.remove(hasToRotateLogChannel.getLogId());
            rotatedLogChannels.add(hasToRotateLogChannel);
        }
    } catch (Exception e) {
        log.error("Received unexpected exception while fetching entry from map for ledger: " + ledgerId, e);
        throw new IOException("Received unexpected exception while fetching entry from map", e);
    } finally {
        lock.unlock();
    }
}
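
Here getLock(ledgerId) returns a lock dedicated to that ledger, so concurrent operations on different ledgers do not contend. A hypothetical keyed-lock helper illustrating the idea (the actual BookKeeper implementation may differ, for example by striping a fixed set of locks):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

class KeyedLocks {

    private final ConcurrentMap<Long, Lock> locks = new ConcurrentHashMap<>();

    // One ReentrantLock per key, created lazily and shared by all callers.
    Lock getLock(long key) {
        return locks.computeIfAbsent(key, k -> new ReentrantLock());
    }
}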

From source file:net.myrrix.online.ServerRecommender.java

private static void updateClusters(long id, float[] featureVector, Collection<IDCluster> clusters,
        Lock clustersReadLock) {
    if (featureVector == null || clusters == null || clusters.isEmpty()) {
        return;
    }

    IDCluster closestCentroid;
    clustersReadLock.lock();
    try {
        closestCentroid = findClosestCentroid(featureVector, clusters);
    } finally {
        clustersReadLock.unlock();
    }

    if (closestCentroid == null) {
        return;
    }

    FastIDSet newMembers = closestCentroid.getMembers();

    boolean removeFromCurrentCluster;
    synchronized (newMembers) {
        // Not already in this cluster, so it may belong to another one; find and remove it there
        removeFromCurrentCluster = newMembers.add(id);
    }

    if (removeFromCurrentCluster) {
        clustersReadLock.lock();
        try {
            for (IDCluster cluster : clusters) {
                FastIDSet oldMembers = cluster.getMembers();
                synchronized (oldMembers) {
                    if (oldMembers.remove(id)) {
                        break;
                    }
                }
            }
        } finally {
            clustersReadLock.unlock();
        }
    }
}

From source file:org.apache.bookkeeper.bookie.EntryLogManagerForEntryLogPerLedger.java

@Override
public boolean commitEntryMemTableFlush() throws IOException {
    // lock it only if there is new data
    // so that the cache access time is not changed
    Set<BufferedLogChannelWithDirInfo> copyOfCurrentLogsWithDirInfo = getCopyOfCurrentLogs();
    for (BufferedLogChannelWithDirInfo currentLogWithDirInfo : copyOfCurrentLogsWithDirInfo) {
        BufferedLogChannel currentLog = currentLogWithDirInfo.getLogChannel();
        if (reachEntryLogLimit(currentLog, 0L)) {
            Long ledgerId = currentLog.getLedgerIdAssigned();
            Lock lock = getLock(ledgerId);
            lock.lock();
            try {
                if (reachEntryLogLimit(currentLog, 0L)) {
                    log.info("Rolling entry logger since it reached size limitation for ledger: {}", ledgerId);
                    createNewLog(ledgerId, "after entry log file is rotated");
                }
            } finally {
                lock.unlock();
            }
        }
    }
    /*
     * In the entrylogperledger case, the SyncThread drives checkpoint
     * logic every flushInterval, so the EntryMemtable does not need to
     * trigger a checkpoint here.
     */
    return false;
}
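
Note how reachEntryLogLimit is checked once without the lock and again while holding it, so the per-ledger lock is only taken when a roll is likely to be needed and the decision is re-validated under the lock. A generic check / lock / re-check sketch with hypothetical helper names, not the BookKeeper code:

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

class CheckLockRecheck {

    private final Lock lock = new ReentrantLock();

    void rollIfNeeded() {
        if (needsRoll()) {             // cheap, unsynchronized fast-path check
            lock.lock();
            try {
                if (needsRoll()) {     // re-check under the lock; another thread
                    doRoll();          // may already have rolled the log
                }
            } finally {
                lock.unlock();
            }
        }
    }

    // Hypothetical helpers for illustration only.
    private boolean needsRoll() { return false; }
    private void doRoll() { }
}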

From source file:org.apache.bookkeeper.bookie.EntryLogManagerForEntryLogPerLedger.java

@Override
BufferedLogChannel getCurrentLogForLedgerForAddEntry(long ledgerId, int entrySize, boolean rollLog)
        throws IOException {
    Lock lock = getLock(ledgerId);
    lock.lock();
    try {
        BufferedLogChannelWithDirInfo logChannelWithDirInfo = getCurrentLogWithDirInfoForLedger(ledgerId);
        BufferedLogChannel logChannel = null;
        if (logChannelWithDirInfo != null) {
            logChannel = logChannelWithDirInfo.getLogChannel();
        }
        boolean reachEntryLogLimit = rollLog ? reachEntryLogLimit(logChannel, entrySize)
                : readEntryLogHardLimit(logChannel, entrySize);
        // Create new log if logSizeLimit reached or current disk is full
        boolean diskFull = (logChannel == null) ? false : logChannelWithDirInfo.isLedgerDirFull();
        boolean allDisksFull = !ledgerDirsManager.hasWritableLedgerDirs();

        /**
         * If the disk of the logChannel is full, or if the entry log limit is
         * reached, or if the logChannel is not initialized, then create a new
         * log. If all disks are full, proceed with the current logChannel,
         * since the bookie must have switched to read-only mode and the
         * addEntry traffic would be coming from GC, so it is ok to proceed in
         * this case.
         */
        if ((diskFull && (!allDisksFull)) || reachEntryLogLimit || (logChannel == null)) {
            if (logChannel != null) {
                logChannel.flushAndForceWriteIfRegularFlush(false);
            }
            createNewLog(ledgerId, ": diskFull = " + diskFull + ", allDisksFull = " + allDisksFull
                    + ", reachEntryLogLimit = " + reachEntryLogLimit + ", logChannel = " + logChannel);
        }

        return getCurrentLogForLedger(ledgerId);
    } finally {
        lock.unlock();
    }
}

From source file:org.geotools.gce.imagemosaic.catalog.GTDataStoreGranuleCatalog.java

@Override
public QueryCapabilities getQueryCapabilities(String typeName) {
    final Lock lock = rwLock.readLock();
    try {
        lock.lock();
        checkStore();
        return tileIndexStore.getFeatureSource(typeName).getQueryCapabilities();
    } catch (IOException e) {
        if (LOGGER.isLoggable(Level.INFO))
            LOGGER.log(Level.INFO, "Unable to collect QueryCapabilities", e);
        return null;
    } finally {
        lock.unlock();
    }
}
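
In the GeoTools snippets, lock() is called inside the try block; if lock() itself threw, the finally block would call unlock() on a lock that was never acquired and fail with IllegalMonitorStateException. The conventional idiom acquires the lock just before the try, as in this general sketch (hypothetical class, not part of GeoTools):

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class ReadLockedResource {

    private final ReadWriteLock rwLock = new ReentrantReadWriteLock();
    private String value = "initial";

    String read() {
        Lock lock = rwLock.readLock();
        lock.lock();                  // acquire before entering the try block
        try {
            return value;             // work performed under the read lock
        } finally {
            lock.unlock();
        }
    }
}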

From source file:org.geotools.gce.imagemosaic.catalog.GTDataStoreGranuleCatalog.java

public void removeType(String typeName) throws IOException {
    Utilities.ensureNonNull("featureType", typeName);
    final Lock lock = rwLock.writeLock();
    try {
        lock.lock();
        checkStore();

        tileIndexStore.removeSchema(typeName);
        removeTypeName(typeName);

    } finally {
        lock.unlock();
    }

}

From source file:org.geotools.gce.imagemosaic.catalog.GTDataStoreGranuleCatalog.java

@Override
public SimpleFeatureType getType(String typeName) throws IOException {
    final Lock lock = rwLock.readLock();
    try {
        lock.lock();
        checkStore();

        if (this.typeNames.isEmpty() || !this.typeNames.contains(typeName)) {
            return null;
        }
        return tileIndexStore.getSchema(typeName);
    } finally {
        lock.unlock();
    }

}

From source file:org.geotools.gce.imagemosaic.catalog.GTDataStoreGranuleCatalog.java

public void computeAggregateFunction(Query query, FeatureCalc function) throws IOException {
    query = mergeHints(query);
    final Lock lock = rwLock.readLock();
    try {
        lock.lock();
        checkStore();
        SimpleFeatureSource fs = tileIndexStore.getFeatureSource(query.getTypeName());

        if (fs instanceof ContentFeatureSource)
            ((ContentFeatureSource) fs).accepts(query, function, null);
        else {
            final SimpleFeatureCollection collection = fs.getFeatures(query);
            collection.accepts(function, null);

        }
    } finally {
        lock.unlock();
    }

}

From source file:org.geotools.gce.imagemosaic.catalog.GTDataStoreGranuleCatalog.java

public void dispose() {
    final Lock l = rwLock.writeLock();
    try {
        l.lock();
        try {
            if (tileIndexStore != null) {
                tileIndexStore.dispose();
            }
            if (multiScaleROIProvider != null) {
                multiScaleROIProvider.dispose();
            }
        } catch (Throwable e) {
            if (LOGGER.isLoggable(Level.FINE)) {
                LOGGER.log(Level.FINE, e.getLocalizedMessage(), e);
            }
        } finally {
            tileIndexStore = null;
            multiScaleROIProvider = null;
        }

    } finally {
        l.unlock();
    }
}