Example usage for java.util.concurrent.locks Lock lock

List of usage examples for java.util.concurrent.locks Lock lock

Introduction

On this page you can find example usages of java.util.concurrent.locks.Lock.lock(). The lock() method acquires the lock, blocking the calling thread until the lock becomes available.

Prototype

void lock();
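
The standard idiom, used by every example below, is to call lock() immediately before a try block and release the lock in the matching finally block so it is freed even if the guarded code throws. A minimal, self-contained sketch:

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class Counter {
    private final Lock lock = new ReentrantLock();
    private long count;

    public void increment() {
        lock.lock(); // blocks until the lock is available
        try {
            count++; // critical section
        } finally {
            lock.unlock(); // always release, even on exception
        }
    }

    public long get() {
        lock.lock();
        try {
            return count;
        } finally {
            lock.unlock();
        }
    }
}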

Usage

From source file:org.opendedup.collections.ShardedProgressiveFileBasedCSMap.java

@Override
public boolean remove(ChunkData cm) throws IOException {
    if (this.isClosed()) {
        throw new IOException("hashtable [" + this.fileName + "] is closed");
    }
    Lock l = gcLock.readLock();
    l.lock();
    try {
        if (!runningGC && !lbf.mightContain(cm.getHash()))
            return false;
        try {
            if (cm.getHash().length == 0)
                return true;
            AbstractShard m = this.getReadMap(cm.getHash(), true);
            if (m == null)
                return false;
            if (!m.remove(cm.getHash())) {
                return false;
            } else {
                cm.setmDelete(true);
                if (this.isClosed()) {
                    throw new IOException("hashtable [" + this.fileName + "] is closed");
                }
                try {
                    this.kSz.decrementAndGet();
                } catch (Exception e) {
                }

                return true;
            }
        } catch (Exception e) {
            SDFSLogger.getLog().fatal("error getting record", e);
            return false;
        }
    } finally {
        l.unlock();
    }
}
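
The remove() method above takes the read side of gcLock (presumably a ReentrantReadWriteLock) so that ordinary operations can run concurrently with each other while a garbage-collection pass, which would need the write side, is excluded. A reduced sketch of that pattern; the class and field names here are illustrative, not the original ones:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class ShardedIndex {
    private final ReadWriteLock gcLock = new ReentrantReadWriteLock();
    private final ConcurrentHashMap<String, Long> index = new ConcurrentHashMap<>();

    /** Normal operations share the read lock, so they do not block each other. */
    public boolean remove(String key) {
        Lock l = gcLock.readLock();
        l.lock();
        try {
            return index.remove(key) != null;
        } finally {
            l.unlock();
        }
    }

    /** A GC pass takes the write lock, excluding all readers while it runs. */
    public void runGC() {
        Lock l = gcLock.writeLock();
        l.lock();
        try {
            index.entrySet().removeIf(e -> e.getValue() < 0); // example criterion
        } finally {
            l.unlock();
        }
    }
}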

From source file:org.opendedup.collections.ShardedProgressiveFileBasedCSMap.java

private long getPos(byte[] hash) throws IOException {
    long pos = -1;
    Lock l = gcLock.readLock();
    l.lock();
    try {
        if (!runningGC && !lbf.mightContain(hash)) {
            return pos;
        }
        AbstractShard k = this.keyLookup.getIfPresent(new ByteArrayWrapper(hash));
        if (k != null) {
            try {
                pos = k.get(hash);
                if (pos != -1) {

                    // m.cache();
                    return pos;
                } else {
                    this.keyLookup.invalidate(new ByteArrayWrapper(hash));
                }
            } catch (MapClosedException e) {
                this.keyLookup.invalidate(new ByteArrayWrapper(hash));
            }
        }

        for (AbstractShard m : this.maps.getAL()) {
            try {
                pos = m.get(hash);
                if (pos != -1) {

                    // m.cache();
                    return pos;
                }
            } catch (MapClosedException e) {
                this.keyLookup.invalidate(new ByteArrayWrapper(hash));
            }
        }
        return pos;
    } finally {
        l.unlock();
    }
}

From source file:org.opendedup.collections.ShardedProgressiveFileBasedCSMap.java

@Override
public InsertRecord put(ChunkData cm, boolean persist) throws IOException, HashtableFullException {
    // persist = false;
    if (this.isClosed())
        throw new HashtableFullException("Hashtable " + this.fileName + " is closed");
    if (kSz.get() >= this.maxSz)
        throw new HashtableFullException("maximum sized reached");
    InsertRecord rec = null;
    // if (persist)
    // this.flushFullBuffer();
    Lock l = gcLock.readLock();
    l.lock();
    ShardedFileByteArrayLongMap bm = null;
    try {
        // long tm = System.currentTimeMillis();
        AbstractShard rm = this.getReadMap(cm.getHash(), false);
        if (rm == null) {
            // this.misses.incrementAndGet();
            // tm = System.currentTimeMillis() - tm;
            while (rec == null) {
                try {
                    if (persist && !cm.recoverd) {
                        try {
                            cm.persistData(true);
                        } catch (org.opendedup.collections.HashExistsException e) {
                            return new InsertRecord(false, e.getPos());
                        }
                    }
                    bm = this.getWriteMap();
                    rec = bm.put(cm.getHash(), cm.getcPos());
                    this.lbf.put(cm.getHash());
                } catch (HashtableFullException e) {
                    rec = null;
                } catch (Exception e) {
                    // this.keyLookup.invalidate(new
                    // ByteArrayWrapper(cm.getHash()));
                    throw e;
                }
            }
        } else {
            try {
                rec = new InsertRecord(false, rm.get(cm.getHash()));
            } catch (MapClosedException e) {
                this.keyLookup.invalidate(new ByteArrayWrapper(cm.getHash()));
                return put(cm, persist);
            }
        }
        // this.msTr.addAndGet(tm);

    } finally {

        l.unlock();
    }
    /*
     * this.trs.incrementAndGet(); if(this.trs.get() == 10000) { long tpm =
     * 0; if(this.misses.get() > 0) tpm = this.msTr.get()/this.misses.get();
     * SDFSLogger.getLog().info("trs=" + this.trs.get() + " misses=" +
     * this.misses.get() + " mtm=" + this.msTr.get() + " tpm=" + tpm);
     * this.trs.set(0); this.misses.set(0); this.msTr.set(0); }
     */
    if (rec.getInserted())
        this.kSz.incrementAndGet();
    return rec;
}

From source file:com.mirth.connect.donkey.server.channel.DestinationConnector.java

@Override
public void run() {
    DonkeyDao dao = null;
    Serializer serializer = channel.getSerializer();
    ConnectorMessage connectorMessage = null;
    int retryIntervalMillis = destinationConnectorProperties.getRetryIntervalMillis();
    Long lastMessageId = null;
    boolean canAcquire = true;
    Lock statusUpdateLock = null;
    queue.registerThreadId();

    do {
        try {
            if (canAcquire) {
                connectorMessage = queue.acquire();
            }

            if (connectorMessage != null) {
                boolean exceptionCaught = false;

                try {
                    /*
                     * If the last message id is equal to the current message id, then the
                     * message was not successfully sent and is being retried, so wait the retry
                     * interval.
                     * 
                     * If the last message id is greater than the current message id, then some
                     * message was not successful, message rotation is on, and the queue is back
                     * to the oldest message, so wait the retry interval.
                     */
                    if (connectorMessage.isAttemptedFirst()
                            || lastMessageId != null && (lastMessageId == connectorMessage.getMessageId()
                                    || (queue.isRotate() && lastMessageId > connectorMessage.getMessageId()
                                            && queue.hasBeenRotated()))) {
                        Thread.sleep(retryIntervalMillis);
                        connectorMessage.setAttemptedFirst(false);
                    }

                    lastMessageId = connectorMessage.getMessageId();

                    dao = daoFactory.getDao();
                    Status previousStatus = connectorMessage.getStatus();

                    Class<?> connectorPropertiesClass = getConnectorProperties().getClass();
                    Class<?> serializedPropertiesClass = null;

                    ConnectorProperties connectorProperties = null;

                    /*
                     * If we're not regenerating connector properties, use the serialized sent
                     * content from the database. It's possible that the channel had Regenerate
                     * Template and Include Filter/Transformer enabled at one point, and then
                     * was disabled later, so we also have to make sure the sent content exists.
                     */
                    if (!destinationConnectorProperties.isRegenerateTemplate()
                            && connectorMessage.getSent() != null) {
                        // Attempt to get the sent properties from the in-memory cache. If it doesn't exist, deserialize from the actual sent content.
                        connectorProperties = connectorMessage.getSentProperties();
                        if (connectorProperties == null) {
                            connectorProperties = serializer.deserialize(
                                    connectorMessage.getSent().getContent(), ConnectorProperties.class);
                            connectorMessage.setSentProperties(connectorProperties);
                        }

                        serializedPropertiesClass = connectorProperties.getClass();
                    } else {
                        connectorProperties = ((DestinationConnectorPropertiesInterface) getConnectorProperties())
                                .clone();
                    }

                    /*
                     * Verify that the connector properties stored in the connector message
                     * match the properties from the current connector. Otherwise the connector
                     * type has changed and the message will be set to errored. If we're
                     * regenerating the connector properties then it doesn't matter.
                     */
                    if (connectorMessage.getSent() == null
                            || destinationConnectorProperties.isRegenerateTemplate()
                            || serializedPropertiesClass == connectorPropertiesClass) {
                        ThreadUtils.checkInterruptedStatus();

                        /*
                         * If a historical queued message has not yet been transformed and the
                         * current queue settings do not include the filter/transformer, force
                         * the message to ERROR.
                         */
                        if (connectorMessage.getSent() == null && !includeFilterTransformerInQueue()) {
                            connectorMessage.setStatus(Status.ERROR);
                            connectorMessage.setProcessingError(
                                    "Queued message has not yet been transformed, and Include Filter/Transformer is currently disabled.");

                            dao.updateStatus(connectorMessage, previousStatus);
                            dao.updateErrors(connectorMessage);
                        } else {
                            if (includeFilterTransformerInQueue()) {
                                transform(dao, connectorMessage, previousStatus,
                                        connectorMessage.getSent() == null);
                            }

                            if (connectorMessage.getStatus() == Status.QUEUED) {
                                /*
                                 * Replace the connector properties if necessary. Again for
                                 * historical queue reasons, we need to check whether the sent
                                 * content exists.
                                 */
                                if (connectorMessage.getSent() == null
                                        || destinationConnectorProperties.isRegenerateTemplate()) {
                                    replaceConnectorProperties(connectorProperties, connectorMessage);
                                    MessageContent sentContent = getSentContent(connectorMessage,
                                            connectorProperties);
                                    connectorMessage.setSent(sentContent);

                                    if (sentContent != null && storageSettings.isStoreSent()) {
                                        ThreadUtils.checkInterruptedStatus();
                                        dao.storeMessageContent(sentContent);
                                    }
                                }

                                Response response = handleSend(connectorProperties, connectorMessage);
                                connectorMessage.setSendAttempts(connectorMessage.getSendAttempts() + 1);

                                if (response == null) {
                                    throw new RuntimeException(
                                            "Received null response from destination " + destinationName + ".");
                                }
                                response.fixStatus(isQueueEnabled());

                                afterSend(dao, connectorMessage, response, previousStatus);
                            }
                        }
                    } else {
                        connectorMessage.setStatus(Status.ERROR);
                        connectorMessage.setProcessingError(
                                "Mismatched connector properties detected in queued message. The connector type may have changed since the message was queued.\nFOUND: "
                                        + serializedPropertiesClass.getSimpleName() + "\nEXPECTED: "
                                        + connectorPropertiesClass.getSimpleName());

                        dao.updateStatus(connectorMessage, previousStatus);
                        dao.updateErrors(connectorMessage);
                    }

                    /*
                     * If we're about to commit a non-QUEUED status, we first need to obtain a
                     * read lock from the queue. This is done so that if something else
                     * invalidates the queue at the same time, we don't incorrectly decrement
                     * the size during the release.
                     */
                    if (connectorMessage.getStatus() != Status.QUEUED) {
                        Lock lock = queue.getStatusUpdateLock();
                        lock.lock();
                        statusUpdateLock = lock;
                    }

                    ThreadUtils.checkInterruptedStatus();
                    dao.commit(storageSettings.isDurable());

                    // Only actually attempt to remove content if the status is SENT
                    if (connectorMessage.getStatus().isCompleted()) {
                        try {
                            channel.removeContent(dao, null, lastMessageId, true, true);
                        } catch (RuntimeException e) {
                            /*
                             * The connector message itself processed successfully, only the
                             * remove content operation failed. In this case just give up and
                             * log an error.
                             */
                            logger.error("Error removing content for message " + lastMessageId + " for channel "
                                    + channel.getName() + " (" + channel.getChannelId() + ") on destination "
                                    + destinationName
                                    + ". This error is expected if the message was manually removed from the queue.",
                                    e);
                        }
                    }
                } catch (RuntimeException e) {
                    logger.error("Error processing queued "
                            + (connectorMessage != null ? connectorMessage.toString() : "message (null)")
                            + " for channel " + channel.getName() + " (" + channel.getChannelId()
                            + ") on destination " + destinationName
                            + ". This error is expected if the message was manually removed from the queue.",
                            e);
                    /*
                     * Invalidate the queue's buffer if any errors occurred. If the message
                     * being processed by the queue was deleted, this will prevent the queue
                     * from trying to process that message repeatedly. Since multiple
                     * queues/threads may need to do this as well, we do not reset the queue's
                     * maps of checked in or deleted messages.
                     */
                    exceptionCaught = true;
                } finally {
                    if (dao != null) {
                        dao.close();
                    }

                    /*
                     * We always want to release the message if it's done (obviously).
                     */
                    if (exceptionCaught) {
                        /*
                         * If a runtime exception was caught, we can't guarantee whether that
                         * message was deleted or is still in the database. When it is released,
                         * the message will be removed from the in-memory queue. However we need
                         * to invalidate the queue before allowing any other threads to be able
                         * to access it in case the message is still in the database.
                         */
                        canAcquire = true;
                        synchronized (queue) {
                            queue.release(connectorMessage, true);

                            // Release the read lock now before calling invalidate
                            if (statusUpdateLock != null) {
                                statusUpdateLock.unlock();
                                statusUpdateLock = null;
                            }

                            queue.invalidate(true, false);
                        }
                    } else if (connectorMessage.getStatus() != Status.QUEUED) {
                        canAcquire = true;
                        queue.release(connectorMessage, true);
                    } else if (destinationConnectorProperties.isRotate()) {
                        canAcquire = true;
                        queue.release(connectorMessage, false);
                    } else {
                        /*
                         * If the message is still queued, no exception occurred, and queue
                         * rotation is disabled, we still want to force the queue to re-acquire
                         * a message if it has been marked as deleted by another process.
                         */
                        canAcquire = queue.releaseIfDeleted(connectorMessage);
                    }

                    // Always release the read lock if we obtained it
                    if (statusUpdateLock != null) {
                        statusUpdateLock.unlock();
                        statusUpdateLock = null;
                    }
                }
            } else {
                /*
                 * This is necessary because there is no blocking peek. If the queue is empty,
                 * wait some time to free up the cpu.
                 */
                Thread.sleep(Constants.DESTINATION_QUEUE_EMPTY_SLEEP_TIME);
            }
        } catch (InterruptedException e) {
            // Stop this thread if it was halted
            return;
        } catch (Exception e) {
            // Always release the read lock if we obtained it
            if (statusUpdateLock != null) {
                statusUpdateLock.unlock();
                statusUpdateLock = null;
            }

            logger.warn("Error in queue thread for channel " + channel.getName() + " (" + channel.getChannelId()
                    + ") on destination " + destinationName + ".\n" + ExceptionUtils.getStackTrace(e));
            try {
                Thread.sleep(retryIntervalMillis);

                /*
                 * Since the thread already slept for the retry interval, set lastMessageId to
                 * null to prevent sleeping again.
                 */
                lastMessageId = null;
            } catch (InterruptedException e1) {
                // Stop this thread if it was halted
                return;
            }
        } finally {
            // Always release the read lock if we obtained it
            if (statusUpdateLock != null) {
                statusUpdateLock.unlock();
                statusUpdateLock = null;
            }
        }
    } while (getCurrentState() == DeployedState.STARTED || getCurrentState() == DeployedState.STARTING);
}
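
The run() method above only acquires queue.getStatusUpdateLock() when the message is leaving the QUEUED state, keeps the reference in a nullable local (statusUpdateLock), and releases it on every catch and finally path. The same conditional-acquire pattern in isolation, with hypothetical names:

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class ConditionalLockExample {
    // hypothetical stand-in for the queue's status-update lock
    private final Lock statusLock = new ReentrantLock();

    public void process(boolean statusChanged) {
        Lock held = null; // only set if we actually acquired the lock
        try {
            if (statusChanged) {
                statusLock.lock();
                held = statusLock; // remember that we hold it
            }
            commit(); // work that must happen while the lock (if any) is held
        } finally {
            if (held != null) {
                held.unlock(); // release only what we acquired
            }
        }
    }

    private void commit() {
        // placeholder for the actual commit work
    }
}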

From source file:org.apereo.portal.portlet.registry.PortletEntityRegistryImpl.java

@Override
public void storePortletEntity(HttpServletRequest request, final IPortletEntity portletEntity) {
    Validate.notNull(portletEntity, "portletEntity can not be null");

    final IUserInstance userInstance = this.userInstanceManager.getUserInstance(request);
    final IPerson person = userInstance.getPerson();
    if (person.isGuest()) {
        //Never persist things for the guest user, just rely on in-memory storage
        return;
    }

    final IPortletEntityId wrapperPortletEntityId = portletEntity.getPortletEntityId();
    final Lock portletEntityLock = this.getPortletEntityLock(request, wrapperPortletEntityId);
    portletEntityLock.lock();
    try {
        final boolean shouldBePersisted = this.shouldBePersisted(portletEntity);

        if (portletEntity instanceof PersistentPortletEntityWrapper) {
            //Unwrap the persistent entity
            final IPortletEntity persistentEntity = ((PersistentPortletEntityWrapper) portletEntity)
                    .getPersistentEntity();

            //Already persistent entity that still has prefs 
            if (shouldBePersisted) {
                try {
                    this.portletEntityDao.updatePortletEntity(persistentEntity);
                } catch (HibernateOptimisticLockingFailureException e) {
                    //Check if this exception is from the entity being deleted from under us.
                    final boolean exists = this.portletEntityDao
                            .portletEntityExists(persistentEntity.getPortletEntityId());
                    if (!exists) {
                        this.logger.warn("The persistent portlet has already been deleted: " + persistentEntity
                                + ". The passed entity should be persistent so a new persistent entity will be created");
                        this.deletePortletEntity(request, portletEntity, true);
                        this.createPersistentEntity(persistentEntity, wrapperPortletEntityId);
                    } else {
                        throw e;
                    }
                }
            }
            //Already persistent entity that should not be, DELETE!
            else {
                //Capture identifiers needed to recreate the entity as session persistent
                final IPortletDefinitionId portletDefinitionId = portletEntity.getPortletDefinitionId();
                final String layoutNodeId = portletEntity.getLayoutNodeId();
                final int userId = portletEntity.getUserId();

                //Delete the persistent entity
                this.deletePortletEntity(request, portletEntity, false);

                //Create a new entity and stick it in the cache
                this.getOrCreatePortletEntity(request, portletDefinitionId, layoutNodeId, userId);
            }
        } else if (portletEntity instanceof SessionPortletEntityImpl) {
        //There are preferences on the interim entity, create and store it
            if (shouldBePersisted) {
                //Remove the session scoped entity from the request and session caches
                this.deletePortletEntity(request, portletEntity, false);

                final IPortletEntity persistentEntity = createPersistentEntity(portletEntity,
                        wrapperPortletEntityId);

                if (this.logger.isTraceEnabled()) {
                    this.logger.trace("Session scoped entity " + wrapperPortletEntityId
                            + " should now be persistent. Deleted it from session cache and created persistent portlet entity "
                            + persistentEntity.getPortletEntityId());
                }
            }
            //Session scoped entity that is still session scoped,
            else {
                //Look for a persistent entity and delete it
                final String channelSubscribeId = portletEntity.getLayoutNodeId();
                final int userId = portletEntity.getUserId();
                IPortletEntity existingPersistentEntity = this.portletEntityDao
                        .getPortletEntity(channelSubscribeId, userId);
                if (existingPersistentEntity != null) {
                    final IPortletEntityId consistentPortletEntityId = this
                            .createConsistentPortletEntityId(existingPersistentEntity);
                    existingPersistentEntity = new PersistentPortletEntityWrapper(existingPersistentEntity,
                            consistentPortletEntityId);

                    this.logger.warn("A persistent portlet entity already exists: " + existingPersistentEntity
                            + ". The passed entity has no preferences so the persistent version will be deleted");
                    this.deletePortletEntity(request, existingPersistentEntity, false);

                    //Add to request cache
                    final PortletEntityCache<IPortletEntity> portletEntityMap = this
                            .getPortletEntityMap(request);
                    portletEntityMap.storeIfAbsentEntity(portletEntity);

                    //Add to session cache
                    final PortletEntityCache<PortletEntityData> portletEntityDataMap = this
                            .getPortletEntityDataMap(request);
                    portletEntityDataMap.storeIfAbsentEntity(
                            ((SessionPortletEntityImpl) portletEntity).getPortletEntityData());
                }
            }
        } else {
            throw new IllegalArgumentException(
                    "Invalid portlet entity implementation passed: " + portletEntity.getClass());
        }
    } finally {
        portletEntityLock.unlock();
    }
}
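
storePortletEntity() looks up a lock dedicated to the specific portlet entity (getPortletEntityLock) and holds it for the whole store, so concurrent requests for the same entity are serialized while unrelated entities proceed in parallel. A minimal per-key locking sketch; the lock registry shown here is an assumption, not uPortal's actual implementation:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class EntityStore {
    // one lock per entity id, created on demand
    private final ConcurrentHashMap<String, Lock> locks = new ConcurrentHashMap<>();

    private Lock lockFor(String entityId) {
        return locks.computeIfAbsent(entityId, id -> new ReentrantLock());
    }

    public void store(String entityId, Object entity) {
        Lock lock = lockFor(entityId);
        lock.lock(); // serialize updates for this entity only
        try {
            persist(entityId, entity);
        } finally {
            lock.unlock();
        }
    }

    private void persist(String entityId, Object entity) {
        // placeholder for the DAO update / create / delete decision
    }
}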

From source file:org.opendedup.collections.ShardedProgressiveFileBasedCSMap.java

private AbstractShard getReadMap(byte[] hash, boolean deep) throws IOException {
    Lock l = gcLock.readLock();
    l.lock();
    ct.incrementAndGet();
    try {

        if (!runningGC && !lbf.mightContain(hash)) {
            // SDFSLogger.getLog().info("not in bloom filter");
            return null;
        }

        /*
         * Iterator<ProgressiveFileByteArrayLongMap> iter =
         * activeReadMaps.iterator(); while (iter.hasNext()) {
         * ProgressiveFileByteArrayLongMap _m = iter.next(); if
         * (_m.containsKey(hash)) return _m; }
         */
        zmt.incrementAndGet();
        AbstractShard _km;
        _km = this.keyLookup.getIfPresent(new ByteArrayWrapper(hash));
        if (_km != null) {
            if (!_km.isClosed()) {
                // long chl = ch.incrementAndGet();
                // SDFSLogger.getLog().info("found ch=" + chl + " sz=" +
                // this.keyLookup.size());
                _km.cache();
                return _km;
            } else {
                this.keyLookup.invalidate(new ByteArrayWrapper(hash));
            }
        }

        /*
        synchronized (ct) {
           if (ct.get() > 10000) {
              SDFSLogger.getLog().info("misses=" + mt.get() + " inserts=" + ct.get() + " lookups=" + amt.get()
             + " attempts=" + zmt.incrementAndGet());
              ct.set(0);
              amt.set(0);
              mt.set(0);
           }
        }
        */

        int sns = this.maxTbls;
        int trs = 0;
        if (this.maxTbls <= 0)
            deep = true;
        for (AbstractShard _m : this.maps.getAL()) {
            trs++;
            if (!deep && trs == this.maxTbls) {
                sns = this.tblrnd.nextInt(0, this.maps.size());
            }
            if (!deep && trs > sns) {
                break;
            }
            amt.incrementAndGet();
            try {
                if (_m.containsKey(hash)) {
                    this.keyLookup.put(new ByteArrayWrapper(hash), _m);
                    _m.cache();
                    return _m;
                }
            } catch (MapClosedException e) {
                return getReadMap(hash, deep);
            }
        }
        mt.incrementAndGet();

        return null;

    } finally {
        l.unlock();
    }

}

From source file:com.cloudera.oryx.als.serving.ServerRecommender.java

private float[] buildAnonymousUserFeatures(String[] itemIDs, float[] values)
        throws NotReadyException, NoSuchItemException {

    Preconditions.checkArgument(values == null || values.length == itemIDs.length,
            "Number of values doesn't match number of items");

    Generation generation = getCurrentGeneration();

    LongObjectMap<float[]> Y = generation.getY();
    Solver ytySolver = generation.getYTYSolver();
    if (ytySolver == null) {
        throw new NotReadyException();
    }

    float[] anonymousUserFeatures = null;
    Lock yLock = generation.getYLock().readLock();

    boolean anyItemIDFound = false;
    for (int j = 0; j < itemIDs.length; j++) {
        String itemID = itemIDs[j];
        float[] itemFeatures;
        yLock.lock();
        try {
            itemFeatures = Y.get(StringLongMapping.toLong(itemID));
        } finally {
            yLock.unlock();
        }
        if (itemFeatures == null) {
            continue;
        }
        anyItemIDFound = true;
        double[] userFoldIn = ytySolver.solveFToD(itemFeatures);
        if (anonymousUserFeatures == null) {
            anonymousUserFeatures = new float[userFoldIn.length];
        }
        double signedFoldInWeight = foldInWeight(0.0, values == null ? 1.0f : values[j]);
        if (signedFoldInWeight != 0.0) {
            for (int i = 0; i < anonymousUserFeatures.length; i++) {
                anonymousUserFeatures[i] += (float) (signedFoldInWeight * userFoldIn[i]);
            }
        }
    }
    if (!anyItemIDFound) {
        throw new NoSuchItemException(Arrays.toString(itemIDs));
    }

    return anonymousUserFeatures;
}
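
buildAnonymousUserFeatures() holds the read lock only around the Y.get(...) lookup and releases it before the fold-in computation, so the expensive math never blocks writers of the feature matrix. A reduced sketch of that lock scoping, with hypothetical names:

import java.util.Map;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class FeatureLookup {
    private final ReadWriteLock yLock = new ReentrantReadWriteLock();
    private final Map<Long, float[]> y; // shared feature matrix, guarded by yLock

    public FeatureLookup(Map<Long, float[]> y) {
        this.y = y;
    }

    public double scoreAll(long[] itemIds) {
        double total = 0.0;
        Lock readLock = yLock.readLock();
        for (long id : itemIds) {
            float[] features;
            readLock.lock(); // hold the lock only for the shared read...
            try {
                features = y.get(id);
            } finally {
                readLock.unlock();
            }
            if (features == null) {
                continue;
            }
            // ...and do the expensive per-item work outside the lock
            for (float f : features) {
                total += f * f;
            }
        }
        return total;
    }
}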

From source file:org.unitime.timetable.solver.studentsct.StudentSolver.java

public Map<String, String> currentSolutionInfo() {
    if (isPassivated())
        return iCurrentSolutionInfoBeforePassivation;
    java.util.concurrent.locks.Lock lock = currentSolution().getLock().readLock();
    lock.lock();
    try {
        return super.currentSolution().getExtendedInfo();
    } finally {
        lock.unlock();
    }
}

From source file:org.unitime.timetable.solver.studentsct.StudentSolver.java

public Map<String, String> bestSolutionInfo() {
    if (isPassivated())
        return iBestSolutionInfoBeforePassivation;
    java.util.concurrent.locks.Lock lock = currentSolution().getLock().readLock();
    lock.lock();
    try {
        return super.currentSolution().getBestInfo();
    } finally {
        lock.unlock();
    }
}

From source file:org.unitime.timetable.solver.studentsct.StudentSolver.java

public void clear() {
    java.util.concurrent.locks.Lock lock = currentSolution().getLock().writeLock();
    lock.lock();
    try {
        for (Request request : currentSolution().getModel().variables()) {
            currentSolution().getAssignment().unassign(0, request);
        }
        currentSolution().clearBest();
    } finally {
        lock.unlock();
    }
}
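
The three StudentSolver methods above show both sides of the solution's ReadWriteLock: currentSolutionInfo() and bestSolutionInfo() take the read lock because they only inspect the solution, while clear() takes the write lock because it unassigns requests and clears the best solution. Reduced to its essentials, with a hypothetical holder class:

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class SolutionHolder {
    private final ReadWriteLock lock = new ReentrantReadWriteLock();
    private final Map<String, String> info = new HashMap<>();

    /** Read-only access: many threads may hold the read lock at once. */
    public Map<String, String> snapshot() {
        Lock readLock = lock.readLock();
        readLock.lock();
        try {
            return new HashMap<>(info);
        } finally {
            readLock.unlock();
        }
    }

    /** Mutation: the write lock excludes all readers and other writers. */
    public void clear() {
        Lock writeLock = lock.writeLock();
        writeLock.lock();
        try {
            info.clear();
        } finally {
            writeLock.unlock();
        }
    }
}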