Example usage for java.util.concurrent.locks Lock unlock

Introduction

On this page you can find example usages of java.util.concurrent.locks.Lock.unlock().

Prototype

void unlock();

Document

Releases the lock.
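
Every call to lock() must be balanced by a call to unlock() from the same thread, and the standard idiom is to release the lock in a finally block so it is freed even when the guarded code throws. A minimal sketch of that idiom (the Counter class below is illustrative, not taken from any of the sources on this page):

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class Counter {
    private final Lock lock = new ReentrantLock();
    private int count;

    public void increment() {
        lock.lock();
        try {
            // Critical section: at most one thread at a time runs this.
            count++;
        } finally {
            // Release in finally so an exception cannot leak the lock.
            lock.unlock();
        }
    }
}

All of the examples below follow this lock()/try/finally/unlock() shape, differing mainly in how the Lock instance is obtained.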

Usage

From source file:org.apereo.portal.portlet.registry.PortletEntityRegistryImpl.java

@Override
public void storePortletEntity(HttpServletRequest request, final IPortletEntity portletEntity) {
    Validate.notNull(portletEntity, "portletEntity can not be null");

    final IUserInstance userInstance = this.userInstanceManager.getUserInstance(request);
    final IPerson person = userInstance.getPerson();
    if (person.isGuest()) {
        //Never persist things for the guest user, just rely on in-memory storage
        return;
    }

    final IPortletEntityId wrapperPortletEntityId = portletEntity.getPortletEntityId();
    final Lock portletEntityLock = this.getPortletEntityLock(request, wrapperPortletEntityId);
    portletEntityLock.lock();
    try {
        final boolean shouldBePersisted = this.shouldBePersisted(portletEntity);

        if (portletEntity instanceof PersistentPortletEntityWrapper) {
            //Unwrap the persistent entity
            final IPortletEntity persistentEntity = ((PersistentPortletEntityWrapper) portletEntity)
                    .getPersistentEntity();

            //Already persistent entity that still has prefs 
            if (shouldBePersisted) {
                try {
                    this.portletEntityDao.updatePortletEntity(persistentEntity);
                } catch (HibernateOptimisticLockingFailureException e) {
                    //Check if this exception is from the entity being deleted from under us.
                    final boolean exists = this.portletEntityDao
                            .portletEntityExists(persistentEntity.getPortletEntityId());
                    if (!exists) {
                        this.logger.warn("The persistent portlet has already been deleted: " + persistentEntity
                                + ". The passed entity should be persistent so a new persistent entity will be created");
                        this.deletePortletEntity(request, portletEntity, true);
                        this.createPersistentEntity(persistentEntity, wrapperPortletEntityId);
                    } else {
                        throw e;
                    }
                }
            }
            //Already persistent entity that should not be, DELETE!
            else {
                //Capture identifiers needed to recreate the entity as session persistent
                final IPortletDefinitionId portletDefinitionId = portletEntity.getPortletDefinitionId();
                final String layoutNodeId = portletEntity.getLayoutNodeId();
                final int userId = portletEntity.getUserId();

                //Delete the persistent entity
                this.deletePortletEntity(request, portletEntity, false);

                //Create a new entity and stick it in the cache
                this.getOrCreatePortletEntity(request, portletDefinitionId, layoutNodeId, userId);
            }
        } else if (portletEntity instanceof SessionPortletEntityImpl) {
            //There are preferences on the interim entity, create and store it
            if (shouldBePersisted) {
                //Remove the session scoped entity from the request and session caches
                this.deletePortletEntity(request, portletEntity, false);

                final IPortletEntity persistentEntity = createPersistentEntity(portletEntity,
                        wrapperPortletEntityId);

                if (this.logger.isTraceEnabled()) {
                    this.logger.trace("Session scoped entity " + wrapperPortletEntityId
                            + " should now be persistent. Deleted it from session cache and created persistent portlet entity "
                            + persistentEntity.getPortletEntityId());
                }
            }
            //Session scoped entity that is still session scoped,
            else {
                //Look for a persistent entity and delete it
                final String channelSubscribeId = portletEntity.getLayoutNodeId();
                final int userId = portletEntity.getUserId();
                IPortletEntity existingPersistentEntity = this.portletEntityDao
                        .getPortletEntity(channelSubscribeId, userId);
                if (existingPersistentEntity != null) {
                    final IPortletEntityId consistentPortletEntityId = this
                            .createConsistentPortletEntityId(existingPersistentEntity);
                    existingPersistentEntity = new PersistentPortletEntityWrapper(existingPersistentEntity,
                            consistentPortletEntityId);

                    this.logger.warn("A persistent portlet entity already exists: " + existingPersistentEntity
                            + ". The passed entity has no preferences so the persistent version will be deleted");
                    this.deletePortletEntity(request, existingPersistentEntity, false);

                    //Add to request cache
                    final PortletEntityCache<IPortletEntity> portletEntityMap = this
                            .getPortletEntityMap(request);
                    portletEntityMap.storeIfAbsentEntity(portletEntity);

                    //Add to session cache
                    final PortletEntityCache<PortletEntityData> portletEntityDataMap = this
                            .getPortletEntityDataMap(request);
                    portletEntityDataMap.storeIfAbsentEntity(
                            ((SessionPortletEntityImpl) portletEntity).getPortletEntityData());
                }
            }
        } else {
            throw new IllegalArgumentException(
                    "Invalid portlet entity implementation passed: " + portletEntity.getClass());
        }
    } finally {
        portletEntityLock.unlock();
    }
}

From source file:org.apache.hadoop.hive.metastore.HiveMetaStore.java

private static void signalOtherThreadsToStart(final TServer server, final Lock startLock,
        final Condition startCondition, final AtomicBoolean startedServing) {
    // A simple thread to wait until the server has started and then signal the other threads to
    // begin
    Thread t = new Thread() {
        @Override
        public void run() {
            do {
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                    LOG.warn("Signalling thread was interuppted: " + e.getMessage());
                }
            } while (!server.isServing());
            startLock.lock();
            try {
                startedServing.set(true);
                startCondition.signalAll();
            } finally {
                startLock.unlock();
            }
        }
    };
    t.start();
}

From source file:com.threecrickets.prudence.cache.SqlCache.java

public void store(String key, CacheEntry entry) {
    logger.fine("Store: " + key);

    Lock lock = lockSource.getWriteLock(key);
    lock.lock();
    try {
        Connection connection = connect();
        if (connection == null)
            return;

        try {
            boolean tryInsert = true;

            // Try updating this key

            String sql = "UPDATE " + cacheTableName
                    + " SET data=?, media_type=?, language=?, character_set=?, encoding=?, modification_date=?, tag=?, headers=?, expiration_date=?, document_modification_date=? WHERE key=?";
            PreparedStatement statement = connection.prepareStatement(sql);
            try {
                statement.setBytes(1,
                        entry.getString() != null ? entry.getString().getBytes() : entry.getBytes());
                statement.setString(2, entry.getMediaType() != null ? entry.getMediaType().getName() : null);
                statement.setString(3, entry.getLanguage() != null ? entry.getLanguage().getName() : null);
                statement.setString(4,
                        entry.getCharacterSet() != null ? entry.getCharacterSet().getName() : null);
                statement.setString(5, entry.getEncoding() != null ? entry.getEncoding().getName() : null);
                statement.setTimestamp(6,
                        entry.getModificationDate() != null
                                ? new Timestamp(entry.getModificationDate().getTime())
                                : null);
                statement.setString(7, entry.getTag() != null ? entry.getTag().format() : null);
                statement.setString(8, entry.getHeaders() == null ? "" : serializeHeaders(entry.getHeaders()));
                statement.setTimestamp(9,
                        entry.getExpirationDate() != null ? new Timestamp(entry.getExpirationDate().getTime())
                                : null);
                statement.setTimestamp(10,
                        entry.getDocumentModificationDate() != null
                                ? new Timestamp(entry.getDocumentModificationDate().getTime())
                                : null);
                statement.setString(11, key);
                if (!statement.execute() && statement.getUpdateCount() > 0) {
                    logger.fine("Updated " + key);

                    // Update worked, so no need to try insertion

                    tryInsert = false;
                }
            } finally {
                statement.close();
            }

            if (tryInsert) {
                // Try inserting this key

                // But first make sure we have room...

                int size = countEntries(connection);
                if (size >= maxSize) {
                    prune();

                    size = countEntries(connection);
                    if (size >= maxSize) {
                        logger.fine("No room in cache (" + size + ", " + maxSize + ")");
                        return;
                    }
                }

                // delete( connection, key );

                sql = "INSERT INTO " + cacheTableName
                        + " (key, data, media_type, language, character_set, encoding, modification_date, tag, headers, expiration_date, document_modification_date) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
                statement = connection.prepareStatement(sql);
                try {
                    statement.setString(1, key);
                    statement.setBytes(2,
                            entry.getString() != null ? entry.getString().getBytes() : entry.getBytes());
                    statement.setString(3, getName(entry.getMediaType()));
                    statement.setString(4, getName(entry.getLanguage()));
                    statement.setString(5, getName(entry.getCharacterSet()));
                    statement.setString(6, getName(entry.getEncoding()));
                    statement.setTimestamp(7,
                            entry.getModificationDate() != null
                                    ? new Timestamp(entry.getModificationDate().getTime())
                                    : null);
                    statement.setString(8, entry.getTag() != null ? entry.getTag().format() : null);
                    statement.setString(9,
                            entry.getHeaders() == null ? "" : serializeHeaders(entry.getHeaders()));
                    statement.setTimestamp(10,
                            entry.getExpirationDate() != null
                                    ? new Timestamp(entry.getExpirationDate().getTime())
                                    : null);
                    statement.setTimestamp(11,
                            entry.getDocumentModificationDate() != null
                                    ? new Timestamp(entry.getDocumentModificationDate().getTime())
                                    : null);
                    statement.execute();
                } finally {
                    statement.close();
                }
            }

            // Clean out existing tags for this key

            sql = "DELETE FROM " + cacheTagsTableName + " WHERE key=?";
            statement = connection.prepareStatement(sql);
            try {
                statement.setString(1, key);
                statement.execute();
            } finally {
                statement.close();
            }

            // Add tags for this key

            String[] tags = entry.getTags();
            if ((tags != null) && (tags.length > 0)) {
                sql = "INSERT INTO " + cacheTagsTableName + " (key, tag) VALUES (?, ?)";
                statement = connection.prepareStatement(sql);
                statement.setString(1, key);
                try {
                    for (String tag : tags) {
                        statement.setString(2, tag);
                        statement.execute();
                    }
                } finally {
                    statement.close();
                }
            }
        } finally {
            connection.close();
        }
    } catch (SQLException x) {
        logger.log(Level.WARNING, "Could not store cache entry", x);
    } finally {
        lock.unlock();
    }
}
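
The lockSource.getWriteLock(key) call above implies a registry that hands out one lock per cache key, so writers for different keys do not contend. A plausible sketch of such a registry, built on ConcurrentHashMap and ReentrantReadWriteLock (an assumption about the shape of Prudence's lock source, not its actual implementation):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockSource {
    // One read/write lock per cache key, created on demand.
    private final ConcurrentMap<String, ReadWriteLock> locks = new ConcurrentHashMap<>();

    private ReadWriteLock lockFor(String key) {
        return locks.computeIfAbsent(key, k -> new ReentrantReadWriteLock());
    }

    public Lock getReadLock(String key) {
        return lockFor(key).readLock();
    }

    public Lock getWriteLock(String key) {
        return lockFor(key).writeLock();
    }
}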

From source file:org.nema.medical.mint.server.processor.StudyUpdateProcessor.java

@Override
public void run() {
    LOG.debug("Execution started.");

    String jobID = jobFolder.getName();
    String studyUUID = studyFolder.getName();

    JobInfo jobInfo = new JobInfo();
    jobInfo.setId(jobID);
    jobInfo.setStudyID(studyUUID);

    Lock lock = new ReentrantLock(), oldLock;

    oldLock = studyIdLocks.putIfAbsent(studyUUID, lock);
    if (oldLock != null) {
        LOG.debug("Lock was an existing lock.");
        lock = oldLock;
    }

    if (lock.tryLock()) {
        try {
            LOG.debug("Got lock, and starting process");

            //Not calling mkdir on this because they better already exist
            File changelogRoot = new File(studyFolder, "changelog");

            if (!changelogRoot.exists()) {
                throw new FileNotFoundException("The changelog for study uuid " + studyUUID
                        + " does not exist, may need to do a create first.");
            }

            /*
             * Need to load new study information
             */
            final StudyMetadata newStudy = StudyIO.loadStudy(jobFolder);
            final String typeName = newStudy.getType();
            final MetadataType dataDictionary = availableTypes.get(typeName);
            if (dataDictionary == null) {
                throw new RuntimeException("Invalid study type " + typeName);
            }

            if (newStudy.getVersion() >= 0) {
                throw new RuntimeException("Study update data specifies a version [" + newStudy.getVersion()
                        + "]; versions are controlled by server, not client");
            }

            try {
                StorageUtil.validateStudy(newStudy, dataDictionary, jobFolder);
            } catch (final StudyTraversals.TraversalException e) {
                throw new RuntimeException("Validation of the jobs study failed", e);
            }

            final File typeFolder = new File(studyFolder, typeName);
            final File existingBinaryFolder = new File(typeFolder, "binaryitems");
            existingBinaryFolder.mkdirs();

            StudyMetadata existingStudy;
            try {
                /*
                 * Need to load current study information
                 */
                existingStudy = StudyIO.loadStudy(typeFolder);
            } catch (final RuntimeException e) {
                /*
                 * Do nothing, just means there is no existing study
                 * which is fine.
                 */
                existingStudy = null;
            }

            /*
             * If the study versions are not the same, then this
             * update is for a version that is not the most recent and
             * should not be applied.
             */
            if (existingStudy != null
                    && (existingStudy.getVersion() < 0 || existingStudy.getVersion() != oldVersion)) {
                throw new RuntimeException(
                        "Study update data is of a different version than the current study, "
                                + "cannot update if versions do not match. (" + existingStudy.getVersion()
                                + " : " + oldVersion + ")");
            }

            /*
             * Need to rename the new binary files so there are no collisions
             * with existing data files when merging. This also means updating
             * the new study document.
             */
            final int maxExistingItemNumber = StorageUtil.getHighestNumberedBinaryItem(existingBinaryFolder);
            StorageUtil.shiftItemIds(newStudy, jobFolder, maxExistingItemNumber + 1);

            /*
             * Write metadata update message to change log folder.
             */
            File changelogFolder = StorageUtil.getNextChangelogDir(changelogRoot);

            StudyUtils.writeStudy(newStudy, changelogFolder);

            Collection<Integer> excludedBids = new HashSet<Integer>();
            if (existingStudy != null) {
                /*
                 * Need to move through the new study and look for things to exclude
                 * and exclude them from the existing study.
                 */
                StudyUtils.applyExcludes(existingStudy, newStudy, excludedBids);
            }

            /*
             * Clean out excludes because excludes should not be left in
             * the newStudy.
             */
            StudyUtils.removeStudyExcludes(newStudy);

            /*
             * Need to merge the study documents and renormalize the result.
             * This means first denormalize, then merge, then normalize the
             * result
             */
            StudyUtils.denormalizeStudy(newStudy);

            if (existingStudy != null) {
                StudyUtils.denormalizeStudy(existingStudy);
                StudyUtils.mergeStudy(existingStudy, newStudy, excludedBids);

                // Get next version number
                existingStudy.setVersion(existingStudy.getVersion() + 1);
            } else {
                /*
                 * If no existing study, new study becomes the existing
                 * study. This happens when an update is done on a type that
                 * has no data yet.
                 */
                existingStudy = newStudy;

                // Set to base level version
                existingStudy.setVersion(StudyUtils.getBaseVersion());
                existingStudy.setType(typeName);
            }

            //Rename all excluded binary files to have .exclude
            StorageUtil.renameExcludedFiles(existingBinaryFolder, excludedBids);

            StudyUtils.normalizeStudy(existingStudy);

            /*
             * Need to copy into the Study folder the new study document and
             * binary data files.
             */
            StudyUtils.writeStudy(existingStudy, typeFolder);

            StorageUtil.moveBinaryItems(jobFolder, existingBinaryFolder);

            FileUtils.deleteDirectory(jobFolder);

            //Update study DAO only if this is DICOM data; don't update study DAO for other types (DICOM is primary)
            if (typeName.equals("DICOM")) {
                MINTStudy studyData = new MINTStudy();
                studyData.setID(studyUUID);
                studyData.setStudyInstanceUID(existingStudy.getStudyInstanceUID());
                studyData.setPatientID(existingStudy.getValueForAttribute(0x00100020));
                studyData.setAccessionNumber(existingStudy.getValueForAttribute(0x00080050));
                // studyData.setDateTime(study.getValueForAttribute(0x00080020));
                studyData.setDateTime(MINTStudy.now());
                studyData.setStudyVersion(existingStudy.getVersion());
                studyDAO.updateStudy(studyData);
            }

            //Update change DAO for any type
            Change updateInfo = new Change();
            updateInfo.setId(UUID.randomUUID().toString());
            updateInfo.setStudyID(studyUUID);
            updateInfo.setType(typeName);
            updateInfo.setRemoteUser(remoteUser);
            updateInfo.setRemoteHost(remoteHost);
            updateInfo.setIndex(Integer.parseInt(changelogFolder.getName()));
            updateInfo.setOperation(ChangeOperation.UPDATE);
            updateDAO.saveChange(updateInfo);

            jobInfo.setStatus(JobStatus.SUCCESS);
            jobInfo.setStatusDescription("complete");
        } catch (Exception e) {
            jobInfo.setStatus(JobStatus.FAILED);
            jobInfo.setStatusDescription(e.getMessage());
            LOG.error("unable to process job " + jobID, e);
        } finally {
            lock.unlock();
            LOG.debug("Released lock and stopping.");
        }
    } else {
        jobInfo.setStatus(JobStatus.FAILED);
        jobInfo.setStatusDescription("unable to process job " + jobID
                + ", another update is current being processed on the same study.");
    }

    jobInfoDAO.saveOrUpdateJobInfo(jobInfo);
}
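
Unlike the blocking lock() calls in the other examples, this processor uses tryLock() to fail fast when another update already holds the per-study lock, and putIfAbsent to share one lock instance per study UUID across threads. A reduced sketch of that pattern (class and method names are illustrative):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class PerStudyProcessor {
    private static final ConcurrentMap<String, Lock> STUDY_LOCKS = new ConcurrentHashMap<>();

    public boolean process(String studyUUID) {
        // Share one lock per study UUID; losers of the race reuse the winner's lock.
        Lock lock = STUDY_LOCKS.computeIfAbsent(studyUUID, id -> new ReentrantLock());

        // Fail fast rather than queue behind a concurrent update.
        if (!lock.tryLock()) {
            return false; // another update on this study is in progress
        }
        try {
            // ... apply the update ...
            return true;
        } finally {
            lock.unlock();
        }
    }
}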

From source file:org.apache.hadoop.hive.metastore.HiveMetaStore.java

/**
 * Start threads outside of the thrift service, such as the compactor threads.
 * @param conf Hive configuration object
 */
private static void startMetaStoreThreads(final HiveConf conf, final Lock startLock,
        final Condition startCondition, final AtomicBoolean startedServing) {
    // A thread is spun up to start these other threads.  That's because we can't start them
    // until after the TServer has started, but once TServer.serve is called we aren't given back
    // control.
    Thread t = new Thread() {
        @Override
        public void run() {
            // This is a massive hack.  The compactor threads have to access packages in ql (such as
            // AcidInputFormat).  ql depends on metastore so we can't directly access those.  To deal
            // with this the compactor thread classes have been put in ql and they are instantiated here
            // dynamically.  This is not ideal but it avoids a massive refactoring of Hive packages.
            //
            // Wrap the start of the threads in a catch Throwable loop so that any failures
            // don't doom the rest of the metastore.
            startLock.lock();
            try {
                JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(conf);
                pauseMonitor.start();
            } catch (Throwable t) {
                LOG.warn("Could not initiate the JvmPauseMonitor thread." + " GCs and Pauses may not be "
                        + "warned upon.", t);
            }

            try {
                // Per the javadocs on Condition, do not depend on the condition alone as a start gate
                // since spurious wake ups are possible.
                while (!startedServing.get())
                    startCondition.await();
                startCompactorInitiator(conf);
                startCompactorWorkers(conf);
                startCompactorCleaner(conf);
                startHouseKeeperService(conf);
            } catch (Throwable e) {
                LOG.error("Failure when starting the compactor, compactions may not happen, "
                        + StringUtils.stringifyException(e));
            } finally {
                startLock.unlock();
            }

            ReplChangeManager.scheduleCMClearer(conf);
        }
    };
    t.setDaemon(true);
    t.setName("Metastore threads starter thread");
    t.start();
}

From source file:org.apache.hadoop.hbase.master.AssignmentManager.java

/**
 * Handles various states an unassigned node can be in.
 * <p>
 * Method is called when a state change is suspected for an unassigned node.
 * <p>
 * This deals with skipped transitions (we got a CLOSED but didn't see CLOSING
 * yet).
 * @param rt
 * @param expectedVersion
 */
void handleRegion(final RegionTransition rt, int expectedVersion) {
    if (rt == null) {
        LOG.warn("Unexpected NULL input for RegionTransition rt");
        return;
    }
    final ServerName sn = rt.getServerName();
    // Check if this is a special HBCK transition
    if (sn.equals(HBCK_CODE_SERVERNAME)) {
        handleHBCK(rt);
        return;
    }
    final long createTime = rt.getCreateTime();
    final byte[] regionName = rt.getRegionName();
    String encodedName = HRegionInfo.encodeRegionName(regionName);
    String prettyPrintedRegionName = HRegionInfo.prettyPrint(encodedName);
    // Verify this is a known server
    if (!serverManager.isServerOnline(sn) && !ignoreStatesRSOffline.contains(rt.getEventType())) {
        LOG.warn("Attempted to handle region transition for server but " + "it is not online: "
                + prettyPrintedRegionName + ", " + rt);
        return;
    }

    RegionState regionState = regionStates.getRegionState(encodedName);
    long startTime = System.currentTimeMillis();
    if (LOG.isDebugEnabled()) {
        boolean lateEvent = createTime < (startTime - 15000);
        LOG.debug("Handling " + rt.getEventType() + ", server=" + sn + ", region="
                + (prettyPrintedRegionName == null ? "null" : prettyPrintedRegionName)
                + (lateEvent ? ", which is more than 15 seconds late" : "") + ", current_state=" + regionState);
    }
    // We don't do anything for this event,
    // so separate it out, no need to lock/unlock anything
    if (rt.getEventType() == EventType.M_ZK_REGION_OFFLINE) {
        return;
    }

    // We need a lock on the region as we could update it
    Lock lock = locker.acquireLock(encodedName);
    try {
        RegionState latestState = regionStates.getRegionState(encodedName);
        if ((regionState == null && latestState != null) || (regionState != null && latestState == null)
                || (regionState != null && latestState != null
                        && latestState.getState() != regionState.getState())) {
            LOG.warn("Region state changed from " + regionState + " to " + latestState
                    + ", while acquiring lock");
        }
        long waitedTime = System.currentTimeMillis() - startTime;
        if (waitedTime > 5000) {
            LOG.warn("Took " + waitedTime + "ms to acquire the lock");
        }
        regionState = latestState;
        switch (rt.getEventType()) {
        case RS_ZK_REQUEST_REGION_SPLIT:
        case RS_ZK_REGION_SPLITTING:
        case RS_ZK_REGION_SPLIT:
            if (!handleRegionSplitting(rt, encodedName, prettyPrintedRegionName, sn)) {
                deleteSplittingNode(encodedName, sn);
            }
            break;

        case RS_ZK_REQUEST_REGION_MERGE:
        case RS_ZK_REGION_MERGING:
        case RS_ZK_REGION_MERGED:
            // Merged region is a new region, we can't find it in the region states now.
            // However, the two merging regions are not new. They should be in state for merging.
            if (!handleRegionMerging(rt, encodedName, prettyPrintedRegionName, sn)) {
                deleteMergingNode(encodedName, sn);
            }
            break;

        case M_ZK_REGION_CLOSING:
            // Should see CLOSING after we have asked it to CLOSE or additional
            // times after already being in state of CLOSING
            if (regionState == null || !regionState.isPendingCloseOrClosingOnServer(sn)) {
                LOG.warn("Received CLOSING for " + prettyPrintedRegionName + " from " + sn
                        + " but the region isn't PENDING_CLOSE/CLOSING here: "
                        + regionStates.getRegionState(encodedName));
                return;
            }
            // Transition to CLOSING (or update stamp if already CLOSING)
            regionStates.updateRegionState(rt, State.CLOSING);
            break;

        case RS_ZK_REGION_CLOSED:
            // Should see CLOSED after CLOSING but possible after PENDING_CLOSE
            if (regionState == null || !regionState.isPendingCloseOrClosingOnServer(sn)) {
                LOG.warn("Received CLOSED for " + prettyPrintedRegionName + " from " + sn
                        + " but the region isn't PENDING_CLOSE/CLOSING here: "
                        + regionStates.getRegionState(encodedName));
                return;
            }
            // Handle CLOSED by assigning elsewhere or stopping if a disable
            // If we got here all is good.  Need to update RegionState -- else
            // what follows will fail because not in expected state.
            new ClosedRegionHandler(server, this, regionState.getRegion()).process();
            updateClosedRegionHandlerTracker(regionState.getRegion());
            break;

        case RS_ZK_REGION_FAILED_OPEN:
            if (regionState == null || !regionState.isPendingOpenOrOpeningOnServer(sn)) {
                LOG.warn("Received FAILED_OPEN for " + prettyPrintedRegionName + " from " + sn
                        + " but the region isn't PENDING_OPEN/OPENING here: "
                        + regionStates.getRegionState(encodedName));
                return;
            }
            AtomicInteger failedOpenCount = failedOpenTracker.get(encodedName);
            if (failedOpenCount == null) {
                failedOpenCount = new AtomicInteger();
                // No need to use putIfAbsent, or extra synchronization since
                // this whole handleRegion block is locked on the encoded region
                // name, and failedOpenTracker is updated only in this block
                failedOpenTracker.put(encodedName, failedOpenCount);
            }
            if (failedOpenCount.incrementAndGet() >= maximumAttempts) {
                regionStates.updateRegionState(rt, State.FAILED_OPEN);
                // remove the tracking info to save memory, also reset
                // the count for next open initiative
                failedOpenTracker.remove(encodedName);
            } else {
                // Handle this the same as if it were opened and then closed.
                regionState = regionStates.updateRegionState(rt, State.CLOSED);
                if (regionState != null) {
                    // When there are more than one region server a new RS is selected as the
                    // destination and the same is updated in the regionplan. (HBASE-5546)
                    try {
                        getRegionPlan(regionState.getRegion(), sn, true);
                        new ClosedRegionHandler(server, this, regionState.getRegion()).process();
                    } catch (HBaseIOException e) {
                        LOG.warn("Failed to get region plan", e);
                    }
                }
            }
            break;

        case RS_ZK_REGION_OPENING:
            // Should see OPENING after we have asked it to OPEN or additional
            // times after already being in state of OPENING
            if (regionState == null || !regionState.isPendingOpenOrOpeningOnServer(sn)) {
                LOG.warn("Received OPENING for " + prettyPrintedRegionName + " from " + sn
                        + " but the region isn't PENDING_OPEN/OPENING here: "
                        + regionStates.getRegionState(encodedName));
                return;
            }
            // Transition to OPENING (or update stamp if already OPENING)
            regionStates.updateRegionState(rt, State.OPENING);
            break;

        case RS_ZK_REGION_OPENED:
            // Should see OPENED after OPENING but possible after PENDING_OPEN.
            if (regionState == null || !regionState.isPendingOpenOrOpeningOnServer(sn)) {
                LOG.warn("Received OPENED for " + prettyPrintedRegionName + " from " + sn
                        + " but the region isn't PENDING_OPEN/OPENING here: "
                        + regionStates.getRegionState(encodedName));

                if (regionState != null) {
                    // Close it without updating the internal region states,
                    // so as not to create double assignments in unlucky scenarios
                    // mentioned in OpenRegionHandler#process
                    unassign(regionState.getRegion(), null, -1, null, false, sn);
                }
                return;
            }
            // Handle OPENED by removing from transition and deleted zk node
            regionState = regionStates.updateRegionState(rt, State.OPEN);
            if (regionState != null) {
                failedOpenTracker.remove(encodedName); // reset the count, if any
                new OpenedRegionHandler(server, this, regionState.getRegion(), sn, expectedVersion).process();
                updateOpenedRegionHandlerTracker(regionState.getRegion());
            }
            break;

        default:
            throw new IllegalStateException("Received event is not valid.");
        }
    } finally {
        lock.unlock();
    }
}
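
Here locker.acquireLock(encodedName) returns a Lock that is already held, so the caller is responsible only for the unlock() in finally. A simplified sketch of such a keyed locker (HBase's real KeyLocker additionally reference-counts and discards idle locks, which this sketch omits):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class SimpleKeyLocker<K> {
    private final ConcurrentMap<K, Lock> locks = new ConcurrentHashMap<>();

    // Returns the lock already acquired; the caller must unlock() in finally.
    public Lock acquireLock(K key) {
        Lock lock = locks.computeIfAbsent(key, k -> new ReentrantLock());
        lock.lock();
        return lock;
    }
}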

From source file:com.alibaba.wasp.master.AssignmentManager.java

/**
 * Handles various states an unassigned node can be in.
 * <p>
 * Method is called when a state change is suspected for an unassigned node.
 * <p>
 * This deals with skipped transitions (we got a CLOSED but didn't see CLOSING
 * yet).
 *
 * @param egTransition
 * @param expectedVersion
 */
private void handleEntityGroup(final EntityGroupTransaction egTransition, int expectedVersion) {
    if (egTransition == null) {
        LOG.warn("Unexpected NULL input " + egTransition);
        return;
    }
    final ServerName sn = egTransition.getServerName();
    // Check if this is a special HBCK transition
    if (sn.equals(HBCK_CODE_SERVERNAME)) {
        handleHBCK(egTransition);
        return;
    }
    final long createTime = egTransition.getCreateTime();
    final byte[] entityGroupName = egTransition.getEntityGroupName();
    String encodedName = EntityGroupInfo.encodeEntityGroupName(entityGroupName);
    // Verify this is a known server
    if (!serverManager.isServerOnline(sn) && !ignoreStatesFSOffline.contains(egTransition.getEventType())) {
        LOG.warn("Attempted to handle entityGroup transition for server but " + "server is not online: "
                + encodedName);
        return;
    }

    EntityGroupState entityGroupState = entityGroupStates.getEntityGroupTransitionState(encodedName);
    long startTime = System.currentTimeMillis();
    if (LOG.isDebugEnabled()) {
        boolean lateEvent = createTime < (startTime - 15000);
        LOG.debug("Handling transition=" + egTransition.getEventType() + ", server=" + sn + ", entityGroup="
                + (encodedName == null ? "null" : encodedName)
                + (lateEvent ? ", which is more than 15 seconds late" : "")
                + ", current state from entityGroup state map =" + entityGroupState);
    }
    // We don't do anything for this event,
    // so separate it out, no need to lock/unlock anything
    if (egTransition.getEventType() == EventType.M_ZK_ENTITYGROUP_OFFLINE) {
        return;
    }

    // We need a lock on the entityGroup as we could update it
    Lock lock = locker.acquireLock(encodedName);
    try {
        EntityGroupState latestState = entityGroupStates.getEntityGroupTransitionState(encodedName);
        if ((entityGroupState == null && latestState != null)
                || (entityGroupState != null && latestState == null) || (entityGroupState != null
                        && latestState != null && latestState.getState() != entityGroupState.getState())) {
            LOG.warn("EntityGroup state changed from " + entityGroupState + " to " + latestState
                    + ", while acquiring lock");
        }
        long waitedTime = System.currentTimeMillis() - startTime;
        if (waitedTime > 5000) {
            LOG.warn("Took " + waitedTime + "ms to acquire the lock");
        }
        entityGroupState = latestState;
        switch (egTransition.getEventType()) {
        case FSERVER_ZK_ENTITYGROUP_SPLITTING:
            if (!isInStateForSplitting(entityGroupState))
                break;
            entityGroupStates.updateEntityGroupState(egTransition, EntityGroupState.State.SPLITTING);
            break;

        case FSERVER_ZK_ENTITYGROUP_SPLIT:
            // EntityGroupState must be null, or SPLITTING or PENDING_CLOSE.
            if (!isInStateForSplitting(entityGroupState))
                break;
            // If null, add SPLITTING state before going to SPLIT
            if (entityGroupState == null) {
                entityGroupState = entityGroupStates.updateEntityGroupState(egTransition,
                        EntityGroupState.State.SPLITTING);

                String message = "Received SPLIT for entityGroup " + encodedName + " from server " + sn;
                // If still null, it means we cannot find it and it was already
                // processed
                if (entityGroupState == null) {
                    LOG.warn(message + " but it doesn't exist anymore,"
                            + " probably already processed its split");
                    break;
                }
                LOG.info(message + " but entityGroup was not first in SPLITTING state; continuing");
            }
            // Check it has daughters.
            byte[] payload = egTransition.getPayload();
            List<EntityGroupInfo> daughters = null;
            try {
                daughters = EntityGroupInfo.parseDelimitedFrom(payload, 0, payload.length);
            } catch (IOException e) {
                LOG.error("Dropped split! Failed reading split payload for " + encodedName);
                break;
            }
            assert daughters.size() == 2;
            // Assert that we can get a serverinfo for this server.
            if (!this.serverManager.isServerOnline(sn)) {
                LOG.error("Dropped split! ServerName=" + sn + " unknown.");
                break;
            }
            // Run handler to do the rest of the SPLIT handling.
            this.executorService.submit(new SplitEntityGroupHandler(server, this,
                    entityGroupState.getEntityGroup(), sn, daughters));
            break;

        case M_ZK_ENTITYGROUP_CLOSING:
            // Should see CLOSING after we have asked it to CLOSE or additional
            // times after already being in state of CLOSING
            if (entityGroupState != null && !entityGroupState.isPendingCloseOrClosingOnServer(sn)) {
                LOG.warn("Received CLOSING for entityGroup " + encodedName + " from server " + sn
                        + " but entityGroup was in the state " + entityGroupState
                        + " and not in expected PENDING_CLOSE or CLOSING states,"
                        + " or not on the expected server");
                return;
            }
            // Transition to CLOSING (or update stamp if already CLOSING)
            entityGroupStates.updateEntityGroupState(egTransition, EntityGroupState.State.CLOSING);
            break;

        case FSERVER_ZK_ENTITYGROUP_CLOSED:
            // Should see CLOSED after CLOSING but possible after PENDING_CLOSE
            if (entityGroupState != null && !entityGroupState.isPendingCloseOrClosingOnServer(sn)) {
                LOG.warn("Received CLOSED for entityGroup " + encodedName + " from server " + sn
                        + " but entityGroup was in the state " + entityGroupState
                        + " and not in expected PENDING_CLOSE or CLOSING states,"
                        + " or not on the expected server");
                return;
            }
            // Handle CLOSED by assigning elsewhere or stopping if a disable
            // If we got here all is good. Need to update EntityGroupState -- else
            // what follows will fail because not in expected state.
            entityGroupState = entityGroupStates.updateEntityGroupState(egTransition,
                    EntityGroupState.State.CLOSED);
            if (entityGroupState != null) {
                removeClosedEntityGroup(entityGroupState.getEntityGroup());
                this.executorService
                        .submit(new ClosedEntityGroupHandler(server, this, entityGroupState.getEntityGroup()));
            }
            break;

        case FSERVER_ZK_ENTITYGROUP_FAILED_OPEN:
            if (entityGroupState != null && !entityGroupState.isPendingOpenOrOpeningOnServer(sn)) {
                LOG.warn("Received FAILED_OPEN for entityGroup " + encodedName + " from server " + sn
                        + " but entityGroup was in the state " + entityGroupState
                        + " and not in expected PENDING_OPEN or OPENING states,"
                        + " or not on the expected server");
                return;
            }
            // Handle this the same as if it were opened and then closed.
            entityGroupState = entityGroupStates.updateEntityGroupState(egTransition,
                    EntityGroupState.State.CLOSED);
            // When there are more than one entityGroup server a new FSERVER is
            // selected as the
            // destination and the same is updated in the entityGroupplan.
            // (HBASE-5546)
            if (entityGroupState != null) {
                getEntityGroupPlan(entityGroupState.getEntityGroup(), sn, true);
                this.executorService
                        .submit(new ClosedEntityGroupHandler(server, this, entityGroupState.getEntityGroup()));
            }
            break;

        case FSERVER_ZK_ENTITYGROUP_OPENING:
            // Should see OPENING after we have asked it to OPEN or additional
            // times after already being in state of OPENING
            if (entityGroupState != null && !entityGroupState.isPendingOpenOrOpeningOnServer(sn)) {
                LOG.warn("Received OPENING for entityGroup " + encodedName + " from server " + sn
                        + " but entityGroup was in the state " + entityGroupState
                        + " and not in expected PENDING_OPEN or OPENING states,"
                        + " or not on the expected server");
                return;
            }
            // Transition to OPENING (or update stamp if already OPENING)
            entityGroupStates.updateEntityGroupState(egTransition, EntityGroupState.State.OPENING);
            break;

        case FSERVER_ZK_ENTITYGROUP_OPENED:
            // Should see OPENED after OPENING but possible after PENDING_OPEN
            if (entityGroupState != null && !entityGroupState.isPendingOpenOrOpeningOnServer(sn)) {
                LOG.warn("Received OPENED for entityGroup " + encodedName + " from server " + sn
                        + " but entityGroup was in the state " + entityGroupState
                        + " and not in expected PENDING_OPEN or OPENING states,"
                        + " or not on the expected server");
                return;
            }
            // Handle OPENED by removing from transition and deleted zk node
            entityGroupState = entityGroupStates.updateEntityGroupState(egTransition,
                    EntityGroupState.State.OPEN);
            if (entityGroupState != null) {
                this.executorService.submit(new OpenedEntityGroupHandler(server, this,
                        entityGroupState.getEntityGroup(), sn, expectedVersion));
            }
            break;

        default:
            throw new IllegalStateException("Received event is not valid.");
        }
    } finally {
        lock.unlock();
    }
}

From source file:com.mirth.connect.donkey.server.channel.DestinationConnector.java

@Override
public void run() {
    DonkeyDao dao = null;
    Serializer serializer = channel.getSerializer();
    ConnectorMessage connectorMessage = null;
    int retryIntervalMillis = destinationConnectorProperties.getRetryIntervalMillis();
    Long lastMessageId = null;
    boolean canAcquire = true;
    Lock statusUpdateLock = null;
    queue.registerThreadId();

    do {
        try {
            if (canAcquire) {
                connectorMessage = queue.acquire();
            }

            if (connectorMessage != null) {
                boolean exceptionCaught = false;

                try {
                    /*
                     * If the last message id is equal to the current message id, then the
                     * message was not successfully sent and is being retried, so wait the retry
                     * interval.
                     * 
                     * If the last message id is greater than the current message id, then some
                     * message was not successful, message rotation is on, and the queue is back
                     * to the oldest message, so wait the retry interval.
                     */
                    if (connectorMessage.isAttemptedFirst()
                            || lastMessageId != null && (lastMessageId == connectorMessage.getMessageId()
                                    || (queue.isRotate() && lastMessageId > connectorMessage.getMessageId()
                                            && queue.hasBeenRotated()))) {
                        Thread.sleep(retryIntervalMillis);
                        connectorMessage.setAttemptedFirst(false);
                    }

                    lastMessageId = connectorMessage.getMessageId();

                    dao = daoFactory.getDao();
                    Status previousStatus = connectorMessage.getStatus();

                    Class<?> connectorPropertiesClass = getConnectorProperties().getClass();
                    Class<?> serializedPropertiesClass = null;

                    ConnectorProperties connectorProperties = null;

                    /*
                     * If we're not regenerating connector properties, use the serialized sent
                     * content from the database. It's possible that the channel had Regenerate
                     * Template and Include Filter/Transformer enabled at one point, and then
                     * was disabled later, so we also have to make sure the sent content exists.
                     */
                    if (!destinationConnectorProperties.isRegenerateTemplate()
                            && connectorMessage.getSent() != null) {
                        // Attempt to get the sent properties from the in-memory cache. If it doesn't exist, deserialize from the actual sent content.
                        connectorProperties = connectorMessage.getSentProperties();
                        if (connectorProperties == null) {
                            connectorProperties = serializer.deserialize(
                                    connectorMessage.getSent().getContent(), ConnectorProperties.class);
                            connectorMessage.setSentProperties(connectorProperties);
                        }

                        serializedPropertiesClass = connectorProperties.getClass();
                    } else {
                        connectorProperties = ((DestinationConnectorPropertiesInterface) getConnectorProperties())
                                .clone();
                    }

                    /*
                     * Verify that the connector properties stored in the connector message
                     * match the properties from the current connector. Otherwise the connector
                     * type has changed and the message will be set to errored. If we're
                     * regenerating the connector properties then it doesn't matter.
                     */
                    if (connectorMessage.getSent() == null
                            || destinationConnectorProperties.isRegenerateTemplate()
                            || serializedPropertiesClass == connectorPropertiesClass) {
                        ThreadUtils.checkInterruptedStatus();

                        /*
                         * If a historical queued message has not yet been transformed and the
                         * current queue settings do not include the filter/transformer, force
                         * the message to ERROR.
                         */
                        if (connectorMessage.getSent() == null && !includeFilterTransformerInQueue()) {
                            connectorMessage.setStatus(Status.ERROR);
                            connectorMessage.setProcessingError(
                                    "Queued message has not yet been transformed, and Include Filter/Transformer is currently disabled.");

                            dao.updateStatus(connectorMessage, previousStatus);
                            dao.updateErrors(connectorMessage);
                        } else {
                            if (includeFilterTransformerInQueue()) {
                                transform(dao, connectorMessage, previousStatus,
                                        connectorMessage.getSent() == null);
                            }

                            if (connectorMessage.getStatus() == Status.QUEUED) {
                                /*
                                 * Replace the connector properties if necessary. Again for
                                 * historical queue reasons, we need to check whether the sent
                                 * content exists.
                                 */
                                if (connectorMessage.getSent() == null
                                        || destinationConnectorProperties.isRegenerateTemplate()) {
                                    replaceConnectorProperties(connectorProperties, connectorMessage);
                                    MessageContent sentContent = getSentContent(connectorMessage,
                                            connectorProperties);
                                    connectorMessage.setSent(sentContent);

                                    if (sentContent != null && storageSettings.isStoreSent()) {
                                        ThreadUtils.checkInterruptedStatus();
                                        dao.storeMessageContent(sentContent);
                                    }
                                }

                                Response response = handleSend(connectorProperties, connectorMessage);
                                connectorMessage.setSendAttempts(connectorMessage.getSendAttempts() + 1);

                                if (response == null) {
                                    throw new RuntimeException(
                                            "Received null response from destination " + destinationName + ".");
                                }
                                response.fixStatus(isQueueEnabled());

                                afterSend(dao, connectorMessage, response, previousStatus);
                            }
                        }
                    } else {
                        connectorMessage.setStatus(Status.ERROR);
                        connectorMessage.setProcessingError(
                                "Mismatched connector properties detected in queued message. The connector type may have changed since the message was queued.\nFOUND: "
                                        + serializedPropertiesClass.getSimpleName() + "\nEXPECTED: "
                                        + connectorPropertiesClass.getSimpleName());

                        dao.updateStatus(connectorMessage, previousStatus);
                        dao.updateErrors(connectorMessage);
                    }

                    /*
                     * If we're about to commit a non-QUEUED status, we first need to obtain a
                     * read lock from the queue. This is done so that if something else
                     * invalidates the queue at the same time, we don't incorrectly decrement
                     * the size during the release.
                     */
                    if (connectorMessage.getStatus() != Status.QUEUED) {
                        Lock lock = queue.getStatusUpdateLock();
                        lock.lock();
                        statusUpdateLock = lock;
                    }

                    ThreadUtils.checkInterruptedStatus();
                    dao.commit(storageSettings.isDurable());

                    // Only actually attempt to remove content if the status is SENT
                    if (connectorMessage.getStatus().isCompleted()) {
                        try {
                            channel.removeContent(dao, null, lastMessageId, true, true);
                        } catch (RuntimeException e) {
                            /*
                             * The connector message itself processed successfully, only the
                             * remove content operation failed. In this case just give up and
                             * log an error.
                             */
                            logger.error("Error removing content for message " + lastMessageId + " for channel "
                                    + channel.getName() + " (" + channel.getChannelId() + ") on destination "
                                    + destinationName
                                    + ". This error is expected if the message was manually removed from the queue.",
                                    e);
                        }
                    }
                } catch (RuntimeException e) {
                    logger.error("Error processing queued "
                            + (connectorMessage != null ? connectorMessage.toString() : "message (null)")
                            + " for channel " + channel.getName() + " (" + channel.getChannelId()
                            + ") on destination " + destinationName
                            + ". This error is expected if the message was manually removed from the queue.",
                            e);
                    /*
                     * Invalidate the queue's buffer if any errors occurred. If the message
                     * being processed by the queue was deleted, this will prevent the queue
                     * from trying to process that message repeatedly. Since multiple
                     * queues/threads may need to do this as well, we do not reset the queue's
                     * maps of checked in or deleted messages.
                     */
                    exceptionCaught = true;
                } finally {
                    if (dao != null) {
                        dao.close();
                    }

                    /*
                     * We always want to release the message if it's done (obviously).
                     */
                    if (exceptionCaught) {
                        /*
                         * If a runtime exception was caught, we can't guarantee whether that
                         * message was deleted or is still in the database. When it is released,
                         * the message will be removed from the in-memory queue. However we need
                         * to invalidate the queue before allowing any other threads to be able
                         * to access it in case the message is still in the database.
                         */
                        canAcquire = true;
                        synchronized (queue) {
                            queue.release(connectorMessage, true);

                            // Release the read lock now before calling invalidate
                            if (statusUpdateLock != null) {
                                statusUpdateLock.unlock();
                                statusUpdateLock = null;
                            }

                            queue.invalidate(true, false);
                        }
                    } else if (connectorMessage.getStatus() != Status.QUEUED) {
                        canAcquire = true;
                        queue.release(connectorMessage, true);
                    } else if (destinationConnectorProperties.isRotate()) {
                        canAcquire = true;
                        queue.release(connectorMessage, false);
                    } else {
                        /*
                         * If the message is still queued, no exception occurred, and queue
                         * rotation is disabled, we still want to force the queue to re-acquire
                         * a message if it has been marked as deleted by another process.
                         */
                        canAcquire = queue.releaseIfDeleted(connectorMessage);
                    }

                    // Always release the read lock if we obtained it
                    if (statusUpdateLock != null) {
                        statusUpdateLock.unlock();
                        statusUpdateLock = null;
                    }
                }
            } else {
                /*
                 * This is necessary because there is no blocking peek. If the queue is empty,
                 * wait some time to free up the cpu.
                 */
                Thread.sleep(Constants.DESTINATION_QUEUE_EMPTY_SLEEP_TIME);
            }
        } catch (InterruptedException e) {
            // Stop this thread if it was halted
            return;
        } catch (Exception e) {
            // Always release the read lock if we obtained it
            if (statusUpdateLock != null) {
                statusUpdateLock.unlock();
                statusUpdateLock = null;
            }

            logger.warn("Error in queue thread for channel " + channel.getName() + " (" + channel.getChannelId()
                    + ") on destination " + destinationName + ".\n" + ExceptionUtils.getStackTrace(e));
            try {
                Thread.sleep(retryIntervalMillis);

                /*
                 * Since the thread already slept for the retry interval, set lastMessageId to
                 * null to prevent sleeping again.
                 */
                lastMessageId = null;
            } catch (InterruptedException e1) {
                // Stop this thread if it was halted
                return;
            }
        } finally {
            // Always release the read lock if we obtained it
            if (statusUpdateLock != null) {
                statusUpdateLock.unlock();
                statusUpdateLock = null;
            }
        }
    } while (getCurrentState() == DeployedState.STARTED || getCurrentState() == DeployedState.STARTING);
}