Example usage for java.util.concurrent.locks Condition await


Introduction

This page collects usage examples for the java.util.concurrent.locks Condition#await() method.

Prototype

void await() throws InterruptedException;


Document

Causes the current thread to wait until it is signalled or interrupted (see Thread#interrupt).
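
This contract implies the standard guarded-wait idiom: hold the lock that created the Condition, test the predicate in a loop, and call await() inside that loop, since wake-ups may be spurious. Below is a minimal sketch of that idiom; the class and member names are illustrative and do not come from the examples that follow.

import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

class ReadyFlag {
    private final Lock lock = new ReentrantLock();
    private final Condition ready = lock.newCondition();
    private boolean isReady;

    void awaitReady() throws InterruptedException {
        lock.lock();
        try {
            // Always call await() in a loop: wake-ups may be spurious, and the
            // predicate can become false again before this thread is scheduled.
            while (!isReady)
                ready.await(); // atomically releases the lock and suspends
        } finally {
            lock.unlock();
        }
    }

    void markReady() {
        lock.lock();
        try {
            isReady = true;
            ready.signalAll(); // wake every thread blocked in await()
        } finally {
            lock.unlock();
        }
    }
}

The signalling side changes the predicate and signals under the same lock, which is the pattern the Hive example below relies on.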

Usage

From source file:com.bigdata.dastor.db.ColumnFamilyStore.java

/** flush the given memtable and swap in a new one for its CFS, if it hasn't been frozen already.  threadsafe. */
Future<?> maybeSwitchMemtable(Memtable oldMemtable, final boolean writeCommitLog) throws IOException {
    /*
     *  If we can get the writelock, that means no new updates can come in and 
     *  all ongoing updates to memtables have completed. We can get the tail
     *  of the log and use it as the starting position for log replay on recovery.
     */
    Table.flusherLock.writeLock().lock();
    try {
        if (oldMemtable.isFrozen()) {
            return null;
        }
        oldMemtable.freeze();

        final CommitLogSegment.CommitLogContext ctx = writeCommitLog ? CommitLog.instance().getContext() : null;
        logger_.info(columnFamily_ + " has reached its threshold; switching in a fresh Memtable at " + ctx);
        final Condition condition = submitFlush(oldMemtable);
        memtable_ = new Memtable(this);
        // a second executor that makes sure the onMemtableFlushes get called in the right order,
        // while keeping the wait-for-flush (future.get) out of anything latency-sensitive.
        return commitLogUpdater_.submit(new WrappedRunnable() {
            public void runMayThrow() throws InterruptedException, IOException {
                condition.await();
                if (writeCommitLog) {
                    // if we're not writing to the commit log, we are replaying the log, so marking
                    // the log header with "you can discard anything written before the context" is not valid
                    CommitLog.instance().discardCompletedSegments(table_, columnFamily_, ctx);
                }
            }
        });
    } finally {
        Table.flusherLock.writeLock().unlock();
        if (memtableSwitchCount == Integer.MAX_VALUE) {
            memtableSwitchCount = 0;
        }
        memtableSwitchCount++;
    }
}
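
A detail worth noting in this Cassandra-derived example: condition.await() runs on the commitLogUpdater_ executor without the calling thread holding any lock, which a Condition obtained from Lock.newCondition() would reject with IllegalMonitorStateException. That only works if submitFlush returns a latch-like Condition implementation. The sketch below shows the general idea; the LatchCondition class is hypothetical, not the project's actual class.

import java.util.Date;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;

// Hypothetical sketch of a latch-like Condition: await() needs no external
// lock, because the object's own monitor guards the flag.
class LatchCondition implements Condition {
    private boolean signaled;

    @Override
    public synchronized void await() throws InterruptedException {
        while (!signaled)
            wait(); // releases this monitor while suspended
    }

    @Override
    public synchronized void signal() {
        signalAll(); // latch semantics: one signal releases every waiter
    }

    @Override
    public synchronized void signalAll() {
        signaled = true;
        notifyAll(); // wake all waiters; the flag stays set
    }

    // The remaining variants are not needed by the example above.
    @Override
    public void awaitUninterruptibly() {
        throw new UnsupportedOperationException();
    }

    @Override
    public long awaitNanos(long nanosTimeout) {
        throw new UnsupportedOperationException();
    }

    @Override
    public boolean await(long time, TimeUnit unit) {
        throw new UnsupportedOperationException();
    }

    @Override
    public boolean awaitUntil(Date deadline) {
        throw new UnsupportedOperationException();
    }
}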

From source file:com.edgenius.core.repository.SimpleRepositoryServiceImpl.java

private void acquireLock(String spacename, String identifier, String nodeUuid)
        throws RepositoryTiemoutExcetpion {

    log.debug("Request - Repository space {} identifier {} node {} acquire write lock in thread {}. ",
            new String[] { spacename, identifier, nodeUuid, Thread.currentThread().getName() });
    try {
        writeLock.lock();

        //This acquireLock call may be retrying after another lock that will terminate in TIMEOUT;
        //cancel that pending interrupt so that, once the lock is acquired, the thread can proceed
        //without a timeout limit (necessary for big file uploads).
        ThreadInterruptManager.removeThread(Thread.currentThread());
        ConditionGroup cond = spaceLockMap.get(spacename);
        //this lookup may be a performance bottleneck
        if (identifier == null && nodeUuid == null) {
            //space level lock
            if (cond == null) {
                cond = new ConditionGroup();
                spaceLockMap.put(spacename, cond);
                //this space holds no lock at any level yet; the lock is acquired successfully
                cond.spaceCondition = writeLock.newCondition();
            } else {

                //check this level and all sub-level node locks; if any is found, wait on the first one
                Condition relCond = null;
                if (cond.spaceCondition != null) {
                    relCond = cond.spaceCondition;
                }
                if (cond.identifierConditionMap.size() > 0) {
                    relCond = cond.identifierConditionMap.values().iterator().next();
                } else if (cond.nodeConditionMap.size() > 0) {
                    Map<String, Condition> nodeMap = cond.nodeConditionMap.values().iterator().next();
                    if (nodeMap.size() > 0) {
                        relCond = nodeMap.values().iterator().next();
                    }
                }
                if (relCond == null) {
                    //no related lock found; the lock is acquired successfully
                    cond.spaceCondition = writeLock.newCondition();
                } else {
                    //wait up to TIMEOUT for the related lock to be released
                    ThreadInterruptManager.addThread(Thread.currentThread(), TIMEOUT);
                    relCond.await();
                    //do next cycle compete to acquire lock.
                    acquireLock(spacename, identifier, nodeUuid);
                }
            }

        } else if (nodeUuid == null) {
            //identifier level lock
            if (cond == null) {
                cond = new ConditionGroup();
                spaceLockMap.put(spacename, cond);
                //this space holds no lock at any level yet; the lock is acquired successfully
                Condition idCond = writeLock.newCondition();
                cond.identifierConditionMap.put(identifier, idCond);
            } else {
                //need check space level, and sub-level
                Condition relCond = null;
                if (cond.spaceCondition != null) {
                    relCond = cond.spaceCondition;
                } else if (cond.identifierConditionMap.containsKey(identifier)) {
                    relCond = cond.identifierConditionMap.get(identifier);
                } else if (cond.nodeConditionMap.containsKey(identifier)) {
                    Map<String, Condition> nodeMap = cond.nodeConditionMap.get(identifier);
                    if (nodeMap.size() > 0) {
                        relCond = nodeMap.values().iterator().next();
                    }
                }
                if (relCond == null) {
                    //no lock at this level; the lock is acquired successfully
                    Condition idCond = writeLock.newCondition();
                    cond.identifierConditionMap.put(identifier, idCond);
                } else {
                    //wait up to TIMEOUT for the related lock to be released
                    ThreadInterruptManager.addThread(Thread.currentThread(), TIMEOUT);
                    relCond.await();
                    //do next cycle compete to acquire lock.
                    acquireLock(spacename, identifier, nodeUuid);
                }
            }
        } else {
            //node level lock
            if (cond == null) {
                cond = new ConditionGroup();
                spaceLockMap.put(spacename, cond);
                //this space holds no lock at any level yet; the lock is acquired successfully
                Condition nodeCond = writeLock.newCondition();
                Map<String, Condition> nodeMap = new HashMap<String, Condition>();
                nodeMap.put(nodeUuid, nodeCond);
                cond.nodeConditionMap.put(identifier, nodeMap);
            } else {
                //need check space level, and its direct identifier level
                Condition relCond = null;
                if (cond.spaceCondition != null) {
                    relCond = cond.spaceCondition;
                } else if (cond.identifierConditionMap.containsKey(identifier)) {
                    relCond = cond.identifierConditionMap.get(identifier);
                } else if (cond.nodeConditionMap.containsKey(identifier)) {
                    Map<String, Condition> nodeMap = cond.nodeConditionMap.get(identifier);
                    relCond = nodeMap.get(nodeUuid);
                }
                if (relCond == null) {
                    //no related lock found; the lock is acquired successfully
                    Condition nodeCond = writeLock.newCondition();
                    Map<String, Condition> nodeMap = cond.nodeConditionMap.get(identifier);
                    if (nodeMap == null) {
                        nodeMap = new HashMap<String, Condition>();
                    }
                    nodeMap.put(nodeUuid, nodeCond);
                    cond.nodeConditionMap.put(identifier, nodeMap);
                } else {
                    //wait up to TIMEOUT for the related lock to be released
                    ThreadInterruptManager.addThread(Thread.currentThread(), TIMEOUT);
                    relCond.await();
                    //do next cycle compete to acquire lock.
                    acquireLock(spacename, identifier, nodeUuid);
                }
            }
        }
        log.debug("Repository space {} identifier {} node {} acquire write lock successfully in thread {}. ",
                new String[] { spacename, identifier, nodeUuid, Thread.currentThread().getName() });
    } catch (InterruptedException e) {
        log.info("Acquire space lock concurrent interrupted");
        throw new RepositoryTiemoutExcetpion(
                "Repository cannot acquired write permssion in time period " + TIMEOUT + ". Timeout exception");
    } finally {
        writeLock.unlock();
    }
}
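
After relCond.await() returns, the method recurses into acquireLock to repeat the contention check from the top. The same retry can be expressed as a loop, which keeps the stack flat under heavy contention. The sketch below is a simplified, hypothetical restructuring that assumes the surrounding class's writeLock field; findConflictingCondition and registerCondition stand in for the space/identifier/node checks and map bookkeeping above, and are not part of the original class.

// Simplified, hypothetical sketch: the same retry structure expressed as a
// loop rather than recursion. findConflictingCondition stands in for the
// space/identifier/node checks above and registerCondition for the map
// bookkeeping; neither exists in the original class.
private void acquireLockIteratively(String spacename, String identifier, String nodeUuid)
        throws InterruptedException {
    writeLock.lock();
    try {
        Condition conflict;
        // Re-run the contention check after every wake-up, exactly as the
        // recursive acquireLock() call does, until no conflict remains.
        while ((conflict = findConflictingCondition(spacename, identifier, nodeUuid)) != null) {
            conflict.await(); // releases writeLock while suspended
        }
        registerCondition(spacename, identifier, nodeUuid);
    } finally {
        writeLock.unlock();
    }
}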

From source file:org.apache.hadoop.hive.metastore.HiveMetaStore.java

/**
 * Start threads outside of the thrift service, such as the compactor threads.
 * @param conf Hive configuration object
 */
private static void startMetaStoreThreads(final HiveConf conf, final Lock startLock,
        final Condition startCondition, final AtomicBoolean startedServing) {
    // A thread is spun up to start these other threads.  That's because we can't start them
    // until after the TServer has started, but once TServer.serve is called we aren't given back
    // control.
    Thread t = new Thread() {
        @Override
        public void run() {
            // This is a massive hack.  The compactor threads have to access packages in ql (such as
            // AcidInputFormat).  ql depends on metastore so we can't directly access those.  To deal
            // with this the compactor thread classes have been put in ql and they are instantiated here
            // dynamically.  This is not ideal but it avoids a massive refactoring of Hive packages.
            //
            // Wrap the start of the threads in a catch-Throwable block so that any failures
            // don't doom the rest of the metastore.
            startLock.lock();
            try {
                JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(conf);
                pauseMonitor.start();
            } catch (Throwable t) {
                LOG.warn("Could not initiate the JvmPauseMonitor thread." + " GCs and Pauses may not be "
                        + "warned upon.", t);
            }

            try {
                // Per the javadocs on Condition, do not depend on the condition alone as a start gate
                // since spurious wake ups are possible.
                while (!startedServing.get())
                    startCondition.await();
                startCompactorInitiator(conf);
                startCompactorWorkers(conf);
                startCompactorCleaner(conf);
                startHouseKeeperService(conf);
            } catch (Throwable e) {
                LOG.error("Failure when starting the compactor, compactions may not happen, "
                        + StringUtils.stringifyException(e));
            } finally {
                startLock.unlock();
            }

            ReplChangeManager.scheduleCMClearer(conf);
        }
    };
    t.setDaemon(true);
    t.setName("Metastore threads starter thread");
    t.start();
}
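
The waiting side above pairs startCondition.await() with the startedServing flag. The signalling side, run elsewhere once the thrift server is actually serving, must flip the flag and signal under the same lock. A minimal sketch of that counterpart follows; the class and method names are assumptions, not quoted from HiveMetaStore.

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;

// Hypothetical sketch of the signalling side of the start gate.
class StartGateSignaller {
    static void signalStartedServing(Lock startLock, Condition startCondition,
            AtomicBoolean startedServing) {
        startLock.lock();
        try {
            startedServing.set(true);   // flip the predicate first ...
            startCondition.signalAll(); // ... then wake any thread in await()
        } finally {
            startLock.unlock();
        }
    }
}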

From source file:org.jactr.core.production.action.SleepAction.java

/**
 * wait until the goal buffer isn't empty
 *
 * @see org.jactr.core.production.action.IAction#fire(org.jactr.core.production.IInstantiation, double)
 */
public double fire(IInstantiation instantiation, double firingTime) {
    IActivationBuffer goalBuffer = instantiation.getModel().getActivationBuffer(IActivationBuffer.GOAL);

    if (goalBuffer.getSourceChunk() == null) {
        final Lock goalLock = new ReentrantLock();
        final Condition gotAGoal = goalLock.newCondition();

        /*
         * merely signal when the goal buffer gets something
         */
        IActivationBufferListener listener = new ActivationBufferListenerAdaptor() {
            @Override
            public void sourceChunkAdded(ActivationBufferEvent event) {
                try {
                    goalLock.lock();
                    if (LOGGER.isDebugEnabled())
                        LOGGER.debug("Signaling goal insertion");
                    gotAGoal.signalAll();
                } finally {
                    goalLock.unlock();
                }
            }
        };

        /*
         * attach the listener with the inline executor - this ensures that
         * regardless of what thread adds the source chunk to the buffer we will
         * be notified
         */
        goalBuffer.addListener(listener, ExecutorServices.INLINE_EXECUTOR);

        try {
            goalLock.lock();
            while (goalBuffer.getSourceChunk() == null) {
                if (LOGGER.isDebugEnabled())
                    LOGGER.debug("Waiting for goal");
                gotAGoal.await();
            }
        } catch (Exception e) {
            if (LOGGER.isDebugEnabled())
                LOGGER.debug("Could not wait for goal ", e);
        }

        if (LOGGER.isDebugEnabled())
            LOGGER.debug("Resuming from wait");

        goalLock.unlock();

        /*
         * remove the listener
         */
        goalBuffer.removeListener(listener);
    } else if (LOGGER.isDebugEnabled())
        LOGGER.debug("Goal is already present, no need to sleep");

    return 0;
}
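
One caveat about this example: the final goalLock.unlock() executes outside any finally clause, so an unexpected error raised between lock() and unlock() would leave the lock held. A minimal restructuring of the same wait, using the example's own goalLock, gotAGoal, and goalBuffer, keeps the unlock in a finally block:

// Same guarded wait as above, restructured so the lock is always released.
goalLock.lock();
try {
    while (goalBuffer.getSourceChunk() == null) {
        if (LOGGER.isDebugEnabled())
            LOGGER.debug("Waiting for goal");
        gotAGoal.await(); // releases goalLock while suspended
    }
} catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // preserve the interrupt status
} finally {
    goalLock.unlock(); // runs even if await() throws
}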