Example usage for java.util.concurrent Semaphore release

Introduction

This page collects example usages of java.util.concurrent.Semaphore.release(int permits).

Prototype

public void release(int permits) 

Document

Releases the given number of permits, returning them to the semaphore.
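
As a quick, self-contained illustration before the longer examples below (the class and variable names here are our own and are not taken from any of the source files), the following sketch drains a semaphore with a bulk acquire and then returns a batch of permits with release(int):

import java.util.concurrent.Semaphore;

public class ReleasePermitsExample {
    public static void main(String[] args) throws InterruptedException {
        Semaphore permits = new Semaphore(3);           // three permits available initially

        permits.acquire(3);                             // drain all three permits in one call
        System.out.println(permits.availablePermits()); // prints 0

        permits.release(2);                             // return two permits to the semaphore
        System.out.println(permits.availablePermits()); // prints 2
    }
}

In the examples that follow, release(int) most often acts as a countdown or broadcast: a worker releases one permit per unit of work, or a coordinator releases threads.length permits (or a deliberately generous count) to wake every waiter at once.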

Usage

From source file:Main.java

/**
 * Runs each runnable in a new thread. This method blocks until all runnables are complete.
 * Before any runnable is run, we also wait until every allocated thread has started at least once.
 * This is done to increase the randomness in the order of thread execution.
 */
public static void startMultipleThreadsAndWaitUntilComplete(final List<Runnable> runnables) throws Exception {
    final Semaphore competingThreadsStarted = new Semaphore(0); // Number of threads for runnables started.
    final Semaphore competingThreadsToRelease = new Semaphore(0); // Acquired by runnable threads. Will be released
                                                                  // once all runnables have been run once.
    final Semaphore competingThreadsCompleted = new Semaphore(0); // Number of runnable threads completed.

    for (int i = 0; i < runnables.size(); i++) {
        final int runnableIndex = i;

        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    // Notify semaphore that this thread has been started.
                    competingThreadsStarted.release(1);

                    // Once all threads have notified the competingThreadsStarted semaphore,
                    // competingThreadsToRelease will be released and we will continue.
                    competingThreadsToRelease.acquire(1);

                    // Increases randomness of thread execution order.
                    Thread.sleep(1);

                    runnables.get(runnableIndex).run();

                    // thread has completed running provided runnable.
                    competingThreadsCompleted.release(1);
                } catch (final InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }).start();
    }

    // Only proceed once all threads have at least started running once.
    competingThreadsStarted.acquire(runnables.size());

    // Release all threads.
    competingThreadsToRelease.release(runnables.size());

    // Wait until all threads have completed before returning.
    competingThreadsCompleted.acquire(runnables.size());
}

From source file:com.zavakid.mushroom.impl.TestSinkQueue.java

private SinkQueue<Integer> newSleepingConsumerQueue(int capacity, int... values) {
    final SinkQueue<Integer> q = new SinkQueue<Integer>(capacity);
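    // Released by the consumer thread once it starts consuming; the acquire() below blocks until then.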
    final Semaphore semaphore = new Semaphore(0);
    for (int i : values) {
        q.enqueue(i);
    }
    Thread t = new Thread() {

        @Override
        public void run() {
            try {
                q.consume(new Consumer<Integer>() {

                    public void consume(Integer e) throws InterruptedException {
                        semaphore.release(1);
                        LOG.info("sleeping");
                        Thread.sleep(1000 * 86400); // a long time
                    }
                });
            } catch (InterruptedException ex) {
                LOG.warn("Interrupted", ex);
            }
        }
    };
    t.setName("Sleeping consumer");
    t.setDaemon(true); // so jvm can exit
    t.start();
    try {
        semaphore.acquire();
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    LOG.debug("Returning new sleeping consumer queue");
    return q;
}

From source file:com.mtgi.analytics.BehaviorTrackingManagerTest.java

@Test
public void testThreadSafety() throws InterruptedException, SQLException {

    EventGenerator[] threads = new EventGenerator[20];
    Semaphore in = new Semaphore(0), out = new Semaphore(0);
    for (int i = 0; i < threads.length; ++i) {
        threads[i] = new EventGenerator("thread[" + i + "]", in, out);
        threads[i].start();
    }

    //release the threads to do their work
    in.release(threads.length);
    //wait for all to finish
    out.acquire(threads.length);

    //let them all quiesce.
    for (int i = 0; i < threads.length; ++i) {
        threads[i].join(10000);
        assertFalse("thread[" + i + "] has exited", threads[i].isAlive());
    }

    //make sure lingering autoflushes are done, and perform a manual flush to pick up stragglers.
    flushTaskExecutions();
    manager.flush();
    assertEquals("no uncommitted events remain", 0, manager.getEventsPendingFlush());

    //do an initial count to see how we look.
    ResultSet rs = stmt.executeQuery(
            "select count(event_id) from BEHAVIOR_TRACKING_EVENT where event_type != 'behavior-tracking'");
    assertTrue(rs.next());
    int ret = rs.getInt(1);
    rs.close();
    assertEquals("all threads' events are committed", 39 * threads.length, ret);

    //let each thread verify that all of its data was committed
    for (EventGenerator g : threads)
        g.verifyEvents();
}

From source file:com.netflix.curator.framework.recipes.queue.DistributedQueue.java

private void processChildren(List<String> children, long currentVersion) throws Exception {
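    // Used as a countdown latch: one permit is released per child (or children.size() at once on early exit),
    // and the acquire(children.size()) at the end blocks until every child is accounted for.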
    final Semaphore processedLatch = new Semaphore(0);
    final boolean isUsingLockSafety = (lockPath != null);
    int min = minItemsBeforeRefresh;
    for (final String itemNode : children) {
        if (Thread.currentThread().isInterrupted()) {
            processedLatch.release(children.size());
            break;
        }

        if (!itemNode.startsWith(QUEUE_ITEM_NAME)) {
            log.warn("Foreign node in queue path: " + itemNode);
            processedLatch.release();
            continue;
        }

        if (min-- <= 0) {
            if (refreshOnWatch && (currentVersion != childrenCache.getData().version)) {
                processedLatch.release(children.size());
                break;
            }
        }

        if (getDelay(itemNode) > 0) {
            processedLatch.release();
            continue;
        }

        executor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    if (isUsingLockSafety) {
                        processWithLockSafety(itemNode, ProcessType.NORMAL);
                    } else {
                        processNormally(itemNode, ProcessType.NORMAL);
                    }
                } catch (Exception e) {
                    log.error("Error processing message at " + itemNode, e);
                } finally {
                    processedLatch.release();
                }
            }
        });
    }

    processedLatch.acquire(children.size());
}

From source file:com.netflix.curator.framework.recipes.locks.TestInterProcessMutexBase.java

@Test
public void testReentrantSingleLock() throws Exception {
    final int THREAD_QTY = 10;

    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    client.start();
    try {
        final AtomicBoolean hasLock = new AtomicBoolean(false);
        final AtomicBoolean isFirst = new AtomicBoolean(true);
        final Semaphore semaphore = new Semaphore(1);
        final InterProcessLock mutex = makeLock(client);

        List<Future<Object>> threads = Lists.newArrayList();
        ExecutorService service = Executors.newCachedThreadPool();
        for (int i = 0; i < THREAD_QTY; ++i) {
            Future<Object> t = service.submit(new Callable<Object>() {
                @Override
                public Object call() throws Exception {
                    semaphore.acquire();
                    mutex.acquire();
                    Assert.assertTrue(hasLock.compareAndSet(false, true));
                    try {
                        if (isFirst.compareAndSet(true, false)) {
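                            // The first thread to hold the mutex hands out the remaining permits so the
                            // other threads can run and block on the mutex while this thread still holds it.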
                            semaphore.release(THREAD_QTY - 1);
                            while (semaphore.availablePermits() > 0) {
                                Thread.sleep(100);
                            }
                        } else {
                            Thread.sleep(100);
                        }
                    } finally {
                        mutex.release();
                        hasLock.set(false);
                    }
                    return null;
                }
            });
            threads.add(t);
        }

        for (Future<Object> t : threads) {
            t.get();
        }
    } finally {
        client.close();
    }
}

From source file:org.opcfoundation.ua.transport.https.HttpsServer.java

@Override
public EndpointHandle bind(SocketAddress socketAddress, EndpointBinding endpointBinding)
        throws ServiceResultException {
    if (endpointBinding == null || socketAddress == null || endpointBinding.endpointServer != this)
        throw new IllegalArgumentException();
    String url = endpointBinding.endpointAddress.getEndpointUrl();

    // Start endpoint handler
    {
        String endpointId = url;
        endpointId = UriUtil.getEndpointName(url);
        if (endpointId == null)
            endpointId = "";
        //         else endpointId = "*"+endpointId;
        HttpAsyncRequestHandler<?> oldEndpointHandler = registry.lookup(endpointId);
        if (oldEndpointHandler == null) {
            HttpsServerEndpointHandler endpointHandler = new HttpsServerEndpointHandler(endpointBinding);
            registry.register(endpointId, endpointHandler);
            registry.register("", discoveryHandler);
        } else {
            HttpsServerEndpointHandler oldEndpointHander2 = (HttpsServerEndpointHandler) oldEndpointHandler;
            if (oldEndpointHander2.endpointServer != endpointBinding.endpointServer) {
                throw new ServiceResultException(StatusCodes.Bad_UnexpectedError,
                        "Cannot bind endpoint " + url + " and "
                                + oldEndpointHander2.endpointBinding.endpointAddress.getEndpointUrl()
                                + " with two different sets of service.");
            }
        }
    }

    // Make socket handle and endpoint handle
    String scheme = UriUtil.getTransportProtocol(endpointBinding.endpointAddress.getEndpointUrl());
    SocketHandle socketHandle = getOrCreateSocketHandle(socketAddress, scheme);

    HttpsEndpointHandle endpointHandle = socketHandle.getOrCreate(endpointBinding);

    try {
        // Shutdown reactor
        shutdownReactor();
        // Create reactor
        initReactor();

        // Bind to listen the given ports
        for (SocketHandle sh : socketHandleSnapshot()) {
            if (sh.listenerEndpoint == null) {
                sh.listenerEndpoint = ioReactor.listen(sh.getSocketAddress());
            }
        }

        // Start reactor threads
        if (UriUtil.SCHEME_HTTPS.equals(scheme)) {
            if (sslReactorThread == null || !sslReactorThread.isAlive()) {
                final IOReactor r = ioReactor;
                final Semaphore s = sslThreadSemaphore = new Semaphore(0);
                sslReactorThread = new Thread() {
                    public void run() {
                        try {
                            setState(CloseableObjectState.Open);
                            r.execute(sslIoEventDispatch);
                        } catch (IOException e) {
                            HttpsServer.this.setError(new ServiceResultException(e));
                        } finally {
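                            // Release a surplus of permits so anything waiting on this semaphore
                            // is unblocked once the reactor stops.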
                            s.release(9999);
                        }
                    };
                };
                if (!getState().isOpen())
                    setState(CloseableObjectState.Opening);
                sslReactorThread.start();
            }
        }

        if (UriUtil.SCHEME_HTTP.equals(scheme)) {
            if (plainReactorThread == null || !plainReactorThread.isAlive()) {
                final IOReactor r = ioReactor;
                final Semaphore s = plainThreadSemaphore = new Semaphore(0);
                plainReactorThread = new Thread() {
                    public void run() {
                        try {
                            setState(CloseableObjectState.Open);
                            r.execute(plainIoEventDispatch);
                        } catch (IOException e) {
                            HttpsServer.this.setError(new ServiceResultException(e));
                        } finally {
                            s.release(9999);
                        }
                    };
                };
                if (!getState().isOpen())
                    setState(CloseableObjectState.Opening);
                plainReactorThread.start();
            }
        }

    } catch (ServiceResultException e) {
        endpointHandle.close();
        throw e;
    }
    log.info("Endpoint bound to {}", url);
    return endpointHandle;
}

From source file:com.netflix.curator.framework.recipes.cache.TestPathChildrenCache.java

@Test
public void testRebuildNode() throws Exception {
    PathChildrenCache cache = null;
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1));
    client.start();
    try {
        client.create().creatingParentsIfNeeded().forPath("/test/one", "one".getBytes());

        final CountDownLatch latch = new CountDownLatch(1);
        final AtomicInteger counter = new AtomicInteger();
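        // Each getDataAndStat() call must first take a permit; the release(1000) at the end
        // of the test unblocks any calls still waiting.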
        final Semaphore semaphore = new Semaphore(1);
        cache = new PathChildrenCache(client, "/test", true) {
            @Override
            void getDataAndStat(String fullPath) throws Exception {
                semaphore.acquire();
                counter.incrementAndGet();
                super.getDataAndStat(fullPath);
                latch.countDown();
            }
        };
        cache.start(PathChildrenCache.StartMode.BUILD_INITIAL_CACHE);

        latch.await();

        int saveCounter = counter.get();
        client.setData().forPath("/test/one", "alt".getBytes());
        cache.rebuildNode("/test/one");
        Assert.assertEquals(cache.getCurrentData("/test/one").getData(), "alt".getBytes());
        Assert.assertEquals(saveCounter, counter.get());

        semaphore.release(1000);
    } finally {
        IOUtils.closeQuietly(cache);
        IOUtils.closeQuietly(client);
    }
}

From source file:org.apache.bookkeeper.tools.perf.dlog.PerfWriter.java

void write(List<DistributedLogManager> logs, double writeRate, int maxOutstandingBytesForThisThread,
        long numRecordsForThisThread, long numBytesForThisThread) throws Exception {
    log.info(
            "Write thread started with : logs = {}, rate = {},"
                    + " num records = {}, num bytes = {}, max outstanding bytes = {}",
            logs.stream().map(l -> l.getStreamName()).collect(Collectors.toList()), writeRate,
            numRecordsForThisThread, numBytesForThisThread, maxOutstandingBytesForThisThread);

    List<CompletableFuture<AsyncLogWriter>> writerFutures = logs.stream()
            .map(manager -> manager.openAsyncLogWriter()).collect(Collectors.toList());
    List<AsyncLogWriter> writers = result(FutureUtils.collect(writerFutures));

    long txid = writers.stream().mapToLong(writer -> writer.getLastTxId()).max().orElse(0L);
    txid = Math.max(0L, txid);

    RateLimiter limiter;
    if (writeRate > 0) {
        limiter = RateLimiter.create(writeRate);
    } else {
        limiter = null;
    }
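    // Permits model outstanding bytes: payload.length permits are acquired before each write
    // and released in the write completion callback.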
    final Semaphore semaphore;
    if (maxOutstandingBytesForThisThread > 0) {
        semaphore = new Semaphore(maxOutstandingBytesForThisThread);
    } else {
        semaphore = null;
    }

    // Acquire 1 second worth of records to have a slower ramp-up
    if (limiter != null) {
        limiter.acquire((int) writeRate);
    }

    long totalWritten = 0L;
    long totalBytesWritten = 0L;
    final int numLogs = logs.size();
    while (true) {
        for (int i = 0; i < numLogs; i++) {
            if (numRecordsForThisThread > 0 && totalWritten >= numRecordsForThisThread) {
                markPerfDone();
            }
            if (numBytesForThisThread > 0 && totalBytesWritten >= numBytesForThisThread) {
                markPerfDone();
            }
            if (null != semaphore) {
                semaphore.acquire(payload.length);
            }

            totalWritten++;
            totalBytesWritten += payload.length;
            if (null != limiter) {
                limiter.acquire(payload.length);
            }
            final long sendTime = System.nanoTime();
            writers.get(i).write(new LogRecord(++txid, Unpooled.wrappedBuffer(payload))).thenAccept(dlsn -> {
                if (null != semaphore) {
                    semaphore.release(payload.length);
                }

                recordsWritten.increment();
                bytesWritten.add(payload.length);

                long latencyMicros = TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - sendTime);
                recorder.recordValue(latencyMicros);
                cumulativeRecorder.recordValue(latencyMicros);
            }).exceptionally(cause -> {
                log.warn("Error at writing records", cause);
                System.exit(-1);
                return null;
            });
        }
    }
}

From source file:com.netflix.curator.framework.recipes.queue.TestBoundedDistributedQueue.java

@Test
public void testSimple() throws Exception {
    Timing timing = new Timing();
    DistributedQueue<String> queue = null;
    CuratorFramework client = CuratorFrameworkFactory.newClient(server.getConnectString(), timing.session(),
            timing.connection(), new RetryOneTime(1));
    try {
        client.start();

        final List<String> messages = new CopyOnWriteArrayList<String>();
        final CountDownLatch latch = new CountDownLatch(2);
        final Semaphore semaphore = new Semaphore(0);
        QueueConsumer<String> consumer = new QueueConsumer<String>() {
            @Override
            public void consumeMessage(String message) throws Exception {
                messages.add(message);
                semaphore.acquire();
            }

            @Override
            public void stateChanged(CuratorFramework client, ConnectionState newState) {
            }
        };
        queue = QueueBuilder.builder(client, consumer, serializer, "/queue")
                .executor(Executors.newSingleThreadExecutor()).maxItems(1).buildQueue();
        queue.start();

        QueuePutListener<String> listener = new QueuePutListener<String>() {
            @Override
            public void putCompleted(String item) {
                latch.countDown();
            }

            @Override
            public void putMultiCompleted(MultiItem<String> items) {
            }
        };
        queue.getPutListenerContainer().addListener(listener);

        Assert.assertTrue(queue.put("1", timing.milliseconds(), TimeUnit.MILLISECONDS)); // should end up in consumer
        Assert.assertTrue(queue.put("2", timing.milliseconds(), TimeUnit.MILLISECONDS)); // should sit blocking in DistributedQueue
        Assert.assertTrue(timing.awaitLatch(latch));
        timing.sleepABit();
        Assert.assertFalse(queue.put("3", timing.multiple(.5).milliseconds(), TimeUnit.MILLISECONDS));

        semaphore.release(100); // more permits than needed, so the consumer can drain all remaining messages
        Assert.assertTrue(queue.put("3", timing.milliseconds(), TimeUnit.MILLISECONDS));
        Assert.assertTrue(queue.put("4", timing.milliseconds(), TimeUnit.MILLISECONDS));
        Assert.assertTrue(queue.put("5", timing.milliseconds(), TimeUnit.MILLISECONDS));

        for (int i = 0; i < 5; ++i) {
            if (messages.size() == 3) {
                break;
            }
            timing.sleepABit();
        }
        timing.sleepABit();

        Assert.assertEquals(messages, Arrays.asList("1", "2", "3", "4", "5"));
    } finally {
        IOUtils.closeQuietly(queue);
        IOUtils.closeQuietly(client);
    }
}

From source file:org.apache.solr.schema.TestSchemalessBufferedUpdates.java

@Test
public void test() throws Exception {
    DirectUpdateHandler2.commitOnClose = false;
    final Semaphore logReplay = new Semaphore(0);
    final Semaphore logReplayFinish = new Semaphore(0);
    UpdateLog.testing_logReplayHook = () -> {
        try {
            assertTrue(logReplay.tryAcquire(TIMEOUT, TimeUnit.SECONDS));
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    };
    UpdateLog.testing_logReplayFinishHook = logReplayFinish::release;

    SolrQueryRequest req = req();
    UpdateHandler uhandler = req.getCore().getUpdateHandler();
    UpdateLog ulog = uhandler.getUpdateLog();

    try {
        assertEquals(UpdateLog.State.ACTIVE, ulog.getState());

        // Invalid date will be normalized by ParseDateField URP
        updateJ(jsonAdd(processAdd(sdoc("id", "1", "f_dt", "2017-01-04"))),
                params(DISTRIB_UPDATE_PARAM, FROM_LEADER));
        assertU(commit());
        assertJQ(req("q", "*:*"), "/response/numFound==1");

        ulog.bufferUpdates();
        assertEquals(UpdateLog.State.BUFFERING, ulog.getState());

        // If the ParseDateField URP isn't ahead of the DUP, then the date won't be normalized in the buffered tlog entry,
        // and the doc won't be indexed on the replaying replica - a warning is logged as follows:
        // WARN [...] o.a.s.u.UpdateLog REYPLAY_ERR: IOException reading log
        //            org.apache.solr.common.SolrException: Invalid Date String:'2017-01-05'
        //              at org.apache.solr.util.DateMathParser.parseMath(DateMathParser.java:234)
        //              at org.apache.solr.schema.TrieField.createField(TrieField.java:725) [...]
        updateJ(jsonAdd(processAdd(sdoc("id", "2", "f_dt", "2017-01-05"))),
                params(DISTRIB_UPDATE_PARAM, FROM_LEADER));

        Future<UpdateLog.RecoveryInfo> rinfoFuture = ulog.applyBufferedUpdates();

        assertTrue(rinfoFuture != null);

        assertEquals(UpdateLog.State.APPLYING_BUFFERED, ulog.getState());

        logReplay.release(1000); // plenty of permits, so the replay hook never blocks again

        UpdateLog.RecoveryInfo rinfo = rinfoFuture.get();
        assertEquals(UpdateLog.State.ACTIVE, ulog.getState());

        assertU(commit());
        assertJQ(req("q", "*:*"), "/response/numFound==2");
    } finally {
        DirectUpdateHandler2.commitOnClose = true;
        UpdateLog.testing_logReplayHook = null;
        UpdateLog.testing_logReplayFinishHook = null;
        req().close();
    }
}