Example usage for java.util.concurrent Semaphore acquire

Introduction

This page collects usage examples for java.util.concurrent.Semaphore.acquire(int permits).

Prototype

public void acquire(int permits) throws InterruptedException 

Document

Acquires the given number of permits from this semaphore, blocking until all of them are available or the current thread is interrupted.
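
As a quick illustration of the multi-permit form, the sketch below (not taken from the usage examples that follow; the class name, field name, and sizes are made up) budgets a shared resource: the caller acquires as many permits as its work consumes and releases the same number when it is done.

import java.util.concurrent.Semaphore;

public class MultiPermitExample {
    // Allow at most 1024 "units" (for example, bytes) to be outstanding at once.
    private static final Semaphore budget = new Semaphore(1024);

    public static void main(String[] args) throws InterruptedException {
        int cost = 256; // size of one piece of work

        // Blocks until 256 permits are available, or throws
        // InterruptedException if this thread is interrupted while waiting.
        budget.acquire(cost);
        try {
            System.out.println("processing work of size " + cost);
        } finally {
            // Return exactly the permits that were taken.
            budget.release(cost);
        }
    }
}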

Usage

From source file:Main.java

public static void acquire(Semaphore semaphore, int permits) {
    try {
        semaphore.acquire(permits);
    } catch (InterruptedException iex) {
        // ignored: the interrupt is swallowed and the thread's interrupt status is not restored
    }
}
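
The helper above silently discards the InterruptedException. A common alternative, shown here as a hypothetical sketch rather than a quote from the original source, is to restore the thread's interrupt status so callers can still detect the interruption; when the intent is simply to keep waiting across interrupts, Semaphore also provides acquireUninterruptibly(int permits).

public static void acquireRestoringInterrupt(Semaphore semaphore, int permits) {
    try {
        semaphore.acquire(permits);
    } catch (InterruptedException iex) {
        // Re-assert the interrupt flag instead of swallowing it; note that
        // no permits were acquired if this branch was reached.
        Thread.currentThread().interrupt();
    }
}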

From source file:edu.iu.harp.schdynamic.ComputeUtil.java

public static void acquire(Semaphore sem, int count) {
    boolean isFailed = false;
    do {
        try {
            sem.acquire(count);
            isFailed = false;
        } catch (Exception e) {
            isFailed = true;
            LOG.error("Error when acquiring semaphore", e);
        }
    } while (isFailed);
}

From source file:Main.java

/**
 * Runs each runnable in a new thread. This method blocks until all runnables are complete.
 * Before any runnables are run, we also wait until each allocated thread has run at least once.
 * This is done to increase the randomness of the thread execution order.
 */
public static void startMultipleThreadsAndWaitUntilComplete(final List<Runnable> runnables) throws Exception {
    final Semaphore competingThreadsStarted = new Semaphore(0); // Number of threads for runnables started.
    final Semaphore competingThreadsToRelease = new Semaphore(0); // Acquired by runnable threads. Will be released
                                                                  // once all runnables have been run once.
    final Semaphore competingThreadsCompleted = new Semaphore(0); // Number of runnable threads completed.

    for (int i = 0; i < runnables.size(); i++) {
        final int runnableIndex = i;

        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    // Notify semaphore that this thread has been started.
                    competingThreadsStarted.release(1);

                    // Once all threads have notified the competingThreadsStarted semaphore,
                    // competingThreadsToRelease will be released and we will continue.
                    competingThreadsToRelease.acquire(1);

                    // Increases randomness of thread execution order.
                    Thread.sleep(1);

                    runnables.get(runnableIndex).run();

                    // thread has completed running provided runnable.
                    competingThreadsCompleted.release(1);
                } catch (final InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }).start();
    }

    // Only proceed once all threads have at least started running once.
    competingThreadsStarted.acquire(runnables.size());

    // Release all threads.
    competingThreadsToRelease.release(runnables.size());

    // Wait until all threads have completed before returning.
    competingThreadsCompleted.acquire(runnables.size());
}

From source file:org.apache.hadoop.hbase.procedure2.TestProcedureExecutor.java

@Test(timeout = 60000)
public void testWorkerStuck() throws Exception {
    // replace the executor
    final Configuration conf = new Configuration(htu.getConfiguration());
    conf.setFloat("hbase.procedure.worker.add.stuck.percentage", 0.5f);
    conf.setInt("hbase.procedure.worker.monitor.interval.msec", 500);
    conf.setInt("hbase.procedure.worker.stuck.threshold.msec", 750);

    final int NUM_THREADS = 2;
    createNewExecutor(conf, NUM_THREADS);

    Semaphore latch1 = new Semaphore(2);
    latch1.acquire(2);
    BusyWaitProcedure busyProc1 = new BusyWaitProcedure(latch1);

    Semaphore latch2 = new Semaphore(2);
    latch2.acquire(2);
    BusyWaitProcedure busyProc2 = new BusyWaitProcedure(latch2);

    long busyProcId1 = procExecutor.submitProcedure(busyProc1);
    long busyProcId2 = procExecutor.submitProcedure(busyProc2);
    long otherProcId = procExecutor.submitProcedure(new NoopProcedure());

    // wait until a new worker is being created
    int threads1 = waitThreadCount(NUM_THREADS + 1);
    LOG.info("new threads got created: " + (threads1 - NUM_THREADS));
    assertEquals(NUM_THREADS + 1, threads1);

    ProcedureTestingUtility.waitProcedure(procExecutor, otherProcId);
    assertEquals(true, procExecutor.isFinished(otherProcId));
    ProcedureTestingUtility.assertProcNotFailed(procExecutor, otherProcId);

    assertEquals(true, procExecutor.isRunning());
    assertEquals(false, procExecutor.isFinished(busyProcId1));
    assertEquals(false, procExecutor.isFinished(busyProcId2));

    // terminate the busy procedures
    latch1.release();
    latch2.release();

    LOG.info("set keep alive and wait threads being removed");
    procExecutor.setKeepAliveTime(500L, TimeUnit.MILLISECONDS);
    int threads2 = waitThreadCount(NUM_THREADS);
    LOG.info("threads got removed: " + (threads1 - threads2));
    assertEquals(NUM_THREADS, threads2);

    // terminate the busy procedures
    latch1.release();
    latch2.release();

    // wait for all procs to complete
    ProcedureTestingUtility.waitProcedure(procExecutor, busyProcId1);
    ProcedureTestingUtility.waitProcedure(procExecutor, busyProcId2);
    ProcedureTestingUtility.assertProcNotFailed(procExecutor, busyProcId1);
    ProcedureTestingUtility.assertProcNotFailed(procExecutor, busyProcId2);
}

From source file:org.apache.bookkeeper.tools.perf.dlog.PerfWriter.java

void write(List<DistributedLogManager> logs, double writeRate, int maxOutstandingBytesForThisThread,
        long numRecordsForThisThread, long numBytesForThisThread) throws Exception {
    log.info(
            "Write thread started with : logs = {}, rate = {},"
                    + " num records = {}, num bytes = {}, max outstanding bytes = {}",
            logs.stream().map(l -> l.getStreamName()).collect(Collectors.toList()), writeRate,
            numRecordsForThisThread, numBytesForThisThread, maxOutstandingBytesForThisThread);

    List<CompletableFuture<AsyncLogWriter>> writerFutures = logs.stream()
            .map(manager -> manager.openAsyncLogWriter()).collect(Collectors.toList());
    List<AsyncLogWriter> writers = result(FutureUtils.collect(writerFutures));

    long txid = writers.stream().mapToLong(writer -> writer.getLastTxId()).max().orElse(0L);
    txid = Math.max(0L, txid);

    RateLimiter limiter;
    if (writeRate > 0) {
        limiter = RateLimiter.create(writeRate);
    } else {
        limiter = null;
    }
    final Semaphore semaphore;
    if (maxOutstandingBytesForThisThread > 0) {
        semaphore = new Semaphore(maxOutstandingBytesForThisThread);
    } else {
        semaphore = null;
    }

    // Acquire 1 second worth of records to have a slower ramp-up
    if (limiter != null) {
        limiter.acquire((int) writeRate);
    }

    long totalWritten = 0L;
    long totalBytesWritten = 0L;
    final int numLogs = logs.size();
    while (true) {
        for (int i = 0; i < numLogs; i++) {
            if (numRecordsForThisThread > 0 && totalWritten >= numRecordsForThisThread) {
                markPerfDone();
            }
            if (numBytesForThisThread > 0 && totalBytesWritten >= numBytesForThisThread) {
                markPerfDone();
            }
            if (null != semaphore) {
                semaphore.acquire(payload.length);
            }

            totalWritten++;
            totalBytesWritten += payload.length;
            if (null != limiter) {
                limiter.acquire(payload.length);
            }
            final long sendTime = System.nanoTime();
            writers.get(i).write(new LogRecord(++txid, Unpooled.wrappedBuffer(payload))).thenAccept(dlsn -> {
                if (null != semaphore) {
                    semaphore.release(payload.length);
                }

                recordsWritten.increment();
                bytesWritten.add(payload.length);

                long latencyMicros = TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - sendTime);
                recorder.recordValue(latencyMicros);
                cumulativeRecorder.recordValue(latencyMicros);
            }).exceptionally(cause -> {
                log.warn("Error at writing records", cause);
                System.exit(-1);
                return null;
            });
        }
    }
}

From source file:gobblin.runtime.job_catalog.FSJobCatalogHelperTest.java

@Test(dependsOnMethods = { "testloadGenericJobConfig" })
public void testPathAlterationObserver() throws Exception {
    PathAlterationObserverScheduler detector = new PathAlterationObserverScheduler(1000);
    final Set<Path> fileAltered = Sets.newHashSet();
    final Semaphore semaphore = new Semaphore(0);
    PathAlterationListener listener = new PathAlterationListenerAdaptor() {

        @Override
        public void onFileCreate(Path path) {
            fileAltered.add(path);
            semaphore.release();
        }

        @Override
        public void onFileChange(Path path) {
            fileAltered.add(path);
            semaphore.release();
        }
    };

    detector.addPathAlterationObserver(listener, Optional.<PathAlterationObserver>absent(),
            new Path(this.jobConfigDir.getPath()));
    try {
        detector.start();
        // Give the monitor some time to start
        Thread.sleep(1000);

        File jobConfigFile = new File(this.subDir11, "test111.pull");
        Files.touch(jobConfigFile);

        File newJobConfigFile = new File(this.subDir11, "test112.pull");
        Files.append("k1=v1", newJobConfigFile, ConfigurationKeys.DEFAULT_CHARSET_ENCODING);

        semaphore.acquire(2);
        Assert.assertEquals(fileAltered.size(), 2);

        Assert.assertTrue(fileAltered.contains(new Path("file:" + jobConfigFile)));
        Assert.assertTrue(fileAltered.contains(new Path("file:" + newJobConfigFile)));
    } finally {
        detector.stop();
    }
}

From source file:org.apache.gobblin.runtime.job_catalog.FSJobCatalogHelperTest.java

@Test(enabled = false, dependsOnMethods = { "testloadGenericJobConfig" })
public void testPathAlterationObserver() throws Exception {
    PathAlterationObserverScheduler detector = new PathAlterationObserverScheduler(1000);
    final Set<Path> fileAltered = Sets.newHashSet();
    final Semaphore semaphore = new Semaphore(0);
    PathAlterationListener listener = new PathAlterationListenerAdaptor() {

        @Override
        public void onFileCreate(Path path) {
            fileAltered.add(path);
            semaphore.release();
        }

        @Override
        public void onFileChange(Path path) {
            fileAltered.add(path);
            semaphore.release();
        }
    };

    detector.addPathAlterationObserver(listener, Optional.<PathAlterationObserver>absent(),
            new Path(this.jobConfigDir.getPath()));
    try {
        detector.start();
        // Give the monitor some time to start
        Thread.sleep(1000);

        File jobConfigFile = new File(this.subDir11, "test111.pull");
        Files.touch(jobConfigFile);

        File newJobConfigFile = new File(this.subDir11, "test112.pull");
        Files.append("k1=v1", newJobConfigFile, ConfigurationKeys.DEFAULT_CHARSET_ENCODING);

        semaphore.acquire(2);
        Assert.assertEquals(fileAltered.size(), 2);

        Assert.assertTrue(fileAltered.contains(new Path("file:" + jobConfigFile)));
        Assert.assertTrue(fileAltered.contains(new Path("file:" + newJobConfigFile)));
    } finally {
        detector.stop();
    }
}

From source file:gobblin.util.SchedulerUtilsTest.java

@Test(dependsOnMethods = { "testLoadJobConfigsForCommonPropsFile", "testloadGenericJobConfig" })
public void testPathAlterationObserver() throws Exception {
    PathAlterationObserverScheduler monitor = new PathAlterationObserverScheduler(1000);
    final Set<Path> fileAltered = Sets.newHashSet();
    final Semaphore semaphore = new Semaphore(0);
    PathAlterationListener listener = new PathAlterationListenerAdaptor() {

        @Override
        public void onFileCreate(Path path) {
            fileAltered.add(path);
            semaphore.release();
        }

        @Override
        public void onFileChange(Path path) {
            fileAltered.add(path);
            semaphore.release();
        }
    };

    SchedulerUtils.addPathAlterationObserver(monitor, listener, new Path(this.jobConfigDir.getPath()));
    try {
        monitor.start();
        // Give the monitor some time to start
        Thread.sleep(1000);

        File jobConfigFile = new File(this.subDir11, "test111.pull");
        Files.touch(jobConfigFile);

        File commonPropsFile = new File(this.subDir1, "test.properties");
        Files.touch(commonPropsFile);

        File newJobConfigFile = new File(this.subDir11, "test112.pull");
        Files.append("k1=v1", newJobConfigFile, ConfigurationKeys.DEFAULT_CHARSET_ENCODING);

        semaphore.acquire(3);
        Assert.assertEquals(fileAltered.size(), 3);

        Assert.assertTrue(fileAltered.contains(new Path("file:" + jobConfigFile)));
        Assert.assertTrue(fileAltered.contains(new Path("file:" + commonPropsFile)));
        Assert.assertTrue(fileAltered.contains(new Path("file:" + newJobConfigFile)));
    } finally {
        monitor.stop();
    }
}

From source file:com.amazonaws.services.kinesis.clientlibrary.lib.worker.WorkerTest.java

private void runAndTestWorker(List<Shard> shardList, int threadPoolSize, List<KinesisClientLease> initialLeases,
        boolean callProcessRecordsForEmptyRecordList, int numberOfRecordsPerShard) throws Exception {
    File file = KinesisLocalFileDataCreator.generateTempDataFile(shardList, numberOfRecordsPerShard,
            "unitTestWT001");
    IKinesisProxy fileBasedProxy = new KinesisLocalFileProxy(file.getAbsolutePath());

    Semaphore recordCounter = new Semaphore(0);
    ShardSequenceVerifier shardSequenceVerifier = new ShardSequenceVerifier(shardList);
    TestStreamletFactory recordProcessorFactory = new TestStreamletFactory(recordCounter,
            shardSequenceVerifier);

    ExecutorService executorService = Executors.newFixedThreadPool(threadPoolSize);

    WorkerThread workerThread = runWorker(shardList, initialLeases, callProcessRecordsForEmptyRecordList,
            failoverTimeMillis, numberOfRecordsPerShard, fileBasedProxy, recordProcessorFactory,
            executorService, nullMetricsFactory);

    // TestStreamlet will release the semaphore once for every record it processes
    recordCounter.acquire(numberOfRecordsPerShard * shardList.size());

    // Wait a bit to allow the worker to spin against the end of the stream.
    Thread.sleep(500L);

    testWorker(shardList, threadPoolSize, initialLeases, callProcessRecordsForEmptyRecordList,
            numberOfRecordsPerShard, fileBasedProxy, recordProcessorFactory);

    workerThread.getWorker().shutdown();
    executorService.shutdownNow();
    file.delete();
}

From source file:com.netflix.curator.framework.recipes.queue.DistributedQueue.java

private void processChildren(List<String> children, long currentVersion) throws Exception {
    final Semaphore processedLatch = new Semaphore(0);
    final boolean isUsingLockSafety = (lockPath != null);
    int min = minItemsBeforeRefresh;
    for (final String itemNode : children) {
        if (Thread.currentThread().isInterrupted()) {
            processedLatch.release(children.size());
            break;
        }

        if (!itemNode.startsWith(QUEUE_ITEM_NAME)) {
            log.warn("Foreign node in queue path: " + itemNode);
            processedLatch.release();
            continue;
        }

        if (min-- <= 0) {
            if (refreshOnWatch && (currentVersion != childrenCache.getData().version)) {
                processedLatch.release(children.size());
                break;
            }
        }

        if (getDelay(itemNode) > 0) {
            processedLatch.release();
            continue;
        }

        executor.execute(new Runnable() {
            @Override
            public void run() {
                try {
                    if (isUsingLockSafety) {
                        processWithLockSafety(itemNode, ProcessType.NORMAL);
                    } else {
                        processNormally(itemNode, ProcessType.NORMAL);
                    }
                } catch (Exception e) {
                    log.error("Error processing message at " + itemNode, e);
                } finally {
                    processedLatch.release();
                }
            }
        });
    }

    processedLatch.acquire(children.size());
}