Example usage for java.util.concurrent Semaphore acquire

Introduction

This page collects usage examples of java.util.concurrent.Semaphore.acquire() from open-source projects.

Prototype

public void acquire() throws InterruptedException 

Document

Acquires a permit from this semaphore, blocking until one is available or the thread is interrupted (see Thread#interrupt).
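
Before the project examples, here is a minimal, self-contained sketch of the basic pattern (class and variable names are illustrative, not taken from any source below): acquire() blocks until a permit is free, and release() belongs in a finally block so an exception cannot leak the permit.

import java.util.concurrent.Semaphore;

public class AcquireSketch {
    public static void main(String[] args) throws InterruptedException {
        Semaphore permits = new Semaphore(2);     // two permits available

        permits.acquire();                        // blocks until a permit is free
        try {
            // ... use the resource guarded by the semaphore ...
            System.out.println("available permits: " + permits.availablePermits());
        } finally {
            permits.release();                    // always hand the permit back
        }
    }
}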

Usage

From source file:com.alibaba.napoli.client.benchmark.NapoliNormalQueueTest.java

@Test
public void sendMessageWithSenderStoreDisabledTest() throws Exception {

    log.info("start to execute sendMessageWithSenderStoreDisabledTest");

    long beginQueueSize = JmxUtil.getQueueSize(sendConnector.getAddress(), queueName);

    qSender = new DefaultAsyncSender();
    qSender.setConnector(sendConnector);
    qSender.setName(queueName);

    qSender.setStoreEnable(false);
    qSender.setReprocessInterval(10000 * 1000 * 1000);

    qSender.init();

    int tc = 10;
    log.info("yanny requestcount = " + System.getProperty("requestCount"));
    final int tp = Integer.parseInt(System.getProperty("requestCount", "20"));
    final Semaphore semaphore = new Semaphore(tc);
    final AtomicInteger sumCount = new AtomicInteger();

    final AtomicInteger requestCount = new AtomicInteger();
    long startTime = System.currentTimeMillis();
    log.info("Yanny start send request " + startTime);

    for (int i = 0; i < tc; i++) {
        Thread t = new Thread("thread--" + i) {
            public void run() {
                try {
                    // the message type must be Serializable
                    semaphore.acquire();
                    Person person = new Person();

                    person.setLoginName("superman");
                    person.setEmail("sm@1.com");
                    person.setPenName("pname");
                    person.setStatus(PersonStatus.ENABLED);

                    for (int j = 0; j < tp; j++) {
                        //      log.info("hello");
                        int id = requestCount.incrementAndGet();
                        person.setPersonId("" + id);

                        // send() returns true on success, false otherwise
                        boolean result = qSender.send(person);
                        if (!result) {
                            log.info("----------------send to queue " + "result is false. personid=" + j);
                        } else {
                            sumCount.incrementAndGet();
                        }
                    }
                } catch (Throwable t) {
                    t.printStackTrace();
                } finally {
                    semaphore.release();
                }
            }
        };
        t.start();
    }

    while (semaphore.availablePermits() != tc) {
        Thread.sleep(100);
    }
    int totalRequest = tc * tp;

    long endTime = System.currentTimeMillis();
    log.info("yanny: send " + totalRequest + " message, take " + (endTime - startTime) + " milseconds");

    JmxUtil.waitTillQueueSizeAsTarget(sendConnector.getAddress(), queueName, beginQueueSize);

    endTime = System.currentTimeMillis();

    String errorMessage = "";

    long qBdbCount = NapoliTestUtil.getStoreSize(sendConnector.getSenderKVStore(qSender.getName()));
    log.info("yanny totalRequest " + totalRequest + " send queue success " + sumCount + " local store count:"
            + qBdbCount + " queue received " + qWorker.getAccessNum() + " take " + (endTime - startTime)
            + " milseconds");

    log.info(initConsumeMessage);

    log.info("NapoliNormalQueueTest's success=" + qWorker.getAccessNum() + " bdb's size=" + qBdbCount);

    if (qBdbCount > 0) {
        errorMessage += ";with store disabled, local store count should be empty, but is " + qBdbCount;
    }

    // with store enabled, all sends should succeed.
    if (qSender.getStoreEnable()) {
        if (sumCount.get() != totalRequest) {
            errorMessage += ";with store enabled, all send should return success, but not equal now. send succeed "
                    + sumCount.get() + "; total request:" + totalRequest;
        }
    } else {
        if (sumCount.get() < totalRequest * 0.95) {
            errorMessage += ";with store disabled, expected more than 95% message send succeed, total request:"
                    + totalRequest + "; send succeed " + sumCount.get();
        }
    }

    // the queue should not receive more messages than were sent (received <= totalRequest)
    if (totalRequest < qWorker.getAccessNum()) {
        errorMessage += ";queue should not receive more messages than the total sent " + totalRequest
                + " (received " + qWorker.getAccessNum() + ")";
    }

    // the queue should receive at least (send succeeded - locally stored qBdbCount) messages
    if ((sumCount.get() - qBdbCount) > qWorker.getAccessNum()) {
        errorMessage += ";queue received (" + qWorker.getAccessNum()
                + ") fewer messages than send succeeded minus locally stored, messages lost: "
                + (sumCount.get() - qBdbCount - qWorker.getAccessNum());
    }

    int allowedDiff = (int) Math.round(sumCount.get() * 0.001);

    if (((qWorker.getAccessNum() + qBdbCount) - sumCount.get()) > allowedDiff) {
        errorMessage += ";queue received plus locally stored should not exceed send succeeded by more than "
                + allowedDiff + " (0.1%), gap " + ((qWorker.getAccessNum() + qBdbCount) - sumCount.get());
    }

    assertTrue(errorMessage, errorMessage.equals(""));

    verify(napoliSenderStat, atMost(qWorker.getAccessNum())).sendSuccess(anyLong(), anyLong());
    verify(napoliSenderStat, atLeast((int) (sumCount.get() - qBdbCount))).sendSuccess(anyLong(), anyLong());
    verify(napoliSenderStat, times(totalRequest - sumCount.get())).sendFalse(anyLong(), anyLong());

    verify(napoliSenderStat, times((int) qBdbCount)).sendFailure(anyLong(), anyLong());

    verify(napoliReceiverStat, times((int) qWorker.getAccessNum())).receiveSuccess(anyLong(), anyLong());
}
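
A caveat on the wait loop above: availablePermits() can equal tc before any worker thread has acquired its permit, so polling it is timing-dependent. A CountDownLatch expresses "wait until all workers finish" without that race; the sketch below is an illustrative alternative, not code from the original test.

import java.util.concurrent.CountDownLatch;

public class LatchWaitSketch {
    public static void main(String[] args) throws InterruptedException {
        final int tc = 10;                        // worker count, as in the test above
        final CountDownLatch done = new CountDownLatch(tc);

        for (int i = 0; i < tc; i++) {
            new Thread(() -> {
                try {
                    // ... send messages, as each worker thread in the test does ...
                } finally {
                    done.countDown();             // each worker counts down exactly once
                }
            }).start();
        }

        done.await();                             // returns only after all tc countDown() calls
    }
}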

From source file:com.zavakid.mushroom.impl.TestSinkQueue.java

private SinkQueue<Integer> newSleepingConsumerQueue(int capacity, int... values) {
    final SinkQueue<Integer> q = new SinkQueue<Integer>(capacity);
    final Semaphore semaphore = new Semaphore(0);
    for (int i : values) {
        q.enqueue(i);
    }
    Thread t = new Thread() {

        @Override
        public void run() {
            try {
                q.consume(new Consumer<Integer>() {

                    public void consume(Integer e) throws InterruptedException {
                        semaphore.release(1);
                        LOG.info("sleeping");
                        Thread.sleep(1000 * 86400); // a long time
                    }
                });
            } catch (InterruptedException ex) {
                LOG.warn("Interrupted", ex);
            }
        }
    };
    t.setName("Sleeping consumer");
    t.setDaemon(true); // so jvm can exit
    t.start();
    try {
        semaphore.acquire();
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    LOG.debug("Returning new sleeping consumer queue");
    return q;
}

From source file:org.paxle.core.threading.impl.Master.java

public void process(final Data cmd) throws Exception {
    // creating and assigning dummy input/output queues
    final Semaphore s = new Semaphore(0);
    final IInputQueue<Data> inputQueue = new IInputQueue<Data>() {
        private int counter = 0;

        public Data dequeue() throws InterruptedException {
            if (counter > 0)
                throw new IllegalStateException("Method executed multiple times");
            this.counter++;
            return cmd;
        }

        public void waitForNext() throws InterruptedException {
            throw new IllegalStateException("You are not allowed to call this method");
        }
    };
    final IOutputQueue<Data> outputQueue = new IOutputQueue<Data>() {
        public void enqueue(Data command) throws InterruptedException {
            s.release();
        }
    };

    // process the command
    this.process(inputQueue, outputQueue, false);

    // waiting for the worker to finish execution
    s.acquire();
}
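
This example and the preceding TestSinkQueue one both use a zero-permit semaphore as a one-shot completion signal: with no initial permits, acquire() blocks until another thread calls release(). The same idea in isolation (illustrative names, assuming nothing beyond java.util.concurrent):

import java.util.concurrent.Semaphore;

public class CompletionSignalSketch {
    public static void main(String[] args) throws InterruptedException {
        final Semaphore done = new Semaphore(0);  // zero permits: acquire() blocks

        new Thread(() -> {
            // ... perform the work ...
            done.release();                       // signal completion
        }).start();

        done.acquire();                           // returns once the worker has released
        System.out.println("worker finished");
    }
}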

From source file:com.impetus.ankush2.ganglia.GangliaDeployer.java

private boolean deployNodes(final ClusterConfig conf, Map<String, Map<String, Object>> nodeMap) {
    try {
        // Node Deployment process ...
        final Semaphore semaphore = new Semaphore(nodeMap.size());
        for (final String host : nodeMap.keySet()) {
            semaphore.acquire();
            AppStoreWrapper.getExecutor().execute(new Runnable() {
                @Override
                public void run() {
                    conf.getNodes().get(host).setStatus(createNode(host));
                    if (semaphore != null) {
                        semaphore.release();
                    }
                }
            });
        }
        semaphore.acquire(nodeMap.size());
    } catch (Exception e) {
        return addClusterError("There is some exception while deploying " + getComponentName()
                + " on all nodes. " + GangliaConstants.EXCEPTION_STRING, e);
    }
    if (newClusterConf == null) {
        if (!clusterConf.getNodes().get(gmetadHost).getStatus()) {
            logger.error("Could not deploy " + getComponentName() + " on "
                    + GangliaConstants.Ganglia_Services.GangliaMaster + " node , so initializing rollback.",
                    getComponentName());
        }
        return clusterConf.getNodes().get(gmetadHost).getStatus();
    } else {
        return AnkushUtils.getStatus(conf.getNodes());
    }
}
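
This method, and several later examples (register, unregister, removeNode, and the Hadoop editparams method below), follow the same idiom: the semaphore starts with one permit per node, the submitting loop takes a permit before each task, each task releases its permit when finished, and the final acquire(n) can only succeed after every task has released, so it doubles as a completion barrier. A condensed, runnable sketch of the idiom (class name and host list are placeholders):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;

public class FanOutBarrierSketch {
    public static void main(String[] args) throws InterruptedException {
        final String[] hosts = { "host1", "host2", "host3" };  // placeholder node list
        final Semaphore semaphore = new Semaphore(hosts.length);
        ExecutorService executor = Executors.newFixedThreadPool(2);

        for (final String host : hosts) {
            semaphore.acquire();                  // take one permit per submitted task
            executor.execute(() -> {
                try {
                    System.out.println("deploying on " + host);  // placeholder work
                } finally {
                    semaphore.release();          // hand the permit back when done
                }
            });
        }

        semaphore.acquire(hosts.length);          // blocks until every task has released
        System.out.println("all nodes processed");
        executor.shutdown();
    }
}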

From source file:com.impetus.ankush2.ganglia.GangliaDeployer.java

@Override
public boolean register(final ClusterConfig conf) {

    try {
        if (String.valueOf(advanceConf.get(Constant.Keys.REGISTER_LEVEL))
                .equalsIgnoreCase(Constant.RegisterLevel.LEVEL1.toString())) {
            return true;
        }
        final String infoMsg = "Registering " + getComponentName() + "...";
        logger.info(infoMsg);
        // Getting node map for cluster deployment
        Map<String, Map<String, Object>> nodeMap = new HashMap<String, Map<String, Object>>(
                compConfig.getNodes());

        // Node Registration process ...
        final Semaphore semaphore = new Semaphore(nodeMap.size());
        for (final String host : nodeMap.keySet()) {
            semaphore.acquire();
            AppStoreWrapper.getExecutor().execute(new Runnable() {
                @Override
                public void run() {
                    conf.getNodes().get(host).setStatus(configureServiceMonitoring(host));
                    if (semaphore != null) {
                        semaphore.release();
                    }
                }
            });
        }
        semaphore.acquire(nodeMap.size());

        // Return false if any node is not deployed.
        return AnkushUtils.getStatus(conf.getNodes());
    } catch (Exception e) {
        addClusterError("Could not register " + getComponentName(), e);
        return false;
    }
}

From source file:com.impetus.ankush2.ganglia.GangliaDeployer.java

@Override
public boolean unregister(final ClusterConfig conf) {

    try {
        if (String.valueOf(advanceConf.get(Constant.Keys.REGISTER_LEVEL))
                .equalsIgnoreCase(Constant.RegisterLevel.LEVEL1.toString())) {
            return true;
        }

        if (!initializeDataMembers(conf)) {
            return false;
        }
        final String infoMsg = "Unregistering " + getComponentName() + "...";
        logger.info(infoMsg, getComponentName());
        // Getting node map for cluster deployment
        Map<String, Map<String, Object>> nodeMap = new HashMap<String, Map<String, Object>>(
                compConfig.getNodes());

        // Node unregistration process ...
        final Semaphore semaphore = new Semaphore(nodeMap.size());
        for (final String host : nodeMap.keySet()) {
            semaphore.acquire();
            AppStoreWrapper.getExecutor().execute(new Runnable() {
                @Override
                public void run() {
                    conf.getNodes().get(host).setStatus(unregisterNode(host));
                    if (semaphore != null) {
                        semaphore.release();
                    }
                }
            });
        }
        semaphore.acquire(nodeMap.size());

        // Return false if any node is not deployed.
        return AnkushUtils.getStatus(conf.getNodes());
    } catch (Exception e) {
        logger.error(e.getMessage());
        return false;
    }
}

From source file:org.apache.solr.cloud.TestTlogReplica.java

@SuppressWarnings("unchecked")
@BadApple(bugUrl = "https://issues.apache.org/jira/browse/SOLR-12028")
public void testRecovery() throws Exception {
    boolean useKill = random().nextBoolean();
    createAndWaitForCollection(1, 0, 2, 0);

    CloudSolrClient cloudClient = cluster.getSolrClient();
    new UpdateRequest().add(sdoc("id", "3")).add(sdoc("id", "4")).commit(cloudClient, collectionName);
    new UpdateRequest().add(sdoc("id", "5")).process(cloudClient, collectionName);
    JettySolrRunner solrRunner = getSolrRunner(false).get(0);
    if (useKill) {
        ChaosMonkey.kill(solrRunner);
    } else {
        ChaosMonkey.stop(solrRunner);
    }
    waitForState("Replica still up", collectionName, activeReplicaCount(0, 1, 0));
    new UpdateRequest().add(sdoc("id", "6")).process(cloudClient, collectionName);
    ChaosMonkey.start(solrRunner);
    waitForState("Replica didn't recover", collectionName, activeReplicaCount(0, 2, 0));
    // We skip peerSync, so replica will always trigger commit on leader
    // We query only the non-leader replicas, since we haven't opened a new searcher on the leader yet
    waitForNumDocsInAllReplicas(4, getNonLeaderReplias(collectionName), 10); //timeout for stale collection state

    // If I add the doc immediately, the leader fails to communicate with the follower with broken pipe.
    // Options are, wait or retry...
    for (int i = 0; i < 3; i++) {
        UpdateRequest ureq = new UpdateRequest().add(sdoc("id", "7"));
        ureq.setParam("collection", collectionName);
        ureq.setParam(UpdateRequest.MIN_REPFACT, "2");
        NamedList<Object> response = cloudClient.request(ureq);
        if ((Integer) ((NamedList<Object>) response.get("responseHeader")).get(UpdateRequest.REPFACT) >= 2) {
            break;
        }
        LOG.info("Min RF not achieved yet. retrying");
    }
    checkRTG(3, 7, cluster.getJettySolrRunners());
    DirectUpdateHandler2.commitOnClose = false;
    ChaosMonkey.stop(solrRunner);
    waitForState("Replica still up", collectionName, activeReplicaCount(0, 1, 0));
    DirectUpdateHandler2.commitOnClose = true;
    ChaosMonkey.start(solrRunner);
    waitForState("Replica didn't recover", collectionName, activeReplicaCount(0, 2, 0));
    waitForNumDocsInAllReplicas(5, getNonLeaderReplias(collectionName), 10); //timeout for stale collection state
    checkRTG(3, 7, cluster.getJettySolrRunners());
    cluster.getSolrClient().commit(collectionName);

    // Test replica recovery apply buffer updates
    Semaphore waitingForBufferUpdates = new Semaphore(0);
    Semaphore waitingForReplay = new Semaphore(0);
    RecoveryStrategy.testing_beforeReplayBufferingUpdates = () -> {
        try {
            waitingForReplay.release();
            waitingForBufferUpdates.acquire();
        } catch (InterruptedException e) {
            e.printStackTrace();
            fail("Test interrupted: " + e.getMessage());
        }
    };
    if (useKill) {
        ChaosMonkey.kill(solrRunner);
    } else {
        ChaosMonkey.stop(solrRunner);
    }
    ChaosMonkey.start(solrRunner);
    waitingForReplay.acquire();
    new UpdateRequest().add(sdoc("id", "8")).add(sdoc("id", "9")).process(cloudClient, collectionName);
    waitingForBufferUpdates.release();
    RecoveryStrategy.testing_beforeReplayBufferingUpdates = null;
    waitForState("Replica didn't recover", collectionName, activeReplicaCount(0, 2, 0));
    checkRTG(3, 9, cluster.getJettySolrRunners());
    for (SolrCore solrCore : getSolrCore(false)) {
        RefCounted<IndexWriter> iwRef = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(null);
        assertFalse("IndexWriter at replicas must not see updates ", iwRef.get().hasUncommittedChanges());
        iwRef.decref();
    }
}
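
The two zero-permit semaphores above form a handshake with the recovery thread: the hook releases waitingForReplay to announce that replay is about to start, then blocks on waitingForBufferUpdates until the test has added its extra documents. A stripped-down sketch of that rendezvous pattern (illustrative names, not Solr code):

import java.util.concurrent.Semaphore;

public class RendezvousSketch {
    public static void main(String[] args) throws InterruptedException {
        final Semaphore reached = new Semaphore(0);  // worker -> main: "at the checkpoint"
        final Semaphore proceed = new Semaphore(0);  // main -> worker: "continue"

        new Thread(() -> {
            try {
                reached.release();                   // announce the checkpoint
                proceed.acquire();                   // pause until main allows us to continue
                System.out.println("worker resumed");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }).start();

        reached.acquire();                           // wait for the worker's checkpoint
        // ... the main thread does its setup while the worker is paused ...
        proceed.release();                           // let the worker continue
    }
}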

From source file:com.impetus.ankush2.ganglia.GangliaDeployer.java

@Override
public boolean removeNode(final ClusterConfig conf, Collection<String> nodes) {
    try {
        if (newClusterConf == null) {
            // setting clusterconf, componentconf and logger
            if (!initializeDataMembers(conf)) {
                return false;
            }
        }
        final Semaphore semaphore = new Semaphore(nodes.size());
        // undeploying package from each node
        for (final String host : nodes) {
            semaphore.acquire();
            AppStoreWrapper.getExecutor().execute(new Runnable() {
                @Override
                public void run() {
                    // setting nodestatus default value to false
                    boolean nodestatus = false;
                    // if service stopped successfully, then removing
                    // component from node
                    if (stopNode(host)) {
                        nodestatus = removeNode(host);
                    }
                    conf.getNodes().get(host).setStatus(nodestatus);
                    if (semaphore != null) {
                        semaphore.release();
                    }
                }
            });
        }
        semaphore.acquire(nodes.size());
    } catch (Exception e) {
        addClusterError("Could not remove " + getComponentName(), e);
        return false;
    }
    return AnkushUtils.getStatus(conf.getNodes());
}

From source file:com.impetus.ankush2.hadoop.monitor.HadoopComponentMonitor.java

/**
 * Edit params.
 */
private void editparams() {

    this.hadoopConfig = HadoopUtils.getHadoopConfig(this.clusterConf);
    String errMsg = "Unable to process request to edit Hadoop configuration files.";

    if (!HadoopUtils.isManagedByAnkush(this.hadoopConfig)) {
        this.addAndLogError(errMsg + " " + Constant.Registration.ErrorMsg.NOT_MANAGED_MODE);
        return;
    }

    try {
        this.clusterConf.incrementOperation();
        boolean isAgentDown = AnkushUtils.isAnyAgentDown(this.hadoopConfig.getNodes().keySet());
        if (isAgentDown) {
            throw new AnkushException(
                    "Could not process edit parameters request: AnkushAgent is down on few nodes.");
        }

        final Map<String, Object> confParams = (Map<String, Object>) parameterMap.get("params");

        final String loggedUser = (String) parameterMap.get("loggedUser");

        AppStoreWrapper.getExecutor().execute(new Runnable() {
            @Override
            public void run() {
                final Semaphore semaphore = new Semaphore(hadoopConfig.getNodes().size());
                try {
                    // connect with all the component nodes
                    AnkushUtils.connectNodesString(clusterConf, hadoopConfig.getNodes().keySet());

                    for (final String host : hadoopConfig.getNodes().keySet()) {

                        semaphore.acquire();
                        AppStoreWrapper.getExecutor().execute(new Runnable() {
                            @Override
                            public void run() {
                                try {
                                    for (Entry entry : confParams.entrySet()) {

                                        // get fileName
                                        String fileName = (String) entry.getKey();
                                        // get config params list
                                        List<Map> params = (List<Map>) entry.getValue();

                                        for (Map param : params) {
                                            final Parameter parameter = JsonMapperUtil.objectFromMap(param,
                                                    Parameter.class);

                                            String status = parameter.getStatus();

                                            Result res = null;

                                            ConfigurationManager confManager = new ConfigurationManager();

                                            // get the component configuration directory
                                            String confDir = HadoopUtils.getHadoopConfDir(hadoopConfig);

                                            // build the configuration file path
                                            String propertyFilePath = confDir + fileName;

                                            switch (Constant.ParameterActionType
                                                    .valueOf(status.toUpperCase())) {
                                            case ADD:
                                                if (addParam(clusterConf.getNodes().get(host),
                                                        Constant.Component.Name.HADOOP, parameter.getName(),
                                                        parameter.getValue(), propertyFilePath,
                                                        Constant.File_Extension.XML)) {
                                                    confManager.saveConfiguration(clusterConf.getClusterId(),
                                                            loggedUser, fileName, host, parameter.getName(),
                                                            parameter.getValue());
                                                }
                                                break;
                                            case EDIT:
                                                if (editParam(clusterConf.getNodes().get(host),
                                                        Constant.Component.Name.HADOOP, parameter.getName(),
                                                        parameter.getValue(), propertyFilePath,
                                                        Constant.File_Extension.XML)) {
                                                    confManager.saveConfiguration(clusterConf.getClusterId(),
                                                            loggedUser, fileName, host, parameter.getName(),
                                                            parameter.getValue());
                                                }
                                                break;
                                            case DELETE:
                                                if (deleteParam(clusterConf.getNodes().get(host),
                                                        Constant.Component.Name.HADOOP, parameter.getName(),
                                                        propertyFilePath, Constant.File_Extension.XML)) {
                                                    confManager.removeOldConfiguration(
                                                            clusterConf.getClusterId(), host, fileName,
                                                            parameter.getName());
                                                }
                                                break;
                                            }
                                        }
                                    }
                                } catch (Exception e) {
                                    // To be Handled : Exception for
                                    // Edit Parameter call
                                } finally {
                                    if (semaphore != null) {
                                        semaphore.release();
                                    }
                                }
                            }
                        });
                    }
                    semaphore.acquire(hadoopConfig.getNodes().size());
                    // disconnect with all the component nodes
                    AnkushUtils.disconnectCompNodes(clusterConf, hadoopConfig.getNodes().keySet());
                } catch (Exception e) {
                    // To be Handled : Exception for Edit Parameter call
                }
            }

        });
        result.put("message", "Parameters update request placed successfully.");
    } catch (AnkushException e) {
        this.addErrorAndLogException(e.getMessage(), e);
    } catch (Exception e) {
        this.addErrorAndLogException(errMsg, e);
    }
}

From source file:org.paxle.data.db.impl.CommandDBTest.java

public void _testVeryLargeURLSet() throws MalformedURLException, InterruptedException {
    final int MAX = 1000000;
    final int chunkSize = 1000;

    System.setProperty("derby.storage.pageCacheSize", "2000"); // default 1000
    //System.setProperty("derby.storage.pageSize", "32768");      // default 4096 bytes

    // setup DB
    // this.setupDB(POSTGRESQL_CONFIG_FILE, String.format(POSTGRESQL_CONNECTION_URL,"192.168.10.201"));
    //this.setupDB(H2_CONFIG_FILE, H2_CONNECTION_URL, "sa", "");
    this.setupDB(DERBY_CONFIG_FILE, DERBY_CONNECTION_URL, null, null);

    // command-tracker must be called MAX times
    checking(new Expectations() {
        {
            exactly(MAX).of(cmdTracker).commandCreated(with(equal("org.paxle.data.db.ICommandDB")),
                    with(any(ICommand.class)));
        }
    });

    final Semaphore s = new Semaphore(-MAX + 1);

    new Thread() {
        public void run() {
            try {
                Thread.sleep(10000);
            } catch (InterruptedException e) {
            }

            // create a dummy data-sink
            cmdDB.setDataSink(new DummyDataSink(s));
        };
    }.start();

    // store new commands
    long start = System.currentTimeMillis();

    LinkedList<URI> testURI = new LinkedList<URI>();
    for (int i = 1; i <= MAX; i++) {
        URI nextCommand = URI.create("http://test.paxle.net/" + i);
        testURI.add(nextCommand);

        if (i % chunkSize == 0 || i == MAX) {
            int known = this.cmdDB.storeUnknownLocations(0, 1, testURI);
            assertEquals(0, known);
            testURI.clear();
        }
    }

    // wait for all commands to be enqueued
    s.acquire();

    System.out.println(String.format("Storing and loading %d URL took %d ms", Integer.valueOf(MAX),
            Long.valueOf(System.currentTimeMillis() - start)));
}
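
The Semaphore(-MAX + 1) above starts the permit count below zero, so the single s.acquire() at the end can only return after release() has been called MAX times (presumably once per command reaching the dummy data-sink). The same trick in isolation, with a small count (illustrative sketch):

import java.util.concurrent.Semaphore;

public class NegativePermitsSketch {
    public static void main(String[] args) throws InterruptedException {
        final int events = 5;
        // Start at -(events - 1): one permit becomes available only after 'events' releases.
        final Semaphore allDone = new Semaphore(-events + 1);

        for (int i = 0; i < events; i++) {
            final int n = i;
            new Thread(() -> {
                System.out.println("event " + n);
                allDone.release();                // each event adds one permit
            }).start();
        }

        allDone.acquire();                        // blocks until all 'events' releases happened
        System.out.println("all events observed");
    }
}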