Example usage for java.util.concurrent BlockingQueue isEmpty

List of usage examples for java.util.concurrent BlockingQueue isEmpty

Introduction

On this page you can find example usage for java.util.concurrent BlockingQueue isEmpty.

Prototype

boolean isEmpty();

Source Link

Document

Returns true if this collection contains no elements.

Usage

From source file:com.betfair.cougar.test.socket.app.SocketCompatibilityTestingApp.java

/**
 * Entry point: discovers socket-compatibility testers in the configured repositories,
 * runs every client version against every server version, and writes a summary to
 * stdout plus a full JSON report to results.json.
 *
 * @param args command line arguments; see the option definitions below
 * @throws Exception if option parsing, test execution or report writing fails
 */
public static void main(String[] args) throws Exception {

    Parser parser = new PosixParser();
    Options options = new Options();
    options.addOption("r", "repo", true, "Repository type to search: local|central");
    options.addOption("c", "client-concurrency", true,
            "Max threads to allow each client tester to run tests, defaults to 10");
    options.addOption("t", "test-concurrency", true, "Max client testers to run concurrently, defaults to 5");
    options.addOption("m", "max-time", true,
            "Max time (in minutes) to allow tests to complete, defaults to 10");
    options.addOption("v", "version", false, "Print version and exit");
    options.addOption("h", "help", false, "This help text");
    CommandLine commandLine = parser.parse(options, args);
    if (commandLine.hasOption("h")) {
        System.out.println(options);
        System.exit(0);
    }
    if (commandLine.hasOption("v")) {
        System.out.println("How the hell should I know?");
        System.exit(0);
    }
    // 1. Find all testers in the given repos
    List<RepoSearcher> repoSearchers = new ArrayList<>();
    for (String repo : commandLine.getOptionValues("r")) {
        if ("local".equalsIgnoreCase(repo)) {
            repoSearchers.add(new LocalRepoSearcher());
        } else if ("central".equalsIgnoreCase(repo)) {
            repoSearchers.add(new CentralRepoSearcher());
        } else {
            System.err.println("Unrecognized repo: " + repo);
            System.err.println(options);
            System.exit(1);
        }
    }
    int clientConcurrency = parseIntOption(commandLine, "c", "client-concurrency", 10);
    int testConcurrency = parseIntOption(commandLine, "t", "test-concurrency", 5);
    int maxMinutes = parseIntOption(commandLine, "m", "max-time", 10);

    Properties clientProps = new Properties();
    clientProps.setProperty("client.concurrency", String.valueOf(clientConcurrency));

    File baseRunDir = new File(System.getProperty("user.dir") + "/run");
    baseRunDir.mkdirs();

    File tmpDir = new File(baseRunDir, "jars");
    tmpDir.mkdirs();

    List<ServerRunner> serverRunners = new ArrayList<>();
    List<ClientRunner> clientRunners = new ArrayList<>();
    for (RepoSearcher searcher : repoSearchers) {
        List<File> jars = searcher.findAndCache(tmpDir);
        for (File f : jars) {
            ServerRunner serverRunner = new ServerRunner(f, baseRunDir);
            System.out.println("Found tester: " + serverRunner.getVersion());
            serverRunners.add(serverRunner);
            clientRunners.add(new ClientRunner(f, baseRunDir, clientProps));
        }
    }

    // 2. Start servers and collect ports
    System.out.println();
    System.out.println("Starting " + serverRunners.size() + " servers...");
    for (ServerRunner server : serverRunners) {
        server.startServer();
    }
    System.out.println();

    List<TestCombo> tests = new ArrayList<>(serverRunners.size() * clientRunners.size());
    for (ServerRunner server : serverRunners) {
        for (ClientRunner client : clientRunners) {
            tests.add(new TestCombo(server, client));
        }
    }

    System.out.println("Enqueued " + tests.size() + " test combos to run...");

    long startTime = System.currentTimeMillis();
    // 3. Run every client against every server, collecting results.
    // The queue is sized to hold every combo so addAll cannot fail on capacity.
    BlockingQueue<Runnable> workQueue = new ArrayBlockingQueue<>(serverRunners.size() * clientRunners.size());
    ThreadPoolExecutor service = new ThreadPoolExecutor(testConcurrency, testConcurrency, 5000,
            TimeUnit.MILLISECONDS, workQueue);
    service.prestartAllCoreThreads();
    workQueue.addAll(tests);
    // Wait until the pool has drained every queued combo before initiating shutdown.
    while (!workQueue.isEmpty()) {
        Thread.sleep(1000);
    }
    service.shutdown();
    service.awaitTermination(maxMinutes, TimeUnit.MINUTES);
    long endTime = System.currentTimeMillis();
    long totalTimeSecs = Math.round((endTime - startTime) / 1000.0);
    for (ServerRunner server : serverRunners) {
        server.shutdownServer();
    }

    System.out.println();
    System.out.println("=======");
    System.out.println("Results");
    System.out.println("-------");
    // print a summary
    int totalTests = 0;
    int totalSuccess = 0;
    ObjectMapper mapper = new ObjectMapper(new JsonFactory()); // hoisted: one mapper serves every combo
    for (TestCombo combo : tests) {
        String clientVer = combo.getClientVersion();
        String serverVer = combo.getServerVersion();
        String results = combo.getClientResults();
        JsonNode node = mapper.reader().readTree(results);
        JsonNode resultsArray = node.get("results");
        int numTests = resultsArray.size();
        int numSuccess = 0;
        for (int i = 0; i < numTests; i++) {
            if ("success".equals(resultsArray.get(i).get("result").asText())) {
                numSuccess++;
            }
        }
        totalSuccess += numSuccess;
        totalTests += numTests;
        // "%.2f" (not the original "%2f", which is a width of 2 with default precision)
        // prints the running time with two decimal places as intended.
        System.out.println(clientVer + "/" + serverVer + ": " + numSuccess + "/" + numTests
                + " succeeded - took " + String.format("%.2f", combo.getRunningTime()) + " seconds");
    }
    System.out.println("-------");
    System.out.println(
            "Overall: " + totalSuccess + "/" + totalTests + " succeeded - took " + totalTimeSecs + " seconds");

    // 4. Output full results; try-with-resources guarantees the file is closed even on error.
    try (PrintWriter pw = new PrintWriter(new FileWriter("results.json"))) {
        pw.println("{\n  \"results\": [");
        for (TestCombo combo : tests) {
            combo.emitResults(pw, "    ");
        }
        pw.println("  ],");
        pw.println("  \"servers\": [");
        for (ServerRunner server : serverRunners) {
            server.emitInfo(pw, "    ");
        }
        pw.println("  ]");
        pw.println("}"); // close the JSON object - the original left the document unterminated
    }
}

/**
 * Parses an integer-valued command line option, falling back to a default when the
 * option is absent and exiting the JVM with status 1 when the value is malformed.
 */
private static int parseIntOption(CommandLine commandLine, String opt, String longName, int defaultValue) {
    if (!commandLine.hasOption(opt)) {
        return defaultValue;
    }
    try {
        return Integer.parseInt(commandLine.getOptionValue(opt));
    } catch (NumberFormatException nfe) {
        System.err.println(longName + " is not a valid integer: '" + commandLine.getOptionValue(opt) + "'");
        System.exit(1);
        return defaultValue; // unreachable - System.exit does not return
    }
}

From source file:com.opengamma.bbg.BloombergReferenceDataProvider.java

/**
 * Performs the main work to query Bloomberg.
 * <p>
 * This is part of {@link #getFields(Set, Set)}.
 *
 * @param securityKeys  the set of securities, not null
 * @param fields  the set of fields, not null
 * @return the Bloomberg result, not null
 * @throws OpenGammaRuntimeException if no (or an empty) response is received
 */
protected BlockingQueue<Element> doQuery(Set<String> securityKeys, Set<String> fields) {
    // Compose and submit the request, then block on the correlated response.
    CorrelationID cid = submitBloombergRequest(composeRequest(securityKeys, fields));
    BlockingQueue<Element> resultElements = getResultElement(cid);
    if (resultElements != null && !resultElements.isEmpty()) {
        return resultElements;
    }
    throw new OpenGammaRuntimeException(
            "Unable to get a Bloomberg response for " + fields + " fields for " + securityKeys);
}

From source file:org.apache.streams.facebook.provider.FacebookProvider.java

@Override
public StreamsResultSet readCurrent() {
    // Move up to MAX_BATCH_SIZE datums from the shared queue into a fresh batch.
    BlockingQueue<StreamsDatum> currentBatch = Queues.newLinkedBlockingQueue();
    int drained = 0;
    while (drained < MAX_BATCH_SIZE && !this.datums.isEmpty()) {
        ComponentUtils.offerUntilSuccess(ComponentUtils.pollWhileNotEmpty(this.datums), currentBatch);
        drained++;
    }
    // The provider is complete once both queues are empty and the collector has finished.
    boolean finished = currentBatch.isEmpty() && this.datums.isEmpty() && this.dataCollector.isComplete();
    this.isComplete.set(finished);
    return new StreamsResultSet(currentBatch);
}

From source file:com.opengamma.bbg.replay.BloombergTickWriter.java

/**
 * /*from  w ww.  ja va 2  s .  c  o m*/
 */
private void writeOutSecurityMapQueue() {
    for (Entry<String, BlockingQueue<FudgeMsg>> entry : _securityMapQueue.entrySet()) {
        String security = entry.getKey();
        BlockingQueue<FudgeMsg> queue = entry.getValue();
        if (queue.isEmpty()) {
            continue;
        }
        List<FudgeMsg> tickMsgList = new ArrayList<FudgeMsg>(queue.size());
        queue.drainTo(tickMsgList);
        String buid = getBloombergBUID(security);

        //get first message
        FudgeMsg tickMsg = tickMsgList.get(0);
        Long epochMillis = tickMsg.getLong(RECEIVED_TS_KEY);

        File dir = buildSecurityDirectory(buid, epochMillis);
        if (!dir.exists()) {
            createDirectory(dir);
        }
        writeSecurityTicks(dir, buid, security, tickMsgList);
        tickMsgList.clear();
        tickMsgList = null;
    }
}

From source file:org.jboss.as.test.integration.logging.syslog.SyslogHandlerTestCase.java

/**
 * Tests that only messages on a specific level or a higher level are logged to syslog.
 */
@Test
public void testLogOnSpecificLevel() throws Exception {
    final BlockingQueue<SyslogServerEventIF> receivedEvents = BlockedSyslogServerEventHandler.getQueue();
    // Raise the handler threshold so only ERROR and above reach syslog.
    executeOperation(Operations.createWriteAttributeOperation(SYSLOG_HANDLER_ADDR, "level", "ERROR"));
    receivedEvents.clear();
    makeLogs();
    testLog(receivedEvents, Level.ERROR);
    testLog(receivedEvents, Level.FATAL);
    // Anything still queued would be a message logged below the configured level.
    Assert.assertTrue("No other message was expected in syslog.", receivedEvents.isEmpty());
}

From source file:org.jboss.as.test.integration.logging.syslog.SyslogHandlerTestCase.java

/**
 * Tests that messages on all levels are logged when level="TRACE" is set on the syslog handler.
 */
@Test
public void testAllLevelLogs() throws Exception {
    final BlockingQueue<SyslogServerEventIF> receivedEvents = BlockedSyslogServerEventHandler.getQueue();
    // Lower the handler threshold so every level reaches syslog.
    executeOperation(Operations.createWriteAttributeOperation(SYSLOG_HANDLER_ADDR, "level", "TRACE"));
    receivedEvents.clear();
    makeLogs();
    for (Level level : LoggingServiceActivator.LOG_LEVELS) {
        testLog(receivedEvents, level);
    }
    // Anything still queued would be an unexpected extra message.
    Assert.assertTrue("No other message was expected in syslog.", receivedEvents.isEmpty());
}

From source file:com.splout.db.qnode.QNodeHandlerContext.java

/**
 * This method can be called by {@link QNodeHandler} to cancel the Thrift client cache when a DNode disconnects.
 * Usually this happens when Hazelcast notifies it.
 *
 * @param dnode the identifier of the DNode whose cached connections should be discarded
 * @throws InterruptedException if the calling thread is interrupted while acquiring the lock
 */
public void discardThriftClientCacheFor(String dnode) throws InterruptedException {
    thriftClientCacheLock.lock();
    try {
        // discarding all connections to a DNode who leaved
        log.info(Thread.currentThread().getName() + " : trashing queue for [" + dnode + "] as it leaved.");
        BlockingQueue<DNodeService.Client> dnodeQueue = thriftClientCache.get(dnode);
        // Guard against a missing entry: get() returns null when the DNode was never
        // cached (or already discarded), which would have caused an NPE below.
        if (dnodeQueue != null) {
            // Drain with poll() rather than isEmpty()/take(): take() would block forever
            // if the queue were emptied between the emptiness check and the take.
            DNodeService.Client client;
            while ((client = dnodeQueue.poll()) != null) {
                client.getOutputProtocol().getTransport().close();
            }
        }
        thriftClientCache.remove(dnode); // to indicate that the DNode is not present
    } finally {
        thriftClientCacheLock.unlock();
    }
}

From source file:io.fabric8.che.starter.openshift.CheDeploymentConfig.java

/**
 * Blocks until the deployment config reports available, polling every 500 ms and
 * giving up after {@code startTimeout} seconds.
 *
 * @param client the OpenShift client used to check deployment availability
 * @param namespace the namespace containing the deployment
 */
private void waitUntilDeploymentConfigIsAvailable(final OpenShiftClient client, String namespace) {
    // Single-slot handoff between the poller thread and this waiting thread.
    final BlockingQueue<Object> queue = new ArrayBlockingQueue<Object>(1);

    final Runnable readinessPoller = new Runnable() {
        public void run() {
            try {
                if (isDeploymentAvailable(client, namespace)) {
                    queue.put(true);
                    return;
                } else {
                    queue.put(false);
                    return;
                }
            } catch (Throwable t) {
                try {
                    // Only publish a failure if nothing is pending, to avoid blocking.
                    if (queue.isEmpty()) {
                        queue.put(false);
                    }
                    return;
                } catch (InterruptedException e) {
                    // Restore the interrupt status instead of swallowing it, so the
                    // executor (and any caller) can observe the interruption.
                    Thread.currentThread().interrupt();
                }
            }
        }
    };

    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    ScheduledFuture<?> poller = executor.scheduleWithFixedDelay(readinessPoller, 0, 500, TimeUnit.MILLISECONDS);
    // Separate one-shot task cancels the poller once the timeout elapses.
    executor.schedule(new Runnable() {

        @Override
        public void run() {
            poller.cancel(true);
        }
    }, Integer.valueOf(startTimeout), TimeUnit.SECONDS);
    try {
        // Spin until a poll reports ready (or the poller is cancelled by the timeout).
        while (!waitUntilReady(queue)) {
        }
    } finally {
        if (!poller.isDone()) {
            poller.cancel(true);
        }
        executor.shutdown();
    }
}

From source file:com.opengamma.bbg.BloombergHistoricalTimeSeriesSource.java

/**
 * Submits the given Bloomberg request and converts the response elements into a time-series.
 *
 * @param identifier  the security identifier, used only for logging
 * @param request  the Bloomberg request to submit
 * @param field  the data field to extract from each FIELD_DATA element
 * @return the extracted time-series, or null if there was no response or a security-level error
 */
private LocalDateDoubleTimeSeries processRequest(String identifier, Request request, String field) {
    CorrelationID cid = submitBloombergRequest(request);
    BlockingQueue<Element> resultElements = getResultElement(cid);

    // A missing or empty response is logged and reported as null rather than thrown.
    if (resultElements == null || resultElements.isEmpty()) {
        s_logger.info("Unable to get HistoricalTimeSeries for {}", identifier);
        return null;
    }
    List<LocalDate> dates = Lists.newArrayList();
    List<Double> values = Lists.newArrayList();
    for (Element resultElem : resultElements) {
        // Response-level errors are logged/processed but do not abort the loop.
        if (resultElem.hasElement(RESPONSE_ERROR)) {
            s_logger.warn("Response error");
            processError(resultElem.getElement(RESPONSE_ERROR));
        }
        Element securityElem = resultElem.getElement(SECURITY_DATA);
        // A security-level error aborts the whole request with a null result.
        if (securityElem.hasElement(SECURITY_ERROR)) {
            processError(securityElem.getElement(SECURITY_ERROR));
            return null;
        }
        if (securityElem.hasElement(FIELD_EXCEPTIONS)) {
            Element fieldExceptions = securityElem.getElement(FIELD_EXCEPTIONS);

            // Field-level exceptions are logged per field but processing continues.
            for (int i = 0; i < fieldExceptions.numValues(); i++) {
                Element fieldException = fieldExceptions.getValueAsElement(i);
                String fieldId = fieldException.getElementAsString(FIELD_ID);
                s_logger.warn("Field error on {}", fieldId);
                Element errorInfo = fieldException.getElement(ERROR_INFO);
                processError(errorInfo);
            }
        }
        if (securityElem.hasElement(FIELD_DATA)) {
            processFieldData(securityElem.getElement(FIELD_DATA), field, dates, values);
        }
    }
    return new ArrayLocalDateDoubleTimeSeries(dates, values);
}

From source file:com.google.gplus.provider.TestGPlusUserActivityCollector.java

/**
 * Creates a randomized activity feed and a randomized date range.
 * The activity feed is ordered in three chunks:
 * [ too recent for the range | inside the range | too old for the range ]
 * Data inside each chunk has no order, but the list is ordered by those three chunks.
 *
 * The test checks that exactly the in-range data makes it onto the output queue.
 */
@Test
@Repeat(iterations = 3)
public void testWithBeforeAndAfterDates() throws InterruptedException {
    // Start by assuming no date range will be used: everything is "in range".
    int numActivities = randomIntBetween(0, 1000);
    int numInRange = numActivities;
    int numOutOfRange = 0;
    int numBeforeRange = 0;
    int numAfterRange = 0;
    // Randomly decide whether each bound of the range is used.
    DateTime beforeDate = null;
    DateTime afterDate = null;
    if (randomInt() % 2 == 0) {
        beforeDate = DateTime.now().minusDays(randomIntBetween(1, 5));
    }
    if (randomInt() % 2 == 0) {
        afterDate = (beforeDate == null) ? DateTime.now().minusDays(randomIntBetween(1, 10))
                : beforeDate.minusDays(randomIntBetween(1, 10));
    }
    // If any bound applies, split the feed between in-range and out-of-range data.
    if (beforeDate != null || afterDate != null) {
        numInRange = randomIntBetween(0, numActivities);
        numOutOfRange = numActivities - numInRange;
    }
    if (beforeDate == null && afterDate != null) {
        // Only a lower bound: all out-of-range data falls before the start of the range.
        numBeforeRange = numOutOfRange;
    } else if (beforeDate != null && afterDate == null) {
        // Only an upper bound: all out-of-range data falls after the end of the range.
        numAfterRange = numOutOfRange;
    } else if (beforeDate != null && afterDate != null) {
        // Both bounds: split the out-of-range data across the two sides.
        numAfterRange = (numOutOfRange / 2) + (numOutOfRange % 2);
        numBeforeRange = numOutOfRange / 2;
    }

    Plus plus = createMockPlus(numBeforeRange, numAfterRange, numInRange, afterDate, beforeDate);
    BackOffStrategy strategy = new ConstantTimeBackOffStrategy(1);
    BlockingQueue<StreamsDatum> datums = new LinkedBlockingQueue<>();
    UserInfo userInfo = new UserInfo();
    userInfo.setUserId("A");
    userInfo.setAfterDate(afterDate);
    userInfo.setBeforeDate(beforeDate);
    GPlusUserActivityCollector collector = new GPlusUserActivityCollector(plus, datums, strategy, userInfo);
    collector.run();

    assertEquals(numInRange, datums.size());
    while (!datums.isEmpty()) {
        StreamsDatum datum = datums.take();
        assertNotNull(datum);
        assertNotNull(datum.getDocument());
        assertTrue(datum.getDocument() instanceof String);
        // Only in-range documents should reach the outgoing queue.
        assertTrue(((String) datum.getDocument()).contains(IN_RANGE_IDENTIFIER));
    }
}