Example usage for java.util.concurrent LinkedBlockingQueue poll

List of usage examples for java.util.concurrent LinkedBlockingQueue poll

Introduction

On this page you can find usage examples for java.util.concurrent LinkedBlockingQueue.poll(long timeout, TimeUnit unit).

Prototype

public E poll(long timeout, TimeUnit unit) throws InterruptedException 
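
The signature above is the timed variant: it waits up to the given timeout for an element to become available, returns it as soon as one arrives, returns null if the timeout elapses first, and throws InterruptedException if the waiting thread is interrupted. As a minimal, self-contained sketch (the class name PollExample is illustrative, not taken from the examples below):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class PollExample {
    public static void main(String[] args) throws InterruptedException {
        final LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<String>();

        // Producer thread hands off a single element after a short delay
        new Thread(new Runnable() {
            public void run() {
                try {
                    Thread.sleep(200);
                    queue.put("hello");
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        }).start();

        // Waits up to 1 second; returns "hello" once the producer puts it
        System.out.println("first poll: " + queue.poll(1, TimeUnit.SECONDS));

        // Queue is now empty; after 100 ms the timed poll gives up and returns null
        System.out.println("second poll: " + queue.poll(100, TimeUnit.MILLISECONDS));
    }
}

Most of the examples below build on this contract: they loop on poll with a timeout so the consumer can react to an end-of-data sentinel, a shutdown signal, or interruption.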

Usage

From source file: me.bulat.jivr.core.bootstrap.config.RootConfig.java

/**
 * Main Node bean factory.
 * @param consul consul manager.
 * @return constructed node.
 */
@Bean(name = "node")
public Node getNode(@Qualifier("consul_manager") ConsulManager consul) {
    return new SimpleNode() {

        @Override
        protected void updateServices() {
            LinkedBlockingQueue<UpdateInfo> updates = getUpdater().getUpdates();
            try {
                while (true) {
                    UpdateInfo update = updates.poll(10, TimeUnit.SECONDS);
                    if (update != null) {
                        AgiScriptEngine engine;
                        switch (update.type) {
                        case ADD:
                            engine = new SimpleAgiScriptEngine(update.service, getConsulManager());
                            services.put(update.service.getServiceName(), engine);
                            break;
                        case ADD_AND_START:
                            engine = new SimpleAgiScriptEngine(update.service, getConsulManager());
                            services.put(update.service.getServiceName(), engine);
                            engine.start();
                            break;
                        case DELETE:
                            services.remove(update.service.getServiceName()).stop();
                            break;
                        case REPLACE_AND_STOP:
                            services.get(update.service.getServiceName()).updateService(update.service);
                            break;
                        case REPLACE_AND_START:
                            services.get(update.service.getServiceName()).updateService(update.service).start();
                            break;
                        case START:
                            services.get(update.service.getServiceName()).start();
                            break;
                        case STOP:
                            services.get(update.service.getServiceName()).stop();
                            break;
                        }
                    }
                }
            } catch (InterruptedException e) {
                // TODO: 10/09/16 LOG stop get updates
                System.out.println("Stop get updates.");
            }
        }

        @Override
        protected void nodeRegister() {
            consul.registerNode();
        }

        @Override
        protected void nodeUnRegister() {
            consul.unRegisterNode();
        }

        @Override
        protected void setNodeState(final ServiceState state) {
            consul.setNodeState(state);
        }

        @Override
        protected void setNodeState(final ServiceState state, final String note) {
            consul.setNodeState(state, note);
        }
    };
}

From source file: com.adaptris.http.RequestDispatcher.java

private RequestProcessor waitForRequestProcessor(LinkedBlockingQueue queue) throws HttpException {
    RequestProcessor rp = null;
    do {
        if (logR.isTraceEnabled()) {
            logR.trace("Waiting for an available processor from " + queue);
        }
        try {
            rp = (RequestProcessor) queue.poll(DEFAULT_QUEUE_POLL_TIMEOUT, TimeUnit.MILLISECONDS);
            if (rp != null) {
                if (logR.isTraceEnabled()) {
                    logR.trace("Got RequestProcessor " + rp);
                }
                break;
            }
        } catch (InterruptedException e) {
            // interrupted while waiting; retry until a processor arrives or the parent dies
        }
    } while (rp == null && parent.isAlive());
    return rp;
}

From source file: org.apache.ftpserver.ftpletcontainer.FtpLetReturnDefaultTest.java

public void testLogin() throws Exception {
    final LinkedBlockingQueue<User> loggedInUser = new LinkedBlockingQueue<User>();

    MockFtplet.callback = new MockFtpletCallback() {
        public FtpletResult onLogin(FtpSession session, FtpRequest request) throws FtpException, IOException {
            loggedInUser.add(session.getUser());

            return super.onLogin(session, request);
        }

    };
    MockFtpletCallback.returnValue = FtpletResult.DEFAULT;

    assertTrue(client.login(ADMIN_USERNAME, ADMIN_PASSWORD));

    assertNotNull(loggedInUser.poll(2000, TimeUnit.MILLISECONDS));
}

From source file: se.vgregion.pubsub.push.impl.PushTest.java

@Test
@Transactional // TODO remove
public void test() throws InterruptedException {
    final URI testUri = URI.create("http://feeds.feedburner.com/protocol7/main");

    PubSubEngine pubSubEngine = applicationContext.getBean(PubSubEngine.class);

    final LinkedBlockingQueue<Feed> publishedFeeds = new LinkedBlockingQueue<Feed>();

    pubSubEngine.subscribe(new Subscriber() {

        @Override
        public void timedOut() {
        }

        @Override
        public void publish(Feed feed, PushJms pushJms) throws PublicationFailedException {
            publishedFeeds.add(feed);
        }

        @Override
        public URI getTopic() {
            return testUri;
        }

        @Override
        public DateTime getTimeout() {
            return null;
        }

        @Override
        public DateTime getLastUpdated() {
            return null;
        }
    });

    //        pubSubEngine.getOrCreateTopic(testUri).addSubscriber(new DefaultPushSubscriber(
    //                applicationContext.getBean(PushSubscriberRepository.class),
    //                testUri, URI.create("http://localhost:9000"), 100, "verify"));

    PushSubscriberManager pushSubscriberManager = applicationContext.getBean(PushSubscriberManager.class);
    pushSubscriberManager.retrive(testUri);

    Feed feed = publishedFeeds.poll(10000, TimeUnit.MILLISECONDS);

    //        Thread.sleep(200000);
}

From source file: org.kurento.rabbitmq.RabbitTemplate.java

protected Message doSendAndReceiveWithFixed(final String exchange, final String routingKey,
        final Message message) {
    return this.execute(new ChannelCallback<Message>() {

        @Override
        public Message doInRabbit(Channel channel) throws Exception {
            final PendingReply pendingReply = new PendingReply();

            byte[] messageTagBytes = message.getMessageProperties().getCorrelationId();

            String messageTag;
            if (messageTagBytes != null) {
                messageTag = new String(messageTagBytes);
            } else {
                messageTag = UUID.randomUUID().toString();
            }

            RabbitTemplate.this.replyHolder.put(messageTag, pendingReply);
            // Save any existing replyTo and correlation data
            String savedReplyTo = message.getMessageProperties().getReplyTo();
            pendingReply.setSavedReplyTo(savedReplyTo);
            if (StringUtils.hasLength(savedReplyTo) && logger.isDebugEnabled()) {
                logger.debug("Replacing replyTo header:" + savedReplyTo
                        + " in favor of template's configured reply-queue:"
                        + RabbitTemplate.this.replyQueue.getName());
            }
            message.getMessageProperties().setReplyTo(RabbitTemplate.this.replyQueue.getName());
            String savedCorrelation = null;
            if (RabbitTemplate.this.correlationKey == null) { // using standard correlationId property
                byte[] correlationId = message.getMessageProperties().getCorrelationId();
                if (correlationId != null) {
                    savedCorrelation = new String(correlationId, RabbitTemplate.this.encoding);
                }
            } else {
                savedCorrelation = (String) message.getMessageProperties().getHeaders()
                        .get(RabbitTemplate.this.correlationKey);
            }
            pendingReply.setSavedCorrelation(savedCorrelation);
            if (RabbitTemplate.this.correlationKey == null) { // using standard correlationId property
                message.getMessageProperties()
                        .setCorrelationId(messageTag.getBytes(RabbitTemplate.this.encoding));
            } else {
                message.getMessageProperties().setHeader(RabbitTemplate.this.correlationKey, messageTag);
            }

            if (logger.isDebugEnabled()) {
                logger.debug("Sending message with tag " + messageTag);
            }
            doSend(channel, exchange, routingKey, message, null);
            LinkedBlockingQueue<Message> replyHandoff = pendingReply.getQueue();
            Message reply = (replyTimeout < 0) ? replyHandoff.take()
                    : replyHandoff.poll(replyTimeout, TimeUnit.MILLISECONDS);
            RabbitTemplate.this.replyHolder.remove(messageTag);
            return reply;
        }
    });
}

From source file: se.vgregion.pubsub.push.impl.DefaultPushSubscriberPublishTest.java

@Test
public void publish() throws Exception {

    subscriber = new DefaultPushSubscriber(UnitTestConstants.TOPIC, buildTestUrl("/"), UnitTestConstants.FUTURE,
            UnitTestConstants.UPDATED1, 100, "verify", UnitTestConstants.SECRET, true);

    final LinkedBlockingQueue<HttpRequest> issuedRequests = new LinkedBlockingQueue<HttpRequest>();
    final LinkedBlockingQueue<byte[]> issuedRequestBodies = new LinkedBlockingQueue<byte[]>();
    server.register("/*", new HttpRequestHandler() {
        @Override
        public void handle(HttpRequest request, HttpResponse response, HttpContext context)
                throws HttpException, IOException {
            issuedRequests.add(request);

            HttpEntity entity = ((HttpEntityEnclosingRequest) request).getEntity();
            ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            entity.writeTo(buffer);
            issuedRequestBodies.add(buffer.toByteArray());
        }
    });

    Feed feed = new FeedBuilder(ContentType.ATOM).id("e1")
            .entry(new EntryBuilder().id("f1").updated(new DateTime()).build())
            .entry(new EntryBuilder().id("f2").updated(UnitTestConstants.UPDATED1.minusHours(1)).build())
            .build();

    subscriber.publish(feed, null);

    // subscriber should be updated
    Assert.assertEquals(new DateTime(), subscriber.getLastUpdated());

    HttpRequest request = issuedRequests.poll(10000, TimeUnit.MILLISECONDS);
    Assert.assertNotNull(request);
    Assert.assertEquals(ContentType.ATOM.toString(), request.getFirstHeader("Content-Type").getValue());

    // verify HMAC header
    Assert.assertEquals("sha1=1356b52665408a17af46803a7988e48d40d1fb75",
            request.getFirstHeader("X-Hub-Signature").getValue());

    // verify content
    Assert.assertTrue(request instanceof HttpEntityEnclosingRequest);

    HttpEntity entity = ((HttpEntityEnclosingRequest) request).getEntity();

    Assert.assertNotNull(entity);

    Document actualAtom = new Builder().build(new ByteArrayInputStream(issuedRequestBodies.poll()));

    Assert.assertEquals(1, actualAtom.getRootElement().getChildElements("entry", Namespaces.ATOM).size());

}

From source file: com.numenta.taurus.service.TaurusDataSyncService.java

/**
 * Load all instance data from the database
 */
@Override
protected void loadAllData() throws HTMException, IOException {

    Context context = TaurusApplication.getContext();
    if (context == null) {
        // Should not happen.
        // We need application context to run.
        return;
    }

    // Get last known date from the database
    final TaurusDatabase database = TaurusApplication.getDatabase();
    if (database == null) {
        // Should not happen.
        // We need the database to run.
        return;
    }
    long from = database.getLastTimestamp();

    // Get current time
    final long now = System.currentTimeMillis();

    // The server updates the instance data table in hourly buckets as the models process
    // data. This may leave the last hour with outdated values if the server updates the
    // instance data table after we start loading the new hourly bucket.
    // To make sure the last hour bucket is up to date, we fetch data from the last update
    // up to now, and once the time passes a certain threshold (15 minutes into the hour)
    // we also download the previous hour once.
    SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(context);

    // Check if we need to update the previous hour
    long previousHourThreshold = prefs.getLong(PREF_PREVIOUS_HOUR_THRESHOLD, now);
    if (now >= previousHourThreshold) {
        // Download the previous hour
        from -= DataUtils.MILLIS_PER_HOUR;

        // Set threshold time to minute 15 of next hour
        Calendar calendar = Calendar.getInstance();
        calendar.setTimeInMillis(now);
        calendar.add(Calendar.HOUR, 1);
        calendar.set(Calendar.MINUTE, 15);
        calendar.set(Calendar.SECOND, 0);
        calendar.set(Calendar.MILLISECOND, 0);
        prefs.edit().putLong(PREF_PREVIOUS_HOUR_THRESHOLD, calendar.getTimeInMillis()).apply();
    }
    final long oldestTimestamp = DataUtils
            .floorTo60minutes(now - TaurusApplication.getNumberOfDaysToSync() * DataUtils.MILLIS_PER_DAY);

    // Check if we need to catch up and download old data
    if (database.getFirstTimestamp() > oldestTimestamp) {
        from = oldestTimestamp;
    }

    // Don't get data older than NUMBER_OF_DAYS_TO_SYNC
    from = Math.max(from, oldestTimestamp);

    // Blocking queue holding data waiting to be saved to the database.
    // This queue will be filled by the TaurusClient as it downloads data and it will be
    // emptied by the databaseTask as it saves data to the database
    final LinkedBlockingQueue<InstanceData> pending = new LinkedBlockingQueue<InstanceData>(
            PENDING_IO_BUFFER_SIZE);

    // Background task used save data to the database. This task will wait for data to arrive
    // from the server and save them to the database in batches until it finds the end of the
    // queue marked by DATA_EOF or it times out after 60 seconds
    final Future<?> databaseTask = getService().getIOThreadPool().submit(new Runnable() {
        @Override
        public void run() {
            // Save data in batches, one hour at a time
            final List<InstanceData> batch = new ArrayList<InstanceData>();
            // Negative size: batch boundaries step backwards in time
            int batchSize = -DataUtils.MILLIS_PER_HOUR;

            // Tracks the batch timestamp. Once a data timestamp falls below the batch
            // timestamp, a new batch is created
            long batchTimestamp = now - DataUtils.MILLIS_PER_HOUR;

            try {
                // Process all pending data until the DATA_EOF is found or a timeout is reached
                InstanceData data;
                while ((data = pending.poll(60, TimeUnit.SECONDS)) != DATA_EOF && data != null) {
                    batch.add(data);
                    // Process batches
                    if (data.getTimestamp() < batchTimestamp) {
                        // Calculate next batch timestamp
                        batchTimestamp = data.getTimestamp() + batchSize;
                        if (database.addInstanceDataBatch(batch)) {
                            // Notify receivers new data has arrived
                            fireInstanceDataChangedEvent();
                        }
                        batch.clear();
                    }
                }
                // Last batch
                if (!batch.isEmpty()) {
                    if (database.addInstanceDataBatch(batch)) {
                        // Notify receivers new data has arrived
                        fireInstanceDataChangedEvent();
                    }
                }
            } catch (InterruptedException e) {
                Log.w(TAG, "Interrupted while loading data");
            }
        }
    });

    try {
        // Get new data from server
        Log.d(TAG, "Start downloading data from " + from);
        TaurusClient client = getClient();
        client.getAllInstanceData(new Date(from), new Date(now), false,
                new HTMClient.DataCallback<InstanceData>() {
                    @Override
                    public boolean onData(InstanceData data) {
                        // enqueue data for saving
                        try {
                            pending.put(data);
                        } catch (InterruptedException e) {
                            pending.clear();
                            Log.w(TAG, "Interrupted while loading data");
                            return false;
                        }
                        return true;
                    }
                });
        // Mark the end of the records
        pending.add(DATA_EOF);
        // Wait for the database task to complete
        databaseTask.get();
        // Clear client cache
        client.clearCache();
    } catch (InterruptedException e) {
        Log.w(TAG, "Interrupted while loading data");
    } catch (ExecutionException e) {
        Log.e(TAG, "Failed to load data", e);
    }
}

From source file: com.test.HibernateDerbyLockingTest.java

public void runTest(final SessionFactory sessionFactory) throws Exception {
    Person person = new Person();

    Session session = sessionFactory.openSession();
    session.save(person);
    session.flush();
    session.close();

    final String id = person.getId();
    final LinkedBlockingQueue<String> queue = new LinkedBlockingQueue<String>();

    ExecutorService executorService = Executors.newCachedThreadPool();
    Future<?> submit = executorService.submit(new Runnable() {
        public void run() {
            Session session = sessionFactory.openSession();
            Transaction transaction = session.beginTransaction();
            session.load(Person.class, id, LockMode.UPGRADE);
            try {
                Thread.sleep(2000);
            } catch (Throwable t) {
                // ignore; the sleep only staggers the two transactions
            }
            System.out.println("one");
            queue.add("one");
            try {
                Thread.sleep(500);
            } catch (Throwable t) {
            }
            transaction.commit();
            session.flush();
            session.close();
        }
    });
    Thread.sleep(500);
    Future<?> submit2 = executorService.submit(new Runnable() {
        public void run() {
            Session session = sessionFactory.openSession();
            Transaction transaction = session.beginTransaction();
            session.load(Person.class, id, LockMode.UPGRADE);
            queue.add("two");
            System.out.println("two");
            transaction.commit();
            session.flush();
            session.close();
        }
    });
    submit.get();
    submit2.get();
    assertEquals("one", queue.poll(3, TimeUnit.SECONDS));
    assertEquals("two", queue.poll(3, TimeUnit.SECONDS));
}

From source file: com.offbynull.portmapper.natpmp.NatPmpController.java

private <T extends NatPmpResponse> T attemptRequest(ByteBuffer sendBuffer, int attempt, Creator<T> creator)
        throws InterruptedException {

    final LinkedBlockingQueue<ByteBuffer> recvBufferQueue = new LinkedBlockingQueue<>();

    UdpCommunicatorListener listener = new UdpCommunicatorListener() {

        @Override
        public void incomingPacket(InetSocketAddress sourceAddress, DatagramChannel channel,
                ByteBuffer packet) {
            if (channel != unicastChannel) {
                return;
            }

            recvBufferQueue.add(packet);
        }
    };

    // timeout duration should double each iteration, starting from 250 according to spec
    // i = 1, maxWaitTime = (1 << (1-1)) * 250 = (1 << 0) * 250 = 1 * 250 = 250
    // i = 2, maxWaitTime = (1 << (2-1)) * 250 = (1 << 1) * 250 = 2 * 250 = 500
    // i = 3, maxWaitTime = (1 << (3-1)) * 250 = (1 << 2) * 250 = 4 * 250 = 1000
    // i = 4, maxWaitTime = (1 << (4-1)) * 250 = (1 << 3) * 250 = 8 * 250 = 2000
    // ...
    try {
        communicator.addListener(listener);
        communicator.send(unicastChannel, gateway, sendBuffer);

        int maxWaitTime = (1 << (attempt - 1)) * 250; // NOPMD

        T pcpResponse = null;

        long endTime = System.currentTimeMillis() + maxWaitTime;
        long waitTime;
        while ((waitTime = endTime - System.currentTimeMillis()) > 0L) {
            waitTime = Math.max(waitTime, 0L); // must be at least 0, probably should never happen

            ByteBuffer recvBuffer = recvBufferQueue.poll(waitTime, TimeUnit.MILLISECONDS);

            if (recvBuffer != null) {
                pcpResponse = creator.create(recvBuffer);
                if (pcpResponse != null) {
                    break;
                }
            }
        }

        return pcpResponse;
    } finally {
        communicator.removeListener(listener);
    }
}

From source file: com.groksolutions.grok.mobile.service.GrokDataSyncService.java

/**
 * Loads metric data from the server
 *
 * @param metricId (optional) The metric Id to get the data. If metricId is {@code null} then
 *                 loads data for all metrics at once.
 * @param from     return records from this date
 * @param to       return records up to this date
 * @see HTMClient#getMetricData
 */
private void loadMetricData(final String metricId, final long from, final long to)
        throws HTMException, IOException {

    if (getClient() == null) {
        Log.w(TAG, "Not connected to any server yet");
        return;
    }
    final CoreDatabase database = HTMITApplication.getDatabase();

    // Blocking queue holding metric data waiting to be saved to the
    // database. This queue will be filled by the HTMClient as it downloads
    // the metric data and it will be emptied by the databaseTask as it
    // saves the data to the database
    final LinkedBlockingQueue<MetricData> pending = new LinkedBlockingQueue<>(
            MAX_PENDING_METRIC_DATA_IO_BUFFER);

    // Background task used save metric data to the database. This task will
    // wait for metric data to arrive from the server and save them to the
    // database in batches until it finds the end of the queue marked by
    // METRIC_DATA_EOF or it times out after 60 seconds
    final Future<?> databaseTask = getService().getIOThreadPool().submit(new Runnable() {
        @Override
        public void run() {

            // Make the batch size 1 hour for all metrics or one week for
            // single metric
            int batchSize = metricId == null ? DataUtils.MILLIS_PER_HOUR : 24 * 7 * DataUtils.MILLIS_PER_HOUR;

            // Save metrics in batches, one batch-size window at a time
            final List<MetricData> batch = new ArrayList<>();

            // Tracks batch timestamp. Once the metric timestamp is greater
            // than the batch timestamp, a new batch is created
            long batchTimestamp = 0;

            try {
                // Process all pending metric data until the METRIC_DATA_EOF
                // is found or a timeout is reached
                MetricData metricData;
                while ((metricData = pending.poll(60, TimeUnit.SECONDS)) != METRIC_DATA_EOF
                        && metricData != null) {
                    // Add metric data to batch regardless of the timestamp.
                    // At this point we may receive stale metric data with
                    // lower timestamp after we receive the latest data with
                    // the current timestamp. As a side effect, you may see
                    // gaps in the data as described in MER-1524
                    batch.add(metricData);
                    // Process batches
                    if (metricData.getTimestamp() > batchTimestamp) {
                        // Calculate next batch timestamp
                        batchTimestamp = metricData.getTimestamp() + batchSize;
                        if (database.addMetricDataBatch(batch)) {
                            Log.d(TAG, "Saving " + batch.size() + " new records");
                            // Notify receivers new data has arrived
                            fireMetricDataChangedEvent();
                        }
                        batch.clear();
                    }
                }
                // Last batch
                if (!batch.isEmpty()) {
                    if (database.addMetricDataBatch(batch)) {
                        Log.d(TAG, "Received " + batch.size() + " records");
                        // Notify receivers new data has arrived
                        fireMetricDataChangedEvent();
                    }
                }
            } catch (InterruptedException e) {
                Log.w(TAG, "Interrupted while loading metric data");
            }
        }
    });

    try {
        // Get new data from server
        getClient().getMetricData(metricId, new Date(from), new Date(to),
                new HTMClient.DataCallback<MetricData>() {
                    @Override
                    public boolean onData(MetricData metricData) {
                        // enqueue data for saving
                        try {
                            Metric metric = database.getMetric(metricData.getMetricId());
                            if (metric == null) {
                                Log.w(TAG, "Received data for unknown metric:" + metricData.getMetricId());
                                return true;
                            }
                            pending.put(metricData);
                        } catch (InterruptedException e) {
                            pending.clear();
                            Log.w(TAG, "Interrupted while loading metric data");
                            return false;
                        }
                        return true;
                    }
                });
        // Mark the end of the records
        pending.add(METRIC_DATA_EOF);
        // Wait for the database task to complete
        databaseTask.get();
    } catch (InterruptedException e) {
        Log.w(TAG, "Interrupted while loading metric data");
    } catch (ExecutionException e) {
        Log.e(TAG, "Failed to load metric data", e);
    }
}