Example usage for java.util.concurrent BlockingQueue peek

Introduction

On this page you can find example usage for java.util.concurrent.BlockingQueue.peek().

Prototype

E peek();

Document

Retrieves, but does not remove, the head of this queue, or returns null if this queue is empty.
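
Before the full examples below, a minimal self-contained sketch of that contract (class and variable names here are illustrative, not taken from the examples): peek() returns the head without consuming it, and returns null instead of blocking or throwing when the queue is empty.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class PeekDemo {
    public static void main(String[] args) {
        BlockingQueue<String> queue = new LinkedBlockingQueue<String>();

        // Empty queue: peek() returns null (take() would block, element() would throw)
        System.out.println(queue.peek()); // null

        queue.add("first");
        queue.add("second");

        // peek() does not remove the head, so repeated calls see the same element
        System.out.println(queue.peek()); // first
        System.out.println(queue.peek()); // first
        System.out.println(queue.size()); // 2

        // poll() removes the head; peek() then sees the next element
        queue.poll();
        System.out.println(queue.peek()); // second
    }
}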

Usage

From source file:com.openteach.diamond.network.waverider.session.DefaultSession.java

public static void main(String[] args) {

    BlockingQueue<ByteBuffer> inputBuffer = new LinkedBlockingQueue<ByteBuffer>();
    // Build two packets into one buffer, then split off an 8-byte fragment
    // to simulate a packet arriving across multiple network reads.
    ByteBuffer byteBuffer = ByteBuffer.allocate(1024);
    byteBuffer.put(makePacket().marshall());
    byteBuffer.put(makePacket().marshall());
    byteBuffer.flip();
    byte[] b = new byte[8];
    ByteBuffer halfBuf0 = ByteBuffer.allocate(8);
    byteBuffer.get(b);
    halfBuf0.put(b);
    halfBuf0.flip();
    inputBuffer.add(halfBuf0);
    inputBuffer.add(byteBuffer);

    int size = 0;
    int oldSize = size;
    long length = Packet.getHeaderSize();
    ByteBuffer buffer = ByteBuffer.allocate(NetWorkConstants.DEFAULT_NETWORK_BUFFER_SIZE);
    ByteBuffer currentBuffer = null;

    // Reassemble one packet: peek() is used so that a buffer still holding
    // bytes of the next packet can remain at the head of the queue.
    while (size < length) {
        currentBuffer = inputBuffer.peek();
        oldSize = size;
        int position = currentBuffer.position();
        size += currentBuffer.remaining();
        buffer.put(currentBuffer);
        if (size >= Packet.getHeaderSize()) {
            length = buffer.getLong(Packet.getLengthPosition());
        }

        if (size <= length) {
            inputBuffer.remove();
        } else {
            // This buffer also holds bytes of the next packet: rewind both
            // buffers, then copy only the bytes that complete this packet.
            currentBuffer.position(position);
            buffer.position(buffer.position() - currentBuffer.remaining());
            byte[] buf = new byte[(int) (length - oldSize)];
            currentBuffer.get(buf);
            buffer.put(buf);
        }
    }

    buffer.flip();
    Packet packet = Packet.unmarshall(buffer);

    Command command = CommandFactory.createCommand(packet.getType(), packet.getPayLoad());

    String str = new String(command.getPayLoad().array());

    System.out.println(str);

}

From source file:com.openteach.diamond.network.waverider.network.Packet.java

/**
 * Parses a single Packet from the stream of buffers read off the network,
 * blocking until a complete packet has been assembled.
 * @param inputBuffer queue of byte buffers filled by the network layer
 * @param endPoint endpoint used to request further reads on the channel
 * @param channel the channel the data is read from
 * @return the parsed packet
 * @throws IOException if the socket is closed by another thread
 * @throws InterruptedException if interrupted while waiting for data
 */
public static Packet parse(BlockingQueue<ByteBuffer> inputBuffer, NetWorkEndPoint endPoint,
        SocketChannel channel) throws IOException, InterruptedException {
    // Buffer for packet header
    byte[] tmpBuf = new byte[NetWorkConstants.DEFAULT_NETWORK_BUFFER_SIZE];
    ByteBuffer header = ByteBuffer.allocate(Packet.getHeaderSize());
    ByteBuffer currentBuffer = null;
    int rest = 0;
    boolean isRemove = false;

    // Read the packet header, which may span several queued buffers
    while (true) {
        while ((currentBuffer = inputBuffer.peek()) == null) {
            if (!endPoint.notifyRead(channel)) {
                throw new IOException("Socket closed by other thread");
            }
            // No data available yet; yield and re-check the queue
            Thread.yield();
        }
        isRemove = false;
        rest = header.capacity() - header.position();
        if (currentBuffer.remaining() >= rest) {
            if (currentBuffer.remaining() == rest) {
                isRemove = true;
            }
            currentBuffer.get(tmpBuf, 0, rest);
            header.put(tmpBuf, 0, rest);
            if (isRemove) {
                inputBuffer.remove();
            }
            break;
        } else {
            header.put(currentBuffer);
            inputBuffer.remove();
        }
    }

    header.flip();

    // The header is complete; read the total packet length from it
    // so the body buffer can be sized exactly
    int size = header.getInt(Packet.getLengthPosition());
    ByteBuffer buffer = ByteBuffer.allocate(size);
    buffer.put(header);
    header.clear();

    // Read the packet body, which may also span several queued buffers
    while (true) {
        while ((currentBuffer = inputBuffer.peek()) == null) {
            endPoint.notifyRead(channel);
            Thread.sleep(1000);
        }
        isRemove = false;
        rest = buffer.capacity() - buffer.position();
        if (currentBuffer.remaining() >= rest) {
            if (currentBuffer.remaining() == rest) {
                isRemove = true;
            }
            currentBuffer.get(tmpBuf, 0, rest);
            buffer.put(tmpBuf, 0, rest);
            if (isRemove) {
                inputBuffer.remove();
            }
            break;
        } else {
            buffer.put(currentBuffer);
            inputBuffer.remove();
        }
    }
    buffer.flip();
    Packet packet = Packet.unmarshall(buffer);
    return packet;
}

From source file:gobblin.couchbase.writer.CouchbaseWriterTest.java

private void drainQueue(BlockingQueue<Pair<AbstractDocument, Future>> queue, int threshold, long sleepTime,
        TimeUnit sleepUnit, List<Pair<AbstractDocument, Future>> failedFutures) {
    while (queue.remainingCapacity() < threshold) {
        if (sleepTime > 0) {
            Pair<AbstractDocument, Future> topElement = queue.peek();
            if (topElement != null) {
                try {
                    topElement.getSecond().get(sleepTime, sleepUnit);
                } catch (Exception te) {
                    failedFutures.add(topElement);
                }
                queue.poll();
            }
        }
    }
}

From source file:org.apache.falcon.service.FeedSLAMonitoringService.java

void addNewPendingFeedInstances(Date from, Date to) throws FalconException {
    Set<String> currentClusters = DeploymentUtil.getCurrentClusters();
    for (String feedName : monitoredFeeds) {
        Feed feed = EntityUtil.getEntity(EntityType.FEED, feedName);
        for (Cluster feedCluster : feed.getClusters().getClusters()) {
            if (currentClusters.contains(feedCluster.getName())) {
                Date nextInstanceTime = from;
                Pair<String, String> key = new Pair<>(feed.getName(), feedCluster.getName());
                BlockingQueue<Date> instances = pendingInstances.get(key);
                if (instances == null) {
                    instances = new LinkedBlockingQueue<>(queueSize);
                    Date feedStartTime = feedCluster.getValidity().getStart();
                    Frequency retentionFrequency = FeedHelper.getRetentionFrequency(feed, feedCluster);
                    ExpressionHelper evaluator = ExpressionHelper.get();
                    ExpressionHelper.setReferenceDate(new Date());
                    Date retention = new Date(evaluator.evaluate(retentionFrequency.toString(), Long.class));
                    if (feedStartTime.before(retention)) {
                        feedStartTime = retention;
                    }
                    nextInstanceTime = feedStartTime;
                }
                Set<Date> exists = new HashSet<>(instances);
                org.apache.falcon.entity.v0.cluster.Cluster currentCluster = EntityUtil
                        .getEntity(EntityType.CLUSTER, feedCluster.getName());
                nextInstanceTime = EntityUtil.getNextStartTime(feed, currentCluster, nextInstanceTime);
                while (nextInstanceTime.before(to)) {
                    if (instances.size() >= queueSize) { // if no space, first make some space
                        LOG.debug("Removing instance={} for <feed,cluster>={}", instances.peek(), key);
                        exists.remove(instances.peek());
                        instances.remove();
                    }
                    LOG.debug("Adding instance={} for <feed,cluster>={}", nextInstanceTime, key);
                    if (exists.add(nextInstanceTime)) {
                        instances.add(nextInstanceTime);
                    }
                    nextInstanceTime = new Date(nextInstanceTime.getTime() + ONE_MS);
                    nextInstanceTime = EntityUtil.getNextStartTime(feed, currentCluster, nextInstanceTime);
                }
                pendingInstances.put(key, instances);
            }
        }
    }
}

From source file:org.apache.falcon.service.FeedSLAMonitoringService.java

@SuppressWarnings("unchecked")
private void deserialize(Path path) throws FalconException {
    try {
        Map<String, Object> state = deserializeInternal(path);
        pendingInstances = new ConcurrentHashMap<>();
        Map<Pair<String, String>, BlockingQueue<Date>> pendingInstancesCopy = (Map<Pair<String, String>, BlockingQueue<Date>>) state
                .get("pendingInstances");
        // queue size can change during restarts, hence copy
        for (Map.Entry<Pair<String, String>, BlockingQueue<Date>> entry : pendingInstancesCopy.entrySet()) {
            BlockingQueue<Date> value = new LinkedBlockingQueue<>(queueSize);
            BlockingQueue<Date> oldValue = entry.getValue();
            LOG.debug("Number of old instances:{}, new queue size:{}", oldValue.size(), queueSize);
            while (!oldValue.isEmpty()) {
                Date instance = oldValue.remove();
                if (value.size() == queueSize) { // if full
                    LOG.debug("Deserialization: Removing value={} for <feed,cluster>={}", value.peek(),
                            entry.getKey());
                    value.remove();
                }
                LOG.debug("Deserialization Adding: key={} to <feed,cluster>={}", entry.getKey(), instance);
                value.add(instance);
            }
            pendingInstances.put(entry.getKey(), value);
        }
        lastCheckedAt = new Date((Long) state.get("lastCheckedAt"));
        lastSerializedAt = new Date((Long) state.get("lastSerializedAt"));
        monitoredFeeds = new ConcurrentHashSet<>(); // will be populated on the onLoad of entities.
        LOG.debug("Restored the service from old state.");
    } catch (IOException | ClassNotFoundException e) {
        throw new FalconException("Couldn't deserialize the old state", e);
    }
}

From source file:org.apache.nifi.controller.repository.FileSystemRepository.java

private boolean deleteBasedOnTimestamp(final BlockingQueue<ArchiveInfo> fileQueue,
        final long removalTimeThreshold) throws IOException {
    // check next file's last mod time.
    final ArchiveInfo nextFile = fileQueue.peek();
    if (nextFile == null) {
        // Continue on to queue up the files, in case the next file must be destroyed based on time.
        return false;
    }

    // If the last mod time indicates that it should be removed, just continue loop.
    final long oldestArchiveDate = getLastModTime(nextFile.toPath());
    return (oldestArchiveDate <= removalTimeThreshold);
}
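
Note the design choice here: peek() lets the method inspect the oldest archive file without committing to its removal. If the head is null or not yet old enough, the queue is left untouched, and the caller only dequeues once it has decided to delete. The next example, from the same file, applies the same peek-then-poll idiom in its deletion loop.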

From source file:org.apache.nifi.controller.repository.FileSystemRepository.java

private long destroyExpiredArchives(final String containerName, final Path container) throws IOException {
    archiveExpirationLog.debug("Destroying Expired Archives for Container {}", containerName);
    final List<ArchiveInfo> notYetExceedingThreshold = new ArrayList<>();
    long removalTimeThreshold = System.currentTimeMillis() - maxArchiveMillis;
    long oldestArchiveDateFound = System.currentTimeMillis();

    // determine how much space we must have in order to stop deleting old data
    final Long minRequiredSpace = minUsableContainerBytesForArchive.get(containerName);
    if (minRequiredSpace == null) {
        archiveExpirationLog
                .debug("Could not determine minimum required space so will not destroy any archived data");
        return -1L;
    }

    final long usableSpace = getContainerUsableSpace(containerName);
    final ContainerState containerState = containerStateMap.get(containerName);

    // First, delete files from our queue
    final long startNanos = System.nanoTime();
    final long toFree = minRequiredSpace - usableSpace;
    final BlockingQueue<ArchiveInfo> fileQueue = archivedFiles.get(containerName);
    if (archiveExpirationLog.isDebugEnabled()) {
        if (toFree < 0) {
            archiveExpirationLog.debug(
                    "Currently {} bytes free for Container {}; requirement is {} byte free, so no need to free space until an additional {} bytes are used",
                    usableSpace, containerName, minRequiredSpace, Math.abs(toFree));
        } else {
            archiveExpirationLog.debug(
                    "Currently {} bytes free for Container {}; requirement is {} byte free, so need to free {} bytes",
                    usableSpace, containerName, minRequiredSpace, toFree);
        }
    }

    ArchiveInfo toDelete;
    int deleteCount = 0;
    long freed = 0L;
    while ((toDelete = fileQueue.peek()) != null) {
        try {
            final long fileSize = toDelete.getSize();

            removalTimeThreshold = System.currentTimeMillis() - maxArchiveMillis;

            // we use fileQueue.peek above instead of fileQueue.poll() because we don't always want to
            // remove the head of the queue. Instead, we want to remove it only if we plan to delete it.
            // In order to accomplish this, we just peek at the head and check if it should be deleted.
            // If so, then we call poll() to remove it
            if (freed < toFree || getLastModTime(toDelete.toPath()) < removalTimeThreshold) {
                toDelete = fileQueue.poll(); // remove the head of the queue, which is already stored in 'toDelete'
                Files.deleteIfExists(toDelete.toPath());
                containerState.decrementArchiveCount();
                LOG.debug(
                        "Deleted archived ContentClaim with ID {} from Container {} because the archival size was exceeding the max configured size",
                        toDelete.getName(), containerName);
                freed += fileSize;
                deleteCount++;
            }

            // If we'd freed up enough space, we're done... unless the next file needs to be destroyed based on time.
            if (freed >= toFree) {
                // If the last mod time indicates that it should be removed, just continue loop.
                if (deleteBasedOnTimestamp(fileQueue, removalTimeThreshold)) {
                    archiveExpirationLog.debug(
                            "Freed enough space ({} bytes freed, needed to free {} bytes) but will continue to expire data based on timestamp",
                            freed, toFree);
                    continue;
                }

                archiveExpirationLog.debug(
                        "Freed enough space ({} bytes freed, needed to free {} bytes). Finished expiring data",
                        freed, toFree);

                final ArchiveInfo archiveInfo = fileQueue.peek();
                final long oldestArchiveDate = archiveInfo == null ? System.currentTimeMillis()
                        : getLastModTime(archiveInfo.toPath());

                // Otherwise, we're done. Return the last mod time of the oldest file in the container's archive.
                final long millis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
                if (deleteCount > 0) {
                    LOG.info(
                            "Deleted {} files from archive for Container {}; oldest Archive Date is now {}; container cleanup took {} millis",
                            deleteCount, containerName, new Date(oldestArchiveDate), millis);
                } else {
                    LOG.debug(
                            "Deleted {} files from archive for Container {}; oldest Archive Date is now {}; container cleanup took {} millis",
                            deleteCount, containerName, new Date(oldestArchiveDate), millis);
                }

                return oldestArchiveDate;
            }
        } catch (final IOException ioe) {
            LOG.warn("Failed to delete {} from archive due to {}", toDelete, ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.warn("", ioe);
            }
        }
    }

    // Go through each container and grab the archived data into a List
    archiveExpirationLog.debug("Searching for more archived data to expire");
    final StopWatch stopWatch = new StopWatch(true);
    for (int i = 0; i < SECTIONS_PER_CONTAINER; i++) {
        final Path sectionContainer = container.resolve(String.valueOf(i));
        final Path archive = sectionContainer.resolve("archive");
        if (!Files.exists(archive)) {
            continue;
        }

        try {
            final long timestampThreshold = removalTimeThreshold;
            Files.walkFileTree(archive, new SimpleFileVisitor<Path>() {
                @Override
                public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs)
                        throws IOException {
                    if (attrs.isDirectory()) {
                        return FileVisitResult.CONTINUE;
                    }

                    final long lastModTime = getLastModTime(file);
                    if (lastModTime < timestampThreshold) {
                        try {
                            Files.deleteIfExists(file);
                            containerState.decrementArchiveCount();
                            LOG.debug(
                                    "Deleted archived ContentClaim with ID {} from Container {} because it was older than the configured max archival duration",
                                    file.toFile().getName(), containerName);
                        } catch (final IOException ioe) {
                            LOG.warn(
                                    "Failed to remove archived ContentClaim with ID {} from Container {} due to {}",
                                    file.toFile().getName(), containerName, ioe.toString());
                            if (LOG.isDebugEnabled()) {
                                LOG.warn("", ioe);
                            }
                        }
                    } else if (usableSpace < minRequiredSpace) {
                        notYetExceedingThreshold
                                .add(new ArchiveInfo(container, file, attrs.size(), lastModTime));
                    }

                    return FileVisitResult.CONTINUE;
                }
            });
        } catch (final IOException ioe) {
            LOG.warn("Failed to cleanup archived files in {} due to {}", archive, ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.warn("", ioe);
            }
        }
    }
    final long deleteExpiredMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS);

    // Sort the list according to last modified time
    Collections.sort(notYetExceedingThreshold, new Comparator<ArchiveInfo>() {
        @Override
        public int compare(final ArchiveInfo o1, final ArchiveInfo o2) {
            return Long.compare(o1.getLastModTime(), o2.getLastModTime());
        }
    });

    final long sortRemainingMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - deleteExpiredMillis;

    // Delete the oldest data
    archiveExpirationLog.debug("Deleting data based on timestamp");
    final Iterator<ArchiveInfo> itr = notYetExceedingThreshold.iterator();
    int counter = 0;
    while (itr.hasNext()) {
        final ArchiveInfo archiveInfo = itr.next();

        try {
            final Path path = archiveInfo.toPath();
            Files.deleteIfExists(path);
            containerState.decrementArchiveCount();
            LOG.debug(
                    "Deleted archived ContentClaim with ID {} from Container {} because the archival size was exceeding the max configured size",
                    archiveInfo.getName(), containerName);

            // Check if we've freed enough space every 25 files that we destroy
            if (++counter % 25 == 0) {
                if (getContainerUsableSpace(containerName) > minRequiredSpace) { // check if we can stop now
                    LOG.debug("Finished cleaning up archive for Container {}", containerName);
                    break;
                }
            }
        } catch (final IOException ioe) {
            LOG.warn("Failed to delete {} from archive due to {}", archiveInfo, ioe.toString());
            if (LOG.isDebugEnabled()) {
                LOG.warn("", ioe);
            }
        }

        itr.remove();
    }

    final long deleteOldestMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - sortRemainingMillis
            - deleteExpiredMillis;

    long oldestContainerArchive;
    if (notYetExceedingThreshold.isEmpty()) {
        oldestContainerArchive = System.currentTimeMillis();
    } else {
        oldestContainerArchive = notYetExceedingThreshold.get(0).getLastModTime();
    }

    if (oldestContainerArchive < oldestArchiveDateFound) {
        oldestArchiveDateFound = oldestContainerArchive;
    }

    // Queue up the files in the order that they should be destroyed so that we don't have to scan the directories for a while.
    for (final ArchiveInfo toEnqueue : notYetExceedingThreshold.subList(0,
            Math.min(100000, notYetExceedingThreshold.size()))) {
        fileQueue.offer(toEnqueue);
    }

    final long cleanupMillis = stopWatch.getElapsed(TimeUnit.MILLISECONDS) - deleteOldestMillis
            - sortRemainingMillis - deleteExpiredMillis;
    LOG.debug(
            "Oldest Archive Date for Container {} is {}; delete expired = {} ms, sort remaining = {} ms, delete oldest = {} ms, cleanup = {} ms",
            containerName, new Date(oldestContainerArchive), deleteExpiredMillis, sortRemainingMillis,
            deleteOldestMillis, cleanupMillis);
    return oldestContainerArchive;
}

From source file:org.wso2.carbon.event.processor.core.internal.ha.HAManager.java

private void becomePassive() {
    membershipMap.put(passiveId, currentCepMembershipInfo);

    threadBarrier.close();

    for (SiddhiHAOutputStreamListener streamCallback : streamCallbackList) {
        streamCallback.setDrop(true);
    }

    CEPMembership cepMembership = membershipMap.get(activeId);

    HAServiceClient haServiceClient = new HAServiceClientThriftImpl();

    SnapshotData snapshotData = null;
    try {
        snapshotData = haServiceClient.getSnapshot(tenantId, executionPlanName, cepMembership,
                currentCepMembershipInfo);
    } catch (Exception e) {
        log.error("Error in becoming the passive member for " + executionPlanName + " on tenant:" + tenantId
                + ", " + e.getMessage(), e);
        threadBarrier.open();

        return;
    }

    int count = 0;
    while (count < 1000) {
        if (threadBarrier.getBlockedThreads().longValue() == inputProcessors) {
            break;
        } else {
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
        count++;
    }

    try {
        siddhiManager.restore(snapshotData.getStates());
        byte[] eventData = snapshotData.getNextEventData();
        HashMap<String, Object[]> eventMap = (HashMap<String, Object[]>) ByteSerializer.BToO(eventData);
        for (Map.Entry<String, Object[]> entry : eventMap.entrySet()) {
            SiddhiHAInputEventDispatcher inputEventDispatcher = inputEventDispatcherMap.get(entry.getKey());
            if (inputEventDispatcher == null) {
                throw new Exception(entry.getKey() + " stream mismatched with the Active Node "
                        + executionPlanName + " execution plan for tenant:" + tenantId);
            }
            BlockingQueue<Object[]> eventQueue = inputEventDispatcher.getEventQueue();
            Object[] activeEventData = entry.getValue();
            Object[] passiveEventData = eventQueue.peek();
            // Discard events the active node has already processed until the
            // passive queue's head matches the active node's next event
            while (!Arrays.equals(passiveEventData, activeEventData)) {
                eventQueue.remove();
                passiveEventData = eventQueue.peek();
            }
        }

    } catch (Throwable t) {
        log.error("Syncing failed when becoming a Passive Node for tenant:" + tenantId + " on:"
                + executionPlanName + " execution plan", t);

    }

    threadBarrier.open();
    log.info("Became Passive Member for tenant:" + tenantId + " on:" + executionPlanName);

}

From source file:org.xwiki.mail.internal.DefaultMailSender.java

@Override
public void send(MimeMessage message, Session session) throws MessagingException {
    DefaultMailResultListener listener = new DefaultMailResultListener();
    sendAsynchronously(message, session, listener);
    waitTillSent(Long.MAX_VALUE);
    BlockingQueue<Exception> errorQueue = listener.getExceptionQueue();
    if (!errorQueue.isEmpty()) {
        throw new MessagingException(String.format("Failed to send mail message [%s]", message),
                errorQueue.peek());
    }
}