Example usage for java.util.concurrent BlockingQueue remove

Introduction

On this page you can find example usages of the java.util.concurrent BlockingQueue.remove() method, collected from open-source projects.

Prototype

E remove();

Document

Retrieves and removes the head of this queue. This method differs from poll() only in that it throws an exception if the queue is empty.
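
As a quick illustration (not taken from the source files below): remove() throws NoSuchElementException when the queue is empty, whereas poll() returns null.

import java.util.NoSuchElementException;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class RemoveDemo {
    public static void main(String[] args) {
        BlockingQueue<String> queue = new LinkedBlockingQueue<>();
        queue.add("head");
        System.out.println(queue.remove()); // prints "head"
        System.out.println(queue.poll());   // prints "null": poll() tolerates an empty queue
        try {
            queue.remove();                 // remove() does not
        } catch (NoSuchElementException e) {
            System.out.println("queue was empty");
        }
    }
}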

Usage

From source file:com.openteach.diamond.network.waverider.session.DefaultSession.java

public static void main(String[] args) {

    BlockingQueue<ByteBuffer> inputBuffer = new LinkedBlockingQueue<ByteBuffer>();
    ByteBuffer byteBuffer = ByteBuffer.allocate(1024);
    byteBuffer.put(makePacket().marshall());
    byteBuffer.put(makePacket().marshall());
    byteBuffer.flip();
    byte[] b = new byte[8];
    ByteBuffer halfBuf0 = ByteBuffer.allocate(8);
    byteBuffer.get(b);
    halfBuf0.put(b);
    halfBuf0.flip();
    inputBuffer.add(halfBuf0);
    inputBuffer.add(byteBuffer);

    int size = 0;
    int oldSize = size;
    long length = Packet.getHeaderSize();
    ByteBuffer buffer = ByteBuffer.allocate(NetWorkConstants.DEFAULT_NETWORK_BUFFER_SIZE);
    ByteBuffer currentBuffer = null;

    // Reassemble one packet from the queued fragments.
    while (size < length) {
        currentBuffer = inputBuffer.peek();
        oldSize = size;
        int position = currentBuffer.position();
        size += currentBuffer.remaining();
        buffer.put(currentBuffer);
        // Once the full header has arrived, read the real packet length from it.
        if (size >= Packet.getHeaderSize()) {
            length = buffer.getLong(Packet.getLengthPosition());
        }

        if (size <= length) {
            // The fragment belongs entirely to this packet: consume it.
            inputBuffer.remove();
        } else {
            // The fragment runs past the packet boundary: rewind both buffers
            // and copy only the bytes this packet still needs, leaving the
            // rest queued for the next packet.
            currentBuffer.position(position);
            buffer.position(buffer.position() - currentBuffer.remaining());
            byte[] buf = new byte[(int) (length - oldSize)];
            currentBuffer.get(buf);
            buffer.put(buf);
        }
    }

    // buffer.position(0);
    buffer.flip();
    Packet packet = Packet.unmarshall(buffer);

    Command command = CommandFactory.createCommand(packet.getType(), packet.getPayLoad());

    String str = new String(command.getPayLoad().array());

    System.out.println(str);

}
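
This example calls remove() only once the buffer at the head of the queue has been consumed up to the packet boundary; otherwise it rewinds and leaves the buffer queued for the next packet. A minimal sketch of that peek-then-remove pattern (standalone, with made-up data):

import java.nio.ByteBuffer;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class PeekThenRemove {
    public static void main(String[] args) {
        BlockingQueue<ByteBuffer> queue = new LinkedBlockingQueue<>();
        queue.add(ByteBuffer.wrap(new byte[] { 1, 2, 3, 4 }));

        ByteBuffer head = queue.peek(); // inspect the head without removing it
        byte[] out = new byte[2];
        head.get(out);                  // consume only part of the buffer
        if (!head.hasRemaining()) {
            queue.remove();             // discard the head only once fully drained
        }
        System.out.println("bytes left at head: " + head.remaining()); // 2
    }
}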

From source file:com.openteach.diamond.network.waverider.network.Packet.java

/**
 * Parse one Packet from the network input buffer.
 * @param inputBuffer queue of buffers read from the network
 * @return the parsed packet
 * @throws IOException
 * @throws InterruptedException
 */
public static Packet parse(BlockingQueue<ByteBuffer> inputBuffer, NetWorkEndPoint endPoint,
        SocketChannel channel) throws IOException, InterruptedException {
    // Buffer for packet header
    byte[] tmpBuf = new byte[NetWorkConstants.DEFAULT_NETWORK_BUFFER_SIZE];
    ByteBuffer header = ByteBuffer.allocate(Packet.getHeaderSize());
    ByteBuffer currentBuffer = null;
    int rest = 0;
    boolean isRemove = false;

    // Read the packet header.
    while (true) {
        while ((currentBuffer = inputBuffer.peek()) == null) {
            if (!endPoint.notifyRead(channel)) {
                throw new IOException("Socket closed by other thread");
            }
            // Wait for more data to arrive.
            //endPoint.waitMoreData(5);
            // FIXME 2ms
            //Thread.sleep(1);
            Thread.yield();
        }
        isRemove = false;
        rest = header.capacity() - header.position();
        if (currentBuffer.remaining() >= rest) {
            if (currentBuffer.remaining() == rest) {
                isRemove = true;
            }
            currentBuffer.get(tmpBuf, 0, rest);
            header.put(tmpBuf, 0, rest);
            if (isRemove) {
                inputBuffer.remove();
            }
            break;
        } else {
            header.put(currentBuffer);
            inputBuffer.remove();
        }
    }

    header.flip();

    // The header is complete; read the total packet length from it.
    Integer size = header.getInt(Packet.getLengthPosition());
    // For test
    /*if(size < 0 || size > 100000) {
       logger.info("Error");
    }*/
    //logger.debug(new StringBuilder("Try to allocate ").append(size).append(" bytes memory"));
    ByteBuffer buffer = ByteBuffer.allocate(size);
    buffer.put(header);
    header.clear();

    // Read the packet body.
    while (true) {
        while ((currentBuffer = inputBuffer.peek()) == null) {
            endPoint.notifyRead(channel);
            Thread.sleep(1000);
        }
        isRemove = false;
        rest = buffer.capacity() - buffer.position();
        if (currentBuffer.remaining() >= rest) {
            if (currentBuffer.remaining() == rest) {
                isRemove = true;
            }
            currentBuffer.get(tmpBuf, 0, rest);
            buffer.put(tmpBuf, 0, rest);
            if (isRemove) {
                inputBuffer.remove();
            }
            break;
        } else {
            buffer.put(currentBuffer);
            inputBuffer.remove();
        }
    }
    //buffer.position(0);
    buffer.flip();
    Packet packet = Packet.unmarshall(buffer);
    //logger.info("Parse one packet from network");
    //packet.dump();
    return packet;
}
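
Both fill loops busy-wait on peek() (with Thread.yield() or Thread.sleep()) because a partially consumed buffer must stay at the head of the queue. When a consumer can take ownership of a whole element at a time, the blocking take() is the simpler idiom; a standalone sketch (not from this source):

import java.nio.ByteBuffer;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class TakeInsteadOfSpin {
    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<ByteBuffer> queue = new LinkedBlockingQueue<>();
        new Thread(() -> queue.add(ByteBuffer.allocate(16))).start();
        // take() parks this thread until an element arrives, so no spin loop is needed.
        ByteBuffer buffer = queue.take();
        System.out.println("received a buffer of " + buffer.capacity() + " bytes");
    }
}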

From source file:org.apache.falcon.service.FeedSLAMonitoringService.java

void addNewPendingFeedInstances(Date from, Date to) throws FalconException {
    Set<String> currentClusters = DeploymentUtil.getCurrentClusters();
    for (String feedName : monitoredFeeds) {
        Feed feed = EntityUtil.getEntity(EntityType.FEED, feedName);
        for (Cluster feedCluster : feed.getClusters().getClusters()) {
            if (currentClusters.contains(feedCluster.getName())) {
                Date nextInstanceTime = from;
                Pair<String, String> key = new Pair<>(feed.getName(), feedCluster.getName());
                BlockingQueue<Date> instances = pendingInstances.get(key);
                if (instances == null) {
                    instances = new LinkedBlockingQueue<>(queueSize);
                    Date feedStartTime = feedCluster.getValidity().getStart();
                    Frequency retentionFrequency = FeedHelper.getRetentionFrequency(feed, feedCluster);
                    ExpressionHelper evaluator = ExpressionHelper.get();
                    ExpressionHelper.setReferenceDate(new Date());
                    Date retention = new Date(evaluator.evaluate(retentionFrequency.toString(), Long.class));
                    if (feedStartTime.before(retention)) {
                        feedStartTime = retention;
                    }
                    nextInstanceTime = feedStartTime;
                }
                Set<Date> exists = new HashSet<>(instances);
                org.apache.falcon.entity.v0.cluster.Cluster currentCluster = EntityUtil
                        .getEntity(EntityType.CLUSTER, feedCluster.getName());
                nextInstanceTime = EntityUtil.getNextStartTime(feed, currentCluster, nextInstanceTime);
                while (nextInstanceTime.before(to)) {
                    if (instances.size() >= queueSize) { // if no space, first make some space
                        LOG.debug("Removing instance={} for <feed,cluster>={}", instances.peek(), key);
                        exists.remove(instances.peek());
                        instances.remove();
                    }
                    LOG.debug("Adding instance={} for <feed,cluster>={}", nextInstanceTime, key);
                    if (exists.add(nextInstanceTime)) {
                        instances.add(nextInstanceTime);
                    }
                    nextInstanceTime = new Date(nextInstanceTime.getTime() + ONE_MS);
                    nextInstanceTime = EntityUtil.getNextStartTime(feed, currentCluster, nextInstanceTime);
                }
                pendingInstances.put(key, instances);
            }
        }
    }
}
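
Here remove() evicts the oldest pending instance to make room before add() on a capacity-bounded queue. The same evict-oldest pattern in isolation (a sketch with made-up values; note that this check-then-act sequence is only safe without concurrent producers):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class EvictOldest {
    public static void main(String[] args) {
        BlockingQueue<Integer> queue = new LinkedBlockingQueue<>(3); // capacity-bounded
        for (int i = 1; i <= 5; i++) {
            if (queue.remainingCapacity() == 0) {
                queue.remove(); // drop the oldest entry to make room
            }
            queue.add(i);
        }
        System.out.println(queue); // prints [3, 4, 5]
    }
}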

From source file:org.apache.falcon.service.FeedSLAMonitoringService.java

@SuppressWarnings("unchecked")
private void deserialize(Path path) throws FalconException {
    try {
        Map<String, Object> state = deserializeInternal(path);
        pendingInstances = new ConcurrentHashMap<>();
        Map<Pair<String, String>, BlockingQueue<Date>> pendingInstancesCopy = (Map<Pair<String, String>, BlockingQueue<Date>>) state
                .get("pendingInstances");
        // queue size can change during restarts, hence copy
        for (Map.Entry<Pair<String, String>, BlockingQueue<Date>> entry : pendingInstancesCopy.entrySet()) {
            BlockingQueue<Date> value = new LinkedBlockingQueue<>(queueSize);
            BlockingQueue<Date> oldValue = entry.getValue();
            LOG.debug("Number of old instances:{}, new queue size:{}", oldValue.size(), queueSize);
            while (!oldValue.isEmpty()) {
                Date instance = oldValue.remove();
                if (value.size() == queueSize) { // if full
                    LOG.debug("Deserialization: Removing value={} for <feed,cluster>={}", value.peek(),
                            entry.getKey());
                    value.remove();
                }
                LOG.debug("Deserialization Adding: key={} to <feed,cluster>={}", entry.getKey(), instance);
                value.add(instance);
            }
            pendingInstances.put(entry.getKey(), value);
        }
        lastCheckedAt = new Date((Long) state.get("lastCheckedAt"));
        lastSerializedAt = new Date((Long) state.get("lastSerializedAt"));
        monitoredFeeds = new ConcurrentHashSet<>(); // will be populated on the onLoad of entities.
        LOG.debug("Restored the service from old state.");
    } catch (IOException | ClassNotFoundException e) {
        throw new FalconException("Couldn't deserialize the old state", e);
    }
}
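
The drain loop guards each remove() with isEmpty(), which is safe assuming only the deserializing thread touches oldValue; remove() on an empty queue would throw NoSuchElementException. The drain in isolation (a sketch):

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class DrainByRemove {
    public static void main(String[] args) {
        BlockingQueue<String> oldQueue = new LinkedBlockingQueue<>();
        oldQueue.add("a");
        oldQueue.add("b");
        // Single-threaded drain: isEmpty() guards each remove(), so
        // NoSuchElementException cannot occur here.
        while (!oldQueue.isEmpty()) {
            System.out.println(oldQueue.remove());
        }
    }
}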

From source file:org.wso2.carbon.event.processor.core.internal.ha.HAManager.java

private void becomePassive() {
    membershipMap.put(passiveId, currentCepMembershipInfo);

    threadBarrier.close();

    for (SiddhiHAOutputStreamListener streamCallback : streamCallbackList) {
        streamCallback.setDrop(true);
    }

    CEPMembership cepMembership = membershipMap.get(activeId);

    HAServiceClient haServiceClient = new HAServiceClientThriftImpl();

    SnapshotData snapshotData = null;
    try {
        snapshotData = haServiceClient.getSnapshot(tenantId, executionPlanName, cepMembership,
                currentCepMembershipInfo);
    } catch (Exception e) {
        log.error("Error in becoming the passive member for " + executionPlanName + " on tenant:" + tenantId
                + ", " + e.getMessage(), e);
        threadBarrier.open();

        return;
    }

    int count = 0;
    while (count < 1000) {
        if (threadBarrier.getBlockedThreads().longValue() == inputProcessors) {
            break;
        } else {
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
        count++;
    }

    try {
        siddhiManager.restore(snapshotData.getStates());
        byte[] eventData = snapshotData.getNextEventData();
        HashMap<String, Object[]> eventMap = (HashMap<String, Object[]>) ByteSerializer.BToO(eventData);
        for (Map.Entry<String, Object[]> entry : eventMap.entrySet()) {
            SiddhiHAInputEventDispatcher inputEventDispatcher = inputEventDispatcherMap.get(entry.getKey());
            if (inputEventDispatcher == null) {
                throw new Exception(entry.getKey() + " stream mismatched with the Active Node "
                        + executionPlanName + " execution plan for tenant:" + tenantId);
            }
            BlockingQueue<Object[]> eventQueue = inputEventDispatcher.getEventQueue();
            Object[] activeEventData = entry.getValue();
            Object[] passiveEventData = eventQueue.peek();
            while (!Arrays.equals(passiveEventData, activeEventData)) {
                eventQueue.remove();
                passiveEventData = eventQueue.peek();
            }
        }

    } catch (Throwable t) {
        log.error("Syncing failed when becoming a Passive Node for tenant:" + tenantId + " on:"
                + executionPlanName + " execution plan", t);

    }

    threadBarrier.open();
    log.info("Became Passive Member for tenant:" + tenantId + " on:" + executionPlanName);

}
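
The final while loop discards stale passive-side events until the head of eventQueue matches the next event reported by the active node. A standalone sketch of that discard-until-match use of remove() (made-up data; if no match exists, peek() eventually returns null, the Arrays.equals() test fails, and remove() throws NoSuchElementException, which the code above absorbs in its catch (Throwable) block):

import java.util.Arrays;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class DiscardUntilMatch {
    public static void main(String[] args) {
        BlockingQueue<Object[]> queue = new LinkedBlockingQueue<>();
        queue.add(new Object[] { "stale-1" });
        queue.add(new Object[] { "stale-2" });
        queue.add(new Object[] { "sync-point" });
        Object[] target = { "sync-point" };
        // Drop events until the head matches the sync point.
        while (!Arrays.equals(queue.peek(), target)) {
            queue.remove();
        }
        System.out.println(queue.size()); // prints 1
    }
}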