Example usage for com.google.common.collect MinMaxPriorityQueue pollLast

Introduction

On this page you can find example usage for com.google.common.collect MinMaxPriorityQueue pollLast.

Prototype

public E pollLast() 

Document

Removes and returns the greatest element of this queue, or returns null if the queue is empty.
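
As a quick standalone illustration of this behavior (not taken from the source files below; the class name and values are invented for the sketch), the following snippet drains a queue from its greatest element down and shows that pollLast returns null once the queue is empty. It assumes only Guava on the classpath.

import com.google.common.collect.MinMaxPriorityQueue;

public class PollLastDemo {
    public static void main(String[] args) {
        MinMaxPriorityQueue<Integer> queue = MinMaxPriorityQueue.create();
        queue.add(5);
        queue.add(1);
        queue.add(3);

        // pollLast() removes and returns the greatest element per the queue's ordering.
        System.out.println(queue.pollLast()); // 5
        System.out.println(queue.pollLast()); // 3
        System.out.println(queue.pollLast()); // 1

        // Once the queue is empty, pollLast() returns null instead of throwing.
        System.out.println(queue.pollLast()); // null
    }
}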

Usage

From source file:kungfu.algdesign.ds.MovingMedian.java

public static void calculate(Queue<Integer> data, Queue<Integer> medians) {
    // Despite the names, "minHeap" collects the lower half of the values seen so far
    // and "maxHeap" the upper half; the sentinels added next keep both heaps non-empty.
    MinMaxPriorityQueue<Integer> minHeap = MinMaxPriorityQueue.create();
    MinMaxPriorityQueue<Integer> maxHeap = MinMaxPriorityQueue.create();

    minHeap.add(Integer.MIN_VALUE);
    maxHeap.add(Integer.MAX_VALUE);

    Integer item = null;
    Integer median = null;

    while ((item = data.poll()) != null) {
        if (median == null) {
            maxHeap.add(item);
        } else if (item >= median) {
            maxHeap.add(item);
        } else {
            minHeap.add(item);
        }

        if (maxHeap.size() - minHeap.size() == 2) {
            minHeap.add(maxHeap.pollFirst());
        } else if (minHeap.size() - maxHeap.size() == 2) {
            maxHeap.add(minHeap.pollLast());
        }

        if (minHeap.size() >= maxHeap.size()) {
            median = minHeap.peekLast();
        } else {
            median = maxHeap.peekFirst();
        }

        medians.add(median);
    }
}
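
A hypothetical driver for the method above (not part of the original source; the input values are invented for illustration) could look like this:

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Queue;

public class MovingMedianDemo {
    public static void main(String[] args) {
        Queue<Integer> data = new ArrayDeque<>(Arrays.asList(5, 15, 1, 3));
        Queue<Integer> medians = new ArrayDeque<>();

        MovingMedian.calculate(data, medians);

        // The lower median is reported after each element: [5, 5, 5, 3]
        System.out.println(medians);
    }
}

Here pollLast() is what rebalances the halves: whenever the lower half grows two elements larger than the upper half, its greatest element is removed with pollLast() and pushed into the upper half.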

From source file:com.metamx.druid.master.rules.LoadRule.java

private MasterStats drop(int expectedReplicants, int clusterReplicants, final DataSegment segment,
        final DruidMasterRuntimeParams params) {
    MasterStats stats = new MasterStats();
    final ReplicationThrottler replicationManager = params.getReplicationManager();

    if (!params.hasDeletionWaitTimeElapsed()) {
        return stats;
    }

    // Make sure we have enough actual replicants in the cluster before doing anything
    if (clusterReplicants < expectedReplicants) {
        return stats;
    }

    Map<String, Integer> replicantsByType = params.getSegmentReplicantLookup()
            .getClusterTiers(segment.getIdentifier());

    for (Map.Entry<String, Integer> entry : replicantsByType.entrySet()) {
        String tier = entry.getKey();
        int actualNumReplicantsForType = entry.getValue();
        int expectedNumReplicantsForType = getReplicants(tier);

        MinMaxPriorityQueue<ServerHolder> serverQueue = params.getDruidCluster().get(tier);
        if (serverQueue == null) {
            log.makeAlert("No holders found for tier[%s]", entry.getKey()).emit();
            return stats;
        }

        List<ServerHolder> droppedServers = Lists.newArrayList();
        while (actualNumReplicantsForType > expectedNumReplicantsForType) {
            final ServerHolder holder = serverQueue.pollLast();
            if (holder == null) {
                log.warn("Wtf, holder was null?  I have no servers serving [%s]?", segment.getIdentifier());
                break;
            }

            if (holder.isServingSegment(segment)) {
                if (expectedNumReplicantsForType > 0) { // don't throttle unless we are removing extra replicants
                    if (!replicationManager.canDestroyReplicant(getTier())) {
                        serverQueue.add(holder);
                        break;
                    }

                    replicationManager.registerReplicantTermination(getTier(), segment.getIdentifier(),
                            holder.getServer().getHost());
                }

                holder.getPeon().dropSegment(segment, new LoadPeonCallback() {
                    @Override
                    protected void execute() {
                        replicationManager.unregisterReplicantTermination(getTier(), segment.getIdentifier(),
                                holder.getServer().getHost());
                    }
                });
                --actualNumReplicantsForType;
                stats.addToTieredStat("droppedCount", tier, 1);
            }
            droppedServers.add(holder);
        }
        serverQueue.addAll(droppedServers);
    }

    return stats;
}

From source file:io.druid.server.coordinator.rules.LoadRule.java

private CoordinatorStats drop(final Map<String, Integer> loadStatus, final DataSegment segment,
        final DruidCoordinatorRuntimeParams params) {
    CoordinatorStats stats = new CoordinatorStats();

    // Make sure we have enough loaded replicants in the correct tiers in the cluster before doing anything
    for (Integer leftToLoad : loadStatus.values()) {
        if (leftToLoad > 0) {
            return stats;
        }
    }

    final ReplicationThrottler replicationManager = params.getReplicationManager();

    // Find all instances of this segment across tiers
    Map<String, Integer> replicantsByTier = params.getSegmentReplicantLookup()
            .getClusterTiers(segment.getIdentifier());

    for (Map.Entry<String, Integer> entry : replicantsByTier.entrySet()) {
        final String tier = entry.getKey();
        int loadedNumReplicantsForTier = entry.getValue();
        int expectedNumReplicantsForTier = getNumReplicants(tier);

        stats.addToTieredStat(droppedCount, tier, 0);

        MinMaxPriorityQueue<ServerHolder> serverQueue = params.getDruidCluster().get(tier);
        if (serverQueue == null) {
            log.makeAlert("No holders found for tier[%s]", entry.getKey()).emit();
            return stats;
        }

        List<ServerHolder> droppedServers = Lists.newArrayList();
        while (loadedNumReplicantsForTier > expectedNumReplicantsForTier) {
            final ServerHolder holder = serverQueue.pollLast();
            if (holder == null) {
                log.warn("Wtf, holder was null?  I have no servers serving [%s]?", segment.getIdentifier());
                break;
            }

            if (holder.isServingSegment(segment)) {
                if (expectedNumReplicantsForTier > 0) { // don't throttle unless we are removing extra replicants
                    if (!replicationManager.canDestroyReplicant(tier)) {
                        serverQueue.add(holder);
                        break;
                    }

                    replicationManager.registerReplicantTermination(tier, segment.getIdentifier(),
                            holder.getServer().getHost());
                }

                holder.getPeon().dropSegment(segment, new LoadPeonCallback() {
                    @Override
                    public void execute() {
                        replicationManager.unregisterReplicantTermination(tier, segment.getIdentifier(),
                                holder.getServer().getHost());
                    }
                });
                --loadedNumReplicantsForTier;
                stats.addToTieredStat(droppedCount, tier, 1);
            }
            droppedServers.add(holder);
        }
        serverQueue.addAll(droppedServers);
    }

    return stats;
}
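
Both LoadRule examples use pollLast() the same way: repeatedly poll from the greatest end of the server queue, act on each holder, remember what was polled, and add everything back at the end so the queue is left intact for later callers. A minimal, generic sketch of that pattern (not Druid code; the element type and values are invented) follows.

import com.google.common.collect.MinMaxPriorityQueue;
import java.util.ArrayList;
import java.util.List;

public class PollLastAndRestore {
    public static void main(String[] args) {
        MinMaxPriorityQueue<String> queue = MinMaxPriorityQueue.create();
        queue.add("alpha");
        queue.add("bravo");
        queue.add("charlie");

        List<String> polled = new ArrayList<>();
        String candidate;
        // Walk the queue starting from the greatest element (natural ordering here).
        while ((candidate = queue.pollLast()) != null) {
            // ... act on the candidate (the examples above drop a segment replicant) ...
            polled.add(candidate);
        }

        // Restore everything that was polled so the queue still holds all elements.
        queue.addAll(polled);
        System.out.println(queue.pollLast()); // charlie
    }
}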