Example usage for com.google.common.collect MinMaxPriorityQueue add

Introduction

This page collects usage examples for com.google.common.collect MinMaxPriorityQueue.add.

Prototype

@Override
public boolean add(E element) 

Description

Adds the given element to this queue. If the queue was created with a maximum size, adding an element beyond that size automatically evicts the queue's greatest element (according to its comparator), which may be the element just added.
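
A minimal sketch of that behavior (not drawn from any of the projects below): a queue bounded at three elements under natural ordering keeps the three least values.

MinMaxPriorityQueue<Integer> queue = MinMaxPriorityQueue.maximumSize(3).create();
queue.add(5);
queue.add(1);
queue.add(9);
queue.add(4); // exceeds the bound, so 9, the greatest element, is evicted
System.out.println(queue.peekFirst()); // 1 (least element)
System.out.println(queue.peekLast()); // 5 (greatest element)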

Usage

From source file:org.commoncrawl.mapred.pipelineV3.domainmeta.blogs.feedurlid.FeedUrlIdStep.java

public static void main(String[] args) {
    ArrayList<URLCandidate> candidates = new ArrayList<URLCandidate>();
    candidates.add(new URLCandidate(new GoogleURL("http://2010.goldenplains.com.au/info/feed/")));
    candidates.add(
            new URLCandidate(new GoogleURL("http://2010.goldenplains.com.au/supernatural-amphitheatre/feed/")));
    candidates.add(new URLCandidate(new GoogleURL("http://2010.goldenplains.com.au/tickets-pre-ballot/feed/")));

    collapseCandidates(candidates, 2);

    System.out.println(candidates.toString());

    MinMaxPriorityQueue<URLCandidate> deque2 = MinMaxPriorityQueue.create();
    deque2.add(new URLCandidate(new GoogleURL("http://2010.goldenplains.com.au/blog/feed/")));
    deque2.add(new URLCandidate(
            new GoogleURL("http://2010.goldenplains.com.au/blog/supernatural-amphitheatre/feed/")));
    deque2.add(
            new URLCandidate(new GoogleURL("http://2010.goldenplains.com.au/vlog/tickets-pre-ballot/feed/")));
    deque2.add(
            new URLCandidate(new GoogleURL("http://2010.goldenplains.com.au/blog/tickets-pre-ballot/feed/")));
    deque2.add(
            new URLCandidate(new GoogleURL("http://2010.goldenplains.com.au/vlog/tickets-pre-ballot-2/feed/")));
    deque2.add(
            new URLCandidate(new GoogleURL("http://2010.goldenplains.com.au/vlog/tickets-pre-ballot-3/feed/")));
    ArrayList<URLCandidate> test = drainToArrayList(deque2);
    System.out.println(test);

    collapseCandidates(test, 2);

    System.out.println(test.toString());
}

From source file:kungfu.algdesign.ds.MovingMedian.java

public static void calculate(Queue<Integer> data, Queue<Integer> medians) {
    // minHeap holds the lower half of the values seen so far; maxHeap holds the upper half.
    MinMaxPriorityQueue<Integer> minHeap = MinMaxPriorityQueue.create();
    MinMaxPriorityQueue<Integer> maxHeap = MinMaxPriorityQueue.create();

    // Sentinel values keep both queues non-empty, so the peeks below never return null.
    minHeap.add(Integer.MIN_VALUE);
    maxHeap.add(Integer.MAX_VALUE);

    Integer item = null;
    Integer median = null;

    while ((item = data.poll()) != null) {
        if (median == null) {
            maxHeap.add(item);
        } else if (item >= median) {
            maxHeap.add(item);
        } else {
            minHeap.add(item);
        }

        // Rebalance so the two halves never differ in size by more than one.
        if (maxHeap.size() - minHeap.size() == 2) {
            minHeap.add(maxHeap.pollFirst());
        } else if (minHeap.size() - maxHeap.size() == 2) {
            maxHeap.add(minHeap.pollLast());
        }


        // The median is the greatest of the lower half or, when the upper half is larger, the least of the upper half.
        if (minHeap.size() >= maxHeap.size()) {
            median = minHeap.peekLast();
        } else {
            median = maxHeap.peekFirst();
        }

        medians.add(median);
    }
}
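
A hypothetical driver for the method above (the input values are made up; imports are elided as in the other snippets):

Queue<Integer> data = new LinkedList<>(Arrays.asList(5, 15, 1, 3));
Queue<Integer> medians = new LinkedList<>();
MovingMedian.calculate(data, medians);
System.out.println(medians); // [5, 5, 5, 3] -- the running median after each element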

From source file:com.linkedin.pinot.core.query.aggregation.groupby.AggregationGroupByOperatorService.java

/**
 * Given a map from group by keys to results for multiple aggregation functions, trim the results to desired size and
 * put them into a list of group by results.
 *
 * @param aggrFuncList List of aggregation functions.
 * @param aggrGroupByResults Map from group by keys to result arrays.
 * @param trimmedGroupByResultList List of maps containing group by results returned.
 * @param numAggrFunctions Number of aggregation functions.
 * @param trimSize Desired trim size.
 */
@SuppressWarnings("unchecked")
private static void trimToSize(List<AggregationFunction> aggrFuncList,
        Map<String, Serializable[]> aggrGroupByResults,
        List<Map<String, Serializable>> trimmedGroupByResultList, int numAggrFunctions, int trimSize) {
    MinMaxPriorityQueue<ImmutablePair<Serializable, String>>[] heaps = new MinMaxPriorityQueue[numAggrFunctions];
    for (int i = 0; i < numAggrFunctions; i++) {
        boolean reverseOrder = aggrFuncList.get(i).getFunctionName().startsWith(MIN_PREFIX);
        heaps[i] = getMinMaxPriorityQueue(aggrGroupByResults.values().iterator().next()[i], trimSize,
                reverseOrder);
    }

    for (String key : aggrGroupByResults.keySet()) {
        Serializable[] results = aggrGroupByResults.get(key);
        for (int i = 0; i < numAggrFunctions; i++) {
            Serializable result = results[i];
            MinMaxPriorityQueue<ImmutablePair<Serializable, String>> heap = heaps[i];
            if (heap == null) {
                trimmedGroupByResultList.get(i).put(key, result);
            } else {
                heap.add(new ImmutablePair(result, key));
            }
        }
    }

    for (int i = 0; i < numAggrFunctions; i++) {
        MinMaxPriorityQueue<ImmutablePair<Serializable, String>> heap = heaps[i];
        ImmutablePair<Serializable, String> pair;
        if (heap != null) {
            while ((pair = heap.pollFirst()) != null) {
                trimmedGroupByResultList.get(i).put(pair.getRight(), pair.getLeft());
            }
        }
    }
}
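
The trimming idiom above, reduced to plain types in a hypothetical sketch: a queue with maximumSize retains only the best trimSize entries, and reversing the comparator decides whether "best" means largest or smallest.

MinMaxPriorityQueue<ImmutablePair<Long, String>> heap = MinMaxPriorityQueue
        .orderedBy(new Comparator<ImmutablePair<Long, String>>() {
            @Override
            public int compare(ImmutablePair<Long, String> p1, ImmutablePair<Long, String> p2) {
                return p2.getLeft().compareTo(p1.getLeft()); // largest values first
            }
        }).maximumSize(3).create();
heap.add(new ImmutablePair<>(10L, "a"));
heap.add(new ImmutablePair<>(30L, "b"));
heap.add(new ImmutablePair<>(20L, "c"));
heap.add(new ImmutablePair<>(40L, "d")); // evicts (10, "a"): the smallest value is the greatest element in this ordering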

From source file:com.github.gdfm.shobaidogu.StatsUtils.java

/**
 * Compute the top-k entries with the largest values in a map whose values are Comparable.
 *
 * @param counts
 *          the map.
 * @param k
 *          how many elements to keep.
 * @return a map with top-k elements.
 */
public static <K, V extends Comparable<V>> Map<K, V> topKComparable(Map<K, V> counts, int k) {
    MinMaxPriorityQueue<Entry<K, V>> maxHeap = MinMaxPriorityQueue
            .<Entry<K, V>>orderedBy(new Comparator<Entry<K, V>>() {
                @Override
                public int compare(Entry<K, V> o1, Entry<K, V> o2) {
                    return o2.getValue().compareTo(o1.getValue()); // reversed: largest values first
                }
            }).maximumSize(k).create();
    // keep top-k
    for (Entry<K, V> e : counts.entrySet())
        maxHeap.add(e);
    Map<K, V> result = Maps.newHashMapWithExpectedSize(k);
    for (Entry<K, V> e : maxHeap)
        result.put(e.getKey(), e.getValue());
    return result;
}
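
A hypothetical call site for the method above (the counts are made up):

Map<String, Integer> counts = ImmutableMap.of("foo", 7, "bar", 3, "baz", 9, "qux", 1);
Map<String, Integer> top3 = StatsUtils.topKComparable(counts, 3);
System.out.println(top3); // {baz=9, foo=7, bar=3}, in unspecified HashMap order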

From source file:com.github.gdfm.shobaidogu.StatsUtils.java

/**
 * Compute the top-k entries with the largest values in a map whose values are numbers (e.g., term frequency counts).
 *
 * @param counts
 *          the map.
 * @param k
 *          how many elements to keep.
 * @return a map with top-k elements.
 */
public static <K, V extends Number> Map<K, V> topK(Map<K, V> counts, int k) {
    MinMaxPriorityQueue<Entry<K, V>> maxHeap = MinMaxPriorityQueue
            .<Entry<K, V>>orderedBy(new Comparator<Entry<K, V>>() {
                @Override
                public int compare(Entry<K, V> o1, Entry<K, V> o2) {
                    return Double.compare(o2.getValue().doubleValue(), o1.getValue().doubleValue()); // reversed: largest values first
                }
            }).maximumSize(k).create();
    // keep top-k
    for (Entry<K, V> e : counts.entrySet())
        maxHeap.add(e);
    Map<K, V> result = Maps.newHashMapWithExpectedSize(k);
    for (Entry<K, V> e : maxHeap)
        result.put(e.getKey(), e.getValue());
    return result;
}
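
Unlike topKComparable, this variant compares values via doubleValue(), so any Number works. A hypothetical call:

Map<String, Double> freqs = ImmutableMap.of("foo", 0.7, "bar", 0.1, "baz", 0.2);
Map<String, Double> top2 = StatsUtils.topK(freqs, 2);
System.out.println(top2); // {foo=0.7, baz=0.2}, in unspecified HashMap order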

From source file:edu.brandeis.wisedb.scheduler.BestNFirstGraphSearch.java

@Override
public List<Action> schedule(Set<ModelQuery> toSched) {

    FullGraphState first = new FullGraphState(new TreeSet<ModelVM>(), toSched, sla, qtp);
    MinMaxPriorityQueue<StateCost> frontier = MinMaxPriorityQueue.create();
    frontier.add(new StateCost(first, 0, null, null));

    while (!frontier.isEmpty()) {
        log.fine("Frontier size: " + frontier.size());

        PriorityQueue<Action> pq = new PriorityQueue<Action>(new ActionComparator());
        StateCost next = frontier.poll();

        if (next.s.isGoalState()) {
            // we're done
            List<Action> toR = new LinkedList<Action>();
            StateCost last = next;
            while (last.action != null) {
                toR.add(0, last.action);
                last = last.prev;
            }
            log.fine("Reached goal state with following actions: " + toR);

            return toR;
        }

        for (Action a : next.s.getPossibleActions()) {
            int cost = 0;
            FullGraphState nextState = next.s.getNewStateForAction(a);

            cost += h.predictCostToEnd(nextState);
            //cost += nextState.getExecutionCost();

            a.computedCost = cost;
            log.finer("Added action " + a + " to the frontier");
            pq.add(a);
        }

        if (pq.isEmpty()) {
            log.severe("There was no selectable action for state: " + next);
            return null;
        }

        for (int i = 0; i < toTry; i++) {
            Action nextBest = pq.poll();
            if (nextBest == null) {
                log.fine("Unable to get " + (i + 1) + "th action for state " + next);
                break;
            }
            FullGraphState c = next.s.getNewStateForAction(nextBest);
            StateCost candidate = new StateCost(c, c.getExecutionCost(), nextBest, next);
            frontier.add(candidate);
        }

        while (frontier.size() > maxFrontierSize) {
            frontier.removeLast();
        }
    }

    return null;
}
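
The double-ended access that makes MinMaxPriorityQueue suit this beam-style search, isolated in a hypothetical sketch: poll() expands the cheapest state, while removeLast() prunes the costliest once the frontier outgrows its bound.

MinMaxPriorityQueue<Integer> frontier = MinMaxPriorityQueue.create();
frontier.add(42);
frontier.add(7);
frontier.add(99);
int cheapest = frontier.poll(); // 7, the least element
while (frontier.size() > 1) {
    frontier.removeLast(); // prunes 99, the greatest remaining element
}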

From source file:io.druid.server.coordinator.DruidCluster.java

public void add(ServerHolder serverHolder) {
    ImmutableDruidServer server = serverHolder.getServer();
    MinMaxPriorityQueue<ServerHolder> tierServers = cluster.get(server.getTier());
    if (tierServers == null) {
        tierServers = MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create();
        cluster.put(server.getTier(), tierServers);
    }
    tierServers.add(serverHolder);
}

From source file:com.metamx.druid.master.DruidCluster.java

public void add(ServerHolder serverHolder) {
    DruidServer server = serverHolder.getServer();
    MinMaxPriorityQueue<ServerHolder> tierServers = cluster.get(server.getTier());
    if (tierServers == null) {
        tierServers = MinMaxPriorityQueue.orderedBy(Ordering.natural().reverse()).create();
        cluster.put(server.getTier(), tierServers);
    }
    tierServers.add(serverHolder);
}

From source file:gobblin.util.binpacking.WorstFitDecreasingBinPacking.java

@Override
@OverridingMethodsMustInvokeSuper
public List<WorkUnit> pack(List<WorkUnit> workUnitsIn, WorkUnitWeighter weighter) {

    if (this.maxWeightPerUnit <= 0) { // just return the input
        return workUnitsIn;
    }

    List<WorkUnit> workUnits = Lists.newArrayList(workUnitsIn);

    long smallUnitSize = 0; // total size of work units smaller than maxWeightPerUnit
    int largeUnits = 0; // number of work units larger than maxWeightPerUnit
    for (WorkUnit workUnit : workUnits) {
        long weight = weighter.weight(workUnit);
        if (weight <= this.maxWeightPerUnit) {
            smallUnitSize += weight;
        } else {
            largeUnits++;
        }
    }
    int estimateByWeight = largeUnits + (int) ((smallUnitSize - 1) / this.maxWeightPerUnit) + 1;
    int estimatedMultiWorkUnits = Math.min(estimateByWeight, workUnits.size());

    MinMaxPriorityQueue<MultiWorkUnit> pQueue = MinMaxPriorityQueue.orderedBy(new MultiWorkUnitComparator())
            .create();
    for (int i = 0; i < estimatedMultiWorkUnits; i++) {
        pQueue.add(MultiWorkUnit.createEmpty());
    }

    Collections.sort(workUnits, Collections.reverseOrder(new WeightComparator(weighter)));

    for (WorkUnit workUnit : workUnits) {
        MultiWorkUnit lightestMultiWorkUnit = pQueue.peek();
        long weight = Math.max(1, weighter.weight(workUnit));
        long multiWorkUnitWeight = getMultiWorkUnitWeight(lightestMultiWorkUnit);
        if (multiWorkUnitWeight == 0 || (weight + multiWorkUnitWeight <= this.maxWeightPerUnit
                && weight + multiWorkUnitWeight > multiWorkUnitWeight)) { // check for overflow
            // if it fits, add it to lightest work unit
            addToMultiWorkUnit(lightestMultiWorkUnit, workUnit, weight);
            pQueue.poll();
            pQueue.add(lightestMultiWorkUnit);
        } else {
            // if doesn't fit in lightest multi work unit, create a new work unit for it
            MultiWorkUnit newMultiWorkUnit = MultiWorkUnit.createEmpty();
            addToMultiWorkUnit(newMultiWorkUnit, workUnit, weight);
            pQueue.add(newMultiWorkUnit);
        }
    }

    return Lists.<WorkUnit>newArrayList(Iterables.filter(pQueue, new Predicate<MultiWorkUnit>() {
        @Override
        public boolean apply(@Nullable MultiWorkUnit input) {
            return getMultiWorkUnitWeight(input) > 0;
        }
    }));
}
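
A hypothetical miniature of the worst-fit loop above: each item lands in the currently lightest of three bins. Note the poll()/add() pattern: the bin is removed before it is mutated and re-added afterwards, which is what keeps a comparator over mutable state safe here.

MinMaxPriorityQueue<List<Long>> bins = MinMaxPriorityQueue
        .orderedBy(Comparator.<List<Long>>comparingLong(b -> b.stream().mapToLong(Long::longValue).sum()))
        .create();
for (int i = 0; i < 3; i++) {
    bins.add(new ArrayList<Long>());
}
for (long item : new long[] { 9, 7, 6, 5, 4 }) { // presorted descending, as the real code sorts workUnits
    List<Long> lightest = bins.poll(); // remove the lightest bin before mutating it...
    lightest.add(item);
    bins.add(lightest); // ...then re-add it so the queue re-evaluates its weight
}
// final bin weights: 9, 11 (7 + 4), 11 (6 + 5)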

From source file:org.apache.gobblin.util.binpacking.WorstFitDecreasingBinPacking.java

@Override
@OverridingMethodsMustInvokeSuper
public List<WorkUnit> pack(List<WorkUnit> workUnitsIn, WorkUnitWeighter weighter) {

    if (this.maxWeightPerUnit <= 0) { // just return the input
        return workUnitsIn;
    }

    List<WorkUnit> workUnits = Lists.newArrayList(workUnitsIn);

    long smallUnitSize = 0; // total size of work units smaller than maxWeightPerUnit
    int largeUnits = 0; // number of work units larger than maxWeightPerUnit
    for (WorkUnit workUnit : workUnits) {
        long weight = weighter.weight(workUnit);
        if (weight <= this.maxWeightPerUnit) {
            smallUnitSize += weight;
        } else {
            largeUnits++;
        }
    }
    int estimateByWeight = largeUnits + (int) ((smallUnitSize - 1) / this.maxWeightPerUnit) + 1;
    int estimatedMultiWorkUnits = Math.min(estimateByWeight, workUnits.size());

    MinMaxPriorityQueue<MultiWorkUnit> pQueue = MinMaxPriorityQueue.orderedBy(new MultiWorkUnitComparator())
            .create();
    for (int i = 0; i < estimatedMultiWorkUnits; i++) {
        pQueue.add(MultiWorkUnit.createEmpty());
    }

    Collections.sort(workUnits, Collections.reverseOrder(new WeightComparator(weighter)));

    for (WorkUnit workUnit : workUnits) {
        MultiWorkUnit lightestMultiWorkUnit = pQueue.peek();
        long weight = Math.max(1, weighter.weight(workUnit));
        long multiWorkUnitWeight = getMultiWorkUnitWeight(lightestMultiWorkUnit);
        if (multiWorkUnitWeight == 0 || (weight + multiWorkUnitWeight <= this.maxWeightPerUnit
                && weight + multiWorkUnitWeight > multiWorkUnitWeight)) { // check for overflow
            // if it fits, add it to lightest work unit
            addToMultiWorkUnit(lightestMultiWorkUnit, workUnit, weight);
            pQueue.poll();
            pQueue.add(lightestMultiWorkUnit);
        } else {
            // if doesn't fit in lightest multi work unit, create a new work unit for it
            MultiWorkUnit newMultiWorkUnit = MultiWorkUnit.createEmpty();
            addToMultiWorkUnit(newMultiWorkUnit, workUnit, weight);
            pQueue.add(newMultiWorkUnit);
        }
    }

    return Lists.<WorkUnit>newArrayList(Iterables.filter(pQueue, new Predicate<MultiWorkUnit>() {
        @Override
        @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_PARAMETER_MUST_BE_NONNULL_BUT_MARKED_AS_NULLABLE", justification = "Allowing nullable values")
        public boolean apply(@Nullable MultiWorkUnit input) {
            return getMultiWorkUnitWeight(input) > 0;
        }
    }));
}