Example usage for com.google.common.collect MinMaxPriorityQueue add

Introduction

On this page you can find example usages of com.google.common.collect MinMaxPriorityQueue add.

Prototype

@Override
public boolean add(E element) 

Document

Adds the given element to this queue.
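
Before the project examples, here is a minimal, self-contained sketch of add on a size-bounded queue; all names are illustrative rather than taken from the sources below. When the queue is built with a maximumSize and is full, each add evicts the greatest element according to the queue's comparator, which may be the element just added.

import com.google.common.collect.MinMaxPriorityQueue;

public class MinMaxAddSketch {
    public static void main(String[] args) {
        // Bounded to 3 elements, natural (ascending) ordering.
        MinMaxPriorityQueue<Integer> queue = MinMaxPriorityQueue.maximumSize(3).create();

        for (int value : new int[] { 5, 1, 9, 7, 3 }) {
            queue.add(value); // once full, each add evicts the current greatest element
        }

        // The three smallest values survive: 1, 3, 5.
        while (!queue.isEmpty()) {
            System.out.println(queue.pollFirst());
        }
    }
}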

Usage

From source file:com.davidbracewell.wordnet.WordNet.java

private ListMultimap<Synset, Synset> dijkstra_path(Synset source) {
    Counter<Synset> dist = Counters.newHashMapCounter();
    Map<Synset, Synset> previous = new HashMap<>();
    Set<Synset> visited = Sets.newHashSet(source);

    for (Synset other : getSynsets()) {
        if (!other.equals(source)) {
            dist.set(other, Integer.MAX_VALUE);
            previous.put(other, null);
        }
    }

    MinMaxPriorityQueue<Pair<Synset, Double>> queue = MinMaxPriorityQueue
            .orderedBy(
                    Cast.<Comparator<? super Pair<Synset, Double>>>as(Sorting.mapEntryComparator(false, true)))
            .create();
    queue.add(Pair.of(source, 0d));

    while (!queue.isEmpty()) {
        Pair<Synset, Double> next = queue.remove();

        Synset synset = next.getFirst();
        visited.add(synset);

        Iterable<Synset> neighbors = Iterables.concat(synset.getRelatedSynsets(Relation.HYPERNYM),
                synset.getRelatedSynsets(Relation.HYPERNYM_INSTANCE),
                synset.getRelatedSynsets(Relation.HYPONYM),
                synset.getRelatedSynsets(Relation.HYPONYM_INSTANCE));

        for (Synset neighbor : neighbors) {
            double alt = dist.get(synset);
            if (alt != Integer.MAX_VALUE && (alt + 1) < dist.get(neighbor)) {
                dist.set(neighbor, alt + 1);
                previous.put(neighbor, synset);
            }
            if (!visited.contains(neighbor)) {
                queue.add(Pair.of(neighbor, alt));
            }
        }
    }

    ListMultimap<Synset, Synset> path = ArrayListMultimap.create();
    for (Synset other : getSynsets()) {
        if (other.equals(source) || dist.get(other) == Integer.MAX_VALUE)
            continue;

        Deque<Synset> stack = Lists.newLinkedList();
        Synset u = other;
        while (u != null && previous.containsKey(u)) {
            stack.push(u);
            u = previous.get(u);
        }
        while (!stack.isEmpty()) {
            Synset to = stack.pop();
            path.put(other, to);
        }
    }

    return path;
}
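
The orderedBy(comparator).create() pattern above turns MinMaxPriorityQueue into an ordinary min-priority queue keyed on the pair's distance value. Below is a minimal sketch of the same idea using standard-library types in place of the project's Pair, Cast, and Sorting helpers:

import com.google.common.collect.MinMaxPriorityQueue;

import java.util.AbstractMap.SimpleEntry;
import java.util.Comparator;
import java.util.Map;

public class ComparatorOrderedQueueSketch {
    public static void main(String[] args) {
        // Order entries by ascending distance, so remove() always yields the
        // closest node first, as Dijkstra's algorithm requires.
        Comparator<Map.Entry<String, Double>> byDistance =
                Comparator.comparingDouble(Map.Entry::getValue);
        MinMaxPriorityQueue<Map.Entry<String, Double>> queue =
                MinMaxPriorityQueue.orderedBy(byDistance).create();

        queue.add(new SimpleEntry<>("source", 0.0));
        queue.add(new SimpleEntry<>("far", 7.5));
        queue.add(new SimpleEntry<>("near", 1.2));

        while (!queue.isEmpty()) {
            Map.Entry<String, Double> next = queue.remove(); // smallest distance first
            System.out.println(next.getKey() + " -> " + next.getValue());
        }
    }
}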

From source file:com.github.davidmoten.rx.operators.SortedOutputQueue.java

@Override
public Subscriber<? super T> call(final Subscriber<? super T> child) {
    final MinMaxPriorityQueue<T> q = MinMaxPriorityQueue.orderedBy(comparator).maximumSize(maximumSize)
            .create();
    return new Subscriber<T>(child) {

        @Override
        public void onCompleted() {
            T t;
            // pollFirst() returns null when the queue is empty; removeFirst()
            // would throw NoSuchElementException instead of ending this loop.
            while ((t = q.pollFirst()) != null) {
                if (isUnsubscribed())
                    return;
                child.onNext(t);
            }
            if (isUnsubscribed())
                return;
            child.onCompleted();
        }

        @Override
        public void onError(Throwable t) {
            if (!isUnsubscribed())
                child.onError(t);
        }

        @Override
        public void onNext(T t) {
            if (!isUnsubscribed())
                q.add(t);
        }
    };
}
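
One detail worth highlighting from onCompleted() above: a null-terminated drain loop must use pollFirst(), which returns null once the queue is empty, rather than removeFirst(), which throws NoSuchElementException. A minimal, self-contained sketch of the idiom:

import com.google.common.collect.MinMaxPriorityQueue;

public class DrainSketch {
    public static void main(String[] args) {
        MinMaxPriorityQueue<String> queue = MinMaxPriorityQueue.create();
        queue.add("banana");
        queue.add("apple");
        queue.add("cherry");

        // pollFirst() returns null once the queue is empty, so it is safe as a
        // loop terminator; removeFirst() would throw on the empty queue.
        String next;
        while ((next = queue.pollFirst()) != null) {
            System.out.println(next); // apple, banana, cherry
        }
    }
}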

From source file:com.linkedin.pinot.core.query.aggregation.groupby.AggregationGroupByOperatorService.java

/**
 * Given a group by result, returns the group by result trimmed to the provided size.
 * Sort order is determined by the aggregation function.
 *
 * @param aggregationFunction aggregation function whose ordering determines which groups to keep
 * @param aggregationGroupByResult group by result to trim
 * @param trimSize maximum number of groups to keep
 * @return the trimmed group by result
 */
private Map<String, Serializable> trimToSize(AggregationFunction aggregationFunction,
        Map<String, Serializable> aggregationGroupByResult, int trimSize) {

    boolean reverseOrder = aggregationFunction.getFunctionName().startsWith(MIN_PREFIX);
    MinMaxPriorityQueue<ImmutablePair<Serializable, String>> minMaxPriorityQueue = getMinMaxPriorityQueue(
            aggregationGroupByResult.values().iterator().next(), trimSize, reverseOrder);

    if (minMaxPriorityQueue == null) {
        return aggregationGroupByResult;
    }

    // The size-bounded MinMaxPriorityQueue retains only the top trimSize elements.
    for (String groupedKey : aggregationGroupByResult.keySet()) {
        minMaxPriorityQueue.add(new ImmutablePair<>(aggregationGroupByResult.get(groupedKey), groupedKey));
    }

    Map<String, Serializable> trimmedResult = new HashMap<>();
    ImmutablePair<Serializable, String> pair;
    while ((pair = minMaxPriorityQueue.pollFirst()) != null) {
        trimmedResult.put(pair.getRight(), pair.getLeft());
    }
    return trimmedResult;
}
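
The trim pattern shown here, namely building a queue bounded to trimSize, adding every candidate, and draining with pollFirst(), is a common way to keep only the best N entries of a map. A self-contained sketch of the same idea with plain strings and scores (all names illustrative):

import com.google.common.collect.MinMaxPriorityQueue;

import java.util.Comparator;
import java.util.LinkedHashMap;
import java.util.Map;

public class TrimToSizeSketch {
    public static void main(String[] args) {
        Map<String, Double> scores = Map.of("a", 3.0, "b", 9.0, "c", 1.0, "d", 7.0);
        int trimSize = 2;

        // Order by descending score; with maximumSize set, each add evicts the
        // greatest element under this comparator, i.e. the lowest score.
        MinMaxPriorityQueue<Map.Entry<String, Double>> queue = MinMaxPriorityQueue
                .orderedBy(Comparator.comparingDouble(
                        (Map.Entry<String, Double> e) -> e.getValue()).reversed())
                .maximumSize(trimSize)
                .create();
        queue.addAll(scores.entrySet());

        Map<String, Double> trimmed = new LinkedHashMap<>();
        Map.Entry<String, Double> entry;
        while ((entry = queue.pollFirst()) != null) {
            trimmed.put(entry.getKey(), entry.getValue()); // b=9.0, then d=7.0
        }
        System.out.println(trimmed); // {b=9.0, d=7.0}
    }
}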

From source file:com.linkedin.pinot.core.query.aggregation.groupby.AggregationGroupByOperatorService.java

public List<JSONObject> renderGroupByOperators(List<Map<String, Serializable>> finalAggregationResult) {
    try {
        if (finalAggregationResult == null
                || finalAggregationResult.size() != _aggregationFunctionList.size()) {
            return null;
        }
        List<JSONObject> retJsonResultList = new ArrayList<JSONObject>();
        for (int i = 0; i < _aggregationFunctionList.size(); ++i) {
            JSONArray groupByResultsArray = new JSONArray();

            int groupSize = _groupByColumns.size();
            Map<String, Serializable> reducedGroupByResult = finalAggregationResult.get(i);
            AggregationFunction aggregationFunction = _aggregationFunctionList.get(i);

            if (!reducedGroupByResult.isEmpty()) {
                boolean reverseOrder = aggregationFunction.getFunctionName().startsWith(MIN_PREFIX);

                MinMaxPriorityQueue<ImmutablePair<Serializable, String>> minMaxPriorityQueue = getMinMaxPriorityQueue(
                        reducedGroupByResult.values().iterator().next(), _groupByTopN, reverseOrder);

                if (minMaxPriorityQueue != null) {
                    // The size-bounded MinMaxPriorityQueue retains only the top N entries.
                    for (String groupedKey : reducedGroupByResult.keySet()) {
                        minMaxPriorityQueue
                                .add(new ImmutablePair<>(reducedGroupByResult.get(groupedKey), groupedKey));
                    }

                    ImmutablePair<Serializable, String> res;
                    while ((res = minMaxPriorityQueue.pollFirst()) != null) {
                        JSONObject groupByResultObject = new JSONObject();
                        groupByResultObject.put("group",
                                new JSONArray(((String) res.getRight()).split(
                                        GroupByConstants.GroupByDelimiter.groupByMultiDelimeter.toString(),
                                        groupSize)));
                        //          if (res.getFirst() instanceof Number) {
                        //            groupByResultObject.put("value", df.format(res.getFirst()));
                        //          } else {
                        //            groupByResultObject.put("value", res.getFirst());
                        //          }
                        //          groupByResultsArray.put(realGroupSize - 1 - j, groupByResultObject);
                        groupByResultObject.put("value",
                                aggregationFunction.render((Serializable) res.getLeft()).get("value"));
                        groupByResultsArray.put(groupByResultObject);
                    }
                }
            }

            JSONObject result = new JSONObject();
            result.put("function", aggregationFunction.getFunctionName());
            result.put("groupByResult", groupByResultsArray);
            result.put("groupByColumns", new JSONArray(_groupByColumns));
            retJsonResultList.add(result);
        }
        return retJsonResultList;
    } catch (JSONException e) {
        LOGGER.error("Caught exception while processing group by aggregation", e);
        Utils.rethrowException(e);
        throw new AssertionError("Should not reach this");
    }
}

From source file:main.okapi.graphs.maxbmatching.MaxBMatching.java

private void sendUpdates(Vertex<LongWritable, IntWritable, MBMEdgeValue> vertex) {
    final MBMMessage proposeMsg = new MBMMessage(vertex.getId(), State.PROPOSED);

    // get top-capacity available edges by weight
    final int capacity = vertex.getValue().get();
    MinMaxPriorityQueue<Entry<LongWritable, MBMEdgeValue>> maxHeap = MinMaxPriorityQueue
            .orderedBy(new Comparator<Entry<LongWritable, MBMEdgeValue>>() {
                @Override
                public int compare(Entry<LongWritable, MBMEdgeValue> o1, Entry<LongWritable, MBMEdgeValue> o2) {
                    return -1 * Double.compare(o1.getValue().getWeight(), o2.getValue().getWeight()); // reverse comparator, largest weight first
                }
            }).maximumSize(capacity).create();
    // prepare list of available edges
    for (Edge<LongWritable, MBMEdgeValue> e : vertex.getEdges()) {
        if (e.getValue().getState() == State.DEFAULT || e.getValue().getState() == State.PROPOSED) {
            maxHeap.add(Maps.immutableEntry(e.getTargetVertexId(), e.getValue()));
        }
    }

    if (maxHeap.isEmpty()) {
        // all remaining edges are INCLUDED, nothing else to do
        checkSolution(vertex.getEdges());
        vertex.voteToHalt();
    } else {
        // propose up to capacity
        while (!maxHeap.isEmpty()) {
            Entry<LongWritable, MBMEdgeValue> entry = maxHeap.removeFirst();
            vertex.getEdgeValue(entry.getKey()).setState(State.PROPOSED);
            sendMessage(entry.getKey(), proposeMsg);
        }
    }
}
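
The anonymous comparator above reverses the natural double ordering by negating Double.compare. On Java 8 and later, the same top-capacity-by-weight queue can be expressed more compactly with Comparator.reversed(); the sketch below uses simplified stand-in types, since MBMEdgeValue and the Giraph vertex classes are not reproduced here:

import com.google.common.collect.MinMaxPriorityQueue;

import java.util.Comparator;
import java.util.Map;

public class TopByWeightSketch {
    public static void main(String[] args) {
        int capacity = 2;

        // Largest weight first; maximumSize(capacity) keeps only the
        // capacity heaviest entries as they are added.
        MinMaxPriorityQueue<Map.Entry<Long, Double>> maxHeap = MinMaxPriorityQueue
                .orderedBy(Comparator.comparingDouble(
                        (Map.Entry<Long, Double> e) -> e.getValue()).reversed())
                .maximumSize(capacity)
                .create();

        maxHeap.add(Map.entry(1L, 0.3));
        maxHeap.add(Map.entry(2L, 0.9));
        maxHeap.add(Map.entry(3L, 0.7));

        while (!maxHeap.isEmpty()) {
            Map.Entry<Long, Double> entry = maxHeap.removeFirst(); // heaviest first
            System.out.println(entry.getKey() + " weighs " + entry.getValue());
        }
    }
}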

From source file:de.tudarmstadt.ukp.dkpro.tc.features.ngram.base.LuceneFeatureExtractorBase.java

@Override
protected FrequencyDistribution<String> getTopNgrams() throws ResourceInitializationException {

    FrequencyDistribution<String> topNGrams = new FrequencyDistribution<String>();

    MinMaxPriorityQueue<TermFreqTuple> topN = MinMaxPriorityQueue.maximumSize(getTopN()).create();

    long ngramVocabularySize = 0;
    IndexReader reader;
    try {
        reader = DirectoryReader.open(FSDirectory.open(luceneDir));
        Fields fields = MultiFields.getFields(reader);
        if (fields != null) {
            Terms terms = fields.terms(getFieldName());
            if (terms != null) {
                TermsEnum termsEnum = terms.iterator(null);
                BytesRef text = null;
                while ((text = termsEnum.next()) != null) {
                    String term = text.utf8ToString();
                    long freq = termsEnum.totalTermFreq();
                    if (passesScreening(term)) {
                        topN.add(new TermFreqTuple(term, freq));
                        ngramVocabularySize += freq;
                    }
                }
            }
        }
    } catch (Exception e) {
        throw new ResourceInitializationException(e);
    }

    int size = topN.size();
    for (int i = 0; i < size; i++) {
        TermFreqTuple tuple = topN.poll();
        long absCount = tuple.getFreq();
        double relFrequency = ((double) absCount) / ngramVocabularySize;

        if (relFrequency >= ngramFreqThreshold)
            topNGrams.addSample(tuple.getTerm(), tuple.getFreq());
    }

    getLogger().log(Level.INFO, "+++ SELECTING THE " + topNGrams.getB() + " MOST FREQUENT NGRAMS");

    return topNGrams;
}
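
This example builds the queue with maximumSize(getTopN()).create() and no comparator, so TermFreqTuple must implement Comparable. For the queue to retain the most frequent n-grams, higher frequencies must compare as smaller, because a full queue evicts its greatest element on each add. TermFreqTuple's definition is not shown on this page, so the sketch below is an assumption about how such an element type would be ordered:

import com.google.common.collect.MinMaxPriorityQueue;

public class ComparableElementSketch {

    /** Term/frequency pair ordered so that higher frequencies sort first (assumed ordering). */
    static final class TermFreq implements Comparable<TermFreq> {
        final String term;
        final long freq;

        TermFreq(String term, long freq) {
            this.term = term;
            this.freq = freq;
        }

        @Override
        public int compareTo(TermFreq other) {
            // Descending by frequency: the rarest term is the "greatest"
            // element, which the size-bounded queue evicts first.
            return Long.compare(other.freq, this.freq);
        }
    }

    public static void main(String[] args) {
        MinMaxPriorityQueue<TermFreq> topN = MinMaxPriorityQueue.maximumSize(2).create();
        topN.add(new TermFreq("the", 100));
        topN.add(new TermFreq("zebra", 1));
        topN.add(new TermFreq("of", 80));

        TermFreq t;
        while ((t = topN.poll()) != null) {
            System.out.println(t.term + " " + t.freq); // the 100, of 80
        }
    }
}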

From source file:org.dkpro.tc.features.ngram.base.LuceneFeatureExtractorBase.java

@Override
protected FrequencyDistribution<String> getTopNgrams() throws ResourceInitializationException {

    FrequencyDistribution<String> topNGrams = new FrequencyDistribution<String>();

    MinMaxPriorityQueue<TermFreqTuple> topN = MinMaxPriorityQueue.maximumSize(getTopN()).create();

    long ngramVocabularySize = 0;
    IndexReader reader;
    try {
        reader = DirectoryReader.open(FSDirectory.open(luceneDir));
        Fields fields = MultiFields.getFields(reader);
        if (fields != null) {
            Terms terms = fields.terms(getFieldName());
            if (terms != null) {
                TermsEnum termsEnum = terms.iterator(null);
                BytesRef text = null;
                while ((text = termsEnum.next()) != null) {
                    String term = text.utf8ToString();
                    long freq = termsEnum.totalTermFreq();
                    if (passesScreening(term)) {
                        topN.add(new TermFreqTuple(term, freq));
                        ngramVocabularySize += freq;
                    }
                }
            }
        }
    } catch (Exception e) {
        throw new ResourceInitializationException(e);
    }

    int size = topN.size();
    for (int i = 0; i < size; i++) {
        TermFreqTuple tuple = topN.poll();
        long absCount = tuple.getFreq();
        double relFrequency = ((double) absCount) / ngramVocabularySize;

        if (relFrequency >= ngramFreqThreshold) {
            topNGrams.addSample(tuple.getTerm(), tuple.getFreq());
        }
    }

    logSelectionProcess(topNGrams.getB());

    return topNGrams;
}

From source file:org.dkpro.tc.features.pair.core.ngram.LuceneNGramPFE.java

private FrequencyDistribution<String> getTopNgrams(int topNgramThreshold, String fieldName)
        throws ResourceInitializationException {

    FrequencyDistribution<String> topNGrams = new FrequencyDistribution<String>();

    MinMaxPriorityQueue<TermFreqTuple> topN = MinMaxPriorityQueue.maximumSize(topNgramThreshold).create();
    IndexReader reader;
    try {
        reader = DirectoryReader.open(FSDirectory.open(luceneDir));
        Fields fields = MultiFields.getFields(reader);
        if (fields != null) {
            Terms terms = fields.terms(fieldName);
            if (terms != null) {
                TermsEnum termsEnum = terms.iterator(null);
                BytesRef text = null;
                while ((text = termsEnum.next()) != null) {
                    String term = text.utf8ToString();
                    long freq = termsEnum.totalTermFreq();
                    topN.add(new TermFreqTuple(term, freq));
                }
            }
        }
    } catch (Exception e) {
        throw new ResourceInitializationException(e);
    }

    int size = topN.size();
    for (int i = 0; i < size; i++) {
        TermFreqTuple tuple = topN.poll();
        // System.out.println(tuple.getTerm() + " - " + tuple.getFreq());
        topNGrams.addSample(tuple.getTerm(), tuple.getFreq());
    }

    return topNGrams;
}

From source file:com.linkedin.pinot.tools.StarTreeIndexViewer.java

private int build(StarTreeIndexNodeInterf indexNode, StarTreeJsonNode json) {
    Iterator<? extends StarTreeIndexNodeInterf> childrenIterator = indexNode.getChildrenIterator();
    if (!childrenIterator.hasNext()) {
        return 0;
    }
    int childDimensionId = indexNode.getChildDimensionName();
    String childDimensionName = dimensionNameToIndexMap.inverse().get(childDimensionId);
    Dictionary dictionary = dictionaries.get(childDimensionName);
    int totalChildNodes = indexNode.getNumChildren();

    Comparator<Pair<String, Integer>> comparator = new Comparator<Pair<String, Integer>>() {

        @Override
        public int compare(Pair<String, Integer> o1, Pair<String, Integer> o2) {
            return -1 * Integer.compare(o1.getRight(), o2.getRight());
        }
    };
    MinMaxPriorityQueue<Pair<String, Integer>> queue = MinMaxPriorityQueue.orderedBy(comparator)
            .maximumSize(MAX_CHILDREN).create();
    StarTreeJsonNode allNode = null;

    while (childrenIterator.hasNext()) {
        StarTreeIndexNodeInterf childIndexNode = childrenIterator.next();
        int childDimensionValueId = childIndexNode.getDimensionValue();
        String childDimensionValue = "ALL";
        if (childDimensionValueId != StarTreeIndexNodeInterf.ALL) {
            childDimensionValue = dictionary.get(childDimensionValueId).toString();
        }
        StarTreeJsonNode childJson = new StarTreeJsonNode(childDimensionValue);
        totalChildNodes += build(childIndexNode, childJson);
        if (childDimensionValueId != StarTreeIndexNodeInterf.ALL) {
            json.addChild(childJson);
            queue.add(ImmutablePair.of(childDimensionValue, totalChildNodes));
        } else {
            allNode = childJson;
        }
    }
    //put ALL node at the end
    if (allNode != null) {
        json.addChild(allNode);
    }
    if (totalChildNodes > MAX_CHILDREN) {
        Iterator<Pair<String, Integer>> qIterator = queue.iterator();
        Set<String> topKDimensions = new HashSet<>();
        topKDimensions.add("ALL");
        while (qIterator.hasNext()) {
            topKDimensions.add(qIterator.next().getKey());
        }
        Iterator<StarTreeJsonNode> iterator = json.getChildren().iterator();
        while (iterator.hasNext()) {
            StarTreeJsonNode next = iterator.next();
            if (!topKDimensions.contains(next.getName())) {
                iterator.remove();
            }
        }
    }
    return totalChildNodes;
}
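
Unlike the drain loops in the earlier examples, this code reads the retained top entries through the queue's iterator(), which is fine here because only set membership matters: MinMaxPriorityQueue's iterator traverses elements in no particular order. A small sketch of the distinction, with illustrative values:

import com.google.common.collect.MinMaxPriorityQueue;

import java.util.HashSet;
import java.util.Set;

public class IterationOrderSketch {
    public static void main(String[] args) {
        MinMaxPriorityQueue<Integer> queue = MinMaxPriorityQueue.maximumSize(3).create();
        for (int i : new int[] { 4, 2, 8, 6, 1 }) {
            queue.add(i);
        }

        // iterator() visits the retained elements in no guaranteed order,
        // so it is only suitable for membership-style reads like this one.
        Set<Integer> retained = new HashSet<>();
        for (int i : queue) {
            retained.add(i);
        }
        System.out.println(retained); // the three smallest: 1, 2, 4

        // For sorted output, drain with pollFirst() instead.
        Integer next;
        while ((next = queue.pollFirst()) != null) {
            System.out.println(next); // 1, then 2, then 4
        }
    }
}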

From source file:co.cask.cdap.common.zookeeper.coordination.BalancedAssignmentStrategy.java

/**
 * Balances the assignment by spreading resources across all handlers evenly.
 *
 * @param handlerQueue The priority queue tracking the number of resources assigned to each handler.
 * @param assigner The assigner for changing the assignment.
 * @param maxDiff The maximum allowed difference between the handler with the most resources assigned and
 *                the one with the least.
 */
private <T> void balance(MinMaxPriorityQueue<HandlerSize<T>> handlerQueue, ResourceAssigner<T> assigner,
        int maxDiff) {
    HandlerSize<T> minHandler = handlerQueue.peekFirst();
    HandlerSize<T> maxHandler = handlerQueue.peekLast();

    // Move assignments from the handler with the most assigned partition replicas to the one with
    // the least, until the difference is within the desired range.
    Multimap<T, PartitionReplica> assignments = assigner.get();
    while (maxHandler.getSize() - minHandler.getSize() > maxDiff) {
        PartitionReplica partitionReplica = assignments.get(maxHandler.getHandler()).iterator().next();

        // Remove min and max from the queue, and perform the reassignment.
        handlerQueue.removeFirst();
        handlerQueue.removeLast();

        assigner.set(minHandler.getHandler(), partitionReplica);

        // After the reassignment the handler sizes change, so put both back into the queue for the next iteration.
        handlerQueue.add(minHandler);
        handlerQueue.add(maxHandler);

        minHandler = handlerQueue.peekFirst();
        maxHandler = handlerQueue.peekLast();
    }
}
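
This is the example that genuinely needs both ends of the queue: peekFirst() and peekLast() expose the least- and most-loaded handlers at once, and removing and re-adding the two entries lets the queue re-sort them after their sizes change. A stripped-down sketch of the same double-ended access, with plain integers standing in for the project's HandlerSize type:

import com.google.common.collect.MinMaxPriorityQueue;

public class MinMaxEndsSketch {
    public static void main(String[] args) {
        MinMaxPriorityQueue<Integer> loads = MinMaxPriorityQueue.create();
        loads.add(5);
        loads.add(12);
        loads.add(9);

        // Both ends are visible at once: the least and the greatest element.
        System.out.println("min load: " + loads.peekFirst()); // 5
        System.out.println("max load: " + loads.peekLast());  // 12

        // Move one unit of work from the most loaded to the least loaded.
        int min = loads.removeFirst();
        int max = loads.removeLast();
        loads.add(min + 1); // re-add so the queue re-sorts the updated sizes
        loads.add(max - 1);

        System.out.println(loads.peekFirst() + " .. " + loads.peekLast()); // 6 .. 11
    }
}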