Example usage for com.google.common.collect MinMaxPriorityQueue poll

Introduction

On this page you can find example usages of com.google.common.collect.MinMaxPriorityQueue.poll().

Prototype

@Override
public E poll()
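
As a quick orientation before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects listed below) of what poll() does: with the default natural ordering, poll() removes and returns the least element in the queue, while pollLast() removes and returns the greatest.

import com.google.common.collect.MinMaxPriorityQueue;

public class PollDemo {
    public static void main(String[] args) {
        // With no explicit comparator, elements must be Comparable; natural ordering is used.
        MinMaxPriorityQueue<Integer> queue = MinMaxPriorityQueue.create();
        queue.add(5);
        queue.add(1);
        queue.add(3);

        System.out.println(queue.poll());     // 1  (least element, like PriorityQueue.poll())
        System.out.println(queue.pollLast()); // 5  (greatest element)
        System.out.println(queue.poll());     // 3
    }
}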

Usage

From source file:edu.brandeis.wisedb.scheduler.BestNFirstGraphSearch.java

@Override
public List<Action> schedule(Set<ModelQuery> toSched) {

    FullGraphState first = new FullGraphState(new TreeSet<ModelVM>(), toSched, sla, qtp);
    MinMaxPriorityQueue<StateCost> frontier = MinMaxPriorityQueue.create();
    frontier.add(new StateCost(first, 0, null, null));

    while (!frontier.isEmpty()) {
        log.fine("Frontier size: " + frontier.size());

        PriorityQueue<Action> pq = new PriorityQueue<Action>(new ActionComparator());
        StateCost next = frontier.poll();

        if (next.s.isGoalState()) {
            // we're done
            List<Action> toR = new LinkedList<Action>();
            StateCost last = next;
            while (last.action != null) {
                toR.add(0, last.action);
                last = last.prev;
            }
            log.fine("Reached goal state with following actions: " + toR);

            return toR;
        }

        for (Action a : next.s.getPossibleActions()) {
            int cost = 0;
            FullGraphState nextState = next.s.getNewStateForAction(a);

            cost += h.predictCostToEnd(nextState);
            //cost += nextState.getExecutionCost();

            a.computedCost = cost;
            log.finer("Added action " + a + " to the frontier");
            pq.add(a);
        }

        if (pq.isEmpty()) {
            log.severe("There was no selectable action for state: " + next);
            return null;
        }

        for (int i = 0; i < toTry; i++) {
            Action nextBest = pq.poll();
            if (nextBest == null) {
                log.fine("Unable to get " + (i + 1) + "th action for state " + next);
                break;
            }
            FullGraphState c = next.s.getNewStateForAction(nextBest);
            StateCost candidate = new StateCost(c, c.getExecutionCost(), nextBest, next);
            frontier.add(candidate);
        }

        while (frontier.size() > maxFrontierSize) {
            frontier.removeLast();
        }
    }

    return null;
}

From source file:de.tudarmstadt.ukp.dkpro.core.api.frequency.util.FrequencyDistribution.java

/**
 * Returns the n most frequent samples in the distribution. The ordering within a group of
 * samples with the same frequency is undefined.
 *
 * @param n
 *            the number of most frequent samples to return.
 * @return the n most frequent samples in the distribution.
 */
public List<T> getMostFrequentSamples(int n) {

    MinMaxPriorityQueue<TermFreqTuple<T>> topN = MinMaxPriorityQueue.maximumSize(n).create();

    for (T key : this.getKeys()) {
        topN.add(new TermFreqTuple<T>(key, this.getCount(key)));
    }

    List<T> topNList = new ArrayList<T>();
    while (!topN.isEmpty()) {
        topNList.add(topN.poll().getKey());
    }

    return topNList;
}
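
The same top-n pattern works without DKPro's TermFreqTuple class; the tuple's comparator presumably ranks higher frequencies first, so that maximumSize(n) evicts the least frequent entries and the poll() loop above yields samples from most to least frequent. Below is a hypothetical, self-contained variant of that pattern using Map.Entry and an explicit descending-count comparator:

import com.google.common.collect.MinMaxPriorityQueue;

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Map;

public class TopNCounts {

    // Returns the keys of the n highest counts, most frequent first.
    static List<String> topN(Map<String, Long> counts, int n) {
        // Descending count order: the "greatest" element under this comparator is the
        // lowest count, so a queue capped with maximumSize(n) evicts the least frequent.
        Comparator<Map.Entry<String, Long>> byCountDesc = Comparator
                .comparingLong((Map.Entry<String, Long> e) -> e.getValue()).reversed();

        MinMaxPriorityQueue<Map.Entry<String, Long>> topN = MinMaxPriorityQueue.orderedBy(byCountDesc)
                .maximumSize(n).create();
        for (Map.Entry<String, Long> entry : counts.entrySet()) {
            topN.add(entry);
        }

        List<String> result = new ArrayList<>();
        while (!topN.isEmpty()) {
            result.add(topN.poll().getKey()); // poll() returns the highest remaining count
        }
        return result;
    }
}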

From source file:gobblin.source.extractor.extract.kafka.workunit.packer.KafkaWorkUnitPacker.java

/**
 * Pack a list of {@link WorkUnit}s into a smaller number of {@link MultiWorkUnit}s,
 * using the worst-fit-decreasing algorithm.
 *
 * Each {@link WorkUnit} is assigned to the {@link MultiWorkUnit} with the smallest load.
 */
protected List<WorkUnit> worstFitDecreasingBinPacking(List<WorkUnit> groups, int numOfMultiWorkUnits) {

    // Sort workunit groups by data size desc
    Collections.sort(groups, LOAD_DESC_COMPARATOR);

    MinMaxPriorityQueue<MultiWorkUnit> pQueue = MinMaxPriorityQueue.orderedBy(LOAD_ASC_COMPARATOR)
            .expectedSize(numOfMultiWorkUnits).create();
    for (int i = 0; i < numOfMultiWorkUnits; i++) {
        MultiWorkUnit multiWorkUnit = MultiWorkUnit.createEmpty();
        setWorkUnitEstSize(multiWorkUnit, 0);
        pQueue.add(multiWorkUnit);
    }

    for (WorkUnit group : groups) {
        MultiWorkUnit lightestMultiWorkUnit = pQueue.poll();
        addWorkUnitToMultiWorkUnit(group, lightestMultiWorkUnit);
        pQueue.add(lightestMultiWorkUnit);
    }

    logMultiWorkUnitInfo(pQueue);

    double minLoad = getWorkUnitEstLoad(pQueue.peekFirst());
    double maxLoad = getWorkUnitEstLoad(pQueue.peekLast());
    LOG.info(String.format("Min load of multiWorkUnit = %f; Max load of multiWorkUnit = %f; Diff = %f%%",
            minLoad, maxLoad, (maxLoad - minLoad) / maxLoad * 100.0));

    this.state.setProp(MIN_MULTIWORKUNIT_LOAD, minLoad);
    this.state.setProp(MAX_MULTIWORKUNIT_LOAD, maxLoad);

    List<WorkUnit> multiWorkUnits = Lists.newArrayList();
    multiWorkUnits.addAll(pQueue);
    return multiWorkUnits;
}
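
For readers unfamiliar with the Gobblin classes used above (MultiWorkUnit, setWorkUnitEstSize, addWorkUnitToMultiWorkUnit), the following standalone sketch shows the same worst-fit-decreasing idea with plain long loads and a hypothetical Bin class: the loads are sorted in descending order, and each load goes to the bin returned by poll(), i.e. the currently lightest one, which is then re-added so the queue re-ranks it under its new total.

import com.google.common.collect.MinMaxPriorityQueue;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

public class WorstFitDecreasingDemo {

    // Hypothetical bin that tracks the items assigned to it and their total load.
    static final class Bin {
        final List<Long> items = new ArrayList<>();
        long load;
    }

    static List<Bin> pack(List<Long> loads, int numBins) {
        // "Decreasing": place the largest loads first.
        List<Long> sorted = new ArrayList<>(loads);
        sorted.sort(Collections.reverseOrder());

        // Ascending load order, so poll() always hands back the lightest bin.
        MinMaxPriorityQueue<Bin> bins = MinMaxPriorityQueue
                .orderedBy(Comparator.comparingLong((Bin b) -> b.load))
                .expectedSize(numBins).create();
        for (int i = 0; i < numBins; i++) {
            bins.add(new Bin());
        }

        // "Worst fit": each load is assigned to the bin with the most remaining room,
        // i.e. the lightest one, which is re-inserted so the queue re-orders it.
        for (long load : sorted) {
            Bin lightest = bins.poll();
            lightest.items.add(load);
            lightest.load += load;
            bins.add(lightest);
        }
        return new ArrayList<>(bins);
    }
}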

From source file:org.apache.druid.query.groupby.orderby.TopNSequence.java

public TopNSequence(final Sequence<T> input, final Ordering<T> ordering, final int limit) {
    super(new IteratorMaker<T, Iterator<T>>() {
        @Override
        public Iterator<T> make() {
            if (limit <= 0) {
                return Collections.emptyIterator();
            }

            // Materialize the topN values
            final MinMaxPriorityQueue<T> queue = MinMaxPriorityQueue.orderedBy(ordering).maximumSize(limit)
                    .create();

            input.accumulate(queue, new Accumulator<MinMaxPriorityQueue<T>, T>() {
                @Override
                public MinMaxPriorityQueue<T> accumulate(MinMaxPriorityQueue<T> theQueue, T row) {
                    theQueue.offer(row);
                    return theQueue;
                }
            });

            // Now return them when asked
            return new Iterator<T>() {
                @Override
                public boolean hasNext() {
                    return !queue.isEmpty();
                }

                @Override
                public T next() {
                    return queue.poll();
                }

                @Override
                public void remove() {
                    throw new UnsupportedOperationException();
                }
            };
        }

        @Override
        public void cleanup(Iterator<T> rowIterator) {
            // Nothing to do
        }
    });
}

From source file:io.druid.query.groupby.orderby.TopNSequence.java

public TopNSequence(final Sequence<T> input, final Ordering<T> ordering, final int limit) {
    super(new IteratorMaker<T, Iterator<T>>() {
        @Override
        public Iterator<T> make() {
            if (limit <= 0) {
                return Iterators.emptyIterator();
            }

            // Materialize the topN values
            final MinMaxPriorityQueue<T> queue = MinMaxPriorityQueue.orderedBy(ordering).maximumSize(limit)
                    .create();

            input.accumulate(queue, new Accumulator<MinMaxPriorityQueue<T>, T>() {
                @Override
                public MinMaxPriorityQueue<T> accumulate(MinMaxPriorityQueue<T> theQueue, T row) {
                    theQueue.offer(row);
                    return theQueue;
                }
            });

            // Now return them when asked
            return new Iterator<T>() {
                @Override
                public boolean hasNext() {
                    return !queue.isEmpty();
                }

                @Override
                public T next() {
                    return queue.poll();
                }

                @Override
                public void remove() {
                    throw new UnsupportedOperationException();
                }
            };
        }

        @Override
        public void cleanup(Iterator<T> rowIterator) {
            // Nothing to do
        }
    });
}
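
The two TopNSequence snippets above (the current org.apache.druid package and its older io.druid copy) follow the same pattern: accumulate rows into a queue bounded by maximumSize(limit), then expose the survivors lazily through an Iterator whose next() simply calls poll(). A condensed, standalone sketch of that iterator-adapter idea, assuming the Ordering ranks the most desirable rows first so that poll() emits them in rank order:

import com.google.common.collect.MinMaxPriorityQueue;
import com.google.common.collect.Ordering;

import java.util.Iterator;
import java.util.List;

public class TopNIteratorDemo {

    // Keeps only the `limit` best elements and drains them lazily via poll().
    static <T> Iterator<T> topN(List<T> input, Ordering<T> ordering, int limit) {
        final MinMaxPriorityQueue<T> queue = MinMaxPriorityQueue.orderedBy(ordering).maximumSize(limit)
                .create();
        for (T row : input) {
            queue.offer(row); // once at capacity, the element that is greatest under `ordering` is dropped
        }

        return new Iterator<T>() {
            @Override
            public boolean hasNext() {
                return !queue.isEmpty();
            }

            @Override
            public T next() {
                // Removes the least element under `ordering`, i.e. the best-ranked remaining row.
                return queue.poll();
            }
        };
    }
}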

From source file:gobblin.util.binpacking.WorstFitDecreasingBinPacking.java

@Override
@OverridingMethodsMustInvokeSuper
public List<WorkUnit> pack(List<WorkUnit> workUnitsIn, WorkUnitWeighter weighter) {

    if (this.maxWeightPerUnit <= 0) { // just return the input
        return workUnitsIn;
    }

    List<WorkUnit> workUnits = Lists.newArrayList(workUnitsIn);

    long smallUnitSize = 0; // total size of work units smaller than maxWeightPerUnit
    int largeUnits = 0; // number of work units larger than maxWeightPerUnit
    for (WorkUnit workUnit : workUnits) {
        long weight = weighter.weight(workUnit);
        if (weight <= this.maxWeightPerUnit) {
            smallUnitSize += weight;
        } else {
            largeUnits++;
        }
    }
    int estimateByWeight = largeUnits + (int) ((smallUnitSize - 1) / this.maxWeightPerUnit) + 1;
    int estimatedMultiWorkUnits = Math.min(estimateByWeight, workUnits.size());

    MinMaxPriorityQueue<MultiWorkUnit> pQueue = MinMaxPriorityQueue.orderedBy(new MultiWorkUnitComparator())
            .create();
    for (int i = 0; i < estimatedMultiWorkUnits; i++) {
        pQueue.add(MultiWorkUnit.createEmpty());
    }

    Collections.sort(workUnits, Collections.reverseOrder(new WeightComparator(weighter)));

    for (WorkUnit workUnit : workUnits) {
        MultiWorkUnit lightestMultiWorkUnit = pQueue.peek();
        long weight = Math.max(1, weighter.weight(workUnit));
        long multiWorkUnitWeight = getMultiWorkUnitWeight(lightestMultiWorkUnit);
        if (multiWorkUnitWeight == 0 || (weight + multiWorkUnitWeight <= this.maxWeightPerUnit
                && weight + multiWorkUnitWeight > multiWorkUnitWeight)) { // check for overflow
            // if it fits, add it to lightest work unit
            addToMultiWorkUnit(lightestMultiWorkUnit, workUnit, weight);
            pQueue.poll();
            pQueue.add(lightestMultiWorkUnit);
        } else {
            // if doesn't fit in lightest multi work unit, create a new work unit for it
            MultiWorkUnit newMultiWorkUnit = MultiWorkUnit.createEmpty();
            addToMultiWorkUnit(newMultiWorkUnit, workUnit, weight);
            pQueue.add(newMultiWorkUnit);
        }
    }

    return Lists.<WorkUnit>newArrayList(Iterables.filter(pQueue, new Predicate<MultiWorkUnit>() {
        @Override
        public boolean apply(@Nullable MultiWorkUnit input) {
            return getMultiWorkUnitWeight(input) > 0;
        }
    }));
}

From source file:org.apache.gobblin.util.binpacking.WorstFitDecreasingBinPacking.java

@Override
@OverridingMethodsMustInvokeSuper
public List<WorkUnit> pack(List<WorkUnit> workUnitsIn, WorkUnitWeighter weighter) {

    if (this.maxWeightPerUnit <= 0) { // just return the input
        return workUnitsIn;
    }

    List<WorkUnit> workUnits = Lists.newArrayList(workUnitsIn);

    long smallUnitSize = 0; // total size of work units smaller than maxWeightPerUnit
    int largeUnits = 0; // number of work units larger than maxWeightPerUnit
    for (WorkUnit workUnit : workUnits) {
        long weight = weighter.weight(workUnit);
        if (weight <= this.maxWeightPerUnit) {
            smallUnitSize += weight;
        } else {
            largeUnits++;
        }
    }
    int estimateByWeight = largeUnits + (int) ((smallUnitSize - 1) / this.maxWeightPerUnit) + 1;
    int estimatedMultiWorkUnits = Math.min(estimateByWeight, workUnits.size());

    MinMaxPriorityQueue<MultiWorkUnit> pQueue = MinMaxPriorityQueue.orderedBy(new MultiWorkUnitComparator())
            .create();
    for (int i = 0; i < estimatedMultiWorkUnits; i++) {
        pQueue.add(MultiWorkUnit.createEmpty());
    }

    Collections.sort(workUnits, Collections.reverseOrder(new WeightComparator(weighter)));

    for (WorkUnit workUnit : workUnits) {
        MultiWorkUnit lightestMultiWorkUnit = pQueue.peek();
        long weight = Math.max(1, weighter.weight(workUnit));
        long multiWorkUnitWeight = getMultiWorkUnitWeight(lightestMultiWorkUnit);
        if (multiWorkUnitWeight == 0 || (weight + multiWorkUnitWeight <= this.maxWeightPerUnit
                && weight + multiWorkUnitWeight > multiWorkUnitWeight)) { // check for overflow
            // if it fits, add it to lightest work unit
            addToMultiWorkUnit(lightestMultiWorkUnit, workUnit, weight);
            pQueue.poll();
            pQueue.add(lightestMultiWorkUnit);
        } else {
            // if doesn't fit in lightest multi work unit, create a new work unit for it
            MultiWorkUnit newMultiWorkUnit = MultiWorkUnit.createEmpty();
            addToMultiWorkUnit(newMultiWorkUnit, workUnit, weight);
            pQueue.add(newMultiWorkUnit);
        }
    }

    return Lists.<WorkUnit>newArrayList(Iterables.filter(pQueue, new Predicate<MultiWorkUnit>() {
        @Override
        @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_PARAMETER_MUST_BE_NONNULL_BUT_MARKED_AS_NULLABLE", justification = "Allowing nullable values")
        public boolean apply(@Nullable MultiWorkUnit input) {
            return getMultiWorkUnitWeight(input) > 0;
        }
    }));
}

From source file:de.tudarmstadt.ukp.dkpro.tc.features.ngram.base.LuceneFeatureExtractorBase.java

@Override
protected FrequencyDistribution<String> getTopNgrams() throws ResourceInitializationException {

    FrequencyDistribution<String> topNGrams = new FrequencyDistribution<String>();

    MinMaxPriorityQueue<TermFreqTuple> topN = MinMaxPriorityQueue.maximumSize(getTopN()).create();

    long ngramVocabularySize = 0;
    IndexReader reader;
    try {
        reader = DirectoryReader.open(FSDirectory.open(luceneDir));
        Fields fields = MultiFields.getFields(reader);
        if (fields != null) {
            Terms terms = fields.terms(getFieldName());
            if (terms != null) {
                TermsEnum termsEnum = terms.iterator(null);
                BytesRef text = null;
                while ((text = termsEnum.next()) != null) {
                    String term = text.utf8ToString();
                    long freq = termsEnum.totalTermFreq();
                    if (passesScreening(term)) {
                        topN.add(new TermFreqTuple(term, freq));
                        ngramVocabularySize += freq;
                    }
                }
            }
        }
    } catch (Exception e) {
        throw new ResourceInitializationException(e);
    }

    int size = topN.size();
    for (int i = 0; i < size; i++) {
        TermFreqTuple tuple = topN.poll();
        long absCount = tuple.getFreq();
        double relFrequency = ((double) absCount) / ngramVocabularySize;

        if (relFrequency >= ngramFreqThreshold)
            topNGrams.addSample(tuple.getTerm(), tuple.getFreq());
    }

    getLogger().log(Level.INFO, "+++ SELECTING THE " + topNGrams.getB() + " MOST FREQUENT NGRAMS");

    return topNGrams;
}

From source file:org.dkpro.tc.features.ngram.base.LuceneFeatureExtractorBase.java

@Override
protected FrequencyDistribution<String> getTopNgrams() throws ResourceInitializationException {

    FrequencyDistribution<String> topNGrams = new FrequencyDistribution<String>();

    MinMaxPriorityQueue<TermFreqTuple> topN = MinMaxPriorityQueue.maximumSize(getTopN()).create();

    long ngramVocabularySize = 0;
    IndexReader reader;
    try {
        reader = DirectoryReader.open(FSDirectory.open(luceneDir));
        Fields fields = MultiFields.getFields(reader);
        if (fields != null) {
            Terms terms = fields.terms(getFieldName());
            if (terms != null) {
                TermsEnum termsEnum = terms.iterator(null);
                BytesRef text = null;
                while ((text = termsEnum.next()) != null) {
                    String term = text.utf8ToString();
                    long freq = termsEnum.totalTermFreq();
                    if (passesScreening(term)) {
                        topN.add(new TermFreqTuple(term, freq));
                        ngramVocabularySize += freq;
                    }
                }
            }
        }
    } catch (Exception e) {
        throw new ResourceInitializationException(e);
    }

    int size = topN.size();
    for (int i = 0; i < size; i++) {
        TermFreqTuple tuple = topN.poll();
        long absCount = tuple.getFreq();
        double relFrequency = ((double) absCount) / ngramVocabularySize;

        if (relFrequency >= ngramFreqThreshold) {
            topNGrams.addSample(tuple.getTerm(), tuple.getFreq());
        }
    }

    logSelectionProcess(topNGrams.getB());

    return topNGrams;
}

From source file:org.dkpro.tc.features.pair.core.ngram.LuceneNGramPFE.java

private FrequencyDistribution<String> getTopNgrams(int topNgramThreshold, String fieldName)
        throws ResourceInitializationException {

    FrequencyDistribution<String> topNGrams = new FrequencyDistribution<String>();

    MinMaxPriorityQueue<TermFreqTuple> topN = MinMaxPriorityQueue.maximumSize(topNgramThreshold).create();
    IndexReader reader;
    try {
        reader = DirectoryReader.open(FSDirectory.open(luceneDir));
        Fields fields = MultiFields.getFields(reader);
        if (fields != null) {
            Terms terms = fields.terms(fieldName);
            if (terms != null) {
                TermsEnum termsEnum = terms.iterator(null);
                BytesRef text = null;
                while ((text = termsEnum.next()) != null) {
                    String term = text.utf8ToString();
                    long freq = termsEnum.totalTermFreq();
                    topN.add(new TermFreqTuple(term, freq));
                }
            }
        }
    } catch (Exception e) {
        throw new ResourceInitializationException(e);
    }

    int size = topN.size();
    for (int i = 0; i < size; i++) {
        TermFreqTuple tuple = topN.poll();
        // System.out.println(tuple.getTerm() + " - " + tuple.getFreq());
        topNGrams.addSample(tuple.getTerm(), tuple.getFreq());
    }

    return topNGrams;
}