Example usage for com.google.common.collect MinMaxPriorityQueue orderedBy

Introduction

On this page you can find example usages of com.google.common.collect MinMaxPriorityQueue.orderedBy.

Prototype

public static <B> Builder<B> orderedBy(Comparator<B> comparator) 

Document

Creates and returns a new builder, configured to build MinMaxPriorityQueue instances that use comparator to determine the least and greatest elements.
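
Before the project examples below, here is a minimal, self-contained sketch of the builder chain; the class name and values are illustrative and not taken from any of the sources:

import java.util.Comparator;

import com.google.common.collect.MinMaxPriorityQueue;

public class OrderedByExample {
    public static void main(String[] args) {
        // A double-ended priority queue in natural order, capped at 3 elements;
        // once full, adding another element evicts the greatest one.
        MinMaxPriorityQueue<Integer> queue = MinMaxPriorityQueue
                .orderedBy(Comparator.<Integer>naturalOrder())
                .maximumSize(3)
                .create();

        for (int value : new int[] { 5, 1, 9, 3 }) {
            queue.offer(value);
        }

        System.out.println(queue.peekFirst()); // 1 (least element)
        System.out.println(queue.peekLast());  // 5 (greatest remaining; 9 was evicted)
    }
}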

Usage

From source file:org.apache.druid.indexing.overlord.autoscaling.ScalingStats.java

public ScalingStats(int capacity) {
    if (capacity == 0) {
        this.recentEvents = MinMaxPriorityQueue.orderedBy(COMPARATOR).create();
    } else {
        this.recentEvents = MinMaxPriorityQueue.orderedBy(COMPARATOR).maximumSize(capacity).create();
    }
}

From source file:io.druid.query.select.SelectResultValueBuilder.java

private void instantiatePQueue(int threshold, final Comparator comparator) {
    this.pQueue = MinMaxPriorityQueue.orderedBy(comparator).maximumSize(threshold).create();
}

From source file:OpenSearcherAspect.java

public OpenSearcherAspect(int nSlowLoadTimes) {
    NUM_SLOWEST_LOAD_TIMES = nSlowLoadTimes;
    loadTimes = MinMaxPriorityQueue.orderedBy(new Comparator<Long>() {

        @Override
        public int compare(Long o1, Long o2) {
            return -o1.compareTo(o2);
        }
    }).maximumSize(NUM_SLOWEST_LOAD_TIMES).create();
}
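
On Java 8 and later, the anonymous reversed comparator above can be written more compactly. A hedged equivalent, reusing the names from this example:

loadTimes = MinMaxPriorityQueue.orderedBy(Comparator.<Long>reverseOrder())
        .maximumSize(NUM_SLOWEST_LOAD_TIMES).create();
// Under the reversed order, the "greatest" element is the numerically smallest
// load time, so maximumSize() evicts fast loads and the queue keeps the N slowest.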

From source file:org.thingsboard.server.common.transport.quota.inmemory.IntervalRegistryLogger.java

protected Map<String, Long> getTopElements(Map<String, Long> countMap) {
    MinMaxPriorityQueue<Map.Entry<String, Long>> topQueue = MinMaxPriorityQueue.orderedBy(
            Comparator.comparing((Function<Map.Entry<String, Long>, Long>) Map.Entry::getValue).reversed())
            .maximumSize(topSize).create(countMap.entrySet());

    return topQueue.stream().collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}

From source file:main.okapi.graphs.maxbmatching.MaxBMatching.java

private void sendUpdates(Vertex<LongWritable, IntWritable, MBMEdgeValue> vertex) {
    final MBMMessage proposeMsg = new MBMMessage(vertex.getId(), State.PROPOSED);

    // get top-capacity available edges by weight
    final int capacity = vertex.getValue().get();
    MinMaxPriorityQueue<Entry<LongWritable, MBMEdgeValue>> maxHeap = MinMaxPriorityQueue
            .orderedBy(new Comparator<Entry<LongWritable, MBMEdgeValue>>() {
                @Override
                public int compare(Entry<LongWritable, MBMEdgeValue> o1, Entry<LongWritable, MBMEdgeValue> o2) {
                    return -1 * Double.compare(o1.getValue().getWeight(), o2.getValue().getWeight()); // reverse comparator, largest weight first
                }
            }).maximumSize(capacity).create();
    // prepare list of available edges
    for (Edge<LongWritable, MBMEdgeValue> e : vertex.getEdges()) {
        if (e.getValue().getState() == State.DEFAULT || e.getValue().getState() == State.PROPOSED) {
            maxHeap.add(Maps.immutableEntry(e.getTargetVertexId(), e.getValue()));
        }
    }

    if (maxHeap.isEmpty()) {
        // all remaining edges are INCLUDED, nothing else to do
        checkSolution(vertex.getEdges());
        vertex.voteToHalt();
    } else {
        // propose up to capacity
        while (!maxHeap.isEmpty()) {
            Entry<LongWritable, MBMEdgeValue> entry = maxHeap.removeFirst();
            vertex.getEdgeValue(entry.getKey()).setState(State.PROPOSED);
            sendMessage(entry.getKey(), proposeMsg);
        }
    }
}
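
The negated Double.compare call above encodes a descending-by-weight order. A hedged Java 8 alternative that avoids the manual negation, assuming the same Entry and MBMEdgeValue types used in this example:

Comparator<Entry<LongWritable, MBMEdgeValue>> byWeightDesc = Comparator
        .comparingDouble((Entry<LongWritable, MBMEdgeValue> e) -> e.getValue().getWeight())
        .reversed();
MinMaxPriorityQueue<Entry<LongWritable, MBMEdgeValue>> maxHeap = MinMaxPriorityQueue
        .orderedBy(byWeightDesc).maximumSize(capacity).create();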

From source file:gobblin.util.binpacking.WorstFitDecreasingBinPacking.java

@Override
    @OverridingMethodsMustInvokeSuper
public List<WorkUnit> pack(List<WorkUnit> workUnitsIn, WorkUnitWeighter weighter) {

    if (this.maxWeightPerUnit <= 0) { // just return the input
        return workUnitsIn;
    }

    List<WorkUnit> workUnits = Lists.newArrayList(workUnitsIn);

    long smallUnitSize = 0; // total size of work units smaller than maxWeightPerUnit
    int largeUnits = 0; // number of work units larger than maxWeightPerUnit
    for (WorkUnit workUnit : workUnits) {
        long weight = weighter.weight(workUnit);
        if (weight <= this.maxWeightPerUnit) {
            smallUnitSize += weight;
        } else {
            largeUnits++;
        }
    }
    int estimateByWeight = largeUnits + (int) ((smallUnitSize - 1) / this.maxWeightPerUnit) + 1;
    int estimatedMultiWorkUnits = Math.min(estimateByWeight, workUnits.size());

    MinMaxPriorityQueue<MultiWorkUnit> pQueue = MinMaxPriorityQueue.orderedBy(new MultiWorkUnitComparator())
            .create();
    for (int i = 0; i < estimatedMultiWorkUnits; i++) {
        pQueue.add(MultiWorkUnit.createEmpty());
    }

    Collections.sort(workUnits, Collections.reverseOrder(new WeightComparator(weighter)));

    for (WorkUnit workUnit : workUnits) {
        MultiWorkUnit lightestMultiWorkUnit = pQueue.peek();
        long weight = Math.max(1, weighter.weight(workUnit));
        long multiWorkUnitWeight = getMultiWorkUnitWeight(lightestMultiWorkUnit);
        if (multiWorkUnitWeight == 0 || (weight + multiWorkUnitWeight <= this.maxWeightPerUnit
                && weight + multiWorkUnitWeight > multiWorkUnitWeight)) { // check for overflow
            // if it fits, add it to lightest work unit
            addToMultiWorkUnit(lightestMultiWorkUnit, workUnit, weight);
            pQueue.poll();
            pQueue.add(lightestMultiWorkUnit);
        } else {
            // if doesn't fit in lightest multi work unit, create a new work unit for it
            MultiWorkUnit newMultiWorkUnit = MultiWorkUnit.createEmpty();
            addToMultiWorkUnit(newMultiWorkUnit, workUnit, weight);
            pQueue.add(newMultiWorkUnit);
        }
    }

    return Lists.<WorkUnit>newArrayList(Iterables.filter(pQueue, new Predicate<MultiWorkUnit>() {
        @Override
        public boolean apply(@Nullable MultiWorkUnit input) {
            return getMultiWorkUnitWeight(input) > 0;
        }
    }));
}
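
The worst-fit step above relies on peek() returning the least-loaded MultiWorkUnit. MinMaxPriorityQueue keeps both ends of the ordering accessible in constant time, which a small standalone sketch (illustrative weights only) makes concrete:

MinMaxPriorityQueue<Long> binWeights = MinMaxPriorityQueue.create();
binWeights.addAll(java.util.Arrays.asList(40L, 10L, 90L));
binWeights.peekFirst(); // 10, the lightest bin (the element peek() also returns)
binWeights.peekLast();  // 90, the heaviest bin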

From source file:com.streamsets.pipeline.stage.cloudstorage.origin.GoogleCloudStorageSource.java

@Override
protected List<ConfigIssue> init() {
    // Validate configuration values and open any required resources.
    List<ConfigIssue> issues = gcsOriginConfig.init(getContext(), super.init());
    minMaxPriorityQueue = MinMaxPriorityQueue.orderedBy((Blob o1, Blob o2) -> {
        int result = o1.getUpdateTime().compareTo(o2.getUpdateTime());
        if (result != 0) {
            return result;
        }
        // Same modified time: use the generatedId (bucket/blob name/timestamp) to break the tie.
        return o1.getGeneratedId().compareTo(o2.getGeneratedId());
    }).maximumSize(gcsOriginConfig.maxResultQueueSize).create();
    antPathMatcher = new AntPathMatcher();

    gcsOriginConfig.credentials.getCredentialsProvider(getContext(), issues)
            .ifPresent(p -> credentialsProvider = p);

    try {
        storage = StorageOptions.newBuilder().setCredentials(credentialsProvider.getCredentials()).build()
                .getService();
    } catch (IOException e) {
        LOG.error("Error when initializing storage. Reason : {}", e);
        issues.add(getContext().createConfigIssue(Groups.CREDENTIALS.name(),
                "gcsOriginConfig.credentials.credentialsProvider", Errors.GCS_01, e));
    }

    rateLimitElEval = FileRefUtil.createElEvalForRateLimit(getContext());
    rateLimitElVars = getContext().createELVars();
    errorBlobHandler = new GcsObjectPostProcessingHandler(storage, gcsOriginConfig.gcsOriginErrorConfig);
    return issues;
}
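
The tie-breaking lambda above can also be expressed with Java 8 comparator chaining. A hedged equivalent, assuming the same com.google.cloud.storage.Blob accessors used in this example:

minMaxPriorityQueue = MinMaxPriorityQueue
        .orderedBy(Comparator.comparing(Blob::getUpdateTime)
                .thenComparing(Blob::getGeneratedId))
        .maximumSize(gcsOriginConfig.maxResultQueueSize).create();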

From source file:org.pentaho.di.trans.steps.superlative.SuperlativeFilter.java

@Override
public boolean processRow(StepMetaInterface smi, StepDataInterface sdi) throws KettleException {

    Object[] r = getRow(); // get row, set busy!

    if (first) {
        outputRowMeta = getInputRowMeta().clone();
        valueIndex = outputRowMeta.indexOfValue(meta.getValueFieldName());
        if (valueIndex < 0 && (topFilter || bottomFilter)) {
            throw new KettleException(BaseMessages.getString(PKG, "SuperlativeFilter.Error.NoValueField",
                    meta.getValueFieldName()));
        }
        valueFieldType = outputRowMeta.getValueMeta(valueIndex);
        FilterType metaFilterType = meta.getFilterType();
        topFilter = FilterType.Top.equals(metaFilterType);
        bottomFilter = FilterType.Bottom.equals(metaFilterType);
        firstFilter = FilterType.First.equals(metaFilterType);
        lastFilter = FilterType.Last.equals(metaFilterType);

        if (topFilter || bottomFilter) {
            rowComparator = new RowValueMetaComparator(valueFieldType, valueIndex, topFilter);
            rowQ = MinMaxPriorityQueue.orderedBy(rowComparator).expectedSize((int) meta.getNumRowsToSave())
                    .create();
        } else if (firstFilter || lastFilter) {
            rowQ = new LinkedList<Object[]>();
        } else
            throw new KettleException("Meta filter type = " + metaFilterType);

        first = false;
    }

    if (r == null) {
        // no more input to be expected...
        return putQueue(topFilter || bottomFilter);
    }

    // Determine whether to save row
    if ((rowQ.size() < meta.getNumRowsToSave()) || firstFilter || lastFilter) {
        rowQ.add(r);
    } else {
        Object[] head = rowQ.peek();

        if (rowComparator.compare(r, head) >= 0) {
            rowQ.add(r);
        }
    }

    // If we're returning the first N rows and we have N rows, we're done!
    if (firstFilter && (rowQ.size() == meta.getNumRowsToSave())) {
        return putQueue(false);
    }

    if (rowQ.size() > meta.getNumRowsToSave()) {
        rowQ.poll();
    }

    if (checkFeedback(getLinesRead())) {
        if (log.isBasic())
            logBasic(BaseMessages.getString(PKG, "SuperlativeFilter.Log.LineNumber") + getLinesRead());
    }

    return true;
}
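
Note that expectedSize(int), used when building rowQ above, is only a sizing hint for the queue's backing storage and does not bound its size; that is why the code trims manually with poll() once rowQ.size() exceeds meta.getNumRowsToSave(). A minimal standalone contrast (illustrative, not from the source):

// expectedSize() pre-sizes internal storage; the queue can still grow freely.
MinMaxPriorityQueue<Integer> hinted = MinMaxPriorityQueue.expectedSize(10).create();

// maximumSize() enforces a hard bound: once full, adding another element
// evicts the greatest element under the comparator (possibly the one just added).
MinMaxPriorityQueue<Integer> bounded = MinMaxPriorityQueue.maximumSize(10).create();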

From source file:com.datatorrent.lib.bucket.BucketManagerImpl.java

public BucketManagerImpl() {
    eventQueue = new LinkedBlockingQueue<Long>();
    evictionCandidates = Sets.newHashSet();
    dirtyBuckets = Maps.newConcurrentMap();
    bucketHeap = MinMaxPriorityQueue.orderedBy(new Comparator<Bucket<T>>() {
        @Override
        public int compare(Bucket<T> bucket1, Bucket<T> bucket2) {
            if (bucket1.lastUpdateTime() < bucket2.lastUpdateTime()) {
                return -1;
            }
            if (bucket1.lastUpdateTime() > bucket2.lastUpdateTime()) {
                return 1;
            }
            return 0;
        }
    }).create();
    lock = new Lock();
    committedWindow = -1;

    noOfBuckets = DEF_NUM_BUCKETS;
    noOfBucketsInMemory = DEF_NUM_BUCKETS_MEM;
    maxNoOfBucketsInMemory = DEF_NUM_BUCKETS_MEM + 100;
    millisPreventingBucketEviction = DEF_MILLIS_PREVENTING_EVICTION;
    writeEventKeysOnly = true;
}
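
On Java 8 and later, the hand-written comparator above collapses to a single comparingLong call. A hedged equivalent, assuming Bucket<T>.lastUpdateTime() returns a long as the comparisons above imply:

bucketHeap = MinMaxPriorityQueue
        .orderedBy(Comparator.comparingLong((Bucket<T> b) -> b.lastUpdateTime()))
        .create();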