Example usage for com.google.common.collect MinMaxPriorityQueue removeFirst

List of usage examples for com.google.common.collect MinMaxPriorityQueue removeFirst

Introduction

On this page you can find example usage for com.google.common.collect MinMaxPriorityQueue removeFirst.

Prototype

public E removeFirst() 

Document

Removes and returns the least element of this queue.
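As a quick illustration before the project examples, here is a minimal self-contained sketch (written for this page, not taken from any of the projects below): removeFirst() returns the current least element according to the queue's comparator, so repeatedly calling it drains the queue in ascending order, and unlike pollFirst() it throws NoSuchElementException once the queue is empty.

import com.google.common.collect.MinMaxPriorityQueue;

public class RemoveFirstExample {
    public static void main(String[] args) {
        // Natural ordering; removeFirst() always yields the current least element.
        MinMaxPriorityQueue<Integer> queue = MinMaxPriorityQueue.create();
        queue.add(5);
        queue.add(1);
        queue.add(3);

        while (!queue.isEmpty()) {
            System.out.println(queue.removeFirst()); // prints 1, then 3, then 5
        }
        // A further removeFirst() call here would throw NoSuchElementException;
        // pollFirst() returns null instead when the queue is empty.
    }
}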

Usage

From source file:org.commoncrawl.mapred.pipelineV3.domainmeta.blogs.feedurlid.FeedUrlIdStep.java

public static ArrayList<URLCandidate> drainToArrayList(MinMaxPriorityQueue<URLCandidate> queue) {
    int queueSize = queue.size();
    ArrayList<URLCandidate> list = new ArrayList<URLCandidate>(queueSize);
    for (int i = 0; i < queueSize; ++i) {
        list.add(queue.removeFirst());
    }
    return list;
}

From source file:com.github.davidmoten.rx.operators.SortedOutputQueue.java

@Override
public Subscriber<? super T> call(final Subscriber<? super T> child) {
    final MinMaxPriorityQueue<T> q = MinMaxPriorityQueue.orderedBy(comparator).maximumSize(maximumSize)
            .create();
    return new Subscriber<T>(child) {

        @Override
        public void onCompleted() {
            // removeFirst() throws NoSuchElementException on an empty queue,
            // so check isEmpty() rather than testing for a null return.
            while (!q.isEmpty()) {
                if (isUnsubscribed())
                    return;
                child.onNext(q.removeFirst());
            }
            if (isUnsubscribed())
                return;
            child.onCompleted();
        }

        @Override
        public void onError(Throwable t) {
            if (!isUnsubscribed())
                child.onError(t);
        }

        @Override
        public void onNext(T t) {
            if (!isUnsubscribed())
                q.add(t);
        }
    };
}

From source file:com.griddynamics.jagger.diagnostics.thread.sampling.RuntimeGraph.java

private List<MethodProfile> getHotSpots(int maxSpots, Comparator<MethodStatistics> comparator) {
    List<MethodProfile> result = Lists.newArrayList();

    MinMaxPriorityQueue<MethodStatistics> hotSpots = MinMaxPriorityQueue.orderedBy(comparator)
            .maximumSize(maxSpots).create(graph.getVertices());

    int queueSize = hotSpots.size();
    for (int i = 0; i < queueSize; i++) {
        result.add(assembleProfile(hotSpots.removeFirst()));
    }

    return result;
}

From source file:com.griddynamics.jagger.storage.fs.logging.ChronologyLogAggregator.java

@Override
public AggregationInfo chronology(String dir, String targetFile) throws IOException {
    log.info("Try to aggregate {} into file {}", dir, targetFile);
    Collection<Iterable<LogEntry>> readers = new ArrayList<Iterable<LogEntry>>();
    Set<String> fileNameList = fileStorage.getFileNameList(dir);
    if (fileNameList.isEmpty()) {
        log.info("Nothing to aggregate. Directory {} is empty.", dir);
        fileStorage.create(targetFile);
        return new AggregationInfo(0, 0, 0);
    }
    for (String fileName : fileNameList) {
        try {
            readers.add(logReader.read(fileName, LogEntry.class));
        } catch (Exception e) {
            // TODO
            log.warn(e.getMessage(), e);
        }
    }

    int count = 0;
    long minTime = 0;
    long maxTime = 0;
    BufferedLogWriter.LogWriterOutput objectOutput = null;
    try {
        if (fileStorage.delete(targetFile, false)) {
            log.warn("Target file {} did not deleted!", targetFile);
        }
        objectOutput = logWriter.getOutput(fileStorage.create(targetFile));

        MinMaxPriorityQueue<StreamInfo> queue = MinMaxPriorityQueue.create();
        for (Iterable<LogEntry> inputStream : readers) {
            LogEntry logEntry;
            Iterator<LogEntry> it = inputStream.iterator();
            if (it.hasNext()) {
                logEntry = it.next();
            } else {
                continue;
            }
            queue.add(new StreamInfo(it, logEntry));
        }

        while (!queue.isEmpty()) {
            StreamInfo<LogEntry> streamInfo = queue.removeFirst();
            objectOutput.writeObject(streamInfo.lastLogEntry);

            if (count == 0) {
                minTime = streamInfo.lastLogEntry.getTime();
                maxTime = streamInfo.lastLogEntry.getTime();
            } else {
                maxTime = streamInfo.lastLogEntry.getTime();
            }

            count++;
            LogEntry logEntry;
            if (streamInfo.stream.hasNext()) {
                logEntry = streamInfo.stream.next();
            } else {
                continue;
            }
            streamInfo.lastLogEntry = logEntry;
            queue.add(streamInfo);
        }
    } finally {
        Closeables.closeQuietly(objectOutput);
    }

    return new AggregationInfo(minTime, maxTime, count);
}

From source file:co.cask.cdap.common.zookeeper.coordination.BalancedAssignmentStrategy.java

/**
 * Balance the assignment by spreading it across all handlers evenly.
 *
 * @param handlerQueue The priority queue tracking the number of resources assigned to each handler.
 * @param assigner The assigner for changing the assignment.
 * @param maxDiff The maximum difference between the handler with the most resources assigned and the one
 *                with the least resources assigned.
 */
private <T> void balance(MinMaxPriorityQueue<HandlerSize<T>> handlerQueue, ResourceAssigner<T> assigner,
        int maxDiff) {
    HandlerSize<T> minHandler = handlerQueue.peekFirst();
    HandlerSize<T> maxHandler = handlerQueue.peekLast();

    // Move assignments from the handler with the most assigned partition replicas to the one with the least,
    // until the difference is within the desired range.
    Multimap<T, PartitionReplica> assignments = assigner.get();
    while (maxHandler.getSize() - minHandler.getSize() > maxDiff) {
        PartitionReplica partitionReplica = assignments.get(maxHandler.getHandler()).iterator().next();

        // Remove min and max from the queue, and perform the reassignment.
        handlerQueue.removeFirst();
        handlerQueue.removeLast();

        assigner.set(minHandler.getHandler(), partitionReplica);

        // After the assignment, the corresponding sizes should get updated, hence put both handlers back into the queue for the next iteration.
        handlerQueue.add(minHandler);
        handlerQueue.add(maxHandler);

        minHandler = handlerQueue.peekFirst();
        maxHandler = handlerQueue.peekLast();
    }
}

From source file:com.davidbracewell.ml.sequence.decoder.LinearViterbi.java

@Override
public double[] decode(SequenceModel<V> raw, Sequence<V> sequence) {
    LinearSequenceModel<V> model = Val.of(raw).cast();
    final Feature classFeature = model.getTargetFeature();
    final int numStates = classFeature.alphabetSize();

    MinMaxPriorityQueue<State> beam = MinMaxPriorityQueue.maximumSize(beamSize).create();
    ClassificationResult result = model.classifyItem(0, sequence, new double[0]);
    for (int ci = 0; ci < numStates; ci++) {
        if (isValidStartTag(classFeature.valueAtIndex(ci))
                && isValidTag(classFeature.valueAtIndex(ci), sequence.getData(0))) {
            beam.add(new State(Math.log(result.getConfidence(ci)), ci, null, 0));
        }
    }

    MinMaxPriorityQueue<State> tempBeam = MinMaxPriorityQueue.maximumSize(beamSize).create();
    for (int i = 1; i < sequence.length(); i++) {
        while (!beam.isEmpty()) { // go through all the previous states
            State state = beam.removeFirst();
            String previousTag = classFeature.valueAtIndex(state.tag);
            result = model.classifyItem(i, sequence, state.labels());
            for (int ci = 0; ci < numStates; ci++) {
                if (isValidTransition(previousTag, classFeature.valueAtIndex(ci))
                        && ((i + 1 < sequence.length()) || isValidEndTag(classFeature.valueAtIndex(ci)))
                        && isValidTag(classFeature.valueAtIndex(ci), sequence.getData(i))) {
                    tempBeam.add(
                            new State(state.probability + Math.log(result.getConfidence(ci)), ci, state, i));
                }
            }
        }
        beam.addAll(tempBeam);
        tempBeam.clear();
    }

    return beam.remove().labels();
}

From source file:co.cask.cdap.common.zookeeper.coordination.BalancedAssignmentStrategy.java

@Override
public <T> void assign(ResourceRequirement requirement, Set<T> handlers, ResourceAssigner<T> assigner) {
    MinMaxPriorityQueue<HandlerSize<T>> handlerQueue = MinMaxPriorityQueue.create();
    Multimap<T, PartitionReplica> assignments = assigner.get();

    // Compute for each handler how many partition replicas are already assigned
    for (T handler : handlers) {
        handlerQueue.add(new HandlerSize<>(handler, assignments));
    }

    // For each unassigned partition replica in the requirement, assign it to the handler with the fewest
    // partition replicas assigned. This is just a heuristic so the later balance phase does less work.
    int totalPartitionReplica = 0;

    for (ResourceRequirement.Partition partition : requirement.getPartitions()) {
        totalPartitionReplica += partition.getReplicas();

        for (int replica = 0; replica < partition.getReplicas(); replica++) {
            if (assigner.getHandler(partition.getName(), replica) == null) {
                HandlerSize<T> handlerSize = handlerQueue.removeFirst();
                assigner.set(handlerSize.getHandler(), partition.getName(), replica);

                // After the assignment, the size should get updated, hence put the handler back into the queue for the next round.
                handlerQueue.add(handlerSize);
            }
        }
    }

    // Balance
    if (totalPartitionReplica > handlers.size()) {
        balance(handlerQueue, assigner, 1);
    } else {
        // Evenly distribute it to the first N handlers.
        while (handlerQueue.size() > totalPartitionReplica) {
            // If the number of handlers is greater than the total number of partition replicas,
            // there must be at least one handler that has nothing assigned.
            handlerQueue.removeFirst();
        }
        // Balance evenly; there should be no difference in the number of partition replicas assigned to each handler.
        balance(handlerQueue, assigner, 0);
    }
}

From source file:main.okapi.graphs.maxbmatching.MaxBMatching.java

private void sendUpdates(Vertex<LongWritable, IntWritable, MBMEdgeValue> vertex) {
    final MBMMessage proposeMsg = new MBMMessage(vertex.getId(), State.PROPOSED);

    // get top-capacity available edges by weight
    final int capacity = vertex.getValue().get();
    MinMaxPriorityQueue<Entry<LongWritable, MBMEdgeValue>> maxHeap = MinMaxPriorityQueue
            .orderedBy(new Comparator<Entry<LongWritable, MBMEdgeValue>>() {
                @Override
                public int compare(Entry<LongWritable, MBMEdgeValue> o1, Entry<LongWritable, MBMEdgeValue> o2) {
                    return -1 * Double.compare(o1.getValue().getWeight(), o2.getValue().getWeight()); // reverse comparator, largest weight first
                }
            }).maximumSize(capacity).create();
    // prepare list of available edges
    for (Edge<LongWritable, MBMEdgeValue> e : vertex.getEdges()) {
        if (e.getValue().getState() == State.DEFAULT || e.getValue().getState() == State.PROPOSED) {
            maxHeap.add(Maps.immutableEntry(e.getTargetVertexId(), e.getValue()));
        }
    }

    if (maxHeap.isEmpty()) {
        // all remaining edges are INCLUDED, nothing else to do
        checkSolution(vertex.getEdges());
        vertex.voteToHalt();
    } else {
        // propose up to capacity
        while (!maxHeap.isEmpty()) {
            Entry<LongWritable, MBMEdgeValue> entry = maxHeap.removeFirst();
            vertex.getEdgeValue(entry.getKey()).setState(State.PROPOSED);
            sendMessage(entry.getKey(), proposeMsg);
        }
    }
}