Example usage for com.google.common.collect MinMaxPriorityQueue create

List of usage examples for com.google.common.collect MinMaxPriorityQueue create

Introduction

On this page you can find example usage for com.google.common.collect.MinMaxPriorityQueue.create().

Prototype

public static <E extends Comparable<E>> MinMaxPriorityQueue<E> create() 

Document

Creates a new min-max priority queue with default settings: natural order, no maximum size, no initial contents, and an initial expected size of 11.
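
A minimal, self-contained sketch of what create() gives you: a queue ordered by the elements' natural ordering that exposes both its smallest and its largest element. The class name below is illustrative, not part of Guava.

import com.google.common.collect.MinMaxPriorityQueue;

public class MinMaxCreateExample {
    public static void main(String[] args) {
        // create() uses natural ordering, has no maximum size, and starts empty.
        MinMaxPriorityQueue<Integer> queue = MinMaxPriorityQueue.create();
        queue.add(5);
        queue.add(1);
        queue.add(9);

        System.out.println(queue.peekFirst()); // 1 (smallest element)
        System.out.println(queue.peekLast());  // 9 (largest element)
        System.out.println(queue.pollFirst()); // 1 (removes the smallest)
        System.out.println(queue.pollLast());  // 9 (removes the largest)
    }
}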

Usage

From source file:org.commoncrawl.mapred.pipelineV3.domainmeta.blogs.feedurlid.FeedUrlIdStep.java

public static void main(String[] args) {
    ArrayList<URLCandidate> candidates = new ArrayList<URLCandidate>();
    candidates.add(new URLCandidate(new GoogleURL("http://2010.goldenplains.com.au/info/feed/")));
    candidates.add(
            new URLCandidate(new GoogleURL("http://2010.goldenplains.com.au/supernatural-amphitheatre/feed/")));
    candidates.add(new URLCandidate(new GoogleURL("http://2010.goldenplains.com.au/tickets-pre-ballot/feed/")));

    collapseCandidates(candidates, 2);

    System.out.println(candidates.toString());

    MinMaxPriorityQueue<URLCandidate> deque2 = MinMaxPriorityQueue.create();
    deque2.add(new URLCandidate(new GoogleURL("http://2010.goldenplains.com.au/blog/feed/")));
    deque2.add(new URLCandidate(
            new GoogleURL("http://2010.goldenplains.com.au/blog/supernatural-amphitheatre/feed/")));
    deque2.add(
            new URLCandidate(new GoogleURL("http://2010.goldenplains.com.au/vlog/tickets-pre-ballot/feed/")));
    deque2.add(
            new URLCandidate(new GoogleURL("http://2010.goldenplains.com.au/blog/tickets-pre-ballot/feed/")));
    deque2.add(
            new URLCandidate(new GoogleURL("http://2010.goldenplains.com.au/vlog/tickets-pre-ballot-2/feed/")));
    deque2.add(
            new URLCandidate(new GoogleURL("http://2010.goldenplains.com.au/vlog/tickets-pre-ballot-3/feed/")));
    ArrayList<URLCandidate> test = drainToArrayList(deque2);
    System.out.println(test);

    collapseCandidates(test, 2);

    System.out.println(test.toString());
}
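
The drainToArrayList helper used above is not part of this snippet. A plausible sketch of such a helper (an assumption, not the project's actual code) simply polls the queue until it is empty, which hands the elements back smallest-first:

import com.google.common.collect.MinMaxPriorityQueue;
import java.util.ArrayList;

final class QueueUtil {
    // Hypothetical stand-in for drainToArrayList: drains the queue in ascending order.
    static <E> ArrayList<E> drainToArrayList(MinMaxPriorityQueue<E> queue) {
        ArrayList<E> result = new ArrayList<>();
        E next;
        while ((next = queue.pollFirst()) != null) {
            result.add(next);
        }
        return result;
    }
}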

From source file:kungfu.algdesign.ds.MovingMedian.java

public static void calculate(Queue<Integer> data, Queue<Integer> medians) {
    // "maxHeap" holds the upper half of the values seen so far and "minHeap" the lower half;
    // a MinMaxPriorityQueue exposes both ends, so each half's boundary element is a peek away.
    MinMaxPriorityQueue<Integer> minHeap = MinMaxPriorityQueue.create();
    MinMaxPriorityQueue<Integer> maxHeap = MinMaxPriorityQueue.create();

    // Sentinels keep both queues non-empty before any real data arrives.
    minHeap.add(Integer.MIN_VALUE);
    maxHeap.add(Integer.MAX_VALUE);

    Integer item = null;
    Integer median = null;

    while ((item = data.poll()) != null) {
        if (median == null) {
            maxHeap.add(item);
        } else if (item >= median) {
            maxHeap.add(item);
        } else {
            minHeap.add(item);
        }

        if (maxHeap.size() - minHeap.size() == 2) {
            minHeap.add(maxHeap.pollFirst());
        } else if (minHeap.size() - maxHeap.size() == 2) {
            maxHeap.add(minHeap.pollLast());
        }

        if (minHeap.size() == maxHeap.size() || minHeap.size() > maxHeap.size()) {
            median = minHeap.peekLast();
        } else {
            median = maxHeap.peekFirst();
        }

        medians.add(median);
    }
}
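
A hedged example of how calculate might be driven; the sample numbers are made up, and it assumes kungfu.algdesign.ds.MovingMedian is on the classpath:

import kungfu.algdesign.ds.MovingMedian;

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Queue;

public class MovingMedianDemo {
    public static void main(String[] args) {
        // One running median is appended to "medians" for every element consumed from "data".
        Queue<Integer> data = new ArrayDeque<>(Arrays.asList(5, 15, 1, 3, 8));
        Queue<Integer> medians = new ArrayDeque<>();

        MovingMedian.calculate(data, medians);

        System.out.println(medians);
    }
}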

From source file:co.cask.cdap.common.zookeeper.coordination.BalancedAssignmentStrategy.java

@Override
public <T> void assign(ResourceRequirement requirement, Set<T> handlers, ResourceAssigner<T> assigner) {
    MinMaxPriorityQueue<HandlerSize<T>> handlerQueue = MinMaxPriorityQueue.create();
    Multimap<T, PartitionReplica> assignments = assigner.get();

    // Compute, for each handler, how many partition replicas are already assigned
    for (T handler : handlers) {
        handlerQueue.add(new HandlerSize<>(handler, assignments));
    }

    // For each unassigned partition replica in the requirement, assign it to the handler with the
    // fewest partition replicas already assigned. This is just a heuristic so the later balance phase does less work.
    int totalPartitionReplica = 0;

    for (ResourceRequirement.Partition partition : requirement.getPartitions()) {
        totalPartitionReplica += partition.getReplicas();

        for (int replica = 0; replica < partition.getReplicas(); replica++) {
            if (assigner.getHandler(partition.getName(), replica) == null) {
                HandlerSize<T> handlerSize = handlerQueue.removeFirst();
                assigner.set(handlerSize.getHandler(), partition.getName(), replica);

                // After assignment the handler's size has changed, so put it back into the queue for the next round.
                handlerQueue.add(handlerSize);
            }
        }
    }

    // Balance
    if (totalPartitionReplica > handlers.size()) {
        balance(handlerQueue, assigner, 1);
    } else {
        // Evenly distribute it to the first N handlers.
        while (handlerQueue.size() > totalPartitionReplica) {
            // If the number of handlers is greater than the total number of partition replicas,
            // there must be at least one handler that has nothing assigned.
            handlerQueue.removeFirst();
        }
        // Balance evenly; there should be no difference in the number of partition replicas assigned to each handler.
        balance(handlerQueue, assigner, 0);
    }
}
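
Because create() relies on the elements' natural ordering, an element type like HandlerSize has to implement Comparable (here presumably by comparing how many partition replicas each handler owns). A small illustration of that requirement with a made-up element type, not CDAP's actual HandlerSize:

import com.google.common.collect.MinMaxPriorityQueue;

public class ComparableElementExample {
    // Illustrative element type: ordered by how much work a handler currently owns.
    static final class Load implements Comparable<Load> {
        final String handler;
        int assigned;

        Load(String handler, int assigned) {
            this.handler = handler;
            this.assigned = assigned;
        }

        @Override
        public int compareTo(Load other) {
            return Integer.compare(this.assigned, other.assigned);
        }
    }

    public static void main(String[] args) {
        MinMaxPriorityQueue<Load> queue = MinMaxPriorityQueue.create();
        queue.add(new Load("a", 3));
        queue.add(new Load("b", 1));
        queue.add(new Load("c", 2));

        // removeFirst() returns the least-loaded handler, mirroring the assignment loop above.
        Load least = queue.removeFirst();
        System.out.println(least.handler); // "b"

        least.assigned++;
        queue.add(least); // re-insert so the next pick sees the updated load
    }
}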

From source file:edu.brandeis.wisedb.scheduler.BestNFirstGraphSearch.java

@Override
public List<Action> schedule(Set<ModelQuery> toSched) {

    FullGraphState first = new FullGraphState(new TreeSet<ModelVM>(), toSched, sla, qtp);
    MinMaxPriorityQueue<StateCost> frontier = MinMaxPriorityQueue.create();
    frontier.add(new StateCost(first, 0, null, null));

    while (!frontier.isEmpty()) {
        log.fine("Frontier size: " + frontier.size());

        PriorityQueue<Action> pq = new PriorityQueue<Action>(new ActionComparator());
        StateCost next = frontier.poll();

        if (next.s.isGoalState()) {
            // we're done
            List<Action> toR = new LinkedList<Action>();
            StateCost last = next;
            while (last.action != null) {
                toR.add(0, last.action);
                last = last.prev;
            }
            log.fine("Reached goal state with following actions: " + toR);

            return toR;
        }

        for (Action a : next.s.getPossibleActions()) {
            int cost = 0;
            FullGraphState nextState = next.s.getNewStateForAction(a);

            cost += h.predictCostToEnd(nextState);
            //cost += nextState.getExecutionCost();

            a.computedCost = cost;
            log.finer("Added action " + a + " to the frontier");
            pq.add(a);
        }

        if (pq.isEmpty()) {
            log.severe("There was no selectable action for state: " + next);
            return null;
        }

        for (int i = 0; i < toTry; i++) {
            Action nextBest = pq.poll();
            if (nextBest == null) {
                log.fine("Unable to get " + (i + 1) + "th action for state " + next);
                break;
            }
            FullGraphState c = next.s.getNewStateForAction(nextBest);
            StateCost candidate = new StateCost(c, c.getExecutionCost(), nextBest, next);
            frontier.add(candidate);
        }

        while (frontier.size() > maxFrontierSize) {
            frontier.removeLast();
        }
    }

    return null;
}
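
The frontier here is capped by hand with removeLast(). Guava can also enforce such a cap itself: the builder's maximumSize option evicts the greatest element whenever the queue overflows. A small sketch of that alternative (the bound of 3 is arbitrary):

import com.google.common.collect.MinMaxPriorityQueue;

public class BoundedFrontierExample {
    public static void main(String[] args) {
        // A queue that never holds more than 3 elements; adding a 4th
        // silently discards the greatest of the 4.
        MinMaxPriorityQueue<Integer> frontier = MinMaxPriorityQueue.maximumSize(3).create();

        frontier.add(10);
        frontier.add(40);
        frontier.add(20);
        frontier.add(30); // 40 is evicted: it is the greatest of {10, 40, 20, 30}

        System.out.println(frontier.size());     // 3
        System.out.println(frontier.peekLast()); // 30
    }
}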

From source file:com.griddynamics.jagger.storage.fs.logging.ChronologyLogAggregator.java

@Override
public AggregationInfo chronology(String dir, String targetFile) throws IOException {
    log.info("Try to aggregate {} into file {}", dir, targetFile);
    Collection<Iterable<LogEntry>> readers = new ArrayList<Iterable<LogEntry>>();
    Set<String> fileNameList = fileStorage.getFileNameList(dir);
    if (fileNameList.isEmpty()) {
        log.info("Nothing to aggregate. Directory {} is empty.", dir);
        fileStorage.create(targetFile);
        return new AggregationInfo(0, 0, 0);
    }
    for (String fileName : fileNameList) {
        try {
            readers.add(logReader.read(fileName, LogEntry.class));
        } catch (Exception e) {
            // TODO
            log.warn(e.getMessage(), e);
        }
    }

    int count = 0;
    long minTime = 0;
    long maxTime = 0;
    BufferedLogWriter.LogWriterOutput objectOutput = null;
    try {
        if (fileStorage.delete(targetFile, false)) {
            log.warn("Target file {} did not deleted!", targetFile);
        }
        objectOutput = logWriter.getOutput(fileStorage.create(targetFile));

        MinMaxPriorityQueue<StreamInfo> queue = MinMaxPriorityQueue.create();
        for (Iterable<LogEntry> inputStream : readers) {
            LogEntry logEntry;
            Iterator<LogEntry> it = inputStream.iterator();
            if (it.hasNext()) {
                logEntry = it.next();
            } else {
                continue;
            }
            queue.add(new StreamInfo(it, logEntry));
        }

        while (!queue.isEmpty()) {
            StreamInfo<LogEntry> streamInfo = queue.removeFirst();
            objectOutput.writeObject(streamInfo.lastLogEntry);

            if (count == 0) {
                minTime = streamInfo.lastLogEntry.getTime();
                maxTime = streamInfo.lastLogEntry.getTime();
            } else {
                maxTime = streamInfo.lastLogEntry.getTime();
            }

            count++;
            LogEntry logEntry;
            if (streamInfo.stream.hasNext()) {
                logEntry = streamInfo.stream.next();
            } else {
                continue;
            }
            streamInfo.lastLogEntry = logEntry;
            queue.add(streamInfo);
        }
    } finally {
        Closeables.closeQuietly(objectOutput);
    }

    return new AggregationInfo(minTime, maxTime, count);
}
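
This aggregator is a k-way merge: each StreamInfo pairs an iterator with its most recently read entry, and the queue (presumably ordered by entry time) always yields the stream whose pending entry comes first. A stripped-down, hedged version of the same pattern over sorted integer lists (the Head class is invented for illustration):

import com.google.common.collect.MinMaxPriorityQueue;

import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

public class KWayMergeExample {
    // Illustrative counterpart of StreamInfo: an iterator plus the value waiting to be emitted.
    static final class Head implements Comparable<Head> {
        final Iterator<Integer> rest;
        int value;

        Head(Iterator<Integer> rest, int value) {
            this.rest = rest;
            this.value = value;
        }

        @Override
        public int compareTo(Head other) {
            return Integer.compare(this.value, other.value);
        }
    }

    public static void main(String[] args) {
        List<List<Integer>> sortedInputs = Arrays.asList(
                Arrays.asList(1, 4, 9),
                Arrays.asList(2, 3, 10),
                Arrays.asList(5, 6));

        // Seed the queue with the head of every non-empty input.
        MinMaxPriorityQueue<Head> queue = MinMaxPriorityQueue.create();
        for (List<Integer> input : sortedInputs) {
            Iterator<Integer> it = input.iterator();
            if (it.hasNext()) {
                queue.add(new Head(it, it.next()));
            }
        }

        // Repeatedly emit the smallest pending value, then advance that stream.
        while (!queue.isEmpty()) {
            Head smallest = queue.removeFirst();
            System.out.print(smallest.value + " "); // prints 1 2 3 4 5 6 9 10
            if (smallest.rest.hasNext()) {
                smallest.value = smallest.rest.next();
                queue.add(smallest);
            }
        }
    }
}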