Example usage for com.google.common.collect MinMaxPriorityQueue orderedBy

Introduction

This page collects example usages of com.google.common.collect MinMaxPriorityQueue orderedBy from open-source projects.

Prototype

public static <B> Builder<B> orderedBy(Comparator<B> comparator) 

Document

Creates and returns a new builder, configured to build MinMaxPriorityQueue instances that use comparator to determine the least and greatest elements.
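Before the project examples, here is a minimal, self-contained sketch of the builder in isolation (the strings, the size limit of 3, and the length-based comparator are illustrative assumptions, not taken from any project below). It builds a bounded queue ordered by string length, so the shortest element is available from peekFirst() and the longest from peekLast().

import com.google.common.collect.MinMaxPriorityQueue;

import java.util.Comparator;

public class OrderedByExample {
    public static void main(String[] args) {
        // Build a double-ended priority queue ordered by string length.
        // maximumSize(3) caps the queue: once a fourth element is added,
        // the greatest element (the longest string) is evicted.
        MinMaxPriorityQueue<String> queue = MinMaxPriorityQueue
                .orderedBy(Comparator.comparingInt(String::length))
                .maximumSize(3)
                .create();

        queue.add("pear");
        queue.add("fig");
        queue.add("watermelon");
        queue.add("plum"); // queue now holds the three shortest strings

        System.out.println(queue.peekFirst()); // shortest element ("fig")
        System.out.println(queue.peekLast());  // longest remaining element
    }
}

maximumSize is optional; without it the queue is unbounded, and expectedSize can be passed to the builder as a sizing hint instead, as several of the examples below do.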

Usage

From source file: com.davidbracewell.wordnet.WordNet.java

private ListMultimap<Synset, Synset> dijkstra_path(Synset source) {
    Counter<Synset> dist = Counters.newHashMapCounter();
    Map<Synset, Synset> previous = new HashMap<>();
    Set<Synset> visited = Sets.newHashSet(source);

    for (Synset other : getSynsets()) {
        if (!other.equals(source)) {
            dist.set(other, Integer.MAX_VALUE);
            previous.put(other, null);
        }
    }

    MinMaxPriorityQueue<Pair<Synset, Double>> queue = MinMaxPriorityQueue
            .orderedBy(
                    Cast.<Comparator<? super Pair<Synset, Double>>>as(Sorting.mapEntryComparator(false, true)))
            .create();
    queue.add(Pair.of(source, 0d));

    while (!queue.isEmpty()) {
        Pair<Synset, Double> next = queue.remove();

        Synset synset = next.getFirst();
        visited.add(synset);

        Iterable<Synset> neighbors = Iterables.concat(synset.getRelatedSynsets(Relation.HYPERNYM),
                synset.getRelatedSynsets(Relation.HYPERNYM_INSTANCE),
                synset.getRelatedSynsets(Relation.HYPONYM),
                synset.getRelatedSynsets(Relation.HYPONYM_INSTANCE));

        for (Synset neighbor : neighbors) {
            double alt = dist.get(synset);
            if (alt != Integer.MAX_VALUE && (alt + 1) < dist.get(neighbor)) {
                dist.set(neighbor, alt + 1);
                previous.put(neighbor, synset);
            }
            if (!visited.contains(neighbor)) {
                queue.add(Pair.of(neighbor, alt));
            }
        }
    }

    ListMultimap<Synset, Synset> path = ArrayListMultimap.create();
    for (Synset other : getSynsets()) {
        if (other.equals(source) || dist.get(other) == Integer.MAX_VALUE)
            continue;

        Deque<Synset> stack = Lists.newLinkedList();
        Synset u = other;
        while (u != null && previous.containsKey(u)) {
            stack.push(u);
            u = previous.get(u);
        }
        while (!stack.isEmpty()) {
            Synset to = stack.pop();
            path.put(other, to);
        }
    }

    return path;
}

From source file: com.linkedin.pinot.tools.StarTreeIndexViewer.java

private int build(StarTreeIndexNodeInterf indexNode, StarTreeJsonNode json) {
    Iterator<? extends StarTreeIndexNodeInterf> childrenIterator = indexNode.getChildrenIterator();
    if (!childrenIterator.hasNext()) {
        return 0;
    }
    int childDimensionId = indexNode.getChildDimensionName();
    String childDimensionName = dimensionNameToIndexMap.inverse().get(childDimensionId);
    Dictionary dictionary = dictionaries.get(childDimensionName);
    int totalChildNodes = indexNode.getNumChildren();

    Comparator<Pair<String, Integer>> comparator = new Comparator<Pair<String, Integer>>() {

        @Override
        public int compare(Pair<String, Integer> o1, Pair<String, Integer> o2) {
            return -1 * Integer.compare(o1.getRight(), o2.getRight());
        }
    };
    MinMaxPriorityQueue<Pair<String, Integer>> queue = MinMaxPriorityQueue.orderedBy(comparator)
            .maximumSize(MAX_CHILDREN).create();
    StarTreeJsonNode allNode = null;

    while (childrenIterator.hasNext()) {
        StarTreeIndexNodeInterf childIndexNode = childrenIterator.next();
        int childDimensionValueId = childIndexNode.getDimensionValue();
        String childDimensionValue = "ALL";
        if (childDimensionValueId != StarTreeIndexNodeInterf.ALL) {
            childDimensionValue = dictionary.get(childDimensionValueId).toString();
        }
        StarTreeJsonNode childJson = new StarTreeJsonNode(childDimensionValue);
        totalChildNodes += build(childIndexNode, childJson);
        if (childDimensionValueId != StarTreeIndexNodeInterf.ALL) {
            json.addChild(childJson);
            queue.add(ImmutablePair.of(childDimensionValue, totalChildNodes));
        } else {
            allNode = childJson;
        }
    }
    //put ALL node at the end
    if (allNode != null) {
        json.addChild(allNode);
    }
    if (totalChildNodes > MAX_CHILDREN) {
        Iterator<Pair<String, Integer>> qIterator = queue.iterator();
        Set<String> topKDimensions = new HashSet<>();
        topKDimensions.add("ALL");
        while (qIterator.hasNext()) {
            topKDimensions.add(qIterator.next().getKey());
        }
        Iterator<StarTreeJsonNode> iterator = json.getChildren().iterator();
        while (iterator.hasNext()) {
            StarTreeJsonNode next = iterator.next();
            if (!topKDimensions.contains(next.getName())) {
                iterator.remove();
            }
        }
    }
    return totalChildNodes;
}

From source file: com.datatorrent.lib.bucket.AbstractBucketManager.java

public AbstractBucketManager() {
    eventQueue = new LinkedBlockingQueue<Long>();
    evictionCandidates = Sets.newHashSet();
    dirtyBuckets = Maps.newConcurrentMap();
    bucketHeap = MinMaxPriorityQueue.orderedBy(new Comparator<AbstractBucket<T>>() {
        @Override
        public int compare(AbstractBucket<T> bucket1, AbstractBucket<T> bucket2) {
            if (bucket1.lastUpdateTime() < bucket2.lastUpdateTime()) {
                return -1;
            }
            if (bucket1.lastUpdateTime() > bucket2.lastUpdateTime()) {
                return 1;
            }
            return 0;
        }

    }).create();
    lock = new Lock();
    committedWindow = -1;

    noOfBuckets = DEF_NUM_BUCKETS;
    noOfBucketsInMemory = DEF_NUM_BUCKETS_MEM;
    maxNoOfBucketsInMemory = DEF_NUM_BUCKETS_MEM + 100;
    millisPreventingBucketEviction = DEF_MILLIS_PREVENTING_EVICTION;
    writeEventKeysOnly = true;
    bucketsToDelete = Sets.newHashSet();
}

From source file: io.druid.server.coordinator.DruidCoordinatorBalancerProfiler.java

public void bigProfiler() {
    Stopwatch watch = Stopwatch.createUnstarted();
    int numSegments = 55000;
    int numServers = 50;
    EasyMock.expect(manager.getAllRules()).andReturn(ImmutableMap.<String, List<Rule>>of("test", rules))
            .anyTimes();
    EasyMock.expect(manager.getRules(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.expect(manager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.replay(manager);

    coordinator.moveSegment(EasyMock.<ImmutableDruidServer>anyObject(),
            EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<String>anyObject(),
            EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(coordinator);

    List<DruidServer> serverList = Lists.newArrayList();
    Map<String, LoadQueuePeon> peonMap = Maps.newHashMap();
    List<ServerHolder> serverHolderList = Lists.newArrayList();
    Map<String, DataSegment> segmentMap = Maps.newHashMap();
    for (int i = 0; i < numSegments; i++) {
        segmentMap.put("segment" + i, new DataSegment("datasource" + i,
                new Interval(new DateTime("2012-01-01"), (new DateTime("2012-01-01")).plusHours(1)),
                (new DateTime("2012-03-01")).toString(), Maps.<String, Object>newHashMap(),
                Lists.<String>newArrayList(), Lists.<String>newArrayList(), new NoneShardSpec(), 0, 4L));
    }

    for (int i = 0; i < numServers; i++) {
        ImmutableDruidServer server = EasyMock.createMock(ImmutableDruidServer.class);
        EasyMock.expect(server.getMetadata()).andReturn(null).anyTimes();
        EasyMock.expect(server.getCurrSize()).andReturn(30L).atLeastOnce();
        EasyMock.expect(server.getMaxSize()).andReturn(100L).atLeastOnce();
        EasyMock.expect(server.getTier()).andReturn("normal").anyTimes();
        EasyMock.expect(server.getName()).andReturn(Integer.toString(i)).atLeastOnce();
        EasyMock.expect(server.getHost()).andReturn(Integer.toString(i)).anyTimes();
        if (i == 0) {
            EasyMock.expect(server.getSegments()).andReturn(segmentMap).anyTimes();
        } else {
            EasyMock.expect(server.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
        }
        EasyMock.expect(server.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
        EasyMock.replay(server);

        LoadQueuePeon peon = new LoadQueuePeonTester();
        peonMap.put(Integer.toString(i), peon);
        serverHolderList.add(new ServerHolder(server, peon));
    }

    DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams.newBuilder()
            .withDruidCluster(
                    new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal",
                            MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator)
                                    .create(serverHolderList))))
            .withLoadManagementPeons(peonMap).withAvailableSegments(segmentMap.values())
            .withDynamicConfigs(
                    new CoordinatorDynamicConfig.Builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE)
                            .withReplicantLifetime(500).withReplicationThrottleLimit(5).build())
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01")).withEmitter(emitter)
            .withDatabaseRuleManager(manager).withReplicationManager(new ReplicationThrottler(2, 500))
            .withSegmentReplicantLookup(SegmentReplicantLookup
                    .make(new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal",
                            MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator)
                                    .create(serverHolderList)))))
            .build();

    DruidCoordinatorBalancerTester tester = new DruidCoordinatorBalancerTester(coordinator);
    DruidCoordinatorRuleRunner runner = new DruidCoordinatorRuleRunner(coordinator);
    watch.start();
    DruidCoordinatorRuntimeParams balanceParams = tester.run(params);
    DruidCoordinatorRuntimeParams assignParams = runner.run(params);
    System.out.println(watch.stop());
}

From source file: com.datatorrent.lib.bucket.BucketManager.java

BucketManager() {
    eventQueue = new LinkedBlockingQueue<LoadCommand>();
    eventServiceThread = new Thread(this, "BucketManager");
    knownBucketKeys = Sets.newHashSet();
    unwrittenBucketEvents = Maps.newHashMap();
    bucketHeap = MinMaxPriorityQueue.orderedBy(new Comparator<Bucket<T>>() {
        @Override
        public int compare(Bucket<T> bucket1, Bucket<T> bucket2) {
            if (bucket1.lastUpdateTime() < bucket2.lastUpdateTime()) {
                return -1;
            }
            if (bucket1.lastUpdateTime() > bucket2.lastUpdateTime()) {
                return 1;
            }
            return 0;
        }
    }).create();
    lock = new Lock();
    maxNoOfBucketsInMemory = this.noOfBucketsInMemory + 10;
    committedWindow = -1;
}

From source file: com.metamx.druid.utils.DruidMasterBalancerProfiler.java

public void bigProfiler() {
    Stopwatch watch = new Stopwatch();
    int numSegments = 55000;
    int numServers = 50;
    EasyMock.expect(manager.getAllRules()).andReturn(ImmutableMap.<String, List<Rule>>of("test", rules))
            .anyTimes();
    EasyMock.expect(manager.getRules(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.expect(manager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.replay(manager);

    master.moveSegment(EasyMock.<String>anyObject(), EasyMock.<String>anyObject(), EasyMock.<String>anyObject(),
            EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(master);

    List<DruidServer> serverList = Lists.newArrayList();
    Map<String, LoadQueuePeon> peonMap = Maps.newHashMap();
    List<ServerHolder> serverHolderList = Lists.newArrayList();
    Map<String, DataSegment> segmentMap = Maps.newHashMap();
    for (int i = 0; i < numSegments; i++) {
        segmentMap.put("segment" + i, new DataSegment("datasource" + i,
                new Interval(new DateTime("2012-01-01"), (new DateTime("2012-01-01")).plusHours(1)),
                (new DateTime("2012-03-01")).toString(), Maps.<String, Object>newHashMap(),
                Lists.<String>newArrayList(), Lists.<String>newArrayList(), new NoneShardSpec(), 0, 4L));
    }

    for (int i = 0; i < numServers; i++) {
        DruidServer server = EasyMock.createMock(DruidServer.class);
        EasyMock.expect(server.getMetadata()).andReturn(null).anyTimes();
        EasyMock.expect(server.getCurrSize()).andReturn(30L).atLeastOnce();
        EasyMock.expect(server.getMaxSize()).andReturn(100L).atLeastOnce();
        EasyMock.expect(server.getTier()).andReturn("normal").anyTimes();
        EasyMock.expect(server.getName()).andReturn(Integer.toString(i)).atLeastOnce();
        EasyMock.expect(server.getHost()).andReturn(Integer.toString(i)).anyTimes();
        if (i == 0) {
            EasyMock.expect(server.getSegments()).andReturn(segmentMap).anyTimes();
        } else {
            EasyMock.expect(server.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
        }
        EasyMock.expect(server.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
        EasyMock.replay(server);

        LoadQueuePeon peon = new LoadQueuePeonTester();
        peonMap.put(Integer.toString(i), peon);
        serverHolderList.add(new ServerHolder(server, peon));
    }

    DruidMasterRuntimeParams params = DruidMasterRuntimeParams.newBuilder()
            .withDruidCluster(
                    new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal",
                            MinMaxPriorityQueue.orderedBy(DruidMasterBalancerTester.percentUsedComparator)
                                    .create(serverHolderList))))
            .withLoadManagementPeons(peonMap).withAvailableSegments(segmentMap.values())
            .withMasterSegmentSettings(
                    new MasterSegmentSettings.Builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE).build())
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01")).withEmitter(emitter)
            .withDatabaseRuleManager(manager).withReplicationManager(new ReplicationThrottler(2, 500))
            .withSegmentReplicantLookup(SegmentReplicantLookup
                    .make(new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal",
                            MinMaxPriorityQueue.orderedBy(DruidMasterBalancerTester.percentUsedComparator)
                                    .create(serverHolderList)))))
            .build();

    DruidMasterBalancerTester tester = new DruidMasterBalancerTester(master);
    DruidMasterRuleRunner runner = new DruidMasterRuleRunner(master, 500, 5);
    watch.start();
    DruidMasterRuntimeParams balanceParams = tester.run(params);
    DruidMasterRuntimeParams assignParams = runner.run(params);
    System.out.println(watch.stop());
}

From source file: com.griddynamics.jagger.diagnostics.thread.sampling.RuntimeGraph.java

private List<MethodProfile> getHotSpots(int maxSpots, Comparator<MethodStatistics> comparator) {
    List<MethodProfile> result = Lists.newArrayList();

    MinMaxPriorityQueue<MethodStatistics> hotSpots = MinMaxPriorityQueue.orderedBy(comparator)
            .maximumSize(maxSpots).create(graph.getVertices());

    int queueSize = hotSpots.size();
    for (int i = 0; i < queueSize; i++) {
        result.add(assembleProfile(hotSpots.removeFirst()));
    }

    return result;
}

From source file: com.griddynamics.jagger.diagnostics.thread.sampling.RuntimeGraph.java

private List<MethodStatistics> selectMethodStatistics(Collection<MethodStatistics> statistics,
        int maxStatistics, Comparator<MethodStatistics> comparator) {
    List<MethodStatistics> result = Lists.newArrayList();

    MinMaxPriorityQueue<MethodStatistics> selected = MinMaxPriorityQueue.orderedBy(comparator)
            .maximumSize(maxStatistics).create(statistics);

    for (MethodStatistics method : selected) {
        result.add(method);
    }

    return result;
}

From source file: com.koda.integ.hbase.test.BucketCacheOverhead.java

/**
 * @param maxSize the target size of elements in the queue
 * @param blockSize expected average size of blocks
 */
public CachedEntryQueue(long maxSize, long blockSize) {
    int initialSize = (int) (maxSize / blockSize);
    if (initialSize == 0)
        initialSize++;
    queue = MinMaxPriorityQueue.orderedBy(new Comparator<Map.Entry<BlockCacheKey, BucketEntry>>() {
        public int compare(Entry<BlockCacheKey, BucketEntry> entry1, Entry<BlockCacheKey, BucketEntry> entry2) {
            return entry1.getValue().compareTo(entry2.getValue());
        }

    }).expectedSize(initialSize).create();
    cacheSize = 0;
    this.maxSize = maxSize;
}

From source file: com.metamx.druid.master.BalancerCostAnalyzer.java

private MinMaxPriorityQueue<Pair<Double, ServerHolder>> computeCosts(final DataSegment proposalSegment,
        final Iterable<ServerHolder> serverHolders) {
    MinMaxPriorityQueue<Pair<Double, ServerHolder>> costsAndServers = MinMaxPriorityQueue
            .orderedBy(new Comparator<Pair<Double, ServerHolder>>() {
                @Override
                public int compare(Pair<Double, ServerHolder> o, Pair<Double, ServerHolder> o1) {
                    return Double.compare(o.lhs, o1.lhs);
                }
            }).create();

    final long proposalSegmentSize = proposalSegment.getSize();

    for (ServerHolder server : serverHolders) {
        /** Don't calculate cost if the server doesn't have enough space or is loading the segment */
        if (proposalSegmentSize > server.getAvailableSize() || server.isLoadingSegment(proposalSegment)) {
            continue;
        }

        /** The contribution to the total cost of a given server by proposing to move the segment to that server is... */
        double cost = 0f;
        /**  the sum of the costs of other (exclusive of the proposalSegment) segments on the server */
        for (DataSegment segment : server.getServer().getSegments().values()) {
            if (!proposalSegment.equals(segment)) {
                cost += computeJointSegmentCosts(proposalSegment, segment);
            }
        }
        /**  plus the costs of segments that will be loaded */
        for (DataSegment segment : server.getPeon().getSegmentsToLoad()) {
            cost += computeJointSegmentCosts(proposalSegment, segment);
        }

        costsAndServers.add(Pair.of(cost, server));
    }

    return costsAndServers;
}