Example usage for com.google.common.collect Multimap removeAll

Introduction

On this page you can find usage examples for com.google.common.collect Multimap removeAll.

Prototype

Collection<V> removeAll(@Nullable Object key);

Document

Removes all values associated with the given key and returns the removed values (possibly empty). After this call the multimap no longer contains the key.
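
Before the project examples, here is a minimal, self-contained sketch of the method's behavior (the class name RemoveAllExample and the keys used here are illustrative, not taken from the projects below):

import java.util.Collection;

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class RemoveAllExample {
    public static void main(String[] args) {
        Multimap<String, String> multimap = ArrayListMultimap.create();
        multimap.put("fruit", "apple");
        multimap.put("fruit", "banana");
        multimap.put("vegetable", "carrot");

        // removeAll returns the values that were mapped to the key (possibly empty)
        Collection<String> removed = multimap.removeAll("fruit");
        System.out.println(removed);                        // [apple, banana]
        System.out.println(multimap.containsKey("fruit"));  // false

        // Removing an absent key is safe and simply returns an empty collection
        System.out.println(multimap.removeAll("missing").isEmpty()); // true
    }
}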

Usage

From source file:com.github.rinde.rinsim.core.model.DependencyResolver.java

ImmutableSet<Model<?>> resolve() {
    addDefaultModels();
    final Multimap<Dependency, Dependency> dependencyGraph = constructDependencyGraph();

    if (LOGGER.isTraceEnabled()) {
        for (final Dependency dep : dependencyGraph.keySet()) {
            final StringBuilder sb = new StringBuilder();
            for (final Dependency d : dependencyGraph.get(dep)) {
                sb.append(d.modelBuilder).append(" ");
            }
            LOGGER.trace("{} requires: {}.", dep.modelBuilder.toString(), sb);
        }
    }

    while (!dependencyGraph.isEmpty()) {
        final List<Dependency> toRemove = new ArrayList<>();
        for (final Dependency dep : dependencyGraph.keys()) {
            final Collection<Dependency> dependencies = dependencyGraph.get(dep);
            boolean allResolved = true;
            for (final Dependency dependency : dependencies) {
                allResolved &= dependency.isResolved();
            }
            if (allResolved) {
                dep.build();
                toRemove.add(dep);
            }
        }

        // Drop the dependencies built in this pass, together with all their edges
        for (final Dependency mb : toRemove) {
            dependencyGraph.removeAll(mb);
        }
        if (toRemove.isEmpty()) {
            throw new IllegalArgumentException("Could not resolve dependencies for " + dependencyGraph.keySet()
                    + ", most likely a circular dependency was declared.");
        }
    }

    final ImmutableSet.Builder<Model<?>> builder = ImmutableSet.builder();
    for (final Dependency cmb : builders) {
        builder.add(cmb.build());
    }
    return builder.build();
}

From source file:org.wisdom.framework.filters.ProxyFilter.java

/**
 * The interception method. Re-emits the request to the target folder and forwards the response. This method
 * returns an {@link org.wisdom.api.http.AsyncResult} as the proxy needs to run in another thread. It also
 * invokes a couple of callbacks letting developers customize the request and result.
 *
 * @param route   the route
 * @param context the filter context
 * @return the result
 * @throws Exception if anything bad happens
 */
@Override
public Result call(final Route route, final RequestContext context) throws Exception {
    return new AsyncResult(new Callable<Result>() {
        @Override
        public Result call() throws Exception {
            URI rewrittenURI = rewriteURI(context);
            logger.debug("Proxy request - rewriting {} to {}", context.request().uri(), rewrittenURI);
            if (rewrittenURI == null) {
                return onRewriteFailed(context);
            }

            BasicHttpEntityEnclosingRequest request = new BasicHttpEntityEnclosingRequest(
                    context.request().method(), rewrittenURI.toString());
            // Any header listed by the Connection header must be removed:
            // http://tools.ietf.org/html/rfc7230#section-6.1.
            Set<String> hopHeaders = new HashSet<>();
            List<String> connectionHeaders = context.request().headers().get(HeaderNames.CONNECTION);
            for (String s : connectionHeaders) {
                for (String entry : Splitter.on(",").omitEmptyStrings().trimResults().splitToList(s)) {
                    hopHeaders.add(entry.toLowerCase(Locale.ENGLISH));
                }
            }

            boolean hasContent = context.request().contentType() != null;
            final String host = getHost();
            Multimap<String, String> headers = ArrayListMultimap.create();
            for (Map.Entry<String, List<String>> entry : context.request().headers().entrySet()) {
                String name = entry.getKey();
                if (HeaderNames.TRANSFER_ENCODING.equalsIgnoreCase(name)) {
                    hasContent = true;
                }
                if (host != null && HeaderNames.HOST.equalsIgnoreCase(name)) {
                    continue;
                }
                // Remove hop-by-hop headers.
                String lower = name.toLowerCase(Locale.ENGLISH);
                if (HOP_HEADERS.contains(lower) || hopHeaders.contains(lower)) {
                    continue;
                }

                for (String v : entry.getValue()) {
                    headers.put(name, v);
                }
            }

            // Force the Host header if configured
            headers.removeAll(HeaderNames.HOST);
            if (host != null) {
                headers.put(HeaderNames.HOST, host);
                headers.put("X-Forwarded-Server", host);
            } else {
                // Otherwise use the host of the rewritten URI
                headers.put("X-Forwarded-Server", rewrittenURI.getHost());
            }

            // Add proxy headers
            if (getVia() != null) {
                headers.put(HeaderNames.VIA, "http/1.1 " + getVia());
            }
            headers.put("X-Forwarded-For", context.request().remoteAddress());
            if (host != null) {
                headers.put("X-Forwarded-Host", host);
            }

            updateHeaders(context, headers);
            for (Map.Entry<String, String> s : headers.entries()) {
                request.addHeader(s.getKey(), s.getValue());
            }
            // Remove content-length as it is computed by the HTTP client.
            request.removeHeaders(HeaderNames.CONTENT_LENGTH);

            if (hasContent) {
                ByteArrayEntity entity = new ByteArrayEntity(context.context().raw(), ContentType
                        .create(context.request().contentMimeType(), context.request().contentCharset()));
                request.setEntity(entity);
            }

            HttpResponse response = client.execute(new HttpHost(rewrittenURI.getHost(), rewrittenURI.getPort()),
                    request);
            return onResult(toResult(response));
        }
    });

}

From source file:org.gradle.plugins.ide.idea.model.internal.IdeaDependenciesProvider.java

/** Looks for dependencies contained in all of the given configurations, removes them from the multimap and returns them as the result. */
List<IdeDependencyKey<?, Dependency>> extractDependencies(
        Multimap<IdeDependencyKey<?, Dependency>, String> dependenciesToConfigs,
        Collection<String> configurations, Collection<String> minusConfigurations) {
    List<IdeDependencyKey<?, Dependency>> deps = new ArrayList<IdeDependencyKey<?, Dependency>>();
    List<IdeDependencyKey<?, Dependency>> minusDeps = new ArrayList<IdeDependencyKey<?, Dependency>>();
    for (IdeDependencyKey<?, Dependency> dependencyKey : dependenciesToConfigs.keySet()) {
        if (dependenciesToConfigs.get(dependencyKey).containsAll(configurations)) {
            boolean isInMinus = false;
            for (String minusConfiguration : minusConfigurations) {
                if (dependenciesToConfigs.get(dependencyKey).contains(minusConfiguration)) {
                    isInMinus = true;
                    break;
                }
            }
            if (!isInMinus) {
                deps.add(dependencyKey);
            } else {
                minusDeps.add(dependencyKey);
            }
        }
    }
    // Remove every processed dependency (both matched and minus-matched) from the multimap
    for (IdeDependencyKey<?, Dependency> key : Iterables.concat(deps, minusDeps)) {
        dependenciesToConfigs.removeAll(key);
    }
    return deps;
}

From source file:org.locationtech.geogig.api.RevTreeBuilder.java

/**
 * @return
 * 
 */
private RevTree normalizeToBuckets() {
    // update all inner trees
    final ImmutableSet<Integer> changedBucketIndexes;

    // aggregate size delta for all changed buckets
    long sizeDelta = 0L;
    // aggregate number of trees delta for all changed buckets
    int treesDelta = 0;

    try {
        Multimap<Integer, Node> changesByBucket = getChangesByBucket();
        Preconditions.checkState(featureChanges.isEmpty());
        Preconditions.checkState(treeChanges.isEmpty());
        Preconditions.checkState(deletes.isEmpty());

        changedBucketIndexes = ImmutableSet.copyOf(changesByBucket.keySet());
        final Map<Integer, RevTree> bucketTrees = getBucketTrees(changedBucketIndexes);
        List<RevTree> newLeafTreesToSave = Lists.newArrayList();

        for (Integer bucketIndex : changedBucketIndexes) {
            final RevTree currentBucketTree = bucketTrees.get(bucketIndex);
            final int bucketDepth = this.depth + 1;
            final RevTreeBuilder bucketTreeBuilder = new RevTreeBuilder(this.db, currentBucketTree, bucketDepth,
                    this.pendingWritesCache);
            {
                final Collection<Node> bucketEntries = changesByBucket.removeAll(bucketIndex);
                for (Node node : bucketEntries) {
                    if (node.getObjectId().isNull()) {
                        bucketTreeBuilder.remove(node.getName());
                    } else {
                        bucketTreeBuilder.put(node);
                    }
                }
            }
            final RevTree modifiedBucketTree = bucketTreeBuilder.build();
            final long bucketSizeDelta = modifiedBucketTree.size() - currentBucketTree.size();
            final int bucketTreesDelta = modifiedBucketTree.numTrees() - currentBucketTree.numTrees();
            sizeDelta += bucketSizeDelta;
            treesDelta += bucketTreesDelta;
            if (modifiedBucketTree.isEmpty()) {
                bucketTreesByBucket.remove(bucketIndex);
            } else {
                final Bucket currBucket = this.bucketTreesByBucket.get(bucketIndex);
                if (currBucket == null || !currBucket.id().equals(modifiedBucketTree.getId())) {
                    // if (currBucket != null) {
                    // db.delete(currBucket.id());
                    // }
                    // have it on the pending writes set only if it's not a leaf tree. Non-bucket
                    // trees may be too large and cause OOM
                    if (null != pendingWritesCache.remove(currentBucketTree.getId())) {
                        // System.err.printf(" ---> removed bucket %s from list\n",
                        // currentBucketTree.getId());
                    }
                    if (modifiedBucketTree.buckets().isPresent()) {
                        pendingWritesCache.put(modifiedBucketTree.getId(), modifiedBucketTree);
                    } else {
                        // db.put(modifiedBucketTree);
                        newLeafTreesToSave.add(modifiedBucketTree);
                    }
                    Envelope bucketBounds = SpatialOps.boundsOf(modifiedBucketTree);
                    Bucket bucket = Bucket.create(modifiedBucketTree.getId(), bucketBounds);
                    bucketTreesByBucket.put(bucketIndex, bucket);
                }
            }
        }
        if (!newLeafTreesToSave.isEmpty()) {
            db.putAll(newLeafTreesToSave.iterator());
            newLeafTreesToSave.clear();
            newLeafTreesToSave = null;
        }
    } catch (RuntimeException e) {
        throw e;
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    // compute final size and number of trees out of the aggregate deltas
    long accSize = sizeDelta;
    if (initialSize > RevTree.NORMALIZED_SIZE_LIMIT) {
        accSize += initialSize;
    }
    int accChildTreeCount = this.initialNumTrees + treesDelta;

    RevTreeImpl unnamedTree;
    unnamedTree = RevTreeImpl.createNodeTree(ObjectId.NULL, accSize, accChildTreeCount,
            this.bucketTreesByBucket);
    return unnamedTree;
}

From source file:edu.uci.ics.sourcerer.tools.java.component.identifier.internal.ClusterIdentifier.java

public static ClusterCollection identifyClusters(JarCollection jars) {
    TaskProgressLogger task = TaskProgressLogger.get();

    task.start("Identifying core clusters in " + jars.size() + " jar files");
    task.report("Compatibility threshold: " + COMPATIBILITY_THRESHOLD.getValue());

    Multimap<VersionedFqnNode, Cluster> tempClusterMap = ArrayListMultimap.create();

    task.start("Performing post-order traversal of FQN suffix tree", "FQN fragments visited", 100000);
    // Explore the tree in post-order
    int clusterCount = 0;
    for (VersionedFqnNode fragment : jars.getRoot().getPostOrderIterable()) {
        task.progress("%d FQN fragments visited (" + clusterCount + " clusters) in %s");
        // If there are no children, then make it its own single-fqn library
        if (!fragment.hasChildren()) {
            Cluster cluster = Cluster.create(fragment);
            // Store it in the map for processing with the parent
            tempClusterMap.put(fragment, cluster);
            clusterCount++;
        } else {
            // Start merging children
            for (VersionedFqnNode child : fragment.getChildren()) {
                for (Cluster childCluster : tempClusterMap.get(child)) {
                    LinkedList<Cluster> candidates = new LinkedList<>();

                    // Check to see if it can be merged with any of the
                    // libraries currently associated with the parent
                    for (Cluster merge : tempClusterMap.get(fragment)) {
                        if (areCompatible(merge, childCluster)) {
                            candidates.add(merge);
                        }
                    }
                    if (candidates.size() == 0) {
                        // If nothing was found, promote the library
                        tempClusterMap.put(fragment, childCluster);
                    } else if (candidates.size() == 1) {
                        // If one was found, merge in the child
                        Cluster candidate = candidates.getFirst();
                        candidate.mergeCore(childCluster);
                        clusterCount--;
                    } else {
                        // This else will never be hit for threshold 1
                        // TODO Change this for lower thresholds
                        // If more than one was found, promote the library
                        tempClusterMap.put(fragment, childCluster);
                    }
                }
                // Clear the entry for this child fragment
                tempClusterMap.removeAll(child);
            }
        }
    }
    task.finish();

    task.report("Identified " + clusterCount + " core clusters");

    task.finish();

    return ClusterCollection.create(tempClusterMap.get(jars.getRoot()));
}

From source file:org.locationtech.geogig.model.impl.LegacyTreeBuilder.java

/**
 * @return
 * 
 */
private RevTree normalizeToBuckets() {
    // update all inner trees
    final ImmutableSet<Integer> changedBucketIndexes;

    // aggregate size delta for all changed buckets
    long sizeDelta = 0L;
    // aggregate number of trees delta for all changed buckets
    int treesDelta = 0;

    try {
        Multimap<Integer, Node> changesByBucket = getChangesByBucket();
        Preconditions.checkState(featureChanges.isEmpty());
        Preconditions.checkState(treeChanges.isEmpty());
        Preconditions.checkState(deletes.isEmpty());

        changedBucketIndexes = ImmutableSet.copyOf(changesByBucket.keySet());
        final Map<Integer, RevTree> bucketTrees = getBucketTrees(changedBucketIndexes);
        List<RevTree> newLeafTreesToSave = Lists.newArrayList();

        for (Integer bucketIndex : changedBucketIndexes) {
            final RevTree currentBucketTree = bucketTrees.get(bucketIndex);
            final int bucketDepth = this.depth + 1;
            final LegacyTreeBuilder bucketTreeBuilder = new LegacyTreeBuilder(this.obStore, currentBucketTree,
                    bucketDepth, this.pendingWritesCache, this.normalizationThreshold);
            {
                final Collection<Node> bucketEntries = changesByBucket.removeAll(bucketIndex);
                for (Node node : bucketEntries) {
                    if (node.getObjectId().isNull()) {
                        bucketTreeBuilder.remove(node.getName());
                    } else {
                        bucketTreeBuilder.put(node);
                    }
                }
            }
            final RevTree modifiedBucketTree = bucketTreeBuilder.build();
            final long bucketSizeDelta = modifiedBucketTree.size() - currentBucketTree.size();
            final int bucketTreesDelta = modifiedBucketTree.numTrees() - currentBucketTree.numTrees();
            sizeDelta += bucketSizeDelta;
            treesDelta += bucketTreesDelta;
            if (modifiedBucketTree.isEmpty()) {
                bucketTreesByBucket.remove(bucketIndex);
            } else {
                final Bucket currBucket = this.bucketTreesByBucket.get(bucketIndex);
                if (currBucket == null || !currBucket.getObjectId().equals(modifiedBucketTree.getId())) {
                    // if (currBucket != null) {
                    // db.delete(currBucket.id());
                    // }
                    // have it on the pending writes set only if it's not a leaf tree. Non-bucket
                    // trees may be too large and cause OOM
                    if (null != pendingWritesCache.remove(currentBucketTree.getId())) {
                        // System.err.printf(" ---> removed bucket %s from list\n",
                        // currentBucketTree.getId());
                    }
                    if (!modifiedBucketTree.buckets().isEmpty()) {
                        pendingWritesCache.put(modifiedBucketTree.getId(), modifiedBucketTree);
                    } else {
                        // db.put(modifiedBucketTree);
                        newLeafTreesToSave.add(modifiedBucketTree);
                    }
                    Envelope bucketBounds = SpatialOps.boundsOf(modifiedBucketTree);
                    Bucket bucket = Bucket.create(modifiedBucketTree.getId(), bucketBounds);
                    bucketTreesByBucket.put(bucketIndex, bucket);
                }
            }
        }
        if (!newLeafTreesToSave.isEmpty()) {
            // db.putAll(newLeafTreesToSave.iterator());
            for (RevTree leaf : newLeafTreesToSave) {
                pendingWritesCache.put(leaf.getId(), leaf);
            }
            newLeafTreesToSave.clear();
            checkPendingWrites();
        }
    } catch (RuntimeException e) {
        throw e;
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    // compute final size and number of trees out of the aggregate deltas
    long accSize = sizeDelta;
    if (initialSize > CanonicalNodeNameOrder.normalizedSizeLimit(this.depth)) {
        accSize += initialSize;
    }
    int accChildTreeCount = this.initialNumTrees + treesDelta;

    RevTree tree = createNodeTree(accSize, accChildTreeCount, this.bucketTreesByBucket);
    return tree;
}

From source file:org.sonar.core.issue.tracking.BlockRecognizer.java

/**
 * If base source code is available, then detect code moves through block hashes.
 * Only the issues associated with a line can be matched here.
 */
void match(Input<RAW> rawInput, Input<BASE> baseInput, Tracking<RAW, BASE> tracking) {
    BlockHashSequence rawHashSequence = rawInput.getBlockHashSequence();
    BlockHashSequence baseHashSequence = baseInput.getBlockHashSequence();

    Multimap<Integer, RAW> rawsByLine = groupByLine(tracking.getUnmatchedRaws(), rawHashSequence);
    Multimap<Integer, BASE> basesByLine = groupByLine(tracking.getUnmatchedBases(), baseHashSequence);
    Map<Integer, HashOccurrence> occurrencesByHash = new HashMap<>();

    for (Integer line : basesByLine.keySet()) {
        int hash = baseHashSequence.getBlockHashForLine(line);
        HashOccurrence hashOccurrence = occurrencesByHash.get(hash);
        if (hashOccurrence == null) {
            // first occurrence in base
            hashOccurrence = new HashOccurrence();
            hashOccurrence.baseLine = line;
            hashOccurrence.baseCount = 1;
            occurrencesByHash.put(hash, hashOccurrence);
        } else {
            hashOccurrence.baseCount++;
        }
    }

    for (Integer line : rawsByLine.keySet()) {
        int hash = rawHashSequence.getBlockHashForLine(line);
        HashOccurrence hashOccurrence = occurrencesByHash.get(hash);
        if (hashOccurrence != null) {
            hashOccurrence.rawLine = line;
            hashOccurrence.rawCount++;
        }
    }

    for (HashOccurrence hashOccurrence : occurrencesByHash.values()) {
        if (hashOccurrence.baseCount == 1 && hashOccurrence.rawCount == 1) {
            // Guaranteed that baseLine has been moved to rawLine, so we can map all issues on baseLine to all issues on rawLine
            map(rawsByLine.get(hashOccurrence.rawLine), basesByLine.get(hashOccurrence.baseLine), tracking);
            basesByLine.removeAll(hashOccurrence.baseLine);
            rawsByLine.removeAll(hashOccurrence.rawLine);
        }
    }

    // Check if remaining number of lines exceeds threshold. It avoids processing too many combinations.
    if (basesByLine.keySet().size() * rawsByLine.keySet().size() >= 250_000) {
        return;
    }

    List<LinePair> possibleLinePairs = Lists.newArrayList();
    for (Integer baseLine : basesByLine.keySet()) {
        for (Integer rawLine : rawsByLine.keySet()) {
            int weight = lengthOfMaximalBlock(baseInput.getLineHashSequence(), baseLine,
                    rawInput.getLineHashSequence(), rawLine);
            possibleLinePairs.add(new LinePair(baseLine, rawLine, weight));
        }
    }
    Collections.sort(possibleLinePairs, LinePairComparator.INSTANCE);
    for (LinePair linePair : possibleLinePairs) {
        // High probability that baseLine has been moved to rawLine, so we can map all issues on baseLine to all issues on rawLine
        map(rawsByLine.get(linePair.rawLine), basesByLine.get(linePair.baseLine), tracking);
    }
}

From source file:nl.sidn.pcap.PcapReader.java

/**
 * Clear expired cache entries in order to avoid memory problems 
 */
public void clearCache(int tcpFlowCacheTimeout, int fragmentedIPcacheTimeout) {
    //clear tcp flows with expired packets
    List<TCPFlow> expiredList = new ArrayList<>();
    long now = System.currentTimeMillis();
    Multimap<TCPFlow, SequencePayload> flows = tcpDecoder.getFlows();
    for (TCPFlow flow : flows.keySet()) {
        Collection<SequencePayload> payloads = flows.get(flow);
        for (SequencePayload sequencePayload : payloads) {
            if ((sequencePayload.getTime() + tcpFlowCacheTimeout) <= now) {
                expiredList.add(flow);
                break;
            }
        }
    }

    //check IP datagrams
    List<Datagram> dgExpiredList = new ArrayList<>();

    for (Datagram dg : ipDecoder.getDatagrams().keySet()) {
        if ((dg.getTime() + fragmentedIPcacheTimeout) <= now) {
            dgExpiredList.add(dg);
        }
    }

    LOG.info("------------- Cache purge stats --------------");
    LOG.info("TCP flow cache size: " + flows.size());
    LOG.info("IP datagram cache size: " + ipDecoder.getDatagrams().size());
    LOG.info("Expired (to be removed) TCP flows: " + expiredList.size());
    LOG.info("Expired (to be removed) IP datagrams: " + dgExpiredList.size());
    LOG.info("----------------------------------------------------");

    //remove flows with expired packets
    for (TCPFlow tcpFlow : expiredList) {
        flows.removeAll(tcpFlow);
    }

    //remove expired IP datagrams
    for (Datagram dg : dgExpiredList) {
        ipDecoder.getDatagrams().removeAll(dg);
    }

}

From source file:org.sonar.batch.issue.tracking.IssueTracking.java

private void mapNewissues(FileHashes hashedReference, FileHashes hashedSource,
        Collection<DefaultIssue> newIssues, IssueTrackingResult result) {

    IssueTrackingBlocksRecognizer rec = new IssueTrackingBlocksRecognizer(hashedReference, hashedSource);

    RollingFileHashes a = RollingFileHashes.create(hashedReference, 5);
    RollingFileHashes b = RollingFileHashes.create(hashedSource, 5);

    Multimap<Integer, DefaultIssue> newIssuesByLines = newIssuesByLines(newIssues, rec, result);
    Multimap<Integer, ServerIssue> lastIssuesByLines = lastIssuesByLines(result.unmatched(), rec);

    Map<Integer, HashOccurrence> map = Maps.newHashMap();

    for (Integer line : lastIssuesByLines.keySet()) {
        int hash = a.getHash(line);
        HashOccurrence hashOccurrence = map.get(hash);
        if (hashOccurrence == null) {
            // first occurrence in A
            hashOccurrence = new HashOccurrence();
            hashOccurrence.lineA = line;
            hashOccurrence.countA = 1;
            map.put(hash, hashOccurrence);
        } else {
            hashOccurrence.countA++;
        }
    }

    for (Integer line : newIssuesByLines.keySet()) {
        int hash = b.getHash(line);
        HashOccurrence hashOccurrence = map.get(hash);
        if (hashOccurrence != null) {
            hashOccurrence.lineB = line;
            hashOccurrence.countB++;
        }
    }

    for (HashOccurrence hashOccurrence : map.values()) {
        if (hashOccurrence.countA == 1 && hashOccurrence.countB == 1) {
            // Guaranteed that lineA has been moved to lineB, so we can map all issues on lineA to all issues on lineB
            map(newIssuesByLines.get(hashOccurrence.lineB), lastIssuesByLines.get(hashOccurrence.lineA),
                    result);
            lastIssuesByLines.removeAll(hashOccurrence.lineA);
            newIssuesByLines.removeAll(hashOccurrence.lineB);
        }
    }

    // Check if remaining number of lines exceeds threshold
    if (lastIssuesByLines.keySet().size() * newIssuesByLines.keySet().size() < 250000) {
        List<LinePair> possibleLinePairs = Lists.newArrayList();
        for (Integer oldLine : lastIssuesByLines.keySet()) {
            for (Integer newLine : newIssuesByLines.keySet()) {
                int weight = rec.computeLengthOfMaximalBlock(oldLine, newLine);
                possibleLinePairs.add(new LinePair(oldLine, newLine, weight));
            }
        }
        Collections.sort(possibleLinePairs, LINE_PAIR_COMPARATOR);
        for (LinePair linePair : possibleLinePairs) {
            // High probability that lineA has been moved to lineB, so we can map all Issues on lineA to all Issues on lineB
            map(newIssuesByLines.get(linePair.lineB), lastIssuesByLines.get(linePair.lineA), result);
        }
    }
}

From source file:com.b2international.snowowl.snomed.datastore.index.change.DescriptionChangeProcessor.java

private void processChanges(final String id, final Builder doc,
        final SnomedDescriptionIndexEntry currentRevision,
        Multimap<Acceptability, RefSetMemberChange> acceptabilityChanges,
        Multimap<String, RefSetMemberChange> referringRefSets) {
    final Multimap<Acceptability, String> acceptabilityMap = currentRevision == null
            ? ImmutableMultimap.<Acceptability, String>of()
            : ImmutableMap.copyOf(currentRevision.getAcceptabilityMap()).asMultimap().inverse();

    final Collection<String> preferredLanguageRefSets = newHashSet(
            acceptabilityMap.get(Acceptability.PREFERRED));
    final Collection<String> acceptableLanguageRefSets = newHashSet(
            acceptabilityMap.get(Acceptability.ACCEPTABLE));

    if (acceptabilityChanges != null) {
        collectChanges(acceptabilityChanges.get(Acceptability.PREFERRED), preferredLanguageRefSets);
        collectChanges(acceptabilityChanges.get(Acceptability.ACCEPTABLE), acceptableLanguageRefSets);
    }

    for (String preferredLanguageRefSet : preferredLanguageRefSets) {
        doc.acceptability(preferredLanguageRefSet, Acceptability.PREFERRED);
    }

    for (String acceptableLanguageRefSet : acceptableLanguageRefSets) {
        doc.acceptability(acceptableLanguageRefSet, Acceptability.ACCEPTABLE);
    }

    final Collection<String> currentMemberOf = currentRevision == null ? Collections.<String>emptySet()
            : currentRevision.getMemberOf();
    final Collection<String> currentActiveMemberOf = currentRevision == null ? Collections.<String>emptySet()
            : currentRevision.getActiveMemberOf();
    new ReferenceSetMembershipUpdater(referringRefSets.removeAll(id), currentMemberOf, currentActiveMemberOf)
            .update(doc);
}