Example usage for com.google.common.collect Multiset remove

List of usage examples for com.google.common.collect Multiset remove

Introduction

On this page you can find usage examples for com.google.common.collect Multiset remove.

Prototype

@Override
boolean remove(@Nullable Object element);

Document

Removes a single occurrence of the specified element from this multiset, if present. Returns true if an occurrence was found and removed.
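
The snippet below is a minimal, self-contained sketch (the class and variable names are illustrative only, not taken from the projects quoted on this page) showing that remove removes exactly one occurrence and reports whether an occurrence was present:

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class MultisetRemoveDemo {
    public static void main(String[] args) {
        Multiset<String> words = HashMultiset.create();
        words.add("apple");
        words.add("apple");
        words.add("banana");

        // A single occurrence is removed, so the count of "apple" drops from 2 to 1.
        boolean removedApple = words.remove("apple");   // true
        // Removing an element that is not present leaves the multiset unchanged.
        boolean removedCherry = words.remove("cherry"); // false

        System.out.println(removedApple);               // true
        System.out.println(removedCherry);              // false
        System.out.println(words.count("apple"));       // 1
        System.out.println(words.size());               // 2
    }
}

To drop every occurrence of an element at once, use setCount(element, 0) or elementSet().remove(element); remove(Object) only ever decrements the count by one.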

Usage

From source file: com.mycompany.wolf.Room.java

private void assignRoles() {
    Multiset<String> roleCounts = HashMultiset.create(roleCounts());
    Map<String, String> roleMap = new HashMap<>();
    // Multiset.remove returns true only while occurrences of the requested role remain,
    // so each competing player claims a role until that role's count is exhausted.
    competeRoles.values().stream().filter(c -> roleCounts.remove(c.role)).forEach(c -> {
        roleMap.put(c.playerId, c.role);
    });
    List<String> restPlayerId = sessions.stream().map(s -> getPlayerId(s)).filter(s -> !roleMap.containsKey(s))
            .collect(Collectors.toList());
    Collections.shuffle(restPlayerId);
    Iterator<String> restRoleIt = roleCounts.iterator();
    Iterator<String> restPlayerIdIt = restPlayerId.iterator();
    while (restRoleIt.hasNext()) {
        String role = restRoleIt.next();
        String playerId = restPlayerIdIt.next();
        roleMap.put(playerId, role);
    }
    sessions.stream().forEach(s -> {
        s.getUserProperties().put("role", roleMap.get(getPlayerId(s)));
    });

    List<ImmutableMap<String, String>> assignedRoles = roleMap.entrySet().stream()
            .map(entry -> ImmutableMap.of("playerId", entry.getKey(), "role", entry.getValue()))
            .collect(Collectors.toCollection(LinkedList::new));
    Map<String, Object> assignRoles = ImmutableMap.of("code", "assignRoles", "properties", assignedRoles);
    String jsonText = JsonUtils.toString(assignRoles);
    sessions.stream().forEach(s -> {
        s.getAsyncRemote().sendText(jsonText);
    });
}

From source file: com.b2international.snowowl.snomed.datastore.index.update.ReferenceSetMembershipUpdater.java

private void processReferencingRefSetIds(final Multiset<String> memberOf,
        final Multiset<String> activeMemberOf) {
    memberChanges.stream().filter(c -> c.getChangeKind() == MemberChangeKind.ADDED).forEach(change -> {
        if (change.isActive()) {
            activeMemberOf.add(change.getRefSetId());
        }
        memberOf.add(change.getRefSetId());
    });

    memberChanges.stream().filter(c -> c.getChangeKind() == MemberChangeKind.CHANGED).forEach(change -> {
        // if the new state is active, add it to activeMemberOf; otherwise remove it from that set.
        // This state transition does not change the memberOf field, where all referring reference sets are tracked.
        if (change.isActive()) {
            activeMemberOf.add(change.getRefSetId());
        } else {
            activeMemberOf.remove(change.getRefSetId());
        }
    });

    memberChanges.stream().filter(c -> c.getChangeKind() == MemberChangeKind.REMOVED).forEach(change -> {
        if (change.isActive()) {
            activeMemberOf.remove(change.getRefSetId());
        }
        memberOf.remove(change.getRefSetId());
    });
}

From source file: de.andreasschoknecht.LS3.DocumentCollection.java

/**
 * Inserts a model into the model collection. This means that the underlying Term-Document Matrix has to be updated.
 *
 * @param modelPath the path to the model to be inserted.
 */
public void insertModel(String modelPath) {
    // Make sure file name is correct
    if (!modelPath.endsWith(".pnml"))
        modelPath = modelPath + ".pnml";

    // Create new LS3Document object and add it to the document collection list of documents
    System.out.println("------------------------");
    System.out.println("Model to insert:");
    System.out.println("------------------------");
    System.out.println(modelPath.substring(modelPath.lastIndexOf(File.separator) + 1));
    System.out.println("------------------------");
    System.out.println("Models in list:");
    System.out.println("------------------------");

    // Copy the existing file names and append the new model's file name as the last entry
    String[] updatedFileList = new String[fileList.length + 1];
    for (int i = 0; i <= fileList.length; i++) {
        if (i != fileList.length)
            updatedFileList[i] = fileList[i];
        else
            updatedFileList[i] = modelPath.substring(modelPath.lastIndexOf(File.separator) + 1);

        System.out.println(updatedFileList[i]);

    }

    documentNumber++;

    LS3Document newDocument = new LS3Document(modelPath);
    PNMLReader pnmlReader = new PNMLReader();
    try {
        pnmlReader.processDocument(newDocument);
    } catch (JDOMException | IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    System.out.println("------------------------");
    System.out.println("New LS3Document data:");
    System.out.println("------------------------");
    System.out.println(newDocument.getPNMLPath());
    System.out.println("Amount of terms = " + newDocument.getAmountTerms());
    for (String term : newDocument.getTermCollection()) {
        System.out.println(term);
    }

    // Add new column to the Term-Document Matrix
    int t = tdMatrix.getRowNumber();
    double[] termFrequencies = new double[t];
    String[] termCollectionArray = new String[termCollection.size()];
    termCollection.toArray(termCollectionArray);

    Multiset<String> termsM = newDocument.getTermCollection();
    for (int i = 0; i < t; i++) {
        termFrequencies[i] = termsM.count(termCollectionArray[i]);
        // remove(Object) drops a single occurrence of the term that was just counted
        termsM.remove(termCollectionArray[i]);
    }
    System.out.println("------------------------");
    System.out.println("Term frequencies:");
    System.out.println("------------------------");
    System.out.println(Arrays.toString(termFrequencies));

    System.out.println("------------------------");
    System.out.println("Old TD Matrix:");
    System.out.println("------------------------");
    for (int i = 0, k = tdMatrix.getRowNumber(); i < k; i++) {
        System.out.print(tdMatrix.getTermArray()[i] + " ");
        for (int j = 0, l = tdMatrix.getColumnNumber(); j < l; j++) {
            System.out.print(" " + tdMatrix.getMatrix()[i][j] + " ");
        }
        System.out.println("");
    }
    System.out.println("---------------------\r\n\r\n");

    tdMatrix.addColumn(termFrequencies);

    System.out.println("------------------------");
    System.out.println("New TD Matrix:");
    System.out.println("------------------------");
    for (int i = 0, k = tdMatrix.getRowNumber(); i < k; i++) {
        System.out.print(tdMatrix.getTermArray()[i] + " ");
        for (int j = 0, l = tdMatrix.getColumnNumber(); j < l; j++) {
            System.out.print(" " + tdMatrix.getMatrix()[i][j] + " ");
        }
        System.out.println("");
    }
    System.out.println("---------------------\r\n\r\n");

    // Add new terms of the new model to the term list of the document collection
    System.out.println("------------------------");
    System.out.println("Old term collection:");
    System.out.println("------------------------");
    for (String term : termCollection) {
        System.out.println(term);
    }

    System.out.println("------------------------");
    System.out.println("Terms remaining in insertion model:");
    System.out.println("------------------------");
    System.out.println(Arrays.toString(termsM.toArray(new String[termsM.size()])));

    Set<String> termSet = termsM.elementSet();
    String[] newTerms = termSet.toArray(new String[termSet.size()]);
    for (String term : newTerms) {
        termCollection.add(term);
    }

    System.out.println("------------------------");
    System.out.println("New term collection:");
    System.out.println("------------------------");
    for (String term : termCollection) {
        System.out.println(term);
    }

    System.out.println("------------------------");
    System.out.println("New term collection TD Matrix:");
    System.out.println("------------------------");
    for (String term : tdMatrix.getTermArray()) {
        System.out.println(term);
    }

    //  Add one row for each new term and add the corresponding Term-Document Matrix entries
    double[] newTermsFrequencies = new double[newTerms.length];
    for (int i = 0; i < newTerms.length; i++) {
        newTermsFrequencies[i] = termsM.count(newTerms[i]);
    }

    System.out.println("------------------------");
    System.out.println("New term frequencies:");
    System.out.println("------------------------");
    System.out.println(Arrays.toString(newTermsFrequencies));

    int n = tdMatrix.getColumnNumber();
    for (int i = 0; i < newTermsFrequencies.length; i++) {
        double[] newRow = new double[n];
        // Java arrays are zero-initialized; only the last column (the newly inserted document) needs a value
        newRow[n - 1] = newTermsFrequencies[i];
        tdMatrix.addRow(newRow);
    }

    // Update term list of TDMatrix object
    tdMatrix.setTermArray(termCollection.toArray(new String[0]));

    System.out.println("------------------------");
    System.out.println("Final TD Matrix:");
    System.out.println("------------------------");
    for (int i = 0, k = tdMatrix.getRowNumber(); i < k; i++) {
        System.out.print(tdMatrix.getTermArray()[i] + " ");
        for (int j = 0, l = tdMatrix.getColumnNumber(); j < l; j++) {
            System.out.print(" " + tdMatrix.getMatrix()[i][j] + " ");
        }
        System.out.println("");
    }
    System.out.println("---------------------\r\n\r\n");

}

From source file: org.eclipse.viatra.query.runtime.base.core.NavigationHelperContentAdapter.java

/**
 * This method uses either the original {@link EStructuralFeature} instance or the String id.
 */
private void removeFromReversedFeatureMap(final Object feature, final EObject holder) {
    final Multiset<EObject> setVal = featureToHolderMap.get(feature);
    if (setVal != null) {
        setVal.remove(holder);

        if (setVal.isEmpty()) {
            featureToHolderMap.remove(feature);
        }
    }
}

From source file: com.clarkparsia.sbol.order.PartialOrder.java

/**
 * Returns the elements in an ascending topological order.
 *
 * @throws IllegalStateException if there are cycles between the elements
 */
@Override
public Iterator<T> iterator() throws IllegalStateException {
    Multiset<T> degrees = HashMultiset.create();
    Queue<T> nodesPending = new ArrayDeque<T>();
    List<T> nodesSorted = Lists.newArrayList();

    for (Entry<T, Set<T>> entry : precededBy.entrySet()) {
        T node = entry.getKey();
        Set<T> precededByList = entry.getValue();
        int degree = precededByList.size();
        degrees.setCount(node, degree);
        if (degree == 0) {
            nodesPending.add(node);
        }
    }

    while (!nodesPending.isEmpty()) {
        T node = nodesPending.remove();

        int deg = degrees.count(node);
        if (deg != 0)
            throw new IllegalStateException("Cycle detected " + node + " " + deg + " " + nodesSorted.size());

        nodesSorted.add(node);

        for (Entry<T, Set<T>> entry : precededBy.entrySet()) {
            T n = entry.getKey();
            Set<T> precededByList = entry.getValue();
            if (precededByList.contains(node)) {
                int degree = degrees.count(n);
                if (degree == 1) {
                    nodesPending.add(n);
                    degrees.setCount(n, 0);
                } else {
                    // Multiset.remove decrements n's remaining in-degree by one
                    degrees.remove(n);
                }
            }
        }
    }

    if (nodesSorted.size() != precededBy.size()) {
        throw new IllegalStateException("Failed to sort elements");
    }

    return nodesSorted.iterator();
}

From source file: edu.stanford.nlp.util.JBLEU.java

public void stats(List<String> hyp, List<List<String>> refs, int[] result) {
    assert result.length == 9;
    assert refs.size() > 0;

    // 1) choose reference length
    int selectedRef = pickReference(hyp, refs, verbosity);
    int selectedRefLen = refs.get(selectedRef).size();

    // TODO: Integer-ify everything inside Ngram? Or is there too much
    // overhead there?

    // 2) determine the bag of n-grams we can score against
    // collect them in simple multisets
    Multiset<Ngram> clippedRefNgrams = HashMultiset.create();
    for (List<String> ref : refs) {
        Multiset<Ngram> refNgrams = HashMultiset.create();
        for (int order = 1; order <= N; order++) {
            for (int i = 0; i <= ref.size() - order; i++) {
                List<String> toks = ref.subList(i, i + order);
                Ngram ngram = new Ngram(toks);
                refNgrams.add(ngram);
            }
        }
        // clip n-grams by taking the maximum number of counts for any given reference
        for (Ngram ngram : refNgrams) {
            int clippedCount = Math.max(refNgrams.count(ngram), clippedRefNgrams.count(ngram));
            clippedRefNgrams.setCount(ngram, clippedCount);
        }
    }

    // 3) now match n-grams
    int[] attempts = new int[N];
    int[] matches = new int[N];
    for (int order = 1; order <= N; order++) {
        for (int i = 0; i <= hyp.size() - order; i++) {
            List<String> toks = hyp.subList(i, i + order);
            Ngram ngram = new Ngram(toks);
            // remove returns true if a matching reference n-gram was still available, consuming one occurrence
            boolean found = clippedRefNgrams.remove(ngram);
            ++attempts[order - 1];
            if (found) {
                ++matches[order - 1];
            }
        }
    }

    // 4) assign sufficient stats
    System.arraycopy(attempts, 0, result, 0, N);
    System.arraycopy(matches, 0, result, N, N);
    result[N * 2] = selectedRefLen;
}

From source file: com.facebook.presto.raptor.storage.BucketBalancer.java

private static Multimap<String, BucketAssignment> computeAssignmentChanges(ClusterState clusterState) {
    Multimap<String, BucketAssignment> sourceToAllocationChanges = HashMultimap.create();

    Map<String, Long> allocationBytes = new HashMap<>(clusterState.getAssignedBytes());
    Set<String> activeNodes = clusterState.getActiveNodes();

    for (Distribution distribution : clusterState.getDistributionAssignments().keySet()) {
        // number of buckets in this distribution assigned to a node
        Multiset<String> allocationCounts = HashMultiset.create();
        Collection<BucketAssignment> distributionAssignments = clusterState.getDistributionAssignments()
                .get(distribution);
        distributionAssignments.stream().map(BucketAssignment::getNodeIdentifier)
                .forEach(allocationCounts::add);

        int currentMin = allocationBytes.keySet().stream().mapToInt(allocationCounts::count).min().getAsInt();
        int currentMax = allocationBytes.keySet().stream().mapToInt(allocationCounts::count).max().getAsInt();

        int numBuckets = distributionAssignments.size();
        int targetMin = (int) Math.floor((numBuckets * 1.0) / clusterState.getActiveNodes().size());
        int targetMax = (int) Math.ceil((numBuckets * 1.0) / clusterState.getActiveNodes().size());

        log.info("Distribution %s: Current bucket skew: min %s, max %s. Target bucket skew: min %s, max %s",
                distribution.getId(), currentMin, currentMax, targetMin, targetMax);

        for (String source : ImmutableSet.copyOf(allocationCounts)) {
            List<BucketAssignment> existingAssignments = distributionAssignments.stream()
                    .filter(assignment -> assignment.getNodeIdentifier().equals(source)).collect(toList());

            for (BucketAssignment existingAssignment : existingAssignments) {
                if (activeNodes.contains(source) && allocationCounts.count(source) <= targetMin) {
                    break;
                }

                // identify nodes with bucket counts lower than the computed target, and greedily select from this set based on projected disk utilization.
                // greediness means that this may produce decidedly non-optimal results if one looks at the global distribution of buckets->nodes.
                // also, this assumes that nodes in a cluster have identical storage capacity
                String target = activeNodes.stream()
                        .filter(candidate -> !candidate.equals(source)
                                && allocationCounts.count(candidate) < targetMax)
                        .sorted(comparingInt(allocationCounts::count))
                        .min(Comparator.comparingDouble(allocationBytes::get))
                        .orElseThrow(() -> new VerifyException("unable to find target for rebalancing"));

                long bucketSize = clusterState.getDistributionBucketSize().get(distribution);

                // only move bucket if it reduces imbalance
                if (activeNodes.contains(source) && (allocationCounts.count(source) == targetMax
                        && allocationCounts.count(target) == targetMin)) {
                    break;
                }

                // move one bucket: remove decrements the source's bucket count by one, add increments the target's
                allocationCounts.remove(source);
                allocationCounts.add(target);
                allocationBytes.compute(source, (k, v) -> v - bucketSize);
                allocationBytes.compute(target, (k, v) -> v + bucketSize);

                sourceToAllocationChanges.put(existingAssignment.getNodeIdentifier(), new BucketAssignment(
                        existingAssignment.getDistributionId(), existingAssignment.getBucketNumber(), target));
            }
        }
    }

    return sourceToAllocationChanges;
}

From source file: fabric.worker.transaction.DeadlockDetectorThread.java

/**
 * Resolves deadlocks by aborting transactions.
 *
 * @param cycles
 *          the set of deadlocks, represented by the logs of transactions
 *          involved in waits-for cycles.
 */
private void resolveDeadlocks(Set<Set<Log>> cycles) {
    // Turn the set of cycles into a map from top-level TIDs to sorted multisets
    // of transaction logs. The multisets are sorted by transaction depth, outer
    // transactions first.
    LongKeyMap<Multiset<Log>> logsByTopLevelTid = new LongKeyHashMap<Multiset<Log>>();
    for (Set<Log> cycle : cycles) {
        for (Log log : cycle) {
            long topLevelTid = log.getTid().topTid;
            Multiset<Log> logs = logsByTopLevelTid.get(topLevelTid);
            if (logs == null) {
                logs = TreeMultiset.create(LOG_COMPARATOR);
                logsByTopLevelTid.put(topLevelTid, logs);
            }

            logs.add(log);
        }
    }

    // Abort transactions to break up cycles. Transactions involved in more
    // cycles are aborted first.
    while (!cycles.isEmpty()) {
        // Figure out which top-level transaction(s) are involved in the greatest
        // number of deadlocks.
        int curMax = 0;
        LongSet abortCandidates = new LongHashSet();
        for (LongKeyMap.Entry<Multiset<Log>> entry : logsByTopLevelTid.entrySet()) {
            int curSize = entry.getValue().size();
            if (curMax > curSize)
                continue;

            if (curMax < curSize) {
                curMax = curSize;
                abortCandidates.clear();
            }

            abortCandidates.add(entry.getKey());
        }

        // Figure out which transaction to abort. (Pick the newest one.)
        Log toAbort = null;
        Multiset<Log> abortSet = null;
        for (LongIterator it = abortCandidates.iterator(); it.hasNext();) {
            long curTopLevelTid = it.next();
            Multiset<Log> curCandidateSet = logsByTopLevelTid.get(curTopLevelTid);
            Log curCandidate = curCandidateSet.iterator().next();

            if (toAbort == null || toAbort.startTime < curCandidate.startTime) {
                toAbort = curCandidate;
                abortSet = curCandidateSet;
            }
        }

        // Abort the transaction.
        WORKER_DEADLOCK_LOGGER.log(Level.FINE, "Aborting {0}", toAbort);
        toAbort.flagRetry();

        // Fix up our data structures to reflect the aborted transaction.
        for (Iterator<Set<Log>> cycleIt = cycles.iterator(); cycleIt.hasNext();) {
            Set<Log> cycle = cycleIt.next();

            // Check if the cycle has a transaction that was aborted.
            if (!haveCommonElements(cycle, abortSet.elementSet()))
                continue;

            // Cycle was broken, so remove from the set of cycles.
            cycleIt.remove();

            // Fix up logsByTopLevelTid.
            for (Log log : cycle) {
                long topLevelTid = log.getTid().topTid;
                Multiset<Log> logs = logsByTopLevelTid.get(topLevelTid);
                // remove a single occurrence of this log; drop the top-level TID entry once it is empty
                logs.remove(log);
                if (logs.isEmpty()) {
                    logsByTopLevelTid.remove(topLevelTid);
                }
            }
        }
    }
}