Example usage for com.google.common.collect Multiset add

Introduction

This page collects example usages of com.google.common.collect.Multiset#add, drawn from open source projects.

Prototype

@Override
boolean add(E element);

Document

Adds a single occurrence of the specified element to this multiset.
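
Before the project examples below, here is a minimal, self-contained sketch of the call's semantics: add(E) always inserts one more occurrence, returns true, and increments the element's count (the names and values here are illustrative, not taken from any project below).

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class MultisetAddDemo {
    public static void main(String[] args) {
        Multiset<String> words = HashMultiset.create();
        words.add("guava");                       // count("guava") == 1
        words.add("guava");                       // duplicates are kept: count("guava") == 2
        words.add("collect");
        System.out.println(words.count("guava")); // 2
        System.out.println(words.size());         // 3: size() counts occurrences, not distinct elements
    }
}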

Usage

From source file:bots.mctsbot.ai.bots.bot.gametree.rollout.BucketRollOut.java

public double doRollOut(int nbCommunitySamples) {
    boolean traceEnabled = logger.isTraceEnabled();
    double totalEV = 0;
    model.assumeTemporarily(gameState);
    for (int i = 0; i < nbCommunitySamples; i++) {
        int communitySampleRank = fixedRank;
        Set<Integer> usedCommunityAndBotCards = new TreeSet<Integer>(usedFixedCommunityAndBotCards);
        Set<Integer> usedCommunityCards = new TreeSet<Integer>();
        for (int card = 0; card < usedFixedCommunityCards.size(); card++) {
            usedCommunityCards.add(usedFixedCommunityCards.getCardIndex(card + 1));
        }

        for (int j = 0; j < nbMissingCommunityCards; j++) {
            Integer communityCard = drawNewCard(usedCommunityAndBotCards);
            if (traceEnabled) {
                logger.trace("Evaluating sampled community card " + communityCard);
            }
            usedCommunityCards.add(communityCard);
            communitySampleRank = updateIntermediateRank(communitySampleRank, new Card(communityCard));
        }
        if (traceEnabled) {
            logger.trace("Evaluating bot cards " + botCard1 + " " + botCard2);
        }
        int botRank = getFinalRank(communitySampleRank, botCard1, botCard2);

        //         int minSampleRank = Integer.MAX_VALUE;
        //         int maxSampleRank = Integer.MIN_VALUE;
        //         int sum = 0;
        Multiset<Integer> ranks = TreeMultiset.create();
        Multiset<Integer> deadRanks = TreeMultiset.create();
        int n = 100;
        for (int j = 0; j < n; j++) {
            Set<Integer> handCards = new TreeSet<Integer>(usedCommunityCards);
            Integer sampleCard1 = drawNewCard(handCards);
            Integer sampleCard2 = drawNewCard(handCards);
            int sampleRank = getFinalRank(communitySampleRank, new Card(sampleCard1), new Card(sampleCard2));
            ranks.add(sampleRank);
            if (botCard1.equals(sampleCard1) || botCard1.equals(sampleCard2) || botCard2.equals(sampleCard1)
                    || botCard2.equals(sampleCard2)) {
                deadRanks.add(sampleRank);
            }
            //            if(sampleRank<minSampleRank){
            //               minSampleRank = sampleRank;
            //            }
            //            if(sampleRank>maxSampleRank){
            //               maxSampleRank = sampleRank;
            //            }
            //            sum += sampleRank;
        }
        //         double mean = ((double)sum)/n;
        //         double var = calcVariance(ranks, mean);
        //         int averageSampleRank = (int) Math.round(mean);
        //         int sigmaSampleRank = (int) Math.round(Math.sqrt(var));

        WinDistribution[] winProbs = calcWinDistributions(botRank, ranks, deadRanks);
        double[] deadCardWeights = calcDeadCardWeights(ranks, deadRanks);

        TreeMap<PlayerState, WinDistribution> winDistributions = calcOpponentWinDistributionMap(winProbs,
                deadCardWeights);

        int maxDistributed = 0;
        int botInvestment = botState.getTotalInvestment();
        double sampleEV = 0;
        for (Iterator<PlayerState> iter = winDistributions.keySet().iterator(); iter.hasNext();) {
            PlayerState opponent = iter.next();
            int toDistribute = Math.min(botInvestment, opponent.getTotalInvestment()) - maxDistributed;
            if (toDistribute > 0) {
                double pWin = 1;
                double pNotLose = 1;
                for (WinDistribution distribution : winDistributions.values()) {
                    //you win when you win against every opponent
                    pWin *= distribution.pWin;
                    //you don't lose when you don't lose against any opponent
                    pNotLose *= distribution.pWin + distribution.pDraw;
                }
                sampleEV += toDistribute * pWin;
                //you draw when you don't lose but don't win everything either
                double pDraw = pNotLose - pWin;
                // assume worst case, with winDistributions.size()+1 drawers
                //TODO do this better, use rollout or statistics!
                sampleEV += pDraw * toDistribute / (winDistributions.size() + 1.0);
                maxDistributed += toDistribute;
            }
            iter.remove();
        }
        //get back uncalled investment
        sampleEV += botInvestment - maxDistributed;
        totalEV += sampleEV;
    }
    model.forgetLastAssumption();
    return (1 - gameState.getTableConfiguration().getRake()) * (totalEV / nbCommunitySamples);
}
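
A side note on the choice of TreeMultiset above: it keeps the sampled ranks sorted, so order-based queries stay cheap. The sketch below is a hypothetical helper in that spirit; estimatePWin is not the project's calcWinDistributions, and it assumes a higher rank wins.

import com.google.common.collect.BoundType;
import com.google.common.collect.SortedMultiset;
import com.google.common.collect.TreeMultiset;

public class RankQueryDemo {
    // Hypothetical helper, assuming a higher rank wins: the fraction of sampled
    // opponent ranks strictly below botRank approximates the bot's win probability.
    static double estimatePWin(int botRank, SortedMultiset<Integer> ranks) {
        if (ranks.isEmpty()) {
            return 0.0;
        }
        // headMultiset(bound, OPEN) is a view of all occurrences < botRank;
        // its size() counts occurrences, not distinct elements.
        return ranks.headMultiset(botRank, BoundType.OPEN).size() / (double) ranks.size();
    }

    public static void main(String[] args) {
        SortedMultiset<Integer> ranks = TreeMultiset.create();
        ranks.add(10);
        ranks.add(10);
        ranks.add(50);
        ranks.add(90);
        System.out.println(estimatePWin(60, ranks)); // 0.75
    }
}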

From source file:org.eclipse.incquery.runtime.base.core.NavigationHelperContentAdapter.java

private void addToReversedFeatureMap(EStructuralFeature feature, EObject holder) {
    Multiset<EObject> setVal = reversedFeatureMap.get(feature);

    if (setVal == null) {
        setVal = HashMultiset.create();
    }
    setVal.add(holder);
    reversedFeatureMap.put(feature, setVal);
}
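
The null-check/create/put sequence above is the classic get-or-create pattern; on Java 8+ it collapses to a single call. A minimal sketch with hypothetical stand-ins for the EMF types (the real map is keyed by EStructuralFeature):

import java.util.HashMap;
import java.util.Map;

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class GetOrCreateDemo {
    // Hypothetical stand-in for reversedFeatureMap, using String keys and
    // values instead of the EMF types in the snippet above.
    private final Map<String, Multiset<String>> reversedFeatureMap = new HashMap<>();

    void addToReversedFeatureMap(String feature, String holder) {
        // computeIfAbsent creates the multiset on first use, then add()
        // records one more occurrence of the holder.
        reversedFeatureMap.computeIfAbsent(feature, f -> HashMultiset.create()).add(holder);
    }
}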

From source file:com.synflow.cx.internal.validation.ExpressionValidator.java

/**
 * Computes the two port sets: one containing ports that are available, the other one containing
 * ports that are read.
 * 
 * @param available
 *            a set in which ports available are put
 * @param read
 *            a set in which ports read are put
 * @param condition
 *            the condition to visit
 */
public void computePortSets(Entity entity, Multiset<Port> available, Multiset<Port> read,
        CxExpression condition) {
    List<ExpressionVariable> exprs;
    if (condition == null) {
        return;
    }

    exprs = EcoreUtil2.eAllOfType(condition, ExpressionVariable.class);
    for (ExpressionVariable expr : exprs) {
        VarRef ref = expr.getSource();
        Variable variable = ref.getVariable();
        if (CxUtil.isPort(variable)) {
            Port port = instantiator.getPort(entity, ref);
            String prop = expr.getProperty();
            if (PROP_AVAILABLE.equals(prop)) {
                available.add(port);
            } else if (PROP_READ.equals(prop)) {
                read.add(port);
            }
        }
    }
}

From source file:visualizer.corpus.zip.InvertedZipCorpus.java

/**
 * Creates the n-grams for the given file.
 * 
 * @param filename the corpus file whose content is read
 * @return the list of n-grams with their frequencies
 * @throws IOException if the file content cannot be retrieved
 */
private List<Ngram> getNgramsFromFile(String filename) throws IOException {
    String filecontent = corpus.getFullContent(filename);
    List<Ngram> ngrams = new ArrayList<Ngram>();

    if (filecontent == null || StringUtil.isEmpty(filecontent)) {
        return ngrams;
    }

    Multiset<String> bag = HashMultiset.create();
    TermExtractor<String> termExtractor = new RegExpTermExtractor(filecontent);

    StringCircularBuffer[] buffers = new StringCircularBuffer[nrGrams - 1];
    for (int i = 2; i <= nrGrams; i++) {
        buffers[i - 2] = new StringCircularBuffer(i);
        buffers[i - 2].setSeparator(Corpus.NGRAM_SEPARATOR);
    }

    String term;
    while ((term = termExtractor.next()) != null) {
        if (!StringUtil.isEmpty(term)) {
            bag.add(term);
            for (StringCircularBuffer buffer : buffers) {
                String ngram = buffer.add(term);
                if (ngram != null) {
                    bag.add(ngram);
                }
            }
        }
    }
    for (StringCircularBuffer buffer : buffers) {
        String leftover = buffer.reset();
        if (leftover != null) {
            bag.add(leftover);
        }
    }

    // Iterate over distinct n-gram texts only; iterating bag directly would
    // visit every occurrence and emit duplicate Ngram objects.
    for (String ngramText : bag.elementSet()) {
        Ngram ngram = new Ngram(ngramText);
        ngram.setFrequency(bag.count(ngramText));
        ngrams.add(ngram);
    }

    return ngrams;
}

From source file:org.zenoss.app.consumer.metric.impl.MetricsQueue.java

/** Iterates over all the metrics' {@link #CLIENT_TAG} tag values, counting occurrences of each. */
private Multiset<String> clientCounts(Collection<Metric> metrics) {
    Multiset<String> counts = HashMultiset.create();
    for (final Metric m : metrics) {
        String clientId = m.getTags().get(CLIENT_TAG);
        if (clientId == null) {
            log.error("Metric {} missing required tag {}. throwing IllegalStateException", m.toString(),
                    CLIENT_TAG);
            throw new IllegalStateException("Metric missing required tag: " + CLIENT_TAG);
        }
        counts.add(clientId);
    }
    return counts;
}
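
Once a tally like this is built, entrySet() is the idiomatic way to read it back, visiting each distinct element once together with its count. A small self-contained sketch (the client IDs are made up):

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class ClientCountsDemo {
    public static void main(String[] args) {
        Multiset<String> counts = HashMultiset.create();
        counts.add("client-a");
        counts.add("client-a");
        counts.add("client-b");
        // One entry per distinct element, paired with its count.
        for (Multiset.Entry<String> e : counts.entrySet()) {
            System.out.println(e.getElement() + " -> " + e.getCount()); // client-a -> 2, client-b -> 1
        }
    }
}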

From source file:org.eclipse.tracecompass.internal.analysis.os.linux.ui.views.controlflow.NaiveOptimizationAlgorithm.java

/**
 * Computes the scheduling column order implied by the arrows.
 *
 * @param arrows
 *            the list of visible links
 * @return the map of weights, by thread ID
 */
@Override
public Map<Integer, Long> apply(Collection<ILinkEvent> arrows) {
    /*
     * "transitions" holds the count of arrows between each pair of tids
     * (Pair<Integer, Integer>). When constructing a Pair, we always put
     * the smaller tid first.
     */
    Multiset<Pair<Integer, Integer>> transitions = HashMultiset.<Pair<Integer, Integer>>create();

    /*
     * We iterate over the arrows to count the number of transitions
     * between each pair (tid, tid) in the current view.
     */
    for (ILinkEvent arrow : arrows) {
        ITimeGraphEntry from = arrow.getEntry();
        ITimeGraphEntry to = arrow.getDestinationEntry();
        if (!(from instanceof ControlFlowEntry) || !(to instanceof ControlFlowEntry)) {
            continue;
        }
        int fromTid = ((ControlFlowEntry) from).getThreadId();
        int toTid = ((ControlFlowEntry) to).getThreadId();
        if (fromTid != toTid) {
            Pair<Integer, Integer> key = new Pair<>(Math.min(fromTid, toTid), Math.max(fromTid, toTid));
            transitions.add(key);
        }
    }

    /*
     * We now have a transition count for every pair (tid,tid). The next
     * step is to sort every pair according to its count in decreasing order
     */
    List<Pair<Integer, Integer>> sortedTransitionsByCount = Multisets.copyHighestCountFirst(transitions)
            .asList();

    /*
     * Next, we find the order in which to display the threads. We simply
     * iterate over every pair (tid, tid) in sortedTransitionsByCount. Each
     * time we see a new tid, we append it to orderedTidMap. This way,
     * threads with many transitions are grouped near the top. While very
     * naive, this algorithm is fast, simple and gives decent results.
     */
    Map<Integer, Long> orderedTidMap = new LinkedHashMap<>();
    long pos = 0;
    for (Pair<Integer, Integer> threadPair : sortedTransitionsByCount) {
        if (orderedTidMap.get(threadPair.getFirst()) == null) {
            orderedTidMap.put(threadPair.getFirst(), pos);
            pos++;
        }
        if (orderedTidMap.get(threadPair.getSecond()) == null) {
            orderedTidMap.put(threadPair.getSecond(), pos);
            pos++;
        }
    }

    return orderedTidMap;
}
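
A note on Multisets.copyHighestCountFirst, used above: it returns an ImmutableMultiset whose iteration order is by descending count, and asList() repeats each element once per occurrence, which is why the loop above checks orderedTidMap before inserting. A brief sketch of that ordering:

import com.google.common.collect.HashMultiset;
import com.google.common.collect.ImmutableMultiset;
import com.google.common.collect.Multiset;
import com.google.common.collect.Multisets;

public class HighestCountFirstDemo {
    public static void main(String[] args) {
        Multiset<String> m = HashMultiset.create();
        m.add("rare");
        m.add("common");
        m.add("common");
        m.add("common");
        ImmutableMultiset<String> byCount = Multisets.copyHighestCountFirst(m);
        System.out.println(byCount.elementSet()); // [common, rare]: distinct elements, highest count first
        System.out.println(byCount.asList());     // [common, common, common, rare]: one entry per occurrence
    }
}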

From source file:org.apache.hadoop.hive.ql.optimizer.SharedWorkOptimizer.java

private static Multiset<String> extractConjsIgnoringDPPPreds(ExprNodeDesc predicate) {
    List<ExprNodeDesc> conjsOp = ExprNodeDescUtils.split(predicate);
    Multiset<String> conjsOpString = TreeMultiset.create();
    for (int i = 0; i < conjsOp.size(); i++) {
        if (conjsOp.get(i) instanceof ExprNodeGenericFuncDesc) {
            ExprNodeGenericFuncDesc func = (ExprNodeGenericFuncDesc) conjsOp.get(i);
            if (GenericUDFInBloomFilter.class == func.getGenericUDF().getClass()) {
                continue;
            } else if (GenericUDFBetween.class == func.getGenericUDF().getClass()
                    && (func.getChildren().get(2) instanceof ExprNodeDynamicValueDesc
                            || func.getChildren().get(3) instanceof ExprNodeDynamicValueDesc)) {
                continue;
            }
        } else if (conjsOp.get(i) instanceof ExprNodeDynamicListDesc) {
            continue;
        }
        conjsOpString.add(conjsOp.get(i).toString());
    }
    return conjsOpString;
}
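
Returning a TreeMultiset of conjunct strings lets callers compare two predicates while ignoring conjunct order but still respecting duplicates, since Multiset equality is defined purely by element counts. A hedged illustration of that comparison (the conjunct strings are invented, and this is not Hive's own comparison code):

import com.google.common.collect.Multiset;
import com.google.common.collect.TreeMultiset;

public class ConjunctCompareDemo {
    public static void main(String[] args) {
        Multiset<String> a = TreeMultiset.create();
        a.add("(col1 > 0)");
        a.add("(col2 = 'x')");
        Multiset<String> b = TreeMultiset.create();
        b.add("(col2 = 'x')");
        b.add("(col1 > 0)");
        // Equal counts for equal elements means equal multisets,
        // regardless of insertion order.
        System.out.println(a.equals(b)); // true
    }
}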

From source file:com.dssmp.agent.tailing.FirehoseSender.java

@Override
protected BufferSendResult<FirehoseRecord> attemptSend(RecordBuffer<FirehoseRecord> buffer) {
    activeBatchPutCalls.incrementAndGet();
    IMetricsScope metrics = agentContext.beginScope();
    metrics.addDimension(Metrics.DESTINATION_DIMENSION, "DeliveryStream:" + getDestination());
    try {
        BufferSendResult<FirehoseRecord> sendResult = null;
        List<Record> requestRecords = new ArrayList<>();
        for (FirehoseRecord data : buffer) {
            Record record = new Record();
            record.setData(data.data());
            requestRecords.add(record);
        }
        PutRecordBatchRequest request = new PutRecordBatchRequest();
        request.setRecords(requestRecords);
        request.setDeliveryStreamName(getDestination());
        PutRecordBatchResult result = null;
        Stopwatch timer = Stopwatch.createStarted();
        totalBatchPutCalls.incrementAndGet();
        try {
            logger.trace("{}: Sending buffer {} to firehose {}...", flow.getId(), buffer, getDestination());
            metrics.addCount(RECORDS_ATTEMPTED_METRIC, requestRecords.size());
            result = agentContext.getFirehoseClient().putRecordBatch(request);
            metrics.addCount(SERVICE_ERRORS_METRIC, 0);
        } catch (AmazonServiceException e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalBatchPutServiceErrors.incrementAndGet();
            throw e;
        } catch (Exception e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalBatchPutOtherErrors.incrementAndGet();
            throw e;
        } finally {
            totalBatchPutLatency.addAndGet(timer.elapsed(TimeUnit.MILLISECONDS));
        }
        if (sendResult == null) {
            List<Integer> sentRecords = new ArrayList<>(requestRecords.size());
            Multiset<String> errors = HashMultiset.<String>create();
            int index = 0;
            long totalBytesSent = 0;
            for (PutRecordBatchResponseEntry responseEntry : result.getRequestResponses()) {
                Record record = requestRecords.get(index);
                if (responseEntry.getErrorCode() == null) {
                    sentRecords.add(index);
                    totalBytesSent += record.getData().limit();
                } else {
                    logger.trace("{}:{} Record {} returned error code {}: {}", flow.getId(), buffer, index,
                            responseEntry.getErrorCode(), responseEntry.getErrorMessage());
                    errors.add(responseEntry.getErrorCode());
                }
                ++index;
            }
            if (sentRecords.size() == requestRecords.size()) {
                sendResult = BufferSendResult.succeeded(buffer);
            } else {
                buffer = buffer.remove(sentRecords);
                sendResult = BufferSendResult.succeeded_partially(buffer, requestRecords.size());
            }
            metrics.addData(BYTES_SENT_METRIC, totalBytesSent, StandardUnit.Bytes);
            int failedRecordCount = requestRecords.size() - sentRecords.size();
            metrics.addCount(RECORD_ERRORS_METRIC, failedRecordCount);
            logger.debug("{}:{} Records sent firehose {}: {}. Failed records: {}", flow.getId(), buffer,
                    getDestination(), sentRecords.size(), failedRecordCount);
            totalRecordsAttempted.addAndGet(requestRecords.size());
            totalRecordsSent.addAndGet(sentRecords.size());
            totalRecordsFailed.addAndGet(failedRecordCount);

            if (logger.isDebugEnabled() && !errors.isEmpty()) {
                synchronized (totalErrors) {
                    StringBuilder strErrors = new StringBuilder();
                    for (Multiset.Entry<String> err : errors.entrySet()) {
                        AtomicLong counter = totalErrors.get(err.getElement());
                        if (counter == null)
                            totalErrors.put(err.getElement(), counter = new AtomicLong());
                        counter.addAndGet(err.getCount());
                        if (strErrors.length() > 0)
                            strErrors.append(", ");
                        strErrors.append(err.getElement()).append(": ").append(err.getCount());
                    }
                    logger.debug("{}:{} Errors from firehose {}: {}", flow.getId(), buffer,
                            flow.getDestination(), strErrors.toString());
                }
            }
        }
        return sendResult;
    } finally {
        metrics.commit();
        activeBatchPutCalls.decrementAndGet();
    }
}
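
A remark on the tallying pattern above: a Multiset can absorb another tally directly via add(element, occurrences), which could replace the synchronized AtomicLong map if a thread-safe multiset such as ConcurrentHashMultiset were acceptable. A hedged sketch (the error-code strings are made up):

import com.google.common.collect.ConcurrentHashMultiset;
import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class ErrorTallyDemo {
    public static void main(String[] args) {
        // Running total across batches; ConcurrentHashMultiset is thread-safe.
        Multiset<String> totalErrors = ConcurrentHashMultiset.create();

        Multiset<String> batchErrors = HashMultiset.create();
        batchErrors.add("ServiceUnavailableException");
        batchErrors.add("ServiceUnavailableException");
        batchErrors.add("InternalFailure");

        // add(element, occurrences) merges one batch's counts in a single call.
        for (Multiset.Entry<String> e : batchErrors.entrySet()) {
            totalErrors.add(e.getElement(), e.getCount());
        }
        System.out.println(totalErrors.count("ServiceUnavailableException")); // 2
    }
}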

From source file:com.amazon.kinesis.streaming.agent.tailing.KinesisSender.java

@Override
protected BufferSendResult<KinesisRecord> attemptSend(RecordBuffer<KinesisRecord> buffer) {
    activePutRecordsCalls.incrementAndGet();
    IMetricsScope metrics = agentContext.beginScope();
    metrics.addDimension(Metrics.DESTINATION_DIMENSION, "KinesisStream:" + getDestination());
    try {
        BufferSendResult<KinesisRecord> sendResult = null;
        List<PutRecordsRequestEntry> requestRecords = new ArrayList<>();
        for (KinesisRecord data : buffer) {
            PutRecordsRequestEntry record = new PutRecordsRequestEntry();
            record.setData(data.data());
            record.setPartitionKey(data.partitionKey());
            requestRecords.add(record);
        }
        PutRecordsRequest request = new PutRecordsRequest();
        request.setStreamName(getDestination());
        request.setRecords(requestRecords);
        PutRecordsResult result = null;
        Stopwatch timer = Stopwatch.createStarted();
        totalPutRecordsCalls.incrementAndGet();
        try {
            logger.trace("{}: Sending buffer {} to kinesis stream {}...", flow.getId(), buffer,
                    getDestination());
            metrics.addCount(RECORDS_ATTEMPTED_METRIC, requestRecords.size());
            result = agentContext.getKinesisClient().putRecords(request);
            metrics.addCount(SERVICE_ERRORS_METRIC, 0);
        } catch (AmazonServiceException e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalPutRecordsServiceErrors.incrementAndGet();
            throw e;
        } catch (Exception e) {
            metrics.addCount(SERVICE_ERRORS_METRIC, 1);
            totalPutRecordsOtherErrors.incrementAndGet();
            throw e;
        } finally {
            totalPutRecordsLatency.addAndGet(timer.elapsed(TimeUnit.MILLISECONDS));
        }
        if (sendResult == null) {
            List<Integer> sentRecords = new ArrayList<>(requestRecords.size());
            Multiset<String> errors = HashMultiset.<String>create();
            int index = 0;
            long totalBytesSent = 0;
            for (final PutRecordsResultEntry responseEntry : result.getRecords()) {
                final PutRecordsRequestEntry record = requestRecords.get(index);
                if (responseEntry.getErrorCode() == null) {
                    sentRecords.add(index);
                    totalBytesSent += record.getData().limit();
                } else {
                    logger.trace("{}:{} Record {} returned error code {}: {}", flow.getId(), buffer, index,
                            responseEntry.getErrorCode(), responseEntry.getErrorMessage());
                    errors.add(responseEntry.getErrorCode());
                }
                ++index;
            }
            if (sentRecords.size() == requestRecords.size()) {
                sendResult = BufferSendResult.succeeded(buffer);
            } else {
                buffer = buffer.remove(sentRecords);
                sendResult = BufferSendResult.succeeded_partially(buffer, requestRecords.size());
            }
            metrics.addData(BYTES_SENT_METRIC, totalBytesSent, StandardUnit.Bytes);
            int failedRecordCount = requestRecords.size() - sentRecords.size();
            metrics.addCount(RECORD_ERRORS_METRIC, failedRecordCount);
            logger.debug("{}:{} Records sent to kinesis stream {}: {}. Failed records: {}", flow.getId(),
                    buffer, getDestination(), sentRecords.size(), failedRecordCount);
            totalRecordsAttempted.addAndGet(requestRecords.size());
            totalRecordsSent.addAndGet(sentRecords.size());
            totalRecordsFailed.addAndGet(failedRecordCount);

            if (logger.isDebugEnabled() && !errors.isEmpty()) {
                synchronized (totalErrors) {
                    StringBuilder strErrors = new StringBuilder();
                    for (Multiset.Entry<String> err : errors.entrySet()) {
                        AtomicLong counter = totalErrors.get(err.getElement());
                        if (counter == null)
                            totalErrors.put(err.getElement(), counter = new AtomicLong());
                        counter.addAndGet(err.getCount());
                        if (strErrors.length() > 0)
                            strErrors.append(", ");
                        strErrors.append(err.getElement()).append(": ").append(err.getCount());
                    }
                    logger.debug("{}:{} Errors from kinesis stream {}: {}", flow.getId(), buffer,
                            flow.getDestination(), strErrors.toString());
                }
            }
        }
        return sendResult;
    } finally {
        metrics.commit();
        activePutRecordsCalls.decrementAndGet();
    }
}

From source file:BibTex.IOmethods.java

public void writeConnectedCategories(Set<BibTexRef> refs) throws IOException {

    BufferedWriter bw = new BufferedWriter(new FileWriter(folder + "connected categories.csv"));
    StringBuilder sb = new StringBuilder();
    int maxCountCategory = 0;
    sb.append("Source,Target,Type,Weight").append("\n");

    //creation of convenient data structures for I/O
    Multiset<Edge> edges = HashMultiset.create();
    Multiset<String> multisetCategoryNames = HashMultiset.create();

    for (BibTexRef ref : refs) {
        Set<Category> categories = ref.getCategories();
        Set<String> categoriesNames = new HashSet<>();

        for (Category category : categories) {
            categoriesNames.add(category.getCategoryName());
            multisetCategoryNames.add(category.getCategoryName());
        }

        FindAllPairs findAllPairs = new FindAllPairs();
        List<Pair<String>> pairs = findAllPairs.getAllUndirectedPairsAsList(categoriesNames);

        for (Pair<String> pair : pairs) {
            Edge edge = new Edge();
            edge.setNode1(pair.getLeft());
            edge.setNode2(pair.getRight());
            edges.add(edge);

        }

    }

    //finding the max number for a category, for normalization purposes
    for (String string : multisetCategoryNames.elementSet()) {
        if (maxCountCategory < multisetCategoryNames.count(string)) {
            maxCountCategory = multisetCategoryNames.count(string);
        }
    }

    //writing one csv line per distinct edge
    for (Edge edge : edges.elementSet()) {
        //we devalue the weight of an edge by how frequent the 2 nodes of the edge are.
        float weight = edges.count(edge) / (float) (multisetCategoryNames.count(edge.getNode1())
                * multisetCategoryNames.count(edge.getNode2()));
        //            float weight = edges.count(edge);
        //normalization to a 0 -> 10 scale to visualize the weight on Gephi
        weight = weight * 10 / (float) maxCountCategory * 100000;
        sb.append(edge.getNode1()).append(",").append(edge.getNode2()).append(",Undirected,").append(weight);
        sb.append("\n");
    }
    bw.write(sb.toString());
    bw.close();
}
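
One caveat for the HashMultiset<Edge> tally above: edges.count(edge) only aggregates correctly if Edge implements equals and hashCode, and for undirected edges it should compare endpoints order-insensitively. A hypothetical Edge in that spirit (not the project's class):

import java.util.Objects;

public final class Edge {
    private String node1;
    private String node2;

    public void setNode1(String n) { this.node1 = n; }
    public void setNode2(String n) { this.node2 = n; }
    public String getNode1() { return node1; }
    public String getNode2() { return node2; }

    @Override
    public boolean equals(Object o) {
        if (!(o instanceof Edge)) {
            return false;
        }
        Edge e = (Edge) o;
        // Undirected: (a, b) equals (b, a).
        return (Objects.equals(node1, e.node1) && Objects.equals(node2, e.node2))
                || (Objects.equals(node1, e.node2) && Objects.equals(node2, e.node1));
    }

    @Override
    public int hashCode() {
        // XOR is symmetric, so both endpoint orders hash identically.
        return Objects.hashCode(node1) ^ Objects.hashCode(node2);
    }
}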