Example usage for com.google.common.collect Multiset add

List of usage examples for com.google.common.collect Multiset add

Introduction

On this page you can find example usages of com.google.common.collect Multiset#add.

Prototype

@Override
boolean add(E element);

Source Link

Document

Adds a single occurrence of the specified element to this multiset.

Usage

From source file:BibTex.IOmethods.java

/**
 * Writes a CSV ("refs per categories.csv") with one column per category and one
 * bibliographical reference per cell; each category's papers are drained row by
 * row until every category is exhausted.
 *
 * @param refs the references to group by category
 * @throws IOException if the output file cannot be written
 */
public void writeCategoriesAndTheirPapersToFile(Set<BibTexRef> refs) throws IOException {
    StringBuilder sb = new StringBuilder();
    String sep = "|";

    // Convenient I/O structures: category name -> multiset of formatted papers.
    // TreeMap keeps the column iteration order aligned with the sorted headers.
    Map<String, Multiset<String>> categoriesToPapers = new TreeMap<>();
    List<String> categoryNames = new ArrayList<>();

    for (BibTexRef ref : refs) {
        for (Category category : ref.getCategories()) {
            String name = category.getCategoryName();
            if (!categoryNames.contains(name)) {
                categoryNames.add(name);
            }
            // Get-or-create replaces the original containsKey/get/put dance.
            Multiset<String> papersForOneCategory = categoriesToPapers.get(name);
            if (papersForOneCategory == null) {
                papersForOneCategory = HashMultiset.create();
                categoriesToPapers.put(name, papersForOneCategory);
            }
            papersForOneCategory.add(ref.toBibliographicalFormattedString());
        }
    }
    Collections.sort(categoryNames);

    // First line of the csv: headers of the categories.
    for (String categoryName : categoryNames) {
        sb.append(categoryName).append(sep);
    }
    sb.append("\n");

    // Subsequent lines: one paper per category per row, until all multisets are drained.
    boolean continueLoop = true;
    while (continueLoop) {
        for (Multiset<String> papersForOneCategory : categoriesToPapers.values()) {
            Iterator<String> papersIterator = papersForOneCategory.iterator();
            if (papersIterator.hasNext()) {
                sb.append(papersIterator.next()).append(sep);
                papersIterator.remove();
            } else {
                // Empty cell: this category has no more papers.
                sb.append(sep);
            }
        }
        sb.append("\n");

        // Stop once every category has been fully drained.
        continueLoop = false;
        for (Multiset<String> papersForOneCategory : categoriesToPapers.values()) {
            if (!papersForOneCategory.isEmpty()) {
                continueLoop = true;
                break;
            }
        }
    }

    // try-with-resources guarantees the writer is closed even if write() throws.
    try (BufferedWriter bw = new BufferedWriter(new FileWriter(folder + "refs per categories.csv"))) {
        bw.write(sb.toString());
    }
}

From source file:BibTex.IOmethods.java

/**
 * Writes a CSV ("papers per year.csv") with two rows: the distinct publication
 * years in sorted order, and below each year the number of papers published
 * that year.
 *
 * @param refs the references to count per year
 * @throws IOException if the output file cannot be written
 */
public void writeNumberPapersPerYear(Set<BibTexRef> refs) throws IOException {
    StringBuilder sb = new StringBuilder();
    String sep = "|";

    // TreeMultiset keeps years sorted while counting occurrences in one structure.
    Multiset<String> years = TreeMultiset.create();
    for (BibTexRef ref : refs) {
        years.add(ref.getYear());
    }

    // Row 1: the distinct years.
    for (String year : years.elementSet()) {
        sb.append(year).append(sep);
    }
    sb.append("\n");

    // Row 2: the paper count for each year, in the same column order.
    for (String year : years.elementSet()) {
        sb.append(years.count(year)).append(sep);
    }
    sb.append("\n");

    // try-with-resources guarantees the writer is closed even if write() throws.
    try (BufferedWriter bw = new BufferedWriter(new FileWriter(folder + "papers per year.csv"))) {
        bw.write(sb.toString());
    }
}

From source file:BibTex.IOmethods.java

/**
 * Writes a CSV ("journals per categories.csv") with one column per category;
 * each cell lists one journal (abbreviated when an abbreviation is known) with
 * its occurrence count, most frequent journals first.
 *
 * @param refs the references whose journals are tallied per category
 * @throws IOException if the output file cannot be written
 */
public void writeJournalsPerCategories(Set<BibTexRef> refs) throws IOException {
    JournalAbbreviationsMapping jmap = new JournalAbbreviationsMapping();
    jmap.loadMap();

    StringBuilder sb = new StringBuilder();
    String sep = "|";

    // Convenient I/O structures: category name -> multiset of journal abbreviations.
    Map<String, Multiset<String>> categoriesToJournals = new TreeMap<>();
    List<String> categoryNames = new ArrayList<>();

    for (BibTexRef ref : refs) {
        String title = ref.getJournal();
        if (title == null || title.isEmpty()) {
            continue;
        }
        title = title.toLowerCase();

        // Fall back to the lower-cased full title when no abbreviation is known.
        Set<String> abbrev = (Set<String>) jmap.getJournalsToAbbrev().get(title);
        String abbreviation = (abbrev == null || abbrev.isEmpty()) ? title : abbrev.iterator().next();

        for (Category category : ref.getCategories()) {
            String name = category.getCategoryName();
            if (!categoryNames.contains(name)) {
                categoryNames.add(name);
            }
            // Get-or-create replaces the original containsKey/get/put dance.
            Multiset<String> journalsForOneCategory = categoriesToJournals.get(name);
            if (journalsForOneCategory == null) {
                journalsForOneCategory = HashMultiset.create();
                categoriesToJournals.put(name, journalsForOneCategory);
            }
            journalsForOneCategory.add(abbreviation);
        }
    }
    Collections.sort(categoryNames);

    // First line of the csv: headers of the categories.
    for (String categoryName : categoryNames) {
        sb.append(categoryName).append(sep);
    }
    sb.append("\n");

    // Subsequent lines: one "journal (count)" cell per category per row,
    // highest-count journal first, until every category is drained.
    boolean continueLoop = true;
    while (continueLoop) {
        for (Multiset<String> journalsForOneCategory : categoriesToJournals.values()) {
            Iterator<String> journalsIterator = Multisets.copyHighestCountFirst(journalsForOneCategory)
                    .elementSet().iterator();
            if (journalsIterator.hasNext()) {
                String journal = journalsIterator.next();
                sb.append(journal).append(" (").append(journalsForOneCategory.count(journal)).append(")")
                        .append(sep);
                // Remove all occurrences so the next row moves on to the next journal.
                journalsForOneCategory.remove(journal, journalsForOneCategory.count(journal));
            } else {
                sb.append(sep);
            }
        }
        sb.append("\n");

        // Stop once every category has been fully drained.
        continueLoop = false;
        for (Multiset<String> journalsForOneCategory : categoriesToJournals.values()) {
            if (!journalsForOneCategory.isEmpty()) {
                continueLoop = true;
                break;
            }
        }
    }

    // try-with-resources guarantees the writer is closed even if write() throws.
    try (BufferedWriter bw = new BufferedWriter(new FileWriter(folder + "journals per categories.csv"))) {
        bw.write(sb.toString());
    }
}

From source file:com.clarkparsia.geneious.SequenceVerificationAnnotation.java

/**
 * Guesses the Sequence Ontology (SO) type for this verification annotation.
 *
 * For a single-position SUBSTITUTION variant, scans the aligned sequencing reads
 * at that position and keeps the most frequently observed character that differs
 * from the expected one (ties broken in favor of a valid character over an
 * invalid one). If a valid substitution character is found, derives the SO
 * substitution type from the (expected, substituted) pair; otherwise falls back
 * to the generic SO type for the variant type.
 */
private void guessSOType() {
    SequenceCharSequence expectedSequence = alignmentDoc.getCharSequence();

    // Reset the outputs before recomputing them.
    substitution = null;
    soType = null;

    if (interval.getLength() == 1 && variantType == SequenceVariantType.SUBSTITUTION) {
        // NOTE(review): looks like interval indices are 1-based, hence the -1 — confirm.
        int position = interval.getMinimumIndex() - 1;
        // 10-character context window around the position, used only for trace logging.
        SequenceCharSequence e = expectedSequence.subSequence(position - 10, position + 10);
        char expected = expectedSequence.charAt(position);
        LOGGER.debug("          **********V*********");
        LOGGER.debug("Expected: " + e + " " + expected);
        char mostLikelySubstitution = '-';
        int maxOccurence = 0;
        // Tally of each differing character observed across the sequencing reads.
        Multiset<Character> possibleSubstitutions = HashMultiset.create();
        for (SequenceDocument actualSequenceDoc : alignmentDoc.getSequencingData()) {
            SequenceCharSequence actualSequence = actualSequenceDoc.getCharSequence();
            char substitution = actualSequence.charAt(position);
            LOGGER.debug("Actual  : " + actualSequence.subSequence(position - 10, position + 10) + " "
                    + substitution);
            // Gaps ('-') and matches with the expected char are not substitutions.
            if (substitution != '-' && substitution != expected) {
                possibleSubstitutions.add(substitution);
                int occurences = possibleSubstitutions.count(substitution);
                // Higher count wins; on a tie, a valid char beats an invalid current best.
                if (occurences > maxOccurence || (occurences == maxOccurence && isValidChar(substitution)
                        && !isValidChar(mostLikelySubstitution))) {
                    mostLikelySubstitution = substitution;
                    maxOccurence = occurences;
                }
            }
        }
        LOGGER.debug("          **********^*********");

        if (isValidChar(mostLikelySubstitution)) {
            substitution = new char[] { expected, mostLikelySubstitution };
            soType = SequenceOntologyUtil.getSOSubstitutionType(expected, mostLikelySubstitution);
        }
    }

    // Fallback: no single-position substitution identified.
    if (substitution == null) {
        soType = SequenceOntologyUtil.getSOType(variantType);
    }
}

From source file:org.eclipse.sirius.tree.business.internal.dialect.common.tree.TreeItemContainerChildSupport.java

/**
 * Reorders the container's tree items so that items are grouped by mapping (in
 * the order mappings were encountered) and, within each mapping group, follow
 * the creation order given by {@code CreatedOutput.getNewIndex()}.
 *
 * If all items share a single mapping, no sorting is needed and the method
 * returns without touching the container.
 */
@Override
public void reorderChilds(Iterable<CreatedOutput> outDesc) {
    // LinkedHashMultiset: counts items per mapping while preserving first-seen order.
    final Multiset<TreeItemMapping> subMappings = LinkedHashMultiset.create();
    Set<TreeItemMapping> mappings = new HashSet<TreeItemMapping>();
    final Map<EObject, CreatedOutput> outputToItem = Maps.newHashMap();
    for (CreatedOutput createdOutput : outDesc) {
        EObject createdElement = createdOutput.getCreatedElement();
        outputToItem.put(createdElement, createdOutput);
        if (createdElement instanceof DTreeItem) {
            DTreeItem createdDTreeItem = (DTreeItem) createdElement;
            TreeItemMapping actualMapping = createdDTreeItem.getActualMapping();
            subMappings.add(actualMapping);
            mappings.add(actualMapping);
        }
    }

    // Does not need to sort DTreeItem according to their mapping if there
    // is only one mapping
    if (mappings.size() > 1) {

        // Counts subMappings to correctly sort tree items regarding mapping
        // order (items have been created regarding the semantic candidates
        // order)
        int startIndex = 0;
        final Map<TreeItemMapping, Integer> startIndexes = Maps.newHashMap();
        for (TreeItemMapping itemMapping : subMappings) {
            startIndexes.put(itemMapping, startIndex);
            startIndex += subMappings.count(itemMapping);
        }

        // Sort key: the mapping group's start offset plus the item's creation index.
        Function<DTreeItem, Integer> getNewIndex = new Function<DTreeItem, Integer>() {

            @Override
            public Integer apply(DTreeItem from) {
                // init with element count : elements with unknown mapping
                // will
                // be placed at
                // the end.
                int index = outputToItem.size();
                TreeItemMapping itemMapping = from.getActualMapping();
                if (itemMapping != null && startIndexes.containsKey(itemMapping)) {
                    index = startIndexes.get(itemMapping);
                }

                CreatedOutput createdOutput = outputToItem.get(from);
                if (createdOutput != null) {
                    return index + createdOutput.getNewIndex();
                }
                // Item not produced by this refresh: sorts before everything else.
                return -1;
            }
        };

        ECollections.sort(container.getOwnedTreeItems(), Ordering.natural().onResultOf(getNewIndex));
    }
}

From source file:BibTex.IOmethods.java

/**
 * Writes a CSV ("years and categories.csv"): one row per year, one column per
 * category, each cell holding the number of papers of that category in that
 * year. Column order is the order in which categories are first seen.
 *
 * @param refs the references to tally
 * @throws IOException if the output file cannot be written
 */
public void writeCategoriesPerYearInCsv(Set<BibTexRef> refs) throws IOException {
    StringBuilder sb = new StringBuilder();
    String sep = "|";

    // TreeMap (was HashMap) so the year rows come out in sorted, deterministic order.
    // NOTE(review): assumes ref.getYear() is never null (TreeMap rejects null keys).
    Map<String, Multiset<String>> yearsToCategories = new TreeMap<>();
    List<String> categoryNames = new ArrayList<>();

    for (BibTexRef ref : refs) {
        String year = ref.getYear();
        // Get-or-create replaces the original containsKey branching.
        Multiset<String> categoriesForOneYear = yearsToCategories.get(year);
        if (categoriesForOneYear == null) {
            categoriesForOneYear = HashMultiset.create();
            yearsToCategories.put(year, categoriesForOneYear);
        }
        for (Category category : ref.getCategories()) {
            String name = category.getCategoryName();
            if (!categoryNames.contains(name)) {
                categoryNames.add(name);
            }
            categoriesForOneYear.add(name);
        }
    }

    // First line: headers of the categories. The first cell is empty — that column
    // holds the row headers (the years).
    sb.append(sep);
    for (String categoryName : categoryNames) {
        sb.append(categoryName).append(sep);
    }
    sb.append("\n");

    // One line per year: the year, then the count of each category for that year.
    for (Map.Entry<String, Multiset<String>> entry : yearsToCategories.entrySet()) {
        sb.append(entry.getKey()).append(sep);
        for (String categoryName : categoryNames) {
            sb.append(entry.getValue().count(categoryName)).append(sep);
        }
        sb.append("\n");
    }

    // try-with-resources guarantees the writer is closed even if write() throws.
    try (BufferedWriter bw = new BufferedWriter(new FileWriter(folder + "years and categories.csv"))) {
        bw.write(sb.toString());
    }
}

From source file:BibTex.IOmethods.java

/**
 * Writes a CSV ("years and suppercategories.csv"): one row per year, one column
 * per supercategory, each cell holding the number of papers of that
 * supercategory in that year. Column order is first-seen order.
 *
 * @param refs the references to tally
 * @throws IOException if the output file cannot be written
 */
public void writeSuperCategoriesPerYearInCsv(Set<BibTexRef> refs) throws IOException {
    StringBuilder sb = new StringBuilder();
    String sep = "|";

    // TreeMap (was HashMap) so the year rows come out in sorted, deterministic order.
    // NOTE(review): assumes ref.getYear() is never null (TreeMap rejects null keys).
    Map<String, Multiset<String>> yearsToCategories = new TreeMap<>();
    List<String> categoryNames = new ArrayList<>();

    for (BibTexRef ref : refs) {
        String year = ref.getYear();
        // Get-or-create replaces the original containsKey branching.
        Multiset<String> categoriesForOneYear = yearsToCategories.get(year);
        if (categoriesForOneYear == null) {
            categoriesForOneYear = HashMultiset.create();
            yearsToCategories.put(year, categoriesForOneYear);
        }
        for (Category category : ref.getCategories()) {
            String superCategory = category.getSuperCategory();
            if (!categoryNames.contains(superCategory)) {
                categoryNames.add(superCategory);
            }
            categoriesForOneYear.add(superCategory);
        }
    }

    // First line: headers of the supercategories. The first cell is empty — that
    // column holds the row headers (the years).
    sb.append(sep);
    for (String categoryName : categoryNames) {
        sb.append(categoryName).append(sep);
    }
    sb.append("\n");

    // One line per year: the year, then the count of each supercategory.
    for (Map.Entry<String, Multiset<String>> entry : yearsToCategories.entrySet()) {
        sb.append(entry.getKey()).append(sep);
        for (String categoryName : categoryNames) {
            sb.append(entry.getValue().count(categoryName)).append(sep);
        }
        sb.append("\n");
    }

    // try-with-resources guarantees the writer is closed even if write() throws.
    try (BufferedWriter bw = new BufferedWriter(new FileWriter(folder + "years and suppercategories.csv"))) {
        bw.write(sb.toString());
    }
}

From source file:org.cspoker.ai.bots.bot.gametree.rollout.BucketRollOut.java

/**
 * Estimates the bot's expected value (EV) by Monte-Carlo rollout: for each of
 * {@code nbCommunitySamples} community-card completions, samples 100 opponent
 * hole-card pairs to build hand-rank distributions, then distributes each side
 * pot according to the resulting win/draw probabilities.
 *
 * @param nbCommunitySamples number of community-card samples to average over
 * @return the rake-adjusted average EV per community sample
 */
public double doRollOut(int nbCommunitySamples) {
    boolean traceEnabled = logger.isTraceEnabled();
    double totalEV = 0;
    model.assumeTemporarily(gameState);
    for (int i = 0; i < nbCommunitySamples; i++) {
        int communitySampleRank = fixedRank;
        EnumSet<Card> usedCommunityAndBotCards = EnumSet.copyOf(usedFixedCommunityAndBotCards);
        EnumSet<Card> usedCommunityCards = EnumSet.copyOf(usedFixedCommunityCards);
        // Complete the board with freshly drawn community cards.
        for (int j = 0; j < nbMissingCommunityCards; j++) {
            Card communityCard = drawNewCard(usedCommunityAndBotCards);
            if (traceEnabled) {
                logger.trace("Evaluating sampled community card " + communityCard);
            }
            usedCommunityCards.add(communityCard);
            communitySampleRank = updateIntermediateRank(communitySampleRank, communityCard);
        }
        if (traceEnabled) {
            logger.trace("Evaluating bot cards " + botCard1 + " " + botCard2);
        }
        int botRank = getFinalRank(communitySampleRank, botCard1, botCard2);

        // BUG FIX: TreeMultiset has no public constructor; the original
        // "new TreeMultiset<Integer>()" does not compile. Use the static factory.
        Multiset<Integer> ranks = TreeMultiset.create();
        Multiset<Integer> deadRanks = TreeMultiset.create();
        int n = 100;
        for (int j = 0; j < n; j++) {
            EnumSet<Card> handCards = EnumSet.copyOf(usedCommunityCards);
            Card sampleCard1 = drawNewCard(handCards);
            Card sampleCard2 = drawNewCard(handCards);
            int sampleRank = getFinalRank(communitySampleRank, sampleCard1, sampleCard2);
            ranks.add(sampleRank);
            // Samples that reuse one of the bot's own cards are impossible in real
            // play; they are tallied separately so they can be weighted out.
            if (botCard1.equals(sampleCard1) || botCard1.equals(sampleCard2) || botCard2.equals(sampleCard1)
                    || botCard2.equals(sampleCard2)) {
                deadRanks.add(sampleRank);
            }
        }

        WinDistribution[] winProbs = calcWinDistributions(botRank, ranks, deadRanks);
        double[] deadCardWeights = calcDeadCardWeights(ranks, deadRanks);

        TreeMap<PlayerState, WinDistribution> winDistributions = calcOpponentWinDistributionMap(winProbs,
                deadCardWeights);

        // Walk opponents, distributing each incremental side pot; removing an
        // opponent after processing shrinks the distribution product for later pots.
        int maxDistributed = 0;
        int botInvestment = botState.getTotalInvestment();
        double sampleEV = 0;
        for (Iterator<PlayerState> iter = winDistributions.keySet().iterator(); iter.hasNext();) {
            PlayerState opponent = iter.next();
            int toDistribute = Math.min(botInvestment, opponent.getTotalInvestment()) - maxDistributed;
            if (toDistribute > 0) {
                double pWin = 1;
                double pNotLose = 1;
                for (WinDistribution distribution : winDistributions.values()) {
                    // you win when you win from every opponent
                    pWin *= distribution.pWin;
                    // you don't lose when you don't lose from every opponent
                    pNotLose *= distribution.pWin + distribution.pDraw;
                }
                sampleEV += toDistribute * pWin;
                // you draw when you don't lose but don't win everything either
                double pDraw = pNotLose - pWin;
                // assume worst case, with winDistributions.size()+1 drawers
                // TODO do this better, use rollout or statistics!
                sampleEV += pDraw * toDistribute / (winDistributions.size() + 1.0);
                maxDistributed += toDistribute;
            }
            iter.remove();
        }
        // get back uncalled investment
        sampleEV += botInvestment - maxDistributed;
        totalEV += sampleEV;
    }
    model.forgetLastAssumption();
    return (1 - gameState.getTableConfiguration().getRake()) * (totalEV / nbCommunitySamples);
}

From source file:it.units.malelab.ege.benchmark.mapper.MappingPropertiesFitness.java

/**
 * Computes a multi-objective fitness for a raw mapper phenotype: for each
 * benchmark problem, builds a recursive mapper from the phenotype, maps every
 * genotype, and measures the configured properties (redundancy, non-uniformity,
 * non-locality); each objective is the mean of a property across problems.
 *
 * @param mapperRawPhenotype the mapper to evaluate, as a derivation tree
 * @return one mean value per configured property
 */
@Override
public MultiObjectiveFitness<Double> compute(Node<String> mapperRawPhenotype) {
    Map<Property, double[]> propertyValues = new LinkedHashMap<>();
    for (Property property : properties) {
        propertyValues.put(property, new double[problems.size()]);
    }
    int i = 0;
    for (Problem<String, NumericFitness> problem : problems.keySet()) {
        List<Node<String>> phenotypes = new ArrayList<>();
        // Multiset of phenotypes: element set size and counts drive redundancy
        // and non-uniformity below.
        Multiset<Node<String>> groups = LinkedHashMultiset.create();
        //build mapper
        RecursiveMapper<String> mapper = new RecursiveMapper<>(mapperRawPhenotype, maxMappingDepth,
                EXPRESSIVENESS_DEPTH, problem.getGrammar());
        //map
        for (BitsGenotype genotype : genotypes) {
            Node<String> phenotype = Node.EMPTY_TREE;
            try {
                // Collections.emptyMap() replaces the raw EMPTY_MAP constant.
                phenotype = mapper.map(genotype, Collections.emptyMap());
            } catch (MappingException ex) {
                // Failed mappings still count, as the EMPTY_TREE phenotype.
            }
            phenotypes.add(phenotype);
            groups.add(phenotype);
        }
        //compute properties
        if (propertyValues.containsKey(Property.REDUNDANCY)) {
            // Fraction of genotypes that did not produce a distinct phenotype.
            propertyValues.get(Property.REDUNDANCY)[i] = 1d
                    - (double) groups.elementSet().size() / (double) genotypes.size();
        }
        if (propertyValues.containsKey(Property.NON_UNIFORMITY)) {
            // Coefficient of variation of the phenotype group sizes.
            double[] groupSizes = new double[groups.elementSet().size()];
            int c = 0;
            for (Node<String> phenotype : groups.elementSet()) {
                groupSizes[c] = (double) groups.count(phenotype);
                c = c + 1;
            }
            propertyValues.get(Property.NON_UNIFORMITY)[i] = Math.sqrt(StatUtils.variance(groupSizes))
                    / StatUtils.mean(groupSizes);
        }
        if (propertyValues.containsKey(Property.NON_LOCALITY)) {
            // 1 - normalized correlation between genotype and phenotype distances;
            // NaN (e.g. zero variance) is treated as maximally non-local.
            double[] phenotypeDistances = computeDistances(phenotypes, problems.get(problem));
            double locality = 1d
                    - (1d + (new PearsonsCorrelation().correlation(genotypeDistances, phenotypeDistances)))
                            / 2d;
            propertyValues.get(Property.NON_LOCALITY)[i] = Double.isNaN(locality) ? 1d : locality;
        }
        i = i + 1;
    }
    Double[] meanValues = new Double[properties.length];
    for (int j = 0; j < properties.length; j++) {
        meanValues[j] = StatUtils.mean(propertyValues.get(properties[j]));
    }
    MultiObjectiveFitness<Double> mof = new MultiObjectiveFitness<Double>(meanValues);
    return mof;
}

From source file:org.caleydo.view.domino.internal.toolbar.NodeTools.java

/**
 * Adds a vis-type switching button bar for the given nodes. A single node gets a
 * bar mirroring its own switcher; for several nodes, only the element suppliers
 * supported by every node are offered, initialized to the id that is active on
 * the most nodes.
 */
private void addMultiNodes(Set<Node> nodes) {
    ButtonBarBuilder builder = new ButtonBarBuilder();
    builder.layoutAs(EButtonBarLayout.SLIDE_DOWN);
    builder.customCallback(new ChangeVisTypeTo(nodes));
    GLElementFactorySwitcher first = nodes.iterator().next().getRepresentableSwitcher();
    if (nodes.size() == 1) {
        this.add(builder.build(first, first.getActiveId()));
        return;
    }
    // Start from the first node's suppliers and retain only those every node supports.
    Collection<GLElementSupplier> common = Lists.newArrayList(first);
    Multiset<String> activeIds = HashMultiset.create();
    for (Node node : nodes) {
        GLElementFactorySwitcher switcher = node.getRepresentableSwitcher();
        Set<String> supported = getIds(switcher);
        Iterator<GLElementSupplier> it = common.iterator();
        while (it.hasNext()) {
            if (!supported.contains(it.next().getId())) {
                it.remove();
            }
        }
        activeIds.add(switcher.getActiveId());
    }
    if (common.isEmpty()) {
        return;
    }
    this.add(builder.build(common, mostFrequent(activeIds)));
}