Example usage for com.google.common.collect HashMultiset create

Introduction

This page collects example usages of the com.google.common.collect HashMultiset create() method.

Prototype

public static <E> HashMultiset<E> create() 

Document

Creates a new, empty HashMultiset using the default initial capacity.
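
A minimal, self-contained sketch of the basic pattern behind the examples below (element values are illustrative): create an empty multiset, add elements, then read back per-element counts.

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

Multiset<String> words = HashMultiset.create(); // new, empty multiset
words.add("foo");
words.add("foo");
words.add("bar");

System.out.println(words.count("foo")); // 2
System.out.println(words.elementSet()); // distinct elements, e.g. [foo, bar]
System.out.println(words.size()); // 3: size() counts duplicates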

Usage

From source file:org.splevo.ui.refinementbrowser.ArgoUMLVariantScanHandler.java

private void scanForIncludedFeatures(List<VariationPoint> vps) {

    Multiset<String> identifiedFeatures = HashMultiset.create();
    List<String> errors = Lists.newArrayList();

    for (VariationPoint vp : vps) {

        Set<SoftwareElement> elements = getNotLeadingImplementingElements(vp);
        if (elements.isEmpty()) {
            identifiedFeatures.add("{NONE}");
        }
        for (SoftwareElement element : elements) {

            SourceLocation sourceLocation = element.getSourceLocation();
            String path = sourceLocation.getFilePath();
            List<String> lines = null;
            try {
                lines = FileUtils.readLines(new File(path));

            } catch (IOException e) {
                e.printStackTrace();
                continue;
            }
            int markerLineIndex = getMarkerLineIndex(vp, sourceLocation, lines);
            if (markerLineIndex == -1) {
                errors.add("No marker found for " + path.substring(path.length() - 20));
                continue;
            }

            String featureId = getFeatureId(lines, markerLineIndex);

            if (isMarkerLine(lines, markerLineIndex - 1)) {
                featureId = getFeatureId(lines, markerLineIndex - 1) + " + " + featureId;
            } else if (isMarkerLine(lines, markerLineIndex + 1)) {
                featureId += " + " + getFeatureId(lines, markerLineIndex + 1);
            }

            identifiedFeatures.add(featureId);
        }
    }

    if (errors.size() > 0) {
        MessageDialog.openError(Display.getCurrent().getActiveShell(), "Marker Detection Errors",
                Joiner.on("\n").join(errors));
    }

    StringBuilder message = new StringBuilder();
    message.append("VP Count Total: ");
    message.append(vps.size());
    for (String featureId : identifiedFeatures.elementSet()) {
        message.append("\n");
        message.append(identifiedFeatures.count(featureId));
        message.append(" x ");
        message.append(featureId);
    }
    MessageDialog.openInformation(Display.getCurrent().getActiveShell(), "Info", message.toString());
}

From source file:webreduce.indexing.luceneSearcher.java

public List<Dataset> search() throws IOException {

    List<Dataset> resultList;
    resultList = new ArrayList<>();

    BooleanQuery.Builder finalQueryBuilder = new BooleanQuery.Builder();
    BooleanQuery.Builder entityQueryBuilder = new BooleanQuery.Builder();
    BooleanQuery.Builder attributeQueryBuilder = new BooleanQuery.Builder();

    // parsers that build the attribute and entity queries
    QueryParser qpa = new QueryParser(ATTRIBUTES_FIELD, new CustomAnalyzer());

    QueryParser qpe = new QueryParser(ENTITIES_FIELD, new CustomAnalyzer());

    //QueryWrapperFilter queryFilter = new QueryWrapperFilter(query);
    //CachingWrapperFilter cachingFilter = new CachingWrapperFilter(queryFilter);

    //CachingWrapperQuery typeFilterR = new CachingWrapperFilter(new TermsFilter(new Term(TABLE_TYPE_FIELD, "RELATION")));

    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(indexDir)));

    IndexSearcher searcher = new IndexSearcher(reader);

    QueryBuilder queryBuilder = new QueryBuilder(new CustomAnalyzer());

    System.out.println("Attributes: \n" + Arrays.deepToString(attributes));
    System.out.println("Entities: \n" + Arrays.deepToString(entities));

    //add attributes one by one
    for (String a : attributes) {

        Query qa;
        try {
            qa = qpa.parse("\"" + a + "\"");
            attributeQueryBuilder.add(qa, BooleanClause.Occur.SHOULD);

        } catch (ParseException ex) {
            // skip attributes that fail to parse
        }
    }

    // remove null entries from the entity array

    HashSet<String> entitySet;
    entitySet = new HashSet<>(Arrays.asList(entities));
    entitySet.remove(null);

    entities = entitySet.toArray(new String[entitySet.size()]);

    System.out.println("Entities after null removal \n" + Arrays.deepToString(entities));

    Multiset<Integer> docNoCount;
    docNoCount = HashMultiset.create();

    //Take only the top 50 entities (guard against shorter arrays)
    String[] entities50 = new String[50];
    System.arraycopy(entities, 0, entities50, 0, Math.min(entities.length, 50));

    System.out.println(Arrays.deepToString(entities50));

    for (String e : entities50) {
        System.out.println(e);
        if (e == null) {
            continue;
        }
        Query qe;
        try {
            qe = qpe.parse(QueryParserBase.escape(e));
            //Query qeph = qpe.parse("\"" + QueryParserBase.escape(e) + "\"");
            finalQueryBuilder.add(qe, BooleanClause.Occur.MUST); //add entities boolean query
            finalQueryBuilder.add(attributeQueryBuilder.build(), BooleanClause.Occur.MUST); //add attributes query

            TopDocs td = searcher.search(finalQueryBuilder.build(), numberOfResults * 10);
            for (ScoreDoc sd : td.scoreDocs) {
                int docNo = sd.doc;
                docNoCount.add(docNo);
            }
        } catch (ParseException ex) {
            // skip entities that fail to parse
        }

        System.out.println("Top Doc id: \n"
                + Multisets.copyHighestCountFirst(docNoCount).entrySet().iterator().next().getElement());

    }

    //Sort the returned docs by their frequency and store it in docNoSorted
    ImmutableMultiset<Integer> docNoSorted = Multisets.copyHighestCountFirst(docNoCount);
    //Get the entry set of the frequency ordered document set
    ImmutableSet<Multiset.Entry<Integer>> entrySet = Multisets.copyHighestCountFirst(docNoCount).entrySet();
    //Get the iterator for the sorted entry set
    UnmodifiableIterator<Multiset.Entry<Integer>> iterator = entrySet.iterator();

    Multiset.Entry<Integer> firstEntry = iterator.next();
    int bestDocId = firstEntry.getElement();
    System.out.println("first count: " + firstEntry);

    Set<Integer> elementSet = docNoSorted.elementSet();
    Integer next = elementSet.iterator().next();
    System.out.println("Most frequent document id: " + next);
    int resultSetSize;
    resultSetSize = docNoSorted.elementSet().size();

    System.out.println("Entry Set Size: " + resultSetSize + " Cardinality: " + docNoSorted.size());

    Set<Integer> elementSet1 = Multisets.copyHighestCountFirst(docNoSorted).elementSet();

    List<Integer> t = new ArrayList<Integer>(elementSet1);

    List<Integer> subList = t.subList(0, numberOfResults);
    //ArrayList subArrayList = new ArrayList(subList);
    Iterator<Integer> subListIterator = subList.iterator();

    //we have all the web table doc IDs;
    //fetch each document and convert its stored JSON into a Dataset
    while (subListIterator.hasNext()) {
        int docID = subListIterator.next();
        Document doc;
        doc = searcher.doc(docID);
        String jsonString = doc.get("full_result");
        Dataset er = Dataset.fromJson(jsonString);
        resultList.add(er);
    }
    return resultList;
}
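
The frequency-ranking idiom above can be distilled into a few lines; a minimal sketch using Guava's Multisets utility (values chosen for illustration): count occurrences per document id, then iterate ids from most to least frequent.

Multiset<Integer> hits = HashMultiset.create();
hits.add(7);
hits.add(7);
hits.add(3);

// copyHighestCountFirst returns an ImmutableMultiset whose entrySet iterates
// highest count first, so the first entry is the most frequent element.
for (Multiset.Entry<Integer> entry : Multisets.copyHighestCountFirst(hits).entrySet()) {
    System.out.println(entry.getElement() + " x " + entry.getCount());
}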

From source file:Grep.GrepFiles.java

private PackageStats() {
    statistics = HashMultiset.create();
}

From source file:org.sonar.plugins.core.issue.CountOpenIssuesDecorator.java

public void decorate(Resource resource, DecoratorContext context) {
    Issuable issuable = perspectives.as(Issuable.class, resource);
    if (issuable != null) {
        Collection<Issue> issues = getOpenIssues(issuable.issues());
        boolean shouldSaveNewMetrics = shouldSaveNewMetrics(context);

        Multiset<RulePriority> severityBag = HashMultiset.create();
        Map<RulePriority, Multiset<Rule>> rulesPerSeverity = Maps.newHashMap();
        ListMultimap<RulePriority, Issue> issuesPerSeverity = ArrayListMultimap.create();
        int countOpen = 0;
        int countReopened = 0;
        int countConfirmed = 0;

        for (Issue issue : issues) {
            severityBag.add(RulePriority.valueOf(issue.severity()));
            Multiset<Rule> rulesBag = initRules(rulesPerSeverity, RulePriority.valueOf(issue.severity()));
            rulesBag.add(rulefinder.findByKey(issue.ruleKey().repository(), issue.ruleKey().rule()));
            issuesPerSeverity.put(RulePriority.valueOf(issue.severity()), issue);

            if (Issue.STATUS_OPEN.equals(issue.status())) {
                countOpen++;
            }
            if (Issue.STATUS_REOPENED.equals(issue.status())) {
                countReopened++;
            }
            if (Issue.STATUS_CONFIRMED.equals(issue.status())) {
                countConfirmed++;
            }
        }

        for (RulePriority ruleSeverity : RulePriority.values()) {
            saveIssuesForSeverity(context, ruleSeverity, severityBag);
            saveIssuesPerRules(context, ruleSeverity, rulesPerSeverity);
            saveNewIssuesForSeverity(context, ruleSeverity, issuesPerSeverity, shouldSaveNewMetrics);
            saveNewIssuesPerRule(context, ruleSeverity, issues, shouldSaveNewMetrics);
        }

        saveTotalIssues(context, issues);
        saveNewIssues(context, issues, shouldSaveNewMetrics);

        saveMeasure(context, CoreMetrics.OPEN_ISSUES, countOpen);
        saveMeasure(context, CoreMetrics.REOPENED_ISSUES, countReopened);
        saveMeasure(context, CoreMetrics.CONFIRMED_ISSUES, countConfirmed);
    }
}

From source file:edu.cmu.cs.lti.ark.fn.parsing.FeatureExtractor.java

public Multiset<String> extractFeatures(DataPointWithFrameElements dp, String frameName, String roleName,
        final Range0Based fillerSpanRange, DependencyParse parse) {
    final Multiset<String> featureMap = HashMultiset.create();
    final String frameAndRoleName = frameName + "." + roleName;
    int[] targetTokenNums = dp.getTargetTokenIdxs();
    final DependencyParse[] nodes = parse.getIndexSortedListOfNodes();
    final DependencyParse targetHeadNode = DependencyParse.getHeuristicHead(nodes, targetTokenNums);

    final boolean isEmpty = isEmptySpan(fillerSpanRange);
    String overtness = isEmpty ? "NULL" : "OVERT";
    conjoinAndAdd(overtness, frameAndRoleName, roleName, FRAME_AND_ROLE_NAME, featureMap); // overtness of the role

    String nullness = isEmpty ? "NULL_" : "";
    for (int targetTokenNum : targetTokenNums) {
        final DependencyParse node = nodes[targetTokenNum + 1];
        final Voice voice = findVoice(node);
        final String lemma = node.getLemma();
        final String feature = nullness + "targetLemma_" + lemma;
        conjoinAndAdd(feature, frameAndRoleName, roleName, FRAME_AND_ROLE_NAME, featureMap);
        conjoinAndAdd(UNDERSCORE.join(feature, voice.name), frameAndRoleName, roleName, ROLE_NAME, featureMap);
        conjoinAndAdd(nullness + "targetPOS_" + node.getPOS(), frameAndRoleName, roleName, FRAME_AND_ROLE_NAME,
                featureMap);
    }

    final List<DependencyParse> tgtChildren = targetHeadNode.getChildren();
    conjoinAndAdd(nullness + "NCHILDREN_" + tgtChildren.size(), frameAndRoleName, roleName, FRAME_AND_ROLE_NAME,
            featureMap); // number of children

    // Dependency subcategorization
    String dsubcat = "";
    if (tgtChildren.size() > 0) {
        for (DependencyParse dpn : tgtChildren) {
            dsubcat += dpn.getLabelType() + "_";
            conjoinAndAdd(nullness + "SUBCAT_" + dpn.getLabelType(), frameAndRoleName, roleName, ROLE_NAME,
                    featureMap);
        }
        conjoinAndAdd(nullness + "SUBCATSEQ" + dsubcat, frameAndRoleName, roleName, ROLE_NAME, featureMap);
    }

    if (!isEmpty) { // overt (non-null) span
        // lemma, POS tag, voice, and relative position (with respect to target)
        // of each word in the candidate span
        extractChildPOSFeatures(featureMap, dp, nodes, fillerSpanRange, frameAndRoleName, roleName);
        final DependencyParse fillerHeadNode = DependencyParse.getHeuristicHead(nodes, fillerSpanRange);
        final List<Pair<String, DependencyParse>> targetToFillerPath = DependencyParse.getPath(targetHeadNode,
                fillerHeadNode);
        int pathSize = targetToFillerPath.size();
        String depTypePath = ""; // target's POS and dependency types on the
        // path to the head of the filler
        // To avoid feature explosion, only include paths of length <=7, and
        // only include the dependency types if the path's length is <=5.
        // (To put this in perspective, only a small fraction of FE fillers have a
        // path length of >5 in the training/dev data, and hardly any have length >7.)
        // e.g. "=<VB> !VMOD !PMOD" ( GIVE to [the paper *boy*] )
        // "=<PRP> ^OBJ ^VMOD ^VMOD !OBJ" ( want [*him*] to make a REQUEST )
        final int spanStart = fillerSpanRange.start;
        final int spanEnd = fillerSpanRange.end;
        final int targetStart = targetTokenNums[0];
        final int targetEnd = targetTokenNums[targetTokenNums.length - 1];
        if (isOverlap(spanStart, spanEnd, targetStart, targetEnd)) {

            //a few features describing
            //relative position of the span with respect to the target

            //does the span overlap with target
            conjoinAndAdd("O_W_T", frameAndRoleName, roleName, FRAME_AND_ROLE_NAME, featureMap);
            if (targetTokenNums.length > 1) {
                if ((spanStart < targetStart && spanEnd < targetEnd)
                        || (spanStart > targetStart && spanEnd > targetEnd)) {
                    //does the span cross the target
                    conjoinAndAdd("CROS_TAR", frameAndRoleName, roleName, NO_CONJOIN, featureMap);
                }
            }
        } else {
            // distance between nearest words of span and target
            String dist = getDistToTarget(targetStart, targetEnd, spanStart, spanEnd);
            String feature = "dist_" + dist;
            conjoinAndAdd(feature, frameAndRoleName, roleName, NO_CONJOIN, featureMap);
            if (dist.charAt(0) == '-') {
                //span is left to target
                feature = "LEFTTAR";
            } else {
                //span is right to target
                feature = "RIGHTTAR";
            }
            conjoinAndAdd(feature, frameAndRoleName, roleName, FRAME_AND_ROLE_NAME, featureMap);
            int targetMidpoint = (targetStart + targetEnd) / 2;
            int feMidpoint = (spanStart + spanEnd) / 2;
            //distance between words in the middle
            //of target span and candidate span
            feature = "midDist_" + getDistToTarget(targetMidpoint, targetMidpoint, feMidpoint, feMidpoint);
            conjoinAndAdd(feature, frameAndRoleName, roleName, NO_CONJOIN, featureMap);
        }

        if (pathSize <= 7) {
            DependencyParse lastNode = null;
            for (int i = 0; i < targetToFillerPath.size(); i++) {
                final Pair<String, DependencyParse> item = targetToFillerPath.get(i);
                final DependencyParse node = item.second;
                if (i == 0)
                    depTypePath += "=<" + node.getPOS() + ">";
                else {
                    if (item.first.equals("^"))
                        depTypePath += " ^" + ((pathSize <= 5) ? lastNode.getLabelType() : "");
                    else {
                        if (item.first.equals("!"))
                            depTypePath += " !" + ((pathSize <= 5) ? node.getLabelType() : "");
                    }
                }
                lastNode = node;
            }
        } else {
            depTypePath = "=<" + targetToFillerPath.get(0).second.getPOS() + "> ...";
        }
        conjoinAndAdd("depPath_" + depTypePath, frameAndRoleName, roleName, NO_CONJOIN, featureMap);
        conjoinAndAdd("pathLength_" + quantizeLength(pathSize), frameAndRoleName, roleName, NO_CONJOIN,
                featureMap);

        // head word
        // left and right most dependents
        List<DependencyParse> children = fillerHeadNode.getChildren();
        conjoinAndAdd("headLemma_" + fillerHeadNode.getLemma(), frameAndRoleName, roleName, NO_CONJOIN,
                featureMap);
        conjoinAndAdd("headPOS_" + fillerHeadNode.getPOS(), frameAndRoleName, roleName, NO_CONJOIN, featureMap);
        conjoinAndAdd("headLabel_" + fillerHeadNode.getLabelType(), frameAndRoleName, roleName, NO_CONJOIN,
                featureMap);
        if (children.size() > 0) {
            final DependencyParse firstChild = children.get(0);
            final DependencyParse lastChild = children.get(children.size() - 1);
            conjoinAndAdd("leftLemma_" + firstChild.getLemma(), frameAndRoleName, roleName, NO_CONJOIN,
                    featureMap);
            conjoinAndAdd("leftPOS_" + firstChild.getPOS(), frameAndRoleName, roleName, NO_CONJOIN, featureMap);
            conjoinAndAdd("rightLemma_" + lastChild.getLemma(), frameAndRoleName, roleName, NO_CONJOIN,
                    featureMap);
            conjoinAndAdd("rightPOS_" + lastChild.getPOS(), frameAndRoleName, roleName, NO_CONJOIN, featureMap);
        }

        // word/POS/dependency type of 1st, 2nd, and last word in the span
        int startNode = new Range1Based(fillerSpanRange).start;
        int endNode = new Range1Based(fillerSpanRange).end;

        if (isClosedClass(nodes[startNode].getPOS()))
            conjoinAndAdd("w[0]pos[0]_" + nodes[startNode].getWord() + " " + nodes[startNode].getPOS(),
                    frameAndRoleName, roleName, FRAME_AND_ROLE_NAME, featureMap);
        conjoinAndAdd("dep[0]_" + nodes[startNode].getLabelType(), frameAndRoleName, roleName,
                FRAME_AND_ROLE_NAME, featureMap);

        if (endNode - startNode > 0) {
            if (isClosedClass(nodes[startNode + 1].getPOS()))
                conjoinAndAdd(
                        "w[1]pos[1]_" + nodes[startNode + 1].getWord() + " " + nodes[startNode + 1].getPOS(),
                        frameAndRoleName, roleName, FRAME_AND_ROLE_NAME, featureMap);
            conjoinAndAdd("dep[1]_" + nodes[startNode + 1].getLabelType(), frameAndRoleName, roleName,
                    FRAME_AND_ROLE_NAME, featureMap);
            if (endNode - startNode > 1) {
                if (isClosedClass(nodes[endNode].getPOS()))
                    conjoinAndAdd(
                            "w[-1]pos[-1]_" + nodes[endNode].getWord() + " " + nodes[endNode].getPOS() + "_",
                            frameAndRoleName, roleName, FRAME_AND_ROLE_NAME, featureMap);
                conjoinAndAdd("dep[-1]_" + nodes[endNode].getLabelType() + "_", frameAndRoleName, roleName,
                        FRAME_AND_ROLE_NAME, featureMap);
            }
        }

        // length of the filler span
        conjoinAndAdd("len_" + quantizeLength(endNode - startNode + 1), frameAndRoleName, roleName,
                FRAME_AND_ROLE_NAME, featureMap);
    }
    return featureMap;
}

From source file:org.lightjason.agentspeak.language.execution.action.CProxyAction.java

/**
 * ctor
 *
 * @param p_actions actions definition
 * @param p_literal literal
 */
public CProxyAction(final Map<IPath, IAction> p_actions, final ILiteral p_literal) {
    // create cache for scoring action and define action
    final Multiset<IAction> l_scoringcache = HashMultiset.create();
    m_execution = new CActionWrapper(p_literal, p_actions, l_scoringcache);

    // the scoring set is fully populated, so copy it into an unmodifiable set
    m_scoringcache = ImmutableMultiset.copyOf(l_scoringcache);
}
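
The populate-then-freeze pattern above is a common Guava idiom: a mutable HashMultiset accumulates values, and ImmutableMultiset.copyOf takes a read-only snapshot of it. A minimal sketch:

Multiset<String> scratch = HashMultiset.create();
scratch.add("score");
scratch.add("score");

ImmutableMultiset<String> snapshot = ImmutableMultiset.copyOf(scratch);
// snapshot rejects mutation (add would throw UnsupportedOperationException),
// and later changes to scratch do not affect it.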

From source file:org.sonar.plugins.qi.AbstractViolationsDecorator.java

/**
 * Counts the number of violations by severity
 *
 * @param context the context
 * @return a multiset counting violations per severity
 */
protected Multiset<RulePriority> countViolationsBySeverity(DecoratorContext context) {
    List<Violation> violations = context.getViolations();
    Multiset<RulePriority> violationsBySeverity = HashMultiset.create();

    for (Violation violation : violations) {
        if (violation.getRule().getPluginName().equals(getPluginKey())) {
            violationsBySeverity.add(violation.getSeverity());
        }
    }
    return violationsBySeverity;
}

From source file:it.units.malelab.ege.core.listener.EvolutionImageSaverListener.java

@Override
public void listen(EvolutionEvent<G, T, F> event) {
    if (types.isEmpty()) {
        return;
    }
    List<List<Individual<G, T, F>>> rankedPopulation = ((GenerationEvent) event).getRankedPopulation();
    //update best usages
    Individual<G, T, F> best = rankedPopulation.get(0).get(0);
    if (types.contains(ImageType.BEST_USAGE)) {
        double[] bestUsages = new double[best.getGenotype().size()];
        int[] bitUsages = (int[]) best.getOtherInfo().get(StandardGEMapper.BIT_USAGES_INDEX_NAME);
        if (bitUsages != null) {
            double maxUsage = 0;
            for (int bitUsage : bitUsages) {
                maxUsage = Math.max(maxUsage, (double) bitUsage);
            }
            for (int i = 0; i < Math.min(bitUsages.length, bestUsages.length); i++) {
                bestUsages[i] = (double) bitUsages[i] / maxUsage;
            }
        }
        evolutionBestUsages.add(bestUsages);
    }
    //update diversities
    if (types.contains(ImageType.DIVERSITY) || types.contains(ImageType.DU)) {
        Set[] domains = new Set[best.getGenotype().size()];
        Multiset[] symbols = new Multiset[best.getGenotype().size()];
        for (int i = 0; i < symbols.length; i++) {
            symbols[i] = HashMultiset.create();
            domains[i] = new LinkedHashSet();
        }
        double[] counts = new double[best.getGenotype().size()];
        for (List<Individual<G, T, F>> rank : rankedPopulation) {
            for (Individual<G, T, F> individual : rank) {
                for (int i = 0; i < Math.min(best.getGenotype().size(), individual.getGenotype().size()); i++) {
                    counts[i] = counts[i] + 1;
                    symbols[i].add(individual.getGenotype().get(i));
                    domains[i].addAll(individual.getGenotype().domain(i));
                }
            }
        }
        double[] diversities = new double[best.getGenotype().size()];
        for (int i = 0; i < symbols.length; i++) {
            diversities[i] = Utils.multisetDiversity(symbols[i], domains[i]);
        }
        evolutionDiversities.add(diversities);
    }
    //update usages
    if (types.contains(ImageType.USAGE) || types.contains(ImageType.DU)) {
        double[] usages = new double[best.getGenotype().size()];
        double count = 0;
        for (List<Individual<G, T, F>> rank : rankedPopulation) {
            for (Individual<G, T, F> individual : rank) {
                int[] bitUsages = (int[]) individual.getOtherInfo().get(StandardGEMapper.BIT_USAGES_INDEX_NAME);
                if (bitUsages != null) {
                    double maxUsage = 0;
                    for (int bitUsage : bitUsages) {
                        maxUsage = Math.max(maxUsage, (double) bitUsage);
                    }
                    for (int i = 0; i < Math.min(bitUsages.length, usages.length); i++) {
                        usages[i] = usages[i] + (double) bitUsages[i] / maxUsage;
                    }
                    count = count + 1;
                }
            }
        }
        if (count > 0) {
            for (int i = 0; i < usages.length; i++) {
                usages[i] = usages[i] / count;
            }
        }
        evolutionUsages.add(usages);
    }
    if (event instanceof EvolutionEndEvent) {
        if (basePath != null) {
            //save
            String baseFileName = "";
            for (Object value : constants.values()) {
                baseFileName = baseFileName + value.toString() + "-";
            }
            if (types.contains(ImageType.DIVERSITY)) {
                saveCSV(basePath + File.separator + baseFileName + "diversity.csv",
                        toArray(evolutionDiversities));
                saveImage(basePath + File.separator + baseFileName + "diversity.png",
                        toArray(evolutionDiversities));
            }
            if (types.contains(ImageType.USAGE)) {
                saveImage(basePath + File.separator + baseFileName + "usage.png", toArray(evolutionUsages));
                saveCSV(basePath + File.separator + baseFileName + "usage.csv", toArray(evolutionUsages));
            }
            if (types.contains(ImageType.DU)) {
                saveImage(basePath + File.separator + baseFileName + "diversity_usage.png",
                        toArray(evolutionDiversities), toArray(evolutionUsages));
            }
            if (types.contains(ImageType.BEST_USAGE)) {
                saveImage(basePath + File.separator + baseFileName + "bestUsage.png",
                        toArray(evolutionBestUsages));
                saveCSV(basePath + File.separator + baseFileName + "bestUsage.csv",
                        toArray(evolutionBestUsages));
            }
        }
    }
    if (event instanceof EvolutionStartEvent) {
        //clear
        evolutionBestUsages.clear();
        evolutionUsages.clear();
        evolutionDiversities.clear();

    }
}

From source file:co.turnus.analysis.profiler.dynamic.StepDataBox.java

public void execute(Procedure procedure) {
    // check whether the procedure has already been called
    if (!procCalls.contains(procedure)) {
        HashMultiset<StateVariable> readVars = HashMultiset.create();
        procReadSvars.put(procedure, readVars);
        HashMultiset<StateVariable> writeVars = HashMultiset.create();
        procWriteSvars.put(procedure, writeVars);
        HashMultiset<Operator> opCalls = HashMultiset.create();
        procOpCalls.put(procedure, opCalls);
        HashMultiset<Procedure> calledProcedures = HashMultiset.create(); // renamed to avoid shadowing the procCalls field used below
        procProcCalls.put(procedure, calledProcedures);
    }
    procCalls.add(procedure);

    if (!procStack.isEmpty()) {
        procProcCalls.get(procStack.getLast()).add(procedure);
    }
    procStack.addLast(procedure);
}
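
A side note on the lazy initialization above: with a java.util.Map, the create-on-first-call step can be collapsed into computeIfAbsent. A sketch, assuming procOpCalls is a Map<Procedure, HashMultiset<Operator>> (its declaration is not shown in the excerpt, and op is a hypothetical Operator being recorded):

HashMultiset<Operator> opCalls = procOpCalls.computeIfAbsent(procedure, p -> HashMultiset.create());
opCalls.add(op);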

From source file:org.dllearner.utilities.examples.AutomaticNegativeExampleFinderSPARQL2.java

public SortedSet<OWLIndividual> getNegativeExamples(OWLClass classToDescribe,
        Set<OWLIndividual> positiveExamples, Map<Strategy, Double> strategiesWithWeight,
        int maxNrOfReturnedInstances) {
    //set class to describe as the type for each instance
    Multiset<OWLClass> types = HashMultiset.create();
    types.add(classToDescribe);

    return computeNegativeExamples(classToDescribe, types, strategiesWithWeight, maxNrOfReturnedInstances);
}