Example usage for com.google.common.collect Multiset add

Introduction

This page collects usage examples for com.google.common.collect.Multiset#add.

Prototype

@Override
boolean add(E element);

Document

Adds a single occurrence of the specified element to this multiset.
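
For reference, here is a minimal, self-contained sketch (not taken from any of the projects below; the class name MultisetAddDemo is made up for illustration) showing how repeated calls to add affect count(), size() and elementSet():

import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;

public class MultisetAddDemo {
    public static void main(String[] args) {
        Multiset<String> words = HashMultiset.create();
        words.add("guava");            // first occurrence
        words.add("guava");            // second occurrence of the same element
        words.add("multiset");

        System.out.println(words.count("guava"));        // 2 occurrences of "guava"
        System.out.println(words.size());                // 3 total occurrences
        System.out.println(words.elementSet().size());   // 2 distinct elements
    }
}

The examples below apply the same pattern at larger scale, using add together with count, size and elementSet to tally file names, checksums, request keys and other recurring values.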

Usage

From source file:edu.umd.cs.submitServer.servlets.AnalyzeArchives.java

@Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {

    Connection conn = null;
    response.setContentType("text/plain");
    PrintWriter writer = response.getWriter();

    Project project = (Project) request.getAttribute("project");
    Course course = (Course) request.getAttribute("course");

    long totalArchiveSpace = 0;
    long totalDistinctArchiveSpace = 0;
    HashSet<Integer> seen = new HashSet<Integer>();

    HashMap<String, FileContents> archiveContents = new HashMap<String, FileContents>();
    Multiset<String> files = HashMultiset.create();
    Multiset<String> checksums = HashMultiset.create();
    try {
        conn = getConnection();
        List<Integer> archives = Submission.getAllArchivesForProject(project.getProjectPK(), conn);
        writer.printf("Analyzing %d submissions for %s project %s%n", archives.size(), course.getCourseName(),
                project.getProjectNumber());
        for (Integer archivePK : archives) {
            byte[] bytes = Archive.downloadBytesFromArchive(Submission.SUBMISSION_ARCHIVES, archivePK, conn);
            totalArchiveSpace += bytes.length;
            if (!seen.add(archivePK))
                continue;
            totalDistinctArchiveSpace += bytes.length;
            TreeMap<String, byte[]> contents = Archive.unzip(new ByteArrayInputStream(bytes));

            for (Map.Entry<String, byte[]> e : contents.entrySet()) {
                byte[] archiveBytes = e.getValue();
                String checksum = Checksums.getChecksum(archiveBytes);
                String name = e.getKey();
                files.add(name);
                checksums.add(checksum);
                FileContents info = archiveContents.get(checksum);
                if (info == null) {

                    info = new FileContents(name, TextUtilities.isText(TextUtilities.simpleName(name)),
                            archiveBytes.length, checksum, null);
                    archiveContents.put(checksum, info);
                }

            }

        }

    } catch (SQLException e) {
        throw new ServletException(e);
    } finally {
        releaseConnection(conn);
    }
    long totalSize = 0;
    TreeSet<FileContents> ordered = new TreeSet<FileContents>(archiveContents.values());
    writer.printf("%5s %9s %s%n", "#", "size", "name");

    String prevName = null;
    for (FileContents info : ordered) {
        if (prevName == null || !prevName.equals(info.name)) {
            if (prevName != null)
                writer.println();
            writer.printf("%5d %9s %s%n", files.count(info.name), " ", info.name);
            prevName = info.name;
        }
        int count = checksums.count(info.checksum);
        writer.printf("%5d %9d %s%n", count, info.size, info.name);
        totalSize += info.size;
    }
    writer.printf("%n");
    writer.printf("%d distinct archives%n", seen.size());
    writer.printf("%d distinct files%n", files.elementSet().size());
    writer.printf("%d total files%n", files.size());

    writer.printf("%d bytes in distinct archives%n", totalDistinctArchiveSpace);
    writer.printf("%d bytes in repeated archives%n", totalArchiveSpace);
    writer.printf("%d bytes as files%n", totalSize);
}

From source file:org.eclipse.viatra.query.runtime.base.core.NavigationHelperContentAdapter.java

/**
 * This method uses either the original {@link EStructuralFeature} instance or the String id.
 */
private void addToReversedFeatureMap(final Object feature, final EObject holder) {
    Multiset<EObject> setVal = featureToHolderMap.get(feature);

    if (setVal == null) {
        setVal = HashMultiset.create();
        featureToHolderMap.put(feature, setVal);
    }
    setVal.add(holder);
}

From source file:org.sonar.server.es.request.ProxyBulkRequestBuilder.java

@Override
public String toString() {
    StringBuilder message = new StringBuilder();
    message.append("Bulk[");
    Multiset<BulkRequestKey> groupedRequests = LinkedHashMultiset.create();
    for (int i = 0; i < request.requests().size(); i++) {
        ActionRequest<?> item = request.requests().get(i);
        String requestType;
        String index;
        String docType;
        if (item instanceof IndexRequest) {
            IndexRequest request = (IndexRequest) item;
            requestType = "index";
            index = request.index();
            docType = request.type();
        } else if (item instanceof UpdateRequest) {
            UpdateRequest request = (UpdateRequest) item;
            requestType = "update";
            index = request.index();
            docType = request.type();
        } else if (item instanceof DeleteRequest) {
            DeleteRequest request = (DeleteRequest) item;
            requestType = "delete";
            index = request.index();
            docType = request.type();
        } else {
            // Cannot happen, not allowed by BulkRequest's contract
            throw new IllegalStateException("Unsupported bulk request type: " + item.getClass());
        }
        groupedRequests.add(new BulkRequestKey(requestType, index, docType));
    }

    Set<Multiset.Entry<BulkRequestKey>> entrySet = groupedRequests.entrySet();
    int size = entrySet.size();
    int current = 0;
    for (Multiset.Entry<BulkRequestKey> requestEntry : entrySet) {
        message.append(requestEntry.getCount()).append(" ").append(requestEntry.getElement().toString());
        current++;
        if (current < size) {
            message.append(", ");
        }
    }

    message.append("]");
    return message.toString();
}

From source file:BibTex.IOmethods.java

public void writeJournalsAndTheirCategories(Set<BibTexRef> refs, Integer minNumber) throws IOException {

    BufferedWriter bw = new BufferedWriter(new FileWriter(folder + "journals and their categories.csv"));
    //        BufferedWriter bwJournals = new BufferedWriter(new FileWriter(folder + "journals.csv"));
    StringBuilder sb = new StringBuilder();
    String sep = "|";

    //creation of convenient data structures for I/O
    Map<String, Multiset<String>> journalsAndTheirCategories = new HashMap<>();
    Multiset<String> journals = HashMultiset.create();

    JournalAbbreviationsMapping jmap = new JournalAbbreviationsMapping();
    jmap.loadMap();

    for (BibTexRef ref : refs) {
        Set<Category> categories = ref.getCategories();
        String title = ref.getJournal();
        if (title == null || title.isEmpty()) {
            continue;
        }
        title = title.toLowerCase();

        Set<String> abbrev = (Set<String>) jmap.getJournalsToAbbrev().get(title);
        if (abbrev == null || abbrev.isEmpty()) {
            abbrev = new HashSet<>();
            abbrev.add(title);
        }

        String abbreviation = abbrev.iterator().next();

        journals.add(abbreviation);
        if (!journalsAndTheirCategories.containsKey(abbreviation)) {
            Multiset<String> cats = HashMultiset.create();
            journalsAndTheirCategories.put(abbreviation, cats);
        }

        for (Category category : categories) {
            journalsAndTheirCategories.get(abbreviation).add(category.getCategoryName());
        }
    }

    for (String journal : journalsAndTheirCategories.keySet()) {
        if (journals.count(journal) < minNumber) {
            continue;
        }

        for (String category : journalsAndTheirCategories.get(journal).elementSet()) {
            sb.append(journal).append(sep).append(category).append(sep)
                    .append(journalsAndTheirCategories.get(journal).count(category)).append("\n");
        }
    }
    bw.write(sb.toString());
    bw.close();
    //        sb = new StringBuilder();
    //        for (String journal : journalsAndTheirCategories.keySet()) {
    //            Set<String> abbrev = (Set<String>) jmap.getJournalsToAbbrev().get(journal);
    //            if (abbrev == null || abbrev.isEmpty()) {
    //                abbrev = new HashSet();
    //                abbrev.add(journal);
    //            }
    //            sb.append(journal).append(sep).append(abbrev.iterator().next()).append("\n");
    //        }
    //        bwJournals.write(sb.toString());
    //        bwJournals.close();
}

From source file:net.shipilev.concurrent.torture.Runner.java

private <S> void run(final OneActorOneObserverTest<S> test, int time, boolean dryRun)
        throws InterruptedException, ExecutionException {
    final SingleSharedStateHolder<S> holder = new SingleSharedStateHolder<S>();

    // current should be null so that injector could inject the first instance
    holder.current = null;

    isStopped = false;

    /*
       Injector thread: injects new states until interrupted.
     */
    Future<?> s1 = pool.submit(new Runnable() {
        public void run() {
            while (!isStopped) {

                @SuppressWarnings("unchecked")
                S[] newStride = (S[]) new Object[loops];

                for (int c = 0; c < loops; c++) {
                    newStride[c] = test.newState();
                }

                while (holder.current != null) {
                    if (isStopped) {
                        return;
                    }
                    if (shouldYield)
                        Thread.yield();
                }
                holder.current = newStride;
            }
        }
    });

    /*
       Actor 1 thread.
       The rationale for its loop is as follows:
      a. We should be easy on checking the interrupted status, hence we do $LOOPS internally
      b. Thread should not observe the state object more than once
     */
    Future<?> a1 = pool.submit(new Runnable() {
        public void run() {
            S[] last = null;

            int[] indices = generatePermutation(loops);

            while (!isStopped) {
                S[] cur = holder.current;
                if (cur != null && last != cur) {
                    for (int l = 0; l < loops; l++) {
                        test.actor1(cur[indices[l]]);
                    }
                    last = cur;
                } else {
                    if (shouldYield)
                        Thread.yield();
                }
            }
        }
    });

    /*
      Observer thread.
      The rationale for its loop is as follows:
      a. We should be easy on checking the interrupted status, hence we do $LOOPS internally
      b. Thread should not observe the state object more than once
      c. The overhead of doing the work inside the inner loop should be small
      d. $state is getting reused, so we end up marshalling it to long to count properly
    */
    Future<Multiset<Long>> res = pool.submit(new Callable<Multiset<Long>>() {
        public Multiset<Long> call() {
            Multiset<Long> set = HashMultiset.create();

            S[] last = null;
            byte[] state = new byte[8];
            byte[][] results = new byte[loops][];

            int[] indices = generatePermutation(loops);

            while (!isStopped) {
                S[] cur = holder.current;

                if (cur != null && last != cur) {
                    for (int l = 0; l < loops; l++) {
                        int index = indices[l];
                        test.observe(cur[index], state);
                        results[index] = new byte[8];
                        System.arraycopy(state, 0, results[index], 0, 8);
                    }

                    last = cur;

                    for (int i = 0; i < loops; i++) {
                        set.add(byteArrToLong(results[i]));
                    }

                    // let others proceed
                    holder.current = null;
                } else {
                    if (shouldYield)
                        Thread.yield();
                }
            }
            return set;
        }
    });

    TimeUnit.MILLISECONDS.sleep(time);

    isStopped = true;
    a1.get();
    s1.get();
    res.get();

    if (!dryRun) {
        Result r = dump(test, res.get());
        judge(r);
    }
}

From source file:com.google.devtools.kythe.analyzers.java.JavaEntrySets.java

private int hashSymbol(Symbol sym) {
    // This method is necessary because Symbol, and most other javac internals, do not override the
    // Object#hashCode() method and the default implementation, System#identityHashCode(Object), is
    // practically useless because it can change across JVM instances.  This method instead only
    // uses stable hashing methods such as String#hashCode(), Multiset#hashCode(), and
    // Integer#hashCode().

    if (symbolHashes.containsKey(sym)) {
        return symbolHashes.get(sym);
    }

    Multiset<Integer> hashes = HashMultiset.create();
    if (sym.members() != null) {
        for (Symbol member : sym.members().getSymbols()) {
            if (member.isPrivate()
                    || member instanceof MethodSymbol && ((MethodSymbol) member).isStaticOrInstanceInit()
                    || ((member.flags_field & (Flags.BRIDGE | Flags.SYNTHETIC)) != 0)) {
                // Ignore initializers, private members, and synthetic members.  It's possible these do
                // not appear in the symbol's scope outside of its .java source compilation (i.e. they do
                // not appear in dependent compilations for Bazel's java rules).
                continue;
            }
            // We can't recursively get the result of hashSymbol(member) since the extractor removes all
            // .class files not directly used by a compilation meaning that member may not be complete.
            hashes.add(member.getSimpleName().toString().hashCode());
            hashes.add(member.kind.ordinal());
        }
    }

    hashes.add(sym.getQualifiedName().toString().hashCode());
    hashes.add(sym.getKind().ordinal());
    for (Modifier mod : sym.getModifiers()) {
        hashes.add(mod.ordinal());
    }

    int h = hashes.hashCode();
    symbolHashes.put(sym, h);
    return h;
}

From source file:org.caleydo.view.domino.internal.Block.java

/**
 * @param idCategory
 * @param count
 */
public void directions(IDCategory category, Multiset<EDimension> count) {
    for (LinearBlock l : linearBlocks) {
        if (!category.isOfCategory(l.getIdType()))
            continue;
        count.add(l.getDim().opposite());
    }
}

From source file:net.shipilev.concurrent.torture.Runner.java

public <S> void run(final TwoActorsOneArbiterTest<S> test, int time, boolean dryRun)
        throws InterruptedException, ExecutionException {
    final TwoSharedStateHolder<S> holder = new TwoSharedStateHolder<S>();

    // need to initialize so that actor thread will not NPE.
    // once injector catches up, it will push fresh state objects
    holder.current = test.newState();

    isStopped = false;

    /*
      Injector thread: injects new states until interrupted.
      There is an additional constraint:
      a. If actors' results are not yet consumed, do not push the new state.
         This will effectively block actors from working until arbiter consumes their result.
    */
    Future<?> s1 = pool.submit(new Runnable() {
        public void run() {
            while (!isStopped) {
                while (holder.t1 != null && holder.t2 != null && !isStopped)
                    if (shouldYield)
                        Thread.yield();
                holder.current = test.newState();
            }
        }
    });

    /*
       Actor 1 thread.
       The rationale for its loop is as follows:
      a. We should be easy on checking the interrupted status, hence we do $LOOPS internally
      b. Thread should not observe the state object more than once
      c. Once thread is done with its work, it publishes the reference to state object for arbiter
     */
    Future<?> a1 = pool.submit(new Runnable() {
        public void run() {
            S last = null;

            while (!isStopped) {
                int l = 0;
                while (l < loops) {
                    S cur = holder.current;
                    if (last != cur) {
                        test.actor1(cur);
                        holder.t1 = cur;
                        last = cur;
                    } else {
                        if (shouldYield)
                            Thread.yield();
                    }
                    l++;
                }
            }
        }
    });

    /*
       Actor 2 thread.
       The rationale for its loop is as follows:
      a. We should be easy on checking the interrupted status, hence we do $LOOPS internally
      b. Thread should not observe the state object more than once
      c. Once thread is done with its work, it publishes the reference to state object for arbiter
     */
    Future<?> a2 = pool.submit(new Runnable() {
        public void run() {
            S last = null;
            while (!isStopped) {
                int l = 0;
                while (l < loops) {
                    S cur = holder.current;
                    if (last != cur) {
                        test.actor2(cur);
                        last = cur;
                        holder.t2 = cur;
                    } else {
                        if (shouldYield)
                            Thread.yield();
                    }
                    l++;
                }
            }
        }
    });

    /*
      Arbiter thread.
      The rationale for its loop is as follows:
      a. We should be easy on checking the interrupted status, hence we do $LOOPS internally
      b. Thread should not observe the state object more than once
      c. The overhead of doing the work inside the inner loop should be small
      d. $state is getting reused, so we end up marshalling it to long to count properly
      e. Arbiter waits until both actors have finished their work and published their results
    */
    Future<Multiset<Long>> res = pool.submit(new Callable<Multiset<Long>>() {
        public Multiset<Long> call() {
            byte[] res = new byte[8];

            Multiset<Long> set = HashMultiset.create();

            byte[][] results = new byte[loops][];
            while (!isStopped) {
                int c = 0;
                int l = 0;
                while (l < loops) {
                    S s1 = holder.t1;
                    S s2 = holder.t2;
                    if (s1 == s2 && s1 != null) {
                        test.arbitrate(s1, res);
                        results[c] = new byte[8];
                        System.arraycopy(res, 0, results[c], 0, 8);
                        c++;
                        holder.t1 = null;
                        holder.t2 = null;
                    } else {
                        if (shouldYield)
                            Thread.yield();
                    }
                    l++;
                }

                for (int i = 0; i < c; i++) {
                    set.add(byteArrToLong(results[i]));
                }
            }
            return set;
        }
    });

    TimeUnit.MILLISECONDS.sleep(time);

    isStopped = true;
    s1.get();
    a1.get();
    a2.get();
    res.get();

    if (!dryRun) {
        Result r = dump(test, res.get());
        judge(r);
    }
}

From source file:de.hzi.helmholtz.Compare.PathwayComparisonUsingModules.java

public Multimap<Double, String> SubsetsMatching(final PathwayUsingModules firstPathway,
        final PathwayUsingModules secondPathway, BiMap<String, Integer> newSourceGeneIdToPositionMap,
        BiMap<String, Integer> newTargetGeneIdToPositionMap, int Yes) {
    Multimap<Double, String> resultPerfect = TreeMultimap.create(Ordering.natural().reverse(),
            Ordering.natural());
    PathwayUsingModules firstPathwayCopy = new PathwayUsingModules(firstPathway);// Copy of the Query pathway
    PathwayUsingModules secondPathwayCopy = new PathwayUsingModules(secondPathway);// Copy of the Target pathway
    // PathwayUsingModules secondPathwayCopy1 = new PathwayUsingModules(secondPathway);
    int currentQueryGene = 0;
    Iterator<Module> sourceGeneIt = firstPathway.geneIterator();
    List<String> QueryToRemove = new ArrayList<String>();
    List<String> TargetToRemove = new ArrayList<String>();
    while (sourceGeneIt.hasNext()) {
        currentQueryGene++;
        Module queryGene = sourceGeneIt.next();

        int currentTargetGene = 0;
        Multiset<String> qfunction = LinkedHashMultiset.create();
        List<String> qfunctionList = new ArrayList<String>();
        List<String> qactivity = new ArrayList<String>();
        List<Set<String>> qsubstrate = new ArrayList<Set<String>>();
        for (Domain d : queryGene.getDomains()) {
            qfunction.add(d.getDomainFunctionString());
            qfunctionList.add(d.getDomainFunctionString());
            qactivity.add(d.getStatus().toString());
            qsubstrate.add(d.getSubstrates());
        }
        Iterator<Module> targetGeneIt = secondPathway.geneIterator();

        while (targetGeneIt.hasNext()) {
            currentTargetGene++;
            Module targetGene = targetGeneIt.next();
            Multiset<String> tfunction = LinkedHashMultiset.create();
            List<String> tfunctionList = new ArrayList<String>();
            List<String> tactivity = new ArrayList<String>();
            List<Set<String>> tsubstrate = new ArrayList<Set<String>>();
            for (Domain d : targetGene.getDomains()) {
                tfunctionList.add(d.getDomainFunctionString());
                tfunction.add(d.getDomainFunctionString());
                tactivity.add(d.getStatus().toString());
                tsubstrate.add(d.getSubstrates());
            }
            Multiset<String> DomainsCovered = Multisets.intersection(qfunction, tfunction);
            if (DomainsCovered.size() == qfunction.size() && DomainsCovered.size() == tfunction.size()) {
                Multimap<Double, Multimap<String, Integer>> activityscores = myFunction.calculate(qactivity,
                        tactivity);
                Multimap<String, Integer> Functionscores = ArrayListMultimap.create();

                int TranspositionDomains = LevenshteinDistance.computeLevenshteinDistance(qfunctionList,
                        tfunctionList);
                if (TranspositionDomains > 0) {
                    TranspositionDomains = 1;
                }

                Functionscores.put(qfunction.size() + "-0", TranspositionDomains);
                Multimap<Double, Multimap<String, Integer>> substratescore = myFunction
                        .calculate(getSubstrateList(qsubstrate), getSubstrateList(tsubstrate));
                Object activityScore = activityscores.asMap().keySet().toArray()[0];
                Object substrateScore = substratescore.asMap().keySet().toArray()[0];
                double finalScore = Math
                        .round((((2.9 * 1.0) + (0.05 * Double.parseDouble(activityScore.toString().trim()))
                                + (0.05 * Double.parseDouble(substrateScore.toString().trim()))) / 3) * 100.0)
                        / 100.0;
                String ConvertedGeneIDs = "";
                if (Yes == 0) {
                    ConvertedGeneIDs = reconstructWithGeneId(Integer.toString(currentQueryGene),
                            newSourceGeneIdToPositionMap) + "->"
                            + reconstructWithGeneId(Integer.toString(currentTargetGene),
                                    newTargetGeneIdToPositionMap);
                } else {
                    ConvertedGeneIDs = reconstructWithGeneId(Integer.toString(currentTargetGene),
                            newTargetGeneIdToPositionMap) + "->"
                            + reconstructWithGeneId(Integer.toString(currentQueryGene),
                                    newSourceGeneIdToPositionMap);
                }
                resultPerfect.put(finalScore, ConvertedGeneIDs);
                ScoreFunctionMatchMisMatch.put(ConvertedGeneIDs, Functionscores);
                ScoreStatusMatchMisMatch.putAll(ConvertedGeneIDs, activityscores.values());
                ScoreSubstrateMatchMisMatch.putAll(ConvertedGeneIDs, substratescore.values());

                TargetToRemove.add(reconstructWithGeneId(Integer.toString(currentTargetGene),
                        newTargetGeneIdToPositionMap));
                QueryToRemove.add(reconstructWithGeneId(Integer.toString(currentQueryGene),
                        newSourceGeneIdToPositionMap));
            }
        }

    }
    for (String i : TargetToRemove) {
        secondPathwayCopy.removeModule(i);
    }
    for (String i : QueryToRemove) {
        firstPathwayCopy.removeModule(i);
    }
    if (firstPathwayCopy.size() > 0 && secondPathwayCopy.size() > 0) {
        // Re-construct the bimaps
        newSourceGeneIdToPositionMap = HashBiMap.create();
        int temp = 0;
        for (Module e : firstPathwayCopy.getModules()) {
            temp = temp + 1;
            newSourceGeneIdToPositionMap.put(e.getModuleId(), temp);
        }
        newTargetGeneIdToPositionMap = HashBiMap.create();
        temp = 0;
        for (Module e : secondPathwayCopy.getModules()) {
            temp = temp + 1;
            newTargetGeneIdToPositionMap.put(e.getModuleId(), temp);
        }
        resultPerfect.putAll(SubsetIdentification(firstPathwayCopy, secondPathwayCopy,
                newSourceGeneIdToPositionMap, newTargetGeneIdToPositionMap, Yes));
    }
    ////System.out.println(resultPerfect);
    return resultPerfect;
}

From source file:org.kalypso.ogc.sensor.timeseries.TimestampGuesser.java

/**
 * This function guesses the timestamp.
 * 
 * @return The timestamp in UTC.
 */
public LocalTime execute() throws SensorException {
    /* Used to determine the number of equal timestamps. */
    final Multiset<LocalTime> timestampsDSTWinter = HashMultiset.create();
    final Multiset<LocalTime> timestampsDSTSummer = HashMultiset.create();

    /* Get the number of test steps. */
    final int testSteps = getTestSteps();

    /* Find the date axis. */
    final IAxis dateAxis = AxisUtils.findDateAxis(m_timeseries.getAxes());
    if (dateAxis == null)
        throw new IllegalArgumentException("Argument must be a timeseries"); //$NON-NLS-1$

    /* Collect all timestamps. */
    for (int i = 0; i < testSteps; i++) {
        /* REMARK: We need UTC here. */
        final Date date = (Date) m_timeseries.get(i, dateAxis);
        final Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("UTC")); //$NON-NLS-1$
        calendar.setTime(date);

        /* Differentiate between daylight saving winter and summer times. */
        /* Old zml daylight saving summer times are possibly broken! */
        final boolean dstWinterTime = CalendarUtilities.isDSTWinterTime(date);

        /* REMARK: The ISO Chronology used will have the UTC timezone set. */
        /* REMARK: See the source code of the constructor. */
        final LocalTime timestamp = new LocalTime(calendar.get(Calendar.HOUR_OF_DAY),
                calendar.get(Calendar.MINUTE));
        if (dstWinterTime)
            timestampsDSTWinter.add(timestamp);
        else
            timestampsDSTSummer.add(timestamp);
    }

    /* We want to use the one with the most occurrences. */
    final LocalTime timestamp = doGuessTimestamp(timestampsDSTWinter);
    if (timestamp != null)
        return timestamp;

    return doGuessTimestamp(timestampsDSTSummer);
}