Example usage for com.google.common.collect Multimap entries

Introduction

This page collects usage examples for com.google.common.collect Multimap#entries().

Prototype

Collection<Map.Entry<K, V>> entries();

Document

Returns a view collection of all key-value pairs contained in this multimap, as Map.Entry instances.
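
The returned collection is a live view backed by the multimap: removing an entry from it removes the corresponding mapping, though adding to the collection is not possible. Before the real-world examples below, here is a minimal self-contained sketch of that behavior (class and variable names are ours, not taken from any of the examples):

import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;

import java.util.Map;

public class MultimapEntriesDemo {
    public static void main(String[] args) {
        Multimap<String, Integer> scores = HashMultimap.create();
        scores.put("alice", 1);
        scores.put("alice", 2);
        scores.put("bob", 3);

        // One Map.Entry per key-value pair: "alice" contributes two
        // entries, so this loop prints three lines.
        for (Map.Entry<String, Integer> entry : scores.entries()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }

        // The view writes through: removing an entry from entries()
        // removes the mapping from the multimap itself.
        scores.entries().removeIf(e -> e.getValue() == 3);
        System.out.println(scores.containsKey("bob")); // prints false
    }
}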

Usage

From source file: us.physion.ovation.ui.editor.ResourceInfoPanel.java
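
Here the UI collects named Source and Revision objects into two HashMultimaps, then iterates entries() on the EDT to insert one inputs panel per key-value pair.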

private void updateInputs() {
    final Multimap<String, Source> sources = HashMultimap.create();
    final Multimap<String, Revision> inputResources = HashMultimap.create();

    for (Measurement m : getMeasurements()) {
        for (String s : m.getSourceNames()) {
            sources.putAll(s, m.getEpoch().getInputSources().get(s));
        }
    }

    getResources().stream().forEach((r) -> {
        if (r.getContainingEntity() instanceof AnalysisRecord) {
            AnalysisRecord record = (AnalysisRecord) r.getContainingEntity();

            record.getInputs().keySet().stream().forEach((s) -> {
                inputResources.put(s, record.getInputs().get(s));
            });
        }

        Revision rev = r.getHeadRevision();
        if (rev != null) {
            rev.getInputSources().forEach((s) -> {
                sources.put(s.getLabel(), s);
            });
        }

    });

    getRevisions().stream().forEach((rev) -> {
        rev.getInputSources().forEach((s) -> {
            sources.put(s.getLabel(), s);
        });
    });

    EventQueueUtilities.runOnEDT(() -> {
        inputsTextPane.setText("");

        for (Map.Entry<String, Source> namedSource : sources.entries()) {
            insertInputsPanel(namedSource.getKey(), namedSource.getValue());
        }

        for (Map.Entry<String, Revision> namedInput : inputResources.entries()) {
            insertInputsPanel(namedInput.getKey(), namedInput.getValue());
        }
    });

}

From source file: com.bigdata.dastor.service.StorageService.java
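
Here changedRanges.entries() is scanned to collect the ranges the local node has become responsible for; a live source is then chosen per range and the data is streamed in.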

/**
 * Called when an endPoint is removed from the ring without proper
 * STATE_LEAVING -> STATE_LEFT sequence. This function checks
 * whether this node becomes responsible for new ranges as a
 * consequence and streams data if needed.
 *
 * This is rather inefficient, but it does not matter much
 * since this is called very seldom.
 *
 * @param endPoint node that has left
 */
private void restoreReplicaCount(InetAddress endPoint) {
    InetAddress myAddress = FBUtilities.getLocalAddress();

    for (String table : DatabaseDescriptor.getNonSystemTables()) {
        // get all ranges that change ownership (that is, a node needs
        // to take responsibility for new range)
        Multimap<Range, InetAddress> changedRanges = getChangedRangesForLeaving(table, endPoint);

        // check if any of these ranges are coming our way
        Set<Range> myNewRanges = new HashSet<Range>();
        for (Map.Entry<Range, InetAddress> entry : changedRanges.entries()) {
            if (entry.getValue().equals(myAddress))
                myNewRanges.add(entry.getKey());
        }

        if (!myNewRanges.isEmpty()) {
            if (logger_.isDebugEnabled())
                logger_.debug(
                        endPoint + " was removed, my added ranges: " + StringUtils.join(myNewRanges, ", "));

            Multimap<Range, InetAddress> rangeAddresses = getReplicationStrategy(table)
                    .getRangeAddresses(tokenMetadata_, table);
            Multimap<InetAddress, Range> sourceRanges = HashMultimap.create();
            IFailureDetector failureDetector = FailureDetector.instance;

            // find alive sources for our new ranges
            for (Range myNewRange : myNewRanges) {
                List<InetAddress> sources = DatabaseDescriptor.getEndPointSnitch(table)
                        .getSortedListByProximity(myAddress, rangeAddresses.get(myNewRange));

                assert (!sources.contains(myAddress));

                for (InetAddress source : sources) {
                    if (source.equals(endPoint))
                        continue;

                    if (failureDetector.isAlive(source)) {
                        sourceRanges.put(source, myNewRange);
                        break;
                    }
                }
            }

            // Finally we have a list of addresses and ranges to
            // stream. Proceed to stream them.
            for (Map.Entry<InetAddress, Collection<Range>> entry : sourceRanges.asMap().entrySet()) {
                if (logger_.isDebugEnabled())
                    logger_.debug("Requesting from " + entry.getKey() + " ranges "
                            + StringUtils.join(entry.getValue(), ", "));
                StreamIn.requestRanges(entry.getKey(), table, entry.getValue());
            }
        }
    }
}

From source file: org.apache.pulsar.broker.loadbalance.impl.SimpleLoadManagerImpl.java
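
Here an explicit iterator over finalCandidates.entries() lets candidates that point to inactive brokers be removed safely during iteration.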

private synchronized ResourceUnit getLeastLoadedBroker(ServiceUnitId serviceUnit,
        Map<Long, Set<ResourceUnit>> availableBrokers) {
    ResourceUnit selectedBroker = null;
    // If the broker is already assigned, return that candidate.
    for (final Map.Entry<ResourceUnit, ResourceUnitRanking> entry : resourceUnitRankings.entrySet()) {
        final ResourceUnit resourceUnit = entry.getKey();
        final ResourceUnitRanking ranking = entry.getValue();
        if (ranking.isServiceUnitPreAllocated(serviceUnit.toString())) {
            return resourceUnit;
        }
    }
    Multimap<Long, ResourceUnit> finalCandidates = getFinalCandidates(serviceUnit, availableBrokers);
    // Remove candidates that point to inactive brokers
    Set<String> activeBrokers = Collections.emptySet();
    try {
        activeBrokers = availableActiveBrokers.get();
        // Need to use an explicit Iterator object to prevent concurrent modification exceptions
        Iterator<Map.Entry<Long, ResourceUnit>> candidateIterator = finalCandidates.entries().iterator();
        while (candidateIterator.hasNext()) {
            Map.Entry<Long, ResourceUnit> candidate = candidateIterator.next();
            String candidateBrokerName = candidate.getValue().getResourceId().replace("http://", "");
            if (!activeBrokers.contains(candidateBrokerName)) {
                candidateIterator.remove(); // Current candidate points to an inactive broker, so remove it
            }
        }
    } catch (Exception e) {
        log.warn("Error during attempt to remove inactive brokers while searching for least active broker", e);
    }

    if (finalCandidates.size() > 0) {
        if (this.getLoadBalancerPlacementStrategy().equals(LOADBALANCER_STRATEGY_LLS)
                || this.getLoadBalancerPlacementStrategy().equals(LOADBALANCER_STRATEGY_LEAST_MSG)) {
            selectedBroker = findBrokerForPlacement(finalCandidates, serviceUnit);
        } else {
            selectedBroker = placementStrategy.findBrokerForPlacement(finalCandidates);
        }
        log.info("Selected : [{}] for ServiceUnit : [{}]", selectedBroker.getResourceId(),
                serviceUnit.toString());
        return selectedBroker;
    } else {
        // No available broker found
        log.warn("No broker available to acquire service unit: [{}]", serviceUnit);
        return null;
    }
}

From source file: com.palantir.atlasdb.cleaner.Scrubber.java
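
Here tableNameToCell.entries() is partitioned into batches with Iterables.partition; each batch is rebuilt into a multimap and scrubbed, possibly on a separate thread.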

void scrubImmediately(final TransactionManager txManager,
        final Multimap<String, Cell> tableNameToCell, final long scrubTimestamp, final long commitTimestamp) {
    if (log.isInfoEnabled()) {
        log.info("Scrubbing a total of " + tableNameToCell.size() + " cells immediately.");
    }

    // Note that if the background scrub thread is also running at the same time, it will try to scrub
    // the same cells as the current thread (since these cells were queued for scrubbing right before
    // the hard delete transaction committed). While this is unfortunate (because it means we will be
    // doing more work than necessary), the behavior is still correct.
    long nextImmutableTimestamp;
    while ((nextImmutableTimestamp = immutableTimestampSupplier.get()) < commitTimestamp) {
        try {
            if (log.isInfoEnabled()) {
                log.info(String.format(
                        "Sleeping because immutable timestamp %d has not advanced to at least commit timestamp %d",
                        nextImmutableTimestamp, commitTimestamp));
            }
            Thread.sleep(AtlasDbConstants.SCRUBBER_RETRY_DELAY_MILLIS);
        } catch (InterruptedException e) {
            log.error("Interrupted while waiting for immutableTimestamp to advance past commitTimestamp", e);
        }
    }

    List<Future<Void>> scrubFutures = Lists.newArrayList();
    for (List<Entry<String, Cell>> batch : Iterables.partition(tableNameToCell.entries(),
            batchSizeSupplier.get())) {
        final Multimap<String, Cell> batchMultimap = HashMultimap.create();
        for (Entry<String, Cell> e : batch) {
            batchMultimap.put(e.getKey(), e.getValue());
        }

        final Callable<Void> c = new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                if (log.isInfoEnabled()) {
                    log.info("Scrubbing " + batchMultimap.size() + " cells immediately.");
                }

                // Here we don't need to check scrub timestamps because we guarantee that scrubImmediately is called
                // AFTER the transaction commits
                scrubCells(txManager, batchMultimap, scrubTimestamp, TransactionType.AGGRESSIVE_HARD_DELETE);

                Multimap<Cell, Long> cellToScrubTimestamp = HashMultimap.create();

                cellToScrubTimestamp = Multimaps.invertFrom(
                        Multimaps.index(batchMultimap.values(), Functions.constant(scrubTimestamp)),
                        cellToScrubTimestamp);

                scrubberStore.markCellsAsScrubbed(cellToScrubTimestamp, batchSizeSupplier.get());

                if (log.isInfoEnabled()) {
                    log.info("Completed scrub immediately.");
                }
                return null;
            }
        };
        if (!inScrubThread.get()) {
            scrubFutures.add(exec.submit(new Callable<Void>() {
                @Override
                public Void call() throws Exception {
                    inScrubThread.set(true);
                    c.call();
                    return null;
                }
            }));
        } else {
            try {
                c.call();
            } catch (Exception e) {
                throw Throwables.throwUncheckedException(e);
            }
        }
    }

    for (Future<Void> future : scrubFutures) {
        try {
            future.get();
        } catch (InterruptedException e) {
            throw Throwables.throwUncheckedException(e);
        } catch (ExecutionException e) {
            throw Throwables.rewrapAndThrowUncheckedException(e);
        }
    }
}

From source file: org.spongepowered.eventimplgen.eventgencore.AccessorFirstStrategy.java
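
Here accessor methods are grouped by property name in a multimap, and accessors.entries() is iterated to pair each accessor with a matching mutator and build a Property.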

@Override
public List<Property> findProperties(final CtTypeReference<?> type) {
    checkNotNull(type, "type");

    final Multimap<String, CtMethod<?>> accessors = HashMultimap.create();
    final Multimap<String, CtMethod<?>> mutators = HashMultimap.create();
    final Map<String, CtMethod<?>> accessorHierarchyBottoms = new HashMap<>();
    final Map<String, CtMethod<?>> mostSpecific = new HashMap<>();
    final Set<String> signatures = new HashSet<>();

    for (CtMethod<?> method : type.getDeclaration().getAllMethods()) {
        String name;

        String signature = method.getSimpleName() + ";";
        for (CtParameter<?> parameterType : method.getParameters()) {
            signature += parameterType.getType().getQualifiedName() + ";";
        }
        signature += method.getType().getSimpleName();

        CtMethod<?> leastSpecificMethod;
        if ((name = getAccessorName(method)) != null && !signatures.contains(signature)
                && ((leastSpecificMethod = accessorHierarchyBottoms.get(name)) == null || !leastSpecificMethod
                        .getType().getQualifiedName().equals(method.getType().getQualifiedName()))) {
            accessors.put(name, method);
            signatures.add(signature);

            if (!mostSpecific.containsKey(name)
                    || method.getType().isSubtypeOf(mostSpecific.get(name).getType())) {
                mostSpecific.put(name, method);
            }

            if (accessorHierarchyBottoms.get(name) == null
                    || accessorHierarchyBottoms.get(name).getType().isSubtypeOf(method.getType())) {
                accessorHierarchyBottoms.put(name, method);
            }
        } else if ((name = getMutatorName(method)) != null) {
            mutators.put(name, method);
        }
    }

    final List<Property> result = new ArrayList<>();

    for (Map.Entry<String, CtMethod<?>> entry : accessors.entries()) {
        final CtMethod<?> accessor = entry.getValue();

        @Nullable
        final CtMethod<?> mutator = findMutator(entry.getValue(), mutators.get(entry.getKey()));
        result.add(
                new Property(entry.getKey(), accessor.getType(), accessorHierarchyBottoms.get(entry.getKey()),
                        mostSpecific.get(entry.getKey()), accessor, mutator));
    }

    result.sort(Comparator.comparing(Property::getName));
    return ImmutableList.copyOf(result);
}

From source file: no.ssb.vtl.script.operations.hierarchy.HierarchyOperation.java
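
Here data points are bucketed per hierarchy node in an ArrayListMultimap, propagated along the graph, and buckets.entries() is iterated to emit one composed data point per (group, point) pair.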

@Override
public Stream<DataPoint> getData() {

    final DataStructure structure = getDataStructure();
    final Order groupOrder = computeOrder();
    final Order groupPredicate = computePredicate();

    // TODO: Save the graph in the correct order.
    final List<VTLObject> sorted = getGraphValues();

    final Map<Component, HierarchyAccumulator> accumulators = createAccumulatorMap();

    // Get the data sorted.
    Stream<DataPoint> sortedData = getChild().getData(groupOrder)
            .orElseGet(() -> getChild().getData().sorted(groupOrder));

    Stream<ComposedDataPoint> streamToAggregate = StreamUtils
            .aggregate(sortedData, (prev, current) -> groupPredicate.compare(prev, current) == 0)
            .onClose(sortedData::close).map(dataPoints -> {

                // Organize the data points in "buckets" for each component. Here we add "sign" information
                // to the data points so that we can use it later when we aggregate.
                Multimap<VTLObject, ComposedDataPoint> buckets = ArrayListMultimap.create();
                for (DataPoint dataPoint : dataPoints) {
                    Map<Component, VTLObject> map = structure.asMap(dataPoint);
                    VTLObject group = map.get(this.component);
                    buckets.put(group, new ComposedDataPoint(dataPoint, Composition.UNION));
                }

                // TODO: Filter the nodes by the keys of the bucket (and check that it is faster)
                // For each node, put its content in every successor. If the edge is a complement (-), invert
                // the sign of each data point (i.e. a - (b - c + d) = a - b + c - d).
                for (VTLObject node : sorted) {
                    for (VTLObject successor : graph.successors(node)) {
                        Composition sign = graph.edgeValue(node, successor);
                        for (ComposedDataPoint point : buckets.get(node)) {
                            if (Composition.COMPLEMENT.equals(sign)) {
                                // Invert if complement.
                                buckets.put(successor, ComposedDataPoint.invert(point));
                            } else {
                                buckets.put(successor, new ComposedDataPoint(point, point.sign));
                            }
                        }
                    }
                }

                // Put the new "mapped" component
                List<ComposedDataPoint> result = Lists.newArrayList();
                for (Map.Entry<VTLObject, ComposedDataPoint> entry : buckets.entries()) {
                    VTLObject group = entry.getKey();
                    ComposedDataPoint point = entry.getValue();
                    result.add(point);
                    structure.asMap(point).put(this.component, group);
                }

                // Not needed since we are constructing the result by component.
                // Collections.sort(result, groupOrder);

                return result;

            }).flatMap(Collection::stream);

    return StreamUtils
            .aggregate(streamToAggregate,
                    (dataPoint, dataPoint2) -> groupOrder.compare(dataPoint, dataPoint2) == 0)
            .onClose(streamToAggregate::close).map(dataPoints -> {

                DataPoint aggregate;
                // Optimization.
                if (dataPoints.size() > 1) {

                    // Won't fail since we check size.
                    aggregate = DataPoint.create(dataPoints.get(0));
                    Map<Component, VTLObject> result = structure.asMap(aggregate);

                    for (Map.Entry<Component, HierarchyAccumulator> entry : accumulators.entrySet()) {
                        result.put(entry.getKey(), entry.getValue().identity());
                    }

                    Iterator<ComposedDataPoint> iterator = dataPoints.iterator();
                    while (iterator.hasNext()) {
                        ComposedDataPoint composedDataPoint = iterator.next();
                        Map<Component, VTLObject> next = structure.asMap(composedDataPoint);

                        for (Map.Entry<Component, HierarchyAccumulator> accumulator : accumulators.entrySet()) {
                            Component component = accumulator.getKey();
                            VTLObject objectValue = next.get(component);
                            HierarchyAccumulator value = accumulator.getValue();
                            result.merge(component, objectValue,
                                    value.accumulator(composedDataPoint.getSign()));
                        }

                    }
                } else {
                    aggregate = dataPoints.get(0);
                }

                return aggregate;
            });
}

From source file: io.prestosql.execution.SqlStageExecution.java
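
Here noMoreSplitsNotification.entries() is iterated to signal noMoreSplits to the task once per (PlanNodeId, Lifespan) pair.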

public synchronized Set<RemoteTask> scheduleSplits(Node node, Multimap<PlanNodeId, Split> splits,
        Multimap<PlanNodeId, Lifespan> noMoreSplitsNotification) {
    requireNonNull(node, "node is null");
    requireNonNull(splits, "splits is null");

    if (stateMachine.getState().isDone()) {
        return ImmutableSet.of();
    }
    splitsScheduled.set(true);

    checkArgument(stateMachine.getFragment().getPartitionedSources().containsAll(splits.keySet()),
            "Invalid splits");

    ImmutableSet.Builder<RemoteTask> newTasks = ImmutableSet.builder();
    Collection<RemoteTask> tasks = this.tasks.get(node);
    RemoteTask task;
    if (tasks == null) {
        // The output buffer depends on the task id starting from 0 and being sequential, since each
        // task is assigned a private buffer based on task id.
        TaskId taskId = new TaskId(stateMachine.getStageId(), nextTaskId.getAndIncrement());
        task = scheduleTask(node, taskId, splits, OptionalInt.empty());
        newTasks.add(task);
    } else {
        task = tasks.iterator().next();
        task.addSplits(splits);
    }
    if (noMoreSplitsNotification.size() > 1) {
        // The assumption that `noMoreSplitsNotification.size() <= 1` currently holds.
        // If this assumption no longer holds, we should consider calling task.noMoreSplits with multiple entries in one shot.
        // Such methods can be expensive since they grab locks and/or send HTTP requests on change.
        throw new UnsupportedOperationException(
                "This assumption no longer holds: noMoreSplitsNotification.size() <= 1");
    }
    for (Entry<PlanNodeId, Lifespan> entry : noMoreSplitsNotification.entries()) {
        task.noMoreSplits(entry.getKey(), entry.getValue());
    }
    return newTasks.build();
}

From source file: org.wisdom.framework.filters.ProxyFilter.java
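
Here outgoing HTTP headers are accumulated in an ArrayListMultimap, since a header name may map to several values, and headers.entries() is iterated to copy each name/value pair onto the proxied request.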

/**
 * The interception method. Re-emits the request to the proxy target and forwards the response. This method
 * returns an {@link org.wisdom.api.http.AsyncResult} as the proxy needs to run in another thread. It also
 * invokes a couple of callbacks letting developers customize the request and result.
 *
 * @param route   the route
 * @param context the filter context
 * @return the result
 * @throws Exception if anything bad happens
 */
@Override
public Result call(final Route route, final RequestContext context) throws Exception {
    return new AsyncResult(new Callable<Result>() {
        @Override
        public Result call() throws Exception {
            URI rewrittenURI = rewriteURI(context);
            logger.debug("Proxy request - rewriting {} to {}", context.request().uri(), rewrittenURI);
            if (rewrittenURI == null) {
                return onRewriteFailed(context);
            }

            BasicHttpEntityEnclosingRequest request = new BasicHttpEntityEnclosingRequest(
                    context.request().method(), rewrittenURI.toString());
            // Any header listed by the Connection header must be removed:
            // http://tools.ietf.org/html/rfc7230#section-6.1.
            Set<String> hopHeaders = new HashSet<>();
            List<String> connectionHeaders = context.request().headers().get(HeaderNames.CONNECTION);
            for (String s : connectionHeaders) {
                for (String entry : Splitter.on(",").omitEmptyStrings().trimResults().splitToList(s)) {
                    hopHeaders.add(entry.toLowerCase(Locale.ENGLISH));
                }
            }

            boolean hasContent = context.request().contentType() != null;
            final String host = getHost();
            Multimap<String, String> headers = ArrayListMultimap.create();
            for (Map.Entry<String, List<String>> entry : context.request().headers().entrySet()) {
                String name = entry.getKey();
                if (HeaderNames.TRANSFER_ENCODING.equalsIgnoreCase(name)) {
                    hasContent = true;
                }
                if (host != null && HeaderNames.HOST.equalsIgnoreCase(name)) {
                    continue;
                }
                // Remove hop-by-hop headers.
                String lower = name.toLowerCase(Locale.ENGLISH);
                if (HOP_HEADERS.contains(lower) || hopHeaders.contains(lower)) {
                    continue;
                }

                for (String v : entry.getValue()) {
                    headers.put(name, v);
                }
            }

            // Force the Host header if configured
            headers.removeAll(HeaderNames.HOST);
            if (host != null) {
                headers.put(HeaderNames.HOST, host);
                headers.put("X-Forwarded-Server", host);
            } else {
                // Otherwise use the host of the rewritten URI.
                headers.put("X-Forwarded-Server", rewrittenURI.getHost());
            }

            // Add proxy headers
            if (getVia() != null) {
                headers.put(HeaderNames.VIA, "http/1.1 " + getVia());
            }
            headers.put("X-Forwarded-For", context.request().remoteAddress());
            if (host != null) {
                headers.put("X-Forwarded-Host", host);
            }

            updateHeaders(context, headers);
            for (Map.Entry<String, String> s : headers.entries()) {
                request.addHeader(s.getKey(), s.getValue());
            }
            // Remove content-length as it is computed by the HTTP client.
            request.removeHeaders(HeaderNames.CONTENT_LENGTH);

            if (hasContent) {
                ByteArrayEntity entity = new ByteArrayEntity(context.context().raw(), ContentType
                        .create(context.request().contentMimeType(), context.request().contentCharset()));
                request.setEntity(entity);
            }

            HttpResponse response = client.execute(new HttpHost(rewrittenURI.getHost(), rewrittenURI.getPort()),
                    request);
            return onResult(toResult(response));
        }
    });

}

From source file: de.hzi.helmholtz.Compare.PathwayComparisonUsingModules.java
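
Here forward and reverse comparison results are flattened via nested entries() calls into a score-ordered TreeMultimap, whose entries() are then walked to pick the best non-overlapping alignment.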

public Map<String, Map<String, Double>> pathwayComparisonCoalesce(PathwayUsingModules newSource,
        PathwayUsingModules newTarget, BiMap<String, Integer> newSourceGeneIdToPositionMap,
        BiMap<String, Integer> newTargetGeneIdToPositionMap) {

    Multimap<Integer, Multimap<Double, String>> forward = pcompare(newSource, newTarget); // key: qgeneId, value: {score=tgenecombination;...}
    Multimap<Integer, Multimap<Double, String>> reverse = pcompare(newTarget, newSource);

    // Re-construct the bimaps
    newSourceGeneIdToPositionMap = HashBiMap.create();
    int temp = 0;
    for (Module e : newSource.getModules()) {
        newSourceGeneIdToPositionMap.put(e.getModuleId(), temp++);
    }
    newTargetGeneIdToPositionMap = HashBiMap.create();
    temp = 0;
    for (Module e : newTarget.getModules()) {
        newTargetGeneIdToPositionMap.put(e.getModuleId(), temp++);
    }

    /* Create global list of matchings ordered by score by joining forward and reverse lists
     * key: querygene -> targetgenes
     * value: score
     */
    TreeMultimap<Double, String> globalMap = TreeMultimap.create(Ordering.natural().reverse(),
            Ordering.natural());
    for (Map.Entry<Integer, Multimap<Double, String>> e : forward.entries()) {
        int fgene = e.getKey();
        Multimap<Double, String> geneAndScore = e.getValue();
        for (Map.Entry<Double, String> scoreEntry : geneAndScore.entries()) {
            double score = scoreEntry.getKey();
            String matchingGeneString = scoreEntry.getValue();
            String[] multipleMatchingGenes = matchingGeneString.split(",");
            for (String matchingGene : multipleMatchingGenes) {
                String newKey = fgene + "->" + matchingGene;
                globalMap.put(score, newKey);
            }
        }
    }
    for (Map.Entry<Integer, Multimap<Double, String>> e : reverse.entries()) {
        int rgene = e.getKey();
        Multimap<Double, String> geneAndScore = e.getValue();
        for (Map.Entry<Double, String> scoreEntry : geneAndScore.entries()) {
            double score = scoreEntry.getKey();
            String matchingGeneString = scoreEntry.getValue();
            String[] multipleMatchingGenes = matchingGeneString.split(",");
            for (String matchingGene : multipleMatchingGenes) {
                String newKey = matchingGene + "->" + rgene;
                globalMap.put(score, newKey);
            }
        }
    }
    // Create the alignment from the globally ordered matches.
    Map<String, Double> matchingInTarget;
    Set<String> queryGenesCovered = new HashSet<String>();
    Set<String> targetGenesCovered = new HashSet<String>();
    Map<String, Map<String, Double>> currentBestResultMapping = new TreeMap<String, Map<String, Double>>();
    for (Map.Entry<Double, String> entry : globalMap.entries()) {
        double score = entry.getKey();
        // score = [alignment1, alignment2, ...]
        String alignment = entry.getValue();
        int i = 100;
        for (String collection : globalMap.asMap().get(score)) {
            int count = collection.length() - collection.replace("+", "").length();
            if (i > count) {
                i = count;
                alignment = collection;
            }
        }
        String bestScoreAlignment = alignment.split(",")[0];
        // start->end
        String start = bestScoreAlignment.split("->")[0];
        String end = bestScoreAlignment.split("->")[1];

        // start and end can be combination of genes
        Set<String> s = new HashSet<String>(Arrays.asList((start + "+").split("\\+")));
        Set<String> t = new HashSet<String>(Arrays.asList((end + "+").split("\\+")));

        // add to result mapping
        Set<String> sIntersection = new HashSet<String>();
        sIntersection.addAll(queryGenesCovered);
        sIntersection.retainAll(s);

        Set<String> tIntersection = new HashSet<String>();
        tIntersection.addAll(targetGenesCovered);
        tIntersection.retainAll(t);

        if (sIntersection.isEmpty() && tIntersection.isEmpty()) {
            matchingInTarget = new HashMap<String, Double>();
            matchingInTarget.put(reconstructWithGeneId(end, newTargetGeneIdToPositionMap), score);
            currentBestResultMapping.put(reconstructWithGeneId(start, newSourceGeneIdToPositionMap),
                    matchingInTarget);
            queryGenesCovered.addAll(s);
            targetGenesCovered.addAll(t);
            break;
        }
    }
    return currentBestResultMapping;
}

From source file: com.bigdata.dastor.service.StorageService.java
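
Here rangesMM.entries() both seeds a synchronized pending set and drives the loop that streams each departing range to its new endpoint.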

private void unbootstrap(final Runnable onFinish) {
    final CountDownLatch latch = new CountDownLatch(DatabaseDescriptor.getNonSystemTables().size());
    for (final String table : DatabaseDescriptor.getNonSystemTables()) {
        Multimap<Range, InetAddress> rangesMM = getChangedRangesForLeaving(table,
                FBUtilities.getLocalAddress());
        if (logger_.isDebugEnabled())
            logger_.debug("Ranges needing transfer are [" + StringUtils.join(rangesMM.keySet(), ",") + "]");
        if (rangesMM.isEmpty()) {
            latch.countDown();
            continue;
        }

        setMode("Leaving: streaming data to other nodes", true);
        final Set<Map.Entry<Range, InetAddress>> pending = Collections
                .synchronizedSet(new HashSet<Map.Entry<Range, InetAddress>>(rangesMM.entries()));
        for (final Map.Entry<Range, InetAddress> entry : rangesMM.entries()) {
            final Range range = entry.getKey();
            final InetAddress newEndpoint = entry.getValue();
            final Runnable callback = new Runnable() {
                public void run() {
                    pending.remove(entry);
                    if (pending.isEmpty())
                        latch.countDown();
                }
            };
            StageManager.getStage(StageManager.STREAM_STAGE).execute(new Runnable() {
                public void run() {
                    // TODO each call to transferRanges re-flushes, this is potentially a lot of waste
                    StreamOut.transferRanges(newEndpoint, table, Arrays.asList(range), callback);
                }
            });
        }
    }

    // wait for the transfer runnables to signal the latch.
    logger_.debug("waiting for stream aks.");
    try {
        latch.await();
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
    logger_.debug("stream acks all received.");
    leaveRing();
    onFinish.run();
}