Example usage for com.google.common.collect Multimap putAll

List of usage examples for com.google.common.collect Multimap putAll

Introduction

On this page you can find example usage for com.google.common.collect Multimap putAll.

Prototype

boolean putAll(@Nullable K key, Iterable<? extends V> values);

Document

Stores a key-value pair in this multimap for each of values, all using the same key, key. Returns true if the multimap changed.
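
A minimal, self-contained sketch (not taken from any of the projects listed below; the class name and branch-style keys are made up for illustration) showing how putAll(key, values) associates every element of an Iterable with a single key, and how its boolean return value reports whether the multimap changed:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class MultimapPutAllExample {

    public static void main(String[] args) {
        Multimap<String, String> branchToLanguageCodes = ArrayListMultimap.create();

        // Associate several values with the same key in one call.
        List<String> codes = Arrays.asList("en", "da", "sv");
        boolean changed = branchToLanguageCodes.putAll("MAIN/2021-01-31", codes);
        System.out.println(changed); // true: three entries were added

        // An empty Iterable adds nothing and reports no change.
        changed = branchToLanguageCodes.putAll("MAIN/2021-07-31", Collections.<String>emptyList());
        System.out.println(changed); // false

        System.out.println(branchToLanguageCodes);
        // {MAIN/2021-01-31=[en, da, sv]}
    }
}

Note that for set-based multimaps such as HashMultimap (used in several of the examples below), putAll also returns false when every key-value pair was already present, since the multimap does not change in that case.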

Usage

From source file:com.b2international.snowowl.snomed.datastore.request.rf2.SnomedRf2ExportRequest.java

private Multimap<String, String> getLanguageCodes(RepositoryContext context, List<String> branchesToExport) {

    List<String> branchesOrRanges = newArrayList(branchesToExport);

    if (includePreReleaseContent) {
        branchesOrRanges.add(referenceBranch);
    }

    Multimap<String, String> branchToLanguageCodes = HashMultimap.create();

    Set<String> filteredLanguageCodes = Stream.of(Locale.getISOLanguages())
            .filter(code -> !Locale.ENGLISH.getLanguage().equals(code)).collect(toSet());

    for (String branchOrRange : branchesOrRanges) {

        String branch = getBranchOrRangeTarget(branchOrRange);

        final Set<String> languageCodes = newHashSet();

        // check if there are any english terms on the given branch / range
        final Request<BranchContext, SnomedDescriptions> englishLanguageCodeRequest = SnomedRequests
                .prepareSearchDescription().setLimit(0)
                .filterByLanguageCodes(singleton(Locale.ENGLISH.getLanguage())).build();

        final SnomedDescriptions enDescriptions = execute(context, branch, englishLanguageCodeRequest);

        if (enDescriptions.getTotal() > 0) {
            languageCodes.add(Locale.ENGLISH.getLanguage());
        }

        // check if there are any terms other than english on the given branch / range
        final Request<BranchContext, SnomedDescriptions> languageCodeRequest = SnomedRequests
                .prepareSearchDescription().all().filterByLanguageCodes(filteredLanguageCodes)
                .setFields(SnomedRf2Headers.FIELD_LANGUAGE_CODE).build();

        final SnomedDescriptions descriptions = execute(context, branch, languageCodeRequest);

        if (!descriptions.isEmpty()) {
            languageCodes
                    .addAll(descriptions.stream().map(SnomedDescription::getLanguageCode).collect(toSet()));
        }

        branchToLanguageCodes.putAll(branchOrRange, languageCodes);
    }

    return branchToLanguageCodes;
}

From source file:com.bigdata.dastor.service.StorageService.java

private Multimap<Range, InetAddress> getChangedRangesForLeaving(String table, InetAddress endpoint) {
    // First get all ranges the leaving endpoint is responsible for
    Collection<Range> ranges = getRangesForEndPoint(table, endpoint);

    if (logger_.isDebugEnabled())
        logger_.debug("Node " + endpoint + " ranges [" + StringUtils.join(ranges, ", ") + "]");

    Map<Range, ArrayList<InetAddress>> currentReplicaEndpoints = new HashMap<Range, ArrayList<InetAddress>>();

    // Find (for each range) all nodes that store replicas for these ranges as well
    for (Range range : ranges)
        currentReplicaEndpoints.put(range,
                getReplicationStrategy(table).getNaturalEndpoints(range.right, tokenMetadata_, table));

    TokenMetadata temp = tokenMetadata_.cloneAfterAllLeft();

    // endpoint might or might not be 'leaving'. If it was not leaving (that is, removetoken
    // command was used), it is still present in temp and must be removed.
    if (temp.isMember(endpoint))
        temp.removeEndpoint(endpoint);

    Multimap<Range, InetAddress> changedRanges = HashMultimap.create();

    // Go through the ranges and for each range check who will be
    // storing replicas for these ranges when the leaving endpoint
    // is gone. Whoever is present in newReplicaEndpoints list, but
    // not in the currentReplicaEndpoints list, will be needing the
    // range.
    for (Range range : ranges) {
        ArrayList<InetAddress> newReplicaEndpoints = getReplicationStrategy(table)
                .getNaturalEndpoints(range.right, temp, table);
        newReplicaEndpoints.removeAll(currentReplicaEndpoints.get(range));
        if (logger_.isDebugEnabled())
            if (newReplicaEndpoints.isEmpty())
                logger_.debug("Range " + range + " already in all replicas");
            else
                logger_.debug("Range " + range + " will be responsibility of "
                        + StringUtils.join(newReplicaEndpoints, ", "));
        changedRanges.putAll(range, newReplicaEndpoints);
    }

    return changedRanges;
}

From source file:org.lealone.cluster.locator.TokenMetaData.java

/**
* Calculate pending ranges according to bootstrapping and leaving nodes. Reasoning is:
*
* (1) When in doubt, it is better to write too much to a node than too little. That is, if
* there are multiple nodes moving, calculate the biggest ranges a node could have. Cleaning
* up unneeded data afterwards is better than missing writes during movement.
* (2) When a node leaves, ranges for other nodes can only grow (a node might get additional
* ranges, but it will not lose any of its current ranges as a result of a leave). Therefore
* we will first remove _all_ leaving tokens for the sake of calculation and then check what
* ranges would go where if all nodes are to leave. This way we get the biggest possible
* ranges with regard to current leave operations, covering all subsets of possible final range
* values.
* (3) When a node bootstraps, ranges of other nodes can only get smaller. Without doing
* complex calculations to see if multiple bootstraps overlap, we simply base calculations
* on the same token ring used before (reflecting situation after all leave operations have
* completed). Bootstrapping nodes will be added and removed one by one to that metadata and
* checked what their ranges would be. This will give us the biggest possible ranges the
* node could have. It might be that other bootstraps make our actual final ranges smaller,
* but it does not matter as we can clean up the data afterwards.
*
* NOTE: This is a heavy and inefficient operation. This will be done only once when a node
* changes state in the cluster, so it should be manageable.
*/
public void calculatePendingRanges(AbstractReplicationStrategy strategy, String keyspaceName) {
    lock.readLock().lock();
    try {
        Multimap<Range<Token>, InetAddress> newPendingRanges = HashMultimap.create();

        if (bootstrapTokens.isEmpty() && leavingEndpoints.isEmpty() && movingEndpoints.isEmpty()) {
            if (logger.isDebugEnabled())
                logger.debug("No bootstrapping, leaving or moving nodes -> empty pending ranges for {}",
                        keyspaceName);

            pendingRanges.put(keyspaceName, newPendingRanges);
            return;
        }

        Multimap<InetAddress, Range<Token>> addressRanges = strategy.getAddressRanges();

        // Copy of metadata reflecting the situation after all leave operations are finished.
        TokenMetaData allLeftMetaData = cloneAfterAllLeft();

        // get all ranges that will be affected by leaving nodes
        Set<Range<Token>> affectedRanges = new HashSet<Range<Token>>();
        for (InetAddress endpoint : leavingEndpoints)
            affectedRanges.addAll(addressRanges.get(endpoint));

        // for each of those ranges, find what new nodes will be responsible for the range when
        // all leaving nodes are gone.
        TokenMetaData metadata = cloneOnlyTokenMap(); // don't do this in the loop! #7758
        for (Range<Token> range : affectedRanges) {
            Set<InetAddress> currentEndpoints = ImmutableSet
                    .copyOf(strategy.calculateNaturalEndpoints(range.right, metadata));
            Set<InetAddress> newEndpoints = ImmutableSet
                    .copyOf(strategy.calculateNaturalEndpoints(range.right, allLeftMetaData));
            newPendingRanges.putAll(range, Sets.difference(newEndpoints, currentEndpoints));
        }

        // At this stage newPendingRanges has been updated according to leave operations. We can
        // now continue the calculation by checking bootstrapping nodes.

        // For each of the bootstrapping nodes, simply add and remove them one by one to
        // allLeftMetaData and check in between what their ranges would be.
        Multimap<InetAddress, Token> bootstrapAddresses = bootstrapTokens.inverse();
        for (InetAddress endpoint : bootstrapAddresses.keySet()) {
            Collection<Token> tokens = bootstrapAddresses.get(endpoint);

            allLeftMetaData.updateNormalTokens(tokens, endpoint);
            for (Range<Token> range : strategy.getAddressRanges(allLeftMetaData).get(endpoint))
                newPendingRanges.put(range, endpoint);
            allLeftMetaData.removeEndpoint(endpoint);
        }

        // At this stage newPendingRanges has been updated according to leaving and bootstrapping nodes.
        // We can now finish the calculation by checking moving nodes.

        // For each of the moving nodes, we do the same thing we did for bootstrapping:
        // simply add and remove them one by one to allLeftMetaData and check in between what their ranges would be.
        for (Pair<Token, InetAddress> moving : movingEndpoints) {
            InetAddress endpoint = moving.right; // address of the moving node

            //  moving.left is a new token of the endpoint
            allLeftMetaData.updateNormalToken(moving.left, endpoint);

            for (Range<Token> range : strategy.getAddressRanges(allLeftMetaData).get(endpoint)) {
                newPendingRanges.put(range, endpoint);
            }

            allLeftMetaData.removeEndpoint(endpoint);
        }

        pendingRanges.put(keyspaceName, newPendingRanges);

        if (logger.isDebugEnabled())
            logger.debug("Pending ranges:\n{}", (pendingRanges.isEmpty() ? "<empty>" : printPendingRanges()));
    } finally {
        lock.readLock().unlock();
    }
}

From source file:org.apache.cassandra.service.StorageService.java

public static void calculatePendingRanges(AbstractReplicationStrategy strategy, String table) {
    TokenMetadata tm = StorageService.instance.getTokenMetadata();
    Multimap<Range, InetAddress> pendingRanges = HashMultimap.create();
    Map<Token, InetAddress> bootstrapTokens = tm.getBootstrapTokens();
    Set<InetAddress> leavingEndpoints = tm.getLeavingEndpoints();

    if (bootstrapTokens.isEmpty() && leavingEndpoints.isEmpty() && tm.getMovingEndpoints().isEmpty()) {
        if (logger_.isDebugEnabled())
            logger_.debug("No bootstrapping, leaving or moving nodes -> empty pending ranges for {}", table);
        tm.setPendingRanges(table, pendingRanges);
        return;
    }

    Multimap<InetAddress, Range> addressRanges = strategy.getAddressRanges();

    // Copy of metadata reflecting the situation after all leave operations are finished.
    TokenMetadata allLeftMetadata = tm.cloneAfterAllLeft();

    // get all ranges that will be affected by leaving nodes
    Set<Range> affectedRanges = new HashSet<Range>();
    for (InetAddress endpoint : leavingEndpoints)
        affectedRanges.addAll(addressRanges.get(endpoint));

    // for each of those ranges, find what new nodes will be responsible for the range when
    // all leaving nodes are gone.
    for (Range range : affectedRanges) {
        Collection<InetAddress> currentEndpoints = strategy.calculateNaturalEndpoints(range.right, tm);
        Collection<InetAddress> newEndpoints = strategy.calculateNaturalEndpoints(range.right, allLeftMetadata);
        newEndpoints.removeAll(currentEndpoints);
        pendingRanges.putAll(range, newEndpoints);
    }

    // At this stage pendingRanges has been updated according to leave operations. We can
    // now continue the calculation by checking bootstrapping nodes.

    // For each of the bootstrapping nodes, simply add and remove them one by one to
    // allLeftMetadata and check in between what their ranges would be.
    for (Map.Entry<Token, InetAddress> entry : bootstrapTokens.entrySet()) {
        InetAddress endpoint = entry.getValue();

        allLeftMetadata.updateNormalToken(entry.getKey(), endpoint);
        for (Range range : strategy.getAddressRanges(allLeftMetadata).get(endpoint))
            pendingRanges.put(range, endpoint);
        allLeftMetadata.removeEndpoint(endpoint);
    }

    // At this stage pendingRanges has been updated according to leaving and bootstrapping nodes.
    // We can now finish the calculation by checking moving nodes.

    // For each of the moving nodes, we do the same thing we did for bootstrapping:
    // simply add and remove them one by one to allLeftMetadata and check in between what their ranges would be.
    for (Pair<Token, InetAddress> moving : tm.getMovingEndpoints()) {
        InetAddress endpoint = moving.right; // address of the moving node

        //  moving.left is a new token of the endpoint
        allLeftMetadata.updateNormalToken(moving.left, endpoint);

        for (Range range : strategy.getAddressRanges(allLeftMetadata).get(endpoint)) {
            pendingRanges.put(range, endpoint);
        }

        allLeftMetadata.removeEndpoint(endpoint);
    }

    tm.setPendingRanges(table, pendingRanges);

    if (logger_.isDebugEnabled())
        logger_.debug("Pending ranges:\n" + (pendingRanges.isEmpty() ? "<empty>" : tm.printPendingRanges()));
}

From source file:org.apache.cassandra.service.StorageService.java

/**
 * move the node to new token or find a new token to boot to according to load
 *
 * @param newToken new token to boot to, or if null, find balanced token to boot to
 *
 * @throws IOException on any I/O operation error
 */
private void move(Token newToken) throws IOException {
    if (newToken == null)
        throw new IOException("Can't move to the undefined (null) token.");

    if (tokenMetadata_.sortedTokens().contains(newToken))
        throw new IOException("target token " + newToken + " is already owned by another node.");

    // address of the current node
    InetAddress localAddress = FBUtilities.getLocalAddress();
    List<String> tablesToProcess = DatabaseDescriptor.getNonSystemTables();

    // checking if data is moving to this node
    for (String table : tablesToProcess) {
        if (tokenMetadata_.getPendingRanges(table, localAddress).size() > 0)
            throw new UnsupportedOperationException(
                    "data is currently moving to this node; unable to leave the ring");
    }

    // setting 'moving' application state
    Gossiper.instance.addLocalApplicationState(ApplicationState.STATUS, valueFactory.moving(newToken));

    logger_.info(String.format("Moving %s from %s to %s.", localAddress, getLocalToken(), newToken));

    IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();

    Map<String, Multimap<InetAddress, Range>> rangesToFetch = new HashMap<String, Multimap<InetAddress, Range>>();
    Map<String, Multimap<Range, InetAddress>> rangesToStreamByTable = new HashMap<String, Multimap<Range, InetAddress>>();

    TokenMetadata tokenMetaClone = tokenMetadata_.cloneAfterAllSettled();

    // for each of the non system tables calculating new ranges
    // which current node will handle after move to the new token
    for (String table : tablesToProcess) {
        // replication strategy of the current keyspace (aka table)
        AbstractReplicationStrategy strategy = Table.open(table).getReplicationStrategy();

        // getting collection of the currently used ranges by this keyspace
        Collection<Range> currentRanges = getRangesForEndpoint(table, localAddress);
        // collection of ranges which this node will serve after move to the new token
        Collection<Range> updatedRanges = strategy.getPendingAddressRanges(tokenMetadata_, newToken,
                localAddress);

        // ring ranges and endpoints associated with them
        // this is used to determine which nodes we should ping about range data
        Multimap<Range, InetAddress> rangeAddresses = strategy.getRangeAddresses(tokenMetaClone);

        // calculated parts of the ranges to request/stream from/to nodes in the ring
        Pair<Set<Range>, Set<Range>> rangesPerTable = calculateStreamAndFetchRanges(currentRanges,
                updatedRanges);

        /**
         * In this loop we are going through all ranges "to fetch" and determining
         * nodes in the ring responsible for data we are interested in
         */
        Multimap<Range, InetAddress> rangesToFetchWithPreferredEndpoints = ArrayListMultimap.create();
        for (Range toFetch : rangesPerTable.right) {
            for (Range range : rangeAddresses.keySet()) {
                if (range.contains(toFetch)) {
                    List<InetAddress> endpoints = snitch.getSortedListByProximity(localAddress,
                            rangeAddresses.get(range));
                    // storing range and preferred endpoint set
                    rangesToFetchWithPreferredEndpoints.putAll(toFetch, endpoints);
                }
            }
        }

        // calculating endpoints to stream current ranges to if needed
        // in some situations node will handle current ranges as part of the new ranges
        Multimap<Range, InetAddress> rangeWithEndpoints = HashMultimap.create();

        for (Range toStream : rangesPerTable.left) {
            List<InetAddress> endpoints = strategy.calculateNaturalEndpoints(toStream.right, tokenMetaClone);
            rangeWithEndpoints.putAll(toStream, endpoints);
        }

        // associating table with range-to-endpoints map
        rangesToStreamByTable.put(table, rangeWithEndpoints);

        Multimap<InetAddress, Range> workMap = BootStrapper.getWorkMap(rangesToFetchWithPreferredEndpoints);
        rangesToFetch.put(table, workMap);

        if (logger_.isDebugEnabled())
            logger_.debug("Table {}: work map {}.", table, workMap);
    }

    if (!rangesToStreamByTable.isEmpty() || !rangesToFetch.isEmpty()) {
        logger_.info("Sleeping {} ms before start streaming/fetching ranges.", RING_DELAY);

        try {
            Thread.sleep(RING_DELAY);
        } catch (InterruptedException e) {
            throw new RuntimeException("Sleep interrupted " + e.getMessage());
        }

        setMode("Moving: fetching new ranges and streaming old ranges", true);

        if (logger_.isDebugEnabled())
            logger_.debug("[Move->STREAMING] Work Map: " + rangesToStreamByTable);

        CountDownLatch streamLatch = streamRanges(rangesToStreamByTable);

        if (logger_.isDebugEnabled())
            logger_.debug("[Move->FETCHING] Work Map: " + rangesToFetch);

        CountDownLatch fetchLatch = requestRanges(rangesToFetch);

        try {
            streamLatch.await();
            fetchLatch.await();
        } catch (InterruptedException e) {
            throw new RuntimeException(
                    "Interrupted latch while waiting for stream/fetch ranges to finish: " + e.getMessage());
        }
    }

    setToken(newToken); // setting new token as we have everything settled

    if (logger_.isDebugEnabled())
        logger_.debug("Successfully moved to new token {}", getLocalToken());
}

From source file:com.palantir.atlasdb.cleaner.Scrubber.java

/**
 * @return number of cells read from _scrub table
 */
private int scrubSomeCells(SortedMap<Long, Multimap<String, Cell>> scrubTimestampToTableNameToCell,
        final TransactionManager txManager, long maxScrubTimestamp) {

    // Don't call expensive toString() if trace logging is off
    if (log.isTraceEnabled()) {
        log.trace("Attempting to scrub cells: " + scrubTimestampToTableNameToCell);
    }

    if (log.isInfoEnabled()) {
        int numCells = 0;
        Set<String> tables = Sets.newHashSet();
        for (Multimap<String, Cell> v : scrubTimestampToTableNameToCell.values()) {
            tables.addAll(v.keySet());
            numCells += v.size();
        }
        log.info("Attempting to scrub " + numCells + " cells from tables " + tables);
    }

    if (scrubTimestampToTableNameToCell.size() == 0) {
        return 0; // No cells left to scrub
    }

    Multimap<Long, Cell> toRemoveFromScrubQueue = HashMultimap.create();

    int numCellsReadFromScrubTable = 0;
    List<Future<Void>> scrubFutures = Lists.newArrayList();
    for (Map.Entry<Long, Multimap<String, Cell>> entry : scrubTimestampToTableNameToCell.entrySet()) {
        final long scrubTimestamp = entry.getKey();
        final Multimap<String, Cell> tableNameToCell = entry.getValue();

        numCellsReadFromScrubTable += tableNameToCell.size();

        long commitTimestamp = getCommitTimestampRollBackIfNecessary(scrubTimestamp, tableNameToCell);
        if (commitTimestamp >= maxScrubTimestamp) {
            // We cannot scrub this yet because not all transactions can read this value.
            continue;
        } else if (commitTimestamp != TransactionConstants.FAILED_COMMIT_TS) {
            // This is CRITICAL; don't scrub if the hard delete transaction didn't actually finish
            // (we still remove it from the _scrub table with the call to markCellsAsScrubbed though),
            // or else we could cause permanent data loss if the hard delete transaction failed after
            // queuing cells to scrub but before successfully committing
            for (final List<Entry<String, Cell>> batch : Iterables.partition(tableNameToCell.entries(),
                    batchSizeSupplier.get())) {
                final Multimap<String, Cell> batchMultimap = HashMultimap.create();
                for (Entry<String, Cell> e : batch) {
                    batchMultimap.put(e.getKey(), e.getValue());
                }
                scrubFutures.add(exec.submit(new Callable<Void>() {
                    @Override
                    public Void call() throws Exception {
                        scrubCells(txManager, batchMultimap, scrubTimestamp,
                                aggressiveScrub ? TransactionType.AGGRESSIVE_HARD_DELETE
                                        : TransactionType.HARD_DELETE);
                        return null;
                    }
                }));
            }
        }
        toRemoveFromScrubQueue.putAll(scrubTimestamp, tableNameToCell.values());
    }

    for (Future<Void> future : scrubFutures) {
        Futures.getUnchecked(future);
    }

    Multimap<Cell, Long> cellToScrubTimestamp = HashMultimap.create();
    scrubberStore.markCellsAsScrubbed(Multimaps.invertFrom(toRemoveFromScrubQueue, cellToScrubTimestamp),
            batchSizeSupplier.get());

    if (log.isTraceEnabled()) {
        log.trace("Finished scrubbing cells: " + scrubTimestampToTableNameToCell);
    }

    if (log.isInfoEnabled()) {
        Set<String> tables = Sets.newHashSet();
        for (Multimap<String, Cell> v : scrubTimestampToTableNameToCell.values()) {
            tables.addAll(v.keySet());
        }
        long minTimestamp = Collections.min(scrubTimestampToTableNameToCell.keySet());
        long maxTimestamp = Collections.max(scrubTimestampToTableNameToCell.keySet());
        log.info("Finished scrubbing " + numCellsReadFromScrubTable + " cells at "
                + scrubTimestampToTableNameToCell.size() + " timestamps (" + minTimestamp + "..." + maxTimestamp
                + ") from tables " + tables);
    }

    return numCellsReadFromScrubTable;
}

From source file:org.openflexo.dg.action.ReinjectDocx.java

public Multimap<EditionPatternInstance, IParsedFlexoEPI> removeConflictingParsedDocX(
        Multimap<EditionPatternInstance, IParsedFlexoEPI> epis) {
    Multimap<EditionPatternInstance, IParsedFlexoEPI> episToReinject = ArrayListMultimap.create();
    Multimap<String, IParsedFlexoEPI> paths = ArrayListMultimap.create();
    for (Entry<EditionPatternInstance, Collection<IParsedFlexoEPI>> e : epis.asMap().entrySet()) {
        if (e.getValue().size() > 1) {
            // There are multiple parsed DocX EPI for this EditionPatternInstance
            // Let's see if it is for the same binding path
            for (IParsedFlexoEPI epi : e.getValue()) {
                paths.put(epi.getBindingPath(), epi);
            }
            for (Entry<String, Collection<IParsedFlexoEPI>> e1 : paths.asMap().entrySet()) {
                boolean conflict = false;
                if (e1.getValue().size() > 1) {
                    // There are multiple parsed DocX EPI for the same EPI and the same binding path
                    Object currentValue = e.getKey().evaluate(e1.getKey());
                    List<IParsedFlexoEPI> modified = new ArrayList<IParsedFlexoEPI>();
                    for (IParsedFlexoEPI epi : e1.getValue()) {
                        if (!epi.getValue().equals(currentValue)) {
                            modified.add(epi);
                        }
                    }
                    if (modified.size() > 1) {
                        // There is more than one parsed DocX EPI that has a different value than the current one
                        // Let's see if they are not all the same.
                        String value = modified.get(0).getValue();
                        for (int i = 1; i < modified.size(); i++) {
                            if (!value.equals(modified.get(i).getValue())) {
                                conflict = true;
                                errorReport.append("Conflicting values: ").append(value + " ")
                                        .append(modified.get(i).getValue()).append("\n");
                                break;
                            }
                        }
                    }
                }
                if (!conflict) {
                    episToReinject.putAll(e.getKey(), e1.getValue());
                }
            }
        } else {
            // There is a single parsed DocX EPI for this EditionPatternInstance
            episToReinject.putAll(e.getKey(), e.getValue());
        }
        paths.clear();
    }
    return episToReinject;
}

From source file:edu.buaa.satla.analysis.core.predicate.PredicateStaticRefiner.java

/**
 * This method extracts a precision based only on static information derived from the CFA.
 *
 * @return a precision for the predicate CPA
 * @throws CPATransferException
 * @throws InterruptedException
 */
public PredicatePrecision extractPrecisionFromCfa(UnmodifiableReachedSet pReached,
        List<ARGState> abstractionStatesTrace, boolean atomicPredicates)
        throws SolverException, CPATransferException, InterruptedException {
    logger.log(Level.FINER, "Extracting precision from CFA...");

    // Predicates that should be tracked on function scope
    Multimap<String, AbstractionPredicate> functionPredicates = ArrayListMultimap.create();

    // Predicates that should be tracked globally
    Collection<AbstractionPredicate> globalPredicates = Lists.newArrayList();

    // Determine the ERROR location of the path (last node)
    ARGState targetState = abstractionStatesTrace.get(abstractionStatesTrace.size() - 1);
    CFANode targetLocation = AbstractStates.extractLocation(targetState);

    // Determine the assume edges that should be considered for predicate extraction
    Set<AssumeEdge> assumeEdges = new HashSet<>();

    if (addAllControlFlowAssumes) {
        assumeEdges.addAll(getAllNonLoopControlFlowAssumes());
    } else {
        if (addAllErrorTraceAssumes) {
            assumeEdges.addAll(getAssumeEdgesAlongPath(pReached, targetState));
        }
        if (addAssumesByBoundedBackscan) {
            assumeEdges.addAll(getTargetLocationAssumes(Lists.newArrayList(targetLocation)).values());
        }
    }

    // Create predicates for the assume edges and add them to the precision
    for (AssumeEdge assume : assumeEdges) {
        // Create a boolean formula from the assume
        Collection<AbstractionPredicate> preds = assumeEdgeToPredicates(atomicPredicates, assume);

        // Check whether the predicate should be used global or only local
        boolean applyGlobal = true;
        if (applyScoped) {
            for (CIdExpression idExpr : getVariablesOfAssume(assume)) {
                CSimpleDeclaration decl = idExpr.getDeclaration();
                if (decl instanceof CVariableDeclaration) {
                    if (!((CVariableDeclaration) decl).isGlobal()) {
                        applyGlobal = false;
                    }
                } else if (decl instanceof CParameterDeclaration) {
                    applyGlobal = false;
                }
            }
        }

        // Add the predicate to the resulting precision
        if (applyGlobal) {
            logger.log(Level.FINEST, "Global predicates mined", preds);
            globalPredicates.addAll(preds);
        } else {
            logger.log(Level.FINEST, "Function predicates mined", preds);
            String function = assume.getPredecessor().getFunctionName();
            functionPredicates.putAll(function, preds);
        }
    }

    logger.log(Level.FINER, "Extracting finished.");

    return new PredicatePrecision(ImmutableSetMultimap.<Pair<CFANode, Integer>, AbstractionPredicate>of(),
            ArrayListMultimap.<CFANode, AbstractionPredicate>create(), functionPredicates, globalPredicates);
}

From source file:com.google.googlejavaformat.OpsBuilder.java

/**
 * Build a list of {@link Op}s from the {@code OpsBuilder}.
 *
 * @return the list of {@link Op}s
 */
public final ImmutableList<Op> build() {
    markForPartialFormat();
    // Rewrite the ops to insert comments.
    Multimap<Integer, Op> tokOps = ArrayListMultimap.create();
    int opsN = ops.size();
    for (int i = 0; i < opsN; i++) {
        Op op = ops.get(i);
        if (op instanceof Doc.Token) {
            /*
             * Token ops can have associated non-tokens, including comments, which we need to insert.
             * They can also cause line breaks, so we insert them before or after the current level,
             * when possible.
             */
            Doc.Token tokenOp = (Doc.Token) op;
            Input.Token token = tokenOp.getToken();
            int j = i; // Where to insert toksBefore before.
            while (0 < j && ops.get(j - 1) instanceof OpenOp) {
                --j;
            }
            int k = i; // Where to insert toksAfter after.
            while (k + 1 < opsN && ops.get(k + 1) instanceof CloseOp) {
                ++k;
            }
            if (tokenOp.realOrImaginary().isReal()) {
                /*
                 * Regular input token. Copy out toksBefore before token, and toksAfter after it. Insert
                 * this token's toksBefore at position j.
                 */
                int newlines = 0; // Count of newlines in a row.
                boolean space = false; // Do we need an extra space after a previous "/*" comment?
                boolean lastWasComment = false; // Was the last thing we output a comment?
                boolean allowBlankAfterLastComment = false;
                for (Input.Tok tokBefore : token.getToksBefore()) {
                    if (tokBefore.isNewline()) {
                        newlines++;
                    } else if (tokBefore.isComment()) {
                        tokOps.put(j,
                                Doc.Break.make(
                                        tokBefore.isSlashSlashComment() ? Doc.FillMode.FORCED
                                                : Doc.FillMode.UNIFIED,
                                        "", tokenOp.getPlusIndentCommentsBefore()));
                        tokOps.putAll(j, makeComment(tokBefore));
                        space = tokBefore.isSlashStarComment();
                        newlines = 0;
                        lastWasComment = true;
                        if (tokBefore.isJavadocComment()) {
                            tokOps.put(j, Doc.Break.makeForced());
                        }
                        allowBlankAfterLastComment = tokBefore.isSlashSlashComment()
                                || (tokBefore.isSlashStarComment() && !tokBefore.isJavadocComment());
                    }
                }
                if (allowBlankAfterLastComment && newlines > 1) {
                    // Force a line break after two newlines in a row following a line or block comment
                    output.blankLine(token.getTok().getIndex(), BlankLineWanted.YES);
                }
                if (lastWasComment && newlines > 0) {
                    tokOps.put(j, Doc.Break.makeForced());
                } else if (space) {
                    tokOps.put(j, SPACE);
                }
                // Now we've seen the Token; output the toksAfter.
                for (Input.Tok tokAfter : token.getToksAfter()) {
                    if (tokAfter.isComment()) {
                        boolean breakAfter = tokAfter.isJavadocComment() || (tokAfter.isSlashStarComment()
                                && tokenOp.breakAndIndentTrailingComment().isPresent());
                        if (breakAfter) {
                            tokOps.put(k + 1, Doc.Break.make(Doc.FillMode.FORCED, "",
                                    tokenOp.breakAndIndentTrailingComment().orElse(Const.ZERO)));
                        } else {
                            tokOps.put(k + 1, SPACE);
                        }
                        tokOps.putAll(k + 1, makeComment(tokAfter));
                        if (breakAfter) {
                            tokOps.put(k + 1, Doc.Break.make(Doc.FillMode.FORCED, "", ZERO));
                        }
                    }
                }
            } else {
                /*
                 * This input token was mistakenly not generated for output. As no whitespace or comments
                 * were generated (presumably), copy all input non-tokens literally, even spaces and
                 * newlines.
                 */
                int newlines = 0;
                boolean lastWasComment = false;
                for (Input.Tok tokBefore : token.getToksBefore()) {
                    if (tokBefore.isNewline()) {
                        newlines++;
                    } else if (tokBefore.isComment()) {
                        newlines = 0;
                        lastWasComment = tokBefore.isComment();
                    }
                    if (lastWasComment && newlines > 0) {
                        tokOps.put(j, Doc.Break.makeForced());
                    }
                    tokOps.put(j, Doc.Tok.make(tokBefore));
                }
                for (Input.Tok tokAfter : token.getToksAfter()) {
                    tokOps.put(k + 1, Doc.Tok.make(tokAfter));
                }
            }
        }
    }
    /*
     * Construct new list of ops, splicing in the comments. If a comment is inserted immediately
     * before a space, suppress the space.
     */
    ImmutableList.Builder<Op> newOps = ImmutableList.builder();
    boolean afterForcedBreak = false; // Was the last Op a forced break? If so, suppress spaces.
    for (int i = 0; i < opsN; i++) {
        for (Op op : tokOps.get(i)) {
            if (!(afterForcedBreak && op instanceof Doc.Space)) {
                newOps.add(op);
                afterForcedBreak = isForcedBreak(op);
            }
        }
        Op op = ops.get(i);
        if (afterForcedBreak && (op instanceof Doc.Space || (op instanceof Doc.Break
                && ((Doc.Break) op).getPlusIndent() == 0 && " ".equals(((Doc) op).getFlat())))) {
            continue;
        }
        newOps.add(op);
        if (!(op instanceof OpenOp)) {
            afterForcedBreak = isForcedBreak(op);
        }
    }
    for (Op op : tokOps.get(opsN)) {
        if (!(afterForcedBreak && op instanceof Doc.Space)) {
            newOps.add(op);
            afterForcedBreak = isForcedBreak(op);
        }
    }
    return newOps.build();
}

From source file:org.apache.cassandra.service.StorageService.java

private Multimap<Range, InetAddress> getChangedRangesForLeaving(String table, InetAddress endpoint) {
    // First get all ranges the leaving endpoint is responsible for
    Collection<Range> ranges = getRangesForEndpoint(table, endpoint);

    if (logger_.isDebugEnabled())
        logger_.debug("Node " + endpoint + " ranges [" + StringUtils.join(ranges, ", ") + "]");

    Map<Range, List<InetAddress>> currentReplicaEndpoints = new HashMap<Range, List<InetAddress>>();

    // Find (for each range) all nodes that store replicas for these ranges as well
    for (Range range : ranges)
        currentReplicaEndpoints.put(range, Table.open(table).getReplicationStrategy()
                .calculateNaturalEndpoints(range.right, tokenMetadata_));

    TokenMetadata temp = tokenMetadata_.cloneAfterAllLeft();

    // endpoint might or might not be 'leaving'. If it was not leaving (that is, removetoken
    // command was used), it is still present in temp and must be removed.
    if (temp.isMember(endpoint))
        temp.removeEndpoint(endpoint);

    Multimap<Range, InetAddress> changedRanges = HashMultimap.create();

    // Go through the ranges and for each range check who will be
    // storing replicas for these ranges when the leaving endpoint
    // is gone. Whoever is present in newReplicaEndpoints list, but
    // not in the currentReplicaEndpoints list, will be needing the
    // range.
    for (Range range : ranges) {
        Collection<InetAddress> newReplicaEndpoints = Table.open(table).getReplicationStrategy()
                .calculateNaturalEndpoints(range.right, temp);
        newReplicaEndpoints.removeAll(currentReplicaEndpoints.get(range));
        if (logger_.isDebugEnabled())
            if (newReplicaEndpoints.isEmpty())
                logger_.debug("Range " + range + " already in all replicas");
            else
                logger_.debug("Range " + range + " will be responsibility of "
                        + StringUtils.join(newReplicaEndpoints, ", "));
        changedRanges.putAll(range, newReplicaEndpoints);
    }

    return changedRanges;
}