Example usage for com.google.common.collect Sets newIdentityHashSet

List of usage examples for com.google.common.collect Sets newIdentityHashSet

Introduction

On this page you can find example usage of com.google.common.collect.Sets#newIdentityHashSet().

Prototype

public static <E> Set<E> newIdentityHashSet() 

Document

Creates an empty Set that uses reference identity, rather than equals(), to determine element equality.
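
The returned set compares elements by reference (it is backed by a java.util.IdentityHashMap), so two objects that are equal according to equals() but are distinct instances are both retained. Below is a minimal, self-contained sketch (not taken from any of the projects listed under Usage) that contrasts this behavior with a regular HashSet:

import com.google.common.collect.Sets;

import java.util.HashSet;
import java.util.Set;

public class NewIdentityHashSetDemo {
    public static void main(String[] args) {
        Set<String> identitySet = Sets.newIdentityHashSet();

        String a = new String("key");
        String b = new String("key"); // equal via equals(), but a distinct object

        identitySet.add(a);
        identitySet.add(b);
        System.out.println(identitySet.size()); // 2: elements are compared by reference (==)

        Set<String> regularSet = new HashSet<>();
        regularSet.add(a);
        regularSet.add(b);
        System.out.println(regularSet.size());  // 1: elements are compared by equals()
    }
}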

Usage

From source file:edu.washington.cs.cupid.CapabilityExecutor.java

private CapabilityExecutor() {
    resultCaches = CacheBuilder.newBuilder().build();
    running = HashBasedTable.create();
    canceling = Sets.newIdentityHashSet();

    IPreferenceStore preferences = CupidActivator.getDefault().getPreferenceStore();
    logJobStatus = preferences.getBoolean(PreferenceConstants.P_JOB_STATUS_LOGGING);
    logCacheStatus = preferences.getBoolean(PreferenceConstants.P_CACHE_STATUS_LOGGING);
    preferences.addPropertyChangeListener(this);
}

From source file:org.apache.hadoop.hive.ql.optimizer.ReduceSinkMapJoinProc.java

@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procContext, Object... nodeOutputs)
        throws SemanticException {
    GenTezProcContext context = (GenTezProcContext) procContext;
    MapJoinOperator mapJoinOp = (MapJoinOperator) nd;

    // remember the original parent list before we start modifying it.
    if (!context.mapJoinParentMap.containsKey(mapJoinOp)) {
        List<Operator<?>> parents = new ArrayList<Operator<?>>(mapJoinOp.getParentOperators());
        context.mapJoinParentMap.put(mapJoinOp, parents);
    }

    boolean isBigTable = stack.size() < 2 || !(stack.get(stack.size() - 2) instanceof ReduceSinkOperator);

    ReduceSinkOperator parentRS = null;
    if (!isBigTable) {
        parentRS = (ReduceSinkOperator) stack.get(stack.size() - 2);

        // For dynamic partitioned hash join, the big table will also be coming from a ReduceSinkOperator
        // Check for this condition.
        // TODO: use indexOf(), or parentRS.getTag()?
        isBigTable = (mapJoinOp.getParentOperators().indexOf(parentRS) == mapJoinOp.getConf().getPosBigTable());
    }

    if (mapJoinOp.getConf().isDynamicPartitionHashJoin()
            && !context.mapJoinToUnprocessedSmallTableReduceSinks.containsKey(mapJoinOp)) {
        // Initialize set of unprocessed small tables
        Set<ReduceSinkOperator> rsSet = Sets.newIdentityHashSet();
        for (int pos = 0; pos < mapJoinOp.getParentOperators().size(); ++pos) {
            if (pos == mapJoinOp.getConf().getPosBigTable()) {
                continue;
            }
            rsSet.add((ReduceSinkOperator) mapJoinOp.getParentOperators().get(pos));
        }
        context.mapJoinToUnprocessedSmallTableReduceSinks.put(mapJoinOp, rsSet);
    }

    if (isBigTable) {
        context.currentMapJoinOperators.add(mapJoinOp);
        return null;
    }

    context.preceedingWork = null;
    context.currentRootOperator = null;

    return processReduceSinkToHashJoin(parentRS, mapJoinOp, context);
}

From source file:com.github.benmanes.caffeine.cache.IsValidBoundedLocalCache.java

private void checkLinks(BoundedLocalCache<K, V> cache, ImmutableList<LinkedDeque<Node<K, V>>> deques,
        DescriptionBuilder desc) {
    int size = 0;
    long weightedSize = 0;
    Set<Node<K, V>> seen = Sets.newIdentityHashSet();
    for (LinkedDeque<Node<K, V>> deque : deques) {
        size += deque.size();
        weightedSize += scanLinks(cache, seen, deque, desc);
    }
    if (cache.size() != size) {
        desc.expectThat(() -> "deque size " + deques, size, is(cache.size()));
    }

    Supplier<String> errorMsg = () -> String.format("Size != list length; pending=%s, additional: %s",
            cache.writeBuffer().size(), Sets.difference(seen, ImmutableSet.copyOf(cache.data.values())));
    desc.expectThat(errorMsg, cache.size(), is(seen.size()));

    final long weighted = weightedSize;
    if (cache.evicts()) {
        Supplier<String> error = () -> String.format("WeightedSize != link weights [%d vs %d] {%d vs %d}",
                cache.adjustedWeightedSize(), weighted, seen.size(), cache.size());
        desc.expectThat("non-negative weight", weightedSize, is(greaterThanOrEqualTo(0L)));
        desc.expectThat(error, cache.adjustedWeightedSize(), is(weightedSize));
    }
}

From source file:com.google.inject.servlet.AbstractFilterPipeline.java

public void destroyPipeline() {
    //destroy servlets first
    servletPipeline.destroy();

    //go down chain and destroy all our filters
    Set<Filter> destroyedSoFar = Sets.newIdentityHashSet(); // identity set: a filter instance bound under several definitions is destroyed only once
    for (FilterDefinition filterDefinition : filterDefinitions()) {
        filterDefinition.destroy(destroyedSoFar);
    }
}

From source file:org.sosy_lab.cpachecker.cpa.value.symbolic.refiner.ARGTreePrecisionUpdater.java

private ConstraintsPrecision mergeConstraintsPrecisionsForSubgraph(final ARGState pRefinementRoot,
        final ARGReachedSet pReached) {
    // get all unique precisions from the subtree
    Set<ConstraintsPrecision> uniquePrecisions = Sets.newIdentityHashSet();

    for (ARGState descendant : getNonCoveredStatesInSubgraph(pRefinementRoot)) {
        uniquePrecisions.add(extractConstraintsPrecision(pReached, descendant));
    }

    // join all unique precisions into a single precision
    ConstraintsPrecision mergedPrecision = Iterables.getLast(uniquePrecisions);
    for (ConstraintsPrecision precision : uniquePrecisions) {
        mergedPrecision = mergedPrecision.join(precision);
    }

    return mergedPrecision;
}

From source file:org.sosy_lab.cpachecker.cpa.smg.refiner.SMGRefiner.java

private VariableTrackingPrecision mergeValuePrecisionsForSubgraph(final ARGState pRefinementRoot,
        final ARGReachedSet pReached) {
    // get all unique precisions from the subtree
    Set<VariableTrackingPrecision> uniquePrecisions = Sets.newIdentityHashSet();
    for (ARGState descendant : getNonCoveredStatesInSubgraph(pRefinementRoot)) {
        uniquePrecisions.add(extractValuePrecision(pReached, descendant));
    }

    // join all unique precisions into a single precision
    VariableTrackingPrecision mergedPrecision = Iterables.getLast(uniquePrecisions);
    for (VariableTrackingPrecision precision : uniquePrecisions) {
        mergedPrecision = mergedPrecision.join(precision);
    }

    return mergedPrecision;
}

From source file:io.bazel.rules.closure.webfiles.Webset.java

/**
 * Mutates graph to remove web files not reachable from set of entry points.
 *
 * <p>This method fully prunes {@link #webfiles()}. Entries might be removed from {@link #links()}
 * on a best effort basis.
 *
 * @param entryPoints set of paths that should be considered tree tips
 * @throws IllegalArgumentException if {@code entryPoints} aren't defined by {@code manifests}
 * @return {@code this}
 */
public final Webset removeWebfilesNotReachableFrom(Iterable<Webpath> entryPoints) {
    Deque<Webpath> bfs = new ArrayDeque<>();
    Set<Webpath> visited = Sets.newIdentityHashSet();
    for (Webpath entryPoint : Iterables
            .transform(Iterables.transform(entryPoints, Functions.toStringFunction()), interner())) {
        checkArgument(webfiles().containsKey(entryPoint), "Not found: %s", entryPoint);
        bfs.add(entryPoint);
    }
    while (!bfs.isEmpty()) {
        Webpath path = bfs.removeLast();
        if (visited.add(path)) {
            for (Webpath dest : links().get(path)) {
                if (webfiles().containsKey(dest)) {
                    bfs.addFirst(dest);
                }
            }
        }
    }
    Iterator<Webpath> webfilesIterator = webfiles().keySet().iterator();
    while (webfilesIterator.hasNext()) {
        Webpath key = webfilesIterator.next();
        if (!visited.contains(key)) {
            webfilesIterator.remove();
            links().removeAll(key);
        }
    }
    return this;
}

From source file:com.google.inject.servlet.ManagedFilterPipeline.java

public void destroyPipeline() {
    //destroy servlets first
    servletPipeline.destroy();

    //go down chain and destroy all our filters
    Set<Filter> destroyedSoFar = Sets.newIdentityHashSet(); // identity set: a filter instance bound under several definitions is destroyed only once
    for (FilterDefinition filterDefinition : filterDefinitions) {
        filterDefinition.destroy(destroyedSoFar);
    }
}

From source file:org.jooby.internal.spec.LocalVariableCollector.java

public LocalVariableCollector() {
    this(null, Sets.newIdentityHashSet());
}

From source file:org.jamocha.util.Lambdas.java

public static <T> Set<T> newIdentityHashSet(final Iterable<T> elements) {
    final Set<T> newIdentityHashSet = Sets.newIdentityHashSet();
    Iterables.addAll(newIdentityHashSet, elements);
    return newIdentityHashSet;
}