Example usage for com.google.common.collect.Sets.newConcurrentHashSet

Introduction

On this page you can find example usages of com.google.common.collect.Sets.newConcurrentHashSet.

Prototype

public static <E> Set<E> newConcurrentHashSet(Iterable<? extends E> elements) 

Document

Creates a thread-safe set backed by a hash map and containing the given elements.
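Before the real-world examples, here is a minimal, self-contained sketch of the method's behavior (the class and method names are standard Guava and JDK APIs; the data is made up for illustration). Note that Guava also provides a no-argument overload, Sets.newConcurrentHashSet(), which several examples below use to create an empty concurrent set.

import com.google.common.collect.Sets;

import java.util.Arrays;
import java.util.Set;

public class NewConcurrentHashSetDemo {
    public static void main(String[] args) {
        // The returned set is backed by a ConcurrentHashMap, so it can be
        // read and written from multiple threads without external locking.
        Set<String> names = Sets.newConcurrentHashSet(Arrays.asList("alice", "bob"));
        names.add("carol");

        // Iterators are weakly consistent: the set may be mutated during
        // iteration without a ConcurrentModificationException.
        for (String name : names) {
            if (name.startsWith("a")) {
                names.remove(name);
            }
        }
        System.out.println(names); // e.g. [bob, carol] (iteration order is not guaranteed)
    }
}

Several of the examples below rely on exactly this property: they add to or remove from the set while iterating over it, which a plain HashSet would not allow.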

Usage

From source file:io.atomix.core.set.impl.DefaultDistributedSetService.java

@Override
public void restore(BackupInput input) {
    collection = Sets.newConcurrentHashSet(input.readObject());
    lockedElements = input.readObject();
    transactions = input.readObject();
}

From source file:com.github.hilcode.versionator.Model.java

public ImmutableCollection<Gav> createClosure(final ImmutableCollection<Gav> gavs) {
    final Map<GroupArtifact, Pom> groupArtifact2Pom = Maps.newConcurrentMap();
    for (final Pom pom : this.poms) {
        groupArtifact2Pom.put(pom.gav.groupArtifact, pom);
    }
    final Map<GroupArtifact, Version> groupArtifact2Version = Maps.newConcurrentMap();
    for (final Gav gav : gavs) {
        if (groupArtifact2Version.containsKey(gav.groupArtifact)) {
            throw new IllegalStateException("Duplicate entry.");
        }
        groupArtifact2Version.put(gav.groupArtifact, gav.version);
    }
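    // A concurrent set is essential here: the while-loop below adds to gavs_
    // while iterating over it, which would fail with a plain HashSet.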
    final Set<Gav> gavs_ = Sets.newConcurrentHashSet(gavs);
    while (true) {
        boolean addedMoreGavs = false;
        final Map<Pom, Set<Pom>> map = Maps.newConcurrentMap();
        for (final Pom pom : this.poms) {
            if (pom.versionSource == POM) {
                if (!map.containsKey(pom)) {
                    map.put(pom, Sets.newConcurrentHashSet());
                }
                map.get(pom).add(pom);
            } else {
                final Pom pom_ = pom.findRoot();
                if (!map.containsKey(pom_)) {
                    map.put(pom_, Sets.newConcurrentHashSet());
                }
                map.get(pom_).add(pom);
            }
        }
        for (final Gav gav : gavs_) {
            if (groupArtifact2Pom.containsKey(gav.groupArtifact)) {
                final Pom pom = groupArtifact2Pom.get(gav.groupArtifact).findRoot();
                for (final Pom pom_ : map.get(pom)) {
                    if (!groupArtifact2Version.containsKey(pom_.gav.groupArtifact)) {
                        addedMoreGavs = addedMoreGavs
                                || gavs_.add(Gav.BUILDER.build(pom_.gav.groupArtifact, gav.version));
                    } else if (groupArtifact2Version.get(pom_.gav.groupArtifact) != gav.version) {
                        throw new IllegalStateException("Contradicting versions.");
                    }
                }
            }
        }
        if (!addedMoreGavs) {
            break;
        }
    }
    return ImmutableSet.copyOf(gavs_);
}

From source file:com.emftriple.transform.impl.PutObjectImpl.java

@Override
public RDFGraph put(EObject from, RDFGraph graph) {
    final Object2RDF o2r = new Object2RDF();
    final Set<EObject> objects = Sets.newConcurrentHashSet(o2r.process(from, graph));
    for (EObject obj : objects) {
        o2r.process(obj, graph);
    }

    return graph;
}

From source file:com.arpnetworking.tsdcore.sinks.TimeThresholdSink.java

private TimeThresholdSink(final Builder builder) {
    super(builder);
    _excludedServices = Sets.newConcurrentHashSet(builder._excludedServices);
    _sink = builder._sink;
    _logOnly = builder._logOnly;
    _threshold = builder._threshold;
    _logger = (AggregatedData data) -> STALE_DATA_LOGGER.warn().setMessage("Dropped stale data")
            .addData("sink", getName()).addData("threshold", _threshold).addData("data", data).log();
    _filter = new Filter(_threshold, _logger, _excludedServices);
}

From source file:com.arpnetworking.tsdcore.sinks.PeriodFilteringSink.java

private PeriodFilteringSink(final Builder builder) {
    super(builder);
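    // Note: the CacheLoader below runs lazily (on first cache access), so it
    // can safely read the _include/_exclude fields assigned later in this
    // constructor.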
    _cachedFilterResult = CacheBuilder.newBuilder().maximumSize(10).build(new CacheLoader<Period, Boolean>() {
        @Override
        public Boolean load(final Period key) throws Exception {
            if (_include.contains(key)) {
                return true;
            }
            if (_exclude.contains(key)) {
                return false;
            }
            if (_excludeLessThan.isPresent()
                    && key.toStandardDuration().isShorterThan(_excludeLessThan.get().toStandardDuration())) {
                return false;
            }
            if (_excludeGreaterThan.isPresent()
                    && key.toStandardDuration().isLongerThan(_excludeGreaterThan.get().toStandardDuration())) {
                return false;
            }
            return true;
        }
    });
    _exclude = Sets.newConcurrentHashSet(builder._exclude);
    _include = Sets.newConcurrentHashSet(builder._include);
    _excludeLessThan = Optional.fromNullable(builder._excludeLessThan);
    _excludeGreaterThan = Optional.fromNullable(builder._excludeGreaterThan);
    _sink = builder._sink;
}

From source file:org.esupportail.publisher.service.bean.UserContextTree.java

/**
 * To construct the tree, call this for each context discovered.
 *
 * @param ctx
 *            the context to add, discovered from the parent.
 * @param isLastNode
 *            whether the context is a final leaf of the tree.
 * @param parent
 *            the parent from which the context was loaded.
 * @param childs
 *            all children of the context.
 * @param perms
 *            the highest Role that the user has.
 * @param permType
 *            the type of the permission.
 */
public synchronized void addCtx(@NotNull final ContextKey ctx, final boolean isLastNode,
        final ContextKey parent, final Set<ContextKey> childs, final PermissionDTO perms,
        final PermissionType permType) {
    if (!isCtxLoaded(ctx)) {
        UserContextInfos current = new UserContextInfos();
        if (ctx instanceof OwnerContextKey && ((OwnerContextKey) ctx).getOwner() != null) {
            current.setOwner(subjectKeyConverter.convertToDTOKey(((OwnerContextKey) ctx).getOwner()));
        }
        if (childs != null) {
            current.setChilds(Sets.newConcurrentHashSet(childs));
        }
        current.setPerms(perms, permType);
        current.setLastLeaf(isLastNode);
        if (!ctx.getKeyType().equals(ContextType.ORGANIZATION)) {
            Assert.isTrue(parent != null && isCtxLoaded(parent), "Context " + ctx
                    + " doesn't have a parent = '" + parent + "' or parent isn't loaded!");
            contexts.put(ctx, current);
            this.linkToParent(ctx, parent);
        } else {
            contexts.put(ctx, current);
        }
    } else if (!ctx.getKeyType().equals(ContextType.ORGANIZATION)) {
        Assert.isTrue(parent != null && isCtxLoaded(parent), "Context " + ctx
                + " doesn't have a parent = '" + parent + "' or parent isn't loaded!");
        this.linkToParent(ctx, parent);
    }
}

From source file:com.google.devtools.build.lib.query2.engine.BinaryOperatorExpression.java

/**
 * Evaluates an expression of the form "e1 - e2 - ... - eK" by noting its equivalence to
 * "e1 - (e2 + ... + eK)" and evaluating the subexpressions on the right-hand-side in parallel.
 */
private static <T> void parEvalMinus(ImmutableList<QueryExpression> operands, QueryEnvironment<T> env,
        VariableContext<T> context, ThreadSafeCallback<T> callback, ForkJoinPool forkJoinPool)
        throws QueryException, InterruptedException {
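    // A concurrent set is required here: the subtraction callback removes
    // elements from lhsValue on multiple forkJoinPool worker threads at once.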
    final Set<T> lhsValue = Sets.newConcurrentHashSet(QueryUtil.evalAll(env, context, operands.get(0)));
    ThreadSafeCallback<T> subtractionCallback = new ThreadSafeCallback<T>() {
        @Override
        public void process(Iterable<T> partialResult) throws QueryException, InterruptedException {
            for (T target : partialResult) {
                lhsValue.remove(target);
            }
        }
    };
    parEvalPlus(operands.subList(1, operands.size()), env, context, subtractionCallback, forkJoinPool);
    callback.process(lhsValue);
}

From source file:com.googlecode.blaisemath.util.coordinate.CoordinateManager.java

/**
 * Replaces the current set of objects with specified objects, and caches the rest.
 * Propagates the updated coordinates to interested listeners (on the invoking thread).
 * @param coords new coordinates
 */
public void setCoordinateMap(Map<S, ? extends C> coords) {
    Map<S, C> coordCopy = Maps.newHashMap(coords);
    Set<S> toCache;
    synchronized (this) {
        toCache = Sets.difference(map.keySet(), coordCopy.keySet()).immutableCopy();
        map.putAll(coordCopy);
        active = Sets.newConcurrentHashSet(coordCopy.keySet());
        inactive.removeAll(coordCopy.keySet());
        inactive.addAll(toCache);
        checkCache();
    }
    fireCoordinatesChanged(CoordinateChangeEvent.createAddRemoveEvent(this, coordCopy, toCache));
}

From source file:com.mattc.argus2.concurrent.Decompressors.java

/**
 * Updates our Decompressors. This reloads the Reflections object via
 * {@link #reloadReflector()} and updates our Decompressor list to include
 * subtypes of DecompressProcess that are annotated with
 * {@literal @Decompressor}.
 * <br /><br />
 * {@link #reloadReflector()} is SLOW, so this method should be called
 * sparingly.
 */
private static void update() {
    // Reload org.reflections Reflector
    long delay;
    final long start = System.currentTimeMillis();
    final long reloadDelay = Decompressors.reloadReflector();

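    // A concurrent set allows removing unannotated classes inside the loop
    // below without triggering a ConcurrentModificationException.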
    final Set<Class<? extends DecompressProcess>> processes = Sets
            .newConcurrentHashSet(reflector.getSubTypesOf(DecompressProcess.class));
    final Map<String[], Class<? extends DecompressProcess>> formats = Maps.newConcurrentMap();

    for (final Class<? extends DecompressProcess> clazz : processes) {
        if (clazz.getAnnotation(Decompressor.class) == null) {
            processes.remove(clazz);
        } else {
            try {
                final String[] key = clazz.getAnnotation(Decompressor.class).value();
                formats.put(key, clazz);
                Console.info("Registered " + clazz.getName() + " as Decompressor with format suffixes: "
                        + Arrays.toString(key) + "...");
            } catch (final IncompleteAnnotationException e) {
                Console.exception(new InvalidDecompressorException(clazz,
                        " No Formats specified in @Decompressor Annotation! Check Plugin Version...", e));
            }
        }
    }

    decompressorFormats = ImmutableMap.copyOf(formats);
    Console.debug(
            String.format("Updated Decompressors in %,d ms, Reloaded Reflector in %,d ms (%.02f%% of Delay)",
                    (delay = System.currentTimeMillis() - start), reloadDelay,
                    ((float) reloadDelay / (float) delay) * 100.0f));
}