Example usage for com.google.common.collect Sets newTreeSet

List of usage examples for com.google.common.collect Sets newTreeSet

Introduction

In this page you can find the example usage for com.google.common.collect Sets newTreeSet.

Prototype

public static <E extends Comparable> TreeSet<E> newTreeSet() 

Source Link

Document

Creates a mutable, empty TreeSet instance sorted by the natural sort ordering of its elements.

Usage

From source file:gov.nih.nci.caintegrator.web.action.analysis.biodbnet.BioDbNetSearchAction.java

/**
 * Generates the search inputs in the following manner if case insensitivity has been selected.
 *  - the original inputs/*from ww  w .  j a v a2s.  c  om*/
 *  - inputs are transformed to all upper case
 *  - inputs are transformed to all lower case
 *  - inputs are transformed to 1st letter upper case, all others lower case
 * @return the transformed input strings as comma separated values
 */
private Set<String> handleCaseSensitivity(SearchParameters searchParams) {
    Set<String> inputs = Sets.newTreeSet();
    if (searchParams.isCaseSensitiveSearch() || searchParams.getSearchType() == SearchType.GENE_ID) {
        CollectionUtils.addAll(inputs, StringUtils.split(searchParams.getInputValues(), ','));
        return inputs;
    }
    String[] splitInputs = StringUtils.split(searchParams.getInputValues(), ',');
    for (String input : splitInputs) {
        inputs.add(input);
        inputs.add(StringUtils.upperCase(input));
        inputs.add(StringUtils.lowerCase(input));
        inputs.add(StringUtils.capitalize(input));
    }
    return inputs;
}

From source file:nl.knaw.huygens.timbuctoo.model.DomainEntity.java

/**
 * Registers an incoming relation reference under its relation name,
 * creating the bucket for that name on first use, and bumps the
 * total relation count.
 */
public void addRelation(RelationRef ref) {
    relationCount++;
    String relationName = ref.getRelationName();
    Set<RelationRef> bucket = relations.get(relationName);
    if (bucket == null) {
        // First relation with this name: start a new sorted bucket.
        bucket = Sets.newTreeSet();
        relations.put(relationName, bucket);
    }
    bucket.add(ref);
}

From source file:org.commoncrawl.mapred.ec2.postprocess.crawldb.LinkGraphDataEmitterJob.java

/**
 * Scans the bucket for valid-segment marker files and returns the
 * corresponding segment ids in ascending order.
 *
 * @param fs file system used to resolve the valid-segments glob
 * @return sorted set of segment ids that have a validity marker
 * @throws IOException if the glob listing fails
 */
private static SortedSet<Long> scanForValidSegments(FileSystem fs) throws IOException {
    SortedSet<Long> segmentIds = Sets.newTreeSet();
    Path validSegmentsGlob = new Path(S3N_BUCKET_PREFIX + VALID_SEGMENTS_PATH + "[0-9]*");
    for (FileStatus status : fs.globStatus(validSegmentsGlob)) {
        // The marker file's name is the (numeric) segment id.
        segmentIds.add(Long.parseLong(status.getPath().getName()));
    }
    return segmentIds;
}

From source file:org.obiba.mica.study.domain.Population.java

/**
 * Replaces this population's data collection events. A null argument resets
 * the property to an empty natural-ordered set instead of storing null.
 */
public void setDataCollectionEvents(SortedSet<DataCollectionEvent> dataCollectionEvents) {
    if (dataCollectionEvents == null) {
        this.dataCollectionEvents = Sets.newTreeSet();
    } else {
        this.dataCollectionEvents = dataCollectionEvents;
    }
}

From source file:com.cloudera.oryx.kmeans.computation.cluster.KSketchIndex.java

/**
 * Finds the closest point to {@code vec} within the point set identified by
 * {@code id}, returning its squared-distance and index.
 *
 * @param vec the query vector
 * @param id the point-set id to search within
 * @param approx if true, use the locality-sensitive bit index to restrict the
 *        search to the {@code projectionSamples} nearest candidates by Hamming
 *        distance; otherwise compare against every point exactly
 * @return the smallest squared distance found and the index of that point
 *         (index -1 if the point set is empty)
 */
public Distance getDistance(RealVector vec, int id, boolean approx) {
    double distance = Double.POSITIVE_INFINITY;
    int closestPoint = -1;
    // ||vec||^2 is loop-invariant; hoisted here instead of recomputing
    // getNorm() (and the square) once per candidate point.
    double length = vec.getNorm();
    double vecLengthSq = length * length;
    if (approx) {
        if (updated) {
            rebuildIndices();
        }

        BitSet q = index(vec);
        List<BitSet> index = indices.get(id);
        // Keep the projectionSamples candidates with the smallest Hamming
        // distance to the query's bit signature.
        SortedSet<Idx> lookup = Sets.newTreeSet();
        for (int j = 0; j < index.size(); j++) {
            Idx idx = new Idx(hammingDistance(q, index.get(j)), j);
            if (lookup.size() < projectionSamples) {
                lookup.add(idx);
            } else if (idx.compareTo(lookup.last()) < 0) {
                // Better than the current worst candidate: swap it in.
                lookup.add(idx);
                lookup.remove(lookup.last());
            }
        }

        List<RealVector> p = points.get(id);
        List<Double> lsq = lengthSquared.get(id);
        for (Idx idx : lookup) {
            // Squared Euclidean distance via ||v||^2 + ||p||^2 - 2*(v . p),
            // using the precomputed per-point squared lengths.
            double d = vecLengthSq + lsq.get(idx.getIndex()) - 2 * vec.dotProduct(p.get(idx.getIndex()));
            if (d < distance) {
                distance = d;
                closestPoint = idx.getIndex();
            }
        }
    } else { // More expensive exact computation over every point in the set
        List<RealVector> px = points.get(id);
        List<Double> lsq = lengthSquared.get(id);
        for (int j = 0; j < px.size(); j++) {
            double d = vecLengthSq + lsq.get(j) - 2 * vec.dotProduct(px.get(j));
            if (d < distance) {
                distance = d;
                closestPoint = j;
            }
        }
    }

    return new Distance(distance, closestPoint);
}

From source file:org.opennms.netmgt.dao.support.NewtsResourceStorageDao.java

/**
 * Returns the child resource paths found beneath {@code path}, searching up
 * to {@code depth} levels below it. Results that cannot be relativized to a
 * child of {@code path} are logged and skipped.
 */
@Override
public Set<ResourcePath> children(ResourcePath path, int depth) {
    Preconditions.checkArgument(depth >= 0, "depth must be non-negative");
    Set<ResourcePath> children = Sets.newTreeSet();

    SearchResults results = searchFor(path, depth, false);
    for (Result result : results) {
        // Relativize the path
        ResourcePath child = toChildResourcePath(path, result.getResource().getId());
        if (child != null) {
            children.add(child);
        } else {
            // This shouldn't happen
            LOG.warn("Encountered non-child resource {} when searching for {} with depth {}. Ignoring resource.",
                    result.getResource(), path, depth);
        }
    }

    return children;
}

From source file:io.druid.query.select.SelectQueryQueryToolChest.java

/**
 * Builds the cache strategy for select queries: a deterministic cache key
 * derived from the query's filter, granularity, paging spec, dimensions and
 * metrics, plus the functions used to serialize results into the cache and
 * reconstruct them on the way out.
 */
@Override
public CacheStrategy<Result<SelectResultValue>, Object, SelectQuery> getCacheStrategy(final SelectQuery query) {
    return new CacheStrategy<Result<SelectResultValue>, Object, SelectQuery>() {
        @Override
        public byte[] computeCacheKey(SelectQuery query) {
            final DimFilter dimFilter = query.getDimensionsFilter();
            final byte[] filterBytes = dimFilter == null ? new byte[] {} : dimFilter.getCacheKey();
            final byte[] granularityBytes = query.getGranularity().cacheKey();

            // TreeSet so dimensions are serialized in a canonical (sorted) order,
            // making the key independent of the order they were specified in.
            final Set<String> dimensions = Sets.newTreeSet();
            if (query.getDimensions() != null) {
                dimensions.addAll(query.getDimensions());
            }

            final byte[][] dimensionsBytes = new byte[dimensions.size()][];
            int dimensionsBytesSize = 0;
            int index = 0;
            for (String dimension : dimensions) {
                dimensionsBytes[index] = StringUtils.toUtf8(dimension);
                dimensionsBytesSize += dimensionsBytes[index].length;
                ++index;
            }

            // Same canonical-ordering treatment for metrics.
            final Set<String> metrics = Sets.newTreeSet();
            if (query.getMetrics() != null) {
                metrics.addAll(query.getMetrics());
            }

            final byte[][] metricBytes = new byte[metrics.size()][];
            int metricBytesSize = 0;
            index = 0;
            for (String metric : metrics) {
                metricBytes[index] = StringUtils.toUtf8(metric);
                metricBytesSize += metricBytes[index].length;
                ++index;
            }

            // Key layout: type byte | granularity | filter | paging spec |
            // sorted dimensions | sorted metrics. The buffer is sized exactly,
            // so any change to the layout must also change the allocation.
            final ByteBuffer queryCacheKey = ByteBuffer
                    .allocate(1 + granularityBytes.length + filterBytes.length
                            + query.getPagingSpec().getCacheKey().length + dimensionsBytesSize
                            + metricBytesSize)
                    .put(SELECT_QUERY).put(granularityBytes).put(filterBytes)
                    .put(query.getPagingSpec().getCacheKey());

            for (byte[] dimensionsByte : dimensionsBytes) {
                queryCacheKey.put(dimensionsByte);
            }

            for (byte[] metricByte : metricBytes) {
                queryCacheKey.put(metricByte);
            }

            return queryCacheKey.array();
        }

        @Override
        public TypeReference<Object> getCacheObjectClazz() {
            return OBJECT_TYPE_REFERENCE;
        }

        @Override
        public Function<Result<SelectResultValue>, Object> prepareForCache() {
            // Cached form is a plain 3-element list:
            // [timestamp millis, paging identifiers, events].
            return new Function<Result<SelectResultValue>, Object>() {
                @Override
                public Object apply(final Result<SelectResultValue> input) {
                    return Arrays.asList(input.getTimestamp().getMillis(),
                            input.getValue().getPagingIdentifiers(), input.getValue().getEvents());
                }
            };
        }

        @Override
        public Function<Object, Result<SelectResultValue>> pullFromCache() {
            return new Function<Object, Result<SelectResultValue>>() {
                private final QueryGranularity granularity = query.getGranularity();

                @Override
                public Result<SelectResultValue> apply(Object input) {
                    // Unchecked casts mirror prepareForCache(): the cached object is
                    // the 3-element list written above, consumed in the same order.
                    List<Object> results = (List<Object>) input;
                    Iterator<Object> resultIter = results.iterator();

                    DateTime timestamp = granularity.toDateTime(((Number) resultIter.next()).longValue());

                    return new Result<SelectResultValue>(timestamp,
                            new SelectResultValue((Map<String, Integer>) jsonMapper
                                    .convertValue(resultIter.next(), new TypeReference<Map<String, Integer>>() {
                                    }), (List<EventHolder>) jsonMapper.convertValue(resultIter.next(),
                                            new TypeReference<List<EventHolder>>() {
                                            })));
                }
            };
        }

        @Override
        public Sequence<Result<SelectResultValue>> mergeSequences(
                Sequence<Sequence<Result<SelectResultValue>>> seqOfSequences) {
            return new MergeSequence<Result<SelectResultValue>>(getOrdering(), seqOfSequences);
        }
    };
}

From source file:ezbake.groups.graph.query.AuthorizationQuery.java

/**
 * Will get a user's authorizations, filtering by the groups that apps in the filter chain have access to
 *
 * @param type type of user to look for
 * @param id user id
 * @param appFilterChain app user ids whose group access constrains the user's auths (may be null)
 * @return the user's set of group authorizations
 * @throws GroupQueryException if no user vertex exists for the given type and id
 */
public Set<Long> getAuthorizationSet(BaseVertex.VertexType type, String id, List<String> appFilterChain)
        throws GroupQueryException {
    Set<Long> auths = Sets.newHashSet();

    // Only get auths if the user exists
    User user;
    try {
        user = graph.getUser(type, id);
        if (!user.isActive()) {
            logger.debug("User was inactive, returning empty set");
            return auths; // just don't get groups
        }
    } catch (InvalidVertexTypeException e) {
        logger.debug("Invalid request, returning empty set");
        return auths; // just don't get groups
    } catch (UserNotFoundException e) {
        throw new GroupQueryException("No user found: " + type + ":" + id, GroupQueryError.USER_NOT_FOUND);
    }

    // Add the user's own index
    logger.debug("Adding user's id to the auths: {}", user.getIndex());
    auths.add(user.getIndex());

    // This can sometimes be null
    if (appFilterChain == null) {
        appFilterChain = Collections.emptyList();
    }

    // These are the groups the user has on their own
    Set<Group> userGroups = getUserGroups(type, id, false);
    for (Group g : userGroups) {
        logger.debug("Group -> {} {}", g.getGroupName(), g.getIndex());
    }
    logger.debug("getAuthorizations User: {} has groups: {}", user, userGroups);

    // These are the groups the apps always include, even if the user doesn't have access
    Collection<Set<Group>> appsGroups = getUserGroups(
            userListToMap(BaseVertex.VertexType.APP_USER, appFilterChain), false);
    Set<Long> appsFilter = Sets.newHashSet(); // This is the intersection of all app auths
    Set<Long> groupsAppsAlwaysInclude = Sets.newTreeSet(); // This is all the groups the apps include anyways
    // BUG FIX: the intersection must be seeded with the first app's group indices.
    // Previously retainAll() was applied to an initially-empty set, which is always
    // empty, so every app-dependent group was silently filtered out whenever a
    // filter chain was present.
    boolean firstApp = true;
    for (Set<Group> appGroup : appsGroups) {
        Set<Long> indices = Sets.newTreeSet();
        for (Group group : appGroup) {
            indices.add(group.getIndex());
            if (group.isRequireOnlyApp()) {
                groupsAppsAlwaysInclude.add(group.getIndex());
            }
        }
        if (firstApp) {
            appsFilter.addAll(indices);
            firstApp = false;
        } else {
            appsFilter.retainAll(indices);
        }
    }

    if (type == BaseVertex.VertexType.USER) {
        // Split groups into 2 sets - those that users always have (even if app doesn't) and those that users only have if app has too
        Set<Long> groupsUserHasRegardless = Sets.newHashSet(auths);
        Set<Long> groupsDependingOnApp = Sets.newHashSet();
        for (Group g : userGroups) {
            if (g.isRequireOnlyUser()) {
                logger.debug("User should have group: {} regardless", g);
                groupsUserHasRegardless.add(g.getIndex());
            } else {
                logger.debug("Will check app access to group: {}", g);
                groupsDependingOnApp.add(g.getIndex());
            }
        }

        // Filter the groups that depend on the app
        if (!groupsDependingOnApp.isEmpty()) {
            logger.debug("Filtering groups depending on app: {} -> {}", groupsDependingOnApp, appsFilter);
            groupsDependingOnApp = Sets.intersection(groupsDependingOnApp, appsFilter);
            logger.debug("Filter result: {}", groupsDependingOnApp);
        }

        // Now union the sets to get the users final list
        auths = Sets.union(groupsUserHasRegardless, groupsDependingOnApp);
        logger.debug("Auths after taking intersection: {}", auths);
    } else if (type == BaseVertex.VertexType.APP_USER) {
        // App users simply get all of their own groups added to their auths.
        Set<Long> appAuths = Sets.newHashSet(auths);
        for (Group g : userGroups) {
            appAuths.add(g.getIndex());
        }
        auths = appAuths;
    }

    graph.commitTransaction();
    return Sets.union(auths, groupsAppsAlwaysInclude);
}

From source file:com.google.walkaround.wave.server.googleimport.FindRemoteWavesProcessor.java

/**
 * Splits the day interval [onOrAfterDays, beforeDays) into up to 5 contiguous
 * subintervals at randomly-chosen cut points.
 *
 * We aim for ~5 pieces because a high branching factor (300*5^n reaches 1000,
 * 10000 etc. much faster than 300*2^n) is desirable, and 5 is also the maximum
 * number of tasks GAE lets us add in one transaction.
 *
 * The cut points are randomized (rather than evenly spaced) because it is
 * simple to implement even when beforeDays - onOrAfterDays < 5, and because it
 * makes repeated runs unlikely to send identical queries to the googlewave.com
 * servers, which appear to have a caching bug that truncates result lists for
 * queries previously issued with a lower maxResults limit. Re-running the
 * "find waves" step therefore has a better chance of discovering all waves,
 * though it is not certain this actually mitigates the bug. Alternatives
 * considered: sequential "before:<oldest result>" paging (relies on unverified
 * truncation behavior, not parallelizable); enqueueing N overlapping random
 * splits; adding random negative search terms to defeat the cache; or fixing
 * googlewave.com itself.
 */
private List<Pair<Long, Long>> splitInterval(long onOrAfterDays, long beforeDays) {
    Preconditions.checkArgument(onOrAfterDays < beforeDays - 1,
            "Interval invalid or too small to split further: %s, %s", onOrAfterDays, beforeDays);
    // TreeSet so the cut points come back in ascending order.
    Set<Long> cutPoints = Sets.newTreeSet();
    for (int i = 0; i < 4; i++) {
        cutPoints.add(randomBetween(onOrAfterDays + 1, beforeDays));
    }
    cutPoints.add(beforeDays);
    ImmutableList.Builder<Pair<Long, Long>> intervals = ImmutableList.builder();
    long lower = onOrAfterDays;
    for (long upper : cutPoints) {
        Assert.check(lower < upper, "left=%s, right=%s", lower, upper);
        intervals.add(Pair.of(lower, upper));
        lower = upper;
    }
    return intervals.build();
}

From source file:org.richfaces.cdk.generate.freemarker.ModelElementBaseTemplateModel.java

/**
 * Collects the class names the generated tag must import: the return types
 * and parameter types from the signatures of visible, writable attributes,
 * skipping predefined types.
 */
public TemplateModel getTagImports() throws TemplateModelException {
    Set<ClassName> imports = Sets.newTreeSet();

    for (PropertyBase attribute : model.getAttributes()) {
        // Hidden, read-only, or signature-less attributes contribute nothing.
        if (attribute.isHidden() || attribute.isReadOnly() || null == attribute.getSignature()) {
            continue;
        }
        MethodSignature signature = attribute.getSignature();
        if (!isPredefined(signature.getReturnType())) {
            imports.add(signature.getReturnType());
        }
        for (ClassName parameterType : signature.getParameters()) {
            if (!isPredefined(parameterType)) {
                imports.add(parameterType);
            }
        }
    }
    return this.wrapper.wrap(imports);
}