Example usage for com.google.common.collect ContiguousSet create

List of usage examples for com.google.common.collect ContiguousSet create

Introduction

On this page you can find example usages of com.google.common.collect ContiguousSet.create.

Prototype

public static <C extends Comparable> ContiguousSet<C> create(Range<C> range, DiscreteDomain<C> domain) 

Source Link

Document

Returns a ContiguousSet containing the same values in the given domain that are contained by the range (see Range#contains).

Usage

From source file:zipkin2.storage.cassandra.v1.CassandraSpanStore.java

/**
 * Wires up the session, timestamp codec, and prepared-query factories for this span
 * store from the given storage configuration.
 *
 * <p>When search is disabled, all search-related factories are left {@code null} and
 * construction returns early; read paths evidently treat {@code null} as "feature off".
 */
CassandraSpanStore(CassandraStorage storage) {
    Session session = storage.session();
    Schema.Metadata metadata = storage.metadata();
    maxTraceCols = storage.maxTraceCols;
    indexFetchMultiplier = storage.indexFetchMultiplier;
    strictTraceId = storage.strictTraceId;
    searchEnabled = storage.searchEnabled;
    timestampCodec = new TimestampCodec(metadata.protocolVersion);
    // Index buckets span [0, bucketCount) -- closed-open, so bucketCount itself is excluded.
    buckets = ContiguousSet.create(Range.closedOpen(0, storage.bucketCount), integers());

    spans = new SelectFromTraces.Factory(session, strictTraceId, maxTraceCols);
    dependencies = new SelectDependencies.Factory(session);

    if (!searchEnabled) {
        // Search disabled: skip preparing any of the name/index lookups.
        serviceNames = null;
        remoteServiceNames = null;
        spanNames = null;
        selectTraceIdsByServiceName = null;
        selectTraceIdsByServiceNames = null;
        selectTraceIdsByRemoteServiceName = null;
        selectTraceIdsBySpanName = null;
        selectTraceIdsByAnnotation = null;
        return;
    }

    // Remote-service lookups are only prepared when the schema supports them.
    if (metadata.hasRemoteService) {
        selectTraceIdsByRemoteServiceName = new SelectTraceIdTimestampFromServiceRemoteServiceName.Factory(
                session, timestampCodec);
        remoteServiceNames = new SelectRemoteServiceNames.Factory(session);
    } else {
        selectTraceIdsByRemoteServiceName = null;
        remoteServiceNames = null;
    }
    spanNames = new SelectSpanNames.Factory(session);
    serviceNames = new SelectServiceNames.Factory(session).create();

    selectTraceIdsByServiceName = new SelectTraceIdTimestampFromServiceName.Factory(session, timestampCodec,
            buckets);

    // Multi-service-name queries need an IN restriction on a partition key, which fails
    // before protocol V4 (Cassandra 2.2+) -- warn and disable rather than fail.
    if (metadata.protocolVersion.compareTo(ProtocolVersion.V4) < 0) {
        LOG.warn("Please update Cassandra to 2.2 or later, as some features may fail");
        // Log vs failing on "Partition KEY part service_name cannot be restricted by IN relation"
        selectTraceIdsByServiceNames = null;
    } else {
        selectTraceIdsByServiceNames = new SelectTraceIdTimestampFromServiceNames.Factory(session,
                timestampCodec, buckets);
    }

    selectTraceIdsBySpanName = new SelectTraceIdTimestampFromServiceSpanName.Factory(session, timestampCodec);

    selectTraceIdsByAnnotation = new SelectTraceIdTimestampFromAnnotations.Factory(session, timestampCodec,
            buckets);
}

From source file:google.registry.model.common.TimeOfYear.java

/**
 * Yields every occurrence of this particular time of year that falls inside
 * {@code range} (usually a range spanning many years).
 *
 * <p>WARNING: the resulting {@link Iterable} can be very large when {@code END_OF_TIME}
 * bounds the range from above.
 */
public Iterable<DateTime> getInstancesInRange(Range<DateTime> range) {
    // Registry dates always fall in [START_OF_TIME, END_OF_TIME]; clamp unbounded
    // ranges to that window before iterating.
    final Range<DateTime> clamped = range.intersection(Range.closed(START_OF_TIME, END_OF_TIME));
    int firstYear = clamped.lowerEndpoint().getYear();
    int lastYear = clamped.upperEndpoint().getYear();
    ContiguousSet<Integer> years = ContiguousSet.create(Range.closed(firstYear, lastYear), integers());
    // Map each candidate year to its instance, then keep only those inside the range.
    return FluentIterable.from(years)
            .transform(new Function<Integer, DateTime>() {
                @Override
                public DateTime apply(Integer year) {
                    return getDateTimeWithYear(year);
                }
            })
            .filter(clamped);
}

From source file:com.cognifide.aet.job.common.datafilters.removelines.RemoveLinesDataModifier.java

/**
 * Removes the given 1-based line indexes from {@code data}.
 *
 * <p>Indexes outside {@code [1, lineCount]} are ignored; a warning is logged when any
 * requested index falls outside the source.
 *
 * @param data            source text, split on {@code NEWLINE}
 * @param indexesToRemove 1-based line numbers to drop
 * @return the remaining lines joined with {@code NEWLINE}
 */
private String modify(String data, Set<Integer> indexesToRemove) {
    List<String> lines = Arrays.asList(StringUtils.split(data, NEWLINE));
    // Fix: Range.closed(1, 0) throws IllegalArgumentException, so empty input must
    // short-circuit before the ContiguousSet is built.
    if (lines.isEmpty()) {
        return data;
    }
    // Valid indexes are 1-based: [1, lines.size()].
    Set<Integer> dataIndexes = ContiguousSet.create(Range.closed(1, lines.size()), DiscreteDomain.integers());
    if (!dataIndexes.containsAll(indexesToRemove)) {
        LOGGER.warn("Some of the defined ranges exceed the source length. Source length is: " + lines.size());
    }
    // Only remove indexes that actually exist in the source.
    Set<Integer> filteredIndexesToRemove = Sets.intersection(dataIndexes, indexesToRemove);
    List<String> modifiedLines = new ArrayList<String>(lines.size() - filteredIndexesToRemove.size());
    for (int i = 0; i < lines.size(); i++) {
        if (!filteredIndexesToRemove.contains(i + 1)) {
            modifiedLines.add(lines.get(i));
        }
    }
    return StringUtils.join(modifiedLines, NEWLINE);
}

From source file:org.geogig.osm.internal.history.HistoryDownloader.java

/**
 * Downloads every changeset in {@code [initialChangeset, finalChangeset]} through the
 * remote downloader, reporting progress as each completes.
 *
 * <p>All downloads are submitted first; results are awaited in submission order. A
 * result of {@code -1} marks a skipped changeset and is not reported.
 *
 * @param progressListener receives human-readable status updates
 */
public void downloadAll(ProgressListener progressListener) {
    RemoteChangesetDownloader downloader = (RemoteChangesetDownloader) this.downloader;
    Range<Long> range = Range.closed(initialChangeset, finalChangeset);
    ContiguousSet<Long> changesetIds = ContiguousSet.create(range, DiscreteDomain.longs());

    progressListener
            .setDescription("Downloading changesets " + initialChangeset + " to " + finalChangeset + "...");

    final int readTimeoutMinutes = 20;

    final AtomicBoolean abortFlag = new AtomicBoolean();

    List<Future<Long>> futures = new LinkedList<>();
    for (Long changesetId : changesetIds) {
        try {
            Future<Long> future = downloader.download(changesetId, readTimeoutMinutes, abortFlag);
            futures.add(future);
        } catch (IOException e) {
            // Fix: no printStackTrace() before rethrow -- the propagated exception
            // already carries the stack trace, so printing duplicated the report.
            throw Throwables.propagate(e);
        }
    }
    for (Future<Long> f : futures) {
        try {
            Long id = f.get();
            if (-1L == id.longValue()) {
                continue;
            }
            progressListener.setDescription("Downloaded changeset " + id + ".");
        } catch (InterruptedException e) {
            // Fix: restore the interrupt flag so callers up-stack can observe it.
            Thread.currentThread().interrupt();
            throw Throwables.propagate(e);
        } catch (ExecutionException e) {
            throw Throwables.propagate(e);
        }
    }
    progressListener.setDescription("Done!");
}

From source file:org.caleydo.view.histogram.v2.BarDistributionElement.java

/**
 * Maps a pixel-space location back to the contiguous set of data indices it covers.
 */
@Override
public Set<Integer> unapply(GLLocation location) {
    // Extent along the axis opposite the bar orientation.
    float extent = EDimension.get(!vertical).select(getSize());
    int first = (int) (location.getOffset() * data.size() / extent);
    int last = (int) (location.getOffset2() * data.size() / extent);
    return ContiguousSet.create(Range.closed(first, last), DiscreteDomain.integers());
}

From source file:com.tinspx.util.base.NumberUtils.java

/**
 * Materializes {@code range} as a {@link ContiguousSet} over the integers.
 *
 * @param range the integer range to expand
 * @return a contiguous set of every integer in {@code range}
 */
public static ContiguousSet<Integer> asSet(Range<Integer> range) {
    DiscreteDomain<Integer> domain = DiscreteDomain.integers();
    return ContiguousSet.create(range, domain);
}

From source file:org.sonar.java.filters.BaseTreeVisitorIssueFilter.java

/**
 * Marks every source line spanned by {@code tree} as filtered (or unfiltered, per
 * {@code excludeLine}) for the rules registered under {@code filteredRule}.
 */
private void computeFilteredLinesForRule(@Nullable Tree tree, Class<? extends JavaCheck> filteredRule,
        boolean excludeLine) {
    if (tree == null) {
        return;
    }
    SyntaxToken first = tree.firstToken();
    SyntaxToken last = tree.lastToken();
    if (first == null || last == null) {
        // Without both boundary tokens there is no line span to filter.
        return;
    }
    Set<Integer> filteredLines = ContiguousSet.create(Range.closed(first.line(), last.line()),
            DiscreteDomain.integers());
    computeFilteredLinesForRule(filteredLines, rulesKeysByRulesClass.get(filteredRule), excludeLine);
}

From source file:org.nmdp.ngs.tools.EvaluateScaffolds.java

/**
 * Evaluates assembled scaffolds against the reference sequence.
 *
 * <p>Writes per-HSP blastn hits, the coalesced coverage intervals, and summary
 * statistics (breadth of coverage and breadth normalized by reference length) to the
 * eval file, followed by a single tab-separated summary row.
 *
 * @return 0 on success
 * @throws Exception if reading inputs, running blastn, or writing results fails
 */
@Override
public Integer call() throws Exception {
    PrintWriter writer = null;
    try {
        writer = writer(evalFile);

        Sequence reference = readReference();
        List<Sequence> scaffolds = readScaffolds();

        writer.println("#reference length = " + reference.length());
        writer.println("#scaffold count = " + scaffolds.size());
        writer.println("#scaffold lengths = " + dumpLengths(scaffolds));

        // Collect reference intervals hit by any scaffold; TreeRangeSet coalesces overlaps.
        RangeSet<Long> ranges = TreeRangeSet.create();
        for (HighScoringPair hsp : blastn(referenceFastaFile, scaffoldsFastaFile)) {
            if (reference.getName().equals(hsp.target())) {
                writer.println("#" + hsp.toString());
                if (hsp.targetStart() <= hsp.targetEnd()) { // strands match
                    ranges.add(Range.closed(hsp.targetStart(), hsp.targetEnd()));
                } else {
                    // Reverse strand: endpoints arrive swapped, so reorder them.
                    ranges.add(Range.closed(hsp.targetEnd(), hsp.targetStart()));
                }
            }
        }

        writer.println("#coalesced intervals = " + ranges);

        // Breadth of coverage = total count of reference positions covered by any interval.
        long breadthOfCoverage = 0;
        for (Range<Long> range : ranges.asRanges()) {
            breadthOfCoverage += ContiguousSet.create(range, DiscreteDomain.longs()).size();
        }
        double normalizedBreadthOfCoverage = (double) breadthOfCoverage / (double) reference.length();
        writer.println("#breadth-of-coverage = " + breadthOfCoverage);
        writer.println("#normalized breadth-of-coverage = " + normalizedBreadthOfCoverage);

        // Tab-separated summary row (trailing tab preserved from the existing format).
        StringBuilder sb = new StringBuilder();
        sb.append(referenceFastaFile.getName());
        sb.append("\t");
        sb.append(scaffoldsFastaFile.getName());
        sb.append("\t");
        sb.append(reference.length());
        sb.append("\t");
        sb.append(scaffolds.size());
        sb.append("\t");
        sb.append(ranges.asRanges().size());
        sb.append("\t");
        sb.append(breadthOfCoverage);
        sb.append("\t");
        sb.append(normalizedBreadthOfCoverage);
        sb.append("\t");
        writer.println(sb.toString());

        return 0;
    } finally {
        // Fix: the old finally called close() on a possibly-null writer and relied on
        // the catch to swallow the resulting NPE; the null check makes intent explicit.
        if (writer != null) {
            try {
                writer.close();
            } catch (Exception ignored) {
                // best-effort close; any primary exception is already propagating
            }
        }
    }
}

From source file:org.apache.brooklyn.entity.nosql.riak.RiakNodeImpl.java

/**
 * Adds the configured Erlang port range to the ports required by the superclass.
 */
@Override
protected Collection<Integer> getRequiredOpenPorts() {
    // TODO this creates a huge list of inbound ports; much better to define on a security group using range syntax!
    int rangeStart = getConfig(ERLANG_PORT_RANGE_START).iterator().next();
    int rangeEnd = getConfig(ERLANG_PORT_RANGE_END).iterator().next();

    Set<Integer> ports = MutableSet.copyOf(super.getRequiredOpenPorts());
    // NOTE(review): Range.open excludes BOTH endpoints, so rangeStart and rangeEnd
    // themselves are never opened -- confirm this exclusivity is intentional.
    Set<Integer> erlangPorts = ContiguousSet.create(Range.open(rangeStart, rangeEnd),
            DiscreteDomain.integers());
    ports.addAll(erlangPorts);

    return ports;
}

From source file:zipkin.storage.cassandra.CassandraSpanStore.java

/**
 * Prepares every CQL statement this span store executes against the given session.
 *
 * <p>Statements bind named markers (e.g. {@code limit_}, {@code start_ts}) filled in
 * at query time. The multi-service-name statement is only prepared on native protocol
 * V4 or later; otherwise it is left {@code null} with a warning.
 */
CassandraSpanStore(Session session, int bucketCount, int maxTraceCols, int indexFetchMultiplier,
        boolean strictTraceId) {
    this.session = session;
    this.maxTraceCols = maxTraceCols;
    this.indexFetchMultiplier = indexFetchMultiplier;
    this.strictTraceId = strictTraceId;

    ProtocolVersion protocolVersion = session.getCluster().getConfiguration().getProtocolOptions()
            .getProtocolVersion();
    this.timestampCodec = new TimestampCodec(protocolVersion);
    // Index buckets span [0, bucketCount) -- closed-open, so bucketCount itself is excluded.
    this.buckets = ContiguousSet.create(Range.closedOpen(0, bucketCount), integers());

    selectTraces = session.prepare(QueryBuilder.select("trace_id", "span").from("traces")
            .where(QueryBuilder.in("trace_id", QueryBuilder.bindMarker("trace_id")))
            .limit(QueryBuilder.bindMarker("limit_")));

    selectDependencies = session.prepare(QueryBuilder.select("dependencies").from("dependencies")
            .where(QueryBuilder.in("day", QueryBuilder.bindMarker("days"))));

    selectServiceNames = session.prepare(QueryBuilder.select("service_name").from(Tables.SERVICE_NAMES));

    selectSpanNames = session.prepare(QueryBuilder.select("span_name").from(Tables.SPAN_NAMES)
            .where(QueryBuilder.eq("service_name", QueryBuilder.bindMarker("service_name")))
            .and(QueryBuilder.eq("bucket", QueryBuilder.bindMarker("bucket")))
            .limit(QueryBuilder.bindMarker("limit_")));

    // Time-indexed lookups below share the same shape: restrict by key and ts window,
    // newest first, capped by limit_.
    selectTraceIdsByServiceName = session
            .prepare(QueryBuilder.select("ts", "trace_id").from(Tables.SERVICE_NAME_INDEX)
                    .where(QueryBuilder.eq("service_name", QueryBuilder.bindMarker("service_name")))
                    .and(QueryBuilder.in("bucket", QueryBuilder.bindMarker("bucket")))
                    .and(QueryBuilder.gte("ts", QueryBuilder.bindMarker("start_ts")))
                    .and(QueryBuilder.lte("ts", QueryBuilder.bindMarker("end_ts")))
                    .limit(QueryBuilder.bindMarker("limit_")).orderBy(QueryBuilder.desc("ts")));

    selectTraceIdsBySpanName = session
            .prepare(QueryBuilder.select("ts", "trace_id").from(Tables.SERVICE_SPAN_NAME_INDEX)
                    .where(QueryBuilder.eq("service_span_name", QueryBuilder.bindMarker("service_span_name")))
                    .and(QueryBuilder.gte("ts", QueryBuilder.bindMarker("start_ts")))
                    .and(QueryBuilder.lte("ts", QueryBuilder.bindMarker("end_ts")))
                    .limit(QueryBuilder.bindMarker("limit_")).orderBy(QueryBuilder.desc("ts")));

    selectTraceIdsByAnnotation = session
            .prepare(QueryBuilder.select("ts", "trace_id").from(Tables.ANNOTATIONS_INDEX)
                    .where(QueryBuilder.eq("annotation", QueryBuilder.bindMarker("annotation")))
                    .and(QueryBuilder.in("bucket", QueryBuilder.bindMarker("bucket")))
                    .and(QueryBuilder.gte("ts", QueryBuilder.bindMarker("start_ts")))
                    .and(QueryBuilder.lte("ts", QueryBuilder.bindMarker("end_ts")))
                    .limit(QueryBuilder.bindMarker("limit_")).orderBy(QueryBuilder.desc("ts")));

    // IN restriction on the service_name partition key fails before protocol V4
    // (Cassandra 2.2+) -- warn and disable rather than fail at query time.
    if (protocolVersion.compareTo(ProtocolVersion.V4) < 0) {
        LOG.warn("Please update Cassandra to 2.2 or later, as some features may fail");
        // Log vs failing on "Partition KEY part service_name cannot be restricted by IN relation"
        selectTraceIdsByServiceNames = null;
    } else {
        selectTraceIdsByServiceNames = session
                .prepare(QueryBuilder.select("ts", "trace_id").from(Tables.SERVICE_NAME_INDEX)
                        .where(QueryBuilder.in("service_name", QueryBuilder.bindMarker("service_name")))
                        .and(QueryBuilder.in("bucket", QueryBuilder.bindMarker("bucket")))
                        .and(QueryBuilder.gte("ts", QueryBuilder.bindMarker("start_ts")))
                        .and(QueryBuilder.lte("ts", QueryBuilder.bindMarker("end_ts")))
                        .limit(QueryBuilder.bindMarker("limit_")).orderBy(QueryBuilder.desc("ts")));
    }

    // Folds each result row into a trace_id -> timestamp map, preserving row order.
    traceIdToTimestamp = new Function<ResultSet, Map<Long, Long>>() {
        @Override
        public Map<Long, Long> apply(ResultSet input) {
            Map<Long, Long> traceIdsToTimestamps = new LinkedHashMap<>();
            for (Row row : input) {
                traceIdsToTimestamps.put(row.getLong("trace_id"), timestampCodec.deserialize(row, "ts"));
            }
            return traceIdsToTimestamps;
        }
    };
}