Example usage for org.joda.time Interval contains

List of usage examples for org.joda.time Interval contains

Introduction

On this page you can find example usage for org.joda.time Interval contains.

Prototype

public boolean contains(long millisInstant) 

Document

Does this time interval contain the specified millisecond instant.
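
Before the project-specific examples below, here is a minimal, self-contained sketch (not taken from any of the sources that follow; the dates and class name are illustrative) showing the half-open semantics of contains: the start instant is inclusive and the end instant is exclusive. The ReadableInstant and ReadableInterval overloads used by several of the examples below behave the same way.

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class IntervalContainsExample {
    public static void main(String[] args) {
        // Half-open interval [2024-01-01T00:00Z, 2024-02-01T00:00Z)
        Interval january = new Interval(new DateTime(2024, 1, 1, 0, 0, DateTimeZone.UTC),
                new DateTime(2024, 2, 1, 0, 0, DateTimeZone.UTC));

        long midMonth = new DateTime(2024, 1, 15, 12, 0, DateTimeZone.UTC).getMillis();

        System.out.println(january.contains(midMonth));                 // true
        System.out.println(january.contains(january.getStartMillis())); // true: start is inclusive
        System.out.println(january.contains(january.getEndMillis()));   // false: end is exclusive

        // Related overloads used by several of the examples below
        DateTime instant = new DateTime(2024, 1, 31, 23, 59, DateTimeZone.UTC);
        Interval lastWeek = new Interval(new DateTime(2024, 1, 25, 0, 0, DateTimeZone.UTC),
                new DateTime(2024, 2, 1, 0, 0, DateTimeZone.UTC));
        System.out.println(january.contains(instant));  // true (ReadableInstant overload)
        System.out.println(january.contains(lastWeek)); // true (ReadableInterval overload)
    }
}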

Usage

From source file:io.druid.query.search.SearchQueryRunner.java

License:Apache License

@Override
public Sequence<Result<SearchResultValue>> run(final Query<Result<SearchResultValue>> input,
        Map<String, Object> responseContext) {
    if (!(input instanceof SearchQuery)) {
        throw new ISE("Got a [%s] which isn't a %s", input.getClass(), SearchQuery.class);
    }

    final SearchQuery query = (SearchQuery) input;
    final Filter filter = Filters.convertToCNFFromQueryContext(query,
            Filters.toFilter(query.getDimensionsFilter()));
    final List<DimensionSpec> dimensions = query.getDimensions();
    final SearchQuerySpec searchQuerySpec = query.getQuery();
    final int limit = query.getLimit();
    final boolean descending = query.isDescending();
    final List<Interval> intervals = query.getQuerySegmentSpec().getIntervals();
    if (intervals.size() != 1) {
        throw new IAE("Should only have one interval, got[%s]", intervals);
    }
    final Interval interval = intervals.get(0);

    // Closing this will cause segfaults in unit tests.
    final QueryableIndex index = segment.asQueryableIndex();

    if (index != null) {
        final TreeMap<SearchHit, MutableInt> retVal = Maps.newTreeMap(query.getSort().getComparator());

        Iterable<DimensionSpec> dimsToSearch;
        if (dimensions == null || dimensions.isEmpty()) {
            dimsToSearch = Iterables.transform(index.getAvailableDimensions(), Druids.DIMENSION_IDENTITY);
        } else {
            dimsToSearch = dimensions;
        }

        final BitmapFactory bitmapFactory = index.getBitmapFactoryForDimensions();

        final ImmutableBitmap baseFilter = filter == null ? null
                : filter.getBitmapIndex(new ColumnSelectorBitmapIndexSelector(bitmapFactory, index));

        ImmutableBitmap timeFilteredBitmap;
        if (!interval.contains(segment.getDataInterval())) {
            MutableBitmap timeBitmap = bitmapFactory.makeEmptyMutableBitmap();
            final Column timeColumn = index.getColumn(Column.TIME_COLUMN_NAME);
            final GenericColumn timeValues = timeColumn.getGenericColumn();

            int startIndex = Math.max(0, getStartIndexOfTime(timeValues, interval.getStartMillis(), true));
            int endIndex = Math.min(timeValues.length() - 1,
                    getStartIndexOfTime(timeValues, interval.getEndMillis(), false));

            for (int i = startIndex; i <= endIndex; i++) {
                timeBitmap.add(i);
            }

            final ImmutableBitmap finalTimeBitmap = bitmapFactory.makeImmutableBitmap(timeBitmap);
            timeFilteredBitmap = (baseFilter == null) ? finalTimeBitmap
                    : finalTimeBitmap.intersection(baseFilter);
        } else {
            timeFilteredBitmap = baseFilter;
        }

        for (DimensionSpec dimension : dimsToSearch) {
            final Column column = index.getColumn(dimension.getDimension());
            if (column == null) {
                continue;
            }

            final BitmapIndex bitmapIndex = column.getBitmapIndex();
            ExtractionFn extractionFn = dimension.getExtractionFn();
            if (extractionFn == null) {
                extractionFn = IdentityExtractionFn.getInstance();
            }
            if (bitmapIndex != null) {
                for (int i = 0; i < bitmapIndex.getCardinality(); ++i) {
                    String dimVal = Strings.nullToEmpty(extractionFn.apply(bitmapIndex.getValue(i)));
                    if (!searchQuerySpec.accept(dimVal)) {
                        continue;
                    }
                    ImmutableBitmap bitmap = bitmapIndex.getBitmap(i);
                    if (timeFilteredBitmap != null) {
                        bitmap = bitmapFactory.intersection(Arrays.asList(timeFilteredBitmap, bitmap));
                    }
                    if (bitmap.size() > 0) {
                        MutableInt counter = new MutableInt(bitmap.size());
                        MutableInt prev = retVal.put(new SearchHit(dimension.getOutputName(), dimVal), counter);
                        if (prev != null) {
                            counter.add(prev.intValue());
                        }
                        if (retVal.size() >= limit) {
                            return makeReturnResult(limit, retVal);
                        }
                    }
                }
            }
        }

        return makeReturnResult(limit, retVal);
    }

    final StorageAdapter adapter = segment.asStorageAdapter();

    if (adapter == null) {
        log.makeAlert("WTF!? Unable to process search query on segment.")
                .addData("segment", segment.getIdentifier()).addData("query", query).emit();
        throw new ISE(
                "Null storage adapter found. Probably trying to issue a query against a segment being memory unmapped.");
    }

    final Iterable<DimensionSpec> dimsToSearch;
    if (dimensions == null || dimensions.isEmpty()) {
        dimsToSearch = Iterables.transform(adapter.getAvailableDimensions(), Druids.DIMENSION_IDENTITY);
    } else {
        dimsToSearch = dimensions;
    }

    final Sequence<Cursor> cursors = adapter.makeCursors(filter, interval, query.getGranularity(), descending);

    final TreeMap<SearchHit, MutableInt> retVal = cursors.accumulate(
            Maps.<SearchHit, SearchHit, MutableInt>newTreeMap(query.getSort().getComparator()),
            new Accumulator<TreeMap<SearchHit, MutableInt>, Cursor>() {
                @Override
                public TreeMap<SearchHit, MutableInt> accumulate(TreeMap<SearchHit, MutableInt> set,
                        Cursor cursor) {
                    if (set.size() >= limit) {
                        return set;
                    }

                    Map<String, DimensionSelector> dimSelectors = Maps.newHashMap();
                    for (DimensionSpec dim : dimsToSearch) {
                        dimSelectors.put(dim.getOutputName(), cursor.makeDimensionSelector(dim));
                    }

                    while (!cursor.isDone()) {
                        for (Map.Entry<String, DimensionSelector> entry : dimSelectors.entrySet()) {
                            final DimensionSelector selector = entry.getValue();

                            if (selector != null) {
                                final IndexedInts vals = selector.getRow();
                                for (int i = 0; i < vals.size(); ++i) {
                                    final String dimVal = selector.lookupName(vals.get(i));
                                    if (searchQuerySpec.accept(dimVal)) {
                                        MutableInt counter = new MutableInt(1);
                                        MutableInt prev = set.put(new SearchHit(entry.getKey(), dimVal),
                                                counter);
                                        if (prev != null) {
                                            counter.add(prev.intValue());
                                        }
                                        if (set.size() >= limit) {
                                            return set;
                                        }
                                    }
                                }
                            }
                        }

                        cursor.advance();
                    }

                    return set;
                }
            });

    return makeReturnResult(limit, retVal);
}

From source file:io.druid.segment.indexing.granularity.ArbitraryGranularitySpec.java

License:Apache License

@Override
public Optional<Interval> bucketInterval(DateTime dt) {
    // First interval with start time <= dt
    final Interval interval = intervals.floor(new Interval(dt, new DateTime(JodaUtils.MAX_INSTANT)));

    if (interval != null && interval.contains(dt)) {
        return Optional.of(interval);
    } else {
        return Optional.absent();
    }
}

From source file:io.druid.segment.IndexMaker.java

License:Apache License

public static File persist(final IncrementalIndex index, final Interval dataInterval, File outDir,
        final Map<String, Object> segmentMetadata, final IndexSpec indexSpec, ProgressIndicator progress)
        throws IOException {
    if (index.isEmpty()) {
        throw new IAE("Trying to persist an empty index!");
    }

    final long firstTimestamp = index.getMinTime().getMillis();
    final long lastTimestamp = index.getMaxTime().getMillis();
    if (!(dataInterval.contains(firstTimestamp) && dataInterval.contains(lastTimestamp))) {
        throw new IAE("interval[%s] does not encapsulate the full range of timestamps[%s, %s]", dataInterval,
                new DateTime(firstTimestamp), new DateTime(lastTimestamp));
    }

    if (!outDir.exists()) {
        outDir.mkdirs();
    }
    if (!outDir.isDirectory()) {
        throw new ISE("Can only persist to directories, [%s] wasn't a directory", outDir);
    }

    log.info("Starting persist for interval[%s], rows[%,d]", dataInterval, index.size());
    return merge(
            Arrays.<IndexableAdapter>asList(new IncrementalIndexAdapter(dataInterval, index,
                    indexSpec.getBitmapSerdeFactory().getBitmapFactory())),
            index.getMetricAggs(), outDir, segmentMetadata, indexSpec, progress);
}

From source file:io.druid.segment.IndexMerger.java

License:Apache License

public static File persist(final IncrementalIndex index, final Interval dataInterval, File outDir,
        Map<String, Object> segmentMetadata, IndexSpec indexSpec, ProgressIndicator progress)
        throws IOException {
    if (index.isEmpty()) {
        throw new IAE("Trying to persist an empty index!");
    }

    final long firstTimestamp = index.getMinTime().getMillis();
    final long lastTimestamp = index.getMaxTime().getMillis();
    if (!(dataInterval.contains(firstTimestamp) && dataInterval.contains(lastTimestamp))) {
        throw new IAE("interval[%s] does not encapsulate the full range of timestamps[%s, %s]", dataInterval,
                new DateTime(firstTimestamp), new DateTime(lastTimestamp));
    }

    if (!outDir.exists()) {
        outDir.mkdirs();
    }
    if (!outDir.isDirectory()) {
        throw new ISE("Can only persist to directories, [%s] wasn't a directory", outDir);
    }

    log.info("Starting persist for interval[%s], rows[%,d]", dataInterval, index.size());
    return merge(
            Arrays.<IndexableAdapter>asList(new IncrementalIndexAdapter(dataInterval, index,
                    indexSpec.getBitmapSerdeFactory().getBitmapFactory())),
            index.getMetricAggs(), outDir, segmentMetadata, indexSpec, progress);
}

From source file:io.druid.segment.IndexMergerV9.java

License:Apache License

@Override
public File persist(final IncrementalIndex index, final Interval dataInterval, File outDir, IndexSpec indexSpec,
        ProgressIndicator progress) throws IOException {
    if (index.isEmpty()) {
        throw new IAE("Trying to persist an empty index!");
    }

    final long firstTimestamp = index.getMinTime().getMillis();
    final long lastTimestamp = index.getMaxTime().getMillis();
    if (!(dataInterval.contains(firstTimestamp) && dataInterval.contains(lastTimestamp))) {
        throw new IAE("interval[%s] does not encapsulate the full range of timestamps[%s, %s]", dataInterval,
                new DateTime(firstTimestamp), new DateTime(lastTimestamp));
    }

    FileUtils.forceMkdir(outDir);

    log.info("Starting persist for interval[%s], rows[%,d]", dataInterval, index.size());
    return merge(
            Arrays.<IndexableAdapter>asList(new IncrementalIndexAdapter(dataInterval, index,
                    indexSpec.getBitmapSerdeFactory().getBitmapFactory())),
            // if the index is not rolled up, then it should not be rolled up here
            // if the index is already rolled up, there is no need to roll it up again.
            //                     In that case, true/false won't cause reordering in the merge stage
            //                     while merging a single iterable
            false, index.getMetricAggs(), outDir, indexSpec, progress);
}

From source file:io.druid.segment.realtime.plumber.RealtimePlumber.java

License:Apache License

private void registerServerViewCallback() {
    serverView.registerSegmentCallback(mergeExecutor, new ServerView.BaseSegmentCallback() {
        @Override
        public ServerView.CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
            if (stopped) {
                log.info("Unregistering ServerViewCallback");
                mergeExecutor.shutdown();
                return ServerView.CallbackAction.UNREGISTER;
            }

            if (!server.isAssignable()) {
                return ServerView.CallbackAction.CONTINUE;
            }

            log.debug("Checking segment[%s] on server[%s]", segment, server);
            if (schema.getDataSource().equals(segment.getDataSource())
                    && config.getShardSpec().getPartitionNum() == segment.getShardSpec().getPartitionNum()) {
                final Interval interval = segment.getInterval();
                for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                    final Long sinkKey = entry.getKey();
                    if (interval.contains(sinkKey)) {
                        final Sink sink = entry.getValue();
                        log.info("Segment[%s] matches sink[%s] on server[%s]", segment, sink, server);

                        final String segmentVersion = segment.getVersion();
                        final String sinkVersion = sink.getSegment().getVersion();
                        if (segmentVersion.compareTo(sinkVersion) >= 0) {
                            log.info("Segment version[%s] >= sink version[%s]", segmentVersion, sinkVersion);
                            abandonSegment(sinkKey, sink);
                        }
                    }
                }
            }

            return ServerView.CallbackAction.CONTINUE;
        }
    }, new Predicate<DataSegment>() {
        @Override
        public boolean apply(final DataSegment segment) {
            return schema.getDataSource().equalsIgnoreCase(segment.getDataSource())
                    && config.getShardSpec().getPartitionNum() == segment.getShardSpec().getPartitionNum()
                    && Iterables.any(sinks.keySet(), new Predicate<Long>() {
                        @Override
                        public boolean apply(Long sinkKey) {
                            return segment.getInterval().contains(sinkKey);
                        }
                    });
        }
    });
}

From source file:io.druid.server.coordinator.helper.NewestSegmentFirstIterator.java

License:Apache License

/**
 * Find segments to compact together for the given intervalToSearch. It progressively searches the given
 * intervalToSearch in time order (latest first). The timeline lookup duration is one day: the timeline is looked up
 * for the last day of the given intervalToSearch, and the preceding day is searched next if the segments found so
 * far are not large enough to compact. This is repeated until enough segments are found.
 *
 * @param timeline         timeline of a dataSource
 * @param intervalToSearch interval to search
 * @param searchEnd        the end of the whole searchInterval
 * @param config           compaction config
 *
 * @return a pair of the reduced interval of (intervalToSearch - interval of found segments) and segments to compact
 */
@VisibleForTesting
static Pair<Interval, SegmentsToCompact> findSegmentsToCompact(
        final VersionedIntervalTimeline<String, DataSegment> timeline, final Interval intervalToSearch,
        final DateTime searchEnd, final DataSourceCompactionConfig config) {
    final long targetCompactionSize = config.getTargetCompactionSizeBytes();
    final int numTargetSegments = config.getNumTargetCompactionSegments();
    final List<DataSegment> segmentsToCompact = new ArrayList<>();
    Interval searchInterval = intervalToSearch;
    long totalSegmentsToCompactBytes = 0;

    // Finds segments to compact together while iterating searchInterval from latest to oldest
    while (!Intervals.isEmpty(searchInterval) && totalSegmentsToCompactBytes < targetCompactionSize
            && segmentsToCompact.size() < numTargetSegments) {
        final Interval lookupInterval = SegmentCompactorUtil.getNextLoopupInterval(searchInterval);
        // holders are sorted by their interval
        final List<TimelineObjectHolder<String, DataSegment>> holders = timeline.lookup(lookupInterval);

        if (holders.isEmpty()) {
            // We found nothing. Continue to the next interval.
            searchInterval = SegmentCompactorUtil.removeIntervalFromEnd(searchInterval, lookupInterval);
            continue;
        }

        for (int i = holders.size() - 1; i >= 0; i--) {
            final TimelineObjectHolder<String, DataSegment> holder = holders.get(i);
            final List<PartitionChunk<DataSegment>> chunks = Lists.newArrayList(holder.getObject().iterator());
            final long partitionBytes = chunks.stream().mapToLong(chunk -> chunk.getObject().getSize()).sum();
            if (chunks.size() == 0 || partitionBytes == 0) {
                log.warn("Skip empty shard[%s]", holder);
                continue;
            }

            if (!intervalToSearch.contains(chunks.get(0).getObject().getInterval())) {
                searchInterval = SegmentCompactorUtil.removeIntervalFromEnd(searchInterval, new Interval(
                        chunks.get(0).getObject().getInterval().getStart(), searchInterval.getEnd()));
                continue;
            }

            // Addition of the segments of a partition should be atomic.
            if (SegmentCompactorUtil.isCompactible(targetCompactionSize, totalSegmentsToCompactBytes,
                    partitionBytes) && segmentsToCompact.size() + chunks.size() <= numTargetSegments) {
                chunks.forEach(chunk -> segmentsToCompact.add(chunk.getObject()));
                totalSegmentsToCompactBytes += partitionBytes;
            } else {
                if (segmentsToCompact.size() > 1) {
                    // We found some segments to compact and cannot add more. End here.
                    return checkCompactableSizeForLastSegmentOrReturn(segmentsToCompact,
                            totalSegmentsToCompactBytes, timeline, searchInterval, searchEnd, config);
                } else {
                    // (*) Discard segments found so far because we can't compact them anyway.
                    final int numSegmentsToCompact = segmentsToCompact.size();
                    segmentsToCompact.clear();

                    if (!SegmentCompactorUtil.isCompactible(targetCompactionSize, 0, partitionBytes)) {
                        // TODO: this should be changed to compact many small segments into a few large segments
                        final DataSegment segment = chunks.get(0).getObject();
                        log.warn(
                                "shardSize[%d] for dataSource[%s] and interval[%s] is larger than targetCompactionSize[%d]."
                                        + " Contitnue to the next shard.",
                                partitionBytes, segment.getDataSource(), segment.getInterval(),
                                targetCompactionSize);
                    } else if (numTargetSegments < chunks.size()) {
                        final DataSegment segment = chunks.get(0).getObject();
                        log.warn(
                                "The number of segments[%d] for dataSource[%s] and interval[%s] is larger than "
                                        + "numTargetCompactSegments[%d]. If you see lots of shards are being skipped due to too many "
                                        + "segments, consider increasing 'numTargetCompactionSegments' and "
                                        + "'druid.indexer.runner.maxZnodeBytes'. Contitnue to the next shard.",
                                chunks.size(), segment.getDataSource(), segment.getInterval(),
                                numTargetSegments);
                    } else {
                        if (numSegmentsToCompact == 1) {
                            // We found a segment which is smaller than targetCompactionSize but too large to compact with other
                            // segments. Skip this one.
                            // Note that segmentsToCompact is already cleared at (*).
                            chunks.forEach(chunk -> segmentsToCompact.add(chunk.getObject()));
                            totalSegmentsToCompactBytes = partitionBytes;
                        } else {
                            throw new ISE("Cannot compact segments[%s]. shardBytes[%s], numSegments[%s]",
                                    chunks.stream().map(PartitionChunk::getObject).collect(Collectors.toList()),
                                    partitionBytes, chunks.size());
                        }
                    }
                }
            }

            // Update searchInterval
            searchInterval = SegmentCompactorUtil.removeIntervalFromEnd(searchInterval,
                    new Interval(chunks.get(0).getObject().getInterval().getStart(), searchInterval.getEnd()));
        }
    }

    if (segmentsToCompact.size() == 0 || segmentsToCompact.size() == 1) {
        if (Intervals.isEmpty(searchInterval)) {
            // We found nothing to compact. End here.
            return Pair.of(intervalToSearch, new SegmentsToCompact(ImmutableList.of()));
        } else {
            // We found only 1 segment. Further find segments for the remaining interval.
            return findSegmentsToCompact(timeline, searchInterval, searchEnd, config);
        }
    }

    return checkCompactableSizeForLastSegmentOrReturn(segmentsToCompact, totalSegmentsToCompactBytes, timeline,
            searchInterval, searchEnd, config);
}

From source file:io.druid.server.coordinator.rules.PeriodDropRule.java

License:Apache License

@Override
public boolean appliesTo(Interval theInterval, DateTime referenceTimestamp) {
    final Interval currInterval = new Interval(period, referenceTimestamp);
    return currInterval.contains(theInterval);
}
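
The rule above checks whether theInterval falls entirely inside a period-sized window ending at referenceTimestamp. A standalone sketch of that pattern using only Joda-Time types (the period and timestamps are made up for illustration):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;
import org.joda.time.Period;

public class PeriodWindowExample {
    public static void main(String[] args) {
        DateTime referenceTimestamp = new DateTime(2024, 6, 1, 0, 0, DateTimeZone.UTC);
        Period period = Period.months(1);

        // Window covering the month leading up to the reference timestamp
        Interval window = new Interval(period, referenceTimestamp);

        Interval recent = new Interval(new DateTime(2024, 5, 10, 0, 0, DateTimeZone.UTC),
                new DateTime(2024, 5, 20, 0, 0, DateTimeZone.UTC));
        Interval old = new Interval(new DateTime(2024, 3, 1, 0, 0, DateTimeZone.UTC),
                new DateTime(2024, 3, 2, 0, 0, DateTimeZone.UTC));

        System.out.println(window.contains(recent)); // true: fully inside the window
        System.out.println(window.contains(old));    // false: ends before the window starts
    }
}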

From source file:io.druid.server.coordinator.rules.Rules.java

License:Apache License

public static boolean eligibleForLoad(Interval src, Interval target) {
    return src.contains(target);
}

From source file:io.druid.server.http.DatasourcesResource.java

License:Apache License

@GET
@Path("/{dataSourceName}/intervals/{interval}")
@Produces(MediaType.APPLICATION_JSON)
public Response getSegmentDataSourceSpecificInterval(@PathParam("dataSourceName") String dataSourceName,
        @PathParam("interval") String interval, @QueryParam("simple") String simple,
        @QueryParam("full") String full) {
    final DruidDataSource dataSource = getDataSource(dataSourceName);
    final Interval theInterval = new Interval(interval.replace("_", "/"));

    if (dataSource == null) {
        return Response.noContent().build();
    }

    final Comparator<Interval> comparator = Comparators.inverse(Comparators.intervalsByStartThenEnd());
    if (full != null) {
        final Map<Interval, Map<String, Object>> retVal = Maps.newTreeMap(comparator);
        for (DataSegment dataSegment : dataSource.getSegments()) {
            if (theInterval.contains(dataSegment.getInterval())) {
                Map<String, Object> segments = retVal.get(dataSegment.getInterval());
                if (segments == null) {
                    segments = Maps.newHashMap();
                    retVal.put(dataSegment.getInterval(), segments);
                }

                Pair<DataSegment, Set<String>> val = getSegment(dataSegment.getIdentifier());
                segments.put(dataSegment.getIdentifier(),
                        ImmutableMap.of("metadata", val.lhs, "servers", val.rhs));
            }
        }

        return Response.ok(retVal).build();
    }

    if (simple != null) {
        final Map<Interval, Map<String, Object>> retVal = Maps.newHashMap();
        for (DataSegment dataSegment : dataSource.getSegments()) {
            if (theInterval.contains(dataSegment.getInterval())) {
                Map<String, Object> properties = retVal.get(dataSegment.getInterval());
                if (properties == null) {
                    properties = Maps.newHashMap();
                    properties.put("size", dataSegment.getSize());
                    properties.put("count", 1);

                    retVal.put(dataSegment.getInterval(), properties);
                } else {
                    properties.put("size", MapUtils.getLong(properties, "size", 0L) + dataSegment.getSize());
                    properties.put("count", MapUtils.getInt(properties, "count", 0) + 1);
                }
            }
        }

        return Response.ok(retVal).build();
    }

    final Set<String> retVal = Sets.newTreeSet(Comparators.inverse(String.CASE_INSENSITIVE_ORDER));
    for (DataSegment dataSegment : dataSource.getSegments()) {
        if (theInterval.contains(dataSegment.getInterval())) {
            retVal.add(dataSegment.getIdentifier());
        }
    }

    return Response.ok(retVal).build();
}