Example usage for org.joda.time Interval contains

Introduction

This page collects example usages of org.joda.time Interval contains, all drawn from the Apache Druid code base.

Prototype

public boolean contains(long millisInstant) 

Document

Does this time interval contain the specified millisecond instant?

(The usage examples below also exercise the contains(ReadableInstant) and contains(ReadableInterval) overloads that Interval inherits from AbstractInterval.)
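
Quick Example

Before the project examples, here is a minimal, self-contained sketch of the three overloads in action (the class name IntervalContainsDemo is made up for illustration; it assumes only that joda-time is on the classpath):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class IntervalContainsDemo {
    public static void main(String[] args) {
        // Joda-Time intervals are half-open: [start, end)
        Interval interval = new Interval(new DateTime(2020, 1, 1, 0, 0, DateTimeZone.UTC),
                new DateTime(2020, 2, 1, 0, 0, DateTimeZone.UTC));

        // contains(long millisInstant): test a raw millisecond instant
        long millis = new DateTime(2020, 1, 15, 0, 0, DateTimeZone.UTC).getMillis();
        System.out.println(interval.contains(millis)); // true

        // contains(ReadableInstant): the end instant itself is excluded
        System.out.println(interval.contains(interval.getEnd())); // false

        // contains(ReadableInterval): true only if the other interval fits entirely inside
        Interval inner = new Interval(new DateTime(2020, 1, 10, 0, 0, DateTimeZone.UTC),
                new DateTime(2020, 1, 20, 0, 0, DateTimeZone.UTC));
        System.out.println(interval.contains(inner)); // true
    }
}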

Usage

From source file: org.apache.druid.query.groupby.epinephelinae.GroupByRowProcessor.java

License: Apache License
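
In this example, contains(ReadableInstant) is used inside a row-filtering predicate: a row is kept only if its timestamp falls within at least one of the query intervals, after which the dimension filter is applied.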

public static Grouper createGrouper(final Query queryParam, final Sequence<Row> rows,
        final Map<String, ValueType> rowSignature, final GroupByQueryConfig config,
        final GroupByQueryResource resource, final ObjectMapper spillMapper, final String processingTmpDir,
        final int mergeBufferSize, final List<Closeable> closeOnExit) {
    final GroupByQuery query = (GroupByQuery) queryParam;
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);

    final AggregatorFactory[] aggregatorFactories = new AggregatorFactory[query.getAggregatorSpecs().size()];
    for (int i = 0; i < query.getAggregatorSpecs().size(); i++) {
        aggregatorFactories[i] = query.getAggregatorSpecs().get(i);
    }

    final File temporaryStorageDirectory = new File(processingTmpDir,
            StringUtils.format("druid-groupBy-%s_%s", UUID.randomUUID(), query.getId()));

    final List<Interval> queryIntervals = query.getIntervals();
    final Filter filter = Filters.convertToCNFFromQueryContext(query, Filters.toFilter(query.getDimFilter()));

    final SettableSupplier<Row> rowSupplier = new SettableSupplier<>();
    final RowBasedColumnSelectorFactory columnSelectorFactory = RowBasedColumnSelectorFactory
            .create(rowSupplier, rowSignature);
    final ValueMatcher filterMatcher = filter == null ? BooleanValueMatcher.of(true)
            : filter.makeMatcher(columnSelectorFactory);

    final FilteredSequence<Row> filteredSequence = new FilteredSequence<>(rows, new Predicate<Row>() {
        @Override
        public boolean apply(Row input) {
            boolean inInterval = false;
            DateTime rowTime = input.getTimestamp();
            for (Interval queryInterval : queryIntervals) {
                if (queryInterval.contains(rowTime)) {
                    inInterval = true;
                    break;
                }
            }
            if (!inInterval) {
                return false;
            }
            rowSupplier.set(input);
            return filterMatcher.matches();
        }
    });

    final LimitedTemporaryStorage temporaryStorage = new LimitedTemporaryStorage(temporaryStorageDirectory,
            querySpecificConfig.getMaxOnDiskStorage());

    closeOnExit.add(temporaryStorage);

    Pair<Grouper<RowBasedKey>, Accumulator<AggregateResult, Row>> pair = RowBasedGrouperHelper
            .createGrouperAccumulatorPair(query, true, rowSignature, querySpecificConfig,
                    new Supplier<ByteBuffer>() {
                        @Override
                        public ByteBuffer get() {
                            final ResourceHolder<ByteBuffer> mergeBufferHolder = resource.getMergeBuffer();
                            closeOnExit.add(mergeBufferHolder);
                            return mergeBufferHolder.get();
                        }
                    }, temporaryStorage, spillMapper, aggregatorFactories, mergeBufferSize);
    final Grouper<RowBasedKey> grouper = pair.lhs;
    final Accumulator<AggregateResult, Row> accumulator = pair.rhs;
    closeOnExit.add(grouper);

    final AggregateResult retVal = filteredSequence.accumulate(AggregateResult.ok(), accumulator);
    if (!retVal.isOk()) {
        throw new ResourceLimitExceededException(retVal.getReason());
    }

    return grouper;
}

From source file: org.apache.druid.query.groupby.epinephelinae.RowBasedGrouperHelper.java

License: Apache License
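
This example exercises two overloads: contains(ReadableInstant) checks the subquery's universal timestamp against the outer query's intervals, and contains(long) tests each row's primitive timestamp in column 0.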

/**
 * Returns a predicate that filters result rows from a particular "subquery" based on the intervals and dim filters
 * from "query"./*from  w ww . j av  a 2 s.com*/
 *
 * @param query    outer query
 * @param subquery inner query
 */
private static Predicate<ResultRow> getResultRowPredicate(final GroupByQuery query,
        final GroupByQuery subquery) {
    final List<Interval> queryIntervals = query.getIntervals();
    final Filter filter = Filters.convertToCNFFromQueryContext(query, Filters.toFilter(query.getDimFilter()));

    final SettableSupplier<ResultRow> rowSupplier = new SettableSupplier<>();
    final ColumnSelectorFactory columnSelectorFactory = RowBasedGrouperHelper
            .createResultRowBasedColumnSelectorFactory(subquery, rowSupplier);

    final ValueMatcher filterMatcher = filter == null ? BooleanValueMatcher.of(true)
            : filter.makeMatcher(columnSelectorFactory);

    if (subquery.getUniversalTimestamp() != null
            && queryIntervals.stream().noneMatch(itvl -> itvl.contains(subquery.getUniversalTimestamp()))) {
        // There's a universal timestamp, and it doesn't match our query intervals, so no row should match.
        // By the way, if there's a universal timestamp that _does_ match the query intervals, we do nothing special here.
        return row -> false;
    }

    return row -> {
        if (subquery.getResultRowHasTimestamp()) {
            boolean inInterval = false;
            for (Interval queryInterval : queryIntervals) {
                if (queryInterval.contains(row.getLong(0))) {
                    inInterval = true;
                    break;
                }
            }
            if (!inInterval) {
                return false;
            }
        }
        rowSupplier.set(row);
        return filterMatcher.matches();
    };
}

From source file: org.apache.druid.query.scan.ScanQueryRunnerFactory.java

License: Apache License
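
Here contains(long) is called with raw event timestamps, first to locate the interval holding the limit row and then to decide when scanning can stop.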

@VisibleForTesting
Sequence<ScanResultValue> priorityQueueSortAndLimit(Sequence<ScanResultValue> inputSequence,
        ScanQuery scanQuery, List<Interval> intervalsOrdered) {
    Comparator<ScanResultValue> priorityQComparator = new ScanResultValueTimestampComparator(scanQuery);

    if (scanQuery.getScanRowsLimit() > Integer.MAX_VALUE) {
        throw new UOE(
                "Limit of %,d rows not supported for priority queue strategy of time-ordering scan results",
                scanQuery.getScanRowsLimit());
    }

    // Converting the limit from long to int could theoretically throw an ArithmeticException but this branch
    // only runs if limit < MAX_LIMIT_FOR_IN_MEMORY_TIME_ORDERING (which should be < Integer.MAX_VALUE)
    int limit = Math.toIntExact(scanQuery.getScanRowsLimit());

    PriorityQueue<ScanResultValue> q = new PriorityQueue<>(limit, priorityQComparator);

    Yielder<ScanResultValue> yielder = inputSequence.toYielder(null,
            new YieldingAccumulator<ScanResultValue, ScanResultValue>() {
                @Override
                public ScanResultValue accumulate(ScanResultValue accumulated, ScanResultValue in) {
                    yield();
                    return in;
                }
            });
    boolean doneScanning = yielder.isDone();
    // We need to scan limit elements and anything else in the last segment
    int numRowsScanned = 0;
    Interval finalInterval = null;
    while (!doneScanning) {
        ScanResultValue next = yielder.get();
        List<ScanResultValue> singleEventScanResultValues = next.toSingleEventScanResultValues();
        for (ScanResultValue srv : singleEventScanResultValues) {
            numRowsScanned++;
            // Using an intermediate unbatched ScanResultValue is not that great memory-wise, but the column list
            // needs to be preserved for queries using the compactedList result format
            q.offer(srv);
            if (q.size() > limit) {
                q.poll();
            }

            // Finish scanning the interval containing the limit row
            if (numRowsScanned > limit && finalInterval == null) {
                long timestampOfLimitRow = srv.getFirstEventTimestamp(scanQuery.getResultFormat());
                for (Interval interval : intervalsOrdered) {
                    if (interval.contains(timestampOfLimitRow)) {
                        finalInterval = interval;
                    }
                }
                if (finalInterval == null) {
                    throw new ISE("WTH???  Row came from an unscanned interval?");
                }
            }
        }
        yielder = yielder.next(null);
        doneScanning = yielder.isDone() || (finalInterval != null
                && !finalInterval.contains(next.getFirstEventTimestamp(scanQuery.getResultFormat())));
    }
    // Need to convert to a Deque because Priority Queue's iterator doesn't guarantee that the sorted order
    // will be maintained.  Deque was chosen over list because its addFirst is O(1).
    final Deque<ScanResultValue> sortedElements = new ArrayDeque<>(q.size());
    while (q.size() != 0) {
        // addFirst is used since PriorityQueue#poll() dequeues the low-priority (timestamp-wise) events first.
        sortedElements.addFirst(q.poll());
    }
    return Sequences.simple(sortedElements);
}

From source file: org.apache.druid.query.search.UseIndexesStrategy.java

License: Apache License
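
contains(ReadableInterval) checks whether the query interval fully covers the segment's data interval; if it does not, a bitmap restricted to the in-range rows is built from the time column.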

static ImmutableBitmap makeTimeFilteredBitmap(final QueryableIndex index, final Segment segment,
        final Filter filter, final Interval interval) {
    final BitmapFactory bitmapFactory = index.getBitmapFactoryForDimensions();
    final ImmutableBitmap baseFilter;
    if (filter == null) {
        baseFilter = null;
    } else {
        final BitmapIndexSelector selector = new ColumnSelectorBitmapIndexSelector(
                index.getBitmapFactoryForDimensions(), VirtualColumns.EMPTY, index);
        Preconditions.checkArgument(filter.supportsBitmapIndex(selector), "filter[%s] should support bitmap",
                filter);
        baseFilter = filter.getBitmapIndex(selector);
    }

    final ImmutableBitmap timeFilteredBitmap;
    if (!interval.contains(segment.getDataInterval())) {
        final MutableBitmap timeBitmap = bitmapFactory.makeEmptyMutableBitmap();
        final ColumnHolder timeColumnHolder = index.getColumnHolder(ColumnHolder.TIME_COLUMN_NAME);
        try (final NumericColumn timeValues = (NumericColumn) timeColumnHolder.getColumn()) {

            int startIndex = Math.max(0, getStartIndexOfTime(timeValues, interval.getStartMillis(), true));
            int endIndex = Math.min(timeValues.length() - 1,
                    getStartIndexOfTime(timeValues, interval.getEndMillis(), false));

            for (int i = startIndex; i <= endIndex; i++) {
                timeBitmap.add(i);
            }

            final ImmutableBitmap finalTimeBitmap = bitmapFactory.makeImmutableBitmap(timeBitmap);
            timeFilteredBitmap = (baseFilter == null) ? finalTimeBitmap
                    : finalTimeBitmap.intersection(baseFilter);
        }
    } else {
        timeFilteredBitmap = baseFilter;
    }

    return timeFilteredBitmap;
}

From source file: org.apache.druid.query.vector.VectorCursorGranularizer.java

License: Apache License
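
contains(ReadableInterval) tests whether the clipped query interval fits inside a single granularity bucket, in which case the time column never needs to be read.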

@Nullable
public static VectorCursorGranularizer create(final StorageAdapter storageAdapter, final VectorCursor cursor,
        final Granularity granularity, final Interval queryInterval) {
    final DateTime minTime = storageAdapter.getMinTime();
    final DateTime maxTime = storageAdapter.getMaxTime();

    final Interval storageAdapterInterval = new Interval(minTime, granularity.bucketEnd(maxTime));
    final Interval clippedQueryInterval = queryInterval.overlap(storageAdapterInterval);

    if (clippedQueryInterval == null) {
        return null;
    }

    final Iterable<Interval> bucketIterable = granularity.getIterable(clippedQueryInterval);
    final Interval firstBucket = granularity.bucket(clippedQueryInterval.getStart());

    final VectorValueSelector timeSelector;
    if (firstBucket.contains(clippedQueryInterval)) {
        // Only one bucket, no need to read the time column.
        assert Iterables.size(bucketIterable) == 1;
        timeSelector = null;
    } else {
        // Multiple buckets, need to read the time column to know when we move from one to the next.
        timeSelector = cursor.getColumnSelectorFactory().makeValueSelector(ColumnHolder.TIME_COLUMN_NAME);
    }

    return new VectorCursorGranularizer(cursor, bucketIterable, timeSelector);
}

From source file: org.apache.druid.segment.indexing.granularity.ArbitraryGranularitySpec.java

License: Apache License
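
contains(ReadableInstant) verifies that the candidate bucket interval found by floor() actually covers the given DateTime.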

@Override
public Optional<Interval> bucketInterval(DateTime dt) {
    // First interval with start time <= dt
    final Interval interval = intervals.floor(new Interval(dt, DateTimes.MAX));

    if (interval != null && interval.contains(dt)) {
        return Optional.of(interval);
    } else {
        return Optional.absent();
    }
}

From source file: org.apache.druid.segment.IndexMergerV9.java

License: Apache License
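
Two contains(ReadableInstant) calls validate that the declared data interval encapsulates both the first and last timestamps of the index being persisted.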

@Override
public File persist(final IncrementalIndex index, final Interval dataInterval, File outDir, IndexSpec indexSpec,
        ProgressIndicator progress, @Nullable SegmentWriteOutMediumFactory segmentWriteOutMediumFactory)
        throws IOException {
    if (index.isEmpty()) {
        throw new IAE("Trying to persist an empty index!");
    }

    final DateTime firstTimestamp = index.getMinTime();
    final DateTime lastTimestamp = index.getMaxTime();
    if (!(dataInterval.contains(firstTimestamp) && dataInterval.contains(lastTimestamp))) {
        throw new IAE("interval[%s] does not encapsulate the full range of timestamps[%s, %s]", dataInterval,
                firstTimestamp, lastTimestamp);
    }

    FileUtils.forceMkdir(outDir);

    log.info("Starting persist for interval[%s], rows[%,d]", dataInterval, index.size());
    return merge(
            Collections.singletonList(new IncrementalIndexAdapter(dataInterval, index,
                    indexSpec.getBitmapSerdeFactory().getBitmapFactory())),
            // If the index is not rolled up, it should not be rolled up here.
            // If it is already rolled up, there is no need to roll it up again;
            // either way the flag cannot cause reordering in the merge stage,
            // since only a single iterable is being merged.
            false, index.getMetricAggs(), outDir, indexSpec, progress, segmentWriteOutMediumFactory);
}

From source file: org.apache.druid.server.coordinator.helper.NewestSegmentFirstIterator.java

License: Apache License
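
contains(ReadableInterval) filters the timeline lookup down to segments that lie entirely within the interval being searched.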

/**
 * Returns the initial searchInterval which is {@code (timeline.first().start, timeline.last().end - skipOffset)}.
 *
 * @param timeline      timeline of a dataSource
 * @param skipOffset    period to skip from the end of the timeline
 * @param skipIntervals intervals to skip
 *
 * @return intervals to search; the list is empty if none are found
 */
private static List<Interval> findInitialSearchInterval(VersionedIntervalTimeline<String, DataSegment> timeline,
        Period skipOffset, @Nullable List<Interval> skipIntervals) {
    Preconditions.checkArgument(timeline != null && !timeline.isEmpty(),
            "timeline should not be null or empty");
    Preconditions.checkNotNull(skipOffset, "skipOffset");

    final TimelineObjectHolder<String, DataSegment> first = Preconditions.checkNotNull(timeline.first(),
            "first");
    final TimelineObjectHolder<String, DataSegment> last = Preconditions.checkNotNull(timeline.last(), "last");
    final List<Interval> fullSkipIntervals = sortAndAddSkipIntervalFromLatest(last.getInterval().getEnd(),
            skipOffset, skipIntervals);

    final Interval totalInterval = new Interval(first.getInterval().getStart(), last.getInterval().getEnd());
    final List<Interval> filteredInterval = filterSkipIntervals(totalInterval, fullSkipIntervals);
    final List<Interval> searchIntervals = new ArrayList<>();

    for (Interval lookupInterval : filteredInterval) {
        final List<TimelineObjectHolder<String, DataSegment>> holders = timeline
                .lookup(new Interval(lookupInterval.getStart(), lookupInterval.getEnd()));

        final List<DataSegment> segments = holders.stream()
                .flatMap(holder -> StreamSupport.stream(holder.getObject().spliterator(), false))
                .map(PartitionChunk::getObject)
                .filter(segment -> lookupInterval.contains(segment.getInterval()))
                .sorted((s1, s2) -> Comparators.intervalsByStartThenEnd().compare(s1.getInterval(),
                        s2.getInterval()))
                .collect(Collectors.toList());

        if (!segments.isEmpty()) {
            searchIntervals.add(new Interval(segments.get(0).getInterval().getStart(),
                    segments.get(segments.size() - 1).getInterval().getEnd()));
        }
    }

    return searchIntervals;
}

From source file: org.apache.druid.server.coordinator.rules.PeriodDropRule.java

License: Apache License
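
contains(ReadableInterval) decides whether a segment interval falls entirely within the rolling drop period (unless includeFuture is set, in which case only the start instants are compared).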

@Override
public boolean appliesTo(Interval theInterval, DateTime referenceTimestamp) {
    final Interval currInterval = new Interval(period, referenceTimestamp);
    if (includeFuture) {
        return currInterval.getStartMillis() <= theInterval.getStartMillis();
    } else {
        return currInterval.contains(theInterval);
    }
}

From source file: org.apache.druid.server.http.DataSourcesResource.java

License: Apache License
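
contains(ReadableInterval) selects served segments whose intervals are fully covered by the requested interval; the method reference theInterval::contains forwards the same containment test to the overloaded helper.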

@GET
@Path("/{dataSourceName}/intervals/{interval}")
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(DatasourceResourceFilter.class)
public Response getServedSegmentsInInterval(@PathParam("dataSourceName") String dataSourceName,
        @PathParam("interval") String interval, @QueryParam("simple") String simple,
        @QueryParam("full") String full) {
    final Interval theInterval = Intervals.of(interval.replace('_', '/'));
    if (simple == null && full == null) {
        final ImmutableDruidDataSource dataSource = getDataSource(dataSourceName);
        if (dataSource == null) {
            return logAndCreateDataSourceNotFoundResponse(dataSourceName);
        }
        final Set<SegmentId> segmentIds = new TreeSet<>();
        for (DataSegment dataSegment : dataSource.getSegments()) {
            if (theInterval.contains(dataSegment.getInterval())) {
                segmentIds.add(dataSegment.getId());
            }
        }
        return Response.ok(segmentIds).build();
    }
    return getServedSegmentsInInterval(dataSourceName, full != null, theInterval::contains);
}