Example usage for org.joda.time Interval Interval

List of usage examples for org.joda.time Interval Interval

Introduction

On this page you can find example usages of the org.joda.time Interval constructor.

Prototype

public Interval(Object interval, Chronology chronology) 

Document

Constructs a time interval by converting or copying from another object, overriding the chronology.
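
A minimal sketch of this constructor, assuming org.joda.time.Interval, org.joda.time.DateTime and org.joda.time.chrono.ISOChronology are imported (the dates are chosen purely for illustration):

// An existing interval built from two instants.
Interval source = new Interval(new DateTime("2017-01-01T00:00:00Z"), new DateTime("2017-01-02T00:00:00Z"));
// Copy it while overriding the chronology with ISO in UTC.
Interval copy = new Interval(source, ISOChronology.getInstanceUTC());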

Usage

From source file:io.druid.server.coordinator.DruidCoordinatorSegmentMerger.java

License:Open Source License

@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    DatasourceWhitelist whitelist = whiteListRef.get();

    CoordinatorStats stats = new CoordinatorStats();
    Map<String, VersionedIntervalTimeline<String, DataSegment>> dataSources = Maps.newHashMap();

    // Find serviced segments by using a timeline
    for (DataSegment dataSegment : params.getAvailableSegments()) {
        if (whitelist == null || whitelist.contains(dataSegment.getDataSource())) {
            VersionedIntervalTimeline<String, DataSegment> timeline = dataSources
                    .get(dataSegment.getDataSource());
            if (timeline == null) {
                timeline = new VersionedIntervalTimeline<String, DataSegment>(Ordering.<String>natural());
                dataSources.put(dataSegment.getDataSource(), timeline);
            }
            timeline.add(dataSegment.getInterval(), dataSegment.getVersion(),
                    dataSegment.getShardSpec().createChunk(dataSegment));
        }
    }

    // Find segments to merge
    for (final Map.Entry<String, VersionedIntervalTimeline<String, DataSegment>> entry : dataSources
            .entrySet()) {
        // Get serviced segments from the timeline
        VersionedIntervalTimeline<String, DataSegment> timeline = entry.getValue();
        List<TimelineObjectHolder<String, DataSegment>> timelineObjects = timeline
                .lookup(new Interval(new DateTime(0), new DateTime("3000-01-01")));

        // Accumulate timelineObjects greedily until we reach our limits, then backtrack to the maximum complete set
        SegmentsToMerge segmentsToMerge = new SegmentsToMerge();

        for (int i = 0; i < timelineObjects.size(); i++) {
            if (!segmentsToMerge.add(timelineObjects.get(i))
                    || segmentsToMerge.getByteCount() > params.getCoordinatorDynamicConfig()
                            .getMergeBytesLimit()
                    || segmentsToMerge.getSegmentCount() >= params.getCoordinatorDynamicConfig()
                            .getMergeSegmentsLimit()) {
                i -= segmentsToMerge.backtrack(params.getCoordinatorDynamicConfig().getMergeBytesLimit());

                if (segmentsToMerge.getSegmentCount() > 1) {
                    stats.addToGlobalStat("mergedCount", mergeSegments(segmentsToMerge, entry.getKey()));
                }

                if (segmentsToMerge.getSegmentCount() == 0) {
                    // Backtracked all the way to zero. Increment by one so we continue to make progress.
                    i++;
                }

                segmentsToMerge = new SegmentsToMerge();
            }
        }

        // Finish any timelineObjects to merge that may not have hit the threshold
        segmentsToMerge.backtrack(params.getCoordinatorDynamicConfig().getMergeBytesLimit());
        if (segmentsToMerge.getSegmentCount() > 1) {
            stats.addToGlobalStat("mergedCount", mergeSegments(segmentsToMerge, entry.getKey()));
        }
    }

    return params.buildFromExisting().withCoordinatorStats(stats).build();
}
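
The timeline lookup above builds an effectively all-time interval with the Interval(ReadableInstant, ReadableInstant) constructor. A minimal standalone sketch of the same pattern (the far-future bound is illustrative):

// An "all of time" interval from the epoch to a far-future date, as used for the lookup above.
Interval allTime = new Interval(new DateTime(0), new DateTime("3000-01-01"));
// allTime then covers any realistic segment interval passed to timeline.lookup(...).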

From source file:io.druid.server.coordinator.helper.DruidCoordinatorSegmentKiller.java

License:Apache License

@VisibleForTesting
Interval findIntervalForKillTask(String dataSource, int limit) {
    List<Interval> unusedSegmentIntervals = segmentManager.getUnusedSegmentIntervals(dataSource,
            new Interval(0, System.currentTimeMillis() - retainDuration), limit);

    if (unusedSegmentIntervals != null && unusedSegmentIntervals.size() > 0) {
        return JodaUtils.umbrellaInterval(unusedSegmentIntervals);
    } else {
        return null;
    }
}
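
This example relies on the Interval(long startMillis, long endMillis) constructor to cover everything older than the retain duration. A minimal standalone sketch of that pattern (the retain duration value is illustrative):

// Everything from the epoch up to "now minus retainDuration", expressed in epoch milliseconds.
long retainDuration = 90 * 24 * 3600 * 1000L; // e.g. 90 days, purely illustrative
Interval unusedWindow = new Interval(0, System.currentTimeMillis() - retainDuration);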

From source file:io.druid.server.coordinator.helper.NewestSegmentFirstIterator.java

License:Apache License

@Override
public Object2LongOpenHashMap<String> remainingSegmentSizeBytes() {
    final Object2LongOpenHashMap<String> resultMap = new Object2LongOpenHashMap<>();
    resultMap.defaultReturnValue(UNKNOWN_REMAINING_SEGMENT_SIZE);
    final Iterator<QueueEntry> iterator = queue.iterator();
    while (iterator.hasNext()) {
        final QueueEntry entry = iterator.next();
        final VersionedIntervalTimeline<String, DataSegment> timeline = dataSources.get(entry.getDataSource());
        final Interval interval = new Interval(timeline.first().getInterval().getStart(),
                entry.interval.getEnd());

        final List<TimelineObjectHolder<String, DataSegment>> holders = timeline.lookup(interval);

        resultMap.put(entry.getDataSource(),
                holders.stream()
                        .flatMap(holder -> StreamSupport.stream(holder.getObject().spliterator(), false))
                        .mapToLong(chunk -> chunk.getObject().getSize()).sum());
    }
    return resultMap;
}

From source file:io.druid.server.coordinator.helper.NewestSegmentFirstIterator.java

License:Apache License

/**
 * Finds segments to compact together for the given intervalToSearch. It progressively searches the given
 * intervalToSearch in time order (latest first). The timeline lookup duration is one day: the timeline is looked
 * up for the last day of the given intervalToSearch, and the preceding day is searched next if the found segments
 * are not large enough to compact. This is repeated until enough segments are found.
 *
 * @param timeline         timeline of a dataSource
 * @param intervalToSearch interval to search
 * @param searchEnd        the end of the whole searchInterval
 * @param config           compaction config
 *
 * @return a pair of the reduced interval of (intervalToSearch - interval of found segments) and segments to compact
 */
@VisibleForTesting
static Pair<Interval, SegmentsToCompact> findSegmentsToCompact(
        final VersionedIntervalTimeline<String, DataSegment> timeline, final Interval intervalToSearch,
        final DateTime searchEnd, final DataSourceCompactionConfig config) {
    final long targetCompactionSize = config.getTargetCompactionSizeBytes();
    final int numTargetSegments = config.getNumTargetCompactionSegments();
    final List<DataSegment> segmentsToCompact = new ArrayList<>();
    Interval searchInterval = intervalToSearch;
    long totalSegmentsToCompactBytes = 0;

    // Finds segments to compact together while iterating searchInterval from latest to oldest
    while (!Intervals.isEmpty(searchInterval) && totalSegmentsToCompactBytes < targetCompactionSize
            && segmentsToCompact.size() < numTargetSegments) {
        final Interval lookupInterval = SegmentCompactorUtil.getNextLoopupInterval(searchInterval);
        // holders are sorted by their interval
        final List<TimelineObjectHolder<String, DataSegment>> holders = timeline.lookup(lookupInterval);

        if (holders.isEmpty()) {
            // We found nothing. Continue to the next interval.
            searchInterval = SegmentCompactorUtil.removeIntervalFromEnd(searchInterval, lookupInterval);
            continue;
        }

        for (int i = holders.size() - 1; i >= 0; i--) {
            final TimelineObjectHolder<String, DataSegment> holder = holders.get(i);
            final List<PartitionChunk<DataSegment>> chunks = Lists.newArrayList(holder.getObject().iterator());
            final long partitionBytes = chunks.stream().mapToLong(chunk -> chunk.getObject().getSize()).sum();
            if (chunks.size() == 0 || partitionBytes == 0) {
                log.warn("Skip empty shard[%s]", holder);
                continue;
            }

            if (!intervalToSearch.contains(chunks.get(0).getObject().getInterval())) {
                searchInterval = SegmentCompactorUtil.removeIntervalFromEnd(searchInterval, new Interval(
                        chunks.get(0).getObject().getInterval().getStart(), searchInterval.getEnd()));
                continue;
            }

            // Addition of the segments of a partition should be atomic.
            if (SegmentCompactorUtil.isCompactible(targetCompactionSize, totalSegmentsToCompactBytes,
                    partitionBytes) && segmentsToCompact.size() + chunks.size() <= numTargetSegments) {
                chunks.forEach(chunk -> segmentsToCompact.add(chunk.getObject()));
                totalSegmentsToCompactBytes += partitionBytes;
            } else {
                if (segmentsToCompact.size() > 1) {
                    // We found some segments to compact and cannot add more. End here.
                    return checkCompactableSizeForLastSegmentOrReturn(segmentsToCompact,
                            totalSegmentsToCompactBytes, timeline, searchInterval, searchEnd, config);
                } else {
                    // (*) Discard segments found so far because we can't compact them anyway.
                    final int numSegmentsToCompact = segmentsToCompact.size();
                    segmentsToCompact.clear();

                    if (!SegmentCompactorUtil.isCompactible(targetCompactionSize, 0, partitionBytes)) {
                        // TODO: this should be changed to compact many small segments into a few large segments
                        final DataSegment segment = chunks.get(0).getObject();
                        log.warn(
                                "shardSize[%d] for dataSource[%s] and interval[%s] is larger than targetCompactionSize[%d]."
                                        + " Contitnue to the next shard.",
                                partitionBytes, segment.getDataSource(), segment.getInterval(),
                                targetCompactionSize);
                    } else if (numTargetSegments < chunks.size()) {
                        final DataSegment segment = chunks.get(0).getObject();
                        log.warn(
                                "The number of segments[%d] for dataSource[%s] and interval[%s] is larger than "
                                        + "numTargetCompactSegments[%d]. If you see lots of shards are being skipped due to too many "
                                        + "segments, consider increasing 'numTargetCompactionSegments' and "
                                        + "'druid.indexer.runner.maxZnodeBytes'. Contitnue to the next shard.",
                                chunks.size(), segment.getDataSource(), segment.getInterval(),
                                numTargetSegments);
                    } else {
                        if (numSegmentsToCompact == 1) {
                            // We found a segment which is smaller than targetCompactionSize but too large to compact with other
                            // segments. Skip this one.
                            // Note that segmentsToCompact is already cleared at (*).
                            chunks.forEach(chunk -> segmentsToCompact.add(chunk.getObject()));
                            totalSegmentsToCompactBytes = partitionBytes;
                        } else {
                            throw new ISE("Cannot compact segments[%s]. shardBytes[%s], numSegments[%s]",
                                    chunks.stream().map(PartitionChunk::getObject).collect(Collectors.toList()),
                                    partitionBytes, chunks.size());
                        }
                    }
                }
            }

            // Update searchInterval
            searchInterval = SegmentCompactorUtil.removeIntervalFromEnd(searchInterval,
                    new Interval(chunks.get(0).getObject().getInterval().getStart(), searchInterval.getEnd()));
        }
    }

    if (segmentsToCompact.size() == 0 || segmentsToCompact.size() == 1) {
        if (Intervals.isEmpty(searchInterval)) {
            // We found nothing to compact. End here.
            return Pair.of(intervalToSearch, new SegmentsToCompact(ImmutableList.of()));
        } else {
            // We found only 1 segment. Further find segments for the remaining interval.
            return findSegmentsToCompact(timeline, searchInterval, searchEnd, config);
        }
    }

    return checkCompactableSizeForLastSegmentOrReturn(segmentsToCompact, totalSegmentsToCompactBytes, timeline,
            searchInterval, searchEnd, config);
}

From source file:io.druid.server.coordinator.helper.NewestSegmentFirstIterator.java

License:Apache License

/**
 * Returns the initial searchInterval which is {@code (timeline.first().start, timeline.last().end - skipOffset)}.
 *
 * @param timeline   timeline of a dataSource
 * @param skipOffset the period to skip from the end of the timeline
 *
 * @return found searchInterval
 */
private static Interval findInitialSearchInterval(VersionedIntervalTimeline<String, DataSegment> timeline,
        Period skipOffset) {
    Preconditions.checkArgument(timeline != null && !timeline.isEmpty(),
            "timeline should not be null or empty");
    Preconditions.checkNotNull(skipOffset, "skipOffset");

    final TimelineObjectHolder<String, DataSegment> first = Preconditions.checkNotNull(timeline.first(),
            "first");
    final TimelineObjectHolder<String, DataSegment> last = Preconditions.checkNotNull(timeline.last(), "last");

    final Interval skipInterval = new Interval(skipOffset, last.getInterval().getEnd());

    final List<TimelineObjectHolder<String, DataSegment>> holders = timeline.lookup(
            new Interval(first.getInterval().getStart(), last.getInterval().getEnd().minus(skipOffset)));

    final List<DataSegment> segments = holders.stream()
            .flatMap(holder -> StreamSupport.stream(holder.getObject().spliterator(), false))
            .map(PartitionChunk::getObject).filter(segment -> !segment.getInterval().overlaps(skipInterval))
            .sorted((s1, s2) -> Comparators.intervalsByStartThenEnd().compare(s1.getInterval(),
                    s2.getInterval()))
            .collect(Collectors.toList());

    if (segments.isEmpty()) {
        return new Interval(first.getInterval().getStart(), first.getInterval().getStart());
    } else {
        return new Interval(segments.get(0).getInterval().getStart(),
                segments.get(segments.size() - 1).getInterval().getEnd());
    }
}
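
The skipInterval above uses the Interval(ReadablePeriod period, ReadableInstant end) constructor, which places the period immediately before the given instant. A minimal sketch (the period and end time are illustrative):

// The one-day window ending at "end": the start is computed as end minus the period.
DateTime end = new DateTime("2017-12-01T00:00:00Z");
Interval skip = new Interval(Period.days(1), end);
// skip covers exactly the one day ending at end, so skip.getEnd() equals end.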

From source file:io.druid.server.coordinator.helper.SegmentCompactorUtil.java

License:Apache License

/**
 * Returns an interval for looking up the timeline.
 * If {@code totalInterval} is longer than {@link #LOOKUP_PERIOD}, it returns an interval of length
 * {@link #LOOKUP_PERIOD} ending at the end of {@code totalInterval}.
 */
static Interval getNextLoopupInterval(Interval totalInterval) {
    final Duration givenDuration = totalInterval.toDuration();
    return givenDuration.isLongerThan(LOOKUP_DURATION) ? new Interval(LOOKUP_PERIOD, totalInterval.getEnd())
            : totalInterval;
}

From source file:io.druid.server.coordinator.helper.SegmentCompactorUtil.java

License:Apache License

/**
 * Removes {@code smallInterval} from {@code largeInterval}. The ends of both intervals must be the same.
 *
 * @return an interval of {@code largeInterval} - {@code smallInterval}.
 */
static Interval removeIntervalFromEnd(Interval largeInterval, Interval smallInterval) {
    Preconditions.checkArgument(largeInterval.getEnd().equals(smallInterval.getEnd()),
            "end should be same. largeInterval[%s] smallInterval[%s]", largeInterval, smallInterval);
    return new Interval(largeInterval.getStart(), smallInterval.getStart());
}

From source file:io.druid.server.coordinator.rules.PeriodDropRule.java

License:Apache License

@Override
public boolean appliesTo(Interval theInterval, DateTime referenceTimestamp) {
    final Interval currInterval = new Interval(period, referenceTimestamp);
    return currInterval.contains(theInterval);
}

From source file:io.druid.server.coordinator.rules.PeriodLoadRule.java

License:Apache License

@Override
public boolean appliesTo(Interval interval, DateTime referenceTimestamp) {
    final Interval currInterval = new Interval(period, referenceTimestamp);
    return currInterval.overlaps(interval) && interval.getStartMillis() >= currInterval.getStartMillis();
}

From source file:io.druid.server.coordinator.rules.Rules.java

License:Apache License

public static boolean eligibleForLoad(Period period, Interval interval, DateTime referenceTimestamp) {
    final Interval currInterval = new Interval(period, referenceTimestamp);
    return currInterval.overlaps(interval) && interval.getStartMillis() >= currInterval.getStartMillis();
}