Example usage for org.joda.time Interval isEqual

Introduction

On this page you can find example usage for org.joda.time Interval isEqual.

Prototype

public boolean isEqual(ReadableInterval other) 

Document

Is this interval equal to the specified interval ignoring the chronology.
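
Note that equals() on Interval also compares the chronology, while isEqual() compares only the underlying millisecond instants. A minimal sketch of the difference (the class name IsEqualExample is ours, for illustration):

import org.joda.time.Interval;
import org.joda.time.chrono.GJChronology;
import org.joda.time.chrono.ISOChronology;

public class IsEqualExample {
    public static void main(String[] args) {
        // Two intervals covering the same millisecond instants, but backed by
        // different chronologies.
        Interval iso = new Interval(0L, 86400000L, ISOChronology.getInstanceUTC());
        Interval gj = new Interval(0L, 86400000L, GJChronology.getInstanceUTC());

        // equals() also compares the chronology, so this prints false.
        System.out.println(iso.equals(gj));

        // isEqual() ignores the chronology and compares only the start and
        // end instants, so this prints true.
        System.out.println(iso.isEqual(gj));
    }
}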

Usage

From source file: org.apache.druid.server.coordinator.helper.NewestSegmentFirstIterator.java

License: Apache License

/**
 * Finds segments to compact together for the given intervalToSearch. It progressively searches the given
 * intervalToSearch in time order (latest first). The timeline lookup duration is one day: the timeline is
 * looked up for the last one day of the given intervalToSearch, and the preceding day is searched next if the
 * segments found so far are not enough to compact. This is repeated until enough segments are found.
 *
 * @param compactibleTimelineObjectHolderCursor timeline iterator
 * @param config           compaction config
 *
 * @return segments to compact
 */
private static SegmentsToCompact findSegmentsToCompact(
        final CompactibleTimelineObjectHolderCursor compactibleTimelineObjectHolderCursor,
        final DataSourceCompactionConfig config) {
    final long inputSegmentSize = config.getInputSegmentSizeBytes();
    final int maxNumSegmentsToCompact = config.getMaxNumSegmentsToCompact();
    final SegmentsToCompact segmentsToCompact = new SegmentsToCompact();

    // Finds segments to compact together while iterating timeline from latest to oldest
    while (compactibleTimelineObjectHolderCursor.hasNext()
            && segmentsToCompact.getTotalSize() < inputSegmentSize
            && segmentsToCompact.getNumSegments() < maxNumSegmentsToCompact) {
        final TimelineObjectHolder<String, DataSegment> timeChunkHolder = Preconditions
                .checkNotNull(compactibleTimelineObjectHolderCursor.get(), "timelineObjectHolder");
        final List<PartitionChunk<DataSegment>> chunks = Lists
                .newArrayList(timeChunkHolder.getObject().iterator());
        final long timeChunkSizeBytes = chunks.stream().mapToLong(chunk -> chunk.getObject().getSize()).sum();

        final boolean isSameOrAbuttingInterval;
        final Interval lastInterval = segmentsToCompact.getIntervalOfLastSegment();
        if (lastInterval == null) {
            isSameOrAbuttingInterval = true;
        } else {
            final Interval currentInterval = chunks.get(0).getObject().getInterval();
            isSameOrAbuttingInterval = currentInterval.isEqual(lastInterval)
                    || currentInterval.abuts(lastInterval);
        }

        // The segments in a holder should be added all together or not.
        final boolean isCompactibleSize = SegmentCompactorUtil.isCompactibleSize(inputSegmentSize,
                segmentsToCompact.getTotalSize(), timeChunkSizeBytes);
        final boolean isCompactibleNum = SegmentCompactorUtil.isCompactibleNum(maxNumSegmentsToCompact,
                segmentsToCompact.getNumSegments(), chunks.size());
        if (isCompactibleSize && isCompactibleNum && isSameOrAbuttingInterval) {
            chunks.forEach(chunk -> segmentsToCompact.add(chunk.getObject()));
        } else {
            if (segmentsToCompact.getNumSegments() > 1) {
                // We found some segments to compact and cannot add more. End here.
                return segmentsToCompact;
            } else {
                if (!SegmentCompactorUtil.isCompactibleSize(inputSegmentSize, 0, timeChunkSizeBytes)) {
                    final DataSegment segment = chunks.get(0).getObject();
                    segmentsToCompact.clear();
                    log.warn(
                            "shardSize[%d] for dataSource[%s] and interval[%s] is larger than inputSegmentSize[%d]."
                                    + " Continue to the next shard.",
                            timeChunkSizeBytes, segment.getDataSource(), segment.getInterval(),
                            inputSegmentSize);
                } else if (maxNumSegmentsToCompact < chunks.size()) {
                    final DataSegment segment = chunks.get(0).getObject();
                    segmentsToCompact.clear();
                    log.warn("The number of segments[%d] for dataSource[%s] and interval[%s] is larger than "
                            + "maxNumSegmentsToCompact[%d]. If you see lots of shards are being skipped due to too many "
                            + "segments, consider increasing 'numTargetCompactionSegments' and "
                            + "'druid.indexer.runner.maxZnodeBytes'. Continue to the next shard.",
                            chunks.size(), segment.getDataSource(), segment.getInterval(),
                            maxNumSegmentsToCompact);
                } else {
                    if (segmentsToCompact.getNumSegments() == 1) {
                        // We found a segment which is smaller than targetCompactionSize but too large to compact with other
                        // segments. Skip this one.
                        segmentsToCompact.clear();
                        chunks.forEach(chunk -> segmentsToCompact.add(chunk.getObject()));
                    } else {
                        throw new ISE(
                                "Cannot compact segments[%s]. shardBytes[%s], numSegments[%s] "
                                        + "with current segmentsToCompact[%s]",
                                chunks.stream().map(PartitionChunk::getObject).collect(Collectors.toList()),
                                timeChunkSizeBytes, chunks.size(), segmentsToCompact);
                    }
                }
            }
        }

        compactibleTimelineObjectHolderCursor.next();
    }

    if (segmentsToCompact.getNumSegments() == 1) {
        // Don't compact a single segment
        segmentsToCompact.clear();
    }

    return segmentsToCompact;
}
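
In the excerpt above, isEqual appears in the same-or-abutting check near the top of the loop: a new time chunk is grouped with the previously collected segments only when its interval is equal to, or abuts, the interval of the last collected segment. A standalone sketch of that check (the class name and sample dates are ours, not part of the Druid source):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class SameOrAbuttingCheck {
    public static void main(String[] args) {
        DateTime start = new DateTime(2018, 1, 1, 0, 0, DateTimeZone.UTC);
        Interval day1 = new Interval(start, start.plusDays(1));
        Interval day1Copy = new Interval(start, start.plusDays(1));
        Interval day2 = new Interval(start.plusDays(1), start.plusDays(2));
        Interval day4 = new Interval(start.plusDays(3), start.plusDays(4));

        // Identical intervals: isEqual() is true (abuts() is false, because
        // abutting intervals must not overlap).
        System.out.println(day1.isEqual(day1Copy) || day1.abuts(day1Copy)); // true

        // Adjacent days: not equal, but day2 starts exactly where day1 ends,
        // so abuts() is true.
        System.out.println(day2.isEqual(day1) || day2.abuts(day1)); // true

        // A one-day gap: neither equal nor abutting.
        System.out.println(day4.isEqual(day2) || day4.abuts(day2)); // false
    }
}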