Example usage for org.joda.time Interval abuts

List of usage examples for org.joda.time Interval abuts

Introduction

On this page you can find example usages for org.joda.time Interval abuts.

Prototype

public boolean abuts(ReadableInterval interval) 

Document

Does this interval abut with the interval specified.
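
Intervals in Joda-Time are half-open (inclusive of the start instant, exclusive of the end), so two intervals abut when one ends exactly where the other starts: no overlap, no gap. A minimal, self-contained sketch (the dates are invented for illustration):

import org.joda.time.DateTime;
import org.joda.time.Interval;

public class AbutsDemo {
    public static void main(String[] args) {
        DateTime nine = new DateTime(2024, 1, 1, 9, 0);
        DateTime ten = new DateTime(2024, 1, 1, 10, 0);
        DateTime eleven = new DateTime(2024, 1, 1, 11, 0);

        Interval morning = new Interval(nine, ten);   // [09:00, 10:00)
        Interval midday = new Interval(ten, eleven);  // [10:00, 11:00)

        System.out.println(morning.abuts(midday));    // true: morning ends exactly where midday starts
        System.out.println(midday.abuts(morning));    // true: the test is symmetric
        System.out.println(morning.overlaps(midday)); // false: abutting intervals share no instant
        System.out.println(morning.abuts(new Interval(ten, ten))); // true: zero-duration interval at the boundary
    }
}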

Usage

From source file:com.baulsupp.kolja.ansi.reports.basic.ActiveRequests.java

License:Open Source License

public boolean isActive(RequestLine line) {
    if (!initialised) {
        interval = calculateInterval(line);
        initialised = true;
    }

    if (interval == null) {
        return true;
    }

    Interval requestInterval = (Interval) line.getValue(LogConstants.INTERVAL);

    if (requestInterval == null) {
        return false;
    }

    return requestInterval.overlaps(interval) || requestInterval.abuts(interval);
}
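
The overlaps-or-abuts pair used here is the usual Joda-Time test for "these two intervals touch with no gap between them". An equivalent formulation goes through Interval#gap, which returns null exactly when the intervals overlap or abut; the helper below is a sketch, not part of the original class:

// Hypothetical helper, equivalent to a.overlaps(b) || a.abuts(b):
private static boolean touches(Interval a, Interval b) {
    return a.gap(b) == null; // gap() is null when the intervals overlap or abut
}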

From source file:com.metamx.common.JodaUtils.java

License:Apache License

public static ArrayList<Interval> condenseIntervals(Iterable<Interval> intervals) {
    ArrayList<Interval> retVal = Lists.newArrayList();

    TreeSet<Interval> sortedIntervals = Sets.newTreeSet(Comparators.intervalsByStartThenEnd());
    for (Interval interval : intervals) {
        sortedIntervals.add(interval);
    }

    if (sortedIntervals.isEmpty()) {
        return Lists.newArrayList();
    }

    Iterator<Interval> intervalsIter = sortedIntervals.iterator();
    Interval currInterval = intervalsIter.next();
    while (intervalsIter.hasNext()) {
        Interval next = intervalsIter.next();

        if (currInterval.overlaps(next) || currInterval.abuts(next)) {
            currInterval = new Interval(currInterval.getStart(), next.getEnd());
        } else {
            retVal.add(currInterval);
            currInterval = next;
        }
    }
    retVal.add(currInterval);

    return retVal;
}
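
A hedged usage sketch of condenseIntervals (the interval values are invented): the two abutting day-long intervals collapse into one, while the disjoint third passes through unchanged:

List<Interval> condensed = JodaUtils.condenseIntervals(Arrays.asList(
        Interval.parse("2024-01-01/2024-01-02"),    // abuts the next interval
        Interval.parse("2024-01-02/2024-01-03"),
        Interval.parse("2024-01-05/2024-01-06")));  // neither overlaps nor abuts: kept as-is
// condensed: [2024-01-01/2024-01-03, 2024-01-05/2024-01-06]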

From source file:com.sheepdog.mashmesh.models.VolunteerProfile.java

License:Apache License

private List<Interval> getAvailableIntervals(DateTime aroundDateTime) {
    List<Interval> intervals = new ArrayList<Interval>();
    Map<Integer, DateTime> adjacentDays = new HashMap<Integer, DateTime>(3);

    // Construct a map from days of the week to DateTimes representing adjacent days.
    for (int i = -1; i <= 1; i++) {
        DateTime dateTime = aroundDateTime.plusDays(i);
        int day = dateTime.getDayOfWeek();
        adjacentDays.put(day, dateTime);
    }

    // Construct Intervals from time periods in adjacent days.
    for (AvailableTimePeriod availableTimePeriod : availableTimePeriods) {
        if (adjacentDays.containsKey(availableTimePeriod.getDay())) {
            LocalDate date = adjacentDays.get(availableTimePeriod.getDay()).toLocalDate();
            DateTime start = date.toDateTime(availableTimePeriod.getStartTime(), aroundDateTime.getZone());
            DateTime end = date.toDateTime(availableTimePeriod.getEndTime(), aroundDateTime.getZone());

            // Allow 00:00 - 00:00 to express 00:00 - 24:00 as we can't serialize a
            //  LocalTime representing 24:00.
            if (end.compareTo(start) <= 0) {
                end = end.plusDays(1);
            }

            intervals.add(new Interval(start, end));
        }
    }

    // Sort the Intervals so that adjacent time periods abut. Assumes that intervals don't overlap.
    Collections.sort(intervals, new Comparator<Interval>() {
        @Override
        public int compare(Interval i1, Interval i2) {
            return Long.compare(i1.getStartMillis(), i2.getStartMillis());
        }
    });

    // Merge abutting intervals together
    List<Interval> mergedIntervals = new ArrayList<Interval>();
    Interval lastInterval = null;

    for (Interval interval : intervals) {
        if (lastInterval != null && lastInterval.abuts(interval)) {
            mergedIntervals.remove(mergedIntervals.size() - 1);
            interval = lastInterval.withEnd(interval.getEnd());
        }

        lastInterval = interval;
        mergedIntervals.add(interval);
    }

    return mergedIntervals;
}
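
Note the merge idiom here: because the list is sorted by start and the intervals are assumed not to overlap, a single left-to-right pass suffices. When lastInterval.abuts(interval), the previously appended interval is removed and replaced with lastInterval.withEnd(interval.getEnd()), extending the merged run in place.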

From source file:com.yahoo.bard.webservice.util.DateTimeUtils.java

License:Apache License

/**
 * Merge all contiguous and overlapping intervals in a set together and return the set with the merged intervals.
 *
 * @param unmergedIntervals A set of intervals that may abut or overlap
 *
 * @return The set of merged intervals
 */
public static Set<Interval> mergeIntervalSet(Set<Interval> unmergedIntervals) {
    // Create a self sorting set of intervals
    TreeSet<Interval> sortedIntervals = new TreeSet<>(IntervalStartComparator.INSTANCE);

    for (Interval mergingInterval : unmergedIntervals) {
        Iterator<Interval> it = sortedIntervals.iterator();
        while (it.hasNext()) {
            Interval sortedInterval = it.next();
            if (mergingInterval.overlaps(sortedInterval) || mergingInterval.abuts(sortedInterval)) {
                // Remove the interval being merged with
                it.remove();
                // find start and end of new interval
                DateTime start = (mergingInterval.getStart().isBefore(sortedInterval.getStart()))
                        ? mergingInterval.getStart()
                        : sortedInterval.getStart();
                DateTime end = (mergingInterval.getEnd().isAfter(sortedInterval.getEnd()))
                        ? mergingInterval.getEnd()
                        : sortedInterval.getEnd();
                mergingInterval = new Interval(start, end);
            }
        }
        sortedIntervals.add(mergingInterval);
    }
    return sortedIntervals;
}
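
Unlike the single-pass variants above, this version rescans the sorted set for every incoming interval, removing each member it overlaps or abuts and folding it into mergingInterval before re-inserting the widened interval. The result therefore contains only pairwise disjoint, non-abutting intervals, at the cost of an extra inner loop.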

From source file:io.druid.java.util.common.JodaUtils.java

License:Apache License

public static ArrayList<Interval> condenseIntervals(Iterable<Interval> intervals) {
    ArrayList<Interval> retVal = Lists.newArrayList();

    final SortedSet<Interval> sortedIntervals;

    if (intervals instanceof SortedSet) {
        sortedIntervals = (SortedSet<Interval>) intervals;
    } else {
        sortedIntervals = Sets.newTreeSet(Comparators.intervalsByStartThenEnd());
        for (Interval interval : intervals) {
            sortedIntervals.add(interval);
        }
    }

    if (sortedIntervals.isEmpty()) {
        return Lists.newArrayList();
    }

    Iterator<Interval> intervalsIter = sortedIntervals.iterator();
    Interval currInterval = intervalsIter.next();
    while (intervalsIter.hasNext()) {
        Interval next = intervalsIter.next();

        if (currInterval.abuts(next)) {
            currInterval = new Interval(currInterval.getStart(), next.getEnd());
        } else if (currInterval.overlaps(next)) {
            DateTime nextEnd = next.getEnd();
            DateTime currEnd = currInterval.getEnd();
            currInterval = new Interval(currInterval.getStart(), nextEnd.isAfter(currEnd) ? nextEnd : currEnd);
        } else {
            retVal.add(currInterval);
            currInterval = next;
        }
    }
    retVal.add(currInterval);

    return retVal;
}
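
Compared with the com.metamx.common version above, this variant splits the abutting case from the overlapping one and takes the later of the two end instants when intervals overlap, so an interval wholly contained inside currInterval can no longer shrink the merged result.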

From source file:io.druid.server.ClientInfoResource.java

License:Apache License

@GET
@Path("/{dataSourceName}")
@Produces(MediaType.APPLICATION_JSON)
public Map<String, Object> getDatasource(@PathParam("dataSourceName") String dataSourceName,
        @QueryParam("interval") String interval, @QueryParam("full") String full) {
    if (full == null) {
        return ImmutableMap.<String, Object>of(KEY_DIMENSIONS,
                getDatasourceDimensions(dataSourceName, interval), KEY_METRICS,
                getDatasourceMetrics(dataSourceName, interval));
    }

    Interval theInterval;
    if (interval == null || interval.isEmpty()) {
        DateTime now = getCurrentTime();
        theInterval = new Interval(segmentMetadataQueryConfig.getDefaultHistory(), now);
    } else {
        theInterval = new Interval(interval);
    }

    TimelineLookup<String, ServerSelector> timeline = timelineServerView
            .getTimeline(new TableDataSource(dataSourceName));
    Iterable<TimelineObjectHolder<String, ServerSelector>> serversLookup = timeline != null
            ? timeline.lookup(theInterval)
            : null;
    if (serversLookup == null || Iterables.isEmpty(serversLookup)) {
        return Collections.EMPTY_MAP;
    }
    Map<Interval, Object> servedIntervals = new TreeMap<>(new Comparator<Interval>() {
        @Override
        public int compare(Interval o1, Interval o2) {
            if (o1.equals(o2) || o1.overlaps(o2)) {
                return 0;
            } else {
                return o1.isBefore(o2) ? -1 : 1;
            }
        }
    });

    for (TimelineObjectHolder<String, ServerSelector> holder : serversLookup) {
        final Set<Object> dimensions = Sets.newHashSet();
        final Set<Object> metrics = Sets.newHashSet();
        final PartitionHolder<ServerSelector> partitionHolder = holder.getObject();
        if (partitionHolder.isComplete()) {
            for (ServerSelector server : partitionHolder.payloads()) {
                final DataSegment segment = server.getSegment();
                dimensions.addAll(segment.getDimensions());
                metrics.addAll(segment.getMetrics());
            }
        }

        servedIntervals.put(holder.getInterval(),
                ImmutableMap.of(KEY_DIMENSIONS, dimensions, KEY_METRICS, metrics));
    }

    //collapse intervals if they abut and have same set of columns
    Map<String, Object> result = Maps.newLinkedHashMap();
    Interval curr = null;
    Map<String, Set<String>> cols = null;
    for (Map.Entry<Interval, Object> e : servedIntervals.entrySet()) {
        Interval ival = e.getKey();
        if (curr != null && curr.abuts(ival) && cols.equals(e.getValue())) {
            curr = curr.withEnd(ival.getEnd());
        } else {
            if (curr != null) {
                result.put(curr.toString(), cols);
            }
            curr = ival;
            cols = (Map<String, Set<String>>) e.getValue();
        }
    }
    //add the last one in
    if (curr != null) {
        result.put(curr.toString(), cols);
    }
    return result;
}
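
Two interval tricks are at work here: the TreeMap comparator reports overlapping intervals as equal, so served intervals that overlap collapse onto a single key, and the final loop then uses abuts together with an equality check on the column sets to stitch adjacent intervals into one entry via withEnd.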

From source file:org.antonini.util.time.TimeSliceTools.java

License:Open Source License

/**
 * @param slice the time slice
 * @param validDate the time interval
 * @return true if the time slice's validTime overlaps or abuts the given time interval
 */
public static boolean checkTimeSliceValidForTime(AbstractAIXMTimeSliceType slice, Interval validDate) {
    Interval sliceDuration = GMLTimeParser.parseTimePrimitive(slice.getValidTime());

    return sliceDuration.overlaps(validDate) || sliceDuration.abuts(validDate);
}

From source file:org.apache.druid.java.util.common.JodaUtils.java

License:Apache License

public static ArrayList<Interval> condenseIntervals(Iterable<Interval> intervals) {
    ArrayList<Interval> retVal = new ArrayList<>();

    final SortedSet<Interval> sortedIntervals;

    if (intervals instanceof SortedSet) {
        sortedIntervals = (SortedSet<Interval>) intervals;
    } else {
        sortedIntervals = new TreeSet<>(Comparators.intervalsByStartThenEnd());
        for (Interval interval : intervals) {
            sortedIntervals.add(interval);
        }
    }

    if (sortedIntervals.isEmpty()) {
        return new ArrayList<>();
    }

    Iterator<Interval> intervalsIter = sortedIntervals.iterator();
    Interval currInterval = intervalsIter.next();
    while (intervalsIter.hasNext()) {
        Interval next = intervalsIter.next();

        if (currInterval.abuts(next)) {
            currInterval = new Interval(currInterval.getStart(), next.getEnd());
        } else if (currInterval.overlaps(next)) {
            DateTime nextEnd = next.getEnd();
            DateTime currEnd = currInterval.getEnd();
            currInterval = new Interval(currInterval.getStart(), nextEnd.isAfter(currEnd) ? nextEnd : currEnd);
        } else {
            retVal.add(currInterval);
            currInterval = next;
        }
    }
    retVal.add(currInterval);

    return retVal;
}

From source file:org.apache.druid.server.ClientInfoResource.java

License:Apache License

@GET
@Path("/{dataSourceName}")
@Produces(MediaType.APPLICATION_JSON)
@ResourceFilters(DatasourceResourceFilter.class)
public Map<String, Object> getDatasource(@PathParam("dataSourceName") String dataSourceName,
        @QueryParam("interval") String interval, @QueryParam("full") String full) {
    if (full == null) {
        return ImmutableMap.of(KEY_DIMENSIONS, getDataSourceDimensions(dataSourceName, interval), KEY_METRICS,
                getDataSourceMetrics(dataSourceName, interval));
    }

    Interval theInterval;
    if (interval == null || interval.isEmpty()) {
        DateTime now = getCurrentTime();
        theInterval = new Interval(segmentMetadataQueryConfig.getDefaultHistory(), now);
    } else {
        theInterval = Intervals.of(interval);
    }

    TimelineLookup<String, ServerSelector> timeline = timelineServerView
            .getTimeline(new TableDataSource(dataSourceName));
    Iterable<TimelineObjectHolder<String, ServerSelector>> serversLookup = timeline != null
            ? timeline.lookup(theInterval)
            : null;
    if (serversLookup == null || Iterables.isEmpty(serversLookup)) {
        return Collections.EMPTY_MAP;
    }
    Map<Interval, Object> servedIntervals = new TreeMap<>(new Comparator<Interval>() {
        @Override
        public int compare(Interval o1, Interval o2) {
            if (o1.equals(o2) || o1.overlaps(o2)) {
                return 0;
            } else {
                return o1.isBefore(o2) ? -1 : 1;
            }
        }
    });

    for (TimelineObjectHolder<String, ServerSelector> holder : serversLookup) {
        final Set<Object> dimensions = new HashSet<>();
        final Set<Object> metrics = new HashSet<>();
        final PartitionHolder<ServerSelector> partitionHolder = holder.getObject();
        if (partitionHolder.isComplete()) {
            for (ServerSelector server : partitionHolder.payloads()) {
                final DataSegment segment = server.getSegment();
                dimensions.addAll(segment.getDimensions());
                metrics.addAll(segment.getMetrics());
            }
        }

        servedIntervals.put(holder.getInterval(),
                ImmutableMap.of(KEY_DIMENSIONS, dimensions, KEY_METRICS, metrics));
    }

    //collapse intervals if they abut and have same set of columns
    Map<String, Object> result = Maps.newLinkedHashMap();
    Interval curr = null;
    Map<String, Set<String>> cols = null;
    for (Map.Entry<Interval, Object> e : servedIntervals.entrySet()) {
        Interval ival = e.getKey();
        if (curr != null && curr.abuts(ival) && cols.equals(e.getValue())) {
            curr = curr.withEnd(ival.getEnd());
        } else {
            if (curr != null) {
                result.put(curr.toString(), cols);
            }
            curr = ival;
            cols = (Map<String, Set<String>>) e.getValue();
        }
    }
    //add the last one in
    if (curr != null) {
        result.put(curr.toString(), cols);
    }
    return result;
}

From source file:org.apache.druid.server.coordinator.helper.NewestSegmentFirstIterator.java

License:Apache License

/**
 * Find segments to compact together for the given intervalToSearch. It progressively searches the given
 * intervalToSearch in time order (latest first). The timeline lookup duration is one day: the timeline is
 * looked up for the last day of the given intervalToSearch, and the preceding day is searched next if the
 * segments found so far are not large enough to compact. This repeats until enough segments are found.
 *
 * @param compactibleTimelineObjectHolderCursor timeline iterator
 * @param config           compaction config
 *
 * @return segments to compact
 */
private static SegmentsToCompact findSegmentsToCompact(
        final CompactibleTimelineObjectHolderCursor compactibleTimelineObjectHolderCursor,
        final DataSourceCompactionConfig config) {
    final long inputSegmentSize = config.getInputSegmentSizeBytes();
    final int maxNumSegmentsToCompact = config.getMaxNumSegmentsToCompact();
    final SegmentsToCompact segmentsToCompact = new SegmentsToCompact();

    // Finds segments to compact together while iterating timeline from latest to oldest
    while (compactibleTimelineObjectHolderCursor.hasNext()
            && segmentsToCompact.getTotalSize() < inputSegmentSize
            && segmentsToCompact.getNumSegments() < maxNumSegmentsToCompact) {
        final TimelineObjectHolder<String, DataSegment> timeChunkHolder = Preconditions
                .checkNotNull(compactibleTimelineObjectHolderCursor.get(), "timelineObjectHolder");
        final List<PartitionChunk<DataSegment>> chunks = Lists
                .newArrayList(timeChunkHolder.getObject().iterator());
        final long timeChunkSizeBytes = chunks.stream().mapToLong(chunk -> chunk.getObject().getSize()).sum();

        final boolean isSameOrAbuttingInterval;
        final Interval lastInterval = segmentsToCompact.getIntervalOfLastSegment();
        if (lastInterval == null) {
            isSameOrAbuttingInterval = true;
        } else {
            final Interval currentInterval = chunks.get(0).getObject().getInterval();
            isSameOrAbuttingInterval = currentInterval.isEqual(lastInterval)
                    || currentInterval.abuts(lastInterval);
        }

        // The segments in a holder should be added all together or not.
        final boolean isCompactibleSize = SegmentCompactorUtil.isCompactibleSize(inputSegmentSize,
                segmentsToCompact.getTotalSize(), timeChunkSizeBytes);
        final boolean isCompactibleNum = SegmentCompactorUtil.isCompactibleNum(maxNumSegmentsToCompact,
                segmentsToCompact.getNumSegments(), chunks.size());
        if (isCompactibleSize && isCompactibleNum && isSameOrAbuttingInterval && segmentsToCompact.isEmpty()) {
            chunks.forEach(chunk -> segmentsToCompact.add(chunk.getObject()));
        } else {
            if (segmentsToCompact.getNumSegments() > 1) {
                // We found some segments to compact and cannot add more. End here.
                return segmentsToCompact;
            } else {
                if (!SegmentCompactorUtil.isCompactibleSize(inputSegmentSize, 0, timeChunkSizeBytes)) {
                    final DataSegment segment = chunks.get(0).getObject();
                    segmentsToCompact.clear();
                    log.warn(
                            "shardSize[%d] for dataSource[%s] and interval[%s] is larger than inputSegmentSize[%d]."
                                    + " Continue to the next shard.",
                            timeChunkSizeBytes, segment.getDataSource(), segment.getInterval(),
                            inputSegmentSize);
                } else if (maxNumSegmentsToCompact < chunks.size()) {
                    final DataSegment segment = chunks.get(0).getObject();
                    segmentsToCompact.clear();
                    log.warn("The number of segments[%d] for dataSource[%s] and interval[%s] is larger than "
                            + "maxNumSegmentsToCompact[%d]. If you see lots of shards are being skipped due to too many "
                            + "segments, consider increasing 'numTargetCompactionSegments' and "
                            + "'druid.indexer.runner.maxZnodeBytes'. Continue to the next shard.",
                            chunks.size(), segment.getDataSource(), segment.getInterval(),
                            maxNumSegmentsToCompact);
                } else {
                    if (segmentsToCompact.getNumSegments() == 1) {
                        // We found a segment which is smaller than targetCompactionSize but too large to compact with other
                        // segments. Skip this one.
                        segmentsToCompact.clear();
                        chunks.forEach(chunk -> segmentsToCompact.add(chunk.getObject()));
                    } else {
                        throw new ISE(
                                "Cannot compact segments[%s]. shardBytes[%s], numSegments[%s] "
                                        + "with current segmentsToCompact[%s]",
                                chunks.stream().map(PartitionChunk::getObject).collect(Collectors.toList()),
                                timeChunkSizeBytes, chunks.size(), segmentsToCompact);
                    }
                }
            }
        }

        compactibleTimelineObjectHolderCursor.next();
    }

    if (segmentsToCompact.getNumSegments() == 1) {
        // Don't compact a single segment
        segmentsToCompact.clear();
    }

    return segmentsToCompact;
}
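
Here abuts serves as a contiguity guard: a time chunk is appended to the pending compaction set only when its interval equals or abuts the interval of the last segment already selected, which keeps each compaction task confined to a single contiguous stretch of the timeline.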