Example usage for org.joda.time Interval getEnd

List of usage examples for org.joda.time Interval getEnd

Introduction

On this page you can find example usage for org.joda.time Interval getEnd.

Prototype

public DateTime getEnd() 

Document

Gets the end of this time interval, which is exclusive, as a DateTime.
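
For orientation, here is a minimal, self-contained sketch (not taken from any of the projects below; the class name and dates are made up for illustration) showing what getEnd() returns:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class GetEndExample {
    public static void main(String[] args) {
        DateTime start = new DateTime(2020, 1, 1, 0, 0, DateTimeZone.UTC);
        DateTime end = new DateTime(2020, 1, 2, 0, 0, DateTimeZone.UTC);
        Interval interval = new Interval(start, end);

        // getEnd() returns the exclusive end instant as a DateTime.
        DateTime intervalEnd = interval.getEnd();
        System.out.println(intervalEnd);                      // 2020-01-02T00:00:00.000Z
        System.out.println(interval.contains(intervalEnd));   // false: the end is exclusive
    }
}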

Usage

From source file:io.druid.server.ClientInfoResource.java

License:Apache License

@GET
@Path("/{dataSourceName}")
@Produces(MediaType.APPLICATION_JSON)
public Map<String, Object> getDatasource(@PathParam("dataSourceName") String dataSourceName,
        @QueryParam("interval") String interval, @QueryParam("full") String full) {
    if (full == null) {
        return ImmutableMap.<String, Object>of(KEY_DIMENSIONS,
                getDatasourceDimensions(dataSourceName, interval), KEY_METRICS,
                getDatasourceMetrics(dataSourceName, interval));
    }

    Interval theInterval;
    if (interval == null || interval.isEmpty()) {
        DateTime now = getCurrentTime();
        theInterval = new Interval(segmentMetadataQueryConfig.getDefaultHistory(), now);
    } else {
        theInterval = new Interval(interval);
    }

    TimelineLookup<String, ServerSelector> timeline = timelineServerView
            .getTimeline(new TableDataSource(dataSourceName));
    Iterable<TimelineObjectHolder<String, ServerSelector>> serversLookup = timeline != null
            ? timeline.lookup(theInterval)
            : null;
    if (serversLookup == null || Iterables.isEmpty(serversLookup)) {
        return Collections.EMPTY_MAP;
    }
    Map<Interval, Object> servedIntervals = new TreeMap<>(new Comparator<Interval>() {
        @Override
        public int compare(Interval o1, Interval o2) {
            if (o1.equals(o2) || o1.overlaps(o2)) {
                return 0;
            } else {
                return o1.isBefore(o2) ? -1 : 1;
            }
        }
    });

    for (TimelineObjectHolder<String, ServerSelector> holder : serversLookup) {
        final Set<Object> dimensions = Sets.newHashSet();
        final Set<Object> metrics = Sets.newHashSet();
        final PartitionHolder<ServerSelector> partitionHolder = holder.getObject();
        if (partitionHolder.isComplete()) {
            for (ServerSelector server : partitionHolder.payloads()) {
                final DataSegment segment = server.getSegment();
                dimensions.addAll(segment.getDimensions());
                metrics.addAll(segment.getMetrics());
            }
        }

        servedIntervals.put(holder.getInterval(),
                ImmutableMap.of(KEY_DIMENSIONS, dimensions, KEY_METRICS, metrics));
    }

    // Collapse intervals if they abut and have the same set of columns
    Map<String, Object> result = Maps.newLinkedHashMap();
    Interval curr = null;
    Map<String, Set<String>> cols = null;
    for (Map.Entry<Interval, Object> e : servedIntervals.entrySet()) {
        Interval ival = e.getKey();
        if (curr != null && curr.abuts(ival) && cols.equals(e.getValue())) {
            curr = curr.withEnd(ival.getEnd());
        } else {
            if (curr != null) {
                result.put(curr.toString(), cols);
            }
            curr = ival;
            cols = (Map<String, Set<String>>) e.getValue();
        }
    }
    //add the last one in
    if (curr != null) {
        result.put(curr.toString(), cols);
    }
    return result;
}

From source file:io.druid.server.coordinator.helper.NewestSegmentFirstIterator.java

License:Apache License

NewestSegmentFirstIterator(Map<String, DataSourceCompactionConfig> compactionConfigs,
        Map<String, VersionedIntervalTimeline<String, DataSegment>> dataSources) {
    this.compactionConfigs = compactionConfigs;
    this.dataSources = dataSources;
    this.searchIntervals = new HashMap<>(dataSources.size());
    this.searchEndDates = new HashMap<>(dataSources.size());

    for (Entry<String, VersionedIntervalTimeline<String, DataSegment>> entry : dataSources.entrySet()) {
        final String dataSource = entry.getKey();
        final VersionedIntervalTimeline<String, DataSegment> timeline = entry.getValue();
        final DataSourceCompactionConfig config = compactionConfigs.get(dataSource);

        if (config != null && !timeline.isEmpty()) {
            final Interval searchInterval = findInitialSearchInterval(timeline,
                    config.getSkipOffsetFromLatest());
            searchIntervals.put(dataSource, searchInterval);
            searchEndDates.put(dataSource, searchInterval.getEnd());
        }
    }

    for (Entry<String, DataSourceCompactionConfig> entry : compactionConfigs.entrySet()) {
        final String dataSourceName = entry.getKey();
        final DataSourceCompactionConfig config = entry.getValue();

        if (config == null) {
            throw new ISE("Unknown dataSource[%s]", dataSourceName);
        }

        updateQueue(dataSourceName, config);
    }
}

From source file:io.druid.server.coordinator.helper.NewestSegmentFirstIterator.java

License:Apache License

/**
 * Find segments to compact together for the given intervalToSearch. It progressively searches the given
 * intervalToSearch in time order (latest first). The timeline lookup duration is one day: the timeline is
 * looked up for the last day of the given intervalToSearch, and the preceding day is searched next if the
 * segments found so far are not large enough to compact. This is repeated until enough segments are found.
 * (A standalone sketch of this day-by-day, latest-first walk follows the method below.)
 *
 * @param timeline         timeline of a dataSource
 * @param intervalToSearch interval to search
 * @param searchEnd        the end of the whole searchInterval
 * @param config           compaction config
 *
 * @return a pair of the remaining interval to search (intervalToSearch minus the intervals of the found segments)
 *         and the segments to compact
 */
@VisibleForTesting
static Pair<Interval, SegmentsToCompact> findSegmentsToCompact(
        final VersionedIntervalTimeline<String, DataSegment> timeline, final Interval intervalToSearch,
        final DateTime searchEnd, final DataSourceCompactionConfig config) {
    final long targetCompactionSize = config.getTargetCompactionSizeBytes();
    final int numTargetSegments = config.getNumTargetCompactionSegments();
    final List<DataSegment> segmentsToCompact = new ArrayList<>();
    Interval searchInterval = intervalToSearch;
    long totalSegmentsToCompactBytes = 0;

    // Finds segments to compact together while iterating searchInterval from latest to oldest
    while (!Intervals.isEmpty(searchInterval) && totalSegmentsToCompactBytes < targetCompactionSize
            && segmentsToCompact.size() < numTargetSegments) {
        final Interval lookupInterval = SegmentCompactorUtil.getNextLoopupInterval(searchInterval);
        // holders are sorted by their interval
        final List<TimelineObjectHolder<String, DataSegment>> holders = timeline.lookup(lookupInterval);

        if (holders.isEmpty()) {
            // We found nothing. Continue to the next interval.
            searchInterval = SegmentCompactorUtil.removeIntervalFromEnd(searchInterval, lookupInterval);
            continue;
        }

        for (int i = holders.size() - 1; i >= 0; i--) {
            final TimelineObjectHolder<String, DataSegment> holder = holders.get(i);
            final List<PartitionChunk<DataSegment>> chunks = Lists.newArrayList(holder.getObject().iterator());
            final long partitionBytes = chunks.stream().mapToLong(chunk -> chunk.getObject().getSize()).sum();
            if (chunks.size() == 0 || partitionBytes == 0) {
                log.warn("Skip empty shard[%s]", holder);
                continue;
            }

            if (!intervalToSearch.contains(chunks.get(0).getObject().getInterval())) {
                searchInterval = SegmentCompactorUtil.removeIntervalFromEnd(searchInterval, new Interval(
                        chunks.get(0).getObject().getInterval().getStart(), searchInterval.getEnd()));
                continue;
            }

            // Addition of the segments of a partition should be atomic.
            if (SegmentCompactorUtil.isCompactible(targetCompactionSize, totalSegmentsToCompactBytes,
                    partitionBytes) && segmentsToCompact.size() + chunks.size() <= numTargetSegments) {
                chunks.forEach(chunk -> segmentsToCompact.add(chunk.getObject()));
                totalSegmentsToCompactBytes += partitionBytes;
            } else {
                if (segmentsToCompact.size() > 1) {
                    // We found some segments to compact and cannot add more. End here.
                    return checkCompactableSizeForLastSegmentOrReturn(segmentsToCompact,
                            totalSegmentsToCompactBytes, timeline, searchInterval, searchEnd, config);
                } else {
                    // (*) Discard segments found so far because we can't compact them anyway.
                    final int numSegmentsToCompact = segmentsToCompact.size();
                    segmentsToCompact.clear();

                    if (!SegmentCompactorUtil.isCompactible(targetCompactionSize, 0, partitionBytes)) {
                        // TODO: this should be changed to compact many small segments into a few large segments
                        final DataSegment segment = chunks.get(0).getObject();
                        log.warn(
                                "shardSize[%d] for dataSource[%s] and interval[%s] is larger than targetCompactionSize[%d]."
                                        + " Contitnue to the next shard.",
                                partitionBytes, segment.getDataSource(), segment.getInterval(),
                                targetCompactionSize);
                    } else if (numTargetSegments < chunks.size()) {
                        final DataSegment segment = chunks.get(0).getObject();
                        log.warn(
                                "The number of segments[%d] for dataSource[%s] and interval[%s] is larger than "
                                        + "numTargetCompactSegments[%d]. If you see lots of shards are being skipped due to too many "
                                        + "segments, consider increasing 'numTargetCompactionSegments' and "
                                        + "'druid.indexer.runner.maxZnodeBytes'. Contitnue to the next shard.",
                                chunks.size(), segment.getDataSource(), segment.getInterval(),
                                numTargetSegments);
                    } else {
                        if (numSegmentsToCompact == 1) {
                            // We found a segment which is smaller than targetCompactionSize but too large to compact with other
                            // segments. Skip this one.
                            // Note that segmentsToCompact is already cleared at (*).
                            chunks.forEach(chunk -> segmentsToCompact.add(chunk.getObject()));
                            totalSegmentsToCompactBytes = partitionBytes;
                        } else {
                            throw new ISE("Cannot compact segments[%s]. shardBytes[%s], numSegments[%s]",
                                    chunks.stream().map(PartitionChunk::getObject).collect(Collectors.toList()),
                                    partitionBytes, chunks.size());
                        }
                    }
                }
            }

            // Update searchInterval
            searchInterval = SegmentCompactorUtil.removeIntervalFromEnd(searchInterval,
                    new Interval(chunks.get(0).getObject().getInterval().getStart(), searchInterval.getEnd()));
        }
    }

    if (segmentsToCompact.size() == 0 || segmentsToCompact.size() == 1) {
        if (Intervals.isEmpty(searchInterval)) {
            // We found nothing to compact. End here.
            return Pair.of(intervalToSearch, new SegmentsToCompact(ImmutableList.of()));
        } else {
            // We found only one segment. Continue searching the remaining interval for more.
            return findSegmentsToCompact(timeline, searchInterval, searchEnd, config);
        }
    }

    return checkCompactableSizeForLastSegmentOrReturn(segmentsToCompact, totalSegmentsToCompactBytes, timeline,
            searchInterval, searchEnd, config);
}
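
As promised in the javadoc above, here is a minimal, self-contained sketch (not part of the Druid source; the class name and interval values are made up for illustration) of the day-by-day, latest-first walk over a search interval using plain Joda-Time:

import org.joda.time.Duration;
import org.joda.time.Interval;

public class BackwardDailySearchSketch {
    public static void main(String[] args) {
        Interval searchInterval = Interval.parse("2020-01-01T00:00:00Z/2020-01-04T00:00:00Z");
        Duration oneDay = Duration.standardDays(1);

        // Walk the search interval from its (exclusive) end backwards, one day at a time.
        while (searchInterval.toDuration().isLongerThan(Duration.ZERO)) {
            Interval lookupInterval = searchInterval.toDuration().isLongerThan(oneDay)
                    ? new Interval(searchInterval.getEnd().minus(oneDay), searchInterval.getEnd())
                    : searchInterval;
            System.out.println("look up: " + lookupInterval);
            // Shrink the search interval by cutting the looked-up part off its end.
            searchInterval = new Interval(searchInterval.getStart(), lookupInterval.getStart());
        }
    }
}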

From source file:io.druid.server.coordinator.helper.SegmentCompactorUtil.java

License:Apache License

/**
 * Return an interval for looking up the timeline.
 * If {@code totalInterval} is longer than {@link #LOOKUP_PERIOD}, it returns an interval of length
 * {@link #LOOKUP_PERIOD} ending at the end of {@code totalInterval}; otherwise it returns {@code totalInterval}.
 */
static Interval getNextLoopupInterval(Interval totalInterval) {
    final Duration givenDuration = totalInterval.toDuration();
    return givenDuration.isLongerThan(LOOKUP_DURATION) ? new Interval(LOOKUP_PERIOD, totalInterval.getEnd())
            : totalInterval;
}

From source file:io.druid.server.coordinator.helper.SegmentCompactorUtil.java

License:Apache License

/**
 * Removes {@code smallInterval} from {@code largeInterval}. The ends of both intervals must be the same.
 *
 * @return the interval {@code largeInterval} - {@code smallInterval}, i.e. from the start of
 *         {@code largeInterval} to the start of {@code smallInterval}.
 */
static Interval removeIntervalFromEnd(Interval largeInterval, Interval smallInterval) {
    Preconditions.checkArgument(largeInterval.getEnd().equals(smallInterval.getEnd()),
            "end should be same. largeInterval[%s] smallInterval[%s]", largeInterval, smallInterval);
    return new Interval(largeInterval.getStart(), smallInterval.getStart());
}
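
As an aside, here is a minimal, self-contained sketch (not part of the Druid source; the class name and interval values are hypothetical) of the same end-alignment pattern with plain Joda-Time:

import org.joda.time.Interval;

public class RemoveFromEndSketch {
    public static void main(String[] args) {
        Interval large = Interval.parse("2020-01-01T00:00:00Z/2020-01-10T00:00:00Z");
        Interval small = Interval.parse("2020-01-05T00:00:00Z/2020-01-10T00:00:00Z");

        // Both intervals share the same exclusive end, so the remainder is
        // everything in large that precedes small.
        if (large.getEnd().equals(small.getEnd())) {
            Interval remainder = new Interval(large.getStart(), small.getStart());
            System.out.println(remainder); // 2020-01-01 through 2020-01-05 (end exclusive)
        }
    }
}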

From source file:io.druid.timeline.VersionedIntervalTimeline.java

License:Apache License

/**
 *
 * @param timeline
 * @param key
 * @param entry
 * @return boolean flag indicating whether or not we inserted or discarded something
 */
private boolean addAtKey(NavigableMap<Interval, TimelineEntry> timeline, Interval key, TimelineEntry entry) {
    boolean retVal = false;
    Interval currKey = key;
    Interval entryInterval = entry.getTrueInterval();

    if (!currKey.overlaps(entryInterval)) {
        return false;
    }

    while (entryInterval != null && currKey != null && currKey.overlaps(entryInterval)) {
        Interval nextKey = timeline.higherKey(currKey);

        int versionCompare = versionComparator.compare(entry.getVersion(), timeline.get(currKey).getVersion());

        if (versionCompare < 0) {
            if (currKey.contains(entryInterval)) {
                return true;
            } else if (currKey.getStart().isBefore(entryInterval.getStart())) {
                entryInterval = new Interval(currKey.getEnd(), entryInterval.getEnd());
            } else {
                addIntervalToTimeline(new Interval(entryInterval.getStart(), currKey.getStart()), entry,
                        timeline);

                if (entryInterval.getEnd().isAfter(currKey.getEnd())) {
                    entryInterval = new Interval(currKey.getEnd(), entryInterval.getEnd());
                } else {
                    entryInterval = null; // discard this entry
                }
            }
        } else if (versionCompare > 0) {
            TimelineEntry oldEntry = timeline.remove(currKey);

            if (currKey.contains(entryInterval)) {
                addIntervalToTimeline(new Interval(currKey.getStart(), entryInterval.getStart()), oldEntry,
                        timeline);
                addIntervalToTimeline(new Interval(entryInterval.getEnd(), currKey.getEnd()), oldEntry,
                        timeline);
                addIntervalToTimeline(entryInterval, entry, timeline);

                return true;
            } else if (currKey.getStart().isBefore(entryInterval.getStart())) {
                addIntervalToTimeline(new Interval(currKey.getStart(), entryInterval.getStart()), oldEntry,
                        timeline);
            } else if (entryInterval.getEnd().isBefore(currKey.getEnd())) {
                addIntervalToTimeline(new Interval(entryInterval.getEnd(), currKey.getEnd()), oldEntry,
                        timeline);
            }
        } else {
            if (timeline.get(currKey).equals(entry)) {
                // This occurs when restoring segments
                timeline.remove(currKey);
            } else {
                throw new UnsupportedOperationException(
                        String.format("Cannot add overlapping segments [%s and %s] with the same version [%s]",
                                currKey, entryInterval, entry.getVersion()));
            }
        }

        currKey = nextKey;
        retVal = true;
    }

    addIntervalToTimeline(entryInterval, entry, timeline);

    return retVal;
}

From source file:Model.ModeloTabelaPerdas.java

@Override
public Object getValueAt(int rowIndex, int columnIndex) {
    Perda perda = perdas.get(rowIndex);

    switch (columnIndex) {
    case 0:
        return perda.getTipoDeFlor();
    case 1:
        String numero1 = String.valueOf(perda.getSemanas().get(0).getNumero());
        Interval intervalo1 = perda.getSemanas().get(0).getIntervalo();

        return "<html>Semana " + numero1 + "<br/>" + intervalo1.getStart() + "-" + intervalo1.getEnd()
                + "</html>";
    case 2:
        return "";
    case 3:
        String numero2 = String.valueOf(perda.getSemanas().get(0).getNumero());
        Interval intervalo2 = perda.getSemanas().get(0).getIntervalo();

        return "<html>Semana " + numero2 + "<br/>" + intervalo2.getStart() + "-" + intervalo2.getEnd()
                + "</html>";
    case 4:
        return "";
    case 5:
        String numero3 = String.valueOf(perda.getSemanas().get(0).getNumero());
        Interval intervalo3 = perda.getSemanas().get(0).getIntervalo();

        return "<html>Semana " + numero3 + "<br/>" + intervalo3.getStart() + "-" + intervalo3.getEnd()
                + "</html>";
    case 6:
        return "";
    case 7:
        String numero4 = String.valueOf(perda.getSemanas().get(0).getNumero());
        Interval intervalo4 = perda.getSemanas().get(0).getIntervalo();

        return "<html>Semana " + numero4 + "<br/>" + intervalo4.getStart() + "-" + intervalo4.getEnd()
                + "</html>";
    case 8:
        return "";
    case 9:
        String numero5 = String.valueOf(perda.getSemanas().get(0).getNumero());
        Interval intervalo5 = perda.getSemanas().get(0).getIntervalo();

        return "<html>Semana " + numero5 + "<br/>" + intervalo5.getStart() + "-" + intervalo5.getEnd()
                + "</html>";
    case 10:
        return "";
    case 11:
        String numero6 = String.valueOf(perda.getSemanas().get(0).getNumero());
        Interval intervalo6 = perda.getSemanas().get(0).getIntervalo();

        return "<html>Semana " + numero6 + "<br/>" + intervalo6.getStart() + "-" + intervalo6.getEnd()
                + "</html>";
    case 12:
        return "";
    default:
        return null;
    }

}

From source file:net.lshift.diffa.config.DailyPeriodUnit.java

License:Apache License

@Override
public boolean isCovering(Interval interval) {
    return interval != null
            && floor(interval.getEnd()).minusDays(1).compareTo(ceiling(interval.getStart())) >= 0;
}

From source file:net.lshift.diffa.config.MonthlyPeriodUnit.java

License:Apache License

@Override
public boolean isCovering(Interval interval) {
    return interval != null
            && floor(interval.getEnd()).minusMonths(1).compareTo(ceiling(interval.getStart())) >= 0;
}

From source file:net.lshift.diffa.config.PeriodUnit.java

License:Apache License

public boolean isCovering(Interval interval) {
    return interval != null
            && subtractOneUnit(floor(interval.getEnd())).compareTo(ceiling(interval.getStart())) >= 0;
}