List of usage examples for the org.joda.time.Interval constructor
public Interval(Object interval, Chronology chronology)
(Note: the examples below also exercise the related overloads Interval(ReadableInstant, ReadableInstant), Interval(long, long), and Interval(ReadablePeriod, ReadableInstant).)
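Since none of the scraped examples below call this particular overload, here is a minimal standalone sketch of it, assuming Joda-Time's standard String converter (which accepts an ISO-8601 "start/end" interval string as the Object argument):

import org.joda.time.Interval;
import org.joda.time.chrono.ISOChronology;

public class IntervalFromObjectExample {
    public static void main(String[] args) {
        // The Object argument may be an ISO-8601 "start/end" string; the
        // chronology (here ISO in UTC) controls how the instants are interpreted.
        Interval interval = new Interval("2013-01-01T00:00:00Z/2013-02-01T00:00:00Z",
                ISOChronology.getInstanceUTC());
        System.out.println(interval.getStart()); // 2013-01-01T00:00:00.000Z
        System.out.println(interval.getEnd());   // 2013-02-01T00:00:00.000Z
    }
}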
From source file:com.metamx.druid.indexing.common.task.IndexTask.java
License:Open Source License
@JsonCreator public IndexTask(@JsonProperty("id") String id, @JsonProperty("dataSource") String dataSource, @JsonProperty("granularitySpec") GranularitySpec granularitySpec, @JsonProperty("spatialDimensions") List<SpatialDimensionSchema> spatialDimensions, @JsonProperty("aggregators") AggregatorFactory[] aggregators, @JsonProperty("indexGranularity") QueryGranularity indexGranularity, @JsonProperty("targetPartitionSize") long targetPartitionSize, @JsonProperty("firehose") FirehoseFactory firehoseFactory, @JsonProperty("rowFlushBoundary") int rowFlushBoundary) { super(/*from w w w . j av a 2s. c o m*/ // _not_ the version, just something uniqueish id != null ? id : String.format("index_%s_%s", dataSource, new DateTime().toString()), dataSource, new Interval(granularitySpec.bucketIntervals().first().getStart(), granularitySpec.bucketIntervals().last().getEnd())); this.granularitySpec = Preconditions.checkNotNull(granularitySpec, "granularitySpec"); this.spatialDimensions = (spatialDimensions == null) ? Lists.<SpatialDimensionSchema>newArrayList() : spatialDimensions; this.aggregators = aggregators; this.indexGranularity = indexGranularity; this.targetPartitionSize = targetPartitionSize; this.firehoseFactory = firehoseFactory; this.rowFlushBoundary = rowFlushBoundary; }
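The Interval here spans the earliest bucket start to the latest bucket end, relying on bucketIntervals() being sorted by start. A minimal sketch of the same pattern with illustrative names (not Druid's):

import java.util.SortedSet;
import java.util.TreeSet;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class UmbrellaIntervalExample {
    public static void main(String[] args) {
        // Interval is not Comparable, so the sorted set needs an explicit
        // comparator ordering by start instant.
        SortedSet<Interval> buckets = new TreeSet<>(
                (a, b) -> Long.compare(a.getStartMillis(), b.getStartMillis()));
        buckets.add(new Interval(new DateTime(2013, 1, 1, 0, 0, DateTimeZone.UTC),
                new DateTime(2013, 1, 2, 0, 0, DateTimeZone.UTC)));
        buckets.add(new Interval(new DateTime(2013, 1, 2, 0, 0, DateTimeZone.UTC),
                new DateTime(2013, 1, 3, 0, 0, DateTimeZone.UTC)));

        // Same pattern as the task above: first bucket's start to last bucket's end.
        Interval umbrella = new Interval(buckets.first().getStart(), buckets.last().getEnd());
        System.out.println(umbrella); // 2013-01-01T00:00:00.000Z/2013-01-03T00:00:00.000Z
    }
}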
From source file:com.metamx.druid.indexing.common.task.MergeTaskBase.java
License:Open Source License
private static Interval computeMergedInterval(final List<DataSegment> segments)
{
    Preconditions.checkArgument(segments.size() > 0, "segments.size() > 0");

    DateTime start = null;
    DateTime end = null;

    for (final DataSegment segment : segments) {
        if (start == null || segment.getInterval().getStart().isBefore(start)) {
            start = segment.getInterval().getStart();
        }
        if (end == null || segment.getInterval().getEnd().isAfter(end)) {
            end = segment.getInterval().getEnd();
        }
    }

    return new Interval(start, end);
}
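A usage note on the final constructor call: new Interval(start, end) throws IllegalArgumentException when the end precedes the start, so the min/max scan above guarantees a valid result for any non-empty segment list. A small illustration with arbitrary values:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class MergedIntervalExample {
    public static void main(String[] args) {
        DateTime start = new DateTime(2013, 1, 1, 0, 0, DateTimeZone.UTC);
        DateTime end = new DateTime(2013, 1, 5, 0, 0, DateTimeZone.UTC);

        // Valid: start precedes end.
        System.out.println(new Interval(start, end)); // 2013-01-01T00:00:00.000Z/2013-01-05T00:00:00.000Z

        // Invalid: the constructor rejects end-before-start.
        try {
            new Interval(end, start);
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}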
From source file:com.metamx.druid.indexing.coordinator.TaskLockbox.java
License:Open Source License
/**
 * Return all locks that overlap some search interval.
 */
private List<TaskLockPosse> findLockPossesForInterval(final String dataSource, final Interval interval)
{
    giant.lock();

    try {
        final NavigableMap<Interval, TaskLockPosse> dsRunning = running.get(dataSource);

        if (dsRunning == null) {
            // No locks at all
            return Collections.emptyList();
        } else {
            // Tasks are indexed by locked interval, which are sorted by interval start. Intervals are non-overlapping, so:
            final NavigableSet<Interval> dsLockbox = dsRunning.navigableKeySet();
            final Iterable<Interval> searchIntervals = Iterables.concat(
                // Single interval that starts at or before ours
                Collections.singletonList(
                    dsLockbox.floor(new Interval(interval.getStart(), new DateTime(Long.MAX_VALUE)))),
                // All intervals that start somewhere between our start instant (exclusive) and end instant (exclusive)
                dsLockbox.subSet(
                    new Interval(interval.getStart(), new DateTime(Long.MAX_VALUE)), false,
                    new Interval(interval.getEnd(), interval.getEnd()), false));

            return Lists.newArrayList(
                FunctionalIterable.create(searchIntervals)
                    .filter(new Predicate<Interval>() {
                        @Override
                        public boolean apply(@Nullable Interval searchInterval) {
                            return searchInterval != null && searchInterval.overlaps(interval);
                        }
                    })
                    .transform(new Function<Interval, TaskLockPosse>() {
                        @Override
                        public TaskLockPosse apply(Interval interval) {
                            return dsRunning.get(interval);
                        }
                    }));
        }
    } finally {
        giant.unlock();
    }
}
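Interval does not implement Comparable, so the NavigableMap above must have been built with an explicit comparator ordering keys by start instant. A minimal sketch of that setup and the floor probe, with illustrative names and values:

import java.util.Comparator;
import java.util.NavigableMap;
import java.util.TreeMap;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class LockSearchSketch {
    public static void main(String[] args) {
        // Order locks by start instant (a fuller comparator would also break ties on end).
        NavigableMap<Interval, String> running = new TreeMap<>(
                Comparator.comparingLong(Interval::getStartMillis));
        running.put(interval(1, 3), "lock-a");
        running.put(interval(3, 6), "lock-b");
        running.put(interval(8, 9), "lock-c");

        Interval search = interval(2, 8);

        // The floor probe uses a far-future end so it compares purely by start.
        Interval atOrBefore = running.navigableKeySet()
                .floor(new Interval(search.getStart(), new DateTime(Long.MAX_VALUE)));
        System.out.println(atOrBefore); // lock-a's interval: starts at or before our start

        // Candidates must still be checked with overlaps(), as in the code above.
        System.out.println(atOrBefore.overlaps(search)); // true
    }

    private static Interval interval(int startDay, int endDay) {
        return new Interval(new DateTime(2013, 1, startDay, 0, 0, DateTimeZone.UTC),
                new DateTime(2013, 1, endDay, 0, 0, DateTimeZone.UTC));
    }
}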
From source file:com.metamx.druid.master.DruidMasterSegmentMerger.java
License:Open Source License
@Override
public DruidMasterRuntimeParams run(DruidMasterRuntimeParams params)
{
    MasterStats stats = new MasterStats();

    Map<String, VersionedIntervalTimeline<String, DataSegment>> dataSources = Maps.newHashMap();

    // Find serviced segments by using a timeline
    for (DataSegment dataSegment : params.getAvailableSegments()) {
        VersionedIntervalTimeline<String, DataSegment> timeline = dataSources.get(dataSegment.getDataSource());
        if (timeline == null) {
            timeline = new VersionedIntervalTimeline<String, DataSegment>(Ordering.<String>natural());
            dataSources.put(dataSegment.getDataSource(), timeline);
        }
        timeline.add(dataSegment.getInterval(), dataSegment.getVersion(),
                dataSegment.getShardSpec().createChunk(dataSegment));
    }

    // Find segments to merge
    for (final Map.Entry<String, VersionedIntervalTimeline<String, DataSegment>> entry : dataSources.entrySet()) {
        // Get serviced segments from the timeline
        VersionedIntervalTimeline<String, DataSegment> timeline = entry.getValue();
        List<TimelineObjectHolder<String, DataSegment>> timelineObjects = timeline
                .lookup(new Interval(new DateTime(0), new DateTime("3000-01-01")));

        // Accumulate timelineObjects greedily until we reach our limits, then backtrack to the maximum complete set
        SegmentsToMerge segmentsToMerge = new SegmentsToMerge();

        for (int i = 0; i < timelineObjects.size(); i++) {
            segmentsToMerge.add(timelineObjects.get(i));

            if (segmentsToMerge.getByteCount() > params.getMergeBytesLimit()
                    || segmentsToMerge.getSegmentCount() >= params.getMergeSegmentsLimit()) {
                i -= segmentsToMerge.backtrack(params.getMergeBytesLimit());

                if (segmentsToMerge.getSegmentCount() > 1) {
                    stats.addToGlobalStat("mergedCount", mergeSegments(segmentsToMerge, entry.getKey()));
                }

                if (segmentsToMerge.getSegmentCount() == 0) {
                    // Backtracked all the way to zero. Increment by one so we continue to make progress.
                    i++;
                }

                segmentsToMerge = new SegmentsToMerge();
            }
        }

        // Finish any timelineObjects to merge that may have not hit threshold
        segmentsToMerge.backtrack(params.getMergeBytesLimit());
        if (segmentsToMerge.getSegmentCount() > 1) {
            stats.addToGlobalStat("mergedCount", mergeSegments(segmentsToMerge, entry.getKey()));
        }
    }

    return params.buildFromExisting().withMasterStats(stats).build();
}
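The lookup above builds an effectively all-time interval from epoch 0 to a far-future date; the year-3000 bound mirrors this code, not a Druid constant. A small sketch of that idiom:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class AllTimeIntervalExample {
    public static void main(String[] args) {
        // From the epoch to the year 3000: wide enough to cover any segment.
        Interval allTime = new Interval(new DateTime(0, DateTimeZone.UTC),
                new DateTime("3000-01-01T00:00:00Z", DateTimeZone.UTC));

        Interval segment = new Interval(new DateTime(2013, 6, 1, 0, 0, DateTimeZone.UTC),
                new DateTime(2013, 6, 2, 0, 0, DateTimeZone.UTC));
        System.out.println(allTime.contains(segment)); // true
    }
}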
From source file:com.metamx.druid.master.rules.PeriodDropRule.java
License:Open Source License
@Override
public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp)
{
    final Interval currInterval = new Interval(period, referenceTimestamp);
    return currInterval.contains(segment.getInterval());
}
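new Interval(period, referenceTimestamp) is the period-before-instant constructor: the interval ends at the reference timestamp and starts one period earlier. A small illustration with arbitrary values:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;
import org.joda.time.Period;

public class PeriodDropSketch {
    public static void main(String[] args) {
        DateTime reference = new DateTime(2013, 3, 1, 0, 0, DateTimeZone.UTC);

        // Ends at the reference instant, starts one period before it.
        Interval window = new Interval(Period.days(30), reference);
        System.out.println(window); // 2013-01-30T00:00:00.000Z/2013-03-01T00:00:00.000Z

        Interval segment = new Interval(new DateTime(2013, 2, 1, 0, 0, DateTimeZone.UTC),
                new DateTime(2013, 2, 2, 0, 0, DateTimeZone.UTC));
        System.out.println(window.contains(segment)); // true: fully inside the window
    }
}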
From source file:com.metamx.druid.master.rules.PeriodLoadRule.java
License:Open Source License
@Override
public boolean appliesTo(DataSegment segment, DateTime referenceTimestamp)
{
    final Interval currInterval = new Interval(period, referenceTimestamp);
    return currInterval.overlaps(segment.getInterval());
}
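The drop rule above uses contains (the segment must lie entirely within the window), while this load rule uses overlaps (any intersection qualifies). A small demo of the difference, with arbitrary values:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;
import org.joda.time.Period;

public class ContainsVsOverlaps {
    public static void main(String[] args) {
        DateTime reference = new DateTime(2013, 3, 1, 0, 0, DateTimeZone.UTC);
        Interval window = new Interval(Period.days(7), reference); // 2013-02-22/2013-03-01

        // Straddles the window's start: overlaps it but is not contained by it.
        Interval segment = new Interval(new DateTime(2013, 2, 21, 0, 0, DateTimeZone.UTC),
                new DateTime(2013, 2, 23, 0, 0, DateTimeZone.UTC));

        System.out.println(window.overlaps(segment)); // true  -> load rule applies
        System.out.println(window.contains(segment)); // false -> drop rule does not
    }
}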
From source file:com.metamx.druid.merger.common.task.IndexTask.java
License:Open Source License
@JsonCreator public IndexTask(@JsonProperty("dataSource") String dataSource, @JsonProperty("granularitySpec") GranularitySpec granularitySpec, @JsonProperty("aggregators") AggregatorFactory[] aggregators, @JsonProperty("indexGranularity") QueryGranularity indexGranularity, @JsonProperty("targetPartitionSize") long targetPartitionSize, @JsonProperty("firehose") FirehoseFactory firehoseFactory) { super(//from w ww.j av a 2s.com // _not_ the version, just something uniqueish String.format("index_%s_%s", dataSource, new DateTime().toString()), dataSource, new Interval(granularitySpec.bucketIntervals().first().getStart(), granularitySpec.bucketIntervals().last().getEnd())); this.granularitySpec = Preconditions.checkNotNull(granularitySpec, "granularitySpec"); this.aggregators = aggregators; this.indexGranularity = indexGranularity; this.targetPartitionSize = targetPartitionSize; this.firehoseFactory = firehoseFactory; }
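This merger-package variant builds the same umbrella interval as the indexing-package IndexTask above. A related detail worth noting: an Interval's toString() is the ISO-8601 "start/end" form, which round-trips through Interval.parse (available in Joda-Time 2.0+; a sketch, not from the source):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class IntervalRoundTrip {
    public static void main(String[] args) {
        Interval interval = new Interval(new DateTime(2013, 1, 1, 0, 0, DateTimeZone.UTC),
                new DateTime(2013, 2, 1, 0, 0, DateTimeZone.UTC));

        String iso = interval.toString();
        System.out.println(iso); // 2013-01-01T00:00:00.000Z/2013-02-01T00:00:00.000Z

        // Interval.parse reverses toString(): the instants survive the round trip.
        Interval parsed = Interval.parse(iso);
        System.out.println(parsed.getStartMillis() == interval.getStartMillis()
                && parsed.getEndMillis() == interval.getEndMillis()); // true
    }
}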
From source file:com.metamx.druid.merger.coordinator.TaskQueue.java
License:Open Source License
/**
 * Return all locks that overlap some search interval.
 */
private List<TaskGroup> findLocks(final String dataSource, final Interval interval)
{
    giant.lock();

    try {
        final NavigableMap<Interval, TaskGroup> dsRunning = running.get(dataSource);

        if (dsRunning == null) {
            // No locks at all
            return Collections.emptyList();
        } else {
            // Tasks are indexed by locked interval, which are sorted by interval start. Intervals are non-overlapping, so:
            final NavigableSet<Interval> dsLockbox = dsRunning.navigableKeySet();
            final Iterable<Interval> searchIntervals = Iterables.concat(
                // Single interval that starts at or before ours
                Collections.singletonList(
                    dsLockbox.floor(new Interval(interval.getStart(), new DateTime(Long.MAX_VALUE)))),
                // All intervals that start somewhere between our start instant (exclusive) and end instant (exclusive)
                dsLockbox.subSet(
                    new Interval(interval.getStart(), new DateTime(Long.MAX_VALUE)), false,
                    new Interval(interval.getEnd(), interval.getEnd()), false));

            return Lists.newArrayList(
                FunctionalIterable.create(searchIntervals)
                    .filter(new Predicate<Interval>() {
                        @Override
                        public boolean apply(@Nullable Interval searchInterval) {
                            return searchInterval != null && searchInterval.overlaps(interval);
                        }
                    })
                    .transform(new Function<Interval, TaskGroup>() {
                        @Override
                        public TaskGroup apply(Interval interval) {
                            return dsRunning.get(interval);
                        }
                    }));
        }
    } finally {
        giant.unlock();
    }
}
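The subSet upper bound above, new Interval(interval.getEnd(), interval.getEnd()), is a legal zero-duration interval used purely as a search key. Note also that overlaps() is false for merely abutting intervals, which is why locks ending exactly at our start instant are filtered out. A small demo with arbitrary values:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class OverlapSemantics {
    public static void main(String[] args) {
        DateTime t1 = new DateTime(2013, 1, 1, 0, 0, DateTimeZone.UTC);
        DateTime t2 = new DateTime(2013, 1, 2, 0, 0, DateTimeZone.UTC);
        DateTime t3 = new DateTime(2013, 1, 3, 0, 0, DateTimeZone.UTC);

        Interval a = new Interval(t1, t2);
        Interval b = new Interval(t2, t3); // abuts a, does not overlap it

        System.out.println(a.overlaps(b)); // false: intervals are half-open
        System.out.println(a.abuts(b));    // true

        // A zero-duration interval is valid and usable as a key.
        Interval point = new Interval(t2, t2);
        System.out.println(point.toDurationMillis()); // 0
    }
}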
From source file:com.metamx.druid.query.IntervalChunkingQueryRunner.java
License:Open Source License
private Iterable<Interval> splitInterval(Interval interval)
{
    if (interval.getEndMillis() == interval.getStartMillis()) {
        return Lists.newArrayList(interval);
    }

    List<Interval> intervals = Lists.newArrayList();
    Iterator<Long> timestamps = new PeriodGranularity(period, null, null)
            .iterable(interval.getStartMillis(), interval.getEndMillis()).iterator();

    long start = Math.max(timestamps.next(), interval.getStartMillis());
    while (timestamps.hasNext()) {
        long end = timestamps.next();
        intervals.add(new Interval(start, end));
        start = end;
    }

    if (start < interval.getEndMillis()) {
        intervals.add(new Interval(start, interval.getEndMillis()));
    }

    return intervals;
}
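PeriodGranularity is a Druid class; the same chunking can be sketched with plain Joda-Time by stepping a period across the interval. This is an assumption-laden equivalent, not the Druid implementation: PeriodGranularity aligns chunk boundaries to calendar buckets, while this sketch simply steps from the interval's start.

import java.util.ArrayList;
import java.util.List;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;
import org.joda.time.Period;

public class SplitIntervalSketch {
    // Split an interval into consecutive period-sized chunks; the final
    // chunk is clipped to the interval's end.
    static List<Interval> split(Interval interval, Period period) {
        List<Interval> chunks = new ArrayList<>();
        DateTime start = interval.getStart();
        while (start.isBefore(interval.getEnd())) {
            DateTime end = start.plus(period);
            if (end.isAfter(interval.getEnd())) {
                end = interval.getEnd();
            }
            chunks.add(new Interval(start, end));
            start = end;
        }
        return chunks;
    }

    public static void main(String[] args) {
        Interval interval = new Interval(new DateTime(2013, 1, 1, 6, 0, DateTimeZone.UTC),
                new DateTime(2013, 1, 3, 18, 0, DateTimeZone.UTC));
        // Prints three chunks: two full days and a final 12-hour remainder.
        for (Interval chunk : split(interval, Period.days(1))) {
            System.out.println(chunk);
        }
    }
}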
From source file:com.metamx.druid.realtime.firehose.GracefulShutdownFirehose.java
License:Open Source License
public GracefulShutdownFirehose(Firehose firehose, IndexGranularity segmentGranularity, Period windowPeriod)
{
    this.firehose = firehose;
    this.segmentGranularity = segmentGranularity;
    this.windowMillis = windowPeriod.toStandardDuration().getMillis() * 2;
    this.scheduledExecutor = Executors.newScheduledThreadPool(1,
            new ThreadFactoryBuilder().setDaemon(true).setNameFormat("firehose_scheduled_%d").build());

    final long truncatedNow = segmentGranularity.truncate(new DateTime()).getMillis();
    final long end = segmentGranularity.increment(truncatedNow);

    this.rejectionPolicy = new IntervalRejectionPolicyFactory(new Interval(truncatedNow, end))
            .create(windowPeriod);
}
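IndexGranularity.truncate/increment are Druid methods; the resulting current-segment interval can be sketched with plain Joda-Time rounding for, say, hourly granularity (an illustrative equivalent, not the Druid code):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class CurrentSegmentIntervalSketch {
    public static void main(String[] args) {
        DateTime now = new DateTime(DateTimeZone.UTC);

        // Truncate to the start of the current hour, then step one hour forward:
        // the analogue of segmentGranularity.truncate(...) / increment(...).
        DateTime truncatedNow = now.hourOfDay().roundFloorCopy();
        Interval currentSegment = new Interval(truncatedNow, truncatedNow.plusHours(1));

        System.out.println(currentSegment.contains(now)); // true
    }
}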