List of usage examples for the org.joda.time.Interval method getEnd()
public DateTime getEnd()
From source file:com.metamx.druid.indexing.common.task.VersionConverterTask.java
License:Open Source License
/**
 * Builds the id for a version-converter task by joining the task type, the
 * datasource, the interval's start/end instants, and the current timestamp.
 *
 * @param dataSource datasource the task operates on
 * @param interval   interval covered by the task
 * @return the joined task id
 */
private static String makeId(String dataSource, Interval interval) {
    // The trailing timestamp makes each generated id unique even for the
    // same datasource/interval pair.
    final DateTime created = new DateTime();
    return joinId(TYPE, dataSource, interval.getStart(), interval.getEnd(), created);
}
From source file:com.metamx.druid.indexing.coordinator.MergerDBCoordinator.java
License:Open Source License
/**
 * Fetches all unused (used = 0) segments for {@code dataSource} whose start and end both
 * fall within {@code interval}, reading them straight from the segment table.
 *
 * @param dataSource datasource whose unused segments are wanted
 * @param interval   interval the segments must be fully contained in
 * @return the matching segments, deserialized from their JSON payloads
 */
public List<DataSegment> getUnusedSegmentsForInterval(final String dataSource, final Interval interval) {
    List<DataSegment> matchingSegments = dbi.withHandle(new HandleCallback<List<DataSegment>>() {
        @Override
        public List<DataSegment> withHandle(Handle handle) throws IOException {
            // NOTE(review): the interval bounds are bound as ISO-8601 strings; this relies on
            // the stored start/end columns using the same lexicographically-sortable format —
            // confirm against the schema.
            return handle.createQuery(String.format(
                    "SELECT payload FROM %s WHERE dataSource = :dataSource and start >= :start and end <= :end and used = 0",
                    dbConnectorConfig.getSegmentTable())).bind("dataSource", dataSource)
                    .bind("start", interval.getStart().toString()).bind("end", interval.getEnd().toString())
                    .fold(Lists.<DataSegment>newArrayList(),
                            new Folder3<List<DataSegment>, Map<String, Object>>() {
                                @Override
                                public List<DataSegment> fold(List<DataSegment> accumulator,
                                        Map<String, Object> stringObjectMap, FoldController foldController,
                                        StatementContext statementContext) throws SQLException {
                                    try {
                                        // Each row's "payload" column holds a JSON-serialized DataSegment.
                                        DataSegment segment = jsonMapper.readValue(
                                                (String) stringObjectMap.get("payload"), DataSegment.class);
                                        accumulator.add(segment);
                                        return accumulator;
                                    } catch (Exception e) {
                                        // Wrap parse failures as unchecked; the fold contract only allows SQLException.
                                        throw Throwables.propagate(e);
                                    }
                                }
                            });
        }
    });
    log.info("Found %,d segments for %s for interval %s.", matchingSegments.size(), dataSource, interval);
    return matchingSegments;
}
From source file:com.metamx.druid.indexing.coordinator.TaskLockbox.java
License:Open Source License
/**
 * Return all locks that overlap some search interval.
 *
 * @param dataSource datasource whose lock table is searched
 * @param interval   search interval
 * @return every TaskLockPosse whose locked interval overlaps {@code interval};
 *         empty list if the datasource holds no locks
 */
private List<TaskLockPosse> findLockPossesForInterval(final String dataSource, final Interval interval) {
    giant.lock();
    try {
        final NavigableMap<Interval, TaskLockPosse> dsRunning = running.get(dataSource);
        if (dsRunning == null) {
            // No locks at all
            return Collections.emptyList();
        } else {
            // Tasks are indexed by locked interval, which are sorted by interval start.
            // Intervals are non-overlapping, so only a narrow slice of keys can overlap ours:
            final NavigableSet<Interval> dsLockbox = dsRunning.navigableKeySet();
            final Iterable<Interval> searchIntervals = Iterables.concat(
                    // Single interval that starts at or before ours
                    Collections.singletonList(
                            dsLockbox.floor(new Interval(interval.getStart(), new DateTime(Long.MAX_VALUE)))),
                    // All intervals that start somewhere between our start instant (exclusive)
                    // and end instant (exclusive)
                    dsLockbox.subSet(new Interval(interval.getStart(), new DateTime(Long.MAX_VALUE)), false,
                            new Interval(interval.getEnd(), interval.getEnd()), false));
            return Lists
                    .newArrayList(FunctionalIterable.create(searchIntervals).filter(new Predicate<Interval>() {
                        @Override
                        public boolean apply(@Nullable Interval searchInterval) {
                            // floor() may have yielded null (no key starts at or before ours).
                            return searchInterval != null && searchInterval.overlaps(interval);
                        }
                    }).transform(new Function<Interval, TaskLockPosse>() {
                        @Override
                        public TaskLockPosse apply(Interval interval) {
                            // Map each surviving key back to its lock posse.
                            return dsRunning.get(interval);
                        }
                    }));
        }
    } finally {
        giant.unlock();
    }
}
From source file:com.metamx.druid.merger.common.task.DeleteTask.java
License:Open Source License
/**
 * Jackson-deserializable constructor. The task id is
 * "delete_&lt;dataSource&gt;_&lt;start&gt;_&lt;end&gt;_&lt;now&gt;"; it is built inline
 * because super(...) must be the first statement.
 *
 * @param dataSource datasource the deletion applies to
 * @param interval   interval of data to delete
 */
@JsonCreator
public DeleteTask(@JsonProperty("dataSource") String dataSource,
        @JsonProperty("interval") Interval interval) {
    super(String.format("delete_%s_%s_%s_%s", dataSource, interval.getStart(), interval.getEnd(),
            new DateTime().toString()), dataSource, interval);
}
From source file:com.metamx.druid.merger.common.task.IndexDeterminePartitionsTask.java
License:Open Source License
/**
 * Jackson-deserializable constructor. The task id is
 * "&lt;groupId&gt;_partitions_&lt;start&gt;_&lt;end&gt;", built inline because super(...)
 * must be the first statement.
 *
 * @param groupId             id of the task group this task belongs to
 * @param interval            interval of data whose partitioning is determined
 * @param firehoseFactory     source of the rows to sample
 * @param schema              schema (provides the datasource name)
 * @param targetPartitionSize desired number of rows per partition
 */
@JsonCreator
public IndexDeterminePartitionsTask(@JsonProperty("groupId") String groupId,
        @JsonProperty("interval") Interval interval,
        @JsonProperty("firehose") FirehoseFactory firehoseFactory,
        @JsonProperty("schema") Schema schema,
        @JsonProperty("targetPartitionSize") long targetPartitionSize) {
    super(String.format("%s_partitions_%s_%s", groupId, interval.getStart(), interval.getEnd()), groupId,
            schema.getDataSource(), interval);
    this.firehoseFactory = firehoseFactory;
    this.schema = schema;
    this.targetPartitionSize = targetPartitionSize;
}
From source file:com.metamx.druid.merger.common.task.IndexGeneratorTask.java
License:Open Source License
/**
 * Jackson-deserializable constructor. The task id is
 * "&lt;groupId&gt;_generator_&lt;start&gt;_&lt;end&gt;_&lt;partitionNum&gt;", built inline
 * because super(...) must be the first statement.
 *
 * @param groupId         id of the task group this task belongs to
 * @param interval        interval of data to index
 * @param firehoseFactory source of the rows to index
 * @param schema          schema (provides datasource name and shard spec)
 */
@JsonCreator
public IndexGeneratorTask(@JsonProperty("groupId") String groupId,
        @JsonProperty("interval") Interval interval,
        @JsonProperty("firehose") FirehoseFactory firehoseFactory,
        @JsonProperty("schema") Schema schema) {
    super(String.format("%s_generator_%s_%s_%s", groupId, interval.getStart(), interval.getEnd(),
            schema.getShardSpec().getPartitionNum()), groupId, schema.getDataSource(), interval);
    this.firehoseFactory = firehoseFactory;
    this.schema = schema;
}
From source file:com.metamx.druid.merger.coordinator.TaskQueue.java
License:Open Source License
/** * Return all locks that overlap some search interval. */// w w w.j a v a 2 s. c o m private List<TaskGroup> findLocks(final String dataSource, final Interval interval) { giant.lock(); try { final NavigableMap<Interval, TaskGroup> dsRunning = running.get(dataSource); if (dsRunning == null) { // No locks at all return Collections.emptyList(); } else { // Tasks are indexed by locked interval, which are sorted by interval start. Intervals are non-overlapping, so: final NavigableSet<Interval> dsLockbox = dsRunning.navigableKeySet(); final Iterable<Interval> searchIntervals = Iterables.concat( // Single interval that starts at or before ours Collections.singletonList( dsLockbox.floor(new Interval(interval.getStart(), new DateTime(Long.MAX_VALUE)))), // All intervals that start somewhere between our start instant (exclusive) and end instant (exclusive) dsLockbox.subSet(new Interval(interval.getStart(), new DateTime(Long.MAX_VALUE)), false, new Interval(interval.getEnd(), interval.getEnd()), false)); return Lists .newArrayList(FunctionalIterable.create(searchIntervals).filter(new Predicate<Interval>() { @Override public boolean apply(@Nullable Interval searchInterval) { return searchInterval != null && searchInterval.overlaps(interval); } }).transform(new Function<Interval, TaskGroup>() { @Override public TaskGroup apply(Interval interval) { return dsRunning.get(interval); } })); } } finally { giant.unlock(); } }
From source file:com.metamx.druid.VersionedIntervalTimeline.java
License:Open Source License
/**
 * Inserts {@code entry} into {@code timeline} starting at key interval {@code key},
 * splitting existing entries and/or the new entry as needed so that higher-version
 * entries always win where intervals overlap.
 *
 * Walks forward through successive keys while they still overlap the (possibly
 * shrinking) remainder of the entry's interval.
 *
 * @param timeline timeline to mutate
 * @param key      key interval at which to begin the overlap walk
 * @param entry    entry being added
 * @return true if the walk made at least one pass (or the entry was fully absorbed);
 *         false if {@code key} does not overlap the entry at all
 */
private boolean addAtKey(NavigableMap<Interval, TimelineEntry> timeline, Interval key, TimelineEntry entry) {
    boolean retVal = false;
    Interval currKey = key;
    Interval entryInterval = entry.getTrueInterval();
    if (!currKey.overlaps(entryInterval)) {
        return false;
    }
    while (currKey != null && currKey.overlaps(entryInterval)) {
        // Capture the next key before any removal below can change the map.
        Interval nextKey = timeline.higherKey(currKey);
        int versionCompare = versionComparator.compare(entry.getVersion(), timeline.get(currKey).getVersion());
        if (versionCompare < 0) {
            // Existing entry has the newer version: carve the new entry around it.
            if (currKey.contains(entryInterval)) {
                // Fully shadowed — nothing of the new entry survives here.
                return true;
            } else if (currKey.getStart().isBefore(entryInterval.getStart())) {
                // Existing entry covers our head; keep only our tail.
                entryInterval = new Interval(currKey.getEnd(), entryInterval.getEnd());
            } else {
                // Our head sticks out before the existing entry: commit that head now.
                addIntervalToTimeline(new Interval(entryInterval.getStart(), currKey.getStart()), entry,
                        timeline);
                if (entryInterval.getEnd().isAfter(currKey.getEnd())) {
                    // A tail also sticks out past the existing entry; keep walking with it.
                    entryInterval = new Interval(currKey.getEnd(), entryInterval.getEnd());
                } else {
                    // NOTE(review): entryInterval becomes null here; the final
                    // addIntervalToTimeline call below is then presumably a no-op — confirm
                    // that helper tolerates null.
                    entryInterval = null;
                }
            }
        } else if (versionCompare > 0) {
            // New entry has the newer version: carve the existing entry around it.
            TimelineEntry oldEntry = timeline.remove(currKey);
            if (currKey.contains(entryInterval)) {
                // Old entry surrounds us: re-add its head and tail, insert us in the middle.
                addIntervalToTimeline(new Interval(currKey.getStart(), entryInterval.getStart()), oldEntry,
                        timeline);
                addIntervalToTimeline(new Interval(entryInterval.getEnd(), currKey.getEnd()), oldEntry,
                        timeline);
                addIntervalToTimeline(entryInterval, entry, timeline);
                return true;
            } else if (currKey.getStart().isBefore(entryInterval.getStart())) {
                // Preserve the old entry's head that precedes us.
                addIntervalToTimeline(new Interval(currKey.getStart(), entryInterval.getStart()), oldEntry,
                        timeline);
            } else if (entryInterval.getEnd().isBefore(currKey.getEnd())) {
                // Preserve the old entry's tail that follows us.
                addIntervalToTimeline(new Interval(entryInterval.getEnd(), currKey.getEnd()), oldEntry,
                        timeline);
            }
        } else {
            if (timeline.get(currKey).equals(entry)) {
                // This occurs when restoring segments
                timeline.remove(currKey);
            } else {
                // Equal versions with different entries is an unresolvable conflict.
                throw new UnsupportedOperationException(
                        String.format("Cannot add overlapping segments [%s and %s] with the same version [%s]",
                                currKey, entryInterval,
                                entry.getVersion()));
            }
        }
        currKey = nextKey;
        retVal = true;
    }
    // Commit whatever remains of the entry's interval after all carving.
    addIntervalToTimeline(entryInterval, entry, timeline);
    return retVal;
}
From source file:com.metamx.druid.VersionedIntervalTimeline.java
License:Open Source License
/**
 * Collects all timeline entries overlapping {@code interval}, trimming the first and
 * last holders so the returned coverage does not extend past the requested bounds.
 *
 * @param interval     interval to look up
 * @param incompleteOk if true, search the incomplete-partitions timeline instead of
 *                     the complete-partitions timeline
 * @return holders for every overlapping entry, with the first clipped to start no
 *         earlier than {@code interval} and the last clipped to end no later
 */
private List<TimelineObjectHolder<VersionType, ObjectType>> lookup(Interval interval, boolean incompleteOk) {
    List<TimelineObjectHolder<VersionType, ObjectType>> retVal = new ArrayList<TimelineObjectHolder<VersionType, ObjectType>>();
    NavigableMap<Interval, TimelineEntry> timeline = (incompleteOk) ? incompletePartitionsTimeline
            : completePartitionsTimeline;
    // Linear scan over the whole timeline, keeping every overlapping entry.
    for (Map.Entry<Interval, TimelineEntry> entry : timeline.entrySet()) {
        Interval timelineInterval = entry.getKey();
        TimelineEntry val = entry.getValue();
        if (timelineInterval.overlaps(interval)) {
            retVal.add(new TimelineObjectHolder<VersionType, ObjectType>(timelineInterval, val.getVersion(),
                    val.getPartitionHolder()));
        }
    }
    if (retVal.isEmpty()) {
        return retVal;
    }
    // Clip the first holder if it starts before the requested interval.
    TimelineObjectHolder<VersionType, ObjectType> firstEntry = retVal.get(0);
    if (interval.overlaps(firstEntry.getInterval())
            && interval.getStart().isAfter(firstEntry.getInterval().getStart())) {
        retVal.set(0,
                new TimelineObjectHolder<VersionType, ObjectType>(
                        new Interval(interval.getStart(), firstEntry.getInterval().getEnd()),
                        firstEntry.getVersion(), firstEntry.getObject()));
    }
    // Clip the last holder if it ends after the requested interval.
    TimelineObjectHolder<VersionType, ObjectType> lastEntry = retVal.get(retVal.size() - 1);
    if (interval.overlaps(lastEntry.getInterval())
            && interval.getEnd().isBefore(lastEntry.getInterval().getEnd())) {
        retVal.set(retVal.size() - 1,
                new TimelineObjectHolder<VersionType, ObjectType>(
                        new Interval(lastEntry.getInterval().getStart(), interval.getEnd()),
                        lastEntry.getVersion(), lastEntry.getObject()));
    }
    return retVal;
}
From source file:com.microsoft.exchange.DateHelp.java
License:Apache License
/**
 * Repeatedly subdivides the span [start, end] until at least {@code count}
 * intervals are produced. Will always return at least two intervals.
 *
 * NOTE(review): assumes generateIntervals() splits a span into more pieces than
 * it was given; if it ever returns a single interval for some span, this loop
 * would not terminate — confirm against generateIntervals.
 *
 * @param start start of the overall span
 * @param end   end of the overall span
 * @param count minimum number of intervals wanted
 * @return the subdivided intervals
 */
public static List<Interval> generateMultipleIntervals(Date start, Date end, int count) {
    List<Interval> result = generateIntervals(start, end);
    // Keep halving every current interval until we reach the requested count.
    while (result.size() < count) {
        final List<Interval> subdivided = new ArrayList<Interval>();
        for (final Interval piece : result) {
            subdivided.addAll(generateIntervals(piece.getStart().toDate(), piece.getEnd().toDate()));
        }
        result = subdivided;
    }
    return result;
}