List of usage examples for org.joda.time.Interval.getEndMillis()
public long getEndMillis()
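getEndMillis() returns the exclusive end of the interval as milliseconds since the epoch (1970-01-01T00:00:00Z UTC). Before the project examples, here is a minimal self-contained sketch of the method's semantics (written for this page, not taken from any of the sources below):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class GetEndMillisDemo
{
  public static void main(String[] args)
  {
    DateTime start = new DateTime(2020, 1, 1, 0, 0, DateTimeZone.UTC);
    DateTime end = new DateTime(2020, 1, 2, 0, 0, DateTimeZone.UTC);
    Interval interval = new Interval(start, end);

    // Joda-Time intervals are half-open: [start, end).
    long endMillis = interval.getEndMillis();          // same as end.getMillis()
    System.out.println(endMillis == end.getMillis());  // true
    System.out.println(interval.contains(endMillis));  // false: the end is exclusive
  }
}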
From source file: org.apache.druid.query.TimewarpOperator.java
License: Apache License

public QueryRunner<T> postProcess(final QueryRunner<T> baseRunner, final long now)
{
  return new QueryRunner<T>()
  {
    @Override
    public Sequence<T> run(final QueryPlus<T> queryPlus, final ResponseContext responseContext)
    {
      final DateTimeZone tz = queryPlus.getQuery().getTimezone();
      final long offset = computeOffset(now, tz);

      final Interval interval = queryPlus.getQuery().getIntervals().get(0);
      final Interval modifiedInterval = new Interval(
          Math.min(interval.getStartMillis() + offset, now + offset),
          Math.min(interval.getEndMillis() + offset, now + offset),
          interval.getChronology()
      );
      return Sequences.map(
          baseRunner.run(
              queryPlus.withQuerySegmentSpec(
                  new MultipleIntervalSegmentSpec(Collections.singletonList(modifiedInterval))),
              responseContext
          ),
          new Function<T, T>()
          {
            @Override
            public T apply(T input)
            {
              if (input instanceof Result) {
                Result res = (Result) input;
                Object value = res.getValue();
                if (value instanceof TimeBoundaryResultValue) {
                  TimeBoundaryResultValue boundary = (TimeBoundaryResultValue) value;

                  DateTime minTime;
                  try {
                    minTime = boundary.getMinTime();
                  } catch (IllegalArgumentException e) {
                    minTime = null;
                  }

                  final DateTime maxTime = boundary.getMaxTime();

                  return (T) ((TimeBoundaryQuery) queryPlus.getQuery()).buildResult(
                      DateTimes.utc(Math.min(res.getTimestamp().getMillis() - offset, now)),
                      minTime != null ? minTime.minus(offset) : null,
                      maxTime != null ? DateTimes.utc(Math.min(maxTime.getMillis() - offset, now)) : null
                  ).iterator().next();
                }
                return (T) new Result(res.getTimestamp().minus(offset), value);
              } else if (input instanceof MapBasedRow) {
                MapBasedRow row = (MapBasedRow) input;
                return (T) new MapBasedRow(row.getTimestamp().minus(offset), row.getEvent());
              }

              // default to noop for unknown result types
              return input;
            }
          }
      );
    }
  };
}
From source file: org.apache.druid.query.vector.VectorCursorGranularizer.java
License: Apache License

public void setCurrentOffsets(final Interval bucketInterval)
{
  final long timeStart = bucketInterval.getStartMillis();
  final long timeEnd = bucketInterval.getEndMillis();

  int vectorSize = cursor.getCurrentVectorSize();
  endOffset = 0;

  if (timeSelector != null) {
    if (timestamps == null) {
      timestamps = timeSelector.getLongVector();
    }

    // Skip "offset" to start of bucketInterval.
    while (startOffset < vectorSize && timestamps[startOffset] < timeStart) {
      startOffset++;
    }

    // Find end of bucketInterval.
    for (endOffset = vectorSize - 1;
         endOffset >= startOffset && timestamps[endOffset] >= timeEnd;
         endOffset--) {
      // nothing needed, "for" is doing the work.
    }

    // Adjust: endOffset is now pointing at the last row to aggregate, but we want it
    // to be one _past_ the last row.
    endOffset++;
  } else {
    endOffset = vectorSize;
  }
}
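Note the half-open semantics at work here: a row belongs to the bucket when timeStart <= timestamp < timeEnd, so getEndMillis() is treated as an exclusive bound and rows with timestamps[endOffset] >= timeEnd are excluded from the bucket's offset range.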
From source file: org.apache.druid.segment.IndexMergerV9.java
License: Apache License

private void makeIndexBinary(final FileSmoosher v9Smoosher, final List<IndexableAdapter> adapters,
    final File outDir, final List<String> mergedDimensions, final List<String> mergedMetrics,
    final ProgressIndicator progress, final IndexSpec indexSpec, final List<DimensionMergerV9> mergers)
    throws IOException
{
  final String section = "make index.drd";
  progress.startSection(section);

  long startTime = System.currentTimeMillis();
  final Set<String> finalDimensions = new LinkedHashSet<>();
  final Set<String> finalColumns = new LinkedHashSet<>();
  finalColumns.addAll(mergedMetrics);
  for (int i = 0; i < mergedDimensions.size(); ++i) {
    if (mergers.get(i).canSkip()) {
      continue;
    }
    finalColumns.add(mergedDimensions.get(i));
    finalDimensions.add(mergedDimensions.get(i));
  }

  GenericIndexed<String> cols = GenericIndexed.fromIterable(finalColumns, GenericIndexed.STRING_STRATEGY);
  GenericIndexed<String> dims = GenericIndexed.fromIterable(finalDimensions, GenericIndexed.STRING_STRATEGY);

  final String bitmapSerdeFactoryType = mapper.writeValueAsString(indexSpec.getBitmapSerdeFactory());
  final long numBytes = cols.getSerializedSize() + dims.getSerializedSize() + 16
      + serializerUtils.getSerializedStringByteSize(bitmapSerdeFactoryType);

  final SmooshedWriter writer = v9Smoosher.addWithSmooshedWriter("index.drd", numBytes);
  cols.writeTo(writer, v9Smoosher);
  dims.writeTo(writer, v9Smoosher);

  DateTime minTime = DateTimes.MAX;
  DateTime maxTime = DateTimes.MIN;

  for (IndexableAdapter index : adapters) {
    minTime = JodaUtils.minDateTime(minTime, index.getDataInterval().getStart());
    maxTime = JodaUtils.maxDateTime(maxTime, index.getDataInterval().getEnd());
  }
  final Interval dataInterval = new Interval(minTime, maxTime);

  serializerUtils.writeLong(writer, dataInterval.getStartMillis());
  serializerUtils.writeLong(writer, dataInterval.getEndMillis());
  serializerUtils.writeString(writer, bitmapSerdeFactoryType);
  writer.close();

  IndexIO.checkFileSize(new File(outDir, "index.drd"));
  log.info("Completed index.drd in %,d millis.", System.currentTimeMillis() - startTime);

  progress.stopSection(section);
}
From source file: org.apache.druid.server.coordinator.CostBalancerStrategy.java
License: Apache License

/**
 * This defines the unnormalized cost function between two segments.
 *
 * See https://github.com/apache/incubator-druid/pull/2972 for more details about the cost function.
 *
 * intervalCost: segments close together are more likely to be queried together
 *
 * multiplier: if two segments belong to the same data source, they are more likely to be involved
 * in the same queries
 *
 * @param segmentA The first DataSegment.
 * @param segmentB The second DataSegment.
 *
 * @return the joint cost of placing the two DataSegments together on one node.
 */
public static double computeJointSegmentsCost(final DataSegment segmentA, final DataSegment segmentB)
{
  final Interval intervalA = segmentA.getInterval();
  final Interval intervalB = segmentB.getInterval();

  final double t0 = intervalA.getStartMillis();
  final double t1 = (intervalA.getEndMillis() - t0) / MILLIS_FACTOR;
  final double start = (intervalB.getStartMillis() - t0) / MILLIS_FACTOR;
  final double end = (intervalB.getEndMillis() - t0) / MILLIS_FACTOR;

  // constant cost-multiplier for segments of the same datasource
  final double multiplier = segmentA.getDataSource().equals(segmentB.getDataSource()) ? 2.0 : 1.0;

  return INV_LAMBDA_SQUARE * intervalCost(t1, start, end) * multiplier;
}
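The translation step above (subtracting t0, then dividing by MILLIS_FACTOR) maps both segments onto a common, unitless time axis anchored at segmentA's start before the cost is computed. A minimal sketch of just that step, using a hypothetical scale factor; Druid's real MILLIS_FACTOR, INV_LAMBDA_SQUARE, and intervalCost are defined elsewhere in CostBalancerStrategy and are not reproduced here:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class JointCostScalingDemo
{
  public static void main(String[] args)
  {
    // Assumption: scale milliseconds to days. Druid derives its factor from the
    // cost function's half-life, so the real value differs.
    final double MILLIS_FACTOR = 24.0 * 3600 * 1000;

    Interval intervalA = new Interval(
        new DateTime(2020, 1, 1, 0, 0, DateTimeZone.UTC),
        new DateTime(2020, 1, 2, 0, 0, DateTimeZone.UTC));
    Interval intervalB = new Interval(
        new DateTime(2020, 1, 3, 0, 0, DateTimeZone.UTC),
        new DateTime(2020, 1, 4, 0, 0, DateTimeZone.UTC));

    // Everything is measured from intervalA's start, in scaled units.
    final double t0 = intervalA.getStartMillis();
    final double t1 = (intervalA.getEndMillis() - t0) / MILLIS_FACTOR;      // 1.0
    final double start = (intervalB.getStartMillis() - t0) / MILLIS_FACTOR; // 2.0
    final double end = (intervalB.getEndMillis() - t0) / MILLIS_FACTOR;     // 3.0

    System.out.printf("t1=%.1f start=%.1f end=%.1f%n", t1, start, end);
  }
}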
From source file: org.apache.druid.server.coordinator.rules.PeriodDropBeforeRule.java
License: Apache License

@Override
public boolean appliesTo(Interval theInterval, DateTime referenceTimestamp)
{
  final DateTime periodAgo = referenceTimestamp.minus(period);
  return theInterval.getEndMillis() <= periodAgo.getMillis();
}
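A hedged illustration of the comparison above, with made-up intervals (the values are ours, not Druid's): the rule applies only to segments that end at or before referenceTimestamp minus period.

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;
import org.joda.time.Period;

public class DropBeforeDemo
{
  public static void main(String[] args)
  {
    Period period = Period.days(30);
    DateTime referenceTimestamp = new DateTime(2020, 6, 15, 0, 0, DateTimeZone.UTC);
    DateTime periodAgo = referenceTimestamp.minus(period); // 2020-05-16

    // Ends well before the cutoff: the rule applies.
    Interval oldSegment = new Interval(periodAgo.minusDays(10), periodAgo.minusDays(9));
    // Straddles the cutoff: getEndMillis() lands after it, so the rule does not apply.
    Interval recentSegment = new Interval(periodAgo.minusDays(1), periodAgo.plusDays(1));

    System.out.println(oldSegment.getEndMillis() <= periodAgo.getMillis());    // true
    System.out.println(recentSegment.getEndMillis() <= periodAgo.getMillis()); // false
  }
}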
From source file: org.apache.druid.server.coordinator.rules.Rules.java
License: Apache License

public static boolean eligibleForLoad(Period period, Interval interval, DateTime referenceTimestamp,
    boolean includeFuture)
{
  final Interval currInterval = new Interval(period, referenceTimestamp);
  if (includeFuture) {
    return currInterval.getStartMillis() < interval.getEndMillis();
  } else {
    return eligibleForLoad(currInterval, interval);
  }
}
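A hedged sketch of the includeFuture branch, using Joda's Interval(ReadablePeriod, ReadableInstant) constructor to build the lookback window ending at the reference time (the values are ours, not Druid's). Any segment that ends after the window's start counts as eligible, including segments entirely in the future:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;
import org.joda.time.Period;

public class EligibleForLoadDemo
{
  public static void main(String[] args)
  {
    Period period = Period.days(1);
    DateTime referenceTimestamp = new DateTime(2020, 6, 15, 0, 0, DateTimeZone.UTC);

    // Lookback window ending at the reference time: [2020-06-14, 2020-06-15).
    Interval currInterval = new Interval(period, referenceTimestamp);

    // A segment entirely in the future still ends after the window's start.
    Interval futureSegment = new Interval(
        referenceTimestamp.plusDays(1), referenceTimestamp.plusDays(2));

    System.out.println(currInterval.getStartMillis() < futureSegment.getEndMillis()); // true
  }
}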
From source file: org.apache.druid.sql.calcite.expression.Expressions.java
License: Apache License

private static DimFilter getBoundTimeDimFilter(SqlKind operatorKind, BoundRefKey boundRefKey,
    Interval interval, boolean isAligned)
{
  switch (operatorKind) {
    case EQUALS:
      return isAligned
          ? Bounds.interval(boundRefKey, interval)
          : Filtration.matchNothing();

    case NOT_EQUALS:
      return isAligned
          ? new NotDimFilter(Bounds.interval(boundRefKey, interval))
          : Filtration.matchEverything();

    case GREATER_THAN:
      return Bounds.greaterThanOrEqualTo(boundRefKey, String.valueOf(interval.getEndMillis()));

    case GREATER_THAN_OR_EQUAL:
      return isAligned
          ? Bounds.greaterThanOrEqualTo(boundRefKey, String.valueOf(interval.getStartMillis()))
          : Bounds.greaterThanOrEqualTo(boundRefKey, String.valueOf(interval.getEndMillis()));

    case LESS_THAN:
      return isAligned
          ? Bounds.lessThan(boundRefKey, String.valueOf(interval.getStartMillis()))
          : Bounds.lessThan(boundRefKey, String.valueOf(interval.getEndMillis()));

    case LESS_THAN_OR_EQUAL:
      return Bounds.lessThan(boundRefKey, String.valueOf(interval.getEndMillis()));

    default:
      throw new IllegalStateException("WTF?! Shouldn't have got here...");
  }
}
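A detail worth noting: because Joda-Time intervals are half-open [start, end), getEndMillis() marks the first instant after the interval. That is why it does double duty above, serving as the inclusive lower bound for everything past the interval (the greaterThanOrEqualTo calls) and as the exclusive upper bound for everything inside or before it (the lessThan calls).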
From source file: org.apache.druid.sql.calcite.filtration.Bounds.java
License: Apache License

public static BoundDimFilter interval(final BoundRefKey boundRefKey, final Interval interval)
{
  if (!boundRefKey.getComparator().equals(StringComparators.NUMERIC)) {
    // Interval comparison only works with NUMERIC comparator.
    throw new ISE("Comparator must be NUMERIC but was[%s]", boundRefKey.getComparator());
  }

  return new BoundDimFilter(
      boundRefKey.getDimension(),
      String.valueOf(interval.getStartMillis()),
      String.valueOf(interval.getEndMillis()),
      false,  // lower bound (start) is inclusive
      true,   // upper bound (end) is exclusive, matching the half-open interval
      null,
      boundRefKey.getExtractionFn(),
      boundRefKey.getComparator(),
      null
  );
}
From source file: org.apache.druid.timeline.SegmentId.java
License: Apache License

private SegmentId(String dataSource, Interval interval, String version, int partitionNum)
{
  this.dataSource = STRING_INTERNER.intern(Objects.requireNonNull(dataSource));
  this.intervalStartMillis = interval.getStartMillis();
  this.intervalEndMillis = interval.getEndMillis();
  this.intervalChronology = interval.getChronology();
  // Versions are timestamp-based Strings, interning of them doesn't make sense. If this is not the case,
  // interning could be conditionally allowed via a system property.
  this.version = Objects.requireNonNull(version);
  this.partitionNum = partitionNum;
  this.hashCode = computeHashCode();
}
From source file: org.apache.druid.timeline.VersionedIntervalTimeline.java
License: Apache License

public boolean isOvershadowed(Interval interval, VersionType version, ObjectType object)
{
  lock.readLock().lock();
  try {
    TimelineEntry entry = completePartitionsTimeline.get(interval);
    if (entry != null) {
      final int majorVersionCompare = versionComparator.compare(version, entry.getVersion());
      if (majorVersionCompare == 0) {
        for (PartitionChunk<ObjectType> chunk : entry.partitionHolder) {
          if (chunk.getObject().overshadows(object)) {
            return true;
          }
        }
        return false;
      } else {
        return majorVersionCompare < 0;
      }
    }

    Interval lower = completePartitionsTimeline.floorKey(new Interval(interval.getStart(), DateTimes.MAX));

    if (lower == null || !lower.overlaps(interval)) {
      return false;
    }

    Interval prev = null;
    Interval curr = lower;
    do {
      if (curr == null ||  //no further keys
          (prev != null && curr.getStartMillis() > prev.getEndMillis())  //a discontinuity
      ) {
        return false;
      }

      final TimelineEntry timelineEntry = completePartitionsTimeline.get(curr);
      final int versionCompare = versionComparator.compare(version, timelineEntry.getVersion());

      //lower or same version
      if (versionCompare > 0) {
        return false;
      } else if (versionCompare == 0) {
        if (timelineEntry.partitionHolder.stream().noneMatch(chunk -> chunk.getObject().overshadows(object))) {
          return false;
        }
      }

      prev = curr;
      curr = completePartitionsTimeline.higherKey(curr);
    } while (interval.getEndMillis() > prev.getEndMillis());

    return true;
  } finally {
    lock.readLock().unlock();
  }
}
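The loop condition interval.getEndMillis() > prev.getEndMillis() keeps walking adjacent timeline entries until their combined cover reaches the end of the queried interval; the method reports the interval as overshadowed only if every entry along the way holds a higher version (or a same-version chunk that overshadows the object).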