Example usage for org.joda.time Interval getEnd

Introduction

On this page you can find example usages of org.joda.time.Interval.getEnd().

Prototype

public DateTime getEnd() 

Document

Gets the end of this time interval, which is exclusive, as a DateTime.
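The end instant is exclusive: an interval contains its start instant but not its end instant. A minimal, self-contained sketch of that behavior (class and variable names are illustrative, not taken from the sources below):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class GetEndDemo {
    public static void main(String[] args) {
        DateTime start = new DateTime(2024, 1, 1, 0, 0, DateTimeZone.UTC);
        DateTime end = new DateTime(2024, 1, 2, 0, 0, DateTimeZone.UTC);
        Interval day = new Interval(start, end);

        System.out.println(day.getEnd());                  // 2024-01-02T00:00:00.000Z
        // The end is exclusive, so the interval does not contain it...
        System.out.println(day.contains(day.getEnd()));    // false
        // ...while the start is inclusive.
        System.out.println(day.contains(day.getStart()));  // true
    }
}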

Usage

From source file: org.apache.druid.indexing.common.task.ConvertSegmentTask.java

License: Apache License

protected static String makeId(String dataSource, Interval interval) {
    Preconditions.checkNotNull(dataSource, "dataSource");
    Preconditions.checkNotNull(interval, "interval");
    return joinId(TYPE, dataSource, interval.getStart(), interval.getEnd(), DateTimes.nowUtc());
}

From source file: org.apache.druid.indexing.common.task.SameIntervalMergeTask.java

License: Apache License

public static String makeId(String id, final String typeName, String dataSource, Interval interval) {
    return id != null ? id
            : joinId(typeName, dataSource, interval.getStart(), interval.getEnd(),
                    DateTimes.nowUtc().toString());
}

From source file: org.apache.druid.indexing.overlord.TaskLockbox.java

License: Apache License

/**
 * Return all locks that overlap some search interval.
 */
private List<TaskLockPosse> findLockPossesOverlapsInterval(final String dataSource, final Interval interval) {
    giant.lock();

    try {
        final NavigableMap<DateTime, SortedMap<Interval, List<TaskLockPosse>>> dsRunning = running
                .get(dataSource);
        if (dsRunning == null) {
            // No locks at all
            return Collections.emptyList();
        } else {
            // Tasks are indexed by locked interval, sorted by interval start. Locked intervals never overlap, so:
            final NavigableSet<DateTime> dsLockbox = dsRunning.navigableKeySet();
            final Iterable<DateTime> searchStartTimes = Iterables.concat(
                    // Single interval that starts at or before ours
                    Collections.singletonList(dsLockbox.floor(interval.getStart())),

                    // All intervals that start somewhere between our start instant (exclusive) and end instant (exclusive)
                    dsLockbox.subSet(interval.getStart(), false, interval.getEnd(), false));

            return StreamSupport.stream(searchStartTimes.spliterator(), false)
                    .filter(java.util.Objects::nonNull).map(dsRunning::get).filter(java.util.Objects::nonNull)
                    .flatMap(sortedMap -> sortedMap.entrySet().stream())
                    .filter(entry -> entry.getKey().overlaps(interval))
                    .flatMap(entry -> entry.getValue().stream()).collect(Collectors.toList());
        }
    } finally {
        giant.unlock();
    }
}
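The comments above describe the whole search strategy: because locked intervals never overlap, the only candidates are the one interval starting at or before the search start plus every interval starting strictly inside the search interval. A stripped-down sketch of that floor-plus-subSet pattern over a plain NavigableSet (data and names are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.NavigableSet;
import java.util.TreeSet;
import org.joda.time.DateTime;
import org.joda.time.Interval;

public class OverlapSearchDemo {
    public static void main(String[] args) {
        // Start instants of three non-overlapping locked intervals.
        NavigableSet<DateTime> starts = new TreeSet<>();
        starts.add(DateTime.parse("2024-01-01T00:00:00Z"));
        starts.add(DateTime.parse("2024-01-03T00:00:00Z"));
        starts.add(DateTime.parse("2024-01-05T00:00:00Z"));

        Interval search = new Interval(DateTime.parse("2024-01-02T00:00:00Z"),
                DateTime.parse("2024-01-04T00:00:00Z"));

        List<DateTime> candidates = new ArrayList<>();
        // The single start at or before ours; its interval may extend into the search interval.
        DateTime floor = starts.floor(search.getStart());
        if (floor != null) {
            candidates.add(floor);
        }
        // Starts strictly between our start and our end: an interval starting
        // exactly at search.getEnd() cannot overlap, since ends are exclusive.
        candidates.addAll(starts.subSet(search.getStart(), false, search.getEnd(), false));

        System.out.println(candidates); // [2024-01-01T00:00:00.000Z, 2024-01-03T00:00:00.000Z]
    }
}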

From source file: org.apache.druid.indexing.worker.IntermediaryDataManager.java

License: Apache License

private static String getPartitionDir(String supervisorTaskId, Interval interval, int partitionId) {
    return Paths.get(supervisorTaskId, interval.getStart().toString(), interval.getEnd().toString(),
            String.valueOf(partitionId)).toString();
}

From source file: org.apache.druid.java.util.common.granularity.ArbitraryGranularity.java

License: Apache License

@Override
public DateTime increment(DateTime time) {
    // Test if the input cannot be bucketed
    if (time.getMillis() > intervals.last().getEndMillis()) {
        return MAX_DATETIME;
    }

    // First interval with start time <= timestamp
    final Interval interval = intervals.floor(new Interval(time, MAX_DATETIME));
    return interval != null && interval.contains(time) ? interval.getEnd() : time;
}
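The probe trick deserves a note: floor() is called with an interval whose end is maximal, so among buckets sharing a start it sorts last, and floor() returns the latest bucket starting at or before the timestamp. A minimal sketch under stated assumptions: the comparator and the MAX sentinel below stand in for Druid's Comparators.intervalsByStartThenEnd() and MAX_DATETIME:

import java.util.Comparator;
import java.util.NavigableSet;
import java.util.TreeSet;
import org.joda.time.DateTime;
import org.joda.time.Interval;

public class IncrementDemo {
    public static void main(String[] args) {
        Comparator<Interval> byStartThenEnd = Comparator.comparing(Interval::getStart)
                .thenComparing(Interval::getEnd); // stand-in for intervalsByStartThenEnd()
        NavigableSet<Interval> intervals = new TreeSet<>(byStartThenEnd);
        intervals.add(new Interval(DateTime.parse("2024-01-01T00:00:00Z"),
                DateTime.parse("2024-01-02T00:00:00Z")));
        intervals.add(new Interval(DateTime.parse("2024-01-05T00:00:00Z"),
                DateTime.parse("2024-01-06T00:00:00Z")));

        DateTime time = DateTime.parse("2024-01-01T12:00:00Z");
        DateTime max = new DateTime(Long.MAX_VALUE / 2); // illustrative MAX_DATETIME sentinel

        // floor() finds the last bucket starting at or before `time`.
        Interval bucket = intervals.floor(new Interval(time, max));
        // If that bucket actually contains the timestamp, increment to its exclusive end.
        DateTime next = (bucket != null && bucket.contains(time)) ? bucket.getEnd() : time;
        System.out.println(next); // 2024-01-02T00:00:00.000Z
    }
}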

From source file: org.apache.druid.java.util.common.JodaUtils.java

License: Apache License

public static ArrayList<Interval> condenseIntervals(Iterable<Interval> intervals) {
    ArrayList<Interval> retVal = new ArrayList<>();

    final SortedSet<Interval> sortedIntervals;

    if (intervals instanceof SortedSet) {
        sortedIntervals = (SortedSet<Interval>) intervals;
    } else {
        sortedIntervals = new TreeSet<>(Comparators.intervalsByStartThenEnd());
        for (Interval interval : intervals) {
            sortedIntervals.add(interval);
        }
    }

    if (sortedIntervals.isEmpty()) {
        return new ArrayList<>();
    }

    Iterator<Interval> intervalsIter = sortedIntervals.iterator();
    Interval currInterval = intervalsIter.next();
    while (intervalsIter.hasNext()) {
        Interval next = intervalsIter.next();

        if (currInterval.abuts(next)) {
            currInterval = new Interval(currInterval.getStart(), next.getEnd());
        } else if (currInterval.overlaps(next)) {
            DateTime nextEnd = next.getEnd();
            DateTime currEnd = currInterval.getEnd();
            currInterval = new Interval(currInterval.getStart(), nextEnd.isAfter(currEnd) ? nextEnd : currEnd);
        } else {
            retVal.add(currInterval);
            currInterval = next;
        }
    }
    retVal.add(currInterval);

    return retVal;
}
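A hypothetical driver (assuming the JodaUtils class above is on the classpath) illustrating how abutting and overlapping intervals collapse while disjoint ones survive:

import java.util.Arrays;
import java.util.List;
import org.joda.time.DateTime;
import org.joda.time.Interval;

public class CondenseDemo {
    public static void main(String[] args) {
        List<Interval> input = Arrays.asList(
                new Interval(DateTime.parse("2024-01-01T00:00:00Z"), DateTime.parse("2024-01-02T00:00:00Z")),
                new Interval(DateTime.parse("2024-01-02T00:00:00Z"), DateTime.parse("2024-01-03T00:00:00Z")), // abuts the first
                new Interval(DateTime.parse("2024-01-02T12:00:00Z"), DateTime.parse("2024-01-04T00:00:00Z")), // overlaps the merged pair
                new Interval(DateTime.parse("2024-01-10T00:00:00Z"), DateTime.parse("2024-01-11T00:00:00Z"))); // disjoint

        // The first three condense into a single interval; the disjoint one is kept as-is.
        System.out.println(JodaUtils.condenseIntervals(input));
        // [2024-01-01T00:00:00.000Z/2024-01-04T00:00:00.000Z,
        //  2024-01-10T00:00:00.000Z/2024-01-11T00:00:00.000Z]
    }
}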

From source file: org.apache.druid.java.util.common.JodaUtils.java

License: Apache License

public static Interval umbrellaInterval(Iterable<Interval> intervals) {
    ArrayList<DateTime> startDates = new ArrayList<>();
    ArrayList<DateTime> endDates = new ArrayList<>();

    for (Interval interval : intervals) {
        startDates.add(interval.getStart());
        endDates.add(interval.getEnd());
    }

    DateTime minStart = minDateTime(startDates.toArray(new DateTime[0]));
    DateTime maxEnd = maxDateTime(endDates.toArray(new DateTime[0]));

    if (minStart == null || maxEnd == null) {
        throw new IllegalArgumentException("Empty list of intervals");
    }
    return new Interval(minStart, maxEnd);
}
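A companion driver for the method above (same assumption about JodaUtils being on the classpath): the umbrella interval runs from the earliest start to the latest end, even across a gap:

import java.util.Arrays;
import org.joda.time.DateTime;
import org.joda.time.Interval;

public class UmbrellaDemo {
    public static void main(String[] args) {
        System.out.println(JodaUtils.umbrellaInterval(Arrays.asList(
                new Interval(DateTime.parse("2024-01-05T00:00:00Z"), DateTime.parse("2024-01-06T00:00:00Z")),
                new Interval(DateTime.parse("2024-01-01T00:00:00Z"), DateTime.parse("2024-01-02T00:00:00Z")))));
        // 2024-01-01T00:00:00.000Z/2024-01-06T00:00:00.000Z
    }
}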

From source file: org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator.java

License: Apache License

private List<SegmentIdWithShardSpec> getPendingSegmentsForIntervalWithHandle(final Handle handle,
        final String dataSource, final Interval interval) throws IOException {
    final List<SegmentIdWithShardSpec> identifiers = new ArrayList<>();

    final ResultIterator<byte[]> dbSegments = handle.createQuery(StringUtils.format(
            "SELECT payload FROM %1$s WHERE dataSource = :dataSource AND start <= :end and %2$send%2$s >= :start",
            dbTables.getPendingSegmentsTable(), connector.getQuoteString())).bind("dataSource", dataSource)
            .bind("start", interval.getStart().toString()).bind("end", interval.getEnd().toString())
            .map(ByteArrayMapper.FIRST).iterator();

    while (dbSegments.hasNext()) {
        final byte[] payload = dbSegments.next();
        final SegmentIdWithShardSpec identifier = jsonMapper.readValue(payload, SegmentIdWithShardSpec.class);

        if (interval.overlaps(identifier.getInterval())) {
            identifiers.add(identifier);
        }
    }

    dbSegments.close();

    return identifiers;
}
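The query uses the standard interval-overlap predicate: a stored row can overlap the search interval only if it starts no later than the search end and ends no earlier than the search start. Both bounds are inclusive, so merely abutting rows also match; that is why the loop refilters with Interval.overlaps(), which treats ends as exclusive. A minimal in-memory sketch of the predicate (names are illustrative; the string comparison in the SQL behaves the same way because Druid stores fixed-format UTC ISO-8601 timestamps, which sort lexicographically like instants):

import org.joda.time.DateTime;
import org.joda.time.Interval;

public class OverlapPredicateDemo {
    // Mirrors "start <= :end AND end >= :start".
    static boolean sqlCandidate(Interval row, Interval search) {
        return !row.getStart().isAfter(search.getEnd())
                && !row.getEnd().isBefore(search.getStart());
    }

    public static void main(String[] args) {
        Interval search = new Interval(DateTime.parse("2024-01-02T00:00:00Z"),
                DateTime.parse("2024-01-03T00:00:00Z"));
        Interval abutting = new Interval(DateTime.parse("2024-01-01T00:00:00Z"),
                DateTime.parse("2024-01-02T00:00:00Z"));

        // The SQL-style predicate keeps the abutting row...
        System.out.println(sqlCandidate(abutting, search)); // true
        // ...but Interval.overlaps(), with exclusive ends, rejects it.
        System.out.println(abutting.overlaps(search));      // false
    }
}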

From source file: org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator.java

License: Apache License

private VersionedIntervalTimeline<String, DataSegment> getTimelineForIntervalsWithHandle(final Handle handle,
        final String dataSource, final List<Interval> intervals) {
    if (intervals == null || intervals.isEmpty()) {
        throw new IAE("null/empty intervals");
    }

    final StringBuilder sb = new StringBuilder();
    sb.append("SELECT payload FROM %s WHERE used = true AND dataSource = ? AND (");
    for (int i = 0; i < intervals.size(); i++) {
        sb.append(StringUtils.format("(start <= ? AND %1$send%1$s >= ?)", connector.getQuoteString()));
        if (i == intervals.size() - 1) {
            sb.append(")");
        } else {
            sb.append(" OR ");
        }
    }

    Query<Map<String, Object>> sql = handle
            .createQuery(StringUtils.format(sb.toString(), dbTables.getSegmentsTable())).bind(0, dataSource);

    for (int i = 0; i < intervals.size(); i++) {
        Interval interval = intervals.get(i);
        // Bind order matches "(start <= ? AND end >= ?)": first the search end, then the search start.
        sql = sql.bind(2 * i + 1, interval.getEnd().toString()).bind(2 * i + 2, interval.getStart().toString());
    }

    try (final ResultIterator<byte[]> dbSegments = sql.map(ByteArrayMapper.FIRST).iterator()) {
        return VersionedIntervalTimeline.forSegments(Iterators.transform(dbSegments, payload -> {
            try {
                return jsonMapper.readValue(payload, DataSegment.class);
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }));
    }
}

From source file: org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator.java

License: Apache License

@Nullable
private SegmentIdWithShardSpec allocatePendingSegment(final Handle handle, final String dataSource,
        final String sequenceName, final Interval interval, final ShardSpecFactory shardSpecFactory,
        final String maxVersion) throws IOException {
    final CheckExistingSegmentIdResult result = checkAndGetExistingSegmentId(
            handle.createQuery(StringUtils.format(
                    "SELECT payload FROM %s WHERE " + "dataSource = :dataSource AND "
                            + "sequence_name = :sequence_name AND " + "start = :start AND "
                            + "%2$send%2$s = :end",
                    dbTables.getPendingSegmentsTable(), connector.getQuoteString())),
            interval, sequenceName, null, Pair.of("dataSource", dataSource),
            Pair.of("sequence_name", sequenceName), Pair.of("start", interval.getStart().toString()),
            Pair.of("end", interval.getEnd().toString()));

    if (result.found) {
        // The found existing segment identifier can be null if its interval doesn't match the given interval
        return result.segmentIdentifier;
    }

    final SegmentIdWithShardSpec newIdentifier = createNewSegment(handle, dataSource, interval,
            shardSpecFactory, maxVersion);
    if (newIdentifier == null) {
        return null;
    }

    // SELECT -> INSERT can fail due to races; callers must be prepared to retry.
    // Avoiding ON DUPLICATE KEY since it's not portable.
    // Avoiding try/catch since it may cause inadvertent transaction-splitting.

    // UNIQUE key for the row, ensuring we don't have more than one segment per sequence per interval.
    // Using a single column instead of (sequence_name, sequence_prev_id) as some MySQL storage engines
    // have difficulty with large unique keys (see https://github.com/apache/incubator-druid/issues/2319)
    final String sequenceNamePrevIdSha1 = BaseEncoding.base16()
            .encode(Hashing.sha1().newHasher().putBytes(StringUtils.toUtf8(sequenceName)).putByte((byte) 0xff)
                    .putLong(interval.getStartMillis()).putLong(interval.getEndMillis()).hash().asBytes());

    // always insert empty previous sequence id
    insertToMetastore(handle, newIdentifier, dataSource, interval, "", sequenceName, sequenceNamePrevIdSha1);

    log.info("Allocated pending segment [%s] for sequence[%s] in DB", newIdentifier, sequenceName);

    return newIdentifier;
}