List of usage examples for org.joda.time.Interval.getEnd()
public DateTime getEnd()
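Before the project examples, a minimal standalone sketch (class name and sample dates are illustrative, not taken from the sources below). getEnd() returns the exclusive end instant of the interval as a DateTime:

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class GetEndDemo {
    public static void main(String[] args) {
        // An interval spanning one day in UTC.
        Interval interval = new Interval(
                new DateTime(2024, 1, 1, 0, 0, DateTimeZone.UTC),
                new DateTime(2024, 1, 2, 0, 0, DateTimeZone.UTC));

        // getEnd() returns the exclusive end of the interval as a DateTime.
        DateTime end = interval.getEnd();
        System.out.println(end);                                        // 2024-01-02T00:00:00.000Z
        System.out.println(end.getMillis() == interval.getEndMillis()); // true
    }
}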
From source file: org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator.java
License: Apache License
private void insertToMetastore(Handle handle, SegmentIdWithShardSpec newIdentifier, String dataSource,
        Interval interval, String previousSegmentId, String sequenceName, String sequenceNamePrevIdSha1)
        throws JsonProcessingException {
    // "end" is quoted via %2$s because it is a reserved word in some SQL dialects.
    handle.createStatement(StringUtils.format(
            "INSERT INTO %1$s (id, dataSource, created_date, start, %2$send%2$s, sequence_name, "
                    + "sequence_prev_id, sequence_name_prev_id_sha1, payload) "
                    + "VALUES (:id, :dataSource, :created_date, :start, :end, :sequence_name, "
                    + ":sequence_prev_id, :sequence_name_prev_id_sha1, :payload)",
            dbTables.getPendingSegmentsTable(), connector.getQuoteString()))
            .bind("id", newIdentifier.toString())
            .bind("dataSource", dataSource)
            .bind("created_date", DateTimes.nowUtc().toString())
            .bind("start", interval.getStart().toString())
            .bind("end", interval.getEnd().toString())
            .bind("sequence_name", sequenceName)
            .bind("sequence_prev_id", previousSegmentId)
            .bind("sequence_name_prev_id_sha1", sequenceNamePrevIdSha1)
            .bind("payload", jsonMapper.writeValueAsBytes(newIdentifier))
            .execute();
}
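A note on the pattern above, which recurs in most of the examples on this page: the interval endpoints are bound as strings because Joda-Time's default DateTime.toString() emits fixed-width ISO-8601 timestamps, so the lexicographic comparisons in the SQL WHERE clauses agree with chronological order. A small illustration (class name and dates are made up):

import org.joda.time.DateTime;
import org.joda.time.Interval;

public class IsoBindingDemo {
    public static void main(String[] args) {
        Interval interval = new Interval(DateTime.parse("2019-01-01T00:00:00Z"),
                DateTime.parse("2019-02-01T00:00:00Z"));
        String start = interval.getStart().toString(); // "2019-01-01T00:00:00.000Z"
        String end = interval.getEnd().toString();     // "2019-02-01T00:00:00.000Z"
        // Lexicographic order matches chronological order for this fixed-width format.
        System.out.println(start.compareTo(end) < 0);  // true
    }
}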
From source file: org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator.java
License: Apache License
@Override
public int deletePendingSegments(String dataSource, Interval deleteInterval) {
    // Note that the interval filters on created_date (when the pending segment row was created),
    // not on the segment's own start/end.
    return connector.getDBI().inTransaction((handle, status) -> handle
            .createStatement(StringUtils.format(
                    "delete from %s where datasource = :dataSource and created_date >= :start and created_date < :end",
                    dbTables.getPendingSegmentsTable()))
            .bind("dataSource", dataSource)
            .bind("start", deleteInterval.getStart().toString())
            .bind("end", deleteInterval.getEnd().toString())
            .execute());
}
From source file: org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator.java
License: Apache License
@Override
public List<DataSegment> getUnusedSegmentsForInterval(final String dataSource, final Interval interval) {
    List<DataSegment> matchingSegments = connector
            .inReadOnlyTransaction(new TransactionCallback<List<DataSegment>>() {
                @Override
                public List<DataSegment> inTransaction(final Handle handle, final TransactionStatus status) {
                    // 2 range conditions are used on different columns, but not all SQL databases properly optimize it.
                    // Some databases can only use an index on one of the columns. An additional condition provides
                    // explicit knowledge that 'start' cannot be greater than 'end'.
                    return handle.createQuery(StringUtils.format(
                            "SELECT payload FROM %1$s WHERE dataSource = :dataSource and start >= :start "
                                    + "and start <= :end and %2$send%2$s <= :end and used = false",
                            dbTables.getSegmentsTable(), connector.getQuoteString()))
                            .setFetchSize(connector.getStreamingFetchSize())
                            .bind("dataSource", dataSource)
                            .bind("start", interval.getStart().toString())
                            .bind("end", interval.getEnd().toString())
                            .map(ByteArrayMapper.FIRST)
                            .fold(new ArrayList<>(), new Folder3<List<DataSegment>, byte[]>() {
                                @Override
                                public List<DataSegment> fold(List<DataSegment> accumulator, byte[] payload,
                                        FoldController foldController, StatementContext statementContext) {
                                    try {
                                        accumulator.add(jsonMapper.readValue(payload, DataSegment.class));
                                        return accumulator;
                                    } catch (Exception e) {
                                        throw new RuntimeException(e);
                                    }
                                }
                            });
                }
            });
    log.info("Found %,d segments for %s for interval %s.", matchingSegments.size(), dataSource, interval);
    return matchingSegments;
}
From source file: org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator.java
License: Apache License
@Override
public List<Pair<DataSegment, String>> getUsedSegmentAndCreatedDateForInterval(String dataSource,
        Interval interval) {
    return connector.retryWithHandle(handle -> handle
            .createQuery(StringUtils.format(
                    "SELECT created_date, payload FROM %1$s WHERE dataSource = :dataSource "
                            + "AND start >= :start AND %2$send%2$s <= :end AND used = true",
                    dbTables.getSegmentsTable(), connector.getQuoteString()))
            .bind("dataSource", dataSource)
            .bind("start", interval.getStart().toString())
            .bind("end", interval.getEnd().toString())
            .map(new ResultSetMapper<Pair<DataSegment, String>>() {
                @Override
                public Pair<DataSegment, String> map(int index, ResultSet r, StatementContext ctx)
                        throws SQLException {
                    try {
                        return new Pair<>(jsonMapper.readValue(r.getBytes("payload"), DataSegment.class),
                                r.getString("created_date"));
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                }
            })
            .list());
}
From source file: org.apache.druid.metadata.SQLMetadataSegmentManager.java
License: Apache License
/**
 * Implementation for both {@link #markAsUsedAllNonOvershadowedSegmentsInDataSource} (if the given interval is null)
 * and {@link #markAsUsedNonOvershadowedSegmentsInInterval}.
 */
private int doMarkAsUsedNonOvershadowedSegments(String dataSourceName, @Nullable Interval interval) {
    List<DataSegment> usedSegmentsOverlappingInterval = new ArrayList<>();
    List<DataSegment> unusedSegmentsInInterval = new ArrayList<>();
    connector.inReadOnlyTransaction((handle, status) -> {
        String queryString = StringUtils.format("SELECT used, payload FROM %1$s WHERE dataSource = :dataSource",
                getSegmentsTable());
        if (interval != null) {
            queryString += StringUtils.format(" AND start < :end AND %1$send%1$s > :start",
                    connector.getQuoteString());
        }
        Query<?> query = handle.createQuery(queryString)
                .setFetchSize(connector.getStreamingFetchSize())
                .bind("dataSource", dataSourceName);
        if (interval != null) {
            query = query.bind("start", interval.getStart().toString())
                    .bind("end", interval.getEnd().toString());
        }
        query = query.map((int index, ResultSet resultSet, StatementContext context) -> {
            try {
                DataSegment segment = jsonMapper.readValue(resultSet.getBytes("payload"), DataSegment.class);
                if (resultSet.getBoolean("used")) {
                    usedSegmentsOverlappingInterval.add(segment);
                } else if (interval == null || interval.contains(segment.getInterval())) {
                    unusedSegmentsInInterval.add(segment);
                }
                return null;
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        });
        // Consume the query results to ensure usedSegmentsOverlappingInterval and unusedSegmentsInInterval are
        // populated.
        consume(query.iterator());
        return null;
    });

    VersionedIntervalTimeline<String, DataSegment> versionedIntervalTimeline = VersionedIntervalTimeline
            .forSegments(Iterators.concat(usedSegmentsOverlappingInterval.iterator(),
                    unusedSegmentsInInterval.iterator()));

    return markNonOvershadowedSegmentsAsUsed(unusedSegmentsInInterval, versionedIntervalTimeline);
}
From source file: org.apache.druid.metadata.SQLMetadataSegmentManager.java
License: Apache License
@Override
public int markAsUnusedSegmentsInInterval(String dataSourceName, Interval interval) {
    try {
        Integer numUpdatedDatabaseEntries = connector.getDBI()
                .withHandle(handle -> handle
                        .createStatement(StringUtils.format(
                                "UPDATE %1$s SET used=false WHERE datasource = :datasource "
                                        + "AND start >= :start AND %2$send%2$s <= :end",
                                getSegmentsTable(), connector.getQuoteString()))
                        .bind("datasource", dataSourceName)
                        .bind("start", interval.getStart().toString())
                        .bind("end", interval.getEnd().toString())
                        .execute());
        return numUpdatedDatabaseEntries;
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
From source file: org.apache.druid.query.IntervalChunkingQueryRunner.java
License: Apache License
private static Iterable<Interval> splitInterval(Interval interval, Period period) {
    if (interval.getEndMillis() == interval.getStartMillis()) {
        return Collections.singletonList(interval);
    }
    List<Interval> intervals = new ArrayList<>();
    Iterator<Interval> timestamps = new PeriodGranularity(period, null, null).getIterable(interval).iterator();
    // Clip the first chunk to the interval's start, since the granularity may begin at an earlier boundary.
    DateTime start = DateTimes.max(timestamps.next().getStart(), interval.getStart());
    while (timestamps.hasNext()) {
        DateTime end = timestamps.next().getStart();
        intervals.add(new Interval(start, end));
        start = end;
    }
    // Close the final chunk at the interval's end rather than at the next period boundary.
    if (start.compareTo(interval.getEnd()) < 0) {
        intervals.add(new Interval(start, interval.getEnd()));
    }
    return intervals;
}
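For intuition, a rough sketch in plain Joda-Time of the chunking splitInterval performs (hour boundaries stand in for Druid's PeriodGranularity; class and variable names are illustrative). Note how the first chunk is clipped to the interval's start and the last to its end:

import java.util.ArrayList;
import java.util.List;
import org.joda.time.DateTime;
import org.joda.time.Interval;

public class SplitIntervalSketch {
    public static void main(String[] args) {
        // Splitting 10:30-13:00 on hour boundaries yields [10:30, 11:00), [11:00, 12:00), [12:00, 13:00).
        Interval interval = new Interval(DateTime.parse("2024-05-01T10:30:00Z"),
                DateTime.parse("2024-05-01T13:00:00Z"));
        List<Interval> chunks = new ArrayList<>();
        DateTime start = interval.getStart();
        while (start.isBefore(interval.getEnd())) {
            DateTime boundary = start.hourOfDay().roundCeilingCopy();
            if (boundary.equals(start)) {
                boundary = boundary.plusHours(1); // already on a boundary: advance one full period
            }
            DateTime end = boundary.isBefore(interval.getEnd()) ? boundary : interval.getEnd();
            chunks.add(new Interval(start, end));
            start = end;
        }
        chunks.forEach(System.out::println);
    }
}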
From source file: org.apache.druid.query.select.SelectQueryQueryToolChest.java
License: Apache License
@Override
public <T extends LogicalSegment> List<T> filterSegments(SelectQuery query, List<T> segments) {
    // At the point where this code is called, only one dataSource should exist.
    final String dataSource = Iterables.getOnlyElement(query.getDataSource().getNames());

    PagingSpec pagingSpec = query.getPagingSpec();
    Map<String, Integer> paging = pagingSpec.getPagingIdentifiers();
    if (paging == null || paging.isEmpty()) {
        return segments;
    }

    final Granularity granularity = query.getGranularity();
    TreeMap<Long, Long> granularThresholds = new TreeMap<>();

    // A paged select query using a UnionDataSource will return pagingIdentifiers from segments in more than one
    // dataSource which confuses subsequent queries and causes a failure. To avoid this, filter only the paging keys
    // that are applicable to this dataSource so that each dataSource in a union query gets the appropriate keys.
    paging.keySet().stream()
            .filter(identifier -> SegmentId.tryParse(dataSource, identifier) != null)
            .map(SegmentId.makeIntervalExtractor(dataSource))
            .sorted(query.isDescending() ? Comparators.intervalsByEndThenStart()
                    : Comparators.intervalsByStartThenEnd())
            .forEach(interval -> {
                if (query.isDescending()) {
                    long granularEnd = granularity.bucketStart(interval.getEnd()).getMillis();
                    Long currentEnd = granularThresholds.get(granularEnd);
                    if (currentEnd == null || interval.getEndMillis() > currentEnd) {
                        granularThresholds.put(granularEnd, interval.getEndMillis());
                    }
                } else {
                    long granularStart = granularity.bucketStart(interval.getStart()).getMillis();
                    Long currentStart = granularThresholds.get(granularStart);
                    if (currentStart == null || interval.getStartMillis() < currentStart) {
                        granularThresholds.put(granularStart, interval.getStartMillis());
                    }
                }
            });

    List<T> queryIntervals = Lists.newArrayList(segments);
    Iterator<T> it = queryIntervals.iterator();
    if (query.isDescending()) {
        while (it.hasNext()) {
            Interval interval = it.next().getInterval();
            Map.Entry<Long, Long> ceiling = granularThresholds
                    .ceilingEntry(granularity.bucketStart(interval.getEnd()).getMillis());
            if (ceiling == null || interval.getStartMillis() >= ceiling.getValue()) {
                it.remove();
            }
        }
    } else {
        while (it.hasNext()) {
            Interval interval = it.next().getInterval();
            Map.Entry<Long, Long> floor = granularThresholds
                    .floorEntry(granularity.bucketStart(interval.getStart()).getMillis());
            if (floor == null || interval.getEndMillis() <= floor.getValue()) {
                it.remove();
            }
        }
    }
    return queryIntervals;
}
From source file: org.apache.druid.server.audit.SQLAuditManager.java
License: Apache License
@Override
public List<AuditEntry> fetchAuditHistory(final String key, final String type, Interval interval) {
    final Interval theInterval = getIntervalOrDefault(interval);
    return dbi.withHandle(new HandleCallback<List<AuditEntry>>() {
        @Override
        public List<AuditEntry> withHandle(Handle handle) {
            return handle.createQuery(StringUtils.format(
                    "SELECT payload FROM %s WHERE audit_key = :audit_key and type = :type and created_date between :start_date and :end_date ORDER BY created_date",
                    getAuditTable()))
                    .bind("audit_key", key)
                    .bind("type", type)
                    .bind("start_date", theInterval.getStart().toString())
                    .bind("end_date", theInterval.getEnd().toString())
                    .map(new ResultSetMapper<AuditEntry>() {
                        @Override
                        public AuditEntry map(int index, ResultSet r, StatementContext ctx) throws SQLException {
                            try {
                                return jsonMapper.readValue(r.getBytes("payload"), AuditEntry.class);
                            } catch (IOException e) {
                                throw new SQLException(e);
                            }
                        }
                    })
                    .list();
        }
    });
}
From source file: org.apache.druid.server.audit.SQLAuditManager.java
License: Apache License
@Override
public List<AuditEntry> fetchAuditHistory(final String type, Interval interval) {
    final Interval theInterval = getIntervalOrDefault(interval);
    return dbi.withHandle(new HandleCallback<List<AuditEntry>>() {
        @Override
        public List<AuditEntry> withHandle(Handle handle) {
            return handle.createQuery(StringUtils.format(
                    "SELECT payload FROM %s WHERE type = :type and created_date between :start_date and :end_date ORDER BY created_date",
                    getAuditTable()))
                    .bind("type", type)
                    .bind("start_date", theInterval.getStart().toString())
                    .bind("end_date", theInterval.getEnd().toString())
                    .map(new ResultSetMapper<AuditEntry>() {
                        @Override
                        public AuditEntry map(int index, ResultSet r, StatementContext ctx) throws SQLException {
                            try {
                                return jsonMapper.readValue(r.getBytes("payload"), AuditEntry.class);
                            } catch (IOException e) {
                                throw new SQLException(e);
                            }
                        }
                    })
                    .list();
        }
    });
}