List of usage examples for org.joda.time.Interval.getEnd()
public DateTime getEnd()
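getEnd() returns the exclusive end instant of the interval as a DateTime in the interval's chronology. Before the project examples below, here is a minimal, self-contained sketch of the call; the specific instants are illustrative only:

import org.joda.time.DateTime;
import org.joda.time.Interval;

public class GetEndExample {
    public static void main(String[] args) {
        // Illustrative one-hour interval; any two instants in order would do.
        DateTime start = new DateTime("2015-01-01T00:00:00Z");
        DateTime end = new DateTime("2015-01-01T01:00:00Z");
        Interval interval = new Interval(start, end);

        // getEnd() hands back the exclusive end instant as a DateTime.
        DateTime intervalEnd = interval.getEnd();
        System.out.println("end = " + intervalEnd);
        System.out.println("millis spanned = " + (intervalEnd.getMillis() - interval.getStart().getMillis()));
    }
}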
From source file:io.druid.indexing.common.task.AbstractTask.java
License:Apache License
public static String makeId(String id, final String typeName, String dataSource, Interval interval) {
    return id != null ? id
            : joinId(typeName, dataSource, interval.getStart(), interval.getEnd(), new DateTime().toString());
}
From source file:io.druid.indexing.common.task.ConvertSegmentTask.java
License:Apache License
protected static String makeId(String dataSource, Interval interval) {
    Preconditions.checkNotNull(dataSource, "dataSource");
    Preconditions.checkNotNull(interval, "interval");
    return joinId(TYPE, dataSource, interval.getStart(), interval.getEnd(), new DateTime());
}
From source file:io.druid.indexing.common.task.IndexTask.java
License:Apache License
private DataSegment generateSegment(final TaskToolbox toolbox, final DataSchema schema, final ShardSpec shardSpec,
        final Interval interval, final String version) throws IOException {
    // Set up temporary directory.
    final File tmpDir = new File(toolbox.getTaskWorkDir(), String.format("%s_%s_%s_%s_%s", this.getDataSource(),
            interval.getStart(), interval.getEnd(), version, shardSpec.getPartitionNum()));

    final FirehoseFactory firehoseFactory = ingestionSchema.getIOConfig().getFirehoseFactory();
    final int rowFlushBoundary = ingestionSchema.getTuningConfig().getRowFlushBoundary();

    // We need to track published segments.
    final List<DataSegment> pushedSegments = new CopyOnWriteArrayList<DataSegment>();
    final DataSegmentPusher wrappedDataSegmentPusher = new DataSegmentPusher() {
        @Override
        public String getPathForHadoop(String dataSource) {
            return toolbox.getSegmentPusher().getPathForHadoop(dataSource);
        }

        @Override
        public DataSegment push(File file, DataSegment segment) throws IOException {
            final DataSegment pushedSegment = toolbox.getSegmentPusher().push(file, segment);
            pushedSegments.add(pushedSegment);
            return pushedSegment;
        }
    };

    // rowFlushBoundary for this job
    final int myRowFlushBoundary = rowFlushBoundary > 0 ? rowFlushBoundary
            : toolbox.getConfig().getDefaultRowFlushBoundary();

    // Create firehose + plumber
    final FireDepartmentMetrics metrics = new FireDepartmentMetrics();
    final Firehose firehose = firehoseFactory.connect(ingestionSchema.getDataSchema().getParser());
    final Supplier<Committer> committerSupplier = Committers.supplierFromFirehose(firehose);
    final Plumber plumber = new YeOldePlumberSchool(interval, version, wrappedDataSegmentPusher, tmpDir)
            .findPlumber(schema,
                    convertTuningConfig(shardSpec, myRowFlushBoundary, ingestionSchema.getTuningConfig().getIndexSpec()),
                    metrics);

    final QueryGranularity rollupGran = ingestionSchema.getDataSchema().getGranularitySpec()
            .getQueryGranularity();

    try {
        plumber.startJob();

        while (firehose.hasMore()) {
            final InputRow inputRow = firehose.nextRow();

            if (shouldIndex(shardSpec, interval, inputRow, rollupGran)) {
                int numRows = plumber.add(inputRow, committerSupplier);
                if (numRows == -1) {
                    throw new ISE(String.format("Was expecting non-null sink for timestamp[%s]",
                            new DateTime(inputRow.getTimestampFromEpoch())));
                }
                metrics.incrementProcessed();
            } else {
                metrics.incrementThrownAway();
            }
        }
    } finally {
        firehose.close();
    }

    plumber.persist(committerSupplier.get());

    try {
        plumber.finishJob();
    } finally {
        log.info(
                "Task[%s] interval[%s] partition[%d] took in %,d rows (%,d processed, %,d unparseable, %,d thrown away)"
                        + " and output %,d rows",
                getId(), interval, shardSpec.getPartitionNum(),
                metrics.processed() + metrics.unparseable() + metrics.thrownAway(), metrics.processed(),
                metrics.unparseable(), metrics.thrownAway(), metrics.rowOutput());
    }

    // We expect a single segment to have been created.
    return Iterables.getOnlyElement(pushedSegments);
}
From source file:io.druid.indexing.common.task.TaskUtils.java
License:Apache License
public static String makeId(String id, final String typeName, String dataSource, Interval interval) {
    return id != null ? id
            : String.format("%s_%s_%s_%s_%s", typeName, dataSource, interval.getStart(), interval.getEnd(),
                    new DateTime().toString());
}
From source file:io.druid.indexing.common.task.VersionConverterTask.java
License:Apache License
private static String makeId(String dataSource, Interval interval) {
    Preconditions.checkNotNull(dataSource, "dataSource");
    Preconditions.checkNotNull(interval, "interval");
    return joinId(TYPE, dataSource, interval.getStart(), interval.getEnd(), new DateTime());
}
From source file:io.druid.indexing.coordinator.IndexerDBCoordinator.java
License:Open Source License
public List<DataSegment> getUnusedSegmentsForInterval(final String dataSource, final Interval interval) {
    List<DataSegment> matchingSegments = dbi.withHandle(new HandleCallback<List<DataSegment>>() {
        @Override
        public List<DataSegment> withHandle(Handle handle) throws IOException {
            return handle.createQuery(String.format(
                    "SELECT payload FROM %s WHERE dataSource = :dataSource and start >= :start and end <= :end and used = 0",
                    dbTables.getSegmentsTable()))
                    .bind("dataSource", dataSource)
                    .bind("start", interval.getStart().toString())
                    .bind("end", interval.getEnd().toString())
                    .fold(Lists.<DataSegment>newArrayList(),
                            new Folder3<List<DataSegment>, Map<String, Object>>() {
                                @Override
                                public List<DataSegment> fold(List<DataSegment> accumulator,
                                        Map<String, Object> stringObjectMap, FoldController foldController,
                                        StatementContext statementContext) throws SQLException {
                                    try {
                                        DataSegment segment = jsonMapper.readValue(
                                                (String) stringObjectMap.get("payload"), DataSegment.class);
                                        accumulator.add(segment);
                                        return accumulator;
                                    } catch (Exception e) {
                                        throw Throwables.propagate(e);
                                    }
                                }
                            });
        }
    });

    log.info("Found %,d segments for %s for interval %s.", matchingSegments.size(), dataSource, interval);

    return matchingSegments;
}
From source file:io.druid.indexing.overlord.IndexerDBCoordinator.java
License:Open Source License
public List<DataSegment> getUnusedSegmentsForInterval(final String dataSource, final Interval interval) {
    List<DataSegment> matchingSegments = dbConnector.getDBI()
            .withHandle(new HandleCallback<List<DataSegment>>() {
                @Override
                public List<DataSegment> withHandle(Handle handle) throws IOException, SQLException {
                    return handle.createQuery(String.format(dbConnector.isPostgreSQL()
                            ? "SELECT payload FROM %s WHERE dataSource = :dataSource and start >= :start and \"end\" <= :end and used = false"
                            : "SELECT payload FROM %s WHERE dataSource = :dataSource and start >= :start and end <= :end and used = false",
                            dbTables.getSegmentsTable()))
                            .bind("dataSource", dataSource)
                            .bind("start", interval.getStart().toString())
                            .bind("end", interval.getEnd().toString())
                            .fold(Lists.<DataSegment>newArrayList(),
                                    new Folder3<List<DataSegment>, Map<String, Object>>() {
                                        @Override
                                        public List<DataSegment> fold(List<DataSegment> accumulator,
                                                Map<String, Object> stringObjectMap, FoldController foldController,
                                                StatementContext statementContext) throws SQLException {
                                            try {
                                                DataSegment segment = jsonMapper.readValue(
                                                        (String) stringObjectMap.get("payload"), DataSegment.class);
                                                accumulator.add(segment);
                                                return accumulator;
                                            } catch (Exception e) {
                                                throw Throwables.propagate(e);
                                            }
                                        }
                                    });
                }
            });

    log.info("Found %,d segments for %s for interval %s.", matchingSegments.size(), dataSource, interval);

    return matchingSegments;
}
From source file:io.druid.indexing.overlord.TaskLockbox.java
License:Apache License
/**
 * Return all locks that overlap some search interval.
 */
private List<TaskLockPosse> findLockPossesForInterval(final String dataSource, final Interval interval) {
    giant.lock();

    try {
        final NavigableMap<Interval, TaskLockPosse> dsRunning = running.get(dataSource);
        if (dsRunning == null) {
            // No locks at all
            return Collections.emptyList();
        } else {
            // Tasks are indexed by locked interval, which are sorted by interval start. Intervals are non-overlapping, so:
            final NavigableSet<Interval> dsLockbox = dsRunning.navigableKeySet();
            final Iterable<Interval> searchIntervals = Iterables.concat(
                    // Single interval that starts at or before ours
                    Collections.singletonList(dsLockbox
                            .floor(new Interval(interval.getStart(), new DateTime(JodaUtils.MAX_INSTANT)))),
                    // All intervals that start somewhere between our start instant (exclusive) and end instant (exclusive)
                    dsLockbox.subSet(new Interval(interval.getStart(), new DateTime(JodaUtils.MAX_INSTANT)), false,
                            new Interval(interval.getEnd(), interval.getEnd()), false));

            return Lists.newArrayList(FunctionalIterable.create(searchIntervals).filter(new Predicate<Interval>() {
                @Override
                public boolean apply(@Nullable Interval searchInterval) {
                    return searchInterval != null && searchInterval.overlaps(interval);
                }
            }).transform(new Function<Interval, TaskLockPosse>() {
                @Override
                public TaskLockPosse apply(Interval interval) {
                    return dsRunning.get(interval);
                }
            }));
        }
    } finally {
        giant.unlock();
    }
}
From source file:io.druid.java.util.common.Intervals.java
License:Apache License
public static boolean isEmpty(Interval interval) {
    return interval.getStart().equals(interval.getEnd());
}
From source file:io.druid.java.util.common.JodaUtils.java
License:Apache License
public static ArrayList<Interval> condenseIntervals(Iterable<Interval> intervals) {
    ArrayList<Interval> retVal = Lists.newArrayList();

    final SortedSet<Interval> sortedIntervals;

    if (intervals instanceof SortedSet) {
        sortedIntervals = (SortedSet<Interval>) intervals;
    } else {
        sortedIntervals = Sets.newTreeSet(Comparators.intervalsByStartThenEnd());
        for (Interval interval : intervals) {
            sortedIntervals.add(interval);
        }
    }

    if (sortedIntervals.isEmpty()) {
        return Lists.newArrayList();
    }

    Iterator<Interval> intervalsIter = sortedIntervals.iterator();
    Interval currInterval = intervalsIter.next();
    while (intervalsIter.hasNext()) {
        Interval next = intervalsIter.next();

        if (currInterval.abuts(next)) {
            currInterval = new Interval(currInterval.getStart(), next.getEnd());
        } else if (currInterval.overlaps(next)) {
            DateTime nextEnd = next.getEnd();
            DateTime currEnd = currInterval.getEnd();
            currInterval = new Interval(currInterval.getStart(), nextEnd.isAfter(currEnd) ? nextEnd : currEnd);
        } else {
            retVal.add(currInterval);
            currInterval = next;
        }
    }
    retVal.add(currInterval);

    return retVal;
}
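To see what the condensing above produces, here is a small hypothetical driver; it assumes the JodaUtils class from this file (and its Guava dependency) is on the classpath, and the interval values are made up for illustration:

import java.util.List;

import com.google.common.collect.ImmutableList;
import org.joda.time.Interval;

public class CondenseIntervalsExample {
    public static void main(String[] args) {
        // Input order does not matter; condenseIntervals sorts by start then end.
        List<Interval> condensed = JodaUtils.condenseIntervals(ImmutableList.of(
                new Interval("2015-01-02/2015-01-03"),    // abuts the next one -> merged
                new Interval("2015-01-01/2015-01-02"),
                new Interval("2015-01-05/2015-01-07"),
                new Interval("2015-01-06/2015-01-08")));  // overlaps the previous one -> merged

        // Expect two condensed intervals:
        //   2015-01-01/2015-01-03 and 2015-01-05/2015-01-08
        for (Interval interval : condensed) {
            System.out.println(interval.getStart() + " .. " + interval.getEnd());
        }
    }
}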