List of usage examples for the org.joda.time.Interval constructor
public Interval(Object interval, Chronology chronology)
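Before the per-project examples, a minimal sketch of the constructor itself: the Object argument is anything a registered Joda-Time converter understands, most commonly an ISO-8601 interval string, and the Chronology controls how the instants are interpreted. The dates below are illustrative.

import org.joda.time.Interval;
import org.joda.time.chrono.ISOChronology;

public class IntervalConstructorDemo {
    public static void main(String[] args) {
        // Parse an ISO-8601 interval string in the UTC ISO chronology.
        Interval q1 = new Interval("2016-01-01/2016-04-01", ISOChronology.getInstanceUTC());
        System.out.println(q1.getStart()); // 2016-01-01T00:00:00.000Z
    }
}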
From source file:io.coala.dsol.util.DsolUtil.java
License:Apache License
/**
 * @return overlap of specified interval within replication run period
 *         (after warm-up and before run length)
 */
public static Interval crop(final Interval interval, final Treatment treatment) {
    final Interval runPeriod = getRunInterval(treatment);
    if (interval.overlaps(runPeriod)) {
        final long croppedStart = Math.max(interval.getStartMillis(), runPeriod.getStartMillis());
        final long croppedEnd = Math.min(interval.getEndMillis(), runPeriod.getEndMillis());
        return new Interval(croppedStart, croppedEnd);
    }
    return interval;
}
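The crop above is an interval intersection clamped to the run period. Joda-Time ships this operation as Interval.overlap(ReadableInterval); a minimal sketch with illustrative millisecond endpoints (note the one behavioral difference: overlap() returns null for disjoint intervals, while crop() returns the input unchanged):

import org.joda.time.Interval;

Interval run = new Interval(1000L, 5000L);
Interval request = new Interval(3000L, 9000L);

// Same clamped result as crop() above: [3000, 5000)
Interval cropped = request.overlap(run);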
From source file:io.coala.xml.XmlUtil.java
License:Apache License
/**
 * @param duration a JAXP {@link Duration}
 * @param offset the start instant of the resulting interval
 * @return the {@link Interval}
 */
public static Interval toInterval(final Duration duration, final DateTime offset) {
    return new Interval(offset, offset.plus(duration.getTimeInMillis(offset.toDate())));
}
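A quick way to exercise this helper, assuming a javax.xml.datatype.Duration built through DatatypeFactory; the date and duration values are illustrative. Note that the duration is resolved against the offset, which matters for calendar fields such as months:

import javax.xml.datatype.DatatypeConfigurationException;
import javax.xml.datatype.DatatypeFactory;
import javax.xml.datatype.Duration;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

static Interval demo() throws DatatypeConfigurationException {
    // P1M resolved against Feb 1, 2015 is 28 days; against Mar 1 it would be 31.
    Duration oneMonth = DatatypeFactory.newInstance().newDuration("P1M");
    DateTime offset = new DateTime(2015, 2, 1, 0, 0, DateTimeZone.UTC);
    return XmlUtil.toInterval(oneMonth, offset); // 2015-02-01/2015-03-01 UTC
}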
From source file:io.druid.indexer.path.GranularUnprocessedPathSpec.java
License:Apache License
@Override
public Job addInputPaths(HadoopDruidIndexerConfig config, Job job) throws IOException {
    // This PathSpec breaks so many abstractions that we might as well break some more
    Preconditions.checkState(config.getGranularitySpec() instanceof UniformGranularitySpec,
            String.format("Cannot use %s without %s", GranularUnprocessedPathSpec.class.getSimpleName(),
                    UniformGranularitySpec.class.getSimpleName()));

    final Path betaInput = new Path(getInputPath());
    final FileSystem fs = betaInput.getFileSystem(job.getConfiguration());
    final Granularity segmentGranularity = config.getGranularitySpec().getSegmentGranularity();

    Map<DateTime, Long> inputModifiedTimes = new TreeMap<>(Comparators.inverse(Comparators.comparable()));

    for (FileStatus status : FSSpideringIterator.spiderIterable(fs, betaInput)) {
        final DateTime key = segmentGranularity.toDate(status.getPath().toString());
        final Long currVal = inputModifiedTimes.get(key);
        final long mTime = status.getModificationTime();
        inputModifiedTimes.put(key, currVal == null ? mTime : Math.max(currVal, mTime));
    }

    Set<Interval> bucketsToRun = Sets.newTreeSet(Comparators.intervals());
    for (Map.Entry<DateTime, Long> entry : inputModifiedTimes.entrySet()) {
        DateTime timeBucket = entry.getKey();
        long mTime = entry.getValue();

        String bucketOutput = String.format("%s/%s", config.getSchema().getIOConfig().getSegmentOutputPath(),
                segmentGranularity.toPath(timeBucket));
        for (FileStatus fileStatus : FSSpideringIterator.spiderIterable(fs, new Path(bucketOutput))) {
            if (fileStatus.getModificationTime() > mTime) {
                bucketsToRun.add(new Interval(timeBucket, segmentGranularity.increment(timeBucket)));
                break;
            }
        }

        if (bucketsToRun.size() >= maxBuckets) {
            break;
        }
    }

    config.setGranularitySpec(new UniformGranularitySpec(segmentGranularity,
            config.getGranularitySpec().getQueryGranularity(), Lists.newArrayList(bucketsToRun)));

    return super.addInputPaths(config, job);
}
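The Interval built here, new Interval(timeBucket, segmentGranularity.increment(timeBucket)), is a granularity-sized bucket around a timestamp; the Granularity.bucket example further down this page shows the same construction in isolation.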
From source file:io.druid.indexing.common.task.IndexTask.java
License:Apache License
private static Interval makeInterval(IndexIngestionSpec ingestionSchema) {
    GranularitySpec spec = ingestionSchema.getDataSchema().getGranularitySpec();

    return new Interval(spec.bucketIntervals().get().first().getStart(),
            spec.bucketIntervals().get().last().getEnd());
}
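The pattern is collapsing a sorted set of bucket intervals into a single umbrella interval running from the first start to the last end. A self-contained sketch with plain Joda-Time, where the SortedSet (ordered by start, e.g. via Comparator.comparingLong(Interval::getStartMillis)) stands in for spec.bucketIntervals().get():

import java.util.SortedSet;
import org.joda.time.Interval;

// Assumes 'buckets' is non-empty and sorted by start instant.
static Interval umbrella(SortedSet<Interval> buckets) {
    return new Interval(buckets.first().getStart(), buckets.last().getEnd());
}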
From source file:io.druid.indexing.overlord.ForkingTaskRunner.java
License:Apache License
@Override
@LifecycleStop
public void stop() {
    stopping = true;
    exec.shutdown();

    synchronized (tasks) {
        for (ForkingTaskRunnerWorkItem taskWorkItem : tasks.values()) {
            if (taskWorkItem.processHolder != null) {
                log.info("Closing output stream to task[%s].", taskWorkItem.getTask().getId());
                try {
                    taskWorkItem.processHolder.process.getOutputStream().close();
                } catch (Exception e) {
                    log.warn(e, "Failed to close stdout to task[%s]. Destroying task.",
                            taskWorkItem.getTask().getId());
                    taskWorkItem.processHolder.process.destroy();
                }
            }
        }
    }

    final DateTime start = new DateTime();
    final long timeout = new Interval(start, taskConfig.getGracefulShutdownTimeout()).toDurationMillis();

    // Things should be terminating now. Wait for it to happen so logs can be uploaded and all that good stuff.
    log.info("Waiting up to %,dms for shutdown.", timeout);
    if (timeout > 0) {
        try {
            final boolean terminated = exec.awaitTermination(timeout, TimeUnit.MILLISECONDS);
            final long elapsed = System.currentTimeMillis() - start.getMillis();
            if (terminated) {
                log.info("Finished stopping in %,dms.", elapsed);
            } else {
                final Set<String> stillRunning;
                synchronized (tasks) {
                    stillRunning = ImmutableSet.copyOf(tasks.keySet());
                }

                log.makeAlert("Failed to stop forked tasks")
                        .addData("stillRunning", stillRunning)
                        .addData("elapsed", elapsed)
                        .emit();

                log.warn("Executor failed to stop after %,dms, not waiting for it! Tasks still running: [%s]",
                        elapsed, Joiner.on("; ").join(stillRunning));
            }
        } catch (InterruptedException e) {
            log.warn(e, "Interrupted while waiting for executor to finish.");
            Thread.currentThread().interrupt();
        }
    } else {
        log.warn("Ran out of time, not waiting for executor to finish!");
    }
}
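The Interval usage worth noting is new Interval(start, taskConfig.getGracefulShutdownTimeout()): the Interval(ReadableInstant, ReadablePeriod) constructor anchors a configured Period at a concrete instant, so variable-length fields resolve before toDurationMillis(). A minimal sketch (the period value is illustrative); the same idiom appears again in SingleTaskBackgroundRunner below:

import org.joda.time.DateTime;
import org.joda.time.Interval;
import org.joda.time.Period;

static long gracefulTimeoutMillis(Period gracePeriod) {
    // Anchoring the period at 'now' converts variable-length fields
    // (months, DST-crossing days) into fixed milliseconds.
    return new Interval(DateTime.now(), gracePeriod).toDurationMillis();
}

// gracefulTimeoutMillis(Period.minutes(5)) -> 300000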
From source file:io.druid.indexing.overlord.IndexerMetadataStorageAdapter.java
License:Apache License
public int deletePendingSegments(String dataSource, Interval deleteInterval) {
    // Check that the given interval does not overlap the interval (minCreatedDateOfActiveTasks, MAX)
    final Optional<DateTime> minCreatedDateOfActiveTasks = taskStorageQueryAdapter.getActiveTasks().stream()
            .map(task -> Preconditions.checkNotNull(taskStorageQueryAdapter.getCreatedTime(task.getId()),
                    "Can't find the createdTime for task[%s]", task.getId()))
            .min(Comparator.naturalOrder());

    final Interval activeTaskInterval = new Interval(minCreatedDateOfActiveTasks.orElse(DateTimes.MAX),
            DateTimes.MAX);

    Preconditions.checkArgument(!deleteInterval.overlaps(activeTaskInterval),
            "Cannot delete pendingSegments because there is at least one active task created at %s",
            activeTaskInterval.getStart());

    return indexerMetadataStorageCoordinator.deletePendingSegments(dataSource, deleteInterval);
}
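Note the sentinel trick: with no active tasks, the guard degenerates to the empty interval (MAX, MAX), which cannot overlap any realistic delete interval, so the check passes vacuously. A sketch of the same pattern with plain Joda-Time (the far-future sentinel is illustrative; Druid uses its own DateTimes.MAX):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

DateTime max = new DateTime(9999, 1, 1, 0, 0, DateTimeZone.UTC);
Interval guard = new Interval(max, max); // empty: no active tasks
Interval deleteInterval = new Interval(new DateTime(2018, 1, 1, 0, 0, DateTimeZone.UTC),
        new DateTime(2018, 2, 1, 0, 0, DateTimeZone.UTC));
System.out.println(deleteInterval.overlaps(guard)); // false -> deletion allowed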
From source file:io.druid.indexing.overlord.SingleTaskBackgroundRunner.java
License:Apache License
@Override
@LifecycleStop
public void stop() {
    stopping = true;

    if (executorService != null) {
        try {
            executorService.shutdown();
        } catch (SecurityException ex) {
            log.wtf(ex, "I can't control my own threads!");
        }
    }

    if (runningItem != null) {
        final Task task = runningItem.getTask();
        final long start = System.currentTimeMillis();
        final boolean graceful;
        final long elapsed;
        boolean error = false;

        if (taskConfig.isRestoreTasksOnRestart() && task.canRestore()) {
            // Attempt graceful shutdown.
            graceful = true;
            log.info("Starting graceful shutdown of task[%s].", task.getId());

            try {
                task.stopGracefully();
                final TaskStatus taskStatus = runningItem.getResult().get(
                        new Interval(DateTimes.utc(start), taskConfig.getGracefulShutdownTimeout())
                                .toDurationMillis(),
                        TimeUnit.MILLISECONDS);

                // Ignore status, it doesn't matter for graceful shutdowns.
                log.info("Graceful shutdown of task[%s] finished in %,dms.", task.getId(),
                        System.currentTimeMillis() - start);

                TaskRunnerUtils.notifyStatusChanged(listeners, task.getId(), taskStatus);
            } catch (Exception e) {
                log.makeAlert(e, "Graceful task shutdown failed: %s", task.getDataSource())
                        .addData("taskId", task.getId())
                        .addData("dataSource", task.getDataSource())
                        .emit();
                log.warn(e, "Graceful shutdown of task[%s] aborted with exception.", task.getId());
                error = true;
                TaskRunnerUtils.notifyStatusChanged(listeners, task.getId(), TaskStatus.failure(task.getId()));
            }
        } else {
            graceful = false;
            TaskRunnerUtils.notifyStatusChanged(listeners, task.getId(), TaskStatus.failure(task.getId()));
        }

        elapsed = System.currentTimeMillis() - start;

        final ServiceMetricEvent.Builder metricBuilder = ServiceMetricEvent.builder()
                .setDimension("task", task.getId())
                .setDimension("dataSource", task.getDataSource())
                .setDimension("graceful", String.valueOf(graceful))
                .setDimension("error", String.valueOf(error));

        emitter.emit(metricBuilder.build("task/interrupt/count", 1L));
        emitter.emit(metricBuilder.build("task/interrupt/elapsed", elapsed));
    }

    // Ok, now interrupt everything.
    if (executorService != null) {
        try {
            executorService.shutdownNow();
        } catch (SecurityException ex) {
            log.wtf(ex, "I can't control my own threads!");
        }
    }
}
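The new Interval(DateTimes.utc(start), taskConfig.getGracefulShutdownTimeout()).toDurationMillis() call is the same period-to-milliseconds idiom sketched after the ForkingTaskRunner example above.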
From source file:io.druid.indexing.overlord.TaskLockbox.java
License:Apache License
/**
 * Return all locks that overlap some search interval.
 */
private List<TaskLockPosse> findLockPossesForInterval(final String dataSource, final Interval interval) {
    giant.lock();

    try {
        final NavigableMap<Interval, TaskLockPosse> dsRunning = running.get(dataSource);

        if (dsRunning == null) {
            // No locks at all
            return Collections.emptyList();
        } else {
            // Tasks are indexed by locked interval, which are sorted by interval start. Intervals are
            // non-overlapping, so:
            final NavigableSet<Interval> dsLockbox = dsRunning.navigableKeySet();
            final Iterable<Interval> searchIntervals = Iterables.concat(
                    // Single interval that starts at or before ours
                    Collections.singletonList(dsLockbox
                            .floor(new Interval(interval.getStart(), new DateTime(JodaUtils.MAX_INSTANT)))),
                    // All intervals that start somewhere between our start instant (exclusive) and end instant
                    // (exclusive)
                    dsLockbox.subSet(new Interval(interval.getStart(), new DateTime(JodaUtils.MAX_INSTANT)),
                            false, new Interval(interval.getEnd(), interval.getEnd()), false));

            return Lists.newArrayList(
                    FunctionalIterable.create(searchIntervals).filter(new Predicate<Interval>() {
                        @Override
                        public boolean apply(@Nullable Interval searchInterval) {
                            return searchInterval != null && searchInterval.overlaps(interval);
                        }
                    }).transform(new Function<Interval, TaskLockPosse>() {
                        @Override
                        public TaskLockPosse apply(Interval interval) {
                            return dsRunning.get(interval);
                        }
                    }));
        }
    } finally {
        giant.unlock();
    }
}
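Because the locked intervals are non-overlapping and sorted by start, only two groups can overlap a search interval: the single lock starting at or before the search start, and the locks starting strictly inside it. A stripped-down sketch of the same search over a NavigableSet ordered by start millis (names and comparator are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.NavigableSet;
import org.joda.time.Interval;

static List<Interval> findOverlaps(NavigableSet<Interval> locks, Interval search) {
    // 'locks' is a TreeSet built with Comparator.comparingLong(Interval::getStartMillis)
    List<Interval> candidates = new ArrayList<>();

    // The one lock starting at or before our start may still cover it.
    Interval floor = locks.floor(search);
    if (floor != null) {
        candidates.add(floor);
    }

    // Locks starting strictly between our start and end instants.
    candidates.addAll(locks.subSet(search, false, new Interval(search.getEnd(), search.getEnd()), false));

    List<Interval> overlaps = new ArrayList<>();
    for (Interval candidate : candidates) {
        if (candidate.overlaps(search)) {
            overlaps.add(candidate);
        }
    }
    return overlaps;
}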
From source file:io.druid.java.util.common.granularity.Granularity.java
License:Apache License
/**
 * Return a granularity-sized Interval containing a particular DateTime.
 */
public final Interval bucket(DateTime t) {
    DateTime start = bucketStart(t);
    return new Interval(start, increment(start));
}
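The same bucketing idea in plain Joda-Time, worked for a day granularity, where withTimeAtStartOfDay and plusDays play the roles of bucketStart and increment (this is also the construction GranularUnprocessedPathSpec uses above to build bucketsToRun):

import org.joda.time.DateTime;
import org.joda.time.Interval;

static Interval dayBucket(DateTime t) {
    DateTime start = t.withTimeAtStartOfDay(); // bucketStart for DAY
    return new Interval(start, start.plusDays(1)); // increment for DAY
}

// dayBucket(new DateTime("2016-03-15T13:45:00Z", DateTimeZone.UTC))
//   -> 2016-03-15T00:00:00.000Z/2016-03-16T00:00:00.000Z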
From source file:io.druid.java.util.common.Intervals.java
License:Apache License
public static Interval of(String interval) {
    return new Interval(interval, ISOChronology.getInstanceUTC());
}
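This helper is the canonical use of the Interval(Object, Chronology) constructor shown at the top of this page: passing ISOChronology.getInstanceUTC() pins parsing to UTC, so Intervals.of("2016-06-27/2016-06-28") yields the same instants regardless of the JVM's default time zone.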