List of usage examples for the org.joda.time.Interval constructor
public Interval(Object interval, Chronology chronology)
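Before the examples, a minimal standalone sketch of this constructor (not taken from the sources below): the Object argument is typically an ISO-8601 interval string or another ReadableInterval, and the Chronology controls how the endpoints are interpreted. Assumes Joda-Time 2.x on the classpath.

import org.joda.time.Interval;
import org.joda.time.chrono.ISOChronology;

public class IntervalConstructorSketch {
    public static void main(String[] args) {
        // Parse an ISO-8601 interval string under the UTC ISO chronology.
        Interval interval = new Interval("2012-01-01T00:00:00Z/2012-01-02T00:00:00Z",
                ISOChronology.getInstanceUTC());
        System.out.println(interval.getStart() + " .. " + interval.getEnd());
    }
}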
From source file:com.metamx.druid.realtime.plumber.RealtimePlumberSchool.java
License:Open Source License
@Override
public Plumber findPlumber(final Schema schema, final FireDepartmentMetrics metrics) {
    verifyState();

    final RejectionPolicy rejectionPolicy = rejectionPolicyFactory.create(windowPeriod);
    log.info("Creating plumber using rejectionPolicy[%s]", rejectionPolicy);

    return new Plumber() {
        private volatile boolean stopped = false;
        private volatile ExecutorService persistExecutor = null;
        private volatile ScheduledExecutorService scheduledExecutor = null;

        private final Map<Long, Sink> sinks = Maps.newConcurrentMap();
        private final VersionedIntervalTimeline<String, Sink> sinkTimeline =
                new VersionedIntervalTimeline<String, Sink>(String.CASE_INSENSITIVE_ORDER);

        @Override
        public void startJob() {
            computeBaseDir(schema).mkdirs();
            initializeExecutors();
            bootstrapSinksFromDisk();
            registerServerViewCallback();
            startPersistThread();
        }

        @Override
        public Sink getSink(long timestamp) {
            if (!rejectionPolicy.accept(timestamp)) {
                return null;
            }

            final long truncatedTime = segmentGranularity.truncate(timestamp);

            Sink retVal = sinks.get(truncatedTime);
            if (retVal == null) {
                final Interval sinkInterval = new Interval(
                        new DateTime(truncatedTime),
                        segmentGranularity.increment(new DateTime(truncatedTime)));

                retVal = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval));

                try {
                    segmentAnnouncer.announceSegment(retVal.getSegment());
                    sinks.put(truncatedTime, retVal);
                    sinkTimeline.add(retVal.getInterval(), retVal.getVersion(),
                            new SingleElementPartitionChunk<Sink>(retVal));
                } catch (IOException e) {
                    log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
                            .addData("interval", retVal.getInterval())
                            .emit();
                }
            }

            return retVal;
        }

        @Override
        public <T> QueryRunner<T> getQueryRunner(final Query<T> query) {
            final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
            final QueryToolChest<T, Query<T>> toolchest = factory.getToolchest();

            final Function<Query<T>, ServiceMetricEvent.Builder> builderFn =
                    new Function<Query<T>, ServiceMetricEvent.Builder>() {
                        @Override
                        public ServiceMetricEvent.Builder apply(@Nullable Query<T> input) {
                            return toolchest.makeMetricBuilder(query);
                        }
                    };

            List<TimelineObjectHolder<String, Sink>> querySinks = Lists.newArrayList();
            for (Interval interval : query.getIntervals()) {
                querySinks.addAll(sinkTimeline.lookup(interval));
            }

            return toolchest.mergeResults(factory.mergeRunners(EXEC,
                    FunctionalIterable.create(querySinks)
                            .transform(new Function<TimelineObjectHolder<String, Sink>, QueryRunner<T>>() {
                                @Override
                                public QueryRunner<T> apply(TimelineObjectHolder<String, Sink> holder) {
                                    final Sink theSink = holder.getObject().getChunk(0).getObject();
                                    return new SpecificSegmentQueryRunner<T>(
                                            new MetricsEmittingQueryRunner<T>(emitter, builderFn,
                                                    factory.mergeRunners(EXEC, Iterables.transform(theSink,
                                                            new Function<FireHydrant, QueryRunner<T>>() {
                                                                @Override
                                                                public QueryRunner<T> apply(FireHydrant input) {
                                                                    return factory.createRunner(input.getSegment());
                                                                }
                                                            }))),
                                            new SpecificSegmentSpec(new SegmentDescriptor(holder.getInterval(),
                                                    theSink.getSegment().getVersion(),
                                                    theSink.getSegment().getShardSpec().getPartitionNum())));
                                }
                            })));
        }

        @Override
        public void persist(final Runnable commitRunnable) {
            final List<Pair<FireHydrant, Interval>> indexesToPersist = Lists.newArrayList();
            for (Sink sink : sinks.values()) {
                if (sink.swappable()) {
                    indexesToPersist.add(Pair.of(sink.swap(), sink.getInterval()));
                }
            }

            log.info("Submitting persist runnable for dataSource[%s]", schema.getDataSource());

            persistExecutor.execute(new ThreadRenamingRunnable(
                    String.format("%s-incremental-persist", schema.getDataSource())) {
                @Override
                public void doRun() {
                    for (Pair<FireHydrant, Interval> pair : indexesToPersist) {
                        metrics.incrementRowOutputCount(persistHydrant(pair.lhs, schema, pair.rhs));
                    }
                    commitRunnable.run();
                }
            });
        }

        // Submits persist-n-merge task for a Sink to the persistExecutor
        private void persistAndMerge(final long truncatedTime, final Sink sink) {
            final String threadName = String.format("%s-%s-persist-n-merge",
                    schema.getDataSource(), new DateTime(truncatedTime));

            persistExecutor.execute(new ThreadRenamingRunnable(threadName) {
                @Override
                public void doRun() {
                    final Interval interval = sink.getInterval();

                    for (FireHydrant hydrant : sink) {
                        if (!hydrant.hasSwapped()) {
                            log.info("Hydrant[%s] hasn't swapped yet, swapping. Sink[%s]", hydrant, sink);
                            final int rowCount = persistHydrant(hydrant, schema, interval);
                            metrics.incrementRowOutputCount(rowCount);
                        }
                    }

                    final File mergedTarget = new File(computePersistDir(schema, interval), "merged");
                    if (mergedTarget.exists()) {
                        log.info("Skipping already-merged sink: %s", sink);
                        return;
                    }

                    File mergedFile = null;
                    try {
                        List<QueryableIndex> indexes = Lists.newArrayList();
                        for (FireHydrant fireHydrant : sink) {
                            Segment segment = fireHydrant.getSegment();
                            final QueryableIndex queryableIndex = segment.asQueryableIndex();
                            log.info("Adding hydrant[%s]", fireHydrant);
                            indexes.add(queryableIndex);
                        }

                        mergedFile = IndexMerger.mergeQueryableIndex(indexes, schema.getAggregators(), mergedTarget);

                        QueryableIndex index = IndexIO.loadIndex(mergedFile);

                        DataSegment segment = dataSegmentPusher.push(mergedFile, sink.getSegment()
                                .withDimensions(Lists.newArrayList(index.getAvailableDimensions())));

                        segmentPublisher.publishSegment(segment);
                    } catch (IOException e) {
                        log.makeAlert(e, "Failed to persist merged index[%s]", schema.getDataSource())
                                .addData("interval", interval)
                                .emit();
                    }

                    if (mergedFile != null) {
                        try {
                            log.info("Deleting Index File[%s]", mergedFile);
                            FileUtils.deleteDirectory(mergedFile);
                        } catch (IOException e) {
                            log.warn(e, "Error deleting directory[%s]", mergedFile);
                        }
                    }
                }
            });
        }

        @Override
        public void finishJob() {
            log.info("Shutting down...");

            for (final Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                persistAndMerge(entry.getKey(), entry.getValue());
            }

            while (!sinks.isEmpty()) {
                try {
                    log.info("Cannot shut down yet! Sinks remaining: %s",
                            Joiner.on(", ").join(Iterables.transform(sinks.values(),
                                    new Function<Sink, String>() {
                                        @Override
                                        public String apply(Sink input) {
                                            return input.getSegment().getIdentifier();
                                        }
                                    })));

                    synchronized (handoffCondition) {
                        while (!sinks.isEmpty()) {
                            handoffCondition.wait();
                        }
                    }
                } catch (InterruptedException e) {
                    throw Throwables.propagate(e);
                }
            }

            // scheduledExecutor is shutdown here, but persistExecutor is shutdown when the
            // ServerView sends it a new segment callback
            if (scheduledExecutor != null) {
                scheduledExecutor.shutdown();
            }

            stopped = true;
        }

        private void initializeExecutors() {
            if (persistExecutor == null) {
                persistExecutor = Executors.newFixedThreadPool(1,
                        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("plumber_persist_%d").build());
            }
            if (scheduledExecutor == null) {
                scheduledExecutor = Executors.newScheduledThreadPool(1,
                        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("plumber_scheduled_%d").build());
            }
        }

        private void bootstrapSinksFromDisk() {
            for (File sinkDir : computeBaseDir(schema).listFiles()) {
                Interval sinkInterval = new Interval(sinkDir.getName().replace("_", "/"));

                // final File[] sinkFiles = sinkDir.listFiles();
                // Filter the listing to avoid reading the "merged" dir:
                final File[] sinkFiles = sinkDir.listFiles(new FilenameFilter() {
                    @Override
                    public boolean accept(File dir, String fileName) {
                        return !(Ints.tryParse(fileName) == null);
                    }
                });
                Arrays.sort(sinkFiles, new Comparator<File>() {
                    @Override
                    public int compare(File o1, File o2) {
                        try {
                            return Ints.compare(Integer.parseInt(o1.getName()), Integer.parseInt(o2.getName()));
                        } catch (NumberFormatException e) {
                            log.error(e, "Couldn't compare as numbers? [%s][%s]", o1, o2);
                            return o1.compareTo(o2);
                        }
                    }
                });

                try {
                    List<FireHydrant> hydrants = Lists.newArrayList();
                    for (File segmentDir : sinkFiles) {
                        log.info("Loading previously persisted segment at [%s]", segmentDir);

                        // The FilenameFilter above should already exclude the "merged" dir;
                        // this is a defensive double-check to keep it out of the hydrants.
                        // If the filter is guaranteed, this check can be removed.
                        if (Ints.tryParse(segmentDir.getName()) == null) {
                            continue;
                        }

                        hydrants.add(new FireHydrant(
                                new QueryableIndexSegment(null, IndexIO.loadIndex(segmentDir)),
                                Integer.parseInt(segmentDir.getName())));
                    }

                    Sink currSink = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval), hydrants);
                    sinks.put(sinkInterval.getStartMillis(), currSink);
                    sinkTimeline.add(currSink.getInterval(), currSink.getVersion(),
                            new SingleElementPartitionChunk<Sink>(currSink));

                    segmentAnnouncer.announceSegment(currSink.getSegment());
                } catch (IOException e) {
                    log.makeAlert(e, "Problem loading sink[%s] from disk.", schema.getDataSource())
                            .addData("interval", sinkInterval)
                            .emit();
                }
            }
        }

        private void registerServerViewCallback() {
            serverView.registerSegmentCallback(persistExecutor, new ServerView.BaseSegmentCallback() {
                @Override
                public ServerView.CallbackAction segmentAdded(DruidServer server, DataSegment segment) {
                    if (stopped) {
                        log.info("Unregistering ServerViewCallback");
                        persistExecutor.shutdown();
                        return ServerView.CallbackAction.UNREGISTER;
                    }

                    if ("realtime".equals(server.getType())) {
                        return ServerView.CallbackAction.CONTINUE;
                    }

                    log.debug("Checking segment[%s] on server[%s]", segment, server);

                    if (schema.getDataSource().equals(segment.getDataSource())) {
                        final Interval interval = segment.getInterval();

                        for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                            final Long sinkKey = entry.getKey();
                            if (interval.contains(sinkKey)) {
                                final Sink sink = entry.getValue();
                                log.info("Segment[%s] matches sink[%s] on server[%s]", segment, sink, server);

                                final String segmentVersion = segment.getVersion();
                                final String sinkVersion = sink.getSegment().getVersion();
                                if (segmentVersion.compareTo(sinkVersion) >= 0) {
                                    log.info("Segment version[%s] >= sink version[%s]", segmentVersion, sinkVersion);
                                    try {
                                        segmentAnnouncer.unannounceSegment(sink.getSegment());
                                        FileUtils.deleteDirectory(computePersistDir(schema, sink.getInterval()));

                                        log.info("Removing sinkKey %d for segment %s", sinkKey,
                                                sink.getSegment().getIdentifier());
                                        sinks.remove(sinkKey);
                                        sinkTimeline.remove(sink.getInterval(), sink.getVersion(),
                                                new SingleElementPartitionChunk<Sink>(sink));

                                        synchronized (handoffCondition) {
                                            handoffCondition.notifyAll();
                                        }
                                    } catch (IOException e) {
                                        log.makeAlert(e, "Unable to delete old segment for dataSource[%s].",
                                                schema.getDataSource())
                                                .addData("interval", sink.getInterval())
                                                .emit();
                                    }
                                }
                            }
                        }
                    }

                    return ServerView.CallbackAction.CONTINUE;
                }
            });
        }

        private void startPersistThread() {
            final long truncatedNow = segmentGranularity.truncate(new DateTime()).getMillis();
            final long windowMillis = windowPeriod.toStandardDuration().getMillis();

            log.info("Expect to run at [%s]",
                    new DateTime().plus(new Duration(System.currentTimeMillis(),
                            segmentGranularity.increment(truncatedNow) + windowMillis)));

            ScheduledExecutors.scheduleAtFixedRate(scheduledExecutor,
                    new Duration(System.currentTimeMillis(),
                            segmentGranularity.increment(truncatedNow) + windowMillis),
                    new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)),
                    new ThreadRenamingCallable<ScheduledExecutors.Signal>(
                            String.format("%s-overseer-%d", schema.getDataSource(),
                                    schema.getShardSpec().getPartitionNum())) {
                        @Override
                        public ScheduledExecutors.Signal doCall() {
                            if (stopped) {
                                log.info("Stopping merge-n-push overseer thread");
                                return ScheduledExecutors.Signal.STOP;
                            }

                            log.info("Starting merge and push.");

                            long minTimestamp = segmentGranularity.truncate(
                                    rejectionPolicy.getCurrMaxTime().minus(windowMillis)).getMillis();

                            List<Map.Entry<Long, Sink>> sinksToPush = Lists.newArrayList();
                            for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                                final Long intervalStart = entry.getKey();
                                if (intervalStart < minTimestamp) {
                                    log.info("Adding entry[%s] for merge and push.", entry);
                                    sinksToPush.add(entry);
                                }
                            }

                            for (final Map.Entry<Long, Sink> entry : sinksToPush) {
                                persistAndMerge(entry.getKey(), entry.getValue());
                            }

                            if (stopped) {
                                log.info("Stopping merge-n-push overseer thread");
                                return ScheduledExecutors.Signal.STOP;
                            } else {
                                return ScheduledExecutors.Signal.REPEAT;
                            }
                        }
                    });
        }
    };
}
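The Interval usage at the heart of getSink() above is a common pattern: truncate a timestamp to the segment granularity, then build an interval from the truncated start to the next granularity boundary. A minimal standalone sketch, with hourly truncation via Joda-Time standing in for Druid's segmentGranularity (hypothetical, not Druid API):

import org.joda.time.DateTime;
import org.joda.time.Interval;

long timestamp = System.currentTimeMillis();
DateTime start = new DateTime(timestamp).hourOfDay().roundFloorCopy(); // truncate to the hour
Interval sinkInterval = new Interval(start, start.plusHours(1));       // covers [hour, hour + 1)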
From source file:com.metamx.druid.utils.DruidMasterBalancerProfiler.java
License:Open Source License
public void bigProfiler() {
    Stopwatch watch = new Stopwatch();
    int numSegments = 55000;
    int numServers = 50;

    EasyMock.expect(manager.getAllRules())
            .andReturn(ImmutableMap.<String, List<Rule>>of("test", rules)).anyTimes();
    EasyMock.expect(manager.getRules(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.expect(manager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.replay(manager);

    master.moveSegment(EasyMock.<String>anyObject(), EasyMock.<String>anyObject(),
            EasyMock.<String>anyObject(), EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(master);

    List<DruidServer> serverList = Lists.newArrayList();
    Map<String, LoadQueuePeon> peonMap = Maps.newHashMap();
    List<ServerHolder> serverHolderList = Lists.newArrayList();
    Map<String, DataSegment> segmentMap = Maps.newHashMap();
    for (int i = 0; i < numSegments; i++) {
        segmentMap.put("segment" + i,
                new DataSegment("datasource" + i,
                        new Interval(new DateTime("2012-01-01"), (new DateTime("2012-01-01")).plusHours(1)),
                        (new DateTime("2012-03-01")).toString(),
                        Maps.<String, Object>newHashMap(),
                        Lists.<String>newArrayList(),
                        Lists.<String>newArrayList(),
                        new NoneShardSpec(), 0, 4L));
    }

    for (int i = 0; i < numServers; i++) {
        DruidServer server = EasyMock.createMock(DruidServer.class);
        EasyMock.expect(server.getMetadata()).andReturn(null).anyTimes();
        EasyMock.expect(server.getCurrSize()).andReturn(30L).atLeastOnce();
        EasyMock.expect(server.getMaxSize()).andReturn(100L).atLeastOnce();
        EasyMock.expect(server.getTier()).andReturn("normal").anyTimes();
        EasyMock.expect(server.getName()).andReturn(Integer.toString(i)).atLeastOnce();
        EasyMock.expect(server.getHost()).andReturn(Integer.toString(i)).anyTimes();
        if (i == 0) {
            EasyMock.expect(server.getSegments()).andReturn(segmentMap).anyTimes();
        } else {
            EasyMock.expect(server.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
        }
        EasyMock.expect(server.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
        EasyMock.replay(server);

        LoadQueuePeon peon = new LoadQueuePeonTester();
        peonMap.put(Integer.toString(i), peon);
        serverHolderList.add(new ServerHolder(server, peon));
    }

    DruidMasterRuntimeParams params = DruidMasterRuntimeParams.newBuilder()
            .withDruidCluster(new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of(
                    "normal",
                    MinMaxPriorityQueue.orderedBy(DruidMasterBalancerTester.percentUsedComparator)
                            .create(serverHolderList))))
            .withLoadManagementPeons(peonMap)
            .withAvailableSegments(segmentMap.values())
            .withMasterSegmentSettings(
                    new MasterSegmentSettings.Builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE).build())
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
            .withEmitter(emitter)
            .withDatabaseRuleManager(manager)
            .withReplicationManager(new ReplicationThrottler(2, 500))
            .withSegmentReplicantLookup(SegmentReplicantLookup.make(new DruidCluster(
                    ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of(
                            "normal",
                            MinMaxPriorityQueue.orderedBy(DruidMasterBalancerTester.percentUsedComparator)
                                    .create(serverHolderList)))))
            .build();

    DruidMasterBalancerTester tester = new DruidMasterBalancerTester(master);
    DruidMasterRuleRunner runner = new DruidMasterRuleRunner(master, 500, 5);

    watch.start();
    DruidMasterRuntimeParams balanceParams = tester.run(params);
    DruidMasterRuntimeParams assignParams = runner.run(params);
    System.out.println(watch.stop());
}
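Every mock segment above spans the same one-hour interval; extracted as a standalone sketch:

import org.joda.time.DateTime;
import org.joda.time.Interval;

DateTime day = new DateTime("2012-01-01");
Interval segmentInterval = new Interval(day, day.plusHours(1)); // 2012-01-01T00:00/T01:00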
From source file:com.metamx.druid.VersionedIntervalTimeline.java
License:Open Source License
private boolean addAtKey(NavigableMap<Interval, TimelineEntry> timeline, Interval key, TimelineEntry entry) {
    boolean retVal = false;
    Interval currKey = key;
    Interval entryInterval = entry.getTrueInterval();

    if (!currKey.overlaps(entryInterval)) {
        return false;
    }

    while (currKey != null && currKey.overlaps(entryInterval)) {
        Interval nextKey = timeline.higherKey(currKey);

        int versionCompare = versionComparator.compare(entry.getVersion(), timeline.get(currKey).getVersion());

        if (versionCompare < 0) {
            if (currKey.contains(entryInterval)) {
                return true;
            } else if (currKey.getStart().isBefore(entryInterval.getStart())) {
                entryInterval = new Interval(currKey.getEnd(), entryInterval.getEnd());
            } else {
                addIntervalToTimeline(new Interval(entryInterval.getStart(), currKey.getStart()), entry, timeline);

                if (entryInterval.getEnd().isAfter(currKey.getEnd())) {
                    entryInterval = new Interval(currKey.getEnd(), entryInterval.getEnd());
                } else {
                    entryInterval = null;
                }
            }
        } else if (versionCompare > 0) {
            TimelineEntry oldEntry = timeline.remove(currKey);

            if (currKey.contains(entryInterval)) {
                addIntervalToTimeline(new Interval(currKey.getStart(), entryInterval.getStart()), oldEntry, timeline);
                addIntervalToTimeline(new Interval(entryInterval.getEnd(), currKey.getEnd()), oldEntry, timeline);
                addIntervalToTimeline(entryInterval, entry, timeline);

                return true;
            } else if (currKey.getStart().isBefore(entryInterval.getStart())) {
                addIntervalToTimeline(new Interval(currKey.getStart(), entryInterval.getStart()), oldEntry, timeline);
            } else if (entryInterval.getEnd().isBefore(currKey.getEnd())) {
                addIntervalToTimeline(new Interval(entryInterval.getEnd(), currKey.getEnd()), oldEntry, timeline);
            }
        } else {
            if (timeline.get(currKey).equals(entry)) {
                // This occurs when restoring segments
                timeline.remove(currKey);
            } else {
                throw new UnsupportedOperationException(String.format(
                        "Cannot add overlapping segments [%s and %s] with the same version [%s]",
                        currKey, entryInterval, entry.getVersion()));
            }
        }

        currKey = nextKey;
        retVal = true;
    }

    addIntervalToTimeline(entryInterval, entry, timeline);

    return retVal;
}
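The splitting arithmetic in addAtKey reduces to carving non-overlapping remainders out of two overlapping intervals. A minimal sketch of that carving, with hypothetical dates:

import org.joda.time.DateTime;
import org.joda.time.Interval;

Interval existing = new Interval(new DateTime("2012-01-01"), new DateTime("2012-01-10"));
Interval entry    = new Interval(new DateTime("2012-01-04"), new DateTime("2012-01-06"));

// When the new entry wins (higher version) and sits inside the existing key,
// the old entry keeps the two flanking remainders:
Interval left  = new Interval(existing.getStart(), entry.getStart()); // 01-01/01-04
Interval right = new Interval(entry.getEnd(), existing.getEnd());     // 01-06/01-10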
From source file:com.metamx.druid.VersionedIntervalTimeline.java
License:Open Source License
private List<TimelineObjectHolder<VersionType, ObjectType>> lookup(Interval interval, boolean incompleteOk) {
    List<TimelineObjectHolder<VersionType, ObjectType>> retVal =
            new ArrayList<TimelineObjectHolder<VersionType, ObjectType>>();

    NavigableMap<Interval, TimelineEntry> timeline = (incompleteOk)
            ? incompletePartitionsTimeline
            : completePartitionsTimeline;

    for (Map.Entry<Interval, TimelineEntry> entry : timeline.entrySet()) {
        Interval timelineInterval = entry.getKey();
        TimelineEntry val = entry.getValue();

        if (timelineInterval.overlaps(interval)) {
            retVal.add(new TimelineObjectHolder<VersionType, ObjectType>(timelineInterval, val.getVersion(),
                    val.getPartitionHolder()));
        }
    }

    if (retVal.isEmpty()) {
        return retVal;
    }

    TimelineObjectHolder<VersionType, ObjectType> firstEntry = retVal.get(0);
    if (interval.overlaps(firstEntry.getInterval())
            && interval.getStart().isAfter(firstEntry.getInterval().getStart())) {
        retVal.set(0, new TimelineObjectHolder<VersionType, ObjectType>(
                new Interval(interval.getStart(), firstEntry.getInterval().getEnd()),
                firstEntry.getVersion(), firstEntry.getObject()));
    }

    TimelineObjectHolder<VersionType, ObjectType> lastEntry = retVal.get(retVal.size() - 1);
    if (interval.overlaps(lastEntry.getInterval())
            && interval.getEnd().isBefore(lastEntry.getInterval().getEnd())) {
        retVal.set(retVal.size() - 1, new TimelineObjectHolder<VersionType, ObjectType>(
                new Interval(lastEntry.getInterval().getStart(), interval.getEnd()),
                lastEntry.getVersion(), lastEntry.getObject()));
    }

    return retVal;
}
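The endpoint trimming on the first and last holders is plain interval clipping: the returned holder's interval must not extend past the queried range. A standalone sketch with hypothetical values:

import org.joda.time.DateTime;
import org.joda.time.Interval;

Interval holder = new Interval(new DateTime("2012-01-01"), new DateTime("2012-02-01"));
Interval query  = new Interval(new DateTime("2012-01-15"), new DateTime("2012-03-01"));

// Clip the holder to the query, as the method does for the first entry:
Interval clipped = new Interval(query.getStart(), holder.getEnd()); // 01-15/02-01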
From source file:com.metawiring.load.core.ExecutionContext.java
License:Apache License
public Interval getInterval() { return new Interval(startedAt, System.currentTimeMillis()); }
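This uses the Interval(long, long) constructor, which reads both arguments as millisecond instants and throws IllegalArgumentException if the end precedes the start, so startedAt must not lie in the future. A minimal sketch with a hypothetical start instant:

import org.joda.time.Interval;

long startedAt = System.currentTimeMillis() - 5000L; // hypothetical: started 5s ago
Interval elapsed = new Interval(startedAt, System.currentTimeMillis());
System.out.println(elapsed.toDurationMillis() + " ms elapsed");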
From source file:com.microsoft.exchange.DateHelp.java
License:Apache License
public static List<Interval> generateIntervals(org.joda.time.DateTime start, org.joda.time.DateTime end,
        Period period) {
    if (period.getDays() > MAX_PERIOD.getDays()) {
        period = MAX_PERIOD;
    }
    List<Interval> list = new ArrayList<Interval>();
    org.joda.time.DateTime intervalEnd = start.plus(period);
    while (intervalEnd.isBefore(end)) {
        list.add(new Interval(start, intervalEnd));
        start = intervalEnd;
        intervalEnd = intervalEnd.plus(period);
    }
    if (start.isBefore(end)) {
        list.add(new Interval(start, end));
    }
    return list;
}
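A hypothetical call, assuming MAX_PERIOD is at least seven days: slicing January 2012 into week-long abutting intervals.

import java.util.List;
import org.joda.time.DateTime;
import org.joda.time.Interval;
import org.joda.time.Period;

List<Interval> slices = DateHelp.generateIntervals(
        new DateTime("2012-01-01"), new DateTime("2012-02-01"), Period.days(7));
// Yields 01-01/01-08, 01-08/01-15, 01-15/01-22, 01-22/01-29, and a short
// final slice 01-29/02-01 covering the remainder.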
From source file:com.microsoft.exchange.DateHelp.java
License:Apache License
public static List<Interval> generateIntervals(Date start, Date end) {
    org.apache.commons.lang.Validate.isTrue(end.after(start));
    long startInstant = start.getTime();
    long endInstant = end.getTime();
    long midInstant = startInstant + (endInstant - startInstant) / 2;
    Interval a = new Interval(startInstant, midInstant);
    Interval b = new Interval(midInstant, endInstant);
    List<Interval> intervals = new ArrayList<Interval>();
    intervals.add(a);
    intervals.add(b);
    return intervals;
}
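A hypothetical call: bisecting a two-hour range produces two abutting one-hour intervals.

import java.util.Date;
import java.util.List;
import org.joda.time.Interval;

Date from = new Date(0L);                     // epoch
Date to   = new Date(2L * 60L * 60L * 1000L); // epoch + 2h
List<Interval> halves = DateHelp.generateIntervals(from, to);
// halves.get(0) covers hour 0..1, halves.get(1) covers hour 1..2; they abut at the midpoint.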
From source file:com.microsoft.exchange.ExchangeDateUtils.java
License:Apache License
/**
 * Construct a {@link List} of {@link Interval}s where each interval abuts the next and has a
 * duration equal to that of {@code period}.
 *
 * @param start
 * @param end
 * @param period
 * @return
 */
public static List<Interval> generateIntervals(org.joda.time.DateTime start, org.joda.time.DateTime end,
        Period period) {
    if (period.getDays() > MAX_PERIOD.getDays()) {
        period = MAX_PERIOD;
    }
    List<Interval> list = new ArrayList<Interval>();
    org.joda.time.DateTime intervalEnd = start.plus(period);
    while (intervalEnd.isBefore(end)) {
        list.add(new Interval(start, intervalEnd));
        start = intervalEnd;
        intervalEnd = intervalEnd.plus(period);
    }
    if (start.isBefore(end)) {
        list.add(new Interval(start, end));
    }
    return list;
}
From source file:com.microsoft.exchange.ExchangeDateUtils.java
License:Apache License
/**
 * Construct a pair of {@link Interval}s that:
 * - are of equal length/duration
 * - have length/duration ~= (end - start) / 2
 * - interval1.start = start
 * - interval1 abuts interval2 (interval2 starts immediately after interval1.end)
 * - interval2.end = end
 */
public static List<Interval> generateIntervals(Date start, Date end) {
    org.apache.commons.lang.Validate.isTrue(end.after(start));
    long startInstant = start.getTime();
    long endInstant = end.getTime();
    long midInstant = startInstant + (endInstant - startInstant) / 2;
    Interval a = new Interval(startInstant, midInstant);
    Interval b = new Interval(midInstant, endInstant);
    List<Interval> intervals = new ArrayList<Interval>();
    intervals.add(a);
    intervals.add(b);
    return intervals;
}
From source file:com.mobiaware.auction.model.Auction.java
License:Apache License
@JsonProperty("active") public boolean isActive() { if (_endDate.isBefore(_startDate)) { return false; }/*from ww w . ja v a 2 s.com*/ return new Interval(_startDate, _endDate).contains(new DateTime()); }