Example usage for org.joda.time Interval contains

List of usage examples for org.joda.time Interval contains

Introduction

On this page you can find example usage for org.joda.time Interval contains.

Prototype

public boolean contains(long millisInstant) 

Document

Does this time interval contain the specified millisecond instant.
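
Before diving into the full examples, a minimal self-contained sketch (class and variable names are ours, not taken from any project below) shows contains(long) alongside the related instant and interval overloads. Note that a Joda Interval is half-open: the start instant is contained, the end instant is not.

import org.joda.time.DateTime;
import org.joda.time.Interval;

public class IntervalContainsDemo {
    public static void main(String[] args) {
        DateTime start = new DateTime(2013, 1, 1, 0, 0, 0, 0);
        Interval interval = new Interval(start, start.plusHours(1));

        // contains(long): membership test for a millisecond instant
        System.out.println(interval.contains(start.getMillis()));       // true (start is inclusive)
        System.out.println(interval.contains(interval.getEndMillis())); // false (end is exclusive)

        // contains(ReadableInstant): the same test for a DateTime
        System.out.println(interval.contains(start.plusMinutes(30)));   // true

        // contains(ReadableInterval): true only if the whole other interval fits
        System.out.println(interval.contains(new Interval(start, start.plusMinutes(10)))); // true
    }
}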

Usage

From source file: com.metamx.druid.realtime.plumber.RealtimePlumberSchool.java

License: Open Source License

@Override
public Plumber findPlumber(final Schema schema, final FireDepartmentMetrics metrics) {
    verifyState();

    final RejectionPolicy rejectionPolicy = rejectionPolicyFactory.create(windowPeriod);
    log.info("Creating plumber using rejectionPolicy[%s]", rejectionPolicy);

    return new Plumber() {
        private volatile boolean stopped = false;
        private volatile ExecutorService persistExecutor = null;
        private volatile ScheduledExecutorService scheduledExecutor = null;

        private final Map<Long, Sink> sinks = Maps.newConcurrentMap();
        private final VersionedIntervalTimeline<String, Sink> sinkTimeline = new VersionedIntervalTimeline<String, Sink>(
                String.CASE_INSENSITIVE_ORDER);

        @Override
        public void startJob() {
            computeBaseDir(schema).mkdirs();
            initializeExecutors();
            bootstrapSinksFromDisk();
            registerServerViewCallback();
            startPersistThread();
        }

        @Override
        public Sink getSink(long timestamp) {
            if (!rejectionPolicy.accept(timestamp)) {
                return null;
            }

            final long truncatedTime = segmentGranularity.truncate(timestamp);

            Sink retVal = sinks.get(truncatedTime);

            if (retVal == null) {
                final Interval sinkInterval = new Interval(new DateTime(truncatedTime),
                        segmentGranularity.increment(new DateTime(truncatedTime)));

                retVal = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval));

                try {
                    segmentAnnouncer.announceSegment(retVal.getSegment());
                    sinks.put(truncatedTime, retVal);
                    sinkTimeline.add(retVal.getInterval(), retVal.getVersion(),
                            new SingleElementPartitionChunk<Sink>(retVal));
                } catch (IOException e) {
                    log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
                            .addData("interval", retVal.getInterval()).emit();
                }
            }

            return retVal;
        }

        @Override
        public <T> QueryRunner<T> getQueryRunner(final Query<T> query) {
            final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
            final QueryToolChest<T, Query<T>> toolchest = factory.getToolchest();

            final Function<Query<T>, ServiceMetricEvent.Builder> builderFn = new Function<Query<T>, ServiceMetricEvent.Builder>() {

                @Override
                public ServiceMetricEvent.Builder apply(@Nullable Query<T> input) {
                    return toolchest.makeMetricBuilder(query);
                }
            };

            List<TimelineObjectHolder<String, Sink>> querySinks = Lists.newArrayList();
            for (Interval interval : query.getIntervals()) {
                querySinks.addAll(sinkTimeline.lookup(interval));
            }

            return toolchest.mergeResults(factory.mergeRunners(EXEC, FunctionalIterable.create(querySinks)
                    .transform(new Function<TimelineObjectHolder<String, Sink>, QueryRunner<T>>() {
                        @Override
                        public QueryRunner<T> apply(TimelineObjectHolder<String, Sink> holder) {
                            final Sink theSink = holder.getObject().getChunk(0).getObject();
                            return new SpecificSegmentQueryRunner<T>(new MetricsEmittingQueryRunner<T>(emitter,
                                    builderFn, factory.mergeRunners(EXEC, Iterables.transform(theSink,
                                            new Function<FireHydrant, QueryRunner<T>>() {
                                                @Override
                                                public QueryRunner<T> apply(FireHydrant input) {
                                                    return factory.createRunner(input.getSegment());
                                                }
                                            }))),
                                    new SpecificSegmentSpec(new SegmentDescriptor(holder.getInterval(),
                                            theSink.getSegment().getVersion(),
                                            theSink.getSegment().getShardSpec().getPartitionNum())));
                        }
                    })));
        }

        @Override
        public void persist(final Runnable commitRunnable) {
            final List<Pair<FireHydrant, Interval>> indexesToPersist = Lists.newArrayList();
            for (Sink sink : sinks.values()) {
                if (sink.swappable()) {
                    indexesToPersist.add(Pair.of(sink.swap(), sink.getInterval()));
                }
            }

            log.info("Submitting persist runnable for dataSource[%s]", schema.getDataSource());

            persistExecutor.execute(new ThreadRenamingRunnable(
                    String.format("%s-incremental-persist", schema.getDataSource())) {
                @Override
                public void doRun() {
                    for (Pair<FireHydrant, Interval> pair : indexesToPersist) {
                        metrics.incrementRowOutputCount(persistHydrant(pair.lhs, schema, pair.rhs));
                    }
                    commitRunnable.run();
                }
            });
        }

        // Submits persist-n-merge task for a Sink to the persistExecutor
        private void persistAndMerge(final long truncatedTime, final Sink sink) {
            final String threadName = String.format("%s-%s-persist-n-merge", schema.getDataSource(),
                    new DateTime(truncatedTime));
            persistExecutor.execute(new ThreadRenamingRunnable(threadName) {
                @Override
                public void doRun() {
                    final Interval interval = sink.getInterval();

                    for (FireHydrant hydrant : sink) {
                        if (!hydrant.hasSwapped()) {
                            log.info("Hydrant[%s] hasn't swapped yet, swapping. Sink[%s]", hydrant, sink);
                            final int rowCount = persistHydrant(hydrant, schema, interval);
                            metrics.incrementRowOutputCount(rowCount);
                        }
                    }

                    final File mergedTarget = new File(computePersistDir(schema, interval), "merged");
                    if (mergedTarget.exists()) {
                        log.info("Skipping already-merged sink: %s", sink);
                        return;
                    }

                    File mergedFile = null;
                    try {
                        List<QueryableIndex> indexes = Lists.newArrayList();
                        for (FireHydrant fireHydrant : sink) {
                            Segment segment = fireHydrant.getSegment();
                            final QueryableIndex queryableIndex = segment.asQueryableIndex();
                            log.info("Adding hydrant[%s]", fireHydrant);
                            indexes.add(queryableIndex);
                        }

                        mergedFile = IndexMerger.mergeQueryableIndex(indexes, schema.getAggregators(),
                                mergedTarget);

                        QueryableIndex index = IndexIO.loadIndex(mergedFile);

                        DataSegment segment = dataSegmentPusher.push(mergedFile, sink.getSegment()
                                .withDimensions(Lists.newArrayList(index.getAvailableDimensions())));

                        segmentPublisher.publishSegment(segment);
                    } catch (IOException e) {
                        log.makeAlert(e, "Failed to persist merged index[%s]", schema.getDataSource())
                                .addData("interval", interval).emit();
                    }

                    if (mergedFile != null) {
                        try {
                            log.info("Deleting Index File[%s]", mergedFile);
                            FileUtils.deleteDirectory(mergedFile);
                        } catch (IOException e) {
                            log.warn(e, "Error deleting directory[%s]", mergedFile);
                        }
                    }
                }
            });
        }

        @Override
        public void finishJob() {
            log.info("Shutting down...");

            for (final Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                persistAndMerge(entry.getKey(), entry.getValue());
            }

            while (!sinks.isEmpty()) {
                try {
                    log.info("Cannot shut down yet! Sinks remaining: %s", Joiner.on(", ")
                            .join(Iterables.transform(sinks.values(), new Function<Sink, String>() {
                                @Override
                                public String apply(Sink input) {
                                    return input.getSegment().getIdentifier();
                                }
                            })));

                    synchronized (handoffCondition) {
                        while (!sinks.isEmpty()) {
                            handoffCondition.wait();
                        }
                    }
                } catch (InterruptedException e) {
                    throw Throwables.propagate(e);
                }
            }

            // scheduledExecutor is shut down here, but persistExecutor is shut down when the
            // ServerView delivers the new-segment callback
            if (scheduledExecutor != null) {
                scheduledExecutor.shutdown();
            }

            stopped = true;
        }

        private void initializeExecutors() {
            if (persistExecutor == null) {
                persistExecutor = Executors.newFixedThreadPool(1,
                        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("plumber_persist_%d").build());
            }
            if (scheduledExecutor == null) {
                scheduledExecutor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
                        .setDaemon(true).setNameFormat("plumber_scheduled_%d").build());
            }
        }

        private void bootstrapSinksFromDisk() {
            for (File sinkDir : computeBaseDir(schema).listFiles()) {
                Interval sinkInterval = new Interval(sinkDir.getName().replace("_", "/"));

                // List only the numeric hydrant directories, skipping the "merged" dir
                final File[] sinkFiles = sinkDir.listFiles(new FilenameFilter() {
                    @Override
                    public boolean accept(File dir, String fileName) {
                        return Ints.tryParse(fileName) != null;
                    }
                });
                Arrays.sort(sinkFiles, new Comparator<File>() {
                    @Override
                    public int compare(File o1, File o2) {
                        try {
                            return Ints.compare(Integer.parseInt(o1.getName()), Integer.parseInt(o2.getName()));
                        } catch (NumberFormatException e) {
                            log.error(e, "Couldn't compare as numbers? [%s][%s]", o1, o2);
                            return o1.compareTo(o2);
                        }
                    }
                });

                try {
                    List<FireHydrant> hydrants = Lists.newArrayList();
                    for (File segmentDir : sinkFiles) {
                        log.info("Loading previously persisted segment at [%s]", segmentDir);

                        // The filename filter above already excludes non-numeric names;
                        // this is a defensive double-check that keeps the "merged" dir
                        // out of the hydrants.
                        if (Ints.tryParse(segmentDir.getName()) == null) {
                            continue;
                        }

                        hydrants.add(
                                new FireHydrant(new QueryableIndexSegment(null, IndexIO.loadIndex(segmentDir)),
                                        Integer.parseInt(segmentDir.getName())));
                    }

                    Sink currSink = new Sink(sinkInterval, schema, versioningPolicy.getVersion(sinkInterval),
                            hydrants);
                    sinks.put(sinkInterval.getStartMillis(), currSink);
                    sinkTimeline.add(currSink.getInterval(), currSink.getVersion(),
                            new SingleElementPartitionChunk<Sink>(currSink));

                    segmentAnnouncer.announceSegment(currSink.getSegment());
                } catch (IOException e) {
                    log.makeAlert(e, "Problem loading sink[%s] from disk.", schema.getDataSource())
                            .addData("interval", sinkInterval).emit();
                }
            }
        }

        private void registerServerViewCallback() {
            serverView.registerSegmentCallback(persistExecutor, new ServerView.BaseSegmentCallback() {
                @Override
                public ServerView.CallbackAction segmentAdded(DruidServer server, DataSegment segment) {
                    if (stopped) {
                        log.info("Unregistering ServerViewCallback");
                        persistExecutor.shutdown();
                        return ServerView.CallbackAction.UNREGISTER;
                    }

                    if ("realtime".equals(server.getType())) {
                        return ServerView.CallbackAction.CONTINUE;
                    }

                    log.debug("Checking segment[%s] on server[%s]", segment, server);
                    if (schema.getDataSource().equals(segment.getDataSource())) {
                        final Interval interval = segment.getInterval();
                        for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                            final Long sinkKey = entry.getKey();
                            if (interval.contains(sinkKey)) {
                                final Sink sink = entry.getValue();
                                log.info("Segment[%s] matches sink[%s] on server[%s]", segment, sink, server);

                                final String segmentVersion = segment.getVersion();
                                final String sinkVersion = sink.getSegment().getVersion();
                                if (segmentVersion.compareTo(sinkVersion) >= 0) {
                                    log.info("Segment version[%s] >= sink version[%s]", segmentVersion,
                                            sinkVersion);
                                    try {
                                        segmentAnnouncer.unannounceSegment(sink.getSegment());
                                        FileUtils
                                                .deleteDirectory(computePersistDir(schema, sink.getInterval()));
                                        log.info("Removing sinkKey %d for segment %s", sinkKey,
                                                sink.getSegment().getIdentifier());
                                        sinks.remove(sinkKey);
                                        sinkTimeline.remove(sink.getInterval(), sink.getVersion(),
                                                new SingleElementPartitionChunk<Sink>(sink));

                                        synchronized (handoffCondition) {
                                            handoffCondition.notifyAll();
                                        }
                                    } catch (IOException e) {
                                        log.makeAlert(e, "Unable to delete old segment for dataSource[%s].",
                                                schema.getDataSource()).addData("interval", sink.getInterval())
                                                .emit();
                                    }
                                }
                            }
                        }
                    }

                    return ServerView.CallbackAction.CONTINUE;
                }
            });
        }

        private void startPersistThread() {
            final long truncatedNow = segmentGranularity.truncate(new DateTime()).getMillis();
            final long windowMillis = windowPeriod.toStandardDuration().getMillis();

            log.info("Expect to run at [%s]", new DateTime().plus(new Duration(System.currentTimeMillis(),
                    segmentGranularity.increment(truncatedNow) + windowMillis)));

            ScheduledExecutors.scheduleAtFixedRate(scheduledExecutor,
                    new Duration(System.currentTimeMillis(),
                            segmentGranularity.increment(truncatedNow) + windowMillis),
                    new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)),
                    new ThreadRenamingCallable<ScheduledExecutors.Signal>(String.format("%s-overseer-%d",
                            schema.getDataSource(), schema.getShardSpec().getPartitionNum())) {
                        @Override
                        public ScheduledExecutors.Signal doCall() {
                            if (stopped) {
                                log.info("Stopping merge-n-push overseer thread");
                                return ScheduledExecutors.Signal.STOP;
                            }

                            log.info("Starting merge and push.");

                            long minTimestamp = segmentGranularity
                                    .truncate(rejectionPolicy.getCurrMaxTime().minus(windowMillis)).getMillis();

                            List<Map.Entry<Long, Sink>> sinksToPush = Lists.newArrayList();
                            for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                                final Long intervalStart = entry.getKey();
                                if (intervalStart < minTimestamp) {
                                    log.info("Adding entry[%s] for merge and push.", entry);
                                    sinksToPush.add(entry);
                                }
                            }

                            for (final Map.Entry<Long, Sink> entry : sinksToPush) {
                                persistAndMerge(entry.getKey(), entry.getValue());
                            }

                            if (stopped) {
                                log.info("Stopping merge-n-push overseer thread");
                                return ScheduledExecutors.Signal.STOP;
                            } else {
                                return ScheduledExecutors.Signal.REPEAT;
                            }
                        }
                    });
        }
    };
}
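
The handoff callback above turns on a single contains(long) call: sinks are keyed by the start millis of their intervals, so a published segment claims exactly the sinks whose keys fall inside the segment's interval. A stripped-down sketch of that matching step, using only Joda-Time and a plain map (names are illustrative, not Druid's):

import java.util.Map;
import java.util.TreeMap;

import org.joda.time.DateTime;
import org.joda.time.Interval;

public class SinkMatchSketch {
    public static void main(String[] args) {
        // Sinks keyed by the start millis of their hourly intervals
        Map<Long, String> sinks = new TreeMap<Long, String>();
        DateTime t0 = new DateTime(2013, 1, 1, 0, 0, 0, 0);
        sinks.put(t0.getMillis(), "sink-00");
        sinks.put(t0.plusHours(1).getMillis(), "sink-01");
        sinks.put(t0.plusHours(2).getMillis(), "sink-02");

        // A published segment covering the first two hours
        Interval segmentInterval = new Interval(t0, t0.plusHours(2));

        for (Map.Entry<Long, String> entry : sinks.entrySet()) {
            if (segmentInterval.contains(entry.getKey())) { // contains(long), key unboxes
                System.out.println("segment covers " + entry.getValue());
            }
        }
        // Prints sink-00 and sink-01; sink-02 starts at the exclusive end
    }
}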

From source file: com.metamx.druid.realtime.RealtimePlumberSchool.java

License: Open Source License

@Override
public Plumber findPlumber(final Schema schema, final FireDepartmentMetrics metrics) {
    verifyState();
    initializeExecutors();

    computeBaseDir(schema).mkdirs();

    final Map<Long, Sink> sinks = Maps.newConcurrentMap();

    for (File sinkDir : computeBaseDir(schema).listFiles()) {
        Interval sinkInterval = new Interval(sinkDir.getName().replace("_", "/"));

        final File[] sinkFiles = sinkDir.listFiles();
        Arrays.sort(sinkFiles, new Comparator<File>() {
            @Override
            public int compare(File o1, File o2) {
                try {
                    return Ints.compare(Integer.parseInt(o1.getName()), Integer.parseInt(o2.getName()));
                } catch (NumberFormatException e) {
                    log.error(e, "Couldn't compare as numbers? [%s][%s]", o1, o2);
                    return o1.compareTo(o2);
                }
            }
        });

        try {
            List<FireHydrant> hydrants = Lists.newArrayList();
            for (File segmentDir : sinkFiles) {
                log.info("Loading previously persisted segment at [%s]", segmentDir);
                hydrants.add(new FireHydrant(new QueryableIndexSegment(null, IndexIO.loadIndex(segmentDir)),
                        Integer.parseInt(segmentDir.getName())));
            }

            Sink currSink = new Sink(sinkInterval, schema, hydrants);
            sinks.put(sinkInterval.getStartMillis(), currSink);

            metadataUpdater.announceSegment(currSink.getSegment());
        } catch (IOException e) {
            log.makeAlert(e, "Problem loading sink[%s] from disk.", schema.getDataSource())
                    .addData("interval", sinkInterval).emit();
        }
    }

    serverView.registerSegmentCallback(persistExecutor, new ServerView.BaseSegmentCallback() {
        @Override
        public ServerView.CallbackAction segmentAdded(DruidServer server, DataSegment segment) {
            if ("realtime".equals(server.getType())) {
                return ServerView.CallbackAction.CONTINUE;
            }

            log.debug("Checking segment[%s] on server[%s]", segment, server);
            if (schema.getDataSource().equals(segment.getDataSource())) {
                final Interval interval = segment.getInterval();
                for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                    final Long sinkKey = entry.getKey();
                    if (interval.contains(sinkKey)) {
                        final Sink sink = entry.getValue();
                        log.info("Segment matches sink[%s]", sink);

                        if (segment.getVersion().compareTo(sink.getSegment().getVersion()) >= 0) {
                            try {
                                metadataUpdater.unannounceSegment(sink.getSegment());
                                FileUtils.deleteDirectory(computePersistDir(schema, sink.getInterval()));
                                sinks.remove(sinkKey);
                            } catch (IOException e) {
                                log.makeAlert(e, "Unable to delete old segment for dataSource[%s].",
                                        schema.getDataSource()).addData("interval", sink.getInterval()).emit();
                            }
                        }
                    }
                }
            }

            return ServerView.CallbackAction.CONTINUE;
        }
    });

    final long truncatedNow = segmentGranularity.truncate(new DateTime()).getMillis();
    final long windowMillis = windowPeriod.toStandardDuration().getMillis();
    final RejectionPolicy rejectionPolicy = rejectionPolicyFactory.create(windowPeriod);
    log.info("Creating plumber using rejectionPolicy[%s]", rejectionPolicy);

    log.info("Expect to run at [%s]", new DateTime().plus(new Duration(System.currentTimeMillis(),
            segmentGranularity.increment(truncatedNow) + windowMillis)));

    ScheduledExecutors.scheduleAtFixedRate(scheduledExecutor,
            new Duration(System.currentTimeMillis(), segmentGranularity.increment(truncatedNow) + windowMillis),
            new Duration(truncatedNow, segmentGranularity.increment(truncatedNow)),
            new ThreadRenamingRunnable(String.format("%s-overseer", schema.getDataSource())) {
                @Override
                public void doRun() {
                    log.info("Starting merge and push.");

                    long minTimestamp = segmentGranularity.truncate(rejectionPolicy.getCurrMaxTime())
                            .getMillis() - windowMillis;

                    List<Map.Entry<Long, Sink>> sinksToPush = Lists.newArrayList();
                    for (Map.Entry<Long, Sink> entry : sinks.entrySet()) {
                        final Long intervalStart = entry.getKey();
                        if (intervalStart < minTimestamp) {
                            log.info("Adding entry[%s] for merge and push.", entry);
                            sinksToPush.add(entry);
                        }
                    }

                    for (final Map.Entry<Long, Sink> entry : sinksToPush) {
                        final Sink sink = entry.getValue();

                        final String threadName = String.format("%s-%s-persist-n-merge", schema.getDataSource(),
                                new DateTime(entry.getKey()));
                        persistExecutor.execute(new ThreadRenamingRunnable(threadName) {
                            @Override
                            public void doRun() {
                                final Interval interval = sink.getInterval();

                                for (FireHydrant hydrant : sink) {
                                    if (!hydrant.hasSwapped()) {
                                        log.info("Hydrant[%s] hasn't swapped yet, swapping. Sink[%s]", hydrant,
                                                sink);
                                        final int rowCount = persistHydrant(hydrant, schema, interval);
                                        metrics.incrementRowOutputCount(rowCount);
                                    }
                                }

                                final File mergedFile;
                                try {
                                    List<QueryableIndex> indexes = Lists.newArrayList();
                                    for (FireHydrant fireHydrant : sink) {
                                        Segment segment = fireHydrant.getSegment();
                                        final QueryableIndex queryableIndex = segment.asQueryableIndex();
                                        log.info("Adding hydrant[%s]", fireHydrant);
                                        indexes.add(queryableIndex);
                                    }

                                    mergedFile = IndexMerger.mergeQueryableIndex(indexes,
                                            schema.getAggregators(),
                                            new File(computePersistDir(schema, interval), "merged"));

                                    QueryableIndex index = IndexIO.loadIndex(mergedFile);

                                    DataSegment segment = segmentPusher.push(mergedFile,
                                            sink.getSegment().withDimensions(
                                                    Lists.newArrayList(index.getAvailableDimensions())));

                                    metadataUpdater.publishSegment(segment);
                                } catch (IOException e) {
                                    log.makeAlert(e, "Failed to persist merged index[%s]",
                                            schema.getDataSource()).addData("interval", interval).emit();
                                }
                            }
                        });
                    }
                }
            });

    return new Plumber() {
        @Override
        public Sink getSink(long timestamp) {
            if (!rejectionPolicy.accept(timestamp)) {
                return null;
            }

            final long truncatedTime = segmentGranularity.truncate(timestamp);

            Sink retVal = sinks.get(truncatedTime);

            if (retVal == null) {
                retVal = new Sink(new Interval(new DateTime(truncatedTime),
                        segmentGranularity.increment(new DateTime(truncatedTime))), schema);

                try {
                    metadataUpdater.announceSegment(retVal.getSegment());

                    sinks.put(truncatedTime, retVal);
                } catch (IOException e) {
                    log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
                            .addData("interval", retVal.getInterval()).emit();
                }
            }

            return retVal;
        }

        @Override
        public <T> QueryRunner<T> getQueryRunner(final Query<T> query) {
            final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
            final Function<Query<T>, ServiceMetricEvent.Builder> builderFn = new Function<Query<T>, ServiceMetricEvent.Builder>() {
                private final QueryToolChest<T, Query<T>> toolchest = factory.getToolchest();

                @Override
                public ServiceMetricEvent.Builder apply(@Nullable Query<T> input) {
                    return toolchest.makeMetricBuilder(query);
                }
            };

            return factory.mergeRunners(EXEC,
                    FunctionalIterable.create(sinks.values()).transform(new Function<Sink, QueryRunner<T>>() {
                        @Override
                        public QueryRunner<T> apply(@Nullable Sink input) {
                            return new MetricsEmittingQueryRunner<T>(emitter, builderFn, factory.mergeRunners(
                                    EXEC,
                                    Iterables.transform(input, new Function<FireHydrant, QueryRunner<T>>() {
                                        @Override
                                        public QueryRunner<T> apply(@Nullable FireHydrant input) {
                                            return factory.createRunner(input.getSegment());
                                        }
                                    })));
                        }
                    }));
        }

        @Override
        public void persist(final Runnable commitRunnable) {
            final List<Pair<FireHydrant, Interval>> indexesToPersist = Lists.newArrayList();
            for (Sink sink : sinks.values()) {
                if (sink.swappable()) {
                    indexesToPersist.add(Pair.of(sink.swap(), sink.getInterval()));
                }
            }

            log.info("Submitting persist runnable for dataSource[%s]", schema.getDataSource());

            persistExecutor.execute(new ThreadRenamingRunnable(
                    String.format("%s-incremental-persist", schema.getDataSource())) {
                @Override
                public void doRun() {
                    for (Pair<FireHydrant, Interval> pair : indexesToPersist) {
                        metrics.incrementRowOutputCount(persistHydrant(pair.lhs, schema, pair.rhs));
                    }
                    commitRunnable.run();
                }
            });
        }

        @Override
        public void finishJob() {
            throw new UnsupportedOperationException();
        }
    };
}

From source file: com.metamx.druid.VersionedIntervalTimeline.java

License: Open Source License

private boolean addAtKey(NavigableMap<Interval, TimelineEntry> timeline, Interval key, TimelineEntry entry) {
    boolean retVal = false;
    Interval currKey = key;
    Interval entryInterval = entry.getTrueInterval();

    if (!currKey.overlaps(entryInterval)) {
        return false;
    }

    while (currKey != null && currKey.overlaps(entryInterval)) {
        Interval nextKey = timeline.higherKey(currKey);

        int versionCompare = versionComparator.compare(entry.getVersion(), timeline.get(currKey).getVersion());

        if (versionCompare < 0) {
            if (currKey.contains(entryInterval)) {
                return true;
            } else if (currKey.getStart().isBefore(entryInterval.getStart())) {
                entryInterval = new Interval(currKey.getEnd(), entryInterval.getEnd());
            } else {
                addIntervalToTimeline(new Interval(entryInterval.getStart(), currKey.getStart()), entry,
                        timeline);

                if (entryInterval.getEnd().isAfter(currKey.getEnd())) {
                    entryInterval = new Interval(currKey.getEnd(), entryInterval.getEnd());
                } else {
                    entryInterval = null;
                }
            }
        } else if (versionCompare > 0) {
            TimelineEntry oldEntry = timeline.remove(currKey);

            if (currKey.contains(entryInterval)) {
                addIntervalToTimeline(new Interval(currKey.getStart(), entryInterval.getStart()), oldEntry,
                        timeline);
                addIntervalToTimeline(new Interval(entryInterval.getEnd(), currKey.getEnd()), oldEntry,
                        timeline);
                addIntervalToTimeline(entryInterval, entry, timeline);

                return true;
            } else if (currKey.getStart().isBefore(entryInterval.getStart())) {
                addIntervalToTimeline(new Interval(currKey.getStart(), entryInterval.getStart()), oldEntry,
                        timeline);
            } else if (entryInterval.getEnd().isBefore(currKey.getEnd())) {
                addIntervalToTimeline(new Interval(entryInterval.getEnd(), currKey.getEnd()), oldEntry,
                        timeline);
            }
        } else {
            if (timeline.get(currKey).equals(entry)) {
                // This occurs when restoring segments
                timeline.remove(currKey);
            } else {
                throw new UnsupportedOperationException(
                        String.format("Cannot add overlapping segments [%s and %s] with the same version [%s]",
                                currKey, entryInterval, entry.getVersion()));
            }
        }

        currKey = nextKey;
        retVal = true;
    }

    addIntervalToTimeline(entryInterval, entry, timeline);

    return retVal;
}
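
The versionCompare > 0 branch with currKey.contains(entryInterval) is the classic interval split: the newer entry takes the middle, and the older entry survives only as the pieces to its left and right. A minimal sketch of that split with plain Joda Intervals (values are illustrative):

import org.joda.time.DateTime;
import org.joda.time.Interval;

public class OverlaySplitSketch {
    public static void main(String[] args) {
        DateTime t = new DateTime(2013, 1, 1, 0, 0, 0, 0);
        Interval currKey = new Interval(t, t.plusHours(6));            // existing, older version
        Interval entry = new Interval(t.plusHours(2), t.plusHours(4)); // newer, fully contained

        if (currKey.contains(entry)) {
            Interval left = new Interval(currKey.getStart(), entry.getStart());
            Interval right = new Interval(entry.getEnd(), currKey.getEnd());
            System.out.println("old entry keeps " + left + " and " + right);
            System.out.println("new entry owns " + entry);
        }
    }
}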

From source file: com.qcadoo.mes.productionPerShift.DateTimeRange.java

License: Open Source License

public Collection<? extends DateTimeRange> remove(final DateTimeRange range) {
    Interval other = range.interval;
    if (interval.contains(other)) {
        return Lists.newArrayList(new DateTimeRange(interval.getStart(), other.getStart()),
                new DateTimeRange(other.getEnd(), interval.getEnd()));
    } else if (other.contains(interval)) {
        return Collections.EMPTY_LIST;
    } else if (interval.overlaps(other)) {
        if (interval.getStart().isBefore(other.getStart())) {
            return Lists.newArrayList(new DateTimeRange(interval.getStart(), other.getStart()));
        } else {
            return Lists.newArrayList(new DateTimeRange(other.getEnd(), interval.getEnd()));
        }
    }
    return Lists.newArrayList(this);
}
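
Note the branch order: containment is tested before overlap, since contains implies overlaps. A minimal sketch of the same case analysis with plain Joda Intervals (values are illustrative):

import org.joda.time.DateTime;
import org.joda.time.Interval;

public class IntervalSubtractCases {
    public static void main(String[] args) {
        DateTime t = new DateTime(2013, 1, 1, 0, 0, 0, 0);
        Interval base = new Interval(t, t.plusHours(4));

        Interval inside = new Interval(t.plusHours(1), t.plusHours(2));
        Interval covering = new Interval(t.minusHours(1), t.plusHours(5));
        Interval rightOverlap = new Interval(t.plusHours(3), t.plusHours(5));
        Interval disjoint = new Interval(t.plusHours(6), t.plusHours(7));

        System.out.println(base.contains(inside));       // true  -> remove splits base in two
        System.out.println(covering.contains(base));     // true  -> nothing of base remains
        System.out.println(base.overlaps(rightOverlap)); // true  -> trim one side
        System.out.println(base.overlaps(disjoint));     // false -> base is unchanged
    }
}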

From source file: com.qcadoo.mes.productionPerShift.DateTimeRange.java

License: Open Source License

public Collection<? extends DateTimeRange> add(DateTimeRange range) {
    Interval other = range.interval;
    if (interval.contains(other)) {
        return Lists.newArrayList(this);
    } else if (other.contains(interval)) {
        return Lists.newArrayList(range);
    } else if (interval.overlaps(other)) {
        return Lists.newArrayList(unionWith(range));
    }
    return Lists.newArrayList(this, range);
}

From source file: com.quant.TimeSeries.java

License: Open Source License

/**
 * Returns a new time series which is a view of a subset of the current series.
 * <p>
 * The new series has begin and end indexes corresponding to the bounds of the subset within the full series.<br>
 * The ticks of the series are shared between the original time series and the returned one (i.e. no copy).
 * @param beginIndex the begin index (inclusive) of the time series
 * @param duration the duration of the time series
 * @return a constrained {@link TimeSeries time series} which is a sub-set of the current series
 */
public TimeSeries subseries(int beginIndex, Period duration) {

    // Calculating the sub-series interval
    DateTime beginInterval = getTick(beginIndex).getEndTime();
    DateTime endInterval = beginInterval.plus(duration);
    Interval subseriesInterval = new Interval(beginInterval, endInterval);

    // Checking ticks belonging to the sub-series (starting at the provided index)
    int subseriesNbTicks = 0;
    for (int i = beginIndex; i <= endIndex; i++) {
        // For each tick...
        DateTime tickTime = getTick(i).getEndTime();
        if (!subseriesInterval.contains(tickTime)) {
            // Tick out of the interval
            break;
        }
        // Tick in the interval
        // --> Incrementing the number of ticks in the subseries
        subseriesNbTicks++;
    }

    return subseries(beginIndex, beginIndex + subseriesNbTicks - 1);
}

From source file: com.quant.TimeSeries.java

License: Open Source License

/**
 * Builds a list of split indexes from splitDuration.
 * @param splitDuration the duration between 2 splits
 * @return a list of begin indexes after split
 */
private List<Integer> getSplitBeginIndexes(Period splitDuration) {
    ArrayList<Integer> beginIndexes = new ArrayList<Integer>();

    // Adding the first begin index
    beginIndexes.add(beginIndex);

    // Building the first interval before next split
    DateTime beginInterval = getTick(beginIndex).getEndTime();
    DateTime endInterval = beginInterval.plus(splitDuration);
    Interval splitInterval = new Interval(beginInterval, endInterval);

    for (int i = beginIndex; i <= endIndex; i++) {
        // For each tick...
        DateTime tickTime = getTick(i).getEndTime();
        if (!splitInterval.contains(tickTime)) {
            // Tick out of the interval
            if (!endInterval.isAfter(tickTime)) {
                // Tick after the interval
                // --> Adding a new begin index
                beginIndexes.add(i);
            }

            // Building the new interval before next split
            beginInterval = endInterval.isBefore(tickTime) ? tickTime : endInterval;
            endInterval = beginInterval.plus(splitDuration);
            splitInterval = new Interval(beginInterval, endInterval);
        }
    }
    return beginIndexes;
}
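
The rolling-window idea above can be exercised on its own: advance a fixed-duration interval whenever a timestamp falls outside it. A self-contained sketch (values are illustrative; this is not the TimeSeries class itself):

import java.util.Arrays;
import java.util.List;

import org.joda.time.DateTime;
import org.joda.time.Interval;
import org.joda.time.Period;

public class SplitSketch {
    public static void main(String[] args) {
        Period splitDuration = Period.hours(1);
        List<DateTime> times = Arrays.asList(
                new DateTime(2013, 1, 1, 0, 10, 0, 0),
                new DateTime(2013, 1, 1, 0, 50, 0, 0),
                new DateTime(2013, 1, 1, 1, 20, 0, 0),   // outside the first window
                new DateTime(2013, 1, 1, 3, 5, 0, 0));   // after a gap

        DateTime begin = times.get(0);
        Interval window = new Interval(begin, begin.plus(splitDuration));
        for (DateTime time : times) {
            if (!window.contains(time)) {
                // Restart the window, as getSplitBeginIndexes does: at the window end,
                // or at the tick itself if it lies beyond the end
                DateTime newBegin = window.getEnd().isBefore(time) ? time : window.getEnd();
                window = new Interval(newBegin, newBegin.plus(splitDuration));
                System.out.println("split at " + time);
            }
        }
        // Prints splits at 01:20 and 03:05
    }
}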

From source file: com.qubit.solution.fenixedu.bennu.webservices.services.server.SecurityHeader.java

License: Open Source License

private boolean isTimestampValid() {
    DateTimeFormatter forPattern = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ");
    try {
        DateTime parseDateTime = forPattern.parseDateTime(getTimestamp());
        Interval interval = new Interval(new DateTime().minusMinutes(5), new DateTime().plusMinutes(5));
        return interval.contains(parseDateTime);
    } catch (Throwable t) {
        t.printStackTrace();
        return false;
    }
}

From source file: com.sheepdog.mashmesh.models.VolunteerProfile.java

License: Apache License

public boolean isTimeslotAvailable(DateTime startDateTime, DateTime endDateTime) {
    // Assuming that the total interval is less than a day long
    List<Interval> availableIntervals = getAvailableIntervals(startDateTime);
    Interval timeslot = new Interval(startDateTime, endDateTime);

    for (Interval availableInterval : availableIntervals) {
        if (availableInterval.contains(timeslot)) {
            return true;
        }
    }

    return false;
}
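
A note on the contains(ReadableInterval) overload used here: a non-empty interval contains itself, so a timeslot exactly equal to an availability window is accepted, while one running past the window's end is not. A minimal sketch (values are illustrative):

import org.joda.time.DateTime;
import org.joda.time.Interval;

public class TimeslotCheck {
    public static void main(String[] args) {
        DateTime t = new DateTime(2013, 1, 1, 9, 0, 0, 0);
        Interval available = new Interval(t, t.plusHours(8)); // 09:00-17:00

        System.out.println(available.contains(new Interval(t.plusHours(1), t.plusHours(2)))); // true
        System.out.println(available.contains(available));                                    // true
        System.out.println(available.contains(new Interval(t.plusHours(7), t.plusHours(9)))); // false
    }
}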

From source file: com.sos.scheduler.model.objects.JodaTools.java

License: Apache License

public static DateTime getWeekdayInIntervalOrNull(Interval interval, int weekday, int which) {
    DateTime currentDate = getStartOfMonth(interval.getStart());
    DateTime result = getWeekdayInMonth(currentDate, weekday, which);
    while (!interval.contains(result)) {
        currentDate = currentDate.plusMonths(1);
        result = getWeekdayInMonth(currentDate, weekday, which);
        if (!result.isBefore(interval.getEnd()))
            return null;
    }
    return result;
}
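
A usage sketch for the method above. Since getWeekdayInMonth belongs to the same JodaTools class and is not shown here, the helper below is a hypothetical stand-in with the same meaning (the which-th occurrence of weekday in the month of date); only the interval.contains(result) check mirrors the original:

import org.joda.time.DateTime;
import org.joda.time.DateTimeConstants;
import org.joda.time.Interval;

public class NthWeekdaySketch {
    // Hypothetical stand-in for JodaTools.getWeekdayInMonth
    static DateTime weekdayInMonth(DateTime date, int weekday, int which) {
        DateTime first = date.withDayOfMonth(1);
        int offset = (weekday - first.getDayOfWeek() + 7) % 7;
        return first.plusDays(offset).plusWeeks(which - 1);
    }

    public static void main(String[] args) {
        DateTime t = new DateTime(2013, 1, 10, 0, 0, 0, 0);
        Interval interval = new Interval(t, t.plusMonths(2));

        // The second Tuesday of January 2013 (the 8th) precedes the interval,
        // so the search rolls on to February's second Tuesday (the 12th)
        DateTime jan = weekdayInMonth(t, DateTimeConstants.TUESDAY, 2);
        System.out.println(jan.toLocalDate() + " in range? " + interval.contains(jan)); // false
        DateTime feb = weekdayInMonth(t.plusMonths(1), DateTimeConstants.TUESDAY, 2);
        System.out.println(feb.toLocalDate() + " in range? " + interval.contains(feb)); // true
    }
}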