Example usage for org.joda.time Interval contains

List of usage examples for org.joda.time Interval contains

Introduction

On this page you can find example usage for org.joda.time Interval contains.

Prototype

public boolean contains(long millisInstant) 

Document

Does this time interval contain the specified millisecond instant.
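Before the project examples below, here is a minimal sketch of the method's semantics (the class name and dates are illustrative, not taken from any of the projects). A Joda-Time Interval is inclusive of its start instant and exclusive of its end instant:

import org.joda.time.DateTime;
import org.joda.time.Interval;

public class IntervalContainsDemo {
    public static void main(String[] args) {
        DateTime start = new DateTime(2016, 1, 1, 0, 0);
        DateTime end = new DateTime(2016, 2, 1, 0, 0);
        Interval interval = new Interval(start, end);

        // Start is inclusive, end is exclusive.
        System.out.println(interval.contains(start.getMillis()));              // true
        System.out.println(interval.contains(end.getMillis()));                // false
        System.out.println(interval.contains(start.plusDays(10).getMillis())); // true
    }
}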

Usage

From source file:eu.itesla_project.cases.EntsoeCaseRepository.java

License:Mozilla Public License

@Override
public Set<DateTime> dataAvailable(CaseType type, Set<Country> countries, Interval interval) {
    Set<UcteGeographicalCode> geographicalCodes = new HashSet<>();
    if (countries == null) {
        geographicalCodes.add(UcteGeographicalCode.UX);
    } else {
        for (Country country : countries) {
            geographicalCodes.addAll(forCountryHacked(country));
        }
    }
    Multimap<DateTime, UcteGeographicalCode> dates = HashMultimap.create();
    for (EntsoeFormat format : formats) {
        Path formatDir = config.getRootDir().resolve(format.getDirName());
        if (Files.exists(formatDir)) {
            Path typeDir = formatDir.resolve(type.name());
            if (Files.exists(typeDir)) {
                browse(typeDir, path -> {
                    UcteFileName ucteFileName = UcteFileName.parse(path.getFileName().toString());
                    UcteGeographicalCode geographicalCode = ucteFileName.getGeographicalCode();
                    if (geographicalCode != null
                            && !config.getForbiddenFormatsByGeographicalCode().get(geographicalCode)
                                    .contains(format.getImporter().getFormat())
                            && interval.contains(ucteFileName.getDate())) {
                        dates.put(ucteFileName.getDate(), geographicalCode);
                    }
                });
            }
        }
    }
    return dates.asMap().entrySet().stream()
            .filter(e -> new HashSet<>(e.getValue()).containsAll(geographicalCodes)).map(Map.Entry::getKey)
            .collect(Collectors.toCollection(TreeSet::new));
}
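The closing stream keeps only the dates for which a file was found for every requested geographical code, and the TreeSet collector returns them in chronological order.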

From source file:eu.itesla_project.entsoe.cases.EntsoeCaseRepository.java

License:Mozilla Public License

@Override
public Set<DateTime> dataAvailable(CaseType type, Set<Country> countries, Interval interval) {
    Set<EntsoeGeographicalCode> geographicalCodes = new HashSet<>();
    if (countries == null) {
        geographicalCodes.add(EntsoeGeographicalCode.UX);
    } else {
        for (Country country : countries) {
            geographicalCodes.addAll(forCountryHacked(country));
        }
    }
    Multimap<DateTime, EntsoeGeographicalCode> dates = HashMultimap.create();
    for (EntsoeFormat format : formats) {
        Path formatDir = config.getRootDir().resolve(format.getDirName());
        if (Files.exists(formatDir)) {
            Path typeDir = formatDir.resolve(type.name());
            if (Files.exists(typeDir)) {
                browse(typeDir, path -> {
                    EntsoeFileName entsoeFileName = EntsoeFileName.parse(path.getFileName().toString());
                    EntsoeGeographicalCode geographicalCode = entsoeFileName.getGeographicalCode();
                    if (geographicalCode != null
                            && !config.getForbiddenFormatsByGeographicalCode().get(geographicalCode)
                                    .contains(format.getImporter().getFormat())
                            && interval.contains(entsoeFileName.getDate())) {
                        dates.put(entsoeFileName.getDate(), geographicalCode);
                    }
                });
            }
        }
    }
    return dates.asMap().entrySet().stream()
            .filter(e -> new HashSet<>(e.getValue()).containsAll(geographicalCodes)).map(Map.Entry::getKey)
            .collect(Collectors.toCollection(TreeSet::new));
}

From source file:eu.itesla_project.online.db.OnlineDbMVStore.java

License:Mozilla Public License

@Override
public List<OnlineWorkflowDetails> listWorkflows(Interval basecaseInterval) {
    LOGGER.info("Getting list of stored workflows run on basecases within the interval {}", basecaseInterval);
    String dateFormatPattern = "yyyyMMdd_HHmm";
    DateTimeFormatter formatter = DateTimeFormat.forPattern(dateFormatPattern);
    List<OnlineWorkflowDetails> workflowIds = new ArrayList<OnlineWorkflowDetails>();
    File[] files = config.getOnlineDbDir().toFile().listFiles(new FilenameFilter() {
        public boolean accept(File dir, String name) {
            return name.toLowerCase().startsWith(STORED_WORKFLOW_PREFIX);
        }
    });
    for (File file : files) {
        if (file.isFile()) {
            String workflowId = file.getName().substring(STORED_WORKFLOW_PREFIX.length());
            if (workflowId.length() > dateFormatPattern.length() && workflowId
                    .substring(dateFormatPattern.length(), dateFormatPattern.length() + 1).equals("_")) {
                String basecaseName = workflowId.substring(0, dateFormatPattern.length());
                DateTime basecaseDate = DateTime.parse(basecaseName, formatter);
                if (basecaseInterval.contains(basecaseDate.getMillis())) {
                    OnlineWorkflowDetails workflowDetails = new OnlineWorkflowDetails(workflowId);
                    workflowDetails.setWorkflowDate(getWorkflowDate(workflowId));
                    workflowIds.add(workflowDetails);
                }
            }
        }
    }
    Collections.sort(workflowIds, new Comparator<OnlineWorkflowDetails>() {
        @Override
        public int compare(OnlineWorkflowDetails wfDetails1, OnlineWorkflowDetails wfDetails2) {
            return wfDetails1.getWorkflowDate().compareTo(wfDetails2.getWorkflowDate());
        }
    });
    LOGGER.info("Found {} workflow(s)", workflowIds.size());
    return workflowIds;
}

From source file:gobblin.util.TimeRangeChecker.java

License:Apache License

/**
 * Checks if a specified time is on a day that is in a given {@link List} of acceptable days, and that the
 * hours + minutes of the specified time fall into a range defined by startTimeStr and endTimeStr.
 *
 * @param days is a {@link List} of days; if the specified {@link DateTime} does not fall on a day in this
 * {@link List} then this method will return false.
 * @param startTimeStr defines the start of the range that the currentTime can fall into. This {@link String} should be
 * of the pattern defined by {@link #HOUR_MINUTE_FORMAT}.
 * @param endTimeStr defines the end of the range that the currentTime can fall into. This {@link String} should be
 * of the pattern defined by {@link #HOUR_MINUTE_FORMAT}.
 * @param currentTime is a {@link DateTime} for which this method will check if it is in the given {@link List} of
 * days and falls into the time range defined by startTimeStr and endTimeStr.
 *
 * @return true if the given time is in the defined range, false otherwise.
 */
public static boolean isTimeInRange(List<String> days, String startTimeStr, String endTimeStr,
        DateTime currentTime) {

    if (!Iterables.any(days, new AreDaysEqual(DAYS_OF_WEEK.get(currentTime.getDayOfWeek())))) {
        return false;
    }

    DateTime startTime = null;
    DateTime endTime = null;

    try {
        startTime = HOUR_MINUTE_FORMATTER.withZone(DATE_TIME_ZONE).parseDateTime(startTimeStr);
    } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException(
                "startTimeStr format is invalid, must be of format " + HOUR_MINUTE_FORMAT, e);
    }

    try {
        endTime = HOUR_MINUTE_FORMATTER.withZone(DATE_TIME_ZONE).parseDateTime(endTimeStr);
    } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException(
                "endTimeStr format is invalid, must be of format " + HOUR_MINUTE_FORMAT, e);
    }

    startTime = startTime.withDate(currentTime.getYear(), currentTime.getMonthOfYear(),
            currentTime.getDayOfMonth());
    endTime = endTime.withDate(currentTime.getYear(), currentTime.getMonthOfYear(),
            currentTime.getDayOfMonth());

    Interval interval = new Interval(startTime.getMillis(), endTime.getMillis(), DATE_TIME_ZONE);
    return interval.contains(currentTime.getMillis());
}
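A hypothetical invocation of the method above (the day names and the "HH:mm" pattern are assumptions about the class's DAYS_OF_WEEK and HOUR_MINUTE_FORMAT constants, which are not shown here):

boolean inRange = TimeRangeChecker.isTimeInRange(
        Arrays.asList("MONDAY", "TUESDAY", "WEDNESDAY"),
        "09:00", "17:30",
        new DateTime());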

From source file:io.druid.indexing.common.actions.SegmentAllocateAction.java

License:Apache License

@Override
public SegmentIdentifier perform(final Task task, final TaskActionToolbox toolbox) throws IOException {
    int attempt = 0;
    while (true) {
        attempt++;

        if (!task.getDataSource().equals(dataSource)) {
            throw new IAE("Task dataSource must match action dataSource, [%s] != [%s].", task.getDataSource(),
                    dataSource);
        }

        final IndexerMetadataStorageCoordinator msc = toolbox.getIndexerMetadataStorageCoordinator();

        // 1) if something overlaps our timestamp, use that
        // 2) otherwise try preferredSegmentGranularity & going progressively smaller

        final List<Interval> tryIntervals = Lists.newArrayList();

        final Interval rowInterval = queryGranularity.bucket(timestamp);

        final Set<DataSegment> usedSegmentsForRow = ImmutableSet
                .copyOf(msc.getUsedSegmentsForInterval(dataSource, rowInterval));

        if (usedSegmentsForRow.isEmpty()) {
            // No existing segments for this row, but there might still be nearby ones that conflict with our preferred
            // segment granularity. Try that first, and then progressively smaller ones if it fails.
            for (Granularity gran : Granularity.granularitiesFinerThan(preferredSegmentGranularity)) {
                tryIntervals.add(gran.bucket(timestamp));
            }
        } else {
            // Existing segment(s) exist for this row; use the interval of the first one.
            tryIntervals.add(usedSegmentsForRow.iterator().next().getInterval());
        }

        for (final Interval tryInterval : tryIntervals) {
            if (tryInterval.contains(rowInterval)) {
                log.debug("Trying to allocate pending segment for rowInterval[%s], segmentInterval[%s].",
                        rowInterval, tryInterval);
                final TaskLock tryLock = toolbox.getTaskLockbox().tryLock(task, tryInterval).orNull();
                if (tryLock != null) {
                    final SegmentIdentifier identifier = msc.allocatePendingSegment(dataSource, sequenceName,
                            previousSegmentId, tryInterval, tryLock.getVersion());
                    if (identifier != null) {
                        return identifier;
                    } else {
                        log.debug(
                                "Could not allocate pending segment for rowInterval[%s], segmentInterval[%s].",
                                rowInterval, tryInterval);
                    }
                } else {
                    log.debug("Could not acquire lock for rowInterval[%s], segmentInterval[%s].", rowInterval,
                            tryInterval);
                }
            }
        }

        // Could not allocate a pending segment. There's a chance that this is because someone else inserted a segment
        // overlapping with this row between when we called "mdc.getUsedSegmentsForInterval" and now. Check it again,
        // and if it's different, repeat.

        if (!ImmutableSet.copyOf(msc.getUsedSegmentsForInterval(dataSource, rowInterval))
                .equals(usedSegmentsForRow)) {
            if (attempt < MAX_ATTEMPTS) {
                final long shortRandomSleep = 50 + (long) (Math.random() * 450);
                log.debug(
                        "Used segment set changed for rowInterval[%s]. Retrying segment allocation in %,dms (attempt = %,d).",
                        rowInterval, shortRandomSleep, attempt);
                try {
                    Thread.sleep(shortRandomSleep);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw Throwables.propagate(e);
                }
            } else {
                log.error("Used segment set changed for rowInterval[%s]. Not trying again (attempt = %,d).",
                        rowInterval, attempt);
                return null;
            }
        } else {
            return null;
        }
    }
}

From source file:io.druid.indexing.common.task.IndexDeterminePartitionsTask.java

License:Open Source License

@Override
public TaskStatus run(TaskToolbox toolbox) throws Exception {
    log.info("Running with targetPartitionSize[%d]", targetPartitionSize);

    // The implementation of this determine partitions stuff is less than optimal.  Should be done better.

    // We know this exists
    final Interval interval = getImplicitLockInterval().get();

    // Blacklist dimensions that have multiple values per row
    final Set<String> unusableDimensions = Sets.newHashSet();

    // Track values of all non-blacklisted dimensions
    final Map<String, TreeMultiset<String>> dimensionValueMultisets = Maps.newHashMap();

    // Load data
    final Firehose firehose = firehoseFactory.connect();

    try {
        while (firehose.hasMore()) {

            final InputRow inputRow = firehose.nextRow();

            if (interval.contains(inputRow.getTimestampFromEpoch())) {

                // Extract dimensions from event
                for (final String dim : inputRow.getDimensions()) {
                    final List<String> dimValues = inputRow.getDimension(dim);

                    if (!unusableDimensions.contains(dim)) {

                        if (dimValues.size() == 1) {

                            // Track this value
                            TreeMultiset<String> dimensionValueMultiset = dimensionValueMultisets.get(dim);

                            if (dimensionValueMultiset == null) {
                                dimensionValueMultiset = TreeMultiset.create();
                                dimensionValueMultisets.put(dim, dimensionValueMultiset);
                            }

                            dimensionValueMultiset.add(dimValues.get(0));

                        } else {

                            // Only single-valued dimensions can be used for partitions
                            unusableDimensions.add(dim);
                            dimensionValueMultisets.remove(dim);

                        }

                    }
                }

            }

        }
    } finally {
        firehose.close();
    }

    // ShardSpecs for index generator tasks
    final List<ShardSpec> shardSpecs = Lists.newArrayList();

    // Select highest-cardinality dimension
    Ordering<Map.Entry<String, TreeMultiset<String>>> byCardinalityOrdering = new Ordering<Map.Entry<String, TreeMultiset<String>>>() {
        @Override
        public int compare(Map.Entry<String, TreeMultiset<String>> left,
                Map.Entry<String, TreeMultiset<String>> right) {
            return Ints.compare(left.getValue().elementSet().size(), right.getValue().elementSet().size());
        }
    };

    if (dimensionValueMultisets.isEmpty()) {
        // No suitable partition dimension. We'll make one big segment and hope for the best.
        log.info("No suitable partition dimension found");
        shardSpecs.add(new NoneShardSpec());
    } else {
        // Find best partition dimension (heuristic: highest cardinality).
        final Map.Entry<String, TreeMultiset<String>> partitionEntry = byCardinalityOrdering
                .max(dimensionValueMultisets.entrySet());

        final String partitionDim = partitionEntry.getKey();
        final TreeMultiset<String> partitionDimValues = partitionEntry.getValue();

        log.info("Partitioning on dimension[%s] with cardinality[%d] over rows[%d]", partitionDim,
                partitionDimValues.elementSet().size(), partitionDimValues.size());

        // Iterate over unique partition dimension values in sorted order
        String currentPartitionStart = null;
        int currentPartitionSize = 0;
        for (final String partitionDimValue : partitionDimValues.elementSet()) {
            currentPartitionSize += partitionDimValues.count(partitionDimValue);
            if (currentPartitionSize >= targetPartitionSize) {
                final ShardSpec shardSpec = new SingleDimensionShardSpec(partitionDim, currentPartitionStart,
                        partitionDimValue, shardSpecs.size());

                log.info("Adding shard: %s", shardSpec);
                shardSpecs.add(shardSpec);

                currentPartitionSize = partitionDimValues.count(partitionDimValue);
                currentPartitionStart = partitionDimValue;
            }
        }

        if (currentPartitionSize > 0) {
            // One last shard to go
            final ShardSpec shardSpec;

            if (shardSpecs.isEmpty()) {
                shardSpec = new NoneShardSpec();
            } else {
                shardSpec = new SingleDimensionShardSpec(partitionDim, currentPartitionStart, null,
                        shardSpecs.size());
            }

            log.info("Adding shard: %s", shardSpec);
            shardSpecs.add(shardSpec);
        }
    }

    List<Task> nextTasks = Lists.transform(shardSpecs, new Function<ShardSpec, Task>() {
        @Override
        public Task apply(ShardSpec shardSpec) {
            return new IndexGeneratorTask(null, getGroupId(), getImplicitLockInterval().get(), firehoseFactory,
                    new Schema(schema.getDataSource(), schema.getSpatialDimensions(), schema.getAggregators(),
                            schema.getIndexGranularity(), shardSpec),
                    rowFlushBoundary);
        }
    });

    toolbox.getTaskActionClient().submit(new SpawnTasksAction(nextTasks));

    return TaskStatus.success(getId());
}

From source file:io.druid.indexing.common.task.IndexTask.java

License:Apache License

/**
 * Should we index this inputRow? Decision is based on our interval and shardSpec.
 *
 * @param inputRow the row to check
 *
 * @return true if the row's timestamp falls within the interval and the row belongs to this shard's chunk,
 * false otherwise
 */
private static boolean shouldIndex(final ShardSpec shardSpec, final Interval interval, final InputRow inputRow,
        final QueryGranularity rollupGran) {
    return interval.contains(inputRow.getTimestampFromEpoch())
            && shardSpec.isInChunk(rollupGran.truncate(inputRow.getTimestampFromEpoch()), inputRow);
}
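A hedged sketch of how this predicate might be applied while draining a firehose, mirroring the loop in the IndexDeterminePartitionsTask example above (the variable names are assumed):

while (firehose.hasMore()) {
    final InputRow inputRow = firehose.nextRow();
    if (shouldIndex(shardSpec, interval, inputRow, rollupGran)) {
        // only rows passing both the time and shard checks get indexed
    }
}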

From source file:io.druid.indexing.common.task.IndexTask.java

License:Apache License

private List<ShardSpec> determinePartitions(final Interval interval, final int targetPartitionSize,
        final QueryGranularity queryGranularity) throws IOException {
    log.info("Determining partitions for interval[%s] with targetPartitionSize[%d]", interval,
            targetPartitionSize);

    final FirehoseFactory firehoseFactory = ingestionSchema.getIOConfig().getFirehoseFactory();

    // The implementation of this determine partitions stuff is less than optimal.  Should be done better.
    // Use HLL to estimate number of rows
    HyperLogLogCollector collector = HyperLogLogCollector.makeLatestCollector();

    // Load data
    try (Firehose firehose = firehoseFactory.connect(ingestionSchema.getDataSchema().getParser())) {
        while (firehose.hasMore()) {
            final InputRow inputRow = firehose.nextRow();
            if (interval.contains(inputRow.getTimestampFromEpoch())) {
                final List<Object> groupKey = Rows
                        .toGroupKey(queryGranularity.truncate(inputRow.getTimestampFromEpoch()), inputRow);
                collector.add(hashFunction.hashBytes(jsonMapper.writeValueAsBytes(groupKey)).asBytes());
            }
        }
    }

    final double numRows = collector.estimateCardinality();
    log.info("Estimated approximately [%,f] rows of data.", numRows);

    int numberOfShards = (int) Math.ceil(numRows / targetPartitionSize);
    if ((double) numberOfShards > numRows) {
        numberOfShards = (int) numRows;
    }
    log.info("Will require [%,d] shard(s).", numberOfShards);

    // ShardSpecs we will return
    final List<ShardSpec> shardSpecs = Lists.newArrayList();

    if (numberOfShards == 1) {
        shardSpecs.add(new NoneShardSpec());
    } else {
        for (int i = 0; i < numberOfShards; ++i) {
            shardSpecs.add(new HashBasedNumberedShardSpec(i, numberOfShards, jsonMapper));
        }
    }

    return shardSpecs;
}
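To make the shard arithmetic concrete: if the HyperLogLog estimate were roughly 9,500,000 rows with a targetPartitionSize of 5,000,000, then ceil(9,500,000 / 5,000,000) = 2, so two HashBasedNumberedShardSpec instances would be returned. The clamp to numRows only matters for very small inputs, where it prevents allocating more shards than rows.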

From source file:io.druid.query.groupby.epinephelinae.GroupByRowProcessor.java

License:Apache License

public static Sequence<Row> process(final Query queryParam, final Sequence<Row> rows,
        final Map<String, ValueType> rowSignature, final GroupByQueryConfig config,
        final GroupByQueryResource resource, final ObjectMapper spillMapper, final String processingTmpDir) {
    final GroupByQuery query = (GroupByQuery) queryParam;
    final GroupByQueryConfig querySpecificConfig = config.withOverrides(query);

    final AggregatorFactory[] aggregatorFactories = new AggregatorFactory[query.getAggregatorSpecs().size()];
    for (int i = 0; i < query.getAggregatorSpecs().size(); i++) {
        aggregatorFactories[i] = query.getAggregatorSpecs().get(i);
    }

    final File temporaryStorageDirectory = new File(processingTmpDir,
            StringUtils.format("druid-groupBy-%s_%s", UUID.randomUUID(), query.getId()));

    final List<Interval> queryIntervals = query.getIntervals();
    final Filter filter = Filters.convertToCNFFromQueryContext(query, Filters.toFilter(query.getDimFilter()));

    final SettableSupplier<Row> rowSupplier = new SettableSupplier<>();
    final RowBasedColumnSelectorFactory columnSelectorFactory = RowBasedColumnSelectorFactory
            .create(rowSupplier, rowSignature);
    final ValueMatcher filterMatcher = filter == null ? BooleanValueMatcher.of(true)
            : filter.makeMatcher(columnSelectorFactory);

    final FilteredSequence<Row> filteredSequence = new FilteredSequence<>(rows, new Predicate<Row>() {
        @Override
        public boolean apply(Row input) {
            boolean inInterval = false;
            DateTime rowTime = input.getTimestamp();
            for (Interval queryInterval : queryIntervals) {
                if (queryInterval.contains(rowTime)) {
                    inInterval = true;
                    break;
                }
            }
            if (!inInterval) {
                return false;
            }
            rowSupplier.set(input);
            return filterMatcher.matches();
        }
    });

    return new BaseSequence<>(
            new BaseSequence.IteratorMaker<Row, CloseableGrouperIterator<RowBasedKey, Row>>() {
                @Override
                public CloseableGrouperIterator<RowBasedKey, Row> make() {
                    // This contains all closeable objects which are closed when the returned iterator iterates all the elements,
                    // or an exception is thrown. The objects are closed in their reverse order.
                    final List<Closeable> closeOnExit = Lists.newArrayList();

                    try {
                        final LimitedTemporaryStorage temporaryStorage = new LimitedTemporaryStorage(
                                temporaryStorageDirectory, querySpecificConfig.getMaxOnDiskStorage());

                        closeOnExit.add(temporaryStorage);

                        Pair<Grouper<RowBasedKey>, Accumulator<AggregateResult, Row>> pair = RowBasedGrouperHelper
                                .createGrouperAccumulatorPair(query, true, rowSignature, querySpecificConfig,
                                        new Supplier<ByteBuffer>() {
                                            @Override
                                            public ByteBuffer get() {
                                                final ResourceHolder<ByteBuffer> mergeBufferHolder = resource
                                                        .getMergeBuffer();
                                                closeOnExit.add(mergeBufferHolder);
                                                return mergeBufferHolder.get();
                                            }
                                        }, temporaryStorage, spillMapper, aggregatorFactories);
                        final Grouper<RowBasedKey> grouper = pair.lhs;
                        final Accumulator<AggregateResult, Row> accumulator = pair.rhs;
                        closeOnExit.add(grouper);

                        final AggregateResult retVal = filteredSequence.accumulate(AggregateResult.ok(),
                                accumulator);
                        if (!retVal.isOk()) {
                            throw new ResourceLimitExceededException(retVal.getReason());
                        }

                        return RowBasedGrouperHelper.makeGrouperIterator(grouper, query, new Closeable() {
                            @Override
                            public void close() throws IOException {
                                for (Closeable closeable : Lists.reverse(closeOnExit)) {
                                    CloseQuietly.close(closeable);
                                }
                            }
                        });
                    } catch (Throwable e) {
                        // Exception caught while setting up the iterator; release resources.
                        for (Closeable closeable : Lists.reverse(closeOnExit)) {
                            CloseQuietly.close(closeable);
                        }
                        throw e;
                    }
                }

                @Override
                public void cleanup(CloseableGrouperIterator<RowBasedKey, Row> iterFromMake) {
                    iterFromMake.close();
                }
            });
}

From source file:io.druid.query.search.search.UseIndexesStrategy.java

License:Apache License

static ImmutableBitmap makeTimeFilteredBitmap(final QueryableIndex index, final Segment segment,
        final Filter filter, final Interval interval) {
    final BitmapFactory bitmapFactory = index.getBitmapFactoryForDimensions();
    final ImmutableBitmap baseFilter;
    if (filter == null) {
        baseFilter = null;
    } else {
        final BitmapIndexSelector selector = new ColumnSelectorBitmapIndexSelector(
                index.getBitmapFactoryForDimensions(), VirtualColumns.EMPTY, index);
        Preconditions.checkArgument(filter.supportsBitmapIndex(selector), "filter[%s] should support bitmap",
                filter);
        baseFilter = filter.getBitmapIndex(selector);
    }

    final ImmutableBitmap timeFilteredBitmap;
    if (!interval.contains(segment.getDataInterval())) {
        final MutableBitmap timeBitmap = bitmapFactory.makeEmptyMutableBitmap();
        final Column timeColumn = index.getColumn(Column.TIME_COLUMN_NAME);
        try (final GenericColumn timeValues = timeColumn.getGenericColumn()) {

            int startIndex = Math.max(0, getStartIndexOfTime(timeValues, interval.getStartMillis(), true));
            int endIndex = Math.min(timeValues.length() - 1,
                    getStartIndexOfTime(timeValues, interval.getEndMillis(), false));

            for (int i = startIndex; i <= endIndex; i++) {
                timeBitmap.add(i);
            }

            final ImmutableBitmap finalTimeBitmap = bitmapFactory.makeImmutableBitmap(timeBitmap);
            timeFilteredBitmap = (baseFilter == null) ? finalTimeBitmap
                    : finalTimeBitmap.intersection(baseFilter);
        }
    } else {
        timeFilteredBitmap = baseFilter;
    }

    return timeFilteredBitmap;
}
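Note that the interval check here uses the contains(ReadableInterval) overload rather than contains(long): the row-index time bitmap is only materialized when the query interval does not fully cover the segment's data interval; when it does, every row already passes the time filter and baseFilter is returned unchanged.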