Example usage for org.joda.time Interval Interval

Introduction

This page collects example usages of the org.joda.time Interval constructors, drawn from the Druid codebase. The Prototype below documents the Interval(Object, Chronology) overload; the usage examples also exercise Interval(ReadableInstant, ReadableInstant), Interval(long, long), Interval(ReadablePeriod, ReadableInstant), and Interval(Object).

Prototype

public Interval(Object interval, Chronology chronology) 

Document

Constructs a time interval by converting or copying from another object, overriding the chronology.
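
A minimal, self-contained sketch of this documented overload (the interval string and UTC chronology are illustrative). The Object argument is run through Joda-Time's converter framework, so an ISO-8601 "start/end" string is one accepted input, and the supplied chronology overrides the one the conversion would otherwise use:

import org.joda.time.Interval;
import org.joda.time.chrono.ISOChronology;

public class IntervalFromObjectSketch {
    public static void main(String[] args) {
        // Convert an ISO-8601 interval string, forcing the UTC ISO chronology.
        Interval interval = new Interval("2012-01-01/2012-02-01", ISOChronology.getInstanceUTC());
        System.out.println(interval.getStart() + " / " + interval.getEnd());
    }
}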

Usage

From source file: io.druid.segment.IndexMergerV9.java

License: Apache License

private void makeIndexBinary(final FileSmoosher v9Smoosher, final List<IndexableAdapter> adapters,
        final File outDir, final List<String> mergedDimensions, final List<String> mergedMetrics,
        final ProgressIndicator progress, final IndexSpec indexSpec, final List<DimensionMerger> mergers)
        throws IOException {
    final String section = "make index.drd";
    progress.startSection(section);

    long startTime = System.currentTimeMillis();
    final Set<String> finalDimensions = Sets.newLinkedHashSet();
    final Set<String> finalColumns = Sets.newLinkedHashSet();
    finalColumns.addAll(mergedMetrics);
    for (int i = 0; i < mergedDimensions.size(); ++i) {
        if (mergers.get(i).canSkip()) {
            continue;
        }
        finalColumns.add(mergedDimensions.get(i));
        finalDimensions.add(mergedDimensions.get(i));
    }

    GenericIndexed<String> cols = GenericIndexed.fromIterable(finalColumns, GenericIndexed.STRING_STRATEGY);
    GenericIndexed<String> dims = GenericIndexed.fromIterable(finalDimensions, GenericIndexed.STRING_STRATEGY);

    final String bitmapSerdeFactoryType = mapper.writeValueAsString(indexSpec.getBitmapSerdeFactory());
    final long numBytes = cols.getSerializedSize() + dims.getSerializedSize() + 16
            + serializerUtils.getSerializedStringByteSize(bitmapSerdeFactoryType);

    final SmooshedWriter writer = v9Smoosher.addWithSmooshedWriter("index.drd", numBytes);
    cols.writeToChannel(writer);
    dims.writeToChannel(writer);

    DateTime minTime = new DateTime(JodaUtils.MAX_INSTANT);
    DateTime maxTime = new DateTime(JodaUtils.MIN_INSTANT);

    for (IndexableAdapter index : adapters) {
        minTime = JodaUtils.minDateTime(minTime, index.getDataInterval().getStart());
        maxTime = JodaUtils.maxDateTime(maxTime, index.getDataInterval().getEnd());
    }
    final Interval dataInterval = new Interval(minTime, maxTime);

    serializerUtils.writeLong(writer, dataInterval.getStartMillis());
    serializerUtils.writeLong(writer, dataInterval.getEndMillis());

    serializerUtils.writeString(writer, bitmapSerdeFactoryType);
    writer.close();

    IndexIO.checkFileSize(new File(outDir, "index.drd"));
    log.info("Completed index.drd in %,d millis.", System.currentTimeMillis() - startTime);

    progress.stopSection(section);
}
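
The interval above is built with the Interval(ReadableInstant, ReadableInstant) overload after folding every adapter's data interval into running min/max bounds. A condensed sketch of that fold, using plain Joda-Time in place of Druid's JodaUtils helpers (the input intervals are illustrative and the list is assumed non-empty):

import java.util.Arrays;
import java.util.List;
import org.joda.time.DateTime;
import org.joda.time.Interval;

public class DataIntervalSketch {
    // Fold a non-empty list of intervals into the smallest interval that
    // covers them all, mirroring the min/max loop above.
    static Interval union(List<Interval> intervals) {
        DateTime min = null;
        DateTime max = null;
        for (Interval i : intervals) {
            if (min == null || i.getStart().isBefore(min)) {
                min = i.getStart();
            }
            if (max == null || i.getEnd().isAfter(max)) {
                max = i.getEnd();
            }
        }
        // Interval(ReadableInstant, ReadableInstant); start must not be after end.
        return new Interval(min, max);
    }

    public static void main(String[] args) {
        System.out.println(union(Arrays.asList(
                new Interval("2012-01-01/2012-01-02"),
                new Interval("2012-01-03/2012-01-04"))));
    }
}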

From source file: io.druid.segment.QueryableIndexStorageAdapter.java

License: Apache License

@Override
public Sequence<Cursor> makeCursors(Filter filter, Interval interval, QueryGranularity gran) {
    Interval actualInterval = interval;

    long minDataTimestamp = getMinTime().getMillis();
    long maxDataTimestamp = getMaxTime().getMillis();
    final Interval dataInterval = new Interval(minDataTimestamp, gran.next(gran.truncate(maxDataTimestamp)));

    if (!actualInterval.overlaps(dataInterval)) {
        return Sequences.empty();
    }

    if (actualInterval.getStart().isBefore(dataInterval.getStart())) {
        actualInterval = actualInterval.withStart(dataInterval.getStart());
    }
    if (actualInterval.getEnd().isAfter(dataInterval.getEnd())) {
        actualInterval = actualInterval.withEnd(dataInterval.getEnd());
    }

    final Offset offset;
    if (filter == null) {
        offset = new NoFilterOffset(0, index.getNumRows());
    } else {
        final ColumnSelectorBitmapIndexSelector selector = new ColumnSelectorBitmapIndexSelector(
                index.getBitmapFactoryForDimensions(), index);

        offset = new BitmapOffset(selector.getBitmapFactory(), filter.getBitmapIndex(selector));
    }

    return Sequences.filter(
            new CursorSequenceBuilder(index, actualInterval, gran, offset, maxDataTimestamp).build(),
            Predicates.<Cursor>notNull());
}
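
Here the data interval comes from the Interval(long, long) overload over epoch milliseconds, with gran.next(gran.truncate(maxDataTimestamp)) rounding the upper bound up to the next granularity boundary so the end stays exclusive. A minimal sketch of the overlap-and-clip pattern (the millisecond bounds are illustrative):

import org.joda.time.Interval;

public class ClipToDataIntervalSketch {
    // Clip a query interval to the data interval, or return null when they
    // do not overlap (the caller above returns an empty sequence instead).
    static Interval clip(Interval query, Interval data) {
        if (!query.overlaps(data)) {
            return null;
        }
        if (query.getStart().isBefore(data.getStart())) {
            query = query.withStart(data.getStart());
        }
        if (query.getEnd().isAfter(data.getEnd())) {
            query = query.withEnd(data.getEnd());
        }
        return query;
    }

    public static void main(String[] args) {
        Interval data = new Interval(0L, 86400000L);          // epoch-millisecond bounds
        Interval query = new Interval(43200000L, 172800000L); // overlaps the second half
        System.out.println(clip(query, data));
    }
}

Joda-Time's Interval#overlap(ReadableInterval), which returns the overlapping interval or null, collapses the same steps into a single call.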

From source file: io.druid.segment.realtime.appenderator.AppenderatorPlumber.java

License: Apache License

private SegmentIdentifier getSegmentIdentifier(long timestamp) {
    if (!rejectionPolicy.accept(timestamp)) {
        return null;
    }

    final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
    final VersioningPolicy versioningPolicy = config.getVersioningPolicy();

    final long truncatedTime = segmentGranularity.bucketStart(new DateTime(timestamp)).getMillis();

    SegmentIdentifier retVal = segments.get(truncatedTime);

    if (retVal == null) {
        final Interval interval = new Interval(new DateTime(truncatedTime),
                segmentGranularity.increment(new DateTime(truncatedTime)));

        retVal = new SegmentIdentifier(schema.getDataSource(), interval, versioningPolicy.getVersion(interval),
                config.getShardSpec());
        addSegment(retVal);

    }

    return retVal;
}
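
The segment interval spans exactly one granularity bucket, [bucketStart(t), increment(bucketStart(t))), via the Interval(ReadableInstant, ReadableInstant) overload. A condensed sketch of the same bucketing with plain Joda-Time standing in for Druid's Granularity (an hourly bucket is assumed for illustration):

import org.joda.time.DateTime;
import org.joda.time.Interval;

public class BucketIntervalSketch {
    // Truncate a timestamp to its hour bucket and build the half-open
    // interval [bucketStart, bucketStart + 1 hour).
    static Interval hourBucket(long timestamp) {
        DateTime bucketStart = new DateTime(timestamp).hourOfDay().roundFloorCopy();
        return new Interval(bucketStart, bucketStart.plusHours(1));
    }

    public static void main(String[] args) {
        System.out.println(hourBucket(System.currentTimeMillis()));
    }
}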

From source file: io.druid.segment.realtime.plumber.RealtimePlumber.java

License: Apache License

private Sink getSink(long timestamp) {
    if (!rejectionPolicy.accept(timestamp)) {
        return null;
    }

    final Granularity segmentGranularity = schema.getGranularitySpec().getSegmentGranularity();
    final VersioningPolicy versioningPolicy = config.getVersioningPolicy();

    final long truncatedTime = segmentGranularity.truncate(new DateTime(timestamp)).getMillis();

    Sink retVal = sinks.get(truncatedTime);

    if (retVal == null) {
        final Interval sinkInterval = new Interval(new DateTime(truncatedTime),
                segmentGranularity.increment(new DateTime(truncatedTime)));

        retVal = new Sink(sinkInterval, schema, config, versioningPolicy.getVersion(sinkInterval));

        try {
            segmentAnnouncer.announceSegment(retVal.getSegment());
            sinks.put(truncatedTime, retVal);
            sinkTimeline.add(retVal.getInterval(), retVal.getVersion(),
                    new SingleElementPartitionChunk<Sink>(retVal));
        } catch (IOException e) {
            log.makeAlert(e, "Failed to announce new segment[%s]", schema.getDataSource())
                    .addData("interval", retVal.getInterval()).emit();
        }
    }

    return retVal;
}

From source file: io.druid.segment.SegmentDesc.java

License: Apache License

public static SegmentDesc valueOf(final String identifier) {
    String[] splits = identifier.split(DataSegment.delimiter);
    if (splits.length < 4) {
        throw new IllegalArgumentException("Invalid identifier " + identifier);
    }
    String datasource = splits[0];
    DateTime start = new DateTime(splits[1]);
    DateTime end = new DateTime(splits[2]);
    String version = splits[3];

    return new SegmentDesc(datasource, new Interval(start.getMillis(), end.getMillis()), version);
}
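
Note the overload choice here: Interval(long, long) always applies the ISO chronology in the default time zone, whereas Interval(start, end) would keep the chronology and zone of the start instant. A small sketch contrasting the two (the timestamps are illustrative):

import org.joda.time.DateTime;
import org.joda.time.Interval;

public class MillisVsInstantSketch {
    public static void main(String[] args) {
        DateTime start = new DateTime("2012-01-01T00:00:00Z");
        DateTime end = new DateTime("2012-01-02T00:00:00Z");
        // ISO chronology in the default time zone, whatever the inputs used.
        Interval fromMillis = new Interval(start.getMillis(), end.getMillis());
        // Keeps the chronology (and zone) of the start instant.
        Interval fromInstants = new Interval(start, end);
        System.out.println(fromMillis + " vs " + fromInstants);
    }
}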

From source file: io.druid.server.audit.SQLAuditManager.java

License: Apache License

private Interval getIntervalOrDefault(Interval interval) {
    final Interval theInterval;
    if (interval == null) {
        DateTime now = new DateTime();
        theInterval = new Interval(now.minus(config.getAuditHistoryMillis()), now);
    } else {
        theInterval = interval;
    }
    return theInterval;
}
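
This forms a trailing window ending at now; getAuditHistoryMillis() presumably returns a millisecond count, which DateTime#minus(long) subtracts as a duration before the Interval(ReadableInstant, ReadableInstant) overload is applied. A minimal sketch of the same pattern (the one-week length is illustrative), alongside the equivalent Interval(ReadableDuration, ReadableInstant) one-liner:

import org.joda.time.DateTime;
import org.joda.time.Duration;
import org.joda.time.Interval;

public class TrailingWindowSketch {
    public static void main(String[] args) {
        DateTime now = new DateTime();
        // Trailing one-week window ending now, as in getIntervalOrDefault above.
        Interval lastWeek = new Interval(now.minus(Duration.standardDays(7)), now);
        // Equivalent Interval(ReadableDuration, ReadableInstant) one-liner.
        Interval lastWeekToo = new Interval(Duration.standardDays(7), now);
        System.out.println(lastWeek.equals(lastWeekToo)); // true
    }
}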

From source file: io.druid.server.ClientInfoResource.java

License: Apache License

@GET
@Path("/{dataSourceName}")
@Produces(MediaType.APPLICATION_JSON)
public Map<String, Object> getDatasource(@PathParam("dataSourceName") String dataSourceName,
        @QueryParam("interval") String interval, @QueryParam("full") String full) {
    if (full == null) {
        return ImmutableMap.<String, Object>of(KEY_DIMENSIONS,
                getDatasourceDimensions(dataSourceName, interval), KEY_METRICS,
                getDatasourceMetrics(dataSourceName, interval));
    }

    Interval theInterval;
    if (interval == null || interval.isEmpty()) {
        DateTime now = getCurrentTime();
        theInterval = new Interval(segmentMetadataQueryConfig.getDefaultHistory(), now);
    } else {
        theInterval = new Interval(interval);
    }

    TimelineLookup<String, ServerSelector> timeline = timelineServerView
            .getTimeline(new TableDataSource(dataSourceName));
    Iterable<TimelineObjectHolder<String, ServerSelector>> serversLookup = timeline != null
            ? timeline.lookup(theInterval)
            : null;
    if (serversLookup == null || Iterables.isEmpty(serversLookup)) {
        return Collections.emptyMap();
    }
    Map<Interval, Object> servedIntervals = new TreeMap<>(new Comparator<Interval>() {
        @Override
        public int compare(Interval o1, Interval o2) {
            // Treat overlapping intervals as equal so each run of mutually
            // overlapping intervals keeps a single TreeMap entry (later puts
            // replace the value); otherwise sort chronologically.
            if (o1.equals(o2) || o1.overlaps(o2)) {
                return 0;
            } else {
                return o1.isBefore(o2) ? -1 : 1;
            }
        }
    });

    for (TimelineObjectHolder<String, ServerSelector> holder : serversLookup) {
        final Set<Object> dimensions = Sets.newHashSet();
        final Set<Object> metrics = Sets.newHashSet();
        final PartitionHolder<ServerSelector> partitionHolder = holder.getObject();
        if (partitionHolder.isComplete()) {
            for (ServerSelector server : partitionHolder.payloads()) {
                final DataSegment segment = server.getSegment();
                dimensions.addAll(segment.getDimensions());
                metrics.addAll(segment.getMetrics());
            }
        }

        servedIntervals.put(holder.getInterval(),
                ImmutableMap.of(KEY_DIMENSIONS, dimensions, KEY_METRICS, metrics));
    }

    // collapse intervals if they abut and have the same set of columns
    Map<String, Object> result = Maps.newLinkedHashMap();
    Interval curr = null;
    Map<String, Set<String>> cols = null;
    for (Map.Entry<Interval, Object> e : servedIntervals.entrySet()) {
        Interval ival = e.getKey();
        if (curr != null && curr.abuts(ival) && cols.equals(e.getValue())) {
            curr = curr.withEnd(ival.getEnd());
        } else {
            if (curr != null) {
                result.put(curr.toString(), cols);
            }
            curr = ival;
            cols = (Map<String, Set<String>>) e.getValue();
        }
    }
    //add the last one in
    if (curr != null) {
        result.put(curr.toString(), cols);
    }
    return result;
}
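
Two further overloads appear in this method: Interval(ReadablePeriod, ReadableInstant) anchors a period so that it ends at the given instant (getDefaultHistory() presumably returns a Joda Period), and Interval(Object), invoked as new Interval(interval), parses an ISO-8601 "start/end" string. A minimal sketch of both (the period and string are illustrative):

import org.joda.time.DateTime;
import org.joda.time.Interval;
import org.joda.time.Period;

public class IntervalOverloadsSketch {
    public static void main(String[] args) {
        DateTime now = new DateTime();
        // Interval(ReadablePeriod, ReadableInstant): the period ends at the instant.
        Interval lastDay = new Interval(Period.days(1), now);
        // Interval(Object): an ISO-8601 "start/end" string, via the converter framework.
        Interval parsed = new Interval("2012-01-01T00:00:00Z/2012-02-01T00:00:00Z");
        System.out.println(lastDay + "  " + parsed);
    }
}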

From source file: io.druid.server.ClientInfoResource.java

License: Apache License

@GET
@Path("/{dataSourceName}/dimensions")
@Produces(MediaType.APPLICATION_JSON)
public Iterable<String> getDatasourceDimensions(@PathParam("dataSourceName") String dataSourceName,
        @QueryParam("interval") String interval) {
    final List<DataSegment> segments = getSegmentsForDatasources().get(dataSourceName);
    final Set<String> dims = Sets.newHashSet();

    if (segments == null || segments.isEmpty()) {
        return dims;
    }

    Interval theInterval;
    if (interval == null || interval.isEmpty()) {
        DateTime now = getCurrentTime();
        theInterval = new Interval(segmentMetadataQueryConfig.getDefaultHistory(), now);
    } else {
        theInterval = new Interval(interval);
    }

    for (DataSegment segment : segments) {
        if (theInterval.overlaps(segment.getInterval())) {
            dims.addAll(segment.getDimensions());
        }
    }

    return dims;
}

From source file: io.druid.server.ClientInfoResource.java

License: Apache License

@GET
@Path("/{dataSourceName}/metrics")
@Produces(MediaType.APPLICATION_JSON)
public Iterable<String> getDatasourceMetrics(@PathParam("dataSourceName") String dataSourceName,
        @QueryParam("interval") String interval) {
    final List<DataSegment> segments = getSegmentsForDatasources().get(dataSourceName);
    final Set<String> metrics = Sets.newHashSet();

    if (segments == null || segments.isEmpty()) {
        return metrics;
    }

    Interval theInterval;
    if (interval == null || interval.isEmpty()) {
        DateTime now = getCurrentTime();
        theInterval = new Interval(segmentMetadataQueryConfig.getDefaultHistory(), now);
    } else {
        theInterval = new Interval(interval);
    }

    for (DataSegment segment : segments) {
        if (theInterval.overlaps(segment.getInterval())) {
            metrics.addAll(segment.getMetrics());
        }
    }

    return metrics;
}

From source file: io.druid.server.coordinator.DruidCoordinatorBalancerProfiler.java

License: Apache License

public void bigProfiler() {
    Stopwatch watch = Stopwatch.createUnstarted();
    int numSegments = 55000;
    int numServers = 50;
    EasyMock.expect(manager.getAllRules()).andReturn(ImmutableMap.<String, List<Rule>>of("test", rules))
            .anyTimes();
    EasyMock.expect(manager.getRules(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.expect(manager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.replay(manager);

    coordinator.moveSegment(EasyMock.<ImmutableDruidServer>anyObject(),
            EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<String>anyObject(),
            EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(coordinator);

    List<DruidServer> serverList = Lists.newArrayList();
    Map<String, LoadQueuePeon> peonMap = Maps.newHashMap();
    List<ServerHolder> serverHolderList = Lists.newArrayList();
    Map<String, DataSegment> segmentMap = Maps.newHashMap();
    for (int i = 0; i < numSegments; i++) {
        segmentMap.put("segment" + i, new DataSegment("datasource" + i,
                new Interval(new DateTime("2012-01-01"), (new DateTime("2012-01-01")).plusHours(1)),
                (new DateTime("2012-03-01")).toString(), Maps.<String, Object>newHashMap(),
                Lists.<String>newArrayList(), Lists.<String>newArrayList(), new NoneShardSpec(), 0, 4L));
    }

    for (int i = 0; i < numServers; i++) {
        ImmutableDruidServer server = EasyMock.createMock(ImmutableDruidServer.class);
        EasyMock.expect(server.getMetadata()).andReturn(null).anyTimes();
        EasyMock.expect(server.getCurrSize()).andReturn(30L).atLeastOnce();
        EasyMock.expect(server.getMaxSize()).andReturn(100L).atLeastOnce();
        EasyMock.expect(server.getTier()).andReturn("normal").anyTimes();
        EasyMock.expect(server.getName()).andReturn(Integer.toString(i)).atLeastOnce();
        EasyMock.expect(server.getHost()).andReturn(Integer.toString(i)).anyTimes();
        if (i == 0) {
            EasyMock.expect(server.getSegments()).andReturn(segmentMap).anyTimes();
        } else {
            EasyMock.expect(server.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
        }
        EasyMock.expect(server.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
        EasyMock.replay(server);

        LoadQueuePeon peon = new LoadQueuePeonTester();
        peonMap.put(Integer.toString(i), peon);
        serverHolderList.add(new ServerHolder(server, peon));
    }

    DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams.newBuilder()
            .withDruidCluster(
                    new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal",
                            MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator)
                                    .create(serverHolderList))))
            .withLoadManagementPeons(peonMap).withAvailableSegments(segmentMap.values())
            .withDynamicConfigs(
                    new CoordinatorDynamicConfig.Builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE)
                            .withReplicantLifetime(500).withReplicationThrottleLimit(5).build())
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01")).withEmitter(emitter)
            .withDatabaseRuleManager(manager).withReplicationManager(new ReplicationThrottler(2, 500))
            .withSegmentReplicantLookup(SegmentReplicantLookup
                    .make(new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal",
                            MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator)
                                    .create(serverHolderList)))))
            .build();

    DruidCoordinatorBalancerTester tester = new DruidCoordinatorBalancerTester(coordinator);
    DruidCoordinatorRuleRunner runner = new DruidCoordinatorRuleRunner(coordinator);
    watch.start();
    DruidCoordinatorRuntimeParams balanceParams = tester.run(params);
    DruidCoordinatorRuntimeParams assignParams = runner.run(params);
    System.out.println(watch.stop());
}