List of usage examples for org.joda.time.Interval.getStartMillis()
public long getStartMillis()
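Before the project samples below, a minimal self-contained sketch (the interval bounds are made-up values) shows what the method returns: the start of the interval as milliseconds since 1970-01-01T00:00:00Z.

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;

public class GetStartMillisExample {
    public static void main(String[] args) {
        // Example bounds chosen purely for illustration.
        DateTime start = new DateTime(2020, 1, 1, 0, 0, DateTimeZone.UTC);
        DateTime end = new DateTime(2020, 1, 2, 0, 0, DateTimeZone.UTC);
        Interval interval = new Interval(start, end);

        // getStartMillis() returns the start instant as epoch milliseconds.
        long startMillis = interval.getStartMillis();
        System.out.println(startMillis);                      // 1577836800000
        System.out.println(startMillis == start.getMillis()); // true
    }
}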
From source file:net.sourceforge.fenixedu.util.date.IntervalTools.java
License:Open Source License
public static Interval intervalWithEnd(Interval originalInterval, LocalDate day) {
    long millis = day.toDateMidnight().toDateTime().withTime(23, 59, 59, 999).getMillis();
    return new Interval(originalInterval.getStartMillis(), millis);
}
From source file:net.sourceforge.fenixedu.util.date.IntervalTools.java
License:Open Source License
public static Interval intervalWithEnd(Interval interval, Date date) {
    long millis = date == null ? Long.MAX_VALUE : date.getTime();
    return new Interval(interval.getStartMillis(), millis);
}
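A hedged usage sketch for the two helpers above (the call site and values are assumptions, not fenixedu code): the LocalDate overload keeps the original start and clamps the end to the last millisecond of the given day, while the Date overload treats null as an effectively open end (Long.MAX_VALUE).

// Assumed usage, not taken from the fenixedu sources.
Interval original = new Interval(new DateTime(2020, 1, 1, 0, 0), new DateTime(2020, 3, 1, 0, 0));

// Same start as 'original', end clamped to 2020-01-31T23:59:59.999.
Interval boundedByDay = IntervalTools.intervalWithEnd(original, new LocalDate(2020, 1, 31));

// A null Date produces an interval whose end is Long.MAX_VALUE, i.e. effectively unbounded.
Interval openEnded = IntervalTools.intervalWithEnd(original, (java.util.Date) null);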
From source file:org.apache.calcite.adapter.druid.LocalInterval.java
License:Apache License
/** Creates a LocalInterval based on an interval string. */
public static LocalInterval create(String intervalString) {
    Interval i = new Interval(intervalString, ISOChronology.getInstanceUTC());
    return new LocalInterval(i.getStartMillis(), i.getEndMillis());
}
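A short sketch of the underlying Joda-Time call (the interval string is an arbitrary example): an ISO-8601 interval string is parsed with the UTC chronology, and getStartMillis()/getEndMillis() return its bounds in epoch milliseconds.

// Any ISO-8601 interval string works here; this one is just an example.
Interval i = new Interval("2015-01-01T00:00:00Z/2015-01-02T00:00:00Z", ISOChronology.getInstanceUTC());
long startMillis = i.getStartMillis(); // 1420070400000
long endMillis = i.getEndMillis();     // 1420156800000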
From source file:org.apache.druid.client.CacheUtil.java
License:Apache License
public static Cache.NamedKey computeSegmentCacheKey(String segmentId, SegmentDescriptor descriptor,
        byte[] queryCacheKey) {
    final Interval segmentQueryInterval = descriptor.getInterval();
    final byte[] versionBytes = StringUtils.toUtf8(descriptor.getVersion());
    return new Cache.NamedKey(segmentId,
            ByteBuffer.allocate(16 + versionBytes.length + 4 + queryCacheKey.length)
                    .putLong(segmentQueryInterval.getStartMillis())
                    .putLong(segmentQueryInterval.getEndMillis())
                    .put(versionBytes)
                    .putInt(descriptor.getPartitionNumber())
                    .put(queryCacheKey)
                    .array());
}
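The 16 in the allocation above covers the two putLong calls for the interval's getStartMillis() and getEndMillis() (8 bytes each), and the extra 4 covers the int partition number. A minimal sketch of the same packing pattern, with made-up bounds:

// Illustrative only: pack an interval's bounds into a fixed-size byte prefix.
Interval interval = new Interval(1420070400000L, 1420156800000L);
byte[] keyPrefix = ByteBuffer.allocate(Long.BYTES * 2)
        .putLong(interval.getStartMillis())
        .putLong(interval.getEndMillis())
        .array();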
From source file:org.apache.druid.indexer.DeterminePartitionsJob.java
License:Apache License
@Override
public boolean run() {
    try {
        /*
         * Group by (timestamp, dimensions) so we can correctly count dimension values as they would appear
         * in the final segment.
         */
        if (!(config.getPartitionsSpec() instanceof SingleDimensionPartitionsSpec)) {
            throw new ISE(
                    "DeterminePartitionsJob can only be run for SingleDimensionPartitionsSpec, partitionSpec found [%s]",
                    config.getPartitionsSpec());
        }

        final SingleDimensionPartitionsSpec partitionsSpec = (SingleDimensionPartitionsSpec) config
                .getPartitionsSpec();

        if (!partitionsSpec.isAssumeGrouped()) {
            groupByJob = Job.getInstance(new Configuration(), StringUtils.format(
                    "%s-determine_partitions_groupby-%s", config.getDataSource(), config.getIntervals()));

            JobHelper.injectSystemProperties(groupByJob);
            config.addJobProperties(groupByJob);

            groupByJob.setMapperClass(DeterminePartitionsGroupByMapper.class);
            groupByJob.setMapOutputKeyClass(BytesWritable.class);
            groupByJob.setMapOutputValueClass(NullWritable.class);
            groupByJob.setCombinerClass(DeterminePartitionsGroupByReducer.class);
            groupByJob.setReducerClass(DeterminePartitionsGroupByReducer.class);
            groupByJob.setOutputKeyClass(BytesWritable.class);
            groupByJob.setOutputValueClass(NullWritable.class);
            groupByJob.setOutputFormatClass(SequenceFileOutputFormat.class);
            JobHelper.setupClasspath(JobHelper.distributedClassPath(config.getWorkingPath()),
                    JobHelper.distributedClassPath(config.makeIntermediatePath()), groupByJob);

            config.addInputPaths(groupByJob);
            config.intoConfiguration(groupByJob);
            FileOutputFormat.setOutputPath(groupByJob, config.makeGroupedDataDir());

            groupByJob.submit();
            log.info("Job %s submitted, status available at: %s", groupByJob.getJobName(),
                    groupByJob.getTrackingURL());

            // Store the jobId in the file
            if (groupByJob.getJobID() != null) {
                JobHelper.writeJobIdToFile(config.getHadoopJobIdFileName(), groupByJob.getJobID().toString());
            }

            try {
                if (!groupByJob.waitForCompletion(true)) {
                    log.error("Job failed: %s", groupByJob.getJobID());
                    failureCause = Utils.getFailureMessage(groupByJob, config.JSON_MAPPER);
                    return false;
                }
            } catch (IOException ioe) {
                if (!Utils.checkAppSuccessForJobIOException(ioe, groupByJob,
                        config.isUseYarnRMJobStatusFallback())) {
                    throw ioe;
                }
            }
        } else {
            log.info("Skipping group-by job.");
        }

        /*
         * Read grouped data and determine appropriate partitions.
         */
        final Job dimSelectionJob = Job.getInstance(new Configuration(), StringUtils.format(
                "%s-determine_partitions_dimselection-%s", config.getDataSource(), config.getIntervals()));

        dimSelectionJob.getConfiguration().set("io.sort.record.percent", "0.19");

        JobHelper.injectSystemProperties(dimSelectionJob);
        config.addJobProperties(dimSelectionJob);

        if (!partitionsSpec.isAssumeGrouped()) {
            // Read grouped data from the groupByJob.
            dimSelectionJob.setMapperClass(DeterminePartitionsDimSelectionPostGroupByMapper.class);
            dimSelectionJob.setInputFormatClass(SequenceFileInputFormat.class);
            FileInputFormat.addInputPath(dimSelectionJob, config.makeGroupedDataDir());
        } else {
            // Directly read the source data, since we assume it's already grouped.
            dimSelectionJob.setMapperClass(DeterminePartitionsDimSelectionAssumeGroupedMapper.class);
            config.addInputPaths(dimSelectionJob);
        }

        SortableBytes.useSortableBytesAsMapOutputKey(dimSelectionJob,
                DeterminePartitionsDimSelectionPartitioner.class);
        dimSelectionJob.setMapOutputValueClass(Text.class);
        dimSelectionJob.setCombinerClass(DeterminePartitionsDimSelectionCombiner.class);
        dimSelectionJob.setReducerClass(DeterminePartitionsDimSelectionReducer.class);
        dimSelectionJob.setOutputKeyClass(BytesWritable.class);
        dimSelectionJob.setOutputValueClass(Text.class);
        dimSelectionJob.setOutputFormatClass(DeterminePartitionsDimSelectionOutputFormat.class);
        dimSelectionJob.setNumReduceTasks(config.getGranularitySpec().bucketIntervals().get().size());
        JobHelper.setupClasspath(JobHelper.distributedClassPath(config.getWorkingPath()),
                JobHelper.distributedClassPath(config.makeIntermediatePath()), dimSelectionJob);

        config.intoConfiguration(dimSelectionJob);
        FileOutputFormat.setOutputPath(dimSelectionJob, config.makeIntermediatePath());

        dimSelectionJob.submit();
        log.info("Job %s submitted, status available at: %s", dimSelectionJob.getJobName(),
                dimSelectionJob.getTrackingURL());

        // Store the jobId in the file
        if (dimSelectionJob.getJobID() != null) {
            JobHelper.writeJobIdToFile(config.getHadoopJobIdFileName(),
                    dimSelectionJob.getJobID().toString());
        }

        try {
            if (!dimSelectionJob.waitForCompletion(true)) {
                log.error("Job failed: %s", dimSelectionJob.getJobID().toString());
                failureCause = Utils.getFailureMessage(dimSelectionJob, config.JSON_MAPPER);
                return false;
            }
        } catch (IOException ioe) {
            if (!Utils.checkAppSuccessForJobIOException(ioe, dimSelectionJob,
                    config.isUseYarnRMJobStatusFallback())) {
                throw ioe;
            }
        }

        /*
         * Load partitions determined by the previous job.
         */
        log.info("Job completed, loading up partitions for intervals[%s].",
                config.getSegmentGranularIntervals());
        FileSystem fileSystem = null;
        Map<Long, List<HadoopyShardSpec>> shardSpecs = new TreeMap<>();
        int shardCount = 0;
        for (Interval segmentGranularity : config.getSegmentGranularIntervals().get()) {
            final Path partitionInfoPath = config.makeSegmentPartitionInfoPath(segmentGranularity);
            if (fileSystem == null) {
                fileSystem = partitionInfoPath.getFileSystem(dimSelectionJob.getConfiguration());
            }
            if (Utils.exists(dimSelectionJob, fileSystem, partitionInfoPath)) {
                List<ShardSpec> specs = config.JSON_MAPPER.readValue(
                        Utils.openInputStream(dimSelectionJob, partitionInfoPath),
                        new TypeReference<List<ShardSpec>>() {
                        });

                List<HadoopyShardSpec> actualSpecs = Lists.newArrayListWithExpectedSize(specs.size());
                for (int i = 0; i < specs.size(); ++i) {
                    actualSpecs.add(new HadoopyShardSpec(specs.get(i), shardCount++));
                    log.info("DateTime[%s], partition[%d], spec[%s]", segmentGranularity, i,
                            actualSpecs.get(i));
                }

                shardSpecs.put(segmentGranularity.getStartMillis(), actualSpecs);
            } else {
                log.info("Path[%s] didn't exist!?", partitionInfoPath);
            }
        }
        config.setShardSpecs(shardSpecs);

        return true;
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
From source file:org.apache.druid.indexer.path.GranularityPathSpec.java
License:Apache License
private Interval trim(Interval inputInterval, Interval interval) {
    long start = interval.getStartMillis();
    long end = interval.getEndMillis();

    boolean makeNew = false;
    if (start < inputInterval.getStartMillis()) {
        start = inputInterval.getStartMillis();
        makeNew = true;
    }
    if (end > inputInterval.getEndMillis()) {
        end = inputInterval.getEndMillis();
        makeNew = true;
    }
    return makeNew ? new Interval(start, end, interval.getChronology()) : interval;
}
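A rough illustration of what trim does (the bounds are made-up): the candidate interval is clamped to inputInterval, and a new Interval is allocated only when clamping actually changes a bound. For comparison, Joda-Time's public Interval.overlap computes the same range here, though it does not preserve the candidate's chronology or identity the way trim does.

// Illustrative values only.
Interval input = new Interval(100L, 200L);     // allowed bounds
Interval candidate = new Interval(50L, 150L);  // starts too early

// trim(input, candidate) would return new Interval(100L, 150L):
// the start is raised to input.getStartMillis(); the end already fits.
Interval overlap = candidate.overlap(input);   // also [100, 150)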
From source file:org.apache.druid.java.util.common.granularity.ArbitraryGranularity.java
License:Apache License
@JsonCreator
public ArbitraryGranularity(@JsonProperty("intervals") List<Interval> inputIntervals,
        @JsonProperty("timezone") DateTimeZone timezone) {
    this.intervals = new TreeSet<>(Comparators.intervalsByStartThenEnd());
    this.timezone = timezone == null ? DateTimeZone.UTC : timezone;

    if (inputIntervals == null) {
        inputIntervals = new ArrayList<>();
    }
    Preconditions.checkArgument(inputIntervals.size() > 0, "at least one interval should be specified");

    // Insert all intervals
    for (final Interval inputInterval : inputIntervals) {
        Interval adjustedInterval = inputInterval;
        if (timezone != null) {
            adjustedInterval = new Interval(inputInterval.getStartMillis(), inputInterval.getEndMillis(),
                    timezone);
        }
        intervals.add(adjustedInterval);
    }

    // Ensure intervals are non-overlapping (but they may abut each other)
    final PeekingIterator<Interval> intervalIterator = Iterators.peekingIterator(intervals.iterator());
    while (intervalIterator.hasNext()) {
        final Interval currentInterval = intervalIterator.next();
        if (intervalIterator.hasNext()) {
            final Interval nextInterval = intervalIterator.peek();
            if (currentInterval.overlaps(nextInterval)) {
                throw new IAE("Overlapping granularity intervals: %s, %s", currentInterval, nextInterval);
            }
        }
    }
}
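A minimal sketch of the rebasing step in the loop above (the bounds and zone are assumptions): rebuilding an interval from getStartMillis()/getEndMillis() with an explicit DateTimeZone changes only how the interval is rendered; the underlying instants, and therefore getStartMillis() itself, stay identical.

// Same instants, different display zone.
Interval original = new Interval(1420070400000L, 1420156800000L);
Interval rebased = new Interval(original.getStartMillis(), original.getEndMillis(),
        DateTimeZone.forID("America/Los_Angeles"));
// rebased.getStartMillis() == original.getStartMillis()  -> true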
From source file:org.apache.druid.java.util.common.granularity.ArbitraryGranularity.java
License:Apache License
@Override
public Iterable<Interval> getIterable(Interval input) {
    long start = input.getStartMillis();
    long end = input.getEndMillis();

    // Return an empty iterable if the requested time interval does not
    // overlap any of the arbitrary intervals specified
    if (end < intervals.first().getStartMillis() || start > intervals.last().getEndMillis()) {
        return ImmutableList.of();
    }

    return new Iterable<Interval>() {
        @Override
        public Iterator<Interval> iterator() {
            // Skip over the intervals that are known to be invalid
            // because they end before the requested start timestamp
            final PeekingIterator<Interval> intervalIterator = Iterators.peekingIterator(intervals.iterator());
            while (intervalIterator.hasNext() && intervalIterator.peek().getEndMillis() <= start) {
                intervalIterator.next();
            }

            return new Iterator<Interval>() {
                @Override
                public boolean hasNext() {
                    return intervalIterator.hasNext() && intervalIterator.peek().getStartMillis() < end;
                }

                @Override
                public Interval next() {
                    return intervalIterator.next();
                }

                @Override
                public void remove() {
                    throw new UnsupportedOperationException();
                }
            };
        }
    };
}
From source file:org.apache.druid.java.util.common.granularity.DurationGranularity.java
License:Apache License
@Override
public boolean isAligned(Interval interval) {
    if (interval.toDurationMillis() == duration) {
        return (interval.getStartMillis() - origin) % duration == 0;
    }
    return false;
}
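A small worked example of the alignment check (duration, origin, and bounds are assumed values): with a one-hour duration anchored at the epoch, an interval is aligned only if it is exactly one hour long and its getStartMillis() falls on an hour boundary.

// Assumed values: one-hour granularity anchored at the epoch.
long duration = 3_600_000L;
long origin = 0L;

Interval aligned = new Interval(7_200_000L, 10_800_000L);    // 02:00-03:00 on 1970-01-01 UTC
Interval misaligned = new Interval(7_260_000L, 10_860_000L); // starts at 02:01

boolean first = aligned.toDurationMillis() == duration
        && (aligned.getStartMillis() - origin) % duration == 0;    // true
boolean second = misaligned.toDurationMillis() == duration
        && (misaligned.getStartMillis() - origin) % duration == 0; // false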
From source file:org.apache.druid.metadata.IndexerSQLMetadataStorageCoordinator.java
License:Apache License
@Nullable
private SegmentIdWithShardSpec allocatePendingSegment(final Handle handle, final String dataSource,
        final String sequenceName, final Interval interval, final ShardSpecFactory shardSpecFactory,
        final String maxVersion) throws IOException {
    final CheckExistingSegmentIdResult result = checkAndGetExistingSegmentId(
            handle.createQuery(StringUtils.format(
                    "SELECT payload FROM %s WHERE "
                            + "dataSource = :dataSource AND "
                            + "sequence_name = :sequence_name AND "
                            + "start = :start AND "
                            + "%2$send%2$s = :end",
                    dbTables.getPendingSegmentsTable(), connector.getQuoteString())),
            interval, sequenceName, null,
            Pair.of("dataSource", dataSource),
            Pair.of("sequence_name", sequenceName),
            Pair.of("start", interval.getStart().toString()),
            Pair.of("end", interval.getEnd().toString()));

    if (result.found) {
        // The found existing segment identifier can be null if its interval doesn't match with the given interval
        return result.segmentIdentifier;
    }

    final SegmentIdWithShardSpec newIdentifier = createNewSegment(handle, dataSource, interval,
            shardSpecFactory, maxVersion);
    if (newIdentifier == null) {
        return null;
    }

    // SELECT -> INSERT can fail due to races; callers must be prepared to retry.
    // Avoiding ON DUPLICATE KEY since it's not portable.
    // Avoiding try/catch since it may cause inadvertent transaction-splitting.

    // UNIQUE key for the row, ensuring we don't have more than one segment per sequence per interval.
    // Using a single column instead of (sequence_name, sequence_prev_id) as some MySQL storage engines
    // have difficulty with large unique keys (see https://github.com/apache/incubator-druid/issues/2319)
    final String sequenceNamePrevIdSha1 = BaseEncoding.base16()
            .encode(Hashing.sha1().newHasher()
                    .putBytes(StringUtils.toUtf8(sequenceName))
                    .putByte((byte) 0xff)
                    .putLong(interval.getStartMillis())
                    .putLong(interval.getEndMillis())
                    .hash().asBytes());

    // always insert empty previous sequence id
    insertToMetastore(handle, newIdentifier, dataSource, interval, "", sequenceName, sequenceNamePrevIdSha1);

    log.info("Allocated pending segment [%s] for sequence[%s] in DB", newIdentifier, sequenceName);

    return newIdentifier;
}
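For reference, a stripped-down sketch of the key derivation above (the sequence name and bounds are made-up): within the SHA-1 hash, the interval is identified solely by its getStartMillis()/getEndMillis() pair.

// Hypothetical inputs; only the hashing pattern mirrors the code above.
String sequenceName = "example_sequence";
Interval interval = new Interval(1420070400000L, 1420156800000L);
String sequenceNamePrevIdSha1 = BaseEncoding.base16().encode(
        Hashing.sha1().newHasher()
                .putBytes(sequenceName.getBytes(StandardCharsets.UTF_8))
                .putByte((byte) 0xff)
                .putLong(interval.getStartMillis())
                .putLong(interval.getEndMillis())
                .hash()
                .asBytes());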