Example usage for org.joda.time Period parse

List of usage examples for org.joda.time Period parse

Introduction

On this page you can find example usage for org.joda.time Period.parse.

Prototype

@FromString
public static Period parse(String str) 

Document

Parses a Period from the specified string.
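
Period.parse expects an ISO-8601 period string (it delegates to the standard ISOPeriodFormat). A minimal sketch of the behavior, with illustrative values:

import org.joda.time.Period;

public class PeriodParseDemo {
    public static void main(String[] args) {
        // "PT10M" is ISO-8601 for ten minutes; "P1Y2M3D" mixes date fields.
        Period tenMinutes = Period.parse("PT10M");
        Period mixed = Period.parse("P1Y2M3D");
        System.out.println(tenMinutes.getMinutes()); // 10
        System.out.println(mixed.getYears()); // 1
        // Anything that is not a valid ISO-8601 period fails fast.
        try {
            Period.parse("10 minutes");
        } catch (IllegalArgumentException e) {
            System.out.println("Invalid format: " + e.getMessage());
        }
    }
}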

Usage

From source file:org.apache.druid.query.IntervalChunkingQueryRunner.java

License:Apache License

private Period getChunkPeriod(Query<T> query) {
    final String p = QueryContexts.getChunkPeriod(query);
    return Period.parse(p);
}

From source file:org.apache.eagle.alert.coordinator.provider.NodataMetadataGenerator.java

License:Apache License

public void execute(Config config, Map<String, StreamDefinition> streamDefinitionsMap,
        Map<String, Kafka2TupleMetadata> kafkaSources, Map<String, PolicyDefinition> policies,
        Map<String, Publishment> publishments) {
    Collection<StreamDefinition> streamDefinitions = streamDefinitionsMap.values();
    for (StreamDefinition streamDefinition : streamDefinitions) {
        StreamColumn columnWithNodataExpression = null;
        for (StreamColumn column : streamDefinition.getColumns()) {
            if (StringUtils.isNotBlank(column.getNodataExpression())) {
                // has nodata alert setting, needs to generate the nodata alert policy
                if (columnWithNodataExpression != null) {
                    columnWithNodataExpression = null;
                    LOG.warn("Only one column in one stream is allowed to configure nodata alert");
                    break;
                }
                columnWithNodataExpression = column;
            }
        }
        if (columnWithNodataExpression != null) {
            final String streamName = streamDefinition.getStreamId();

            // create nodata alert aggr stream
            if (streamDefinitionsMap.containsKey(NODATA_ALERT_AGGR_STREAM)) {
                LOG.info("Nodata alert aggregation stream: {} already exists", NODATA_ALERT_AGGR_STREAM);
            } else {
                streamDefinitionsMap.put(NODATA_ALERT_AGGR_STREAM, buildAggregationStream());
                LOG.info("Created nodata alert aggregation stream: {}", NODATA_ALERT_AGGR_STREAM);
            }

            // create nodata alert aggr output stream
            if (streamDefinitionsMap.containsKey(NODATA_ALERT_AGGR_OUTPUT_STREAM)) {
                LOG.info("Nodata alert aggregation output stream: {} already exists",
                        NODATA_ALERT_AGGR_OUTPUT_STREAM);
            } else {
                streamDefinitionsMap.put(NODATA_ALERT_AGGR_OUTPUT_STREAM, buildAggregationOutputStream());
                LOG.info("Created nodata alert aggregation output stream: {}", NODATA_ALERT_AGGR_OUTPUT_STREAM);
            }

            // create nodata alert data source
            if (kafkaSources.containsKey(NODATA_ALERT_AGGR_DATASOURCE_NAME)) {
                LOG.info("Stream: {} nodata alert aggregation datasource: {} already exists",
                        NODATA_ALERT_AGGR_STREAM, NODATA_ALERT_AGGR_DATASOURCE_NAME);
            } else {
                kafkaSources.put(NODATA_ALERT_AGGR_DATASOURCE_NAME, buildAggregationDatasource());
                LOG.info("Created nodata alert aggregation datasource {} for stream {}",
                        NODATA_ALERT_AGGR_DATASOURCE_NAME, NODATA_ALERT_AGGR_STREAM);
            }

            // create nodata alert aggregation output datasource
            if (kafkaSources.containsKey(NODATA_ALERT_AGGR_OUTPUT_DATASOURCE_NAME)) {
                LOG.info("Stream: {} nodata alert aggregation output datasource: {} already exists",
                        NODATA_ALERT_AGGR_OUTPUT_STREAM, NODATA_ALERT_AGGR_OUTPUT_DATASOURCE_NAME);
            } else {
                kafkaSources.put(NODATA_ALERT_AGGR_OUTPUT_DATASOURCE_NAME, buildAggregationOutputDatasource());
                LOG.info("Created nodata alert aggregation output datasource {} for stream {}",
                        NODATA_ALERT_AGGR_DATASOURCE_NAME, NODATA_ALERT_AGGR_OUTPUT_STREAM);
            }

            // create nodata alert policy
            String policyName = streamName + "_nodata_alert";
            String nodataExpression = columnWithNodataExpression.getNodataExpression();
            String[] segments = nodataExpression.split(",");
            long windowPeriodInSeconds = TimePeriodUtils.getSecondsOfPeriod(Period.parse(segments[0]));
            if (policies.containsKey(policyName)) {
                LOG.info("Stream: {} nodata alert policy: {} already exists", streamName, policyName);
            } else {
                policies.put(policyName, buildDynamicNodataPolicy(streamName, policyName,
                        columnWithNodataExpression.getName(), nodataExpression, Arrays.asList(streamName)));
                LOG.info("Created nodata alert policy {} with expression {} for stream {}", policyName,
                        nodataExpression, streamName);
            }

            // create nodata alert aggregation
            String aggrPolicyName = NODATA_ALERT_AGGR_STREAM + "_policy";
            if (policies.containsKey(aggrPolicyName)) {
                LOG.info("Stream: {} nodata alert aggregation policy: {} already exists",
                        NODATA_ALERT_AGGR_OUTPUT_STREAM, aggrPolicyName);
            } else {
                policies.put(aggrPolicyName, buildAggregationPolicy(aggrPolicyName,
                        columnWithNodataExpression.getName(), windowPeriodInSeconds));
                LOG.info("Created nodata alert aggregation policy {} for stream {}", aggrPolicyName,
                        NODATA_ALERT_AGGR_OUTPUT_STREAM);
            }

            // create nodata alert publish
            String publishmentName = policyName + "_publish";
            if (publishments.containsKey(publishmentName)) {
                LOG.info("Stream: {} nodata alert publishment: {} already exists", streamName, publishmentName);
            } else {
                String kafkaBroker = config.getString("kafkaProducer.bootstrapServers");
                publishments.put(publishmentName, buildKafkaAlertPublishment(publishmentName, policyName,
                        kafkaBroker, NODATA_ALERT_AGGR_TOPIC_NAME));
                publishments.put(publishmentName + "_email", buildEmailAlertPublishment(config,
                        publishmentName + "_email", policyName, kafkaBroker, NODATA_ALERT_AGGR_TOPIC_NAME));
                LOG.info("Created nodata alert publishment {} for stream {}", policyName + "_publish",
                        streamName);
            }

            // create nodata alert aggregation publish
            String aggrPublishName = aggrPolicyName + "_publish";
            if (publishments.containsKey(aggrPublishName)) {
                LOG.info("Stream: {} publishment: {} already exists", NODATA_ALERT_AGGR_STREAM,
                        aggrPublishName);
            } else {
                String kafkaBroker = config.getString("kafkaProducer.bootstrapServers");
                publishments.put(aggrPublishName, buildKafkaAlertPublishment(aggrPublishName, aggrPolicyName,
                        kafkaBroker, NODATA_ALERT_AGGR_OUTPUT_TOPIC_NAME));
                publishments.put(aggrPublishName + "_email",
                        buildEmailAlertPublishment(config, aggrPublishName + "_email", aggrPolicyName,
                                kafkaBroker, NODATA_ALERT_AGGR_OUTPUT_TOPIC_NAME));
                LOG.info("Created nodata alert publishment {} for stream {}", policyName + "_publish",
                        streamName);
            }
        }
    }
}

From source file:org.apache.eagle.alert.engine.coordinator.StreamSortSpec.java

License:Apache License

public int getWindowPeriodMillis() {
    if (StringUtils.isNotBlank(windowPeriod)) {
        return TimePeriodUtils.getMillisecondsOfPeriod(Period.parse(windowPeriod));
    } else {
        return 0;
    }
}
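
TimePeriodUtils is an Eagle helper class; an equivalent conversion in plain Joda-Time would look roughly like this (a sketch, not Eagle's actual implementation):

import org.joda.time.Period;

// Illustrative only: toStandardDuration() counts a day as 24 hours and a
// week as 7 days, and rejects periods containing months or years, since
// those have no fixed millisecond length.
long windowMillis = Period.parse("PT5M").toStandardDuration().getMillis(); // 300000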

From source file:org.apache.eagle.alert.engine.evaluator.nodata.NoDataPolicyHandler.java

License:Apache License

@Override
public void prepare(Collector<AlertStreamEvent> collector, PolicyHandlerContext context) throws Exception {
    this.collector = collector;
    this.context = context;
    this.policyDef = context.getPolicyDefinition();
    List<String> inputStreams = policyDef.getInputStreams();
    // validate that inputStreams contains exactly one stream
    if (inputStreams.size() != 1) {
        throw new IllegalArgumentException("policy inputStream size has to be 1 for no data alert");
    }
    // validate that outputStreams contains exactly one stream
    if (policyDef.getOutputStreams().size() != 1) {
        throw new IllegalArgumentException("policy outputStream size has to be 1 for no data alert");
    }

    String is = inputStreams.get(0);
    StreamDefinition sd = sds.get(is);

    String policyValue = policyDef.getDefinition().getValue();
    // assume that the no data alert policy value has the form "windowPeriod, type, numOfFields, f1_name, f2_name, f1_value, f2_value, ..."
    String[] segments = policyValue.split(",");
    long windowPeriod = TimePeriodUtils.getMillisecondsOfPeriod(Period.parse(segments[0]));
    distinctWindow = new DistinctValuesInTimeWindow(windowPeriod);
    this.wisbType = NoDataWisbType.valueOf(segments[1]);
    // for provided wisb values, need to parse, for dynamic wisb values, it is computed through a window
    if (wisbType == NoDataWisbType.provided) {
        wisbValues = new NoDataWisbProvidedParser().parse(segments);
    }
    // populate wisb field names
    int numOfFields = Integer.parseInt(segments[2]);
    for (int i = 3; i < 3 + numOfFields; i++) {
        String fn = segments[i];
        wisbFieldIndices.add(sd.getColumnIndex(fn));
    }
}
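
To make the comma-separated policy value concrete, a hypothetical value matching the format described in the comment above could be taken apart like this (the field names and values here are invented for illustration):

import org.joda.time.Period;

// Hypothetical policy value for a one-minute window with two wisb fields.
String[] segments = "PT1M,provided,2,host,datacenter,host1,dc1".split(",");
Period window = Period.parse(segments[0]); // PT1M -> one minute
String wisbType = segments[1]; // "provided"
int numOfFields = Integer.parseInt(segments[2]); // two field names follow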

From source file:org.apache.eagle.alert.engine.evaluator.nodata.NoDataPolicyTimeBatchHandler.java

License:Apache License

@Override
public void prepare(Collector<AlertStreamEvent> collector, PolicyHandlerContext context) throws Exception {
    this.collector = collector;
    this.context = context;
    this.policyDef = context.getPolicyDefinition();
    List<String> inputStreams = policyDef.getInputStreams();
    // validate that inputStreams contains exactly one stream
    if (inputStreams.size() != 1) {
        throw new IllegalArgumentException("policy inputStream size has to be 1 for no data alert");
    }
    // validate that outputStreams contains exactly one stream
    if (policyDef.getOutputStreams().size() != 1) {
        throw new IllegalArgumentException("policy outputStream size has to be 1 for no data alert");
    }

    String policyValue = policyDef.getDefinition().getValue();
    // assume that the no data alert policy value has the form "windowPeriod,
    // type, numOfFields, f1_name, f2_name, f1_value, f2_value, ..."
    String[] segments = policyValue.split(",");
    this.wisbType = NoDataWisbType.valueOf(segments[1]);
    // for provided wisb values, need to parse, for dynamic wisb values, it
    // is computed through a window
    Set<String> wisbValues = new HashSet<String>();
    if (wisbType == NoDataWisbType.provided) {
        for (int i = 2; i < segments.length; i++) {
            wisbValues.add(segments[i]);
        }
    }

    long windowPeriod = TimePeriodUtils.getMillisecondsOfPeriod(Period.parse(segments[0]));
    distinctWindow = new DistinctValuesInTimeBatchWindow(this, windowPeriod, wisbValues);
    // populate wisb field names
    String is = inputStreams.get(0);
    StreamDefinition sd = sds.get(is);
    String nodataColumnNameKey = "nodataColumnName";
    if (!policyDef.getDefinition().getProperties().containsKey(nodataColumnNameKey)) {
        throw new IllegalArgumentException("policy nodata column name has to be defined for no data alert");
    }
    wisbFieldIndices.add(
            sd.getColumnIndex((String) policyDef.getDefinition().getProperties().get(nodataColumnNameKey)));
}

From source file:org.apache.eagle.alert.engine.publisher.dedup.DefaultDeduplicator.java

License:Apache License

@Override
public void setDedupIntervalMin(String newDedupIntervalMin) {
    if (newDedupIntervalMin == null || newDedupIntervalMin.isEmpty()) {
        dedupIntervalSec = 0;
        return;
    }
    try {
        Period period = Period.parse(newDedupIntervalMin);
        this.dedupIntervalSec = period.toStandardSeconds().getSeconds();
    } catch (Exception e) {
        LOG.warn("Fail to pares deDupIntervalMin, will disable deduplication instead", e);
        this.dedupIntervalSec = 0;
    }
}
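
The try/catch above guards more than malformed input: toStandardSeconds() itself throws UnsupportedOperationException when the parsed period contains months or years, because those fields have no fixed length in seconds. A short sketch:

import org.joda.time.Period;

Period fixed = Period.parse("PT2M");
System.out.println(fixed.toStandardSeconds().getSeconds()); // 120

Period variable = Period.parse("P1M"); // one calendar month
try {
    variable.toStandardSeconds(); // throws: months vary in length
} catch (UnsupportedOperationException e) {
    System.out.println("months/years have no fixed number of seconds");
}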

From source file:org.apache.eagle.alert.engine.sorter.impl.StreamSortWindowHandlerImpl.java

License:Apache License

public void prepare(String streamId, StreamSortSpec streamSortSpecSpec,
        PartitionedEventCollector outputCollector) {
    this.windowManager = new StreamWindowManagerImpl(Period.parse(streamSortSpecSpec.getWindowPeriod()),
            streamSortSpecSpec.getWindowMargin(), PartitionedEventTimeOrderingComparator.INSTANCE,
            outputCollector);
    this.streamSortSpecSpec = streamSortSpecSpec;
    this.streamId = streamId;
    this.outputCollector = outputCollector;
}

From source file:org.apereo.portal.spring.properties.ReadablePeriodEditor.java

License:Apache License

@Override
public void setAsText(String text) throws IllegalArgumentException {
    this.setValue(Period.parse(text));
}

From source file:org.celeria.minecraft.backup.ConfigurationModule.java

License:Apache License

@Provides
@Singleton
public Period provideBackUpPeriod(final Configuration configuration) {
    final String backUpPeriod = getProperty(configuration, ConfigurationKey.BACK_UP_PERIOD);
    return Period.parse(backUpPeriod);
}

From source file:org.ecloudmanager.web.faces.ServiceMonitoringController.java

License:Open Source License

public void loadData() {
    if (selectedNode != null && chartRange != null) {
        Duration duration = Period.parse(chartRange).toDurationTo(new DateTime());
        DateTime dateTime = new DateTime().minus(duration);
        Date startDate = dateTime.toDate();
        Axis dateAxis = chartModel.getAxes().get(AxisType.X);
        dateAxis.setMin(startDate.getTime());
        dateAxis.setMax(new Date().getTime());

        haproxyStatsData = haproxyStatsService.loadHaproxyStats(startDate,
                (DeploymentObject) selectedNode.getData(), chartField);

        if (chartField != null && haproxyStatsData != null) {
            ChartSeries series = chartModel.getSeries().get(0);
            series.setData(haproxyStatsData.getTimeSeriesData());

            if (series.getData().size() > 0) {
                chartModel.setTitle("<b>" + chartField.getName() + "</b><br>" + chartField.getDescription());
            } else {
                chartModel.setTitle(chartField.getDescription() + " (no data)");
            }
        } else {
            chartModel.setTitle(chartField == null ? "" : chartField.getDescription() + " (no data)");
            ChartSeries series = chartModel.getSeries().get(0);
            series.getData().clear();
        }
    }
}
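
toDurationTo anchors the parsed period to a concrete end instant, which is what lets variable-length periods such as "P1M" serve as chart ranges. A sketch with an illustrative range value:

import org.joda.time.DateTime;
import org.joda.time.Duration;
import org.joda.time.Period;

// "P1M" has no fixed millisecond length, so it is anchored to an instant:
// the duration ends now and starts one calendar month earlier.
DateTime now = new DateTime();
Duration range = Period.parse("P1M").toDurationTo(now);
DateTime startOfRange = now.minus(range);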