Example usage for com.amazonaws.services.cloudwatch.model PutMetricDataRequest PutMetricDataRequest

List of usage examples for com.amazonaws.services.cloudwatch.model PutMetricDataRequest PutMetricDataRequest

Introduction

On this page you can find example usage of the com.amazonaws.services.cloudwatch.model PutMetricDataRequest constructor.

Prototype

PutMetricDataRequest

Source Link

Usage

From source file:aws.example.cloudwatch.PutMetricData.java

License:Open Source License

/**
 * Publishes a single data point to CloudWatch under the SITE/TRAFFIC
 * namespace, as metric PAGES_VISITED with dimension UNIQUE_PAGES=URLS.
 *
 * @param args exactly one argument: the numeric value of the data point
 */
public static void main(String[] args) {

    final String USAGE = "To run this example, supply a data point:\n" + "Ex: PutMetricData <data_point>\n";

    if (args.length != 1) {
        System.out.println(USAGE);
        System.exit(1);
    }

    // Fail with the usage message rather than a raw stack trace on bad input.
    Double data_point = null;
    try {
        data_point = Double.parseDouble(args[0]);
    } catch (NumberFormatException e) {
        System.out.println(USAGE);
        System.exit(1);
    }

    final AmazonCloudWatch cw = AmazonCloudWatchClientBuilder.defaultClient();

    // Dimension qualifying the metric: visit counts broken out by URL set.
    Dimension dimension = new Dimension().withName("UNIQUE_PAGES").withValue("URLS");

    MetricDatum datum = new MetricDatum().withMetricName("PAGES_VISITED").withUnit(StandardUnit.None)
            .withValue(data_point).withDimensions(dimension);

    PutMetricDataRequest request = new PutMetricDataRequest().withNamespace("SITE/TRAFFIC")
            .withMetricData(datum);

    // Result carries no useful payload for this example, so it is not kept.
    cw.putMetricData(request);

    System.out.printf("Successfully put data point %f", data_point);
}

From source file:be.dataminded.nifi.plugins.PutCloudWatchCountMetricAndAlarm.java

License:Apache License

@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }

    long totalTableCount = 0;
    long sumCount = 0;
    String tableName = "";
    String schemaName = "";
    String source = "";
    String tenantName = "";

    try (InputStream inputStream = session.read(flowFile)) {

        StringWriter writer = new StringWriter();
        IOUtils.copy(inputStream, writer, "UTF-8");
        String flowFileContent = writer.toString();

        // The MergeContent controller will be configured to append the JSON content with commas
        // We have to surround this list with square brackets to become a valid JSON Array
        String jsonContent = "[" + flowFileContent + "]";

        JSONArray jsonArray = new JSONArray(jsonContent);

        Iterator iterator = jsonArray.iterator();

        ArrayList<Long> counts = new ArrayList<>();

        while (iterator.hasNext()) {
            JSONObject o = (JSONObject) iterator.next();
            counts.add(o.getLong(context.getProperty(NAME_ELEMENT_TO_SUM).getValue()));
        }
        sumCount = counts.stream().mapToLong(Long::longValue).sum();

        JSONObject firstElement = (JSONObject) jsonArray.get(0);
        totalTableCount = firstElement.getLong(context.getProperty(NAME_ELEMENT_TOTAL_COUNT).getValue());
        tableName = firstElement.getString(TABLE_NAME);
        schemaName = firstElement.getString(SCHEMA_NAME);
        source = firstElement.getString(SOURCE_NAME);
        tenantName = firstElement.getString(TENANT_NAME);

    } catch (IOException e) {
        logger.error("Something went wrong when trying to read the flowFile body: " + e.getMessage());
        // Route to failure and stop: publishing metrics built from the
        // zero-initialized defaults above would report bogus data.
        session.transfer(flowFile, REL_FAILURE);
        return;
    } catch (org.json.JSONException e) {
        logger.error("Something went wrong when trying to parse the JSON body of the flowFile: " + e.getMessage());
        session.transfer(flowFile, REL_FAILURE);
        return;
    } catch (Exception e) {
        logger.error("something else went wrong in body processing of this FlowFile: " + e.getMessage());
        session.transfer(flowFile, REL_FAILURE);
        // Returning here also prevents a second (illegal) transfer of the
        // same FlowFile in the block below.
        return;
    }

    try {

        String environment = context.getProperty(ENVIRONMENT).getValue();
        String alarmPrefix = context.getProperty(NAME_PREFIX_ALARM).getValue();

        Map<String, Long> metrics = new HashMap<>();
        // first metric: this is the total count of the records that were exported
        metrics.put("COUNT_", sumCount);
        // second metric: this is the difference between the records exported
        // and the total amount of records counted in the DB, should always be 0 !!!
        // we take a margin into account because we can't be sure there won't be any deletes
        // between counting and executing the queries
        long diff = Math.abs(totalTableCount - sumCount);
        // Use floating-point division: the previous long/long division
        // truncated to 0 (or 1) before the scaling, making the ratio useless.
        // Guard against a zero total to avoid division by zero.
        double diffProcent = totalTableCount == 0 ? 0
                : Math.round(((double) diff / totalTableCount) * 1000);
        metrics.put("DIFF_", (long) diffProcent);

        ArrayList<Dimension> dimensions = new ArrayList<>();
        dimensions.add(new Dimension().withName("tableName").withValue(tableName));
        dimensions.add(new Dimension().withName("tenantName").withValue(tenantName));
        dimensions.add(new Dimension().withName("sourceName").withValue(source));
        dimensions.add(new Dimension().withName("schemaName").withValue(schemaName));
        dimensions.add(new Dimension().withName("environment").withValue(environment));

        // One CloudWatch datum per metric, all sharing the same dimensions.
        for (Map.Entry<String, Long> metric : metrics.entrySet()) {
            MetricDatum datum = new MetricDatum();
            datum.setMetricName(metric.getKey() + tableName);
            datum.setValue((double) metric.getValue());
            datum.setUnit("Count");
            datum.setDimensions(dimensions);

            final PutMetricDataRequest metricDataRequest = new PutMetricDataRequest().withNamespace("NIFI")
                    .withMetricData(datum);

            putMetricData(metricDataRequest);
        }

        // the alarm we create is a static one that will check if the diff is zero
        String comparisonOperator = context.getProperty(ALARM_COMPARISON_OPERATOR).getValue();
        String alarmStatistic = context.getProperty(ALARM_STATISTIC).getValue();
        String alarmPeriod = context.getProperty(ALARM_PERIOD).getValue();
        String alarmEvaluatePeriods = context.getProperty(ALARM_EVALUATE_PERIODS).getValue();
        String alarmAction = context.getProperty(ALARM_ACTION).getValue();

        PutMetricAlarmRequest putMetricAlarmRequest = new PutMetricAlarmRequest()
                .withMetricName("DIFF_" + tableName)
                .withAlarmName(environment + "_" + alarmPrefix + "_" + "DIFF_" + tableName)
                .withDimensions(dimensions).withComparisonOperator(comparisonOperator).withNamespace("NIFI")
                .withStatistic(alarmStatistic).withPeriod(Integer.parseInt(alarmPeriod))
                .withEvaluationPeriods(Integer.parseInt(alarmEvaluatePeriods)).withThreshold((double) 0)
                //.withTreatMissingData("notBreaching") // aws java SDK has to be upgraded for this
                .withAlarmDescription("The daily Count Alarm for table " + tableName).withActionsEnabled(true)
                .withAlarmActions(alarmAction);
        putAlarmData(putMetricAlarmRequest);

        session.transfer(flowFile, REL_SUCCESS);
        getLogger().info("Successfully published cloudwatch metric for {}", new Object[] { flowFile });
    } catch (final Exception e) {
        getLogger().error("Failed to publish cloudwatch metric for {} due to {}", new Object[] { flowFile, e });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    }

}

From source file:cloudwatch.src.main.java.aws.example.cloudwatch.PutMetricData.java

License:Open Source License

/**
 * Publishes a single data point to CloudWatch under the SITE/TRAFFIC
 * namespace, as metric PAGES_VISITED with dimension UNIQUE_PAGES=URLS.
 *
 * @param args exactly one argument: the numeric value of the data point
 */
public static void main(String[] args) {

    final String USAGE = "To run this example, supply a data point value\n"
            + "Ex: PutMetricData <data-point-value>\n";

    if (args.length != 1) {
        System.out.println(USAGE);
        System.exit(1);
    }

    // Fail with the usage message rather than a raw stack trace on bad input.
    Double dataPointValue = null;
    try {
        dataPointValue = Double.parseDouble(args[0]);
    } catch (NumberFormatException e) {
        System.out.println(USAGE);
        System.exit(1);
    }

    final AmazonCloudWatch cloudWatch = AmazonCloudWatchClientBuilder.defaultClient();

    // Dimension qualifying the metric: visit counts broken out by URL set.
    Dimension dimension = new Dimension().withName("UNIQUE_PAGES").withValue("URLS");

    MetricDatum metricDatum = new MetricDatum().withMetricName("PAGES_VISITED").withUnit(StandardUnit.None)
            .withValue(dataPointValue).withDimensions(dimension);

    PutMetricDataRequest request = new PutMetricDataRequest().withNamespace("SITE/TRAFFIC")
            .withMetricData(metricDatum);

    // Result carries no useful payload for this example, so it is not kept.
    cloudWatch.putMetricData(request);

    System.out.printf("Successfully put data point %f", dataPointValue);
}

From source file:com.amazon.kinesis.streaming.agent.metrics.DefaultCWMetricsPublisher.java

License:Open Source License

/**
 * Publishes the given metric datums to CloudWatch in batches of BATCH_SIZE,
 * skipping any datum whose name starts with "." (internal/hidden metrics).
 *
 * @param dataToPublish keyed metric datums to send; processed in order
 */
@Override
public void publishMetrics(List<MetricDatumWithKey<CWMetricKey>> dataToPublish) {
    for (int startIndex = 0; startIndex < dataToPublish.size(); startIndex += BATCH_SIZE) {
        int endIndex = Math.min(dataToPublish.size(), startIndex + BATCH_SIZE);

        PutMetricDataRequest request = new PutMetricDataRequest();
        request.setNamespace(namespace);

        List<MetricDatum> metricData = new ArrayList<MetricDatum>();
        for (int i = startIndex; i < endIndex; i++) {
            MetricDatum metric = dataToPublish.get(i).datum;
            if (!metric.getMetricName().startsWith(".")) {
                metricData.add(dataToPublish.get(i).datum);
            }
        }
        // BUGFIX: was "return", which silently dropped every remaining batch
        // when one batch happened to contain only hidden metrics.
        if (metricData.isEmpty()) {
            continue;
        }

        request.setMetricData(metricData);
        try {
            cloudWatchClient.putMetricData(request);

            LOG.info(String.format("Successfully published %d datums.", endIndex - startIndex));
        } catch (AmazonClientException e) {
            // Best-effort publishing: log and move on to the next batch.
            LOG.warn(String.format("Could not publish %d datums to CloudWatch", endIndex - startIndex), e);
        }
    }
}

From source file:com.amediamanager.metrics.MetricBatcher.java

License:Apache License

/**
 * Asynchronously publishes the collected datums to CloudWatch, one request
 * per namespace batch of at most BATCH_SIZE. A batch that fails is put back
 * on the queue so a later flush can retry it.
 */
protected void sendBatch(Map<String, Collection<MetricDatum>> datums) {
    for (final Map.Entry<String, Collection<MetricDatum>> entry : datums.entrySet()) {
        final String namespace = entry.getKey();
        for (final List<MetricDatum> chunk : Lists.partition(Lists.newLinkedList(entry.getValue()),
                BATCH_SIZE)) {
            PutMetricDataRequest request = new PutMetricDataRequest().withNamespace(namespace)
                    .withMetricData(chunk);
            cloudWatch.putMetricDataAsync(request, new AsyncHandler<PutMetricDataRequest, Void>() {

                @Override
                public void onError(Exception exception) {
                    LOG.error("PutMetricData failed", exception);
                    LOG.info("Requeueing metric data.");
                    // Re-queue so the next flush retries this chunk.
                    queuedDatums.putAll(namespace, chunk);
                }

                @Override
                public void onSuccess(PutMetricDataRequest sent, Void result) {
                    LOG.info("Successfully put " + sent.getMetricData().size()
                            + " datums for namespace " + sent.getNamespace());
                    LOG.debug("Request", sent);
                }

            });
        }
    }
}

From source file:com.basistech.metrics.CloudWatchReporter.java

License:Open Source License

/**
 * Converts the registry's current gauges, counters, histograms and timers
 * into CloudWatch datums (all tagged with the shared dimensions) and ships
 * them in a single PutMetricData call. Non-numeric gauges are skipped.
 * NOTE(review): meters are accepted but not reported here — confirm intended.
 */
@Override
public void report(SortedMap<String, Gauge> gauges, SortedMap<String, Counter> counters,
        SortedMap<String, Histogram> histograms, SortedMap<String, Meter> meters,
        SortedMap<String, Timer> timers) {
    Collection<MetricDatum> data = new ArrayList<>();

    for (Map.Entry<String, Gauge> gaugeEntry : gauges.entrySet()) {
        Object raw = gaugeEntry.getValue().getValue();
        // Only numeric gauge values can be expressed as CloudWatch datums.
        if (raw instanceof Number) {
            double value = ((Number) raw).doubleValue();
            if (LOG.isDebugEnabled()) {
                LOG.debug("gauge {} val {}", gaugeEntry.getKey(), value);
            }
            data.add(new MetricDatum().withMetricName(gaugeEntry.getKey()).withValue(value)
                    .withDimensions(dimensions));
        }
    }

    for (Map.Entry<String, Counter> counterEntry : counters.entrySet()) {
        long count = counterEntry.getValue().getCount();
        if (LOG.isDebugEnabled()) {
            LOG.debug("counter {} val {}", counterEntry.getKey(), count);
        }
        data.add(new MetricDatum().withMetricName(counterEntry.getKey()).withValue((double) count)
                .withDimensions(dimensions));
    }

    for (Map.Entry<String, Histogram> histogramEntry : histograms.entrySet()) {
        reportHistogram(data, histogramEntry);
    }

    for (Map.Entry<String, Timer> timerEntry : timers.entrySet()) {
        reportTimer(timerEntry.getKey(), data, timerEntry);
    }

    if (data.isEmpty()) {
        return;
    }

    PutMetricDataRequest put = new PutMetricDataRequest();
    put.setNamespace(namespace);
    put.setMetricData(data);
    try {
        client.putMetricData(put);
    } catch (Throwable t) {
        // Reporting must never take down the caller; log and carry on.
        LOG.error("Failed to put metrics", t);
    }
}

From source file:com.blacklocus.metrics.CloudWatchReporter.java

License:Apache License

/**
 * Translates all registry metrics into CloudWatch datums and ships them
 * asynchronously in API-sized partitions. Any exception is contained here so
 * the scheduled reporter thread is never killed.
 */
@Override
public void report(SortedMap<String, Gauge> gauges, SortedMap<String, Counter> counters,
        SortedMap<String, Histogram> histograms, SortedMap<String, Meter> meters,
        SortedMap<String, Timer> timers) {

    try {
        // Capacity is a rough estimate: histograms and timers each produce
        // two datums (a count and a sampling set).
        List<MetricDatum> data = new ArrayList<MetricDatum>(
                gauges.size() + counters.size() + meters.size() + 2 * histograms.size() + 2 * timers.size());
        // something like that

        for (Map.Entry<String, Gauge> gaugeEntry : gauges.entrySet()) {
            reportGauge(gaugeEntry, "gauge", data);
        }

        for (Map.Entry<String, Counter> counterEntry : counters.entrySet()) {
            reportCounter(counterEntry, "counterSum", data);
        }

        // Meters are reported through the same counter path as their count.
        for (Map.Entry<String, Meter> meterEntry : meters.entrySet()) {
            reportCounter(meterEntry, "meterSum", data);
        }

        for (Map.Entry<String, Histogram> histogramEntry : histograms.entrySet()) {
            reportCounter(histogramEntry, "histogramCount", data);
            reportSampling(histogramEntry, "histogramSet", 1.0, data);
        }

        for (Map.Entry<String, Timer> timerEntry : timers.entrySet()) {
            reportCounter(timerEntry, "timerCount", data);
            reportSampling(timerEntry, "timerSet", 0.000001, data); // nanos -> millis
        }

        // CloudWatch rejects any Statistic Sets with sample count == 0
        Collection<MetricDatum> nonEmptyData = Collections2.filter(data, new Predicate<MetricDatum>() {
            @Override
            public boolean apply(MetricDatum input) {
                if (input == null) {
                    return false;
                } else if (input.getStatisticValues() != null) {
                    return input.getStatisticValues().getSampleCount() > 0;
                }
                return true;
            }
        });
        // Each CloudWatch API request may contain at maximum 20 datums.
        Iterable<List<MetricDatum>> dataPartitions = Iterables.partition(nonEmptyData, 20);
        List<Future<?>> cloudWatchFutures = Lists.newArrayListWithExpectedSize(data.size());

        // Fire off all partitions first, then wait on the futures, so one
        // slow request does not serialize the whole upload.
        for (List<MetricDatum> dataSubset : dataPartitions) {
            cloudWatchFutures.add(cloudWatch.putMetricDataAsync(
                    new PutMetricDataRequest().withNamespace(metricNamespace).withMetricData(dataSubset)));
        }
        for (Future<?> cloudWatchFuture : cloudWatchFutures) {
            // We can't let an exception leak out of here, or else the reporter will cease running per mechanics of
            // java.util.concurrent.ScheduledExecutorService.scheduleAtFixedRate(Runnable, long, long, TimeUnit unit)
            try {
                // See what happened in case of an error.
                cloudWatchFuture.get();
            } catch (Exception e) {
                LOG.error(
                        "Exception reporting metrics to CloudWatch. The data sent in this CloudWatch API request "
                                + "may have been discarded.",
                        e);
            }
        }

        LOG.info("Sent {} metric data to CloudWatch. namespace: {}", data.size(), metricNamespace);

    } catch (RuntimeException e) {
        LOG.error("Error marshalling CloudWatch metrics.", e);
    }
}

From source file:com.boundlessgeo.suite.geoserver.cloudwatch.aws.CloudwatchSender.java

License:Open Source License

/**
 * Invoked by Spring on a timer: collects the datums from every enabled
 * metric provider and submits each one asynchronously to the "geoserver"
 * CloudWatch namespace, logging success or failure per datum.
 */
public void sendAllMetrics() {
    if (!enabled) {
        logger.debug("Metrics are disabled...returning");
        return;
    }
    logger.debug("Sending all metrics");
    for (MetricProvider provider : providers) {
        if (!provider.getEnabled()) {
            continue;
        }
        for (final MetricDatum md : provider.getMetrics()) {
            try {
                PutMetricDataRequest request = new PutMetricDataRequest().withNamespace("geoserver")
                        .withMetricData(md);
                logger.trace("Sending statistic {}", md.getMetricName());
                // Bridge the SDK's plain Future into a listenable one so a
                // callback can report the outcome without blocking.
                ListenableFuture<java.lang.Void> future = JdkFutureAdapters
                        .listenInPoolThread(cloudwatch.putMetricDataAsync(request));
                Futures.addCallback(future, new FutureCallback<java.lang.Void>() {
                    public void onSuccess(java.lang.Void ignored) {
                        logger.trace("Sent statistic {}", md.getMetricName());
                    }

                    public void onFailure(Throwable ex) {
                        logger.error("Error sending metric: {}", md.getMetricName(), ex);
                    }
                });
            } catch (AmazonClientException ex) {
                logger.warn("Error sending AWS metric {}", md.getMetricName());
            }
        }
    }
}

From source file:com.boundlessgeo.suite.geoserver.cloudwatch.aws.CloudwatchSender.java

License:Open Source License

/**
 * Synchronously publishes one datum to CloudWatch.
 * NOTE(review): the namespace is hard-coded to "AWS/EC2" — confirm this is
 * intentional, since custom metrics normally use a custom namespace.
 *
 * @param metric the datum to publish
 */
protected void sendMetric(MetricDatum metric) {
    PutMetricDataRequest request = new PutMetricDataRequest().withNamespace("AWS/EC2").withMetricData(metric);

    cloudwatch.putMetricData(request);
}

From source file:com.github.lpezet.antiope.metrics.aws.BlockingRequestBuilder.java

License:Open Source License

/**
 * Builds a PutMetricDataRequest for the given datums under the given
 * namespace. When extra dimensions are supplied, each datum is cloned and
 * the dimensions are appended to the clone, leaving the caller's datums
 * untouched. Request-level metric collection is disabled so metric uploads
 * are not themselves measured.
 */
private PutMetricDataRequest newPutMetricDataRequest(Collection<MetricDatum> pData, final String pNamespace,
        final Dimension... pExtraDims) {
    Collection<MetricDatum> payload = pData;
    if (pExtraDims != null) {
        // Copy before mutating to avoid aliasing the caller's datums.
        Collection<MetricDatum> augmented = new ArrayList<MetricDatum>(pData.size());
        for (MetricDatum original : pData) {
            MetricDatum copy = cloneMetricDatum(original);
            for (Dimension extra : pExtraDims) {
                copy.withDimensions(extra);
            }
            augmented.add(copy);
        }
        payload = augmented;
    }
    return new PutMetricDataRequest().withNamespace(pNamespace).withMetricData(payload)
            .withRequestMetricCollector(RequestMetricCollector.NONE);
}