Example usage for com.amazonaws.services.cloudwatch.model Datapoint getSum

List of usage examples for com.amazonaws.services.cloudwatch.model Datapoint getSum

Introduction

On this page you can find example usage for com.amazonaws.services.cloudwatch.model Datapoint getSum.

Prototype


public Double getSum() 

Source Link

Document

The sum of the metric values for the data point.

Usage

From source file:HW1.java

License:Open Source License

/**
 * End-to-end AWS walkthrough: creates a security group and key pair, launches
 * two EC2 instances, creates and attaches EBS volumes, stores an object in S3,
 * polls CloudWatch CPU metrics, images/snapshots the instances, tears the
 * resources down, and finally restores the instances from the saved AMIs and
 * re-reads the S3 object.
 *
 * @param args unused
 * @throws Exception demo code — any AWS, I/O, or interruption failure aborts the run
 */
public static void main(String[] args) throws Exception {

    AWSCredentials credentials = new PropertiesCredentials(
            HW1.class.getResourceAsStream("AwsCredentials.properties"));

    /*********************************************
     *  #1 Create Amazon Client object
     *********************************************/
    System.out.println("#1 Create Amazon Client object");
    ec2 = new AmazonEC2Client(credentials);
    ec2.setEndpoint("https://us-east-1.ec2.amazonaws.com");

    System.out.println("Please enter required group name and key name... (consider them to be the same)");
    Scanner scan = new Scanner(System.in);
    final String keyGroupName = scan.nextLine();

    /* create security group */
    CreateSecurityGroupRequest createSecurityGroupRequest = new CreateSecurityGroupRequest();
    createSecurityGroupRequest.withGroupName(keyGroupName).withDescription("My Java Security Group");
    ec2.createSecurityGroup(createSecurityGroupRequest);

    /* set ip settings.
     * NOTE(review): this opens the whole tcp port range 22-80 (not just ssh 22
     * and http 80) to 0.0.0.0/0 — acceptable for a homework demo only. */
    IpPermission ipPermission = new IpPermission();
    ipPermission.withIpRanges("0.0.0.0/0").withIpProtocol("tcp").withFromPort(22).withToPort(80);

    AuthorizeSecurityGroupIngressRequest authorizeSecurityGroupIngressRequest = new AuthorizeSecurityGroupIngressRequest();
    authorizeSecurityGroupIngressRequest.withGroupName(keyGroupName).withIpPermissions(ipPermission);
    ec2.authorizeSecurityGroupIngress(authorizeSecurityGroupIngressRequest);

    /* create key pair and save the private key locally for later ssh use */
    CreateKeyPairRequest createKeyPairRequest = new CreateKeyPairRequest();
    createKeyPairRequest.withKeyName(keyGroupName);
    CreateKeyPairResult createKeyPairResult = ec2.createKeyPair(createKeyPairRequest);
    KeyPair keyPair = createKeyPairResult.getKeyPair();
    String privateKey = keyPair.getKeyMaterial();
    /* TODO(review): hard-coded home directory — derive from the user.home property instead */
    PrintWriter file = new PrintWriter("/Users/will/.ssh/" + keyGroupName + ".pem");
    try {
        file.print(privateKey);
    } finally {
        file.close();
    }
    Runtime.getRuntime().exec("chmod 400 /Users/will/.ssh/" + keyGroupName + ".pem");

    try {

        /*********************************************
         *
         *  #2 Create two Instances
         *
         *********************************************/
        System.out.println();
        System.out.println("#2 Create two new Instances");

        String imageId = "ami-76f0061f"; //Basic 32-bit Amazon Linux AMI
        int minInstanceCount = 2; // create exactly 2 instances
        int maxInstanceCount = 2;

        /* create instances */
        RunInstancesRequest rir = new RunInstancesRequest(imageId, minInstanceCount, maxInstanceCount);
        rir.withKeyName(keyGroupName).withSecurityGroups(keyGroupName);
        ec2.runInstances(rir);

        /* waiting for instances to start */
        System.out.println("Created instance, wait for pending...");

        List<Instance> allInstances = new ArrayList<Instance>();
        while (allInstances.size() < 2) {
            Thread.sleep(5000); // poll politely instead of hammering the API
            DescribeInstancesResult describeInstancesResult = ec2.describeInstances();
            List<Reservation> reservations = describeInstancesResult.getReservations();
            for (Reservation reservation : reservations) {
                for (Instance ins : reservation.getInstances()) {
                    /* BUG FIX: collect each running instance with a public IP exactly
                     * once — the original only de-duplicated against the FIRST
                     * collected instance and could double-count the same instance */
                    if (ins.getState().getName().compareTo("running") == 0 && ins.getPublicIpAddress() != null
                            && !containsInstance(allInstances, ins.getInstanceId())) {
                        allInstances.add(ins);
                    }
                }
            }
        }

        System.out.println("You have " + allInstances.size() + " Amazon EC2 instance(s).");
        String insId1 = allInstances.get(0).getInstanceId();
        String insId2 = allInstances.get(1).getInstanceId();
        String insDNS1 = allInstances.get(0).getPublicIpAddress();
        String insDNS2 = allInstances.get(1).getPublicIpAddress();
        String insZone1 = allInstances.get(0).getPlacement().getAvailabilityZone();
        String insZone2 = allInstances.get(1).getPlacement().getAvailabilityZone();

        for (Instance ins : allInstances) {
            System.out.println("New instance has been created: " + ins.getInstanceId());
        }

        System.out.println("Both instances are running now:");
        System.out.println("Instance id1: " + insId1);
        System.out.println("IP: " + insDNS1);
        System.out.println("Zone: " + insZone1);
        System.out.println("Instance id2: " + insId2); // BUG FIX: was mislabelled "id1"
        System.out.println("IP: " + insDNS2);
        System.out.println("Zone: " + insZone2);
        System.out.println();

        /*********************************************
         *  #3 Check OR Create two volumes
         *********************************************/
        System.out.println();
        System.out.println("#3 Create volumes");
        String volume_name1 = createVolume(insZone1, null);
        String volume_name2 = createVolume(insZone2, null);

        /*********************************************
         *  #4 Attach the volume to the instance
         *********************************************/
        System.out.println();
        System.out.println("#4 Attach the volume to the instance");
        System.out.println("Wait for volumes to be available...");
        Thread.sleep(20000);

        /* attach instances to existing volume */
        attachVolume(insId1, volume_name1);
        attachVolume(insId2, volume_name2);

        /************************************************
        *  #5 S3 bucket and object
        ***************************************************/
        System.out.println();
        System.out.println("#5 S3 bucket and object");
        s3 = new AmazonS3Client(credentials);

        /* create bucket */
        String bucketName = "cloud-hw1-bucket";
        s3.createBucket(bucketName);

        /* set key */
        String key = "object-hw1.txt";

        /* set value: a small temp file whose content we re-read on "day two" */
        File new_file = File.createTempFile("temp", ".txt");
        new_file.deleteOnExit();
        Writer writer = new OutputStreamWriter(new FileOutputStream(new_file));
        try {
            writer.write("This is the file stored on the S3 storage on the first day!!!.");
        } finally {
            writer.close();
        }

        /* put object - bucket, key, value(file) */
        s3.putObject(new PutObjectRequest(bucketName, key, new_file));
        System.out.println("Successfully put file temp.txt to S3, we will read it tomorrow...");
        System.out.println();

        /***********************************
        *   #6 Monitoring (CloudWatch)
        *********************************/
        System.out.println();
        System.out.println("#6 set up cloudwatch");
        try {
            /* create CloudWatch client */
            AmazonCloudWatchClient cloudWatch = new AmazonCloudWatchClient(credentials);
            /* one request per instance */
            GetMetricStatisticsRequest statRequest1 = new GetMetricStatisticsRequest();
            GetMetricStatisticsRequest statRequest2 = new GetMetricStatisticsRequest();
            statRequest1.setNamespace("AWS/EC2"); //namespace
            statRequest2.setNamespace("AWS/EC2"); //namespace
            statRequest1.setPeriod(60); //period of data
            statRequest2.setPeriod(60); //period of data
            ArrayList<String> stats = new ArrayList<String>();
            /* Use one of these strings: Average, Maximum, Minimum, SampleCount, Sum */
            stats.add("Average");
            stats.add("Sum");
            statRequest1.setStatistics(stats);
            statRequest2.setStatistics(stats);
            /* Use one of these strings: CPUUtilization, NetworkIn, NetworkOut, DiskReadBytes, DiskWriteBytes, DiskReadOperations */
            statRequest1.setMetricName("CPUUtilization");
            statRequest2.setMetricName("CPUUtilization");
            /* window: the last 10 minutes, aligned to the current minute (UTC) */
            GregorianCalendar calendar = new GregorianCalendar(TimeZone.getTimeZone("UTC"));
            calendar.add(GregorianCalendar.SECOND, -1 * calendar.get(GregorianCalendar.SECOND));
            Date endTime = calendar.getTime();
            calendar.add(GregorianCalendar.MINUTE, -10); // 10 minutes ago
            Date startTime = calendar.getTime();
            statRequest1.setStartTime(startTime);
            statRequest1.setEndTime(endTime);
            statRequest2.setStartTime(startTime);
            statRequest2.setEndTime(endTime);
            /* scope each request to its own instance */
            ArrayList<Dimension> dimensions1 = new ArrayList<Dimension>();
            dimensions1.add(new Dimension().withName("InstanceId").withValue(insId1));
            ArrayList<Dimension> dimensions2 = new ArrayList<Dimension>();
            dimensions2.add(new Dimension().withName("InstanceId").withValue(insId2));
            statRequest1.setDimensions(dimensions1);
            statRequest2.setDimensions(dimensions2);
            System.out.println("Set up cloud watch for instance: " + insId1 + " and instance: " + insId2);

            /* sample the statistics 10 times */
            for (int i = 0; i < 10; i++) {
                GetMetricStatisticsResult statResult1 = cloudWatch.getMetricStatistics(statRequest1);
                GetMetricStatisticsResult statResult2 = cloudWatch.getMetricStatistics(statRequest2);
                /* display */
                System.out.println("Instance 1: " + statResult1.toString());
                List<Datapoint> dataList = statResult1.getDatapoints();
                for (Datapoint d : dataList) {
                    System.out.println(
                            "Instance 1 average CPU utilization for last 10 minutes: " + d.getAverage());
                    System.out.println("Instance 1 total CPU utilization for last 10 minutes: " + d.getSum());
                }
                System.out.println();
                /* BUG FIX: the original printed statResult1 here */
                System.out.println("Instance 2: " + statResult2.toString());
                dataList = statResult2.getDatapoints();
                for (Datapoint d : dataList) {
                    System.out.println(
                            "Instance 2 average CPU utilization for last 10 minutes: " + d.getAverage());
                    System.out.println("Instance 2 total CPU utilization for last 10 minutes: " + d.getSum());
                }
            }

        } catch (AmazonServiceException ase) {
            System.out.println("Caught Exception: " + ase.getMessage());
            System.out.println("Response Status Code: " + ase.getStatusCode());
            System.out.println("Error Code: " + ase.getErrorCode());
            System.out.println("Request ID: " + ase.getRequestId());
        }

        /***********************************
        *   # Copy script to
        *       instance and run
        *********************************/
        System.out.println();
        System.out.println("Waiting for init and automatically SSH...");
        /* call runtime exec to run scp */
        execCmdRuby(insDNS1, keyGroupName);

        /***********************************
        *   # Save instances to image
        *********************************/
        System.out.println();
        System.out.println("******* Approaching 5:00 pm, create ami for instances *********");
        String imageId1 = createAmiFromInstance(insId1, "image1", true);
        String imageId2 = createAmiFromInstance(insId2, "image2", true);
        System.out.println("Created first image id: " + imageId1);
        System.out.println("Created second image id: " + imageId2);

        String snapshot1 = createSnapShotFromVolume(volume_name1);
        String snapshot2 = createSnapShotFromVolume(volume_name2);
        System.out.println("Created first snapshot id: " + snapshot1);
        System.out.println("Created second snapshot id: " + snapshot2);

        /*********************************************
         *
         *  #7 Stop Instances
         *
         *********************************************/
        System.out.println();
        System.out.println("#7 Stop & terminate the Instance");
        List<String> instanceIds = new LinkedList<String>();
        instanceIds.add(insId1);
        instanceIds.add(insId2);
        /* stop instances */
        StopInstancesRequest stopIR = new StopInstancesRequest(instanceIds);
        ec2.stopInstances(stopIR);
        TerminateInstancesRequest tir = new TerminateInstancesRequest(instanceIds);
        ec2.terminateInstances(tir);

        /*********************************************
         *
         *  # Detach volumes
         *
         *********************************************/
        System.out.println();
        System.out.println("Detach the volumes from the instances...");
        deatchVolume(insId1, volume_name1);
        deatchVolume(insId2, volume_name2);

        /*********************************************
          *
          *  # Delete Volumes
          *
          *********************************************/
        System.out.println();

        while (true) {
            if (getVolumeState(volume_name1).compareTo("available") == 0
                    && getVolumeState(volume_name2).compareTo("available") == 0)
                break;
            Thread.sleep(5000); // poll politely instead of busy-waiting
        }
        System.out.println("Delete volumes...");
        Thread.sleep(10000);
        deleteVolume(volume_name1);
        deleteVolume(volume_name2);

        /*********************************************
          *
          *  # Second day restore instances and volumes
          *
          *********************************************/
        System.out.println();
        System.out.println("#8 Second day start up instances from stored amis...");
        String newInsId1 = createInstanceFromImageId(imageId1, keyGroupName);
        String newInsId2 = createInstanceFromImageId(imageId2, keyGroupName);
        System.out.println("Second day first instance has been restored with id: " + newInsId1);
        System.out.println("Second day second instance has been restored with id: " + newInsId2);
        String newInsZone1 = getInstanceZone(newInsId1);
        String newInsZone2 = getInstanceZone(newInsId2);
        System.out.println("New instance 1 zone: " + newInsZone1);
        System.out.println("New instance 2 zone: " + newInsZone2);
        String newInsIP1 = getInstanceIP(newInsId1);
        String newInsIP2 = getInstanceIP(newInsId2);
        System.out.println("New instance 1 IP: " + newInsIP1);
        System.out.println("New instance 2 IP: " + newInsIP2);

        Thread.sleep(120000);
        /* exec read */
        System.out.println();
        System.out.println("Now start to read the file stored yesterday...");
        execCmdRead(newInsIP1, keyGroupName);

        /*********************************************
         *
         *  #9 Read data from S3
         *
         *********************************************/

        /* get the object from the first day */
        System.out.println();
        System.out.println("#9 Reading data from S3 stored on the first day");
        S3Object object = s3.getObject(new GetObjectRequest(bucketName, key));
        BufferedReader reader = new BufferedReader(new InputStreamReader(object.getObjectContent()));
        try {
            String data;
            while ((data = reader.readLine()) != null) {
                System.out.println(data);
            }
        } finally {
            reader.close();
        }

        /*********************************************
         *
         *  #10 shutdown client object
         *
         *********************************************/
        System.out.println("#10 shutdown client objects");
        ec2.shutdown();
        s3.shutdown();

    } catch (AmazonServiceException ase) {
        System.out.println("Caught Exception: " + ase.getMessage());
        System.out.println("Response Status Code: " + ase.getStatusCode());
        System.out.println("Error Code: " + ase.getErrorCode());
        System.out.println("Request ID: " + ase.getRequestId());
    }

}

/** Returns true when {@code instances} already contains an instance with {@code instanceId}. */
private static boolean containsInstance(List<Instance> instances, String instanceId) {
    for (Instance ins : instances) {
        if (ins.getInstanceId().equals(instanceId)) {
            return true;
        }
    }
    return false;
}

From source file:virtualIT.java

License:Open Source License

/**
 * Samples CloudWatch CPU utilization (Average and Sum over 60-second periods,
 * for the last 10 minutes) of the EC2 instance mapped to {@code userId},
 * prints every datapoint, and stores the most recently seen average in
 * {@code mapUserCpu}.
 *
 * @param userId key into {@code mapUserInst} / {@code mapUserCpu}
 * @throws InterruptedException if the wait between the two polls is interrupted
 */
private void cloudWatchMonitor(int userId) throws InterruptedException {

    String instanceId = mapUserInst.get(userId);
    //create request message
    GetMetricStatisticsRequest statRequest = new GetMetricStatisticsRequest();

    //set up request message
    statRequest.setNamespace("AWS/EC2"); //namespace
    statRequest.setPeriod(60); //period of data
    ArrayList<String> stats = new ArrayList<String>();

    //Use one of these strings: Average, Maximum, Minimum, SampleCount, Sum
    stats.add("Average");
    stats.add("Sum");
    statRequest.setStatistics(stats);

    //Use one of these strings: CPUUtilization, NetworkIn, NetworkOut, DiskReadBytes, DiskWriteBytes, DiskReadOperations
    statRequest.setMetricName("CPUUtilization");

    // window: the last 10 minutes, aligned to the current minute (UTC)
    GregorianCalendar calendar = new GregorianCalendar(TimeZone.getTimeZone("UTC"));
    calendar.add(GregorianCalendar.SECOND, -1 * calendar.get(GregorianCalendar.SECOND));
    Date endTime = calendar.getTime();
    calendar.add(GregorianCalendar.MINUTE, -10); // 10 minutes ago
    Date startTime = calendar.getTime();
    statRequest.setStartTime(startTime);
    statRequest.setEndTime(endTime);

    //specify an instance
    ArrayList<Dimension> dimensions = new ArrayList<Dimension>();
    dimensions.add(new Dimension().withName("InstanceId").withValue(instanceId));
    statRequest.setDimensions(dimensions);

    //prime the metric, wait, then fetch the statistics we actually report
    cloudWatch.getMetricStatistics(statRequest);
    Thread.sleep(1000000); // FIX: sleep() is static — don't call it via currentThread()
    GetMetricStatisticsResult statResult = cloudWatch.getMetricStatistics(statRequest);

    //display
    System.out.println(statResult.toString());
    List<Datapoint> dataList = statResult.getDatapoints();
    Double averageCPU = null;
    for (Datapoint data : dataList) {
        averageCPU = data.getAverage();
        System.out.println("Average CPU utilization for last 10 minutes: " + averageCPU);
        System.out.println("Total CPU utilization for last 10 minutes: " + data.getSum());
    }

    // put() already replaces an existing mapping, so the previous
    // containsKey/remove/put dance was redundant
    mapUserCpu.put(userId, averageCPU);

}

From source file:com.appdynamics.extensions.cloudwatch.metricsmanager.MetricsManager.java

License:Apache License

/**
 * Reads the single statistic configured for the given namespace/metric pair
 * out of a CloudWatch datapoint.
 *
 * @param namespace  CloudWatch namespace the metric belongs to
 * @param metricName name of the metric
 * @param data       datapoint to read the statistic from
 * @return the configured statistic of the datapoint, or {@code null} when the
 *         resolved metric type matches none of the known statistics
 */
protected Double getValue(String namespace, String metricName, Datapoint data) {
    final MetricType type = getMetricType(namespace, metricName);

    // Return directly from each arm instead of assigning to a shared local.
    switch (type) {
    case AVE:
        return data.getAverage();
    case MAX:
        return data.getMaximum();
    case MIN:
        return data.getMinimum();
    case SUM:
        return data.getSum();
    case SAMPLE_COUNT:
        return data.getSampleCount();
    default:
        return null;
    }
}

From source file:com.cfelde.aws.ddb.management.TableThroughput.java

License:Open Source License

/**
 * Returns the table's provisioned read capacity as reported by the most
 * recent CloudWatch datapoint.
 *
 * @return provisioned reads per second (last known value, or 0 if none yet)
 */
public double getReadCapacity() {
    synchronized (lock) {
        GetMetricStatisticsRequest req = new GetMetricStatisticsRequest();
        req.setStatistics(Collections.singleton("Sum"));
        req.setDimensions(Arrays.asList(new Dimension().withName("TableName").withValue(tableName)));
        req.setNamespace("AWS/DynamoDB");
        req.setStartTime(new Date(System.currentTimeMillis() - (1000L * period * 5)));
        req.setEndTime(new Date(System.currentTimeMillis() + 60000L));
        req.setPeriod(period);
        req.setMetricName("ProvisionedReadCapacityUnits");

        GetMetricStatisticsResult stats = client.getMetricStatistics(req);

        if (stats.getDatapoints().isEmpty())
            return lastReadCapacity == null ? 0 : lastReadCapacity;

        /* find the newest datapoint with a single linear scan instead of
         * copying and sorting the whole list */
        Datapoint newest = null;
        for (Datapoint candidate : stats.getDatapoints()) {
            if (newest == null || candidate.getTimestamp().after(newest.getTimestamp()))
                newest = candidate;
        }

        lastReadCapacity = newest.getSum();
        return lastReadCapacity;
    }
}

From source file:com.cfelde.aws.ddb.management.TableThroughput.java

License:Open Source License

/**
 * Returns number of writes per second./*from   w  ww.j  a v  a  2 s  .  c  o  m*/
 *
 * @return Reads per second
 */
public double getWriteCapacity() {
    synchronized (lock) {
        GetMetricStatisticsRequest request = new GetMetricStatisticsRequest();
        request.setStatistics(Collections.singleton("Sum"));
        request.setDimensions(Arrays.asList(new Dimension().withName("TableName").withValue(tableName)));
        request.setNamespace("AWS/DynamoDB");
        request.setStartTime(new Date(System.currentTimeMillis() - (1000L * period * 5)));
        request.setEndTime(new Date(System.currentTimeMillis() + 60000L));
        request.setPeriod(period);
        request.setMetricName("ProvisionedWriteCapacityUnits");

        GetMetricStatisticsResult result = client.getMetricStatistics(request);

        if (result.getDatapoints().isEmpty())
            return lastWriteCapacity == null ? 0 : lastWriteCapacity;

        List<Datapoint> dataPoints = new ArrayList<>(result.getDatapoints());

        Collections.sort(dataPoints, new Comparator<Datapoint>() {
            @Override
            public int compare(Datapoint o1, Datapoint o2) {
                return o2.getTimestamp().compareTo(o1.getTimestamp());
            }
        });

        Datapoint datapoint = dataPoints.get(0);

        lastWriteCapacity = datapoint.getSum();

        return lastWriteCapacity;
    }
}

From source file:com.cfelde.aws.ddb.management.TableThroughput.java

License:Open Source License

/**
 * Records the latest consumed-read-capacity datapoint (or 0 when CloudWatch
 * returned no data) and returns the peak consumed read rate over the
 * retained window of up to {@code maxConsumedCount} samples.
 *
 * @return maximum consumed reads per second observed in the window
 */
public double getConsumedReadCapacity() {
    synchronized (lock) {
        GetMetricStatisticsRequest request = new GetMetricStatisticsRequest();
        request.setStatistics(Collections.singleton("Sum"));
        request.setDimensions(Arrays.asList(new Dimension().withName("TableName").withValue(tableName)));
        request.setNamespace("AWS/DynamoDB");
        request.setStartTime(new Date(System.currentTimeMillis() - (1000L * period * 5)));
        request.setEndTime(new Date(System.currentTimeMillis() + 60000L));
        request.setPeriod(period);
        request.setMetricName("ConsumedReadCapacityUnits");

        GetMetricStatisticsResult result = client.getMetricStatistics(request);

        if (!result.getDatapoints().isEmpty()) {
            /* newest datapoint via a linear scan (no need to sort the list) */
            Datapoint newest = null;
            for (Datapoint candidate : result.getDatapoints()) {
                if (newest == null || candidate.getTimestamp().after(newest.getTimestamp()))
                    newest = candidate;
            }
            consumedReadValues.add(newest.getSum() / period);
        } else {
            consumedReadValues.add(0D);
        }

        /* trim the sliding window */
        while (consumedReadValues.size() > maxConsumedCount)
            consumedReadValues.remove(0);

        if (consumedReadValues.isEmpty())
            return 0;

        // BUG FIX: the max was previously seeded with Double.MIN_VALUE, which
        // is the smallest POSITIVE double — an all-zero window therefore
        // returned 4.9e-324 instead of 0. Collections.max has no seed problem.
        return Collections.max(consumedReadValues);
    }
}

From source file:com.cfelde.aws.ddb.management.TableThroughput.java

License:Open Source License

/**
 * Returns number of consumed write per second.
 *
 * @return Reads per second/*from ww  w .  jav  a  2s  . co m*/
 */
public double getConsumedWriteCapacity() {
    synchronized (lock) {
        GetMetricStatisticsRequest request = new GetMetricStatisticsRequest();
        request.setStatistics(Collections.singleton("Sum"));
        request.setDimensions(Arrays.asList(new Dimension().withName("TableName").withValue(tableName)));
        request.setNamespace("AWS/DynamoDB");
        request.setStartTime(new Date(System.currentTimeMillis() - (1000L * period * 5)));
        request.setEndTime(new Date(System.currentTimeMillis() + 60000L));
        request.setPeriod(period);
        request.setMetricName("ConsumedWriteCapacityUnits");

        GetMetricStatisticsResult result = client.getMetricStatistics(request);

        if (!result.getDatapoints().isEmpty()) {
            List<Datapoint> dataPoints = new ArrayList<>(result.getDatapoints());

            Collections.sort(dataPoints, new Comparator<Datapoint>() {
                @Override
                public int compare(Datapoint o1, Datapoint o2) {
                    return o2.getTimestamp().compareTo(o1.getTimestamp());
                }
            });

            Datapoint datapoint = dataPoints.get(0);

            consumedWriteValues.add(datapoint.getSum() / period);
        } else {
            consumedWriteValues.add(0D);
        }

        while (consumedWriteValues.size() > maxConsumedCount)
            consumedWriteValues.remove(0);

        if (consumedWriteValues.isEmpty())
            return 0;

        double maxConsumedValue = Double.MIN_VALUE;
        for (Double c : consumedWriteValues) {
            if (c > maxConsumedValue)
                maxConsumedValue = c;
        }

        return maxConsumedValue;
    }
}

From source file:com.cfelde.aws.ddb.management.TableThroughput.java

License:Open Source License

/**
 * Returns number of throttled reads per second.
 *
 * @return Reads per second/*  w w w .  j av a  2 s  .c  om*/
 */
public double getThrottledReads() {
    synchronized (lock) {
        GetMetricStatisticsRequest request = new GetMetricStatisticsRequest();
        request.setStatistics(Collections.singleton("Sum"));
        request.setDimensions(Arrays.asList(new Dimension().withName("TableName").withValue(tableName)));
        request.setNamespace("AWS/DynamoDB");
        request.setStartTime(new Date(System.currentTimeMillis() - (1000L * period * 5)));
        request.setEndTime(new Date(System.currentTimeMillis() + 60000L));
        request.setPeriod(period);
        request.setMetricName("ReadThrottleEvents");

        GetMetricStatisticsResult result = client.getMetricStatistics(request);

        if (result.getDatapoints().isEmpty())
            return 0;

        List<Datapoint> dataPoints = new ArrayList<>(result.getDatapoints());

        Collections.sort(dataPoints, new Comparator<Datapoint>() {
            @Override
            public int compare(Datapoint o1, Datapoint o2) {
                return o2.getTimestamp().compareTo(o1.getTimestamp());
            }
        });

        Datapoint datapoint = dataPoints.get(0);

        // Do this to ensure the additive nature of how we use it works
        if (datapoint.getTimestamp().getTime() < lastReadChange.getMillis())
            return 0;

        return datapoint.getSum() / period;
    }
}

From source file:com.cfelde.aws.ddb.management.TableThroughput.java

License:Open Source License

/**
 * Returns number of throttled writes per second.
 *
 * @return Reads per second/*from w  w  w.  j  av  a 2s  . c  o  m*/
 */
public double getThrottledWrites() {
    synchronized (lock) {
        GetMetricStatisticsRequest request = new GetMetricStatisticsRequest();
        request.setStatistics(Collections.singleton("Sum"));
        request.setDimensions(Arrays.asList(new Dimension().withName("TableName").withValue(tableName)));
        request.setNamespace("AWS/DynamoDB");
        request.setStartTime(new Date(System.currentTimeMillis() - (1000L * period * 5)));
        request.setEndTime(new Date(System.currentTimeMillis() + 60000L));
        request.setPeriod(period);
        request.setMetricName("WriteThrottleEvents");

        GetMetricStatisticsResult result = client.getMetricStatistics(request);

        if (result.getDatapoints().isEmpty())
            return 0;

        List<Datapoint> dataPoints = new ArrayList<>(result.getDatapoints());

        Collections.sort(dataPoints, new Comparator<Datapoint>() {
            @Override
            public int compare(Datapoint o1, Datapoint o2) {
                return o2.getTimestamp().compareTo(o1.getTimestamp());
            }
        });

        Datapoint datapoint = dataPoints.get(0);

        // Do this to ensure the additive nature of how we use it works
        if (datapoint.getTimestamp().getTime() < lastWriteChange.getMillis())
            return 0;

        return datapoint.getSum() / period;
    }
}

From source file:org.apache.beam.sdk.io.kinesis.SimplifiedKinesisClient.java

License:Apache License

/**
 * Gets total size in bytes of all events that remain in Kinesis stream between specified
 * instants./*w ww .j a v a 2s . c o  m*/
 *
 * @return total size in bytes of all Kinesis events after specified instant
 */
public long getBacklogBytes(final String streamName, final Instant countSince, final Instant countTo)
        throws TransientKinesisException {
    return wrapExceptions(() -> {
        Minutes period = Minutes.minutesBetween(countSince, countTo);
        if (period.isLessThan(Minutes.ONE)) {
            return 0L;
        }

        GetMetricStatisticsRequest request = createMetricStatisticsRequest(streamName, countSince, countTo,
                period);

        long totalSizeInBytes = 0;
        GetMetricStatisticsResult result = cloudWatch.getMetricStatistics(request);
        for (Datapoint point : result.getDatapoints()) {
            totalSizeInBytes += point.getSum().longValue();
        }
        return totalSizeInBytes;
    });
}