Example usage for com.amazonaws.services.redshift.model Cluster getClusterIdentifier

List of usage examples for com.amazonaws.services.redshift.model Cluster getClusterIdentifier

Introduction

On this page you can find example usage for com.amazonaws.services.redshift.model Cluster getClusterIdentifier.

Prototype


public String getClusterIdentifier() 

Source Link

Document

The unique identifier of the cluster.

Usage

From source file:com.jaspersoft.jasperserver.api.engine.jasperreports.util.AwsDataSourceRecovery.java

License:Open Source License

/**
 * Recovers network access from this server to the Redshift cluster backing the given
 * data source by ensuring an ingress rule for our source IP exists on either a VPC
 * security group (VPC clusters) or a cluster security group (classic clusters), and
 * that the cluster is a member of that group.
 *
 * @param awsReportDataSource the report data source holding AWS credentials, region and
 *                            the Redshift cluster identifier
 * @throws JSException if the cluster is not active or our public source IP cannot be
 *                     determined
 * @throws Exception   propagated from the AWS SDK calls
 */
private void createRedshiftSecurityGroup(AwsReportDataSource awsReportDataSource) throws Exception {

    AWSCredentials awsCredentials = AwsCredentialUtil.getAWSCredentials(awsReportDataSource.getAWSAccessKey(),
            awsReportDataSource.getAWSSecretKey(), awsReportDataSource.getRoleARN());

    AmazonRedshiftClient redshiftClient = new AmazonRedshiftClient(awsCredentials);
    DescribeClustersRequest describeClustersRequest = new DescribeClustersRequest()
            .withClusterIdentifier(awsReportDataSource.getDbInstanceIdentifier());
    String endpoint = awsReportDataSource.getAWSRegion();
    if (endpoint != null) {
        // 'Redshift' is a service-prefix constant declared elsewhere in this class;
        // the resulting endpoint has the form "<prefix>.<region>".
        redshiftClient.setEndpoint(Redshift + "." + endpoint);
    }
    Cluster cluster;
    DescribeClustersResult describeClustersResult = redshiftClient.describeClusters(describeClustersRequest);
    if (describeClustersResult != null && describeClustersResult.getClusters() != null
            && describeClustersResult.getClusters().size() > 0) {
        // The request filtered by cluster identifier, so the first (only) result is ours.
        cluster = describeClustersResult.getClusters().get(0);
        if (!cluster.getClusterStatus().equals(awsDataSourceActiveStatus)) {
            throw new JSException(getErrorMessage("aws.exception.datasource.recovery.instance.not.active"));
        }
        Map<String, String> awsDSInstanceDetails = new HashMap<String, String>();
        awsDSInstanceDetails.put(DB_REGION, parseRegionFromSubRegion(cluster.getAvailabilityZone()));
        // A non-empty VPC id means the cluster lives in a VPC and must be reached
        // through a VPC security group rather than a classic cluster security group.
        String vpcId = cluster.getVpcId();
        if (isNotEmpty(vpcId)) {
            awsDSInstanceDetails.put(DB_VPC_ID, vpcId);
        } else {
            awsDSInstanceDetails.put(DB_VPC_ID, null);
        }

        String instanceSourceIp = determineSourceIpAddress(awsDSInstanceDetails);

        if (!isNotEmpty(instanceSourceIp)) {
            throw new JSException(
                    getErrorMessage("aws.exception.datasource.recovery.public.ip.not.determined"));
        }
        // IP that should be added to the CIDRIP of the JS DB security group
        // (ingressIpPermission is the CIDR suffix, e.g. "/32").
        String ingressIpMask = instanceSourceIp + ingressIpPermission;

        String vpcSecurityGroupId = null;
        if (awsDSInstanceDetails.get(DB_VPC_ID) != null) {
            // Recover the VPC security group (EC2-level) and remember its id so the
            // cluster-membership branch below knows which path we took.
            vpcSecurityGroupId = recoverVpcSecurityGroup(awsReportDataSource,
                    awsDSInstanceDetails.get(DB_VPC_ID), ingressIpMask);
        } else {
            // Recover the classic cluster security group.

            // Tracks whether an existing JS DB security group was found
            // (variable name keeps the original "Fount" spelling).
            Boolean jsSecurityGroupMembershipFount = true;

            ClusterSecurityGroup clusterSecurityGroup = null;
            try {
                DescribeClusterSecurityGroupsRequest describeClusterSecurityGroupsRequest = new DescribeClusterSecurityGroupsRequest()
                        .withClusterSecurityGroupName(awsProperties.getSecurityGroupName());
                DescribeClusterSecurityGroupsResult describeClusterSecurityGroupsResult = redshiftClient
                        .describeClusterSecurityGroups(describeClusterSecurityGroupsRequest);
                clusterSecurityGroup = describeClusterSecurityGroupsResult.getClusterSecurityGroups().get(0);
            } catch (ClusterSecurityGroupNotFoundException ex) {
                // Expected when the group does not exist yet; it is created below.
                jsSecurityGroupMembershipFount = false;
            }

            boolean ingressIpMaskExist = false;
            if (jsSecurityGroupMembershipFount) {
                // Check whether our current source IP is already authorized.
                List<com.amazonaws.services.redshift.model.IPRange> ipRanges = clusterSecurityGroup
                        .getIPRanges();
                for (com.amazonaws.services.redshift.model.IPRange ipRange : ipRanges) {
                    if (ipRange.getCIDRIP().contains(ingressIpMask)) {
                        ingressIpMaskExist = true;
                        break;
                    }
                }
                if (!ingressIpMaskExist) {
                    // Our IP changed: revoke all stale ingress rules before
                    // authorizing the new mask below.
                    for (com.amazonaws.services.redshift.model.IPRange ipRange : ipRanges) {
                        RevokeClusterSecurityGroupIngressRequest revokeClusterSecurityGroupIngressRequest = new RevokeClusterSecurityGroupIngressRequest()
                                .withClusterSecurityGroupName(awsProperties.getSecurityGroupName())
                                .withCIDRIP(ipRange.getCIDRIP());
                        redshiftClient
                                .revokeClusterSecurityGroupIngress(revokeClusterSecurityGroupIngressRequest);
                    }
                }
            } else {
                // Group did not exist — create it fresh; ingressIpMaskExist stays
                // false so the new mask is authorized below.
                clusterSecurityGroup = redshiftClient
                        .createClusterSecurityGroup(new CreateClusterSecurityGroupRequest()
                                .withClusterSecurityGroupName(awsProperties.getSecurityGroupName())
                                .withDescription(awsProperties.getSecurityGroupDescription()));
            }
            if (!ingressIpMaskExist) {
                redshiftClient
                        .authorizeClusterSecurityGroupIngress(new AuthorizeClusterSecurityGroupIngressRequest()
                                .withClusterSecurityGroupName(
                                        clusterSecurityGroup.getClusterSecurityGroupName())
                                .withCIDRIP(ingressIpMask));
            }
        }
        if (vpcSecurityGroupId == null) {
            // Classic path: make sure the cluster is a member of the JS cluster
            // security group recovered/created above.
            List<ClusterSecurityGroupMembership> clusterSecurityGroupMemberships = cluster
                    .getClusterSecurityGroups();
            List<String> clusterSecurityGroupNames = new ArrayList<String>();
            for (ClusterSecurityGroupMembership clusterSecurityGroupMembership : clusterSecurityGroupMemberships) {
                clusterSecurityGroupNames.add(clusterSecurityGroupMembership.getClusterSecurityGroupName());
            }
            // If the Redshift instance does not yet contain the JS security group, assign it.
            if (!clusterSecurityGroupNames.contains(awsProperties.getSecurityGroupName())) {
                clusterSecurityGroupNames.add(awsProperties.getSecurityGroupName());
                ModifyClusterRequest modifyClusterRequest = new ModifyClusterRequest()
                        .withClusterSecurityGroups(clusterSecurityGroupNames)
                        .withClusterIdentifier(cluster.getClusterIdentifier());
                redshiftClient.modifyCluster(modifyClusterRequest);
            }
        } else {
            // VPC path: make sure the cluster is a member of the recovered VPC security group.
            List<com.amazonaws.services.redshift.model.VpcSecurityGroupMembership> vpcSecurityGroupMemberships = cluster
                    .getVpcSecurityGroups();
            List<String> vpcSecurityGroupIds = new ArrayList<String>();
            for (com.amazonaws.services.redshift.model.VpcSecurityGroupMembership vpcSecurityGroupMembership : vpcSecurityGroupMemberships) {
                vpcSecurityGroupIds.add(vpcSecurityGroupMembership.getVpcSecurityGroupId());
            }
            // If the Redshift instance does not yet contain the VPC security group, assign it.
            if (!vpcSecurityGroupIds.contains(vpcSecurityGroupId)) {
                vpcSecurityGroupIds.add(vpcSecurityGroupId);
                ModifyClusterRequest modifyClusterRequest = new ModifyClusterRequest()
                        .withVpcSecurityGroupIds(vpcSecurityGroupIds)
                        .withClusterIdentifier(cluster.getClusterIdentifier());
                redshiftClient.modifyCluster(modifyClusterRequest);
            }
        }
    }
}

From source file:com.jaspersoft.jasperserver.war.amazon.client.AwsDataSourceServiceImpl.java

License:Open Source License

/**
 * Converts the active Redshift clusters into data-source DTOs suitable for the UI.
 *
 * <p>Only clusters whose status equals {@code awsDataSourceActiveStatus}
 * (case-insensitively) are included. Redshift is PostgreSQL-compatible, so the
 * engine is always reported as {@code "postgresql"}.
 *
 * @param dbClusters      clusters returned by {@code DescribeClusters}; may be null
 * @param amazonDBService the Amazon DB service label to stamp on each DTO
 * @return DTOs for the active clusters, or a single status DTO
 *         ({@code "resource.dataSource.aws.empty"}) when no clusters were returned
 */
private List<AwsDBInstanceDTO> toRedshiftInstancesDTOs(List<Cluster> dbClusters, String amazonDBService) {
    // Guard clause: no clusters at all -> report the "empty" status to the caller.
    if (dbClusters == null || dbClusters.isEmpty()) {
        return generateDBServiceInfoStatus(amazonDBService, "resource.dataSource.aws.empty");
    }
    List<AwsDBInstanceDTO> awsDBInstanceDTOs = new ArrayList<AwsDBInstanceDTO>();
    for (Cluster dbCluster : dbClusters) {
        // equalsIgnoreCase with the constant as receiver is locale-independent and
        // null-safe, unlike the previous getClusterStatus().toLowerCase().equals(...)
        // which would NPE on a null status and misbehave under Turkish-style locales.
        if (awsDataSourceActiveStatus.equalsIgnoreCase(dbCluster.getClusterStatus())) {
            AwsDBInstanceDTO awsDBInstanceDTO = new AwsDBInstanceDTO();
            awsDBInstanceDTO.setdBInstanceIdentifier(dbCluster.getClusterIdentifier());
            awsDBInstanceDTO.setdBName(dbCluster.getDBName());
            awsDBInstanceDTO.setEngine("postgresql");
            awsDBInstanceDTO.setEngineVersion(null);
            awsDBInstanceDTO.setAddress(dbCluster.getEndpoint().getAddress());
            awsDBInstanceDTO.setPort(dbCluster.getEndpoint().getPort());
            awsDBInstanceDTO.setAmazonDbService(amazonDBService.toLowerCase());

            updateWithConnectionUrl(awsDBInstanceDTO);

            awsDBInstanceDTOs.add(awsDBInstanceDTO);
        }
    }
    return awsDBInstanceDTOs;
}

From source file:com.optimalbi.AmazonAccount.java

License:Apache License

/**
 * Discovers Redshift clusters in every configured region and registers each as a
 * {@link LocalRedshiftService}, attaching pricing when available.
 *
 * <p>Regions that do not support Redshift are skipped (logged). The {@code completed}
 * counter is incremented once per region regardless of outcome.
 *
 * @throws AmazonClientException if listing clusters fails in any supported region;
 *                               the original exception is preserved as the cause
 */
private void populateRedshift() throws AmazonClientException {
    for (Region region : getRegions()) {
        try {
            if (region.isServiceSupported(ServiceAbbreviations.RedShift)) {
                AmazonRedshiftClient redshift = new AmazonRedshiftClient(getCredentials().getCredentials());
                redshift.setRegion(region);

                List<Cluster> clusters;
                try {
                    clusters = redshift.describeClusters().getClusters();
                } catch (Exception e) {
                    // Preserve the underlying exception as the cause so the full
                    // stack trace survives (previously only the message was kept).
                    throw new AmazonClientException("Failed to get clusters " + e.getMessage(), e);
                }

                getLogger().info("Redshift, Adding " + clusters.size() + " clusters from " + region.getName());
                for (Cluster cluster : clusters) {
                    getLogger().info("Cluster: " + cluster.getClusterIdentifier());
                    LocalRedshiftService temp = new LocalRedshiftService(cluster.getDBName(), getCredentials(),
                            region, cluster, getLogger());
                    if (servicePricings != null && servicePricings.size() > 0) {
                        temp.attachPricing(servicePricings.get(region).getRedshiftPricing());
                    }
                    services.add(temp);
                }
            } else {
                getLogger().info("Redshift, NOPE from " + region.getName());
            }
        } catch (AmazonClientException e) {
            // Re-wrap with the region name for context, keeping the cause chain intact.
            throw new AmazonClientException(region.getName() + " " + e.getMessage(), e);
        }
        completed.set(completed.get() + 1);
    }
}

From source file:com.optimalbi.Controller.AmazonAccount.java

License:Apache License

/**
 * Discovers Redshift clusters in every configured region and registers each as a
 * {@link LocalRedshiftService} keyed by its cluster identifier.
 *
 * <p>Regions that do not support Redshift are skipped (logged). The {@code completed}
 * counter is incremented once per region regardless of outcome.
 *
 * @throws AmazonClientException if listing clusters fails in any supported region;
 *                               the original exception is preserved as the cause
 */
private void populateRedshift() throws AmazonClientException {
    for (Region region : getRegions()) {
        try {
            if (region.isServiceSupported(ServiceAbbreviations.RedShift)) {
                AmazonRedshiftClient redshift = new AmazonRedshiftClient(getCredentials().getCredentials());
                redshift.setRegion(region);

                List<Cluster> clusters;
                try {
                    clusters = redshift.describeClusters().getClusters();
                } catch (Exception e) {
                    // Preserve the underlying exception as the cause so the full
                    // stack trace survives (previously only the message was kept).
                    throw new AmazonClientException("Failed to get clusters " + e.getMessage(), e);
                }

                getLogger().info("Redshift, Adding " + clusters.size() + " clusters from " + region.getName());
                for (Cluster cluster : clusters) {
                    getLogger().info("Cluster: " + cluster.getClusterIdentifier());
                    services.add(new LocalRedshiftService(cluster.getClusterIdentifier(), getCredentials(),
                            region, cluster, getLogger()));
                }
            } else {
                getLogger().info("Redshift, NOPE from " + region.getName());
            }
        } catch (AmazonClientException e) {
            // Re-wrap with the region name for context, keeping the cause chain intact.
            throw new AmazonClientException(region.getName() + " " + e.getMessage(), e);
        }
        completed.set(completed.get() + 1);
    }
}