Example usage for com.amazonaws.services.redshift AmazonRedshiftClient AmazonRedshiftClient

List of usage examples for com.amazonaws.services.redshift AmazonRedshiftClient AmazonRedshiftClient

Introduction

In this page you can find the example usage for com.amazonaws.services.redshift AmazonRedshiftClient AmazonRedshiftClient.

Prototype

AmazonRedshiftClient(AwsSyncClientParams clientParams) 

Source Link

Document

Constructs a new client to invoke service methods on Amazon Redshift using the specified parameters.

Usage

From source file:com.amazon.services.awsrum.kinesis.KinesisConnectorExecutor.java

License:Open Source License

/**
 * Helper method to create the Redshift cluster
 * /*from  w w w .ja  va 2 s . c  om*/
 * @param clusterIdentifier
 *            Unique identifier for the name of the Redshift cluster
 * @param databaseName
 *            Name for the database in the Redshift cluster
 * @param clusterType
 *            dw.hs1.xlarge or dw.hs1.8xlarge
 * @param numberOfNodes
 *            Number of nodes for the Redshift cluster
 */
private void createRedshiftCluster(String clusterIdentifier, String databaseName, String clusterType,
        int numberOfNodes) {
    // Make sure Redshift cluster is available
    AmazonRedshiftClient redshiftClient = new AmazonRedshiftClient(config.AWS_CREDENTIALS_PROVIDER);
    redshiftClient.setEndpoint(config.REDSHIFT_ENDPOINT);
    RedshiftUtils.createCluster(redshiftClient, clusterIdentifier, databaseName, config.REDSHIFT_USERNAME,
            config.REDSHIFT_PASSWORD, clusterType, numberOfNodes);

    // Update Redshift connection url
    config.REDSHIFT_URL = RedshiftUtils.getClusterURL(redshiftClient, clusterIdentifier);
}

From source file:com.jaspersoft.jasperserver.api.engine.jasperreports.util.AwsDataSourceRecovery.java

License:Open Source License

/**
 * Ensures the Redshift cluster behind the given data source accepts connections
 * from this server: determines the server's source IP, then either updates the
 * cluster's VPC security group (VPC clusters) or (re)creates a classic cluster
 * security group with the correct ingress CIDR, and finally attaches the group
 * to the cluster if it is not already a member.
 *
 * @param awsReportDataSource data source holding AWS credentials, region and
 *                            the target cluster identifier
 * @throws Exception if the cluster is not active, the source IP cannot be
 *                   determined, or any AWS call fails
 */
private void createRedshiftSecurityGroup(AwsReportDataSource awsReportDataSource) throws Exception {

    AWSCredentials awsCredentials = AwsCredentialUtil.getAWSCredentials(awsReportDataSource.getAWSAccessKey(),
            awsReportDataSource.getAWSSecretKey(), awsReportDataSource.getRoleARN());

    AmazonRedshiftClient redshiftClient = new AmazonRedshiftClient(awsCredentials);
    DescribeClustersRequest describeClustersRequest = new DescribeClustersRequest()
            .withClusterIdentifier(awsReportDataSource.getDbInstanceIdentifier());
    // The stored "region" is used as an endpoint suffix, e.g. "redshift.<region>...".
    String endpoint = awsReportDataSource.getAWSRegion();
    if (endpoint != null) {
        redshiftClient.setEndpoint(Redshift + "." + endpoint);
    }
    Cluster cluster;
    DescribeClustersResult describeClustersResult = redshiftClient.describeClusters(describeClustersRequest);
    if (describeClustersResult != null && describeClustersResult.getClusters() != null
            && describeClustersResult.getClusters().size() > 0) {
        cluster = describeClustersResult.getClusters().get(0);
        // Only an active cluster can have its security groups modified meaningfully.
        if (!cluster.getClusterStatus().equals(awsDataSourceActiveStatus)) {
            throw new JSException(getErrorMessage("aws.exception.datasource.recovery.instance.not.active"));
        }
        Map<String, String> awsDSInstanceDetails = new HashMap<String, String>();
        awsDSInstanceDetails.put(DB_REGION, parseRegionFromSubRegion(cluster.getAvailabilityZone()));
        // A non-empty VPC id means the cluster uses VPC security groups instead of
        // classic cluster security groups.
        String vpcId = cluster.getVpcId();
        if (isNotEmpty(vpcId)) {
            awsDSInstanceDetails.put(DB_VPC_ID, vpcId);
        } else {
            awsDSInstanceDetails.put(DB_VPC_ID, null);
        }

        String instanceSourceIp = determineSourceIpAddress(awsDSInstanceDetails);

        if (!isNotEmpty(instanceSourceIp)) {
            throw new JSException(
                    getErrorMessage("aws.exception.datasource.recovery.public.ip.not.determined"));
        }
        //IP that should be added in CIDRIP of JS DB Security Group
        String ingressIpMask = instanceSourceIp + ingressIpPermission;

        String vpcSecurityGroupId = null;
        if (awsDSInstanceDetails.get(DB_VPC_ID) != null) {
            //Recover VPC Security Group.
            vpcSecurityGroupId = recoverVpcSecurityGroup(awsReportDataSource,
                    awsDSInstanceDetails.get(DB_VPC_ID), ingressIpMask);
        } else {
            //Recover Cluster Security Group.

            // Whether the JasperServer DB security group already exists
            // (flipped to false if the describe call throws "not found").
            Boolean jsSecurityGroupMembershipFount = true;

            ClusterSecurityGroup clusterSecurityGroup = null;
            try {
                DescribeClusterSecurityGroupsRequest describeClusterSecurityGroupsRequest = new DescribeClusterSecurityGroupsRequest()
                        .withClusterSecurityGroupName(awsProperties.getSecurityGroupName());
                DescribeClusterSecurityGroupsResult describeClusterSecurityGroupsResult = redshiftClient
                        .describeClusterSecurityGroups(describeClusterSecurityGroupsRequest);
                clusterSecurityGroup = describeClusterSecurityGroupsResult.getClusterSecurityGroups().get(0);
            } catch (ClusterSecurityGroupNotFoundException ex) {
                jsSecurityGroupMembershipFount = false;
            }

            // True when the group already authorizes our current source IP mask.
            boolean ingressIpMaskExist = false;
            if (jsSecurityGroupMembershipFount) {
                List<com.amazonaws.services.redshift.model.IPRange> ipRanges = clusterSecurityGroup
                        .getIPRanges();
                for (com.amazonaws.services.redshift.model.IPRange ipRange : ipRanges) {
                    if (ipRange.getCIDRIP().contains(ingressIpMask)) {
                        ingressIpMaskExist = true;
                        break;
                    }
                }
                if (!ingressIpMaskExist) {
                    // Our IP is not authorized: revoke all stale ingress IPs before
                    // re-authorizing with the current mask below.
                    for (com.amazonaws.services.redshift.model.IPRange ipRange : ipRanges) {
                        RevokeClusterSecurityGroupIngressRequest revokeClusterSecurityGroupIngressRequest = new RevokeClusterSecurityGroupIngressRequest()
                                .withClusterSecurityGroupName(awsProperties.getSecurityGroupName())
                                .withCIDRIP(ipRange.getCIDRIP());
                        redshiftClient
                                .revokeClusterSecurityGroupIngress(revokeClusterSecurityGroupIngressRequest);
                    }
                }
            } else {
                // Group does not exist yet: create it fresh.
                clusterSecurityGroup = redshiftClient
                        .createClusterSecurityGroup(new CreateClusterSecurityGroupRequest()
                                .withClusterSecurityGroupName(awsProperties.getSecurityGroupName())
                                .withDescription(awsProperties.getSecurityGroupDescription()));
            }
            // Authorize the current source IP unless it was already present.
            if (!ingressIpMaskExist) {
                redshiftClient
                        .authorizeClusterSecurityGroupIngress(new AuthorizeClusterSecurityGroupIngressRequest()
                                .withClusterSecurityGroupName(
                                        clusterSecurityGroup.getClusterSecurityGroupName())
                                .withCIDRIP(ingressIpMask));
            }
        }
        if (vpcSecurityGroupId == null) {
            // Classic path: make sure the cluster lists our cluster security group.
            List<ClusterSecurityGroupMembership> clusterSecurityGroupMemberships = cluster
                    .getClusterSecurityGroups();
            List<String> clusterSecurityGroupNames = new ArrayList<String>();
            for (ClusterSecurityGroupMembership clusterSecurityGroupMembership : clusterSecurityGroupMemberships) {
                clusterSecurityGroupNames.add(clusterSecurityGroupMembership.getClusterSecurityGroupName());
            }
            //If Redshift Instance does not contain JSSecurityGroup that we should assign it to.
            if (!clusterSecurityGroupNames.contains(awsProperties.getSecurityGroupName())) {
                clusterSecurityGroupNames.add(awsProperties.getSecurityGroupName());
                ModifyClusterRequest modifyClusterRequest = new ModifyClusterRequest()
                        .withClusterSecurityGroups(clusterSecurityGroupNames)
                        .withClusterIdentifier(cluster.getClusterIdentifier());
                redshiftClient.modifyCluster(modifyClusterRequest);
            }
        } else {
            // VPC path: make sure the cluster lists the recovered VPC security group.
            List<com.amazonaws.services.redshift.model.VpcSecurityGroupMembership> vpcSecurityGroupMemberships = cluster
                    .getVpcSecurityGroups();
            List<String> vpcSecurityGroupIds = new ArrayList<String>();
            for (com.amazonaws.services.redshift.model.VpcSecurityGroupMembership vpcSecurityGroupMembership : vpcSecurityGroupMemberships) {
                vpcSecurityGroupIds.add(vpcSecurityGroupMembership.getVpcSecurityGroupId());
            }
            //If Redshift Instance does not contain VPC Security Group that we should assign it to.
            if (!vpcSecurityGroupIds.contains(vpcSecurityGroupId)) {
                vpcSecurityGroupIds.add(vpcSecurityGroupId);
                ModifyClusterRequest modifyClusterRequest = new ModifyClusterRequest()
                        .withVpcSecurityGroupIds(vpcSecurityGroupIds)
                        .withClusterIdentifier(cluster.getClusterIdentifier());
                redshiftClient.modifyCluster(modifyClusterRequest);
            }
        }
    }
}

From source file:com.jaspersoft.jasperserver.war.amazon.client.AwsDataSourceServiceImpl.java

License:Open Source License

/**
 * Lists the database instances visible to the given AWS credentials for the
 * requested service (RDS or Redshift), translating AWS failures into either
 * status DTOs or user-facing exceptions.
 *
 * @param awsCredentials  credentials used for the AWS API calls
 * @param amazonDBService service name, matched case-insensitively against the
 *                        RDS / Redshift constants
 * @param endpoint        optional region endpoint suffix; {@code null} leaves
 *                        the SDK default endpoint in place
 * @return DTOs for the discovered instances, an empty list for an unknown
 *         service, or a single status DTO describing the failure
 * @throws JSShowOnlyErrorMessage when the credentials are recognizably invalid
 *         (bad access key or secret key), so the user sees a direct message
 */
public List<AwsDBInstanceDTO> getAwsDBInstances(AWSCredentials awsCredentials, String amazonDBService,
        String endpoint) {
    try {
        if (amazonDBService.equalsIgnoreCase(RDS)) {
            //Make RDS service calls to read all available RDS instances
            AmazonRDSClient rdsClient = new AmazonRDSClient(awsCredentials);
            if (endpoint != null) {
                rdsClient.setEndpoint(RDS + "." + endpoint);
            }
            return toRDSInstancesDTOs(getRdsInstances(rdsClient), amazonDBService);
        } else if (amazonDBService.equalsIgnoreCase(Redshift)) {
            //Make RedShift service calls to read all available RedShift instances
            AmazonRedshiftClient redshiftClient = new AmazonRedshiftClient(awsCredentials);
            if (endpoint != null) {
                redshiftClient.setEndpoint(Redshift + "." + endpoint);
            }
            return toRedshiftInstancesDTOs(getRedshiftInstances(redshiftClient), amazonDBService);
        } else {
            return new ArrayList<AwsDBInstanceDTO>();
        }
    } catch (AmazonServiceException ex) {
        log.warn("Loading AWS data source metadata for " + amazonDBService + ": " + ex.getMessage());

        // A 403 carries an error code distinguishing bad credentials from
        // missing permissions; map the known codes to specific responses.
        String errorCode = ex.getErrorCode();
        if (ex.getStatusCode() == 403 && errorCode != null) {
            errorCode = errorCode.toLowerCase();
            if (errorCode.equals(ACCESS_DENIED)) {
                return generateDBServiceInfoStatus(amazonDBService, "resource.dataSource.aws.access.denied");
            } else if (errorCode.equals(INVALID_CLIENT_TOKEN_ID)) {
                throw new JSShowOnlyErrorMessage(
                        messageSource.getMessage("aws.exception.datasource.accessKey.invalid", null,
                                LocaleContextHolder.getLocale()));
            } else if (errorCode.equals(SIGNATURE_DOES_NOT_MATCH)) {
                throw new JSShowOnlyErrorMessage(
                        messageSource.getMessage("aws.exception.datasource.secretKey.invalid", null,
                                LocaleContextHolder.getLocale()));
            }
        }

        return generateDBServiceInfoStatus(amazonDBService, "[" + ex.getMessage() + "]");
    } catch (AmazonClientException ex) {
        // An unknown host almost always means a mistyped/unsupported endpoint.
        if (ex.getCause() instanceof UnknownHostException) {
            return generateDBServiceInfoStatus(endpoint, "resource.dataSource.aws.unknown.host");
        }

        return generateDBServiceInfoStatus(amazonDBService, "[" + ex.getMessage() + "]");
    }
}

From source file:com.optimalbi.AmazonAccount.java

License:Apache License

/**
 * Discovers Redshift clusters in every configured region and registers a
 * LocalRedshiftService for each, attaching per-region pricing when available.
 * Increments {@code completed} after each region is processed.
 *
 * @throws AmazonClientException if a region's clusters cannot be listed; the
 *         region name is prepended to the message and the original exception
 *         is preserved as the cause
 */
private void populateRedshift() throws AmazonClientException {
    for (Region region : getRegions()) {
        try {
            if (region.isServiceSupported(ServiceAbbreviations.RedShift)) {
                AmazonRedshiftClient redshift = new AmazonRedshiftClient(getCredentials().getCredentials());
                redshift.setRegion(region);

                List<Cluster> clusters;
                try {
                    clusters = redshift.describeClusters().getClusters();
                } catch (Exception e) {
                    // Keep the original exception as the cause instead of
                    // flattening it to its message only.
                    throw new AmazonClientException("Failed to get clusters " + e.getMessage(), e);
                }

                getLogger().info("Redshift, Adding " + clusters.size() + " clusters from " + region.getName());
                for (Cluster cluster : clusters) {
                    getLogger().info("Cluster: " + cluster.getClusterIdentifier());
                    LocalRedshiftService temp = new LocalRedshiftService(cluster.getDBName(), getCredentials(),
                            region, cluster, getLogger());
                    // Attach pricing only when pricing data has been loaded.
                    if (servicePricings != null && servicePricings.size() > 0) {
                        temp.attachPricing(servicePricings.get(region).getRedshiftPricing());
                    }
                    services.add(temp);
                }
            } else {
                getLogger().info("Redshift, NOPE from " + region.getName());
            }
        } catch (AmazonClientException e) {
            // Re-wrap with the region name for context, preserving the cause chain.
            throw new AmazonClientException(region.getName() + " " + e.getMessage(), e);
        }
        completed.set(completed.get() + 1);
    }
}

From source file:com.optimalbi.Controller.AmazonAccount.java

License:Apache License

/**
 * Discovers Redshift clusters in every configured region and registers a
 * LocalRedshiftService for each. Increments {@code completed} after each
 * region is processed.
 *
 * @throws AmazonClientException if a region's clusters cannot be listed; the
 *         region name is prepended to the message and the original exception
 *         is preserved as the cause
 */
private void populateRedshift() throws AmazonClientException {
    for (Region region : getRegions()) {
        try {
            if (region.isServiceSupported(ServiceAbbreviations.RedShift)) {
                AmazonRedshiftClient redshift = new AmazonRedshiftClient(getCredentials().getCredentials());
                redshift.setRegion(region);

                List<Cluster> clusters;
                try {
                    clusters = redshift.describeClusters().getClusters();
                } catch (Exception e) {
                    // Keep the original exception as the cause instead of
                    // flattening it to its message only.
                    throw new AmazonClientException("Failed to get clusters " + e.getMessage(), e);
                }

                getLogger().info("Redshift, Adding " + clusters.size() + " clusters from " + region.getName());
                for (Cluster cluster : clusters) {
                    getLogger().info("Cluster: " + cluster.getClusterIdentifier());
                    services.add(new LocalRedshiftService(cluster.getClusterIdentifier(), getCredentials(),
                            region, cluster, getLogger()));
                }
            } else {
                getLogger().info("Redshift, NOPE from " + region.getName());
            }
        } catch (AmazonClientException e) {
            // Re-wrap with the region name for context, preserving the cause chain.
            throw new AmazonClientException(region.getName() + " " + e.getMessage(), e);
        }
        completed.set(completed.get() + 1);
    }
}

From source file:com.swf.common.ConfigHelper.java

License:Open Source License

/**
 * Builds an Amazon Redshift client authenticated with this helper's
 * configured access id and secret key.
 *
 * @return a new Amazon Redshift service client
 */
public AmazonRedshift createRedshiftClient() {
    return new AmazonRedshiftClient(
            new BasicAWSCredentials(this.redshiftAccessId, this.redshiftSecretKey));
}

From source file:jp.buyee.glover.KinesisConnectorExecutor.java

License:Open Source License

/**
 * Creates the Amazon Redshift cluster named by {@code clusterIdentifier} and
 * stores its connection URL in the executor configuration.
 *
 * @param clusterIdentifier
 *        Unique identifier for the name of the Amazon Redshift cluster
 * @param databaseName
 *        Name for the database in the Amazon Redshift cluster
 * @param clusterType
 *        dw.hs1.xlarge or dw.hs1.8xlarge
 * @param numberOfNodes
 *        Number of nodes for the Amazon Redshift cluster
 */
private void createRedshiftCluster(String clusterIdentifier, String databaseName, String clusterType,
        int numberOfNodes) {
    // Build a client pointed at the configured Amazon Redshift endpoint.
    AmazonRedshiftClient client = new AmazonRedshiftClient(config.AWS_CREDENTIALS_PROVIDER);
    client.setEndpoint(config.REDSHIFT_ENDPOINT);

    LOG.info("Creating Amazon Redshift cluster " + clusterIdentifier);
    RedshiftUtils.createCluster(client, clusterIdentifier, databaseName, config.REDSHIFT_USERNAME,
            config.REDSHIFT_PASSWORD, clusterType, numberOfNodes);

    // Record the connection URL of the newly requested cluster.
    config.REDSHIFT_URL = RedshiftUtils.getClusterURL(client, clusterIdentifier);
}