Example usage for com.amazonaws.regions.Regions.fromName

List of usage examples for com.amazonaws.regions.Regions.fromName

Introduction

This page collects example usage of com.amazonaws.regions.Regions.fromName.

Prototype

public static Regions fromName(String regionName) 

Document

Returns a region enum corresponding to the given region name.

Usage

From source file:ScalingClient.java

License:Open Source License

private void loadParams() throws Exception {
    if (System.getProperty(STREAM_PARAM) == null) {
        throw new Exception("You must provide a Stream Name");
    } else {
        this.streamName = System.getProperty(STREAM_PARAM);
    }

    this.shardId = System.getProperty(SHARD_ID_PARAM);

    if (System.getProperty(ACTION_PARAM) == null) {
        throw new Exception("You must provide a Scaling Action");
    } else {
        this.scalingAction = ScalingAction.valueOf(System.getProperty(ACTION_PARAM));

        // ensure the action is one of the supported types for shards
        if (this.shardId != null && !(this.scalingAction.equals(StreamScaler.ScalingAction.split)
                || this.scalingAction.equals(StreamScaler.ScalingAction.merge))) {
            throw new Exception("Can only Split or Merge Shards");
        }
    }

    if (System.getProperty(REGION_PARAM) != null) {
        this.region = Region.getRegion(Regions.fromName(System.getProperty(REGION_PARAM)));
    }

    if (this.scalingAction != ScalingAction.report) {
        if (System.getProperty(SCALE_COUNT_PARAM) == null && System.getProperty(SCALE_PCT_PARAM) == null)
            throw new Exception("You must provide either a scaling Count or Percentage");

        if (System.getProperty(SCALE_COUNT_PARAM) != null && System.getProperty(SCALE_PCT_PARAM) != null)
            throw new Exception("You must provide either a scaling Count or Percentage but not both");

        if (this.shardId != null && System.getProperty(SCALE_COUNT_PARAM) == null) {
            throw new Exception("Shards must be scaled by an absolute number only");
        }

        if (System.getProperty(SCALE_COUNT_PARAM) != null) {
            this.scaleCount = Integer.parseInt(System.getProperty(SCALE_COUNT_PARAM));
            this.scaleBy = StreamScaler.ScaleBy.count;
        }

        if (System.getProperty(SCALE_PCT_PARAM) != null) {
            this.scalePct = Double.parseDouble(System.getProperty(SCALE_PCT_PARAM));
            this.scaleBy = StreamScaler.ScaleBy.pct;
        }

        if (System.getProperty(MIN_SHARDS_PARAM) != null) {
            this.minShards = Integer.parseInt(System.getProperty(MIN_SHARDS_PARAM));
        }

        if (System.getProperty(MAX_SHARDS_PARAM) != null) {
            this.maxShards = Integer.parseInt(System.getProperty(MAX_SHARDS_PARAM));
        }
    }

    scaler = new StreamScaler(this.region);
}

From source file:SampleTopology.java

License:Open Source License

private static void configure(String propertiesFile) throws IOException {
    FileInputStream inputStream = new FileInputStream(propertiesFile);
    Properties properties = new Properties();
    try {
        properties.load(inputStream);
    } finally {
        inputStream.close();
    }

    String topologyNameOverride = properties.getProperty(ConfigKeys.TOPOLOGY_NAME_KEY);
    if (topologyNameOverride != null) {
        topologyName = topologyNameOverride;
    }
    LOG.info("Using topology name " + topologyName);

    String streamNameOverride = properties.getProperty(ConfigKeys.STREAM_NAME_KEY);
    if (streamNameOverride != null) {
        streamName = streamNameOverride;
    }
    LOG.info("Using stream name " + streamName);

    String initialPositionOverride = properties.getProperty(ConfigKeys.INITIAL_POSITION_IN_STREAM_KEY);
    if (initialPositionOverride != null) {
        initialPositionInStream = InitialPositionInStream.valueOf(initialPositionOverride);
    }
    LOG.info("Using initial position " + initialPositionInStream.toString()
            + " (if a checkpoint is not found).");

    String recordRetryLimitOverride = properties.getProperty(ConfigKeys.RECORD_RETRY_LIMIT);
    if (recordRetryLimitOverride != null) {
        recordRetryLimit = Integer.parseInt(recordRetryLimitOverride.trim());
    }
    LOG.info("Using recordRetryLimit " + recordRetryLimit);

    String regionOverride = properties.getProperty(ConfigKeys.REGION_KEY);
    if (regionOverride != null) {
        region = Regions.fromName(regionOverride);
    }
    LOG.info("Using region " + region.getName());

    String zookeeperEndpointOverride = properties.getProperty(ConfigKeys.ZOOKEEPER_ENDPOINT_KEY);
    if (zookeeperEndpointOverride != null) {
        zookeeperEndpoint = zookeeperEndpointOverride;
    }
    LOG.info("Using zookeeper endpoint " + zookeeperEndpoint);

    String zookeeperPrefixOverride = properties.getProperty(ConfigKeys.ZOOKEEPER_PREFIX_KEY);
    if (zookeeperPrefixOverride != null) {
        zookeeperPrefix = zookeeperPrefixOverride;
    }
    LOG.info("Using zookeeper prefix " + zookeeperPrefix);

}

From source file:aot.storage.s3.CustomStorage.java

License:Open Source License

protected static AmazonS3 createS3(String[] ids) {
    AmazonS3 s3;
    // ids[1] optionally holds "KEY:SECRET" credentials, ids[2] a region
    // name, and ids[3] an endpoint override; each access requires the
    // array to be long enough.
    if ((ids.length > 1) && !ids[1].trim().isEmpty()) {
        String[] creds = ids[1].split(":");
        s3 = new AmazonS3Client(new BasicAWSCredentials(creds[0], creds[1]));
    } else {
        s3 = new AmazonS3Client();
    }
    if ((ids.length > 2) && !ids[2].trim().isEmpty()) {
        s3.setRegion(Region.getRegion(Regions.fromName(ids[2])));
    }
    if ((ids.length > 3) && !ids[3].trim().isEmpty()) {
        s3.setEndpoint(ids[3]);
    }
    return s3;
}

From source file:apphub.storage.s3.CustomStorage.java

License:Open Source License

protected static AmazonS3 createS3(URL url) {
    AmazonS3 s3;
    String userInfo = url.getUserInfo();
    if (userInfo != null) {
        String[] creds = userInfo.split(":");
        if (creds.length == 2) {
            s3 = new AmazonS3Client(new BasicAWSCredentials(creds[0], creds[1]));
        } else {
            throw new CreateStorageException(url.toString(),
                    "Credentials for S3 storage must be in form of KEY:SECRET");
        }
    } else {
        s3 = new AmazonS3Client();
    }
    Map<String, String> queryParameters = UrlUtil.getQueryParameters(url);
    String region = queryParameters.get("region");
    if (region != null) {
        s3.setRegion(Region.getRegion(Regions.fromName(region)));
    }
    String endpoint = queryParameters.get("endpoint");
    if (endpoint != null) {
        s3.setEndpoint(endpoint);
    }
    return s3;
}

From source file:awslabs.lab51.SolutionCode.java

License:Open Source License

@Override
public AmazonS3Client createS3Client(AWSCredentials credentials) {
    Region region = Region.getRegion(Regions.fromName(System.getProperty("REGION")));
    // Pass the supplied credentials through to the client.
    AmazonS3Client client = new AmazonS3Client(credentials);
    client.setRegion(region);

    return client;
}

From source file:awslabs.lab51.SolutionCode.java

License:Open Source License

@Override
public AmazonDynamoDBClient createDynamoDbClient(AWSCredentials credentials) {
    Region region = Region.getRegion(Regions.fromName(System.getProperty("REGION")));
    // Pass the supplied credentials through to the client.
    AmazonDynamoDBClient client = new AmazonDynamoDBClient(credentials);
    client.setRegion(region);

    return client;
}

From source file:br.com.ingenieux.jenkins.plugins.awsebdeployment.Deployer.java

License:Apache License

private void initAWS() {
    log("Creating S3 and AWSEB Client (AWS Access Key Id: %s, region: %s)", context.getAwsAccessKeyId(),
            context.getAwsRegion());

    AWSCredentialsProvider credentials = new AWSCredentialsProviderChain(new StaticCredentialsProvider(
            new BasicAWSCredentials(context.getAwsAccessKeyId(), context.getAwsSecretSharedKey())));
    Region region = Region.getRegion(Regions.fromName(context.getAwsRegion()));
    ClientConfiguration clientConfig = new ClientConfiguration();

    clientConfig.setUserAgent("ingenieux CloudButler/" + getVersion());

    s3 = region.createClient(AmazonS3Client.class, credentials, clientConfig);
    awseb = region.createClient(AWSElasticBeanstalkClient.class, credentials, clientConfig);
}

From source file:ch.cyberduck.core.kms.KMSEncryptionFeature.java

License:Open Source License

/**
 * @return List of IDs of KMS managed keys
 */
@Override
public Set<Algorithm> getKeys(final Path container, final LoginCallback prompt) throws BackgroundException {
    final Set<Algorithm> keys = super.getKeys(container, prompt);
    try {
        keys.addAll(this.authenticated(new Authenticated<Set<Algorithm>>() {
            @Override
            public Set<Algorithm> call() throws BackgroundException {
                // Create new IAM credentials
                final AWSKMSClient client = new AWSKMSClient(new com.amazonaws.auth.AWSCredentials() {
                    @Override
                    public String getAWSAccessKeyId() {
                        return host.getCredentials().getUsername();
                    }

                    @Override
                    public String getAWSSecretKey() {
                        return host.getCredentials().getPassword();
                    }
                }, configuration);
                final Location feature = session.getFeature(Location.class);
                final Location.Name region = feature.getLocation(containerService.getContainer(container));
                client.setRegion(Region.getRegion(Regions.fromName(region.getIdentifier())));
                try {
                    final Map<String, String> aliases = new HashMap<String, String>();
                    for (AliasListEntry entry : client.listAliases().getAliases()) {
                        aliases.put(entry.getTargetKeyId(), entry.getAliasName());
                    }
                    final Set<Algorithm> keys = new HashSet<Algorithm>();
                    for (KeyListEntry entry : client.listKeys().getKeys()) {
                        keys.add(new AliasedAlgorithm(entry, aliases.get(entry.getKeyId()), region));
                    }
                    return keys;
                } catch (AmazonClientException e) {
                    throw new AmazonServiceExceptionMappingService().map("Cannot read AWS KMS configuration",
                            e);
                } finally {
                    client.shutdown();
                }
            }
        }, prompt));
    } catch (AccessDeniedException e) {
        log.warn(String.format("Ignore failure reading keys from KMS. %s", e.getMessage()));
        keys.add(SSE_KMS_DEFAULT);
    }
    return keys;
}

From source file:co.cask.cdap.template.etl.realtime.source.SqsSource.java

License:Apache License

@Override
public void initialize(RealtimeContext context) {
    try {
        SQSConnectionFactory.Builder sqsBuild = SQSConnectionFactory.builder()
                .withRegion(Region.getRegion(Regions.fromName(config.region)));
        connectionFactory = config.endpoint == null ? sqsBuild.build()
                : sqsBuild.withEndpoint(config.endpoint).build();
        connection = connectionFactory.createConnection(config.accessID, config.accessKey);
        session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        consumer = session.createConsumer(session.createQueue(config.queueName));
        connection.start();
    } catch (Exception e) {
        if (session != null) {
            try {
                session.close();
            } catch (Exception ex1) {
                LOG.warn("Exception when closing session", ex1);
            }
        }
        if (connection != null) {
            try {
                connection.close();
            } catch (Exception ex2) {
                LOG.warn("Exception when closing connection", ex2);
            }
        }
        if (consumer != null) {
            try {
                consumer.close();
            } catch (Exception ex3) {
                LOG.warn("Exception when closing consumer", ex3);
            }
        }
        LOG.error("Failed to connect to SQS", e);
        throw new IllegalStateException("Could not connect to SQS.", e);
    }
}

From source file:co.cask.hydrator.plugin.realtime.source.SqsSource.java

License:Apache License

@Override
public void initialize(RealtimeContext context) {
    try {
        super.initialize(context);
        SQSConnectionFactory.Builder sqsBuild = SQSConnectionFactory.builder()
                .withRegion(Region.getRegion(Regions.fromName(config.region)));
        connectionFactory = config.endpoint == null ? sqsBuild.build()
                : sqsBuild.withEndpoint(config.endpoint).build();
        connection = connectionFactory.createConnection(config.accessID, config.accessKey);
        session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        consumer = session.createConsumer(session.createQueue(config.queueName));
        connection.start();
    } catch (Exception e) {
        if (session != null) {
            try {
                session.close();
            } catch (Exception ex1) {
                LOG.warn("Exception when closing session", ex1);
            }
        }
        if (connection != null) {
            try {
                connection.close();
            } catch (Exception ex2) {
                LOG.warn("Exception when closing connection", ex2);
            }
        }
        if (consumer != null) {
            try {
                consumer.close();
            } catch (Exception ex3) {
                LOG.warn("Exception when closing consumer", ex3);
            }
        }
        LOG.error("Failed to connect to SQS", e);
        throw new IllegalStateException("Could not connect to SQS.", e);
    }
}