Example usage for com.amazonaws ClientConfiguration withSignerOverride

Introduction

This page shows example usage of com.amazonaws ClientConfiguration.withSignerOverride.

Prototype

public ClientConfiguration withSignerOverride(final String value) 

Document

Sets the name of the signature algorithm to use for signing requests made by this client.
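
For comparison with the project examples below, here is a minimal standalone sketch of overriding the signer when building an S3 client. The region, the credentials provider, and the builder-based client setup are illustrative assumptions; only withSignerOverride("S3SignerType") itself is taken from the examples on this page.

import com.amazonaws.ClientConfiguration;
import com.amazonaws.auth.profile.ProfileCredentialsProvider;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class SignerOverrideExample {
    public static void main(String[] args) {
        // Force the legacy Signature Version 2 signer ("S3SignerType") instead of
        // the SDK's default Signature Version 4 signer.
        ClientConfiguration clientConfig = new ClientConfiguration()
                .withSignerOverride("S3SignerType");

        // Hypothetical client setup: the region and credentials provider are placeholders.
        AmazonS3 s3 = AmazonS3ClientBuilder.standard()
                .withClientConfiguration(clientConfig)
                .withCredentials(new ProfileCredentialsProvider())
                .withRegion("us-east-1")
                .build();

        s3.listBuckets().forEach(bucket -> System.out.println(bucket.getName()));
    }
}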

Usage

From source file: com.facebook.presto.hive.s3.PrestoS3FileSystem.java

License: Apache License

private AmazonS3Client createAmazonS3Client(URI uri, Configuration hadoopConfig,
        ClientConfiguration clientConfig) {
    AWSCredentialsProvider credentials = getAwsCredentialsProvider(uri, hadoopConfig);
    Optional<EncryptionMaterialsProvider> emp = createEncryptionMaterialsProvider(hadoopConfig);
    AmazonS3Client client;
    String signerType = hadoopConfig.get(S3_SIGNER_TYPE);
    if (signerType != null) {
        clientConfig.withSignerOverride(signerType);
    }
    if (emp.isPresent()) {
        client = new AmazonS3EncryptionClient(credentials, emp.get(), clientConfig, new CryptoConfiguration(),
                METRIC_COLLECTOR);
    } else {
        client = new AmazonS3Client(credentials, clientConfig, METRIC_COLLECTOR);
    }

    if (isPathStyleAccess) {
        S3ClientOptions clientOptions = S3ClientOptions.builder().setPathStyleAccess(true).build();
        client.setS3ClientOptions(clientOptions);
    }

    // use local region when running inside of EC2
    if (pinS3ClientToCurrentRegion) {
        Region region = Regions.getCurrentRegion();
        if (region != null) {
            client.setRegion(region);
        }
    }

    String endpoint = hadoopConfig.get(S3_ENDPOINT);
    if (endpoint != null) {
        client.setEndpoint(endpoint);
    }

    return client;
}

From source file: com.ibm.stocator.fs.cos.COSAPIClient.java

License: Apache License

@Override
public void initiate(String scheme) throws IOException, ConfigurationParseException {
    mCachedSparkOriginated = new HashMap<String, Boolean>();
    mCachedSparkJobsStatus = new HashMap<String, Boolean>();
    schemaProvided = scheme;
    Properties props = ConfigurationHandler.initialize(filesystemURI, conf, scheme);
    // Set bucket name property
    int cacheSize = conf.getInt(CACHE_SIZE, GUAVA_CACHE_SIZE_DEFAULT);
    memoryCache = MemoryCache.getInstance(cacheSize);
    mBucket = props.getProperty(COS_BUCKET_PROPERTY);
    workingDir = new Path("/user", System.getProperty("user.name")).makeQualified(filesystemURI,
            getWorkingDirectory());

    fModeAutomaticDelete = "true".equals(props.getProperty(FMODE_AUTOMATIC_DELETE_COS_PROPERTY, "false"));
    mIsV2Signer = "true".equals(props.getProperty(V2_SIGNER_TYPE_COS_PROPERTY, "false"));
    // Define COS client
    String accessKey = props.getProperty(ACCESS_KEY_COS_PROPERTY);
    String secretKey = props.getProperty(SECRET_KEY_COS_PROPERTY);

    if (accessKey == null) {
        throw new ConfigurationParseException("Access KEY is empty. Please provide valid access key");
    }
    if (secretKey == null) {
        throw new ConfigurationParseException("Secret KEY is empty. Please provide valid secret key");
    }

    BasicAWSCredentials creds = new BasicAWSCredentials(accessKey, secretKey);
    ClientConfiguration clientConf = new ClientConfiguration();

    int maxThreads = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_THREADS, DEFAULT_MAX_THREADS);
    if (maxThreads < 2) {
        LOG.warn(MAX_THREADS + " must be at least 2: forcing to 2.");
        maxThreads = 2;
    }
    int totalTasks = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_TOTAL_TASKS, DEFAULT_MAX_TOTAL_TASKS);
    long keepAliveTime = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, KEEPALIVE_TIME, DEFAULT_KEEPALIVE_TIME);
    threadPoolExecutor = BlockingThreadPoolExecutorService.newInstance(maxThreads, maxThreads + totalTasks,
            keepAliveTime, TimeUnit.SECONDS, "s3a-transfer-shared");

    unboundedThreadPool = new ThreadPoolExecutor(maxThreads, Integer.MAX_VALUE, keepAliveTime, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(),
            BlockingThreadPoolExecutorService.newDaemonThreadFactory("s3a-transfer-unbounded"));

    boolean secureConnections = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, SECURE_CONNECTIONS,
            DEFAULT_SECURE_CONNECTIONS);
    clientConf.setProtocol(secureConnections ? Protocol.HTTPS : Protocol.HTTP);

    String proxyHost = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_HOST, "");
    int proxyPort = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, PROXY_PORT, -1);
    if (!proxyHost.isEmpty()) {
        clientConf.setProxyHost(proxyHost);
        if (proxyPort >= 0) {
            clientConf.setProxyPort(proxyPort);
        } else {
            if (secureConnections) {
                LOG.warn("Proxy host set without port. Using HTTPS default 443");
                clientConf.setProxyPort(443);
            } else {
                LOG.warn("Proxy host set without port. Using HTTP default 80");
                clientConf.setProxyPort(80);
            }
        }
        String proxyUsername = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_USERNAME);
        String proxyPassword = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_PASSWORD);
        if ((proxyUsername == null) != (proxyPassword == null)) {
            String msg = "Proxy error: " + PROXY_USERNAME + " or " + PROXY_PASSWORD + " set without the other.";
            LOG.error(msg);
            throw new IllegalArgumentException(msg);
        }
        clientConf.setProxyUsername(proxyUsername);
        clientConf.setProxyPassword(proxyPassword);
        clientConf.setProxyDomain(Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_DOMAIN));
        clientConf.setProxyWorkstation(Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, PROXY_WORKSTATION));
        if (LOG.isDebugEnabled()) {
            LOG.debug(
                    "Using proxy server {}:{} as user {} with password {} on " + "domain {} as workstation {}",
                    clientConf.getProxyHost(), clientConf.getProxyPort(),
                    String.valueOf(clientConf.getProxyUsername()), clientConf.getProxyPassword(),
                    clientConf.getProxyDomain(), clientConf.getProxyWorkstation());
        }
    } else if (proxyPort >= 0) {
        String msg = "Proxy error: " + PROXY_PORT + " set without " + PROXY_HOST;
        LOG.error(msg);
        throw new IllegalArgumentException(msg);
    }

    initConnectionSettings(conf, clientConf);
    if (mIsV2Signer) {
        clientConf.withSignerOverride("S3SignerType");
    }
    mClient = new AmazonS3Client(creds, clientConf);

    final String serviceUrl = props.getProperty(ENDPOINT_URL_COS_PROPERTY);
    if (serviceUrl != null && !serviceUrl.equals(amazonDefaultEndpoint)) {
        mClient.setEndpoint(serviceUrl);
    }
    mClient.setS3ClientOptions(S3ClientOptions.builder().setPathStyleAccess(true).build());

    // Set block size property
    String mBlockSizeString = props.getProperty(BLOCK_SIZE_COS_PROPERTY, "128");
    mBlockSize = Long.valueOf(mBlockSizeString).longValue() * 1024 * 1024L;

    boolean autoCreateBucket = "true"
            .equalsIgnoreCase((props.getProperty(AUTO_BUCKET_CREATE_COS_PROPERTY, "false")));

    partSize = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, MULTIPART_SIZE, DEFAULT_MULTIPART_SIZE);
    multiPartThreshold = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, MIN_MULTIPART_THRESHOLD,
            DEFAULT_MIN_MULTIPART_THRESHOLD);
    readAhead = Utils.getLong(conf, FS_COS, FS_ALT_KEYS, READAHEAD_RANGE, DEFAULT_READAHEAD_RANGE);
    LOG.debug(READAHEAD_RANGE + ":" + readAhead);
    inputPolicy = COSInputPolicy
            .getPolicy(Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, INPUT_FADVISE, INPUT_FADV_NORMAL));

    initTransferManager();
    maxKeys = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, MAX_PAGING_KEYS, DEFAULT_MAX_PAGING_KEYS);
    flatListingFlag = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, FLAT_LISTING, DEFAULT_FLAT_LISTING);

    if (autoCreateBucket) {
        try {
            boolean bucketExist = mClient.doesBucketExist(mBucket);
            if (bucketExist) {
                LOG.trace("Bucket {} exists", mBucket);
            } else {
                LOG.trace("Bucket {} doesn`t exists and autocreate", mBucket);
                String mRegion = props.getProperty(REGION_COS_PROPERTY);
                if (mRegion == null) {
                    mClient.createBucket(mBucket);
                } else {
                    LOG.trace("Creating bucket {} in region {}", mBucket, mRegion);
                    mClient.createBucket(mBucket, mRegion);
                }
            }
        } catch (AmazonServiceException ase) {
            /*
            *  we ignore the BucketAlreadyExists exception since multiple processes or threads
            *  might try to create the bucket in parallel, therefore it is expected that
            *  some will fail to create the bucket
            */
            if (!ase.getErrorCode().equals("BucketAlreadyExists")) {
                LOG.error(ase.getMessage());
                throw (ase);
            }
        } catch (Exception e) {
            LOG.error(e.getMessage());
            throw (e);
        }
    }

    initMultipartUploads(conf);
    enableMultiObjectsDelete = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, ENABLE_MULTI_DELETE, true);

    blockUploadEnabled = Utils.getBoolean(conf, FS_COS, FS_ALT_KEYS, FAST_UPLOAD, DEFAULT_FAST_UPLOAD);

    if (blockUploadEnabled) {
        blockOutputBuffer = Utils.getTrimmed(conf, FS_COS, FS_ALT_KEYS, FAST_UPLOAD_BUFFER,
                DEFAULT_FAST_UPLOAD_BUFFER);
        partSize = COSUtils.ensureOutputParameterInRange(MULTIPART_SIZE, partSize);
        blockFactory = COSDataBlocks.createFactory(this, blockOutputBuffer);
        blockOutputActiveBlocks = Utils.getInt(conf, FS_COS, FS_ALT_KEYS, FAST_UPLOAD_ACTIVE_BLOCKS,
                DEFAULT_FAST_UPLOAD_ACTIVE_BLOCKS);
        LOG.debug("Using COSBlockOutputStream with buffer = {}; block={};" + " queue limit={}",
                blockOutputBuffer, partSize, blockOutputActiveBlocks);
    } else {
        LOG.debug("Using COSOutputStream");
    }
}

From source file: io.prestosql.plugin.hive.s3.PrestoS3FileSystem.java

License: Apache License

private AmazonS3 createAmazonS3Client(Configuration hadoopConfig, ClientConfiguration clientConfig) {
    Optional<EncryptionMaterialsProvider> encryptionMaterialsProvider = createEncryptionMaterialsProvider(
            hadoopConfig);
    AmazonS3Builder<? extends AmazonS3Builder, ? extends AmazonS3> clientBuilder;

    String signerType = hadoopConfig.get(S3_SIGNER_TYPE);
    if (signerType != null) {
        clientConfig.withSignerOverride(signerType);
    }

    if (encryptionMaterialsProvider.isPresent()) {
        clientBuilder = AmazonS3EncryptionClient.encryptionBuilder().withCredentials(credentialsProvider)
                .withEncryptionMaterials(encryptionMaterialsProvider.get())
                .withClientConfiguration(clientConfig).withMetricsCollector(METRIC_COLLECTOR);
    } else {
        clientBuilder = AmazonS3Client.builder().withCredentials(credentialsProvider)
                .withClientConfiguration(clientConfig).withMetricsCollector(METRIC_COLLECTOR);
    }

    boolean regionOrEndpointSet = false;

    // use local region when running inside of EC2
    if (pinS3ClientToCurrentRegion) {
        Region region = Regions.getCurrentRegion();
        if (region != null) {
            clientBuilder = clientBuilder.withRegion(region.getName());
            regionOrEndpointSet = true;
        }
    }

    String endpoint = hadoopConfig.get(S3_ENDPOINT);
    if (endpoint != null) {
        clientBuilder = clientBuilder.withEndpointConfiguration(new EndpointConfiguration(endpoint, null));
        regionOrEndpointSet = true;
    }

    if (isPathStyleAccess) {
        clientBuilder = clientBuilder.enablePathStyleAccess();
    }

    if (!regionOrEndpointSet) {
        clientBuilder = clientBuilder.withRegion(US_EAST_1);
        clientBuilder.setForceGlobalBucketAccessEnabled(true);
    }

    return clientBuilder.build();
}