List of usage examples for the com.amazonaws.services.elasticmapreduce AmazonElasticMapReduceClient constructor
AmazonElasticMapReduceClient(AwsSyncClientParams clientParams)
From source file:awswc.AwsConsoleApp.java
License:Open Source License
/**
 * Initializes every AWS client this console application uses.
 *
 * Only security credentials (AWS Access Key ID and Secret Access Key) are
 * required; service endpoints and other configuration are resolved
 * automatically. Proxies and similar client parameters could be supplied via
 * an optional ClientConfiguration when constructing each client.
 *
 * @see com.amazonaws.auth.BasicAWSCredentials
 * @see com.amazonaws.auth.PropertiesCredentials
 * @see com.amazonaws.ClientConfiguration
 * @throws Exception if the credentials properties file cannot be read
 */
public static void init() throws Exception {
    // Credentials are loaded once from a properties file on the classpath
    // and shared by all four clients.
    final AWSCredentials awsCredentials = new PropertiesCredentials(
            AwsConsoleApp.class.getResourceAsStream("AwsCredentials.properties"));
    emr = new AmazonElasticMapReduceClient(awsCredentials);
    sdb = new AmazonSimpleDBClient(awsCredentials);
    s3 = new AmazonS3Client(awsCredentials);
    ec2 = new AmazonEC2Client(awsCredentials);
}
From source file:com.aegeus.aws.ElasticMapReduceService.java
License:Apache License
public ElasticMapReduceService(EmrConfigObject config) { this.config = config; emr = new AmazonElasticMapReduceClient( new BasicAWSCredentials(config.getAccessKey(), config.getSecretKey())); emr.setRegion(Region.getRegion(Regions.fromName(config.getRegion()))); }
From source file:com.clouddrive.parth.NewClass.java
public static void init(String instanceType, String noOfinstances) throws Exception { INSTANCE_COUNT = Integer.parseInt(noOfinstances); switch (instanceType) { case "C1Medium": INSTANCE_TYPE = InstanceType.C1Medium.toString(); break;//from w w w . j av a 2s . c o m case "C1Xlarge": INSTANCE_TYPE = InstanceType.C1Xlarge.toString(); break; case "C32xlarge": INSTANCE_TYPE = InstanceType.C32xlarge.toString(); break; case "C34xlarge": INSTANCE_TYPE = InstanceType.C34xlarge.toString(); break; case "C38xlarge": INSTANCE_TYPE = InstanceType.C38xlarge.toString(); break; case "C3Large": INSTANCE_TYPE = InstanceType.C3Large.toString(); break; case "C3Xlarge": INSTANCE_TYPE = InstanceType.C3Xlarge.toString(); break; case "Cc14xlarge": INSTANCE_TYPE = InstanceType.Cc14xlarge.toString(); break; case "Cc28xlarge": INSTANCE_TYPE = InstanceType.Cc28xlarge.toString(); break; case "Cg14xlarge": INSTANCE_TYPE = InstanceType.Cg14xlarge.toString(); break; case "Cr18xlarge": INSTANCE_TYPE = InstanceType.Cr18xlarge.toString(); break; case "G22xlarge": INSTANCE_TYPE = InstanceType.G22xlarge.toString(); break; case "T1Micro": INSTANCE_TYPE = InstanceType.T1Micro.toString(); break; } //AWSCredentials credentials = new PropertiesCredentials(NewClass.class.getClassLoader().getResourceAsStream( // "AwsCredentials.properties")); // s3 = new AmazonS3Client(credentials); String secretKey = "kxDFnyETb02UrLr4YT3bRjiET+/FNGUMrE3DrU4j"; String accessKey = "AKIAII3DXT3OYD5UV4WQ"; BasicAWSCredentials awsCreds = new BasicAWSCredentials(accessKey, secretKey); s3 = new AmazonS3Client(awsCreds); Region usWest2 = Region.getRegion(Regions.US_WEST_2); ((AmazonWebServiceClient) s3).setRegion(usWest2); emr = new AmazonElasticMapReduceClient(awsCreds); emr.setRegion(Region.getRegion(Regions.EU_WEST_1)); }
From source file:com.swf.common.ConfigHelper.java
License:Open Source License
/**
 * Builds a new Amazon Elastic MapReduce client authenticated with the
 * EMR access id and secret key held by this configuration helper.
 *
 * @return a freshly constructed {@link AmazonElasticMapReduce} client
 */
public AmazonElasticMapReduce createEMRClient() {
    return new AmazonElasticMapReduceClient(
            new BasicAWSCredentials(this.emrAccessId, this.emrSecretKey));
}
From source file:com.trsvax.tapestry.aws.core.services.AWSCoreModule.java
License:Apache License
/**
 * Tapestry IoC module contribution: registers one service per AWS product.
 * Each binding resolves the application's shared AWSCredentials service at
 * build time and wraps it in the matching concrete AWS SDK client. Services
 * are built lazily by the Tapestry registry on first use.
 *
 * @param binder the Tapestry service binder this module contributes to
 */
public static void bind(ServiceBinder binder) {
    // Mail transport binding is intentionally left disabled.
    //binder.bind(AWSMailTransport.class,AWSMailTransportImpl.class);
    binder.bind(AmazonS3.class, new ServiceBuilder<AmazonS3>() {
        public AmazonS3 buildService(ServiceResources serviceResources) {
            return new AmazonS3Client(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonDynamoDB.class, new ServiceBuilder<AmazonDynamoDB>() {
        public AmazonDynamoDB buildService(ServiceResources serviceResources) {
            return new AmazonDynamoDBClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonEC2.class, new ServiceBuilder<AmazonEC2>() {
        public AmazonEC2 buildService(ServiceResources serviceResources) {
            return new AmazonEC2Client(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonSimpleDB.class, new ServiceBuilder<AmazonSimpleDB>() {
        public AmazonSimpleDB buildService(ServiceResources serviceResources) {
            return new AmazonSimpleDBClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonSQS.class, new ServiceBuilder<AmazonSQS>() {
        public AmazonSQS buildService(ServiceResources serviceResources) {
            return new AmazonSQSClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonSNS.class, new ServiceBuilder<AmazonSNS>() {
        public AmazonSNS buildService(ServiceResources serviceResources) {
            return new AmazonSNSClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonRDS.class, new ServiceBuilder<AmazonRDS>() {
        public AmazonRDS buildService(ServiceResources serviceResources) {
            return new AmazonRDSClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonElasticMapReduce.class, new ServiceBuilder<AmazonElasticMapReduce>() {
        public AmazonElasticMapReduce buildService(ServiceResources serviceResources) {
            return new AmazonElasticMapReduceClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonSimpleEmailService.class, new ServiceBuilder<AmazonSimpleEmailService>() {
        public AmazonSimpleEmailService buildService(ServiceResources serviceResources) {
            return new AmazonSimpleEmailServiceClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonElasticLoadBalancing.class, new ServiceBuilder<AmazonElasticLoadBalancing>() {
        public AmazonElasticLoadBalancing buildService(ServiceResources serviceResources) {
            return new AmazonElasticLoadBalancingClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonCloudWatch.class, new ServiceBuilder<AmazonCloudWatch>() {
        public AmazonCloudWatch buildService(ServiceResources serviceResources) {
            return new AmazonCloudWatchClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonAutoScaling.class, new ServiceBuilder<AmazonAutoScaling>() {
        public AmazonAutoScaling buildService(ServiceResources serviceResources) {
            return new AmazonAutoScalingClient(serviceResources.getService(AWSCredentials.class));
        }
    });
    binder.bind(AmazonIdentityManagement.class, new ServiceBuilder<AmazonIdentityManagement>() {
        public AmazonIdentityManagement buildService(ServiceResources serviceResources) {
            return new AmazonIdentityManagementClient(serviceResources.getService(AWSCredentials.class));
        }
    });
}
From source file:fr.ens.biologie.genomique.eoulsan.util.cloud.AWSElasticMapReduceJob.java
License:LGPL
/** * Run the job.//w w w . ja v a2s . co m * @return a the JobFlowId of the job */ public String runJob() { // Get the credentials final AWSCredentials credentials = new BasicAWSCredentials(this.AWSAccessKey, this.AWSSecretKey); // Create the Amazon Elastic MapReduce object this.elasticMapReduceClient = new AmazonElasticMapReduceClient(credentials); // Set the end point this.elasticMapReduceClient.setEndpoint(this.endpoint); this.runFlowResult = this.elasticMapReduceClient.runJobFlow(this.runFlowRequest); return this.runFlowResult.getJobFlowId(); }
From source file:org.finra.dm.dao.impl.EmrDaoImpl.java
License:Apache License
/** * Create the EMR client with the given proxy and access key details. * * @param awsParamsDto AWS related parameters for access/secret keys and proxy details. * * @return the AmazonElasticMapReduceClient object. *//*from w w w .ja v a 2s . c o m*/ @Override public AmazonElasticMapReduceClient getEmrClient(AwsParamsDto awsParamsDto) { // TODO Building EMR client every time requested, if this becomes a performance issue, // might need to consider storing a singleton or building the client once per request. AmazonElasticMapReduceClient emrClient; // Create an EMR client with HTTP proxy information. if (StringUtils.isNotBlank(awsParamsDto.getHttpProxyHost()) && StringUtils.isNotBlank(awsParamsDto.getHttpProxyPort().toString())) { emrClient = new AmazonElasticMapReduceClient(new ClientConfiguration() .withProxyHost(awsParamsDto.getHttpProxyHost()).withProxyPort(awsParamsDto.getHttpProxyPort())); } // Create an EMR client with no proxy information else { emrClient = new AmazonElasticMapReduceClient(); } // Return the client. return emrClient; }
From source file:org.huahinframework.emanager.amazonaws.elasticmapreduce.ElasticMapReduceManager.java
License:Apache License
/** * @param emrProperties//from w w w .jav a 2 s . co m */ public ElasticMapReduceManager(EMRProperties emrProperties) { this.emrProperties = emrProperties; emr = new AmazonElasticMapReduceClient( new BasicAWSCredentials(emrProperties.getAccessKey(), emrProperties.getSecretKey())); s3 = new AmazonS3Client( new BasicAWSCredentials(emrProperties.getAccessKey(), emrProperties.getSecretKey())); if (!isEmpty(emrProperties.getEndpoint())) { emr.setEndpoint(emrProperties.getEndpoint()); s3.setEndpoint(emrProperties.getS3Endpoint()); } }
From source file:org.huahinframework.emanager.rest.service.JobFlowService.java
License:Apache License
/**
 * Initializes the EMR client from the injected properties, applying the
 * endpoint override only when one is configured.
 */
public void init() {
    final BasicAWSCredentials credentials =
            new BasicAWSCredentials(emrProperties.getAccessKey(), emrProperties.getSecretKey());
    emr = new AmazonElasticMapReduceClient(credentials);
    if (!isEmpty(emrProperties.getEndpoint())) {
        emr.setEndpoint(emrProperties.getEndpoint());
    }
}
From source file:org.pentaho.amazon.emr.job.AmazonElasticMapReduceJobExecutor.java
License:Apache License
public Result execute(Result result, int arg1) throws KettleException { Log4jFileAppender appender = null;/*from w ww . jav a 2 s . com*/ String logFileName = "pdi-" + this.getName(); //$NON-NLS-1$ try { appender = LogWriter.createFileAppender(logFileName, true, false); LogWriter.getInstance().addAppender(appender); log.setLogLevel(parentJob.getLogLevel()); } catch (Exception e) { logError(BaseMessages.getString(PKG, "AmazonElasticMapReduceJobExecutor.FailedToOpenLogFile", //$NON-NLS-1$ logFileName, e.toString())); logError(Const.getStackTracker(e)); } try { // create/connect aws service AmazonElasticMapReduceClient emrClient = new AmazonElasticMapReduceClient(awsCredentials); // pull down jar from vfs FileObject jarFile = KettleVFS.getFileObject(buildFilename(jarUrl)); File tmpFile = File.createTempFile("customEMR", "jar"); tmpFile.deleteOnExit(); FileOutputStream tmpFileOut = new FileOutputStream(tmpFile); IOUtils.copy(jarFile.getContent().getInputStream(), tmpFileOut); URL localJarUrl = tmpFile.toURI().toURL(); // find main class in jar String mainClass = getMainClass(localJarUrl); // create staging bucket AmazonS3 s3Client = new AmazonS3Client(awsCredentials); FileSystemOptions opts = new FileSystemOptions(); DefaultFileSystemConfigBuilder.getInstance().setUserAuthenticator(opts, new StaticUserAuthenticator( null, awsCredentials.getAWSAccessKeyId(), awsCredentials.getAWSSecretKey())); FileObject stagingDirFileObject = KettleVFS.getFileObject(stagingDir, getVariables(), opts); String stagingBucketName = stagingDirFileObject.getName().getBaseName(); if (!s3Client.doesBucketExist(stagingBucketName)) { s3Client.createBucket(stagingBucketName); } // delete old jar if needed try { s3Client.deleteObject(stagingBucketName, jarFile.getName().getBaseName()); } catch (Exception ex) { logError(Const.getStackTracker(ex)); } // put jar in s3 staging bucket s3Client.putObject(new PutObjectRequest(stagingBucketName, jarFile.getName().getBaseName(), tmpFile)); // create non-vfs 
s3 url to jar String stagingS3JarUrl = "s3://" + stagingBucketName + "/" + jarFile.getName().getBaseName(); String stagingS3BucketUrl = "s3://" + stagingBucketName; RunJobFlowRequest runJobFlowRequest = null; RunJobFlowResult runJobFlowResult = null; if (StringUtil.isEmpty(hadoopJobFlowId)) { // create EMR job flow runJobFlowRequest = createJobFlow(stagingS3BucketUrl, stagingS3JarUrl, mainClass); // start EMR job runJobFlowResult = emrClient.runJobFlow(runJobFlowRequest); } else { List<String> jarStepArgs = new ArrayList<String>(); if (!StringUtil.isEmpty(cmdLineArgs)) { StringTokenizer st = new StringTokenizer(cmdLineArgs, " "); while (st.hasMoreTokens()) { String token = st.nextToken(); logBasic("adding args: " + token); jarStepArgs.add(token); } } HadoopJarStepConfig hadoopJarStep = new HadoopJarStepConfig(); hadoopJarStep.setJar(stagingS3JarUrl); hadoopJarStep.setMainClass(mainClass); hadoopJarStep.setArgs(jarStepArgs); StepConfig stepConfig = new StepConfig(); stepConfig.setName("custom jar: " + jarUrl); stepConfig.setHadoopJarStep(hadoopJarStep); List<StepConfig> steps = new ArrayList<StepConfig>(); steps.add(stepConfig); AddJobFlowStepsRequest addJobFlowStepsRequest = new AddJobFlowStepsRequest(); addJobFlowStepsRequest.setJobFlowId(hadoopJobFlowId); addJobFlowStepsRequest.setSteps(steps); emrClient.addJobFlowSteps(addJobFlowStepsRequest); } String loggingIntervalS = environmentSubstitute(loggingInterval); int logIntv = 60; try { logIntv = Integer.parseInt(loggingIntervalS); } catch (NumberFormatException ex) { logError("Unable to parse logging interval '" + loggingIntervalS + "' - using " + "default of 60"); } // monitor it / blocking / logging if desired if (blocking) { try { if (log.isBasic()) { String executionState = "RUNNING"; List<String> jobFlowIds = new ArrayList<String>(); String id = hadoopJobFlowId; if (StringUtil.isEmpty(hadoopJobFlowId)) { id = runJobFlowResult.getJobFlowId(); jobFlowIds.add(id); } while (isRunning(executionState)) { 
DescribeJobFlowsRequest describeJobFlowsRequest = new DescribeJobFlowsRequest(); describeJobFlowsRequest.setJobFlowIds(jobFlowIds); DescribeJobFlowsResult describeJobFlowsResult = emrClient .describeJobFlows(describeJobFlowsRequest); boolean found = false; for (JobFlowDetail jobFlowDetail : describeJobFlowsResult.getJobFlows()) { if (jobFlowDetail.getJobFlowId().equals(id)) { executionState = jobFlowDetail.getExecutionStatusDetail().getState(); found = true; } } if (!found) { break; } // logBasic(BaseMessages.getString(PKG, "AmazonElasticMapReduceJobExecutor.RunningPercent", setupPercent, // mapPercent, reducePercent)); logBasic(hadoopJobName + " execution status: " + executionState); try { if (isRunning(executionState)) { Thread.sleep(logIntv * 1000); } } catch (InterruptedException ie) { // Ignore } } if ("FAILED".equalsIgnoreCase(executionState)) { result.setStopped(true); result.setNrErrors(1); result.setResult(false); S3Object outObject = s3Client.getObject(stagingBucketName, id + "/steps/1/stdout"); ByteArrayOutputStream outStream = new ByteArrayOutputStream(); IOUtils.copy(outObject.getObjectContent(), outStream); logError(outStream.toString()); S3Object errorObject = s3Client.getObject(stagingBucketName, id + "/steps/1/stderr"); ByteArrayOutputStream errorStream = new ByteArrayOutputStream(); IOUtils.copy(errorObject.getObjectContent(), errorStream); logError(errorStream.toString()); } } } catch (Exception e) { logError(e.getMessage(), e); } } } catch (Throwable t) { t.printStackTrace(); result.setStopped(true); result.setNrErrors(1); result.setResult(false); logError(t.getMessage(), t); } if (appender != null) { LogWriter.getInstance().removeAppender(appender); appender.close(); ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_LOG, appender.getFile(), parentJob.getJobname(), getName()); result.getResultFiles().put(resultFile.getFile().toString(), resultFile); } return result; }