Example usage for com.amazonaws.services.elasticmapreduce.model ActionOnFailure TERMINATE_JOB_FLOW


Introduction

This page collects example usages of com.amazonaws.services.elasticmapreduce.model ActionOnFailure TERMINATE_JOB_FLOW.

Prototype

ActionOnFailure TERMINATE_JOB_FLOW

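The following is a minimal, self-contained sketch (not drawn from the projects listed under Usage) of the typical pattern: the constant is set on a StepConfig so that a failure of that step terminates the whole cluster. The bucket, jar, and cluster id below are placeholders. In the EMR API, TERMINATE_JOB_FLOW is retained for backward compatibility; TERMINATE_CLUSTER is the newer equivalent.

import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce;
import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClientBuilder;
import com.amazonaws.services.elasticmapreduce.model.ActionOnFailure;
import com.amazonaws.services.elasticmapreduce.model.AddJobFlowStepsRequest;
import com.amazonaws.services.elasticmapreduce.model.HadoopJarStepConfig;
import com.amazonaws.services.elasticmapreduce.model.StepConfig;

public class TerminateJobFlowSketch {
    public static void main(String[] args) {
        // Placeholder jar location; any runnable Hadoop jar step would do.
        HadoopJarStepConfig jarStep = new HadoopJarStepConfig("s3://my-bucket/my-step.jar");

        // If this step fails, EMR terminates the entire cluster.
        StepConfig step = new StepConfig().withName("Example step")
                .withActionOnFailure(ActionOnFailure.TERMINATE_JOB_FLOW)
                .withHadoopJarStep(jarStep);

        AmazonElasticMapReduce emr = AmazonElasticMapReduceClientBuilder.defaultClient();
        emr.addJobFlowSteps(new AddJobFlowStepsRequest()
                .withJobFlowId("j-XXXXXXXXXXXXX") // placeholder cluster id
                .withSteps(step));
    }
}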

Usage

From source file: com.aegeus.aws.ElasticMapReduceService.java

License: Apache License

public void addSteps() {
    // Both steps terminate the cluster if they fail; the HadoopJarStepConfig
    // for each step is not shown in this excerpt.
    StepConfig parseStep = new StepConfig().withName("Parse logs")
            .withActionOnFailure(ActionOnFailure.TERMINATE_JOB_FLOW);

    StepConfig persistStep = new StepConfig().withName("Persist layer")
            .withActionOnFailure(ActionOnFailure.TERMINATE_JOB_FLOW);

    AddJobFlowStepsRequest request = new AddJobFlowStepsRequest().withJobFlowId(clusterId).withSteps(parseStep,
            persistStep);

    stepIds = emr.addJobFlowSteps(request).getStepIds();
}

From source file: org.deeplearning4j.legacyExamples.EmrSparkExample.java

License: Apache License

public void entryPoint(String[] args) {
    JCommander jcmdr = new JCommander(this);
    try {
        jcmdr.parse(args);
    } catch (ParameterException e) {
        jcmdr.usage();
        try {
            Thread.sleep(500);
        } catch (Exception e2) {
            // ignored; the brief pause only lets the usage message print before rethrowing
        }
        throw e;
    }

    AmazonElasticMapReduceClientBuilder builder = AmazonElasticMapReduceClientBuilder.standard();
    builder.withRegion(region);
    builder.withCredentials(getCredentialsProvider());

    AmazonElasticMapReduce emr = builder.build();

    List<StepConfig> steps = new ArrayList<>();

    if (upload) {
        log.info("uploading uber jar");

        AmazonS3ClientBuilder s3builder = AmazonS3ClientBuilder.standard();
        s3builder.withRegion(region);
        s3builder.withCredentials(getCredentialsProvider());
        AmazonS3 s3Client = s3builder.build();

        if (!s3Client.doesBucketExist(bucketName)) {
            s3Client.createBucket(bucketName);
        }

        File uberJarFile = new File(uberJar);

        s3Client.putObject(new PutObjectRequest(bucketName, uberJarFile.getName(), uberJarFile));
    }

    if (debug) {
        log.info("enable debug");

        StepFactory stepFactory = new StepFactory(builder.getRegion() + ".elasticmapreduce");
        StepConfig enableDebugging = new StepConfig().withName("Enable Debugging")
                .withActionOnFailure(ActionOnFailure.TERMINATE_JOB_FLOW)
                .withHadoopJarStep(stepFactory.newEnableDebuggingStep());
        steps.add(enableDebugging);
    }

    if (execute) {
        log.info("execute spark step");

        HadoopJarStepConfig sparkStepConf = new HadoopJarStepConfig();
        sparkStepConf.withJar("command-runner.jar");
        sparkStepConf.withArgs("spark-submit", "--deploy-mode", "cluster", "--class", className,
                getS3UberJarUrl(), "-useSparkLocal", "false");

        ActionOnFailure action = ActionOnFailure.TERMINATE_JOB_FLOW;

        if (keepAlive) {
            action = ActionOnFailure.CONTINUE;
        }

        StepConfig sparkStep = new StepConfig().withName("Spark Step").withActionOnFailure(action)
                .withHadoopJarStep(sparkStepConf);
        steps.add(sparkStep);
    }

    log.info("create spark cluster");

    Application sparkApp = new Application().withName("Spark");

    // The service role and job flow role are created automatically when a
    // cluster is first launched from the AWS console; launch one there first
    // or create the roles manually.

    RunJobFlowRequest request = new RunJobFlowRequest().withName("Spark Cluster").withSteps(steps)
            .withServiceRole("EMR_DefaultRole").withJobFlowRole("EMR_EC2_DefaultRole")
            .withApplications(sparkApp).withReleaseLabel(emrVersion).withLogUri(getS3BucketLogsUrl())
            .withInstances(new JobFlowInstancesConfig().withEc2KeyName("spark").withInstanceCount(instanceCount)
                    .withKeepJobFlowAliveWhenNoSteps(keepAlive).withMasterInstanceType(instanceType)
                    .withSlaveInstanceType(instanceType));

    RunJobFlowResult result = emr.runJobFlow(request);

    log.info(result.toString());

    log.info("done");
}

From source file: org.finra.dm.dao.impl.EmrDaoImpl.java

License: Apache License

/**
 * Creates the list of step configs for Hive/Pig installation.
 *
 * @param emrClusterDefinition the EMR cluster definition
 *
 * @return the list of step configurations containing all the steps for the given definition
 */
private List<StepConfig> getStepConfig(EmrClusterDefinition emrClusterDefinition) {
    StepFactory stepFactory = new StepFactory();
    List<StepConfig> appSteps = new ArrayList<>();

    String hadoopJarForShellScript = configurationHelper.getProperty(ConfigurationValue.EMR_SHELL_SCRIPT_JAR);

    // Add step to copy DM oozie wrapper workflow to HDFS.
    String wrapperWorkflowS3Location = getS3LocationForConfiguration(
            emrHelper.getEmrOozieDmWorkflowS3LocationConfiguration());

    String wrapperWorkflowHdfsLocation = configurationHelper
            .getProperty(ConfigurationValue.EMR_OOZIE_DM_WRAPPER_WORKFLOW_HDFS_LOCATION);

    List<String> s3ToHdfsCopyScriptArgsList = new ArrayList<>();

    s3ToHdfsCopyScriptArgsList.add(wrapperWorkflowS3Location + emrHelper.getS3HdfsCopyScriptName());

    // 1. Source S3 location
    // 2. Target HDFS location.
    // 3. Temp folder to use on local node.
    s3ToHdfsCopyScriptArgsList.add(wrapperWorkflowS3Location);
    s3ToHdfsCopyScriptArgsList.add(wrapperWorkflowHdfsLocation);
    s3ToHdfsCopyScriptArgsList.add(UUID.randomUUID().toString());

    HadoopJarStepConfig copyWrapperJarConfig = new HadoopJarStepConfig(hadoopJarForShellScript)
            .withArgs(s3ToHdfsCopyScriptArgsList);
    appSteps.add(new StepConfig().withName("Copy DM oozie wrapper").withHadoopJarStep(copyWrapperJarConfig));

    // Create install hive step and add to the StepConfig list
    if (StringUtils.isNotBlank(emrClusterDefinition.getHiveVersion())) {
        StepConfig installHive = new StepConfig().withName("Hive " + emrClusterDefinition.getHiveVersion())
                .withActionOnFailure(ActionOnFailure.TERMINATE_JOB_FLOW)
                .withHadoopJarStep(stepFactory.newInstallHiveStep(emrClusterDefinition.getHiveVersion()));
        appSteps.add(installHive);
    }

    // Create install Pig step and add to the StepConfig List
    if (StringUtils.isNotBlank(emrClusterDefinition.getPigVersion())) {
        StepConfig installPig = new StepConfig().withName("Pig " + emrClusterDefinition.getPigVersion())
                .withActionOnFailure(ActionOnFailure.TERMINATE_JOB_FLOW)
                .withHadoopJarStep(stepFactory.newInstallPigStep(emrClusterDefinition.getPigVersion()));
        appSteps.add(installPig);
    }

    // Add Oozie support if needed
    if (emrClusterDefinition.isInstallOozie() != null && emrClusterDefinition.isInstallOozie()) {
        String oozieShellArg = getS3StagingLocation()
                + configurationHelper.getProperty(ConfigurationValue.S3_URL_PATH_DELIMITER)
                + configurationHelper.getProperty(ConfigurationValue.EMR_OOZIE_TAR_FILE);

        List<String> argsList = new ArrayList<>();
        argsList.add(getOozieScriptLocation());
        argsList.add(oozieShellArg);

        HadoopJarStepConfig jarConfig = new HadoopJarStepConfig(hadoopJarForShellScript).withArgs(argsList);
        appSteps.add(new StepConfig().withName("Oozie").withHadoopJarStep(jarConfig));
    }

    // Add the hadoop jar steps that need to be added.
    if (!CollectionUtils.isEmpty(emrClusterDefinition.getHadoopJarSteps())) {
        for (HadoopJarStep hadoopJarStep : emrClusterDefinition.getHadoopJarSteps()) {
            StepConfig stepConfig = emrHelper.getEmrHadoopJarStepConfig(hadoopJarStep.getStepName(),
                    hadoopJarStep.getJarLocation(), hadoopJarStep.getMainClass(),
                    hadoopJarStep.getScriptArguments(), hadoopJarStep.isContinueOnError());

            appSteps.add(stepConfig);
        }
    }

    return appSteps;
}

From source file: org.finra.herd.dao.impl.EmrDaoImpl.java

License: Apache License

/**
 * Creates the list of step configs for Hive/Pig installation.
 *
 * @param emrClusterDefinition the EMR cluster definition
 *
 * @return the list of step configurations containing all the steps for the given definition
 */
private List<StepConfig> getStepConfig(EmrClusterDefinition emrClusterDefinition) {
    StepFactory stepFactory = new StepFactory();
    List<StepConfig> appSteps = new ArrayList<>();

    // Create install hive step and add to the StepConfig list
    if (StringUtils.isNotBlank(emrClusterDefinition.getHiveVersion())) {
        StepConfig installHive = new StepConfig().withName("Hive " + emrClusterDefinition.getHiveVersion())
                .withActionOnFailure(ActionOnFailure.TERMINATE_JOB_FLOW)
                .withHadoopJarStep(stepFactory.newInstallHiveStep(emrClusterDefinition.getHiveVersion()));
        appSteps.add(installHive);
    }

    // Create install Pig step and add to the StepConfig List
    if (StringUtils.isNotBlank(emrClusterDefinition.getPigVersion())) {
        StepConfig installPig = new StepConfig().withName("Pig " + emrClusterDefinition.getPigVersion())
                .withActionOnFailure(ActionOnFailure.TERMINATE_JOB_FLOW)
                .withHadoopJarStep(stepFactory.newInstallPigStep(emrClusterDefinition.getPigVersion()));
        appSteps.add(installPig);
    }

    // Add the hadoop jar steps that need to be added.
    if (!CollectionUtils.isEmpty(emrClusterDefinition.getHadoopJarSteps())) {
        for (HadoopJarStep hadoopJarStep : emrClusterDefinition.getHadoopJarSteps()) {
            StepConfig stepConfig = emrHelper.getEmrHadoopJarStepConfig(hadoopJarStep.getStepName(),
                    hadoopJarStep.getJarLocation(), hadoopJarStep.getMainClass(),
                    hadoopJarStep.getScriptArguments(), hadoopJarStep.isContinueOnError());

            appSteps.add(stepConfig);
        }
    }

    return appSteps;
}

From source file: org.huahinframework.emanager.amazonaws.elasticmapreduce.HiveStepConfig.java

License: Apache License

@Override
public StepConfig[] createStepConfig(Config config) {
    StepConfig installHive = null;
    if (!installed) {
        installHive = new StepConfig().withName("Install Hive")
                .withActionOnFailure(ActionOnFailure.TERMINATE_JOB_FLOW)
                .withHadoopJarStep(stepFactory.newInstallHiveStep());
        installed = true;
    }

    HadoopJarStepConfig hadoopJarStepConfig = null;
    if (config.getArgs() != null) {
        hadoopJarStepConfig = stepFactory.newRunHiveScriptStep(config.getRun(), config.getArgs());
    } else {
        hadoopJarStepConfig = stepFactory.newRunHiveScriptStep(config.getRun());
    }

    StepConfig stepConfig = new StepConfig().withName(config.getName()).withActionOnFailure(ACTION_ON_FAILURE)
            .withHadoopJarStep(hadoopJarStepConfig);

    StepConfig[] stepConfigs = null;
    if (installHive != null) {
        stepConfigs = new StepConfig[2];
        stepConfigs[0] = installHive;
        stepConfigs[1] = stepConfig;
    } else {
        stepConfigs = new StepConfig[1];
        stepConfigs[0] = stepConfig;
    }

    return stepConfigs;
}

From source file: org.huahinframework.emanager.amazonaws.elasticmapreduce.PigStepConfig.java

License: Apache License

@Override
public StepConfig[] createStepConfig(Config config) {
    StepConfig installPig = null;
    if (!installed) {
        installPig = new StepConfig().withName("Install Pig")
                .withActionOnFailure(ActionOnFailure.TERMINATE_JOB_FLOW)
                .withHadoopJarStep(stepFactory.newInstallPigStep());
        installed = true;
    }

    HadoopJarStepConfig hadoopJarStepConfig = null;
    if (config.getArgs() != null) {
        hadoopJarStepConfig = stepFactory.newRunPigScriptStep(config.getRun(), config.getArgs());
    } else {
        hadoopJarStepConfig = stepFactory.newRunPigScriptStep(config.getRun());
    }

    StepConfig stepConfig = new StepConfig().withName(config.getName()).withActionOnFailure(ACTION_ON_FAILURE)
            .withHadoopJarStep(hadoopJarStepConfig);

    StepConfig[] stepConfigs = null;
    if (installPig != null) {
        stepConfigs = new StepConfig[2];
        stepConfigs[0] = installPig;
        stepConfigs[1] = stepConfig;
    } else {
        stepConfigs = new StepConfig[1];
        stepConfigs[0] = stepConfig;
    }

    return stepConfigs;
}

From source file: org.pentaho.amazon.client.impl.EmrClientImpl.java

License: Apache License

private StepConfig configureHiveStep(String stagingS3qUrl, String cmdLineArgs) {

    String[] cmdLineArgsArr;
    if (cmdLineArgs == null) {
        cmdLineArgsArr = new String[] { "" };
    } else {
        List<String> cmdArgs = Arrays.asList(cmdLineArgs.split("\\s+"));
        List<String> updatedCmdArgs = cmdArgs.stream().map(e -> replaceDoubleS3(e))
                .collect(Collectors.toList());
        cmdLineArgsArr = updatedCmdArgs.toArray(new String[updatedCmdArgs.size()]);
    }

    StepConfig hiveStepConfig = new StepConfig("Hive",
            new StepFactory().newRunHiveScriptStep(stagingS3qUrl, cmdLineArgsArr));
    if (alive) {
        hiveStepConfig.withActionOnFailure(ActionOnFailure.CANCEL_AND_WAIT);
    } else {
        hiveStepConfig.withActionOnFailure(ActionOnFailure.TERMINATE_JOB_FLOW);
    }
    return hiveStepConfig;
}

From source file: org.pentaho.amazon.client.impl.EmrClientImpl.java

License: Apache License

private StepConfig initHadoopStep(String jarUrl, String mainClass, List<String> jarStepArgs) {
    StepConfig stepConfig = new StepConfig();
    stepConfig.setName("custom jar: " + jarUrl);

    stepConfig.setHadoopJarStep(configureHadoopStep(jarUrl, mainClass, jarStepArgs));
    if (this.alive) {
        stepConfig.withActionOnFailure(ActionOnFailure.CANCEL_AND_WAIT);
    } else {
        stepConfig.withActionOnFailure(ActionOnFailure.TERMINATE_JOB_FLOW);
    }
    return stepConfig;
}
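
A pattern common to the examples above: transient clusters use ActionOnFailure.TERMINATE_JOB_FLOW, while clusters kept alive for further work (the keepAlive and alive flags) switch the step to ActionOnFailure.CONTINUE or ActionOnFailure.CANCEL_AND_WAIT so that a failed step does not tear the cluster down.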