Example usage for com.amazonaws.services.elasticmapreduce.model JobFlowDetail getJobFlowId

Introduction

This page collects example usages of com.amazonaws.services.elasticmapreduce.model.JobFlowDetail.getJobFlowId().

Prototype


public String getJobFlowId() 

Document

The job flow identifier.

Usage

From source file: datameer.awstasks.aws.emr.EmrCluster.java

License: Apache License

protected StepDetail getStepDetail(JobFlowDetail flowDetail, String stepName) {
    for (StepDetail stepDetail : flowDetail.getSteps()) {
        if (stepName.equals(stepDetail.getStepConfig().getName())) {
            return stepDetail;
        }
    }
    throw new IllegalStateException(
            "no step detail with name '" + stepName + "' found in " + flowDetail.getJobFlowId());
}

From source file: datameer.awstasks.aws.emr.EmrCluster.java

License: Apache License

protected int getStepIndex(JobFlowDetail flowDetail, String stepName) {
    for (int i = 0; i < flowDetail.getSteps().size(); i++) {
        if (stepName.equals(flowDetail.getSteps().get(i).getStepConfig().getName())) {
            return i + 1;// starting from 1
        }
    }
    throw new IllegalStateException(
            "no step detail with name '" + stepName + "' found in " + flowDetail.getJobFlowId());
}

From source file: org.huahinframework.emanager.util.JobUtils.java

License: Apache License

public static JSONArray listJobFlow(AmazonElasticMapReduce emr) {
    List<JSONObject> l = new ArrayList<JSONObject>();

    DescribeJobFlowsResult describeJobFlowsResult = emr.describeJobFlows(new DescribeJobFlowsRequest());
    for (JobFlowDetail jobFlowDetail : describeJobFlowsResult.getJobFlows()) {
        JobFlowExecutionStatusDetail executionStatusDetail = jobFlowDetail.getExecutionStatusDetail();
        Map<String, String> m = new HashMap<String, String>();
        m.put(Response.JOB_FLOW, jobFlowDetail.getJobFlowId());
        m.put(Response.STATE, executionStatusDetail.getState());
        m.put(Response.CREATION_DATE, executionStatusDetail.getCreationDateTime().toString());
        m.put(Response.START_DATE, object2String(executionStatusDetail.getStartDateTime(), true));
        m.put(Response.END_DATE, object2String(executionStatusDetail.getEndDateTime(), true));
        l.add(new JSONObject(m));
    }

    return new JSONArray(l);
}

From source file: org.huahinframework.emanager.util.JobUtils.java

License: Apache License

public static JSONArray runningsJobFlow(AmazonElasticMapReduce emr) {
    List<JSONObject> l = new ArrayList<JSONObject>();

    DescribeJobFlowsRequest describeJobFlowsRequest = new DescribeJobFlowsRequest()
            .withJobFlowStates(JOB_FLOW_STATUS_STARTING, JOB_FLOW_STATUS_RUNNING, JOB_FLOW_STATUS_WAITING);
    DescribeJobFlowsResult describeJobFlowsResult = emr.describeJobFlows(describeJobFlowsRequest);
    for (JobFlowDetail jobFlowDetail : describeJobFlowsResult.getJobFlows()) {
        JobFlowExecutionStatusDetail executionStatusDetail = jobFlowDetail.getExecutionStatusDetail();
        Map<String, Object> m = new HashMap<String, Object>();
        m.put(Response.JOB_FLOW, jobFlowDetail.getJobFlowId());
        m.put(Response.STATE, executionStatusDetail.getState());
        m.put(Response.CREATION_DATE, executionStatusDetail.getCreationDateTime().toString());
        m.put(Response.START_DATE, object2String(executionStatusDetail.getStartDateTime(), true));
        m.put(Response.END_DATE, object2String(executionStatusDetail.getEndDateTime(), true));

        if (!isEmpty(jobFlowDetail.getSteps())) {
            List<Object> ll = new ArrayList<Object>();
            for (StepDetail sd : jobFlowDetail.getSteps()) {
                Map<String, Object> mm = new HashMap<String, Object>();
                StepConfig sc = sd.getStepConfig();
                StepExecutionStatusDetail sesd = sd.getExecutionStatusDetail();

                mm.put(Response.NAME, sc.getName());
                mm.put(Response.ACTION_ON_FAILURE, sc.getActionOnFailure());
                mm.put(Response.STATE, object2String(sesd.getState(), false));
                mm.put(Response.CREATION_DATE, object2String(sesd.getCreationDateTime(), true));
                mm.put(Response.START_DATE, object2String(sesd.getStartDateTime(), true));
                mm.put(Response.END_DATE, object2String(sesd.getEndDateTime(), true));

                HadoopJarStepConfig hjsc = sc.getHadoopJarStep();
                mm.put(Response.JAR, object2String(hjsc.getJar(), false));
                mm.put(Response.MAIN_CLASS, object2String(hjsc.getMainClass(), false));

                if (!isEmpty(hjsc.getArgs())) {
                    mm.put(Response.ARGS, hjsc.getArgs());
                }

                ll.add(mm);
            }
            m.put(Response.STEPS, ll);
        }

        l.add(new JSONObject(m));
    }

    return new JSONArray(l);
}

From source file: org.huahinframework.emanager.util.JobUtils.java

License: Apache License

public static JSONObject getJobFlow(String jobFlow, AmazonElasticMapReduce emr) {
    DescribeJobFlowsRequest describeJobFlowsRequest = new DescribeJobFlowsRequest().withJobFlowIds(jobFlow);
    DescribeJobFlowsResult describeJobFlowsResult = emr.describeJobFlows(describeJobFlowsRequest);
    if (describeJobFlowsResult.getJobFlows().size() != 1) {
        return new JSONObject();
    }

    JobFlowDetail jobFlowDetail = describeJobFlowsResult.getJobFlows().get(0);
    JobFlowExecutionStatusDetail executionStatusDetail = jobFlowDetail.getExecutionStatusDetail();
    JobFlowInstancesDetail instancesDetail = jobFlowDetail.getInstances();

    Map<String, Object> m = new HashMap<String, Object>();
    m.put(Response.JOB_FLOW, jobFlowDetail.getJobFlowId());
    m.put(Response.STATE, executionStatusDetail.getState());
    m.put(Response.CREATION_DATE, executionStatusDetail.getCreationDateTime().toString());
    m.put(Response.START_DATE, object2String(executionStatusDetail.getStartDateTime(), true));
    m.put(Response.END_DATE, object2String(executionStatusDetail.getEndDateTime(), true));
    m.put(Response.AMI_VERSION, object2String(jobFlowDetail.getAmiVersion(), false));
    m.put(Response.NAME, jobFlowDetail.getName());
    m.put(Response.LOG_URI, object2String(jobFlowDetail.getLogUri(), false));

    if (!isEmpty(jobFlowDetail.getSupportedProducts())) {
        m.put(Response.SUPPORTED_PRODUCTS, jobFlowDetail.getSupportedProducts());
    }

    m.put(Response.EC2_KEY_NAME, object2String(instancesDetail.getEc2KeyName(), false));
    m.put(Response.EC2_SUBNET_ID, object2String(instancesDetail.getEc2SubnetId(), false));
    m.put(Response.HADOOP_VERSION, object2String(instancesDetail.getHadoopVersion(), false));
    m.put(Response.INSTANCE_COUNT, integer2String(instancesDetail.getInstanceCount()));
    m.put(Response.KEEP_JOB_FLOW_ALIVE_WHEN_NO_STEPS,
            object2String(instancesDetail.getKeepJobFlowAliveWhenNoSteps(), true));
    m.put(Response.MASTER_INSTANCE_ID, object2String(instancesDetail.getMasterInstanceId(), false));
    m.put(Response.MASTER_INSTANCE_TYPE, object2String(instancesDetail.getMasterInstanceType(), false));
    m.put(Response.MASTER_PUBLIC_DNS_NAME, object2String(instancesDetail.getMasterPublicDnsName(), false));
    m.put(Response.AVAILABILITY_ZONE,
            object2String(instancesDetail.getPlacement().getAvailabilityZone(), false));
    m.put(Response.SLAVE_INSTANCE_TYPE, object2String(instancesDetail.getSlaveInstanceType(), false));

    if (!isEmpty(jobFlowDetail.getBootstrapActions())) {
        List<Object> l = new ArrayList<Object>();
        for (BootstrapActionDetail bad : jobFlowDetail.getBootstrapActions()) {
            Map<String, Object> mm = new HashMap<String, Object>();

            BootstrapActionConfig bac = bad.getBootstrapActionConfig();
            ScriptBootstrapActionConfig sbac = bac.getScriptBootstrapAction();

            mm.put(Response.NAME, object2String(bac.getName(), false));
            mm.put(Response.PATH, object2String(sbac.getPath(), false));

            if (!isEmpty(sbac.getArgs())) {
                mm.put(Response.ARGS, sbac.getArgs());
            }
            l.add(mm);
        }
        m.put(Response.BOOTSTRAP_ACTIONS, l);
    }

    if (!isEmpty(jobFlowDetail.getSteps())) {
        List<Object> l = new ArrayList<Object>();
        for (StepDetail sd : jobFlowDetail.getSteps()) {
            Map<String, Object> mm = new HashMap<String, Object>();
            StepConfig sc = sd.getStepConfig();
            StepExecutionStatusDetail sesd = sd.getExecutionStatusDetail();

            mm.put(Response.NAME, sc.getName());
            mm.put(Response.ACTION_ON_FAILURE, sc.getActionOnFailure());
            mm.put(Response.STATE, object2String(sesd.getState(), false));
            mm.put(Response.CREATION_DATE, object2String(sesd.getCreationDateTime(), true));
            mm.put(Response.START_DATE, object2String(sesd.getStartDateTime(), true));
            mm.put(Response.END_DATE, object2String(sesd.getEndDateTime(), true));

            HadoopJarStepConfig hjsc = sc.getHadoopJarStep();
            mm.put(Response.JAR, object2String(hjsc.getJar(), false));
            mm.put(Response.MAIN_CLASS, object2String(hjsc.getMainClass(), false));

            if (!isEmpty(hjsc.getArgs())) {
                mm.put(Response.ARGS, hjsc.getArgs());
            }

            l.add(mm);
        }
        m.put(Response.STEPS, l);
    }

    return new JSONObject(m);
}

From source file: org.pentaho.amazon.emr.job.AmazonElasticMapReduceJobExecutor.java

License: Apache License

public Result execute(Result result, int arg1) throws KettleException {
    Log4jFileAppender appender = null;
    String logFileName = "pdi-" + this.getName(); //$NON-NLS-1$
    try {
        appender = LogWriter.createFileAppender(logFileName, true, false);
        LogWriter.getInstance().addAppender(appender);
        log.setLogLevel(parentJob.getLogLevel());
    } catch (Exception e) {
        logError(BaseMessages.getString(PKG, "AmazonElasticMapReduceJobExecutor.FailedToOpenLogFile", //$NON-NLS-1$
                logFileName, e.toString()));
        logError(Const.getStackTracker(e));
    }

    try {
        // create/connect aws service
        AmazonElasticMapReduceClient emrClient = new AmazonElasticMapReduceClient(awsCredentials);

        // pull down jar from vfs
        FileObject jarFile = KettleVFS.getFileObject(buildFilename(jarUrl));
        File tmpFile = File.createTempFile("customEMR", "jar");
        tmpFile.deleteOnExit();
        FileOutputStream tmpFileOut = new FileOutputStream(tmpFile);
        IOUtils.copy(jarFile.getContent().getInputStream(), tmpFileOut);
        URL localJarUrl = tmpFile.toURI().toURL();

        // find main class in jar
        String mainClass = getMainClass(localJarUrl);

        // create staging bucket
        AmazonS3 s3Client = new AmazonS3Client(awsCredentials);

        FileSystemOptions opts = new FileSystemOptions();
        DefaultFileSystemConfigBuilder.getInstance().setUserAuthenticator(opts, new StaticUserAuthenticator(
                null, awsCredentials.getAWSAccessKeyId(), awsCredentials.getAWSSecretKey()));
        FileObject stagingDirFileObject = KettleVFS.getFileObject(stagingDir, getVariables(), opts);

        String stagingBucketName = stagingDirFileObject.getName().getBaseName();
        if (!s3Client.doesBucketExist(stagingBucketName)) {
            s3Client.createBucket(stagingBucketName);
        }

        // delete old jar if needed
        try {
            s3Client.deleteObject(stagingBucketName, jarFile.getName().getBaseName());
        } catch (Exception ex) {
            logError(Const.getStackTracker(ex));
        }

        // put jar in s3 staging bucket
        s3Client.putObject(new PutObjectRequest(stagingBucketName, jarFile.getName().getBaseName(), tmpFile));
        // create non-vfs s3 url to jar
        String stagingS3JarUrl = "s3://" + stagingBucketName + "/" + jarFile.getName().getBaseName();
        String stagingS3BucketUrl = "s3://" + stagingBucketName;

        RunJobFlowRequest runJobFlowRequest = null;
        RunJobFlowResult runJobFlowResult = null;
        if (StringUtil.isEmpty(hadoopJobFlowId)) {
            // create EMR job flow
            runJobFlowRequest = createJobFlow(stagingS3BucketUrl, stagingS3JarUrl, mainClass);
            // start EMR job
            runJobFlowResult = emrClient.runJobFlow(runJobFlowRequest);
        } else {
            List<String> jarStepArgs = new ArrayList<String>();
            if (!StringUtil.isEmpty(cmdLineArgs)) {
                StringTokenizer st = new StringTokenizer(cmdLineArgs, " ");
                while (st.hasMoreTokens()) {
                    String token = st.nextToken();
                    logBasic("adding args: " + token);
                    jarStepArgs.add(token);
                }
            }

            HadoopJarStepConfig hadoopJarStep = new HadoopJarStepConfig();
            hadoopJarStep.setJar(stagingS3JarUrl);
            hadoopJarStep.setMainClass(mainClass);
            hadoopJarStep.setArgs(jarStepArgs);

            StepConfig stepConfig = new StepConfig();
            stepConfig.setName("custom jar: " + jarUrl);
            stepConfig.setHadoopJarStep(hadoopJarStep);

            List<StepConfig> steps = new ArrayList<StepConfig>();
            steps.add(stepConfig);

            AddJobFlowStepsRequest addJobFlowStepsRequest = new AddJobFlowStepsRequest();
            addJobFlowStepsRequest.setJobFlowId(hadoopJobFlowId);
            addJobFlowStepsRequest.setSteps(steps);

            emrClient.addJobFlowSteps(addJobFlowStepsRequest);
        }

        String loggingIntervalS = environmentSubstitute(loggingInterval);
        int logIntv = 60;
        try {
            logIntv = Integer.parseInt(loggingIntervalS);
        } catch (NumberFormatException ex) {
            logError("Unable to parse logging interval '" + loggingIntervalS + "' - using " + "default of 60");
        }

        // monitor it / blocking / logging if desired
        if (blocking) {
            try {
                if (log.isBasic()) {

                    String executionState = "RUNNING";

                    List<String> jobFlowIds = new ArrayList<String>();
                    String id = hadoopJobFlowId;
                    if (StringUtil.isEmpty(hadoopJobFlowId)) {
                        id = runJobFlowResult.getJobFlowId();
                        jobFlowIds.add(id);
                    }

                    while (isRunning(executionState)) {
                        DescribeJobFlowsRequest describeJobFlowsRequest = new DescribeJobFlowsRequest();
                        describeJobFlowsRequest.setJobFlowIds(jobFlowIds);

                        DescribeJobFlowsResult describeJobFlowsResult = emrClient
                                .describeJobFlows(describeJobFlowsRequest);
                        boolean found = false;
                        for (JobFlowDetail jobFlowDetail : describeJobFlowsResult.getJobFlows()) {
                            if (jobFlowDetail.getJobFlowId().equals(id)) {
                                executionState = jobFlowDetail.getExecutionStatusDetail().getState();
                                found = true;
                            }
                        }

                        if (!found) {
                            break;
                        }
                        // logBasic(BaseMessages.getString(PKG, "AmazonElasticMapReduceJobExecutor.RunningPercent", setupPercent,
                        // mapPercent, reducePercent));
                        logBasic(hadoopJobName + " execution status: " + executionState);
                        try {
                            if (isRunning(executionState)) {
                                Thread.sleep(logIntv * 1000);
                            }
                        } catch (InterruptedException ie) {
                            // Ignore
                        }
                    }

                    if ("FAILED".equalsIgnoreCase(executionState)) {
                        result.setStopped(true);
                        result.setNrErrors(1);
                        result.setResult(false);

                        S3Object outObject = s3Client.getObject(stagingBucketName, id + "/steps/1/stdout");
                        ByteArrayOutputStream outStream = new ByteArrayOutputStream();
                        IOUtils.copy(outObject.getObjectContent(), outStream);
                        logError(outStream.toString());

                        S3Object errorObject = s3Client.getObject(stagingBucketName, id + "/steps/1/stderr");
                        ByteArrayOutputStream errorStream = new ByteArrayOutputStream();
                        IOUtils.copy(errorObject.getObjectContent(), errorStream);
                        logError(errorStream.toString());
                    }
                }
            } catch (Exception e) {
                logError(e.getMessage(), e);
            }
        }

    } catch (Throwable t) {
        t.printStackTrace();
        result.setStopped(true);
        result.setNrErrors(1);
        result.setResult(false);
        logError(t.getMessage(), t);
    }

    if (appender != null) {
        LogWriter.getInstance().removeAppender(appender);
        appender.close();

        ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_LOG, appender.getFile(),
                parentJob.getJobname(), getName());
        result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
    }

    return result;
}

From source file: org.pentaho.amazon.hive.job.AmazonHiveJobExecutor.java

License: Apache License

/**
 * Executes a Hive job into the AWS Elastic MapReduce service.
 */
public Result execute(Result result, int arg1) throws KettleException {

    // Setup a log file.
    Log4jFileAppender appender = null;
    String logFileName = "pdi-" + this.getName(); //$NON-NLS-1$
    try {
        appender = LogWriter.createFileAppender(logFileName, true, false);
        LogWriter.getInstance().addAppender(appender);
        log.setLogLevel(parentJob.getLogLevel());
    } catch (Exception e) {
        logError(BaseMessages.getString(PKG, "AmazonElasticMapReduceJobExecutor.FailedToOpenLogFile", //$NON-NLS-1$
                logFileName, e.toString()));
        logError(Const.getStackTracker(e));
    }

    try {
        // Create and connect an AWS service.
        AmazonElasticMapReduceClient emrClient = new AmazonElasticMapReduceClient(awsCredentials);
        AmazonS3 s3Client = new AmazonS3Client(awsCredentials);

        // Get bucket name and S3 URL.
        String stagingBucketName = GetBucketName(stagingDir);
        String stagingS3BucketUrl = "s3://" + stagingBucketName; //$NON-NLS-1$

        // Prepare staging S3 URL for Hive script file.
        String stagingS3qUrl = "";
        if (qUrl.startsWith(S3FileProvider.SCHEME + "://")) { //$NON-NLS-1$

            // If the .q file is in S3, its staging S3 URL is s3://{bucketname}/{path}
            if (qUrl.indexOf("@s3") > 0) { //$NON-NLS-1$
                stagingS3qUrl = S3FileProvider.SCHEME + "://" + qUrl.substring(qUrl.indexOf("@s3") + 4); //$NON-NLS-1$
            } else {
                stagingS3qUrl = qUrl;
            }

        } else {
            // A local filename is given for the Hive script file. It should be copied to the S3 Log Directory.
            // First, check for the correct protocol.
            if (!qUrl.startsWith("file:")) { //$NON-NLS-1$
                if (log.isBasic()) {
                    logBasic(BaseMessages.getString(PKG,
                            "AmazonElasticMapReduceJobExecutor.HiveScriptFilename.Error") + qUrl); //$NON-NLS-1$
                }
            }
            // pull down .q file from VSF
            FileObject qFile = KettleVFS.getFileObject(buildFilename(qUrl));
            File tmpFile = File.createTempFile("customEMR", "q"); //$NON-NLS-1$
            tmpFile.deleteOnExit();
            FileOutputStream tmpFileOut = new FileOutputStream(tmpFile);
            IOUtils.copy(qFile.getContent().getInputStream(), tmpFileOut);
            // Get key name for the script file S3 destination. Key is defined as path name after {bucket}/
            String key = GetKeyFromS3Url(stagingDir);
            if (key == null) {
                key = qFile.getName().getBaseName();
            } else {
                key += "/" + qFile.getName().getBaseName(); //$NON-NLS-1$
            }

            // delete the previous .q file in S3
            try {
                s3Client.deleteObject(stagingBucketName, key);
            } catch (Exception ex) {
                logError(Const.getStackTracker(ex));
            }

            // Put .q file in S3 Log Directory.
            s3Client.putObject(new PutObjectRequest(stagingBucketName, key, tmpFile));
            stagingS3qUrl = stagingS3BucketUrl + "/" + key; //$NON-NLS-1$
        }

        // AWS provides script-runner.jar (in its public bucket), which should be used as a MapReduce jar for Hive EMR
        // job.
        jarUrl = "s3://elasticmapreduce/libs/script-runner/script-runner.jar"; //$NON-NLS-1$

        RunJobFlowRequest runJobFlowRequest = null;
        RunJobFlowResult runJobFlowResult = null;
        if (StringUtil.isEmpty(hadoopJobFlowId)) {
            // create an EMR job flow, start a step to setup Hive and get the job flow ID.
            runJobFlowRequest = createJobFlow();
            runJobFlowResult = emrClient.runJobFlow(runJobFlowRequest);
            hadoopJobFlowId = runJobFlowResult.getJobFlowId();
        }

        // Now EMR job flow is ready to accept a Run Hive Script step.
        // First, prepare a Job Flow ID list.
        List<String> jobFlowIds = new ArrayList<String>();
        jobFlowIds.add(hadoopJobFlowId);

        // Configure a HadoopJarStep.
        String args = "s3://elasticmapreduce/libs/hive/hive-script "
                + "--base-path s3://elasticmapreduce/libs/hive/ --hive-version 0.7 --run-hive-script --args -f "
                + environmentSubstitute(stagingS3qUrl) + " " + environmentSubstitute(cmdLineArgs); //$NON-NLS-1$
        List<StepConfig> steps = ConfigHadoopJarStep(hadoopJobName, jarUrl, args);

        // Add a Run Hive Script step to the existing job flow.
        AddJobFlowStepsRequest addJobFlowStepsRequest = new AddJobFlowStepsRequest();
        addJobFlowStepsRequest.setJobFlowId(hadoopJobFlowId);
        addJobFlowStepsRequest.setSteps(steps);
        emrClient.addJobFlowSteps(addJobFlowStepsRequest);

        // Set a logging interval.
        String loggingIntervalS = environmentSubstitute(loggingInterval);
        int logIntv = 10;
        try {
            logIntv = Integer.parseInt(loggingIntervalS);
        } catch (NumberFormatException ex) {
            logError(BaseMessages.getString(PKG, "AmazonElasticMapReduceJobExecutor.LoggingInterval.Error", //$NON-NLS-1$
                    loggingIntervalS));
        }

        // monitor and log if intended.
        if (blocking) {
            try {
                if (log.isBasic()) {

                    String executionState = "RUNNING"; //$NON-NLS-1$

                    while (isRunning(executionState)) {
                        DescribeJobFlowsRequest describeJobFlowsRequest = new DescribeJobFlowsRequest();
                        describeJobFlowsRequest.setJobFlowIds(jobFlowIds);

                        DescribeJobFlowsResult describeJobFlowsResult = emrClient
                                .describeJobFlows(describeJobFlowsRequest);
                        boolean found = false;
                        for (JobFlowDetail jobFlowDetail : describeJobFlowsResult.getJobFlows()) {
                            if (jobFlowDetail.getJobFlowId().equals(hadoopJobFlowId)) {
                                executionState = jobFlowDetail.getExecutionStatusDetail().getState();
                                found = true;
                            }
                        }

                        if (!found) {
                            break;
                        }
                        logBasic(hadoopJobName + " " + BaseMessages.getString(PKG, //$NON-NLS-1$
                                "AmazonElasticMapReduceJobExecutor.JobFlowExecutionStatus", hadoopJobFlowId)
                                + executionState);

                        if (parentJob.isStopped()) {
                            if (!alive) {
                                TerminateJobFlowsRequest terminateJobFlowsRequest = new TerminateJobFlowsRequest();
                                terminateJobFlowsRequest.withJobFlowIds(hadoopJobFlowId);
                                emrClient.terminateJobFlows(terminateJobFlowsRequest);
                            }
                            break;
                        }

                        try {
                            if (isRunning(executionState)) {
                                Thread.sleep(logIntv * 1000);
                            }
                        } catch (InterruptedException ie) {
                            logError(Const.getStackTracker(ie));
                        }
                    }

                    if ("FAILED".equalsIgnoreCase(executionState)) { //$NON-NLS-1$
                        result.setStopped(true);
                        result.setNrErrors(1);
                        result.setResult(false);

                        S3Object outObject = s3Client.getObject(stagingBucketName,
                                hadoopJobFlowId + "/steps/1/stdout"); //$NON-NLS-1$
                        ByteArrayOutputStream outStream = new ByteArrayOutputStream();
                        IOUtils.copy(outObject.getObjectContent(), outStream);
                        logError(outStream.toString());

                        S3Object errorObject = s3Client.getObject(stagingBucketName,
                                hadoopJobFlowId + "/steps/1/stderr"); //$NON-NLS-1$
                        ByteArrayOutputStream errorStream = new ByteArrayOutputStream();
                        IOUtils.copy(errorObject.getObjectContent(), errorStream);
                        logError(errorStream.toString());
                    }
                }
            } catch (Exception e) {
                logError(e.getMessage(), e);
            }
        }

    } catch (Throwable t) {
        t.printStackTrace();
        result.setStopped(true);
        result.setNrErrors(1);
        result.setResult(false);
        logError(t.getMessage(), t);
    }

    if (appender != null) {
        LogWriter.getInstance().removeAppender(appender);
        appender.close();

        ResultFile resultFile = new ResultFile(ResultFile.FILE_TYPE_LOG, appender.getFile(),
                parentJob.getJobname(), getName());
        result.getResultFiles().put(resultFile.getFile().toString(), resultFile);
    }

    return result;
}