Example usage for org.apache.hadoop.mapreduce Cluster Cluster

Introduction

This page collects example usages of the org.apache.hadoop.mapreduce Cluster constructor, Cluster(Configuration conf), drawn from open-source projects.

Prototype

public Cluster(Configuration conf) throws IOException 
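
Below is a minimal, self-contained sketch of how this constructor is typically used. The class name and configuration assumptions here are illustrative, not taken from the projects below: it builds a Cluster from a Configuration, lists the status of the jobs known to the cluster, and closes the Cluster in a finally block, mirroring the pattern in the usage examples.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.JobStatus;

public class ClusterConstructorSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        // Assumes the usual Hadoop client configuration (core-site.xml, mapred-site.xml,
        // yarn-site.xml) is on the classpath; otherwise set properties such as
        // "mapreduce.framework.name" on the Configuration explicitly.
        Configuration conf = new Configuration();
        Cluster cluster = new Cluster(conf); // may throw IOException if the cluster cannot be reached
        try {
            JobStatus[] statuses = cluster.getAllJobStatuses();
            for (JobStatus status : statuses) {
                System.out.println(status.getJobID() + " " + status.getState());
            }
        } finally {
            cluster.close(); // releases the underlying client connection
        }
    }
}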

Usage

From source file:com.cloudera.oryx.computation.common.DistributedGenerationRunner.java

License:Open Source License

private static Collection<String> find(String instanceDir) throws IOException, InterruptedException {
    Collection<String> result = Lists.newArrayList();
    // This is where we will see Hadoop config problems first, so log extra info
    Cluster cluster;
    try {
        cluster = new Cluster(OryxConfiguration.get());
    } catch (IOException ioe) {
        log.error("Unable to init the Hadoop cluster. Check that an MR2, not MR1, cluster is configured.");
        throw ioe;
    }
    try {
        JobStatus[] statuses = cluster.getAllJobStatuses();
        if (statuses != null) {
            for (JobStatus jobStatus : statuses) {
                JobStatus.State state = jobStatus.getState();
                if (state == JobStatus.State.RUNNING || state == JobStatus.State.PREP) {
                    Job job = cluster.getJob(jobStatus.getJobID());
                    if (job != null) {
                        String jobName = job.getJobName();
                        log.info("Found running job {}", jobName);
                        if (jobName.startsWith("Oryx-" + instanceDir + '-')) {
                            result.add(jobName);
                        }
                    }
                }
            }
        }
    } finally {
        cluster.close();
    }
    return result;
}

From source file:com.cloudera.oryx.computation.common.JobStep.java

License:Open Source License

private StepStatus determineStatus() throws IOException, InterruptedException {
    JobContext job = getJob();
    if (job == null) {
        return StepStatus.COMPLETED;
    }
    Cluster cluster = new Cluster(getConf());
    try {
        JobID jobID = job.getJobID();
        if (jobID == null) {
            return StepStatus.PENDING;
        }
        Job runningJob = cluster.getJob(jobID);
        if (runningJob == null) {
            return StepStatus.PENDING;
        }
        JobStatus.State state = runningJob.getJobState();
        switch (state) {
        case PREP:
            return StepStatus.PENDING;
        case RUNNING:
            return StepStatus.RUNNING;
        case SUCCEEDED:
            return StepStatus.COMPLETED;
        case FAILED:
            return StepStatus.FAILED;
        case KILLED:
            return StepStatus.CANCELLED;
        }
        throw new IllegalArgumentException("Unknown Hadoop job state " + state);
    } finally {
        cluster.close();
    }
}

From source file:com.cloudera.oryx.computation.common.JobStep.java

License:Open Source License

/**
 * @return three progress values, in [0,1], as a {@code float[]}, representing setup, mapper and reducer progress
 */
private float[] determineProgresses() throws IOException, InterruptedException {
    if (exec == null) {
        return null;
    }
    Cluster cluster = new Cluster(getConf());
    try {
        JobID jobID = getJob().getJobID();
        if (jobID == null) {
            return null;
        }
        Job runningJob = cluster.getJob(jobID);
        if (runningJob == null) {
            return null;
        }

        return new float[] { runningJob.setupProgress(), runningJob.mapProgress(),
                runningJob.reduceProgress() };
    } finally {
        cluster.close();
    }
}

From source file:com.ikanow.aleph2.analytics.hadoop.services.HadoopTechnologyService.java

License:Apache License

@Override
public CompletableFuture<BasicMessageBean> stopAnalyticJob(DataBucketBean analytic_bucket,
        Collection<AnalyticThreadJobBean> jobs, AnalyticThreadJobBean job_to_stop, IAnalyticsContext context) {
    try {
        final Cluster cluster = new Cluster(_config.get());
        final String job_name = BucketUtils.getUniqueSignature(analytic_bucket.full_name(),
                Optional.ofNullable(job_to_stop.name()));
        return Arrays.stream(cluster.getAllJobStatuses())
                .filter(job_status -> job_status.getJobName().equals(job_name)).findFirst()
                .map(Lambdas.wrap_u(job_status -> {
                    final Job job = cluster.getJob(job_status.getJobID());
                    job.killJob();
                    return CompletableFuture
                            .completedFuture(ErrorUtils.buildSuccessMessage(this.getClass().getSimpleName(),
                                    "stopAnalyticJob", analytic_bucket.full_name() + ":" + job_to_stop.name()));
                })).get() // (Will throw if not found falling through to catch below)
        ;
    } catch (Throwable t) {
        return CompletableFuture.completedFuture(
                ErrorUtils.buildSuccessMessage(this.getClass().getSimpleName(), "stopAnalyticJob",
                        HadoopErrorUtils.JOB_STOP_ERROR, job_to_stop.name(), analytic_bucket.full_name()));
    }
}

From source file:com.ikanow.aleph2.analytics.hadoop.services.HadoopTechnologyService.java

License:Apache License

@Override
public ManagementFuture<Boolean> checkAnalyticJobProgress(DataBucketBean analytic_bucket,
        Collection<AnalyticThreadJobBean> jobs, AnalyticThreadJobBean job_to_check, IAnalyticsContext context) {

    try {
        final Cluster cluster = new Cluster(_config.get());
        final String job_name = BucketUtils.getUniqueSignature(analytic_bucket.full_name(),
                Optional.ofNullable(job_to_check.name()));
        return Arrays.stream(cluster.getAllJobStatuses())
                .filter(job_status -> job_status.getJobName().equals(job_name)).findFirst().map(job_status -> {
                    //TODO (ALEPH-12): create useful info in the side channel beans ... eg if it's an error?
                    // (need to get the job first, then get more info)
                    return FutureUtils.createManagementFuture(
                            CompletableFuture.completedFuture(job_status.isJobComplete()),
                            CompletableFuture.completedFuture(
                                    Arrays.asList(ErrorUtils.buildMessage(true, this.getClass().getSimpleName(),
                                            "checkAnalyticJobProgress", "TBD: more status"))));
                }).get() // (Will throw if not found falling through to catch below)
        ;
    } catch (Throwable t) {
        return FutureUtils.createManagementFuture(CompletableFuture.completedFuture(true));
    }
}

From source file:crunch.MaxTemperature.java

License:Apache License

@Override
    public int run(String[] args) throws Exception {
        if (args.length != 1) {
            JobBuilder.printUsage(this, "<job ID>");
            return -1;
        }
        String jobID = args[0];
        // vv NewMissingTemperatureFields
        Cluster cluster = new Cluster(getConf());
        Job job = cluster.getJob(JobID.forName(jobID));
        // ^^ NewMissingTemperatureFields
        if (job == null) {
            System.err.printf("No job with ID %s found.\n", jobID);
            return -1;
        }
        if (!job.isComplete()) {
            System.err.printf("Job %s is not complete.\n", jobID);
            return -1;
        }

        // vv NewMissingTemperatureFields
        Counters counters = job.getCounters();
        long missing = counters.findCounter(MaxTemperatureWithCounters.Temperature.MISSING).getValue();
        long total = counters.findCounter(TaskCounter.MAP_INPUT_RECORDS).getValue();
        // ^^ NewMissingTemperatureFields

        System.out.printf("Records with missing temperature fields: %.2f%%\n", 100.0 * missing / total);
        return 0;
    }

From source file:org.apache.falcon.logging.TaskLogRetrieverYarn.java

License:Apache License

protected Cluster getCluster(Configuration conf) throws IOException {
    return new Cluster(conf);
}

From source file:org.apache.kylin.engine.mr.common.MapReduceExecutable.java

License:Apache License

@Override
protected void onExecuteStart(ExecutableContext executableContext) {
    final Output output = getOutput();
    if (output.getExtra().containsKey(START_TIME)) {
        final String mrJobId = output.getExtra().get(ExecutableConstants.MR_JOB_ID);
        if (mrJobId == null) {
            getManager().updateJobOutput(getId(), ExecutableState.RUNNING, null, null);
            return;
        }
        try {
            Configuration conf = HadoopUtil.getCurrentConfiguration();
            Job job = new Cluster(conf).getJob(JobID.forName(mrJobId));
            if (job == null || job.getJobState() == JobStatus.State.FAILED) {
                //remove previous mr job info
                super.onExecuteStart(executableContext);
            } else {
                getManager().updateJobOutput(getId(), ExecutableState.RUNNING, null, null);
            }
        } catch (IOException e) {
            logger.warn("error get hadoop status");
            super.onExecuteStart(executableContext);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            logger.warn("error get hadoop status");
            super.onExecuteStart(executableContext);
        }
    } else {
        super.onExecuteStart(executableContext);
    }
}

From source file:org.apache.kylin.engine.mr.common.MapReduceExecutable.java

License:Apache License

@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    final String mapReduceJobClass = getMapReduceJobClass();
    String params = getMapReduceParams();
    Preconditions.checkNotNull(mapReduceJobClass);
    Preconditions.checkNotNull(params);
    try {
        Job job;
        ExecutableManager mgr = getManager();
        final Map<String, String> extra = mgr.getOutput(getId()).getExtra();
        if (extra.containsKey(ExecutableConstants.MR_JOB_ID)) {
            Configuration conf = HadoopUtil.getCurrentConfiguration();
            job = new Cluster(conf).getJob(JobID.forName(extra.get(ExecutableConstants.MR_JOB_ID)));
            logger.info("mr_job_id:" + extra.get(ExecutableConstants.MR_JOB_ID) + " resumed");
        } else {
            final Constructor<? extends AbstractHadoopJob> constructor = ClassUtil
                    .forName(mapReduceJobClass, AbstractHadoopJob.class).getConstructor();
            final AbstractHadoopJob hadoopJob = constructor.newInstance();
            hadoopJob.setConf(HadoopUtil.getCurrentConfiguration());
            hadoopJob.setAsync(true); // so the ToolRunner.run() returns right away
            logger.info("parameters of the MapReduceExecutable:");
            logger.info(params);
            String[] args = params.trim().split("\\s+");
            try {
                //for async mr job, ToolRunner just return 0;

                // use this method instead of ToolRunner.run() because ToolRunner.run() is not thread-safe
                // Refer to: http://stackoverflow.com/questions/22462665/is-hadoops-toorunner-thread-safe
                MRUtil.runMRJob(hadoopJob, args);

                if (hadoopJob.isSkipped()) {
                    return new ExecuteResult(ExecuteResult.State.SUCCEED, "skipped");
                }
            } catch (Exception ex) {
                StringBuilder log = new StringBuilder();
                logger.error("error execute " + this.toString(), ex);
                StringWriter stringWriter = new StringWriter();
                ex.printStackTrace(new PrintWriter(stringWriter));
                log.append(stringWriter.toString()).append("\n");
                log.append("result code:").append(2);
                return new ExecuteResult(ExecuteResult.State.ERROR, log.toString());
            }
            job = hadoopJob.getJob();
        }
        final StringBuilder output = new StringBuilder();
        final HadoopCmdOutput hadoopCmdOutput = new HadoopCmdOutput(job, output);

        //            final String restStatusCheckUrl = getRestStatusCheckUrl(job, context.getConfig());
        //            if (restStatusCheckUrl == null) {
        //                logger.error("restStatusCheckUrl is null");
        //                return new ExecuteResult(ExecuteResult.State.ERROR, "restStatusCheckUrl is null");
        //            }
        //            String mrJobId = hadoopCmdOutput.getMrJobId();
        //            boolean useKerberosAuth = context.getConfig().isGetJobStatusWithKerberos();
        //            HadoopStatusChecker statusChecker = new HadoopStatusChecker(restStatusCheckUrl, mrJobId, output, useKerberosAuth);
        JobStepStatusEnum status = JobStepStatusEnum.NEW;
        while (!isDiscarded() && !isPaused()) {

            JobStepStatusEnum newStatus = HadoopJobStatusChecker.checkStatus(job, output);
            if (status == JobStepStatusEnum.KILLED) {
                mgr.updateJobOutput(getId(), ExecutableState.ERROR, hadoopCmdOutput.getInfo(),
                        "killed by admin");
                return new ExecuteResult(ExecuteResult.State.FAILED, "killed by admin");
            }
            if (status == JobStepStatusEnum.WAITING && (newStatus == JobStepStatusEnum.FINISHED
                    || newStatus == JobStepStatusEnum.ERROR || newStatus == JobStepStatusEnum.RUNNING)) {
                final long waitTime = System.currentTimeMillis() - getStartTime();
                setMapReduceWaitTime(waitTime);
            }
            mgr.addJobInfo(getId(), hadoopCmdOutput.getInfo());
            status = newStatus;
            if (status.isComplete()) {
                final Map<String, String> info = hadoopCmdOutput.getInfo();
                readCounters(hadoopCmdOutput, info);
                mgr.addJobInfo(getId(), info);

                if (status == JobStepStatusEnum.FINISHED) {
                    return new ExecuteResult(ExecuteResult.State.SUCCEED, output.toString());
                } else {
                    return new ExecuteResult(ExecuteResult.State.FAILED, output.toString());
                }
            }
            Thread.sleep(context.getConfig().getYarnStatusCheckIntervalSeconds() * 1000L);
        }

        // try to kill running map-reduce job to release resources.
        if (job != null) {
            try {
                job.killJob();
            } catch (Exception e) {
                logger.warn("failed to kill hadoop job: " + job.getJobID(), e);
            }
        }

        if (isDiscarded()) {
            return new ExecuteResult(ExecuteResult.State.DISCARDED, output.toString());
        } else {
            return new ExecuteResult(ExecuteResult.State.STOPPED, output.toString());
        }

    } catch (ReflectiveOperationException e) {
        logger.error("error getMapReduceJobClass, class name:" + getParam(KEY_MR_JOB), e);
        return new ExecuteResult(ExecuteResult.State.ERROR, e.getLocalizedMessage());
    } catch (Exception e) {
        logger.error("error execute " + this.toString(), e);
        return new ExecuteResult(ExecuteResult.State.ERROR, e.getLocalizedMessage());
    }
}

From source file:org.apache.kylin.job.common.MapReduceExecutable.java

License:Apache License

@Override
protected void onExecuteStart(ExecutableContext executableContext) {
    final Output output = executableManager.getOutput(getId());
    if (output.getExtra().containsKey(START_TIME)) {
        final String mrJobId = output.getExtra().get(ExecutableConstants.MR_JOB_ID);
        if (mrJobId == null) {
            executableManager.updateJobOutput(getId(), ExecutableState.RUNNING, null, null);
            return;
        }
        try {
            Job job = new Cluster(new Configuration()).getJob(JobID.forName(mrJobId));
            if (job.getJobState() == JobStatus.State.FAILED) {
                //remove previous mr job info
                super.onExecuteStart(executableContext);
            } else {
                executableManager.updateJobOutput(getId(), ExecutableState.RUNNING, null, null);
            }
        } catch (IOException e) {
            logger.warn("error get hadoop status");
            super.onExecuteStart(executableContext);
        } catch (InterruptedException e) {
            logger.warn("error get hadoop status");
            super.onExecuteStart(executableContext);
        }
    } else {
        super.onExecuteStart(executableContext);
    }
}