Example usage for org.apache.hadoop.mapreduce JobStatus getState

Introduction

On this page you can find example usages of org.apache.hadoop.mapreduce.JobStatus#getState, collected from open-source projects.

Prototype

public synchronized State getState() 
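
getState() returns the job's current JobStatus.State: RUNNING, SUCCEEDED, FAILED, PREP or KILLED. Below is a minimal sketch of a typical call, assuming a reachable MR2 cluster and Hadoop's default configuration; the class name JobStateExample is illustrative only, not taken from any of the projects below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.JobStatus;

public class JobStateExample {
    public static void main(String[] args) throws Exception {
        // Connect to the cluster described by the default Hadoop configuration
        Cluster cluster = new Cluster(new Configuration());
        try {
            // getAllJobStatuses() returns one JobStatus per job known to the cluster;
            // getState() reports where each job is in its lifecycle
            for (JobStatus jobStatus : cluster.getAllJobStatuses()) {
                System.out.println(jobStatus.getJobID() + " -> " + jobStatus.getState());
            }
        } finally {
            cluster.close();
        }
    }
}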

Usage

From source file:com.cloudera.oryx.computation.common.DistributedGenerationRunner.java

License:Open Source License
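
Here getState() filters the cluster's jobs down to those still in the RUNNING or PREP state, so the runner can find Oryx jobs already in flight for a given instance directory.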

private static Collection<String> find(String instanceDir) throws IOException, InterruptedException {
    Collection<String> result = Lists.newArrayList();
    // This is where we will see Hadoop config problems first, so log extra info
    Cluster cluster;
    try {
        cluster = new Cluster(OryxConfiguration.get());
    } catch (IOException ioe) {
        log.error("Unable to init the Hadoop cluster. Check that an MR2, not MR1, cluster is configured.");
        throw ioe;
    }
    try {
        JobStatus[] statuses = cluster.getAllJobStatuses();
        if (statuses != null) {
            for (JobStatus jobStatus : statuses) {
                JobStatus.State state = jobStatus.getState();
                if (state == JobStatus.State.RUNNING || state == JobStatus.State.PREP) {
                    Job job = cluster.getJob(jobStatus.getJobID());
                    if (job != null) {
                        String jobName = job.getJobName();
                        log.info("Found running job {}", jobName);
                        if (jobName.startsWith("Oryx-" + instanceDir + '-')) {
                            result.add(jobName);
                        }
                    }
                }
            }
        }
    } finally {
        cluster.close();
    }
    return result;
}

From source file:com.kylinolap.job.tools.HadoopStatusChecker.java

License:Apache License
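
This method maps the Hadoop job state onto Kylin's JobStepStatusEnum: RUNNING stays RUNNING, SUCCEEDED becomes FINISHED, PREP becomes WAITING, and FAILED, KILLED or anything unrecognized becomes ERROR.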

public JobStepStatusEnum calculateStatus(JobStatus jobStatus) {
    JobStepStatusEnum status;
    switch (jobStatus.getState()) {
    case RUNNING:
        status = JobStepStatusEnum.RUNNING;
        break;
    case SUCCEEDED:
        status = JobStepStatusEnum.FINISHED;
        break;
    case FAILED:
        status = JobStepStatusEnum.ERROR;
        break;
    case PREP:
        status = JobStepStatusEnum.WAITING;
        break;
    case KILLED:
        status = JobStepStatusEnum.ERROR;
        break;
    default:
        status = JobStepStatusEnum.ERROR;
    }

    return status;
}

From source file:com.linkedin.thirdeye.hadoop.ThirdEyeJob.java

License:Apache License
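
This driver runs one ThirdEye phase and, at the end, checks getState() against JobStatus.State.SUCCEEDED to decide whether the phase failed.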

@SuppressWarnings("unchecked")
public void run() throws Exception {
    LOGGER.info("Input config:{}", inputConfig);
    PhaseSpec phaseSpec;
    try {
        phaseSpec = PhaseSpec.valueOf(phaseName.toUpperCase());
    } catch (Exception e) {
        usage();
        throw e;
    }

    if (PhaseSpec.TRANSFORM.equals(phaseSpec)) {
        TransformPhaseJob job = new TransformPhaseJob("Transform Job", inputConfig);
        job.run();
        return;

    } else if (PhaseSpec.JOIN.equals(phaseSpec)) {
        JoinPhaseJob job = new JoinPhaseJob("Join Job", inputConfig);
        job.run();
        return;

    } else if (PhaseSpec.WAIT.equals(phaseSpec)) {
        WaitPhaseJob job = new WaitPhaseJob("Wait for inputs", inputConfig);
        job.run();
        return;
    }

    // Get root, collection, input paths
    String root = getAndCheck(ThirdEyeJobProperties.THIRDEYE_ROOT.getName(), inputConfig);
    String collection = getAndCheck(ThirdEyeJobProperties.THIRDEYE_COLLECTION.getName(), inputConfig);
    String inputPaths = getAndCheck(ThirdEyeJobProperties.INPUT_PATHS.getName(), inputConfig);

    // Get min / max time
    DateTime minTime;
    DateTime maxTime;

    String minTimeProp = inputConfig.getProperty(ThirdEyeJobProperties.THIRDEYE_TIME_MIN.getName());
    String maxTimeProp = inputConfig.getProperty(ThirdEyeJobProperties.THIRDEYE_TIME_MAX.getName());

    minTime = ISODateTimeFormat.dateTimeParser().parseDateTime(minTimeProp);
    maxTime = ISODateTimeFormat.dateTimeParser().parseDateTime(maxTimeProp);

    Properties jobProperties = phaseSpec.getJobProperties(inputConfig, root, collection, minTime, maxTime,
            inputPaths);
    for (Object key : inputConfig.keySet()) {
        jobProperties.setProperty(key.toString(), inputConfig.getProperty(key.toString()));
    }

    // Instantiate the job
    Constructor<Configured> constructor = (Constructor<Configured>) phaseSpec.getKlazz()
            .getConstructor(String.class, Properties.class);
    Configured instance = constructor.newInstance(phaseSpec.getName(), jobProperties);
    setMapreduceConfig(instance.getConf());

    // Run the job
    Method runMethod = instance.getClass().getMethod("run");
    Job job = (Job) runMethod.invoke(instance);
    if (job != null) {
        JobStatus status = job.getStatus();
        if (status.getState() != JobStatus.State.SUCCEEDED) {
            throw new RuntimeException(
                    "Job " + job.getJobName() + " failed to execute: Ran with config:" + jobProperties);
        }
    }
}

From source file:org.apache.ignite.client.hadoop.GridHadoopClientProtocolSelfTest.java

License:Apache License
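
A test helper that asserts getState(), together with the job ID, name and cleanup progress, matches the expected values.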

/**
 * Check job status.
 *
 * @param status Job status.
 * @param expJobId Expected job ID.
 * @param expJobName Expected job name.
 * @param expState Expected state.
 * @param expCleanupProgress Expected cleanup progress.
 * @throws Exception If failed.
 */
private static void checkJobStatus(JobStatus status, JobID expJobId, String expJobName,
        JobStatus.State expState, float expCleanupProgress) throws Exception {
    assert F.eq(status.getJobID(), expJobId) : "Expected=" + expJobId + ", actual=" + status.getJobID();
    assert F.eq(status.getJobName(), expJobName) : "Expected=" + expJobName + ", actual=" + status.getJobName();
    assert F.eq(status.getState(), expState) : "Expected=" + expState + ", actual=" + status.getState();
    assert F.eq(status.getCleanupProgress(), expCleanupProgress) : "Expected=" + expCleanupProgress
            + ", actual=" + status.getCleanupProgress();
}

From source file:org.apache.tez.mapreduce.client.YARNRunner.java

License:Apache License
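
killJob consults getState() and only forwards the kill to the YARN ResourceManager while the job is still RUNNING or PREP.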

@Override
public void killJob(JobID arg0) throws IOException, InterruptedException {
    /* only send a kill to the RM while the job is still RUNNING or PREP */
    JobStatus status = getJobStatus(arg0);
    if (status.getState() == JobStatus.State.RUNNING || status.getState() == JobStatus.State.PREP) {
        try {
            resMgrDelegate.killApplication(TypeConverter.toYarn(arg0).getAppId());
        } catch (YarnException e) {
            throw new IOException(e);
        }
        return;
    }
}

From source file:org.huahinframework.manager.util.JobUtils.java

License:Apache License
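
listJob uses getState() to filter the cluster's jobs: a null state matches every job, otherwise only jobs in the given state are returned.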

/**
 * @param state the state to filter by, or {@code null} to list every job
 * @param conf the cluster {@link JobConf}
 * @return {@link List} of {@link JSONObject}
 * @throws IOException
 * @throws InterruptedException
 */
public static List<JSONObject> listJob(State state, JobConf conf) throws IOException, InterruptedException {
    List<JSONObject> l = new ArrayList<JSONObject>();

    Cluster cluster = new Cluster(conf);
    try {
        for (JobStatus jobStatus : cluster.getAllJobStatuses()) {
            if (state == null || state == jobStatus.getState()) {
                Map<String, Object> m = getJob(jobStatus);
                if (m != null) {
                    l.add(new JSONObject(m));
                }
            }
        }
    } finally {
        // Release the connection to the cluster
        cluster.close();
    }

    return l;
}

From source file:org.huahinframework.manager.util.JobUtils.java

License:Apache License
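
getJob copies the result of getState(), along with the job's other attributes, into a JSON-ready map.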

/**
 * @param jobStatus the job status to render as a JSON-ready map
 * @return JSON map
 * @throws IOException
 */
public static Map<String, Object> getJob(JobStatus jobStatus) throws IOException {
    int numUsedSlots = jobStatus.getNumUsedSlots();
    int numReservedSlots = jobStatus.getNumReservedSlots();
    int usedMem = jobStatus.getUsedMem();
    int rsvdMem = jobStatus.getReservedMem();
    int neededMem = jobStatus.getNeededMem();

    Map<String, Object> m = new HashMap<String, Object>();
    m.put(Response.JOBID, jobStatus.getJobID().toString());
    m.put(Response.NAME, jobStatus.getJobName());
    m.put(Response.STATE, jobStatus.getState());

    Calendar startTime = Calendar.getInstance();
    startTime.setTimeInMillis(jobStatus.getStartTime());
    m.put(Response.START_TIME, startTime.getTime().toString());

    m.put(Response.USER, jobStatus.getUsername());
    m.put(Response.QUEUE, jobStatus.getQueue());
    m.put(Response.PRIORITY, jobStatus.getPriority().name());
    m.put(Response.USED_CONTAINERS, numUsedSlots < 0 ? UNAVAILABLE : numUsedSlots);
    m.put(Response.RSVD_CONTAINERS, numReservedSlots < 0 ? UNAVAILABLE : numReservedSlots);
    m.put(Response.USED_MEM, usedMem < 0 ? UNAVAILABLE : String.format(memPattern, usedMem));
    m.put(Response.RSVD_MEM, rsvdMem < 0 ? UNAVAILABLE : String.format(memPattern, rsvdMem));
    m.put(Response.NEEDED_MEM, neededMem < 0 ? UNAVAILABLE : String.format(memPattern, neededMem));
    m.put(Response.AM_INFO, jobStatus.getSchedulingInfo());
    return m;
}

From source file:org.janusgraph.hadoop.compat.h2.Hadoop2Compat.java

License:Apache License
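
getJobFailureString combines getState() with getFailureInfo() to build a one-line summary of why a job failed.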

@Override
public String getJobFailureString(Job j) {
    try {
        JobStatus js = j.getStatus();
        return String.format("state=%s, failureinfo=%s", js.getState(), js.getFailureInfo());
    } catch (IOException | InterruptedException e) {
        throw new JanusGraphException(e);
    }
}