Example usage for org.apache.hadoop.mapred.JobID.forName

List of usage examples for org.apache.hadoop.mapred.JobID.forName

Introduction

On this page you can find example usage of org.apache.hadoop.mapred.JobID.forName.

Prototype

public static JobID forName(String str) throws IllegalArgumentException 

Document

Constructs a JobID object from the given string.
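
A minimal, self-contained sketch of the call. The sample job ID string is illustrative; it follows Hadoop's job_<jtIdentifier>_<sequenceNumber> pattern:

import org.apache.hadoop.mapred.JobID;

public class JobIDForNameExample {
    public static void main(String[] args) {
        // Parse a job ID string of the form "job_<jtIdentifier>_<sequenceNumber>".
        JobID jobId = JobID.forName("job_200707121733_0003");
        System.out.println(jobId.getJtIdentifier()); // 200707121733
        System.out.println(jobId.getId()); // 3

        // Strings that do not match the pattern raise IllegalArgumentException.
        try {
            JobID.forName("not-a-job-id");
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}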

Usage

From source file:boa.io.BoaOutputCommitter.java

License:Apache License

@Override
public void abortJob(JobContext context, JobStatus.State runState) throws java.io.IOException {
    super.abortJob(context, runState);

    final JobClient jobClient = new JobClient(new JobConf(context.getConfiguration()));
    final RunningJob job = jobClient.getJob(
            (org.apache.hadoop.mapred.JobID) JobID.forName(context.getConfiguration().get("mapred.job.id")));
    String diag = "";
    for (final TaskCompletionEvent event : job.getTaskCompletionEvents(0))
        switch (event.getTaskStatus()) {
        case SUCCEEDED:
            break;
        case FAILED:
        case KILLED:
        case OBSOLETE:
        case TIPFAILED:
            diag += "Diagnostics for: " + event.getTaskTrackerHttp() + "\n";
            for (final String s : job.getTaskDiagnostics(event.getTaskAttemptId()))
                diag += s + "\n";
            diag += "\n";
            break;
        }
    updateStatus(diag, context.getConfiguration().getInt("boa.hadoop.jobid", 0));
}
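
Note the explicit cast on the result of forName: this class evidently imports the new-API org.apache.hadoop.mapreduce.JobID, whose forName returns the new-API type, so the value is cast back to org.apache.hadoop.mapred.JobID to satisfy JobClient.getJob.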

From source file:com.jackbe.mapreduce.LocalJobManager.java

License:Open Source License

@Override
public float getJobProgress(String id) {
    RunningJob job = null;
    try {
        job = jobClient.getJob(JobID.forName(id));
    } catch (Exception e) {
        log.error("Exception get JobID for job: " + id + " :" + e, e);
        return 0.0f;
    }
    return getJobProgress(job);
}

From source file:com.jackbe.mapreduce.LocalJobManager.java

License:Open Source License

@Override
public String getJobState(String id) throws IOException {
    if (log.isTraceEnabled())
        log.trace("called.");

    RunningJob job = null;
    try {
        job = jobClient.getJob(JobID.forName(id));
    } catch (Exception e) {
        log.error("Exception get JobID for job: " + id + " :" + e, e);
        return JOB_NOT_FOUND;
    }
    return getJobState(job);
}

From source file:com.linkedin.cubert.utils.ScriptStats.java

License:Open Source License

public org.apache.hadoop.mapred.Counters getCounters(String jobid) throws IOException {
    final JobID jobID = JobID.forName(jobid);
    RunningJob runningJob = jobClient.getJob(jobID);
    return runningJob == null ? null : runningJob.getCounters();
}
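
A hypothetical call site for the helper above, assuming a ScriptStats instance named stats; the job ID, counter group, and counter name are illustrative:

org.apache.hadoop.mapred.Counters counters = stats.getCounters("job_201401011200_0042");
if (counters != null) {
    // Look up a single counter by group and name (group/name here are assumptions).
    long mapOutputRecords = counters
            .findCounter("org.apache.hadoop.mapreduce.TaskCounter", "MAP_OUTPUT_RECORDS")
            .getValue();
    System.out.println("map output records: " + mapOutputRecords);
}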

From source file:com.mellanox.hadoop.mapred.UdaPluginSH.java

License:Apache License

static IndexRecordBridge getPathIndex(String jobIDStr, String mapId, int reduce) {
    String user = userRsrc.get(jobIDStr);

    ///////////////////////
    JobID jobID = JobID.forName(jobIDStr);
    ApplicationId appID = ApplicationId.newInstance(Long.parseLong(jobID.getJtIdentifier()), jobID.getId());
    final String base = ContainerLocalizer.USERCACHE + "/" + user + "/" + ContainerLocalizer.APPCACHE + "/"
            + ConverterUtils.toString(appID) + "/output" + "/" + mapId;
    if (LOG.isDebugEnabled()) {
        LOG.debug("DEBUG0 " + base);
    }
    // Index file
    IndexRecordBridge data = null;
    try {
        Path indexFileName = lDirAlloc.getLocalPathToRead(base + "/file.out.index", mjobConf);
        // Map-output file
        Path mapOutputFileName = lDirAlloc.getLocalPathToRead(base + "/file.out", mjobConf);
        if (LOG.isDebugEnabled()) {
            LOG.debug("DEBUG1 " + base + " : " + mapOutputFileName + " : " + indexFileName);
        }

        ///////////////////////
        // TODO: is this correct ?? - why user and not runAsUserName like in hadoop-1 ?? 
        // on 2nd thought, this sounds correct, because probably we registered the runAsUser and not the "user"
        data = indexCache.getIndexInformationBridge(mapId, reduce, indexFileName, user);
        data.pathMOF = mapOutputFileName.toString();
    } catch (IOException e) {
        LOG.error("got an exception while retrieving the Index Info");
    }

    return data;

}
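
For orientation: the base path assembled above follows the YARN NodeManager's local-dir layout, usercache/<user>/appcache/<application id>/output/<map id>, where file.out holds the map output and file.out.index its index; lDirAlloc then resolves that relative path against the configured local directories.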

From source file:com.mellanox.hadoop.mapred.UdaPluginTT.java

License:Apache License

static IndexRecordBridge getPathIndex(String jobId, String mapId, int reduce) {
    String userName = null;
    String runAsUserName = null;
    IndexRecordBridge data = null;

    try {
        JobConf jobConf = udaShuffleProvider.getJobConfFromSuperClass(JobID.forName(jobId));
        userName = jobConf.getUser();
        runAsUserName = taskTracker.getTaskController().getRunAsUser(jobConf);

        String intermediateOutputDir = UdaShuffleProviderPlugin.getIntermediateOutputDirFromSuperClass(userName,
                jobId, mapId);

        String indexKey = intermediateOutputDir + "/file.out.index";
        Path indexFileName = fileIndexCache.get(indexKey);
        if (indexFileName == null) {
            indexFileName = localDirAllocator.getLocalPathToRead(indexKey, mjobConf);
            fileIndexCache.put(indexKey, indexFileName);
        }
        // Map-output file
        String fileKey = intermediateOutputDir + "/file.out";
        Path mapOutputFileName = fileCache.get(fileKey);
        if (mapOutputFileName == null) {
            mapOutputFileName = localDirAllocator.getLocalPathToRead(fileKey, mjobConf);
            fileCache.put(fileKey, mapOutputFileName);
        }

        //  Read the index file to get the information about where
        //  the map-output for the given reducer is available. 

        data = indexCache.getIndexInformationBridge(mapId, reduce, indexFileName, runAsUserName);
        data.pathMOF = mapOutputFileName.toString();

    } catch (IOException e) {
        LOG.error("exception caught" + e.toString()); //to check how C behaves in case there is an exception
    }
    return data;

}

From source file:com.netflix.lipstick.pigtolipstick.BasicP2LClient.java

License:Apache License

protected void updatePlanStatusForCompletedJobId(P2jPlanStatus planStatus, String jobId) {
    LOG.info("Updating plan status for completed job " + jobId);
    updatePlanStatusForJobId(planStatus, jobId);
    JobClient jobClient = PigStats.get().getJobClient();
    JobID jobID = JobID.forName(jobId);
    long startTime = Long.MAX_VALUE;
    long finishTime = Long.MIN_VALUE;
    /* The JobClient doesn't expose a way to get the start and finish time
       of the overall job [1], sadly, so we're pulling out the min task start
       time and max task finish time and using these to approximate.

       [1] - Which is really dumb. The data obviously exists; it gets rendered
       in the job tracker via the JobInProgress, but sadly this is internal
       to the remote job tracker, so we don't have access to this
       information. */
    try {
        List<TaskReport> reports = Lists.newArrayList();
        reports.addAll(Arrays.asList(jobClient.getMapTaskReports(jobID)));
        reports.addAll(Arrays.asList(jobClient.getReduceTaskReports(jobID)));
        reports.addAll(Arrays.asList(jobClient.getCleanupTaskReports(jobID)));
        reports.addAll(Arrays.asList(jobClient.getSetupTaskReports(jobID)));
        for (TaskReport rpt : reports) {
            /* rpt.getStartTime() sometimes returns zero meaning it does
               not know what time it started so we need to prevent using
               this or we'll lose the actual lowest start time */
            long taskStartTime = rpt.getStartTime();
            if (0 != taskStartTime) {
                startTime = Math.min(startTime, taskStartTime);
            }
            finishTime = Math.max(finishTime, rpt.getFinishTime());
        }
        P2jJobStatus jobStatus = jobIdToJobStatusMap.get(jobId);
        if (startTime < Long.MAX_VALUE) {
            jobStatus.setStartTime(startTime);
        }
        if (finishTime > Long.MIN_VALUE) {
            jobStatus.setFinishTime(finishTime);
        }
        LOG.info("Determined start and finish times for job " + jobId);
    } catch (IOException e) {
        LOG.error("Error getting job info.", e);
    }

}

From source file:com.ngdata.hbaseindexer.master.BatchStateUpdater.java

License:Apache License

@Override
public void run() {
    IndexerDefinition indexerDefinition = null;
    try {
        indexerDefinition = indexerModel.getIndexer(indexerName);
    } catch (IndexerNotFoundException e) {
        log.info("Could not find index " + indexerName + " while checking batch rebuild status.", e);
    }
    if (indexerDefinition != null) {
        log.debug("Checking batch state for " + indexerDefinition.getName());
        BatchBuildInfo batchBuildInfo = indexerDefinition.getActiveBatchBuildInfo();
        if (batchBuildInfo != null) {
            Set<String> jobs = batchBuildInfo.getMapReduceJobTrackingUrls().keySet();

            boolean batchDone = true;
            boolean overAllSuccess = true;
            for (String jobId : jobs) {
                RunningJob job;
                try {
                    job = jobClient.getJob(JobID.forName(jobId));
                } catch (IOException e) {
                    log.error("Could not get job " + jobId + " for index " + indexerDefinition.getName()
                            + " while checking active build info.", e);
                    batchDone = false;
                    break;
                }
                if (job != null) {
                    int jobState;
                    try {
                        jobState = job.getJobState();
                    } catch (IOException e) {
                        log.error(
                                "Could not get jobstate for job " + jobId + " for index "
                                        + indexerDefinition.getName() + " while checking active build info.",
                                e);
                        batchDone = false;
                        break;
                    }
                    batchDone = batchDone && jobState != JobStatus.RUNNING;
                    overAllSuccess = overAllSuccess && jobState == JobStatus.SUCCEEDED;
                } else {
                    log.warn("Could not find job " + jobId + " while checking active batch builds for indexer "
                            + indexerDefinition.getName());
                }
            }
            if (batchDone) {
                markBatchBuildCompleted(indexerDefinition.getName(), overAllSuccess);
            } else {
                executor.schedule(this, pollInterval, TimeUnit.MILLISECONDS);
            }
        }
    }
}

From source file:com.ngdata.hbaseindexer.master.BatchStateUpdaterTest.java

License:Apache License

private RunningJob createJob(String jobId, int status) throws IOException {
    RunningJob job = mock(RunningJob.class);
    when(job.getJobState()).thenReturn(status);
    when(jobClient.getJob(JobID.forName(jobId))).thenReturn(job);
    return job;
}
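
The stubbing works because org.apache.hadoop.mapred.JobID implements value-based equals/hashCode: Mockito's default argument matching therefore treats the JobID built here and the one the code under test later reconstructs from the same string as the same argument.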

From source file:com.ngdata.hbaseindexer.master.IndexerMaster.java

License:Apache License

private void prepareDeleteIndex(String indexerName) {
    // We do not have to take a lock on the indexer, since once in delete state the indexer cannot
    // be modified anymore by ordinary users.
    boolean canBeDeleted = false;
    try {
        // Re-read the current state of the indexer and check that it is still current
        IndexerDefinition indexer = indexerModel.getFreshIndexer(indexerName);
        if (indexer.getLifecycleState() == IndexerDefinition.LifecycleState.DELETE_REQUESTED) {
            canBeDeleted = true;

            String queueSubscriptionId = indexer.getSubscriptionId();
            if (queueSubscriptionId != null) {
                sepModel.removeSubscription(indexer.getSubscriptionId());
                // We leave the subscription ID in the indexer definition FYI
            }

            if (indexer.getActiveBatchBuildInfo() != null) {
                JobClient jobClient = getJobClient();
                Set<String> jobs = indexer.getActiveBatchBuildInfo().getMapReduceJobTrackingUrls().keySet();
                for (String jobId : jobs) {
                    RunningJob job = jobClient.getJob(JobID.forName(jobId));
                    if (job != null) {
                        job.killJob();
                        log.info("Kill indexer build job for indexer " + indexerName + ", job ID =  " + jobId);
                    }
                    canBeDeleted = false;
                }
            }

            if (!canBeDeleted) {
                indexer = new IndexerDefinitionBuilder().startFrom(indexer)
                        .lifecycleState(IndexerDefinition.LifecycleState.DELETING).build();
                indexerModel.updateIndexerInternal(indexer);
            }
        } else if (indexer.getLifecycleState() == IndexerDefinition.LifecycleState.DELETING) {
            // Check if the build job is already finished, if so, allow delete
            if (indexer.getActiveBatchBuildInfo() == null) {
                canBeDeleted = true;
            }
        }
    } catch (Throwable t) {
        log.error("Error preparing deletion of indexer " + indexerName, t);
    }

    if (canBeDeleted) {
        deleteIndexer(indexerName);
    }
}