List of usage examples for `org.apache.hadoop.mapreduce.JobPriority.NORMAL`.
To view the source code for any example of `org.apache.hadoop.mapreduce.JobPriority.NORMAL`, click its Source link.
From source file:com.scaleoutsoftware.soss.hserver.hadoop.SubmittedJob.java
License: Apache License
SubmittedJob(JobID jobID, String jobSubmitDirectory, Credentials credentials, Configuration configuration) throws IOException, InterruptedException { this.jobID = jobID; this.configuration = configuration; this.jobSubmitDirectoryPath = new Path(jobSubmitDirectory); this.fileSystem = FileSystem.get(configuration); JobSplit.TaskSplitMetaInfo splitInfo[] = SplitMetaInfoReader.readSplitMetaInfo(jobID, fileSystem, configuration, jobSubmitDirectoryPath); Path jobSplitFile = JobSubmissionFiles.getJobSplitFile(jobSubmitDirectoryPath); FSDataInputStream stream = fileSystem.open(jobSplitFile); for (JobSplit.TaskSplitMetaInfo info : splitInfo) { Object split = getSplitDetails(stream, info.getStartOffset(), configuration); inputSplits.add(split);//from ww w . j av a 2 s . com splitLocations.put(split, info.getLocations()); LOG.info("Adding split for execution. Split = " + split + " Locations: " + Arrays.toString(splitLocations.get(split))); } stream.close(); jobConfPath = JobSubmissionFiles.getJobConfPath(jobSubmitDirectoryPath); if (!fileSystem.exists(jobConfPath)) { throw new IOException("Cannot find job.xml. Path = " + jobConfPath); } //We cannot just use JobConf(Path) constructor, //because it does not work for HDFS locations. //The comment in Configuration#loadResource() states, //for the case when the Path to the resource is provided: //"Can't use FileSystem API or we get an infinite loop //since FileSystem uses Configuration API. Use java.io.File instead." // //Workaround: construct empty Configuration, provide it with //input stream and give it to JobConf constructor. FSDataInputStream jobConfStream = fileSystem.open(jobConfPath); Configuration jobXML = new Configuration(false); jobXML.addResource(jobConfStream); //The configuration does not actually gets read before we attempt to //read some property. Call to #size() will make Configuration to //read the input stream. jobXML.size(); //We are done with input stream, can close it now. 
jobConfStream.close(); jobConf = new JobConf(jobXML); newApi = jobConf.getUseNewMapper(); jobStatus = new JobStatus(jobID, 0f, 0f, 0f, 0f, JobStatus.State.RUNNING, JobPriority.NORMAL, UserGroupInformation.getCurrentUser().getUserName(), jobID.toString(), jobConfPath.toString(), ""); }
From source file:com.scaleoutsoftware.soss.hserver.hadoop.SubmittedJob.java
License: Apache License
/**
 * Returns the current status of this job, refreshing it first if the
 * asynchronous execution has completed (successfully or with an error).
 *
 * @return the latest {@link JobStatus} for this job
 * @throws IOException if the current user cannot be determined
 */
JobStatus getJobStatus() throws IOException {
    // Nothing to refresh unless the asynchronous job has finished.
    if (runningJob == null || !runningJob.isDone()) {
        return jobStatus;
    }

    try {
        runningJob.get();
        jobStatus = new JobStatus(jobID, 1f, 1f, 1f, 1f, JobStatus.State.SUCCEEDED,
                JobPriority.NORMAL, UserGroupInformation.getCurrentUser().getUserName(),
                jobID.toString(), jobConfPath.toString(), "");
    } catch (Exception e) {
        LOG.error("Exception while running ScaleOut hServer job.", e);
        final String failureInfo = e.toString();
        // Anonymous subclass so callers querying failure info get the exception text.
        jobStatus = new JobStatus(jobID, 0f, 0f, 0f, 0f, JobStatus.State.FAILED,
                JobPriority.NORMAL, UserGroupInformation.getCurrentUser().getUserName(),
                jobID.toString(), jobConfPath.toString(), "") {
            @Override
            public synchronized String getFailureInfo() {
                return failureInfo;
            }
        };
    }

    // The job outcome has been folded into jobStatus; drop the handle.
    runningJob = null;
    return jobStatus;
}
From source file:org.apache.ignite.internal.processors.hadoop.GridHadoopUtils.java
License: Apache License
/**
 * Convert GG job status to Hadoop job status.
 *
 * @param status GG job status.
 * @param conf Configuration used to resolve the job file path.
 * @return Hadoop job status.
 */
public static JobStatus status(GridHadoopJobStatus status, Configuration conf) {
    JobID jobId = new JobID(status.jobId().globalId().toString(), status.jobId().localId());

    float setupProgress = 0;
    float mapProgress = 0;
    float reduceProgress = 0;
    float cleanupProgress = 0;

    JobStatus.State state = JobStatus.State.RUNNING;

    switch (status.jobPhase()) {
        case PHASE_SETUP:
            setupProgress = 0.42f;

            break;

        case PHASE_MAP:
            setupProgress = 1;
            mapProgress = 1f - status.pendingMapperCnt() / (float)status.totalMapperCnt();

            break;

        case PHASE_REDUCE:
            setupProgress = 1;
            mapProgress = 1;

            // FIX: was `assert status.totalReducerCnt() > 0` followed by an unguarded
            // division — with assertions disabled (the production default) a job with
            // zero reducers would divide by zero and report NaN progress. Guard the
            // division instead, treating a reduce-less job as fully reduced.
            if (status.totalReducerCnt() > 0)
                reduceProgress = 1f - status.pendingReducerCnt() / (float)status.totalReducerCnt();
            else
                reduceProgress = 1f;

            break;

        case PHASE_CANCELLING:
        case PHASE_COMPLETE:
            if (!status.isFailed()) {
                setupProgress = 1;
                mapProgress = 1;
                reduceProgress = 1;
                cleanupProgress = 1;

                state = JobStatus.State.SUCCEEDED;
            }
            else
                state = JobStatus.State.FAILED;

            break;

        default:
            assert false;
    }

    return new JobStatus(jobId, setupProgress, mapProgress, reduceProgress, cleanupProgress, state,
        JobPriority.NORMAL, status.user(), status.jobName(),
        jobFile(conf, status.user(), jobId).toString(), "N/A");
}
From source file:org.apache.ignite.internal.processors.hadoop.HadoopUtils.java
License: Apache License
/** * Convert Ignite job status to Hadoop job status. * * @param status Ignite job status./* w ww . j av a2 s . c o m*/ * @return Hadoop job status. */ public static JobStatus status(HadoopJobStatus status, Configuration conf) { JobID jobId = new JobID(status.jobId().globalId().toString(), status.jobId().localId()); float setupProgress = 0; float mapProgress = 0; float reduceProgress = 0; float cleanupProgress = 0; JobStatus.State state = JobStatus.State.RUNNING; switch (status.jobPhase()) { case PHASE_SETUP: setupProgress = 0.42f; break; case PHASE_MAP: setupProgress = 1; mapProgress = 1f - status.pendingMapperCnt() / (float) status.totalMapperCnt(); break; case PHASE_REDUCE: assert status.totalReducerCnt() > 0; setupProgress = 1; mapProgress = 1; reduceProgress = 1f - status.pendingReducerCnt() / (float) status.totalReducerCnt(); break; case PHASE_CANCELLING: case PHASE_COMPLETE: if (!status.isFailed()) { setupProgress = 1; mapProgress = 1; reduceProgress = 1; cleanupProgress = 1; state = JobStatus.State.SUCCEEDED; } else state = JobStatus.State.FAILED; break; default: assert false; } return new JobStatus(jobId, setupProgress, mapProgress, reduceProgress, cleanupProgress, state, JobPriority.NORMAL, status.user(), status.jobName(), jobFile(conf, status.user(), jobId).toString(), "N/A"); }
From source file:org.apache.ignite.internal.processors.hadoop.impl.HadoopUtils.java
License: Apache License
/** * Convert Ignite job status to Hadoop job status. * * @param status Ignite job status./*from www.ja va 2 s. com*/ * @return Hadoop job status. */ public static JobStatus status(HadoopJobStatus status, Configuration conf) { JobID jobId = new JobID(status.jobId().globalId().toString(), status.jobId().localId()); float setupProgress = 0; float mapProgress = 0; float reduceProgress = 0; float cleanupProgress = 0; JobStatus.State state = JobStatus.State.RUNNING; switch (status.jobPhase()) { case PHASE_SETUP: setupProgress = 0.42f; break; case PHASE_MAP: setupProgress = 1; mapProgress = 1f - status.pendingMapperCnt() / (float) status.totalMapperCnt(); break; case PHASE_REDUCE: setupProgress = 1; mapProgress = 1; if (status.totalReducerCnt() > 0) reduceProgress = 1f - status.pendingReducerCnt() / (float) status.totalReducerCnt(); else reduceProgress = 1f; break; case PHASE_CANCELLING: case PHASE_COMPLETE: if (!status.isFailed()) { setupProgress = 1; mapProgress = 1; reduceProgress = 1; cleanupProgress = 1; state = JobStatus.State.SUCCEEDED; } else state = JobStatus.State.FAILED; break; default: assert false; } return new JobStatus(jobId, setupProgress, mapProgress, reduceProgress, cleanupProgress, state, JobPriority.NORMAL, status.user(), status.jobName(), jobFile(conf, status.user(), jobId).toString(), "N/A"); }
From source file:org.apache.tez.mapreduce.client.DAGJobStatus.java
License: Apache License
/**
 * Returns the priority of this DAG job.
 *
 * @return always {@link JobPriority#NORMAL}; reporting the real priority is
 *         tracked by TEX-147 and not implemented yet.
 */
@Override
public synchronized JobPriority getPriority() {
    // TEX-147: return real priority
    return JobPriority.NORMAL;
}