Example usage for org.apache.hadoop.mapreduce JobID toString

List of usage examples for org.apache.hadoop.mapreduce JobID toString

Introduction

On this page you can find example usages of org.apache.hadoop.mapreduce.JobID.toString().

Prototype

@Override
public String toString()
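
JobID.toString() renders the id in the canonical job_<jtIdentifier>_<id> form, with the numeric part zero-padded to at least four digits. A minimal sketch (the identifier string below is made up):

import org.apache.hadoop.mapreduce.JobID;

public class JobIDToStringDemo {
    public static void main(String[] args) {
        // "20240101000" stands in for a real jobtracker/application identifier
        JobID jobId = new JobID("20240101000", 7);
        System.out.println(jobId.toString()); // prints: job_20240101000_0007
    }
}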

Usage

From source file:io.apigee.lembos.node.types.JobWrap.java

License:Apache License

/**
 * Wraps {@link Job#getJobID()}.
 *
 * @param ctx the JavaScript context (unused)
 * @param thisObj the 'this' object of the caller
 * @param args the arguments for the call
 * @param func the function called (unused)
 *
 * @return the job id
 */
@JSFunction
public static Object getJobID(final Context ctx, final Scriptable thisObj, final Object[] args,
        final Function func) {
    final JobID jobId = ((JobWrap) thisObj).job.getJobID();

    return jobId == null ? Context.getUndefinedValue() : jobId.toString();
}
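
Note the null guard: Job#getJobID() returns null until the job has actually been submitted, so the wrapper maps that case to JavaScript's undefined instead of calling toString() on a null id.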

From source file:io.druid.indexer.updater.HadoopConverterJob.java

License:Apache License

public static Path getJobPath(JobID jobID, Path workingDirectory) {
    return new Path(workingDirectory, jobID.toString());
}
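
For example, with made-up values (the working directory is arbitrary):

JobID jobID = new JobID("20240101000", 42);
Path workingDirectory = new Path("/tmp/druid/converter");
Path jobPath = getJobPath(jobID, workingDirectory);
// jobPath -> /tmp/druid/converter/job_20240101000_0042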

From source file:it.crs4.pydoop.mapreduce.pipes.TaskLog.java

License:Apache License

/**
 * Get the user log directory for the job jobid.
 *
 * @param jobid the job id
 * @return user log directory for the job
 */
public static File getJobDir(JobID jobid) {
    return new File(getUserLogDir(), jobid.toString());
}
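
Called like this (the id is hypothetical; the parent directory comes from getUserLogDir()):

JobID jobid = new JobID("20240101000", 3);
File jobDir = getJobDir(jobid);
// e.g. <user-log-dir>/job_20240101000_0003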

From source file:org.apache.ambari.TestJobHistoryParsing.java

License:Apache License

public void test(String workflowId, String workflowName, String workflowNodeName,
        Map<String, String[]> adjacencies) {
    Configuration conf = new Configuration();
    setProperties(conf, workflowId, workflowName, workflowNodeName, adjacencies);
    String log = log("JOB", new String[] { ID, NAME, NODE, ADJ }, new String[] { conf.get(ID_PROP),
            conf.get(NAME_PROP), conf.get(NODE_PROP), JobHistory.JobInfo.getWorkflowAdjacencies(conf) });
    ParsedLine line = new ParsedLine(log);
    JobID jobid = new JobID("id", 1);
    JobSubmittedEvent event = new JobSubmittedEvent(jobid, workflowName, "", 0L, "", null, "", line.get(ID),
            line.get(NAME), line.get(NODE), line.get(ADJ));
    WorkflowContext context = MapReduceJobHistoryUpdater.buildWorkflowContext(event);

    String resultingWorkflowId = workflowId;
    if (workflowId.isEmpty())
        resultingWorkflowId = jobid.toString().replace("job_", "mr_");
    assertEquals("Didn't recover workflowId", resultingWorkflowId, context.getWorkflowId());
    assertEquals("Didn't recover workflowName", workflowName, context.getWorkflowName());
    assertEquals("Didn't recover workflowNodeName", workflowNodeName, context.getWorkflowEntityName());

    Map<String, String[]> resultingAdjacencies = adjacencies;
    if (resultingAdjacencies.size() == 0) {
        resultingAdjacencies = new HashMap<String, String[]>();
        resultingAdjacencies.put(workflowNodeName, new String[] {});
    }
    assertEquals("Got incorrect number of adjacencies", resultingAdjacencies.size(),
            context.getWorkflowDag().getEntries().size());
    for (WorkflowDagEntry entry : context.getWorkflowDag().getEntries()) {
        String[] sTargets = resultingAdjacencies.get(entry.getSource());
        assertNotNull("No original targets for " + entry.getSource(), sTargets);
        List<String> dTargets = entry.getTargets();
        assertEquals("Got incorrect number of targets for " + entry.getSource(), sTargets.length,
                dTargets.size());
        for (int i = 0; i < sTargets.length; i++) {
            assertEquals("Got incorrect target for " + entry.getSource(), sTargets[i], dTargets.get(i));
        }
    }
}
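
The toString() use worth noting here is the fallback workflow id: when no explicit workflowId is configured, the test expects the job id string with its "job_" prefix swapped for "mr_". In isolation, with the values from the test above:

JobID jobid = new JobID("id", 1);
// jobid.toString() -> "job_id_0001"
String workflowId = jobid.toString().replace("job_", "mr_"); // "mr_id_0001"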

From source file:org.apache.carbondata.streaming.CarbonStreamOutputFormatTest.java

License:Apache License

@Override
protected void setUp() throws Exception {
    super.setUp();
    JobID jobId = CarbonInputFormatUtil.getJobId(new Date(), 0);
    TaskID taskId = new TaskID(jobId, TaskType.MAP, 0);
    taskAttemptId = new TaskAttemptID(taskId, 0);

    hadoopConf = new Configuration();
    hadoopConf.set("mapred.job.id", jobId.toString());
    hadoopConf.set("mapred.tip.id", taskAttemptId.getTaskID().toString());
    hadoopConf.set("mapred.task.id", taskAttemptId.toString());
    hadoopConf.setBoolean("mapred.task.is.map", true);
    hadoopConf.setInt("mapred.task.partition", 0);

    tablePath = new File("target/stream_output").getCanonicalPath();
    String dbName = "default";
    String tableName = "stream_table_output";
    AbsoluteTableIdentifier identifier = AbsoluteTableIdentifier.from(tablePath,
            new CarbonTableIdentifier(dbName, tableName, UUID.randomUUID().toString()));

    CarbonTable table = new StoreCreator(new File("target/store").getAbsolutePath(),
            new File("../hadoop/src/test/resources/data.csv").getCanonicalPath()).createTable(identifier);

    String factFilePath = new File("../hadoop/src/test/resources/data.csv").getCanonicalPath();
    carbonLoadModel = StoreCreator.buildCarbonLoadModel(table, factFilePath, identifier);
}

From source file:org.apache.ignite.internal.processors.hadoop.GridHadoopUtils.java

License:Apache License

/**
 * Gets job file.
 *
 * @param conf Configuration.
 * @param usr User.
 * @param jobId Job ID.
 * @return Job file.
 */
public static Path jobFile(Configuration conf, String usr, JobID jobId) {
    return new Path(stagingAreaDir(conf, usr), jobId.toString() + Path.SEPARATOR + MRJobConfig.JOB_CONF_FILE);
}
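
A sketch of the resulting layout (the user name and job id are made up; MRJobConfig.JOB_CONF_FILE is "job.xml"):

Configuration conf = new Configuration();
JobID jobId = new JobID("20240101000", 5);
Path file = jobFile(conf, "alice", jobId);
// e.g. <staging-area-dir>/job_20240101000_0005/job.xml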

From source file:org.apache.pig.tools.pigstats.mapreduce.MRPigStatsUtil.java

License:Apache License

@Private
public static void setBackendException(Job job, Exception e) {
    JobID jobId = job.getAssignedJobID();
    if (jobId == null) {
        return;
    }
    PigStats.get().setBackendException(jobId.toString(), e);
}

From source file:org.apache.tez.auxservices.ShuffleHandler.java

License:Apache License

private void addJobToken(JobID jobId, String user, Token<JobTokenIdentifier> jobToken) {
    userRsrc.put(jobId.toString(), user);
    secretManager.addTokenForJob(jobId.toString(), jobToken);
    LOG.info("Added token for " + jobId.toString());
}

From source file:org.apache.tez.auxservices.ShuffleHandler.java

License:Apache License

private void recordJobShuffleInfo(JobID jobId, String user, Token<JobTokenIdentifier> jobToken)
        throws IOException {
    if (stateDb != null) {
        TokenProto tokenProto = TokenProto.newBuilder()
                .setIdentifier(ByteString.copyFrom(jobToken.getIdentifier()))
                .setPassword(ByteString.copyFrom(jobToken.getPassword())).setKind(jobToken.getKind().toString())
                .setService(jobToken.getService().toString()).build();
        JobShuffleInfoProto proto = JobShuffleInfoProto.newBuilder().setUser(user).setJobToken(tokenProto)
                .build();
        try {
            stateDb.put(bytes(jobId.toString()), proto.toByteArray());
        } catch (DBException e) {
            throw new IOException("Error storing " + jobId, e);
        }
    }
    addJobToken(jobId, user, jobToken);
}

From source file:org.apache.tez.auxservices.ShuffleHandler.java

License:Apache License

private void removeJobShuffleInfo(JobID jobId) throws IOException {
    String jobIdStr = jobId.toString();
    secretManager.removeTokenForJob(jobIdStr);
    userRsrc.remove(jobIdStr);
    if (stateDb != null) {
        try {
            stateDb.delete(bytes(jobIdStr));
        } catch (DBException e) {
            throw new IOException("Unable to remove " + jobId + " from state store", e);
        }
    }
}
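
Across these three ShuffleHandler snippets, jobId.toString() produces the single canonical key under which a job's user, its shuffle token, and its state-store record are registered and later removed. A hypothetical round trip (the id string is made up, and jobToken stands for a Token<JobTokenIdentifier> obtained elsewhere):

// JobID.forName() parses the same canonical form that toString() produces
JobID jobId = JobID.forName("job_20240101000_0009");
recordJobShuffleInfo(jobId, "alice", jobToken); // keys everything by "job_20240101000_0009"
// ... shuffle traffic served ...
removeJobShuffleInfo(jobId); // deletes the same key when the job completes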