Example usage for org.apache.hadoop.mapreduce JobID JobID

List of usage examples for org.apache.hadoop.mapreduce JobID JobID

Introduction

On this page you can find example usages of the no-argument org.apache.hadoop.mapreduce JobID constructor.

Prototype

public JobID() 
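
The no-argument constructor creates an empty JobID. The examples below use it purely as a placeholder identity when a simulated JobContext or TaskAttemptContext is needed outside a running MapReduce job. A minimal sketch (the "local" identifier and the printed formats are illustrative, not fixed by the API):

import org.apache.hadoop.mapreduce.JobID;

public class JobIDExample {
    public static void main(String[] args) {
        // Empty placeholder identity, as used in the usage examples below
        JobID placeholder = new JobID();

        // The two-argument constructor sets the jobtracker identifier and job number
        JobID explicit = new JobID("local", 1);

        System.out.println(placeholder); // an "empty" id, e.g. job__0000
        System.out.println(explicit);    // job_local_0001
    }
}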

Usage

From source file: org.apache.pig.backend.hadoop.executionengine.fetch.FetchPOStoreImpl.java

License: Apache License

@Override
public StoreFuncInterface createStoreFunc(POStore store) throws IOException {

    Configuration conf = ConfigurationUtil.toConfiguration(pc.getProperties());
    StoreFuncInterface storeFunc = store.getStoreFunc();
    JobContext jc = HadoopShims.createJobContext(conf, new JobID());

    OutputFormat<?, ?> outputFormat = storeFunc.getOutputFormat();
    PigOutputFormat.setLocation(jc, store);
    context = HadoopShims.createTaskAttemptContext(conf, HadoopShims.getNewTaskAttemptID());
    PigOutputFormat.setLocation(context, store);

    try {
        outputFormat.checkOutputSpecs(jc);
    } catch (InterruptedException e) {
        throw new IOException(e);
    }

    try {
        outputCommitter = outputFormat.getOutputCommitter(context);
        outputCommitter.setupJob(jc);
        outputCommitter.setupTask(context);
        writer = outputFormat.getRecordWriter(context);
    } catch (InterruptedException e) {
        throw new IOException(e);
    }
    storeFunc.prepareToWrite(writer);
    return storeFunc;
}
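
Here new JobID() supplies a throwaway job identity: Pig's fetch path drives the StoreFuncInterface outside a real MapReduce job, so HadoopShims.createJobContext needs some JobID to build the simulated JobContext against which the output specs are checked and the OutputCommitter is set up.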

From source file: org.apache.pig.impl.io.PigFile.java

License: Apache License

public void store(DataBag data, FuncSpec storeFuncSpec, PigContext pigContext) throws IOException {
    Configuration conf = ConfigurationUtil.toConfiguration(pigContext.getProperties());
    // create a simulated JobContext
    JobContext jc = HadoopShims.createJobContext(conf, new JobID());
    StoreFuncInterface sfunc = (StoreFuncInterface) PigContext.instantiateFuncFromSpec(storeFuncSpec);
    OutputFormat<?, ?> of = sfunc.getOutputFormat();

    POStore store = new POStore(new OperatorKey());
    store.setSFile(new FileSpec(file, storeFuncSpec));
    PigOutputFormat.setLocation(jc, store);
    OutputCommitter oc;
    // create a simulated TaskAttemptContext

    TaskAttemptContext tac = HadoopShims.createTaskAttemptContext(conf, HadoopShims.getNewTaskAttemptID());
    PigOutputFormat.setLocation(tac, store);
    RecordWriter<?, ?> rw;
    try {
        of.checkOutputSpecs(jc);
        oc = of.getOutputCommitter(tac);
        oc.setupJob(jc);
        oc.setupTask(tac);
        rw = of.getRecordWriter(tac);
        sfunc.prepareToWrite(rw);

        for (Iterator<Tuple> it = data.iterator(); it.hasNext();) {
            Tuple row = it.next();
            sfunc.putNext(row);
        }
        rw.close(tac);
    } catch (InterruptedException e) {
        throw new IOException(e);
    }
    if (oc.needsTaskCommit(tac)) {
        oc.commitTask(tac);
    }
    HadoopShims.commitOrCleanup(oc, jc);
}
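
This example walks the whole simulated output lifecycle against the placeholder contexts: check the output specs, obtain the committer, set up the job and task, write every tuple through the RecordWriter, close it, then commit the task if needed and finally commit or clean up the job.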

From source file: org.apache.pig.impl.io.ReadToEndLoader.java

License: Apache License

@SuppressWarnings("unchecked")
private void init() throws IOException {
    if (conf != null && pigContext != null) {
        SchemaTupleBackend.initialize(conf, pigContext, true);
    }

    // make a copy so that if the underlying InputFormat writes to the
    // conf, we don't affect the caller's copy
    conf = new Configuration(conf);

    // let's initialize the wrappedLoadFunc 
    Job job = new Job(conf);
    wrappedLoadFunc.setUDFContextSignature(this.udfContextSignature);
    wrappedLoadFunc.setLocation(inputLocation, job);
    // The above setLocation call could write to the conf within
    // the job - get a hold of the modified conf
    conf = job.getConfiguration();
    inputFormat = wrappedLoadFunc.getInputFormat();
    try {
        inpSplits = inputFormat.getSplits(HadoopShims.createJobContext(conf, new JobID()));
    } catch (InterruptedException e) {
        throw new IOException(e);
    }
}
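
In this case the empty JobID exists only because InputFormat.getSplits takes a JobContext; ReadToEndLoader computes splits outside any running job, so a placeholder identity is all that is required.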

From source file: org.goldenorb.OrbPartition.java

License: Apache License

private void dumpData() {
    Configuration conf = new Configuration();
    Job job = null;
    JobContext jobContext = null;
    TaskAttemptContext tao = null;
    RecordWriter rw;
    VertexWriter vw;
    FileOutputFormat outputFormat;

    boolean tryAgain = true;
    int count = 0;
    while (tryAgain && count < 15) {
        try {
            count++;
            tryAgain = false;
            if (job == null) {
                job = new Job(conf);
                job.setOutputFormatClass(TextOutputFormat.class);
                FileOutputFormat.setOutputPath(job,
                        new Path(new String(getOrbConf().getNameNode() + getOrbConf().getFileOutputPath())));
            }
            if (jobContext == null) {
                jobContext = new JobContext(job.getConfiguration(), new JobID());
            }

            System.out.println(jobContext.getConfiguration().get("mapred.output.dir"));

            tao = new TaskAttemptContext(jobContext.getConfiguration(),
                    new TaskAttemptID(new TaskID(jobContext.getJobID(), true, getPartitionID()), 0));
            outputFormat = (FileOutputFormat) tao.getOutputFormatClass().newInstance();
            rw = outputFormat.getRecordWriter(tao);
            vw = (VertexWriter) getOrbConf().getVertexOutputFormatClass().newInstance();
            for (Vertex v : vertices.values()) {
                OrbContext oc = vw.vertexWrite(v);
                rw.write(oc.getKey(), oc.getValue());
                // orbLogger.info("Partition: " + Integer.toString(partitionId) + "writing: " +
                // oc.getKey().toString() + ", " + oc.getValue().toString());
            }
            rw.close(tao);

            FileOutputCommitter cm = (FileOutputCommitter) outputFormat.getOutputCommitter(tao);
            if (cm.needsTaskCommit(tao)) {
                cm.commitTask(tao);
                cm.cleanupJob(jobContext);
            } else {
                cm.cleanupJob(jobContext);
                tryAgain = true;
            }

        } catch (IOException e) {
            tryAgain = true;
            e.printStackTrace();
        } catch (InstantiationException e) {
            tryAgain = true;
            e.printStackTrace();
        } catch (IllegalAccessException e) {
            tryAgain = true;
            e.printStackTrace();
        } catch (ClassNotFoundException e) {
            tryAgain = true;
            e.printStackTrace();
        } catch (InterruptedException e) {
            tryAgain = true;
            e.printStackTrace();
        }
    }
    if (tryAgain) {
        synchronized (this) {
            try {
                wait(1000);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    }
}
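
Note that this example appears to target an older Hadoop API in which JobContext and TaskAttemptContext are concrete classes that can be instantiated directly, and TaskID still has a (JobID, boolean, int) constructor; in newer releases those types are interfaces (with JobContextImpl and TaskAttemptContextImpl as implementations) and the boolean constructor is deprecated in favor of one taking a TaskType.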

From source file: org.slc.sli.aggregation.mapreduce.io.MockTaskAttemptContext.java

License: Apache License

@Override
public JobID getJobID() {
    return new JobID();
}
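
Returning a fresh new JobID() gives the mock a fixed, empty job identity. Since JobID equality is value-based, repeated calls should yield IDs that compare equal, which is typically enough for code under test that only requires some job ID to be present.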