Example usage for org.apache.hadoop.mapred JobContext getJobConf

Introduction

On this page you can find example usage of org.apache.hadoop.mapred.JobContext#getJobConf().

Prototype

public JobConf getJobConf();

Document

Get the job Configuration.
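
Before the full examples, here is a minimal sketch of the pattern most of them share; the helper class below is hypothetical, but getJobConf() and FileOutputFormat.getOutputPath(JobConf) are the actual mapred APIs:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobContext;

// Hypothetical helper: reads a setting from the job's configuration.
public class JobConfExample {
    /** Returns the job's configured output path, or null if none was set. */
    public static Path outputPathOf(JobContext context) {
        // getJobConf() exposes the job's full mapred configuration.
        JobConf conf = context.getJobConf();
        return FileOutputFormat.getOutputPath(conf);
    }
}

Most of the committer examples below follow exactly this shape: obtain the JobConf from the JobContext, then hand it to FileSystem or FileOutputFormat helpers.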

Usage

From source file: com.ibm.bi.dml.runtime.matrix.data.MultipleOutputCommitter.java

License: Open Source License

@Override
public void setupJob(JobContext context) throws IOException {
    super.setupJob(context);
    // get the configured output directories and create them
    JobConf conf = context.getJobConf();
    String[] loutputs = MRJobConfiguration.getOutputs(conf);
    for (String dir : loutputs) {
        Path path = new Path(dir);
        FileSystem fs = path.getFileSystem(conf);
        if (!fs.mkdirs(path))
            LOG.error("Mkdirs failed to create " + path.toString());
    }
}

From source file: com.ibm.bi.dml.runtime.matrix.data.MultipleOutputCommitter.java

License: Open Source License

@Override
public void cleanupJob(JobContext context) throws IOException {
    JobConf conf = context.getJobConf();
    // clean up the job's temporary output directory
    Path outputPath = FileOutputFormat.getOutputPath(conf);
    if (outputPath != null) {
        FileSystem fs = outputPath.getFileSystem(conf);
        context.getProgressible().progress();
        if (fs.exists(outputPath))
            fs.delete(outputPath, true);
    }
}

From source file: com.ibm.jaql.io.hadoop.DirectFileOutputCommiter.java

License: Apache License

@Override
public void setupJob(JobContext context) throws IOException {
    // Create the parent directory of the output path, if needed.
    JobConf conf = context.getJobConf();
    Path outputPath = FileOutputFormat.getOutputPath(conf);
    if (outputPath != null) {
        Path tmpDir = outputPath.getParent();
        FileSystem fileSys = outputPath.getFileSystem(conf);
        if (!fileSys.mkdirs(tmpDir)) {
            throw new IOException("Mkdirs failed to create " + tmpDir.toString());
        }
    }
}

From source file: org.apache.ignite.internal.processors.hadoop.impl.v1.HadoopV1CleanupTask.java

License: Apache License

/** {@inheritDoc} */
@Override
public void run(HadoopTaskContext taskCtx) throws IgniteCheckedException {
    HadoopV2TaskContext ctx = (HadoopV2TaskContext) taskCtx;

    JobContext jobCtx = ctx.jobContext();

    try {
        OutputCommitter committer = jobCtx.getJobConf().getOutputCommitter();

        if (abort)
            committer.abortJob(jobCtx, JobStatus.State.FAILED);
        else
            committer.commitJob(jobCtx);
    } catch (IOException e) {
        throw new IgniteCheckedException(e);
    }
}

From source file: org.apache.ignite.internal.processors.hadoop.v1.GridHadoopV1CleanupTask.java

License: Apache License

/** {@inheritDoc} */
@Override
public void run(GridHadoopTaskContext taskCtx) throws IgniteCheckedException {
    GridHadoopV2TaskContext ctx = (GridHadoopV2TaskContext) taskCtx;

    JobContext jobCtx = ctx.jobContext();

    try {
        OutputCommitter committer = jobCtx.getJobConf().getOutputCommitter();

        if (abort)
            committer.abortJob(jobCtx, JobStatus.State.FAILED);
        else
            committer.commitJob(jobCtx);
    } catch (IOException e) {
        throw new IgniteCheckedException(e);
    }
}

From source file: org.gridgain.grid.kernal.processors.hadoop.v1.GridHadoopV1CleanupTask.java

License: Open Source License

/** {@inheritDoc} */
@Override
public void run(GridHadoopTaskContext taskCtx) throws GridException {
    GridHadoopV2TaskContext ctx = (GridHadoopV2TaskContext) taskCtx;

    JobContext jobCtx = ctx.jobContext();

    try {
        OutputCommitter committer = jobCtx.getJobConf().getOutputCommitter();

        if (abort)
            committer.abortJob(jobCtx, JobStatus.State.FAILED);
        else
            committer.commitJob(jobCtx);
    } catch (IOException e) {
        throw new GridException(e);
    }
}

From source file: org.mitre.bio.mapred.io.FastaInputFormat.java

License: Open Source License

protected boolean isSplitable(JobContext context, Path file) {
    CompressionCodec codec = new CompressionCodecFactory(context.getJobConf()).getCodec(file);
    return codec == null;
}
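
As a quick illustration of the codec lookup above (the file names here are hypothetical): CompressionCodecFactory resolves a codec from the file-name suffix, so a gzipped input is reported as non-splitable while an uncompressed file stays splitable.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.mapred.JobConf;

public class SplitableCheck {
    public static void main(String[] args) {
        CompressionCodecFactory factory = new CompressionCodecFactory(new JobConf());
        // ".gz" maps to GzipCodec, so a non-null codec comes back: not splitable.
        CompressionCodec gz = factory.getCodec(new Path("reads.fa.gz"));
        System.out.println("reads.fa.gz splitable? " + (gz == null)); // false
        // An unknown suffix yields no codec: splitable.
        CompressionCodec none = factory.getCodec(new Path("reads.fa"));
        System.out.println("reads.fa splitable? " + (none == null)); // true
    }
}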

From source file: tachyon.client.keyvalue.hadoop.KeyValueOutputCommitter.java

License: Apache License

/**
 * {@inheritDoc}
 * <p>
 * Calls {@link FileOutputCommitter#setupJob(JobContext)} first, and then creates an empty
 * key-value store under the job's output directory.
 */
@Override
public void setupJob(JobContext context) throws IOException {
    super.setupJob(context);
    try {
        KEY_VALUE_STORES.create(getOutputURI(context.getJobConf())).close();
    } catch (TachyonException e) {
        throw new IOException(e);
    }
}

From source file: tachyon.client.keyvalue.hadoop.KeyValueOutputCommitter.java

License: Apache License

/**
 * {@inheritDoc}
 * <p>
 * Deletes the key-value store created in {@link #setupJob(JobContext)} first, and then
 * calls {@link FileOutputCommitter#abortJob(JobContext, int)}.
 */
@Override
public void abortJob(JobContext context, int runState) throws IOException {
    // The output directory should exist, since the store writer has just been closed.
    try {
        KEY_VALUE_STORES.delete(getOutputURI(context.getJobConf()));
    } catch (TachyonException e) {
        throw new IOException(e);
    }
    super.abortJob(context, runState);
}