Example usage for org.apache.hadoop.mapreduce TaskAttemptContext setStatus

Introduction

This page collects example usages of setStatus from org.apache.hadoop.mapreduce.TaskAttemptContext.

Prototype

public void setStatus(String msg);

Document

Set the current status of the task to the given string.
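
Before the collected sources, here is a minimal sketch of calling setStatus from inside a running task. It is not taken from the examples below; a Mapper's Context extends TaskAttemptContext, and the class name and the 10,000-record reporting interval are illustrative assumptions.

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class StatusReportingMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

    private long records = 0;

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        records++;
        // Periodically refresh the human-readable status string shown for this
        // task attempt in the job UI and logs.
        if (records % 10000 == 0) {
            context.setStatus("Processed " + records + " records");
        }
        context.write(value, key);
    }
}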

Usage

From source file: BamInputFormat.java

License: Apache License

public BamRecordReader createRecordReader(InputSplit genericSplit, TaskAttemptContext context)
        throws IOException {
    context.setStatus(genericSplit.toString());
    return new BamRecordReader();
}

From source file: be.uantwerpen.adrem.hadoop.util.SplitByKTextInputFormat.java

License: Apache License

@Override
public RecordReader<LongWritable, Text> createRecordReader(InputSplit genericSplit, TaskAttemptContext context)
        throws IOException {
    context.setStatus(genericSplit.toString());
    return new LineRecordReader();
}

From source file: brush.InterleavedFastqInputFormat.java

License: Apache License

/**
 * Creates the new record reader that underlies this input format.
 *
 * @param genericSplit The split that the record reader should read.
 * @param context The Hadoop task context.
 * @return Returns the interleaved FASTQ record reader.
 */
public RecordReader<Void, Text> createRecordReader(InputSplit genericSplit, TaskAttemptContext context)
        throws IOException, InterruptedException {
    context.setStatus(genericSplit.toString());

    // cast as per example in TextInputFormat
    return new InterleavedFastqRecordReader(context.getConfiguration(), (FileSplit) genericSplit);
}

From source file: bucket_sort.NLineInputFormat.java

License: Apache License

public RecordReader<LongWritable, Text> createRecordReader(InputSplit genericSplit, TaskAttemptContext context)
        throws IOException {
    context.setStatus(genericSplit.toString());
    return new LineRecordReader();
}

From source file: com.baynote.kafka.hadoop.MultipleKafkaInputFormat.java

License: Apache License

/**
 * {@inheritDoc}
 */
@Override
public RecordReader<LongWritable, BytesWritable> createRecordReader(final InputSplit split,
        final TaskAttemptContext context) throws IOException, InterruptedException {
    final TaggedInputSplit taggedInputSplit = (TaggedInputSplit) split;
    final TaskAttemptContext taskAttemptContextClone = new TaskAttemptContextImpl(taggedInputSplit.getConf(),
            context.getTaskAttemptID());
    taskAttemptContextClone.setStatus(context.getStatus());
    return new DelegatingRecordReader<LongWritable, BytesWritable>(split, taskAttemptContextClone);
}

From source file: com.cloudera.crunch.type.avro.AvroInputFormat.java

License: Apache License

@Override
public RecordReader<AvroWrapper<T>, NullWritable> createRecordReader(InputSplit split,
        TaskAttemptContext context) throws IOException, InterruptedException {
    context.setStatus(split.toString());
    return new AvroRecordReader<T>();
}

From source file: com.datasalt.pangool.solr.BatchWriter.java

License: Apache License

public synchronized void close(TaskAttemptContext context, SolrCore core)
        throws InterruptedException, SolrServerException, IOException {

    context.setStatus("Waiting for batches to complete");
    batchPool.shutdown();

    while (!batchPool.isTerminated()) {
        LOG.info(String.format("Waiting for %d items and %d threads to finish executing",
                batchPool.getQueue().size(), batchPool.getActiveCount()));
        batchPool.awaitTermination(5, TimeUnit.SECONDS);
    }
    context.setStatus("Optimizing Solr");
    solr.optimize(true, false, 1);
    context.setStatus("Closing Solr");
    core.close();
}

From source file: com.datasalt.pangool.solr.SolrRecordWriter.java

License: Apache License

@Override
public void close(TaskAttemptContext context) throws IOException, InterruptedException {
    if (context != null) {
        heartBeater.setProgress(context);
    }
    try {
        if (batch.size() > 0) {
            batchWriter.queueBatch(batch);
            batch.clear();
        }
        heartBeater.needHeartBeat();
        batchWriter.close(context, core);
        if (outputZipFile) {
            context.setStatus("Writing Zip");
            packZipFile(); // Written to the perm location
        } else {
            context.setStatus("Copying Index");
            fs.completeLocalOutput(perm, local); // copy to dfs
        }
    } catch (Exception e) {
        if (e instanceof IOException) {
            throw (IOException) e;
        }
        throw new IOException(e);
    } finally {
        heartBeater.cancelHeartBeat();
        File tempFile = new File(local.toString());
        if (tempFile.exists()) {
            FileUtils.forceDelete(new File(local.toString()));
        }
    }

    context.setStatus("Done");
}

From source file: com.datasalt.pangool.tuplemr.avro.AvroInputFormat.java

License: Apache License

@Override
public RecordReader<AvroWrapper<T>, NullWritable> createRecordReader(InputSplit inputSplit,
        TaskAttemptContext context) throws IOException, InterruptedException {
    context.setStatus(inputSplit.toString());
    return new AvroRecordReader<T>(getSchema(), isReflect, context.getConfiguration(), (FileSplit) inputSplit);
}

From source file: com.knewton.mapreduce.SSTableRecordReader.java

License: Apache License

/**
 * Decompresses input files that were snappy compressed before opening them with the sstable
 * reader. It writes a new decompressed file with the same name as the compressed one. The old
 * one gets deleted.
 */
private void decompress(Path localTablePath, TaskAttemptContext context) throws IOException {
    context.setStatus(String.format("Decompressing %s", localTablePath.toUri()));
    int compressionBufSize = context.getConfiguration().getInt(PropertyConstants.DECOMPRESS_BUFFER.txt,
            DEFAULT_DECOMPRESS_BUFFER_SIZE);
    compressionBufSize *= 1024;
    LOG.info("Decompressing {} with buffer size {}.", localTablePath, compressionBufSize);
    File compressedFile = new File(localTablePath.toString());
    InputStream fis = new FileInputStream(compressedFile);
    InputStream bis = new BufferedInputStream(fis, compressionBufSize);
    InputStream sip = new SnappyInputStream(bis);
    File decompressedFile = new File(localTablePath.toString() + ".tmp");

    OutputStream os = new FileOutputStream(decompressedFile);
    OutputStream bos = new BufferedOutputStream(os, compressionBufSize);
    byte[] inByteArr = new byte[compressionBufSize];
    int bytesRead = 0;
    int bytesSinceLastReport = 0;
    while ((bytesRead = sip.read(inByteArr)) > 0) {
        bos.write(inByteArr, 0, bytesRead);
        bytesSinceLastReport += bytesRead;
        // Avoid timeouts: report progress to the jobtracker once every
        // REPORT_DECOMPRESS_PROGRESS_EVERY_GBS bytes decompressed.
        if (bytesSinceLastReport >= REPORT_DECOMPRESS_PROGRESS_EVERY_GBS) {
            context.setStatus(String.format("Decompressed %d bytes.", bytesSinceLastReport));
            bytesSinceLastReport -= REPORT_DECOMPRESS_PROGRESS_EVERY_GBS;
        }
    }
    sip.close();
    bos.close();
    compressedFile.delete();
    decompressedFile.renameTo(compressedFile);
}