Example usage for org.apache.hadoop.mapreduce TaskAttemptContext getCounter

Introduction

On this page you can find example usages of org.apache.hadoop.mapreduce TaskAttemptContext getCounter.

Prototype

public Counter getCounter(String groupName, String counterName);

Document

Get the Counter for the given groupName and counterName.
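
Counters obtained through this overload are created on first access and aggregated across task attempts by the framework. As a minimal sketch before the collected usages below (the mapper class, group name, and counter name here are hypothetical and not taken from any of the projects listed under Usage), the two-argument getCounter is typically called from a task context like this:

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class EmptyLineCountingMapper extends Mapper<LongWritable, Text, Text, LongWritable> {

    // Hypothetical group and counter names used only for this sketch.
    private static final String GROUP = "Example Counters";
    private static final String EMPTY_LINES = "Empty lines";

    @Override
    protected void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        // Mapper.Context is a TaskAttemptContext, so getCounter(groupName, counterName)
        // is available here; the counter is created on first use and then incremented.
        if (value.toString().trim().isEmpty()) {
            context.getCounter(GROUP, EMPTY_LINES).increment(1);
        } else {
            context.write(value, new LongWritable(1));
        }
    }
}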

Usage

From source file: com.asakusafw.runtime.stage.directio.Constants.java

License: Apache License

private static void putCounts0(TaskAttemptContext context, String groupId, String itemId, long fileCount,
        long recordCount, long byteCount) {
    if (itemId == null) {
        return;
    }
    context.getCounter(groupId, itemId + SUFFIX_FILE_COUNT).increment(fileCount);
    context.getCounter(groupId, itemId + SUFFIX_RECORD_COUNT).increment(recordCount);
    context.getCounter(groupId, itemId + SUFFIX_BYTE_COUNT).increment(byteCount);
}
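
The per-task increments made above are summed by the framework into job-level totals. A minimal read-back sketch from the driver side, assuming a completed Job and group/item names matching whatever the tasks passed to getCounter (the names used here are hypothetical):

import java.io.IOException;

import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;

public final class DirectIoCounterReader {

    // Hypothetical group and counter names; they must match the groupId and
    // itemId (plus suffix) that the tasks passed to TaskAttemptContext.getCounter(...).
    public static long readFileCount(Job completedJob) throws IOException {
        Counters counters = completedJob.getCounters();
        return counters.findCounter("com.example.directio", "output.file-count").getValue();
    }
}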

From source file: com.cloudera.recordservice.mapreduce.RecordServiceInputFormatBase.java

License: Apache License

/**
 * Populates RecordService counters in ctx from counters.
 */
public static void setCounters(TaskAttemptContext ctx, Stats counters) {
    if (ctx == null) {
        return;
    }
    ctx.getCounter(COUNTERS_GROUP_NAME, "Records Read").setValue(counters.numRecordsRead);
    ctx.getCounter(COUNTERS_GROUP_NAME, "Records Returned").setValue(counters.numRecordsReturned);
    ctx.getCounter(COUNTERS_GROUP_NAME, "Record Serialization Time(ms)").setValue(counters.serializeTimeMs);
    ctx.getCounter(COUNTERS_GROUP_NAME, "Client Time(ms)").setValue(counters.clientTimeMs);

    if (counters.hdfsCountersSet) {
        ctx.getCounter(COUNTERS_GROUP_NAME, "Bytes Read").setValue(counters.bytesRead);
        ctx.getCounter(COUNTERS_GROUP_NAME, "Decompression Time(ms)").setValue(counters.decompressTimeMs);
        ctx.getCounter(COUNTERS_GROUP_NAME, "Bytes Read Local").setValue(counters.bytesReadLocal);
        ctx.getCounter(COUNTERS_GROUP_NAME, "HDFS Throughput(MB/s)")
                .setValue((long) (counters.hdfsThroughput / (1024 * 1024)));
    }
}

From source file: nl.basjes.hadoop.input.ApacheHttpdLogfileRecordReader.java

License: Apache License

@Override
public void initialize(final InputSplit split, final TaskAttemptContext context) throws IOException {
    lineReader.initialize(split, context);
    final Configuration conf = context.getConfiguration();

    counterLinesRead = context.getCounter(HTTPD_LOGFILE_INPUT_FORMAT, "1:Lines read");
    counterGoodLines = context.getCounter(HTTPD_LOGFILE_INPUT_FORMAT, "2:Good lines");
    counterBadLines = context.getCounter(HTTPD_LOGFILE_INPUT_FORMAT, "3:Bad lines");

    if (logformat == null || requestedFields.isEmpty()) {
        if (logformat == null) {
            logformat = conf.get("nl.basjes.parse.apachehttpdlogline.format", "common");
        }
        if (requestedFields.isEmpty()) {
            String fields = conf.get("nl.basjes.parse.apachehttpdlogline.fields", null);

            if (fields != null) {
                fieldList = Arrays.asList(fields.split(","));
            }
        } else {
            fieldList = new ArrayList<>(requestedFields);
        }
    }

    if (fieldList != null) {
        if (logformat != null && parser == null) {
            parser = createParser();
        }
        for (String field : fieldList) {
            currentValue.declareRequestedFieldname(field);
        }
    }

    setupFields();
}

From source file: org.apache.solr.hadoop.BatchWriter.java

License: Apache License

public synchronized void close(TaskAttemptContext context)
        throws InterruptedException, SolrServerException, IOException {

    if (batchPool != null) {
        context.setStatus("Waiting for batches to complete");
        batchPool.shutdown();

        while (!batchPool.isTerminated()) {
            LOG.info(String.format("Waiting for %d items and %d threads to finish executing",
                    batchPool.getQueue().size(), batchPool.getActiveCount()));
            batchPool.awaitTermination(5, TimeUnit.SECONDS);
        }
    }
    context.setStatus("Committing Solr Phase 1");
    solr.commit(true, false);
    context.setStatus("Optimizing Solr");
    int maxSegments = context.getConfiguration().getInt(SolrOutputFormat.SOLR_RECORD_WRITER_MAX_SEGMENTS, 1);
    LOG.info("Optimizing Solr: forcing merge down to {} segments", maxSegments);
    long start = System.currentTimeMillis();
    solr.optimize(true, false, maxSegments);
    context.getCounter(SolrCounters.class.getName(), SolrCounters.PHYSICAL_REDUCER_MERGE_TIME.toString())
            .increment(System.currentTimeMillis() - start);
    float secs = (System.currentTimeMillis() - start) / 1000.0f;
    LOG.info("Optimizing Solr: done forcing merge down to {} segments in {} secs", maxSegments, secs);
    context.setStatus("Committing Solr Phase 2");
    solr.commit(true, false);
    context.setStatus("Shutting down Solr");
    solr.shutdown();
}