Example usage for com.google.common.base Stopwatch start

List of usage examples for com.google.common.base Stopwatch start

Introduction

On this page you can find example usages of com.google.common.base.Stopwatch#start(), collected from real-world projects.

Prototype

public Stopwatch start() 

Document

Starts the stopwatch. Returns this Stopwatch instance; throws IllegalStateException if the stopwatch is already running.
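
Before the project examples, here is a minimal, self-contained sketch of the usual create/start/stop/elapsed pattern; the Thread.sleep calls are only placeholders for the work being timed:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchStartExample {
    public static void main(String[] args) throws InterruptedException {
        // Create an unstarted stopwatch, then start it explicitly.
        Stopwatch timer = Stopwatch.createUnstarted();
        timer.start();

        Thread.sleep(50); // placeholder for the work being measured

        timer.stop();
        System.out.println("Elapsed: " + timer.elapsed(TimeUnit.MILLISECONDS) + " ms");

        // Calling start() again resumes timing; elapsed time accumulates across runs.
        timer.start();
        Thread.sleep(10);
        timer.stop();
        System.out.println("Total elapsed: " + timer.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}

Note that several of the examples below still use the old public constructor (new Stopwatch()) together with elapsedMillis()/elapsedTime(TimeUnit); later Guava releases replaced these with the Stopwatch.createStarted()/createUnstarted() factories and elapsed(TimeUnit).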

Usage

From source file:org.apache.drill.exec.store.parquet.AbstractParquetScanBatchCreator.java

protected ScanBatch getBatch(ExecutorFragmentContext context, AbstractParquetRowGroupScan rowGroupScan,
        OperatorContext oContext) throws ExecutionSetupException {
    final ColumnExplorer columnExplorer = new ColumnExplorer(context.getOptions(), rowGroupScan.getColumns());

    if (!columnExplorer.isStarQuery()) {
        rowGroupScan = rowGroupScan.copy(columnExplorer.getTableColumns());
        rowGroupScan.setOperatorId(rowGroupScan.getOperatorId());
    }

    AbstractDrillFileSystemManager fsManager = getDrillFileSystemCreator(oContext, context.getOptions());

    // keep footers in a map to avoid re-reading them
    Map<String, ParquetMetadata> footers = new HashMap<>();
    List<RecordReader> readers = new LinkedList<>();
    List<Map<String, String>> implicitColumns = new ArrayList<>();
    Map<String, String> mapWithMaxColumns = new LinkedHashMap<>();
    for (RowGroupReadEntry rowGroup : rowGroupScan.getRowGroupReadEntries()) {
        /*
        Here we could store a map from file names to footers, to prevent re-reading the footer for each row group in a file
        TODO - to prevent reading the footer again in the parquet record reader (it is read earlier in the ParquetStorageEngine)
        we should add more information to the RowGroupInfo that will be populated upon the first read to
        provide the reader with all of the file meta-data it needs
        These fields will be added to the constructor below
        */
        try {
            Stopwatch timer = logger.isTraceEnabled() ? Stopwatch.createUnstarted() : null;
            DrillFileSystem fs = fsManager.get(rowGroupScan.getFsConf(rowGroup), rowGroup.getPath());
            if (!footers.containsKey(rowGroup.getPath())) {
                if (timer != null) {
                    timer.start();
                }

                ParquetMetadata footer = readFooter(fs.getConf(), rowGroup.getPath());
                if (timer != null) {
                    long timeToRead = timer.elapsed(TimeUnit.MICROSECONDS);
                    logger.trace("ParquetTrace,Read Footer,{},{},{},{},{},{},{}", "", rowGroup.getPath(), "", 0,
                            0, 0, timeToRead);
                }
                footers.put(rowGroup.getPath(), footer);
            }
            ParquetMetadata footer = footers.get(rowGroup.getPath());

            boolean autoCorrectCorruptDates = rowGroupScan.areCorruptDatesAutoCorrected();
            ParquetReaderUtility.DateCorruptionStatus containsCorruptDates = ParquetReaderUtility
                    .detectCorruptDates(footer, rowGroupScan.getColumns(), autoCorrectCorruptDates);
            logger.debug("Contains corrupt dates: {}", containsCorruptDates);

            if (!context.getOptions().getBoolean(ExecConstants.PARQUET_NEW_RECORD_READER)
                    && !isComplex(footer)) {
                readers.add(new ParquetRecordReader(context, rowGroup.getPath(), rowGroup.getRowGroupIndex(),
                        rowGroup.getNumRecordsToRead(), fs,
                        CodecFactory.createDirectCodecFactory(fs.getConf(),
                                new ParquetDirectByteBufferAllocator(oContext.getAllocator()), 0),
                        footer, rowGroupScan.getColumns(), containsCorruptDates));
            } else {
                readers.add(new DrillParquetReader(context, footer, rowGroup, columnExplorer.getTableColumns(),
                        fs, containsCorruptDates));
            }

            List<String> partitionValues = rowGroupScan.getPartitionValues(rowGroup);
            Map<String, String> implicitValues = columnExplorer.populateImplicitColumns(rowGroup.getPath(),
                    partitionValues, rowGroupScan.supportsFileImplicitColumns());
            implicitColumns.add(implicitValues);
            if (implicitValues.size() > mapWithMaxColumns.size()) {
                mapWithMaxColumns = implicitValues;
            }

        } catch (IOException e) {
            throw new ExecutionSetupException(e);
        }
    }

    // all readers should have the same number of implicit columns, add missing ones with value null
    Map<String, String> diff = Maps.transformValues(mapWithMaxColumns, Functions.constant((String) null));
    for (Map<String, String> map : implicitColumns) {
        map.putAll(Maps.difference(map, diff).entriesOnlyOnRight());
    }

    return new ScanBatch(context, oContext, readers, implicitColumns);
}

From source file:co.cask.cdap.data2.transaction.queue.AbstractQueueConsumer.java

private DequeueResult<byte[]> performDequeue(int maxBatchSize) throws IOException {
    Preconditions.checkArgument(maxBatchSize > 0, "Batch size must be > 0.");

    // pre-compute the "claimed" state content in case of FIFO.
    byte[] claimedStateValue = null;
    if (getConfig().getDequeueStrategy() == DequeueStrategy.FIFO && getConfig().getGroupSize() > 1) {
        claimedStateValue = encodeStateColumn(ConsumerEntryState.CLAIMED);
    }

    boolean isReachedDequeueTimeLimit = false;
    Stopwatch stopwatch = new Stopwatch();
    stopwatch.start();
    while (consumingEntries.size() < maxBatchSize && getEntries(consumingEntries, maxBatchSize, stopwatch)) {

        // ANDREAS: this while loop should stop once getEntries/populateCache reaches the end of the queue. Currently, it
        // will retry as long as it gets at least one entry in every round, even if that is an entry that must be ignored
        // because it cannot be claimed.
        // ANDREAS: It could be a problem that we always read to the end of the queue. This way one flowlet instance may
        // always consume all entries, while others are idle.

        // For FIFO, need to try claiming the entry if group size > 1
        if (getConfig().getDequeueStrategy() == DequeueStrategy.FIFO && getConfig().getGroupSize() > 1) {
            Iterator<Map.Entry<byte[], SimpleQueueEntry>> iterator = consumingEntries.entrySet().iterator();
            while (iterator.hasNext()) {
                SimpleQueueEntry entry = iterator.next().getValue();

                if (entry.getState() == null
                        || QueueEntryRow.getStateInstanceId(entry.getState()) >= getConfig().getGroupSize()) {
                    // If not able to claim it, remove it, and move to next one.
                    if (!claimEntry(entry.getRowKey(), claimedStateValue)) {
                        iterator.remove();
                    }

                    if (stopwatch.elapsedMillis() >= maxDequeueMillis) {
                        break;
                    }
                }
            }
            // Drain the iterator in case of dequeue time limit reached
            Iterators.advance(iterator, Integer.MAX_VALUE);
        }

        if (stopwatch.elapsedMillis() >= maxDequeueMillis) {
            // If time limit reached and yet we don't have enough entries as requested, treat it as dequeue time limit
            // reached. There can be some false positive (reached the end of queue, yet passed the time limit), but
            // it's ok since we only use this boolean for logging only and normally it won't be the case as long as
            // dequeue is completed in relatively short time comparing to the tx timeout.
            isReachedDequeueTimeLimit = consumingEntries.size() < maxBatchSize;
            break;
        }
    }

    // If nothing get dequeued, return the empty result.
    if (consumingEntries.isEmpty()) {
        if (isReachedDequeueTimeLimit) {
            LOG.warn("Unable to dequeue any entry after {}ms.", maxDequeueMillis);
        }
        return EMPTY_RESULT;
    }

    if (isReachedDequeueTimeLimit) {
        LOG.warn("Dequeue time limit of {}ms reached. Requested batch size {}, dequeued {}", maxDequeueMillis,
                maxBatchSize, consumingEntries.size());
    }

    return new SimpleDequeueResult(consumingEntries.values());
}

From source file:uk.ac.open.kmi.iserve.discovery.disco.impl.SparqlLogicConceptMatcher.java

/**
 * Obtains all the matching resources that have a MatchType with the URIs of {@code origin} of the type provided (inclusive) or more.
 *
 * @param origins URIs to match
 * @param minType the minimum MatchType we want to obtain
 * @return a {@link com.google.common.collect.Table} with the result of the matching indexed by origin URI and then destination URI.
 */
@Override
public Table<URI, URI, MatchResult> listMatchesAtLeastOfType(Set<URI> origins, MatchType minType) {
    Table<URI, URI, MatchResult> matchTable = HashBasedTable.create();
    Stopwatch w = new Stopwatch();
    for (URI origin : origins) {
        w.start();
        Map<URI, MatchResult> result = listMatchesAtLeastOfType(origin, minType);
        for (Map.Entry<URI, MatchResult> dest : result.entrySet()) {
            matchTable.put(origin, dest.getKey(), dest.getValue());
        }
        log.debug("Computed matched types for {} in {}. {} total matches.", origin, w.stop().toString(),
                result.size());
        w.reset();
    }
    return matchTable;

    //        return obtainMatchResults(origins, minType, this.getMatchTypesSupported().getHighest()); // TODO: Use the proper implementation for this
}

From source file:org.apache.hadoop.hbase.ScanPerformanceEvaluation.java

public void testSnapshotScan() throws IOException {
    Stopwatch snapshotRestoreTimer = new Stopwatch();
    Stopwatch scanOpenTimer = new Stopwatch();
    Stopwatch scanTimer = new Stopwatch();

    Path restoreDir = new Path(this.restoreDir);

    snapshotRestoreTimer.start();
    restoreDir.getFileSystem(conf).delete(restoreDir, true);
    snapshotRestoreTimer.stop();

    Scan scan = getScan();
    scanOpenTimer.start();
    TableSnapshotScanner scanner = new TableSnapshotScanner(conf, restoreDir, snapshotName, scan);
    scanOpenTimer.stop();

    long numRows = 0;
    long numCells = 0;
    scanTimer.start();
    while (true) {
        Result result = scanner.next();
        if (result == null) {
            break;
        }
        numRows++;

        numCells += result.rawCells().length;
    }
    scanTimer.stop();
    scanner.close();

    ScanMetrics metrics = scanner.getScanMetrics();
    long totalBytes = metrics.countOfBytesInResults.get();
    double throughput = (double) totalBytes / scanTimer.elapsedTime(TimeUnit.SECONDS);
    double throughputRows = (double) numRows / scanTimer.elapsedTime(TimeUnit.SECONDS);
    double throughputCells = (double) numCells / scanTimer.elapsedTime(TimeUnit.SECONDS);

    System.out.println("HBase scan snapshot: ");
    System.out.println("total time to restore snapshot: " + snapshotRestoreTimer.elapsedMillis() + " ms");
    System.out.println("total time to open scanner: " + scanOpenTimer.elapsedMillis() + " ms");
    System.out.println("total time to scan: " + scanTimer.elapsedMillis() + " ms");

    System.out.println("Scan metrics:\n" + metrics.getMetricsMap());

    System.out.println(
            "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")");
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughput) + "B/s");
    System.out.println("total rows  : " + numRows);
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s");
    System.out.println("total cells : " + numCells);
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s");

}

From source file:com.Grande.GSM.BACCWS_WAR.WS.REST.EOS.FirmwareDefinitionsEndpoint.java

public String fetchFirmwareDefinitions(@QueryParam("filter") String strFilter) {

    // <editor-fold defaultstate="collapsed" desc="****** Method vars ******">
    final Stopwatch timer = new Stopwatch();
    final QueryResponse qRes = new QueryResponse();
    String strResponse = null;
    String strMake = null;
    String strModel = null;
    List<SenchaFilter> lstFilters = null;
    List<FirmwareDefinition> lstFirmwareDefs = null;
    // start the execution timer
    timer.start();
    // </editor-fold>

    try {
        qRes.vSetNode(java.net.InetAddress.getLocalHost().getHostName());

        // <editor-fold defaultstate="collapsed" desc="****** No filters (return all records) ******">
        if (strFilter == null || strFilter.equals("")) {
            SimpleLogging.vLogEvent(this.strThreadId, "No filter detected, fetching all definitions");
            lstFirmwareDefs = this.trnBN.lstGetFirmwareDefinitions();
            SimpleLogging.vLogEvent(this.strThreadId, "Returning " + lstFirmwareDefs.size() + " definitions");
            // </editor-fold>

            // <editor-fold defaultstate="collapsed" desc="****** Evaluate/apply Sencha filters ******">
        } else {

            // <editor-fold defaultstate="collapsed" desc="****** Filter extraction logic ******">
            // Deserialize the filters
            SimpleLogging.vLogEvent(this.strThreadId, "Processing filter JSON: " + strFilter);
            lstFilters = this.trnBN.lstDeserializeSenchaFilter(strFilter);
            // extract filters that contain 'make' and 'model'
            //lstFilters = this.trnBN.lstFilterSenchaFiltersByStrings(lstFilters, "make", "model");
            SimpleLogging.vLogEvent(this.strThreadId,
                    "Extracted " + lstFilters.size() + " make/model filters: " + lstFilters);
            // </editor-fold>

            // <editor-fold defaultstate="collapsed" desc="****** Filter handling logic ******">
            lstFirmwareDefs = this.trnBN.lstGetFirmwareDefinitionByFilter(lstFilters);
            SimpleLogging.vLogEvent(this.strThreadId, "Returning " + lstFirmwareDefs.size() + " definitions");
            // </editor-fold>
        }
        qRes.vAddResult(lstFirmwareDefs.toArray());
        for (int x = 0; x < lstFirmwareDefs.size(); x++) {
            qRes.vAddResult(lstFirmwareDefs.get(x));
        }
        qRes.vSetSuccessFlag(true);
        // </editor-fold>

    } catch (Exception e) {

        // <editor-fold defaultstate="collapsed" desc="****** Handle failures ******">
        qRes.vSetSuccessFlag(false);
        // handle NPE differently since getMessage() is null
        if (e instanceof NullPointerException) {
            qRes.vSetMessage("NPE occurred when serializing result to JSON! " + "File: "
                    + e.getStackTrace()[0].getFileName() + ", " + "Method: "
                    + e.getStackTrace()[0].getMethodName() + ", " + "Line: "
                    + e.getStackTrace()[0].getLineNumber());
        } else {
            qRes.vSetMessage(e.getMessage());
        }
        SimpleLogging.vLogException(this.strThreadId, e);
        // </editor-fold>

    } finally {

        // <editor-fold defaultstate="collapsed" desc="****** Stop timer, convert response to JSON ******">
        timer.stop();
        qRes.vSetRoundTrip(String.valueOf(timer.elapsedTime(TimeUnit.SECONDS)) + "."
                + String.valueOf(timer.elapsedTime(TimeUnit.MILLISECONDS)));
        strResponse = this.trnBN.strQueryResponseToJSON(qRes);
        SimpleLogging.vLogEvent(this.strThreadId + "|" + qRes.strGetRoundTripInSeconds() + "s",
                "retrieved " + qRes.intGetDataCount() + " records");
        // </editor-fold>

    }
    return strResponse;
}

From source file:org.apache.drill.exec.store.mapr.db.json.MaprDBJsonRecordReader.java

@Override
public int next() {
    Stopwatch watch = Stopwatch.createUnstarted();
    watch.start();

    vectorWriter.allocate();
    vectorWriter.reset();

    int recordCount = 0;
    DBDocumentReaderBase reader = null;

    while (recordCount < BaseValueVector.INITIAL_VALUE_ALLOCATION) {
        vectorWriter.setPosition(recordCount);
        try {
            reader = nextDocumentReader();
            if (reader == null) {
                break; // no more documents for this scanner
            } else if (isSkipQuery()) {
                vectorWriter.rootAsMap().bit("count").writeBit(1);
            } else {
                MapOrListWriterImpl writer = new MapOrListWriterImpl(vectorWriter.rootAsMap());
                if (idOnly) {
                    writeId(writer, reader.getId());
                } else {
                    if (reader.next() != EventType.START_MAP) {
                        throw dataReadError("The document did not start with START_MAP!");
                    }
                    writeToListOrMap(writer, reader);
                }
            }
            recordCount++;
        } catch (UserException e) {
            throw UserException
                    .unsupportedError(e).addContext(String.format("Table: %s, document id: '%s'",
                            table.getPath(), reader == null ? null : IdCodec.asString(reader.getId())))
                    .build(logger);
        } catch (SchemaChangeException e) {
            if (ignoreSchemaChange) {
                logger.warn("{}. Dropping the row from result.", e.getMessage());
                logger.debug("Stack trace:", e);
            } else {
                throw dataReadError(e);
            }
        }
    }

    vectorWriter.setValueCount(recordCount);
    logger.debug("Took {} ms to get {} records", watch.elapsed(TimeUnit.MILLISECONDS), recordCount);
    return recordCount;
}

From source file:org.apache.drill.exec.physical.impl.TopN.TopNBatch.java

@Override
public IterOutcome innerNext() {
    if (state == BatchState.DONE) {
        return IterOutcome.NONE;
    }
    if (schema != null) {
        if (getSelectionVector4().next()) {
            recordCount = sv4.getCount();
            return IterOutcome.OK;
        } else {
            recordCount = 0;
            return IterOutcome.NONE;
        }
    }

    try {
        outer: while (true) {
            Stopwatch watch = new Stopwatch();
            watch.start();
            IterOutcome upstream;
            if (first) {
                upstream = IterOutcome.OK_NEW_SCHEMA;
                first = false;
            } else {
                upstream = next(incoming);
            }
            if (upstream == IterOutcome.OK && schema == null) {
                upstream = IterOutcome.OK_NEW_SCHEMA;
                container.clear();
            }
            logger.debug("Took {} us to get next", watch.elapsed(TimeUnit.MICROSECONDS));
            switch (upstream) {
            case NONE:
                break outer;
            case NOT_YET:
                throw new UnsupportedOperationException();
            case OUT_OF_MEMORY:
            case STOP:
                return upstream;
            case OK_NEW_SCHEMA:
                // only change in the case that the schema truly changes.  Artificial schema changes are ignored.
                if (!incoming.getSchema().equals(schema)) {
                    if (schema != null) {
                        throw new UnsupportedOperationException(
                                "Sort doesn't currently support sorts with changing schemas.");
                    }
                    this.schema = incoming.getSchema();
                }
                // fall through.
            case OK:
                if (incoming.getRecordCount() == 0) {
                    for (VectorWrapper w : incoming) {
                        w.clear();
                    }
                    break;
                }
                countSincePurge += incoming.getRecordCount();
                batchCount++;
                RecordBatchData batch = new RecordBatchData(incoming);
                boolean success = false;
                try {
                    batch.canonicalize();
                    if (priorityQueue == null) {
                        priorityQueue = createNewPriorityQueue(context, config.getOrderings(),
                                new ExpandableHyperContainer(batch.getContainer()), MAIN_MAPPING, LEFT_MAPPING,
                                RIGHT_MAPPING);
                    }
                    priorityQueue.add(context, batch);
                    if (countSincePurge > config.getLimit() && batchCount > batchPurgeThreshold) {
                        purge();
                        countSincePurge = 0;
                        batchCount = 0;
                    }
                    success = true;
                } finally {
                    if (!success) {
                        batch.clear();
                    }
                }
                break;
            default:
                throw new UnsupportedOperationException();
            }
        }

        if (schema == null || priorityQueue == null) {
            // builder may be null at this point if the first incoming batch is empty
            state = BatchState.DONE;
            return IterOutcome.NONE;
        }

        priorityQueue.generate();

        this.sv4 = priorityQueue.getFinalSv4();
        container.clear();
        for (VectorWrapper w : priorityQueue.getHyperBatch()) {
            container.add(w.getValueVectors());
        }
        container.buildSchema(BatchSchema.SelectionVectorMode.FOUR_BYTE);

        recordCount = sv4.getCount();
        return IterOutcome.OK_NEW_SCHEMA;

    } catch (SchemaChangeException | ClassTransformationException | IOException ex) {
        kill(false);
        logger.error("Failure during query", ex);
        context.fail(ex);
        return IterOutcome.STOP;
    }
}

From source file:be.nbb.jackcess.JackcessStatement.java

@Nonnull
public JackcessResultSet executeQuery(@Nonnull DbBasicSelect query) throws IOException {
    Table table = database.getTable(query.getTableName());

    List<Column> selectColumns = getAllByName(table, query.getSelectColumns());
    List<Column> orderColumns = getAllByName(table, query.getOrderColumns());
    SortedSet<Column> dataColumns = mergeAndSortByInternalIndex(selectColumns, orderColumns);
    SortedMap<Column, String> filter = getFilter(table, query.getFilterItems());

    LOGGER.debug("Query : '{}'", query);

    Stopwatch sw = Stopwatch.createStarted();
    CheckedIterator<Object[], IOException> rows = new Adapter(
            CursorFacade.range(table, range).withFilter(filter), dataColumns);
    LOGGER.debug("Iterator done in {}ms", sw.stop().elapsed(TimeUnit.MILLISECONDS));

    ToIndex toIndex = new ToIndex(dataColumns);

    if (query.isDistinct()) {
        sw.start();
        rows = DbRawDataUtil.distinct(rows, selectColumns, toIndex, ToDataType.INSTANCE,
                new Aggregator(dataColumns.size() + 1));
        LOGGER.debug("Distinct done in {}ms", sw.stop().elapsed(TimeUnit.MILLISECONDS));
    }

    if (DbRawDataUtil.isSortRequired(query.isDistinct(), selectColumns, orderColumns)) {
        sw.start();
        rows = DbRawDataUtil.sort(rows, orderColumns, toIndex, ToDataType.INSTANCE);
        LOGGER.debug("Sort done in {}ms", sw.stop().elapsed(TimeUnit.MILLISECONDS));
    }

    return new JackcessResultSet(selectColumns, DbRawDataUtil.createIndexes(selectColumns, toIndex), rows);
}

From source file:brooklyn.rest.filter.LoggingFilter.java

@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
        throws IOException, ServletException {
    HttpServletRequest httpRequest = (HttpServletRequest) request;
    HttpServletResponse httpResponse = (HttpServletResponse) response;

    String uri = httpRequest.getRequestURI();
    String rid = RequestTaggingFilter.getTag();
    boolean isInteresting = INTERESTING_METHODS.contains(httpRequest.getMethod()),
            shouldLog = (isInteresting && LOG.isDebugEnabled()) || LOG.isTraceEnabled(), requestErrored = false;
    Stopwatch timer = Stopwatch.createUnstarted();
    try {
        if (shouldLog) {
            String message = "{} starting request {} {}";
            Object[] args = new Object[] { rid, httpRequest.getMethod(), uri };
            if (isInteresting) {
                LOG.debug(message, args);
            } else {
                LOG.trace(message, args);
            }
        }

        timer.start();
        chain.doFilter(request, response);

    } catch (Throwable e) {
        requestErrored = true;
        LOG.warn("REST API request " + rid + " failed: " + e, e);
        // Propagate for handling by other filter
        throw Exceptions.propagate(e);
    } finally {
        timer.stop();
        // This logging must not happen before chain.doFilter, or FormMapProvider will not work as expected.
        // Getting the parameter map consumes the request body and only resource methods using @FormParam
        // will work as expected.
        if (requestErrored || shouldLog) {
            boolean includeHeaders = requestErrored || httpResponse.getStatus() / 100 == 5
                    || LOG.isTraceEnabled();
            String message = getRequestCompletedMessage(includeHeaders, Duration.of(timer), rid, httpRequest,
                    httpResponse);
            if (requestErrored || isInteresting) {
                LOG.debug(message);
            } else {
                LOG.trace(message);
            }
        }
    }
}

From source file:cosmos.mapred.MediawikiQueries.java

public long docIdFetch(Store id, Map<Column, Long> counts, long totalResults) throws Exception {
    Stopwatch sw = new Stopwatch();

    // This is dumb, I didn't pad the docids...
    String prev = "!";
    long resultCount = 0l;
    sw.start();

    final CloseableIterable<MultimapRecord> results = this.sorts.fetch(id,
            Index.define(Defaults.DOCID_FIELD_NAME));

    for (MultimapRecord r : results) {
        sw.stop();

        resultCount++;

        String current = r.docId();
        if (prev.compareTo(current) > 0) {
            System.out.println("WOAH, got " + current + " docid which was greater than the previous " + prev);
            results.close();
            System.exit(1);
        }

        prev = current;

        sw.start();
    }

    sw.stop();

    System.out.println(
            Thread.currentThread().getName() + ": docIdFetch - Took " + sw.toString() + " to fetch results");
    logTiming(totalResults, sw.elapsed(TimeUnit.MILLISECONDS), "docIdFetch");

    results.close();

    return resultCount;
}