Example usage for com.google.common.base Stopwatch stop

List of usage examples for com.google.common.base Stopwatch stop

Introduction

On this page you can find example usages of the com.google.common.base.Stopwatch#stop() method.

Prototype

public Stopwatch stop() 

Source Link

Document

Stops the stopwatch.

Usage

From source file:be.nbb.jackcess.JackcessStatement.java

@Nonnull
public JackcessResultSet executeQuery(@Nonnull DbBasicSelect query) throws IOException {
    // Executes a basic SELECT against a Jackcess (MS Access) table, applying
    // filter, DISTINCT and ORDER BY in memory. Each phase is timed and logged
    // at debug level.
    Table table = database.getTable(query.getTableName());

    // Columns required to materialize the rows: selected + ordering columns,
    // merged and ordered by their internal table index.
    List<Column> selectColumns = getAllByName(table, query.getSelectColumns());
    List<Column> orderColumns = getAllByName(table, query.getOrderColumns());
    SortedSet<Column> dataColumns = mergeAndSortByInternalIndex(selectColumns, orderColumns);
    SortedMap<Column, String> filter = getFilter(table, query.getFilterItems());

    LOGGER.debug("Query : '{}'", query);

    // NOTE(review): 'range' is not declared in this method — presumably an
    // instance field of JackcessStatement; confirm against the full class.
    Stopwatch sw = Stopwatch.createStarted();
    CheckedIterator<Object[], IOException> rows = new Adapter(
            CursorFacade.range(table, range).withFilter(filter), dataColumns);
    LOGGER.debug("Iterator done in {}ms", sw.stop().elapsed(TimeUnit.MILLISECONDS));

    // Maps a column to its position in the fetched row arrays.
    ToIndex toIndex = new ToIndex(dataColumns);

    if (query.isDistinct()) {
        // Stopwatch is reused: start() resumes a stopped watch.
        sw.start();
        rows = DbRawDataUtil.distinct(rows, selectColumns, toIndex, ToDataType.INSTANCE,
                new Aggregator(dataColumns.size() + 1));
        LOGGER.debug("Distinct done in {}ms", sw.stop().elapsed(TimeUnit.MILLISECONDS));
    }

    // Sorting may be skipped when DISTINCT already produced the desired order.
    if (DbRawDataUtil.isSortRequired(query.isDistinct(), selectColumns, orderColumns)) {
        sw.start();
        rows = DbRawDataUtil.sort(rows, orderColumns, toIndex, ToDataType.INSTANCE);
        LOGGER.debug("Sort done in {}ms", sw.stop().elapsed(TimeUnit.MILLISECONDS));
    }

    return new JackcessResultSet(selectColumns, DbRawDataUtil.createIndexes(selectColumns, toIndex), rows);
}

From source file:es.usc.citius.hipster.algorithm.MultiobjectiveLS.java

@Override
public SearchResult search(Predicate<N> condition) {
    // Exhaustively expands the whole search space (multi-objective search
    // cannot stop at the first match), remembering the last node that
    // satisfied the goal condition. The stopwatch covers the full exploration.
    Iterator exploration = new Iterator();
    Stopwatch timer = Stopwatch.createStarted();
    int expandedCount = 0;
    N goal = null;
    while (exploration.hasNext()) {
        expandedCount++;
        N node = exploration.next();
        if (condition.apply(node)) {
            goal = node;
        }
    }
    timer.stop();
    if (goal == null) {
        return new SearchResult(Collections.<N>emptyList(), expandedCount, timer);
    }
    // All non-dominated solutions sharing the goal state.
    Collection<N> solutions = exploration.nonDominated.get(goal.state());
    return new SearchResult(solutions, expandedCount, timer);
}

From source file:com.palantir.atlasdb.keyvalue.cassandra.jmx.CassandraJmxCompactionManager.java

/**
 * Runs the given tasks on {@code exec}, waiting up to {@code timeoutInSeconds}
 * for all of them to complete.
 *
 * @return true when every task completed successfully, false when a task failed
 * @throws TimeoutException if any task was cancelled because the timeout elapsed
 * @throws InterruptedException if the calling thread is interrupted while waiting
 */
private boolean executeInParallel(ExecutorService exec, List<? extends Callable<Void>> tasks,
        long timeoutInSeconds) throws InterruptedException, TimeoutException {
    Stopwatch stopWatch = Stopwatch.createStarted();
    List<Future<Void>> futures = exec.invokeAll(tasks, timeoutInSeconds, TimeUnit.SECONDS);

    for (Future<Void> f : futures) {
        if (f.isCancelled()) {
            // Stop exactly once: a second stop() throws IllegalStateException,
            // which would mask the TimeoutException below.
            String elapsed = stopWatch.stop().toString();
            log.error("Task execution timeouts in {} seconds. Timeout seconds:{}.", elapsed,
                    timeoutInSeconds);
            // String.format uses %s placeholders, not SLF4J-style {}.
            throw new TimeoutException(
                    String.format("Task execution timeouts in %s seconds. Timeout seconds:%s.",
                            elapsed, timeoutInSeconds));
        }

        try {
            f.get();
        } catch (ExecutionException e) {
            // C* < 2.2 surfaces unsupported JMX operations this way.
            Throwable t = e.getCause();
            if (t instanceof UndeclaredThrowableException) {
                log.error("Major LCS compactions are only supported against C* 2.2+; "
                        + "you will need to manually re-arrange SSTables into L0 "
                        + "if you want all deleted data immediately removed from the cluster.");
            }
            log.error("Failed to complete tasks.", e);
            return false;
        }
    }

    log.info("All tasks completed in {}.", stopWatch.stop());
    return true;
}

From source file:com.thinkbiganalytics.metadata.jpa.cache.ClusterAwareDtoCache.java

/**
 * Rebuilds the DTO cache from the backing store: fetches all entities,
 * transforms each to a DTO and adds it to the cache. Marks the cache as
 * populated on success, unpopulated on any failure; always clears the
 * "populating" flag when done.
 */
private void refresh() {
    try {
        log.info("Populating Cache for {} ", getProviderName());
        Stopwatch stopwatch = Stopwatch.createStarted();
        List<E> entities = fetchAll();
        // Method references instead of redundant lambdas.
        entities.stream().map(this::transformEntityToDto).forEach(this::addItem);
        populatedCache.set(true);
        stopwatch.stop();
        log.info("Time to populate {} Cache {}", getProviderName(), stopwatch.elapsed(TimeUnit.MILLISECONDS));
    } catch (Exception e) {
        // Never swallow the failure silently: record why population failed.
        log.error("Failed to populate cache for {}", getProviderName(), e);
        populatedCache.set(false);
    }
    populatingCache.set(false);
}

From source file:com.thinkbiganalytics.metadata.jpa.cache.AbstractCacheBackedProvider.java

/**
 * Rebuilds the cache from the repository, notifying registered listeners once
 * population completes. Marks the cache as populated on success, unpopulated
 * on any failure; always clears the "populating" flag when done.
 */
private void refresh() {
    try {
        log.info("Populating Cache for {} ", getProviderName());
        Stopwatch stopwatch = Stopwatch.createStarted();
        addItems(repository.findAll());
        populatedCache.set(true);
        cacheListeners.stream().forEach(CacheBackedProviderListener::onPopulated);
        stopwatch.stop();
        log.info("Time to populate {} Cache {}", getProviderName(), stopwatch.elapsed(TimeUnit.MILLISECONDS));
    } catch (Exception e) {
        // Never swallow the failure silently: record why population failed.
        log.error("Failed to populate cache for {}", getProviderName(), e);
        populatedCache.set(false);
    }
    populatingCache.set(false);
}

From source file:org.fenixedu.bennu.core.api.SystemResource.java

@GET
@Path("healthcheck")
@Produces(MediaType.APPLICATION_JSON)
public JsonArray healthChecks() {
    // Runs every registered healthcheck and reports its name, result and
    // execution time in milliseconds. Restricted to manager users.
    accessControl(Group.managers());
    JsonArray report = new JsonArray();

    for (Healthcheck check : healthchecks) {
        JsonObject entry = new JsonObject();
        entry.addProperty("name", check.getName());
        Stopwatch watch = Stopwatch.createStarted();
        Result result = check.execute();
        watch.stop();
        entry.add("result", result.toJson());
        entry.addProperty("time", watch.elapsed(TimeUnit.MILLISECONDS));
        report.add(entry);
    }

    return report;
}

From source file:es.usc.citius.hipster.algorithm.Algorithm.java

/**
 * Executes the search algorithm until the predicate condition is satisfied or
 * there are no more nodes to explore.
 *
 * @param condition predicate with the boolean goal condition
 * @return a {@link es.usc.citius.hipster.algorithm.Algorithm.SearchResult}
 *         with the last expanded node, the iteration count and the elapsed time
 */
public SearchResult search(Predicate<N> condition) {
    Iterator<N> nodes = iterator();
    Stopwatch elapsed = Stopwatch.createStarted();
    int expansions = 0;
    N lastNode = null;
    // Stops at the first node satisfying the goal; if none does, lastNode is
    // the final node expanded (or null for an empty search space).
    while (nodes.hasNext()) {
        expansions++;
        lastNode = nodes.next();
        if (condition.apply(lastNode)) {
            break;
        }
    }
    elapsed.stop();
    return new SearchResult(lastNode, expansions, elapsed);
}

From source file:com.google.devtools.kythe.analyzers.base.AbstractCompilationAnalyzer.java

/**
 * Analyzes the given {@link AnalysisRequest}, emitting all facts with the given
 * {@link FactEmitter}. Analysis duration is logged whether or not it succeeds.
 *
 * @throws AnalysisException if the underlying compilation analysis fails
 */
public void analyzeRequest(AnalysisRequest req, FactEmitter emitter) throws AnalysisException {
    Preconditions.checkNotNull(req, "AnalysisRequest must be non-null");
    Stopwatch timer = Stopwatch.createStarted();
    try {
        analyzeCompilation(req.getCompilation(), parseFileDataService(req.getFileDataService()), emitter);
    } catch (Throwable t) {
        // NOTE(review): printStackTrace() bypasses the logger and duplicates
        // the message on stderr; if the logger supports throwables, prefer
        // logging t with its stack trace and dropping this call — confirm
        // the logger API before changing.
        logger.warningfmt("Uncaught exception: %s", t);
        t.printStackTrace();
        throw t;
    } finally {
        // Runs on both success and failure paths.
        logger.infofmt("Analysis completed in %s", timer.stop());
    }
}

From source file:org.hawkular.metrics.core.jobs.TempDataCompressor.java

/**
 * Compresses the temp-table data block that precedes the trigger time.
 * The trigger time is rewound by one runtime block (2h) and aligned to the
 * block boundary to select the slice to process.
 *
 * @param jobDetails scheduler job carrying the trigger whose time anchors the slice
 * @return a Completable that finishes when the block has been compressed
 */
@Override
public Completable call(JobDetails jobDetails) {
    Duration runtimeBlockSize = Duration.standardHours(2);

    Trigger trigger = jobDetails.getTrigger();
    DateTime timeSliceInclusive = new DateTime(trigger.getTriggerTime(), DateTimeZone.UTC)
            .minus(runtimeBlockSize);

    // Rewind to previous timeslice
    DateTime timeSliceStart = DateTimeService.getTimeSlice(timeSliceInclusive, runtimeBlockSize);
    long startOfSlice = timeSliceStart.getMillis();

    Stopwatch stopwatch = Stopwatch.createStarted();
    logger.infof("Starting to process temp table for starting time of %s", timeSliceStart.toString());

    // TODO Optimization - new worker per token - use parallelism in Cassandra (with configured parallelism)
    return metricsService.compressBlock(startOfSlice, pageSize, maxReadConcurrency).doOnCompleted(() -> {
        stopwatch.stop();
        // Parameterized logging (infof) for consistency with the message above,
        // instead of eager string concatenation.
        logger.infof("Finished processing data in %d ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
    });
}

From source file:dk.dma.dmiweather.service.WeatherService.java

/**
 * Serves a weather-grid request after validating its bounding box
 * (west &lt;= east, north &gt;= south). Request duration is logged even when
 * the lookup fails.
 */
public GridResponse request(GridRequest request, boolean removeEmpty, boolean gridMetrics) {
    GeoCoordinate upperLeft = request.getNorthWest();
    GeoCoordinate lowerRight = request.getSouthEast();

    if (upperLeft.getLon() > lowerRight.getLon()) {
        throw new APIException(ErrorMessage.INVALID_GRID_LOT);
    }
    if (upperLeft.getLat() < lowerRight.getLat()) {
        throw new APIException(ErrorMessage.INVALID_GRID_LAT);
    }

    Stopwatch timer = Stopwatch.createStarted();
    try {
        return findForecastData(request.getTime()).getData(request, removeEmpty, gridMetrics);
    } finally {
        // finally-block: timing is logged for both successful and failed requests.
        log.info("Completed weather request in {} ms", timer.stop().elapsed(TimeUnit.MILLISECONDS));
    }
}