Example usage for com.google.common.base Stopwatch reset

Introduction

On this page you can find example usage of com.google.common.base.Stopwatch#reset().

Prototype

public Stopwatch reset() 

Document

Sets the elapsed time for this stopwatch to zero, and places it in a stopped state.
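
Because reset() leaves the stopwatch in a stopped state, timing a new phase requires calling start() again; since reset() returns this, the two calls are commonly chained as reset().start(), as several of the examples below do. A minimal, self-contained sketch of the idiom (the class name is illustrative; assumes Guava 15 or later, where the static factory methods are available):

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchResetDemo {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch watch = Stopwatch.createStarted();
        Thread.sleep(50); // first timed phase
        System.out.println("phase 1: " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");

        // reset() zeroes the elapsed time and stops the watch, so the next
        // phase needs an explicit start(); the calls chain because reset()
        // returns this.
        watch.reset().start();
        Thread.sleep(30); // second timed phase
        System.out.println("phase 2: " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}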

Usage

From source file: org.apache.eagle.alert.coordinator.Coordinator.java

public synchronized ScheduleState schedule(ScheduleOption option) throws TimeoutException {
    ExclusiveExecutor executor = new ExclusiveExecutor(zkConfig);
    AtomicReference<ScheduleState> reference = new AtomicReference<>();
    try {
        executor.execute(GREEDY_SCHEDULER_ZK_PATH, () -> {
            ScheduleState state = null;
            Stopwatch watch = Stopwatch.createStarted();
            IScheduleContext context = new ScheduleContextBuilder(config, client).buildContext();
            TopologyMgmtService mgmtService = new TopologyMgmtService();
            IPolicyScheduler scheduler = PolicySchedulerFactory.createScheduler();

            scheduler.init(context, mgmtService);
            state = scheduler.schedule(option);

            long scheduleTime = watch.elapsed(TimeUnit.MILLISECONDS);
            state.setScheduleTimeMillis((int) scheduleTime);// hardcode to integer
            watch.reset();
            watch.start();

            // persist & notify
            try (ConfigBusProducer producer = new ConfigBusProducer(ZKConfigBuilder.getZKConfig(config))) {
                postSchedule(client, state, producer);
            }

            watch.stop();
            long postTime = watch.elapsed(TimeUnit.MILLISECONDS);
            LOG.info("Schedule result, schedule time {} ms, post schedule time {} ms !", scheduleTime,
                    postTime);
            reference.set(state);
            currentState = state;
        });
    } catch (TimeoutException e1) {
        LOG.error("time out when schedule", e1);
        throw e1;
    } finally {
        try {
            executor.close();
        } catch (IOException e) {
            LOG.error("Exception when close exclusive executor, log and ignore!", e);
        }
    }
    return reference.get();
}

From source file: monasca.persister.repository.vertica.VerticaMetricRepo.java

private void writeRowsFromTempStagingTablesToPermTables(String id) {

    Stopwatch sw = Stopwatch.createStarted();

    handle.execute(definitionsTempStagingTableInsertStmt);
    handle.execute("truncate table " + definitionsTempStagingTableName);
    sw.stop();

    logger.debug("[{}]: flushing definitions temp staging table took: {}", id, sw);

    sw.reset().start();
    handle.execute(dimensionsTempStagingTableInsertStmt);
    handle.execute("truncate table " + dimensionsTempStagingTableName);
    sw.stop();

    logger.debug("[{}]: flushing dimensions temp staging table took: {}", id, sw);

    sw.reset().start();
    handle.execute(definitionDimensionsTempStagingTableInsertStmt);
    handle.execute("truncate table " + definitionDimensionsTempStagingTableName);
    sw.stop();

    logger.debug("[{}]: flushing definition dimensions temp staging table took: {}", id, sw);
}

From source file: ch.ge.ve.protopoc.service.protocol.DefaultAuthority.java

@Override
public ObliviousTransferResponse handleBallot(Integer voterIndex, BallotAndQuery ballotAndQuery) {
    Preconditions.checkState(publicCredentials != null,
            "The public credentials need to have been retrieved first");

    log.info(String.format("Authority %d handling ballot", j));

    Stopwatch stopwatch = Stopwatch.createStarted();
    List<BigInteger> publicIdentificationCredentials = publicCredentials.stream().map(p -> p.x)
            .collect(Collectors.toList());
    if (!voteCastingAuthorityAlgorithms.checkBallot(voterIndex, ballotAndQuery, systemPublicKey,
            publicIdentificationCredentials, ballotEntries)) {
        throw new IncorrectBallotRuntimeException(
                String.format("Ballot for voter %d was deemed invalid", voterIndex));
    }
    stopwatch.stop();
    ballotVerificationTimes.add(stopwatch.elapsed(TimeUnit.MILLISECONDS));

    stopwatch.reset().start();
    ObliviousTransferResponseAndRand responseAndRand = voteCastingAuthorityAlgorithms.genResponse(voterIndex,
            ballotAndQuery.getBold_a(), systemPublicKey, electionSet.getBold_n(), electorateData.getK(),
            electorateData.getP());
    ballotEntries.add(new BallotEntry(voterIndex, ballotAndQuery, responseAndRand.getBold_r()));
    ObliviousTransferResponse beta = responseAndRand.getBeta();
    stopwatch.stop();
    queryResponseTimes.add(stopwatch.elapsed(TimeUnit.MILLISECONDS));

    return beta;
}

From source file: org.apache.twill.internal.logging.KafkaAppender.java

/**
 * Publishes buffered logs to Kafka, within the given timeout.
 *
 * @return Number of logs published.
 * @throws TimeoutException If timeout reached before publish completed.
 */
private int publishLogs(long timeout, TimeUnit timeoutUnit) throws TimeoutException {
    List<ByteBuffer> logs = Lists.newArrayListWithExpectedSize(bufferedSize.get());

    for (String json : Iterables.consumingIterable(buffer)) {
        logs.add(Charsets.UTF_8.encode(json));
    }

    long backOffTime = timeoutUnit.toNanos(timeout) / 10;
    if (backOffTime <= 0) {
        backOffTime = 1;
    }

    try {
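        // NOTE: this example uses the older Guava Stopwatch API (public
        // constructor and elapsedTime()); current Guava uses
        // Stopwatch.createStarted() and elapsed(TimeUnit).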
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();
        long publishTimeout = timeout;

        do {
            try {
                int published = doPublishLogs(logs).get(publishTimeout, timeoutUnit);
                bufferedSize.addAndGet(-published);
                return published;
            } catch (ExecutionException e) {
                addError("Failed to publish logs to Kafka.", e);
                TimeUnit.NANOSECONDS.sleep(backOffTime);
                publishTimeout -= stopwatch.elapsedTime(timeoutUnit);
                stopwatch.reset();
                stopwatch.start();
            }
        } while (publishTimeout > 0);
    } catch (InterruptedException e) {
        addWarn("Logs publish to Kafka interrupted.", e);
    }
    return 0;
}

From source file: es.usc.citius.composit.core.composition.optimization.FunctionalDominanceOptimizer.java

public ServiceMatchNetwork<E, T> optimize(ServiceMatchNetwork<E, T> network) {
    // Analyze functional dominance between services. This optimization
    // identifies all dominant services using the semantic inputs and outputs
    // and the existing matches between the concepts in the graph.
    Stopwatch globalWatch = Stopwatch.createStarted();
    Stopwatch localWatch = Stopwatch.createUnstarted();
    List<Set<Operation<E>>> optimized = new ArrayList<Set<Operation<E>>>(network.numberOfLevels());
    log.debug("Starting functional dominance optimization...");
    for (int i = 0; i < network.numberOfLevels(); i++) {
        // Analyze input dominance
        log.debug(" > Analyzing functional dominance on {} (network level {})", network.getOperationsAtLevel(i),
                i);
        localWatch.start();
        Collection<Collection<Operation<E>>> groups = functionalInputEquivalence(network, i);
        localWatch.stop();
        log.debug("\t\tInput equivalence groups: {} (computed in {})", groups, localWatch.toString());
        localWatch.reset();
        // For each equivalent group in this level, check the output dominance
        Set<Operation<E>> nonDominatedServices = new HashSet<Operation<E>>();
        for (Collection<Operation<E>> group : groups) {
            log.debug("\t\tAnalyzing output dominance for group {}", group);
            localWatch.start();
            Collection<Collection<Operation<E>>> nonDominatedGroups = functionalOutputDominance(group, network,
                    i);
            localWatch.stop();
            log.debug("\t\t\t+ Non-dominated groups detected: {} (computed in {})", nonDominatedGroups,
                    localWatch.toString());
            log.debug("\t\t\t+ Size before / after output dominance {}/{}", group.size(),
                    nonDominatedGroups.size());
            // Pick one non-dominated service from each group at random.
            for (Collection<Operation<E>> ndGroup : nonDominatedGroups) {
                Operation<E> representant = ndGroup.iterator().next();
                log.debug("\t\t\t\t- {} has been selected as the representative service of the group {}",
                        representant, ndGroup);
                nonDominatedServices.add(representant);
            }
        }
        optimized.add(nonDominatedServices);
    }
    localWatch.reset().start();
    DirectedAcyclicSMN<E, T> optimizedNetwork = new DirectedAcyclicSMN<E, T>(
            new HashLeveledServices<E>(optimized), network);
    localWatch.stop();
    log.debug(" > Functional optimized match network computed in {}", localWatch.toString());
    log.debug("Functional Dominance Optimization done in {}. Size before/after {}/{}.",
            globalWatch.stop().toString(), network.listOperations().size(),
            optimizedNetwork.listOperations().size());
    return optimizedNetwork;
}

From source file: com.google.api.control.Client.java

private void flushAndScheduleChecks() {
    if (resetIfStopped()) {
        log.log(Level.FINE, "did not schedule check flush: client is stopped");
        return;
    }
    int interval = checkAggregator.getFlushIntervalMillis();
    if (interval < 0) {
        log.log(Level.FINE, "did not schedule check flush: caching is disabled");
        return; // cache is disabled, so there is nothing to flush
    }

    if (isRunningSchedulerDirectly()) {
        log.log(Level.FINE, "did not schedule check flush: no scheduler thread is running");
        return;
    }

    log.log(Level.FINE, "flushing the check aggregator");
    Stopwatch w = Stopwatch.createUnstarted(ticker);
    for (CheckRequest req : checkAggregator.flush()) {
        try {
            statistics.recachedChecks.incrementAndGet();
            w.reset().start();
            CheckResponse resp = transport.services().check(serviceName, req).execute();
            statistics.totalCheckTransportTimeMillis.addAndGet(w.elapsed(TimeUnit.MILLISECONDS));
            w.reset().start();
            checkAggregator.addResponse(req, resp);
            statistics.totalCheckCacheUpdateTimeMillis.addAndGet(w.elapsed(TimeUnit.MILLISECONDS));
        } catch (IOException e) {
            log.log(Level.SEVERE,
                    String.format("direct send of a check request %s failed because of %s", req, e));
        }
    }
    scheduler.enter(new Runnable() {
        @Override
        public void run() {
            flushAndScheduleChecks(); // Do this again after the interval
        }
    }, interval, 0 /* high priority */);
}

From source file: com.google.api.control.Client.java

private void flushAndScheduleReports() {
    if (resetIfStopped()) {
        log.log(Level.FINE, "did not schedule report flush: client is stopped");
        return;
    }
    int interval = reportAggregator.getFlushIntervalMillis();
    if (interval < 0) {
        log.log(Level.FINE, "did not schedule report flush: cache is disabled");
        return; // cache is disabled, so there is nothing to flush
    }
    ReportRequest[] flushed = reportAggregator.flush();
    if (log.isLoggable(Level.FINE)) {
        log.log(Level.FINE, String.format("flushing %d reports from the report aggregator", flushed.length));
    }
    statistics.flushedReports.addAndGet(flushed.length);
    Stopwatch w = Stopwatch.createUnstarted(ticker);
    for (ReportRequest req : flushed) {
        try {
            statistics.flushedOperations.addAndGet(req.getOperationsCount());
            w.reset().start();
            transport.services().report(serviceName, req).execute();
            statistics.totalTransportedReportTimeMillis.addAndGet(w.elapsed(TimeUnit.MILLISECONDS));
        } catch (IOException e) {
            log.log(Level.SEVERE, String.format("direct send of a report request failed because of %s", e));
        }
    }
    scheduler.enter(new Runnable() {
        @Override
        public void run() {
            flushAndScheduleReports(); // Do this again after the interval
        }
    }, interval, 1 /* not so high priority */);
}

From source file: fr.ens.transcriptome.aozan.fastqscreen.FastqScreen.java

/**
 * Executes fastqscreen in single-end or paired-end mode.
 * @param fastqRead1 fastq read1 file input for the mapper
 * @param fastqRead2 fastq read2 file input for the mapper
 * @param fastqSample instance describing the fastq sample
 * @param genomes list of reference genomes used by the mapper
 * @param genomeSample genome reference corresponding to the sample
 * @param isPairedMode true if this is a paired-end run and the paired mode
 *          option is enabled, false otherwise
 * @throws AozanException
 */
public FastqScreenResult execute(final File fastqRead1, final File fastqRead2, final FastqSample fastqSample,
        final List<String> genomes, final String genomeSample, final boolean isPairedMode)
        throws AozanException {

    // Timer
    final Stopwatch timer = Stopwatch.createStarted();

    final FastqScreenPseudoMapReduce pmr = new FastqScreenPseudoMapReduce(this.tmpDir, isPairedMode,
            this.mapperName, this.mapperArgument);

    try {

        if (isPairedMode) {
            pmr.doMap(fastqRead1, fastqRead2, genomes, genomeSample, this.confThreads);
        } else {
            pmr.doMap(fastqRead1, genomes, genomeSample, this.confThreads);
        }

        LOGGER.fine("FASTQSCREEN : step map for " + fastqSample.getKeyFastqSample() + " in mode "
                + (isPairedMode ? "paired" : "single") + " on genome(s) " + genomes + " in "
                + toTimeHumanReadable(timer.elapsed(TimeUnit.MILLISECONDS)));

        timer.reset();
        timer.start();

        pmr.doReduce(new File(this.tmpDir + "/outputDoReduce.txt"));

        LOGGER.fine("FASTQSCREEN : step reduce for " + fastqSample.getKeyFastqSample() + " in mode "
                + (isPairedMode ? "paired" : "single") + " in "
                + toTimeHumanReadable(timer.elapsed(TimeUnit.MILLISECONDS)));

        // Remove the temporary output file used in the map-reduce step
        final File f = new File(this.tmpDir + "/outputDoReduce.txt");
        if (!f.delete()) {
            LOGGER.warning("Fastqscreen : fail to delete file " + f.getAbsolutePath());
        }

    } catch (final IOException e) {
        throw new AozanException(e);

    } finally {
        timer.stop();
    }

    return pmr.getFastqScreenResult();
}

From source file: it.units.malelab.ege.core.evolver.geneoptimalmixing.GOMEvolver.java

protected Callable<List<Individual<G, T, F>>> gomCallable(final List<Individual<G, T, F>> population,
        final Individual<G, T, F> parent, final Set<Set<Integer>> fos, final Random random,
        final int generation, final Ranker<Individual<G, T, F>> ranker,
        final AbstractMutation<G> mutationOperator,
        final LoadingCache<G, Pair<Node<T>, Map<String, Object>>> mappingCache,
        final LoadingCache<Node<T>, F> fitnessCache, final int maxEvaluations,
        final List<EvolverListener<G, T, F>> listeners, final ExecutorService executor,
        final Stopwatch elapsedStopwatch) {
    final Evolver<G, T, F> evolver = this;
    return new Callable<List<Individual<G, T, F>>>() {
        @Override
        public List<Individual<G, T, F>> call() throws Exception {
            try {
                //randomize fos
                List<Set<Integer>> randomizedFos = new ArrayList<>(fos);
                Collections.shuffle(randomizedFos, random);
                //iterate
                Individual<G, T, F> child = parent;
                for (Set<Integer> subset : randomizedFos) { // iterate over the shuffled FOS
                    //check evaluations
                    if (actualBirths(0, fitnessCache) > maxEvaluations) {
                        break;
                    }
                    if (configuration.getMaxElapsed() > 0) {
                        //check if elapsed time exceeded
                        if (elapsedStopwatch.elapsed(TimeUnit.SECONDS) > configuration.getMaxElapsed()) {
                            break;
                        }
                    }
                    //mix genes
                    Individual<G, T, F> donor = population.get(random.nextInt(population.size()));
                    G donorGenotype = donor.getGenotype();
                    G childGenotype = (G) child.getGenotype().clone();
                    for (Integer locus : subset) {
                        childGenotype.set(locus, donorGenotype.get(locus));
                    }
                    //map
                    Stopwatch stopwatch = Stopwatch.createStarted();
                    Pair<Node<T>, Map<String, Object>> mappingOutcome = mappingCache
                            .getUnchecked(childGenotype);
                    Node<T> phenotype = mappingOutcome.getFirst();
                    long elapsed = stopwatch.stop().elapsed(TimeUnit.NANOSECONDS);
                    Utils.broadcast(
                            new MappingEvent<>(childGenotype, phenotype, elapsed, generation, evolver, null),
                            listeners, executor);
                    //compute fitness
                    stopwatch.reset().start();
                    F fitness = fitnessCache.getUnchecked(phenotype);
                    elapsed = stopwatch.stop().elapsed(TimeUnit.NANOSECONDS);
                    Individual<G, T, F> individual = new Individual<>(childGenotype, phenotype, fitness,
                            generation, saveAncestry ? Arrays.asList(child, donor) : null,
                            mappingOutcome.getSecond());
                    Utils.broadcast(new BirthEvent<>(individual, elapsed, generation, evolver, null), listeners,
                            executor);
                    //rank
                    List<List<Individual<G, T, F>>> ranked = ranker.rank(Arrays.asList(child, individual),
                            random);
                    child = ranked.get(0).get(0);
                }
                if (mutationOperator != null) {
                    while (child.getPhenotype().equals(parent.getPhenotype())) {
                        //check evaluations
                        if (actualBirths(0, fitnessCache) > maxEvaluations) {
                            break;
                        }
                        //mutate
                        G childGenotype = mutationOperator
                                .apply(Collections.singletonList(child.getGenotype()), random).get(0);
                        //map
                        Stopwatch stopwatch = Stopwatch.createStarted();
                        Pair<Node<T>, Map<String, Object>> mappingOutcome = mappingCache
                                .getUnchecked(childGenotype);
                        Node<T> phenotype = mappingOutcome.getFirst();
                        long elapsed = stopwatch.stop().elapsed(TimeUnit.NANOSECONDS);
                        Utils.broadcast(new MappingEvent<>(childGenotype, phenotype, elapsed, generation,
                                evolver, null), listeners, executor);
                        //compute fitness
                        stopwatch.reset().start();
                        F fitness = fitnessCache.getUnchecked(phenotype);
                        elapsed = stopwatch.stop().elapsed(TimeUnit.NANOSECONDS);
                        Individual<G, T, F> individual = new Individual<>(childGenotype, phenotype, fitness,
                                generation, saveAncestry ? Arrays.asList(child) : null,
                                mappingOutcome.getSecond());
                        Utils.broadcast(new BirthEvent<>(individual, elapsed, generation, evolver, null),
                                listeners, executor);
                        child = individual;
                    }
                }
                return Collections.singletonList(child);
            } catch (Throwable t) {
                t.printStackTrace();
                System.exit(-1);
                return null;
            }
        }
    };
}

From source file: uk.ac.open.kmi.iserve.discovery.disco.impl.SparqlLogicConceptMatcher.java

/**
 * Obtains all the resources matching the given {@code origins} URIs with the provided MatchType (inclusive) or better.
 *
 * @param origins URIs to match
 * @param minType the minimum MatchType we want to obtain
 * @return a {@link com.google.common.collect.Table} with the result of the matching indexed by origin URI and then destination URI.
 */
@Override
public Table<URI, URI, MatchResult> listMatchesAtLeastOfType(Set<URI> origins, MatchType minType) {
    Table<URI, URI, MatchResult> matchTable = HashBasedTable.create();
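    // NOTE: new Stopwatch() is the older Guava API; current Guava uses
    // Stopwatch.createUnstarted().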
    Stopwatch w = new Stopwatch();
    for (URI origin : origins) {
        w.start();
        Map<URI, MatchResult> result = listMatchesAtLeastOfType(origin, minType);
        for (Map.Entry<URI, MatchResult> dest : result.entrySet()) {
            matchTable.put(origin, dest.getKey(), dest.getValue());
        }
        log.debug("Computed matched types for {} in {}. {} total matches.", origin, w.stop().toString(),
                result.size());
        w.reset();
    }
    return matchTable;

    //        return obtainMatchResults(origins, minType, this.getMatchTypesSupported().getHighest()); // TODO: Use the proper implementation for this
}