Example usage for com.google.common.base Stopwatch stop

Introduction

This page collects real-world usage examples of com.google.common.base.Stopwatch.stop().

Prototype

public Stopwatch stop() 

Document

Stops the stopwatch; future reads will return the fixed duration that had elapsed up to this point. The method returns this Stopwatch instance, so calls such as stopwatch.stop().elapsed(...) can be chained.
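
Before the real-world examples below, here is a minimal, self-contained sketch of the typical pattern (the class name StopwatchStopExample is illustrative, not taken from any listed project):

import com.google.common.base.Stopwatch;

import java.util.concurrent.TimeUnit;

public class StopwatchStopExample {
    public static void main(String[] args) throws InterruptedException {
        // Create a stopwatch that begins timing immediately.
        Stopwatch stopwatch = Stopwatch.createStarted();
        Thread.sleep(100); // stand-in for real work
        // stop() freezes the elapsed time and returns this stopwatch,
        // so it can be chained directly with elapsed().
        long millis = stopwatch.stop().elapsed(TimeUnit.MILLISECONDS);
        System.out.println("Work took " + millis + " ms");
    }
}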

Usage

From source file:org.codice.ddf.configuration.migration.ConfigurationMigrationManager.java
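
This example creates and stops the stopwatch only when debug logging is enabled, chaining stop() with toString() to log the export time.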

private Collection<MigrationWarning> exportMigratable(Migratable migratable, Path exportDirectory)
        throws IOException {
    Stopwatch stopwatch = null;

    if (LOGGER.isDebugEnabled()) {
        stopwatch = Stopwatch.createStarted();
    }

    MigrationMetadata migrationMetadata = migratable.export(exportDirectory);

    if (LOGGER.isDebugEnabled() && stopwatch != null) {
        LOGGER.debug("Export time: {}", stopwatch.stop().toString());
    }

    return migrationMetadata.getMigrationWarnings();
}

From source file:es.usc.citius.composit.core.composition.search.NaiveForwardServiceDiscoverer.java
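
This example juggles three stopwatches: an overall timer that is only ever read, a per-level timer that is started and reset on each iteration, and a network-construction timer whose stop() call is chained with toString() for logging.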

public ServiceMatchNetwork<E, T> search(Signature<E> signature) {
    Set<E> availableInputs = new HashSet<E>(signature.getInputs());
    Set<E> newOutputs = new HashSet<E>(signature.getInputs());
    Set<Operation<E>> usedServices = new HashSet<Operation<E>>();
    List<Set<Operation<E>>> leveledOps = new LinkedList<Set<Operation<E>>>();

    boolean checkExpectedOutputs = !signature.getOutputs().isEmpty();
    boolean stop;

    Stopwatch timer = Stopwatch.createStarted();
    Stopwatch levelTimer = Stopwatch.createUnstarted();
    int level = 0;
    do {
        HashSet<Operation<E>> candidates = new HashSet<Operation<E>>();
        levelTimer.start();
        candidates.addAll(discovery.findOperationsConsumingSome(newOutputs));
        log.info("(Level {}) {} potential candidates selected in {}", level++, candidates.size(),
                levelTimer.toString());
        // Remove services that cannot be invoked with the available inputs
        for (Iterator<Operation<E>> it = candidates.iterator(); it.hasNext();) {
            Operation<E> candidate = it.next();
            Set<E> matched = matcher.partialMatch(availableInputs, candidate.getSignature().getInputs())
                    .getTargetElements();
            // Invokable?
            if (matched.equals(candidate.getSignature().getInputs())) {
                // Invokable operation, check if it was used previously
                boolean isNew = usedServices.add(candidate);
                if (!isNew)
                    it.remove();
            } else {
                it.remove();
            }
        }
        log.info("\t + [{}] operations selected for this level in {}: {}", candidates.size(),
                levelTimer.toString(), candidates);

        // Collect the new outputs of the new candidates
        newOutputs = Operations.outputs(candidates);
        availableInputs.addAll(newOutputs);
        Set<E> matchedOutputs = matcher.partialMatch(availableInputs, signature.getOutputs())
                .getTargetElements();

        // Add the discovered ops
        if (!candidates.isEmpty())
            leveledOps.add(candidates);

        log.debug("\t + Available inputs: {}, new outputs: {}", availableInputs.size(), newOutputs.size());
        // Stop condition. Stop if there are no more candidates and/or expected outputs are satisfied.
        stop = (checkExpectedOutputs) ? candidates.isEmpty() || matchedOutputs.equals(signature.getOutputs())
                : candidates.isEmpty();
        levelTimer.reset();
    } while (!stop);

    // Add the source and sink operations
    Source<E> sourceOp = new Source<E>(signature.getInputs());
    Sink<E> sinkOp = new Sink<E>(signature.getOutputs());
    leveledOps.add(0, Collections.<Operation<E>>singleton(sourceOp));
    leveledOps.add(leveledOps.size(), Collections.<Operation<E>>singleton(sinkOp));
    Stopwatch networkWatch = Stopwatch.createStarted();
    // Create a service match network with the discovered services
    DirectedAcyclicSMN<E, T> matchNetwork = new DirectedAcyclicSMN<E, T>(new HashLeveledServices<E>(leveledOps),
            this.matcher);
    log.info(" > Service match network computed in {}", networkWatch.stop().toString());
    log.info("Service Match Network created with {} levels (including source and sink) and {} operations.",
            leveledOps.size(), matchNetwork.listOperations().size());
    log.info("Forward Discovery done in {}", timer.toString());
    return matchNetwork;
}

From source file:org.yql4j.impl.HttpComponentsYqlClient.java
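
Here stop() is chained with elapsed(TimeUnit.MILLISECONDS) to log how long the YQL HTTP request took.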

@Override
public YqlResult query(YqlQuery query) throws YqlException {
    checkNotNull(query);
    try {
        HttpUriRequest request = createHttpRequest(query);
        request = signHttpRequest(request, query);

        Stopwatch timer = Stopwatch.createStarted();
        try (CloseableHttpResponse response = httpClient.execute(request)) {
            logger.debug("YQL query (URL=" + query.toUri() + ") took "
                    + timer.stop().elapsed(TimeUnit.MILLISECONDS) + "ms");

            if (response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) {
                HttpEntity entity = response.getEntity();
                Map<String, String> headers = new HashMap<>();
                for (Header header : response.getAllHeaders()) {
                    headers.put(header.getName(), header.getValue());
                }
                return new YqlResult(EntityUtils.toString(entity), headers,
                        query.getFormat() != null ? query.getFormat() : ResultFormat.XML,
                        getAppropriateMapper(query));
            } else if (isClientError(response)) {
                HttpEntity entity = response.getEntity();
                ObjectMapper mapper = getAppropriateMapper(query);
                ErrorType error = mapper.readValue(EntityUtils.toString(entity), ErrorType.class);
                throw new YqlException(
                        "Failed to execute YQL query (URL=" + query.toUri() + "): " + error.getDescription());
            } else {
                throw new YqlException("Failed to execute YQL query (URL=" + query.toUri()
                        + "): Received unexpected status code " + response.getStatusLine().getStatusCode());
            }
        }
    } catch (ParseException | OAuthException | IOException e) {
        throw new YqlException("Failed to execute YQL query (URL=" + query.toUri() + "): " + e.getMessage(), e);
    }
}

From source file:org.jetbrains.android.inspections.lint.DomPsiConverter.java
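
This example, guarded by a BENCHMARK flag, uses the older Guava Stopwatch API (public constructor and elapsedMillis()); the timer is stopped before its readings are printed.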

/**
 * Convert the given {@link XmlFile} to a DOM tree
 *
 * @param xmlFile the file to be converted
 * @return a corresponding W3C DOM tree
 */
@Nullable
public static Document convert(@NotNull XmlFile xmlFile) {
    try {
        XmlDocument xmlDocument = xmlFile.getDocument();
        if (xmlDocument == null) {
            return null;
        }

        @SuppressWarnings("UnusedAssignment")
        Stopwatch timer;
        if (BENCHMARK) {
            timer = new Stopwatch();
            timer.start();
        }

        Document document = convert(xmlDocument);

        if (BENCHMARK) {
            timer.stop();
            //noinspection UseOfSystemOutOrSystemErr
            System.out.println("Creating PSI for " + xmlFile.getName() + " took " + timer.elapsedMillis()
                    + "ms (" + timer.toString() + ")");
        }

        return document;
    } catch (Exception e) {
        String path = xmlFile.getName();
        VirtualFile virtualFile = xmlFile.getVirtualFile();
        if (virtualFile != null) {
            path = virtualFile.getPath();
        }
        throw new RuntimeException("Could not convert file " + path, e);
    }
}

From source file:com.arpnetworking.metrics.mad.performance.FilePerfTestBase.java
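
Here the stopwatch is started just before the pipeline launches and stopped once a countdown latch confirms that all canaries arrived; elapsed(TimeUnit.SECONDS) then reports the benchmark duration.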

/**
 * Runs a filter.
 *
 * @param pipelineConfigurationFile Pipeline configuration file.
 * @param duration Timeout period.
 * @param variables Substitution key-value pairs into pipeline configuration file.
 * @throws IOException if configuration cannot be loaded.
 */
protected void benchmark(final String pipelineConfigurationFile, final Duration duration,
        final ImmutableMap<String, String> variables) throws IOException {
    // Replace any variables in the configuration file
    String configuration = Resources.toString(Resources.getResource(pipelineConfigurationFile), Charsets.UTF_8);
    for (final Map.Entry<String, String> entry : variables.entrySet()) {
        configuration = configuration.replace(entry.getKey(), entry.getValue());
    }

    // Load the specified stock configuration
    final PipelineConfiguration stockPipelineConfiguration = new StaticConfiguration.Builder()
            .addSource(new JsonNodeLiteralSource.Builder().setSource(configuration).build())
            .setObjectMapper(PipelineConfiguration.createObjectMapper(_injector)).build()
            .getRequiredAs(PipelineConfiguration.class);

    // Canary tracking
    LOGGER.info(String.format("Expected canaries; periods=%s", stockPipelineConfiguration.getPeriods()));
    final CountDownLatch latch = new CountDownLatch(stockPipelineConfiguration.getPeriods().size());
    final Set<Period> periods = Sets.newConcurrentHashSet();

    // Create custom "canary" sink
    final ListeningSink sink = new ListeningSink((periodicData) -> {
        if (periodicData != null) {
            for (final String metricName : periodicData.getData().keys()) {
                if (TestFileGenerator.CANARY.equals(metricName)) {
                    if (periods.add(periodicData.getPeriod())) {
                        LOGGER.info(String.format("Canary flew; filter=%s, period=%s", this.getClass(),
                                periodicData.getPeriod()));
                        latch.countDown();
                    }
                }
            }
        }
        return null;
    });

    // Add the custom "canary" sink
    final List<Sink> benchmarkSinks = Lists.newArrayList(stockPipelineConfiguration.getSinks());
    benchmarkSinks.add(sink);

    // Create the custom configuration
    final PipelineConfiguration benchmarkPipelineConfiguration = OvalBuilder.<PipelineConfiguration, PipelineConfiguration.Builder>clone(
            stockPipelineConfiguration).setSinks(benchmarkSinks).build();

    // Instantiate the pipeline
    final Pipeline pipeline = new Pipeline(benchmarkPipelineConfiguration);

    // Execute the pipeline until the canary flies the coop
    try {
        LOGGER.debug(String.format("Launching pipeline; configuration=%s", pipelineConfigurationFile));
        final Stopwatch timer = Stopwatch.createUnstarted();
        timer.start();
        pipeline.launch();

        if (!latch.await(duration.getMillis(), TimeUnit.MILLISECONDS)) {
            LOGGER.error("Test timed out");
            throw new RuntimeException("Test timed out");
        }

        timer.stop();
        LOGGER.info(String.format("Performance filter result; filter=%s, seconds=%s", this.getClass(),
                timer.elapsed(TimeUnit.SECONDS)));

    } catch (final InterruptedException e) {
        Thread.interrupted();
        throw new RuntimeException("Test interrupted");
    } finally {
        pipeline.shutdown();
    }
}

From source file:com.google.api.ads.adwords.awalerting.processor.AlertProcessor.java
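
This example stops the stopwatch after processing and logs elapsed milliseconds divided by 1000; note that the integer division truncates fractional seconds.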

/**
 * Process reports for the given account IDs under the manager account.
 *
 * @param reports the downloaded reports
 * @param rulesConfig the JSON config of current alert rules
 * @param alertMessage the current alert message template
 * @param actionsConfig the JSON config of current alert actions
 */
protected void processReports(List<ReportData> reports, JsonArray rulesConfig, String alertMessage,
        JsonArray actionsConfig) throws AlertProcessingException {
    if (reports == null || reports.isEmpty()) {
        LOGGER.info("No reports to process!");
        return;
    }

    LOGGER.info("*** Start processing reports...");
    Stopwatch stopwatch = Stopwatch.createStarted();

    applyAlertRulesAndMessages(reports, rulesConfig, alertMessage);
    printReports(reports, "*** Reports after processing alert rules and messages:");
    applyAlertActions(reports, actionsConfig);

    stopwatch.stop();
    LOGGER.info("*** Finished processing all reports in {} seconds.",
            stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000);
}

From source file:com.vmware.photon.controller.apife.backends.ResourceTicketSqlBackend.java
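
Here the stopwatch measures only the time spent acquiring an upgrade lock on the resource ticket and is stopped as soon as the lock is obtained.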

/**
 * This method returns the quota consumed via consumeQuota.
 * <p/>
 *
 * @param resourceTicketId - id of the resource ticket
 * @param cost             - the cost object representing how much will be consumed
 */
@Override
@Transactional
public void returnQuota(String resourceTicketId, QuotaCost cost) {
    // return the cost usage. this undoes the
    // quota consumption that occurs during consumeQuota

    Stopwatch resourceTicketWatch = Stopwatch.createStarted();
    ResourceTicketEntity resourceTicket = resourceTicketDao.loadWithUpgradeLock(resourceTicketId);
    resourceTicketWatch.stop();
    logger.info("returnQuota for resourceTicket {}, lock obtained in {} milliseconds", resourceTicket.getId(),
            resourceTicketWatch.elapsed(TimeUnit.MILLISECONDS));

    for (String key : cost.getCostKeys()) {
        resourceTicket.getUsageMap().put(key,
                resourceTicket.getUsageMap().get(key).subtract(cost.getCost(key)));
    }
}

From source file:org.apache.hadoop.hbase.ScanPerformanceEvaluation.java
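
This example times file opening and streaming separately. It uses the pre-Guava-15 Stopwatch API (public constructor, elapsedMillis(), and elapsedTime(TimeUnit)), which later releases replaced with createUnstarted() and elapsed(TimeUnit).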

protected void testHdfsStreaming(Path filename) throws IOException {
    byte[] buf = new byte[1024];
    FileSystem fs = filename.getFileSystem(getConf());

    // read the file from start to finish
    Stopwatch fileOpenTimer = new Stopwatch();
    Stopwatch streamTimer = new Stopwatch();

    fileOpenTimer.start();
    FSDataInputStream in = fs.open(filename);
    fileOpenTimer.stop();

    long totalBytes = 0;
    streamTimer.start();
    while (true) {
        int read = in.read(buf);
        if (read < 0) {
            break;
        }
        totalBytes += read;
    }
    streamTimer.stop();

    double throughput = (double) totalBytes / streamTimer.elapsedTime(TimeUnit.SECONDS);

    System.out.println("HDFS streaming: ");
    System.out.println("total time to open: " + fileOpenTimer.elapsedMillis() + " ms");
    System.out.println("total time to read: " + streamTimer.elapsedMillis() + " ms");
    System.out.println(
            "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")");
    System.out.println("throghput  : " + StringUtils.humanReadableInt((long) throughput) + "B/s");
}

From source file:it.units.malelab.ege.core.evolver.StandardEvolver.java
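
Here a single stopwatch times two phases in sequence: stop() fixes the mapping time, then reset().start() rewinds it to measure the fitness evaluation.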

protected Callable<List<Individual<G, T, F>>> individualFromGenotypeCallable(final G genotype,
        final int generation, final LoadingCache<G, Pair<Node<T>, Map<String, Object>>> mappingCache,
        final LoadingCache<Node<T>, F> fitnessCache, final List<EvolverListener<G, T, F>> listeners,
        final GeneticOperator<G> operator, final List<Individual<G, T, F>> parents,
        final ExecutorService executor) {
    final Evolver<G, T, F> evolver = this;
    return new Callable<List<Individual<G, T, F>>>() {
        @Override
        public List<Individual<G, T, F>> call() throws Exception {
            Stopwatch stopwatch = Stopwatch.createStarted();
            Pair<Node<T>, Map<String, Object>> mappingOutcome = mappingCache.getUnchecked(genotype);
            Node<T> phenotype = mappingOutcome.getFirst();
            long elapsed = stopwatch.stop().elapsed(TimeUnit.NANOSECONDS);
            Utils.broadcast(new MappingEvent<>(genotype, phenotype, elapsed, generation, evolver, null),
                    listeners, executor);
            stopwatch.reset().start();
            F fitness = fitnessCache.getUnchecked(phenotype);
            elapsed = stopwatch.stop().elapsed(TimeUnit.NANOSECONDS);
            Individual<G, T, F> individual = new Individual<>(genotype, phenotype, fitness, generation,
                    saveAncestry ? (List) parents : null, mappingOutcome.getSecond());
            Utils.broadcast(new BirthEvent<>(individual, elapsed, generation, evolver, null), listeners,
                    executor);
            return Collections.singletonList(individual);
        }
    };
}

From source file:com.google.api.ads.adwords.awreporting.server.kratu.KratuProcessor.java
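
This example stops the stopwatch once the worker's countdown latch is released, although the elapsed time is never read in the snippet shown.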

public void processKratus(Long topAccountId, Set<Long> accountIdsSet, Date dateStart, Date dateEnd)
        throws InterruptedException {
    System.out.println("Processing Kratus for " + topAccountId);

    // We use a Latch so the main thread knows when all the worker threads are complete.
    final CountDownLatch latch = new CountDownLatch(1);
    Stopwatch stopwatch = Stopwatch.createStarted();

    RunnableKratu runnableKratu = createRunnableKratu(topAccountId, accountIdsSet, storageHelper, dateStart,
            dateEnd);

    ExecutorService executorService = Executors.newFixedThreadPool(1);
    runnableKratu.setLatch(latch);
    executorService.execute(runnableKratu);

    latch.await();
    stopwatch.stop();
}