Example usage for com.google.common.base.Stopwatch.toString()

List of usage examples for com.google.common.base.Stopwatch.toString()

Introduction

On this page you can find usage examples for com.google.common.base.Stopwatch.toString().

Prototype

@GwtIncompatible("String.format()")
@Override
public String toString() 

Document

Returns a string representation of the current elapsed time.
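
For orientation, here is a minimal, self-contained sketch (not taken from the projects below) showing what the returned string looks like; the unit in the output (ns, μs, ms, s, ...) is chosen by Guava based on the magnitude of the elapsed time:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchToStringExample {
    public static void main(String[] args) throws InterruptedException {
        // Start timing, simulate some work, then stop.
        Stopwatch stopwatch = Stopwatch.createStarted();
        TimeUnit.MILLISECONDS.sleep(25);
        stopwatch.stop();

        // toString() renders the elapsed time in a human-readable unit,
        // e.g. something like "25.34 ms" (the exact value varies per run).
        System.out.println("Elapsed: " + stopwatch.toString());
    }
}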

Usage

From source file:org.apache.brooklyn.launcher.osgi.OsgiLauncherImpl.java

@Override
public void initOsgi() {
    synchronized (reloadLock) {
        final Stopwatch startupTimer = Stopwatch.createStarted();
        BrooklynShutdownHooks.resetShutdownFlag();
        LOG.debug("OsgiLauncher init, catalog " + defaultCatalogLocation);
        catalogInitialization(new CatalogInitialization(String.format("file:%s", defaultCatalogLocation)));
        startPartOne();
        startupTimer.stop();
        LOG.info("Brooklyn initialisation (part one) complete after {}", startupTimer.toString());
    }
}

From source file:de.hybris.platform.acceleratorcms.component.slot.impl.DefaultCMSPageSlotComponentService.java

@Override
public void renderComponent(final PageContext pageContext, final AbstractCMSComponentModel component)
        throws ServletException, IOException {
    validateParameterNotNull(pageContext, "Parameter pageContext must not be null");
    validateParameterNotNull(component, "Parameter component must not be null");

    if (LOG.isDebugEnabled()) {
        final Stopwatch stopwatch = new Stopwatch();

        stopwatch.start();
        getCmsComponentRenderer().renderComponent(pageContext, component);
        stopwatch.stop();

        if (stopwatch.elapsedMillis() > 1) {
            LOG.debug("Rendered component [" + component.getUid() + "] of type [" + component.getItemtype()
                    + "].. (" + stopwatch.toString() + ")");
        }
    } else {
        getCmsComponentRenderer().renderComponent(pageContext, component);
    }
}
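
Note that this example was written against an older Guava release: the public Stopwatch constructor and elapsedMillis() used above were deprecated and later removed in newer Guava versions. A hedged sketch of the same timing pattern against the current Stopwatch API (createStarted() and elapsed(TimeUnit)) might look like the following; doWork() is a hypothetical stand-in for the rendering call being timed:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class ThresholdLoggingExample {
    public static void main(String[] args) {
        // createStarted() replaces the deprecated public constructor plus start().
        Stopwatch stopwatch = Stopwatch.createStarted();
        doWork();
        stopwatch.stop();

        // elapsed(TimeUnit.MILLISECONDS) replaces the deprecated elapsedMillis().
        if (stopwatch.elapsed(TimeUnit.MILLISECONDS) > 1) {
            // String concatenation calls toString() implicitly.
            System.out.println("Work finished in " + stopwatch);
        }
    }

    private static void doWork() {
        // Hypothetical placeholder for the work being timed.
        for (int i = 0; i < 1_000_000; i++) {
            Math.sqrt(i);
        }
    }
}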

From source file:co.cask.cdap.examples.wordcount.RetrieveCountsHandler.java

/**
 * Returns the counts for all words in the input.  The request body is expected to contain
 * a comma-separated list of words.
 */
@Path("counts")
@POST
public void getCounts(HttpServiceRequest request, HttpServiceResponder responder) {
    String wordString = Charsets.UTF_8.decode(request.getContent()).toString();
    String[] words = wordString.split(",");
    Map<String, Long> wordCounts = Maps.newHashMap();
    Stopwatch timer = new Stopwatch().start();
    for (int i = 0; i < words.length; i++) {
        byte[] countBytes = wordCountsTable.read(Bytes.toBytes(words[i]));
        long count = countBytes != null ? Bytes.toLong(countBytes) : 0;
        wordCounts.put(words[i], count);
    }
    timer.stop();
    Map<String, Object> responseBody = Maps.newHashMap();
    responseBody.put("counts", wordCounts);
    responseBody.put("elapsed", timer.toString());
    responder.sendJson(responseBody);
}

From source file:org.jetbrains.android.inspections.lint.DomPsiConverter.java

/**
 * Convert the given {@link XmlFile} to a DOM tree
 *
 * @param xmlFile the file to be converted
 * @return a corresponding W3C DOM tree
 */
@Nullable
public static Document convert(@NotNull XmlFile xmlFile) {
    try {
        XmlDocument xmlDocument = xmlFile.getDocument();
        if (xmlDocument == null) {
            return null;
        }

        @SuppressWarnings("UnusedAssignment")
        Stopwatch timer;
        if (BENCHMARK) {
            timer = new Stopwatch();
            timer.start();
        }

        Document document = convert(xmlDocument);

        if (BENCHMARK) {
            timer.stop();
            //noinspection UseOfSystemOutOrSystemErr
            System.out.println("Creating PSI for " + xmlFile.getName() + " took " + timer.elapsedMillis()
                    + "ms (" + timer.toString() + ")");
        }

        return document;
    } catch (Exception e) {
        String path = xmlFile.getName();
        VirtualFile virtualFile = xmlFile.getVirtualFile();
        if (virtualFile != null) {
            path = virtualFile.getPath();
        }
        throw new RuntimeException("Could not convert file " + path, e);
    }
}

From source file:co.cask.cdap.examples.wordcount.RetrieveCountsHandler.java

/**
 * Returns the counts for all words in the input.  The request body is expected to contain
 * a comma-separated list of words.
 *
 * <p>
 * This endpoint method differs from {@link RetrieveCountsHandler#getCounts(HttpServiceRequest,HttpServiceResponder)}
 * in using {@link KeyValueTable#readAll(byte[][])} to perform a batched read.
 * </p>
 */
@Path("multicounts")
@POST
public void getMultiCounts(HttpServiceRequest request, HttpServiceResponder responder) {
    String wordString = Charsets.UTF_8.decode(request.getContent()).toString();
    String[] words = wordString.split(",");
    byte[][] wordBytes = new byte[words.length][];
    for (int i = 0; i < words.length; i++) {
        wordBytes[i] = Bytes.toBytes(words[i]);
    }
    Stopwatch timer = new Stopwatch().start();
    Map<byte[], byte[]> results = wordCountsTable.readAll(wordBytes);
    Map<String, Long> wordCounts = Maps.newHashMap();
    for (Map.Entry<byte[], byte[]> entry : results.entrySet()) {
        byte[] val = entry.getValue();
        wordCounts.put(Bytes.toString(entry.getKey()), val != null ? Bytes.toLong(entry.getValue()) : 0);
    }
    timer.stop();
    Map<String, Object> response = Maps.newHashMap();
    response.put("counts", wordCounts);
    response.put("elapsed", timer.toString());
    responder.sendJson(response);
}

From source file:fi.helsinki.moodi.service.synchronize.SynchronizationService.java

public SynchronizationSummary synchronize(final SynchronizationType type) {

    if (synchronizationJobRunService.isSynchronizationInProgress()) {
        throw new SynchronizationInProgressException(type);
    }

    final Stopwatch stopwatch = Stopwatch.createStarted();
    final long jobId = begin(type);

    logger.info("Synchronization of type {} started with jobId {}", type, jobId);

    final List<Course> courses = loadCourses(type);
    final List<SynchronizationItem> items = makeItems(courses, type);
    final List<SynchronizationItem> enrichedItems = enrichItems(items);
    final List<SynchronizationItem> processedItems = processItems(enrichedItems);

    final SynchronizationSummary summary = complete(type, jobId, stopwatch, processedItems);

    logger.info("Synchronization with jobId {} completed in {}", jobId, stopwatch.toString());

    applyNotifiers(processedItems);

    return logSummary(summary);
}

From source file:com.android.builder.core.DexByteCodeConverter.java

private void dexOutOfProcess(@NonNull final DexProcessBuilder builder, @NonNull final DexOptions dexOptions,
        @NonNull final ProcessOutputHandler processOutputHandler)
        throws ProcessException, InterruptedException {
    final String submission = Joiner.on(',').join(builder.getInputs());
    mLogger.info("Dexing out-of-process : %s", submission);
    try {
        Callable<Void> task = () -> {
            JavaProcessInfo javaProcessInfo = builder.build(mTargetInfo.getBuildTools(), dexOptions);
            ProcessResult result = mJavaProcessExecutor.execute(javaProcessInfo, processOutputHandler);
            result.rethrowFailure().assertNormalExitValue();
            return null;
        };

        Stopwatch stopwatch = Stopwatch.createStarted();
        // this is a hack, we always spawn a new process for dependencies.jar so it does
        // get built in parallel with the slices, this is only valid for InstantRun mode.
        if (submission.contains("dependencies.jar")) {
            task.call();
        } else {
            sDexExecutorService.submit(task).get();
        }
        mLogger.info("Dexing %s took %s.", submission, stopwatch.toString());
    } catch (Exception e) {
        throw new ProcessException(e);
    }
}

From source file:es.usc.citius.composit.core.composition.optimization.FunctionalDominanceOptimizer.java

public ServiceMatchNetwork<E, T> optimize(ServiceMatchNetwork<E, T> network) {
    // Analyze functional dominance between services. This optimization
    // identifies all dominant services using the semantic inputs and outputs
    // and the existing matches between the concepts in the graph.
    Stopwatch globalWatch = Stopwatch.createStarted();
    Stopwatch localWatch = Stopwatch.createUnstarted();
    List<Set<Operation<E>>> optimized = new ArrayList<Set<Operation<E>>>(network.numberOfLevels());
    log.debug("Starting functional dominance optimization...");
    for (int i = 0; i < network.numberOfLevels(); i++) {
        // Analyze input dominance
        log.debug(" > Analyzing functional dominance on {} (network level {})", network.getOperationsAtLevel(i),
                i);
        localWatch.start();
        Collection<Collection<Operation<E>>> groups = functionalInputEquivalence(network, i);
        localWatch.stop();
        log.debug("\t\tInput equivalence groups: {} (computed in {})", groups, localWatch.toString());
        localWatch.reset();
        // For each equivalent group in this level, check the output dominance
        Set<Operation<E>> nonDominatedServices = new HashSet<Operation<E>>();
        for (Collection<Operation<E>> group : groups) {
            log.debug("\t\tAnalyzing output dominance for group {}", group);
            localWatch.start();
            Collection<Collection<Operation<E>>> nonDominatedGroups = functionalOutputDominance(group, network,
                    i);
            localWatch.stop();
            log.debug("\t\t\t+ Non-dominated groups detected: {} (computed in {})", nonDominatedGroups,
                    localWatch.toString());
            log.debug("\t\t\t+ Size before / after output dominance {}/{}", group.size(),
                    nonDominatedGroups.size());
            // Pick one non dominated service for each group randomly.
            for (Collection<Operation<E>> ndGroup : nonDominatedGroups) {
                Operation<E> representant = ndGroup.iterator().next();
                log.debug("\t\t\t\t- {} has been selected as the representative service of the group {}",
                        representant, ndGroup);
                nonDominatedServices.add(representant);
            }
        }
        optimized.add(nonDominatedServices);
    }
    localWatch.reset().start();
    DirectedAcyclicSMN<E, T> optimizedNetwork = new DirectedAcyclicSMN<E, T>(
            new HashLeveledServices<E>(optimized), network);
    localWatch.stop();
    log.debug(" > Functional optimized match network computed in {}", localWatch.toString());
    log.debug("Functional Dominance Optimization done in {}. Size before/after {}/{}.",
            globalWatch.stop().toString(), network.listOperations().size(),
            optimizedNetwork.listOperations().size());
    return optimizedNetwork;
}

From source file:org.opendaylight.controller.cluster.raft.RaftActorRecoverySupport.java

private void onRecoveredSnapshot(SnapshotOffer offer) {
    if (log.isDebugEnabled()) {
        log.debug("{}: SnapshotOffer called..", context.getId());
    }

    initRecoveryTimer();

    Snapshot snapshot = (Snapshot) offer.snapshot();

    for (ReplicatedLogEntry entry : snapshot.getUnAppliedEntries()) {
        if (isMigratedPayload(entry)) {
            hasMigratedDataRecovered = true;
        }
    }

    if (!context.getPersistenceProvider().isRecoveryApplicable()) {
        // We may have just transitioned to disabled and have a snapshot containing state data and/or log
        // entries - we don't want to preserve these, only the server config and election term info.

        snapshot = Snapshot.create(new byte[0], Collections.emptyList(), -1, -1, -1, -1,
                snapshot.getElectionTerm(), snapshot.getElectionVotedFor(), snapshot.getServerConfiguration());
    }

    // Create a replicated log with the snapshot information
    // The replicated log can be used later on to retrieve this snapshot
    // when we need to install it on a peer

    context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context));
    context.setLastApplied(snapshot.getLastAppliedIndex());
    context.setCommitIndex(snapshot.getLastAppliedIndex());
    context.getTermInformation().update(snapshot.getElectionTerm(), snapshot.getElectionVotedFor());

    Stopwatch timer = Stopwatch.createStarted();

    // Apply the snapshot to the actors state
    cohort.applyRecoverySnapshot(snapshot.getState());

    if (snapshot.getServerConfiguration() != null) {
        context.updatePeerIds(snapshot.getServerConfiguration());

        if (isMigratedSerializable(snapshot.getServerConfiguration())) {
            hasMigratedDataRecovered = true;
        }
    }

    timer.stop();
    log.info("Recovery snapshot applied for {} in {}: snapshotIndex={}, snapshotTerm={}, journal-size={}",
            context.getId(), timer.toString(), replicatedLog().getSnapshotIndex(),
            replicatedLog().getSnapshotTerm(), replicatedLog().size());
}

From source file:com.github.steveash.jg2p.aligntag.AlignTagTrainer.java

private TransducerTrainer trainOnce(Pipe pipe, InstanceList trainData) {
    Stopwatch watch = Stopwatch.createStarted();

    CRF crf = new CRF(pipe, null);
    crf.addOrderNStates(trainData, new int[] { 1 }, null, null, null, null, false);
    crf.addStartState();
    //    crf.addStat(trainData);
    //    crf.addFullyConnectedStatesForBiLabels();
    //      crf.addFullyConnectedStatesForLabels();
    //    crf.setWeightsDimensionAsIn(trainData, false);

    log.info("Starting training...");
    CRFTrainerByThreadedLabelLikelihood trainer = new CRFTrainerByThreadedLabelLikelihood(crf, 8);
    trainer.setGaussianPriorVariance(2);
    trainer.train(trainData);
    trainer.shutdown();
    watch.stop();

    log.info("CRF Training took " + watch.toString());
    return trainer;
}