Example usage for com.google.common.base Stopwatch start

List of usage examples for com.google.common.base Stopwatch start

Introduction

On this page you can find example usage for com.google.common.base Stopwatch start.

Prototype

public Stopwatch start() 

Document

Starts the stopwatch. Returns this Stopwatch instance for chaining; throws an IllegalStateException if the stopwatch is already running.
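
Before the real-world examples below, here is a minimal, self-contained sketch of the start() lifecycle; the class name StopwatchStartDemo is illustrative only:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchStartDemo {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.createUnstarted(); // factory method; replaces the old public constructor
        stopwatch.start();             // returns this, so Stopwatch.createUnstarted().start() also works
        Thread.sleep(100);             // stand-in for the work being timed
        System.out.println("elapsed: " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        stopwatch.stop();              // stops timing; the elapsed count is retained
        stopwatch.reset().start();     // reset() zeroes the count, then start() begins timing again
        // Calling start() on an already-running stopwatch throws IllegalStateException.
    }
}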

Usage

From source file:com.google.caliper.runner.worker.WorkerRunner.java

/**
 * Starts up the worker process and runs it to completion, processing data received from it with
 * the provided {@link WorkerProcessor}. Returns the result object produced by the processor.
 */
public R runWorker() {
    checkState(worker.state() == State.NEW, "You can only invoke the run loop once");

    // logger must be opened before starting worker
    WorkerOutputLogger workerLogger = worker.outputLogger();
    try {
        workerLogger.open();
    } catch (IOException e) {
        throw processor.newWorkerException(
                String.format("Failed to open output logger for worker [%s].", worker.name()), e);
    }
    outputFile = workerLogger.outputFile();

    worker.startAsync();
    try {
        workerLogger.printHeader();

        long timeLimitNanos = processor.timeLimit().to(NANOSECONDS);
        Stopwatch stopwatch = Stopwatch.createUnstarted();

        worker.awaitRunning();
        worker.sendRequest();

        stopwatch.start();
        while (!done) {
            Worker.StreamItem item;
            try {
                item = worker.readItem(timeLimitNanos - stopwatch.elapsed(NANOSECONDS), NANOSECONDS);
            } catch (InterruptedException e) {
                // Someone has asked us to stop (via Futures.cancel?).
                if (!doneProcessing) {
                    throw processor
                            .newWorkerException(formatError(processor.getInterruptionErrorMessage(worker)), e);
                }
                logger.log(Level.WARNING,
                        // Yes, we're doing the formatting eagerly here even though the log level might not
                        // be enabled. It seems like a small sacrifice in this case for more readable code.
                        formatError(
                                "Worker [%s] cancelled before completing normally, but after getting results.",
                                worker));
                done = true;
                break;
            }

            switch (item.kind()) {
            case DATA:
                doneProcessing = processor.handleMessage(item.content(), worker);
                if (doneProcessing) {
                    // The worker should be done now; give it WORKER_CLEANUP_DURATION nanos to finish
                    // shutting down.
                    long cleanupTimeNanos = MILLISECONDS.toNanos(WORKER_CLEANUP_DURATION.getMillis());
                    timeLimitNanos = stopwatch.elapsed(NANOSECONDS) + cleanupTimeNanos;
                }
                break;
            case EOF:
                // We consider EOF to be synonymous with worker shutdown
                if (!doneProcessing) {
                    throw processor.newWorkerException(
                            formatError(processor.getPrematureExitErrorMessage(worker)), null);
                }
                done = true;
                break;
            case TIMEOUT:
                if (!doneProcessing) {
                    throw processor.newWorkerException(formatError(processor.getTimeoutErrorMessage(worker)),
                            null);
                }
                logger.log(Level.WARNING,
                        formatError("Worker [%s] failed to exit cleanly within the allotted time.", worker));
                done = true;
                break;
            default:
                throw new AssertionError("Impossible item: " + item);
            }
        }

        return processor.getResult();
    } catch (WorkerException e) {
        throw e;
    } catch (Throwable e) {
        logger.severe(formatError("Unexpected error while running worker [%s].", worker));
        Throwables.throwIfUnchecked(e);
        throw new RuntimeException(e);
    } finally {
        worker.stopAsync();
        try {
            workerLogger.ensureFileIsSaved();
        } finally {
            workerLogger.close();
        }
    }
}
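
The example above treats the running stopwatch as a countdown budget: each blocking read is allotted timeLimitNanos - stopwatch.elapsed(NANOSECONDS), so the waits can never total more than the limit, and handling the final message extends the deadline just enough for worker cleanup. A stripped-down sketch of that pattern, assuming a plain BlockingQueue in place of Caliper's worker stream:

import com.google.common.base.Stopwatch;
import java.util.concurrent.BlockingQueue;
import static java.util.concurrent.TimeUnit.NANOSECONDS;

public class DeadlineLoop {
    /** Drains the queue until the total time budget is spent. */
    static void drainWithBudget(BlockingQueue<String> queue, long budgetNanos) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.createStarted();
        while (true) {
            long remainingNanos = budgetNanos - stopwatch.elapsed(NANOSECONDS);
            String item = queue.poll(remainingNanos, NANOSECONDS); // a non-positive wait returns immediately
            if (item == null) {
                System.out.println("budget exhausted after " + stopwatch);
                return;
            }
            System.out.println("processed: " + item);
        }
    }
}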

From source file:org.apache.drill.exec.client.QuerySubmitter.java

public int submitQuery(DrillClient client, String plan, String type, String format, int width)
        throws Exception {

    PrintingResultsListener listener;

    String[] queries;
    QueryType queryType;
    type = type.toLowerCase();
    switch (type) {
    case "sql":
        queryType = QueryType.SQL;
        queries = plan.trim().split(";");
        break;
    case "logical":
        queryType = QueryType.LOGICAL;
        queries = new String[] { plan };
        break;
    case "physical":
        queryType = QueryType.PHYSICAL;
        queries = new String[] { plan };
        break;
    default:
        System.out.println("Invalid query type: " + type);
        return -1;
    }

    Format outputFormat;
    format = format.toLowerCase();
    switch (format) {
    case "csv":
        outputFormat = Format.CSV;
        break;
    case "tsv":
        outputFormat = Format.TSV;
        break;
    case "table":
        outputFormat = Format.TABLE;
        break;
    default:
        System.out.println("Invalid format type: " + format);
        return -1;
    }
    Stopwatch watch = new Stopwatch();
    for (String query : queries) {
        listener = new PrintingResultsListener(client.getConfig(), outputFormat, width);
        watch.start();
        client.runQuery(queryType, query, listener);
        int rows = listener.await();
        System.out.println(String.format("%d record%s selected (%f seconds)", rows, rows > 1 ? "s" : "",
                (float) watch.elapsed(TimeUnit.MILLISECONDS) / (float) 1000));
        if (query != queries[queries.length - 1]) {
            System.out.println();
        }
        watch.stop();
        watch.reset();
    }
    return 0;

}
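
Note that this example, like the ElasticsearchQueryStore and Email examples further down, was written against a pre-Guava-15 release: new Stopwatch(), elapsedMillis(), and elapsedTime(TimeUnit) were deprecated in favor of the createStarted()/createUnstarted() factory methods and elapsed(TimeUnit), and were removed in later Guava releases.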

From source file:org.terasology.cities.debug.SwingRasterizer.java

/**
 * @param g the graphics object
 * @param sector the sector to render
 */
public void rasterizeSector(Graphics2D g, Sector sector) {

    Stopwatch sw = debugMap.getUnchecked(sector.toString());
    sw.start();

    drawCityNames(g, sector);
    drawLakes(g, sector);
    drawFrame(g, sector);
    drawSectorText(g, sector);

    sw.stop();
}
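
Here the per-sector stopwatch comes out of a cache (debugMap) and is never reset, so each start()/stop() pair adds to a running total: the stopwatch accumulates the overall time spent rasterizing that sector across all calls. A minimal sketch of that accumulating behavior:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class AccumulatingStopwatch {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch sw = Stopwatch.createUnstarted();
        for (int i = 0; i < 3; i++) {
            sw.start();
            Thread.sleep(50);   // stand-in for one rasterization pass
            sw.stop();          // the elapsed count carries over to the next start()
        }
        // Prints roughly 150, because start() resumes timing rather than restarting it.
        System.out.println(sw.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}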

From source file:es.usc.citius.composit.core.composition.optimization.BackwardMinimizationOptimizer.java

@Override
public ServiceMatchNetwork<E, T> optimize(ServiceMatchNetwork<E, T> network) {
    Stopwatch globalWatch = Stopwatch.createStarted();
    Stopwatch localWatch = Stopwatch.createUnstarted();
    Set<E> newInputs = new HashSet<E>();
    List<Set<Operation<E>>> optimized = new ArrayList<Set<Operation<E>>>(network.numberOfLevels());
    log.debug("Starting service-backward optimization...");
    localWatch.start();
    for (int i = network.numberOfLevels() - 1; i >= 0; i--) {
        Set<Operation<E>> current = network.getOperationsAtLevel(i);
        log.debug(" > Analyzing network level {} : {}", i, current);
        Set<Operation<E>> optimizedSet = new HashSet<Operation<E>>();
        Set<E> futureInputs = new HashSet<E>();
        // Find all services that produces at least one of the required inputs. If new inputs is
        // empty, then select all
        for (Operation<E> op : current) {
            log.debug("\t\tChecking operation {}", op.getID());
            if (newInputs.isEmpty()) {
                futureInputs.addAll(op.getSignature().getInputs());
                optimizedSet.add(op);
                log.debug("\t\t+ {} selected as a mandatory operation", op.getID());
            } else {
                boolean used = false;
                next: for (E output : op.getSignature().getOutputs()) {
                    for (E input : newInputs) {
                        used = network.match(output, input) != null;
                        if (used) {
                            log.debug(
                                    "\t\t+ Operation {} marked as useful (match detected between output {} and input {})",
                                    op.getID(), output, input);
                            optimizedSet.add(op);
                            // Update new inputs
                            futureInputs.addAll(op.getSignature().getInputs());
                            break next;
                        }
                    }
                }
                if (!used)
                    log.debug("\t\t- Operation {} marked as useless", op.getID());
            }
            //log.debug(" Inputs for the next iteration: {}", futureInputs);
        }
        newInputs.addAll(futureInputs);
        optimized.add(optimizedSet);
    }
    Collections.reverse(optimized);
    // Create a new match network
    localWatch.reset().start();
    ServiceMatchNetwork<E, T> optimizedNetwork = new DirectedAcyclicSMN<E, T>(
            new HashLeveledServices<E>(optimized), network);
    localWatch.stop();
    log.debug(" > Optimized match network created in {}", localWatch.toString());
    log.debug("Backward Optimization done in {}. Size before/after {}/{}", globalWatch.stop().toString(),
            network.listOperations().size(), optimizedNetwork.listOperations().size());
    // Create a new optimized service match network
    return optimizedNetwork;
}

From source file:com.flipkart.foxtrot.core.querystore.impl.ElasticsearchQueryStore.java

@Override
public void save(String table, List<Document> documents) throws QueryStoreException {
    table = ElasticsearchUtils.getValidTableName(table);
    try {
        if (!tableMetadataManager.exists(table)) {
            throw new QueryStoreException(QueryStoreException.ErrorCode.NO_SUCH_TABLE,
                    "No table exists with the name: " + table);
        }
        if (documents == null || documents.size() == 0) {
            throw new QueryStoreException(QueryStoreException.ErrorCode.INVALID_REQUEST,
                    "Invalid Document List");
        }
        dataStore.save(tableMetadataManager.get(table), documents);
        BulkRequestBuilder bulkRequestBuilder = connection.getClient().prepareBulk();

        DateTime dateTime = new DateTime().plusDays(1);

        for (Document document : documents) {
            long timestamp = document.getTimestamp();
            if (dateTime.minus(timestamp).getMillis() < 0) {
                continue;
            }
            final String index = ElasticsearchUtils.getCurrentIndex(table, timestamp);
            IndexRequest indexRequest = new IndexRequest().index(index).type(ElasticsearchUtils.TYPE_NAME)
                    .id(document.getId()).timestamp(Long.toString(timestamp))
                    .source(mapper.writeValueAsBytes(document.getData()));
            bulkRequestBuilder.add(indexRequest);
        }
        if (bulkRequestBuilder.numberOfActions() > 0) {
            Stopwatch stopwatch = new Stopwatch();
            stopwatch.start();
            BulkResponse responses = bulkRequestBuilder.setConsistencyLevel(WriteConsistencyLevel.QUORUM)
                    .execute().get(10, TimeUnit.SECONDS);
            logger.info(String.format("ES took : %d table : %s", stopwatch.elapsedMillis(), table));
            int failedCount = 0;
            for (int i = 0; i < responses.getItems().length; i++) {
                BulkItemResponse itemResponse = responses.getItems()[i];
                failedCount += (itemResponse.isFailed() ? 1 : 0);
                if (itemResponse.isFailed()) {
                    logger.error(String.format("Table : %s Failure Message : %s Document : %s", table,
                            itemResponse.getFailureMessage(), mapper.writeValueAsString(documents.get(i))));
                }
            }
            if (failedCount > 0) {
                logger.error(String.format("Table : %s Failed Documents : %d", table, failedCount));
            }
        }
    } catch (QueryStoreException ex) {
        throw ex;
    } catch (DataStoreException ex) {
        DataStoreException.ErrorCode code = ex.getErrorCode();
        if (code.equals(DataStoreException.ErrorCode.STORE_INVALID_REQUEST)
                || code.equals(DataStoreException.ErrorCode.STORE_INVALID_DOCUMENT)) {
            throw new QueryStoreException(QueryStoreException.ErrorCode.INVALID_REQUEST, ex.getMessage(), ex);
        } else {
            throw new QueryStoreException(QueryStoreException.ErrorCode.DOCUMENT_SAVE_ERROR, ex.getMessage(),
                    ex);
        }
    } catch (JsonProcessingException ex) {
        throw new QueryStoreException(QueryStoreException.ErrorCode.INVALID_REQUEST, ex.getMessage(), ex);
    } catch (Exception ex) {
        throw new QueryStoreException(QueryStoreException.ErrorCode.DOCUMENT_SAVE_ERROR, ex.getMessage(), ex);
    }
}

From source file:com.springer.omelet.mail.Email.java

@Override
public List<Message> filerEmailsBySubject(List<Message> message, String emailSubject) {
    Stopwatch sw = new Stopwatch();
    sw.start();
    List<Message> returnMessage = new ArrayList<Message>();
    LOGGER.info("Count of the message for filter by Subject" + message.size());
    for (Message msg : message) {
        try {
            if (msg.getSubject().equalsIgnoreCase(emailSubject)) {
                returnMessage.add(msg);
            }
        } catch (MessagingException e) {
            // TODO Auto-generated catch block
            LOGGER.error(e);
        }
    }
    sw.stop();
    LOGGER.info("Time Taken by Filter EmailBy Subjects is:" + sw.elapsedTime(TimeUnit.SECONDS));
    return returnMessage;
}

From source file:org.ow2.proactive.scheduler.task.executors.InProcessTaskExecutor.java

/**
 * Executes a task inside a task context.
 *
 * @param taskContext Task context to execute.
 * @param output      Standard output sink.
 * @param error       Error sink.
 * @return Returns the task result.
 */
@Override
public TaskResultImpl execute(TaskContext taskContext, PrintStream output, PrintStream error) {
    ScriptHandler scriptHandler = ScriptLoader.createLocalHandler();
    String nodesFile = null;
    SchedulerNodeClient schedulerNodeClient = null;
    RemoteSpace userSpaceClient = null;
    RemoteSpace globalSpaceClient = null;
    try {
        nodesFile = writeNodesFile(taskContext);
        VariablesMap variables = new VariablesMap();
        variables.setInheritedMap(taskContextVariableExtractor.extractVariables(taskContext, nodesFile, false));
        variables.setScopeMap(taskContextVariableExtractor.extractScopeVariables(taskContext));
        Map<String, String> resultMetadata = new HashMap<>();
        Map<String, String> thirdPartyCredentials = forkedTaskVariablesManager
                .extractThirdPartyCredentials(taskContext);
        schedulerNodeClient = forkedTaskVariablesManager.createSchedulerNodeClient(taskContext);
        userSpaceClient = forkedTaskVariablesManager.createDataSpaceNodeClient(taskContext, schedulerNodeClient,
                IDataSpaceClient.Dataspace.USER);
        globalSpaceClient = forkedTaskVariablesManager.createDataSpaceNodeClient(taskContext,
                schedulerNodeClient, IDataSpaceClient.Dataspace.GLOBAL);

        forkedTaskVariablesManager.addBindingsToScriptHandler(scriptHandler, taskContext, variables,
                thirdPartyCredentials, schedulerNodeClient, userSpaceClient, globalSpaceClient, resultMetadata);

        Stopwatch stopwatch = Stopwatch.createUnstarted();
        TaskResultImpl taskResult;
        try {
            stopwatch.start();
            Serializable result = execute(taskContext, output, error, scriptHandler, thirdPartyCredentials,
                    variables);
            stopwatch.stop();
            taskResult = new TaskResultImpl(taskContext.getTaskId(), result, null,
                    stopwatch.elapsed(TimeUnit.MILLISECONDS));
        } catch (Throwable e) {
            stopwatch.stop();
            e.printStackTrace(error);
            taskResult = new TaskResultImpl(taskContext.getTaskId(), e, null,
                    stopwatch.elapsed(TimeUnit.MILLISECONDS));
        }

        executeFlowScript(taskContext.getControlFlowScript(), scriptHandler, output, error, taskResult);

        taskResult.setPropagatedVariables(
                SerializationUtil.serializeVariableMap(variables.getPropagatedVariables()));
        taskResult.setMetadata(resultMetadata);

        return taskResult;
    } catch (Throwable e) {
        e.printStackTrace(error);
        return new TaskResultImpl(taskContext.getTaskId(), e);
    } finally {
        if (nodesFile != null && !nodesFile.isEmpty()) {
            FileUtils.deleteQuietly(new File(nodesFile));
        }
    }
}

From source file:com.davidbracewell.ml.sequence.indexers.DoublePassIndexer.java

@Override
public List<List<Instance>> index(List<Sequence<V>> data) {
    Resource tempFile = Resources.temporaryFile();
    tempFile.deleteOnExit();
    Set<String> keep = Sets.newHashSet();
    log.info("Beginning indexing...");
    Stopwatch sw = Stopwatch.createStarted();
    count(data, keep, tempFile);
    sw.stop();
    log.info("Finished indexing in {0}", sw);
    log.info("Beginning construction of instances...");
    sw.reset();
    sw.start();
    List<List<Instance>> instances = build(keep, tempFile, data.iterator().next().getFeatures());
    extractors.setFeatures(instances.get(0).get(0).getFeatures());
    extractors.getFeatures().freeze();
    sw.stop();
    log.info("Finished construction of instances in {0}", sw);
    return instances;
}
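
Because every Stopwatch mutator returns this, the sw.reset(); sw.start(); pair here can be collapsed to sw.reset().start(), as the BackwardMinimizationOptimizer example above does. Logging the stopwatch object itself works because Stopwatch.toString() renders a human-readable duration such as 38.21 ms.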

From source file:com.google.pubsub.clients.kafka.KafkaPublisherTask.java

@Override
public void run() {
    Stopwatch stopwatch = Stopwatch.createUnstarted();
    Callback callback = (metadata, exception) -> {
        if (exception != null) {
            log.error(exception.getMessage(), exception);
            return;
        }
        addNumberOfMessages(1);
        metricsHandler.recordLatency(stopwatch.elapsed(TimeUnit.MILLISECONDS));
    };
    stopwatch.start();
    for (int i = 0; i < batchSize; i++) {
        publisher.send(new ProducerRecord<>(topic, null, System.currentTimeMillis(), null, payload), callback);
    }
    publisher.flush();
    stopwatch.stop();
}
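
Note that the callback reads the still-running stopwatch when each acknowledgement arrives, so each recorded latency is measured from the start of the batch send loop rather than from that message's own send() call; a single shared stopwatch cannot provide true per-message send latencies.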

From source file:es.usc.citius.composit.core.composition.search.ForwardServiceDiscoverer.java

public ServiceMatchNetwork<E, T> search(Signature<E> signature) {
    Set<E> availableInputs = new HashSet<E>(signature.getInputs());
    Set<E> newOutputs = new HashSet<E>(signature.getInputs());
    Set<E> unmatchedOutputs = new HashSet<E>(signature.getOutputs());
    Set<Operation<E>> usedServices = new HashSet<Operation<E>>();
    Map<Operation<E>, Set<E>> unmatchedInputMap = new HashMap<Operation<E>, Set<E>>();
    List<Set<Operation<E>>> leveledOps = new LinkedList<Set<Operation<E>>>();

    boolean checkExpectedOutputs = !signature.getOutputs().isEmpty();
    boolean stop;

    Stopwatch timer = Stopwatch.createStarted();
    Stopwatch levelTimer = Stopwatch.createUnstarted();
    int level = 0;
    do {
        HashSet<Operation<E>> candidates = new HashSet<Operation<E>>();
        levelTimer.start();
        candidates.addAll(discovery.findOperationsConsumingSome(newOutputs));
        log.info("(Level {}) {} potential candidates selected in {}", level++, candidates.size(),
                levelTimer.toString());
        // Remove services that cannot be invoked with the available inputs
        for (Iterator<Operation<E>> it = candidates.iterator(); it.hasNext();) {
            Operation<E> candidate = it.next();
            // Retrieve the unmatched inputs for this operation
            Set<E> unmatchedInputs = unmatchedInputMap.get(candidate);
            if (unmatchedInputs == null) {
                unmatchedInputs = candidate.getSignature().getInputs();
            }
            // Check if the new concepts match some unmatched inputs
            Set<E> matched = matcher.partialMatch(newOutputs, unmatchedInputs).getTargetElements();

            // Don't check invokability
            if (relaxedMatchCondition) {
                // Remove only if there is no match at all
                if (matched.isEmpty()) {
                    it.remove();
                } else {
                    boolean isNew = usedServices.add(candidate);
                    if (!isNew)
                        it.remove();
                }
            } else {
                // Update the unmatchedInputs
                unmatchedInputs = Sets.newHashSet(Sets.difference(unmatchedInputs, matched));
                unmatchedInputMap.put(candidate, unmatchedInputs);
                // If there are no unmatched inputs, the service is invokable!
                if (!unmatchedInputs.isEmpty()) {
                    it.remove();
                } else {
                    // Invokable operation, check if it was used previously
                    boolean isNew = usedServices.add(candidate);
                    if (!isNew)
                        it.remove();
                }
            }
        }
        log.info("\t + [{}] operations selected for this level in {}", candidates.size(),
                levelTimer.toString());
        log.debug("\t\t Candidates: {}", candidates);

        // Collect the new outputs of the new candidates
        Set<E> nextOutputs = Operations.outputs(candidates);

        // Check unmatched outputs
        Set<E> matchedOutputs = matcher.partialMatch(Sets.union(newOutputs, nextOutputs), unmatchedOutputs)
                .getTargetElements();
        //Set<Resource> matchedOutputs = matcher.matched(newOutputs, unmatchedOutputs);
        // Update the unmatched outputs
        unmatchedOutputs = Sets.newHashSet(Sets.difference(unmatchedOutputs, matchedOutputs));

        // Update for the next iteration
        availableInputs.addAll(newOutputs);
        newOutputs = nextOutputs;

        // Add the discovered ops
        if (!candidates.isEmpty())
            leveledOps.add(candidates);

        log.debug("\t + Available inputs: {}, new outputs: {}", availableInputs.size(), newOutputs.size());
        // Stop condition. Stop if there are no more candidates and/or expected outputs are satisfied.
        stop = (checkExpectedOutputs) ? candidates.isEmpty() || unmatchedOutputs.isEmpty()
                : candidates.isEmpty();
        levelTimer.reset();
    } while (!stop);

    // Add the source and sink operations
    Source<E> sourceOp = new Source<E>(signature.getInputs());
    Sink<E> sinkOp = new Sink<E>(signature.getOutputs());
    leveledOps.add(0, Collections.<Operation<E>>singleton(sourceOp));
    leveledOps.add(leveledOps.size(), Collections.<Operation<E>>singleton(sinkOp));
    Stopwatch networkWatch = Stopwatch.createStarted();
    // Create a service match network with the discovered services
    DirectedAcyclicSMN<E, T> matchNetwork = new DirectedAcyclicSMN<E, T>(new HashLeveledServices<E>(leveledOps),
            this.matcher);
    log.info(" > Service match network computed in {}", networkWatch.stop().toString());
    log.info("Service Match Network created with {} levels (including source and sink) and {} operations.",
            leveledOps.size(), matchNetwork.listOperations().size());
    log.info("Forward Discovery done in {}", timer.toString());
    this.unmatchedInputMap = unmatchedInputMap;
    return matchNetwork;
}
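
The levelTimer.reset() at the end of each iteration relies on reset() both zeroing the elapsed time and placing the stopwatch in a stopped state, which is what makes the unconditional levelTimer.start() at the top of the next iteration legal.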